blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbea848b872df7e523f2a8d574cf57a13b221faa | 96c445073de38a3d1616c1336f7b74b7e5ea54e2 | /hggdp_rec/hggdp/siat/siat_multinoise.py | e9290e6787964f58483a23706605577e0965e1d6 | [] | no_license | deepdumbo/HGGDP | f2eba44c5a8c9a0d066b30b754a0077a0bf57407 | 6952d62081a896b102bebb4b309194e116bfbc8c | refs/heads/master | 2023-01-18T21:57:22.818633 | 2020-11-19T08:28:33 | 2020-11-19T08:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,943 | py | import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import math
from hggdp.models.cond_refinenet_dilated import CondRefineNetDilated
from torchvision.datasets import MNIST, CIFAR10
from torchvision import transforms
from torch.utils.data import DataLoader
from scipy.io import loadmat,savemat
import matplotlib.pyplot as plt
from hggdp.siat.compare_hfen import compare_hfen
from skimage.measure import compare_psnr,compare_ssim
import glob
import h5py
import time
savepath = './result_T80/'
__all__ = ['SIAT_MULTI_NOISE']
def show(image):
    """Display the magnitude of a (possibly complex) image in grayscale."""
    plt.figure(1)
    magnitude = np.abs(image)
    plt.imshow(magnitude, cmap='gray')
    plt.show()
def write_Data(result_all, undersample_method, undersample_factor, i):
    """Write per-image PSNR/SSIM/HFEN rows to a text file under `savepath`.

    Parameters
    ----------
    result_all : indexable of rows ``[psnr, ssim, hfen]``
    undersample_method, undersample_factor : str
        Only used to build the output file name.
    i : unused
        Kept for backward compatibility; the original implementation shadowed
        it immediately with its own loop variable.
    """
    # Original built the name with a no-op `.format()` on the ".txt" literal
    # and opened with "w+" although the file is never read back.
    fname = "psnr_" + undersample_method + undersample_factor + ".txt"
    with open(os.path.join(savepath, fname), "w") as f:
        # `idx` no longer shadows the `i` parameter as the old loop did.
        for idx in range(len(result_all)):
            f.write('current image {} PSNR : '.format(idx) + str(result_all[idx][0]) +
                    " SSIM : " + str(result_all[idx][1]) +
                    " HFEN : " + str(result_all[idx][2]))
            f.write('\n')
class SIAT_MULTI_NOISE():
    """Annealed-Langevin MRI reconstruction over the SIAT test set.

    For every (undersampling method, acceleration factor) pair, each test
    image is reconstructed by three parallel score-network chains whose
    gradients are averaged; a k-space data-consistency projection is applied
    after every step.  Best PSNR/SSIM/HFEN per image are logged via
    write_Data().
    """
    def __init__(self, args, config):
        # args: runtime paths (uses args.log, args.image_folder);
        # config: model/device configuration consumed by CondRefineNetDilated.
        self.args = args
        self.config = config
    def test(self):
        # make a network and load network weight to use later
        states = torch.load(os.path.join(self.args.log, 'checkpoint_100000.pth'), map_location=self.config.device)
        scorenet = CondRefineNetDilated(self.config).to(self.config.device)
        scorenet = torch.nn.DataParallel(scorenet, device_ids=[0])
        scorenet.load_state_dict(states[0])
        scorenet.eval()
        # prepare all test data and undersample masks
        files_list = glob.glob('./SIAT_test_image31/*.mat')
        files_list.sort()
        for undersample_method in ['radial','random','cart']:#'radial','random','cart'
            for undersample_factor in ['030','025','015','010']:#'030','025','015','010','007'
                # Rows 0..30 hold per-image metrics; row 31 holds the running mean
                # (hard-coded for a 31-image test set).
                result_all = np.zeros([32,3])
                for i,file_path in enumerate(files_list):
                    m = loadmat(file_path)
                    data=(m["Img"])
                    data=np.array(data)
                    # Normalize magnitude to [0, 1] before going to k-space.
                    data = data/np.max(np.abs(data))
                    kdata=np.fft.fft2(data)
                    print('value max min :',np.max(data),np.min(data))
                    mask=loadmat("./SIAT/mask_"+undersample_method+"_"+undersample_factor+".mat")["mask_"+undersample_method+"_"+undersample_factor]
                    mask = np.fft.fftshift(mask)
                    cv2.imwrite(os.path.join(self.args.image_folder, 'mask_.png' ),(mask*255).astype(np.uint8))
                    # Effective sampling rate of the mask (assumes 256x256 — TODO confirm).
                    print(sum(sum(mask))/(256*256))
                    ksample=np.multiply(mask,kdata)
                    # Zero-filled reconstruction used as the baseline image.
                    sdata=np.fft.ifft2(ksample)
                    self.write_images(255*np.abs(sdata), os.path.join(savepath,'img_{}_ZF_undersample_'.format(i)+undersample_method+undersample_factor+'.png'))
                    sdata=np.stack((sdata.real,sdata.imag))[np.newaxis,:,:,:]
                    self.write_images(255*np.abs(data), os.path.join(savepath,'img_{}_GT_undersample_'.format(i)+undersample_method+undersample_factor+'.png'))
                    # Three chains share one state x0; channels are (real, imag) x 3.
                    x0 = nn.Parameter(torch.Tensor(1,6,256,256).uniform_(-1,1)).cuda()
                    x01 = x0
                    x02 = x0
                    x03 = x0
                    step_lr=0.05*0.00003
                    # Noise amounts
                    sigmas = np.array([1., 0.59948425, 0.35938137, 0.21544347, 0.12915497,
                        0.07742637, 0.04641589, 0.02782559, 0.01668101, 0.01])
                    n_steps_each = 80
                    max_psnr = 0
                    max_ssim = 0
                    min_hfen = 100
                    start_start = time.time()
                    for idx, sigma in enumerate(sigmas):
                        start_out = time.time()
                        print(idx)
                        lambda_recon = 1./sigma**2  # (unused)
                        # Conditioning label selecting the noise level for the score net.
                        labels = torch.ones(1, device=x0.device) * idx
                        labels = labels.long()
                        # Standard annealed-Langevin step-size schedule.
                        step_size = step_lr * (sigma / sigmas[-1]) ** 2
                        print('sigma = {}'.format(sigma))
                        for step in range(n_steps_each):
                            start_in = time.time()
                            # NOTE(review): torch.rand_like draws UNIFORM noise; annealed
                            # Langevin dynamics normally uses Gaussian (randn_like) — confirm intent.
                            noise1 = torch.rand_like(x0)* np.sqrt(step_size * 2)
                            noise2 = torch.rand_like(x0)* np.sqrt(step_size * 2)
                            noise3 = torch.rand_like(x0)* np.sqrt(step_size * 2)
                            grad1 = scorenet(x01, labels).detach()
                            grad2 = scorenet(x02, labels).detach()
                            grad3 = scorenet(x03, labels).detach()
                            # Average the three chains' scores, then re-noise each chain.
                            x0 = x0 + step_size * (grad1 + grad2 + grad3)/3.0
                            x01 = x0 + noise1
                            x02 = x0 + noise2
                            x03 = x0 + noise3
                            x0=np.array(x0.cpu().detach(),dtype = np.float32)
                            # Even channels are real parts, odd channels imaginary parts.
                            x_real = (x0.real.squeeze()[0,:,:]+x0.real.squeeze()[2,:,:]+x0.real.squeeze()[4,:,:])/3
                            x_imag = (x0.real.squeeze()[1,:,:]+x0.real.squeeze()[3,:,:]+x0.real.squeeze()[5,:,:])/3
                            x_complex = x_real + x_imag*1j
                            # Data consistency: overwrite sampled k-space locations.
                            kx=np.fft.fft2(x_complex)
                            kx[mask==1]=ksample[mask==1]
                            x_complex = np.fft.ifft2(kx)
                            end_in = time.time()
                            print("内循环运行时间:%.2f秒"%(end_in-start_in))
                            psnr = compare_psnr(255*abs(x_complex),255*abs(data),data_range=255)
                            ssim = compare_ssim(abs(x_complex),abs(data),data_range=1)
                            hfen = compare_hfen(abs(x_complex),abs(data))
                            # Track the best metric seen so far; row 31 keeps the mean
                            # over the 31 test images.
                            if max_psnr < psnr :
                                result_all[i,0] = psnr
                                max_psnr = psnr
                                result_all[31,0] = sum(result_all[:31,0])/31
                                savemat(os.path.join(savepath,'img_{}_Rec_'.format(i)+undersample_method+undersample_factor+'.mat'),{'data':np.array(x_complex,dtype=np.complex),'ZF':sdata})
                            if max_ssim < ssim :
                                result_all[i,1] = ssim
                                max_ssim = ssim
                                result_all[31,1] = sum(result_all[:31,1])/31
                            if min_hfen > hfen :
                                result_all[i,2] = hfen
                                min_hfen = hfen
                                result_all[31,2] = sum(result_all[:31,2])/31
                            write_Data(result_all,undersample_method,undersample_factor,i)
                            print("current {} step".format(step),'PSNR :', psnr,'SSIM :', ssim,'HFEN :', hfen)
                            # Re-pack the data-consistent image into the 6-channel state.
                            x_real,x_imag = x_complex.real,x_complex.imag
                            x_real,x_imag = x_real[np.newaxis,:,:],x_imag[np.newaxis,:,:]
                            x0 = np.stack([x_real,x_imag,x_real,x_imag,x_real,x_imag],1)
                            x0 = torch.tensor(x0,dtype=torch.float32).cuda()
                        end_out = time.time()
                        print("外循环运行时间:%.2f秒"%(end_out-start_out))
                    end_end = time.time()
                    print("一张图循环运行时间:%.2f秒"%(end_end-start_start))
    def write_images(self, x,image_save_path):
        """Save an array as an 8-bit image via OpenCV."""
        x = np.array(x,dtype=np.uint8)
        cv2.imwrite(image_save_path, x)
| [
"noreply@github.com"
] | deepdumbo.noreply@github.com |
5b5eb5cda0fba8e8594dfdd2a26512967a17d5b7 | db861016e307fa7e1a57c1d07262b5d9c8051218 | /cookbook/ingredients/migrations/0001_initial.py | 91177488701b66d42e9147238624ca23682e9abb | [
"MIT"
] | permissive | mugagambi/cookbook-graphql-server | 794fedaf0d6c7fc5a7ffd21100d90c4f9ef16cba | d45044dc5e307d822e3338bcb3e4f8758c89a2f2 | refs/heads/master | 2021-01-25T14:33:55.992792 | 2018-03-03T17:18:52 | 2018-03-03T17:18:52 | 123,712,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Generated by Django 2.0.2 on 2018-03-03 13:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the ingredients app: Category and Ingredient."""

    initial = True

    # No prior migrations in this app.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('notes', models.TextField()),
                # Deleting a Category cascades to its ingredients.
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ingredients', to='ingredients.Category')),
            ],
        ),
    ]
| [
"mugagambi@gmail.com"
] | mugagambi@gmail.com |
037e535c5eaab9b8a023d131ac13eca425c05ad2 | d49bd9f3cbc4ae92ce7fc23ca73350f215027911 | /package/reader2/reader.py | 4e392fd00dca36ef0ddf2e1a449160f780c94151 | [] | no_license | haleyshi/python_learn | ce6cb850624cf23635df2a428012504f796dc7c0 | dae62dacf53f3b0d753f9e12fbb38b024b2eb731 | refs/heads/master | 2020-04-12T03:08:45.247023 | 2018-01-19T07:58:28 | 2018-01-19T07:58:28 | 46,245,154 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | class Reader:
def __init__(self, filename):
self.filename = filename
self.f = open(self.filename, 'rt')
def close(self):
self.f.close()
def read(self):
return self.f.read()
| [
"sgh1982@gmail.com"
] | sgh1982@gmail.com |
2059ac9bc021fbeced66fdc69e55069d1f886c73 | 85d8791ca630c4a57240d6deb77e9af041c3baaa | /6D Debuging.py | 8443f889f04199fb282297c77d6c2bab43f265a8 | [] | no_license | siuxoes/ComputerScienceCircle | 677f3058cdd98ca22240afec3467e0b93ed20f57 | 96a315064e04a7b9c3575ba7674bf25d7a6d660d | refs/heads/master | 2021-01-21T13:20:52.823633 | 2015-06-24T01:16:11 | 2015-06-24T01:16:11 | 37,951,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | __author__ = 'Siuxoes'
timbitsLeft = int(input()) # step 1: get the input
totalCost = 0 # step 2: initialize the total cost
# step 3: buy as many large boxes as you can
if timbitsLeft >= 40:
bigBoxes = int(timbitsLeft / 40)
totalCost = totalCost + bigBoxes * 6.19 # update the total price
timbitsLeft = timbitsLeft - 40 * bigBoxes # calculate timbits still needed
if timbitsLeft >= 20: # step 4, can we buy a medium box?
totalCost = totalCost + 3.39
timbitsLeft = timbitsLeft - 20
if timbitsLeft >= 10: # step 5, can we buy a small box?
totalCost = totalCost + 1.99
timbitsLeft = timbitsLeft - 10
totalCost = totalCost + timbitsLeft * 0.20 # step 6
print(totalCost) # step 7
| [
"sebastianleonteandroid@gmail.com"
] | sebastianleonteandroid@gmail.com |
89502b0cd5d0b335a4a18aeb229341a774ad9d71 | 44d5b0a1f411ce14181f1bc8b09e3acbc800e9e1 | /routes1.py | a1fbc536fb60722dd5222cad0edc061e93379366 | [] | no_license | mukosh123/Librarysysrtem | 5d74988af1aaec31a007f5aaddd9d8e3855a7662 | e74ed3328bc50336df28ec45fdf3775051407a27 | refs/heads/master | 2021-01-22T10:40:15.775179 | 2017-02-16T14:42:41 | 2017-02-16T14:42:41 | 82,023,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | from flask import *
import sqlite3
DATABASE = 'books.db'
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a new connection to the configured SQLite database."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
@app.route('/admin')
def admin():
    """Admin landing page: list every book (rep_title, category)."""
    # One connection per request, stashed on Flask's per-request `g` context.
    g.db = connect_db()
    cur = g.db.execute('select rep_title,category from reps')
    books = [dict(rep_title=row[0],category=row[1]) for row in cur.fetchall()]
    g.db.close()
    return render_template('admin.html',books=books)
@app.route('/userlogin', methods=['GET', 'POST'])
def userlogin():
    """User login page.

    Fixes two defects in the original: the route only accepted GET so the
    POST branch was unreachable, and the credential check used ``or``, which
    authenticated a request when only ONE of email/password matched.
    """
    if request.method == 'POST':
        if request.form['email'] == 'mukosh@yahoo.com' and request.form['password'] == 'admin':
            return redirect(url_for('users'))
    return render_template('userlogin.html')
@app.route('/users')
def users():
    """User-facing book list; same query as the admin view."""
    g.db = connect_db()
    cur = g.db.execute('select rep_title,category from reps')
    books = [dict(rep_title=row[0],category=row[1]) for row in cur.fetchall()]
    g.db.close()
    return render_template('users.html',books=books)
@app.route('/borrow', methods=['GET', 'POST'])
def borrow():
    """Borrow page: list books when a book was submitted.

    The original route only accepted GET (so its POST branch never ran) and
    fell off the end returning ``None`` for any other request, which makes
    Flask raise a 500.  Always return a rendered page instead.
    """
    books = []
    # .get() avoids a KeyError when the form field is absent.
    if request.method == 'POST' and request.form.get('book'):
        g.db = connect_db()
        cur = g.db.execute('select rep_title,category from reps')
        books = [dict(rep_title=row[0], category=row[1]) for row in cur.fetchall()]
        g.db.close()
    return render_template('borrow.html', books=books)
@app.route('/',methods=['GET','POST'])
def login():
    """Admin login: on bad credentials re-show the form with an error,
    otherwise redirect to the admin page."""
    error = None
    if request.method == 'POST':
        if request.form['email'] != 'mukosh@yahoo.com' or request.form['password']!= 'admin':
            error = 'Invalid credentials .please try again'
        else:
            return redirect (url_for('admin'))
    # Defect fix: `error` was computed but never passed to the template.
    return render_template('login.html', error=error)
# Run the Flask development server when executed directly.
if __name__== '__main__':
    app.run()
| [
"admin"
] | admin |
6d9825f22154b3af3c4694f93e30a6f7057e270a | a61564c8e32a253dde42803f8ad0c742625033ca | /demo/documented/dual-darcy/demo_dd-grouped.py | db98db1c283bc0f05eab80a381919441676e4439 | [
"BSD-2-Clause"
] | permissive | EomAA/pfibs | 5a8a4cdd440d77201d6f5786a81e6470229ad841 | 589724369b248971ba76da3f764f4b760b666761 | refs/heads/master | 2022-01-19T00:42:29.237452 | 2019-05-28T20:14:29 | 2019-05-28T20:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | """Solves dual porosity/permeability darcy equations. Employs
nested fieldsplits with schur complements using builtin function calls."""
# -*- coding: utf-8 -*-
## Future-proofing for Python3+ ##
from __future__ import print_function
## Import preliminaries ##
from dolfin import *
#import petsc4py
#petsc4py.init('-log_view')
from pfibs import *
from petsc4py import PETSc
import numpy as np
## Create mesh ##
# 40x40 unit-square mesh; each pore network gets an RT1 velocity / DG0
# pressure pair, combined into one mixed space W = [V, P, V, P].
mesh = UnitSquareMesh(40,40)
V = FiniteElement("RT",mesh.ufl_cell(),1)
P = FiniteElement("DG",mesh.ufl_cell(),0)
W = MixedElement([V,P,V,P])
W = FunctionSpace(mesh,W)
(u1,p1,u2,p2) = TrialFunctions(W)
(v1,q1,v2,q2) = TestFunctions(W)
w = Function(W)
## Boundary ##
class Left(SubDomain):
    """Subdomain for the x = 0 edge of the unit square."""
    def inside(self, x, on_boundary):
        if not on_boundary:
            return False
        return near(x[0], 0.0)
class Right(SubDomain):
    """Subdomain for the x = 1 edge of the unit square."""
    def inside(self, x, on_boundary):
        if not on_boundary:
            return False
        return near(x[0], 1.0)
class Bottom(SubDomain):
    """Subdomain for the y = 0 edge of the unit square."""
    def inside(self, x, on_boundary):
        if not on_boundary:
            return False
        return near(x[1], 0.0)
class Top(SubDomain):
    """Subdomain for the y = 1 edge of the unit square."""
    def inside(self, x, on_boundary):
        if not on_boundary:
            return False
        return near(x[1], 1.0)
## Mark boundaries ##
# Tag facets: 1=left, 2=right, 3=bottom, 4=top; ds integrates over them.
boundaries = MeshFunction("size_t", mesh, mesh.topology().dim()-1)
boundaries.set_all(0)
Left().mark(boundaries, 1)
Right().mark(boundaries, 2)
Bottom().mark(boundaries, 3)
Top().mark(boundaries, 4)
ds = Measure("ds", domain=mesh, subdomain_data=boundaries)
## Pressure boundary conditions ##
# Manufactured pressure traces for the two pore networks (p1, p2),
# imposed weakly through the boundary terms in L below.
p1_left = Expression("1/pi*sin(pi*x[1]) - exp(3.316625*x[1])", degree=5)
p1_right = Expression("1/pi*exp(pi)*sin(pi*x[1]) - exp(3.316625*x[1])", degree=5)
p1_bottom = Expression("-1.0", degree=5)
p1_top = Expression("-27.5671484", degree=5)
p2_left = Expression("1/pi*sin(pi*x[1]) + 10*exp(3.316625*x[1])", degree=5)
p2_right = Expression("1/pi*exp(pi)*sin(pi*x[1]) + 10*exp(3.316625*x[1])", degree=5)
p2_bottom = Expression("10.0", degree=5)
p2_top = Expression("-275.671484", degree=5)
## Weak formulation ##
# Mixed Darcy form for two coupled networks; the (p1 - p2) terms model
# inter-network mass transfer; alpha1/alpha2 are the inverse permeabilities.
n = FacetNormal(mesh)
alpha1, alpha2 = Constant(1), Constant(10)
a = dot(v1, alpha1*u1)*dx + dot(v2, alpha2*u2)*dx \
    - div(v1)*p1*dx - div(v2)*p2*dx + q1*div(u1)*dx + q2*div(u2)*dx \
    + q1*(p1-p2)*dx - q2*(p1-p2)*dx
L = - dot(v1,n)*p1_left*ds(1) - dot(v2,n)*p2_left*ds(1) \
    - dot(v1,n)*p1_right*ds(2) - dot(v2,n)*p2_right*ds(2) \
    - dot(v1,n)*p1_bottom*ds(3) - dot(v2,n)*p2_bottom*ds(3) \
    - dot(v1,n)*p1_top*ds(4) - dot(v2,n)*p2_top*ds(4)
## Setup block problem ##
# Inner solvers for the velocity block (block-Jacobi) and the pressure
# Schur complement (hypre AMG); outer GMRES with a full Schur factorization.
params1 = {
    "ksp_type":"preonly",
    "pc_type":"bjacobi"
}
params2 = {
    "ksp_type":"preonly",
    "pc_type":"hypre",
}
schur = {
    "ksp_type":"gmres",
    "pc_fieldsplit_type":"schur",
    "pc_fieldsplit_schur_fact_type":"full",
    "pc_fieldsplit_schur_precondition":"selfp",
    "ksp_monitor_true_residual": True
}
problem = BlockProblem(a,L,w,bcs=[])
# Group both velocities (fields 0,2) and both pressures (fields 1,3).
problem.field('v',[0,2],solver=params1)
problem.field('p',[1,3],solver=params2)
problem.split('s1',['v','p'],solver=schur)
## Setup block solver ##
solver = LinearBlockSolver(problem)
solver.solve()
| [
"iprotaso@stc-30016s.nrel.gov"
] | iprotaso@stc-30016s.nrel.gov |
bc63b1a0b7d577ec29055b0578a0eb8ab066aa9c | 6a37691fe01bcff786b369a4b30d0b9dc4ef6567 | /python/OOP/data_hiding.py | 471f47a1ab2b81710b76cefd5f4dddfb3efef846 | [] | no_license | Sjaiswal1911/PS1 | ffe6266594b90a934e02a42ca0ac96f2a1b8c151 | ee5d0871421db9ef1148ea0752f1946c50a1b89b | refs/heads/master | 2022-10-29T15:01:02.030107 | 2020-06-17T11:28:55 | 2020-06-17T11:28:55 | 265,440,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Privacy and control
# Data hiding
# declare the attribute with double underscore '__' as prefix
class JustCounter:
    """Demonstrates data hiding via double-underscore name mangling.

    ``__secretCount`` is rewritten by the compiler to
    ``_JustCounter__secretCount``, so it is not reachable by its plain name
    from outside the class body.
    """
    __secretCount = 0

    def count(self):
        """Increment the hidden counter and print its new value."""
        self.__secretCount = self.__secretCount + 1
        print(self.__secretCount)

    def print_count(self):
        """Print the current value of the hidden counter."""
        print(self.__secretCount)
# Demo: mangled attribute is usable through methods but not by its raw name.
counter = JustCounter()
counter.count()
counter.count()
counter.print_count()
# Outside the class body no mangling is applied, so this raw-name access
# raises AttributeError — intentional demonstration.
print (counter.__secretCount)
# this produces an error
| [
"noreply@github.com"
] | Sjaiswal1911.noreply@github.com |
2b1e6eb19088cc04482528d296fcbb47ed03b730 | ccd43911add6fe8de33dda13011e9c39909f7296 | /fresh/users/migrations/0003_auto_20170602_2328.py | 25256d76f337851d95a49da61850dca27b311f72 | [] | no_license | RyanDracula/DailyFresh | dc291ff438aeca25c14aca4953d7a005ea1eea41 | 18249d0d6450ee99689080d701d27bc14e266a1f | refs/heads/master | 2021-01-23T01:50:59.198640 | 2017-06-13T12:07:08 | 2017-06-13T12:07:08 | 92,894,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20170531_1838'),
]
operations = [
migrations.AlterField(
model_name='adrressinfo',
name='youbian',
field=models.CharField(default=b'', max_length=8),
),
]
| [
"1404527748@qq.com"
] | 1404527748@qq.com |
a8dc6be3691fd6fa56fe9889c59ea30cf921c8d8 | 5cd54b64f2e59ec727c1f6542f94d047530592d9 | /Mall/Buyers/models.py | edd36245a11ea7748d14c6dbcb2d3ff3db41aa3a | [] | no_license | whwanyt/shop | 4c2f1c8bdfa7f65f6561d4fa1cdf5e7029b3abb3 | f78f3598e1975b3be7c1d9d6f31b4fd2eb137738 | refs/heads/master | 2020-07-14T17:00:44.882888 | 2019-08-30T10:24:40 | 2019-08-30T10:24:40 | 205,358,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,944 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ckeditor.fields import RichTextField
from django.db import models
# Create your models here.
# 商品
# Product
class Goods(models.Model):
    """A product listed in the mall."""
    gname = models.CharField(max_length=30)        # product name
    gDate = models.DateField(auto_now=True)        # auto-updated modification date
    gAbout = models.CharField(max_length=100)      # short description
    gStore = models.IntegerField()                 # stock quantity
    gcontent= RichTextField(verbose_name=u'内容')  # rich-text detail body
    gtype = models.ForeignKey('Back.Seller_User')  # owning seller
    gPrice = models.FloatField()                   # price
    gSales = models.IntegerField(default=0)        # units sold
    gimg = models.CharField(max_length=300)        # image path/URL stored as text
    gTtype = models.CharField(max_length=100)      # purpose unclear from source — TODO confirm
    gLtype = models.ForeignKey('Label')            # category label
    class Meta():
        db_table = 'Goods'
    def __unicode__(self):
        return self.gname
class GoodsImg(models.Model):
    """Additional image attached to a product."""
    gnameing = models.CharField(max_length=50)   # image caption/name
    gimage = models.CharField(max_length=300)    # image path/URL stored as text
    gItype = models.ForeignKey('Goods')          # product this image belongs to
    class Meta():
        db_table = 'GoodsImg'
    def __unicode__(self):
        return self.gnameing
class Label(models.Model):
    """Product category/offer label referenced by Goods.gLtype."""
    goffers = models.CharField(max_length=30)
    class Meta():
        db_table = 'Label'
    def __unicode__(self):
        return self.goffers
# 商品结束
# 用户
# User
class User(models.Model):
    """A buyer account."""
    Upet = models.CharField(max_length=20)     # nickname
    # username
    Uname = models.CharField(max_length=20)
    # password (stored in plain text — NOTE(review): should be hashed)
    Upswd = models.CharField(max_length=30)
    # phone number
    Uphone = models.CharField(max_length=20)
    # e-mail
    Uemail = models.CharField(max_length=30)
    # shipping address
    Uadd = models.CharField(max_length=100)
    class Meta():
        db_table = 'User'
    def __unicode__(self):
        return self.Uname
# 用户购买物品表
# User purchase record
class Ucart(models.Model):
    """Relation between a user and a product (viewed / in cart / purchased)."""
    # owning user
    Utype = models.ForeignKey('User')
    # the product involved
    Uc_id = models.ForeignKey('Goods')
    # product status: viewed / added to cart / purchased (tri-state boolean)
    Uc_state = models.NullBooleanField(default=None)
    # product review text
    Ucontent = models.CharField(max_length=300)
    class Meta():
        db_table = 'Ucart'
    def __unicode__(self):
        # NOTE(review): returns a Goods instance, not a string — __unicode__
        # is expected to return text; confirm and consider str()/unicode().
        return self.Uc_id
# 用户文章
# User article
class Upost(models.Model):
    """An article written by a user."""
    # article title
    Pname = models.CharField(max_length=100)
    # visibility: public (True) or private
    Popen = models.BooleanField(default=True)
    # category label
    Plabel = models.CharField(max_length=30)
    # article body
    Pcontent = RichTextField(verbose_name=u'内容')
    # owning user
    Ptype = models.ForeignKey('User')
    class Meta():
        db_table = 'Upost'
    def __unicode__(self):
        return self.Pname
# 用户结束
# 全国省市县信息
# Nationwide province/city/county information
class areainfo(models.Model):
    """Hierarchical administrative region; parea points at the parent region."""
    title = models.CharField(max_length=20)       # region name
    arealevel = models.IntegerField()             # depth in the hierarchy
    # self-referencing parent; NULL for top-level provinces
    parea = models.ForeignKey('self',null=True,blank=True)
    class Meta():
        db_table = 'areainfo'
    def __unicode__(self):
        return self.title
| [
"2956860463@qq.com"
] | 2956860463@qq.com |
b46eb8ad515541f7d2dca44fc8545ec091fa2544 | 726a548766a9db586806ef540dcf8ea4d0a82a60 | /Python3/unit_testing/pytest/phonebook/phonebook.py | c9f4a4f2dcbac826ca6232de245f48fa455d4e4b | [] | no_license | ArseniD/learn_python | 6fd735a594ff83ea97888d6688e474e94182ea74 | d73fc790514f50a2f61c5cc198073299b0c71277 | refs/heads/master | 2022-05-28T04:53:54.603475 | 2019-08-27T10:15:29 | 2019-08-27T10:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import os
class Phonebook():
    """In-memory name -> number store backed by a cache file on disk.

    Bug fixed: the original ``clear()`` called ``os.remove(self.filename)``
    with the bare file name, while ``__init__`` created the file inside
    ``cachedir`` — so clearing failed (FileNotFoundError) unless the current
    working directory happened to be ``cachedir``.
    """

    def __init__(self, cachedir):
        self.entries = {}
        self.filename = "phonebook.txt"
        # Remember the full path so clear() removes the same file we created.
        self.cache_path = os.path.join(str(cachedir), self.filename)
        self.file_cache = open(self.cache_path, "w")

    def add(self, name, number):
        """Store or overwrite the number for `name`."""
        self.entries[name] = number

    def lookup(self, name):
        """Return the number for `name`; raises KeyError when unknown."""
        return self.entries[name]

    def names(self):
        """Return a view of all stored names."""
        return self.entries.keys()

    def numbers(self):
        """Return a view of all stored numbers."""
        return self.entries.values()

    def clear(self):
        """Forget all entries and delete the cache file."""
        self.entries = {}
        self.file_cache.close()
        os.remove(self.cache_path)
| [
"arsenidudko@mail.ru"
] | arsenidudko@mail.ru |
4ce16e97b7aa8fccaeed9e627c0b98518358e6bd | 4a67eeb8221ee4737df4ba01bc9d4650d7aac123 | /eyou/models/__init__.py | ca2026326974e8c8d138463861943ee4a6886071 | [] | no_license | Abenezer/eyouapp | e117a16aed8aa92d75e03e9fd13921efa3cbec6e | 15070415e85b1905c3220010b90ca15703b7eb7c | refs/heads/master | 2021-01-20T10:15:49.816677 | 2017-09-12T02:09:07 | 2017-09-12T02:09:07 | 101,623,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | from sqlalchemy import engine_from_config
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import configure_mappers
import zope.sqlalchemy
# import or define all models here to ensure they are attached to the
# Base.metadata prior to any initialization routines
#from .mymodel import MyModel # noqa
from .core_models import *
from .user_models import *
# run configure_mappers after defining all of the models to ensure
# all relationships can be setup
configure_mappers()
def get_engine(settings, prefix='sqlalchemy.'):
    """Build an SQLAlchemy engine from `settings` keys starting with `prefix`."""
    engine = engine_from_config(settings, prefix)
    return engine
def get_session_factory(engine):
    """Return a ``sessionmaker`` factory bound to the given engine."""
    # sessionmaker(bind=...) is documented as equivalent to
    # sessionmaker() followed by configure(bind=...).
    return sessionmaker(bind=engine)
def get_tm_session(session_factory, transaction_manager):
    """Create a ``sqlalchemy.orm.Session`` owned by `transaction_manager`.

    The session is registered with ``zope.sqlalchemy`` so that commit/abort
    follows the transaction manager:

    - under ``pyramid_tm`` the transaction is committed or aborted per
      request, depending on whether an exception was raised;
    - in scripts, wrap usage yourself::

          import transaction

          engine = get_engine(settings)
          session_factory = get_session_factory(engine)
          with transaction.manager:
              dbsession = get_tm_session(session_factory, transaction.manager)
    """
    session = session_factory()
    zope.sqlalchemy.register(session, transaction_manager=transaction_manager)
    return session
def includeme(config):
    """
    Initialize the model for a Pyramid app.

    Activate this setup using ``config.include('Eyou.models')``.
    """
    settings = config.get_settings()
    settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'

    # use pyramid_tm to hook the transaction lifecycle to the request
    config.include('pyramid_tm')

    # Session factory is stored on the registry so scripts can reuse it.
    session_factory = get_session_factory(get_engine(settings))
    config.registry['dbsession_factory'] = session_factory

    # make request.dbsession available for use in Pyramid
    config.add_request_method(
        # r.tm is the transaction manager used by pyramid_tm
        lambda r: get_tm_session(session_factory, r.tm),
        'dbsession',
        reify=True
    )
| [
"abenezerm1@gmail.com"
] | abenezerm1@gmail.com |
dbbe0d817eafd31b824412f44b7d4c346b70beee | f3736373d9e943d33260b271e0d1515c03931687 | /experiments/scripts/run_script_2dOscillator_SCRIPTSTYLE_perturbTrivial.py | e64218ad5d36c57de9af061473cc2939736e14ff | [] | no_license | mattlevine22/mechRNN | 17ac1a5efef63ec19f3030b0677f564b33eca17a | 4261a8041ff5e57ecba7096a0851cd4eee495d07 | refs/heads/master | 2021-07-05T12:22:30.918672 | 2019-08-02T15:03:12 | 2019-08-02T15:03:12 | 172,997,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,725 | py | from utils import *
import numpy as np
import torch
import argparse
# Command-line configuration; FLAGS is read by main() below.
parser = argparse.ArgumentParser(description='mechRNN')
parser.add_argument('--epoch', type=int, default=100, help='number of epochs')
parser.add_argument('--lr', type=float, default=0.05, help='learning rate')
parser.add_argument('--delta_t', type=float, default=0.1, help='time step of simulation')
parser.add_argument('--t_end', type=float, default=2000, help='length of simulation')
parser.add_argument('--train_frac', type=float, default=0.8, help='fraction of simulated data for training')
parser.add_argument('--savedir', type=str, default='default_output', help='parent dir of output')
parser.add_argument('--model_solver', default=oscillator_2d, help='ode function')
parser.add_argument('--drive_system', type=str2bool, default=False, help='whether to force the system with a time-dependent driver')
parser.add_argument('--n_experiments', type=int, default=1, help='number of sim/fitting experiments to do')
parser.add_argument('--n_perturbations', type=int, default=1, help='number of random initializations for the RNN to perform')
FLAGS = parser.parse_args()
def main():
    """Simulate a 2D oscillator, then fit mechRNN models with trivially
    initialized weights perturbed at several noise levels, plus an
    unconstrained baseline, and compare the fits."""
    (a, b, c) = [1, 1, 1]
    my_state_inits = [[1,0]]

    lr = FLAGS.lr # learning rate
    delta_t = FLAGS.delta_t #0.01
    tspan = np.arange(0,FLAGS.t_end,delta_t)  #np.arange(0,10000,delta_t)
    sim_model = FLAGS.model_solver
    rnn_sim_model = FLAGS.model_solver

    drive_system = FLAGS.drive_system #False

    n_sims = FLAGS.n_experiments #1
    n_epochs = FLAGS.epoch #1
    n_perturbations = FLAGS.n_perturbations
    train_frac = FLAGS.train_frac #0.9995

    i = 0
    for state_init in my_state_inits:
        i += 1
        sim_model_params = {'state_names': ['x','y'], 'state_init':state_init, 'delta_t':delta_t, 'smaller_delta_t': min(delta_t, delta_t), 'ode_params':(a, b, c)}
        rnn_model_params = {'state_names': ['x','y'], 'state_init':state_init, 'delta_t':delta_t, 'smaller_delta_t': min(delta_t, delta_t), 'ode_params':(a, b, c)}
        all_dirs = []

        np.random.seed()

        # master output directory name
        output_dir = FLAGS.savedir + '_output' + str(i+1)
        # simulate clean and noisy data
        input_data, y_clean, y_noisy = make_RNN_data(
            sim_model, tspan, sim_model_params, noise_frac=0.05, output_dir=output_dir, drive_system=False)

        ###### do train/test split #######
        n_train = int(np.floor(train_frac*len(y_clean)))
        y_clean_train = y_clean[:n_train]
        y_clean_test = y_clean[n_train:]
        y_noisy_train = y_noisy[:n_train]
        y_noisy_test = y_noisy[n_train:]
        # x_train = input_data[:, :n_train]
        # x_test = input_data[:, n_train:]
        y_list = [y_clean_train, y_noisy_train, y_clean_test, y_noisy_test]

        ####### collect normalization information from TRAINING SET ONLY ######
        normz_info_clean = {}
        normz_info_clean['Ymax'] = np.max(y_clean_train,axis=0)
        normz_info_clean['Ymin'] = np.min(y_clean_train,axis=0)
        normz_info_clean['Ymean'] = np.mean(y_clean_train)
        normz_info_clean['Ysd'] = np.std(y_clean_train)
        # normz_info_clean['Xmean'] = np.mean(x_train)
        # normz_info_clean['Xsd'] = np.std(x_train)

        normz_info_noisy = {}
        normz_info_noisy['Ymax'] = np.max(y_noisy_train,axis=0)
        normz_info_noisy['Ymin'] = np.min(y_noisy_train,axis=0)
        normz_info_noisy['Ymean'] = np.mean(y_noisy_train)
        normz_info_noisy['Ysd'] = np.std(y_noisy_train)
        # normz_info_noisy['Xmean'] = np.mean(x_train)
        # normz_info_noisy['Xsd'] = np.std(x_train)

        ###### MINMAX normalize TRAINING data #######
        # # y (MIN/MAX [0,1])
        # y_clean_train = f_normalize_minmax(normz_info_clean, y_clean_train)
        # y_noisy_train = f_normalize_minmax(normz_info_clean, y_noisy_train)
        # # x (MIN/MAX [0,1])
        # # y_clean_train = (y_clean_train - normz_info_clean['Ymean']) / normz_info_clean['Ysd']
        # # y_noisy_train = (y_noisy_train - normz_info_noisy['Ymean']) / normz_info_noisy['Ysd']
        # x_train = (x_train - normz_info_clean['Xmean']) / normz_info_clean['Xsd']

        # ###### normalize TESTING data ########
        # # y (MIN/MAX [0,1])
        # y_clean_test = f_normalize_minmax(normz_info_clean, y_clean_test)
        # y_noisy_test = f_normalize_minmax(normz_info_clean, y_noisy_test)
        # x (MIN/MAX [0,1])
        # y_clean_test = (y_clean_test - normz_info_clean['Ymean']) / normz_info_clean['Ysd']
        # y_noisy_test = (y_noisy_test - normz_info_noisy['Ymean']) / normz_info_noisy['Ysd']
        # x_test = (x_test - normz_info_clean['Xmean']) / normz_info_clean['Xsd']

        ########## NOW start running RNN fits ############
        for hidden_size in [5]:
            #### run mechRNN ###
            forward = forward_chaos_hybrid_full

            # train on clean data (random init)
            normz_info = normz_info_clean
            (y_clean_train_norm, y_noisy_train_norm,
                y_clean_test_norm, y_noisy_test_norm) = [
                f_normalize_minmax(normz_info, y) for y in y_list]

            # train on clean data (trivial init)
            # run_output_dir = output_dir + '/mechRNN_trivialInitEXACT_clean_hs{0}'.format(hidden_size)
            # all_dirs.append(run_output_dir)
            # torch.manual_seed(0)
            # train_chaosRNN(forward,
            #     y_clean_train_norm, y_clean_train_norm,
            #     y_clean_test_norm, y_noisy_test_norm,
            #     rnn_model_params, hidden_size, max(1,int(n_epochs/10)), lr,
            #     run_output_dir, normz_info_clean, rnn_sim_model,
            #     trivial_init=True, perturb_trivial_init=False)

            # Sweep perturbation scales; sd_perturb == 0 reduces to the exact
            # trivial initialization (no perturbation applied).
            for sd_perturb in [0., 0.0001, 0.001, 0.01, 0.1]:
                # NOTE(review): loop variable `nn` would shadow a torch.nn
                # alias if one were imported here — confirm utils' exports.
                for nn in range(n_perturbations):
                    run_output_dir = output_dir + '/mechRNN_trivialInitPERTURBED{1}_iter{2}_clean_hs{0}'.format(hidden_size, sd_perturb, nn)
                    all_dirs.append(run_output_dir)
                    torch.manual_seed(nn)
                    if sd_perturb==0:
                        # use_n_epochs = int(np.ceil(n_epochs/100))
                        perturb_trivial_init = False
                    else:
                        # use_n_epochs = n_epochs
                        perturb_trivial_init = True
                    train_chaosRNN(forward,
                        y_clean_train_norm, y_clean_train_norm,
                        y_clean_test_norm, y_noisy_test_norm,
                        rnn_model_params, hidden_size, n_epochs, lr,
                        run_output_dir, normz_info_clean, rnn_sim_model,
                        trivial_init=True, perturb_trivial_init=perturb_trivial_init, sd_perturb=sd_perturb)

            # Baseline: randomly initialized mechRNN on the same data.
            run_output_dir = output_dir + '/mechRNN_clean_hs{0}'.format(hidden_size)
            all_dirs.append(run_output_dir)
            torch.manual_seed(0)
            train_chaosRNN(forward,
                y_clean_train_norm, y_clean_train_norm,
                y_clean_test_norm, y_noisy_test_norm,
                rnn_model_params, hidden_size, n_epochs, lr,
                run_output_dir, normz_info_clean, rnn_sim_model)

        # plot comparative training errors
        compare_fits([d for d in all_dirs if "clean" in d], output_fname=output_dir+'/model_comparisons_clean')
        # compare_fits([d for d in all_dirs if "noisy" in d], output_fname=output_dir+'/model_comparisons_noisy')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"mattlevine22@gmail.com"
] | mattlevine22@gmail.com |
6d7219b1cb6e189121ca94c79d9edd7fca68e231 | ce5e769150f8a75d5a889815830181726a94de47 | /serviceWord2vec.py | 1901f71b034de7934d4f0342c55c0e7724f4c2fa | [] | no_license | siqiGao/nnn | dcd8bfc39696e69b0d6e415b4197d1f4b9368410 | 30f60cd652ddd6d4914dd3140811867f19208b5a | refs/heads/master | 2022-12-03T08:07:23.927012 | 2020-08-23T18:52:11 | 2020-08-23T18:53:11 | 289,747,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import config
import jieba
print ("Loading model:word2vec ......")
model = config.w2v_model
# Compute the similarity between two word lists.
def get_similarity(seg_list, word_list):
    """Return the word2vec ``n_similarity`` of the in-vocabulary words of
    the two lists; 0 when either list has no known words."""
    known_left = [w for w in seg_list if w in model]
    known_right = [w for w in word_list if w in model]
    point = 0
    if known_left and known_right:
        point = model.n_similarity(known_left, known_right)
        print(known_left, known_right, point)
    return point
| [
"389602798@qq.com"
] | 389602798@qq.com |
5ad799020261e5ef8e682c32fc4af673b9e2dd05 | c71ee430716a542a596d54910c66eaf55c4e11bd | /engineer/build/engineer_nav/catkin_generated/pkg.develspace.context.pc.py | 0eb1bfc2730a85de680b81fdbd6f199858bc38d8 | [] | no_license | NeilAlishev/robotics | e34f810c4739507ee014408d19e47f9acd0d8af5 | 77b2b160307ced2399e2bb220bd793c889da9694 | refs/heads/master | 2020-12-30T11:59:50.177247 | 2018-04-02T11:39:12 | 2018-04-02T11:39:12 | 91,483,135 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (from pkg.context.pc.in); do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Generated values are ';'-separated lists; an empty template value yields an empty list.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "engineer_nav"
# Devel-space path baked in at configure time on the build machine.
PROJECT_SPACE_DIR = "/home/neil/robotics/robotics/engineer/devel"
PROJECT_VERSION = "0.0.0"
| [
"alishev.neil@gmail.com"
] | alishev.neil@gmail.com |
d3ff1e8fd2b9310e9ac4b6e16b83c3b07946f17f | 349c4f37b6a003d10dd78d864395a0d596d24fe6 | /Learn_advanced/5201_container.py | 2c7ae775dfcca224e76bba0ef1aea78bf35bbcbc | [] | no_license | bwjubrother/Algorithms | 55c2980a4540a7e48cb3afd298cbd2e3d81c594e | 03daa2c778b1cc59ce1920363a27c88bec5ec289 | refs/heads/master | 2023-04-07T00:38:58.715786 | 2021-04-25T08:00:08 | 2021-04-25T08:00:08 | 279,016,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import sys
# Redirect stdin so input() reads the bundled test-case file.
sys.stdin = open('5201.txt', 'r')
T = int(input())
for tc in range(T):
    # n containers, m trucks (counts are implied by the value lines below).
    n, m = map(int, input().split())
    weights = sorted(map(int, input().split()), reverse=True)
    trucks = sorted(map(int, input().split()), reverse=True)
    # Greedy: heaviest remaining container onto the largest remaining truck
    # that can carry it; containers too heavy for every truck are discarded.
    total = 0
    t = 0
    for w in weights:
        if t >= len(trucks):
            break
        if w <= trucks[t]:
            total += w
            t += 1
    print('#%d %d' % (tc + 1, total))
"bwjubrother@gmail.com"
] | bwjubrother@gmail.com |
0b6a6e23efe28a1af8e2c40f4e5303ad9ea84029 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/r4.py | 221ab5ce6aa1a15e08da2cf1163ece2f4e587d08 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 82 | py | ii = [('LeakWTI3.py', 1), ('BackGNE.py', 1), ('BachARE.py', 1), ('SomeMMH.py', 4)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
bb4ee6ee9539c4701f45481fea3f104fa0fe41eb | b78bdaeb874b717d24374f76efec7dd062da9b4a | /CS3240/HW 4/encrypt2.py | a344d461dce064d6a0d00aec6f78461c5dcce53d | [] | no_license | ReidBix/Python | 2bb3198c7d46e83ed53f24045bc2622e89a760fc | c45c0eed284a80b6c0db27124702269426d9cbec | refs/heads/master | 2020-12-25T14:23:04.647486 | 2016-08-24T17:26:44 | 2016-08-24T17:26:44 | 66,485,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | #BORROWED AND MODIFIED FROM CNOX.PY AVAILABLE AT:
#https://github.com/nafscript/cnox/blob/master/cnox.py
import os
import struct
from Crypto.Cipher import AES
from Crypto import Random
block = AES.block_size #16
# Encryption
def Encrypt(in_file, key, out_file=None, chunksize=8192):
    """AES-CBC encrypt *in_file* into *out_file* (default: in_file + '.enc').

    Output layout: 8-byte little-endian original file size, then the 16-byte
    random IV, then the ciphertext.  The plaintext is padded with b' ' up to
    a 16-byte multiple; Decrypt() uses the stored size to strip the padding.
    WARNING: the source file is deleted after encryption.
    chunksize must stay a multiple of 16 so only the final chunk needs padding.
    """
    if not out_file:
        out_file = in_file + '.enc'
    # Fresh random IV per file; written into the header so Decrypt can recover it.
    iv = Random.new().read(AES.block_size)
    encryptor = AES.new(key, AES.MODE_CBC, iv)
    filesize = os.path.getsize(in_file)
    with open(in_file, 'rb') as infile:
        with open(out_file, 'wb') as outfile:
            # Header: original size ('<Q') followed by the IV.
            outfile.write(struct.pack('<Q', filesize))
            outfile.write(iv)
            while True:
                chunk = infile.read(chunksize)
                if len(chunk) == 0:
                    break
                elif len(chunk) % 16 != 0:
                    # Pad the final short chunk to the AES block size.
                    add = b' ' * (16 - len(chunk)%16)
                    chunk = chunk + add
                outfile.write(encryptor.encrypt(chunk))
    os.remove(in_file)
# Decryption
def Decrypt(in_file, key, out_file=None, chunksize=8192):
    """Reverse Encrypt(): read the size header and IV, then decrypt the payload.

    Writes to *out_file* (default: *in_file* with its extension dropped) and
    truncates the result to the original size to remove the block padding.
    """
    if not out_file:
        out_file = os.path.splitext(in_file)[0]
    with open(in_file, 'rb') as infile:
        # Header written by Encrypt(): original plaintext size ('<Q'), then the 16-byte IV.
        origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
        iv = infile.read(16)
        decryptor = AES.new(key, AES.MODE_CBC, iv)
        with open(out_file, 'wb') as outfile:
            while True:
                chunk = infile.read(chunksize)
                if len(chunk) == 0:
                    break
                outfile.write(decryptor.decrypt(chunk))
            # Drop the b' ' padding appended by Encrypt().
            outfile.truncate(origsize)
def main():
    # Demo round-trip: encrypt the document in place, then restore it.
    session_key = Random.new().read(block)
    Encrypt(in_file="Evatran_journal.docx", key=session_key)
    Decrypt(in_file="Evatran_journal.docx.enc", key=session_key)

if __name__ == '__main__':
    main()
"rmb3yz@virginia.edu"
] | rmb3yz@virginia.edu |
4c92bfac7c9797d2bb9dc85f1b4b7a00da5fdc9f | 43f15ce0b1ef5f3a7d52f1c61ce086e8601db366 | /daily_crawl/daily_forecasting_model.py | ce8d43e6827fec6990c0312404969ce9f65a870b | [] | no_license | Danesh-WQD180067/WQD7005-Group | 27211307d5f85d1f486ffd57e9a11b1abfbcd2f7 | a8e007513e27eee2d7c2d868db471c5daf16ebe7 | refs/heads/master | 2021-03-17T06:54:27.768451 | 2020-06-22T13:59:22 | 2020-06-22T13:59:22 | 246,972,044 | 0 | 1 | null | 2020-06-22T13:44:06 | 2020-03-13T02:27:07 | Jupyter Notebook | UTF-8 | Python | false | false | 3,986 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 21:27:32 2020
@author: USER
"""
import pandas as pd
from sklearn.externals import joblib
from keras.models import load_model
from datetime import datetime
from numpy import concatenate
# convert series to supervised learning
def series_to_supervised(df, n_in=1, n_out=1, dropnan=True):
    """Frame a multivariate time-series DataFrame as a supervised dataset.

    Every column is lagged n_in steps back and shifted n_out steps forward,
    producing columns named '<col> (t-k)', '<col> (t)' and '<col> (t+k)'.
    Rows containing NaN (introduced by shifting) are dropped when *dropnan*.
    """
    data = df.values
    n_vars = 1 if type(data) is list else data.shape[1]
    frames = []
    labels = []
    # Lagged input columns: t-n_in ... t-1.
    for lag in range(n_in, 0, -1):
        frames.append(df.shift(lag))
        labels.extend(df.columns[j] + ' (t-%d)' % lag for j in range(n_vars))
    # Forecast columns: t ... t+(n_out-1).
    for step in range(n_out):
        frames.append(df.shift(-step))
        if step == 0:
            labels.extend(df.columns[j] + ' (t)' for j in range(n_vars))
        else:
            labels.extend(df.columns[j] + ' (t+%d)' % step for j in range(n_vars))
    # Assemble side by side and name the columns.
    combined = pd.concat(frames, axis=1)
    combined.columns = labels
    if dropnan:
        combined.dropna(inplace=True)
    return combined
##############################################################################
#                               Prepare Data                                 #
##############################################################################
# Load the crawled dataset and keep only the model's feature columns.
dataset = pd.read_csv(r'output/dataset.csv', header=0, index_col=0)
dataset = dataset[['closing_price', 'neg', 'neu', 'pos', 'compound']]
# Keep the last 31 rows: 30 lagged days of input plus the current day.
dataset = dataset.tail(31)
lastdate = dataset.tail(1).index.item()
lastdate = datetime.strptime(lastdate, '%Y-%m-%d')
lastprice = dataset.tail(1).closing_price.item()
# Load the scaler fitted at training time.
scaler_filename = r'../pickles/scaler.gz'
scaler = joblib.load(scaler_filename)
# Normalize features with the *persisted* scaling parameters.
# BUG FIX: the original called scaler.fit_transform() here, which re-fitted
# the loaded scaler on this 31-day window and silently discarded the
# training-time scaling; transform() applies the saved parameters instead,
# which also keeps inverse_transform() below consistent with training.
values = dataset.values
scaled = scaler.transform(values)
dataset = pd.DataFrame(scaled)
dataset.columns = ['closing_price', 'neg', 'neu', 'pos', 'compound']
n_days = 30
n_features = 5
# Frame the window as one supervised sample (30 lagged days -> next day).
reframed = series_to_supervised(dataset, 30, 1)
print(reframed.head())
values = reframed.values
test = values[:, :]
# Split into inputs (all lagged columns) and target (closing price at t).
n_obs = n_days * n_features
test_X, test_y = test[:, :n_obs], test[:, -n_features]
# Reshape input to be 3D [samples, timesteps, features] for the LSTM.
test_X = test_X.reshape((test_X.shape[0], n_days, n_features))
##############################################################################
#                               Test Model                                   #
##############################################################################
model = load_model('../pickles/model.h5')
# Make a prediction for the next day's (scaled) closing price.
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], n_days*n_features))
# Invert scaling: rebuild a full feature row (prediction + last sentiment
# features) so the scaler can map the price back to the original range.
inv_yhat = concatenate((yhat, test_X[:, -4:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:, 0]
##############################################################################
#                               Save Result                                  #
##############################################################################
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, Float, DateTime, MetaData

engine = create_engine('sqlite:///../pickles/predictions.db', echo = True)
meta = MetaData()
predictions = Table(
   'predictions', meta,
   Column('id', Integer, primary_key = True),
   Column('p_date', DateTime),         # date of the last observed price
   Column('p_price', Float(asdecimal=False)),  # last observed closing price
   Column('f_price', Float(asdecimal=False)),  # forecast for the next day
)
meta.create_all(engine)
# (Removed a redundant bare predictions.insert() that was immediately overwritten.)
ins = predictions.insert().values(p_date = lastdate, p_price=lastprice, f_price = inv_yhat)
conn = engine.connect()
result = conn.execute(ins)
"wqd180067@siswa.um.edu.my"
] | wqd180067@siswa.um.edu.my |
01b617e6e058ce0246a7c101cf43bf4e1c81a5c1 | 7b798a55cf7bd42ab5d2d423ab77814c2564bd44 | /Easy/Longest Harmonious Subsequence.py | 6c543dadc03fa60cf5efa587aed5009fc279b69a | [] | no_license | addherbs/LeetCode | d933839eb0a2eb53c192f76c42152c6f3a6ef3f2 | cadd48225d93aa69745a94a214e55e7751996e19 | refs/heads/master | 2021-04-15T05:12:26.855696 | 2021-02-27T05:53:42 | 2021-02-27T05:53:42 | 126,174,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import collections
class Solution:
    def findLHS(self, nums):
        """Return the length of the longest harmonious subsequence.

        A harmonious subsequence is one whose maximum and minimum differ by
        exactly 1, so the answer is the best counts[x] + counts[x + 1] over
        distinct values x whose neighbor x + 1 is also present.

        :type nums: List[int]
        :rtype: int
        """
        counts = collections.Counter(nums)
        ans = 0
        # Iterate distinct values (not raw nums) so duplicates are not
        # re-checked; also drops the leftover debug print and unused temp.
        for num in counts:
            if num + 1 in counts:
                ans = max(ans, counts[num] + counts[num + 1])
        return ans
return ans | [
"addherbs@gmail.com"
] | addherbs@gmail.com |
d781e56aa2a7dee0672b8024be04ffa77770c0c0 | b5c47c66fb48294460e72833e4f3c7ec0042ff0a | /tutorial/venv/lib/python3.9/site-packages/setuptools/_distutils/command/bdist_rpm.py | 9ce00ca35396d2e6d101f8bc3a258135e91566e5 | [] | no_license | MariaKireeva/projects | 19c3caae9ee5f6a92c69c8c61e152dbb8f5c704f | 8a1d030d1e8c094109b70c43216f8491df58a02d | refs/heads/main | 2023-08-27T21:26:11.517225 | 2021-09-22T17:39:46 | 2021-09-22T17:39:46 | 312,046,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,537 | py | """distutils.command.bdist_rpm
Implements the Distutils 'bdist_rpm' command (create RPM source and binary
distributions)."""
import subprocess, sys, os
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.file_util import write_file
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_rpm(Command):
    """Distutils command that builds source/binary RPM packages.

    NOTE(review): many occurrences of the word "file" in this module appear
    to have been mechanically replaced with "news" (e.g. ".spec news");
    string literals are left untouched here, but look corrupted.
    """

    # One-line description shown by `setup.py --help-commands`.
    description = "create an RPM distribution"

    # (long option, short option, help text) triples understood by distutils.
    user_options = [
        ('bdist-base=', None,
         "base directory for creating built distributions"),
        ('rpm-base=', None,
         "base directory for creating RPMs (defaults to \"rpm\" under "
         "--bdist-base; must be specified for RPM 2)"),
        ('dist-dir=', 'd',
         "directory to put final RPM files in "
         "(and .spec files if --spec-only)"),
        ('python=', None,
         "path to Python interpreter to hard-code in the .spec news "
         "(default: \"python\")"),
        ('fix-python', None,
         "hard-code the exact path to the current Python interpreter in "
         "the .spec news"),
        ('spec-only', None,
         "only regenerate spec news"),
        ('source-only', None,
         "only generate source RPM"),
        ('binary-only', None,
         "only generate binary RPM"),
        ('use-bzip2', None,
         "use bzip2 instead of gzip to create source distribution"),

        # More meta-data: too RPM-specific to put in the setup script,
        # but needs to go in the .spec file -- so we make these options
        # to "bdist_rpm".  The idea is that packagers would put this
        # info in setup.cfg, although they are of course free to
        # supply it on the command line.
        ('distribution-name=', None,
         "name of the (Linux) distribution to which this "
         "RPM applies (*not* the name of the module distribution!)"),
        ('group=', None,
         "package classification [default: \"Development/Libraries\"]"),
        ('release=', None,
         "RPM release number"),
        ('serial=', None,
         "RPM serial number"),
        ('vendor=', None,
         "RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") "
         "[default: maintainer or author from setup script]"),
        ('packager=', None,
         "RPM packager (eg. \"Jane Doe <jane@example.net>\") "
         "[default: vendor]"),
        ('doc-files=', None,
         "list of documentation files (space or comma-separated)"),
        ('changelog=', None,
         "RPM changelog"),
        ('icon=', None,
         "name of icon news"),
        ('provides=', None,
         "capabilities provided by this package"),
        ('requires=', None,
         "capabilities required by this package"),
        ('conflicts=', None,
         "capabilities which conflict with this package"),
        ('build-requires=', None,
         "capabilities required to build this package"),
        ('obsoletes=', None,
         "capabilities made obsolete by this package"),
        ('no-autoreq', None,
         "do not automatically calculate dependencies"),

        # Actions to take when building RPM
        ('keep-temp', 'k',
         "don't clean up RPM build directory"),
        ('no-keep-temp', None,
         "clean up RPM build directory [default]"),
        ('use-rpm-opt-flags', None,
         "compile with RPM_OPT_FLAGS when building from source RPM"),
        ('no-rpm-opt-flags', None,
         "do not pass any RPM CFLAGS to compiler"),
        ('rpm3-mode', None,
         "RPM 3 compatibility mode (default)"),
        ('rpm2-mode', None,
         "RPM 2 compatibility mode"),

        # Add the hooks necessary for specifying custom scripts
        ('prep-script=', None,
         "Specify a script for the PREP phase of RPM building"),
        ('build-script=', None,
         "Specify a script for the BUILD phase of RPM building"),
        ('pre-install=', None,
         "Specify a script for the pre-INSTALL phase of RPM building"),
        ('install-script=', None,
         "Specify a script for the INSTALL phase of RPM building"),
        ('post-install=', None,
         "Specify a script for the post-INSTALL phase of RPM building"),
        ('pre-uninstall=', None,
         "Specify a script for the pre-UNINSTALL phase of RPM building"),
        ('post-uninstall=', None,
         "Specify a script for the post-UNINSTALL phase of RPM building"),
        ('clean-script=', None,
         "Specify a script for the CLEAN phase of RPM building"),
        ('verify-script=', None,
         "Specify a script for the VERIFY phase of the RPM build"),

        # Allow a packager to explicitly force an architecture
        ('force-arch=', None,
         "Force an architecture onto the RPM build process"),

        ('quiet', 'q',
         "Run the INSTALL phase of RPM building in quiet mode"),
        ]

    # Options that take no argument (presence means "true").
    boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
                       'no-autoreq', 'quiet']

    # Negative aliases: setting the key clears the mapped boolean option.
    negative_opt = {'no-keep-temp': 'keep-temp',
                    'no-rpm-opt-flags': 'use-rpm-opt-flags',
                    'rpm2-mode': 'rpm3-mode'}
    def initialize_options(self):
        """Set every command option to its 'undefined' default; real values
        are filled in later by finalize_options()/finalize_package_data()."""
        # Build/output locations.
        self.bdist_base = None
        self.rpm_base = None
        self.dist_dir = None
        self.python = None
        self.fix_python = None
        # What to build.
        self.spec_only = None
        self.binary_only = None
        self.source_only = None
        self.use_bzip2 = None
        # RPM metadata fields.
        self.distribution_name = None
        self.group = None
        self.release = None
        self.serial = None
        self.vendor = None
        self.packager = None
        self.doc_files = None
        self.changelog = None
        self.icon = None
        # Custom phase scripts.
        self.prep_script = None
        self.build_script = None
        self.install_script = None
        self.clean_script = None
        self.verify_script = None
        self.pre_install = None
        self.post_install = None
        self.pre_uninstall = None
        self.post_uninstall = None
        self.prep = None
        # Dependency/capability lists.
        self.provides = None
        self.requires = None
        self.conflicts = None
        self.build_requires = None
        self.obsoletes = None
        # Behavioral flags (0/1).
        self.keep_temp = 0
        self.use_rpm_opt_flags = 1
        self.rpm3_mode = 1
        self.no_autoreq = 0
        self.force_arch = None
        self.quiet = 0
    def finalize_options(self):
        """Resolve option defaults and validate option combinations.

        Raises DistutilsOptionError for conflicting options and
        DistutilsPlatformError on non-POSIX systems.
        """
        self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
        if self.rpm_base is None:
            # RPM 3 lets us pick _topdir; RPM 2 requires an explicit base.
            if not self.rpm3_mode:
                raise DistutilsOptionError(
                      "you must specify --rpm-base in RPM 2 mode")
            self.rpm_base = os.path.join(self.bdist_base, "rpm")
        if self.python is None:
            if self.fix_python:
                # Hard-code the interpreter currently running this build.
                self.python = sys.executable
            else:
                self.python = "python3"
        elif self.fix_python:
            raise DistutilsOptionError(
                  "--python and --fix-python are mutually exclusive options")
        if os.name != 'posix':
            raise DistutilsPlatformError("don't know how to create RPM "
                   "distributions on platform %s" % os.name)
        if self.binary_only and self.source_only:
            raise DistutilsOptionError(
                  "cannot supply both '--source-only' and '--binary-only'")

        # don't pass CFLAGS to pure python distributions
        if not self.distribution.has_ext_modules():
            self.use_rpm_opt_flags = 0

        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        self.finalize_package_data()
    def finalize_package_data(self):
        """Fill in RPM metadata defaults from the distribution's setup data
        and coerce option values to the expected types (string / list / path)."""
        self.ensure_string('group', "Development/Libraries")
        # Default vendor: "Name <email>" from the setup script's contact info.
        self.ensure_string('vendor',
                           "%s <%s>" % (self.distribution.get_contact(),
                                        self.distribution.get_contact_email()))
        self.ensure_string('packager')
        self.ensure_string_list('doc_files')
        if isinstance(self.doc_files, list):
            # Auto-include a README if present and not already listed.
            for readme in ('README', 'README.txt'):
                if os.path.exists(readme) and readme not in self.doc_files:
                    self.doc_files.append(readme)

        self.ensure_string('release', "1")
        self.ensure_string('serial')   # should it be an int?

        self.ensure_string('distribution_name')

        self.ensure_string('changelog')
          # Format changelog correctly
        self.changelog = self._format_changelog(self.changelog)

        self.ensure_filename('icon')

        self.ensure_filename('prep_script')
        self.ensure_filename('build_script')
        self.ensure_filename('install_script')
        self.ensure_filename('clean_script')
        self.ensure_filename('verify_script')
        self.ensure_filename('pre_install')
        self.ensure_filename('post_install')
        self.ensure_filename('pre_uninstall')
        self.ensure_filename('post_uninstall')

        # XXX don't forget we punted on summaries and descriptions -- they
        # should be handled here eventually!

        # Now *this* is some meta-data that belongs in the setup script...
        self.ensure_string_list('provides')
        self.ensure_string_list('requires')
        self.ensure_string_list('conflicts')
        self.ensure_string_list('build_requires')
        self.ensure_string_list('obsoletes')

        self.ensure_string('force_arch')
def run(self):
if DEBUG:
print("before _get_package_data():")
print("vendor =", self.vendor)
print("packager =", self.packager)
print("doc_files =", self.doc_files)
print("changelog =", self.changelog)
# make directories
if self.spec_only:
spec_dir = self.dist_dir
self.mkpath(spec_dir)
else:
rpm_dir = {}
for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
rpm_dir[d] = os.path.join(self.rpm_base, d)
self.mkpath(rpm_dir[d])
spec_dir = rpm_dir['SPECS']
# Spec news goes into 'dist_dir' if '--spec-only specified',
# build/rpm.<plat> otherwise.
spec_path = os.path.join(spec_dir,
"%s.spec" % self.distribution.get_name())
self.execute(write_file,
(spec_path,
self._make_spec_file()),
"writing '%s'" % spec_path)
if self.spec_only: # stop if requested
return
# Make a source distribution and copy to SOURCES directory with
# optional icon.
saved_dist_files = self.distribution.dist_files[:]
sdist = self.reinitialize_command('sdist')
if self.use_bzip2:
sdist.formats = ['bztar']
else:
sdist.formats = ['gztar']
self.run_command('sdist')
self.distribution.dist_files = saved_dist_files
source = sdist.get_archive_files()[0]
source_dir = rpm_dir['SOURCES']
self.copy_file(source, source_dir)
if self.icon:
if os.path.exists(self.icon):
self.copy_file(self.icon, source_dir)
else:
raise DistutilsFileError(
"icon news '%s' does not exist" % self.icon)
# build package
log.info("building RPMs")
rpm_cmd = ['rpmbuild']
if self.source_only: # what kind of RPMs?
rpm_cmd.append('-bs')
elif self.binary_only:
rpm_cmd.append('-bb')
else:
rpm_cmd.append('-ba')
rpm_cmd.extend(['--define', '__python %s' % self.python])
if self.rpm3_mode:
rpm_cmd.extend(['--define',
'_topdir %s' % os.path.abspath(self.rpm_base)])
if not self.keep_temp:
rpm_cmd.append('--clean')
if self.quiet:
rpm_cmd.append('--quiet')
rpm_cmd.append(spec_path)
# Determine the binary rpm names that should be built out of this spec
# news
# Note that some of these may not be really built (if the news
# list is empty)
nvr_string = "%{name}-%{version}-%{release}"
src_rpm = nvr_string + ".src.rpm"
non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
src_rpm, non_src_rpm, spec_path)
out = os.popen(q_cmd)
try:
binary_rpms = []
source_rpm = None
while True:
line = out.readline()
if not line:
break
l = line.strip().split()
assert(len(l) == 2)
binary_rpms.append(l[1])
# The source rpm is named after the first entry in the spec news
if source_rpm is None:
source_rpm = l[0]
status = out.close()
if status:
raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
finally:
out.close()
self.spawn(rpm_cmd)
if not self.dry_run:
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
if not self.binary_only:
srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
assert(os.path.exists(srpm))
self.move_file(srpm, self.dist_dir)
filename = os.path.join(self.dist_dir, source_rpm)
self.distribution.dist_files.append(
('bdist_rpm', pyversion, filename))
if not self.source_only:
for rpm in binary_rpms:
rpm = os.path.join(rpm_dir['RPMS'], rpm)
if os.path.exists(rpm):
self.move_file(rpm, self.dist_dir)
filename = os.path.join(self.dist_dir,
os.path.basename(rpm))
self.distribution.dist_files.append(
('bdist_rpm', pyversion, filename))
def _dist_path(self, path):
return os.path.join(self.dist_dir, os.path.basename(path))
    def _make_spec_file(self):
        """Generate the text of an RPM spec file and return it as a
        list of strings (one per line).
        """
        # definitions and headers
        spec_file = [
            '%define name ' + self.distribution.get_name(),
            '%define version ' + self.distribution.get_version().replace('-','_'),
            '%define unmangled_version ' + self.distribution.get_version(),
            '%define release ' + self.release.replace('-','_'),
            '',
            'Summary: ' + self.distribution.get_description(),
            ]

        # Workaround for #14443 which affects some RPM based systems such as
        # RHEL6 (and probably derivatives)
        vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}')
        # Generate a potential replacement value for __os_install_post (whilst
        # normalizing the whitespace to simplify the test for whether the
        # invocation of brp-python-bytecompile passes in __python):
        vendor_hook = '\n'.join([' %s \\' % line.strip()
                                 for line in vendor_hook.splitlines()])
        problem = "brp-python-bytecompile \\\n"
        fixed = "brp-python-bytecompile %{__python} \\\n"
        fixed_hook = vendor_hook.replace(problem, fixed)
        if fixed_hook != vendor_hook:
            spec_file.append('# Workaround for http://bugs.python.org/issue14443')
            spec_file.append('%define __os_install_post ' + fixed_hook + '\n')

        # put locale summaries into spec file
        # XXX not supported for now (hard to put a dictionary
        # in a config file -- arg!)
        #for locale in self.summaries.keys():
        #    spec_file.append('Summary(%s): %s' % (locale,
        #                                          self.summaries[locale]))

        spec_file.extend([
            'Name: %{name}',
            'Version: %{version}',
            'Release: %{release}',])

        # XXX yuck! this filename is available from the "sdist" command,
        # but only after it has run: and we create the spec file before
        # running "sdist", in case of --spec-only.
        if self.use_bzip2:
            spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
        else:
            spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')

        spec_file.extend([
            'License: ' + self.distribution.get_license(),
            'Group: ' + self.group,
            'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
            'Prefix: %{_prefix}', ])

        if not self.force_arch:
            # noarch if no extension modules
            if not self.distribution.has_ext_modules():
                spec_file.append('BuildArch: noarch')
        else:
            spec_file.append( 'BuildArch: %s' % self.force_arch )

        # Optional one-line header fields, emitted only when set; list-valued
        # fields are space-joined.
        for field in ('Vendor',
                      'Packager',
                      'Provides',
                      'Requires',
                      'Conflicts',
                      'Obsoletes',
                      ):
            val = getattr(self, field.lower())
            if isinstance(val, list):
                spec_file.append('%s: %s' % (field, ' '.join(val)))
            elif val is not None:
                spec_file.append('%s: %s' % (field, val))

        if self.distribution.get_url() != 'UNKNOWN':
            spec_file.append('Url: ' + self.distribution.get_url())

        if self.distribution_name:
            spec_file.append('Distribution: ' + self.distribution_name)

        if self.build_requires:
            spec_file.append('BuildRequires: ' +
                             ' '.join(self.build_requires))

        if self.icon:
            spec_file.append('Icon: ' + os.path.basename(self.icon))

        if self.no_autoreq:
            spec_file.append('AutoReq: 0')

        spec_file.extend([
            '',
            '%description',
            self.distribution.get_long_description()
            ])

        # put locale descriptions into spec file
        # XXX again, suppressed because config file syntax doesn't
        # easily support this ;-(
        #for locale in self.descriptions.keys():
        #    spec_file.extend([
        #        '',
        #        '%description -l ' + locale,
        #        self.descriptions[locale],
        #        ])

        # rpm scripts
        # figure out default build script
        def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
        def_build = "%s build" % def_setup_call
        if self.use_rpm_opt_flags:
            def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build

        # insert contents of files
        # XXX this is kind of misleading: user-supplied options are files
        # that we open and interpolate into the spec file, but the defaults
        # are just text that we drop in as-is.  Hmmm.

        install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT '
                       '--record=INSTALLED_FILES') % def_setup_call

        # (RPM section name, command attribute holding a script path, default text)
        script_options = [
            ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
            ('build', 'build_script', def_build),
            ('install', 'install_script', install_cmd),
            ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
            ('verifyscript', 'verify_script', None),
            ('pre', 'pre_install', None),
            ('post', 'post_install', None),
            ('preun', 'pre_uninstall', None),
            ('postun', 'post_uninstall', None),
        ]

        for (rpm_opt, attr, default) in script_options:
            # Insert contents of file referred to, if no file is referred to
            # use 'default' as contents of script
            val = getattr(self, attr)
            if val or default:
                spec_file.extend([
                    '',
                    '%' + rpm_opt,])
                if val:
                    with open(val) as f:
                        spec_file.extend(f.read().split('\n'))
                else:
                    spec_file.append(default)

        # files section
        spec_file.extend([
            '',
            '%files -f INSTALLED_FILES',
            '%defattr(-,root,root)',
            ])

        if self.doc_files:
            spec_file.append('%doc ' + ' '.join(self.doc_files))

        if self.changelog:
            spec_file.extend([
                '',
                '%changelog',])
            spec_file.extend(self.changelog)

        return spec_file
def _format_changelog(self, changelog):
"""Format the changelog correctly and convert it to a list of strings
"""
if not changelog:
return changelog
new_changelog = []
for line in changelog.strip().split('\n'):
line = line.strip()
if line[0] == '*':
new_changelog.extend(['', line])
elif line[0] == '-':
new_changelog.append(line)
else:
new_changelog.append(' ' + line)
# strip trailing newline inserted by first changelog entry
if not new_changelog[0]:
del new_changelog[0]
return new_changelog
| [
"mary5@list.ru"
] | mary5@list.ru |
2e630fe4db9510014ed195295467275c69dcb5e9 | 14f13d773cd3ad22e7838a3f543f9008bfedde99 | /test/hummingbot/connector/derivative/gate_io_perpetual/test_gate_io_perpetual_user_stream_data_source.py | d59459d8a8c4b9a073a78efbc4b84f433f321f35 | [
"Apache-2.0"
] | permissive | CoinAlpha/hummingbot | 0d1e2bd94de1280748647108c7d7800a09546eb8 | 82b1b47a0901ed8299c7176edaf21a9d6f49ef29 | refs/heads/master | 2023-09-01T11:24:43.322137 | 2023-04-26T15:02:09 | 2023-04-26T15:02:09 | 439,330,952 | 135 | 98 | Apache-2.0 | 2023-08-30T13:55:08 | 2021-12-17T12:50:42 | Python | UTF-8 | Python | false | false | 11,543 | py | import asyncio
import json
import unittest
from typing import Awaitable, Optional
from unittest.mock import AsyncMock, MagicMock, patch
from bidict import bidict
from hummingbot.client.config.client_config_map import ClientConfigMap
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.connector.derivative.gate_io_perpetual import gate_io_perpetual_constants as CONSTANTS
from hummingbot.connector.derivative.gate_io_perpetual.gate_io_perpetual_auth import GateIoPerpetualAuth
from hummingbot.connector.derivative.gate_io_perpetual.gate_io_perpetual_derivative import GateIoPerpetualDerivative
from hummingbot.connector.derivative.gate_io_perpetual.gate_io_perpetual_user_stream_data_source import (
GateIoPerpetualAPIUserStreamDataSource,
)
from hummingbot.connector.test_support.network_mocking_assistant import NetworkMockingAssistant
from hummingbot.connector.time_synchronizer import TimeSynchronizer
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
class TestGateIoPerpetualAPIUserStreamDataSource(unittest.TestCase):
# the level is required to receive logs from the data source logger
level = 0
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop = asyncio.get_event_loop()
cls.base_asset = "COINALPHA"
cls.quote_asset = "HBOT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.ex_trading_pair = f"{cls.base_asset}_{cls.quote_asset}"
cls.api_key = "someKey"
cls.api_secret_key = "someSecretKey"
cls.user_id = "someUserId"
def setUp(self) -> None:
super().setUp()
self.log_records = []
self.listening_task: Optional[asyncio.Task] = None
self.mocking_assistant = NetworkMockingAssistant()
self.throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)
self.mock_time_provider = MagicMock()
self.mock_time_provider.time.return_value = 1000
self.auth = GateIoPerpetualAuth(
api_key=self.api_key,
secret_key=self.api_secret_key)
self.time_synchronizer = TimeSynchronizer()
self.time_synchronizer.add_time_offset_ms_sample(0)
client_config_map = ClientConfigAdapter(ClientConfigMap())
self.connector = GateIoPerpetualDerivative(
client_config_map=client_config_map,
gate_io_perpetual_api_key="",
gate_io_perpetual_secret_key="",
gate_io_perpetual_user_id="",
trading_pairs=[])
self.connector._web_assistants_factory._auth = self.auth
self.data_source = GateIoPerpetualAPIUserStreamDataSource(
self.auth,
trading_pairs=[self.trading_pair],
connector=self.connector,
user_id=self.user_id,
api_factory=self.connector._web_assistants_factory)
self.data_source.logger().setLevel(1)
self.data_source.logger().addHandler(self)
self.connector._set_trading_pair_symbol_map(bidict({self.ex_trading_pair: self.trading_pair}))
def tearDown(self) -> None:
self.listening_task and self.listening_task.cancel()
super().tearDown()
def handle(self, record):
self.log_records.append(record)
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage() == message
for record in self.log_records)
def async_run_with_timeout(self, coroutine: Awaitable, timeout: int = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch(
"hummingbot.connector.derivative.gate_io_perpetual.gate_io_perpetual_user_stream_data_source.GateIoPerpetualAPIUserStreamDataSource"
"._time")
def test_listen_for_user_stream_subscribes_to_orders_and_balances_events(self, time_mock, ws_connect_mock):
time_mock.return_value = 1000
ws_connect_mock.return_value = self.mocking_assistant.create_websocket_mock()
result_subscribe_orders = {
"time": 1611541000,
"channel": CONSTANTS.USER_ORDERS_ENDPOINT_NAME,
"event": "subscribe",
"error": None,
"result": {
"status": "success"
}
}
result_subscribe_trades = {
"time": 1611541000,
"channel": CONSTANTS.USER_TRADES_ENDPOINT_NAME,
"event": "subscribe",
"error": None,
"result": {
"status": "success"
}
}
result_subscribe_positions = {
"time": 1611541000,
"channel": CONSTANTS.USER_POSITIONS_ENDPOINT_NAME,
"event": "subscribe",
"error": None,
"result": {
"status": "success"
}
}
self.mocking_assistant.add_websocket_aiohttp_message(
websocket_mock=ws_connect_mock.return_value,
message=json.dumps(result_subscribe_orders))
self.mocking_assistant.add_websocket_aiohttp_message(
websocket_mock=ws_connect_mock.return_value,
message=json.dumps(result_subscribe_trades))
self.mocking_assistant.add_websocket_aiohttp_message(
websocket_mock=ws_connect_mock.return_value,
message=json.dumps(result_subscribe_positions))
output_queue = asyncio.Queue()
self.listening_task = self.ev_loop.create_task(self.data_source.listen_for_user_stream(output=output_queue))
self.mocking_assistant.run_until_all_aiohttp_messages_delivered(ws_connect_mock.return_value)
sent_subscription_messages = self.mocking_assistant.json_messages_sent_through_websocket(
websocket_mock=ws_connect_mock.return_value)
self.assertEqual(3, len(sent_subscription_messages))
expected_orders_subscription = {
"time": int(self.mock_time_provider.time()),
"channel": CONSTANTS.USER_ORDERS_ENDPOINT_NAME,
"event": "subscribe",
"payload": [self.user_id, self.ex_trading_pair],
"auth": {
"KEY": self.api_key,
"SIGN": '0fb3b313fe07c7d23164a4ae86adf306a48f5787c54b9a7595f0a50a164c01eb54d8de5d5ad65fbc3ea94e60e73446d999d23424e52f715713ee6cb32a7d0df1',# noqa: mock
"method": "api_key"},
}
self.assertEqual(expected_orders_subscription, sent_subscription_messages[0])
expected_trades_subscription = {
"time": int(self.mock_time_provider.time()),
"channel": CONSTANTS.USER_TRADES_ENDPOINT_NAME,
"event": "subscribe",
"payload": [self.user_id, self.ex_trading_pair],
"auth": {
"KEY": self.api_key,
"SIGN": 'a7681c836307cbb57c7ba7a66862120770c019955953e5ec043fd00e93722d478096f0a8238e3f893dcb3e0f084dc67a2a7ff6e6e08bc1bf0ad80fee57fff113',# noqa: mock
"method": "api_key"}
}
self.assertEqual(expected_trades_subscription, sent_subscription_messages[1])
self.assertTrue(self._is_logged(
"INFO",
"Subscribed to private order changes channels..."
))
    @patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
    @patch(
        "hummingbot.connector.derivative.gate_io_perpetual.gate_io_perpetual_user_stream_data_source.GateIoPerpetualAPIUserStreamDataSource"
        "._time")
    def test_listen_for_user_stream_skips_subscribe_unsubscribe_messages(self, time_mock, ws_connect_mock):
        """Subscription acknowledgements must not be forwarded to the output queue.

        Note: @patch decorators apply bottom-up, so ``time_mock`` patches
        ``_time`` and ``ws_connect_mock`` patches the aiohttp connection.
        """
        time_mock.return_value = 1000
        ws_connect_mock.return_value = self.mocking_assistant.create_websocket_mock()
        # Server acknowledgement for the private orders channel subscription.
        result_subscribe_orders = {
            "time": 1611541000,
            "channel": CONSTANTS.USER_ORDERS_ENDPOINT_NAME,
            "event": "subscribe",
            "error": None,
            "result": {
                "status": "success"
            }
        }
        # Server acknowledgement for the private trades channel subscription.
        result_subscribe_trades = {
            "time": 1611541000,
            "channel": CONSTANTS.USER_TRADES_ENDPOINT_NAME,
            "event": "subscribe",
            "error": None,
            "result": {
                "status": "success"
            }
        }
        # Queue both acknowledgements for delivery through the mocked websocket.
        self.mocking_assistant.add_websocket_aiohttp_message(
            websocket_mock=ws_connect_mock.return_value,
            message=json.dumps(result_subscribe_orders))
        self.mocking_assistant.add_websocket_aiohttp_message(
            websocket_mock=ws_connect_mock.return_value,
            message=json.dumps(result_subscribe_trades))
        output_queue = asyncio.Queue()
        self.listening_task = self.ev_loop.create_task(self.data_source.listen_for_user_stream(output=output_queue))
        self.mocking_assistant.run_until_all_aiohttp_messages_delivered(ws_connect_mock.return_value)
        # Subscribe acks are connection bookkeeping only; consumers see nothing.
        self.assertTrue(output_queue.empty())
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
def test_listen_for_user_stream_does_not_queue_pong_payload(self, mock_ws):
mock_pong = {
"time": 1545404023,
"channel": CONSTANTS.PONG_CHANNEL_NAME,
"event": "",
"error": None,
"result": None
}
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
self.mocking_assistant.add_websocket_aiohttp_message(mock_ws.return_value, json.dumps(mock_pong))
msg_queue = asyncio.Queue()
self.listening_task = self.ev_loop.create_task(
self.data_source.listen_for_user_stream(msg_queue)
)
self.mocking_assistant.run_until_all_aiohttp_messages_delivered(mock_ws.return_value)
self.assertEqual(0, msg_queue.qsize())
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch("hummingbot.core.data_type.user_stream_tracker_data_source.UserStreamTrackerDataSource._sleep")
def test_listen_for_user_stream_connection_failed(self, sleep_mock, mock_ws):
mock_ws.side_effect = Exception("TEST ERROR.")
sleep_mock.side_effect = asyncio.CancelledError # to finish the task execution
msg_queue = asyncio.Queue()
try:
self.async_run_with_timeout(self.data_source.listen_for_user_stream(msg_queue))
except asyncio.CancelledError:
pass
self.assertTrue(
self._is_logged("ERROR",
"Unexpected error while listening to user stream. Retrying after 5 seconds..."))
@patch("aiohttp.ClientSession.ws_connect", new_callable=AsyncMock)
@patch("hummingbot.core.data_type.user_stream_tracker_data_source.UserStreamTrackerDataSource._sleep")
def test_listen_for_user_stream_iter_message_throws_exception(self, sleep_mock, mock_ws):
msg_queue: asyncio.Queue = asyncio.Queue()
mock_ws.return_value = self.mocking_assistant.create_websocket_mock()
mock_ws.return_value.receive.side_effect = Exception("TEST ERROR")
sleep_mock.side_effect = asyncio.CancelledError # to finish the task execution
try:
self.async_run_with_timeout(self.data_source.listen_for_user_stream(msg_queue))
except asyncio.CancelledError:
pass
self.assertTrue(
self._is_logged(
"ERROR",
"Unexpected error while listening to user stream. Retrying after 5 seconds..."))
| [
"zhangbocheng@zhangboengdembp"
] | zhangbocheng@zhangboengdembp |
def is_palindrome(s):
    """Return True if *s* reads the same forwards and backwards, ignoring case.

    Bug fix: the original compared its argument against the module-level
    global ``rev_s`` instead of reversing its own input, so it only worked
    when called from the script below.  It is now self-contained.
    """
    lowered = s.lower()
    return lowered == lowered[::-1]
# Read a line, keep only its letters, and report whether they form a palindrome.
s = str(input("input a string: "))
# new_s: letters in original order; rev_s: the same letters reversed.
new_s = "".join(ch for ch in s if ch.isalpha())
rev_s = new_s[::-1]
print(new_s, "----->", rev_s)
print(is_palindrome(new_s))
| [
"noreply@github.com"
] | Blin113.noreply@github.com |
6e98d826beefd5c143919bd2e5d0497d48a2c685 | e4d0daf192f7967d6142b4486680cf4c7b8b4e32 | /account/forms.py | 207062b4f04134e2661b11e9ddd23c05de57ef62 | [] | no_license | aazhbd/medical_info01 | 0d623454a74b334020f2ccda48064cf229c732c4 | b08961089e6a7cdc567f879ab794e45067171418 | refs/heads/master | 2021-01-10T14:49:19.057064 | 2015-10-15T17:08:22 | 2015-10-15T17:08:22 | 44,332,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | # -*- coding: utf-8 -*-
from django import forms
from django.forms.extras import widgets
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from django.contrib.auth.hashers import UNUSABLE_PASSWORD
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.utils.http import int_to_base36
from django.template import loader
from django.utils.http import base36_to_int
class AuthenticationForm(forms.Form):
    """
    A custom Authentication Form.

    Accepts a username (or email) and password, authenticates the pair via
    ``django.contrib.auth.authenticate`` during clean(), and uses Django's
    test-cookie mechanism to verify the browser has cookies enabled.
    """
    username = forms.CharField(
        label=_("Username/Email"), required=True,
        widget=forms.TextInput(attrs={'placeholder':_("Username")})
    )
    password = forms.CharField(
        label=_("Password"), required=True,
        widget=forms.PasswordInput(attrs={'placeholder':_("Password")}, render_value=False)
    )
    # Translated messages raised as form-wide ValidationErrors below.
    error_messages = {
        'invalid_login': _("Please enter a correct username and password. Note that both fields are case-sensitive."),
        'no_cookies': _("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."),
        'inactive': _("Your account is inactive."),
        'not_activated': _("Your account is inactive. Please check your email and click the link to activate your account."),
    }
    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        # Populated by clean() on successful authentication.
        self.user_cache = None
        if self.request:
            self.request.session.set_test_cookie()
        super(AuthenticationForm, self).__init__(*args, **kwargs)
    def clean(self):
        """
        Checks for the username and password.

        Authenticates the credentials, then distinguishes between a bad
        login, an account awaiting email activation, and a plain inactive
        account before finally verifying the test cookie.
        """
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            # authenticate() returns None on bad credentials.
            self.user_cache = authenticate(username=username, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(self.error_messages['invalid_login'])
            elif not self.user_cache.is_active and self.user_cache.activation_set.filter(used=False):
                # Inactive with an unused activation token: the user never
                # clicked the activation link from the email.
                raise forms.ValidationError(self.error_messages['not_activated'])
            elif not self.user_cache.is_active:
                raise forms.ValidationError(self.error_messages['inactive'])
        self.check_for_test_cookie()
        return self.cleaned_data
    def check_for_test_cookie(self):
        # Raise if a request was supplied and its test cookie did not survive
        # the round trip (i.e. cookies are disabled in the browser).
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(self.error_messages['no_cookies'])
    def get_user_id(self):
        # Convenience accessor: id of the authenticated user, or None.
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        # The User instance cached by clean(), or None if not authenticated.
        return self.user_cache
| [
"aazhbd@yahoo.com"
] | aazhbd@yahoo.com |
9195839daf3ffda75fd9e80c4a0533eef4ae3c3f | 444de968e5ec1ccc9e1cb0d004d729e90b7be44a | /profiles_api/urls.py | ed0cc5b15f50090e47f2167dc9220bc2e107a4d4 | [] | no_license | facundotoconas/api_celery | f4cc96eb9ae0aac40fc10047d88313786c0b4c13 | b77b17edd75f39b70842a4ad1760302f157205c8 | refs/heads/master | 2023-07-30T23:07:51.972623 | 2021-09-15T03:46:11 | 2021-09-15T03:46:11 | 406,603,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from django.urls import path
from profiles_api import views
# URL routing table; DRF class-based views are adapted with as_view().
urlpatterns = [
    path('hello-view', views.HelloApiView.as_view()),
]
| [
"facutoconas88@gmail.com"
] | facutoconas88@gmail.com |
67161f3cd7ae978bd30837f12866afb50df77f04 | 975d8ded1f4b6e94a7db97092aec531a79d5448e | /E_Mart/Home_Module/Serializer.py | f45b0d4e3aac633c1155a9a9165f1d86e57d85ad | [] | no_license | ahtisham735/20210224 | f135bce18d290da5b464f7a8aec15e0a762152d8 | 1ee61035bb860f3197108f18cff0b7a47b37fc00 | refs/heads/master | 2023-03-21T06:35:36.778389 | 2021-03-09T17:01:51 | 2021-03-09T17:01:51 | 341,809,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializes the custom ``User`` model (email, username, password).

    NOTE(review): ``password`` is exposed as an ordinary field here —
    presumably it should be write-only so it is never echoed back in API
    responses; confirm against the consuming view.
    """
    class Meta:
        model=User
        fields=['email','username','password']
| [
"sahtisham735@gmail.com"
] | sahtisham735@gmail.com |
206aa25cc0b6d6d218e794ed4e9de55caa5bcc20 | 16bf7af92a25c242f94d043223201d16275cc218 | /server/model/table.py | 3d03849d6001ac14200ce7ab645042ac017c97b8 | [] | no_license | markrookie/doudizhu | 4c7c87b46033a55de0bbb7538cc61ae4606037c9 | c06a19f36071b27f2b647bc5a33361fccfdbefbb | refs/heads/master | 2021-04-30T07:06:19.142443 | 2016-12-18T13:47:13 | 2016-12-18T13:47:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,572 | py | import logging
import random
import tornado.escape
import tornado.ioloop
import tornado.websocket
logger = logging.getLogger()
class Table(object):
    """A three-seat card table for dou dizhu (fight-the-landlord).

    Tracks the seated players, the shuffled deck, bidding/score state and
    whose turn it is.  Table ids are handed out by the class-level
    ``gen_id`` counter.
    """

    # Class-wide sequence used by gen_id() to assign unique table ids.
    counter = 0

    def __init__(self):
        self.pid = Table.gen_id()
        self.players = [None, None, None]
        self.state = 0  # 0 waiting 1 playing 2 end 3 closed
        self.pokers = []
        self.multiple = 1
        self.callscore = 0
        self.whoseTurn = 0
        self.last_shot_seat = 0
        self.last_shot_poker = []
        self.room = 100
        # Schedule the first update() callback on the running IOLoop.
        tornado.ioloop.IOLoop.current().add_callback(self.update)

    def calc_coin(self, winner):
        """Settle the round: return the coin delta for each seated player.

        Players on the winner's side gain ``room * rank * callscore *
        multiple`` minus a flat table tax; the others lose the same stake
        (also taxed).  Marks the table ended (state 2) and clears every
        player's ready flag.
        """
        self.state = 2
        coins = []
        tax = 100
        for p in self.players:
            p.ready = False
            coin = self.room * p.rank * self.callscore * self.multiple
            if p.rank == winner.rank:
                coins.append(coin - tax)
            else:
                coins.append(-coin - tax)
        return coins

    def update(self):
        """Scheduled IOLoop hook; currently only logs the tick."""
        logger.info('table[%d] update', self.pid)

    def add(self, player):
        """Seat *player* at the first free seat; return True on success."""
        for i, p in enumerate(self.players):
            if not p:
                player.seat = i
                self.players[i] = player
                logger.info('Table[%d] add Player[%d]', self.pid, player.pid)
                return True
        logger.error('Player[%d] join a full Table[%d]', player.pid, self.pid)
        return False

    def remove(self, player):
        """Free *player*'s seat; return True if the table became empty.

        An empty table is marked closed (state 3).
        """
        for i, p in enumerate(self.players):
            if p and p.pid == player.pid:
                self.players[i] = None
                break
        else:
            # Bug fix: the original logged this error for every seat that
            # did not hold the player (i.e. spuriously, twice per
            # successful removal); it now fires only when the player is
            # genuinely absent from the table.
            logger.error('Player[%d] not in Table[%d]', player.pid, self.pid)
        if all(p is None for p in self.players):
            self.state = 3
            logger.error('Table[%d] close', self.pid)
            return True
        return False

    def size(self):
        """Number of occupied seats (0-3)."""
        return 3 - self.players.count(None)

    def deal_poker(self):
        """Shuffle and deal 17 cards to each player once all are ready.

        The remaining 3 cards stay in ``self.pokers`` as the landlord's
        kitty.  Picks a random first player and notifies every player via
        its ``dealPoker()`` callback.
        """
        if not all(p and p.ready for p in self.players):
            return
        self.state = 1
        self.pokers = list(range(54))
        random.shuffle(self.pokers)
        for i in range(51):
            self.players[i % 3].pokers.append(self.pokers.pop())
        self.whoseTurn = random.randint(0, 2)
        for p in self.players:
            p.dealPoker()

    @classmethod
    def gen_id(cls):
        """Return the next unique table id."""
        cls.counter += 1
        return cls.counter
if __name__ == '__main__':
    # Smoke check: consecutive tables receive strictly increasing ids.
    for _ in range(4):
        table = Table()
        print(table.pid)
| [
"mailgyc@163.com"
] | mailgyc@163.com |
ac79892ea7a04e438b9f617893c0aeddfc3de5db | 33daf4c69a8f46d7ad8d93eaa73fc60e36fd022d | /gestion/opos_2016/corregir_nombres.py | 74f44cc7b0761081bb11028fe73577b9f2112e9e | [] | no_license | OscarMaestre/estructurado | 81cfc9412b77d5015be1bebf66785c357746d8e2 | 7649747e48128cb9c17dee937574e9490fcc9087 | refs/heads/master | 2021-01-10T15:05:47.695362 | 2016-04-28T07:30:50 | 2016-04-28T07:30:50 | 53,923,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | #!/usr/bin/env python3
#coding=utf-8
from utilidades.ficheros.GestorFicheros import GestorFicheros
# Each tuple is (nombre_correcto, nombre_actual): the first element is the
# full town name that should end up in the database, the second is the
# truncated/mangled value currently stored there.
modificaciones=[
    ("La Torre de Esteban Hambran", "Torre de Esteban Hambran"),
    ("Cortijos de Arriba", "Cortijo de Arriba"),
    ("Villafranca de los Caballeros", "Villafranca de los"),
    ("Las Ventas con Peña Aguilera", "Ventas Con Peña Aguilera"),
    ("Herrera de la Mancha", "Centro Penitenciario Herrera")
]
gf=GestorFicheros()
# Placeholder {0} receives the replacement value, {1} the value being replaced.
sql_modificar_origen="update rutas set origen='{0}' where origen='{1}';"
sql_modificar_destino="update rutas set destino='{0}' where destino='{1}';"
ARCHIVO_BD="rutas.db"
for nombre_correcto, nombre_actual in modificaciones:
    # Fix the route origin column, piping the statement through sqlite3.
    comando_origen = sql_modificar_origen.format(nombre_correcto, nombre_actual)
    gf.ejecutar_comando(
        "echo \"" + comando_origen + "\"", "| sqlite3 " + ARCHIVO_BD
    )
    # Same correction for the destination column.
    comando_destino = sql_modificar_destino.format(nombre_correcto, nombre_actual)
    gf.ejecutar_comando(
        "echo \"" + comando_destino + "\"", "| sqlite3 " + ARCHIVO_BD
    )
"profesor.oscar.gomez@gmail.com"
] | profesor.oscar.gomez@gmail.com |
95ec4bc862a5962847f4a2417b1f107854719b08 | 699c4c15667166788a4eaf9d9c1197cf3393986f | /backend/gatherspace/manage.py | e4bfc5db43a4bb74f93d01ea4c3060c182757b4a | [] | no_license | Merevoli-DatLuu/GatherSpace | 2fdd07f5b0a59d27ee9567de952880dc1242b54e | 4305f392fb8f810cfd193d9ba5b11ac94e3ea24d | refs/heads/master | 2023-08-31T18:09:31.645095 | 2021-09-22T05:05:04 | 2021-09-22T05:05:04 | 409,068,869 | 0 | 0 | null | 2021-09-28T08:51:43 | 2021-09-22T04:55:48 | Python | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at the project settings unless the caller already set them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gatherspace.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while chaining the original error.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Standard Django entry point: delegate to main() when run as a script.
    main()
| [
"47155364+Merevoli-DatLuu@users.noreply.github.com"
] | 47155364+Merevoli-DatLuu@users.noreply.github.com |
9951868b35d55b8b9969caede6c4916b987b0f5c | e2f0806ca1cdd887ea40d050a19fa2710427bd38 | /기본 문제/05주차_스택2/2167_2차원 배열의 합/강승훈.py | a590cd46cdd52cc0944208211c63cc1350c95b58 | [] | no_license | JY-Dev/AlgorithmStudy-1 | 001f94d80097c850c79eeb2bc86971a01aa5bd5d | 2ad1df0fd65c72a6f6d1feeba09f889000ff8c15 | refs/heads/main | 2023-08-21T18:38:18.235994 | 2021-09-28T07:07:11 | 2021-09-28T07:07:11 | 406,208,087 | 1 | 0 | null | 2021-09-14T03:14:32 | 2021-09-14T03:14:31 | null | UTF-8 | Python | false | false | 607 | py | from sys import stdin
# Read an n x m grid of integers, then answer each rectangular-sum query.
n, m = map(int, stdin.readline().split(" "))
arr = list(list(map(int, stdin.readline().split())) for _ in range(n))
test_case = int(stdin.readline().strip())
for _ in range(test_case):
    # Coordinates are 1-based, inclusive: (i1, j1) top-left, (i2, j2) bottom-right.
    i1, j1, i2, j2 = map(int, stdin.readline().split(" "))
    total = 0
    for row in range(i1 - 1, i2):
        # Summing a row slice replaces the original inner column loop.
        total += sum(arr[row][j1 - 1:j2])
    # Print the answer for each query as it is computed.
    print(total)
| [
"noreply@github.com"
] | JY-Dev.noreply@github.com |
979a71a39688b941580d1480aaa2802ebc8058a2 | 2af94f8a7609d47fdcea28a2132c4f8bacb103e3 | /src/services/service_manager.py | da578ee4f10fc46bdd44de29797f6e45099bc02f | [] | no_license | bernhara/DigiGateway4Raph | 685527723f0b306f387233c78d27fe9d78717c38 | f36ba29ef883d70f94b8609ff734b5dcde786c66 | refs/heads/master | 2020-07-05T19:56:27.027547 | 2019-08-19T06:10:46 | 2019-08-19T06:10:46 | 202,756,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,464 | py | ############################################################################
# #
# Copyright (c)2008, 2009, Digi International (Digi). All Rights Reserved. #
# #
# Permission to use, copy, modify, and distribute this software and its #
# documentation, without fee and without a signed licensing agreement, is #
# hereby granted, provided that the software is used on Digi products only #
# and that the software contain this copyright notice, and the following #
# two paragraphs appear in all copies, modifications, and distributions as #
# well. Contact Product Management, Digi International, Inc., 11001 Bren #
# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #
# opportunities for non-Digi products. #
# #
# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #
# PROVIDED HEREUNDER IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND. #
# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #
# ENHANCEMENTS, OR MODIFICATIONS. #
# #
# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #
# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #
# #
############################################################################
"""\
Manages the loading and instances of individual services.
The ServiceManager allows for the dynamic loading of services.
drivers as well as the ability to retrieve an instance of a service
by name.
The ServiceManager also provides an interface to start and stop
an instance as well as to query the instance for its configuration
parameters.
"""
# imports
from common.abstract_service_manager import AbstractServiceManager
# constants
# exception classes
# interface functions
# classes
class ServiceManager(AbstractServiceManager):
    """Loads, tracks and exposes instances of individual service drivers.

    Registers itself with the core services under the name
    ``"service_manager"`` and delegates driver bookkeeping to
    ``AbstractServiceManager`` (settings path ``('services',)``).
    """
    def __init__(self, core_services):
        # Keep a handle on the core and publish this manager so other
        # components can look it up by name.
        self.__core = core_services
        self.__core.set_service("service_manager", self)
        # Initialize our base class:
        AbstractServiceManager.__init__(self, core_services, ('services',))
    def driver_load(self, name):
        """\
        Loads a service driver class dynamically.
        If the driver has not been loaded previously, an unconfigured
        instance of the driver will be created and managed by the
        ServiceManager. If the driver has already been loaded
        nothing will be done. In either case, this function will
        return True.
        If the service driver cannot be loaded for any reason, an
        exception will be raised.
        """
        # Delegates entirely to the base class implementation.
        return AbstractServiceManager.service_load(self, name)
# internal functions & classes
| [
"ORBA6563@S-ORBA65630.rd.francetelecom.fr"
] | ORBA6563@S-ORBA65630.rd.francetelecom.fr |
39231c851e4390fefee972dc33794a199ac03564 | 589b5eedb71d83c15d44fedf60c8075542324370 | /project/stock_project/alpha_model/alpha_factor/GrossProfitYOY.py | 7132439c922d47498483410e22ffd1b56dbe32b9 | [] | no_license | rlcjj/quant | 4c2be8a8686679ceb675660cb37fad554230e0d4 | c07e8f0f6e1580ae29c78c1998a53774a15a67e1 | refs/heads/master | 2020-03-31T07:15:48.111511 | 2018-08-27T05:29:00 | 2018-08-27T05:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | import pandas as pd
import numpy as np
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.stock.stock_factor_operate import StockFactorOperate
def GrossProfitYOY(beg_date, end_date):
    """
    Factor: year-over-year growth of quarterly gross profit
    (operating income minus operating cost).

    Quarterly values are aligned on the financial-report disclosure
    date (most recent report) and expanded to a daily series between
    *beg_date* and *end_date*, then written to the factor store under
    the name ``GrossProfitYOY`` and returned.
    """
    # param
    #################################################################################
    factor_name = 'GrossProfitYOY'
    ipo_num = 90  # NOTE(review): unused in this function — presumably an IPO-age filter; confirm.
    # read data
    #################################################################################
    income = Stock().get_factor_h5("OperatingIncome", None, "primary_mfc").T
    cost = Stock().get_factor_h5("OperatingCost", None, "primary_mfc").T
    [income, cost] = Stock().make_same_index_columns([income, cost])
    gross_profit = income - cost
    # Same quarter one year earlier = 4 quarterly observations back.
    gross_profit_4 = gross_profit.shift(4)
    gross_profit_yoy = gross_profit / gross_profit_4 - 1.0
    gross_profit_yoy = gross_profit_yoy.T
    gross_profit_yoy = StockFactorOperate().change_quarter_to_daily_with_report_date(gross_profit_yoy, beg_date, end_date)
    # data processing
    #################################################################################
    pass
    # calculate data daily
    #################################################################################
    # Drop stocks with no data at all, preserving the dates-by-stocks layout.
    res = gross_profit_yoy.T.dropna(how='all').T
    # save data
    #############################################################################
    Stock().write_factor_h5(res, factor_name, "alpha_dfc")
    return res
if __name__ == '__main__':
from datetime import datetime
beg_date = '2004-01-01'
end_date = datetime.today()
data = GrossProfitYOY(beg_date, end_date)
print(data)
| [
"1119332482@qq.com"
] | 1119332482@qq.com |
dd8ab2d571a58ee87ae1a817c194e3eb4d3922be | 89fbc481a60e717b8c5505ace6e2a0e27b3ed264 | /MNIST_MLP.py | f125fb5ed0f7e6d6b493043a27b0c29633a4b3e1 | [] | no_license | gagansingh894/Deep-Learning-with-PyTorch | 11a3cf3080fbab03af0d94ee970a3df864c1dc0d | 1865b74265afceadf4b517b31f4f47f6b35d7fa3 | refs/heads/master | 2022-12-27T03:44:29.437195 | 2020-10-06T09:13:03 | 2020-10-06T09:13:03 | 271,703,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import transforms, datasets
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
# Number of subprocesses used by the DataLoaders (0 = load in the main process)
num_workers = 0
# Number of samples per batch
batch_size = 20
# Fraction of the training set held out for validation
valid_size = 0.2
# Convert PIL images to [0, 1] float tensors; no normalization is applied
transform = transforms.ToTensor()
# Train and Test
# NOTE(review): download=False assumes ./MNIST_data already contains the
# dataset; set download=True when running on a fresh machine.
train_data = datasets.MNIST("MNIST_data", download=False, train=True, transform=transform)
test_data = datasets.MNIST("MNIST_data", download=False, train=False, transform=transform)
# Validation Split: shuffle the training indices once, then carve off the
# first `valid_size` fraction for validation.
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
class Net(nn.Module):
    """Three-layer MLP classifier for 28x28 MNIST digits.

    Architecture: 784 -> hidden_1 -> hidden_2 -> 10 with ReLU activations
    and dropout after each hidden layer.  Generalized: the hidden widths
    and dropout rate are now constructor parameters defaulting to the
    original 512/512/0.2, so existing ``Net()`` callers are unaffected
    while capacity experiments become possible.
    """

    def __init__(self, hidden_1=512, hidden_2=512, dropout=0.2):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, hidden_1)
        self.fc2 = nn.Linear(hidden_1, hidden_2)
        self.fc3 = nn.Linear(hidden_2, 10)
        # One Dropout module reused after both hidden layers.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Flatten the image batch and return raw class logits (no softmax)."""
        x = x.view(-1, 28 * 28)
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        # Output layer: raw logits, as expected by CrossEntropyLoss.
        return self.fc3(x)
model = Net()
# print(model)
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Training the network
n_epochs = 50
# Best (lowest) validation loss seen so far, used for checkpointing.
valid_loss_min = np.Inf
for epoch in range(n_epochs):
    train_loss = 0.0
    valid_loss = 0.0
    train_acc = 0.0
    valid_acc = 0.0
    # --- training pass (dropout active) ---
    model.train()
    for images, labels in train_loader:
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, pred = torch.max(output, 1)
        train_acc += torch.mean(pred.eq(labels).type(torch.FloatTensor))
    # --- validation pass (dropout disabled, no gradients) ---
    model.eval()
    with torch.no_grad():
        for images, labels in valid_loader:
            output = model(images)
            loss = criterion(output, labels)
            valid_loss += loss.item()
            _, pred = torch.max(output, 1)
            valid_acc += torch.mean(pred.eq(labels).type(torch.FloatTensor))
    # Reported values are averages over *batches*, not individual samples.
    print('Epoch: {}/{} \tTraining Loss: {:.6f} \tTraining Accuracy: {:.2f}\t'
          'Validation Loss: {:.6f} \tValidation Accuracy: {:.2f}'.format(
        epoch+1, n_epochs,
        train_loss/len(train_loader),
        train_acc/len(train_loader),
        valid_loss/len(valid_loader),
        valid_acc/len(valid_loader)))
    # Checkpoint whenever validation improves (or ties the best so far).
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model.pt')
        valid_loss_min = valid_loss
model.load_state_dict(torch.load('model.pt'))
test_loss = 0.0
test_acc = 0.0
model.eval()
with torch.no_grad():
for images, labels in test_loader:
output = model(images)
loss = criterion(output, labels)
test_loss += loss.item() * images.size(0)
_, pred = torch.max(output, 1)
test_acc += torch.mean(pred.eq(labels).type(torch.FloatTensor))
test_loss = test_loss/len(test_loader)
test_acc = test_acc/len(test_loader)
print('Test Loss: {:.6f} \tTest Accuracy: {:.2f}'.format(
test_loss,
test_acc))
# JIT Save
jit_model = torch.jit.script(model)
jit_model.save('ann_model_jit.pt')
| [
"gds31.gagandeepsingh@gmail.com"
] | gds31.gagandeepsingh@gmail.com |
0461e8064ff0c865dfa4c9407e9aff532ffbab38 | 0a235fe3871433d4034679b71cd2f092559c2cf0 | /old-storm-module/ike/setrun.py | a31de3a55c21bfacfdda67ce08e7cac284cb544f | [] | no_license | hudaquresh/test-surge-examples | 8e31b67a8f51fc3b178f9d1d11dd959f54a3768f | 3153e0baba677d3f65a489dfba64f4282b7380a5 | refs/heads/master | 2020-03-23T05:42:26.280064 | 2019-02-08T19:13:37 | 2019-02-08T19:13:37 | 141,161,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,609 | py | # encoding: utf-8
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import datetime
import numpy as np
# Calculate landfall time
# Landfall for Ike in Houston was September 13th, at ~ 7:00:00 UTC
# (stored as a timedelta measured from 2008-01-01 00:00, the reference
# epoch used for all simulation times in this setup)
landfall = datetime.datetime(2008, 9, 13, 7) - \
           datetime.datetime(2008, 1, 1, 0)
# Time Conversions
def days2seconds(days):
    """Convert a duration in days to the equivalent number of seconds."""
    return days * 60.0**2 * 24.0
def seconds2days(seconds):
    """Convert a duration in seconds to the equivalent number of days."""
    return seconds / (60.0**2 * 24.0)
# Scratch directory for storing topo and dtopo files:
# (expects the CLAW environment variable to point at the Clawpack install)
scratch_dir = os.path.join(os.environ["CLAW"], 'geoclaw', 'scratch')
# ------------------------------
def setrun(claw_pkg='geoclaw'):
    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData

    All times are in seconds measured from the start of the storm year;
    the module-level ``landfall`` timedelta centers the simulation window
    around Hurricane Ike's landfall.
    """
    from clawpack.clawutil import data
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)
    # ------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amr2ez.data for AMR)
    # ------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.num_dim = num_dim
    # Lower and upper edge of computational domain (Gulf of Mexico / Caribbean):
    clawdata.lower[0] = -99.0      # west longitude
    clawdata.upper[0] = -70.0      # east longitude
    clawdata.lower[1] = 8.0       # south latitude
    clawdata.upper[1] = 32.0      # north latitude
    # Number of grid cells:
    degree_factor = 4  # (0.25º,0.25º) ~ (25237.5 m, 27693.2 m) resolution
    clawdata.num_cells[0] = int(clawdata.upper[0] - clawdata.lower[0]) * \
        degree_factor
    clawdata.num_cells[1] = int(clawdata.upper[1] - clawdata.lower[1]) * \
        degree_factor
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.num_eqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    # First three are from shallow GeoClaw, fourth is friction and last 3 are
    # storm fields
    clawdata.num_aux = 3 + 1 + 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2
    # -------------
    # Initial time: (3 days before landfall)
    # -------------
    clawdata.t0 = days2seconds(landfall.days - 3) + landfall.seconds
    # clawdata.t0 = days2seconds(landfall.days - 1) + landfall.seconds
    # Restart from checkpoint file of a previous run?
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False             # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006'  # File to use for restart data
    # -------------
    # Output times:
    # --------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.
    clawdata.output_style = 1
    if clawdata.output_style == 1:
        # Output nout frames at equally spaced times up to tfinal:
        # clawdata.tfinal = days2seconds(date2days('2008091400'))
        clawdata.tfinal = days2seconds(landfall.days + 1.0) + \
            landfall.seconds
        # recurrence = frames per day; frame count spans [t0, tfinal]
        recurrence = 4
        clawdata.num_output_times = int((clawdata.tfinal - clawdata.t0) *
                                        recurrence / (60**2 * 24))
        clawdata.output_t0 = True  # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list of output times.
        clawdata.output_times = [0.5, 1.0]
    elif clawdata.output_style == 3:
        # Output every iout timesteps with a total of ntot time steps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 1
        clawdata.output_t0 = True
    clawdata.output_format = 'ascii'      # 'ascii' or 'netcdf'
    clawdata.output_q_components = 'all'   # could be list such as [True,True]
    clawdata.output_aux_components = 'all'
    clawdata.output_aux_onlyonce = False    # output aux arrays only at t0
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 1
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = True
    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.016
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 5000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 1
    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'
    # For unsplit method, transverse_waves can be
    #  0 or 'none'      ==> donor cell (only normal solver used)
    #  1 or 'increment' ==> corner transport of waves
    #  2 or 'all'       ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 1
    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3
    # List of limiters to use for each wave family:
    # Required:  len(limiter) == num_waves
    # Some options:
    #   0 or 'none'     ==> no limiter (Lax-Wendroff)
    #   1 or 'minmod'   ==> minmod
    #   2 or 'superbee' ==> superbee
    #   3 or 'mc'       ==> MC limiter
    #   4 or 'vanleer'  ==> van Leer
    clawdata.limiter = ['mc', 'mc', 'mc']
    clawdata.use_fwaves = True    # True ==> use f-wave version of algorithms
    # Source terms splitting:
    #   src_split == 0 or 'none'
    #      ==> no source term (src routine never called)
    #   src_split == 1 or 'godunov'
    #      ==> Godunov (1st order) splitting used,
    #   src_split == 2 or 'strang'
    #      ==> Strang (2nd order) splitting used,  not recommended.
    clawdata.source_split = 'godunov'
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2
    # Choice of BCs at xlower and xupper:
    #   0 => user specified (must modify bcN.f to use this option)
    #   1 => extrapolation (non-reflecting outflow)
    #   2 => periodic (must specify this at both boundaries)
    #   3 => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap'
    clawdata.bc_upper[0] = 'extrap'
    clawdata.bc_lower[1] = 'extrap'
    clawdata.bc_upper[1] = 'extrap'
    # Specify when checkpoint files should be created that can be
    # used to restart a computation.
    clawdata.checkpt_style = 0
    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass
    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1, 0.15]
    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5
    # ---------------
    # AMR parameters:
    # ---------------
    amrdata = rundata.amrdata
    # max number of refinement levels:
    amrdata.amr_levels_max = 2
    # List of refinement ratios at each level (length at least mxnest-1)
    amrdata.refinement_ratios_x = [2, 2, 2, 6, 16]
    amrdata.refinement_ratios_y = [2, 2, 2, 6, 16]
    amrdata.refinement_ratios_t = [2, 2, 2, 6, 16]
    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    #   'center',  'capacity', 'xleft', or 'yleft'  (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft', 'center', 'center',
                        'center', 'center', 'center', 'center']
    # Flag using refinement routine flag2refine rather than richardson error
    amrdata.flag_richardson = False    # use Richardson?
    amrdata.flag2refine = True
    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3
    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2
    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.700000
    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0
    #  ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False      # print domain flags
    amrdata.eprint = False      # print err est flags
    amrdata.edebug = False      # even more err est flags
    amrdata.gprint = False      # grid bisection/clustering
    amrdata.nprint = False      # proper nesting output
    amrdata.pprint = False      # proj. of tagged points
    amrdata.rprint = False      # print regridding summary
    amrdata.sprint = False      # space/memory output
    amrdata.tprint = False      # time step reporting each level
    amrdata.uprint = False      # update/upbnd reporting
    # More AMR parameters can be set -- see the defaults in pyclaw/data.py
    # == setregions.data values ==
    regions = rundata.regiondata.regions
    # to specify regions of refinement append lines of the form
    #  [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    # Gauges from Ike AWR paper (2011 Dawson et al)
    rundata.gaugedata.gauges.append([1, -95.04, 29.07,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([2, -94.71, 29.28,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([3, -94.39, 29.49,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([4, -94.13, 29.58,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    # ------------------------------------------------------------------
    # GeoClaw specific parameters:
    # ------------------------------------------------------------------
    rundata = setgeo(rundata)
    return rundata
# end of function setrun
# ----------------------
# -------------------
def setgeo(rundata):
    """
    Set GeoClaw specific runtime parameters.

    Configures physics constants, forcing, refinement criteria, topography,
    storm-surge and variable-friction settings on *rundata* and returns it.
    Raises AttributeError if *rundata* has no ``geo_data`` attribute.
    """
    try:
        geo_data = rundata.geo_data
    except:
        print("*** Error, this rundata has no geo_data attribute")
        raise AttributeError("Missing geo_data attribute")

    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2  # lat-long coordinates
    geo_data.earth_radius = 6367.5e3
    geo_data.rho = 1025.0           # sea-water density (kg/m^3)
    geo_data.rho_air = 1.15
    geo_data.ambient_pressure = 101.3e3

    # == Forcing Options
    geo_data.coriolis_forcing = True
    geo_data.friction_forcing = True
    geo_data.friction_depth = 1e10

    # == Algorithm and Initial Conditions ==
    # Due to seasonal swelling of gulf we set sea level higher
    geo_data.sea_level = 0.28
    geo_data.dry_tolerance = 1.e-2

    # Refinement Criteria
    refine_data = rundata.refinement_data
    refine_data.wave_tolerance = 1.0
    refine_data.speed_tolerance = [1.0, 2.0, 3.0, 4.0]
    refine_data.deep_depth = 300.0
    refine_data.max_level_deep = 4
    refine_data.variable_dt_refinement_ratios = True

    # == settopo.data values ==
    topo_data = rundata.topo_data
    topo_data.topofiles = []
    # for topography, append lines of the form
    # [topotype, minlevel, maxlevel, t1, t2, fname]
    # See regions for control over these regions, need better bathy data for
    # the smaller domains
    # NOTE(review): `scratch_dir` is a module-level name defined elsewhere in
    # this file -- assumed to point at the downloaded bathymetry directory.
    topo_path = os.path.join(scratch_dir, 'gulf_caribbean.tt3')
    topo_data.topofiles.append([3, 1, 5, rundata.clawdata.t0,
                                rundata.clawdata.tfinal,
                                topo_path])

    # == setfixedgrids.data values ==
    rundata.fixed_grid_data.fixedgrids = []
    # for fixed grids append lines of the form
    # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
    #  ioutarrivaltimes,ioutsurfacemax]

    # ================
    #  Set Surge Data
    # ================
    data = rundata.surge_data

    # Source term controls - These are currently not respected
    data.wind_forcing = True
    data.drag_law = 1
    data.pressure_forcing = True

    # AMR parameters, m/s and m respectively
    data.wind_refine = [20.0, 40.0, 60.0]
    data.R_refine = [60.0e3, 40e3, 20e3]

    # Storm parameters - Parameterized storm (Holland 1980)
    data.storm_type = 1
    # `landfall` is a module-level datetime/timedelta-like object defined
    # earlier in this file; convert it to seconds since simulation start.
    data.landfall = days2seconds(landfall.days) + landfall.seconds
    data.display_landfall_time = True

    # Storm type 2 - Idealized storm track
    data.storm_file = os.path.expandvars(os.path.join(os.getcwd(),
                                                      'ike_reanal.storm'))

    # =======================
    #  Set Variable Friction
    # =======================
    data = rundata.friction_data

    # Variable friction
    data.variable_friction = True

    # Region based friction
    # Entire domain
    data.friction_regions.append([rundata.clawdata.lower,
                                  rundata.clawdata.upper,
                                  [np.infty, 0.0, -np.infty],
                                  [0.030, 0.022]])

    # La-Tex Shelf
    data.friction_regions.append([(-98, 25.25), (-90, 30),
                                  [np.infty, -10.0, -200.0, -np.infty],
                                  [0.030, 0.012, 0.022]])

    return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
    # Build the run-time parameters (optionally for a named claw package
    # given on the command line) and write every data file to disk.
    import sys
    rundata = setrun(sys.argv[1]) if len(sys.argv) == 2 else setrun()
    rundata.write()
| [
"hudaquresh@gmail.com"
] | hudaquresh@gmail.com |
0daf5be917cddcbebd7687ee5a74875046b04930 | 4a77062741383fb0b9e4a59cddb35cff6af846e1 | /lib/warofwords/utils.py | 4000c9fdfa0ae88ce81c21cbdb192d5242e368d8 | [] | no_license | indy-lab/war-of-words-2 | 38a4766134a58f53a57e527f4dae361b4c418a36 | a7ca2eac798248adf8c944becf4e5a791ed7d223 | refs/heads/main | 2023-04-13T21:50:09.157974 | 2021-04-25T13:58:51 | 2021-04-25T13:58:51 | 337,411,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | import pickle
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
def get_base_dir(file):
    """Return the directory two levels above *file* (the project root).

    Typically called as ``get_base_dir(__file__)`` from a script one
    directory below the repository root.
    """
    # Resolve first so relative paths don't skew the result.  The original
    # wrapped the path in Path() twice; once is enough.
    return Path(file).resolve().parent.parent
def parse_definition(definition):
    """Unpack a model-definition dict into its individual settings.

    Returns a 7-tuple: (legislature, explicit-features, text-features,
    latent-features, chronological, baseline, fit).  Only 'legislature'
    is required; the rest fall back to None/False defaults.
    """
    return (
        definition['legislature'],
        definition.get('explicit-features'),
        definition.get('text-features', False),
        definition.get('latent-features', False),
        definition.get('chronological', False),
        definition.get('baseline'),
        definition.get('fit', False),
    )
def build_name(leg, explicit, text, latent, chronological, baseline):
    """Compose the canonical experiment name from its settings.

    The name starts with 'ep<leg>'; a baseline name short-circuits the
    feature suffixes, otherwise explicit (required), latent and text
    feature markers are appended.  Raises ValueError when neither a
    baseline nor explicit features are given.
    """
    parts = [f'ep{leg}']
    if baseline is not None:
        parts.append(baseline)
    else:
        if explicit is None:
            raise ValueError('You must specify some explicit features')
        parts.append(explicit)
        if latent:
            parts.append('latent')
        if text:
            parts.append('text')
    if chronological:
        parts.append('chronological')
    return '-'.join(parts)
def load_pkl(path):
    """Deserialize and return the pickled object stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def display_parameters(group, features, params, n=10):
    """Print the top-*n* non-zero parameters of a feature group.

    Each line shows the parameter value (4 decimals) followed by the
    feature name, sorted by decreasing value.
    """
    named = [
        (features.get_name(i), params[features.get_name(i)])
        for i in features.get_group(group)
    ]
    named.sort(key=lambda pair: pair[1], reverse=True)
    for name, value in named[:n]:
        if value != 0:
            print(f'{value:.4f} {name}')
def train_save(model, trained_model, hyper, input_path, output_path, verbose):
    """Train *model* on the dataset at *input_path* and save the result.

    model: model *class*; must provide ``load_data`` and be constructible
        as ``model(train, features, hyper, verbose)`` with a ``fit`` method.
    trained_model: class wrapping the fitted parameters; must provide
        ``save``.
    hyper: hyper-parameter settings forwarded to both classes.
    """
    # Load dataset.
    features, featmats, labels = model.load_data(input_path)
    train = list(zip(featmats, labels))
    # Initialize model.
    # NOTE: 'model' is rebound here from the class to its instance.
    model = model(train, features, hyper, verbose)
    # Train.  fit() returns the learned parameters and the final cost
    # (the cost is not used further here).
    params, cost = model.fit()
    trained = trained_model(features, hyper, **params)
    trained.save(output_path)
def barchart(y7, y8, obj, config, figpath=None):
    """Draw a grouped bar chart comparing metrics for two legislatures.

    y7, y8: per-method values for the 7th and 8th legislature (one entry
        per name in *obj*).
    obj: labels of the compared methods (one bar group per legislature).
    config: dict with 'width', 'offset', 'colors', 'edgecolors',
        'patterns' and 'ylabel' keys controlling bar appearance.
    figpath: when given, the figure is also saved to this path.

    Axis labels use LaTeX markup, so matplotlib's text.usetex is assumed
    to be enabled elsewhere -- TODO confirm.
    """
    width = config['width']
    offset = config['offset']
    # One [leg7, leg8] pair of bar heights per method.
    bars = [[y7[i], y8[i]] for i in range(len(obj))]
    r0 = np.arange(len(bars[0]))
    # x positions: each method's pair is shifted by i*(width+offset).
    rs = [
        [x * 0.75 + i * width + i * offset for x in r0]
        for i in range(len(bars))
    ]
    fig, ax = plt.subplots(figsize=(3.5, 1.9))
    lines = list()
    for i, (r, ys) in enumerate(zip(rs, bars)):
        line = plt.bar(
            r,
            ys,
            width=width,
            color=config['colors'][i],
            linewidth=1,
            edgecolor=config['edgecolors'][i],
            hatch=config['patterns'][i],
            label=obj[i],
        )
        lines.append(line)
    # Add xticks on the middle of the group bars
    # plt.xlabel('group', fontweight='bold')
    plt.ylabel(config['ylabel'])
    # plt.xticks([r*0.75 + 2*width + 2*offset for r in range(len(r0))],
    plt.xticks(
        [r * 0.75 + width + offset for r in range(len(r0))],
        [
            r'7\textsuperscript{th} legislature',
            r'8\textsuperscript{th} legislature',
        ],
    )
    plt.ylim([0.0, 1.0])
    plt.legend(
        lines,
        obj,
        loc='lower center',
        frameon=True,
        fontsize='x-small',
        framealpha=0.9,
        markerscale=0.1,
    )
    plt.tight_layout()
    if figpath is not None:
        plt.savefig(figpath)
    plt.show()
def get_value(arr):
    """Split *arr* into its first and second halves (odd middle element
    goes to the second half)."""
    half = len(arr) // 2
    return arr[:half], arr[half:]
def k_fold_gen(data, k_fold=10):
    """Yield (train, valid) splits for k-fold cross-validation.

    Each fold's validation slice is ``len(data) // k_fold`` items long;
    any remainder at the end of *data* is never used for validation.
    """
    fold_size = len(data) // k_fold
    for k in range(k_fold):
        start, stop = k * fold_size, (k + 1) * fold_size
        valid = data[start:stop]
        train = data[:start] + data[stop:]
        yield train, valid
| [
"victor.kristof@epfl.ch"
] | victor.kristof@epfl.ch |
0718685c9acf42754e027fdb61eb13bfe5c3306b | afd55e68faf10bb06d0d57731d60b5c4c362277f | /migrations/versions/28d3f0c916e1_.py | cfc99f6784883995d6a04cd3ebdeb15844162056 | [] | no_license | FRRdev/shop_app | 6adc38dd48347c18d66a34965bcf51baf46a7f6b | 2a1ecaede9fbc5e4b302729ef42221975837c1b9 | refs/heads/master | 2023-07-12T02:42:50.784364 | 2021-08-20T11:44:02 | 2021-08-20T11:44:02 | 391,930,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | """empty message
Revision ID: 28d3f0c916e1
Revises: f1f500e82750
Create Date: 2021-08-09 13:24:00.785774
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '28d3f0c916e1'
down_revision = 'f1f500e82750'
branch_labels = None
depends_on = None
def upgrade():
    """Add a 'password' column to users and drop 'password_hash'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('password', sa.String(length=128), nullable=True))
    op.drop_column('users', 'password_hash')
    # ### end Alembic commands ###
def downgrade():
    """Revert :func:`upgrade`: restore 'password_hash', drop 'password'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('password_hash', sa.VARCHAR(length=128), autoincrement=False, nullable=True))
    op.drop_column('users', 'password')
    # ### end Alembic commands ###
| [
"mixail.critsyn@mail.ru"
] | mixail.critsyn@mail.ru |
cf435630f18449c74c3ca285671c8fdff5427654 | fec7e56d49189d9c7211d4ec3c7325d507487909 | /csv_operation.py | 24bab8ab84e7fa05d8de69dec7990356f12b787b | [] | no_license | naveentiwari0410/challenge | 10c881fba529f023f4d6040d5f2a629696458246 | c54a07f6fddd2ea7457ef7f1c04212f1b6c57011 | refs/heads/master | 2023-06-01T04:55:07.381578 | 2019-09-08T13:55:00 | 2019-09-08T13:55:00 | 206,931,318 | 0 | 0 | null | 2023-05-22T22:30:32 | 2019-09-07T07:18:36 | Python | UTF-8 | Python | false | false | 359 | py | import csv
class CSVOperation():
    """Write rows of dicts to a CSV file with a fixed header.

    filename: destination path (overwritten on every ``write``).
    header: ordered list of column names; each row dict must use
        exactly these keys.
    """

    def __init__(self, filename, header):
        self.filename = filename
        self.header = header

    def write(self, data):
        """Write the header followed by *data* (an iterable of dicts)."""
        # newline='' is required by the csv module; without it every row
        # is followed by a blank line on Windows.
        with open(self.filename, mode='w', newline='') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=self.header)
            writer.writeheader()
            writer.writerows(data)
"naveentiwari0410@gmail.com"
] | naveentiwari0410@gmail.com |
9c52b01285882c13d7f38ac69f01c64c03c131d0 | a745fc07c76e5a24f7592dd2b594f0e592148b83 | /display_menu.py | 3824c153908bd204239ddca6de39ec435a99394e | [] | no_license | AJC2002/chess-bot | f75b1138439b193b877feec2be517f71fa98ca63 | 1c8981b989ae18d7e5d24d087f2f52318e61e76f | refs/heads/main | 2023-07-15T02:13:39.865530 | 2021-07-27T17:21:59 | 2021-07-27T17:21:59 | 393,813,391 | 0 | 0 | null | 2021-08-07T23:29:26 | 2021-08-07T23:29:26 | null | UTF-8 | Python | false | false | 1,736 | py | from draw_board import screen, screen_width, screen_height, RectangleSprite
import pygame
# class for the buttons used to display the menu
# num = number of buttons being drawn
# y = some int from 0 to num - 1, and states that this button is the yth counting from the top
# text = text displayed on the button
# Last Modified: 03/07/2021
class Button(RectangleSprite):
    """Full-width menu button.

    y: index of this button counting from the top (0 .. num-1).
    num: total number of buttons being drawn; the screen height is
        divided evenly among them.
    text: label rendered on the button.
    """
    def __init__(self, y, num, text):
        # font details
        font = 'Calibri'
        font_size = 100
        font_colour = [255, 255, 255]
        # defines button size: each button spans the full screen width and
        # an equal share of the screen height.
        button_width = screen_width
        button_height = screen_height//num
        super().__init__(width=button_width, height=button_height, x=0, y=y*button_height,
                         text=text, font=font, font_size=font_size, font_colour=font_colour, bold=True)
# displays a menu page, given a list of options
# Last Modified: 02/07/2021
def display_menu(options):
    """Draw one full-width button per option and refresh the display."""
    buttons = pygame.sprite.Group()
    for index, label in enumerate(options):
        buttons.add(Button(index, len(options), label))
    buttons.draw(screen)
    pygame.display.flip()
# takes in a list of options and returns the option which the user has clicked on
# Last Modified: 02/07/2021
def get_option(options):
    """Block until the user clicks, then return the index of the
    clicked option (0 = top button)."""
    n = len(options)
    # Busy-wait on the pygame event queue until a mouse-button release.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP:
                _, mouse_y = pygame.mouse.get_pos()
                # Each option occupies an equal horizontal band.
                return mouse_y // (screen_height // n)
| [
"noreply@github.com"
] | AJC2002.noreply@github.com |
2672287676d382a1dc4ba8765ae33b1ab67e8ec3 | 3b6a18c4edda876a640d026b8db3a0283118b3d6 | /tweepy/cache/RedisCache.py | 97381bbf4c86776e895ad8b80d5f3cfd9804d7c7 | [
"MIT"
] | permissive | rogeliorv/tweepy | 9a61febccfed1586cf7914e3e0487d44ebcc0f88 | ab889c22ef463fd1cd18f93b190b494c54e4b80b | refs/heads/master | 2021-01-18T06:10:50.840879 | 2012-07-03T10:37:05 | 2012-07-03T10:37:05 | 1,862,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,818 | py | '''
Created on Jun 30, 2011
@author: rogelio
'''
from tweepy.cache import Cache
import time
import itertools
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import redis
except:
pass
class RedisCache(Cache):
    '''Cache running in a redis server.
    Elements are stored as a normal record in redis. We also have a Set containing all the keys
    in the cache which defaults to tweepy:keys and can be configured to be in whatever keys_container is.
    We follow the implementation in a similar way to MemoryCache, but also use extra features as the
    redis_expire function to expire keys instead of waiting for clean_up to be called. This can cause
    the key_set to indicate we have more keys than what we have, if an exact number is necessary, call
    cleanup before count.

    NOTE(review): this class uses Python 2 syntax (`raise TypeError, ...`,
    `unicode`); it will not run unmodified on Python 3.
    '''
    def __init__(self, client=None, timeout=60, keys_container = 'tweepy:keys', prefix = 'tweepy:'):
        # timeout: seconds before a stored entry is considered stale.
        super(RedisCache, self).__init__(timeout)
        self.keys_container = keys_container
        self.prefix = prefix
        try:
            self.client = client or redis.Redis() # Attempt creating a client to localhost if no client
        except:
            raise TypeError, "You must provide a redis client instance"

    def store_multiple(self, keys_values_dict, **kwargs):
        '''Store the key, value pair in our redis server'''
        # Get a pipe (to execute several redis commands in one step)
        pipe = self.client.pipeline()
        timeout = kwargs.get('timeout', self.timeout)
        for key, value in keys_values_dict.items():
            # Prepend tweepy to our key, this makes it easier to identify tweepy keys in our redis server
            key = self.add_prefix(key)
            # Values are pickled together with their store time so reads can
            # re-check expiry independently of redis' own TTL.
            pipe.set(key, pickle.dumps((time.time(), value)))
            # Set the expiration
            pipe.expire(key, timeout)
            # Add the key to a set containing all the keys
            pipe.sadd(self.keys_container, key)
        # Execute the instructions in the redis server
        return pipe.execute()

    def store(self, key, value):
        '''Reuse logic in store_multiple'''
        return self.store_multiple(dict([(key, value)]))

    def add_prefix(self, key):
        '''Add the prefix to the key if it does not have it yet'''
        key = unicode(key)
        return key if key.startswith(self.prefix) else self.prefix + key

    def get_multiple(self, *keys, **kwargs):
        '''Given an iterable of keys, returns the corresponding elements in the cache

        Generator: yields one value (or None on miss/expiry) per key, in
        order, fetching from redis in chunks of `chunk_size`.
        '''
        timeout = kwargs.get('timeout', self.timeout)
        chunk_size = kwargs.get('chunk_size', 500)
        # Divide into evenly sized chunks, this is to start returning values without
        # the need to wait for all the results in a big query
        it = iter(keys)
        chunk = list(itertools.islice(it, chunk_size))
        expired_keys = []
        while chunk:
            # Get the values in one go
            pipe = self.client.pipeline()
            for key in chunk:
                key = self.add_prefix(key)
                pipe.get(key)
            unpickled_values = pipe.execute()
            # Iterate over the keys and find expired keys
            for key, u_value in zip(chunk, unpickled_values):
                # If we receive none, it wasn't on the cache
                if not u_value:
                    yield None
                else:
                    value = pickle.loads(u_value)
                    if self._is_expired(value, timeout):
                        expired_keys.append(key)
                        yield None
                    else:
                        # 0 is timeout, 1 is object
                        yield value[1]
            chunk = list(itertools.islice(it, chunk_size))
        # Remove all the found expired keys
        # NOTE(review): this passes the list as a single positional argument,
        # but delete_entries expects *keys -- probably should be
        # self.delete_entries(*expired_keys).  Verify before changing.
        self.delete_entries(expired_keys)

    def get(self, key):
        # Single-key convenience wrapper around get_multiple.
        return list(self.get_multiple(key))[0]

    def count(self):
        '''If we didn't have the set here we wouldn't be able to retrieve this as a number and would have to
        get all keys and then count.'''
        return self.client.scard(self.keys_container)

    def delete_entries(self, *keys):
        '''Delete an object from the redis table'''
        pipe = self.client.pipeline()
        for key in keys:
            pipe.srem(self.keys_container, key)
            pipe.delete(key)
        return pipe.execute()

    # An alias for the single version
    delete_entry = delete_entries

    def cleanup(self):
        '''Cleanup all the expired keys'''
        # Get the keys from the key container
        keys = self.client.smembers(self.keys_container)
        # Use a pipe to get all the values from the keys in one go
        values_pipe = self.client.pipeline()
        for key in keys: values_pipe.get(key)
        values = values_pipe.execute()
        dead_keys = []
        # If the key did not exist or the key is expired, clean from the cache
        for key, value in zip(keys, values):
            if not value:
                dead_keys.append(key)
            else:
                value = pickle.loads(value)
                if self._is_expired(value, self.timeout):
                    dead_keys.append(key)
        return self.delete_entries(*dead_keys)

    def flush(self):
        '''Delete all entries from the cache'''
        keys = self.client.smembers(self.keys_container)
        return self.delete_entries(*keys)
"rogeliorv@gmail.com"
] | rogeliorv@gmail.com |
291aa4ec2fd1b9bbe175d46971b1671007aa1718 | f55da8466d8c1f7d4b4a3dab00cc64a9912d5a24 | /tests/getProgressTest.py | b73d396ea0c1d321da2d9e3d02476a2754e8d512 | [] | no_license | bpl-code/Scribbles | 95359f8ee0affec92dba897bc9dafed80ecfc38b | 4d87e8e4805f72393d1edd3dce9644837c957e9d | refs/heads/master | 2020-05-03T08:40:57.018071 | 2019-04-17T15:38:16 | 2019-04-17T15:38:16 | 178,532,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | #get Progress Test
import scribbles
# Build a task with three notes, promote the first two notes to subtasks,
# and print the task's resulting progress value.
newTask = scribbles.task("Complete Test")
newTask.addNote(scribbles.note("Subtask 1"))
newTask.addNote(scribbles.note("Subtask 2"))
newTask.addNote(scribbles.note("Subtask 3"))
notes = newTask.getNotes()
# Only the first two notes become subtasks; the third stays a plain note.
notes[0].assignAsSubtask()
notes[1].assignAsSubtask()
print(newTask.getProgress())
| [
"rplewiscopy@gmail.com"
] | rplewiscopy@gmail.com |
54e62ce7e4fe08eebd0259b7f85689cb1426f1e6 | 09ec77caa852d4edcc005b64432d99ab1e75279e | /app/utils.py | d3d162b6610bd0fb9f5fe0797238fe4e42fc60d9 | [] | no_license | mjcarroll/madisoncrimes | d4ad4ba0cff66a2c817bb00c5c192d07276ef391 | bb5f419ab645c72ee385a1f2bebddf43b3591b76 | refs/heads/master | 2022-12-10T07:27:37.540226 | 2019-08-01T00:12:06 | 2019-08-01T02:02:47 | 39,786,162 | 0 | 1 | null | 2015-07-27T16:48:47 | 2015-07-27T16:48:46 | Python | UTF-8 | Python | false | false | 6,910 | py | #!/usr/bin/env python
from app import app
import os
import re
import datetime
from lxml import html
import requests
MADISON_URL = 'https://www.madisonal.gov/Archive.aspx'
INCIDENT_AMID = 67
ARREST_AMID = 68
def list_madison_reports(amid):
    """Return the archive ids (ADID values) listed for module id *amid*.

    Scrapes the city archive page and extracts the id from each
    '...=<ADID>' link inside the archive span.
    """
    page = requests.get(MADISON_URL,
                        params={'AMID': amid, 'Type': '', 'ADID': ''})
    tree = html.fromstring(page.text)
    ids = []
    for anchor in tree.xpath('//span[@class="archive"]/a'):
        parts = anchor.attrib['href'].split('=')
        # Skip links without a query value.
        if len(parts) < 2:
            continue
        ids.append(parts[1])
    return ids
def list_madison_arrests():
    """Return archive ids of all arrest reports."""
    return list_madison_reports(ARREST_AMID)
def list_madison_incidents():
    """Return archive ids of all incident reports."""
    return list_madison_reports(INCIDENT_AMID)
def download_report(fid):
    """Download archive entry *fid* and save it to DATA_DIR/reports/<fid>."""
    payload = {'ADID': fid}
    page = requests.get(MADISON_URL, params=payload)
    fname = os.path.join(app.config['DATA_DIR'], 'reports', fid)
    with open(fname, 'wb') as f:
        f.write(page.content)
def is_downloaded(report):
    """Return True when report id *report* has already been saved locally."""
    path = os.path.join(app.config['DATA_DIR'], 'reports', report)
    return os.path.exists(path)
def clean_report(lines):
    """Strip boilerplate from a raw report's lines.

    Returns the stripped remaining lines, dropping empty lines, known
    header/footer phrases, 'Page X of Y' markers and '(... To ...)'
    date-range banners.
    """
    boilerplate = {'', '\n', 'Incident Report', 'Arrest Report',
                   'Madison Police Department', 'Date', 'Arrest Information',
                   'Arrest',
                   'Report Designed by the Law Enforcement Technology Coordinator'}
    kept = []
    for raw in lines:
        stripped = raw.strip()
        if stripped in boilerplate:
            continue
        if re.match(r'Page \d* of \d*', stripped):
            continue
        if re.match(r'\((.*) To (.*)\)', stripped):
            continue
        kept.append(stripped)
    return kept
def reformat_incident(lines):
    """Split lines containing several 'Key:' fields into one field per line.

    Extracted incident text sometimes runs the 'Time:', 'Shift:' and
    'Location:' fields together on a single line; this cuts the line
    before each marker.  Empty fragments are dropped and all fragments
    are stripped.
    """
    new_lines = []
    for line in lines:
        # find(...) > 0 (not >= 0) is deliberate: only split when the
        # marker is preceded by other text; a line *starting* with the
        # marker is left intact.
        if line.find('Time:') > 0:
            new_lines.append(line[0:line.find('Time:')])
            line = line[line.find('Time:'):]
        if line.find('Shift:') > 0:
            new_lines.append(line[0:line.find('Shift:')])
            line = line[line.find('Shift:'):]
        if line.find('Location:') > 0:
            new_lines.append(line[0:line.find('Location:')])
            line = line[line.find('Location:'):]
        new_lines.append(line)
    new_lines = [l.strip() for l in new_lines if len(l)]
    return new_lines
def clean_incident(inc):
    """Normalise an incident description.

    Dashes become spaces, runs of spaces are collapsed (three passes of
    double-space replacement, matching the original behaviour), ordinal
    suffixes (1st/2ND/...) are reduced to bare digits, and trailing or
    leading periods are stripped.
    """
    substitutions = (
        ('-', ' '),
        ('  ', ' '), ('  ', ' '), ('  ', ' '),
        ('1st', '1'), ('1ST', '1'),
        ('2nd', '2'), ('2ND', '2'),
        ('3rd', '3'), ('3RD', '3'),
        ('4th', '4'), ('4TH', '4'),
    )
    for old, new in substitutions:
        inc = inc.replace(old, new)
    return inc.strip('.')
def extract_arrests(report):
    """Parse arrest records out of a downloaded arrest-report blob.

    report: object with an integer ``id`` and raw ``report_text`` bytes.
    Returns a list of dicts with Name, Date, Res(idence), Location,
    Incident (list of cleaned charge strings), Case and File keys.
    """
    file = str(report.id)
    lines = report.report_text.decode().split('\n')
    lines = clean_report(lines)
    records = []
    record = None
    # Classify every line by position: dates, case numbers, everything else.
    date_lines = {}
    cno_lines = {}
    other_lines = {}
    for ii, line in enumerate(lines):
        date_match = re.match(r'(\d*)/(\d*)/(\d*)', line)
        case_match = re.match(r'(\d\d)-(\d*)', line)
        if date_match:
            date_lines[ii] = line
        elif case_match:
            cno_lines[ii] = line
        else:
            other_lines[ii] = line
    date_keys = sorted(date_lines.keys())
    cno_keys = sorted(cno_lines.keys())
    other_keys = sorted(other_lines.keys())
    # Each record spans from one date line to the next; the case number and
    # the descriptive text lie between them.
    # NOTE(review): the loop stops at len(date_keys) - 1, so the record
    # after the final date line is never emitted -- verify this is intended.
    for ii in range(0, len(date_keys) - 1):
        dk = date_keys[ii]
        dkn = date_keys[ii+1]
        # First case-number line strictly between the two date lines.
        ck = [ ck for ck in cno_keys if ck > dk and ck < dkn][0]
        between_dk_ck = [ok for ok in other_keys if ok > dk and ok < ck]
        between_ck_dkn = [ok for ok in other_keys if ok > ck and ok < dkn]
        all_other = [other_lines[k] for k in between_dk_ck]
        all_other.extend([other_lines[k] for k in between_ck_dkn])
        all_other_str = ' '.join(all_other)
        match = re.match(r'([\w\s]*), (.*) was arrested at (.*) on the charge\(s\) of:', all_other_str)
        if match:
            # Walk the joined string back to the source line index `g`
            # where the charge list begins (the +1 accounts for the
            # joining space between lines).
            end = match.end()
            g = 0
            for ii, ao in enumerate(all_other):
                if end >= len(ao):
                    end = end - (len(ao) + 1)
                else:
                    g = ii
                    break
            dt = datetime.datetime.strptime(date_lines[dk],
                                            '%m/%d/%y')
            record = {
                'Name': match.groups()[0],
                'Date': dt,
                'Res': match.groups()[1],
                'Location': match.groups()[2],
                'Incident': [clean_incident(i) for i in all_other[g:]],
                'Case': cno_lines[ck],
                'File': file
            }
            records.append(record)
        else:
            continue
    return records
def extract_incidents(report):
    """Parse incident records out of a downloaded incident-report blob.

    report: object with an integer ``id`` and raw ``report_text`` bytes.
    Returns a list of dicts with File, Case, DateTime, Shift, Address
    and Incident (list of cleaned descriptions) keys.
    """
    file = str(report.id)
    lines = report.report_text.decode().split('\n')
    lines = clean_report(lines)
    lines = reformat_incident(lines)
    records = []
    record = None
    # One dict per field type, mapping line index -> field value.
    cno_lines = {}
    time_lines = {}
    shift_lines = {}
    date_lines = {}
    loc_lines = {}
    inc_lines = {}
    line_idx = [ cno_lines, time_lines, shift_lines, date_lines, loc_lines, inc_lines]
    case_str = "Case No.: "
    time_str = "Time: "
    shift_str = "Shift: "
    date_str = "Date Reported: "
    loc_str = "Location: "
    inc_str = "Incident: "
    strings = [ case_str, time_str, shift_str, date_str, loc_str, inc_str ]
    # Sort each line into it's own dict by line type
    for ii, line in enumerate(lines):
        for (_str, _idx) in zip(strings, line_idx):
            if line.find(_str) >= 0:
                _idx[ii] = line[line.find(_str) + len(_str):].strip()
    # A record spans from one 'Case No.' line to the next; 1e6 acts as a
    # sentinel upper bound for the final record.
    cno_keys = sorted(cno_lines.keys())
    cno_keys_s = cno_keys[1:] + [1e6]
    for key, key_plus in zip(cno_keys, cno_keys_s):
        time = [v for (k,v) in time_lines.items() if
                k > key and k < key_plus]
        date = [v for (k,v) in date_lines.items() if
                k > key and k < key_plus]
        shift = [v for (k,v) in shift_lines.items() if
                 k > key and k < key_plus]
        loc = [v for (k,v) in loc_lines.items() if
               k > key and k < key_plus]
        incs = [v for (k,v) in inc_lines.items() if
                k > key and k < key_plus]
        record = {}
        if file:
            record['File'] = file
        record['Case'] = cno_lines[key]
        # NOTE(review): raises IndexError if a record is missing its Time,
        # Date, Shift or Location field -- no guard exists here.
        dt = datetime.datetime.strptime(time[0] + ' ' + date[0],
                                        '%I:%M %p %B %d, %Y')
        new_incs = []
        for inc in incs:
            inc = clean_incident(inc)
            new_incs.append(inc)
        record['DateTime'] = dt
        record['Shift'] = shift[0]
        record['Address'] = loc[0]
        record['Incident'] = new_incs
        records.append(record)
    return records
| [
"michael@openrobotics.org"
] | michael@openrobotics.org |
c1a7695378b465302787c7eaa57278bb48b5027b | d17a6b9330d77411eca343f955b45fe7d1ad1e0a | /random_setlist.py | 88b5a4c2ec84ec7a6fe7c8c177c3a115adee5be0 | [] | no_license | mdesantis999/setlist_generator | d8b215870e45ce3cc1752cfe0e119b28b4c225db | 101c0feeeced30cc3509b8d15ef75c4db19b176b | refs/heads/master | 2020-05-31T14:00:33.859112 | 2014-10-07T18:08:41 | 2014-10-07T18:08:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/python
import random

# name of the file with all songs, 1 song per line
songsource = 'songlist.txt'
# number of sets desired
numsets = 4
# length of each set
setlength = 8
# always start with position 0
currentsong = 0
lastsong = 0

# open the file and read every song title
with open(songsource) as f:
    allsongs = f.read().splitlines()

# randomize the order of songs
random.shuffle(allsongs)

# create the setlists.  range(1, numsets + 1) fixes the original
# off-by-one (range(1, numsets) printed one set too few), and the min()
# guard prevents an IndexError when the song list runs out.
for l in range(1, numsets + 1):
    print("SET " + str(l))
    lastsong = min(currentsong + setlength, len(allsongs))
    for s in range(currentsong, lastsong):
        print(allsongs[s])
    currentsong = lastsong
| [
"michaeljdesantis@gmail.com"
] | michaeljdesantis@gmail.com |
def load_and_test(frame):
    """Run the saved eye-state CNN on the face cropped from *frame*.

    Prints "open eyes" or "closed eyes"; returns nothing.
    """
    from keras.models import load_model
    model = load_model('face.h5')
    # Class indices from training: {'closed': 0, 'open': 1}
    from keras.preprocessing import image
    from haar import faces
    # faces() detects the face in `frame` and presumably writes the crop
    # to 'img.jpg' for the load_img call below -- TODO confirm in haar.py.
    faces(frame)
    test_image = image.load_img('img.jpg', target_size = (64, 64))
    test_image = image.img_to_array(test_image)
    import numpy as np
    # Add a batch dimension so the model sees a batch of one image.
    test_image = np.expand_dims(test_image, axis = 0)
    result = model.predict(test_image)
    if(result==1):
        print("open eyes")
    else:
        print("closed eyes")
"noreply@github.com"
] | vrajcm9.noreply@github.com |
3d5ed464cc8308cc50c33b422cdfd67410bc9b09 | 3d5a1845525325bb0238dac99339acf54e2895d3 | /segContent.py | 754bbf462562c0d344c14815402455eaa56a99ca | [] | no_license | Ynglylife/Bert_ner | 57403f0accaec47cad232bdba86210391ce701cb | fb1a91aa6e6e5ffc10ac001983c878855ebcc532 | refs/heads/master | 2020-05-09T19:51:17.393013 | 2019-04-26T09:10:52 | 2019-04-26T09:10:52 | 181,389,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 19-4-2 上午10:02
# @Author : liyang
# @File : createWords.py
import pandas as pd
import time
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import cpu_count
from config import *
from nlpUtil import NLPUtil
def segData(nthread=None):
    """Split the training data into *nthread* roughly equal DataFrame chunks.

    The first nthread-1 chunks each hold ``len(data) // nthread`` rows;
    the final chunk takes whatever remains.
    """
    if nthread is None:
        nthread = cpu_count() - 3
    data = pd.read_json(TRAIN_DATA, orient='records', lines=True)
    size = data.shape[0] // nthread
    chunks = [data.loc[i * size:(i + 1) * size - 1]
              for i in range(nthread - 1)]
    chunks.append(data.loc[(nthread - 1) * size:])
    return chunks
def segContentWithMultiProc(nthread=None, seg='jieba'):
    """Segment every article's content in parallel and write a TSV.

    nthread: number of worker processes (defaults to cpu_count() - 3).
    seg: segmenter to use -- 'jieba', 'pkuseg' or 'hanlp'.

    Writes a tab-separated file with 'newsId' and 'content_seg' columns
    to TRAIN_EXTRACTS_SEG % seg.  The original duplicated this body once
    per segmenter; a dispatch table removes the triplication.
    """
    seg_funcs = {
        'jieba': segContentWithJieba,
        'pkuseg': segContentWithPkuseg,
        'hanlp': segContentWithHanLP,
    }
    if seg not in seg_funcs:
        print("No such segment choice")
        exit(0)
    start = time.time()
    if nthread is None:
        nthread = cpu_count() - 3
    procs = ProcessPoolExecutor(nthread)
    seg_data = segData(nthread)
    res = procs.map(seg_funcs[seg], seg_data)
    procs.shutdown()
    end = time.time()
    print('runs %0.2f seconds.' % (end - start))
    data = pd.concat(res)
    print(data.shape)
    data.to_csv(TRAIN_EXTRACTS_SEG % seg, columns=['newsId', 'content_seg'], sep='\t', index=False,
                encoding='utf-8')
def segContentWithJieba(sub_data):
    """Segment *sub_data*'s 'content' column with jieba into 'content_seg'.

    Mutates and returns the same DataFrame chunk; shows a tqdm progress bar.
    """
    tqdm.pandas(desc="jieba Process:")
    sub_data['content_seg'] = sub_data['content'].progress_apply(NLPUtil.divideWordWithJieba)
    return sub_data
def segContentWithPkuseg(sub_data):
    """Segment *sub_data*'s 'content' column with pkuseg into 'content_seg'.

    Mutates and returns the same DataFrame chunk; shows a tqdm progress bar.
    """
    tqdm.pandas(desc="pkuseg Process:")
    sub_data['content_seg'] = sub_data['content'].progress_apply(NLPUtil.divideWordWithPkuseg)
    return sub_data
def segContentWithHanLP(sub_data):
    """Segment *sub_data*'s 'content' column with HanLP into 'content_seg'.

    Mutates and returns the same DataFrame chunk; shows a tqdm progress bar.
    """
    tqdm.pandas(desc="hanlp Process:")
    sub_data['content_seg'] = sub_data['content'].progress_apply(NLPUtil.divideWordWithHanLP)
    return sub_data
if __name__ == '__main__':
    # Entry point: segment the training corpus with jieba across 20 workers.
    segContentWithMultiProc(nthread=20, seg='jieba')
| [
"yngly@outlook.com"
] | yngly@outlook.com |
64be6af05f645e9788495fd6f6fd58ae28280847 | da3d76d3edae78d7c8a00f9ffb12f17a26a3afa6 | /release/mySupport.py | f2323fb8c6ed955c9dcaeac68ac212efb075ceb8 | [] | no_license | NENUBioCompute/SeqTMPPI | d50abffa05748da70835f64a49468c58041d562e | 0850552976becba2ec12fcb53aaaaca0369db97f | refs/heads/master | 2023-08-10T20:00:17.714393 | 2021-10-03T03:09:18 | 2021-10-03T03:09:18 | 357,561,012 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,262 | py | # encoding: utf-8
"""
@author: julse@qq.com
@time: 2020/4/15 22:07
@desc:
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras import models
from calculate_performance import calculate_performance
from common import handleBygroup, check_path
from myData import BaseData
from myEvaluate import MyEvaluate
def plot_result(history_dict,outdir):
    """Plot each training metric against its validation counterpart.

    history_dict: a Keras ``History.history``-style dict mapping metric
        names (and their 'val_'-prefixed validation series) to per-epoch
        value lists.
    outdir: directory where one '<metric>.png' figure is saved per metric.
    """
    for key in history_dict.keys():
        # Log the final-epoch value of every series (train and val).
        print('%s,%s'%(key,str(history_dict[key][-1])))
        # Validation series are drawn alongside their training metric below.
        if 'val_' in key:continue
        epochs = range(1, len(history_dict[key]) + 1)
        plt.clf()  # clear the current figure
        fig = plt.figure()
        plt.plot(epochs, history_dict[key], 'bo', label='Training %s'%key)
        plt.plot(epochs, history_dict['val_'+key], 'b', label='Validation val_%s' %key)
        plt.title('Training and validation %s'%key)
        plt.xlabel('Epochs')
        plt.ylabel(key)
        plt.yticks(np.arange(0,1,0.1))
        plt.legend()
        # plt.show()
        fig.savefig(os.path.join(outdir ,'%s.png' % key))
def saveDataset(foutdir, x_train, y_train, x_test, y_test, onehot=False):
    """Dump train/test features and labels as comma-separated integer files.

    Writes '_train_data.txt' and '_test_data.txt' into *foutdir*, one
    sample per row with the label as the last column.  When *onehot* is
    True, each one-hot encoded feature is collapsed back to its argmax
    index first.
    """
    if onehot:
        x_train = np.array([[np.argmax(v) for v in row] for row in x_train])
        x_test = np.array([[np.argmax(v) for v in row] for row in x_test])
    for fname, (x, y) in (('_train_data.txt', (x_train, y_train)),
                          ('_test_data.txt', (x_test, y_test))):
        combined = np.hstack([x, y.reshape(-1, 1)])
        np.savetxt(os.path.join(foutdir, fname), combined, fmt='%d', delimiter=',')
def calculateResults(dirout,dirin,filename='_evaluate.txt',row = 0,resultfilename = 'result.csv'):
    """
    %s\%s\_evaluate.txt
    :param dirin: contains a list of \%s\_evaluate.txt
    :return:
    dirin = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_ratio/p_fw_v1_train_validate_v2_fixpositive_2/2/test_DIP/'
    dirout = dirin
    calculateResults(dirout,dirin,filename='log.txt',row = 2,resultfilename = 'result.csv')

    Collects the metric line found at index `row` of `filename` inside
    every sub-directory of `dirin`, parses it as "...:[v1, v2, ...]",
    and writes a CSV with one row per sub-directory plus a 'mean' row.
    """
    # dirin = r'E:\githubCode\BioComputeCodeStore\JiangJiuhong\data\PPI\stage2\processPair2445\pair\positiveV1\onehot\result'
    check_path(dirout)
    count = 0
    data = []
    # columns = ['loss', 'acc', 'metric_precision', 'metric_recall', 'metric_F1score', 'matthews_correlation']
    columns = ['Loss', 'Acc', 'Precision', 'Recall', 'F1score', 'MCC']
    indexs = []
    print(columns)
    print(dirin)
    for eachdir in os.listdir(dirin):
        print(eachdir)
        # Plain files (names containing '.') are skipped; only result
        # sub-directories are processed.
        if '.' in eachdir :continue
        fin = os.path.join(dirin, eachdir)
        # `filename` may itself be a relative path; join its components.
        sep = '\\' if '\\' in filename else '/'
        if sep in filename:
            for f in filename.split(sep):
                fin = os.path.join(fin, f)
        else:
            fin = os.path.join(fin,filename)
        # fin = '%s\%s\_evaluate.txt' % (dirin, eachdir)
        if not os.access(fin, os.F_OK):
            print('not access to:',fin)
            continue
        with open(fin, 'r') as fi:
            # Skip `row` leading lines, then read the metric line.
            real_row=0
            while(real_row!=row):
                fi.readline()
                real_row = real_row + 1
            line = fi.readline()[:-1]
            # sum += np.array(line.split(':')[-1][1:-1].split(','))
            line = line.replace('nan','0')
            print('****************',line,'********************')
            # Expected shape: "<label>:[v1, v2, ...]"; strip the brackets
            # and split on commas.
            data.append(line.split(':')[-1][1:-1].split(','))
            indexs.append(eachdir)
            count = count + 1
            print(str(line.split(':')[-1][1:-1].split(','))[1:-1])
    mydata = pd.DataFrame(data)
    mydata.replace('nan',0,inplace=True)
    t = mydata.apply(pd.to_numeric)
    # Append the column-wise mean as a final 'mean' row.
    t.loc['mean'] = t.apply(lambda x: x.mean())
    indexs.append('mean')
    t.index = indexs
    t.columns = columns
    t.sort_index(inplace=True)
    t.to_csv(os.path.join(dirout, resultfilename), index=True, header=True)
    # t.to_csv(os.path.join(dirout, 'result.csv'), index=True, header=True,float_format = '%.3f')
def groupCalculate(dirin,filetype='all'):
"""
/home/jjhnenu/data/PPI/release/result/group/p_fp_1_1/1/all/_evaluate.txt
:param dirin: /home/jjhnenu/data/PPI/release/result/group
:return:
"""
for eachdir in os.listdir(dirin):
subdir = os.path.join(dirin,eachdir) # /home/jjhnenu/data/PPI/release/result/group/p_fp_1_1/
data = []
columns = ['Loss', 'Acc', 'Precision', 'Recall', 'F1score', 'MCC']
# columns = ['loss', 'acc', 'metric_precision', 'metric_recall', 'metric_F1score', 'matthews_correlation']
print(columns)
for eachsubdir in os.listdir(subdir): # 0 1 2 3 4 5
fin = os.path.join(subdir,eachsubdir,filetype,'_evaluate.txt')
# fin = os.path.join(subdir,eachsubdir,filetype,'_history_dict.txt')
if not os.access(fin, os.F_OK): continue
with open(fin, 'r') as fi:
line = fi.readline()[:-1]
# sum += np.array(line.split(':')[-1][1:-1].split(','))
data.append(line.split(':')[-1][1:-1].split(','))
print(str(line.split(':')[-1][1:-1].split(','))[1:-1])
mydata = pd.DataFrame(data)
t = mydata.apply(pd.to_numeric)
t.columns = columns
t.loc['mean'] = t.apply(lambda x: x.mean())
dirout = os.path.join(subdir.replace('result','statistic'))
check_path(dirout)
# float_format = '%.3f'
t.sort_index(inplace=True)
t.to_csv(os.path.join(dirout, 'result.csv'),index=True, header=True)
print(dirout)
def savepredict(fin_pair,dir_in,fin_model,dirout_result):
# fin_pair = '/home/19jiangjh/data/PPI/release/pairdata/p_fw/1/0/test.txt'
# dir_in = '/home/19jiangjh/data/PPI/release/feature/p_fp_fw_19471'
# fin_model = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_ratio/p_fw_train_validate/1/_my_model.h5'
# dirout_result = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_ratio/p_fw_train_validate/1/test'
check_path(dirout_result)
onehot = True
dataarray = BaseData().loadTest(fin_pair, dir_in,onehot=onehot,is_shuffle=False)
x_test, y_test =dataarray
model = models.load_model(fin_model, custom_objects=MyEvaluate.metric_json)
result = model.evaluate(x_test, y_test, verbose=False,batch_size=90)
result_predict = model.predict(x_test,batch_size=90)
result_predict = result_predict.reshape(-1)
result_class = model.predict_classes(x_test,batch_size=90)
result_class = result_class.reshape(-1)
y_test = y_test.reshape(-1)
print('Loss:%f,ACC:%f' % (result[0], result[1]))
df = pd.read_table(fin_pair,header=None)
# df.columns = ['tmp', 'nontmp']
df.rename(columns={0: 'tmp', 1: 'nontmp'}, inplace=True)
df['real_label'] = list(y_test)
df['predict_label'] = result_class
df['predict'] = result_predict
df.to_csv(os.path.join(dirout_result,'result.csv'),index=False)
result_manual = MyEvaluate().evaluate_manual(y_test, result_predict)
print('[acc,metric_precision, metric_recall, metric_F1score, matthews_correlation]')
print(result_manual)
print('[acc,precision,sensitivity,f1,mcc,aps,aucResults,specificity]')
result_manual2 =calculate_performance(len(x_test), y_test, result_class, result_predict)
print(result_manual2)
with open(os.path.join(dirout_result,'log.txt'),'w') as fo:
fo.write('test dataset %s\n'%fin_pair)
fo.write('Loss:%f,ACC:%f\n' % (result[0], result[1]))
fo.write('evaluate result:'+str(result)+'\n')
fo.write('manual result:[acc,metric_precision, metric_recall, metric_F1score, matthews_correlation]\n')
fo.write('manual result:' + str(result_manual) + '\n')
fo.write('manual result2:[acc,precision,sensitivity,f1,mcc,aps,aucResults,specificity]\n')
fo.write('manual result2:'+str(result_manual2)+'\n')
fo.flush()
if __name__ == '__main__':
print()
# dirin = '/home/jjhnenu/data/PPI/release/result_epoch80/group'
# groupCalculate(dirin, filetype='all')
# dirin = '/home/jjhnenu/data/PPI/release/result_in_paper/alter_param/alter_kernel_size/early_stop/opotimize_90_200_90_group'
# dirout = dirin
# calculateResults(dirout,dirin)
'''
calclulate 5 group
'''
# dirin = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_dataset/p_fp_1_1'
# dirout = dirin
# calculateResults(dirout,dirin)
# dirin = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_dataset/p_fw_1_1'
# dirout = dirin
# calculateResults(dirout,dirin)
# dirin = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_dataset/p_fp_fw_2_1_1'
# dirout = dirin
# calculateResults(dirout,dirin)
'''
calculate every group result
'''
# basedir = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_param/alter_filters_k_99_f300'
# for eachdir in os.listdir(basedir):
# dirin = os.path.join(basedir,eachdir)
# if os.path.isfile(dirin):continue
# dirout = dirin
# calculateResults(dirout,dirin)
'''
extract mean result of each kernel
'''
# df = pd.DataFrame()
# # basedir = '/home/19jiangjh/data/PPI/release/result_in_paper/alter_param/alter_filters'
# for eachdir in os.listdir(basedir):
# basedir2 = os.path.join(basedir,eachdir)
# if os.path.isfile(basedir2):continue
# for eachkernel in os.listdir(basedir2):
# dirin = os.path.join(basedir2, eachkernel)
# if os.path.isfile(dirin):
# print(dirin)
# data = pd.read_csv(dirin,index_col=0)
# if df.empty:
# df = pd.DataFrame(columns = data.columns)
# # df.loc[eachdir] = data.iloc[-1].values
# df.loc[int(eachdir)] = data.iloc[-1].values
# df.sort_index(inplace=True)
# df.to_csv(os.path.join(basedir,'result.csv'))
fin = r'E:\githubCode\data\PPI\release\result_in_paper_2\alter_param\batch_size\70\0'
outdir = r'E:\githubCode\data\PPI\release\result_in_paper_2\alter_param\batch_size\70\0'
with open(fin) as fi:
line = fi.readline()
mydict = eval(line)
plot_result(mydict, outdir)
| [
"julse@qq.com"
] | julse@qq.com |
6a8656099706c194292fcda2ee0f6418c8e70f36 | c5d79eb6acb52f37e36db863c59a8b35a29c315e | /01_Python基础/hm_06_小明.py | 6f986dd743ecbf0f9c73b9f17bc4cdfeaf80662c | [] | no_license | taurusyuan/python | 1bef863572f06d25dce97a1541d84b3bec9965a4 | 248b7dc3d15a4ab9dbbb9c8c26d126bcc3073269 | refs/heads/master | 2020-04-17T00:14:06.473725 | 2019-04-25T08:49:55 | 2019-04-25T08:49:55 | 166,042,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | """
姓名:小明
年龄:18岁
性别:男生
身高:1.75米
体重:75 Kg
"""
password = input("请输入你的密码:")
print(password)
first_name = "张"
second_name = "小明"
name = first_name + second_name
age = 18
sex = "man"
height = 1.75
weight = 75
print(name, age, sex, height, weight)
| [
"linyuanjie514@gmail.com"
] | linyuanjie514@gmail.com |
51ceb3570f27107ac2da3e8147aab9eefffd42dc | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit_noisy42.py | a8bfb10ba754da62197c75c385a3e667cff3009b | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,214 | py | # qubit number=3
# total number=7
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[2]) # number=2
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.z(input_qubit[2]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy42.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
ce3a1bea2ae26b242441338791493345cfc404ed | fc9276e5688ecfa7ecc49ac881060931864967db | /jianzhi_offer/day9_变态青蛙跳demo.py | 28ab2cadd7e0687a105590afae84048ff434606c | [] | no_license | dashayudabao/practise_jianzhioffer | 7b69ff891d9ec4374ad7d13f9c32a1055e9745f8 | b54c892b1bc512b124029dc74e26ceedb93a7fa4 | refs/heads/master | 2020-05-20T09:49:42.164507 | 2019-05-08T02:43:00 | 2019-05-08T02:43:00 | 185,512,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # Author: Baozi
#-*- codeing:utf-8 -*-
"""题目描述
一只青蛙一次可以跳上1级台阶,也可以跳上2级……它也可以跳上n级。求该青蛙跳上一个n级的台阶总共有多少种跳法。
"""
class Solution:
def jumpFloorII(self, number):
if number == 1:
return 1
result = 1
for i in range(1,number):
result *= 2
return result
if __name__ == '__main__':
s = Solution()
print(s.jumpFloorII(4))
print(s.jumpFloorII(11)) | [
"17ycheng@stu.edu.cn"
] | 17ycheng@stu.edu.cn |
ad636338d8bc7e671f0a905f2c3cadea7ea06d78 | a07d8221391d159d46978319af4fc45463c73e73 | /upvotes/migrations/0005_auto_20200512_1127.py | e9b4249e489b32344287b0ce2bc628c389d02479 | [] | no_license | Code-Institute-Submissions/MothRadar | e77841423b32f1a048f510327f59fb421a4a98ce | 4ade1aebc9ad80e250ebe67eb385a6194769000a | refs/heads/master | 2022-07-05T03:29:00.359219 | 2020-05-18T18:37:41 | 2020-05-18T18:37:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # Generated by Django 3.0.5 on 2020-05-12 09:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('upvotes', '0004_auto_20200512_0846'),
]
operations = [
migrations.AlterField(
model_name='upvote',
name='upvoter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"igor.basuga@gmail.com"
] | igor.basuga@gmail.com |
126f5faa6d66c5a63f2ce4dc6fed55e424baf542 | 09b86a78febfec7107b404bc21378dbe20b49a53 | /bin/flask | 3c02c2b68d6ecd870c760775db99d8947f8f966b | [] | no_license | SteveKipp/flask-tox-tutorial | d7752e83bb98501fb63f90488fecfed473779649 | b39ff61f06a54a69059d3933fcd852766099b0dd | refs/heads/master | 2022-12-12T17:54:38.865467 | 2020-09-14T05:47:01 | 2020-09-14T05:47:01 | 295,247,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | #!/home/skipp/Documents/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"sk1pp@protonmail.com"
] | sk1pp@protonmail.com | |
1393b6316d1e0a3f66c872c6f21c11da41fbb9e9 | ad715f9713dc5c6c570a5ac51a18b11932edf548 | /tensorflow/lite/testing/op_tests/ceil.py | 02d6ab3f76b56e09d806372a7e08eef7ec137d0a | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | rockzhuang/tensorflow | f1f31bc8edfa402b748c500efb97473c001bac95 | cb40c060b36c6a75edfefbc4e5fc7ee720273e13 | refs/heads/master | 2022-11-08T20:41:36.735747 | 2022-10-21T01:45:52 | 2022-10-21T01:45:52 | 161,580,587 | 27 | 11 | Apache-2.0 | 2019-01-23T11:00:44 | 2018-12-13T03:47:28 | C++ | UTF-8 | Python | false | false | 1,835 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for ceil."""
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_ceil_tests(options):
"""Make a set of tests to do ceil."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the ceil op testing graph."""
input_value = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.math.ceil(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
039400f6e90ddb926cfa170a11dbcee19f18dc13 | 11e42a760f903be9813e0a62d6f70479fbc29618 | /supplycrate/controllers/nigredo.py | ce949b7988f1bd7f24f426ba7861e6a84bfa1131 | [] | no_license | RedGlow/supplycrate | d50e3bc5c43358befc6a1ee62bfcf3942c33aaac | 127d9b57b74e754fcd144ac0f9231b2633e6097e | refs/heads/master | 2020-12-24T13:53:17.419783 | 2014-03-28T11:42:17 | 2014-03-28T11:42:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,157 | py | from sqlalchemy.orm import aliased
from sqlalchemy.sql import functions, literal_column, or_, and_
from supplycrate import models
import supplycrate.tablerenderer.model as tmodels
from supplycrate.utils import decimal_str
__author__ = 'Mattia'
class NigredoTable(tmodels.Table):
name = "nigredo"
default_sorting_column = 5
default_sorting_asc = False
categories = {
'kind': [
'Mystic Weapons',
'Various',
'Material Promotions (common)',
'Material Promotions (fine)',
'Material Promotions (rare)',
'Siege Blueprints'
]
}
def __init__(self, session):
# prepare aliases
self._output_item_alias = aliased(models.Item, name='output_item')
self._ingredient_1_item_alias = aliased(models.Item, name='ingredient_1_item')
self._ingredient_1_vendor_data_alias = aliased(models.VendorData, name='ingredient_1_vendor_data')
self._ingredient_2_item_alias = aliased(models.Item, name='ingredient_2_item')
self._ingredient_2_vendor_data_alias = aliased(models.VendorData, name='ingredient_2_vendor_data')
self._ingredient_3_item_alias = aliased(models.Item, name='ingredient_3_item')
self._ingredient_3_vendor_data_alias = aliased(models.VendorData, name='ingredient_3_vendor_data')
self._ingredient_4_item_alias = aliased(models.Item, name='ingredient_4_item')
self._ingredient_4_vendor_data_alias = aliased(models.VendorData, name='ingredient_4_vendor_data')
self._ingredient_item_aliases = [
self._ingredient_1_item_alias,
self._ingredient_2_item_alias,
self._ingredient_3_item_alias,
self._ingredient_4_item_alias
]
self._ingredient_vendor_data_aliases = [
self._ingredient_1_vendor_data_alias,
self._ingredient_2_vendor_data_alias,
self._ingredient_3_vendor_data_alias,
self._ingredient_4_vendor_data_alias
]
# produce the labeled columns
sum_func = lambda t1, t2: t1 + t2
skill_point_cost = self._fold(
lambda i: functions.coalesce(self._ingredient_vendor_data_aliases[i].skill_point_cost, literal_column("0")),
sum_func
).label("skill_point_cost")
ingredients_are_sold = and_(*self._map(
lambda i: or_(self._ingredient_vendor_data_aliases[i].skill_point_cost != None,
self._ingredient_vendor_data_aliases[i].copper_cost != None,
self._ingredient_item_aliases[i].sell_count > literal_column("0"))
)).label("ingredients_are_sold")
self.__ingredients_are_sold = ingredients_are_sold
output_is_bought = (self._output_item_alias.buy_count > literal_column("0")).label("output_is_bought")
self.__output_is_bought = output_is_bought
cost_bo, cost_bi = self._buy_o(
lambda buy, o:
self._fold(
lambda i: self._if(
self._ingredient_vendor_data_aliases[i].copper_cost == None,
self._get_price(self._ingredient_item_aliases[i], buy),
self._least(self._ingredient_vendor_data_aliases[i].copper_cost,
self._get_price(self._ingredient_item_aliases[i], buy))
) * self._get_ingredient_count(i),
sum_func
).label("cost_b" + o)
)
cost_b = {
"o": cost_bo,
"i": cost_bi
}
profit_so, profit_si = self._buy_i(
lambda buy, i:
(self._get_price(self._output_item_alias, buy) * models.SkillPointRecipe.output_count *
literal_column("85") / literal_column("100")).
label("profit_s" + i)
)
profit_s = {
"o": profit_so,
"i": profit_si
}
net_profit_bo_so_per_sp, net_profit_bo_si_per_sp, \
net_profit_bi_so_per_sp, net_profit_bi_si_per_sp = self._b_s(
lambda b, s, buy, sell: (
self._round(
(profit_s[s] - cost_b[b]) /
self._fold(
lambda i: functions.coalesce(
self._ingredient_vendor_data_aliases[i].skill_point_cost,
literal_column("0")),
sum_func))
).label("net_profit_b" + b + "_s" + s + "_per_sp")
)
# produce the query
queryset = session.query(
models.SkillPointRecipe,
self._output_item_alias,
self._ingredient_1_item_alias,
self._ingredient_1_vendor_data_alias,
self._ingredient_2_item_alias,
self._ingredient_2_vendor_data_alias,
self._ingredient_3_item_alias,
self._ingredient_3_vendor_data_alias,
self._ingredient_4_item_alias,
self._ingredient_4_vendor_data_alias,
skill_point_cost,
ingredients_are_sold,
output_is_bought,
cost_bo,
cost_bi,
profit_so,
profit_si,
net_profit_bo_so_per_sp,
net_profit_bo_si_per_sp,
net_profit_bi_so_per_sp,
net_profit_bi_si_per_sp
). \
join(self._output_item_alias, models.SkillPointRecipe.output_item). \
outerjoin((self._ingredient_1_item_alias, models.SkillPointRecipe.ingredient_1_item),
(self._ingredient_1_vendor_data_alias,
self._ingredient_1_vendor_data_alias.item_id == self._ingredient_1_item_alias.data_id)). \
outerjoin((self._ingredient_2_item_alias, models.SkillPointRecipe.ingredient_2_item),
(self._ingredient_2_vendor_data_alias,
self._ingredient_2_vendor_data_alias.item_id == self._ingredient_2_item_alias.data_id)). \
outerjoin((self._ingredient_3_item_alias, models.SkillPointRecipe.ingredient_3_item),
(self._ingredient_3_vendor_data_alias,
self._ingredient_3_vendor_data_alias.item_id == self._ingredient_3_item_alias.data_id)). \
outerjoin((self._ingredient_4_item_alias, models.SkillPointRecipe.ingredient_4_item),
(self._ingredient_4_vendor_data_alias,
self._ingredient_4_vendor_data_alias.item_id == self._ingredient_4_item_alias.data_id))
# create column definitions
column_descriptions = [
tmodels.ColumnDescription('Item', True, self._output_item_alias.name, name='item'),
tmodels.ColumnDescription('Ingredients', True, None, name='ingredients'),
tmodels.ColumnDescription('Skill point cost', True, skill_point_cost, name='skillpointcost'),
tmodels.ColumnDescription('Gold cost', True, [[cost_bo, cost_bo], [cost_bi, cost_bi]], name='goldcost'),
tmodels.ColumnDescription('Profit', True, [[profit_so, profit_si], [profit_so, profit_si]], name='profit'),
tmodels.ColumnDescription('Net profit per skill point', True,
[[net_profit_bo_so_per_sp, net_profit_bo_si_per_sp],
[net_profit_bi_so_per_sp, net_profit_bi_si_per_sp]], name='netprofit')
]
# call super constructor
tmodels.Table.__init__(self, column_descriptions, queryset)
def _get_ingredient_count(self, i):
if i == 0:
return models.SkillPointRecipe.ingredient_1_count
elif i == 1:
return models.SkillPointRecipe.ingredient_2_count
elif i == 2:
return models.SkillPointRecipe.ingredient_3_count
elif i == 3:
return models.SkillPointRecipe.ingredient_4_count
def filter_by_buy_sell(self, queryset, buy_instant, sell_instant):
if buy_instant:
queryset = queryset.filter(self.__ingredients_are_sold == 1)
if sell_instant:
queryset = queryset.filter(self.__output_is_bought == 1)
return queryset
def filter_by_category(self, queryset, category_name, category_values):
my_categories = self.categories['kind']
if len(category_values) == len(my_categories):
return queryset # all categories are in
else:
return queryset.filter(
or_(
*(models.SkillPointRecipe.category == self.categories['kind'].index(category) for category in
category_values)))
def create_row(self, query_row):
# create ingredient list
ingredients = []
for idx in xrange(4):
i = self._get_qr(query_row, self._ingredient_item_aliases[idx])
c = query_row.SkillPointRecipe.get_ingredient_count(idx)
v = self._get_qr(query_row, self._ingredient_vendor_data_aliases[idx])
extra = []
if c > 1:
extra.append(tmodels.Text([str(c) + ' x']))
sp_cost = v.skill_point_cost if v is not None else None
if sp_cost is None:
extra.append(tmodels.Cost([[i.buy_price, i.buy_price],
[i.sell_price, i.sell_price]]))
else:
extra.append(tmodels.Text([str(sp_cost), tmodels.Text.SKILL_POINT_SYMBOL]))
ingredients.append(tmodels.Item(i, extra_pre_line=extra))
# create row
spc = decimal_str(query_row.skill_point_cost) # TODO: perhaps dict access?
row = tmodels.Row([
tmodels.Cell([tmodels.Item(self._get_qr(query_row, self._output_item_alias))]),
tmodels.Cell(ingredients),
tmodels.Cell([tmodels.Text([spc, tmodels.Text.SKILL_POINT_SYMBOL])]),
tmodels.Cell([tmodels.Cost.from_object(query_row, 'cost_b%(b)s')]),
tmodels.Cell([tmodels.Cost.from_object(query_row, 'profit_s%(s)s')]),
tmodels.Cell([tmodels.Cost.from_object(query_row, 'net_profit_b%(b)s_s%(s)s_per_sp')])
])
# return it
return row | [
"mattia.belletti@gmail.com"
] | mattia.belletti@gmail.com |
a1ff4766613a6a27fc4395c92e158607ac3292fc | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/vse-naloge-brez-testov/DN4-Z-225.py | 9583739c790cad2cec42909834833817e30398cc | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | # Tu pišite svoje funkcije:
from math import *
def koordinate(ime, kraji):
for kraj, x, y in kraji:
if kraj == ime:
return(x, y)
else:
return None
def razdalja_koordinat(x1, y1, x2, y2):
return sqrt((x1-x2) ** 2 + (y1-y2) ** 2)
def razdalja(ime1, ime2, kraji):
x1, y1 = koordinate(ime1, kraji)
x2, y2 = koordinate(ime2, kraji)
return razdalja_koordinat(x1, y1, x2, y2)
def v_dometu(ime, domet, kraji):
s = []
for mesto, x, y in kraji:
if mesto != ime:
if razdalja(ime, mesto, kraji) <= domet:
s.append(mesto)
return s
def najbolj_oddaljeni(ime, imena, kraji):
s = []
naj_r = 0
naj_k = ''
for kraj, x, y in kraji:
if kraj in imena:
r = razdalja(ime, kraj, kraji)
s.append((kraj, r))
for kraj, r in s:
if r > naj_r:
naj_r = r
naj_k = kraj
return naj_k
def zalijemo(ime, domet, kraji):
return najbolj_oddaljeni(ime, v_dometu(ime,domet,kraji), kraji)
def presek(s1, s2):
return list(set(s1).intersection(s2))
def skupno_zalivanje(ime1, ime2, domet, kraji):
mes1 = []
mes2 = []
for mesto, x, y in kraji:
if mesto == ime1:
for mesto, x, y in kraji:
if razdalja(mesto,ime1,kraji) <= domet:
mes1.append(mesto)
if mesto == ime2:
for mesto, x, y in kraji:
if razdalja(mesto,ime2,kraji) <= domet:
mes2.append(mesto)
return presek(mes1, mes2)
| [
"lenart.motnikar@gmail.com"
] | lenart.motnikar@gmail.com |
ce739380e97a96bf00fcdc9d4059e29f2e122645 | 099256b28df65fb7c90c077b060dca16b8655235 | /unsupervised_learning/0x03-hyperparameter_tuning/2-gp.py | 948f3c23d718cd4522f60f7ce711796142e5c0e1 | [] | no_license | Immaannn2222/holbertonschool-machine_learning | 1cebb9a889b363669bed7645d102dc56ab943c08 | 80bf8d3354702f7fb9f79bbb5ed7e00fc19f788d | refs/heads/master | 2023-08-01T05:35:00.180472 | 2021-09-22T20:28:17 | 2021-09-22T20:28:17 | 317,624,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python3
"""HYPERPARAÙETER"""
import numpy as np
class GaussianProcess:
""" represents a noiseless 1D Gaussian process"""
def __init__(self, X_init, Y_init, l=1, sigma_f=1):
"""class constructor"""
self.X = X_init
self.Y = Y_init
self.l = l
self.sigma_f = sigma_f
self.K = self.kernel(X_init, X_init)
def kernel(self, X1, X2):
"""calculates the covariance kernel matrix between two matrices"""
exp_term = (X1 - X2.T) ** 2
RBF = (((self.sigma_f) ** 2) * (np.exp(exp_term * (
-0.5 / self.l ** 2))))
return RBF
def predict(self, X_s):
"""predicts mean, standard deviation of points in a Gaussian process"""
K_ss = self.kernel(X_s, X_s)
K = self.kernel(self.X, self.X)
decompositon = np.linalg.cholesky(K)
K_k = self.kernel(self.X, X_s)
result = np.linalg.solve(decompositon, K_k)
mu = np.dot(result.T, np.linalg.solve(decompositon, self.Y)).reshape((
X_s.shape[0],))
s2 = np.diag(K_ss) - np.sum(result**2, axis=0)
return mu, s2
def update(self, X_new, Y_new):
"""updates a Gaussian Process"""
self.X = np.append(self.X, X_new).reshape(-1, 1)
self.Y = np.append(self.Y, Y_new).reshape(-1, 1)
self.K = self.kernel(self.X, self.X)
| [
"imennaayari@gmail.com"
] | imennaayari@gmail.com |
645b63632592b9f0bd01c042320e85d5eb4e8efb | a3d01be31136d42434ac2cc33b5057dbeaa77061 | /devnet_gui_functions_ebo.py | c8450935823b4a639bc84c771ac82b2a52444023 | [
"BSD-3-Clause"
] | permissive | ebarredo84/DEVNET_PREP_SW | 705fcc748f7dc85d54349988b21d3f194d81a451 | de3548e49e4f625dc571c9e675066f9e1646a50e | refs/heads/main | 2023-03-18T23:09:16.452636 | 2021-03-12T15:27:18 | 2021-03-12T15:27:18 | 346,786,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,648 | py | #------USER INTERFACE (VIEW)----------
import tkinter as tk
#------CONTROLLER--------------------
import json
import base64
#----------SECUNDARY WINDOW----------
class SECUNDARY_WINDOW:
    """Small prompt window used to ask the user for a single parameter.

    Builds a read-only message area, a text entry and a "Send" button inside
    the Tk root passed by the caller.  Pressing "Send" stops the root's
    mainloop so the caller can then read the typed value through SEND().
    """

    def __init__(self, window2, msg):
        # Configure the window itself.
        window2.geometry("600x100")
        window2.title("ASK PARAMETER")

        # Read-only text area that displays the prompt message.
        prompt_area = tk.Text(window2, width=75, height=25)
        prompt_area.place(x=0, y=0)
        prompt_area.insert("1.0", str(msg))
        prompt_area.config(state="disabled")  # lock it: display only

        # Entry widget where the user types the requested parameter.
        self.entry2 = tk.Entry(window2, fg="blue", bg="white", width=40)
        self.entry2.place(x=5, y=35)

        # "Send" button: quitting the mainloop hands control back to the caller.
        send_button = tk.Button(window2, text="Send",
                                fg="black", bg="grey",
                                width=5, height=1,
                                command=window2.quit)
        send_button.place(x=5, y=60)

    def SEND(self, window2):
        """Return the text currently typed in the entry widget.

        The *window2* argument is accepted for API compatibility with
        existing callers but is not used.
        """
        return self.entry2.get()
def ASK_PARAMETER(msg):
    """Pop up a small dialog that asks the user for one parameter.

    Blocks inside the Tk mainloop until the "Send" button calls quit(),
    then reads the typed value, destroys the window and returns the value
    as a string.
    """
    root = tk.Tk()
    dialog = SECUNDARY_WINDOW(root, msg)
    root.mainloop()  # returns once the Send button triggers root.quit()
    value = dialog.SEND(dialog)
    root.destroy()
    return value
#--------ERASE AND PRINT FUNCTIONS-------------
def PRINT_STATUS_CODE(response, text_box):
    """Replace the text box contents with the HTTP status of *response*.

    The widget is temporarily unlocked, cleared, filled with a single
    status line and then set back to read-only.
    """
    text_box.config(state="normal")   # unlock for editing
    text_box.delete("1.0", tk.END)    # wipe any previous output
    status_line = "Request status: " + str(response.status_code) + "\n"
    text_box.insert("1.0", status_line)
    text_box.config(state="disabled")  # back to read-only
def PRINT_HEADERS(headers, text_box):
    """Replace the text box contents with a one-line dump of *headers*.

    Same unlock / clear / write / lock cycle as PRINT_STATUS_CODE.
    """
    text_box.config(state="normal")   # unlock for editing
    text_box.delete("1.0", tk.END)    # wipe any previous output
    text_box.insert("1.0", "Headers: " + str(headers) + "\n")
    text_box.config(state="disabled")  # back to read-only
#--------PRINT FUNCTIONS----------------------
def PRINT_RESPONSE_JSON(resp, text_box):
    """Append the JSON body of *resp* to the text box, pretty-printed.

    Unlike the status/header helpers this does not clear the widget first:
    output accumulates at the end of the existing text.
    """
    pretty = json.dumps(resp.json(), indent=4)
    text_box.config(state="normal")        # unlock for editing
    text_box.insert(tk.END, pretty + "\n")
    text_box.config(state="disabled")      # back to read-only
def PRINT_RESPONSE(resp, text_box):
    """Append an already-deserialized JSON object *resp* to the text box.

    Pretty-prints the object and writes it after the current contents.
    """
    pretty = json.dumps(resp, indent=4)
    text_box.config(state="normal")        # unlock for editing
    text_box.insert(tk.END, pretty + "\n")
    text_box.config(state="disabled")      # back to read-only
def PRINT_CONTENT_JSON(resp, text_box):
    """Append the raw body of *resp* to the text box as pretty JSON.

    Parses resp.content (bytes/str JSON) and re-serializes it indented,
    then writes it after the current contents.
    """
    pretty = json.dumps(json.loads(resp.content), indent=4)
    text_box.config(state="normal")        # unlock for editing
    text_box.insert(tk.END, pretty + "\n")
    text_box.config(state="disabled")      # back to read-only
def PRINT_TABLE_IN_TEXT(text_box,dictionary,**kwargs):
    """Render *dictionary* (an iterable of dict-like rows) as a text table.

    Keyword arguments come in numbered triples, one triple per column:
        nameN : column header shown on the first line
        sizeN : field width (as a string) used in the format spec
        dataN : key looked up in every row of *dictionary*

    Passing k complete triples (3*k keyword arguments) prints a k-column
    table separated by '|'.  The original implementation special-cased
    2..6 columns with copy-pasted branches; this version handles any
    number of complete triples, which is backward compatible (identical
    output for the old argument counts).  Incomplete triples, like any
    argument count the old code did not recognise, print nothing.
    """
    text_box.config(state="normal")
    num_arg = len(kwargs)
    columns = num_arg // 3
    if columns >= 1 and num_arg % 3 == 0:
        # Build e.g. '{0:10s}{1:1}{2:20s}': data fields at even positions,
        # 1-char '|' separators at odd positions.
        spec_parts = []
        for col in range(columns):
            spec_parts.append('{' + str(2 * col) + ':' + kwargs['size%d' % (col + 1)] + 's}')
            if col < columns - 1:
                spec_parts.append('{' + str(2 * col + 1) + ':1}')
        print_header = ''.join(spec_parts)
        # Header row: the column names separated by '|'.
        header_values = []
        for col in range(columns):
            header_values.append(kwargs['name%d' % (col + 1)])
            if col < columns - 1:
                header_values.append("|")
        text_box.insert(tk.END, print_header.format(*header_values) + "\n")
        text_box.insert(tk.END, '-' * 140 + "\n")
        # One line per row; every value is stringified, as the old code did.
        data_keys = [kwargs['data%d' % (col + 1)] for col in range(columns)]
        for ITEM in dictionary:
            row_values = []
            for col, key in enumerate(data_keys):
                row_values.append(str(ITEM[key]))
                if col < columns - 1:
                    row_values.append("|")
            text_box.insert(tk.END, print_header.format(*row_values) + "\n")
    text_box.config(state="disabled")
"noreply@github.com"
] | ebarredo84.noreply@github.com |
a1b2bad95b13f703a82dd338ceddd232fadb93c8 | 5597b3c7f94cf4232438d7f826a84632995ab705 | /backend/settings.py | 5c70da56028a987fa68943bdb60659998a19d1fc | [] | no_license | denokenya/Full-stack-ecommerce-website | 3df80878d1fbd261e8190728d12e8231e2a41e17 | 0b3b18979ca0106d8de9cdcddb07bfa83005e7f3 | refs/heads/main | 2023-04-29T02:35:49.516948 | 2021-05-17T11:47:35 | 2021-05-17T11:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,606 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-bze)zl)ve*xy*wzv_ifz=n)1h50jwlmj-4$p5h)dms_h@%xwzc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
    # CorsMiddleware must come before CommonMiddleware
    # (django-cors-headers setup requirement).
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # BUG FIX: CommonMiddleware was listed twice, so every request ran it
    # twice; keep the single entry in its standard position.
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'frontend/build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000"
]
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
BASE_DIR / 'static',
BASE_DIR/'frontend/build/static'
]
MEDIA_ROOT = BASE_DIR / 'static/images'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# DRF: authenticate every API request with a simplejwt Bearer token.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    )
}
from datetime import timedelta
# djangorestframework-simplejwt configuration.  FIX: removed the stray
# ``...`` (Ellipsis) statement left over from pasting the simplejwt docs
# snippet; it was a harmless no-op but not intended code.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(days=1),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': True,
    'UPDATE_LAST_LOGIN': False,
    'ALGORITHM': 'HS256',
    'VERIFYING_KEY': None,
    'AUDIENCE': None,
    'ISSUER': None,
    'AUTH_HEADER_TYPES': ('Bearer',),
    'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
    'JTI_CLAIM': 'jti',
    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
"nagar.amit1999@gmail.com"
] | nagar.amit1999@gmail.com |
2d43a084e4862492a0024985ee4c913fc0c046a5 | 715e892707f1dd897d753601c37b3c6de72aad05 | /productApi/lists/migrations/0001_initial.py | 284bba08c56ad66ce3e277b9c4875aa954ae0f11 | [] | no_license | GirijaDas9/Django_API_1 | 913a514cb80d1076e915088d39e4ddccc7413848 | 29ff83d5fa776954cab0c2464980833a64a3420f | refs/heads/main | 2023-07-03T23:05:55.051789 | 2021-08-07T14:37:08 | 2021-08-07T14:37:08 | 393,706,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # Generated by Django 3.2.6 on 2021-08-07 12:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the ``lists`` app: creates the
    # ``List`` model as a multi-table child of ``utils.timestamp`` (note the
    # ``timestamp_ptr`` parent link) with an owning user foreign key.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('utils', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                ('timestamp_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='utils.timestamp')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            bases=('utils.timestamp', models.Model),
        ),
    ]
| [
"ddas0124@gmail.com"
] | ddas0124@gmail.com |
9054f273b7bdab89ce112692bd0210e20e3b3b29 | dbac88b924b820e101869859adb5631550438253 | /examen_webtech4/settings.py | 5cf285d11546de1988fce9a45c0c2610bf12edf5 | [] | no_license | misterpieter/examen_webtech4_2019_Django | 0227fad8556e4ff6b6061975f3211d97647dd382 | 7d3596e188e40818eaf38fb628e716ab9eb72f66 | refs/heads/master | 2020-05-01T16:59:06.506579 | 2019-03-25T15:46:59 | 2019-03-25T15:46:59 | 177,587,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | """
Django settings for examen_webtech4 project.
Generated by 'django-admin startproject' using Django 2.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'urf8)c$l&++%-*_id#_5y#_i*+1lz)bd1y-b7lxo7ezd65$3aj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Brexit.apps.BrexitConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'examen_webtech4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'examen_webtech4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"pieter@mbp-van-pieter.alpaca.int"
] | pieter@mbp-van-pieter.alpaca.int |
1fdff2355056e27e500ea5cdf901d1daf5a9985e | 32a2413c8b65d38c8b010e91f6d6a1f7466d72aa | /sales/views.py | 901d074d1bc5fd92c9eedf8549e602c15fb11f9b | [] | no_license | Oranizor/-Django- | e2572aaf89e5a9e620aceaef51a243aeb023ebe6 | 0b87b3f20d3c381a72f7885bd914b97df6be0ec0 | refs/heads/master | 2023-03-29T02:16:54.789597 | 2021-04-01T04:14:31 | 2021-04-01T04:14:31 | 350,289,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,605 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from common.models import Customer
def listorders(request):
    """Stub order-list view: always responds with a fixed banner string."""
    banner = "下面是系统中的所有订单信息..."
    return HttpResponse(banner)
def listcustomers(request):
    # List all customers as "field:value|...<br>" lines, optionally
    # filtered by the ``phoneNumber`` query parameter.
    #
    # ``objects.values()`` returns a QuerySet of dicts (field name -> value),
    # one per row.  Manager/QuerySet cheat-sheet (translated from the
    # original author's study notes):
    #   .all() / .values() / .filter() / .get() / .exclude() / .exists()
    #   .count() / .first() / .last() / .aggregate(Max('age')) — chainable;
    #   slicing works but negative indexes do not; field lookups use
    #   ``field__op`` (gt, lt, gte, lte, in, contains, endswith, ...; an
    #   ``i`` prefix such as ``iexact`` means case-insensitive); ``pk`` is
    #   the primary key; ``__`` also traverses relations
    #   (e.g. Grade.objects.filter(student__s_name='Jack')); F() compares
    #   two fields of the same row; Q() combines conditions with & and |.
    #
    # Debug output of the different access styles.
    # NOTE(review): ``Customer.abc`` must be a custom manager declared on
    # the model — confirm it exists, otherwise this raises AttributeError.
    print(">>>>>>all",Customer.objects.all())
    print(">>>>>>value",Customer.objects.all().values())
    print(">>>>>>abc", Customer.abc.all())
    qs=Customer.objects.values()
    # Optional filter, e.g. .../?phoneNumber=139291921
    ph=request.GET.get('phoneNumber',None)
    if ph:
        qs=qs.filter(phoneNumber=ph)
    # Build the response string, one "<br>"-terminated line per customer.
    resStr=''
    for customer in qs:
        for name,value in customer.items():
            resStr+=f'{name}:{value}|'
        resStr+='<br>'
    return HttpResponse(resStr)
"854072006@qq.com"
] | 854072006@qq.com |
141d27c765c734b5c4fc2c96c4660234357d1c72 | 33ed8fc7605076aa63ca6f8aef30a88fec29983f | /users/forms.py | 7db21917f18fe59a5935d6fae90b490cf82fc0a5 | [] | no_license | nworks16/Drop | ffecc3eb287fcaa6e98ff59c47c682d95f85a9a2 | 4bde54d998e71af6204b23aba78c278b41f55d9e | refs/heads/master | 2021-05-31T10:09:02.536618 | 2016-05-11T22:11:44 | 2016-05-11T22:11:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | from django import forms
from models import Cliente, Usuario, UserP, FriendList
from django.contrib.auth.models import User
class UsuarioForm(forms.ModelForm):
    """Edit basic profile data of the built-in ``User`` model."""
    class Meta:
        model = User
        fields = ["first_name","last_name","email"]
class LoginForm(forms.Form):
    """Plain (non-model) login form with a masked password input."""
    username = forms.CharField(label=(u'Usuario'))
    password = forms.CharField(label=(u'Password'), widget=forms.PasswordInput(render_value=False))
class UsuarioForm2(forms.ModelForm):
    """Registration form including credentials.

    NOTE(review): ``password`` is saved through the ModelForm as-is; make
    sure the view hashes it (``user.set_password``) before persisting,
    otherwise it is stored in plain text.
    """
    class Meta:
        model = User
        fields = ["username","password","first_name","last_name","email"]
class UserForm(forms.ModelForm):
    """Sign-up form; overrides ``password`` to use a masked input.

    NOTE(review): same plain-text password caveat as UsuarioForm2.
    """
    password = forms.CharField(label=(u'Password'), widget=forms.PasswordInput(render_value=False))
    class Meta:
        model = User
        fields = ["username","password","email"]
class UserPr(forms.ModelForm):
    """Profile-picture upload form for ``UserP``."""
    class Meta:
        model = UserP
        fields = ["picture"]
class FriendForm(forms.ModelForm):
    """Create/edit a ``FriendList`` row linking two users."""
    class Meta:
        model = FriendList
        fields = ["user","friend","friendship"]
| [
"nworks16@gmail.com"
] | nworks16@gmail.com |
ad67e390dd5eac85aa2568fc26641ea46e1d5c2c | 7d0804b07d1914cde2ebb3613b38a83b9f2bd7cb | /database.py | e45b8c307e01aabeeb592f89ea1e02f5c7de8956 | [] | no_license | foliwe/questo | 0b54d2eb76f12679620378c072698532cecf721e | bca8d197f387b8a045021f18b0a5fde1231b06ac | refs/heads/main | 2023-06-07T04:25:56.630088 | 2021-07-07T19:33:34 | 2021-07-07T19:33:34 | 383,900,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import sqlite3
from flask import g
def connect_db():
    """Open the question database and return the connection.

    Rows are returned as :class:`sqlite3.Row` so columns can be accessed
    by name.  NOTE(review): the absolute path is machine-specific;
    consider making it configurable.
    """
    connection = sqlite3.connect('/home/foli/Documents/flask/question/data.db')
    connection.row_factory = sqlite3.Row
    return connection
def get_db():
    # Lazily open one connection per Flask application context: memoise it
    # on ``g`` so repeated calls within the same request reuse the handle.
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    return g.sqlite_db
| [
"foliwe@gmail.com"
] | foliwe@gmail.com |
7d1bcd8e386680914a0800493669b944fd4b31b4 | 04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4 | /Lib/objc/_BiomeFoundation.py | 46963683901ead01d0776eb2ce541ab36ad30317 | [
"MIT"
] | permissive | ColdGrub1384/Pyto | 64e2a593957fd640907f0e4698d430ea7754a73e | 7557485a733dd7e17ba0366b92794931bdb39975 | refs/heads/main | 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 | MIT | 2023-02-26T21:34:04 | 2018-09-15T22:29:07 | C | UTF-8 | Python | false | false | 324 | py | """
Classes from the 'BiomeFoundation' framework.
"""
try:
    from rubicon.objc import ObjCClass
except ValueError:
    # Presumably rubicon raises ValueError when the Objective-C runtime
    # cannot be loaded (non-Darwin platform); fall back to a stub so this
    # module stays importable.  NOTE(review): a missing ``rubicon`` package
    # raises ImportError, which this does NOT catch — confirm intended.
    def ObjCClass(name):
        return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
BMCoreAnalyticsEvents = _Class("BMCoreAnalyticsEvents")
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
d4845616b6177385c5a83fef76ced4dcaf344d94 | 238461387e135c0bd8cbfae3451f39ac041bf330 | /venv/bin/easy_install | ece1bc63f0aae2209204b5cb8f1d15b9071b5ecf | [] | no_license | arcanine525/Amazon-Crawler | f2cfa81cfa897108a0551c12d25c079b4a1f4394 | 892bea70f70d6b973701aa6936352f002ed9eb54 | refs/heads/master | 2020-08-19T20:58:14.277312 | 2019-10-16T16:16:44 | 2019-10-16T16:16:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | #!/Users/beshoy/Amazon-Crawler/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim: strip any
    # "-script.py(w)"/".exe" suffix from argv[0] so the tool sees its
    # canonical name, then run the ``easy_install`` entry point and exit
    # with its return value.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"beshoyibraheem@gmail.com"
] | beshoyibraheem@gmail.com | |
7c768c864fc394690d7e294114bc2812cc0e4217 | 9b8844efc34ee402dc50855853b1a689c0d80792 | /stopwatch-game/stopwatch-game.py | ba1bf4916ae680981c3cfd68f7ef9e110a8c8f77 | [] | no_license | chintani/learn-python | 501d8755f0734f80efd70febaeab5dda2013f95e | 81b07711e905ed5c764f5a66d6f866da8c572b9e | refs/heads/master | 2016-09-05T16:58:17.719340 | 2015-03-19T03:20:25 | 2015-03-19T03:20:25 | 13,546,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | '''
Created on Oct 31, 2013
@author: Tania
'''
# template for "Stopwatch: The Game"
import simplegui
import math
import random
# define global variables
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    """Convert *t*, a time in tenths of a second, to the string "A:BC.D".

    A  = whole minutes (no zero padding)
    BC = seconds within the minute, zero-padded to two digits
    D  = remaining tenths of a second

    Examples: format(0) -> "0:00.0", format(123) -> "0:12.3",
    format(600) -> "1:00.0".

    Implements the stub per the template comment above it.  NOTE: the name
    shadows the built-in ``format``; kept because the course template (and
    the rest of this script) expects this exact name.
    """
    minutes = t // 600
    seconds = (t // 10) % 60
    tenths = t % 10
    return "%d:%02d.%d" % (minutes, seconds, tenths)
# define event handlers for buttons; "Start", "Stop", "Reset"
# define event handler for timer with 0.1 sec interval
# define draw handler
# create frame
# register event handlers
# start frame
# Please remember to review the grading rubric
| [
"cybercorpindustries@gmail.com"
] | cybercorpindustries@gmail.com |
7be5ed4aea4a76ce09cdec432b58df844ff0f186 | 111ad6838f1d6fb859e148fe5791bd129986979b | /py_kill_all_by_user.py | da16c157bac7440f9b167aee0a77859700bc7b70 | [] | no_license | jjasghar/scripts | b7da5a5caa058f2f10e847762250ae734096adb9 | 794be04e9af4438b75b281deafe57a6175c3668a | refs/heads/master | 2021-01-18T14:28:53.105303 | 2013-10-03T00:35:38 | 2013-10-03T00:35:38 | 4,654,976 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | #!/usr/bin/env python
#
# This is an attempt to kill all procs by an owner
#
import os
import sys
import subprocess
def main():
    """CLI entry point: forward each username argument to ps_grep()."""
    if len(sys.argv) > 1:
        ps_grep(sys.argv[1:])
    else:
        # Function-call print works under both Python 2 and 3; the original
        # bare ``print "no args"`` statement is a SyntaxError on Python 3.
        print("no args")
def ps_grep(u):
    """Run ``ps aux | grep <name>`` for every name in *u* (a list of
    usernames/patterns), printing the matches to stdout.

    NOTE(review): despite the module docstring, nothing is killed — this
    only lists matching processes.
    SECURITY: ``shell=True`` with user-supplied ``i`` allows shell
    injection; the caller passes raw argv values straight in.
    """
    # ps/psopt/grep are leftovers from the commented-out Popen pipeline
    # below and are currently unused.
    ps="ps"
    psopt="-ej"
    grep="grep"
    cmd="ps aux |grep "
    for i in u:
        ## James suggested this is "cleaner"
        #p1=subprocess.Popen([ps,psopt],stdout=subprocess.PIPE)
        #p2=subprocess.Popen([grep, i],stdin=p1.stdout,stdout=subprocess.PIPE)
        #p1.stdout.close()
        #print p2.communicate()[0]
        ##
        ## This just "works"
        subprocess.call(cmd + i, shell=True)
if __name__ == "__main__":
main()
| [
"jjasghar@utexas.edu"
] | jjasghar@utexas.edu |
c02eadd70fd20beb463e37b26ca9d8a724e7c506 | 7b9e6ce86ef732a596d9af86eacc4d1afad7ff65 | /project/migrations/0005_alter_appointment_address.py | 0ecb5150fc2d95c998912a773155c7263cbfc622 | [] | no_license | Aniakacp/docplanner | 3bc3de98e401b01f4f1764d67637ad398acd4b26 | fe1d60528f6594056cb34c058cec0fc16465fc18 | refs/heads/master | 2023-07-13T14:10:51.791056 | 2021-08-22T13:48:36 | 2021-08-22T13:48:36 | 378,492,028 | 1 | 0 | null | 2021-08-22T13:48:36 | 2021-06-19T19:40:28 | Python | UTF-8 | Python | false | false | 476 | py | # Generated by Django 3.2.4 on 2021-06-21 19:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: re-points ``Appointment.address`` at the
    # ``project.clinic`` model with CASCADE delete; follows migration 0004.
    dependencies = [
        ('project', '0004_alter_doctor_user'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='address',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.clinic'),
        ),
    ]
| [
"aniakacp@op.pl"
] | aniakacp@op.pl |
97569bdf951b5d5170d4ef389584f5e08f41a1f3 | e77ace18f60573527fad138921c1cdc6731a8721 | /participant_registration/admin.py | a89f60feb5684e30c104e05eaf158f6a782b387a | [] | no_license | oode45/conference_site | 682acaeb269c1c56d570bc535c58aafbd9ad8681 | a24cf9ed265e9ffd808ef8766beaca46ca72f105 | refs/heads/master | 2023-03-13T00:45:40.206439 | 2021-03-01T08:34:59 | 2021-03-01T08:34:59 | 343,035,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | from django.contrib import admin
# Register your models here.
from .models import Country, Section, Participant, RegistrationStatus
class ParticipantAdmin(admin.ModelAdmin):
    """Admin for conference participants: review-workflow fields, list
    columns, and two computed columns (description and registration year)."""

    def short_content(self, obj):
        # Human-readable description column; delegates to the model's __str__.
        return obj.__str__()

    def registration_year(self, obj):
        # Virtual "year" column derived from the registration timestamp.
        return obj.registration_date.year

    short_content.short_description = u'Описание'
    registration_year.short_description = u'Год'
    fields = [
        'last_name',
        'first_name',
        'middle_name',
        'age',
        'country',
        'current_status',
        'phone',
        'email',
        'organization',
        'section',
        'participation_type',
        'author_list',
        'paper_name',
        'paper_file',
        'chief_last_name',
        'chief_first_name',
        'chief_middle_name',
        'chief_phone',
        'chief_email',
        'chief_organization',
        'chief_position',
        'chief_degree',
        'chief_review',
        'status_type',
        'reviewer',
        'reviewer_corrected_manuscript_pdf',
        'reviewer_corrected_manuscript',
    ]
    list_display = ['registration_year', 'section', 'short_content', 'id', 'reviewer', 'email', 'status_type', 'paper_name', 'participation_type', 'registration_date','reviewer_corrected_manuscript','reviewer_corrected_manuscript_pdf']
    list_display_links = ['short_content']
    # BUG FIX: admin_order_field must name a real model field (or queryset
    # annotation).  'registration_year' is only the callable above, not a
    # field, so clicking the column header to sort raised a FieldError.
    # Ordering by the underlying date gives the intended chronological sort.
    registration_year.admin_order_field = 'registration_date'
    ordering = ['section', 'last_name']
    list_filter = ['status_type']
    search_fields = ['last_name']
class RegistrationStatusAdmin(admin.ModelAdmin):
    # Toggle for opening/closing registration plus the message shown to
    # visitors while registration is closed.
    fields = ['is_active', 'registration_inactive_text']
    list_display = ['is_active', 'registration_inactive_text']
admin.site.register(Country)
admin.site.register(Section)
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(RegistrationStatus, RegistrationStatusAdmin)
| [
"55568515+oode45@users.noreply.github.com"
] | 55568515+oode45@users.noreply.github.com |
1a113e39024e17830518e548d9edbf161cb4665c | 6caab8d886e8bd302d1994ff663cf5ccb5e11522 | /MyNotes_01/Step02/1-Data_Structure/day03/demo04_order_set.py | bea67a7c9c1c6c6023282873b66b421d9bb4c5d7 | [] | no_license | ZimingGuo/MyNotes01 | 7698941223c79ee754b17296b9984b731858b238 | 55e6681da1a9faf9c0ec618ed60f5da9ecc6beb6 | refs/heads/master | 2022-07-30T21:30:32.100042 | 2020-05-19T16:59:09 | 2020-05-19T16:59:09 | 265,254,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,924 | py | # author: Ziming Guo
# time: 2020/3/14
'''
三种排序集合的汇总:冒泡;选择;插入
'''
# 冒泡排序:双层循环
def bubble(list_):
    """Sort *list_* ascending in place.

    (Despite the name this is an exchange/selection sort: position ``i``
    is compared against every later element, not just its neighbour.)
    """
    size = len(list_)
    for i in range(size - 1):
        for j in range(i + 1, size):
            if list_[i] > list_[j]:
                list_[i], list_[j] = list_[j], list_[i]
# 选择排序
def seq_ord(list_target):
    """Selection sort: arrange *list_target* ascending in place.

    For every position ``m``, find the smallest element in
    ``list_target[m:]`` and swap it into position ``m``.

    Improvement over the original: tracking only the index of the current
    minimum replaces the throwaway ``dict_min`` dictionary (two key
    lookups per comparison) with a plain integer — same results, simpler
    and faster.

    :param list_target: list to sort (mutated in place; returns None)
    """
    for m in range(0, len(list_target) - 1):
        min_index = m
        for i in range(m + 1, len(list_target)):
            if list_target[i] < list_target[min_index]:
                min_index = i
        # Swap the smallest remaining element into position m (no-op when
        # it is already there).
        list_target[m], list_target[min_index] = list_target[min_index], list_target[m]
# 插入排序
def insert_ord(list_target):
    """Insertion sort: arrange *list_target* ascending in place.

    Each element is compared against the (already sorted) elements before
    it and re-inserted just after the rightmost strictly smaller one, or
    at the front when none is smaller.

    :param list_target: list to sort (mutated in place; returns None)
    """
    for m in range(1, len(list_target)):
        current = list_target[m]
        n = m - 1
        # Walk left past every element >= current.
        while n >= 0 and not (list_target[n] < current):
            n -= 1
        # Remove current from position m and re-insert it after index n
        # (n == -1 means the front of the list).
        del list_target[m]
        list_target.insert(n + 1, current)
| [
"guoziming99999@icloud.com"
] | guoziming99999@icloud.com |
61b4cdb93612dde44672fc2ceda9f4c5e7e07d60 | ae7f4a70a0bdb2e98d13c996c75d274241c25278 | /basics/bubble_sort.py | ec8166badc0e61cd877a955c07c700c7d8f6268f | [
"MIT"
] | permissive | zi-NaN/algorithm_exercise | 5d17e1f6c3cae89ed3c7523b344e55c5a10e3e62 | 817916a62774145fe6387b715f76c5badbf99197 | refs/heads/master | 2020-03-30T12:00:46.694490 | 2019-06-23T11:04:34 | 2019-06-23T11:04:34 | 151,204,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | def bubble_sort(arr:'list'):
# inplace sort
for i in range(len(arr)-1):
for j in range(len(arr)-1, 0, -1):
if arr[j-1] > arr[j]:
arr[j-1], arr[j] = arr[j], arr[j-1]
return arr
# test
if __name__ == '__main__':
print(bubble_sort([1, 3, 2, 4])) | [
"zinanzhao2-c@my.cityu.edu.hk"
] | zinanzhao2-c@my.cityu.edu.hk |
53cae389b0208827be2727b40f13adcfe54ff563 | 0d0aa879f854e561d1e2a626cb3297b679553038 | /client.py | a34f30237874f4cc8d47070f32d23b9ca866cc06 | [] | no_license | rohanpaspallu/Dummy | d13865503dedd6290cf80ed7ec77ff99122eff36 | 0bb5038f1984d0dcd0a0f20be5d43cc2ecb68872 | refs/heads/master | 2022-08-05T01:12:05.053750 | 2020-05-24T01:41:19 | 2020-05-24T01:41:19 | 266,450,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,464 | py | import socket
print("For client side")
HOST=socket.gethostname()
PORT=12345
s=socket.socket()
s.connect((HOST,PORT))
while True:
    # One menu round-trip per loop iteration.  Protocol: messages are
    # '|'-separated fields, '|<choice>|<args...>', sent UTF-8 encoded;
    # replies are read in a single 4096-byte recv.
    def menu():
        value = input(
            '1. Find customer \n2. Add customer \n3. Delete customer \n4. Update customer age \n5. Update customer address \n6. Update customer phone\n7. Print Report \n8. Exit\n')
        if value == '1':
            # Find customer by name.
            print('value 1')
            name = input().strip()
            message = '|' + value +'|'+name
            s.send(message.encode())
            reply = s.recv(4096).decode()
            print("Received", repr(reply))
        # menu()
        elif value == '2':
            # Add customer: name, age, address, phone (name is mandatory).
            name = input().strip()
            age = input().strip()
            address = input().strip()
            phone = input().strip()
            if name=='':
                print('please enter name.')
                # menu()
            else:
                message = '|' + value + '|' + name + '|' + age + '|' + address+ '|' + phone
                s.send(message.encode())
                reply = s.recv(4096).decode()
                print("Received", repr(reply))
                # menu()
        elif value == '3':
            # Delete customer by name.
            name = input().strip()
            message = '|' + value + '|' + name
            s.send(message.encode())
            reply = s.recv(4096).decode()
            print("Received", repr(reply))
            # menu()
        elif value == '4':
            # Update customer age.
            name = input().strip()
            age = input().strip()
            message = '|' + value + '|' + name + '|' + age
            s.send(message.encode())
            reply = s.recv(4096).decode()
            print("Received", repr(reply))
            # menu()
        elif value == '5':
            # Update customer address.
            name = input().strip()
            address = input().strip()
            message = '|' + value + '|' + name + '|' + address
            s.send(message.encode())
            reply = s.recv(4096).decode()
            print("Received", repr(reply))
            # menu()
        elif value == '6':
            # Update customer phone.
            name = input().strip()
            phone = input().strip()
            message = '|' + value + '|' + name + '|' + phone
            s.send(message.encode())
            # NOTE(review): unlike every other branch this reply is not
            # .decode()d, so raw bytes are printed — confirm intended.
            reply = s.recv(4096)
            print("Received", repr(reply))
            # menu()
        elif value == '7':
            # Print report (no arguments).
            message = '|' + value
            s.send(message.encode())
            reply = s.recv(4096).decode()
            print("Received", repr(reply))
            # menu()
        elif value == '8':
            # Exit: notify the server, close the socket and quit.
            # NOTE(review): the literal 'abc' payload looks like a protocol
            # placeholder — confirm against the server-side handler.
            message = '|' + value +'|abc'
            s.send(message.encode())
            reply = s.recv(4096).decode()
            print("Received", repr(reply))
            s.close()
            exit()
    menu()
# message=input("Ur msg: ")
# s.send(message.encode())
# if message=='end':
# break
# import socket
# import select
# import errno
#
# HEADER_LENGTH = 10
#
# IP = socket.gethostname()
# PORT = 60000
# my_username = input("Username: ")
#
# # Create a socket
# # socket.AF_INET - address family, IPv4, some otehr possible are AF_INET6, AF_BLUETOOTH, AF_UNIX
# # socket.SOCK_STREAM - TCP, conection-based, socket.SOCK_DGRAM - UDP, connectionless, datagrams, socket.SOCK_RAW - raw IP packets
# client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#
# # Connect to a given ip and port
# client_socket.connect((IP, PORT))
#
# # Set connection to non-blocking state, so .recv() call won;t block, just return some exception we'll handle
# client_socket.setblocking(False)
#
# # Prepare username and header and send them
# # We need to encode username to bytes, then count number of bytes and prepare header of fixed size, that we encode to bytes as well
# username = my_username.encode('utf-8')
# username_header = f"{len(username):<{HEADER_LENGTH}}".encode('utf-8')
# client_socket.send(username_header + username)
#
# while True:
#
# # Wait for user to input a message
# message = input(f'{my_username} > ')
#
# # If message is not empty - send it
# if message:
#
# # Encode message to bytes, prepare header and convert to bytes, like for username above, then send
# message = message.encode('utf-8')
# message_header = f"{len(message):<{HEADER_LENGTH}}".encode('utf-8')
# client_socket.send(message_header + message)
#
# try:
# # Now we want to loop over received messages (there might be more than one) and print them
# while True:
#
# # Receive our "header" containing username length, it's size is defined and constant
# username_header = client_socket.recv(HEADER_LENGTH)
#
# # If we received no data, server gracefully closed a connection, for example using socket.close() or socket.shutdown(socket.SHUT_RDWR)
# if not len(username_header):
# print('Connection closed by the server')
# sys.exit()
#
# # Convert header to int value
# username_length = int(username_header.decode('utf-8').strip())
#
# # Receive and decode username
# username = client_socket.recv(username_length).decode('utf-8')
#
# # Now do the same for message (as we received username, we received whole message, there's no need to check if it has any length)
# message_header = client_socket.recv(HEADER_LENGTH)
# message_length = int(message_header.decode('utf-8').strip())
# message = client_socket.recv(message_length).decode('utf-8')
#
# # Print message
# print(f'{username} > {message}')
#
# except IOError as e:
# # This is normal on non blocking connections - when there are no incoming data error is going to be raised
# # Some operating systems will indicate that using AGAIN, and some using WOULDBLOCK error code
# # We are going to check for both - if one of them - that's expected, means no incoming data, continue as normal
# # If we got different error code - something happened
# if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
# print('Reading error: {}'.format(str(e)))
# sys.exit()
#
# # We just did not receive anything
# continue
#
# except Exception as e:
# # Any other exception - something happened, exit
# print('Reading error: '.format(str(e)))
# sys.exit() | [
"paspallu.rohan@gmail.com"
] | paspallu.rohan@gmail.com |
c925646523b8c22c441c079047c6290e9bb5895d | 3affa3ff5ad70b740fe98f6c0e4fcd73f0aabb5e | /profile_handler.py | 32fb0c23baee0525a86b263d63abffced304f768 | [] | no_license | JordyHB/Jord-bot | 3d3175d2403eb526411fd49330378fc379dc88bb | 57ca83e475428c661b84f16360cd07d6a19b00c1 | refs/heads/master | 2023-04-28T13:47:27.738654 | 2021-03-07T07:04:13 | 2021-03-07T07:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,205 | py | import gspread
import json
from oauth2client.service_account import ServiceAccountCredentials
from datetime import datetime
from datetime import timedelta
class PHandler():
    """Handles fetching, claiming and querying D&D character profiles stored
    in a Google Sheet.

    Two worksheets (opened at module level) are used:
      * ``profile_sheet`` -- one row per character profile; col 15 holds the
        claim status, col 16 the owning user ID, col 17 active/inactive.
      * ``user_sheet``    -- one row per Discord user: ID, active profile,
        then the claimed profile names.

    To cut down on Sheets API calls, per-user data is cached in
    ``self.cached_users`` keyed by the numeric Discord user ID.
    """

    def __init__(self):
        """initializes self"""
        # Sets the start time of the program (used to throttle re-auth)
        self.program_start_time = datetime.now()
        # Creates an empty flag that allows printing errors to the user
        self.error = ''
        # Fix: initialize the feedback flags so they can be read before the
        # first command ever sets them (previously an AttributeError risk).
        self.print_back = ''
        self.claim_printback = ''
        # Creates an empty dict to temp store profiles for
        self.unclaimed_profiles = {}
        # Creates an empty list to temp store formatted profiles to show user
        self.unclaimed_profiles_list = []
        # stores the info of the last user to avoid having to look it up again
        self.cached_users = {}
        # List that stores the required keys for the prof_mods dict
        self.mod_keys = ['str', 'dex', 'con', 'int', 'wis', 'cha', 'ac', 'saves', 'skills', 'prof_b', 'spell_dc', 'spell_a']
        # list that stores valid skill inputs
        self.valid_types = ['str', 'dex', 'con', 'int', 'wis', 'cha']
        # Dicts to get the right modifiers for the right skills
        self.skills = {}
        self.skills['str'] = {'ath': 'Athletics'}
        self.skills['dex'] = {'acro': 'Acrobatics', 'slei': 'Sleight Of Hand', 'stea': 'Stealth'}
        self.skills['int'] = {'arc': 'Arcana', 'his': 'History', 'inv': 'Investigation', 'nat': 'Nature', 'rel': 'Religion'}
        self.skills['wis'] = {'ani': 'Animal Handling', 'insi': 'Insight', 'med': 'Medicine', 'perc': 'Perception', 'surv': 'Survival'}
        self.skills['cha'] = {'dec': 'Deception', 'inti': 'Intimidation', 'perf': 'Performance', 'pers': 'Persuasion'}

    def cache_user(self, user):
        """puts user in the cached_users dict to speed up the requests"""
        # Stores the location of the user and the active profile
        new_user = {}
        new_user['user_loc'] = self.check_user_sheet(user)
        new_user['active_profile'] = self.check_active_profile(user)
        new_user['profile_row'] = \
            profile_sheet.find(new_user['active_profile']).row
        # Stores a dict of profile names + their locations
        new_user['claimed_p_dict'] = self.fetch_claimed_profiles(user)
        if new_user['claimed_p_dict'] is None:
            new_user['claimed_p_dict'] = {}
        # Stores the new user in cache by the user ID
        self.cached_users[user] = new_user
        print(self.cached_users)
        # Caches the mods if the user has an active profile
        if new_user['active_profile'] != '':
            self.cache_cur_mod(user)

    def cache_cur_mod(self, user):
        """Gets all the modifiers for the current profile and adds them"""
        # Empty dict to store the values in
        prof_mods = {}
        # Variable to keep track of the right colom for the key
        mod_col = 3
        # Loops through the modkeys and fills a dict with the correct values
        for key in self.mod_keys:
            prof_mods[key] = profile_sheet.cell(self.cached_users[user]['profile_row'], mod_col).value
            mod_col = mod_col + 1
        # adds the mod dict to the user cache
        self.cached_users[user]['mods'] = prof_mods

    def save_cache(self):
        """Saves the cached_users dict to a Json"""
        with open('cached_data.json', 'w') as f:
            json.dump(self.cached_users, f)

    def load_cache(self):
        """loads cached_users dict"""
        with open('cached_data.json', 'r') as f:
            raw_cached_users = json.load(f)
        # Converts the string keys back to ints (JSON only has string keys)
        for key in raw_cached_users.keys():
            self.cached_users[int(key)] = raw_cached_users[key]
        print('loaded cache!')

    def fetch_empty_col(self, user_location):
        """Find an empty colom to write the profiles in"""
        occupied_col = user_sheet.row_values(user_location)
        empty_col = len(occupied_col) + 1
        return empty_col

    def fetch_unclaimed_profiles(self):
        """Shows all unclaimed profiles on the google sheet"""
        self.refresh_auth()
        # Creates a list with all the unclaimed profiles
        raw_unclaimed_profiles = profile_sheet.col_values(2)
        # Clears all the flags before proceeding
        self.unclaimed_profiles.clear()
        self.unclaimed_profiles_list = []
        self.error = ''
        self.print_back = ''
        # Fills a dict and assigns all the profile names a numerical key
        for profile in range(1, len(raw_unclaimed_profiles)):
            # Checks the status for each profile
            current_profile = profile_sheet.cell(profile + 1, 15).value
            # Sets the new empty statusses to unclaimed
            if current_profile == '':
                profile_sheet.update_cell(profile + 1, 15, 'unclaimed')
                # Adds it to the unclaimed List
                self.unclaimed_profiles_list.append(
                    (str(profile + 1) + ": " +
                     str(raw_unclaimed_profiles[profile]))
                )
                # Fix: also register the profile in the dict so it can be
                # claimed right away (previously only the list was updated,
                # so freshly-blanked profiles were shown but unclaimable).
                self.unclaimed_profiles[str(profile + 1)] = \
                    str(raw_unclaimed_profiles[profile])
            # Checks for claimed profiles moving on if they are
            elif current_profile == 'claimed':
                pass
            # Adds unclaimed profiles to the unclaimed profile dict
            elif current_profile == 'unclaimed':
                self.unclaimed_profiles_list.append(
                    (str(profile + 1) + ": " +
                     str(raw_unclaimed_profiles[profile]))
                )
                # Adds entry to the dict
                self.unclaimed_profiles[str(profile + 1)] = \
                    str(raw_unclaimed_profiles[profile])
            # Sets corrupted statuses to unclaimed
            else:
                # Fix: rows are 1-based, so the sheet row is `profile + 1`;
                # the bare `profile` wrote to the row above the target.
                profile_sheet.update_cell(profile + 1, 15, 'unclaimed')
        # Gets the dict out of the global flag so quick inputs dont break it
        d_unclaimed_profiles = self.unclaimed_profiles
        return d_unclaimed_profiles

    def fetch_claimed_profiles(self, user):
        """Fetches all profiles linked to a specific user"""
        # Get all the claimed profiles from the user sheet
        raw_c_profiles = user_sheet.row_values(self.check_user_sheet(user))
        claimed_p_list = raw_c_profiles[2:]
        print(claimed_p_list)
        # Finds the profile location adds it to a dict and returns it
        claimed_p_dict = {}
        for profile in claimed_p_list:
            if profile != '':
                claimed_p_dict[str(profile_sheet.find(profile).row)] = profile
        return claimed_p_dict

    def check_active_profile(self, user):
        """Checks which profile the user has active"""
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            # Returns the active profile stored in cache
            activated_profile = self.cached_users[user]['active_profile']
            return activated_profile
        else:
            # Finds the user in the database
            user_location = self.check_user_sheet(user)
            activated_profile = user_sheet.cell(user_location, 2).value
            return activated_profile

    def check_user_sheet(self, user):
        """Checks user sheet for existing profiles, returning the user's row"""
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            user_location = self.cached_users[user]['user_loc']
            # Fix: the cached location was looked up but never returned,
            # so every cached call silently yielded None.
            return user_location
        else:
            # Fetches all the data from the first colom for user IDs
            raw_user_data = user_sheet.col_values(1)
            # Checks if the user is already on the sheet
            if str(user) in raw_user_data:
                cell = user_sheet.find(str(user))
                user_location = cell.row
                return user_location
            else:
                # Adds the user to sheet if not found before returning the loc
                empty_cell = len(raw_user_data) + 1
                update1 = user_sheet.cell(empty_cell, 1)
                update1.value = str(user)
                update2 = user_sheet.cell(empty_cell, 2)
                update2.value = ''
                user_sheet.update_cells([update1, update2])
                user_location = empty_cell
                return user_location

    def check_claim_input(self, claim_input, user):
        """Checks whether the input matches anything in dict"""
        # Refreshes the Dict
        d_unclaimed_profiles = self.fetch_unclaimed_profiles()
        try:
            # Sets a filtered input for use further down the process
            filtered_input = d_unclaimed_profiles[str(claim_input)]
            self.claim_printback = filtered_input
            self.claim_profile(claim_input, user, filtered_input)
        # Fix: merged the two identical except clauses into one tuple
        except (IndexError, KeyError):
            self.error = 'No unclaimed profile found by that number.'

    def claim_profile(self, claim_input, user, filtered_input):
        """Allows you to claim a profile"""
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            self.refresh_auth()
        else:
            self.refresh_auth()
            self.cache_user(user)
        # Gets values ready for a batch update to the profile sheet
        update1 = profile_sheet.cell(int(claim_input), 15)
        update2 = profile_sheet.cell(int(claim_input), 16)
        update1.value = 'claimed'
        update2.value = str(user)
        # Makes this the active profile if user doesnt already have one
        if self.cached_users[user]['active_profile'] == '' or \
                self.cached_users[user]['active_profile'] == 'none':
            # sets the profile as active on the profiles sheet
            update3 = profile_sheet.cell(int(claim_input), 17)
            update3.value = 'active'
            # Fetches users location in the database
            user_location = self.cached_users[user]['user_loc']
            # Adds the profile to batch for user and to set it as active
            update4 = user_sheet.cell(user_location, 2)
            update4.value = filtered_input
            # Adds newly claimed profile to the cache of claimed profiles
            self.cached_users[user]['active_profile'] = filtered_input
            self.cached_users[user]['claimed_p_dict'][str(profile_sheet.find(filtered_input).row)] = filtered_input
            # Finds an empty col to add the profile
            empty_col = self.fetch_empty_col(user_location)
            if empty_col == 2:
                empty_col = empty_col + 1
            update5 = user_sheet.cell(user_location, empty_col)
            update5.value = filtered_input
            # Updates the cell batches for the profile sheet and the user sheet
            profile_sheet.update_cells([update1, update2, update3])
            user_sheet.update_cells([update4, update5])
        else:
            # sets the profile as inactive on the profiles sheet
            profile_sheet.update_cell(int(claim_input), 17, 'inactive')
            # Fetches users location in the database
            user_location = self.cached_users[user]['user_loc']
            # Finds an empty col to add the profile
            empty_col = self.fetch_empty_col(user_location)
            profile_sheet.update_cells([update1, update2])
            user_sheet.update_cell(user_location, empty_col, filtered_input)
            # Adds newly claimed profile to the cache of claimed profiles
            self.cached_users[user]['claimed_p_dict'][str(profile_sheet.find(filtered_input).row)] = filtered_input

    def select_active_profile(self, user, request_p_loc):
        """Lets you pick which profile you want active"""
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            self.refresh_auth()
        else:
            self.refresh_auth()
            self.cache_user(user)
        # Checks if you have actually claimed the requested profile
        try:
            print(self.cached_users[user]['claimed_p_dict'])
            _ = self.cached_users[user]['claimed_p_dict'][request_p_loc]
            # checks whether the profile is already active
            if profile_sheet.cell(request_p_loc, 2).value != \
                    self.cached_users[user]['active_profile']:
                # Handles setting the old active profile to inactive
                old_ap_loc = self.cached_users[user]['profile_row']
                profile_sheet.update_cell(old_ap_loc, 17, 'inactive')
                # Updates the cache and sets new profile to active
                profile_sheet.update_cell(request_p_loc, 17, 'active')
                self.cached_users[user]['profile_row'] = request_p_loc
                # updates the cache and the active profile on the user sheet
                user_loc = self.cached_users[user]['user_loc']
                self.cached_users[user]['active_profile'] = \
                    profile_sheet.cell(request_p_loc, 2).value
                user_sheet.update_cell(
                    user_loc, 2, self.cached_users[user]['active_profile']
                )
            # Gives an error if you select the already active profile
            elif profile_sheet.cell(request_p_loc, 2).value == \
                    self.cached_users[user]['active_profile']:
                self.error = ' The profile you have selected is already active'
        except KeyError:
            self.error = ' Could not find a claimed profile by that number.' + \
                ' Please type "!myprofiles" to find the correct number'

    def show_claimed_profiles(self, user):
        """Shows the claimed profiles to the user"""
        # Resets the error flag
        self.error = ''
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            pass
        else:
            self.refresh_auth()
            self.cache_user(user)
        # Checks if the list isnt empty
        if self.cached_users[user]['claimed_p_dict']:
            # Creates an empty list to append too
            formatted_claims = []
            for key, value in self.cached_users[user]['claimed_p_dict'].items():
                # formats the key + value
                formatted_claim = key + ': ' + value
                # before adding it to a list for easy printback
                formatted_claims.append(formatted_claim)
            return formatted_claims
        else:
            self.error = ' You have no claimed profiles'
            return False

    def unclaim_profile(self, profile_number, user):
        """Function that lets you unclaim profiles"""
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            self.refresh_auth()
        else:
            self.refresh_auth()
            self.cache_user(user)
        try:
            unwanted_profile = self.cached_users[user]['claimed_p_dict'][profile_number]
            # Updates the cells in batch to unclaim on the profile sheet
            update1 = profile_sheet.cell(int(profile_number), 15)
            update1.value = 'unclaimed'
            update2 = profile_sheet.cell(int(profile_number), 16)
            update2.value = ''
            update3 = profile_sheet.cell(int(profile_number), 17)
            update3.value = 'inactive'
            profile_sheet.update_cells([update1, update2, update3])
            # Checks if the profile being unclaimed was the active one
            if unwanted_profile == self.cached_users[user]['active_profile']:
                user_sheet.update_cell(
                    self.cached_users[user]['user_loc'], 2, ''
                )
                self.cached_users[user]['active_profile'] = ''
            # Finds the location of the unwanted profile in the user sheet
            prof_user_loc = user_sheet.find(unwanted_profile)
            # Clears the profile from the user's row on the user sheet
            user_sheet.update_cell(
                self.cached_users[user]['user_loc'], prof_user_loc.col, ''
            )
            self.print_back = unwanted_profile
            # Updates the cached user
            del self.cached_users[user]['claimed_p_dict'][profile_number]
        except KeyError:
            self.error = ' You have no claimed profile by that number'

    def find_value(self, user, request, request_type):
        """Function that finds associated values on the google sheets"""
        # resets the error flag
        self.error = ''
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            pass
        else:
            self.refresh_auth()
            self.cache_user(user)
        if request_type == 'check':
            # Checks that you input a valid ability name
            if request.lower() in self.valid_types:
                # Gets the value out of the dict
                r_value = self.cached_users[user]['mods'][request.lower()]
                return r_value
            else:
                self.error = ' No check by that name try (str/dex/con/int/wis/cha)'
        elif request_type == 'save':
            # Checks that you input a valid ability name
            if request.lower() in self.valid_types:
                # Gets the value out of the dict
                r_value = self.cached_users[user]['mods'][request.lower()]
                # Checks if you are proficient in the requested save
                if request.lower() in self.cached_users[user]['mods']['saves']:
                    # adds prof bonus to the mod
                    r_value = int(r_value) + int(self.cached_users[user]['mods']['prof_b'])
                    if r_value >= 0:
                        r_value = '+' + str(r_value)
                    return r_value
                else:
                    return r_value
            else:
                self.error = ' No save by that name try (str/dex/con/int/wis/cha)'
        elif request_type == 'skill':
            # An empty dict to store the skill type and name
            skill_type = {}
            # Checks if the input is valid and gets the right modifier for it
            for key in self.skills:
                # Checks if you use an abbreviation before adding the skill
                # and type of skill to the empty dict
                if request.lower() in self.skills[key].keys():
                    skill_type[key] = self.skills[key][request.lower()]
                    break
                # adds unabbreviated version to the dict
                elif request.lower().title() in self.skills[key].values():
                    skill_type[key] = request.lower().title()
                    break
            if not skill_type:
                self.error = \
                    ' no skill found by that name, type !skillhelp for abbreviations'
            else:
                # Gets the key which has the type of modifier it needs to add.
                for key in skill_type.keys():
                    # Checks if you are proficient in the skill
                    if skill_type[key] in self.cached_users[user]['mods']['skills']:
                        # If you are proficient gets the right mod and adds
                        # prof bonus
                        r_value = int(self.cached_users[user]['mods'][key]) + \
                                  int(self.cached_users[user]['mods']['prof_b'])
                        # Adds a + for the roll
                        if r_value >= 0:
                            r_value = '+' + str(r_value)
                        return r_value
                    # otherwise just grabs the mod and hands it off
                    else:
                        r_value = self.cached_users[user]['mods'][key]
                        return r_value

    def request_mod(self, user, request, request_type):
        """this part is called by user so needs some checks"""
        # resets the error flag
        self.error = ''
        # Checks if the user is already in the cache.
        if user in self.cached_users.keys():
            pass
        else:
            self.refresh_auth()
            self.cache_user(user)
        # Checks that the user has an active profile
        if self.cached_users[user]['active_profile'] == '':
            self.error = ' You have no active profile please select one with !selectprofile'
        else:
            # Gets the requested value from the cache and returns it
            r_value = self.find_value(user, request, request_type)
            return r_value

    def refresh_auth(self):
        """Reauthorizes the token"""
        # Checks if 15 min has passed otherwise ignores the call for a refresh
        if datetime.now() > self.program_start_time + timedelta(minutes=15):
            Client.login()
            self.program_start_time = datetime.now()
        else:
            pass

    def shutdown(self, user):
        """shuts down the bot after saving"""
        # Checks if it is Jord requesting the shutdown
        if int(user) == 267425491034701824:
            self.save_cache()
            self.print_back = ' Goodnight! :D'
            return True
        else:
            self.print_back = " You have no power here! >:D"
            return False

    def repair_cache(self, user):
        """Resets the user cache (admin only)."""
        # Checks if it is Jord requesting the repair
        if int(user) == 267425491034701824:
            self.cached_users = {}
            self.print_back = \
                " Well, I fixed it for you because you screwed up you dummy!"
        else:
            self.print_back = " Please contact Jord with any issues you have!"
# Sets the all the basic stuff
# OAuth scopes + service-account credentials for the Google Sheets API.
# NOTE(review): both worksheets are opened at import time, so importing this
# module requires network access and a valid 'profile_auth_secret.json'.
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('profile_auth_secret.json', scope)
Client = gspread.authorize(creds)
# Per the variable names: first worksheet = character profiles, second = users.
profile_sheet = Client.open('dnd_profiles').sheet1
user_sheet = Client.open('dnd_profiles').get_worksheet(1)
| [
"45539002+jord1990@users.noreply.github.com"
] | 45539002+jord1990@users.noreply.github.com |
9cd46e03f33b06fb86e9282001379cb6ff3007b5 | 35fc68fd80383c1be4fba160ae36a7ae3cc07f4a | /reproduction/coreference_resolution/model/util.py | 42cd09fe0ad73b1f8429e6f30123761fede6d371 | [
"Apache-2.0"
] | permissive | irfan11111111/fastNLP | 38d5ec91fabc02ba43689a0f4ae91bdde68b8c89 | 148ad1dcb7aa4990ac30d9a62ae8b89b6e706f8c | refs/heads/master | 2023-01-04T18:38:00.058306 | 2020-10-30T08:39:53 | 2020-10-30T08:39:53 | 309,295,757 | 1 | 0 | Apache-2.0 | 2020-11-02T07:54:03 | 2020-11-02T07:54:02 | null | UTF-8 | Python | false | false | 2,703 | py | import os
import errno
import collections
import torch
import numpy as np
import pyhocon
def flatten(l):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sublist in l:
        flat.extend(sublist)
    return flat
def get_config(filename):
    """Parse a HOCON config file into a pyhocon ConfigTree."""
    factory = pyhocon.ConfigFactory
    return factory.parse_file(filename)
# safe make directories
def mkdirs(path):
    """Create *path* (including parents) if it does not exist, and return it.

    Uses ``exist_ok=True`` instead of the old manual errno.EEXIST check;
    any other OSError (permissions, bad path) still propagates.
    """
    os.makedirs(path, exist_ok=True)
    return path
def load_char_dict(char_vocab_path):
    """Load a character vocabulary file into a char -> index defaultdict.

    Index 0 is reserved for "<unk>"; characters missing from the file
    also map to 0 via the defaultdict.
    """
    vocab = ["<unk>"]
    with open(char_vocab_path) as f:
        for line in f.readlines():
            vocab.append(line.strip())
    char_dict = collections.defaultdict(int)
    char_dict.update({character: index for index, character in enumerate(vocab)})
    return char_dict
# Load pretrained word embeddings
def load_embedding_dict(embedding_path, embedding_size, embedding_format):
    """Read a text embedding file into a word -> np.ndarray defaultdict.

    "vec" format files carry a header line which is skipped; every other
    line must be ``word v1 ... vN`` with exactly *embedding_size* floats.
    Unknown words map to a shared zero vector.
    """
    print("Loading word embeddings from {}...".format(embedding_path))
    default_embedding = np.zeros(embedding_size)
    embedding_dict = collections.defaultdict(lambda: default_embedding)
    has_header = embedding_format == "vec"
    with open(embedding_path) as f:
        for line_no, line in enumerate(f.readlines()):
            if has_header and line_no == 0:
                continue
            tokens = line.split()
            assert len(tokens) == embedding_size + 1
            embedding_dict[tokens[0]] = np.array([float(t) for t in tokens[1:]])
    print("Done loading word embeddings.")
    return embedding_dict
# safe divide
def maybe_divide(x, y):
    """Divide x by y, returning 0 instead of raising when y is zero."""
    if y == 0:
        return 0
    return x / float(y)
def shape(x, dim):
    """Return the size of tensor *x* along dimension *dim*.

    Fix: the original fallback called ``torch.shape(x)``, which does not
    exist (AttributeError) -- PyTorch tensors expose ``x.shape``.  The
    TF-style ``get_shape()`` path is kept first for graph tensors whose
    static dimension may be None.
    """
    try:
        return x.get_shape()[dim].value or x.shape[dim]
    except AttributeError:
        # Plain torch tensors / numpy arrays: no get_shape(), use .shape
        return x.shape[dim]
def normalize(v):
    """Scale vector *v* to unit L2 norm; zero vectors are returned unchanged."""
    magnitude = np.linalg.norm(v)
    return v / magnitude if magnitude > 0 else v
class RetrievalEvaluator(object):
    """Accumulates set-retrieval counts and reports precision / recall / F1."""

    def __init__(self):
        self._num_correct = 0
        self._num_gold = 0
        self._num_predicted = 0

    def update(self, gold_set, predicted_set):
        """Fold one (gold, predicted) pair of sets into the running counts."""
        overlap = gold_set & predicted_set
        self._num_correct += len(overlap)
        self._num_gold += len(gold_set)
        self._num_predicted += len(predicted_set)

    def recall(self):
        """Correct / gold, or 0 when nothing gold has been seen yet."""
        if self._num_gold == 0:
            return 0
        return self._num_correct / float(self._num_gold)

    def precision(self):
        """Correct / predicted, or 0 when nothing has been predicted yet."""
        if self._num_predicted == 0:
            return 0
        return self._num_correct / float(self._num_predicted)

    def metrics(self):
        """Return the (recall, precision, f1) triple."""
        recall = self.recall()
        precision = self.precision()
        denominator = precision + recall
        if denominator == 0:
            f1 = 0
        else:
            f1 = 2 * recall * precision / float(denominator)
        return recall, precision, f1
# Manual smoke test: requires the vocab and filtered-GloVe files under ../data.
if __name__=="__main__":
    print(load_char_dict("../data/char_vocab.english.txt"))
    embedding_dict = load_embedding_dict("../data/glove.840B.300d.txt.filtered",300,"txt")
    print("hello")
| [
"yexu_i@qq.com"
] | yexu_i@qq.com |
b2b35d886356081d3977dfd84225e608547787b1 | 2a5db373b40ed08d2b1a524f3030d75d0026db1e | /distribution.py | 5ae7cc616c043e1a0c8cb715fe5af5d7b8d843a0 | [] | no_license | kmein/lexifer | 8ce1ae4502bb81f52434a40f73d3e5e162cb9656 | 434ada618f8dedbd4e3d55b75f5ff2bea841a78c | refs/heads/master | 2022-08-05T02:34:09.522046 | 2020-05-30T06:13:07 | 2020-05-30T06:13:07 | 268,015,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Revision: 1.1 $
#
# Copyright (c) 2015 William S. Annis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
# When you are selecting from a pool a *lot*, this
# will speed things up a bit. Takes a dict of keys
# and weights.
class WeightedSelector(object):
    """Weighted random choice over a fixed pool of keys.

    Builds parallel key/weight arrays once so repeated selections stay
    cheap.  Iterating the selector yields its keys.
    """
    __slots__ = ['keys', 'weights', 'sum', 'n']

    def __init__(self, dic):
        # Parallel arrays: keys[i] is drawn with probability weights[i] / total
        self.keys = list(dic.keys())
        self.weights = [dic[k] for k in self.keys]
        # NOTE(review): the total deliberately subtracts 1 (slightly
        # under-weighting the last key); preserved as-is from the original.
        self.sum = sum(self.weights) - 1
        self.n = len(self.keys)

    def select(self):
        """Draw one key at random, proportionally to its weight."""
        pick = random.uniform(0, self.sum)
        cumulative = 0
        for index in range(self.n):
            cumulative += self.weights[index]
            if pick < cumulative:
                return self.keys[index]

    def __iter__(self):
        return iter(self.keys)
#m = WeightedSelector({'a': 7, 'b': 5, 'c': 1})
#for i in range(10):
# print(m.select())
| [
"kieran.meinhardt@gmail.com"
] | kieran.meinhardt@gmail.com |
d6f1397c0cc5511e1c3d763640513cf72b3fe252 | e80c741c72fe8c3f258a27622b44482612469651 | /目标检测入门基础/边界框演示代码/anchor_box_show.py | 8f81f7d1009f57ce352b788d31b52d1e17470cc4 | [
"Apache-2.0"
] | permissive | cjh3020889729/Target-detection-study-notes | bcb45477783af3744d5456f08e223ac6bc2d390d | a9b82b1b82fbeb045b50e6ddd50ba5d1df4fc326 | refs/heads/main | 2023-02-20T18:59:29.546954 | 2021-01-23T07:24:43 | 2021-01-23T07:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,101 | py | # -*- coding: utf-8 -*-
# @Author: 二月三
# @Date: 2021-01-22 15:57:34
# @Last Modified by: 二月三
# @Last Modified time: 2021-01-23 14:45:09
'''展示锚框的代码演示
1. 完成锚框的绘制
'''
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as image
from bounding_box_show import draw_rectangle # 绘制矩形——返回一个窗体句柄
from bounding_box_show import BoundingBox_Denote # 框数据表示的转换形式
def draw_anchor(ax, center, length, scales, ratios, img_height, img_width, color='r'):
    '''Draw a family of anchor boxes that all share one center point.

    ax:         matplotlib axes handle used to add the rectangle patches
    center:     (x, y) center of every generated anchor
    length:     base side length of the anchors
    scales:     list of size multipliers
    ratios:     list of aspect ratios (sqrt(ratio) scales height, divides width)
    img_height: image height -- anchors are clipped so they stay inside
    img_width:  image width

    One anchor is produced for every (scale, ratio) pair; its final extent
    also depends on the actual image size, since it must not leave the image.
    '''
    bboxs = []  # generated anchors, each as [x1, y1, x2, y2]
    for scale in scales:  # iterate over the size multipliers
        for ratio in ratios:  # and, for each size, over the aspect ratios
            # Derive the anchor height/width from base length, scale and ratio.
            # Fix: the original used np.math.sqrt -- the np.math alias was
            # removed in NumPy 2.0; np.sqrt is the public equivalent.
            h = length * scale * np.sqrt(ratio)
            w = length * scale / np.sqrt(ratio)
            # Convert center + size into the corner coordinates the drawing
            # API needs, clipped to the image bounds.
            x1 = max(center[0] - w / 2., 0.)
            y1 = max(center[1] - h / 2., 0.)
            x2 = min(center[0] + w / 2. - 1.0, img_width - 1.)  # keep inside image
            y2 = min(center[1] + h / 2. - 1.0, img_height - 1.)
            bbox = [x1, y1, x2, y2]
            print('An Anchor: ', bbox)
            bboxs.append(bbox)  # collect the generated anchor
    for bbox in bboxs:
        denote_mode = True  # current box denotation: corners (x1, y1, x2, y2)
        denote_bbox = BoundingBox_Denote(bbox=bbox, mode=denote_mode)
        # Draw the anchor's rectangle onto the axes
        rect = draw_rectangle(bbox=denote_bbox, mode=True, color=color)
        ax.add_patch(rect)
def main():
    """Demo: load an image and tile it with anchor boxes, then display it."""
    # Read the image first, then draw on top of it
    fig = plt.figure(figsize=(12, 8))
    ax = plt.gca()
    # Path of the demo image
    img_path = os.path.join(os.getcwd(), 'img', '1.jpg')
    img = image.imread(img_path)  # load the image data
    plt.imshow(img)  # show the image
    print(img.shape[0])
    print(img.shape[1])
    # # center: [310, 160]
    # draw_anchor(ax=ax, center=[310, 160],
    #             length=200, scales=[1.0], ratios=[0.5, 1.0, 2.0],
    #             img_height=img.shape[0], img_width=img.shape[1],
    #             color='b')
    # # center: [200, 200]
    # draw_anchor(ax=ax, center=[200, 200],
    #             length=100, scales=[1.0], ratios=[0.5, 1.0, 2.0],
    #             img_height=img.shape[0], img_width=img.shape[1],
    #             color='r')
    # Every 100 pixels, draw three anchors with base length 120
    for i in range(0, img.shape[0], 100):  # y value
        for j in range(0, img.shape[1], 100):  # x value
            # center: x, y
            y = i
            x = j
            draw_anchor(ax=ax, center=[x, y],
                        length=120, scales=[1.0], ratios=[0.5, 1.0, 2.0],
                        img_height=img.shape[0], img_width=img.shape[1],
                        color='b')
    # # center: [310, 160]
    # draw_anchor(ax=ax, center=[310, 160],
    #             length=200, scales=[1.0], ratios=[0.5, 1.0, 2.0],
    #             img_height=img.shape[0], img_width=img.shape[1],
    #             color='r')
    plt.show()
# Run the demo only when executed directly as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | cjh3020889729.noreply@github.com |
649f37d0e22387265f46b1c24d6a487f61f86b55 | 99363622844c467a5fcb54f83030f4085f6b8549 | /uartprogram | 8de946415de3725dcea24a425871c16ebb8423f5 | [] | no_license | crj598080709/hid_download_py | 8a6f238b35c60677ea91e9b807aa6ead29b26cb9 | 205342ecf58843c0dfed3437a54b9ac7d67861a5 | refs/heads/master | 2023-07-30T16:47:21.844791 | 2021-09-27T09:05:58 | 2021-09-27T09:05:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | #!/usr/bin/env python3
# encoding: utf8
#
# HID Download Tool
#
# Copyright (c) BekenCorp. (chunjian.tian@bekencorp.com). All rights reserved.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import sys
import os
import argparse
from bkutils import UartDownloader
import hid
import threading
# parse commandline arguments
def parse_args():
    """Build the CLI and return the parsed argparse.Namespace.

    Options: -d/--device (UART device), -s/--startaddr (hex flash address),
    -b/--baudrate, -u/--unprotect, plus the positional firmware filename.
    """
    parser = argparse.ArgumentParser(description='''Beken Uart Downloader.''')
    parser.add_argument('-d', '--device', default='/dev/ttyUSB0',
                        help="Uart device, default /dev/ttyUSB0")
    # startaddr is given in hexadecimal on the command line
    parser.add_argument('-s', '--startaddr', type=lambda x: int(x, 16),
                        default=0x11000,
                        help="burn flash address, defaults to 0x11000")
    parser.add_argument('-b', '--baudrate', type=int, default=1500000,
                        help="burn uart baudrate, defaults to 1500000")
    parser.add_argument('-u', '--unprotect', action="store_true",
                        help="unprotect flash first, used by BK7231N")
    parser.add_argument('filename', help='specify file_crc.bin')
    return parser.parse_args()
# Script entry: parse the CLI, then flash the image over the UART.
args = parse_args()
downloader = UartDownloader(args.device, args.baudrate, args.unprotect)
downloader.programm(args.filename, args.startaddr)
| [
"chunjian.tian@bekencorp.com"
] | chunjian.tian@bekencorp.com | |
bfbbb0075ac6f1b0090ae0effa92a185884a3d20 | ceb9978dc136e36d716d118923108c60c16bc74f | /Equipe3/server/app/domains/validations/__init__.py | 5fb7f7d2a0c78c41371ccc1e4c789c1f88d0f48c | [] | no_license | lucianapda/2021-1-ProgWebII | 58097fd8fdc3548988ee89313fd0aa102e854882 | f49648864381e799ad90b69e2fb53a670495b31e | refs/heads/main | 2023-05-06T01:29:07.520264 | 2021-05-29T01:14:23 | 2021-05-29T01:14:23 | 356,406,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,944 | py | from datetime import date
from server.app.exceptions import UnprocessableException, NotFoundException
from server.database.db_access import get_by_id
from typing import Dict, List, final, Any
import string
class Validator:
_errors: List[str] = list()
    def __init__(self):
        """Start with empty error buffers and cache the character tables
        used by the ``_contains_*`` helpers."""
        self._all_error_messages: List[str] = list()
        self.errors = list()  # goes through the `errors` property setter
        # Character classes used when scanning user input
        self._punctuations: final = list(string.punctuation)
        self._uppercase: final = list(string.ascii_uppercase)
        self._lowercase: final = list(string.ascii_lowercase)
        self._letters: final = list(string.ascii_letters)
        self._numbers: final = list(string.digits)
        # Accented characters accepted in names (note the leading space)
        self._seat: final = r""" àèìòùáéíóúýâêîôûãõäëïöüÿçÀÈÌÒÙÁÉÍÓÚÝÂÊÎÔÛÃÕÄËÏÖÜŸÇ"""
    @property
    def errors(self) -> List[str]:
        """Per-validation error messages accumulated by the helper methods."""
        return self._errors
@errors.setter
def errors(self, value: List) -> None:
if not isinstance(value, list):
self._errors = list()
else:
self._errors = value
    @property
    def all_error_messages(self) -> List[str]:
        """Messages collected for the final UnprocessableException report."""
        return self._all_error_messages
    @all_error_messages.setter
    def all_error_messages(self, error_message: List or str) -> None:
        """
        Add or clear error messages on the private '_all_error_messages' list.

        Assigning a list clears the buffer; assigning a str appends it.
        Any other type is silently ignored.
        """
        if isinstance(error_message, list):
            self._all_error_messages.clear()
        if isinstance(error_message, str):
            self._all_error_messages.append(error_message)
    def _shows_all_invalid_data_messages(self) -> None:
        """
        Checks if there are collected messages; if any,
        raises an UnprocessableException carrying all of them.
        """
        if self.all_error_messages:
            raise UnprocessableException(self.all_error_messages)
def _check_for_null_fields(self, data: Dict, allowed_column: List = []) -> None:
"""
Method that checks for null values
in the dictionary and whether they can contain nulls,
returns an 'unprocessable exception' with all null and disallowed columns.
"""
for column in data:
if data.get(column) is None or column not in allowed_column:
self.errors.append(f"{column} column contains error: null field found!")
if self.errors:
raise UnprocessableException(self._errors)
    @staticmethod
    def _check_the_id_in_database(table, id: str) -> None:
        """
        Method that takes an identification string and
        checks whether it exists in the database as real data;
        raises a NotFoundException when no matching row is found.
        """
        if not get_by_id(table, id):
            new_msg = f"ID column not found in database."
            raise NotFoundException(msg=new_msg)
def _contains_uppercase_letters(self, argument: str) -> bool:
"""
Method that receives a string and
verify that it does not contain uppercase letters,
returns a boolean and an error message if it is invalid.
"""
has: bool = False
try:
for character in argument:
if character in self._uppercase:
self.errors.append(f"contains uppercase letters. Found: {character}")
has = True
break
finally:
return has
def _contains_lowercase_letters(self, argument: str) -> bool:
"""
Method that receives a string and
verify that it does not contain lowercase letters,
returns a boolean and an error message if it is invalid.
"""
has: bool = False
try:
for character in argument:
if character in self._lowercase:
self.errors.append(f"contains lowercase letters. Found: {character}")
has = True
break
finally:
return has
def _contains_letters(self, argument: str) -> bool:
"""
Method that receives a string and
verify that it does not contain letters,
returns a boolean and an error message if it is invalid.
"""
has: bool = False
try:
for character in argument:
if character in self._letters:
self.errors.append(f"contains letters. Found: {character}")
has = True
break
finally:
return has
def _contains_numbers(self, argument: str) -> bool:
"""
Method that receives a string and
verify that it does not contain numbers,
returns a boolean and an error message if it is invalid.
"""
has: bool = False
try:
for character in argument:
if character in self._numbers:
self.errors.append(f"contains numbers. Found: {character}")
has = True
break
finally:
return has
def _contains_special_characters(self, argument: str, symbols_released: str = '') -> bool:
"""
Method that takes a string and
checks that it does not contain special characters,
returns a boolean and an error message if it is invalid.
"""
has: bool = False
try:
for character in argument:
if character not in self._letters \
and character not in self._numbers \
and character not in symbols_released:
self.errors.append(f"contains special characters. Found: {character}")
has = True
break
finally:
return has
def _date_is_valid(self, value: str) -> bool:
"""
Method that receives the value str e
checks if the year, month and day are valid values,
creates an error message if it is invalid
returns index error if not in the American standard and returns a boolean.
"""
maximum_year = date.today().year
minimum_year = maximum_year - 150
try:
date_list = value.split('-')
year = date_list[0]
month = date_list[1]
day = date_list[2]
if minimum_year > int(year) or int(year) > maximum_year:
self.errors.append(f"contains a year outside the "
f"{minimum_year} to {maximum_year} range")
return False
if 1 > int(month) or int(month) > 12:
self.errors.append(f"contains a month outside the 1 to 12 range")
return False
if 1 > int(day) or int(day) > 31:
self.errors.append(f"contains a day outside the 1 to 31 range")
return False
except IndexError:
raise UnprocessableException(f'date format is not on US keyboard or invalid')
return True
def _value_is_string(self, value: Any) -> bool:
"""
Method that receives any value and
check if it is of type string,
creates an error message if it is invalid and returns a boolean.
"""
if not isinstance(value, str):
self.errors.append("must be of string type")
return False
return True
def _type_is_valid(self, argument: str) -> bool:
"""
Method that receive a string and
verify if is either 'residential' or 'commercial'
returns a boolean and and error message if the argument is invalid
"""
result: bool = True
try:
if argument != "residential" and argument != 'commercial':
self.errors.append(f"wrong syntax! Expected: 'residential' or 'commercial'. Actual: {argument}")
result = False
finally:
return result
| [
"thomasmichels.bnu@gmail.com"
] | thomasmichels.bnu@gmail.com |
ba970c1c834e0df9cd037a4f55b4d5b18ac5bce1 | 9a2aa9e541a60691086f60ad8288bab1b587a1ee | /eeg_plot_blink_event.py | 589ac27762abafaefc11295c1dbbc354f5f3a87d | [] | no_license | maratumba/erp_blink | a2dd0cf358da20a694db1f6077b6dcf9f54840c0 | 80df789b7a5939fcd4a4d9ba910495ec587ded09 | refs/heads/master | 2020-04-04T16:35:14.179087 | 2018-11-04T19:31:48 | 2018-11-04T19:31:48 | 156,083,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | #!/usr/bin/env python
import argparse
# Command line: a single BrainVision header (.vhdr) file to inspect.
parser = argparse.ArgumentParser(description='Plot events and blinks.')
parser.add_argument('vhdr_file', help='vhdr file name')
args = parser.parse_args()
import os.path as op
import numpy as np
import mne
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# `except Exception` would be safer here.
try:
    raw=mne.io.read_raw_brainvision(args.vhdr_file, preload=True)
except:
    raise OSError('Failed to read file: {}'.format(args.vhdr_file))
# Re-reference the EEG to the linked mastoid-style pair LiRef/ReRef.
mne.add_reference_channels(raw, 'LiRef', copy=False)
raw.set_eeg_reference(['ReRef','LiRef'])
# Band-pass 0.1-30 Hz on channels 27-30 only -- presumably the four EOG
# channels declared below; TODO confirm the indices against the montage.
raw.filter(0.1,30, method='fir', picks=[27,28,29,30])
# Trigger codes of the condition-final ("-x") events of interest.
event_id = {'sem-yes-x': 203,
            'sem-no-x': 208,
            'world-yes-x': 213,
            'world-no-x': 218,
            'rel-yes-x': 223,
            'rel-no-x': 228,
            'abs—min-yes-x': 233,
            'abs—min-no-x': 238,
            'abs—max-yes-x': 243,
            'abs—max-no-x': 248}
# Mark the four ocular channels so MNE's EOG detection can find blinks.
raw.set_channel_types({'EOGli':'eog','EOGre':'eog','EOGobre':'eog','EOGunre':'eog'})
picks = mne.pick_types(raw.info,eog=True)
events = mne.find_events(raw)
# Full label -> trigger-code table (all conditions, responses and blocks).
rev_event_dict = {
    'sem-yes': 200, 'sem-yes-das': 201, 'sem-yes-ist': 202,'sem-yes-x': 203,
    'sem-no': 205, 'sem-no-das': 206, 'sem-no-ist': 207,'sem-no-x': 208,
    'world-yes': 210, 'world-yes-das': 211, 'world-yes-ist': 212,'world-yes-x': 213,
    'world-no': 215, 'world-no-das': 216, 'world-no-ist': 217,'world-no-x': 218,
    'rel-yes': 220, 'rel-yes-das': 221, 'rel-yes-ist': 222,'rel-yes-x': 223,
    'rel-no': 225, 'rel-no-das': 226, 'rel-no-ist': 227,'rel-no-x': 228,
    'abs—min-yes': 230, 'abs—min-yes-das': 231, 'abs—min-yes-ist': 232,'abs—min-yes-x': 233,
    'abs—min-no': 235, 'abs—min-no-das': 236, 'abs—min-no-ist': 237,'abs—min-no-x': 238,
    'abs—max-yes': 240, 'abs—max-yes-das': 241, 'abs—max-yes-ist': 242,'abs—max-yes-x': 243,
    'abs—max-no': 245, 'abs—max-no-das': 246, 'abs—max-no-ist': 247,'abs—max-no-x': 248,
    'display': 195, 'correct': 196, 'incorrect': 197, 'timeout': 199,
    'block1': 181, 'block2': 182, 'block3': 183, 'block4': 184, 'block5': 185, 'block6': 186, 'block7': 187
}
# Invert to map trigger code -> label for annotating.
event_dict ={v: k for k, v in rev_event_dict.items()}
# Construct event annotations (fall back to the raw code as a string
# when a trigger value has no label in the table):
event_annotations=[]
for event in events:
    if event[2] in event_dict.keys():
        event_annotations.append(event_dict[event[2]])
    else:
        event_annotations.append(str(event[2]))
# Detect blinks from the EOG channels.
eog_events = mne.preprocessing.find_eog_events(raw)
n_blinks = len(eog_events)
n_events = len(events)
# Center to cover the whole blink with full duration of 0.5s:
onset_bl = eog_events[:, 0] / raw.info['sfreq'] - 0.25
duration_bl = np.repeat(0.5, n_blinks)
onset_ev = events[:, 0] / raw.info['sfreq'] - 0.25
duration_ev = np.repeat(0.5, n_events)
# Merge blink and stimulus spans into one annotation set; blinks are
# labelled 'bad blink' so downstream epoching can reject them.
onset = np.hstack((onset_bl,onset_ev))
duration = np.hstack((duration_bl,duration_ev))
comb_events = np.vstack((eog_events,events))
raw.annotations = mne.Annotations(onset, duration, ['bad blink'] * n_blinks + event_annotations,
                                  orig_time=raw.info['meas_date'])
print(raw.annotations)  # to get information about what annotations we have
raw.plot(events=comb_events)  # To see the annotated segments.
"dandik@gmail.com"
] | dandik@gmail.com |
820f38bf30daf5136c4875063a9f1e8671bb2d8a | 0e5aef351700f3598a8e09290895ba779ecfb238 | /biadmin/urls.py | 5272e9b06db167ffc8eb460d5e9d8606ccc39ee0 | [] | no_license | jackmarron/primera_app_django | b025017bd9c97a9430df33dd58f0bfb22d40f445 | ba87ea47d51d0a4cc0be4e148a4c211049ea70d2 | refs/heads/master | 2020-04-06T03:57:27.753204 | 2017-02-24T22:46:02 | 2017-02-24T22:46:02 | 83,089,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | """biadmin URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Project-level URL table: only the Django admin site is routed here.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| [
"jacinto.moya@gmail.com"
] | jacinto.moya@gmail.com |
e729fadaa3091243a5fedbb0b96399543d53d82a | d2aa30899042e9f4755700850839dd1df38a723f | /filter.py | cbe6dfe1639ff2c734cf7286e36710ca3f1e9381 | [] | no_license | DhvanilVadher/Intrusion-Detection-System-using-ANN. | 40edcfd9b3fb588fdbdd40e447813cb99acc5bad | ab3149af97826e2e7b81cd424b4af18acfe7e5ef | refs/heads/main | 2023-04-13T02:58:42.725005 | 2021-04-23T15:18:42 | 2021-04-23T15:18:42 | 345,471,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 11:41:39 2021
@author: dhvanil
"""
import csv

# Stream CombinedWEDFRI.csv and copy the header row plus every row whose
# label column (index 11) equals "BENIGN" into CombinedWEDFRIBenign.csv.
# Context managers close both files even when a malformed row raises,
# and the handle names no longer shadow the `input` builtin.
with open('CombinedWEDFRI.csv', 'r') as source_file, \
        open('CombinedWEDFRIBenign.csv', 'w') as benign_file:
    writer = csv.writer(benign_file)
    for row_number, row in enumerate(csv.reader(source_file)):
        if row_number == 0:
            writer.writerow(row)  # always keep the header row
        if row[11] == "BENIGN":
            writer.writerow(row)
""
] | |
d9f337ae82395ca2126ef35e5e85c4451c50399d | 21bf43b6186e3e18391020cbc718a62c3a91dcf7 | /TensorPack/A3C/A3Cv1_5.py | 7601a23fd61122a1f119758e66eb116f4334fcb7 | [] | no_license | AIMan-Zzx/douzhizhu | 6b138d8c5d69001af1505668a1607f4d81245e38 | de799647cc15df69f46a5f065e1f8c8d5a66018b | refs/heads/main | 2023-04-22T03:39:04.754384 | 2021-05-07T16:01:51 | 2021-05-07T16:01:51 | 365,273,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,935 | py | import numpy as np
import os
import uuid
import argparse
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import os
import sys
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(FILE_PATH, '../..'))
sys.path.append(ROOT_PATH)
sys.path.insert(0, os.path.join(ROOT_PATH, 'build/Release' if os.name == 'nt' else 'build'))
from env import Env as CEnv
from utils import get_seq_length, pick_minor_targets, to_char, discard_onehot_from_s_60
from utils import pick_main_cards
from six.moves import queue
from card import action_space
from TensorPack.MA_Hierarchical_Q.env import Env
import tensorflow.contrib.slim as slim
import tensorflow.contrib.rnn as rnn
from tensorpack import *
from tensorpack.utils.concurrency import ensure_proc_terminate, start_proc_mask_signal
from tensorpack.utils.serialize import dumps
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils import get_current_tower_context, optimizer
from TensorPack.A3C.simulator import SimulatorProcess, SimulatorMaster, TransitionExperience, ROLE_IDS_TO_TRAIN
from TensorPack.A3C.model_loader import ModelLoader
from TensorPack.A3C.evaluator import Evaluator
from TensorPack.PolicySL.Policy_SL_v1_4 import conv_block as policy_conv_block
from TensorPack.ValueSL.Value_SL_v1_4 import conv_block as value_conv_block
import six
import numpy as np
if six.PY3:
from concurrent import futures
CancelledError = futures.CancelledError
else:
CancelledError = Exception
# Discount factor for the n-step returns accumulated in _parse_memory.
GAMMA = 0.99
# Flattened feature widths fed to the policy/value networks; each 60-wide
# slice is one card-encoding plane (see the [:, :60] splits below).
POLICY_INPUT_DIM = 60 + 120
POLICY_LAST_INPUT_DIM = 60 * 2
POLICY_WEIGHT_DECAY = 1e-3
VALUE_INPUT_DIM = 60 * 3
# Role id whose value sign is flipped in get_value -- presumably the
# landlord (roles 1 and 3 being the farmers); TODO confirm.
LORD_ID = 2
# Number of simulator subprocesses generating self-play games.
SIMULATOR_PROC = 20
# number of games per epoch roughly = STEPS_PER_EPOCH * BATCH_SIZE / 100
STEPS_PER_EPOCH = 2500
BATCH_SIZE = 8
PREDICT_BATCH_SIZE = 32
PREDICTOR_THREAD_PER_GPU = 4
# Filled in by train() once the GPU layout is known.
PREDICTOR_THREAD = None
def get_player():
    """Return a fresh CEnv game environment (used by the evaluator)."""
    return CEnv()
class Model(ModelDesc):
    def get_policy(self, role_id, state, last_cards, lstm_state):
        """
        Build the per-role policy heads.

        One sub-network per role id (1..3); rows of the batch are routed
        to their role's network with gather_nd and scattered back into
        batch order at the end.  Returns [active_logits, passive_logits,
        new_lstm_state], each batch-aligned.  Roles not listed in
        ROLE_IDS_TO_TRAIN get their outputs wrapped in stop_gradient so
        only the trained role's parameters receive updates.
        """
        # policy network, different for three agents
        batch_size = tf.shape(role_id)[0]
        gathered_outputs = []
        indices = []
        # train landlord only
        for idx in range(1, 4):
            with tf.variable_scope('policy_network_%d' % idx):
                lstm = rnn.BasicLSTMCell(1024, state_is_tuple=False)
                # Select the batch rows belonging to this role.
                id_idx = tf.where(tf.equal(role_id, idx))
                indices.append(id_idx)
                state_id = tf.gather_nd(state, id_idx)
                last_cards_id = tf.gather_nd(last_cards, id_idx)
                lstm_state_id = tf.gather_nd(lstm_state, id_idx)
                with slim.arg_scope([slim.fully_connected, slim.conv2d],
                                    weights_regularizer=slim.l2_regularizer(POLICY_WEIGHT_DECAY)):
                    with tf.variable_scope('branch_main'):
                        # Three residual conv towers, one per 60-wide
                        # slice of the state encoding.
                        flattened_1 = policy_conv_block(state_id[:, :60], 32, POLICY_INPUT_DIM // 3,
                                                        [[128, 3, 'identity'],
                                                         [128, 3, 'identity'],
                                                         [128, 3, 'downsampling'],
                                                         [128, 3, 'identity'],
                                                         [128, 3, 'identity'],
                                                         [256, 3, 'downsampling'],
                                                         [256, 3, 'identity'],
                                                         [256, 3, 'identity']
                                                         ], 'branch_main1')
                        flattened_2 = policy_conv_block(state_id[:, 60:120], 32, POLICY_INPUT_DIM // 3,
                                                        [[128, 3, 'identity'],
                                                         [128, 3, 'identity'],
                                                         [128, 3, 'downsampling'],
                                                         [128, 3, 'identity'],
                                                         [128, 3, 'identity'],
                                                         [256, 3, 'downsampling'],
                                                         [256, 3, 'identity'],
                                                         [256, 3, 'identity']
                                                         ], 'branch_main2')
                        flattened_3 = policy_conv_block(state_id[:, 120:], 32, POLICY_INPUT_DIM // 3,
                                                        [[128, 3, 'identity'],
                                                         [128, 3, 'identity'],
                                                         [128, 3, 'downsampling'],
                                                         [128, 3, 'identity'],
                                                         [128, 3, 'identity'],
                                                         [256, 3, 'downsampling'],
                                                         [256, 3, 'identity'],
                                                         [256, 3, 'identity']
                                                         ], 'branch_main3')
                        flattened = tf.concat([flattened_1, flattened_2, flattened_3], axis=1)
                        # Recurrent core; new_lstm_state is threaded back
                        # to the simulator between moves.
                        fc, new_lstm_state = lstm(flattened, lstm_state_id)
                        active_fc = slim.fully_connected(fc, 1024)
                        active_logits = slim.fully_connected(active_fc, len(action_space), activation_fn=None, scope='final_fc')
                    with tf.variable_scope('branch_passive'):
                        # Passive branch: attend over the active features
                        # conditioned on the opponent's last played cards,
                        # then reuse the same final_fc weights.
                        flattened_last = policy_conv_block(last_cards_id, 32, POLICY_LAST_INPUT_DIM,
                                                           [[128, 3, 'identity'],
                                                            [128, 3, 'identity'],
                                                            [128, 3, 'downsampling'],
                                                            [128, 3, 'identity'],
                                                            [128, 3, 'identity'],
                                                            [256, 3, 'downsampling'],
                                                            [256, 3, 'identity'],
                                                            [256, 3, 'identity']
                                                            ], 'last_cards')
                        passive_attention = slim.fully_connected(inputs=flattened_last, num_outputs=1024,
                                                                 activation_fn=tf.nn.sigmoid)
                        passive_fc = passive_attention * active_fc
                        passive_logits = slim.fully_connected(passive_fc, len(action_space), activation_fn=None, reuse=True, scope='final_fc')
                gathered_output = [active_logits, passive_logits, new_lstm_state]
                # Freeze roles that are not being trained.
                if idx not in ROLE_IDS_TO_TRAIN:
                    for k in range(len(gathered_output)):
                        gathered_output[k] = tf.stop_gradient(gathered_output[k])
                gathered_outputs.append(gathered_output)
        # 3: B * ?
        # Scatter each role's rows back to their original batch positions
        # and sum (each row is non-zero in exactly one role's scatter).
        outputs = []
        for i in range(3):
            scatter_shape = tf.cast(tf.stack([batch_size, gathered_outputs[0][i].shape[1]]), dtype=tf.int64)
            # scatter_shape = tf.Print(scatter_shape, [tf.shape(scatter_shape)])
            outputs.append(tf.add_n([tf.scatter_nd(indices[k], gathered_outputs[k][i], scatter_shape) for k in range(3)]))
        return outputs
    def get_value(self, role_id, state):
        """
        Build the shared value head.

        A single network scores the 3x60 state; the scalar output's sign
        is flipped for the lord role so each agent sees the value from
        its own perspective.
        """
        with tf.variable_scope('value_network'):
            # not adding regular loss for fc since we need big scalar output [-1, 1]
            with tf.variable_scope('value_conv'):
                flattened_1 = value_conv_block(state[:, :60], 32, VALUE_INPUT_DIM // 3, [[128, 3, 'identity'],
                                                                                        [128, 3, 'identity'],
                                                                                        [128, 3, 'downsampling'],
                                                                                        [128, 3, 'identity'],
                                                                                        [128, 3, 'identity'],
                                                                                        [256, 3, 'downsampling'],
                                                                                        [256, 3, 'identity'],
                                                                                        [256, 3, 'identity']
                                                                                        ], 'value_conv1')
                flattened_2 = value_conv_block(state[:, 60:120], 32, VALUE_INPUT_DIM // 3, [[128, 3, 'identity'],
                                                                                           [128, 3, 'identity'],
                                                                                           [128, 3, 'downsampling'],
                                                                                           [128, 3, 'identity'],
                                                                                           [128, 3, 'identity'],
                                                                                           [256, 3, 'downsampling'],
                                                                                           [256, 3, 'identity'],
                                                                                           [256, 3, 'identity']
                                                                                           ], 'value_conv2')
                flattened_3 = value_conv_block(state[:, 120:], 32, VALUE_INPUT_DIM // 3, [[128, 3, 'identity'],
                                                                                         [128, 3, 'identity'],
                                                                                         [128, 3, 'downsampling'],
                                                                                         [128, 3, 'identity'],
                                                                                         [128, 3, 'identity'],
                                                                                         [256, 3, 'downsampling'],
                                                                                         [256, 3, 'identity'],
                                                                                         [256, 3, 'identity']
                                                                                         ], 'value_conv3')
                flattened = tf.concat([flattened_1, flattened_2, flattened_3], axis=1)
            with tf.variable_scope('value_fc'):
                value = slim.fully_connected(flattened, num_outputs=1, activation_fn=None)
            value = tf.squeeze(value, 1)
            # +1 for the lord, -1 for farmers: flip the scalar so the
            # value is expressed from each role's own perspective.
            indicator = tf.cast(tf.equal(role_id, LORD_ID), tf.float32) * 2 - 1
            return -value * indicator
    def inputs(self):
        """Declare the training placeholders (tensorpack ModelDesc API)."""
        return [
            tf.placeholder(tf.int32, [None], 'role_id'),
            tf.placeholder(tf.float32, [None, POLICY_INPUT_DIM], 'policy_state_in'),
            tf.placeholder(tf.float32, [None, VALUE_INPUT_DIM], 'value_state_in'),
            tf.placeholder(tf.float32, [None, POLICY_LAST_INPUT_DIM], 'last_cards_in'),
            tf.placeholder(tf.int32, [None], 'action_in'),
            # 0 = active (leading a trick), 1 = passive (responding).
            tf.placeholder(tf.int32, [None], 'mode_in'),
            # Behaviour-policy probability of the taken action (for PPO).
            tf.placeholder(tf.float32, [None], 'history_action_prob_in'),
            tf.placeholder(tf.float32, [None], 'discounted_return_in'),
            # Concatenated LSTM (c, h), hence 1024 * 2.
            tf.placeholder(tf.float32, [None, 1024 * 2], 'lstm_state_in')
        ]
    def build_graph(self, role_id, prob_state, value_state, last_cards, action_target, mode, history_action_prob, discounted_return, lstm_state):
        """
        Assemble the full training graph.

        Exposes named inference tensors (active_prob, passive_prob,
        mode_out, new_lstm_state, pred_value) for the async predictors,
        then -- in training mode only -- builds a PPO-clipped policy
        loss, an entropy bonus, an L2-regularization term routed per
        role/branch, and a squared-error value loss, aggregated per role.
        """
        active_logits, passive_logits, new_lstm_state = self.get_policy(role_id, prob_state, last_cards, lstm_state)
        new_lstm_state = tf.identity(new_lstm_state, name='new_lstm_state')
        active_prob = tf.nn.softmax(active_logits, name='active_prob')
        passive_prob = tf.nn.softmax(passive_logits, name='passive_prob')
        mode_out = tf.identity(mode, name='mode_out')
        value = self.get_value(role_id, value_state)
        # this is the value for each agent, not the global value
        value = tf.identity(value, name='pred_value')
        is_training = get_current_tower_context().is_training
        if not is_training:
            return
        action_target_onehot = tf.one_hot(action_target, len(action_space))
        # Log-probability of the taken action under each head.
        # active mode
        active_logpa = tf.reduce_sum(action_target_onehot * tf.log(
            tf.clip_by_value(active_prob, 1e-7, 1 - 1e-7)), 1)
        # passive mode
        passive_logpa = tf.reduce_sum(action_target_onehot * tf.log(
            tf.clip_by_value(passive_prob, 1e-7, 1 - 1e-7)), 1)
        # B * 2
        logpa = tf.stack([active_logpa, passive_logpa], axis=1)
        # Pick the head that was actually used (mode) for each row.
        idx = tf.stack([tf.range(tf.shape(prob_state)[0]), mode], axis=1)
        # B
        logpa = tf.gather_nd(logpa, idx)
        # importance sampling
        active_pa = tf.reduce_sum(action_target_onehot * tf.clip_by_value(active_prob, 1e-7, 1 - 1e-7), 1)
        passive_pa = tf.reduce_sum(action_target_onehot * tf.clip_by_value(passive_prob, 1e-7, 1 - 1e-7), 1)
        # B * 2
        pa = tf.stack([active_pa, passive_pa], axis=1)
        idx = tf.stack([tf.range(tf.shape(prob_state)[0]), mode], axis=1)
        # B
        pa = tf.gather_nd(pa, idx)
        # using PPO
        ppo_epsilon = tf.get_variable('ppo_epsilon', shape=[], initializer=tf.constant_initializer(0.2),
                                      trainable=False)
        # Ratio of current to behaviour policy for the taken action.
        importance_b = pa / (history_action_prob + 1e-8)
        # advantage
        advantage_b = tf.subtract(discounted_return, tf.stop_gradient(value), name='advantage')
        # PPO clipped surrogate objective (negated for minimization).
        policy_loss_b = -tf.minimum(importance_b * advantage_b, tf.clip_by_value(importance_b, 1 - ppo_epsilon, 1 + ppo_epsilon) * advantage_b)
        entropy_loss_b = pa * logpa
        value_loss_b = tf.square(value - discounted_return)
        entropy_beta = tf.get_variable('entropy_beta', shape=[], initializer=tf.constant_initializer(0.005),
                                       trainable=False)
        value_weight = tf.get_variable('value_weight', shape=[], initializer=tf.constant_initializer(0.2), trainable=False)
        # regularization loss
        ctx = get_current_tower_context()
        if ctx.has_own_variables:  # be careful of the first tower (name='')
            l2_loss = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)
        else:
            l2_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if len(l2_loss) > 0:
            logger.info("regularize_cost_from_collection() found {} regularizers "
                        "in REGULARIZATION_LOSSES collection.".format(len(l2_loss)))
        # Split regularizers per role and per branch: the active branch
        # excludes 'branch_passive' variables, the passive branch uses all.
        # 3 * 2
        l2_losses = []
        for role in range(1, 4):
            scope = 'policy_network_%d' % role
            l2_loss_role = [l for l in l2_loss if l.op.name.startswith(scope)]
            l2_active_loss = [l for l in l2_loss_role if 'branch_passive' not in l.name]
            l2_passive_loss = l2_loss_role
            print('l2 active loss: {}'.format(len(l2_active_loss)))
            print('l2 passive loss: {}'.format(len(l2_passive_loss)))
            # 2
            losses = [tf.add_n(l2_active_loss), tf.add_n(l2_passive_loss)]
            losses = tf.stack(losses, axis=0)
            # Farmers (roles 1 and 3) are frozen -- no gradient through
            # their regularizers.
            if role == 1 or role == 3:
                losses = tf.stop_gradient(losses)
            l2_losses.append(losses)
        # 3 * 2
        l2_losses = tf.stack(l2_losses, axis=0)
        # B * 2
        l2_losses = tf.gather(l2_losses, role_id)
        # B
        l2_losses = tf.gather_nd(l2_losses, idx)
        print(l2_losses.shape)
        # print(policy_loss_b.shape)
        # print(entropy_loss_b.shape)
        # print(value_loss_b.shape)
        # print(advantage_b.shape)
        # Average each loss over the rows belonging to each role and sum
        # the per-role costs into the scalar the trainer minimizes.
        costs = []
        for i in range(1, 4):
            mask = tf.equal(role_id, i)
            valid_batch = tf.reduce_sum(tf.cast(mask, tf.float32))
            # print(mask.shape)
            l2_loss = tf.truediv(tf.reduce_sum(tf.boolean_mask(l2_losses, mask)), valid_batch, name='l2_loss_%d' % i)
            pred_reward = tf.truediv(tf.reduce_sum(tf.boolean_mask(value, mask)), valid_batch, name='predict_reward_%d' % i)
            true_reward = tf.truediv(tf.reduce_sum(tf.boolean_mask(discounted_return, mask)), valid_batch, name='true_reward_%d' % i)
            advantage = tf.sqrt(tf.truediv(tf.reduce_sum(tf.square(tf.boolean_mask(advantage_b, mask))), valid_batch), name='rms_advantage_%d' % i)
            policy_loss = tf.truediv(tf.reduce_sum(tf.boolean_mask(policy_loss_b, mask)), valid_batch, name='policy_loss_%d' % i)
            entropy_loss = tf.truediv(tf.reduce_sum(tf.boolean_mask(entropy_loss_b, mask)), valid_batch, name='entropy_loss_%d' % i)
            value_loss = tf.truediv(tf.reduce_sum(tf.boolean_mask(value_loss_b, mask)), valid_batch, name='value_loss_%d' % i)
            cost = tf.add_n([policy_loss, entropy_loss * entropy_beta, value_weight * value_loss, l2_loss], name='cost_%d' % i)
            # cost = tf.truediv(cost, tf.reduce_sum(tf.cast(mask, tf.float32)), name='cost_%d' % i)
            costs.append(cost)
            importance = tf.truediv(tf.reduce_sum(tf.boolean_mask(importance_b, mask)), valid_batch, name='importance_%d' % i)
            add_moving_summary(policy_loss, entropy_loss, value_loss, l2_loss, pred_reward, true_reward, advantage, cost, importance, decay=0)
        return tf.add_n(costs)
    def optimizer(self):
        """Adam with gradient clipping; lr is tunable at runtime via
        HumanHyperParamSetter (see train())."""
        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
        opt = tf.train.AdamOptimizer(lr)
        # Clip each gradient by its average norm to stabilize training.
        gradprocs = [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.5))]
        opt = optimizer.apply_grad_processors(opt, gradprocs)
        return opt
class MySimulatorWorker(SimulatorProcess):
    """Self-play subprocess; each worker owns one CEnv game instance."""
    def _build_player(self):
        # Hook called by SimulatorProcess to create this worker's env.
        return CEnv()
class MySimulatorMaster(SimulatorMaster, Callback):
    """
    Bridges simulator subprocesses and the trainer.

    Receives (state, reward, isOver) messages from workers over ZMQ,
    answers them with actions sampled from async GPU predictors, and,
    when an episode ends, converts each client's episode memory into
    discounted-return training tuples pushed onto self.queue (consumed
    by the trainer's dataflow).
    """
    def __init__(self, pipe_c2s, pipe_s2c, gpus):
        super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c)
        # Bounded so simulators back off when the trainer falls behind.
        self.queue = queue.Queue(maxsize=BATCH_SIZE * 8 * 2)
        self._gpus = gpus
    def _setup_graph(self):
        # create predictors on the available predictor GPUs.
        nr_gpu = len(self._gpus)
        predictors = [self.trainer.get_predictor(
            ['role_id', 'policy_state_in', 'value_state_in', 'last_cards_in', 'mode_in', 'lstm_state_in'],
            ['active_prob', 'passive_prob', 'mode_out', 'new_lstm_state'],
            self._gpus[k % nr_gpu])
            for k in range(PREDICTOR_THREAD)]
        self.async_predictor = MultiThreadAsyncPredictor(
            predictors, batch_size=PREDICT_BATCH_SIZE)
    def _before_train(self):
        self.async_predictor.start()
    def _on_state(self, role_id, prob_state, all_state, last_cards_onehot, mask, mode, lstm_state, client):
        """
        Launch forward prediction for the new state given by some client.
        """
        def cb(outputs):
            # logger.info('async predictor callback')
            try:
                output = outputs.result()
            except CancelledError:
                logger.info("Client {} cancelled.".format(client.ident))
                return
            new_lstm_state = output[-1]
            mode = output[-2]
            # Pick the active or passive head by mode, mask out illegal
            # moves, then sample an action from the renormalized distrib.
            distrib = (output[:-2][mode] + 1e-7) * mask
            assert np.all(np.isfinite(distrib)), distrib
            action = np.random.choice(len(distrib), p=distrib / distrib.sum())
            # Record the transition in this client's per-role memory.
            client.memory[role_id - 1].append(TransitionExperience(
                prob_state, all_state, action, reward=0, lstm_state=lstm_state,
                last_cards_onehot=last_cards_onehot, mode=mode, prob=distrib[action]))
            self.send_queue.put([client.ident, dumps((action, new_lstm_state))])
        self.async_predictor.put_task([role_id, prob_state, all_state, last_cards_onehot, mode, lstm_state], cb)
    def _process_msg(self, client, role_id, prob_state, all_state, last_cards_onehot, mask, mode, lstm_state, reward, isOver):
        """
        Process a message sent from some client.
        """
        # in the first message, only state is valid,
        # reward&isOver should be discarded
        if isOver:
            # should clear client's memory and put to queue
            assert reward != 0
            for i in range(3):
                if i != 1:
                    continue
                # notice that C++ returns the reward for farmer, transform to the reward in each agent's perspective
                client.memory[i][-1].reward = reward if i != 1 else -reward
            self._parse_memory(0, client)
        # feed state and return action
        self._on_state(role_id, prob_state, all_state, last_cards_onehot, mask, mode, lstm_state, client)
    def _parse_memory(self, init_r, client):
        """Turn a finished episode into training tuples with discounted
        returns and clear the client's memory."""
        # for each agent's memory
        for role_id in range(1, 4):
            if role_id not in ROLE_IDS_TO_TRAIN:
                continue
            mem = client.memory[role_id - 1]
            # Walk the episode backwards to accumulate GAMMA-discounted R.
            mem.reverse()
            R = float(init_r)
            for idx, k in enumerate(mem):
                R = k.reward + GAMMA * R
                self.queue.put([role_id, k.prob_state, k.all_state, k.last_cards_onehot, k.action, k.mode, k.prob, R, k.lstm_state])
            client.memory[role_id - 1] = []
def train():
    """
    Wire everything together and launch training.

    Assigns GPUs between training and inference, spawns the simulator
    subprocesses with their ZMQ pipes, builds the dataflow from the
    master's queue, and starts the tensorpack trainer with saving,
    hyper-parameter and evaluation callbacks.
    """
    dirname = os.path.join('train_log', 'A3C-LSTM')
    logger.set_logger_dir(dirname)
    # assign GPUs for training & inference
    nr_gpu = get_nr_gpu()
    global PREDICTOR_THREAD
    if nr_gpu > 0:
        if nr_gpu > 1:
            # use all gpus for inference
            predict_tower = list(range(nr_gpu))
        else:
            predict_tower = [0]
        PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU
        # Reserve the first half of the GPUs for training.
        train_tower = list(range(nr_gpu))[:-nr_gpu // 2] or [0]
        logger.info("[Batch-A3C] Train on gpu {} and infer on gpu {}".format(
            ','.join(map(str, train_tower)), ','.join(map(str, predict_tower))))
    else:
        logger.warn("Without GPU this model will never learn! CPU is only useful for debug.")
        PREDICTOR_THREAD = 1
        predict_tower, train_tower = [0], [0]
    # setup simulator processes
    name_base = str(uuid.uuid1())[:6]
    # Windows has no abstract/ipc sockets, so fall back to TCP there.
    if os.name == 'nt':
        namec2s = 'tcp://127.0.0.1:8000'
        names2c = 'tcp://127.0.0.1:9000'
    else:
        prefix = '@' if sys.platform.startswith('linux') else ''
        namec2s = 'ipc://{}sim-c2s-{}'.format(prefix, name_base)
        names2c = 'ipc://{}sim-s2c-{}'.format(prefix, name_base)
    procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]
    ensure_proc_terminate(procs)
    start_proc_mask_signal(procs)
    master = MySimulatorMaster(namec2s, names2c, predict_tower)
    dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE)
    config = AutoResumeTrainConfig(
        always_resume=True,
        # starting_epoch=0,
        model=Model(),
        dataflow=dataflow,
        callbacks=[
            ModelSaver(),
            MaxSaver('true_reward_2'),
            HumanHyperParamSetter('learning_rate'),
            # ScheduledHyperParamSetter('learning_rate', [(20, 0.0003), (120, 0.0001)]),
            # ScheduledHyperParamSetter('entropy_beta', [(80, 0.005)]),
            master,
            StartProcOrThread(master),
            Evaluator(
                100, ['role_id', 'policy_state_in', 'last_cards_in', 'lstm_state_in'],
                ['active_prob', 'passive_prob', 'new_lstm_state'], get_player),
            # SendStat(
            #     'export http_proxy=socks5://127.0.0.1:1080 https_proxy=socks5://127.0.0.1:1080 && /home/neil/anaconda3/bin/curl --header "Access-Token: o.CUdAMXqiVz9qXTxLYIXc0XkcAfZMpNGM" -d type=note -d title="doudizhu" '
            #     '-d body="lord win rate: {lord_win_rate}\n policy loss: {policy_loss_2}\n value loss: {value_loss_2}\n entropy loss: {entropy_loss_2}\n'
            #     'true reward: {true_reward_2}\n predict reward: {predict_reward_2}\n advantage: {rms_advantage_2}\n" '
            #     '--request POST https://api.pushbullet.com/v2/pushes',
            #     ['lord_win_rate', 'policy_loss_2', 'value_loss_2', 'entropy_loss_2',
            #      'true_reward_2', 'predict_reward_2', 'rms_advantage_2']
            # ),
        ],
        # session_init=SaverRestore('./train_log/a3c_action_1d/max-true_reward_2'),
        # session_init=ModelLoader('policy_network_2', 'SL_policy_network', 'value_network', 'SL_value_network'),
        steps_per_epoch=STEPS_PER_EPOCH,
        max_epoch=1000,
    )
    trainer = SimpleTrainer() if config.nr_tower == 1 else AsyncMultiGPUTrainer(train_tower)
    launch_train_with_config(config, trainer)
# Script entry point.
if __name__ == '__main__':
    train()
| [
"noreply@github.com"
] | AIMan-Zzx.noreply@github.com |
ce87850232817f8b891133409ceac9ed5d7786c6 | 3d5825c6baae8f0ebc963a38a0216eedc8300a06 | /01-webotron/webotron/bucket.py | 51783bf462c9a7affa962e44710bd7af3cb86007 | [] | no_license | CashewRose/automating-aws-with-python | e760f6cae3881e2b210071ce6ac2e30ef77b5717 | 81504654c5c24956d9f8ac4202afc759bc790653 | refs/heads/master | 2022-02-04T22:54:16.743553 | 2019-12-17T15:02:43 | 2019-12-17T15:02:43 | 224,026,160 | 0 | 0 | null | 2022-01-21T20:11:06 | 2019-11-25T19:33:58 | Python | UTF-8 | Python | false | false | 5,042 | py | # -*- coding: utf-8 -*-
"""Classes for S3 Buckets."""
from pathlib import Path
import mimetypes
from functools import reduce
import boto3
from botocore.exceptions import ClientError
from hashlib import md5
import util
class BucketManager:
    """Manage an S3 Bucket."""
    # Chunk size (8 MiB) used both for multipart uploads and for
    # computing multipart-style ETags, so local and remote ETags match.
    CHUNK_SIZE = 8388608
    def __init__(self, session):
        """Create a BucketManager object for the given boto3 session."""
        self.session = session
        self.s3 = self.session.resource('s3')
        self.transfer_config = boto3.s3.transfer.TransferConfig(
            multipart_chunksize = self.CHUNK_SIZE,
            multipart_threshold = self.CHUNK_SIZE
        )
        # key -> ETag of objects already in the bucket (see load_manifest).
        self.manifest = {}
    def all_buckets(self):
        """Get an iterator for all buckets."""
        return self.s3.buckets.all()
    def get_region_name(self, bucket):
        """Get the bucket's region name (us-east-1 reports as None)."""
        client = self.s3.meta.client
        bucket_location = client.get_bucket_location(Bucket=bucket.name)
        return bucket_location["LocationConstraint"] or 'us-east-1'
    def get_bucket_url(self, bucket):
        """Get the website url for this bucket."""
        return "http://{}.{}".format(
            bucket.name,
            util.get_endpoint(self.get_region_name(bucket)).host)
    def all_objects(self, bucket_name):
        """Get an iterator for all objects in bucket."""
        return self.s3.Bucket(bucket_name).objects.all()
    def init_bucket(self, bucket_name):
        """Create new bucket, or return existing one by name.

        NOTE(review): when the session region is us-east-1, passing it as
        LocationConstraint is rejected by S3 -- confirm this path is only
        used in other regions.
        """
        s3_bucket = None
        try:
            s3_bucket = self.s3.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={
                    'LocationConstraint': self.session.region_name
                }
            )
        except ClientError as error:
            # Re-creating a bucket we already own is not an error.
            if error.response['Error']['Code'] == 'BucketAlreadyOwnedByYou':
                s3_bucket = self.s3.Bucket(bucket_name)
            else:
                raise error
        return s3_bucket
    def set_policy(self, bucket):
        """Set bucket policy to be readable by everyone."""
        policy = """
        {
        "Version":"2012-10-17",
        "Statement":[{
        "Sid":"PublicReadGetObject",
        "Effect":"Allow",
        "Principal": "*",
        "Action":["s3:GetObject"],
        "Resource":["arn:aws:s3:::%s/*"
        ]
        }
        ]
        }
        """ % bucket.name
        policy = policy.strip()
        pol = bucket.Policy()
        pol.put(Policy=policy)
    def configure_website(self, bucket):
        """Configure s3 website hosting for bucket."""
        bucket.Website().put(WebsiteConfiguration={
            'ErrorDocument': {
                'Key': 'error.html'
            },
            'IndexDocument': {
                'Suffix': 'index.html'
            }
        })
    def load_manifest(self, bucket):
        """Load manifest (key -> ETag map) for caching purposes."""
        paginator = self.s3.meta.client.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=bucket.name):
            for obj in page.get("Contents", []):
                self.manifest[obj['Key']] = obj['ETag']
    @staticmethod
    def hash_data(data):
        """Generate md5 hash object for data."""
        hash = md5()
        hash.update(data)
        return hash
    def gen_etag(self, path):
        """Generate an S3-compatible ETag for the file at *path*.

        Single-chunk files use the plain md5; larger files use the
        multipart convention: md5 of the concatenated chunk digests,
        suffixed with the chunk count.
        """
        hashes = []
        with open(path, 'rb') as f:
            while True:
                data = f.read(self.CHUNK_SIZE)
                if not data:
                    break
                hashes.append(self.hash_data(data))
        if not hashes:
            # Empty file: no ETag (upload_file will never match and skip).
            return
        elif len(hashes) == 1:
            return '"{}"'.format(hashes[0].hexdigest())
        else:
            hash = self.hash_data(reduce(lambda x, y: x + y, (h.digest() for h in hashes)))
            return '"{}-{}"'.format(hash.hexdigest(), len(hashes))
    def upload_file(self, bucket, path, key):
        """Upload path to s3_bucket at key, skipping unchanged files."""
        content_type = mimetypes.guess_type(key)[0] or 'text/plain'
        etag = self.gen_etag(path)
        # Skip the upload when the remote ETag matches the local one.
        if self.manifest.get(key, '') == etag:
            print("Skipping {}, etags match".format(key))
            return
        return bucket.upload_file(
            path,
            key,
            ExtraArgs={
                'ContentType': content_type
            },
            Config=self.transfer_config
        )
    def sync(self, pathname, bucket_name):
        """Sync contents of path to bucket (recursive, change-detected)."""
        bucket = self.s3.Bucket(bucket_name)
        self.load_manifest(bucket)
        root = Path(pathname).expanduser().resolve()
        def handle_directory(target):
            # Recurse into subdirectories; upload files keyed by their
            # path relative to the sync root.
            for p in target.iterdir():
                if p.is_dir():
                    handle_directory(p)
                if p.is_file():
                    self.upload_file(bucket, str(p), str(p.relative_to(root)))
        handle_directory(root)
| [
"Cashew.Agnoletti@healthstream.com"
] | Cashew.Agnoletti@healthstream.com |
488e7df2d2f6e38349be3512ebc552b14c96aabf | 23e8694a7ff0927d2339b762719eac9dc4c509b0 | /public/models.py | 626ff0632e1f5ec7a6f1f744cfa92eaadb8ab730 | [] | no_license | Lolaji/personal-blog | 738e04667f4403644179cadd434212aee0b20d68 | 61890415a8a39a5da81c6006c884e471c19ed1f8 | refs/heads/master | 2023-07-18T06:52:48.345190 | 2021-08-28T22:22:25 | 2021-08-28T22:22:25 | 400,896,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.models import User
class Post (models.Model):
    """A blog post authored by a site user."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    # Set at creation time; timezone-aware via django.utils.timezone.
    date_posted = models.DateTimeField(default=timezone.now)
    # Deleting a user cascades and removes their posts.
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        """Human-readable representation (admin, shell, templates)."""
        return self.title
    def get_absolute_url(self):
        """Return the URL of this post's detail page ('blog-detail')."""
        return reverse('blog-detail', kwargs={'pk':self.pk})
"ayo.lolade@gmail.com"
] | ayo.lolade@gmail.com |
5d0b3e65f94c2fbabeaee06a668a4dfe46d94fe6 | 1f115a9f3af7d7523989ffbe16f3e0f50851768d | /pydash/pydash_logger/logger.py | 1fac34218c4ed98b0c040eb3fef76c321035513c | [] | no_license | RUGSoftEng/2018-PyDash.io | f86f5d2ef586129d96c82fca5b504bfad44af3f5 | 7b9dd176f53bcf1afe7b2356599407fea41678a9 | refs/heads/development | 2022-12-15T20:54:53.030794 | 2020-09-05T13:09:00 | 2020-09-05T13:09:00 | 121,507,896 | 8 | 2 | null | 2022-12-08T18:30:39 | 2018-02-14T12:23:45 | Python | UTF-8 | Python | false | false | 2,775 | py | """
Logger object will log messages and errors to date-stamped '.log' files in the /logs directory of the project. Simply
import the class and use it to log messages.
"""
import logging
import os
from datetime import datetime
class Logger:
def __init__(self, name=__name__):
"""
Sets up default logging utility for logger object
:param: name: namespace where you want the logger to be. Suggested value = __name__. Defaults to
'pydash_app.impl.logger'.
"""
logging.basicConfig(level=logging.INFO)
self._default_logger = logging.getLogger(name)
self._default_handler = logging.FileHandler(os.getcwd() + '/logs/' + str(datetime.today().date()) + '.log')
self._default_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self._default_handler.setFormatter(self._default_formatter)
self._default_logger.addHandler(self._default_handler)
def _log(self, msg, level):
"""
Helper function to abstract setting the filename with each call. This is done by removing the current
FileHandler and setting a new one up with the correct path.
:param: msg: the message to be logged.
:param: level: the level at which the message will be logged.
"""
self._default_logger.removeHandler(self._default_handler)
self._default_handler = logging.FileHandler(os.getcwd() + '/logs/' + str(datetime.today().date()) + '.log')
self._default_handler.setFormatter(self._default_formatter)
self._default_logger.addHandler(self._default_handler)
# Log at correct level
if level == logging.DEBUG:
self._default_logger.debug(msg)
if level == logging.INFO:
self._default_logger.info(msg)
if level == logging.WARNING:
self._default_logger.warning(msg)
if level == logging.ERROR:
self._default_logger.error(msg)
def debug(self, msg):
"""
Takes a message and logs it at the logging.DEBUG level
:param: msg: the message to be logged
"""
self._log(msg, logging.DEBUG)
def info(self, msg):
"""
Takes a message and logs it at the logging.INFO level
:param: msg: the message to be logged
"""
self._log(msg, logging.INFO)
def warning(self, msg):
"""
Takes a message and logs it at the logging.WARN level
:param: msg: the message to be logged
"""
self._log(msg, logging.WARNING)
def error(self, msg):
"""
Takes a message and logs it at the logging.ERROR level
:param: msg: the message to be logged
"""
self._log(msg, logging.ERROR)
| [
"jeroenlanghorst@gmail.com"
] | jeroenlanghorst@gmail.com |
d8d97c3d31225279dc881369146f87ddfda687b0 | a3e12b2f9e0ff3002e6516c73da49b41becfeecf | /pauli_lindblad_per/tomography/experiment.py | 1dee4676ab75cfef6de08da0268d52ae7265e0b4 | [] | no_license | benmcdonough20/AutomatedPERTools | 14a9a5b65837b2331c678510be4e5de1dd3272d1 | 782b1a350ef6360bf6f75196545c9a23d50558e1 | refs/heads/main | 2023-05-24T18:13:08.019828 | 2023-05-12T00:31:14 | 2023-05-12T00:31:14 | 542,316,140 | 4 | 0 | null | 2023-05-12T00:33:47 | 2022-09-27T22:44:17 | Jupyter Notebook | UTF-8 | Python | false | false | 3,872 | py | from tomography.processorspec import ProcessorSpec
from tomography.layerlearning import LayerLearning
from tomography.analysis import Analysis
from framework.percircuit import PERCircuit
from per.perexperiment import PERExperiment
from typing import List, Any
import logging
# Module-level logging: everything goes to experiment.log; filemode 'w'
# truncates the file on every fresh run.
logging.basicConfig(filename="experiment.log",
                    format='%(asctime)s %(message)s',
                    filemode='w')
logger = logging.getLogger("experiment")
logger.setLevel(logging.INFO)
from primitives.circuit import QiskitCircuit
from primitives.processor import QiskitProcessor
import pickle
class SparsePauliTomographyExperiment:
    """This class carries out the full experiment by creating and running a
    LayerLearning instance for each distinct layer, running the analysis,
    and then returning a PERCircuit with NoiseModels attached to each
    distinct layer."""

    def __init__(self, circuits, inst_map, backend):
        """Collect the distinct Clifford layers appearing in *circuits* and
        prepare a LayerLearning procedure (plus a shared Analysis) for each.

        circuits -- iterable of circuits; only Qiskit QuantumCircuit objects
            are supported at present
        inst_map -- instruction/qubit map forwarded to ProcessorSpec
        backend  -- backend wrapped by QiskitProcessor
        """
        circuit_interface = None
        if circuits[0].__class__.__name__ == "QuantumCircuit":
            circuit_interface = QiskitCircuit
            processor = QiskitProcessor(backend)
        else:
            raise Exception("Unsupported circuit type")

        # Deduplicate Clifford layers across all circuits -- each distinct
        # layer only needs to be characterized once.
        self._profiles = set()
        for circuit in circuits:
            circ_wrap = circuit_interface(circuit)
            parsed_circ = PERCircuit(circ_wrap)
            for layer in parsed_circ._layers:
                if layer.cliff_layer:
                    self._profiles.add(layer.cliff_layer)

        logger.info("Generated layer profile with %s layers:" % len(self._profiles))
        for layer in self._profiles:
            logger.info(layer)

        self._procspec = ProcessorSpec(inst_map, processor)
        self.instances = []
        self._inst_map = inst_map
        # (Removed a dead `self._layers = None` that was immediately overwritten.)
        self._layers = []
        for l in self._profiles:
            learning = LayerLearning(l, self._procspec)
            self._layers.append(learning)

        self.analysis = Analysis(self._layers, self._procspec)

    def generate(self, samples, single_samples, depths):
        """This method is used to generate the experimental benchmarking
        procedure. The samples are the number of times to sample from the
        Pauli twirl. The single_samples controls how many twirl samples to
        take from the degeneracy-lifting measurements. It may be desirable
        to make this higher since the error on these measurements will
        generally be higher. The depths control the different circuit depths
        to use for the exponential fits."""
        if len(depths) < 2:
            # Message now matches the guard above (the original said
            # "Exponental ... 3 or more" while accepting two depths).
            raise Exception("Exponential fit requires 2 or more depth data points.")
        for l in self._layers:
            l.procedure(samples, single_samples, depths)

    def run(self, executor):
        """This method produces a list of circuits in the native
        representation, passes them as a list to the executor method, and
        associates the result with the benchmark instances that produced it."""
        instances = []
        for l in self._layers:
            instances += l.instances
        circuits = [inst.get_circuit() for inst in instances]
        results = executor(circuits)
        for res, inst in zip(results, instances):  # TODO: find out if order can be preserved
            inst.add_result(res)

    def analyze(self):
        """Runs analysis on each layer representative and stores it for later
        plotting/viewing; returns the resulting noise dataframe."""
        self.analysis.analyze()
        return self.analysis.noisedataframe

    def create_per_experiment(self, circuits: Any) -> PERExperiment:
        """Build a PERExperiment over *circuits* using the learned noise data."""
        experiment = PERExperiment(circuits, self._inst_map, self.analysis.noisedataframe, backend=None, processor=self._procspec._processor)
        return experiment

    def save(self):
        raise NotImplementedError()

    def load(self):
        raise NotImplementedError()
| [
"benmcdonough20@gmail.com"
] | benmcdonough20@gmail.com |
a5c7419980fc1ab00d390b2848352fdb78126dca | 083dfa48d1c9efe74886a71c5377a41aefb73e1c | /a_smarter_way_textbook/write_test.py | eee603052fff609e13c08f332ad98fdab626435a | [] | no_license | Jabernathy88/code-katas | b68f2b7ecceeacf3cc4797beb2b1a938549ff7cd | c8660994fc7bc34adaf2f10e4a71ad908682f5de | refs/heads/master | 2021-09-10T05:54:39.125471 | 2021-08-31T23:53:59 | 2021-08-31T23:53:59 | 209,199,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | with open("terrasque_append.txt", "w") as f:
    f.write("I am a 20th")  # mode "w": truncate/create and write the first half

with open("terrasque_append.txt", "a") as f:
    f.write(" level monster!")  # mode "a": append the second half

with open("terrasque_append.txt") as f:
    print(f.read())  # default mode "r": read back the joined sentence
| [
"jea@JEREMYs-MacBook-Pro.local"
] | jea@JEREMYs-MacBook-Pro.local |
779d5c42376e0a38239abdb8e61c83a569e86a8b | c2e0dded1c67cb8c7a1e721853fedda3d22addef | /2016/05-2.py | 40fcc84d60f2c1bb0d4711c217e0c15e983c1449 | [
"MIT"
] | permissive | LK/advent-of-code | 8b60b96b88581956b8a2257d1a8e1308c34ee421 | fc9ba3315608bc3b20ebb2c1212145f970b888d6 | refs/heads/master | 2020-06-15T06:11:25.165683 | 2016-12-21T05:55:17 | 2016-12-21T05:55:17 | 75,320,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import hashlib
# Advent of Code 2016, day 5 part 2 (Python 2 -- raw_input / print statement).
# Brute-force MD5 over doorid + counter: when a digest starts with five zeros,
# hex digit 5 names the password position and digit 6 the character.
doorid = raw_input()
password = [None] * 8
done = 0  # number of password slots filled so far
i = 0
while done < 8:
    hash = hashlib.md5(doorid + str(i)).hexdigest()
    if hash[:5] == '00000':
        try:
            pos = int(hash[5], 10)  # raises ValueError for non-decimal digits
            if pos < 8:
                if password[pos] == None:
                    # Only the first matching hash per position counts.
                    password[pos] = hash[6]
                    done += 1
        except:
            pass
    i += 1
print password
"lenny.khazan@gmail.com"
] | lenny.khazan@gmail.com |
f0854c67101d14e317c013d042d3a110dd01e05b | c20a7a651e63c1e7b1c5e6b5c65c8150898bbaf2 | /KG/BiLSTM+CRF.py | cd232b9f936518d33947a35a03e4efc752e6c09d | [] | no_license | Nobody0321/MyCodes | 08dbc878ae1badf82afaf0c9fc608b70dfce5cea | b60e2b7a8f2ad604c7d28b21498991da60066dc3 | refs/heads/master | 2023-08-19T14:34:23.169792 | 2023-08-15T15:50:24 | 2023-08-15T15:50:24 | 175,770,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,542 | py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
def to_scalar(var):
    """Return the first element of *var* as a plain Python number."""
    flattened = var.view(-1)
    return flattened.data.tolist()[0]
def argmax(vec):
    """Return the column index of the maximum of *vec* (1 x N) as a python int."""
    best_index = torch.max(vec, 1)[1]
    return to_scalar(best_index)
def prepare_sequence(seq, to_ix):
    """Map each token of *seq* through *to_ix* and wrap as a LongTensor Variable."""
    indices = torch.LongTensor([to_ix[token] for token in seq])
    return autograd.Variable(indices)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):  # vec: 1 x tagset_size row of scores
    # Shift by the row maximum before exponentiating so exp() cannot
    # overflow; the shift is added back outside the log:
    #   log(sum(exp(v))) == m + log(sum(exp(v - m)))   with m = max(v)
    max_score = vec[0, argmax(vec)]
    # max_score is a scalar; broadcast it back to vec's 1 x tagset_size shape
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
class BiLSTM_CRF(nn.Module):
    """Bidirectional LSTM feature extractor topped with a CRF layer for
    sequence tagging (batch size is fixed at 1 throughout)."""

    def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        super(BiLSTM_CRF, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.tag_to_ix = tag_to_ix
        self.tagset_size = len(tag_to_ix)

        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        # hidden_dim is split evenly across the two LSTM directions.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2, num_layers=1, bidirectional=True)

        # Maps the output of the LSTM into tag space.
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)

        # Matrix of transition parameters. Entry i,j is the score of
        # transitioning *to* i *from* j.
        self.transitions = nn.Parameter(torch.randn(self.tagset_size, self.tagset_size))

        # These two statements enforce the constraint that we never transfer
        # to the start tag and we never transfer from the stop tag
        self.transitions.data[tag_to_ix[START_TAG], :] = -10000
        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000

        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Fresh (h0, c0): 2 directions x batch 1 x hidden_dim // 2.
        return (autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)),
                autograd.Variable(torch.randn(2, 1, self.hidden_dim // 2)))

    # Log partition function: log-sum of the scores of ALL tag sequences.
    def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
        # START_TAG has all of the score.
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.

        # Wrap in a variable so that we will get automatic backprop
        forward_var = autograd.Variable(init_alphas)  # running forward variable, updated each step t

        # Iterate through the sentence
        for feat in feats:  # feat: emission scores for one step (length tagset_size)
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].view(1, -1).expand(1, self.tagset_size)  # 1 x tagset_size
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions[next_tag].view(1, -1)  # 1 x tagset_size
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = forward_var + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            forward_var = torch.cat(alphas_t).view(1, -1)  # per-tag totals after step t
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha

    # Run the BiLSTM over the sentence and project to per-tag emission scores.
    def _get_lstm_features(self, sentence):
        self.hidden = self.init_hidden()
        embeds = self.word_embeds(sentence)
        embeds = embeds.unsqueeze(1)  # insert the batch dimension: seq_len x 1 x embedding_dim
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats

    # Score of the provided gold tag sequence under the current model.
    def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence
        score = autograd.Variable(torch.Tensor([0]))
        # Prepend START_TAG so transition indices line up with positions.
        tags = torch.cat([torch.LongTensor([self.tag_to_ix[START_TAG]]), tags])
        for i, feat in enumerate(feats):
            # self.transitions[tags[i + 1], tags[i]]: transition score from
            # tag i to tag i+1; feat[tags[i + 1]]: emission score of the gold
            # tag at step i (feat covers B, I, O, START_TAG, STOP_TAG).
            score = score + self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
        score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
        return score

    # Viterbi decoding: best-scoring tag sequence and its score.
    def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.)
        init_vvars[0][self.tag_to_ix[START_TAG]] = 0

        # forward_var at step i holds the viterbi variables for step i-1
        forward_var = autograd.Variable(init_vvars)
        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step
            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = forward_var + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0][best_tag_id])
            # Now add in the emission scores, and assign forward_var to the set
            # of viterbi variables we just computed
            forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
            backpointers.append(bptrs_t)  # one backpointer per tag at this step

        # Transition to STOP_TAG
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):  # walk backwards through time
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we dont want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # Sanity check
        best_path.reverse()  # the path was accumulated back-to-front
        return path_score, best_path

    def neg_log_likelihood(self, sentence, tags):
        # Training loss: log(partition) - score(gold sequence).
        feats = self._get_lstm_features(sentence)
        forward_score = self._forward_alg(feats)
        gold_score = self._score_sentence(feats, tags)
        return forward_score - gold_score

    def forward(self, sentence):  # dont confuse this with _forward_alg above.
        # Get the emission scores from the BiLSTM
        lstm_feats = self._get_lstm_features(sentence)
        # Find the best path, given the features.
        score, tag_seq = self._viterbi_decode(lstm_feats)
        return score, tag_seq
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4

# Make up some training data
training_data = [("the wall street journal reported today that apple corporation made money".split(), "B I I I O O O B I O O".split()),
                 ("georgia tech is a university in georgia".split(), "B I O O O O B".split())]

# Build the vocabulary: each distinct word gets the next integer index.
word_to_ix = {}
for sentence, tags in training_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)

tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}

model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

# Check predictions before training
# precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
# precheck_tags = torch.LongTensor([tag_to_ix[t] for t in training_data[0][1]])
# print(model(precheck_sent))

# Make sure prepare_sequence from earlier in the LSTM section is loaded
for epoch in range(1):  # again, normally you would NOT do 300 epochs, it is toy data
    for sentence, tags in training_data:
        # Step 1. Remember that Pytorch accumulates gradients.
        # We need to clear them out before each instance
        model.zero_grad()

        # Step 2. Get our inputs ready for the network, that is,
        # turn them into Variables of word indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = torch.LongTensor([tag_to_ix[t] for t in tags])

        # Step 3. Run our forward pass.
        neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets)

        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step()
        neg_log_likelihood.backward()
        optimizer.step()

# Check predictions after training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
print(model(precheck_sent)[0])  # Viterbi path score
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
print(model(precheck_sent)[1])  # decoded tag sequence
"cyk19951@gmail.com"
] | cyk19951@gmail.com |
3a723fde48bf140e248273db4728d2725859178c | 718a433750fd7679b99d8f80bc43d65ea60ea47c | /named-entity-recognition/utils/count_ner_items.py | 23bf3d9b3c7247bf229776b54e528a7304c2c794 | [
"MIT"
] | permissive | YaleDHLab/lab-workshops | 39bb915c19edc0eeced0b6889c55840acfcef3a8 | 21dfc105b515d638daa9070d21f1a35b39aded25 | refs/heads/master | 2022-12-05T20:46:53.564138 | 2021-10-19T12:03:51 | 2021-10-19T12:03:51 | 59,770,071 | 141 | 78 | MIT | 2022-11-21T21:51:58 | 2016-05-26T17:32:49 | Jupyter Notebook | UTF-8 | Python | false | false | 1,599 | py | from collections import Counter, defaultdict
from bs4 import BeautifulSoup
import codecs, glob, sys, os
# type -> Counter of entity strings -> occurrence count
d = defaultdict(Counter)
prev_type = ''  # NER type of the previous token (consecutive same-type tokens merge)

# Define the infiles to parse
if len(sys.argv) > 1:
    infiles = glob.glob(os.path.join(sys.argv[1], '*'))
else:
    infiles = glob.glob('*.xml')

for c, i in enumerate(infiles):
    print(' * processed', c+1, 'of', len(infiles), 'files')
    with codecs.open(i) as f:
        f = f.read()
    soup = BeautifulSoup(f, 'lxml')
    for j in soup.find_all('token'):
        entity_type = j.find('ner').string
        # Only process non-0 entity types
        if entity_type != 'O':
            # Handle consecutive instances of the same entity type
            if entity_type == prev_type:
                if entity:
                    entity += j.find('word').string + ' '
                else:
                    entity = j.find('word').string + ' '
            # Increment the extant entity's count (if present)
            else:
                # `entity` is undefined before the first named token; the
                # NameError guard covers that case.
                try:
                    d[prev_type][entity] += 1
                except NameError:
                    pass
                # Store the new entity word
                entity = j.find('word').string + ' '
            # Store the current entity type
            prev_type = entity_type

# Handle the last identified entity
try:
    if entity:
        d[prev_type][entity] += 1
        entity = ''
except NameError:
    pass

# Write an output file (tab-separated: type, count, value)
with codecs.open('ner_counts.txt', 'w', 'utf8') as out:
    out.write('\t'.join(['type', 'count', 'value']))
    for entity_type in d:
        for ner in d[entity_type]:
            out.write('\t'.join([entity_type, str(d[entity_type][ner]), ner]) + '\n')
"douglas.duhaime@gmail.com"
] | douglas.duhaime@gmail.com |
2f17c9a7504f6ecc55ad7e13f545605af548658f | cb163661edcf53d1eb5c6b427b5cb864f7272e6a | /Base/__init__.py | 991e5f876e9f725b4666504d84d67eae13a700c6 | [] | no_license | chanduDS/WebScraping-IMDB-Movies | ed838e9442dfef0f7eeb5891c6404abae13b27ff | b473e9067baf335aa4bc7db01dbb955677f244c9 | refs/heads/master | 2022-11-05T19:01:18.679803 | 2020-06-16T05:59:24 | 2020-06-16T05:59:24 | 271,594,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | from . import Initiate_Driver | [
"sreechandraduppala1@gmail.com"
] | sreechandraduppala1@gmail.com |
afbf184de2b729d5379eb24438e13e83ad33883d | 137a11b30a7da783c4d7cd1a6f6ed3f538780611 | /Astra Final/createcommandwords3.py | f9ad3f44e09f562c3e2d12557d6e7cdbc130ca44 | [] | no_license | Sharu183/Astra | 93f5383c6e71e613c48359daba17a575ecb27043 | 154b635baace1eed06f442a6c250288850538222 | refs/heads/master | 2021-01-01T16:54:22.510920 | 2017-07-29T10:47:37 | 2017-07-29T10:47:37 | 97,950,008 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import os
def createcommandwords3():
l=("F820","F840","F860","F880","F8A0","F8C0","F8E0","F8F0","F800","F810")
file=open(os.getcwd()+"\\commandwords3.txt","w+")
for word in l:
file.write(word+"\n")
createcommandwords3()
| [
"noreply@github.com"
] | Sharu183.noreply@github.com |
b7fece0855ce88c201595fc030463fe7de7ddcfb | 435ed78115bf30c2fdf4591a478c8aec44edd463 | /src/manage.py | ab810d5fac1e7bca3cd22f2d68cba7f81d2fbf0b | [] | no_license | abderrahman-bns/Books-Rest-API-Django | 3067bc3cc0ee8e813aa499b3b5217d4a861fde83 | 0d74b89a7941d59e1909c49b3dc22120ca7cc79e | refs/heads/main | 2023-07-16T01:57:13.107988 | 2021-08-18T19:14:53 | 2021-08-18T19:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Rest_API.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"40049722+xDEVELYNNx@users.noreply.github.com"
] | 40049722+xDEVELYNNx@users.noreply.github.com |
f403b3bbe3b0904d7ffdb16d836d2c7a9d3c1c28 | cc7a5b76cc87f15bc44c5385932f80d1e4418a9e | /concurrent_session.py | d30886bf10fa3e2562903263eece33e386579487 | [] | no_license | bave/tcp_ana | 9f7ba69c0885d92e3b6abf7e160e16a160a22fe4 | 7d375dddf5de8f1e86914fe76e5cbb28c77888d2 | refs/heads/master | 2021-01-10T13:22:59.457901 | 2013-03-01T08:03:26 | 2013-03-01T08:03:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,506 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import sys
import dpkt
import socket
from pylab import *
from datetime import *
# TCP flag bit masks as they appear in the TCP header flags field.
FIN = 0x01
SYN = 0x02
RST = 0x04
ACK = 0x10
FIN_ACK = 0x11  # FIN | ACK
SYN_ACK = 0x12  # SYN | ACK
RST_ACK = 0x14  # RST | ACK
# Session-state labels stored in FlowEntry.flow_list[...]["session"].
S_SRT = "request"
S_EST = "connecting"
S_END = "release"
class FlowEntry:
    # Tracks TCP flows involving key_ip and records, second by second, how
    # many sessions were simultaneously in the established (S_EST) state.
    # (Python 2 code: dict.has_key, print statements in comments.)

    def __init__(self, key_ip):
        self.flow_list = {}            # flow string -> per-flow state dict
        self.key_ip = key_ip           # IPv4 address whose flows we follow
        self.concurrent_session = []   # one sample per elapsed second
        self._tick_time = None         # unix second of the current tick
        self._start_tick_time = None   # unix second of the first packet seen
        return
    #end_def

    def __del__(self):
        del self.flow_list
        del self.key_ip
        return
    #end_def

    def set_flow(self, ip1, port1, ip2, port2, flags, ts):
        # Normalize endpoint order so key_ip is always the "source" before
        # delegating; packets not touching key_ip are rejected.
        if ip1 == self.key_ip:
            return self.__set_flow(ip1, port1, ip2, port2, flags, ts)
        elif ip2 == self.key_ip:
            return self.__set_flow(ip2, port2, ip1, port1, flags, ts)
        else:
            return False
        #end_def
        return True  # NOTE(review): unreachable -- every branch above returns
    #end_def

    def __set_flow(self, src_ip, src_port, dst_ip, dst_port, flags, ts):
        # Advance the per-second tick bookkeeping, then run the per-flow
        # SYN -> SYN/ACK -> FIN/RST state machine keyed by the flow string.
        date = datetime.fromtimestamp(int(ts))
        unixtime_sec = int(str(ts).split(".")[0])
        unixtime_msec = int(str(ts).split(".")[1])
        flow = "TCP "+src_ip+":"+str(src_port)+" "+dst_ip+":"+str(dst_port)
        if self._start_tick_time == None:
            # First packet: anchor the tick clock.
            self._start_tick_time = unixtime_sec
            self._tick_time = unixtime_sec
        #end_if
        if self._tick_time != unixtime_sec:
            # A new second started: pad any skipped seconds with the last
            # sample, then append the current established-session count.
            tick_span = self._tick_time - self._start_tick_time
            while len(self.concurrent_session) < tick_span:
                self.concurrent_session.append(self.concurrent_session[-1])
            self._tick_time = unixtime_sec
            session_count = 0
            for i in self.flow_list:
                if self.flow_list[i]["session"] == S_EST:
                    session_count = session_count + 1
                #end_if
            #end_for
            self.concurrent_session.append(session_count)
        #end_if
        if self.flow_list.has_key(flow):
            if flags == SYN:
                # Re-opened flow: only valid from the released state.
                if self.flow_list[flow]["session"] == S_END:
                    self.flow_list[flow]["session"] = S_SRT
                    self.flow_list[flow]["s_time"] = unixtime_sec
                    self.flow_list[flow]["e_time"] = None
                    self.flow_list[flow]["span"] = 0
                    self.flow_list[flow]["count"] = self.flow_list[flow]["count"] + 1
                    #print "SYN"
                    return True
                #end_if
                # NOTE(review): a SYN while not in S_END falls through and
                # implicitly returns None (falsy).
            elif flags == SYN_ACK:
                # Handshake completes: request -> established.
                if self.flow_list[flow]["session"] == S_SRT:
                    self.flow_list[flow]["session"] = S_EST
                    self.flow_list[flow]["e_time"] = unixtime_sec
                    self.flow_list[flow]["span"] = self.flow_list[flow]["e_time"] - self.flow_list[flow]["s_time"]
                    #print "EST"
                    return True
                else:
                    return False
                #end_if
            elif flags == FIN or flags == RST or flags == FIN_ACK or flags == RST_ACK:
                # Teardown: established -> released.
                if self.flow_list[flow]["session"] == S_EST:
                    self.flow_list[flow]["session"] = S_END
                    self.flow_list[flow]["e_time"] = unixtime_sec
                    self.flow_list[flow]["span"] = self.flow_list[flow]["e_time"] - self.flow_list[flow]["s_time"]
                    #print "END"
                    return True
                else:
                    return False
                #end_if
            else:
                # Any other packet just refreshes the end time / span.
                self.flow_list[flow]["e_time"] = unixtime_sec
                self.flow_list[flow]["span"] = self.flow_list[flow]["e_time"] - self.flow_list[flow]["s_time"]
                return True
            #end_if
        else:
            # Unknown flow: only a SYN may create a new entry.
            if flags == SYN:
                self.flow_list[flow] = {}
                self.flow_list[flow]["session"] = S_SRT
                self.flow_list[flow]["s_time"] = unixtime_sec
                self.flow_list[flow]["e_time"] = None
                self.flow_list[flow]["span"] = 0
                self.flow_list[flow]["count"] = 0
                #print "SYN"
                return True
            else:
                return False
            #end_if
        #end_if
    #end_def

    def get_flow_list(self):
        # Accessor for the raw per-flow state dicts.
        return self.flow_list
    #end_def

    def get_concurrent_session(self):
        # Accessor for the per-second established-session counts.
        return self.concurrent_session
    #end_def

    def print_flow_list(self):
        # Dump every flow and its state dict; also returns the dict.
        for i in self.flow_list:
            print i, self.flow_list[i]
        return self.flow_list
    #end_def

    def print_concurrent_session(self):
        # Print one established-session count per line (one line per second).
        for i in self.concurrent_session:
            print i
        #end_for
    #end_def
#end_class
def usage():
    # Print the CLI usage text (Japanese, Python 2 unicode literals):
    #   key_ip_address -- source IPv4 address whose flows are extracted
    #   pcap_file      -- path to the pcap capture to parse
    print u"python %s [key_ip_address] [pcap_file]" % sys.argv[0]
    print u" key_ip_address : 切り出すソースIPアドレス(IPv4)"
    print u" pcap_file : そのまま"
    return
#end_def
def main():
    # CLI entry point: argv[1] = key IP, argv[2] = pcap file. Feeds every
    # TCP packet to a FlowEntry and prints the per-second session counts.
    if (len(sys.argv) != 3):
        usage()
        return 1
    #end_if
    key_ip = sys.argv[1]
    if key_ip == None:
        return -1
    filename = sys.argv[2]
    if filename == None:
        return -1
    outputfile = filename
    #filename = u'./test4.pcap'
    #key_ip = u'150.65.206.242'
    f_entry = FlowEntry(key_ip)
    pcr = dpkt.pcap.Reader(open(filename, 'rb'))
    packet_count = 0
    for (ts, buf) in pcr:
        packet_count += 1
        try:
            eth = dpkt.ethernet.Ethernet(buf)
        except:
            print 'Fail parse:', packet_count, ': skipping'
            continue
        #end_try
        if type(eth.data) == dpkt.ip.IP:
            packet = eth.data
            src_ip = socket.inet_ntoa(packet.src)
            dst_ip = socket.inet_ntoa(packet.dst)
            segment = packet.data
            if type(packet.data) == dpkt.udp.UDP:
                # UDP is ignored -- only TCP sessions are tracked.
                src_port = segment.sport
                dst_port = segment.dport
                continue
            elif type(packet.data) == dpkt.tcp.TCP:
                src_port = segment.sport
                dst_port = segment.dport
                flags = segment.flags
                f_entry.set_flow(src_ip, src_port, dst_ip, dst_port, flags, ts)
            #end_if
        #end_if
    #end_for
    #f_entry.print_flow_list()
    #flow_list = f_entry.get_flow_list()
    f_entry.print_concurrent_session()
#end_def

if __name__ == '__main__':
    main()
| [
"inoue.tomoya@gmail.com"
] | inoue.tomoya@gmail.com |
c060ef565a4d8d529fd14814948ba9a3e75b0630 | 530d470375ddb57b089f241a2481f5a9d880128a | /scanner.py | 3d1d02c1252a655a8ea39b139e980e0449235c4c | [] | no_license | williamclift/weightedGraphParser | b61340e039584e3998efceb470152202f6dd079c | 593094a19122f417e24556c3b6caf2b9dcfdaa7f | refs/heads/main | 2023-04-14T22:46:55.898930 | 2021-04-30T14:53:28 | 2021-04-30T14:53:28 | 363,172,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | '''
Scanner
@author William Clift
@date 20 April 2021
Prin. Prog. Lang. - Dr. Mongan
Run:
python scanner.py
'''
'''
Samples to Run (Include the quotations):
'someNode - 5 > anotherNode'
'someNode - > anotherNode'
'someNode - 5 > anotherNode < - aFinalNode'
'''
'''
Weighted and Directed Graphs - Defines the connections between nodes with the option for weighted edges
0. - > node - > node
1. < - node < - node
2(weight). - [0-9]* > node - weight > node
3(weight). < [0-9]* - node < weight - node
4. weight [0-9]*
'''
# Recognized edge punctuation characters (not referenced by the code below).
const = ['-', '<', '>']
# Parsed node names; filled front-first by processString, consumed via pop().
nodes = []
# Parsed edge tokens: '0'/'1' for unweighted, '2(w)'/'3(w)' for weighted edges.
tokens = []
def isWeight(w):
    """Return True when *w* is a non-empty string of decimal digits,
    i.e. a valid edge-weight token."""
    # str.isdigit() already yields the boolean; the if/else was redundant.
    return w.isdigit()
def getToken():
    """Pop the next edge token from the global `tokens` stack, folding a
    following weight (and its closing marker) into one space-joined string."""
    token = tokens.pop()
    if token in ('-', '<'):
        follower = tokens.pop()
        if isWeight(follower):
            follower = follower + ' ' + tokens.pop()
        token = token + ' ' + follower
    return token
def getNode():
    # Pop the next node name from the global `nodes` stack.
    return nodes.pop()
def processString(string):
    # Tokenize *string* on spaces and fill the module-level `tokens` and
    # `nodes` stacks (front-first via insert(0, ...)). Edge encodings:
    #   '-'           -> token '0'   (forward, unweighted)
    #   '- W'         -> token '2(W)' (forward, weighted; W is skipped)
    #   '<'           -> token '1'   (backward, unweighted)
    #   '< W'         -> token '3(W)' (backward, weighted; W is skipped)
    # anything else is treated as a node name.
    arr = string.split(' ')
    i = 0
    while i in range(len(arr)):
        e = arr[i]
        if e == '-':
            next = arr[i+1]
            if isWeight(next):
                i += 1  # consume the weight token as well
                tokens.insert(0, str(2)+'('+str(next)+')')
            else:
                tokens.insert(0, str(0))
            i += 1
        elif e == '<':
            next = arr[i+1]
            if isWeight(next):
                i += 1  # consume the weight token as well
                tokens.insert(0, str(3)+'('+str(next)+')')
            else:
                tokens.insert(0, str(1))
            i += 1
        else:
            nodes.insert(0, e)
            i += 1
i+=1
# Interactive driver: read a graph description, parse it, then print nodes
# and edge tokens alternately until the node stack is exhausted.
string = input("Graph Structure: ")
print(string)
processString(string)
print(nodes)
print(tokens)
while len(nodes) > 0:
    print(getNode())
    if len(tokens) > 0:
        print(getToken())
| [
"noreply@github.com"
] | williamclift.noreply@github.com |
b1cd81ed718f1c7ff47135965d98fabcf7c2a72d | 0032fcf63cb7e6461ff0d0bad3062b83509a0f82 | /models/blog.py | 10823febd829fbe4db12d05796633c1bef7197bc | [] | no_license | sunny812546/Mongodb_Python | 4d5fdb828bc104563acb2faef8aabfcb6429ab1b | ec9a148bffcec89e80b4ae493691c7718790946a | refs/heads/master | 2020-05-13T17:13:25.446250 | 2019-04-16T08:08:53 | 2019-04-16T08:08:53 | 181,642,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | import uuid
from models.post import Post
import datetime
from database import Database
class Blog(object):
    # A blog document persisted in the Mongo 'blogs' collection; its posts
    # live in a separate collection keyed by this blog's id.

    def __init__(self, author, title, description, id=None):
        self.author = author
        self.title = title
        self.description = description
        # Random 32-char hex id unless one is supplied (e.g. when loading).
        self.id = uuid.uuid4().hex if id is None else id

    def new_post(self):
        # Interactively prompt for a post and persist it. An empty date
        # defaults to now (UTC); otherwise the input is parsed as DDMMYYYY.
        title = input("Enter post title")
        content = input("Enter post content")
        date = input("Enter post date,or leave black for today:DDMMYYYY")
        if date == "":
            date = datetime.datetime.utcnow()
        else:
            date = datetime.datetime.strptime(date, "%d%m%Y")
        post = Post(blog_id=self.id,
                    title=title,
                    content=content,
                    author=self.author,
                    date=date)
        post.save_to_mongo()

    def get_posts(self):
        # All posts belonging to this blog.
        return Post.from_blog(self.id)

    def save_to_mongo(self):
        # Persist this blog's JSON representation to the 'blogs' collection.
        Database.insert(collection='blogs', data=self.json())

    def json(self):
        # Dict form used for Mongo storage.
        return {'author': self.author,
                'title': self.title,
                'description': self.description,
                'id': self.id}

    @classmethod
    def get_from_mongo(cls, id):
        # Load a blog by its id and rebuild the Blog instance.
        blog_data = Database.find_one(collection='blogs', query={'id': id})
        return cls(author=blog_data['author'],
                   title=blog_data['title'],
                   description=blog_data['description'],
                   id=blog_data['id'])
| [
"sun812546@gmail.com"
] | sun812546@gmail.com |
6caff764e90187ff4e2663de7936344131ffd79e | b00b9b6818a20d8d379e4dd3e78503526ee2d402 | /formularios/wsgi.py | 34f68939bae850e538162f947bcb7fe5b49d509e | [] | no_license | silviosoto/Django-formularios | 5a305105800c54c3fd8967c2309bd838930c8b13 | 3265144f1d16abc42eae537609e1d8632dbe6abc | refs/heads/master | 2020-04-03T01:03:06.575948 | 2018-10-27T02:42:47 | 2018-10-27T02:42:47 | 154,917,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for formularios project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "formularios.settings")
application = get_wsgi_application()
| [
"silviojsoto@gmail.com"
] | silviojsoto@gmail.com |
842af5b2770d0870083da5e5752010763a589898 | 9ead5fcc5efaf7a73c4c585d813c1cddcb89666d | /m5/src/dev/alpha/Tsunami.py | 5440486b62667925e8db33622d94a3c1766730aa | [
"BSD-3-Clause"
] | permissive | x10an14/tdt4260Group | b539b6271c8f01f80a9f75249779fb277fa521a4 | 1c4dc24acac3fe6df749e0f41f4d7ab69f443514 | refs/heads/master | 2016-09-06T02:48:04.929661 | 2014-04-08T10:40:22 | 2014-04-08T10:40:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,154 | py | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from m5.proxy import *
from BadDevice import BadDevice
from AlphaBackdoor import AlphaBackdoor
from Device import BasicPioDevice, IsaFake, BadAddr
from Pci import PciConfigAll
from Platform import Platform
from Uart import Uart8250
class TsunamiCChip(BasicPioDevice):
    """SimObject parameter declaration for the Tsunami CChip device model."""
    type = 'TsunamiCChip'
    # Back-reference to the owning Tsunami platform, resolved lazily via proxy.
    tsunami = Param.Tsunami(Parent.any, "Tsunami")
class TsunamiIO(BasicPioDevice):
    """Tsunami miscellaneous I/O block: RTC and periodic-interrupt parameters."""
    type = 'TsunamiIO'
    # System time presented by the simulated RTC ('Now' uses the host clock).
    time = Param.Time('01/01/2009',
        "System time to use ('Now' for actual time)")
    # Some guest OSes expect the RTC year field encoded as BCD.
    year_is_bcd = Param.Bool(False,
        "The RTC should interpret the year as a BCD value")
    tsunami = Param.Tsunami(Parent.any, "Tsunami")
    # Rate of the periodic timer interrupt.
    frequency = Param.Frequency('1024Hz', "frequency of interrupts")
class TsunamiPChip(BasicPioDevice):
    """SimObject parameter declaration for the Tsunami PChip (PCI host bridge chip)."""
    type = 'TsunamiPChip'
    # Back-reference to the owning Tsunami platform, resolved lazily via proxy.
    tsunami = Param.Tsunami(Parent.any, "Tsunami")
class Tsunami(Platform):
    """Alpha Tsunami platform description.

    Instantiates the chipset (cchip/pchip), PCI configuration space, the
    RTC/timer I/O block, a UART, the simulator backdoor device, and a set
    of IsaFake stand-ins at fixed physical PIO addresses.
    NOTE(review): the IsaFake entries appear to be benign placeholders for
    legacy ISA devices (extra UARTs, parallel port, PnP, ATA) that are not
    modeled -- confirm against the IsaFake device model.
    """
    type = 'Tsunami'
    system = Param.System(Parent.any, "system")
    # Chipset components at their fixed physical addresses.
    cchip = TsunamiCChip(pio_addr=0x801a0000000)
    pchip = TsunamiPChip(pio_addr=0x80180000000)
    pciconfig = PciConfigAll()
    # Unmodeled legacy devices stubbed out with IsaFake.
    fake_sm_chip = IsaFake(pio_addr=0x801fc000370)
    fake_uart1 = IsaFake(pio_addr=0x801fc0002f8)
    fake_uart2 = IsaFake(pio_addr=0x801fc0003e8)
    fake_uart3 = IsaFake(pio_addr=0x801fc0002e8)
    fake_uart4 = IsaFake(pio_addr=0x801fc0003f0)
    fake_ppc = IsaFake(pio_addr=0x801fc0003bb)
    # Option-ROM region (64KiB+ window rather than a single register).
    fake_OROM = IsaFake(pio_addr=0x800000a0000, pio_size=0x60000)
    fake_pnp_addr = IsaFake(pio_addr=0x801fc000279)
    fake_pnp_write = IsaFake(pio_addr=0x801fc000a79)
    fake_pnp_read0 = IsaFake(pio_addr=0x801fc000203)
    fake_pnp_read1 = IsaFake(pio_addr=0x801fc000243)
    fake_pnp_read2 = IsaFake(pio_addr=0x801fc000283)
    fake_pnp_read3 = IsaFake(pio_addr=0x801fc0002c3)
    fake_pnp_read4 = IsaFake(pio_addr=0x801fc000303)
    fake_pnp_read5 = IsaFake(pio_addr=0x801fc000343)
    fake_pnp_read6 = IsaFake(pio_addr=0x801fc000383)
    fake_pnp_read7 = IsaFake(pio_addr=0x801fc0003c3)
    fake_ata0 = IsaFake(pio_addr=0x801fc0001f0)
    fake_ata1 = IsaFake(pio_addr=0x801fc000170)
    # Frame buffer is reported as a BadDevice so accesses fault loudly.
    fb = BadDevice(pio_addr=0x801fc0003d0, devicename='FrameBuffer')
    io = TsunamiIO(pio_addr=0x801fc000000)
    uart = Uart8250(pio_addr=0x801fc0003f8)
    # Simulator backdoor; the disk is resolved from the parent system.
    backdoor = AlphaBackdoor(pio_addr=0x80200000000, disk=Parent.simple_disk)
    # Attach I/O devices to specified bus object. Can't do this
    # earlier, since the bus object itself is typically defined at the
    # System level.
    def attachIO(self, bus):
        # Wire every device's pio port to the bus; PciConfigAll additionally
        # becomes the bus's default responder for otherwise-unclaimed accesses.
        self.cchip.pio = bus.port
        self.pchip.pio = bus.port
        self.pciconfig.pio = bus.default
        bus.responder_set = True
        bus.responder = self.pciconfig
        self.fake_sm_chip.pio = bus.port
        self.fake_uart1.pio = bus.port
        self.fake_uart2.pio = bus.port
        self.fake_uart3.pio = bus.port
        self.fake_uart4.pio = bus.port
        self.fake_ppc.pio = bus.port
        self.fake_OROM.pio = bus.port
        self.fake_pnp_addr.pio = bus.port
        self.fake_pnp_write.pio = bus.port
        self.fake_pnp_read0.pio = bus.port
        self.fake_pnp_read1.pio = bus.port
        self.fake_pnp_read2.pio = bus.port
        self.fake_pnp_read3.pio = bus.port
        self.fake_pnp_read4.pio = bus.port
        self.fake_pnp_read5.pio = bus.port
        self.fake_pnp_read6.pio = bus.port
        self.fake_pnp_read7.pio = bus.port
        self.fake_ata0.pio = bus.port
        self.fake_ata1.pio = bus.port
        self.fb.pio = bus.port
        self.io.pio = bus.port
        self.uart.pio = bus.port
        self.backdoor.pio = bus.port
| [
"chrischa@stud.ntnu.no"
] | chrischa@stud.ntnu.no |
c88045f96416fcf1efcc2cb2b62f628cb9515c00 | 785e6e41b16ab7c702987d0dcd01793668da6f98 | /python/lib/sublexical_semantics/data/sogou_news.py | 310ecb95f26b28e9510eb78a64606863dac37153 | [] | no_license | andrely/sublexical-features | 748c18419405a8184c81253a16ed0bd4445a6ffd | 4191ec5ea3f95dfa1741c441da90cbbd1a1c2a02 | refs/heads/master | 2021-01-17T15:09:53.766421 | 2017-05-03T18:05:08 | 2017-05-03T18:05:08 | 16,731,407 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | # coding=utf-8
import logging
import os
import re
from glob import glob
from pandas import DataFrame
# Mapping from Chinese news-category labels to English category names.
# Keys are runtime dictionary values matched against parsed category text,
# so they are deliberately left untranslated.  Note that several Chinese
# labels collapse onto one English label (e.g. both "woman" entries, and
# both "media"/"technology" entries).
EN_CATEGORIES = {u'传媒': 'media', '体育': 'sports', u'健康': 'health', u'公益': 'general',
                 u'军事': 'military', u'国内': 'domestic', u'国际': 'international', u'女人': 'woman',
                 u'女性': 'woman', u'娱乐': 'entertainment', u'媒体': 'media', u'房产': 'real estate',
                 u'招聘': 'jobs', u'教育': 'education', u'文化': 'culture', u'旅游': 'travel', u'时尚': 'fashion',
                 u'校园': 'campus', u'汽车': 'auto', u'社会': 'society', u'科技': 'technology', u'财经': 'finance',
                 u'IT': 'technology'}
def read_categories(fn):
    """Parse a Sogou category-listing file into (site, category, url) tuples.

    The gb18030-encoded file interleaves three kinds of lines:
    numbered site headers ("1. sohu"), category names (optionally ending
    in a fullwidth colon, which is stripped), and URL prefixes starting
    with "http:".  Each URL line is recorded with the most recently seen
    site and category.
    """
    site_pattern = re.compile('^\\d+\\.\\s*(\\w+)', re.UNICODE)
    entries = []
    current_site = None
    current_cat = None
    with open(fn, encoding='gb18030') as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue
            site_match = site_pattern.search(raw)
            if site_match is not None:
                current_site = site_match.group(1)
            elif re.search('^http:', raw):
                entries.append((current_site, current_cat, raw))
            else:
                current_cat = raw
                # Drop a single trailing fullwidth colon, if present.
                if current_cat.endswith(u'\uFF1A'):
                    current_cat = current_cat[:-1]
    return entries
def read_docs(fns, limit=None):
    """Yield document dicts parsed from Sogou news dump files.

    Each file in *fns* is gb18030-encoded and holds ``<doc>...</doc>``
    records whose single-line ``<url>``, ``<docno>``, ``<content>`` and
    ``<contenttitle>`` tags become keys of the yielded dict.  Stops after
    *limit* documents when a non-zero limit is given; progress is logged
    every 100000 documents.
    """
    fields = ('url', 'docno', 'content', 'contenttitle')
    patterns = {tag: re.compile('<%s>(.*)</%s>' % (tag, tag), re.UNICODE)
                for tag in fields}
    emitted = 0
    for path in fns:
        logging.info('Reading file %s ...' % path)
        with open(path, encoding='gb18030') as stream:
            record = {}
            for raw in stream:
                raw = raw.strip()
                if raw == '<doc>':
                    record = {}
                elif raw == '</doc>':
                    yield record
                    emitted += 1
                    if emitted % 100000 == 0:
                        logging.info('Read %d documents ...' % emitted)
                    if limit and emitted >= limit:
                        return
                else:
                    # Any other line is either one of the known tags or noise.
                    for tag in fields:
                        if raw.startswith('<%s>' % tag):
                            hit = patterns[tag].match(raw)
                            if hit:
                                record[tag] = hit.group(1)
                            break
def sogou_news_dataset(dataset_path, limit=None):
    """Yield category-annotated document dicts from the Sogou news corpus.

    Expects ``categories_2012.txt`` plus ``Sogou*.mini.txt`` and/or
    ``news*_xml.dat`` dump files under *dataset_path*; *limit* caps the
    number of documents read.
    """
    category_table = read_categories(os.path.join(dataset_path, 'categories_2012.txt'))
    dump_files = (glob(os.path.join(dataset_path, 'Sogou*.mini.txt'))
                  + glob(os.path.join(dataset_path, 'news*_xml.dat')))
    logging.info("Reading files %s ..." % ', '.join(dump_files))
    for document in read_docs(dump_files, limit=limit):
        yield add_category(document, category_table)
def add_category(doc, categories):
    """Annotate *doc* in place with ``cat``, ``cat_en`` and ``site`` keys.

    *categories* holds ``(site, category, url_prefix)`` tuples; the first
    prefix matching ``doc['url']`` wins.  Documents without a URL, or with
    a URL matching no prefix, are returned unchanged.
    """
    if 'url' in doc:
        url = doc['url']
        for site, category, prefix in categories:
            if url.startswith(prefix):
                doc['cat'] = category
                doc['cat_en'] = EN_CATEGORIES.get(category)
                doc['site'] = site
                break
    return doc
def sogou_news_df(dataset_path, tokenizer=None, limit=None):
    """Load the Sogou news corpus into a pandas DataFrame.

    When *tokenizer* is given, the ``content`` and ``contenttitle`` fields
    are replaced by the whitespace-joined token sequences it produces.
    """
    documents = sogou_news_dataset(dataset_path, limit=limit)
    if not tokenizer:
        rows = documents
    else:
        def _retokenize(doc):
            # Rewrites both text fields of the doc in place.
            doc['content'] = ' '.join(tokenizer(doc['content']))
            doc['contenttitle'] = ' '.join(tokenizer(doc['contenttitle']))
            return doc
        rows = (_retokenize(doc) for doc in documents)
    return DataFrame(data=rows)
| [
"and-ly@online.no"
] | and-ly@online.no |
244d50cea282092239e50d4c7fae5eae2ae5d443 | d2a564ee5ecc46ad55ba4a17504dd79b26f77d0f | /educa/courses/migrations/0002_content_file_image_text_video.py | 672f9544b897995f2adb898129ce06f0b7bb6096 | [] | no_license | Da1anna/Educa | ab5eead0337a2447b87271a6a06c2bcfc61f09a2 | 736fd9840c66221212275f2cfa7374cb521e79ff | refs/heads/master | 2022-12-30T12:31:36.014607 | 2020-10-15T03:52:49 | 2020-10-15T03:52:49 | 303,141,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,898 | py | # Generated by Django 2.0.5 on 2020-09-18 14:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the ``courses`` app.

    Creates the polymorphic ``Content`` model (a generic foreign key into
    one of the four content-item models) plus the concrete ``File``,
    ``Image``, ``Text`` and ``Video`` models.
    NOTE(review): generated by ``makemigrations``; do not hand-edit the
    declarative content once this migration has been applied.
    """
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Content',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                # Generic-FK target restricted to the four item models below.
                ('content_type', models.ForeignKey(limit_choices_to={'model__in': ('text', 'image', 'file', 'video')}, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contents', to='courses.Module')),
            ],
        ),
        migrations.CreateModel(
            name='File',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.FileField(upload_to='files')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='file_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.ImageField(upload_to='images')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Text',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('content', models.TextField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='text_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # Videos are stored as external URLs, not uploaded media.
                ('content', models.URLField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='video_related', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"1318176575@qq.com"
] | 1318176575@qq.com |
a43e5cebb85b853fef00f062fbee3501a690bf39 | 5508d054c695858dd1912a07abccad048d0b8965 | /code/reinforce/network.py | fd03c58597baabf4dbd1b7fadd695c19febac9d7 | [] | no_license | MegaYEye/quadrotor_reinforcement_learning | 4495bebb5bb8f98a8c3e7be10e12b6e0b1c0e42b | 984417864c774b35c567d85e0e174740bd318df0 | refs/heads/master | 2021-09-16T21:02:29.062686 | 2018-06-25T03:39:14 | 2018-06-25T03:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def make_network_config(network_json_config):
    """Placeholder -- not implemented.  Presumably meant to translate a
    JSON-style description into a NetworkConfig; confirm intended contract."""
    raise NotImplementedError
def make_network(network_config):
    """Placeholder -- not implemented.  Presumably meant to instantiate a
    Network from a NetworkConfig; confirm intended contract."""
    raise NotImplementedError
# Network description
class NetworkConfig(object):
    """Describes how to assemble a Network: one head configuration, an
    array of tail configurations, plus optional unpacking/type metadata.

    Fixes a defect in the original, which declared this with ``def``
    instead of ``class``: calling ``NetworkConfig(...)`` merely defined a
    nested ``__init__`` and returned ``None``, so no attributes were ever
    set.
    """
    def __init__(self,
                 head_config,
                 tail_config_array,
                 unpack_description=None,
                 network_type=None):
        self.head_config = head_config
        self.tail_config_array = tail_config_array
        self.unpack_description = unpack_description
        self.network_type = network_type
# Network itself.
class Network(nn.Module):
    """Skeleton policy network; every method is an unimplemented placeholder.
    NOTE(review): the intended structure (a head fed by several tails, per
    NetworkConfig) is inferred from method names -- confirm the design.
    """
    def __init__(self, network_config):
        # Placeholder: should build submodules from network_config.
        raise NotImplementedError
    def forward(self, x):
        # Placeholder for the nn.Module forward pass.
        raise NotImplementedError
    def _make_tails(self, x):
        # Placeholder helper, presumably driven by tail_config_array.
        raise NotImplementedError
    def _make_head(self, tails):
        # Placeholder helper, presumably driven by head_config.
        raise NotImplementedError
# Optimizers
class OptimizerConfig(object):
    """Configuration describing which optimizer to build.

    Fixes a defect: the learning rate previously defaulted to the string
    ``"0.001"``; it is now stored as a float (string arguments are still
    accepted and coerced, so existing callers keep working).

    :param optimizer_name: identifier of the optimizer ("adam" is the
        only value make_optimizer currently supports)
    :param lr: learning rate as a float
    """
    def __init__(self, optimizer_name="adam", lr=0.001):
        self.optimizer_name = optimizer_name
        self.lr = float(lr)
def make_optimizer(optimizer_config, network):
    """Instantiate the optimizer described by *optimizer_config* for *network*.

    Fixes two defects in the original:
    - it read ``optimizer_config.optimizer_type`` although OptimizerConfig
      defines ``optimizer_name``, so every call raised AttributeError;
    - it ignored the configured learning rate entirely.

    :raises ValueError: if the optimizer name is not supported.
    """
    if optimizer_config.optimizer_name == "adam":
        # float() tolerates lr stored as a string (the historical default).
        return torch.optim.Adam(network.parameters(), lr=float(optimizer_config.lr))
    else:
        raise ValueError("Unknown optimizer_type.")
# Nonlinearities
def make_nonlinearity(nonlinearity_name):
    """Map a nonlinearity name to its torch functional implementation.

    Only "relu" is supported; anything else raises ValueError.
    """
    if nonlinearity_name != "relu":
        raise ValueError("Unknown nonlinearity.")
    return F.relu
| [
"nekrald@gmail.com"
] | nekrald@gmail.com |
8e9801e303cb314c3edbaf306146510777a4151d | 90f2802c31be0d3d8d6f494b430243f108f3cd85 | /python.problem5.py | c34f9422840f74ea3cf8515708ace17bdcfb2257 | [] | no_license | beelahkay/Stochastic-Simulations | 00e2e5fd5256914db5f450c21f94b818ea1b4b80 | c42f376833548daad7d2e5c62cd4c6b373d1fd7f | refs/heads/master | 2020-04-03T23:41:42.661721 | 2018-10-31T22:07:50 | 2018-10-31T22:07:50 | 155,458,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | import numpy as np
# Start with all probability mass on state 0.
pi = np.array([1, 0, 0])
# One-step transition matrix of the three-state Markov chain.
P = np.array([[0.5, 0.3, 0.2],
              [0.4, 0.6, 0],
              [0.1, 0.9, 0]])
# Propagate the distribution long enough for it to settle at its fixed point.
for _ in range(10000):
    pi = pi @ P
print('Simulated Result =', pi)
# Agrees with the exact stationary distribution [5/12, 1/2, 1/12].
"blakeshaw4@yahoo.com"
] | blakeshaw4@yahoo.com |
96c732b39274d27dba371d3ba780deafa53399a0 | 8dbb2a3e2286c97b1baa3ee54210189f8470eb4d | /kubernetes-stubs/client/api/autoscaling_v1_api.pyi | 483ed490e8ae7275b864b7af091f1df71f67ac70 | [] | no_license | foodpairing/kubernetes-stubs | e4b0f687254316e6f2954bacaa69ff898a88bde4 | f510dc3d350ec998787f543a280dd619449b5445 | refs/heads/master | 2023-08-21T21:00:54.485923 | 2021-08-25T03:53:07 | 2021-08-25T04:45:17 | 414,555,568 | 0 | 0 | null | 2021-10-07T10:26:08 | 2021-10-07T10:26:08 | null | UTF-8 | Python | false | false | 5,142 | pyi | import typing
import kubernetes.client
class AutoscalingV1Api:
    """Type stub (.pyi) for the Kubernetes ``autoscaling/v1`` API client.

    Signatures only: runtime behavior lives in the generated
    ``kubernetes.client`` package.  The ``...`` defaults mirror the
    optional keyword arguments of the real client methods.
    """
    def __init__(
        self, api_client: typing.Optional[kubernetes.client.ApiClient] = ...
    ) -> None: ...
    def get_api_resources(self) -> kubernetes.client.V1APIResourceList: ...
    # --- list operations ---
    def list_horizontal_pod_autoscaler_for_all_namespaces(
        self,
        *,
        allow_watch_bookmarks: typing.Optional[bool] = ...,
        _continue: typing.Optional[str] = ...,
        field_selector: typing.Optional[str] = ...,
        label_selector: typing.Optional[str] = ...,
        limit: typing.Optional[int] = ...,
        pretty: typing.Optional[str] = ...,
        resource_version: typing.Optional[str] = ...,
        timeout_seconds: typing.Optional[int] = ...,
        watch: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscalerList: ...
    def list_namespaced_horizontal_pod_autoscaler(
        self,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        allow_watch_bookmarks: typing.Optional[bool] = ...,
        _continue: typing.Optional[str] = ...,
        field_selector: typing.Optional[str] = ...,
        label_selector: typing.Optional[str] = ...,
        limit: typing.Optional[int] = ...,
        resource_version: typing.Optional[str] = ...,
        timeout_seconds: typing.Optional[int] = ...,
        watch: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscalerList: ...
    # --- create / delete operations ---
    def create_namespaced_horizontal_pod_autoscaler(
        self,
        namespace: str,
        body: kubernetes.client.V1HorizontalPodAutoscaler,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def delete_collection_namespaced_horizontal_pod_autoscaler(
        self,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        body: typing.Optional[kubernetes.client.V1DeleteOptions] = ...,
        _continue: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_selector: typing.Optional[str] = ...,
        grace_period_seconds: typing.Optional[int] = ...,
        label_selector: typing.Optional[str] = ...,
        limit: typing.Optional[int] = ...,
        orphan_dependents: typing.Optional[bool] = ...,
        propagation_policy: typing.Optional[str] = ...,
        resource_version: typing.Optional[str] = ...,
        timeout_seconds: typing.Optional[int] = ...
    ) -> kubernetes.client.V1Status: ...
    # --- single-object read / write operations ---
    def read_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        exact: typing.Optional[bool] = ...,
        export: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def replace_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        body: kubernetes.client.V1HorizontalPodAutoscaler,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def delete_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        *,
        pretty: typing.Optional[str] = ...,
        body: typing.Optional[kubernetes.client.V1DeleteOptions] = ...,
        dry_run: typing.Optional[str] = ...,
        grace_period_seconds: typing.Optional[int] = ...,
        orphan_dependents: typing.Optional[bool] = ...,
        propagation_policy: typing.Optional[str] = ...
    ) -> kubernetes.client.V1Status: ...
    def patch_namespaced_horizontal_pod_autoscaler(
        self,
        name: str,
        namespace: str,
        body: typing.Any,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...,
        force: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    # --- status-subresource operations ---
    def read_namespaced_horizontal_pod_autoscaler_status(
        self, name: str, namespace: str, *, pretty: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def replace_namespaced_horizontal_pod_autoscaler_status(
        self,
        name: str,
        namespace: str,
        body: kubernetes.client.V1HorizontalPodAutoscaler,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
    def patch_namespaced_horizontal_pod_autoscaler_status(
        self,
        name: str,
        namespace: str,
        body: typing.Any,
        *,
        pretty: typing.Optional[str] = ...,
        dry_run: typing.Optional[str] = ...,
        field_manager: typing.Optional[str] = ...,
        force: typing.Optional[bool] = ...
    ) -> kubernetes.client.V1HorizontalPodAutoscaler: ...
"nikhil.benesch@gmail.com"
] | nikhil.benesch@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.