blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dfc8cb1da9c4b2f08c12ee2bfc107197fc3606b0 | 54d140dc8c88e7300c76b9cca48ed796bca5390e | /TensorFlow/test1.py | 99e634109d7fdd70ca537d3bdea9e3b30dccc61a | [] | no_license | kulinbin/- | e051274a882c4a2476d3bf9cb4bd3eb1daee7e9d | 4cd6b76bcff74a430e33ce2f6727f7ebbaebdb26 | refs/heads/master | 2020-03-22T13:05:03.548206 | 2018-11-13T11:36:42 | 2018-11-13T11:36:42 | 140,082,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import numpy as np
import tensorflow as tf
# Build 10 evenly spaced sample points on the interval [0, 10].
x_vals=np.linspace(0,10,10)
# print(x_vals)
# y_vals=x_vals+np.random.normal(0,1,100)
# Reshape the 1-D samples into a 10x1 column vector and print it.
# NOTE(review): np.matrix is deprecated in favour of 2-D ndarrays —
# x_vals.reshape(-1, 1) would be the modern equivalent; confirm before changing.
x_vals_column=np.transpose(np.matrix(x_vals))
print(x_vals_column)
# x_vals_column1=np.matrix(x_vals)
#
# ones_column=np.transpose(np.matrix(np.repeat(1,100)))
#
# A=np.column_stack((x_vals_column,ones_column))
# print(A) | [
"noreply@github.com"
] | kulinbin.noreply@github.com |
93aa588758a36f8310d5622ae90b9ff2dc12bbc1 | fb14fac18a614287ca3ccfc4b9123209629953e7 | /HackerRank/Easy/get-total-x.py | e6218e7113324266cf9b6ad4a18cdfcc129710a1 | [] | no_license | nickdunn2/Python-Practice | 0585c640da757ba5f6927450b2068491a5919b94 | 809c3a5878b934b083923279212598a2462d61a0 | refs/heads/master | 2020-05-24T17:44:29.464037 | 2019-07-21T14:00:26 | 2019-07-21T14:00:26 | 187,393,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py |
'''
You will be given two arrays of integers and asked to determine all integers that satisfy the following two conditions:
1) The elements of the first array are all factors of the integer being considered
2) The integer being considered is a factor of all elements of the second array
These numbers are referred to as being between the two arrays. You must determine how many such numbers exist.
For example, given the arrays a = [2, 6] and b = [24, 36], there are two numbers between them: 6 and 12.
Explanation:
6 % 2 = 0, 6 % 6 = 0, and 24 % 6 = 0, 36 % 6 = 0 for the first value.
Similarly, 12 % 2 = 0, 12 % 6 = 0 and 24 % 12 = 0, 36 % 12 = 0.
Complete the get_total_x function. It should return the number of integers that are between the sets.
get_total_x has the following parameter(s):
a: an array of integers
b: an array of integers
Output: Print the number of integers that are considered to be between a and b.
See here: https://www.hackerrank.com/challenges/between-two-sets/problem
'''
from functools import reduce
# HELPER FUNCTIONS FOR LCM AND GCD
def gcd(a, b):
    """Compute the greatest common divisor of ``a`` and ``b``.

    Iterative Euclidean algorithm: repeatedly replace the pair with
    (b, a mod b) until the remainder reaches zero.
    """
    x, y = a, b
    while y != 0:
        x, y = y, x % y
    return x
def lcm(a, b):
    """Return the lowest common multiple of ``a`` and ``b``.

    Uses the identity lcm(a, b) * gcd(a, b) == a * b; dividing first
    keeps the intermediate value small.
    """
    divisor = gcd(a, b)
    return (a // divisor) * b
def lcmm(*args):
    """Fold :func:`lcm` across all of ``args`` and return the result."""
    combined = reduce(lcm, args)
    return combined
def gcdm(*args):
    """Fold :func:`gcd` across all of ``args`` and return the result."""
    combined = reduce(gcd, args)
    return combined
def get_total_x(a, b):
    """Count the integers that are "between" the arrays ``a`` and ``b``.

    An integer x qualifies when every element of ``a`` divides x and x
    divides every element of ``b``.  Equivalently, x must be a multiple
    of lcm(a) that also divides gcd(b).

    Args:
        a: non-empty list of positive integers (the required factors).
        b: non-empty list of positive integers (the required multiples).

    Returns:
        int: the number of qualifying integers.
    """
    a_lcm = lcmm(*a)
    b_gcd = gcdm(*b)
    # Only multiples of lcm(a) can possibly qualify, so step by a_lcm
    # instead of testing every single integer up to gcd(b).  This is the
    # same result as the original ticker loop, just without the wasted
    # iterations on non-multiples.
    count = 0
    for candidate in range(a_lcm, b_gcd + 1, a_lcm):
        if b_gcd % candidate == 0:
            count += 1
    return count
print(get_total_x([2, 6], [24, 36])) # 2 (6, 12)
print(get_total_x([2, 4], [16, 32, 96])) # 3 (4, 8, 16)
print(get_total_x([1], [100])) # 9 (1, 2, 4, 5, 10, 20, 25, 50, 100)
| [
"nick.dunn@flosports.tv"
] | nick.dunn@flosports.tv |
3b804e792b3544778740271e6b7778c33013a07a | 8b0fc85f784618b81a63a038d65cad2354e674ac | /cmo_hr_expense_report/models/__init__.py | 4a84a60ed89128ff60182e89b87d001de8b6a8ec | [] | no_license | ecosoft-odoo/cmo_specific | 2e284fc0be2cf2de7986cbfe9fe233ef2c964d41 | 12cc42d9555b3b587f148cb36bac8e7e3f7c1593 | refs/heads/master | 2021-09-18T03:33:35.342095 | 2021-04-12T04:25:28 | 2021-04-12T04:25:28 | 113,545,569 | 3 | 12 | null | 2022-01-20T11:47:52 | 2017-12-08T07:31:34 | Python | UTF-8 | Python | false | false | 42 | py | # -*- coding: utf-8 -*-
from . import hr
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
fc75d328f8dbb6b847c911b32bb7c014917eb8b0 | 87fb8eb9bf3e5b6bd6f3cbcd2a4f63ae467e03f9 | /Poller/tests/multithreaded_xkcd.py | 6b7c54c38ed0a085131e09d1576da556e5b88acb | [] | no_license | open-analysis/Poller | 05261b9103734c368a350f1c36755dece164d34b | c5b7fe326241d6d0290116ebbb67dcc9f10dfbc4 | refs/heads/master | 2023-03-24T17:43:29.205371 | 2021-03-28T00:46:27 | 2021-03-28T00:46:27 | 300,897,749 | 1 | 0 | null | 2021-01-22T21:26:23 | 2020-10-03T14:21:17 | Jupyter Notebook | UTF-8 | Python | false | false | 1,728 | py | #! python3
# threadedDownloadXkcd.py - Downloads XKCD comics using multiple threads.
import requests, os, bs4, threading
# Ensure the output directory exists before any worker thread writes to it.
os.makedirs('xkcd', exist_ok=True) # store comics in ./xkcd
def downloadXkcd(startComic, endComic):
    """Download xkcd comics numbered startComic .. endComic-1 into ./xkcd.

    For each comic: fetch its HTML page, scrape the main comic image URL
    from the '#comic img' element, and stream the image bytes to disk.
    Runs sequentially; the caller fans several of these out across threads.
    """
    for comic_num in range(startComic, endComic):
        # Fetch the HTML page for this comic number.
        print('Downloading page https://xkcd.com/%s...' % (comic_num))
        page = requests.get('https://xkcd.com/%s' % (comic_num))
        page.raise_for_status()
        soup = bs4.BeautifulSoup(page.text, 'html.parser')

        # Locate the comic <img> inside the #comic container.
        matches = soup.select('#comic img')
        if not matches:
            print('Could not find comic image.')
            continue

        image_url = matches[0].get('src')
        print('Downloading image %s...' % (image_url))
        image = requests.get('https:' + image_url)
        image.raise_for_status()

        # Stream the payload to ./xkcd in 100 KB chunks; the context
        # manager guarantees the file handle is closed.
        with open(os.path.join('xkcd', os.path.basename(image_url)), 'wb') as handle:
            for chunk in image.iter_content(100000):
                handle.write(chunk)
# Create and start the Thread objects, one per batch of 10 comics.
downloadThreads = [] # a list of all the Thread objects
for i in range(0, 140, 10): # loops 14 times, creates 14 threads
    start = i
    # downloadXkcd iterates range(start, end), which EXCLUDES `end`.
    # The original `end = i + 9` therefore silently skipped comics
    # 9, 19, 29, ... between batches; i + 10 covers all 10 per batch.
    end = i + 10
    if start == 0:
        start = 1 # There is no comic 0, so set it to 1.
    downloadThread = threading.Thread(target=downloadXkcd, args=(start, end))
    downloadThreads.append(downloadThread)
    downloadThread.start()
# Wait for all threads to end.
for downloadThread in downloadThreads:
    downloadThread.join()
print('Done.')
"41541186+open-analysis@users.noreply.github.com"
] | 41541186+open-analysis@users.noreply.github.com |
f57f586f2055c52504a57a64f09fa9dd43ad918f | 7bf5c317ced4b2e3816dce319524a22dfcfce47a | /backend/piclab/user/serializers.py | db67fa5b2bda71a378e1d4565d93c4eb91792137 | [] | no_license | Prikers/piclab-old | 7e62b2cdb35a2635d2941d27fcd695d3ebe2437a | 8555ef39bed0e775c9e8a07095293eb9ac746e24 | refs/heads/master | 2023-03-05T18:24:21.308035 | 2020-12-31T16:41:58 | 2020-12-31T16:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from piclab.api.serializers import ProjectSerializer
from .models import Profile
User = get_user_model()
class RegisterSerializer(serializers.ModelSerializer):
    """Serializer used to register a new user with password confirmation.

    Exposes email/username plus two write-only password fields; ``save``
    refuses to create the user unless both passwords match.
    """

    password_confirm = serializers.CharField(style={'input_type': 'password'}, write_only=True)

    class Meta:
        model = User
        fields = ['email', 'username', 'password', 'password_confirm']
        extra_kwargs = {'password': {'write_only': True}}

    def save(self):
        """Validate the two passwords match, then create and return the user."""
        password = self.validated_data['password']
        password_confirm = self.validated_data['password_confirm']
        if password != password_confirm:
            raise serializers.ValidationError({'password': 'Passwords must match...'})
        new_user = User(
            email=self.validated_data['email'],
            username=self.validated_data['username'],
        )
        # set_password hashes the raw password before persisting.
        new_user.set_password(password)
        new_user.save()
        return new_user
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for a user's Profile, exposing read-only user fields,
    the user's projects, and a writable ``current_project``."""

    user = serializers.ReadOnlyField(source='user.id', read_only=True)
    username = serializers.CharField(source='user.username', read_only=True)
    email = serializers.CharField(source='user.email', read_only=True)
    projects = ProjectSerializer(source='user.projects', read_only=True, many=True)

    class Meta:
        model = Profile
        fields = ['id', 'user', 'username', 'email', 'projects', 'current_project']

    def update(self, instance, validated_data):
        """Update the profile, allowing ``current_project`` to change only
        to a project owned by the requesting user.

        Raises ValidationError when there is no authenticated request user
        or when the chosen project belongs to someone else.
        """
        request = self.context.get('request')
        if request and hasattr(request, 'user'):
            # Re-resolve the requesting user from the database.
            user = User.objects.filter(
                email=request.user.email,
                username=request.user.username,
            ).first()
            # NOTE(review): assigning instance.user.id (not instance.user_id)
            # looks suspicious — confirm this is intentional; it mutates the
            # in-memory related object only.
            instance.user.id = user.id
            project = validated_data.get('current_project')
            # NOTE(review): assumes 'current_project' is always present in
            # validated_data; a partial update without it would raise
            # AttributeError on `project.owner` — confirm with callers.
            if project.owner.id != user.id:
                raise serializers.ValidationError(
                    detail=f'You do not have permission to access this project ({project.name}).'
                    'Please contact project\'s administrator to request access.')
            for attr, value in validated_data.items():
                setattr(instance, attr, value)
            instance.save()
            return instance
        raise serializers.ValidationError(
            detail='You are not recognized as a valid user... Please login and try again.')
| [
"melaine.euzenat@gmail.com"
] | melaine.euzenat@gmail.com |
a4f78581f4f2c66d886be1f873d2f5d0bbb85539 | 18e16b3eb7982fae1441557227109f33b2c08e17 | /Arch/SPARC/Models/pso_plus.py | 06126d45ab97297a89c330564a5e78194cd0d61e | [] | no_license | ramzpat/rBMC | 423a3fe61a9faf3836a63eb2ca1954214aa84968 | 98e393c7a52a7badf47a4daf0a4a935825afe98d | refs/heads/master | 2021-01-22T22:44:36.685487 | 2017-08-22T06:21:12 | 2017-08-22T06:21:12 | 92,787,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,152 | py | # PSO+ Model
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname(path.dirname(path.dirname( path.dirname( path.abspath(__file__) ) ) )))
from HWModel.model import *
import HWModel.hw_z3 as hw
else:
from HWModel.model import *
import HWModel.hw_z3 as hw
from sparc_z3 import *
from z3 import *
class PSOPlusModel(HWModel):
    """SPARC PSO+ memory-consistency model encoded as z3 constraints.

    Declares the significant-program-order relations (spo, spo', spo''),
    the significant-conflict-order (sco) and a helper transitive relation
    (loopRel), and combines them into execution-order (xo) axioms for
    bounded model checking under Partial Store Order with MEMBAR(WR) and
    STBAR fences.
    """

    def __str__(self):
        return "PSO+ Model"

    # # Additional Op
    # MembarWR = DeclareSort('MEMBAR(WR)') # MEMBER(WR) operation in TSO+ (spac v8+)
    # STBar = DeclareSort('STBar')
    # Wrapper z3 functions lifting the fence sorts into the generic FenceOp sort.
    # NOTE(review): MembarWR and STBar are assumed to come from
    # `from sparc_z3 import *` (the local DeclareSort lines above are
    # commented out) — confirm the import actually provides them.
    membarOp = Function('f_membar', MembarWR, hw.FenceOp) # a wrapper function
    stbarOp = Function('f_stbar', STBar, hw.FenceOp) # a wrapper function

    def __init__(self):
        # Teach the hardware model how to coerce fence values into FenceOp:
        # values already of sort FenceOp pass through; STBar values are
        # wrapped with stbarOp, everything else with membarOp.
        hw.FenceOp.cast = (lambda val:
            val if (val.sort() == hw.FenceOp)
            else self.stbarOp(val) if (val.sort() == STBar)
            else self.membarOp(val)
        )

    # Relations (uninterpreted z3 predicates over operations)
    spo = Function('spo', hw.Opr, hw.Opr, BoolSort()) # Significant program order
    spo1 = Function('spo1', hw.Opr, hw.Opr, BoolSort()) # Significant program order spo'
    spo2 = Function('spo2', hw.Opr, hw.Opr, BoolSort()) # Significant program order spo''
    sco = Function('sco', hw.Opr, hw.Opr, BoolSort()) # Significant conflict order
    loopRel = Function('loopRel', hw.Opr, hw.Opr, BoolSort()) # Helping_relation

    # xo_mul1 = Function('xo_mul1', SubOpr, SubOpr, BoolSort())
    # xo_mul2 = Function('xo_mul1', SubOpr, SubOpr, BoolSort())
    # xo_mul3 = Function('xo_mul1', SubOpr, SubOpr, BoolSort())

    spo.domain = (lambda i: hw.Opr)
    spo1.domain = (lambda i: hw.Opr)
    spo2.domain = (lambda i: hw.Opr)
    sco.domain = (lambda i: hw.Opr)
    loopRel.domain = (lambda i: hw.Opr)

    def spo_relation(self, info):
        """Build z3 axioms defining spo' (spo1), spo'' (spo2) and spo.

        spo'' holds for R -po-> RW, W -po-> STBAR -po-> W and
        W -po-> MEMBAR(WR) -po-> R; spo' holds for the atomic-RMW write
        followed in program order by a read; spo is the transitive
        closure of their union over the program's memory operations.

        `info` carries the program order ('po') and the classified memory
        operations ('MemOp': 'read'/'write'/'rmw') as (op, label, proc)
        tuples.  Returns a list of z3 assertions.
        """
        # return []
        po = info['po']
        reads = info['MemOp']['read']
        writes = info['MemOp']['write']
        rmw = info['MemOp']['rmw']

        spo1 = self.spo1
        spo2 = self.spo2
        spo = self.spo
        sco = self.sco
        # MembarWR = self.MembarWR
        # STBar = self.STBar

        def is_PO(po, x, y):
            # NOTE(review): relies on a global `is_po` not defined in this
            # file, and this helper appears unused by the constraints below
            # — confirm before relying on or removing it.
            result = False
            for p in po:
                result = is_po(p, x, y)
                if result:
                    break
            return result

        x, y = Consts('x y', hw.MemOp)

        # spo'(X,Y) if ...
        # R -po-> RW
        # W -po-> W
        # W -po-> MEMBAR(WR) -po-> R
        # Irreflexivity of each relation.
        SPO = [ ForAll([x], Not( spo(x,x) )) ]
        SPO1 = [ ForAll([x], Not( spo1(x,x) )) ]
        SPO2 = [ ForAll([x], Not( spo2(x,x) )) ]

        write_p_rmw = writes #+ [(hw.atomic_write(a),l,i) for (a, l, i) in rmw]
        read_p_rmw = reads #+ [(hw.atomic_read(a),l,i) for (a, l, i) in rmw]
        atom_w = [w for (r, w) in rmw]

        rw1, rw2, rw3 = Consts('rw1 rw2 rw3', hw.MemOp)
        r = Const('tempR', hw.ReadOp)
        w1, w2 = Consts('tempW1 tempW2', hw.WriteOp)
        wr = Const('wr_fence', MembarWR)
        st = Const('st_fence', STBar)
        # a_rmw = Const('a_rmw', hw.AtomicOp)
        # spo'' is defined by a three-way If over its defining patterns;
        # the final else-branch forces spo'' false everywhere else.
        SPO2 += [
            ForAll([rw1, rw2],
                # R -po-> RW
                If(Exists([r], And(restrict(r, read_p_rmw), rw1 == hw.read(r), hw.po(r, rw2))),
                    spo2(rw1, rw2),
                # W -po-> STBAR -po-> W
                If(Exists([w1, w2, st], And(Not(w1 == w2), restrict(w1, write_p_rmw), restrict(w2, write_p_rmw),
                        rw1 == hw.write(w1), rw2 == hw.write(w2), hw.po(w1, st), hw.po(st, w2))),
                    spo2(rw1, rw2),
                # W -po-> MEMBAR(WR) -po-> R
                If(Exists([w1, r, wr], And(restrict(w1, write_p_rmw), restrict(r, read_p_rmw),
                            rw1 == hw.write(w1), rw2 == hw.read(r),
                            hw.po(w1, wr), hw.po(wr, r))),
                    spo2(rw1, rw2),
                    Not(spo2(rw1, rw2)))
                ))
            )
        ]
        SPO1 += [
            ForAll([rw1, rw2],
                # W (in RMW) -po-> R
                If( Exists([r,w1], And(
                        # restrict(a_rmw, rmw),
                        # rw1 == write(hw.atomic_write(a_rmw)),
                        restrict(r, read_p_rmw),
                        restrict(w1, atom_w),
                        rw2 == hw.read(r),
                        rw1 == hw.write(w1),
                        hw.po(rw1, rw2))),
                    spo1(rw1, rw2), Not(spo1(rw1, rw2)))
            )
        ]

        # spo = transitive closure of (spo1 union spo2), restricted to the
        # program's memory operations.
        memOps = [ (hw.MemOp.cast(rw),l,i) for (rw, l, i) in write_p_rmw + read_p_rmw]
        SPO += [
            ForAll([rw1, rw2],
                Implies( And(restrict(rw1, memOps), restrict(rw2, memOps)),
                    If(spo1(rw1, rw2), spo(rw1, rw2),
                    If(spo2(rw1, rw2), spo(rw1, rw2),
                    If(Exists([rw3], And(spo(rw1, rw3), spo(rw3, rw2)) ), spo(rw1, rw2), Not(spo(rw1, rw2))))
                    )
                )
            )
        ]
        return SPO + SPO1 + SPO2

    def sco_relation(self, info):
        """Build z3 axioms defining sco (significant conflict order).

        sco(x, y) holds when x and y are directly ordered by the conflict
        order `co`, or when two distinct reads are linked through an
        intervening conflicting write (R1 -co-> W -co-> R2).  Returns a
        list of z3 assertions.
        """
        po = info['po']
        reads = info['MemOp']['read']
        writes = info['MemOp']['write']
        rmw = info['MemOp']['rmw']

        spo1 = self.spo1
        spo2 = self.spo2
        spo = self.spo
        sco = self.sco
        # MembarWR = self.MembarWR

        write_p_rmw = writes #+ [(hw.atomic_write(a),l,i) for (a, l, i) in rmw]
        read_p_rmw = reads #+ [(hw.atomic_read(a),l,i) for (a, l, i) in rmw]
        memOps = [ (hw.MemOp.cast(rw),l,i) for (rw, l, i) in write_p_rmw + read_p_rmw]

        x, y = Consts('x y', hw.MemOp)
        r1, r2 = Consts('r1 r2', hw.ReadOp)
        w = Const('w', hw.WriteOp)
        # Irreflexivity, then the two defining cases with a closing
        # else-branch forcing sco false everywhere else.
        SCO = [ ForAll([x], Not(sco(x,x))) ]
        SCO += [
            ForAll([x, y],
                If(And(restrict(x,memOps), restrict(y,memOps), hw.co(x,y)), sco(x,y),
                If(Exists([r1,r2, w], And(Not(r1 == r2), restrict(r1, read_p_rmw), restrict(r2, read_p_rmw),
                            restrict(w, write_p_rmw),
                            hw.read(r1) == x, hw.read(r2) == y, hw.co(x,w), hw.co(w,y) )),
                    sco(x,y), Not(sco(x,y)))
                )
            )
        ]
        return SCO

    def model_axioms(self, info):
        """Return the full PSO+ axiom set: the uniproc/coherence/multi-copy
        execution-order (xo) rules plus the spo and sco definitions.

        NOTE(review): the "Multi - 1" rule below uses the bare names
        `conflict`, `co`, `spo`, `xo`, `subOpr` while every other rule
        qualifies them with `hw.` — confirm those bare names resolve (e.g.
        via `from sparc_z3 import *`) and refer to the intended relations.
        """
        # return self.tso_axioms
        # Relations
        spo = self.spo
        spo1 = self.spo1
        spo2 = self.spo2
        sco = self.sco
        loopRel = self.loopRel

        # ------ variables
        rw1, rw2, rw3 = Consts('rw1 rw2 rw3', hw.MemOp)
        a, b = Consts('a b', hw.Opr)
        r = Const('r', hw.ReadOp)
        w = Const('w', hw.WriteOp)
        r1, r2 = Consts('r1 r2', hw.ReadOp)
        w1, w2 = Consts('w1 w2', hw.WriteOp)
        i, j = Consts('i j', hw.Proc)
        # stbar = Const('stbar', FenceOp)
        # rmw = Const('rmw', hw.AtomicOp)
        memb_wr = Const('membar_wr', MembarWR)

        # Conditions
        tso_axioms = [
            # % Uniproc RW -po-> W
            # xo(subOpr(X,I), subOpr(Y,I)) :- conflict(X,Y), subOpr(X,I), subOpr(Y,I), pOrder(X,Y), isWrite(Y), isRW(X).
            ForAll([rw1, w2, i],
                Implies(
                    And(
                        hw.conflict(rw1, w2),
                        hw.po(rw1, w2),
                    ),
                    hw.xo(hw.subOpr(rw1, i), hw.subOpr(w2, i))
                )
            ),
            # % Coherence W -co-> W
            # xo(subOpr(X,I), subOpr(Y,I)) :- conflict(X,Y), subOpr(X,I), subOpr(Y,I), isWrite(X), isWrite(Y), co(X,Y).
            ForAll([w1, w2, i],
                Implies(
                    And(
                        hw.conflict(w1, w2),
                        hw.co(w1, w2),
                    ),
                    hw.xo(hw.subOpr(w1, i), hw.subOpr(w2, i))
                )
            ),
            # % Multi - 1 W -co-> R -spo-> RW
            # xo(subOpr(W,I), subOpr(RW,I)) :- conflict(W,RW), subOpr(W,I), subOpr(RW,I), isWrite(W), isRead(R), isRW(RW), co(W,R), spo(R,RW).
            ForAll([w1, rw2, i, r],
                Implies(
                    And(
                        conflict(w1, rw2),
                        co(w1, r),
                        spo(r, rw2),
                    ),
                    xo(subOpr(w1, i), subOpr(rw2, i))
                )
            ),
            # LoopRel def
            # loopRel is the transitive closure of (sco ; spo) composition.
            ForAll([rw1, rw2],
                If( Exists(a, And(sco(rw1, a), spo(a, rw2))), loopRel(rw1, rw2),
                    If( Exists([a], And(loopRel(rw1,a), loopRel(a, rw2)) ) ,
                        loopRel(rw1, rw2) , Not(loopRel(rw1, rw2)) )
                )
            ),
            # not reflexive
            ForAll([rw1, rw2],
                Implies(loopRel(rw1,rw2), rw1 != rw2)
            ),
            # % Multi - 2
            # % RW -spo-> { A -sco-> B -spo-> }+ RW *)
            # xo(subOpr(RW,I), subOpr(RW2,I)) :- conflict(RW,RW2), subOpr(RW,I), subOpr(RW2,I), isRW(RW), isRW(RW2), spo(RW,AA), loopRel(AA,BB), spo(BB,RW2).
            ForAll([rw1, rw2, a, i],
                Implies(
                    And(
                        hw.conflict(rw1, rw2),
                        spo(rw1, a),
                        loopRel(a, rw2),
                        # spo(b, rw2),
                    ),
                    hw.xo(hw.subOpr(rw1, i), hw.subOpr(rw2, i))
                )
            ),
            # % Multi - 3
            # %% W -sco-> R -spo-> { A -sco-> B -spo-> }+ R
            # xo(subOpr(W,I), subOpr(R2,I)) :- conflict(W,R2), subOpr(W,I), subOpr(R2,I), isWrite(W), isRead(R), isRead(R2), sco(W,R), spo(R,AA), loopRel(AA,BB), spo(BB,R2).
            ForAll([w1, r2, i, a, r],
                Implies(
                    And(
                        hw.conflict(w1, r2),
                        sco(w1, r),
                        spo(r, a),
                        loopRel(a, r2),
                        # spo(b, r2),
                    ),
                    hw.xo(hw.subOpr(w1, i), hw.subOpr(r2, i))
                )
            ),
        ]

        return (tso_axioms) + self.spo_relation(info) + self.sco_relation(info)
if __name__ == "__main__":
    # Parenthesised print works under both Python 2 and Python 3; the
    # original bare `print "[Debug]"` statement is a SyntaxError on Python 3.
    print("[Debug]")
    pass

    # MembarWR = TSOPlusModel.MembarWR
    # membarOp = TSOPlusModel.membarOp
| [
"pattaravut.m@gmail.com"
] | pattaravut.m@gmail.com |
38d18606d96104aa292bca4a498980f4e60e5c2e | 41258863a3a766e70a71d8371a6f2408e10962bd | /prospectos/migrations/0008_prospectoaccion_tipo_accion.py | 4026009c808c4c43d1b50a7ddac25960ac549e07 | [] | no_license | diegolego/angloapp | 7ce51c8fa0acc71127ea1eb1bfa199182d1fa8b3 | 964e9db9c4d4a6d11ee3c1b3fd766771e6624f51 | refs/heads/master | 2020-03-23T09:13:50.089696 | 2018-07-18T03:28:22 | 2018-07-18T03:28:22 | 141,375,371 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # Generated by Django 2.0.2 on 2018-03-23 03:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('prospectos', '0007_auto_20180322_2106'),
]
operations = [
migrations.AddField(
model_name='prospectoaccion',
name='tipo_accion',
field=models.ForeignKey(help_text='Selecciona el tipo de acción', null=True, on_delete=django.db.models.deletion.SET_NULL, to='prospectos.Accion'),
),
]
| [
"noreply@github.com"
] | diegolego.noreply@github.com |
6eb3041541f8ba7b3c286999f48a64c9e2cc3ed4 | 5dbc0f6d4c4cdff62612566d622da8cf80d90d05 | /catkin_ws/src/tabular_dyna_q/scripts/agents/q_learning.py | 62c71bf226cbbe1ce3dda84646670319d6ba6af6 | [] | no_license | Li-Jinjie/gazebo_drone_tutorials | 9622cecdb4a443cf5309d969227c6e0d41dbe68f | e7e0dd592176b21b3e7779d9692e923edb23032b | refs/heads/master | 2021-01-03T07:41:12.458912 | 2020-07-12T14:16:18 | 2020-07-12T14:16:18 | 239,985,497 | 2 | 0 | null | 2020-04-01T13:37:18 | 2020-02-12T10:29:40 | null | UTF-8 | Python | false | false | 4,705 | py | #!/usr/bin/env python
# coding=utf-8
'''
@Author : LI Jinjie
@Date : 2020-05-03 17:42:04
@LastEditors : LI Jinjie
@LastEditTime : 2020-05-07 13:01:26
@Units : None
@Description : The implementation code of q-learning algorithm
@Dependencies : None
@NOTICE : None
'''
import numpy as np
from template_agent import BaseAgent
# Q-Learning agent here
# Q-Learning agent here
class QLearningAgent(BaseAgent):
    """Tabular Q-learning agent with an epsilon-greedy behaviour policy."""

    def agent_init(self, agent_init_info):
        """Setup for the agent called when the experiment first starts.

        Args:
            agent_init_info (dict): parameters used to initialize the agent:
                num_states (int), num_actions (int), epsilon (float),
                step_size (float), discount (float), seed (int).
        """
        # Cache the experiment parameters directly on the agent.
        for key in ("num_actions", "num_states", "epsilon", "step_size", "discount"):
            setattr(self, key, agent_init_info[key])
        self.rand_generator = np.random.RandomState(agent_init_info["seed"])

        # Action-value table: one row per state, one column per action,
        # initialised to zero.
        self.q = np.zeros((self.num_states, self.num_actions))

    def _choose_action(self, state):
        """Epsilon-greedy selection from q[state].

        With probability epsilon a uniformly random action is explored;
        otherwise the greedy action is taken (ties broken at random).
        """
        action_values = self.q[state, :]
        if self.rand_generator.rand() < self.epsilon:
            return self.rand_generator.randint(self.num_actions)
        return self.argmax(action_values)

    def agent_start(self, state):
        """First call of an episode: pick and remember the initial action.

        Args:
            state (int): the starting state from env_start.

        Returns:
            action (int): the first action the agent takes.
        """
        chosen = self._choose_action(state)
        self.prev_state = state
        self.prev_action = chosen
        return chosen

    def agent_step(self, reward, state):
        """One interaction step: select an action, then apply the
        Q-learning backup for the previous (state, action) pair.

        Args:
            reward (float): reward for the last action taken.
            state (int): the state the agent ended up in.

        Returns:
            action (int): the action the agent takes next.
        """
        # Choose from the *pre-update* value estimates, matching the
        # original ordering (selection happens before the backup).
        chosen = self._choose_action(state)

        # Q-learning update: bootstrap from the best action value in the
        # new state, independent of the action actually chosen.
        old_estimate = self.q[self.prev_state, self.prev_action]
        td_target = reward + self.discount * max(self.q[state, :])
        self.q[self.prev_state, self.prev_action] = old_estimate + self.step_size * (td_target - old_estimate)

        self.prev_state = state
        self.prev_action = chosen
        return chosen

    def agent_end(self, reward):
        """Final update of an episode: no bootstrap term, since the
        terminal state has value zero.

        Args:
            reward (float): reward for entering the terminal state.
        """
        old_estimate = self.q[self.prev_state, self.prev_action]
        self.q[self.prev_state, self.prev_action] = old_estimate + self.step_size * (reward - old_estimate)

    def argmax(self, q_values):
        """Return an index of the maximum of q_values, breaking ties
        uniformly at random with the agent's RNG.

        Args:
            q_values (Numpy array): the array of action-values.

        Returns:
            action (int): an action with the highest value.
        """
        best = float("-inf")
        candidates = []
        for idx, value in enumerate(q_values):
            if value > best:
                best = value
                candidates = []
            if value == best:
                candidates.append(idx)
        return self.rand_generator.choice(candidates)

    def agent_cleanup(self):
        """Cleanup done after the agent ends."""
        pass

    def agent_message(self, message):
        """A function used to pass information from the agent to the experiment.

        Args:
            message: The message passed to the agent.

        Returns:
            The response (or answer) to the message.
        """
        pass
# Module is meant to be imported by the experiment runner; no demo here.
if __name__ == "__main__":
    pass
| [
"lijinjie362@outlook.com"
] | lijinjie362@outlook.com |
8cc84568e1fd8e83ae8a031d834f6b227e0516ce | f62e728198697b11fac3088dec84762ded173c22 | /tests/homework6/test_hw6_task2.py | 3036832114e63341162a5dbd07205d8f1e5262f5 | [] | no_license | Inna99/homework | 43849a753cca8332ac2e01b491d08ba5173f64f2 | bfbb30c17d1529ac8b46d31d6c9dcedcebdc0528 | refs/heads/master | 2023-08-28T10:40:05.362790 | 2021-11-01T22:23:22 | 2021-11-01T22:23:22 | 400,244,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | import pytest
from homework6.task2 import DeadlineError, HomeworkResult, Student, Teacher
@pytest.mark.xfail(raises=TypeError)
def test_homework_result_raises_exeption():
    """Creating a HomeworkResult from a non-Homework object must raise TypeError."""
    student = Student("Lev", "Sokolov")
    # "fff" is not a Homework instance, so construction is expected to fail.
    HomeworkResult(student, "fff", "Solution")
@pytest.mark.xfail(raises=DeadlineError)
def test_create_homework_raises_exeption():
    """Submitting homework whose deadline already passed must raise DeadlineError."""
    teacher = Teacher("Daniil", "Shadrin")
    # A zero-day deadline is expired immediately.
    expired_hw = teacher.create_homework("Do hw6", 0)
    student = Student("Roman", "Petrov")
    student.do_homework(expired_hw, "done")
def test_create_homework_done():
    """Doing homework before the deadline yields a HomeworkResult instance."""
    teacher = Teacher("Daniil", "Shadrin")
    hw = teacher.create_homework("Do hw6", 5)
    student = Student("Roman", "Petrov")
    submission = student.do_homework(hw, "done")
    assert isinstance(submission, HomeworkResult)
def test_check_homework():
    """after checking, the homework is recorded in homework_done"""
    opp_teacher = Teacher("Daniil", "Shadrin")
    advanced_python_teacher = Teacher("Aleksandr", "Smetanin")
    good_student = Student("Lev", "Sokolov")
    oop_hw = opp_teacher.create_homework("Learn OOP", 1)
    docs_hw = opp_teacher.create_homework("Read docs", 5)
    result_1 = good_student.do_homework(oop_hw, "I have done this hw")
    result_2 = good_student.do_homework(docs_hw, "I have done this hw too")
    result_3 = good_student.do_homework(docs_hw, "done")
    # A sufficiently long solution is accepted ...
    check = opp_teacher.check_homework(result_1)
    assert check
    # ... while "done" is rejected (presumably too short — see task2 rules).
    check = opp_teacher.check_homework(result_3)
    assert not check
    # homework_done appears to be shared class-level state: a check made by
    # any teacher must be visible both on another instance and on the class.
    temp_1 = opp_teacher.homework_done
    advanced_python_teacher.check_homework(result_2)
    temp_2 = Teacher.homework_done
    assert temp_1 == temp_2
def test_reset_results_delete_one():
    """checks that when transferring a homework instance, it deletes only
    the results of this task from homework_done and
    checks that if nothing is transmitted, then completely reset homework_done"""
    oop_teacher = Teacher("Daniil", "Shadrin")
    lazy_student = Student("Roman", "Petrov")
    good_student = Student("Lev", "Sokolov")
    oop_hw = oop_teacher.create_homework("Learn OOP", 1)
    docs_hw = oop_teacher.create_homework("Read docs", 5)
    result_1 = good_student.do_homework(oop_hw, "I have done this hw too")
    result_2 = good_student.do_homework(docs_hw, "I have done this hw too")
    result_3 = lazy_student.do_homework(docs_hw, "done")
    oop_teacher.check_homework(result_1)
    oop_teacher.check_homework(result_2)
    oop_teacher.check_homework(result_3)
    # Resetting with a specific homework clears only that homework's results.
    oop_teacher.reset_results(oop_hw)
    assert oop_teacher.homework_done[oop_hw] == []
    # Resetting with no argument empties the whole homework_done container.
    oop_teacher.reset_results()
    assert not any(oop_teacher.homework_done)
| [
"ikunygina@yandex.ru"
] | ikunygina@yandex.ru |
c6f2b6082dc30a89e9c7b53ce2895a00e3fb3b7f | f002d3181badb8055babf24f9c4d201d8a5e3025 | /BOJ_python/2152_not_solved.py | d58c144f137adbbfe9e49279120648bbf5255062 | [] | no_license | suprodigy/Algorithm | 1dd996dbcebe710b704338dfcfc0fecf52e2566a | f91baaddaddde60236d979798f88c8b0dc814d55 | refs/heads/master | 2021-01-12T09:47:18.914375 | 2017-06-16T08:09:05 | 2017-06-16T08:09:05 | 76,175,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import sys
sys.stdin = open('input.txt', 'r')
from queue import Queue
def span(adj, S, T):
    """BFS from S over the directed adjacency lists `adj`.

    Returns the number of vertices reachable from S (including S itself)
    when T is among them, and 0 when T is unreachable.  Uses the
    module-level vertex count N to size the visited table.
    """
    global N
    visited = [False] * (N + 1)
    frontier = Queue()
    frontier.put(S)
    visited[S] = True
    reached = 0
    while not frontier.empty():
        node = frontier.get()
        reached += 1
        for neighbour in adj[node]:
            if not visited[neighbour]:
                visited[neighbour] = True
                frontier.put(neighbour)
    return reached if visited[T] else 0
# Read the header line: vertex count, edge count, source and target.
data = [int(x) for x in input().split()]
N, M, S, T = data[0], data[1], data[2], data[3]
# Directed adjacency lists, 1-indexed (slot 0 unused).
adj = [[] for i in range(N+1)]
for i in range(M):
    temp = input().split()
    u, v = int(temp[0]), int(temp[1])
    adj[u].append(v)
ans = span(adj, S, T)
print(ans)
| [
"suprodigy@gmail.com"
] | suprodigy@gmail.com |
d0e0538eb3e75b5a46dd622374d7661d14eb6ead | e5749ae0b7600ed8cf34d3284671c0bfab7c61a2 | /cmsplugin_page_templates/__init__.py | 5ae6758772bc4d734b15e9ab257fb033656c733c | [] | no_license | lmanzurv/cmsplugin-page-templates | ba03fe4130f8058e6e7b20e3c68e00861043edf0 | e8f277a5f091b7823b8e053b8b7ba011a854de80 | refs/heads/master | 2021-01-18T07:49:40.325522 | 2017-08-24T13:10:43 | 2017-08-24T13:10:43 | 100,353,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | # -*- coding: utf-8 -*-
default_app_config = 'cmsplugin_page_templates.apps.PageTemplatesConfig'
| [
"lmanzurv@gmail.com"
] | lmanzurv@gmail.com |
a95c21f11f4fb94441aa905dc1ea4b078022ec26 | 76cd0c59cf23106c71b1b0ca63918ddae43d6006 | /52_N-Queens.py | e690f1ee93a1553e21169af4b1780ee3222f449d | [] | no_license | gao288/MyLeetCodeSolution | e1d1c0a7b16f80a04717b07f9e5860eeef1bad87 | f878dc4193525fe0cc544f0e78d897c485ebcbcd | refs/heads/master | 2020-07-21T15:04:57.716430 | 2020-01-06T06:47:15 | 2020-01-06T06:47:15 | 206,903,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | from typing import *
class Solution:
    """Count the solutions to the N-Queens problem via backtracking.

    Three sets track which columns and diagonals are still free: a queen
    at (row, col) occupies column ``col``, the "/" diagonal ``row + col``
    and the "\\" diagonal ``col - row + n - 1``.
    """

    def __init__(self):
        self.diagonal1 = None  # free "/" diagonals (row + col)
        self.diagonal2 = None  # free "\" diagonals (col - row + n - 1)
        self.col = None        # free columns
        self.ret = 0           # number of complete placements found
        self.n = 0             # board size

    def totalNQueens(self, n: int) -> int:
        """Return the number of distinct N-Queens solutions on an n x n board.

        The return annotation is corrected to ``int``: this method has
        always returned a count, not board layouts.  All state is reset
        here so the same instance can be reused across calls (previously
        ``self.ret`` accumulated between calls).
        """
        self.diagonal1 = set(x for x in range(2 * n - 1))
        self.diagonal2 = set(x for x in range(2 * n - 1))
        self.col = set(x for x in range(n))
        self.n = n
        self.ret = 0
        self.solveN(n - 1, [])
        return self.ret

    def solveNQueens(self, n: int) -> int:
        """Backward-compatible alias for :meth:`totalNQueens` (the demo
        code at module bottom calls this name)."""
        return self.totalNQueens(n)

    def solveN(self, count, s_array):
        """Place a queen on row ``n - count - 1``; recurse on the rest.

        ``count`` counts down the remaining rows; ``s_array`` is kept for
        signature compatibility (it was never populated).
        """
        if count == -1:
            # All n queens placed without conflict: one more solution.
            self.ret += 1
            return
        row = self.n - count - 1
        for col in range(self.n):
            d1, d2 = row + col, col - row + self.n - 1
            # Skip squares whose column or either diagonal is occupied.
            if d1 not in self.diagonal1 or d2 not in self.diagonal2 or col not in self.col:
                continue
            self.diagonal1.remove(d1)
            self.diagonal2.remove(d2)
            self.col.remove(col)
            self.solveN(count - 1, s_array)
            # Backtrack: free the column and diagonals again.
            self.diagonal1.add(d1)
            self.diagonal2.add(d2)
            self.col.add(col)
        return
# Demo: 4-queens has exactly two solutions.  The class defines
# totalNQueens; the previous call to s.solveNQueens raised AttributeError.
s = Solution()
print(s.totalNQueens(4))
"home@Felixs-MacBook-Pro.local"
] | home@Felixs-MacBook-Pro.local |
5c12ff8b977defd8a61e82aee2f045aba513a99b | 3196cedf740310c8a3c6e27075d6de1f8b11d381 | /window.py | f90862f96802278c61d72832e99eef2734238768 | [] | no_license | agrazela14/SeniorProject | 39fc75907867a9bb9a9e3f983e6fcb9e08483f21 | e40e82ce2609f425fb925fb420c035e327da43fc | refs/heads/master | 2021-01-21T17:46:26.507321 | 2017-06-02T09:17:12 | 2017-06-02T09:17:12 | 91,985,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #!/user/bin/python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QWidget
if __name__ == '__main__':

    # One QApplication must exist before any widget is created.
    app = QApplication(sys.argv)

    # Build a bare 250x150 widget at screen position (300, 300).
    w = QWidget()
    w.resize(250, 150)
    w.move(300, 300)
    w.setWindowTitle('Simple')
    w.show()

    # Hand control to the Qt event loop; exit with its return code.
    sys.exit(app.exec_())
| [
"agrazela@gmail.com"
] | agrazela@gmail.com |
b897a7c416d5a5f5cba998419949895ebbfca7bc | a6d9ba2ac0a6970b33cddfc7da68228e3575333c | /kde_numpy/function_decorators.py | c70f3e6495df4450c93deea8cd96daef451cd1dc | [] | no_license | mansoldm/kde_numpy | 258718f525e068ec8401de35eb8bee5456b3e02f | 5832778e5cb7bba129f52fb35427be4619f564bf | refs/heads/main | 2023-03-29T05:17:39.377467 | 2021-04-03T14:53:22 | 2021-04-03T14:56:01 | 354,318,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,607 | py |
import numpy as np
from kde_numpy.errors import DataShapeError
def check_init_args_gaussian_KDE(init_func):
    """Validate the ``bandwidth`` argument of a Gaussian-KDE constructor.

    Wraps ``init_func`` so that a non-numeric or non-positive bandwidth
    is rejected before the constructor body runs.
    """
    def wrapper(self, bandwidth, *args, **kwargs):
        if not isinstance(bandwidth, (int, float)):
            raise TypeError(f'bandwidth must be an integer or float, got {type(bandwidth)}')
        if bandwidth <= 0:
            raise ValueError(f'bandwidth must be greater than zero, got {bandwidth}')
        return init_func(self, bandwidth, *args, **kwargs)
    return wrapper
def check_init_args_KDE(init_func):
    """Validate ``batch_size`` (positive int) and optional ``eps`` (number)
    before delegating to a KDE constructor.

    ``eps`` may be given as a keyword, as the last positional argument, or
    omitted (default 1e-8); it is always forwarded as a keyword.
    """
    def wrapper(self, batch_size, *args, **kwargs):
        if not isinstance(batch_size, int):
            raise TypeError(
                f"batch_size must be an integer, got {type(batch_size)}")
        if batch_size <= 0:
            raise ValueError(
                f"batch_size must be greater than zero, got {batch_size}")
        if 'eps' in kwargs:
            eps = kwargs.pop('eps')
        elif len(args):
            # BUG FIX: `args` is a tuple and has no pop(); convert to a
            # list so the trailing positional eps can be removed before
            # forwarding the remaining positionals.
            args = list(args)
            eps = args.pop()
        else:
            eps = 1e-8
        if not isinstance(eps, float) and not isinstance(eps, int):
            raise TypeError(f'eps must be an integer or float, got {type(eps)}')
        return init_func(self, batch_size=batch_size, eps=eps, *args, **kwargs)
    return wrapper
def check_np_array(func):
    """Ensure the decorated method receives a rank-2 numpy array."""
    def wrapper(self, datapoints: np.ndarray, *args, **kwargs):
        # Reject anything that is not a numpy array outright.
        if not isinstance(datapoints, np.ndarray):
            raise TypeError(
                f'Input data to {func} must be a np.ndarray, got {type(datapoints)}')
        # The data must be a 2-D matrix (n_samples, n_features).
        shape = np.shape(datapoints)
        if len(shape) != 2:
            raise DataShapeError(
                f'Data must be of rank 2, but got rank {len(shape)} with shape {shape}')
        return func(self, datapoints, *args, **kwargs)
    return wrapper
def check_np_array_out(func):
    """Assert that the decorated function returns a numpy array."""
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        # Anything other than an ndarray is a programming error downstream.
        if not isinstance(result, np.ndarray):
            raise TypeError(
                f'Function output must be a np.ndarray, got {type(result)}')
        return result
    return wrapper
def check_output_type(out_type):
    """Parametrized decorator: assert the wrapped method returns an instance
    of `out_type`, raising TypeError otherwise.

    Usage: ``@check_output_type(SomeType)``.
    """
    def check_output_type_c(func):
        def wrapper(self, *args, **kwargs):
            out = func(self, *args, **kwargs)
            if not isinstance(out, out_type):
                # BUG FIX: the message previously interpolated the undefined
                # name `output_type`, so failures raised NameError instead of
                # the intended TypeError.
                raise TypeError(
                    f'Function output must be a {out_type}, got {type(out)}')
            return out
        return wrapper
    return check_output_type_c
def check_kernel_z(func):
    """Decorator validating the kernel/component index `z_i`.

    Requires ``self.mu`` (training datapoints, indexed as (k, d) elsewhere in
    this module) to be set on the instance; ``z_i`` must be an int in [0, k).
    """
    def wrapper(self, z_i, *args, **kwargs):
        if not isinstance(z_i, int):
            raise TypeError(
                f'z_i must be an integer, got value of type {type(z_i)}')
        # k = number of training datapoints / kernel centers stored on self.mu
        k = np.shape(self.mu)[0]
        if not 0 <= z_i < k:
            raise ValueError(
                f'z_i must be between 0 and {k}, got {z_i}. No training datapoint provided for z_i={z_i}')
        # forward z_i by keyword so stacked decorators can rely on the name
        return func(self, z_i=z_i, *args, **kwargs)
    return wrapper
def check_kernel_test_data(func):
    """Decorator checking that `test_X` has the same feature dimensionality
    as the training data stored in ``self.mu``."""
    def wrapper(self, test_X, *args, **kwargs):
        # z_i should be checked by decorator above
        # check testdata np array dimensionality
        # td: feature dim of the test points; d: feature dim of training points
        td, d = np.shape(test_X)[-1], np.shape(self.mu)[1]
        if td != d:
            raise DataShapeError(
                f'Test datapoints must have the same dimensionality as training datapoints, but got {td} for test datapoints and {d} for training datapoints')
        return func(self, test_X=test_X, *args, **kwargs)
    return wrapper
| [
"mansoldm@tcd.ie"
] | mansoldm@tcd.ie |
7415e256bef0665e06557db0cb5cdf030803c387 | f190ea47011e63d09df42133ff4959ff5fabdb8c | /inverse_index/rose_inverse_index_lab.py | 999fba341da5fdc2fae55a3c865b8bdbc3e223d7 | [] | no_license | ecxr/matrix | 45859b1a4a281fef9c5ccc6acd608509f9c63f98 | 61e320b9bb52eb425194c1c0eaef2a826f45a05d | refs/heads/master | 2020-05-20T03:48:00.538218 | 2013-07-18T20:03:05 | 2013-07-18T20:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | from random import randint
from dictutil import *
## Task 1
def movie_review(name):
    """
    Input: the name of a movie (the choice does not depend on it)
    Output: one of three canned review strings, chosen at random using randint
    """
    options = ["See it!", "A gem!", "Ideological claptrap!"]
    pick = randint(0, len(options) - 1)
    return options[pick]
## Tasks 2 and 3 are in dictutil.py
## Task 4
def makeInverseIndex(strlist):
    """
    Input: a list of documents as strings
    Output: a dictionary mapping each word occurring in any document to the
    set of document ids (indices into strlist) of all documents containing it.
    Note that to test your function, you are welcome to use the files
    stories_small.txt or stories_big.txt included in the download.
    """
    index = {}
    for doc_id, doc in enumerate(strlist):
        for word in doc.split():
            # setdefault creates the posting set the first time a word is seen
            index.setdefault(word, set()).add(doc_id)
    return index
def ii2(strlist):
    # One-liner variant: maps each word to the index of the LAST document
    # containing it (later documents overwrite earlier ones in the dict
    # comprehension) — unlike makeInverseIndex, which accumulates the full
    # set of containing document ids.
    return {word:index for (index,line) in enumerate(strlist) for word in line.split()}
## Task 5
def orSearch(inverseIndex, query):
    """
    Input: an inverse index, as created by makeInverseIndex, and a list of words to query
    Output: the set of document ids that contain _any_ of the specified words
    """
    hits = set()
    for word in query:
        # union in the posting set of every query word that is indexed;
        # unindexed words simply contribute nothing
        postings = inverseIndex.get(word)
        if postings is not None:
            hits |= postings
    return hits
## Task 6
def andSearch(inverseIndex, query):
    """
    Input: an inverse index, as created by makeInverseIndex, and a list of words to query
    Output: the set of all document ids that contain _all_ of the specified words
    """
    docs_so_far = set()
    for word in query:
        if word not in inverseIndex:
            # a word indexed nowhere can match no document at all
            return set()
        postings = inverseIndex[word]
        if not docs_so_far:
            # first indexed word: start from a copy of its posting set
            docs_so_far = set(postings)
        else:
            docs_so_far &= postings
            if not docs_so_far:
                # intersection already empty — no later word can revive it
                return set()
    return docs_so_far
| [
"skyking@gmail.com"
] | skyking@gmail.com |
6b6052a46201a5f11a1cc007c19fabf575369fcf | 38e675001f31d6e5fb9393d4e2578c61aa986692 | /http_client_example.py | 118b81a6f4b4a07314a0cb4f244313a744e1fe69 | [] | no_license | wpjunior/python36-async | c8ed999288539ab9470f0078b27bebc230a2db03 | b646242d4bc61adc6e7d75cd6ca30b39d411cbe2 | refs/heads/master | 2021-01-20T10:06:42.798084 | 2017-05-06T15:35:58 | 2017-05-06T15:35:58 | 90,322,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | import json
from tornado.ioloop import IOLoop
from tornado.web import (
Application,
RequestHandler,
)
from tornado.httpclient import AsyncHTTPClient
async def get_http_result():
    """Fetch the package.json of backstage/functions and return a small dict
    exposing the package's "name" field under the key 'heey'.

    NOTE(review): validate_cert=False disables TLS certificate verification —
    confirm this is intentional for a GitHub raw-content fetch.
    """
    url = 'https://raw.githubusercontent.com/backstage/functions/master/package.json'
    response = await AsyncHTTPClient().fetch(url, validate_cert=False)
    data = json.loads(response.body)
    return {
        'heey': data['name'],
    }
class HelloWorldHandler(RequestHandler):
    """Tornado handler: GET responds with the JSON built by get_http_result()."""
    async def get(self):
        result = await get_http_result()
        # serialize explicitly; tornado would set a JSON content type for a
        # dict, but here the body is written as a plain string
        self.write(json.dumps(result))
if __name__ == "__main__":
    # Route "/" to the handler, listen on port 8888 and block on the IO loop.
    app = Application([
        (r"/", HelloWorldHandler),
    ])
    app.listen(8888)
    IOLoop.current().start()
| [
"wilsonpjunior@gmail.com"
] | wilsonpjunior@gmail.com |
730763e1e4eddb2796489558af8b21741ba2e260 | 0bb430a6f2478d59c95940b11b9570829615fa37 | /CA2Restarted/vouchers/models.py | 982ecd3c0883fee79c59278ec42cea84350bd4f1 | [] | no_license | JamesOliver-college/Software-CA2-V2 | 39b54d31a08d503f320e6e28391c10024d7bad49 | 3cef7a17e4620762a21b4f5dc0df1e5b3c54358e | refs/heads/master | 2022-03-23T13:49:24.673970 | 2019-12-06T10:28:31 | 2019-12-06T10:28:31 | 224,004,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Voucher(models.Model):
    """A discount voucher redeemable between `valid_from` and `valid_to`."""
    code = models.CharField(max_length=50, unique=True)
    valid_from = models.DateTimeField()
    valid_to = models.DateTimeField()
    # percentage discount, constrained to the range 0-100
    discount = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)])
    active = models.BooleanField()

    def __str__(self):
        # FIX: stray dataset residue ("| [") fused onto this line broke the
        # parse; the return itself is unchanged
        return self.code
"x00152764@mytudublin.ie"
] | x00152764@mytudublin.ie |
219cef7e0bdb3c19ef844fd2b9f31656dcc58f07 | 2817ecd7e48c4decba12ee76e451727c1a6acf14 | /scripts/legacy/survey_distribution.py | 645981e58fef1bc3a1c76cafe786360c095677dc | [] | no_license | schwa-lab/sharingnews | 6fcef71c16a03fb3a4a56c11322ba5c8ceb59582 | 81c87176c7b37511f15a97189f03d90d5074d0fb | refs/heads/master | 2021-01-16T21:46:23.108811 | 2018-02-12T06:33:30 | 2018-02-12T06:33:30 | 26,195,626 | 3 | 0 | null | 2018-02-12T06:33:16 | 2014-11-05T00:39:40 | Python | UTF-8 | Python | false | false | 3,985 | py | from __future__ import print_function, division
from collections import Counter, defaultdict
import operator
from likeable.cleaning import strip_subdomains
MONTH_FIELD = 1
def get_status_binary(l):
    """True iff the row's HTTP status field (column 8) is exactly '200'."""
    return l[8] == '200'
def get_status_group(l):
    """Map a row's status field (column 8) to a coarse group.

    '<...' or '-'  -> 'ERR'   (fetch failures / placeholders)
    '200?'         -> 'HOME'  (redirected-to-homepage marker)
    anything else  -> first character + 'xx', e.g. '404' -> '4xx'
    """
    status = l[8]
    if status.startswith('<') or status == '-':
        return 'ERR'
    if status == '200?':
        return 'HOME'
    return status[0] + 'xx'
def _norm_date(dt, n_months):
if n_months is None:
return
return (dt[:4] + '-' +
'%02d' % ((int(dt[5:7]) - 1) // n_months * n_months + 1))
def get_distribs(key_field, get_cat, n_months, weight=None):
    """Read data/sample-survey-v2 and return {(key, month): {cat: weighted share}}.

    key_field: column index used as the grouping key (4 = start sig, 7 = end sig
               per the __main__ block below)
    get_cat:   callable mapping a row (list of tab-separated fields) to a label
    n_months:  month-bucket size passed to _norm_date (None = no date grouping)
    weight:    optional {(key, month): weight} mapping; defaults to weight 1
    """
    # Group survey by status (cat), sig (key) and date group
    distrs = defaultdict(Counter)
    for l in open('data/sample-survey-v2'):
        l = l.rstrip('\r\n').split('\t')
        dt = _norm_date(l[MONTH_FIELD], n_months)
        distrs[l[key_field], dt][get_cat(l)] += 1
    if weight is None:
        get_weight = lambda k: 1
    else:
        get_weight = weight.get
    for k in distrs:
        distr = distrs[k]
        w = get_weight(k) or 0 # HACK due to dirty data?
        total = sum(distr.values())
        # normalise counts to per-key proportions, then scale by the weight
        distrs[k] = {c: w * n / total
                     for c, n in distr.items()}
    return distrs
def get_sig_weights(n_months):
    """Read data/url-sig-frequencies.txt into {(sig, month): total count}.

    Rows with too few columns or a non-numeric count are silently skipped.
    """
    # Get overall frequency for each key and date
    sig_weight = defaultdict(int)
    for l in open('data/url-sig-frequencies.txt'):
        l = l.rstrip('\r\n').split('\t')
        try:
            sig_weight[l[2], _norm_date(l[1], n_months)] += int(l[0])
        except (IndexError, ValueError):
            # Dirty data
            pass
    # freeze the defaultdict: later missing-key lookups raise instead of
    # silently inserting zeros
    sig_weight.default_factory = None
    return sig_weight
def _sig_to_domain(sig):
    # Reduce a URL signature to its host part (text before the first '/'),
    # with subdomains removed via likeable.cleaning.strip_subdomains.
    return strip_subdomains(sig.split('/')[0])
def regroup_by_domain(distrs):
    """Aggregate a {(sig, month): {cat: n}} mapping by domain instead of sig,
    summing category counts for signatures that share a domain."""
    out = defaultdict(lambda: defaultdict(float))
    for (k, m), distr in distrs.iteritems():
        for c, n in distr.iteritems():
            out[_sig_to_domain(k), m][c] += n
    return out
def get_all_cats(distrs):
    """Return the sorted union of category labels across all distributions."""
    cats = set()
    for dist in distrs.values():
        cats.update(dist)
    return sorted(cats)
if __name__ == '__main__':
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('-m', '--month-quant', type=int,
                    help='Group this many months together (default, all time)')
    ap.add_argument('--by-sig', default=False, action='store_true')
    ap.add_argument('--use-end-sig', default=False, action='store_true',
                    help='Calculates status on the basis of likely canonical '
                         'URL signature')
    cat_opts = {
        'status-binary': get_status_binary,
        'status-group': get_status_group,
    }
    ap.add_argument('-c', '--cats', choices=cat_opts.keys(),
                    default='status-binary')
    args = ap.parse_args()
    n_months = getattr(args, 'month_quant', None)
    if n_months is not None and 12 % n_months != 0:
        ap.error('--month-quant (-m) must divide into 12')
    sig_weight = get_sig_weights(n_months)
    key_field = 4 # start sig
    if args.use_end_sig:
        # First pass keyed on start sig but categorised by end sig (column 7):
        # redistributes start-sig weights onto end sigs.
        tmp = get_distribs(key_field, operator.itemgetter(7), n_months,
                           weight=sig_weight)
        sig_weight = defaultdict(float)
        for (start_sig, mo), distr in tmp.iteritems():
            for end_sig, n in distr.iteritems():
                sig_weight[end_sig, mo] += n
        key_field = 7 # end sig
    distrs = get_distribs(key_field, cat_opts[args.cats], n_months,
                          weight=sig_weight)
    if not args.by_sig:
        distrs = regroup_by_domain(distrs)
    # output: TSV header, then one row per (key, month) with a column per category
    all_cats = get_all_cats(distrs)
    print('key', 'month', *all_cats, sep='\t')
    for k, v in sorted(distrs.iteritems()):
        k = list(k)
        k.extend(v.get(c, 0) for c in all_cats)
        print(*k, sep='\t')
| [
"joel.nothman@gmail.com"
] | joel.nothman@gmail.com |
2f53bc6629ea95db819766f774d79c7e3a51d91a | f9e4d2efc7dc468ecb7f5a911b7094a20f101cef | /test_client.py | 8dbce25481e82c34f019a12b2996b45389ee0387 | [
"MIT"
] | permissive | afinello/simple-echo-server | 2542fd5fb984f4ebeaf182a6993a0c61944b0171 | 2d3bdd6f747b1590b7a4998eb12eb607c456c388 | refs/heads/master | 2021-01-17T17:48:00.131892 | 2016-08-04T20:14:47 | 2016-08-04T20:14:47 | 64,962,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | import socket
import time
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('localhost', 8000)
print 'connecting to %s port %s' % server_address
sock.connect(server_address)
# Send 'hello' twice with a 1s pause, echoing whatever the server returns.
# (Python 2 script: print statements and string sockets.)
for x in range(0, 2):
    try:
        # Send data
        sock.sendall('hello')
        time.sleep(1)
        # read up to 16 bytes of the echoed response
        response = sock.recv(16)
        print 'received "%s"' % response
    except Exception, e:
        # NOTE(review): the exception `e` is swallowed and never logged
        print 'error'
print 'closing socket'
sock.close()
| [
"vladimir@ugdsoft.com"
] | vladimir@ugdsoft.com |
419c135745f7a72485a118020083ef3248d3938a | 09d7f50dfd30830331db84fb1d157f3c9dab93e6 | /Project/webapi_carpark.py | b7f5e93f24c29d11a8353497c9b3e4493d854b35 | [] | no_license | JarrenLing/T2020_56 | b9eb30e5a13024bbaef5333da3e3ec944329a34e | e259a5f4bd8f93bb446853be34cc9217d9f9a93a | refs/heads/master | 2020-12-05T19:14:53.369964 | 2020-01-07T08:48:55 | 2020-01-07T08:48:55 | 232,219,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | import json
import requests
## insert headers ## edit
api_headers = {
'identity': '',
'token': ''}
## EXTRA TO TEST
def getCarparkAvailability(timestamp):
    """Query data.gov.sg for carpark availability at `timestamp`.

    Returns the decoded JSON payload on HTTP 200, otherwise None.
    Per the original author's notes, the payload is shaped roughly as:
    items[0].timestamp and items[0].carpark_data[*].{carpark_number,
    update_time, carpark_info[0].{total_lots, lots_type, lots_available}}.
    """
    api_url = f"https://api.data.gov.sg/v1/transport/carpark-availability?date_time={timestamp}"
    response = requests.get(api_url) #to add api's credentials => headers, use the code below
    # response = requests.get(api_url, headers=api_headers)
    if response.status_code == 200:
        data = json.loads(response.content.decode('utf-8'))
        return data
    else:
        return None
| [
"59390223+kanxueyu@users.noreply.github.com"
] | 59390223+kanxueyu@users.noreply.github.com |
9d471af7f1259174caefe83c717d42cb66921a1e | 57428eea85a3fdd969b2220e274c58d26e4f3723 | /boards/models.py | c22ead30f116ad2c02d349ce7ccb0f7cbe19123b | [] | no_license | sjagz/django-board | 7e6a45635fc92aabe9a92d53d34f67c755b33f0e | 85bc03d5c36b1f6e8a3adf0eab481991d4e5972c | refs/heads/master | 2020-06-02T18:51:08.306313 | 2019-06-11T04:15:05 | 2019-06-11T04:15:05 | 191,272,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | from django.db import models
# Create your models here.
class Board(models.Model):
    """A simple bulletin-board post with automatic created/updated timestamps."""
    title = models.CharField(max_length=20)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    # NOTE: field name kept as `update_at` (not `updated_at`) to preserve the
    # existing database schema; refreshed on every save()
    update_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # show the post title in admin/shell listings instead of "Board object"
        return self.title
| [
"sjagz@naver.com"
] | sjagz@naver.com |
15a77dadb550839e5ca94b8ed297d16bc7419521 | cd9b1ce687f42e7827f36d82511061ba1737c1b2 | /data_manipulation.py | 0d45e821c7bf0682006ee002b64be63676bf4671 | [
"MIT"
] | permissive | johannesCmayer/pt-kana | c52ef5b5641db0b18ffdf39f245126a4c7265ebd | 6f63bd4918fb517d4df249388b59d0d53583f70b | refs/heads/master | 2021-11-24T22:45:55.693347 | 2021-11-13T22:15:35 | 2021-11-13T22:15:35 | 230,698,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import json
hiragana = json.load(open('Vocabularies/hiragana.json'))
katakana = json.load(open('Vocabularies/katakana.json'))
class Instance:
    """One vocabulary entry as serialized by this script (via ``__dict__``):
    a kana `target`, its `translation`, a `frequency` counter (0 here), and
    `correct` / `incorrect` lists — presumably per-answer histories, TODO
    confirm against the consuming application."""
    def __init__(self, target, translation, frequency, correct, incorrect):
        self.target = target
        self.translation = translation
        self.frequency = frequency
        self.correct = correct
        self.incorrect = incorrect
# Build one interleaved list of hiragana/katakana entry dicts: the pair at
# the same position in each source file is emitted back-to-back.
l = []
for (hk, hv), (kk, kv) in zip(hiragana.items(), katakana.items()):
    l.append(Instance(hk, hv, 0, [], []).__dict__)
    l.append(Instance(kk,kv,0, [], []).__dict__)
print(f"generated file with {len(l)} entries")
# pretty-printed JSON dump, written to disk below
tdump = json.dumps(l,indent=4)
with open("Vocabularies/alternating_kana_base.json", 'w') as f:
f.write(tdump) | [
"j.c.mayer240@gmail.com"
] | j.c.mayer240@gmail.com |
d4424b5a3b9e077cf273fd0ed1f826519a486487 | 55479d965ec930760882aa019fa756fee706fdc9 | /mysite/polls/views.py | cdb8fd46af86937b6c7f8056b8c6503aeea9cde8 | [] | no_license | 5-223/Temp | 7171a6d3f9da3981c7f9fd49338b73063cde2ae9 | 34f0db366f4866d3fa03372b2d7e384cc3965f41 | refs/heads/master | 2016-09-01T16:02:01.062246 | 2015-10-28T04:11:58 | 2015-10-28T04:11:58 | 44,722,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Greet the caller by the `name` query parameter, e.g. /?name=Ann.

    Fixes: (1) a missing ?name= no longer raises TypeError (falls back to
    'world'); (2) the closing tag was '<h1>' instead of '</h1>';
    (3) the query value is HTML-escaped to prevent reflected XSS.
    """
    from django.utils.html import escape  # local import: keeps file header untouched
    name = escape(request.GET.get('name', 'world'))
    return HttpResponse('<h1>Hello ' + name + '!</h1>')
| [
"chendong0316@qq.com"
] | chendong0316@qq.com |
d2abdbabe6451af772fc9e62d5c5cd8be96f3efe | a0a4724a0b3a9ac232c147a125c1249707bfd324 | /ANIMEBANNER.PY | f8dfde29230fd505d4d44c777f836a948c8daeb9 | [] | no_license | RITESHMOHAPATRA/ANIME-BANNER | 305f988e3eba589add4bddaed45cd9ec852110cf | 4cdaa53288c345e66a3409ff7fae87661fc95494 | refs/heads/master | 2021-01-20T18:02:03.590784 | 2016-07-02T04:44:56 | 2016-07-02T04:44:56 | 62,434,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | import os
import time
#width of the console
WIDTH = 79
message = "HELLO"
#this is a 7-line display, stored as 7 strings
#initially, these are empty.
printedMessage = [ "","","","","","","" ]
#a dictionary mapping letters to their 7-line
#banner display equivalents. each letter in the dictionary
#maps to 7 strings, one for each line of the display.
characters = { " " : [ " ",
" ",
" ",
" ",
" ",
" ",
" " ],
"E" : [ "*****",
"* ",
"* ",
"*****",
"* ",
"* ",
"*****" ],
"H" : [ "* *",
"* *",
"* *",
"*****",
"* *",
"* *",
"* *" ],
"O" : [ "*****",
"* *",
"* *",
"* *",
"* *",
"* *",
"*****" ],
"L" : [ "* ",
"* ",
"* ",
"* ",
"* ",
"* ",
"*****" ],
"!" : [ " * ",
" * ",
" * ",
" * ",
" * ",
" ",
" * " ]
}
# Render the banner: for each of the 7 display rows, append every message
# character's row-th slice (plus a separating space).
for row in range(7):
    for char in message:
        printedMessage[row]+=(str(characters[char][row])+" ")
# offset is how far to the right we want to print
offset = WIDTH
# Animation loop: clear the console, print the 7 rows shifted one column
# further left each frame, and wrap around once the text scrolls off-screen.
while True:
    os.system("cls")  # Windows clear-screen; POSIX would need "clear"
    for row in range(7):
        print(" " * offset + printedMessage[row][max(0,offset*-1):WIDTH - offset])
    # move the message a little to the left.
    offset -=1
    # if the entire message has moved 'through' the display then
    # start again from the right hand side.
    if offset <= ((len(message)+2)*6) * -1:
        offset = WIDTH
    # take out or change this line to speed up / slow down the display
    time.sleep(0.1)
| [
"noreply@github.com"
] | RITESHMOHAPATRA.noreply@github.com |
495ba133d20be9696a894db3f3accc2f2fd82015 | 326c6ad82d59bb7509c02c76695ea9035993da70 | /lib/modules/powershell/lateral_movement/invoke_psremoting.py | 4680387727765b745328f6c6d9f005817ee6c58e | [
"BSD-3-Clause"
] | permissive | Arvanaghi/Empire | 0c08bd7ddfba9be10e96bb0834b8ce3bc829059b | fd168ebf8acb1c2ee59d56f2c393ebd7a297603e | refs/heads/master | 2021-01-20T14:15:34.864581 | 2017-08-05T17:51:44 | 2017-08-05T17:51:44 | 99,435,848 | 2 | 0 | null | 2017-08-05T16:50:16 | 2017-08-05T16:50:16 | null | UTF-8 | Python | false | false | 5,441 | py | from lib.common import helpers
class Module:
    """Empire module: executes a stager on remote hosts via PowerShell Remoting."""

    def __init__(self, mainMenu, params=None):
        """Build the module's metadata/options tables and apply `params`.

        params: optional list of [Name, Value] pairs overriding option values.
        BUG FIX: previously defaulted to a mutable `[]` shared across calls.
        """
        # static metadata describing this module to the framework
        self.info = {
            'Name': 'Invoke-PSRemoting',
            'Author': ['@harmj0y'],
            'Description': 'Executes a stager on remote hosts using PSRemoting.',
            'Background': False,
            'OutputExtension': None,
            'NeedsAdmin': False,
            'OpsecSafe': True,
            'Language': 'powershell',
            'MinLanguageVersion': '2',
            'Comments': []
        }

        # any options needed by the module, settable during runtime
        # format: option_name : {Description, Required, Value}
        self.options = {
            'Agent': {
                'Description': 'Agent to run module on.',
                'Required': True,
                'Value': ''
            },
            'CredID': {
                'Description': 'CredID from the store to use.',
                'Required': False,
                'Value': ''
            },
            'ComputerName': {
                'Description': 'Host[s] to execute the stager on, comma separated.',
                'Required': True,
                'Value': ''
            },
            'Listener': {
                'Description': 'Listener to use.',
                'Required': True,
                'Value': ''
            },
            'UserName': {
                'Description': '[domain\]username to use to execute command.',
                'Required': False,
                'Value': ''
            },
            'Password': {
                'Description': 'Password to use to execute command.',
                'Required': False,
                'Value': ''
            },
            'UserAgent': {
                'Description': 'User-agent string to use for the staging request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            },
            'Proxy': {
                'Description': 'Proxy to use for request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            },
            'ProxyCreds': {
                'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            }
        }

        # save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in (params or []):
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell Invoke-Command launcher string.

        Returns '' when the CredID or Listener is invalid, or when the
        launcher one-liner could not be generated.
        """
        listenerName = self.options['Listener']['Value']
        userAgent = self.options['UserAgent']['Value']
        proxy = self.options['Proxy']['Value']
        proxyCreds = self.options['ProxyCreds']['Value']
        userName = self.options['UserName']['Value']
        password = self.options['Password']['Value']

        script = """Invoke-Command """

        # if a credential ID is specified, try to parse
        credID = self.options["CredID"]['Value']
        if credID != "":
            if not self.mainMenu.credentials.is_credential_valid(credID):
                print(helpers.color("[!] CredID is invalid!"))
                return ""
            # unpack the stored credential and override UserName/Password
            (credID, credType, domainName, userName, password, host, os, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
            self.options["UserName"]['Value'] = str(domainName) + "\\" + str(userName)
            self.options["Password"]['Value'] = password

        if not self.mainMenu.listeners.is_listener_valid(listenerName):
            # not a valid listener, return nothing for the script
            print(helpers.color("[!] Invalid listener: " + listenerName))
            return ""

        # generate the PowerShell one-liner with all of the proper options set
        launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
        if launcher == "":
            return ""

        # build the PSRemoting execution string
        computerNames = "\"" + "\",\"".join(self.options['ComputerName']['Value'].split(",")) + "\""
        script += " -ComputerName @(" + computerNames + ")"
        script += " -ScriptBlock {" + launcher + "}"

        if self.options["UserName"]['Value'] != "" and self.options["Password"]['Value'] != "":
            # wrap with an explicit PSCredential when username+password are set
            script = "$PSPassword = \"" + password + "\" | ConvertTo-SecureString -asPlainText -Force;$Credential = New-Object System.Management.Automation.PSCredential(\"" + userName + "\",$PSPassword);" + script + " -Credential $Credential"

        script += ";'Invoke-PSRemoting executed on " + computerNames + "'"
        return script
| [
"will@harmj0y.net"
] | will@harmj0y.net |
525222b445edde992f532d392b7b999c4878eb19 | 1ca7830dc88d22a3335a1beff4083c2e75994512 | /NEGF_global/yaml_file_loader.py | b0cf147faa88dd5e0223d5f13a51a4265c104034 | [] | no_license | PramitBarua/system_generator | 207080c950cc29ba36fed620ee26929ae3c9afa9 | 70dfd9cbc3a18d0e7885e0da7a768a2567d31651 | refs/heads/master | 2020-03-26T21:18:09.937901 | 2018-08-20T07:07:30 | 2018-08-20T07:07:30 | 145,379,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,848 | py | #!/usr/bin/env python3
__author__ = "Pramit Barua"
__copyright__ = "Copyright 2018, INT, KIT"
__credits__ = ["Pramit Barua"]
__license__ = "INT, KIT"
__version__ = "1"
__maintainer__ = "Pramit Barua"
__email__ = ["pramit.barua@student.kit.edu", "pramit.barua@gmail.com"]
'''
This module's `yaml_file_loader` function loads a YAML file.
Arguments for this function:
    location: directory of the YAML file
    name_yaml: name of the YAML file
General format of the YAML file:
    compulsory inputs:
    left_graphene_sheet_name:
        contains the directory address of the input file (left part)
    left_tube_name:
        contains the directory address of the input file (left part)
    optional inputs:
    middle_tube_name:
        name of the xyz file that contains the Cartesian coordinates
        of each atom of the tube (middle part)
    right_graphene_sheet_name:
        name of the xyz file that contains the Cartesian coordinates
        of each atom of the graphene sheet (right part)
    right_tube_name:
        name of the xyz file that contains the Cartesian coordinates
        of each atom of the tube (right part)
'''
import os
import yaml
def yaml_file_loader(location, name_yaml):
    """Load `name_yaml` from directory `location`.

    Returns (True, contents of the file's 'Input' section) on success,
    or (False, {}) when the file does not exist.
    """
    file = os.path.join(location, name_yaml)
    if not os.path.isfile(file):
        return False, {}
    with open(file, "r") as raw_data:
        # safe_load avoids the arbitrary-object construction permitted by the
        # deprecated bare yaml.load(...) call on untrusted input
        data = yaml.safe_load(raw_data)
    return True, data['Input']
# if os.path.isdir(location):
# f = []
# for (dirpath, dirnames, filenames) in os.walk(location):
# f.extend(filenames)
# break
#
# # ao_visited = False
# for file in f:
# if file == name_yaml:
# with open(os.path.join(location, file), "r") as raw_data:
# data = yaml.load(raw_data)
# return data['Input']
| [
"pramit.barua@gmail.com"
] | pramit.barua@gmail.com |
da5719666baae1fa4ec9893053a5ce2d214aefaa | 6cef1a4ae66288673e7b4ff1e1163f3d38c8f448 | /Homework/Week-6/convertCSV2JSON.py.py | 145bb3ae574aadde6a303d0b1de8b7ea867de012 | [] | no_license | JasperLe/DataProcessing | cb08abfe7f674987a35d92926aafc350d845097f | 2962846cf898da18b4e0ae62f9eaca19eee09715 | refs/heads/master | 2021-01-12T12:29:50.520117 | 2016-12-09T20:36:32 | 2016-12-09T20:36:32 | 72,520,184 | 0 | 0 | null | 2016-11-03T11:08:03 | 2016-11-01T09:03:53 | Python | UTF-8 | Python | false | false | 702 | py | # http://stackoverflow.com/questions/19697846/python-csv-to-json
# modified it only slightly
# auteur: Jasper Lelijveld
# studentnummer: 11401753
# vak: Data Processing
import csv
import json
# input/output files — `with` guarantees both handles are closed, even on error
with open('Data.csv', 'r') as csvfile, open('Data.json', 'w') as jsonfile:
    # define fieldnames
    fieldnames = ("Datum", "GemiddeldeTemperatuur", "MinimumTemperatuur", "MaximumTemperatuur")
    reader = csv.DictReader(csvfile, fieldnames)
    # Dump all rows as a single JSON array.  This fixes the previous output,
    # which had a trailing comma and no surrounding [] (the two TODOs that
    # used to follow the row loop), so Data.json is now valid JSON.
    json.dump(list(reader), jsonfile)
| [
"noreply@github.com"
] | JasperLe.noreply@github.com |
a8beaa562efe2ca1d65bfa9f1bda106bfd3d0042 | 3539b366576adce4bcbe680afdd10884c69ac847 | /logply/job.py | 1fc931a74bcf0ee1a5879b334cf43324f757ad67 | [] | no_license | pdvyas/logply | 81d05820cb0cc8f27478ae5802254aa6f8dfd1ad | 992e3ce5dce067ea4a58e0a96d240f788e9a485c | refs/heads/master | 2021-01-19T13:49:57.367375 | 2013-09-15T15:34:18 | 2013-09-15T15:34:24 | 12,626,137 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import os
import json
from importlib import import_module
from .utils import assert_log_config, get_stage, get_filter_stage
config_module = os.environ.setdefault('LOGPLY_CONFIG_MODULE', 'logply.config')
config = import_module(config_module)
def do():
    """Run every configured log pipeline once: validate each log's config,
    pull items from its 'input' stage, filter them, then hand the filtered
    data to the 'output' stage."""
    for log_name, log_config in config.logs.iteritems():
        assert_log_config(log_config)
        for input_data in get_stage(log_config, 'input'):
            filtered_data = get_filter_stage(log_config, input_data)
            # NOTE(review): abc="abc" looks like leftover debug plumbing and
            # dispatched_data is never used — confirm both are intentional.
            dispatched_data = get_stage(log_config, 'output', obj=filtered_data, abc="abc")
if __name__ == "__main__":
do()
| [
"m@pd.io"
] | m@pd.io |
80f4dd15a5578dce433da65c8728b08ba69322b6 | 7ef3cd519041a3ebb9b23722a05f9fc8b9968f80 | /modules/services/helprelay.py | a48a05b2054ab827beb0018711949a71b95f6498 | [
"Zlib",
"DOC"
] | permissive | joshtek0/cod | c56fd2e97ac12be299cacdae5d82eefd2555fb27 | 42ad65beb1112fe8510ba88922e8a0ee9dfe1319 | refs/heads/master | 2020-12-24T19:18:26.706740 | 2014-06-07T03:34:14 | 2014-06-07T03:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | """
Simple relay from #help to #opers
Code is in public domain
"""
DESC="Relay messages from the help channel to the staff channel"
def initModule(cod):
    # register chanmsgHook to run on every channel message
    cod.addHook("chanmsg", chanmsgHook)
def destroyModule(cod):
    # unregister the hook installed by initModule
    cod.delHook("chanmsg", chanmsgHook)
def chanmsgHook(cod, target, line):
    # Relay messages seen in the configured help channel to the staff channel,
    # formatted as "<helpchan>: <-nick> message".
    if target.name == cod.config["etc"]["helpchan"]:
        cod.privmsg(cod.config["etc"]["staffchan"], "%s: <-%s> %s" %\
            (target.name, line.source.nick, line.args[-1]))
| [
"shadow.h511@gmail.com"
] | shadow.h511@gmail.com |
2a7d20919457da5c9af885882b4d2242941fc14b | a1bbd1c507d96761f55695c22484fe167fd70f30 | /MainScript.py | a9b0c1d2dabb31fc4b131ec58a2ccd71e1d8ee14 | [] | no_license | tel3port/MG3-COMMENT-BOT | d182767976cb0587cf0450bf31338fb36b80cc59 | 33716b55448c3c5718ddb73c1b12851ff83eb247 | refs/heads/master | 2023-05-05T12:14:10.279901 | 2021-05-25T22:40:30 | 2021-05-25T22:40:30 | 249,716,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,402 | py | from selenium import webdriver
from random import randint
import numpy as np
import random
import traceback
import heroku3
import time
from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
import globals as gls
import re
from collections import defaultdict
import requests
from urllib.request import urlparse, urljoin
from bs4 import BeautifulSoup
import colorama
import asyncio
import aiohttp
import os
import uuid
colorama.init()
GREEN = colorama.Fore.GREEN
GRAY = colorama.Fore.LIGHTBLACK_EX
RESET = colorama.Fore.RESET
total_urls_visited = 0
def open_everything():
    """Load every word-list file into a module-level global of the same name.

    Each global becomes a list of stripped lines, exactly as the original 17
    copy-pasted `with open(...)/global X/X = [...]` blocks produced.
    """
    wordlists = {
        'adjectives': "dictionary/adjectives.txt",
        'adverbs': "dictionary/adverbs.txt",
        'comments': "dictionary/comment_list.txt",
        'complements': "dictionary/complements.txt",
        'landers': "dictionary/landers.txt",
        'proverbs': "dictionary/proverbs.txt",
        'STATIC_PHRASES': "dictionary/static_phrase_list.txt",
        'articles': "dictionary/article_synonyms.txt",
        'rants': "dictionary/rant_synonyms.txt",
        'dets': "dictionary/determiners_list.txt",
        'emails': "generated/emails.txt",
        'names': "generated/names.txt",
        'jokes': "dictionary/parsed_jokes.txt",
        'prof': "dictionary/profit_syn.txt",
        'news': "dictionary/new_syn.txt",
        'exp': "dictionary/expound_syn.txt",
        'film': "dictionary/film_syn.txt",
    }
    for global_name, path in wordlists.items():
        with open(path) as word_file:
            # globals()[name] = ... is equivalent to `global name; name = ...`
            # at module scope, and collapses the repeated per-file blocks
            globals()[global_name] = [line.strip() for line in word_file]
# Populate the word-list globals once at import time
open_everything()
# NOTE(review): `global` at module level is a no-op — harmless but misleading
global parsed_links
parsed_links = []
# initialize the set of links (unique links)
internal_urls = set()
external_urls = set()
# Heroku app name used when restarting the bot
wp_bot_name = "wp-mg3-comment-bot"
def is_valid(url):
    """Return True when `url` parses with both a scheme and a network location."""
    pieces = urlparse(url)
    return all((pieces.scheme, pieces.netloc))
def get_all_website_links(url):
    # Returns the set of same-domain URLs discovered on `url`; also mutates
    # the module-level internal_urls / external_urls sets as a side effect.
    # NOTE(review): on any exception this returns False (not a set) — callers
    # that iterate the result (e.g. crawl) would raise TypeError on that path.
    try:
        """
        Returns all URLs that is found on `url` in which it belongs to the same website
        """
        # all URLs of `url`
        urls = set()
        # domain name of the URL without the protocol
        domain_name = urlparse(url).netloc
        soup = BeautifulSoup(requests.get(url).content, "html.parser")
        for a_tag in soup.findAll("a"):
            href = a_tag.attrs.get("href")
            if href == "" or href is None:
                # href empty tag
                continue
            # join the URL if it's relative (not absolute link)
            href = urljoin(url, href)
            parsed_href = urlparse(href)
            # remove URL GET parameters, URL fragments, etc.
            href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
            if not is_valid(href):
                # not a valid URL
                continue
            if href in internal_urls:
                # already in the set
                continue
            if domain_name not in href:
                # external link
                if href not in external_urls:
                    print(f"{GRAY}[!] External link: {href}{RESET}")
                    external_urls.add(href)
                continue
            print(f"{GREEN}[*] Internal link: {href}{RESET}")
            urls.add(href)
            internal_urls.add(href)
    except Exception as e:
        print(e)
        return False
    return urls
def crawl(url, max_urls=130):
    """
    Crawls a web page and extracts all links.
    You'll find all links in `external_urls` and `internal_urls` global set variables.
    params:
        max_urls (int): number of max urls to crawl, default is 130.
    """
    global total_urls_visited
    total_urls_visited += 1
    # NOTE(review): get_all_website_links returns False on error, which would
    # make the `for` below raise TypeError — confirm error handling upstream.
    links = get_all_website_links(url)
    # depth-first: recurse into each discovered internal link until the
    # global visit budget is exhausted
    for link in links:
        print(f"total_urls_visited:{total_urls_visited} -- max_urls:{max_urls}")
        if total_urls_visited > max_urls:
            break
        crawl(link, max_urls=max_urls)
async def if_comment_box_exists(url):
    """Fetch `url` and, when its HTML matches any comment-form marker, append
    the URL to FINAL_URL_LIST.txt and return True.

    Returns False on a request error; implicitly returns None otherwise.
    """
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                html_text = await resp.read()
                # NOTE(review): all tests are OR'd, so the
                # `"captcha" not in ...` clause makes this true for nearly
                # every page that merely lacks the word "captcha" — it was
                # probably intended to be AND'd.  Confirm before relying on
                # this filter.
                if "comment-form-field" in str(html_text) \
                        or "comments-area" in str(html_text) \
                        or "comment-form" in str(html_text) \
                        or "jetpack_remote_comment" in str(html_text) \
                        or "reply-title" in str(html_text) \
                        or "captcha" not in str(html_text) \
                        or "comment-respond" in str(html_text):
                    with open("EXTRACTOR/extracted/FINAL_URL_LIST.txt", "a") as new_urls_file:
                        print(url.strip(), file=new_urls_file)
                    return True
                else:
                    pass
                    # with open("extracted/x.txt", "a") as new_urls_file:
                    #     print(url.strip(), file=new_urls_file)
    except Exception as e:
        print(e)
        return False
async def parse_tasks(url):
    # thin wrapper so each URL check can be scheduled as its own asyncio task
    await if_comment_box_exists(url)
def int_checker(url):
    """Return False when the second-to-last '/'-separated segment of `url`
    is longer than 4 characters, True otherwise (used to spot short, likely
    numeric path segments)."""
    segments = url.split('/')
    penultimate = segments[len(segments) - 2]
    return len(penultimate) <= 4
async def main():
    """Schedule a comment-box check for every URL in internal_links.txt and
    wait for all of them to finish.

    NOTE(review): relies on a global `loop` (asyncio event loop) that is not
    defined in this file's visible portion — confirm it is set by the caller.
    """
    tasks = []
    for url in (open("EXTRACTOR/extracted/internal_links.txt").readlines()):
        t = loop.create_task(parse_tasks(url))
        tasks.append(t)
    await asyncio.wait(tasks)
def create_append_text_file(extd_links, my_uuid):
    """Write the extracted links (one per line, stripped) to a per-run
    static_url_list_<uuid>.txt file — only when that file does not already exist."""
    if not os.path.exists(f'./EXTRACTOR/urls/static_url_list_{my_uuid}.txt'):
        with open(f'./EXTRACTOR/urls/static_url_list_{my_uuid}.txt', 'a') as final_urls_list_file:
            for single_lk in extd_links:
                print(single_lk.strip(), file=final_urls_list_file)
def soft_file_cleanup():
    """Truncate the per-blog scratch files (internal/external link lists)."""
    for scratch_path in ('EXTRACTOR/extracted/internal_links.txt',
                         'EXTRACTOR/extracted/external_links.txt'):
        open(scratch_path, 'w').close()
def hard_file_cleanup():
    """Truncate every extraction scratch file, including the blog list
    and the final URL list (full reset between crawl cycles)."""
    for scratch_path in ('EXTRACTOR/extracted/internal_links.txt',
                         'EXTRACTOR/extracted/external_links.txt',
                         'EXTRACTOR/extracted/blog_link_file.txt',
                         'EXTRACTOR/extracted/FINAL_URL_LIST.txt'):
        open(scratch_path, 'w').close()
def random_static_url_path():
    """Pick one file at random from EXTRACTOR/urls and return its
    relative path (without the leading './')."""
    candidates = os.listdir('./EXTRACTOR/urls')
    chosen = candidates[randint(0, len(candidates) - 1)]
    return f'EXTRACTOR/urls/{chosen}'
class CommentsBot:
    """Selenium-driven bot that finds WordPress blog posts and submits
    generated comments through their comment forms.

    Relies on several module-level resources: the `gls` helper (sleep
    timing), the word lists (adjectives, adverbs, comments, ...), the
    global `extracted_post` string for the Markov generator, and
    environment variables GOOGLE_CHROME_BIN / CHROMEDRIVER_PATH.
    """
    def __init__(self, bot_name, my_proxy):
        # bot_name: Heroku app name used by restart_application().
        # my_proxy: proxy object exposing get_address().
        self.my_proxy = my_proxy
        self.bot_name = bot_name
        chrome_options = webdriver.ChromeOptions()
        chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
        chrome_options.add_argument("--incognito")
        # NOTE(review): presumably meant to be "--disable-dev-shm-usage";
        # "--disable-dev-sgm-usage" is not a known Chrome flag — confirm.
        chrome_options.add_argument("--disable-dev-sgm-usage")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--start-maximized")
        chrome_options.add_argument("--disable-gpu")
        # Hide the "Chrome is being controlled by automated test software" bar.
        chrome_options.add_experimental_option("excludeSwitches", ['enable-automation'])
        # 2 == block image loading (saves bandwidth through the proxy).
        prefs = {"profile.managed_default_content_settings.images": 2}
        chrome_options.add_experimental_option("prefs", prefs)
        my_proxy_address = self.my_proxy.get_address()
        # Route all traffic through the supplied proxy.
        webdriver.DesiredCapabilities.CHROME['proxy'] = {
            "httpProxy": my_proxy_address,
            "ftpProxy": my_proxy_address,
            "sslProxy": my_proxy_address,
            "proxyType": "MANUAL",
        }
        # self.driver = webdriver.Chrome(executable_path='./chromedriver', options=chrome_options)
        chrome_options.add_argument("--headless")
        self.driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), options=chrome_options)
        print("my ip address", my_proxy_address)
    def restart_application(self):
        """Restart this bot's Heroku app (used to recover from dead
        selenium sessions and to rotate state between cycles)."""
        # NOTE(review): hard-coded Heroku API key committed in source —
        # should live in an environment variable.
        heroku_conn = heroku3.from_key('b477d2e0-d1ba-48b1-a2df-88d87db973e7')
        app = heroku_conn.apps()[self.bot_name]
        app.restart()
    def wp_post_getter(self):
        """Return the text of the current page's WordPress post body
        (the div.entry-content element)."""
        data = self.driver.page_source
        soup = BeautifulSoup(data, "html.parser")
        article = soup.find('div', class_="entry-content")
        # NOTE(review): raises AttributeError if the div is absent.
        return article.text
    @staticmethod
    def markov_script():
        """Generate a ~35-word sentence via a weighted random walk over a
        first-order Markov chain built from the global `extracted_post`."""
        # Tokenize on non-word characters, dropping empty tokens.
        tokenized_text = [
            word
            for word in re.split('\W+', extracted_post)
            if word != ''
        ]
        # Create graph.
        # markov_graph[a][b] == number of times word b followed word a.
        markov_graph = defaultdict(lambda: defaultdict(int))
        last_word = tokenized_text[0].lower()
        for word in tokenized_text[1:]:
            word = word.lower()
            markov_graph[last_word][word] += 1
            last_word = word
        # Preview graph.
        limit = 3
        for first_word in ('the', 'by', 'who'):
            next_words = list(markov_graph[first_word].keys())[:limit]
            # for next_word in next_words:
            #     print(first_word, next_word)
        def walk_graph(graph, distance=5, start_node=None):
            """Returns a list of words from a randomly weighted walk."""
            if distance <= 0:
                return []
            # If not given, pick a start node at random.
            if not start_node:
                start_node = random.choice(list(graph.keys()))
            weights = np.array(
                list(markov_graph[start_node].values()),
                dtype=np.float64)
            # Normalize word counts to sum to 1.
            weights /= weights.sum()
            # Pick a destination using weighted distribution.
            choices = list(markov_graph[start_node].keys())
            chosen_word = np.random.choice(choices, None, p=weights)
            return [chosen_word] + walk_graph(
                graph, distance=distance - 1,
                start_node=chosen_word)
        generated_sentence = f"{' '.join(walk_graph(markov_graph, distance=35))}... "
        return generated_sentence
    def blog_extractor(self):
        """Search wordpress.com for a random tag from search_terms.txt and
        append every found blog URL to blog_link_file.txt."""
        with open("EXTRACTOR/extracted/search_terms.txt") as search_terms_file:
            search_terms = [line.strip() for line in search_terms_file]
        # NOTE(review): `- 2` excludes the last term from selection —
        # confirm whether that is intentional (randint is inclusive).
        random_search_term = search_terms[randint(0, len(search_terms) - 2)]
        print(f"searching for blogs on: {random_search_term}")
        self.driver.get(f'https://en.wordpress.com/tag/{random_search_term}/')
        data = self.driver.page_source
        soup = BeautifulSoup(data, "html.parser")
        links_list = soup.find_all('a', class_="blog-url")
        for single_link in links_list:
            link = single_link['href']
            with open("EXTRACTOR/extracted/blog_link_file.txt", "a") as new_blogs_file:
                print(link.strip(), file=new_blogs_file)
    @staticmethod
    def response_generator():
        """Assemble one comment string from random picks out of the
        module-level word/phrase lists, then choose one of six templates."""
        random_adj = adjectives[randint(0, len(adjectives) - 1)]
        random_adv = adverbs[randint(0, len(adverbs) - 1)]
        random_comm = comments[randint(0, len(comments) - 1)]
        random_comp = complements[randint(0, len(complements) - 1)]
        random_det = dets[randint(0, len(dets) - 1)]
        random_lander = landers[randint(0, len(landers) - 1)]
        random_prov = proverbs[randint(0, len(proverbs) - 1)]
        random_phrase = STATIC_PHRASES[randint(0, len(STATIC_PHRASES) - 1)]
        random_article_syn = articles[randint(0, len(articles) - 1)]
        random_joke = jokes[randint(0, len(jokes) - 1)]
        random_prof = prof[randint(0, len(prof) - 1)]
        random_new = news[randint(0, len(news) - 1)]
        random_exp = exp[randint(0, len(exp) - 1)]
        random_film = film[randint(0, len(film) - 1)]
        random_rant_syn = rants[randint(0, len(rants) - 1)]
        first_segment = f"{random_det} {random_article_syn} is {random_adv} {random_adj}!"
        # last_segment = f"My {random_new} {random_prof} {random_rant_syn.upper()} at my site {random_exp}"
        # last_segment = f' We are making a <a href="https://afflat3d1.com/lnk.asp?o=18224&c=918277&a=242672&k=3FDF02DD1551319D6CB62C5CED1B7762&l=19426"> {random_film}</a> about this. Book for free HERE!'
        last_segment = f' We are making a {random_film} about this. Book for free HERE! -> https://surprise-me-playlists.herokuapp.com/'
        # generated_sentence = self.markov_script()
        # Markov generation currently disabled; templates use empty filler.
        generated_sentence = ""
        markov_comment = f'{first_segment.capitalize()} {generated_sentence.capitalize()}. {last_segment.capitalize()}'
        final_comment = f"{random_comm.capitalize()} {generated_sentence.capitalize()}. \n {last_segment.capitalize()} "
        final_complement = f" {random_comp.capitalize()}. {generated_sentence.capitalize()}. \n {last_segment.capitalize()}"
        final_prov = f"You know what they say: {random_prov.capitalize()}. {generated_sentence.capitalize()} {last_segment.capitalize()}"
        final_phrase = f"The author should know: {random_phrase.capitalize()}. {generated_sentence.capitalize()}\n {last_segment.capitalize()}"
        final_joke = f"This post makes me remember a bad joke: {random_joke.capitalize()}. {generated_sentence.capitalize()} {last_segment.capitalize()}"
        response_list = [markov_comment, final_comment, final_complement, final_prov, final_phrase, final_joke]
        return response_list[randint(0, len(response_list) - 1)]
    @staticmethod
    def random_email_getter():
        # Random pick from the module-level email list.
        return emails[randint(0, len(emails) - 1)]
    @staticmethod
    def random_name_getter():
        # Random pick from the module-level author-name list.
        return names[randint(0, len(names) - 1)]
    @staticmethod
    def random_lander_getter():
        # Random pick from the module-level landing-page URL list.
        return landers[randint(0, len(landers) - 1)]
    def jetpack_frame_finder(self):
        """Return the Jetpack remote-comment iframe element on the current
        page, or None if it is not present."""
        comment_frame_xpath = '//*[@id="jetpack_remote_comment"]'
        jetpack_frame = None
        try:
            jetpack_frame = self.driver.find_element_by_xpath(comment_frame_xpath)
        except Exception as e:
            print(e)
        return jetpack_frame
    def comment_submit_finder(self):
        """Return the element with id 'comment-submit', or None."""
        submit_xpath = '//*[@id="comment-submit"]'
        xpath_element = None
        try:
            xpath_element = self.driver.find_element_by_xpath(submit_xpath)
        except Exception as e:
            print(e)
        return xpath_element
    def submit_finder(self):
        """Return the element with id 'submit', or None."""
        submit_xpath = '//*[@id="submit"]'
        xpath_element = None
        try:
            xpath_element = self.driver.find_element_by_xpath(submit_xpath)
        except Exception as e:
            print(e)
        return xpath_element
    def fl_comment_finder(self):
        """Return the element with id 'fl-comment-form-submit', or None."""
        submit_xpath = '//*[@id="fl-comment-form-submit"]'
        xpath_element = None
        try:
            xpath_element = self.driver.find_element_by_xpath(submit_xpath)
        except Exception as e:
            print(e)
        return xpath_element
    def comment(self, random_post_url, random_author, random_email, random_website):
        """Open *random_post_url*, fill its comment form with a generated
        comment plus the given author name/email, and submit it.

        Handles three form variants (Jetpack iframe, standard WordPress,
        fl-comment) and dismisses cookie/policy banners along the way.
        Swallows all errors; on a dead selenium session it triggers
        clean_up() to restart the app.  `random_website` is currently
        unused (the URL field block is commented out).
        """
        policy_xpath = '//*[@type="submit"]'
        comment_xpath = '//*[@id="comment"]'
        author_xpath = '//*[@id="author"]'
        email_xpath = '//*[@id="email"]'
        url_xpath = '//*[@id="url"]'
        print(f'POST BEING WORKED ON: {random_post_url}')
        # Randomly vary author-name casing to look less uniform.
        random_num = randint(1, 100)
        if random_num % 2 == 0:
            author_name = random_author.capitalize()
        elif random_num % 3 == 0:
            author_name = random_author.upper()
        else:
            author_name = random_author
        try:
            self.driver.get(random_post_url)
            time.sleep(5)
            global extracted_post
            # extracted_post = self.wp_post_getter()
            random_comment = self.response_generator()
            time.sleep(10)
            # Best-effort dismissal of a cookie/consent banner (two variants).
            try:
                gls.sleep_time()
                policy_element = self.driver.find_element_by_class_name('accept')
                gls.sleep_time()
                policy_element.click()
            except Exception as e:
                print("policy click error ", str(e))
            try:
                gls.sleep_time()
                policy_element = self.driver.find_element_by_xpath(policy_xpath)
                gls.sleep_time()
                policy_element.click()
            except Exception as e:
                print("policy click error ", str(e))
            # Jetpack renders the comment form inside an iframe; switch into
            # it when present, otherwise scroll the inline form into view.
            if self.jetpack_frame_finder() is not None:
                gls.sleep_time()
                self.driver.switch_to.frame('jetpack_remote_comment')
                gls.sleep_time()
            else:
                # scroll to element
                gls.sleep_time()
                try:
                    self.driver.execute_script("arguments[0].scrollIntoView();", self.driver.find_element_by_xpath(url_xpath))
                    gls.sleep_time()
                except Exception as x:
                    print(x)
            self.driver.find_element_by_xpath(comment_xpath).send_keys(random_comment)
            gls.sleep_time()
            self.driver.find_element_by_xpath(author_xpath).send_keys(f'{author_name}')
            gls.sleep_time()
            self.driver.find_element_by_xpath(email_xpath).send_keys(random_email)
            # try:
            #     gls.sleep_time()
            #     self.driver.find_element_by_xpath(url_xpath).send_keys(random_website)
            #     gls.sleep_time()
            # except Exception as ex:
            #     print("url loader error: ", str(ex))
            self.driver.execute_script("window.scrollBy(0,150)", "")
            gls.sleep_time()
            # Try the three known submit-button variants in order.
            submit_element_1 = self.comment_submit_finder()  # '//*[@id="comment-submit"]'
            submit_element_2 = self.submit_finder()  # '//*[@id="submit"]'
            submit_element_3 = self.fl_comment_finder()  # '//*[@id="fl-comment-form-submit"]'
            if submit_element_1 is not None:
                gls.sleep_time()
                submit_element_1.click()
                gls.sleep_time()
            elif submit_element_2 is not None:
                gls.sleep_time()
                submit_element_2.click()
                gls.sleep_time()
            elif submit_element_3 is not None:
                gls.sleep_time()
                submit_element_3.click()
                gls.sleep_time()
        except Exception as em:
            print(f'comment Error occurred with url: {random_post_url} ' + str(em))
            print(traceback.format_exc())
            # A dead browser session is unrecoverable in-process.
            if 'invalid session id' in str(em):
                self.clean_up()
        finally:
            print("comment() done")
    def clean_up(self):
        """Pause for a random interval, drop cookies, and restart the
        Heroku app (fresh browser + fresh proxy on next boot)."""
        t = randint(50, 200)
        print(f"clean up sleep for {t} seconds")
        time.sleep(t)
        self.driver.delete_all_cookies()
        self.restart_application()
if __name__ == "__main__":
    # Main driver loop: each cycle picks a fresh proxy, scrapes blog URLs,
    # crawls them for commentable posts, then posts comments until the
    # random per-cycle quota is reached.
    while 1:
        time.sleep(5)
        count = 0
        # Comments to post this cycle before forcing a clean_up / new IP.
        random_cycle_nums = randint(300, 700)
        req_proxy = RequestProxy()  # you may get different number of proxy when you run this at each time
        proxies = req_proxy.get_proxy_list()  # this will create proxy list
        random_proxy = proxies[randint(0, len(proxies) - 1)]
        bot = CommentsBot(wp_bot_name, random_proxy)
        # Collect blog URLs from 10 random wordpress.com tag searches.
        for _ in range(10):
            bot.blog_extractor()
        # ===============LOOPS THRU EACH BLOG AND EXTRACTS ALL INTERNAL AND EXTERNAL URLS========================
        try:
            with open(f"EXTRACTOR/extracted/blog_link_file.txt", "r") as blog_list_file:
                main_blog_list = [line.strip() for line in blog_list_file]
            blog_list_set = set(main_blog_list)
            for single_blog in blog_list_set:
                # initialize the set of links (unique links)
                internal_urls = set()
                external_urls = set()
                internal_urls.clear()
                external_urls.clear()
                # os.system(f"windscribe connect {random_proxy_location()}")
                print(f"WORKING ON: {single_blog}")
                try:
                    crawl(single_blog, max_urls=130)
                except Exception as e:
                    print(e)
                print("[+] Total Internal links:", len(internal_urls))
                print("[+] Total External links:", len(external_urls))
                print("[+] Total URLs:", len(external_urls) + len(internal_urls))
                # todo find out why do i need this urlparse
                # domain_name = urlparse(single_blog).netloc
                # save the internal links to a file ====> {domain_name}_internal_links.txt"
                # Filter out tag/category/asset/social-media links before
                # probing for comment boxes.
                # NOTE(review): the final `or int_checker(internal_link)`
                # sits INSIDE the not(...) — so a link passing int_checker is
                # excluded, while any ".jsp" link is always kept.  Confirm
                # this inversion is intended.
                with open(f"EXTRACTOR/extracted/internal_links.txt", "a") as f:
                    for internal_link in internal_urls:
                        if not ('/tag/' in internal_link or "/categor" in internal_link
                                or "faq" in internal_link or "events" in internal_link
                                or "policy" in internal_link or "terms" in internal_link
                                or "photos" in internal_link or "author" in internal_link
                                or "label" in internal_link or "video" in internal_link
                                or "search" in internal_link or "png" in internal_link
                                or "pdf" in internal_link or "jpg" in internal_link
                                or "facebook" in internal_link or "twitter" in internal_link
                                or "nytimes" in internal_link or "wsj" in internal_link
                                or "reddit" in internal_link or "bbc" in internal_link
                                or "wikipedia" in internal_link or "guardian" in internal_link
                                or "flickr" in internal_link or "cnn" in internal_link
                                or "ttps://wordpre" in internal_link or "google" in internal_link
                                or "cookies" in internal_link or "instagram" in internal_link
                                or "youtube" in internal_link or "spotify" in internal_link
                                or "mail" in internal_link or "pinterest" in internal_link
                                or "tumblr" in internal_link or "label" in internal_link
                                or "dribble" in internal_link or "unsplash" in internal_link
                                or "automattic" in internal_link or "facebook" in internal_link
                                or "amazon" in internal_link or "amzn" in internal_link
                                or "doc" in internal_link or "amzn" in internal_link
                                or int_checker(internal_link)) or "jsp" in internal_link:
                            print(internal_link.strip(), file=f)
                        else:
                            pass
                #
                # Probe every surviving internal link for a comment box
                # (writes hits to FINAL_URL_LIST.txt), then reset scratch files.
                loop = asyncio.get_event_loop()
                loop.run_until_complete(main())
                soft_file_cleanup()
        except Exception as e:
            print(e)
        # Archive this cycle's hits under a fresh uuid, then wipe scratch.
        with open("EXTRACTOR/extracted/FINAL_URL_LIST.txt") as extracted_links_file:
            global extracted_links
            extracted_links = [line.strip() for line in extracted_links_file]
        create_append_text_file(extracted_links, uuid.uuid4().hex)
        hard_file_cleanup()
        # Pick a (possibly older) archived URL list and comment on each link.
        with open(random_static_url_path(), "r") as internal_link_file:
            parsed_links = [line.strip() for line in internal_link_file]
        # to remove duplicates
        parsed_links_set = set()
        parsed_links_set.update(parsed_links)
        # makes a single comment for each link per each iteration
        # breaks the cycle after a given number of comments to force script tp get another ip address
        if len(parsed_links_set) > 0:
            for link in list(parsed_links_set):
                bot.comment(link, bot.random_name_getter(), bot.random_email_getter(), bot.random_lander_getter())
                gls.sleep_time()
                count += 1
                print(f"count number: {count}")
                if count == random_cycle_nums:
                    break
        bot.clean_up()
| [
"carn3lian@gmail.com"
] | carn3lian@gmail.com |
e58c33704fc446767cdbec764db8f119e75c3a2f | b31f0d2d20bdd081b1311edb6a1f8108fe127e9f | /nets/nets.py | 62703c8298b51e286eede8e279c73c49aa940315 | [] | no_license | frandelgado/mytorch | 8f51aa47bf082d8adc7fbc0414d16aded8a24ad2 | 22233b638443a2d1159418bbc40cf71cc4103f45 | refs/heads/master | 2023-02-01T00:56:52.569099 | 2020-12-16T17:02:43 | 2020-12-16T17:02:43 | 292,952,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | from nets.optim import Momentum
class Net:
    """A minimal sequential neural network: an ordered stack of layers
    trained by backpropagation through a pluggable optimizer.

    Each layer must expose forward()/backward()/mean_grads() and a
    ``store`` dict used to cache activations and gradients between the
    forward and backward passes.
    """
    def __init__(self, layers, lr, optimizer=None):
        """
        :param layers: ordered list of layer objects.
        :param lr: learning rate forwarded to the optimizer on update().
        :param optimizer: update strategy; defaults to a fresh Momentum().
        """
        self.layers = layers
        self.lr = lr
        # BUG FIX: the original signature used ``optimizer=Momentum()``,
        # a mutable default argument — one Momentum instance was created
        # at function-definition time and shared (together with any state
        # it accumulates) by every Net built with the default.  Create it
        # lazily, per instance, instead.
        self.optimizer = Momentum() if optimizer is None else optimizer
        self.cost_history = []

    def forward(self, X):
        """Run a forward pass through all layers.

        Caches each layer's input activation ("A") and pre-activation
        ("Z") in the layer's store for use by backward().
        Returns the final layer's activation (X itself if there are no
        layers).
        """
        A_curr = X
        for layer in self.layers:
            A_prev = A_curr
            A_curr, Z_curr = layer.forward(A_prev)
            layer.store["Z"] = Z_curr
            layer.store["A"] = A_prev
        return A_curr

    def backward(self, dLoss, action=None):
        """Backpropagate the loss gradient through the layers in reverse.

        Accumulates (+=) weight and bias gradients into each layer's
        store, so gradients from multiple samples can be averaged later
        with mean_grads().  `action` is passed through to each layer's
        backward() hook.
        """
        dA_prev = dLoss
        for layer in reversed(self.layers):
            dA_curr = dA_prev
            # Activation output values for the previous layer
            A_prev = layer.store["A"]
            # Z values for the current layer A_curr = activ(Z_curr) = activ((A_prev * W_curr) + b_curr)
            Z_curr = layer.store["Z"]
            # Weights of the current layer
            W_curr = layer.store["W"]
            # biases of the current layer
            b_curr = layer.store["b"]
            # Calculate dL/dA, dL/dW, dL/db
            dA_prev, dW_curr, db_curr = layer.backward(
                dA_curr, W_curr, b_curr, Z_curr, A_prev, action=action
            )
            # Store the gradients for weights and biases (will be used for updates)
            layer.store["dW"] += dW_curr
            layer.store["db"] += db_curr

    def update(self):
        """Apply one optimizer step to all layers using the accumulated
        gradients."""
        self.optimizer.update(self.layers, lr=self.lr)

    def mean_grads(self, batch_size):
        """Average the accumulated gradients over `batch_size` samples."""
        for layer in self.layers:
            layer.mean_grads(batch_size)
"fdelgado@itba.edu.ar"
] | fdelgado@itba.edu.ar |
ac49ac9a742dde207c205fdf63ceaf884a3a20e3 | 70ed9ef2867b2c0ca96596f8fdd75c31af5ac116 | /build/lib/ArticleSpider/zheye/__init__.py | 83954ea69947cd42adcc0f1dd46ef9f117c78f71 | [] | no_license | nanmuyao/ArticleSpider | b24aef4bbd761951dd1bd450e49de8f40c96f289 | a75cfaa028b1717636866b5833cdcaa29a2ec43a | refs/heads/master | 2021-07-24T16:16:20.597430 | 2017-11-05T08:01:53 | 2017-11-05T08:01:53 | 109,280,103 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Recognizing class
from sklearn.mixture import GaussianMixture
from PIL import Image
from ArticleSpider.zheye import util
import numpy as np
class zheye:
    """Recognizer for Zhihu's upside-down-character captcha.

    Clusters the dark pixels of the captcha image into 7 character blobs
    with a Gaussian mixture, then classifies a 3x3 neighbourhood of crops
    around each blob centre with a pre-trained Keras CNN; blobs whose
    crops are mostly classified "flipped" are reported.
    """
    def __init__(self):
        ''' load model '''
        import os
        import keras
        # The model file ships alongside this module.
        full_path = os.path.realpath(__file__)
        path, filename = os.path.split(full_path)
        self.model = keras.models.load_model(path +'/zheyeV3.keras')

    def Recognize(self, fn):
        """Return a list of (row, col) positions of upside-down characters
        in the captcha image file *fn* (coordinates shifted back by the
        20px padding added by CenterExtend)."""
        im = Image.open(fn)
        # Pad the image so crops near the border stay inside bounds.
        im = util.CenterExtend(im, radius=20)
        # Collect coordinates of "dark" pixels (grayscale <= 200).
        vec = np.asarray(im.convert('L')).copy()
        Y = []
        for i in range(vec.shape[0]):
            for j in range(vec.shape[1]):
                if vec[i][j] <= 200:
                    Y.append([i, j])
        # One mixture component per expected character (7 per captcha).
        gmm = GaussianMixture(n_components=7, covariance_type='tied', reg_covar=1e2, tol=1e3, n_init=9)
        gmm.fit(Y)
        centers = gmm.means_
        points = []
        for i in range(7):
            scoring = 0.0
            # Vote over a 3x3 grid of crops around the component centre.
            for w_i in range(3):
                for w_j in range(3):
                    p_x = centers[i][0] -1 +w_i
                    p_y = centers[i][1] -1 +w_j
                    cr = util.crop(im, p_x, p_y, radius=20)
                    cr = cr.resize((40, 40), Image.ANTIALIAS)
                    # Normalize pixels roughly into [-0.9, 0.375] as the
                    # network was trained: (gray - 180) / 200.
                    X = np.asarray(cr.convert('L'), dtype='float')
                    X = (X.astype("float") - 180) /200
                    # Add batch and channel axes: (1, 40, 40, 1).
                    x0 = np.expand_dims(X, axis=0)
                    x1 = np.expand_dims(x0, axis=3)
                    # NOTE(review): `global model` appears unused — the
                    # model is read from self.model; confirm and remove.
                    global model
                    if self.model.predict(x1)[0][0] < 0.5:
                        scoring += 1
            # Majority of the 9 crops (> 4) classified as flipped.
            if scoring > 4:
                # Undo the 20px CenterExtend padding offset.
                points.append((centers[i][0] -20, centers[i][1] -20))
        return points
"1558249222han@gmail.com"
] | 1558249222han@gmail.com |
3352e62c11169a92f6c730c48aa2829399b6d800 | 817872cb000756a75ed3131918c62f9e9ccc32ae | /projects/api.py | fa0a3ab9ad3f743cb82b60e56dd28d87b75cf4e2 | [
"Zlib"
] | permissive | firstl/catami | ff7d426153828eacca1080323d2bb4de119057b0 | 3e03c7762e33a78ccc7d7e9354502deebc23a162 | refs/heads/master | 2021-01-17T04:23:12.049570 | 2013-07-01T17:14:45 | 2013-07-01T17:14:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,947 | py | from tastypie import fields
from tastypie.resources import ModelResource
from catamidb.models import GenericImage
from projects.models import (Project,
GenericAnnotationSet,
GenericPointAnnotation,
GenericWholeImageAnnotation,
AnnotationCodes,
QualifierCodes)
from catamidb.api import GenericImageResource
from tastypie.authentication import (Authentication,
SessionAuthentication,
MultiAuthentication,
ApiKeyAuthentication)
from tastypie.authorization import Authorization
from tastypie.exceptions import Unauthorized
from guardian.shortcuts import (get_objects_for_user, get_perms_for_model,
get_users_with_perms, get_groups_with_perms)
from jsonapi.api import UserResource
from jsonapi.security import get_real_user_object
from projects import authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from datetime import datetime
from random import sample
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.http import HttpNotImplemented
import random
import logging
logger = logging.getLogger(__name__)
# ==============================
# Integration of Backbone and tastypie.
# Usage: extend this resource to make model compatibile with Backbonejs
# ==============================
class BackboneCompatibleResource(ModelResource):
    """Base tastypie resource compatible with Backbone.js models:
    mutating requests echo the object back (always_return_data) and list
    responses are a bare JSON array instead of tastypie's
    {"meta": ..., "objects": ...} envelope."""

    class Meta:
        always_return_data = True

    def alter_list_data_to_serialize(self, request, data):
        # Strip the pagination envelope; Backbone collections expect a list.
        return data["objects"]
# Used to allow authent of anonymous users for GET requests
class AnonymousGetAuthentication(SessionAuthentication):
    """Session authentication that additionally lets anonymous users make
    GET requests; per-object authorization still decides what an
    anonymous user may actually see."""

    def is_authenticated(self, request, **kwargs):
        anonymous_read = (request.method == "GET"
                          and request.user.is_anonymous())
        if anonymous_read:
            return True
        # Anything else goes through the normal session check.
        return super(AnonymousGetAuthentication, self).is_authenticated(
            request, **kwargs)
class ProjectAuthorization(Authorization):
    """
    Implements object-level authorization for projects, backed by
    django-guardian permissions ('projects.view/change/delete_project').
    """

    def read_list(self, object_list, bundle):
        """Restrict the list to projects the user may view."""
        user = get_real_user_object(bundle.request.user)
        return get_objects_for_user(
            user, ['projects.view_project'], object_list)

    def read_detail(self, object_list, bundle):
        """Check user has permission to view this project."""
        user = get_real_user_object(bundle.request.user)
        if not user.has_perm('projects.view_project', bundle.obj):
            raise Unauthorized()
        return True

    def create_list(self, object_list, bundle):
        raise Unauthorized("Sorry, no create lists.")

    def create_detail(self, object_list, bundle):
        # Any authenticated user may create a project.
        if not bundle.request.user.is_authenticated():
            raise Unauthorized(
                "You need to log in to create projects.")
        return True

    def delete_list(self, object_list, bundle):
        """Currently do not permit deletion of any project list."""
        raise Unauthorized(
            "You do not have permission to delete these project.")

    def delete_detail(self, object_list, bundle):
        """Check the user has permission to delete this project."""
        user = get_real_user_object(bundle.request.user)
        if not user.has_perm('projects.delete_project', bundle.obj):
            raise Unauthorized(
                "You do not have permission to delete this project.")
        return True

    def update_detail(self, object_list, bundle):
        """Restrict access to updating a project."""
        user = get_real_user_object(bundle.request.user)
        if not user.has_perm('projects.change_project', bundle.obj):
            raise Unauthorized(
                "You don't have permission to edit this project"
            )
        return True
class GenericAnnotationSetAuthorization(Authorization):
    """
    Implements object-level authorization for GenericAnnotationSet,
    backed by django-guardian permissions
    ('projects.view/change/delete_genericannotationset').
    """

    def read_list(self, object_list, bundle):
        """Restrict the list to annotation sets the user may view."""
        user = get_real_user_object(bundle.request.user)
        return get_objects_for_user(
            user, ['projects.view_genericannotationset'], object_list)

    def read_detail(self, object_list, bundle):
        """Check user has permission to view this GenericAnnotationSet."""
        user = get_real_user_object(bundle.request.user)
        if not user.has_perm('projects.view_genericannotationset', bundle.obj):
            raise Unauthorized()
        return True

    def create_list(self, object_list, bundle):
        raise Unauthorized("Sorry, no create lists.")

    def create_detail(self, object_list, bundle):
        # Any authenticated user may create an annotation set.
        if not bundle.request.user.is_authenticated():
            raise Unauthorized(
                "You need to log in to create annotation sets.")
        return True

    def delete_list(self, object_list, bundle):
        """Currently do not permit deletion of any GenericAnnotationSet list."""
        raise Unauthorized(
            "You do not have permission to delete these annotation sets.")

    def delete_detail(self, object_list, bundle):
        """Check the user has permission to delete this annotation set."""
        user = get_real_user_object(bundle.request.user)
        if not user.has_perm('projects.delete_genericannotationset', bundle.obj):
            raise Unauthorized(
                "You do not have permission to delete this project.")
        return True

    def update_detail(self, object_list, bundle):
        """Restrict access to updating an annotation set."""
        user = get_real_user_object(bundle.request.user)
        if not user.has_perm('projects.change_genericannotationset', bundle.obj):
            raise Unauthorized(
                "You don't have permission to edit this annotation set"
            )
        return True
class GenericPointAnnotationAuthorization(Authorization):
    """
    Implements authorization for the GenericPointAnnotations.

    Point annotations inherit their permissions from the annotation set
    that contains them ('projects.view/change_genericannotationset').
    """

    def read_list(self, object_list, bundle):
        """Restrict the list to only user visible GenericPointAnnotations."""
        user = get_real_user_object(bundle.request.user)

        # get the objects the user has permission to see
        annotation_set_objects = get_objects_for_user(user, [
            'projects.view_genericannotationset'])

        # get all annotation points for the above allowable annotation sets
        point_annotations = GenericPointAnnotation.objects.select_related("generic_annotation_set")
        point_annotation_ids = (point_annotations.filter(generic_annotation_set__in=annotation_set_objects).
                                values_list('id'))

        #now filter out the deployments we are not allowed to see
        return object_list.filter(id__in=point_annotation_ids)

    def read_detail(self, object_list, bundle):
        """Check user has permission to view this GenericPointAnnotation."""
        # get real user
        user = get_real_user_object(bundle.request.user)

        # check the user has permission to view this object
        # (permission lives on the containing annotation set)
        if user.has_perm('projects.view_genericannotationset', bundle.obj.generic_annotation_set):
            return True

        # raise hell! - https://github.com/toastdriven/django-
        # tastypie/issues/826
        raise Unauthorized()

    def create_list(self, object_list, bundle):
        raise Unauthorized("Sorry, no create lists.")

    def create_detail(self, object_list, bundle):
        #authenticated people can create items
        # NOTE(review): the per-annotation-set permission is enforced
        # after creation in GenericPointAnnotationResource.obj_create.
        if bundle.request.user.is_authenticated():
            return True

        raise Unauthorized(
            "You don't have permission to create annotations on this annotation set.")

    def delete_list(self, object_list, bundle):
        """Currently do not permit deletion of any GenericAnnotationSet list.
        """
        raise Unauthorized("You do not have permission to delete these annotation points.")

    def delete_detail(self, object_list, bundle):
        """
        Check the user has permission to delete.
        """
        # get real user
        user = get_real_user_object(bundle.request.user)

        #if the user is not authenticated they can't do anything
        if not bundle.request.user.is_authenticated():
            raise Unauthorized()

        # check the user has permission to edit the contained annotation set
        if user.has_perm('projects.change_genericannotationset', bundle.obj.generic_annotation_set):
            return True

        raise Unauthorized(
            "You do not have permission to delete this annotation point.")

    def update_detail(self, object_list, bundle):
        """Restrict access to updating a project.
        """
        user = get_real_user_object(bundle.request.user)

        #if the user is not authenticated they can't do anything
        if not bundle.request.user.is_authenticated():
            raise Unauthorized()

        # check the user has permission to edit the contained annotation set
        if user.has_perm('projects.change_genericannotationset', bundle.obj.generic_annotation_set):
            return True

        raise Unauthorized("You don't have permission to edit this annotation point.")
class ProjectResource(BackboneCompatibleResource):
    """REST resource for Project: owner and contained images are embedded
    in full; anonymous users may read (subject to authorization)."""
    owner = fields.ForeignKey(UserResource, 'owner', full=True)
    generic_images = fields.ManyToManyField(GenericImageResource, 'generic_images', full=True)

    class Meta:
        queryset = Project.objects.all()
        resource_name = "project"
        authentication = MultiAuthentication(AnonymousGetAuthentication(),
                                             ApiKeyAuthentication(),
                                             Authentication())
        authorization = ProjectAuthorization()
        detail_allowed_methods = ['get', 'post', 'put', 'delete']
        list_allowed_methods = ['get', 'post', 'put', 'delete']
        filtering = {
            'name': ALL,
            'owner': ALL,
            'generic_images': ALL_WITH_RELATIONS,
            'id': 'exact'
        }

    def obj_create(self, bundle, **kwargs):
        """
        We are overiding this function so we can get access to the newly
        created Project. Once we have reference to it, we can apply
        object level permissions to the object.
        """
        # get real user
        user = get_real_user_object(bundle.request.user)

        # stamp creation/modification time and attach the current user
        # as the owner before the object is saved
        create_modified_date = datetime.now()
        bundle.data['creation_date'] = create_modified_date
        bundle.data['modified_date'] = create_modified_date
        bundle.data['owner'] = user

        #create the bundle
        super(ProjectResource, self).obj_create(bundle)

        #make sure we apply permissions to this newly created object
        authorization.apply_project_permissions(user, bundle.obj)

        return bundle

    def dehydrate(self, bundle):
        """Attach derived fields (image_count, map_extent) to the
        serialized project."""
        # PERF FIX: the original re-fetched the project with
        # Project.objects.get(...) twice per serialization; the hydrated
        # object is already available as bundle.obj.
        bundle.data['image_count'] = bundle.obj.generic_images.count()

        # Add the map_extent of all the images in this project
        images = GenericImage.objects.filter(
            id__in=bundle.obj.generic_images.all())
        map_extent = ""
        if len(images) != 0:
            map_extent = images.extent().__str__()
        bundle.data['map_extent'] = map_extent

        return bundle
class GenericAnnotationSetResource(BackboneCompatibleResource):
    """REST resource for GenericAnnotationSet.  On creation it samples a
    subset of the parent project's images and seeds annotation points on
    them, according to the sampling parameters sent by the client."""
    project = fields.ForeignKey(ProjectResource, 'project', full=True)
    generic_images = fields.ManyToManyField(GenericImageResource, 'generic_images', full=True, blank=True, null=True)

    class Meta:
        queryset = GenericAnnotationSet.objects.all()
        resource_name = "generic_annotation_set"
        authentication = MultiAuthentication(AnonymousGetAuthentication(),
                                             ApiKeyAuthentication(),
                                             Authentication())
        authorization = GenericAnnotationSetAuthorization()
        detail_allowed_methods = ['get', 'post', 'put', 'delete']
        list_allowed_methods = ['get', 'post', 'put', 'delete']
        filtering = {
            'project': 'exact',
            'name': 'exact',
            'owner': 'exact',
            'id': 'exact'
        }

    def random_sample_images(self, project, sample_size):
        """ Randomly sample images from the parent project and
        attach them to this annotation set. """
        project_images = project.generic_images.all()
        sampled_images = sample(project_images, int(sample_size))
        return sampled_images

    def stratified_sample_images(self, project, sample_size):
        """ Stratified sample images from the parent project and
        attach them to this resource.

        Takes every n-th image so the sample is spread evenly across the
        project's image sequence.
        """
        project_images = project.generic_images.all()
        # NOTE(review): this relies on Python 2 integer division; under
        # Python 3 `/` yields a float and the slice below raises
        # TypeError.  Also, if sample_size > image count the step is 0
        # (ValueError).  Confirm target runtime before changing.
        every_nth = project_images.count()/int(sample_size)
        sampled_images = project_images[0:project_images.count():every_nth]
        return sampled_images

    def apply_random_sampled_points(self, annotation_set, sample_size):
        """ Randomly apply points to the images attached to this annotation
        set """
        images = annotation_set.generic_images.all()

        # iterate through the images and create points
        for image in images:
            for i in range(int(sample_size)):
                # uniform random point in normalized [0, 1) image coords
                x = random.random()
                y = random.random()

                point_annotation = GenericPointAnnotation()
                point_annotation.generic_annotation_set = annotation_set
                point_annotation.image = image
                point_annotation.owner = annotation_set.owner
                point_annotation.x = x
                point_annotation.y = y

                point_annotation.annotation_caab_code = "00000000"  # not considered
                point_annotation.qualifier_short_name = ""  # not considered

                point_annotation.save()

    def apply_stratified_sampled_points(self, annotation_set, sample_size):
        """ Apply points to the images attached to this annotation set using
        stratified sampling """
        #TODO: implement
        return None

    def do_sampling_operations(self, bundle):
        """ Helper function to hold all the sampling logic

        Reads the sampling parameters from the request payload, attaches
        the sampled images to the new annotation set, then seeds the
        annotation points.  Raises on unknown methodology codes.
        """

        # subsample and set the images
        # methodology codes: '0' == random, '1' == stratified
        image_sample_size = bundle.data['image_sample_size']
        image_sampling_methodology = bundle.data['image_sampling_methodology']

        if image_sampling_methodology == '0':
            bundle.obj.generic_images = self.random_sample_images(bundle.obj.project, image_sample_size)
        elif image_sampling_methodology == '1':
            bundle.obj.generic_images = self.stratified_sample_images(bundle.obj.project, image_sample_size)
        else:
            raise Exception("Image sampling method not implemented.")

        #save the object with the new images on it
        bundle.obj.save()

        # subsample points based on methodologies
        point_sample_size = bundle.data['point_sample_size']
        annotation_methodology = bundle.data['annotation_methodology']

        if annotation_methodology == '0':
            self.apply_random_sampled_points(bundle.obj, point_sample_size)
        else:
            raise Exception("Point sampling method not implemented.")

    def obj_create(self, bundle, **kwargs):
        """
        We are overiding this function so we can get access to the newly
        created GenericAnnotationSet. Once we have reference to it, we can apply
        object level permissions to the object.
        """
        # get real user
        user = get_real_user_object(bundle.request.user)

        #create the bundle
        super(GenericAnnotationSetResource, self).obj_create(bundle)

        #generate image subsamples and points
        try:
            self.do_sampling_operations(bundle)
        except Exception:
            #delete the object that was created
            bundle.obj.delete()
            #return not implemented response
            raise ImmediateHttpResponse(HttpNotImplemented("Unable to create annotation set."))

        #make sure we apply permissions to this newly created object
        authorization.apply_generic_annotation_set_permissions(user, bundle.obj)

        return bundle
class GenericPointAnnotationResource(BackboneCompatibleResource):
    """REST resource for single-point annotations.

    Each point belongs to a GenericAnnotationSet and targets one image;
    x/y are created by sampling in [0, 1) (see the random point sampler).
    """
    generic_annotation_set = fields.ForeignKey(GenericAnnotationSetResource, 'generic_annotation_set', full=True)
    image = fields.ForeignKey(GenericImageResource, 'image', full=True)
    class Meta:
        queryset = GenericPointAnnotation.objects.all()
        resource_name = "generic_point_annotation"
        # anonymous users get read-only access; writes need a session or API key
        authentication = MultiAuthentication(AnonymousGetAuthentication(),
                                             ApiKeyAuthentication(),
                                             Authentication())
        authorization = GenericPointAnnotationAuthorization()
        detail_allowed_methods = ['get', 'post', 'put', 'delete']
        list_allowed_methods = ['get', 'post', 'put', 'delete']
        filtering = {
            'image': 'exact',
            'owner': 'exact',
            'id': 'exact',
            'annotation_caab_code': 'exact',
            'qualifier_short_name': 'exact',
            'generic_annotation_set': 'exact',
        }
    def obj_create(self, bundle, **kwargs):
        """
        We are overiding this function so we can get access to the newly
        created GenericAnnotationSet. Once we have reference to it, we can apply
        object level permissions to the object.
        """
        # get real user
        user = get_real_user_object(bundle.request.user)
        super(GenericPointAnnotationResource, self).obj_create(bundle)
        # NOTE: we can't check permissions on related objects until the bundle
        # is created - django throws an exception. What we need to do here is
        # check permissions. If the user does not have permissions we delete
        # the create object.
        # NOTE(review): the delete below is silent - the client still receives
        # a success response for an annotation that no longer exists. Consider
        # raising an unauthorized response instead; confirm API expectations.
        if not user.has_perm('projects.change_genericannotationset', bundle.obj.generic_annotation_set):
            bundle.obj.delete()
        return bundle
class GenericWholeImageAnnotationResource(BackboneCompatibleResource):
    """REST resource for whole-image (as opposed to point) annotations.

    Mirrors GenericPointAnnotationResource: each annotation belongs to an
    annotation set and targets one image.
    """
    # Bug fix: tastypie relation fields must reference the related *Resource*
    # class. This previously pointed at the GenericAnnotationSet model, which
    # is inconsistent with GenericPointAnnotationResource above and breaks
    # hydration/dehydration of the related field.
    generic_annotation_set = fields.ForeignKey(GenericAnnotationSetResource, 'generic_annotation_set', full=True)
    image = fields.ForeignKey(GenericImageResource, 'image', full=True)
    class Meta:
        queryset = GenericWholeImageAnnotation.objects.all()
        resource_name = "generic_whole_image_annotation"
        # anonymous users get read-only access; writes need a session or API key
        authentication = MultiAuthentication(AnonymousGetAuthentication(),
                                             ApiKeyAuthentication(),
                                             Authentication())
        authorization = ProjectAuthorization()
        detail_allowed_methods = ['get', 'post', 'put', 'delete']
        list_allowed_methods = ['get', 'post', 'put', 'delete']
        filtering = {
            'image': 'exact',
            'owner': 'exact',
            'id': 'exact',
            'annotation_caab_code': 'exact',
            'qualifier_short_name': 'exact',
            'generic_annotation_set': 'exact',
        }
class AnnotationCodesResource(BackboneCompatibleResource):
    """Read-only lookup resource for the annotation code hierarchy.

    Codes form a tree through the self-referencing `parent` field (null at
    the roots); only GET is exposed.
    """
    parent = fields.ForeignKey('projects.api.AnnotationCodesResource', 'parent', null=True)
    class Meta:
        queryset = AnnotationCodes.objects.all()
        resource_name = "annotation_code"
        authentication = MultiAuthentication(AnonymousGetAuthentication(),
                                             ApiKeyAuthentication(),
                                             Authentication())
        #authorization = ProjectAuthorization()
        detail_allowed_methods = ['get']
        list_allowed_methods = ['get']
        filtering = {
            'parent': ALL_WITH_RELATIONS,
            'code_name': ALL,
            'id': ALL,
        }
class QualifierCodesResource(BackboneCompatibleResource):
    """Read-only lookup resource for qualifier codes (tree via `parent`)."""
    parent = fields.ForeignKey('projects.api.QualifierCodesResource', 'parent', full=True)
    class Meta:
        queryset = QualifierCodes.objects.all()
        resource_name = "qualifier_code"
        authentication = MultiAuthentication(AnonymousGetAuthentication(),
                                             ApiKeyAuthentication(),
                                             Authentication())
        #authorization = ProjectAuthorization()
        detail_allowed_methods = ['get']
        list_allowed_methods = ['get']
        filtering = {
            'short_name': 'exact',
            'id': 'exact',
            'parent': 'exact',
        } | [
"mathew.wyatt@gmail.com"
] | mathew.wyatt@gmail.com |
52d7f9c923abe08fae80456e7493255c960f3373 | ca58998cb43727ac2a16a4c372b67862bccd9a8d | /python_files/IQ_mod.py | c25901bcfb2942650f671a45589f2405a57b3d7b | [] | no_license | aureoleday/acc_meter | 5f4baa95261b8bfea4e6a5c7f399f49787deb34b | cdafac8b2d5f36942299bc302c55adf034382065 | refs/heads/master | 2022-07-26T20:07:42.729892 | 2022-07-11T08:19:29 | 2022-07-11T08:19:29 | 162,675,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,879 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 15 16:35:41 2019
@author: Administrator
"""
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import rcos
import gold
import wave
import struct
SIG_FREQ = 500
SAMPLE_FREQ = 4000
# PN_CODE = np.array([1,1,1,1,1,-1,-1,1,1,-1,1,-1,1])
# PN_CODE = np.array([1,1,1,1,1,0,0,1,1,0,1,0,1])#BARK CODE
# PN_CODE = np.ones(127)
# PN_CODE = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0])#M CODE
#PN_CODE = np.array([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0])
# PN_CODE = np.array([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0])
# PN_CODE = np.array([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0])
PN_CODE = np.array([1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0])
# PN_CODE = np.array([0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1])
# PN_CODE = np.random.randint(0,2,16)#RANDOM CODE
# PN_CODE = np.kron(np.random.randint(0,2,20),np.ones(20))
class iq_mod:
    """Quadrature (IQ) modulator helper.

    Pre-computes one repetition block of the I (cos) and Q (sin) carriers at
    construction time; the other methods spread, modulate, mix and despread
    2xN sample matrices against those carriers.
    """
    def __init__(self,sig_freq=1000,sample_freq=32000,rep_N=8,beta=0.3,sps=4,span=1):
        # One carrier period holds sample_freq/sig_freq samples, repeated rep_N
        # times. beta/sps/span are accepted for API compatibility but unused.
        i_wave = np.kron(np.ones(rep_N),np.cos(2*np.pi*sig_freq*np.arange(sample_freq/sig_freq)/sample_freq))
        q_wave = np.kron(np.ones(rep_N),np.sin(2*np.pi*sig_freq*np.arange(sample_freq/sig_freq)/sample_freq))
        self.wave = np.vstack((i_wave,q_wave))  # row 0: I carrier, row 1: Q carrier
        self.period = int(sample_freq*rep_N/sig_freq)  # samples per symbol
    def apl_mod(self,d_iq,mod=0):
        """Modulate a 2xN matrix onto the carriers.

        mod=1 first maps bits {0,1} to levels {-1,+1}; any other value uses
        the samples as-is.
        """
        if mod==1:
            din = d_iq*2 - 1
        else:
            # Bug fix: `din` was previously unbound here, so any call with
            # mod != 1 raised NameError. Pass the samples through unchanged.
            din = d_iq
        return np.vstack((np.kron(din[0],self.wave[0]),np.kron(din[1],self.wave[1])))
    def mix(self,d_iq,phase=0):
        """Multiply the 2xN signal by the (optionally rolled) carriers.

        NOTE(review): assumes d_iq.shape[1] is a multiple of the carrier
        block length, otherwise the tiled carrier will not broadcast.
        """
        return d_iq*np.tile(np.roll(self.wave,phase,axis=1),int(np.ceil(d_iq.shape[1]/self.wave.shape[1])))
    def spread(self,din,code):
        """Spread bits {0,1} (mapped to +/-1) with the given chip code."""
        return np.kron((din-0.5)*2,code)
    def despread(self,din,code):
        """Sliding correlation of `din` against the sample-interpolated +/-1 code."""
        out = np.zeros(din.shape[0])
        code_p = code*2 -1  # map chips {0,1} -> {-1,+1}
        intp_code = np.kron(code_p,np.ones(self.period))  # one sample block per chip
        print("cor len:%d\n" % intp_code.shape[0])
        for i in range(intp_code.shape[0],din.shape[0]):
            out[i] = np.dot(din[i-intp_code.shape[0]:i],intp_code)
        return out
#def rrc(beta, filter_width, Ts):
# """
# https://en.wikipedia.org/wiki/Root-raised-cosine_filter
# :param beta: roll-off factor
# :param filter_width: The width of the filter, samples
# :param Ts: The width of a symbol, samples
# :return: impulse response of the filter, the tuple of filter_width float numbers coefficients
# """
# rrc_out = []
# for i in range(0, filter_width):
# rrc_out.append(0.0)
# if beta != 0.0:
# t1 = Ts/(4*beta)
# else:
# t1 = Ts
#
# for p in range(0, filter_width):
# t = (p - filter_width / 2)
# if t == 0.0:
# rrc_out[p] = (1 + beta*(4/np.pi - 1))
# elif t == t1 or t == -t1:
# if beta != 0.0:
# arg = np.pi/(4*beta)
# s = (1 + 2/np.pi)*np.sin(arg)
# c = (1 - 2/np.pi)*np.cos(arg)
# rrc_out[p] = (s + c) * (beta/np.sqrt(2))
# else:
# rrc_out[p] = 0
# else:
# pts = np.pi*t/Ts
# bt = 4*beta*t/Ts
# s = np.sin(pts*(1-beta))
# c = np.cos(pts*(1+beta))
# div = pts*(1 - bt*bt)
# rrc_out[p] = (s + bt*c)/div
# return tuple(rrc_out)
class my_filter:
    """Stateful streaming Butterworth filter.

    Keeps the lfilter delay line between calls so a long signal can be
    filtered in consecutive chunks.
    """
    def __init__(self,N,filt_zone=[0.2],filt_type='lowpass'):
        self.b,self.a = signal.butter(N, filt_zone, filt_type)
        # Bug fix: np.float was removed in NumPy 1.24; the builtin `float`
        # is what the alias always meant.
        self.z = np.zeros(max(len(self.a),len(self.b))-1,dtype=float)
    def filt(self,din):
        """Filter one chunk, carrying the filter state over to the next call."""
        dout, self.z = signal.lfilter(self.b, self.a, din, zi=self.z)
        return dout
def my_fft(din):
    """Return the single-sided amplitude spectrum of a real-valued signal."""
    n = din.shape[0]
    spectrum = np.fft.rfft(din) / n
    return np.abs(spectrum) * 2
# --- Simulation: spread/modulate a PN code, add noise, then demodulate ---
iq_mod_inst = iq_mod(SIG_FREQ,SAMPLE_FREQ,rep_N=1)
# Separate stateful low-pass filters for the I and Q rails.
lpf_inst_i = my_filter(3,[0.15],'lowpass')
lpf_inst_q = my_filter(3,0.15,'lowpass')
din1 = np.tile(np.vstack((PN_CODE,PN_CODE)),4)
#din2 = np.tile(np.vstack((PN_CODE1,PN_CODE1)),4)
din2 = din1
din = din1 + din2
dm = iq_mod_inst.apl_mod(din,mod=1)
# Additive white Gaussian noise, same shape as the modulated signal.
noise = np.random.randn(dm.shape[0],dm.shape[1])
# noise = np.random.randn(dm.shape)
dmn = dm + noise*1
dmn[1]=dmn[0]
dmm = iq_mod_inst.mix(dmn,1)
print("di len:%d\n" % din.shape[0])
b, a = signal.butter(3, [0.15], 'lowpass')
# I rail: chunked streaming filter vs one-shot filtfilt reference (zt).
df = dmm[0]
zt = signal.filtfilt(b,a,df)
z1 = lpf_inst_i.filt(df[0:20])
z2 = lpf_inst_i.filt(df[20:40])
z3 = lpf_inst_i.filt(df[40:60])
z4 = lpf_inst_i.filt(df[60:80])
z5 = lpf_inst_i.filt(df[80:])
zo = np.concatenate((z1,z2,z3,z4,z5))
cor_i = iq_mod_inst.despread(zo,PN_CODE)
# Q rail: same processing chain.
df = dmm[1]
zt = signal.filtfilt(b,a,df)
z1 = lpf_inst_q.filt(df[0:20])
z2 = lpf_inst_q.filt(df[20:40])
z3 = lpf_inst_q.filt(df[40:60])
z4 = lpf_inst_q.filt(df[60:80])
z5 = lpf_inst_q.filt(df[80:])
zo = np.concatenate((z1,z2,z3,z4,z5))
cor_q = iq_mod_inst.despread(zo,PN_CODE)
cor = np.vstack((cor_i,cor_q))
print("zi len:%d\n" % zo.shape[0])
# --- Plots: raw/modulated, correlations, filter outputs, spectrum ---
fig = plt.figure()
ax = fig.add_subplot(411)
bx = fig.add_subplot(412)
cx = fig.add_subplot(413)
dx = fig.add_subplot(414)
x = np.arange(dm.shape[1])/SAMPLE_FREQ
xh = np.arange(dm.shape[1]/2 + 1)*SAMPLE_FREQ/dm.shape[1]
ax.plot(x,dmn[1],'g',label='qdm')
ax.plot(x,dm[0],'r',label='dm')
ax.legend()
bx.plot(x,cor[0],label='cor_i')
bx.plot(x,cor[1],label='cor_q')
bx.plot(x,np.linalg.norm(cor,axis=0),label='norm')
bx.grid(True, linestyle='-.')
bx.legend()
cx.plot(x,dmm[1],label='di')
cx.plot(x,zo,label='zo')
cx.plot(x,zt,'r',label='zt')
cx.legend()
#dx.plot(x,dm[0],label="di")
#dx.plot(x,dm[1],label="dq")
idff = my_fft(dmn[0])
dx.plot(xh,idff,label="i_freq/amp")
dx.legend()
plt.show()
| [
"ztscotty@gmail.com"
] | ztscotty@gmail.com |
f5e505181bd5aee5865bbe96cc12bf8ad4da0c8a | 2dd2688d97191b4729473a5410ebf78b690541ac | /mysite/mysite/settings.py | 13c4148366f4f17f70fdc2cd25e71373b6dae4e2 | [] | no_license | ather1/Pensions | 7c82d3b1b0e8f24959819c7ead83352cb0f7b3f0 | a7df96ef7af92f29b1b00aae7bea5cd0404ebd7a | refs/heads/master | 2020-05-23T14:34:21.914790 | 2019-07-11T10:09:09 | 2019-07-11T10:09:09 | 186,806,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,086 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before deploying.
SECRET_KEY = '!t=%i2d*f+za1-_=hccrpyi0wj(sxor&onybfq*lx*rfi*&mc!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Development default: file-backed SQLite next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"mohammed_ather@hotmail.com"
] | mohammed_ather@hotmail.com |
0ebcb09203028893ec5bc9f6cdbac6fd6989037a | b6b5917f8002f77941e519014c713434137b416d | /gitlapse.py | 0e67af8fdac511471b42122d52204bc665e689ff | [] | no_license | snewman/codelapse | ce6f34fd6b33dc862627e371010f157a27190ac1 | 831a7970f11ecedbb9c33415516b90a7d1637df2 | refs/heads/master | 2020-04-13T02:56:12.965721 | 2010-03-26T16:30:43 | 2010-03-26T16:30:43 | 563,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,637 | py | import os
import inspect
from subprocess import *
import tempfile
import sys
from optparse import OptionParser
from xml.dom.minidom import parseString
import re
from decimal import *
class Executor:
def execute(self, command):
try:
print "Running " + command
p = Popen(command, shell=True, stdout=PIPE)
retcode = os.waitpid(p.pid, 0)[1]
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
sys.exit(retcode)
else:
return p.stdout
except OSError, e:
print >>sys.stderr, "Execution failed:", e
sys.exit(2)
class GitRepo:
    """Shells out to git for log/reset operations on one repository."""
    def __init__(self, git_dir, working_dir, executor):
        self.git_dir = git_dir          # path to the .git directory
        self.working_dir = working_dir  # work tree used for hard resets
        self.executor = executor
    def current_head(self):
        """Return the hash of the commit HEAD currently points at."""
        return self.executor.execute('git --git-dir=' + self.git_dir + ' log --format=format:"%H" -1').read()
    def list_commits_to_file(self, destination_file_name):
        """Dump `hash || iso-date || subject` lines to a file and reopen it for reading."""
        self.executor.execute('git --git-dir=' + self.git_dir + ' --no-pager log --format=format:"%H || %ai || %s%n" --date=iso > ' + destination_file_name)
        return open(destination_file_name)
    def commits(self, destination_file_name):
        """Return (hash, date) tuples for each commit, in `git log` order."""
        git_output_file = self.list_commits_to_file(destination_file_name)
        list_of_commits = []
        for line in git_output_file:
            records = line.split('||')
            if len(records) > 1:
                # fields keep the surrounding spaces from the ' || ' separator
                git_commit = records[0]
                date = records[1]
                list_of_commits.append((git_commit, date))
        return list_of_commits
    def hard_reset(self, commit_hash):
        """git reset --hard the work tree to the given commit."""
        self.executor.execute('git --git-dir=' + self.git_dir + ' --work-tree=' + self.working_dir + ' reset --hard %s' % commit_hash)
class CheckstyleParser:
    """Parses a checkstyle XML report into a ToxicityReport."""
    def parse(self, checkstyle_report_content):
        """Split the report's <file> elements into healthy names (no <error>
        children) and ToxicClass entries for the rest."""
        dom = parseString(checkstyle_report_content)
        root = dom.getElementsByTagName('checkstyle')[0]
        classes = root.getElementsByTagName('file')
        healthy_class_names = [clazz.getAttribute('name') for clazz in classes if len(clazz.getElementsByTagName('error')) == 0]
        unhealthy_classes = []
        for clazz in classes:
            if len(clazz.getElementsByTagName('error')) > 0:
                errors = {}
                for error in clazz.getElementsByTagName('error'):
                    # keyed by rule id, so only the *last* message per rule survives
                    errors[error.getAttribute('source')] = error.getAttribute('message')
                unhealthy_classes.append(ToxicClass(clazz.getAttribute('name'), errors))
        return ToxicityReport(healthy_class_names, unhealthy_classes)
class CheckstyleExecution:
    """Runs the bundled checkstyle jar over a source tree."""
    def __init__(self, executor, path_to_install):
        self.executor = executor
        self.path_to_install = path_to_install  # root of the tool checkout (holds tools/)
    def analyse(self, src_directory):
        """Return checkstyle's XML report for src_directory as a string."""
        #java -jar ../../code-time-lapse/tools/checkstyle/checkstyle-all-4.4.jar -c ../../code-time-lapse/tools/checkstyle/metrics.xml -r src -f xml
        stdout = self.executor.execute('java -jar %s/tools/checkstyle/checkstyle-all-4.4.jar -c %s/tools/checkstyle/metrics.xml -r %s -f xml' % (self.path_to_install, self.path_to_install, src_directory))
        return stdout.read()
class ToxicClass:
    """A source file that checkstyle flagged, with its violations.

    `errors` maps checkstyle rule id -> violation message.
    """
    def __init__(self, class_name, errors):
        # Bug fix: class_name was accepted but silently dropped, so reports
        # could never say *which* class was toxic. Keep it on the instance.
        self.class_name = class_name
        self.errors = errors
class ToxicityReport:
    """Outcome of a checkstyle run: which classes passed and which did not."""
    def __init__(self, healthy_class_names, unhealthy_class_names):
        """healthy_class_names: names with zero violations;
        unhealthy_class_names: ToxicClass entries carrying their violations."""
        self.healthy_class_names, self.unhealthy_class_names = healthy_class_names, unhealthy_class_names
    def number_of_healty_classes(self):
        # (sic) method name kept as-is for existing callers despite the typo
        return len(self.healthy_class_names)
    def number_of_unhealthy_classes(self):
        return len(self.unhealthy_class_names)
class ToxicityCalculator():
    """Prices checkstyle violation messages.

    Each message is scored as actual/allowed; a class's scores are summed
    and truncated (not rounded) to two decimal places.
    """
    def __init__(self):
        # rule id -> the method that knows how to parse that rule's message
        self.handlers = {
            'com.puppycrawl.tools.checkstyle.checks.sizes.MethodLengthCheck' : self.calculate_long_method_length_cost,
            'com.puppycrawl.tools.checkstyle.checks.sizes.FileLengthCheck' : self.calculate_long_class_cost,
            'com.puppycrawl.tools.checkstyle.checks.metrics.ClassDataAbstractionCouplingCheck' : self.calculate_abstraction_coupling_cost}
    def calculate_abstraction_coupling_cost(self, message_string):
        actual, allowed = self.matches('Class Data Abstraction Coupling is (\d*) \(max allowed is (\d*)\)', message_string)
        return self.cost(actual, allowed)
    def calculate_long_method_length_cost(self, message_string):
        actual, allowed = self.matches('Method length is (\d*) lines \(max allowed is (\d*)\).', message_string)
        return self.cost(actual, allowed)
    def calculate_long_class_cost(self, message_string):
        actual, allowed = self.matches('File length is (\d*) lines \(max allowed is (\d*)\)', message_string)
        return self.cost(actual, allowed)
    def matches(self, pattern, string):
        # groups() -> (actual, allowed) as digit strings
        return re.search(pattern, string).groups()
    def toxicity(self, errors):
        """Total (truncated) cost of one class's {rule id: message} dict."""
        total = Decimal(0)
        for error_type in errors:
            total += self.handlers[error_type](errors[error_type])
        return self.round_down(total)
    def cost(self, actual, allowed):
        return Decimal(actual) / Decimal(allowed)
    def round_down(self, decimal):
        # truncate to two decimal places (never rounds up)
        return decimal.quantize(Decimal('.01'), rounding=ROUND_DOWN)
class SkippingAnalyser:
    """Samples history: only every `skipping_commits`-th analyse() call is
    forwarded, after hard-resetting the repo to that commit."""
    def __init__(self, skipping_commits, delegate_analyser, git_repo):
        self.skipping_commits = skipping_commits
        self.delegate_analyser = delegate_analyser
        self.git_repo = git_repo
        self.current_count = 0
    def analyse(self, commit_hash, commit_date):
        """Count the call; on the Nth call check out the commit and delegate."""
        self.current_count += 1
        if self.current_count != self.skipping_commits:
            return
        self.git_repo.hard_reset(commit_hash)
        self.delegate_analyser.analyse(commit_hash, commit_date)
        self.current_count = 0
class ClocParser:
    """Parses `cloc --csv` output into a MetricsForCommit record."""
    def create_record(self, src_dir, by_date_count, cloc_line):
        """Parse one CSV data row and add its (language, LOC) to the record."""
        records = cloc_line.split(',')
        if len(records) < 7:
            raise Exception('Cannot parse line "' + cloc_line + '"')
        # cloc CSV columns, named here for documentation; only language and
        # lines_of_code are actually used.
        number_of_files = records[0]
        language = records[1]
        number_of_blank_lines = records[2]
        lines_of_comments = records[3]
        lines_of_code = records[4]
        scale = records[5]
        third_gen = records[6]
        by_date_count.add_record(src_dir, language, lines_of_code)
        return by_date_count
    def parse(self, commit_date, commit_hash, src_directory_name, cloc_output):
        """Build a MetricsForCommit from a whole cloc CSV dump."""
        by_date_count = MetricsForCommit(commit_date, commit_hash)
        lines = cloc_output.split('\n')
        for line in lines:
            # skip the CSV header row (it contains the word 'files')
            if 'files' in line:
                continue
            if line.isspace() or len(line) == 0:
                continue
            by_date_count = self.create_record(src_directory_name, by_date_count, line)
        return by_date_count
class TsvFormattingStore:
    """Accumulates MetricsForCommit records and renders them as a TSV table.

    NOTE(review): dict.has_key() below is Python 2 only - the whole script
    is Python 2 (print statements elsewhere); porting requires `in`.
    """
    def __init__(self):
        self.records_by_commit = {}  # commit hash -> MetricsForCommit
    def store(self, metrics_for_commit):
        """Add a record, merging into any existing record for the same commit."""
        commit = metrics_for_commit.commit
        if self.records_by_commit.has_key(commit):
            old_record = self.records_by_commit[commit]
            old_record.merge(metrics_for_commit)
        else:
            self.records_by_commit[commit] = metrics_for_commit
    def metrics_to_report(self):
        """Union of every (src_dir -> set of metric names) seen so far."""
        metrics_to_report = {}
        for record in self.records_by_commit.values():
            for src_dir in record.src_dirs.keys():
                metrics_for_dir = metrics_to_report.get(src_dir, set())
                for metric in record.src_dirs[src_dir].keys():
                    metrics_for_dir.add(metric)
                metrics_to_report[src_dir] = metrics_for_dir
        return metrics_to_report
    def create_row_header(self, metrics_to_report):
        """Header row: Date plus one '<dir>-<metric>' column per pair."""
        row_header = 'Date'
        for src_dir in metrics_to_report.keys():
            for language in metrics_to_report[src_dir]:
                row_header = row_header + '\t' + src_dir + '-' + language
        row_header = row_header + '\n'
        return row_header
    def as_csv(self):
        """Render the whole table (despite the name, output is tab-separated)."""
        metrics_to_report = self.metrics_to_report()
        row_header = self.create_row_header(metrics_to_report)
        for record in self.records_by_commit.values():
            row_header = row_header + record.date
            for src_dir in metrics_to_report.keys():
                for metric in metrics_to_report[src_dir]:
                    # missing metrics render as 0 so every row has every column
                    row_header = row_header + '\t' + str(record.src_dirs.get(src_dir, {}).get(metric, 0))
            row_header = row_header + '\n'
        return row_header
class LinesOfCodeAnalyser:
    """Counts lines of code in one directory with cloc and stores the result."""
    # NOTE(review): the ClocParser()/Executor() defaults are evaluated once at
    # class-definition time and shared across instances; both are stateless
    # here so this is safe, but worth knowing before adding state to either.
    def __init__(self, abs_src_directory, running_from, data_store, parser = ClocParser(), executor = Executor()):
        self.executor = executor
        self.parser = parser
        self.running_from = running_from          # install root holding tools/
        self.abs_src_directory = abs_src_directory
        self.data_store = data_store              # e.g. a TsvFormattingStore
    def analyse(self, commit_hash, commit_date):
        """Run cloc over the directory and store the parsed per-language counts."""
        cloc_cmd = 'perl %s/tools/cloc-1.08.pl %s --csv --exclude-lang=CSS,HTML,XML --quiet' % (self.running_from, self.abs_src_directory)
        cloc_result = self.executor.execute(cloc_cmd)
        data_to_store = self.parser.parse(commit_date, commit_hash, self.abs_src_directory, cloc_result.read())
        self.data_store.store(data_to_store)
class CompositeAnalyser:
    """Fans a single analyse() call out to every delegate analyser."""
    def __init__(self, delegates):
        self.delegates = delegates
    def analyse(self, commit_hash, commit_date):
        """Invoke each delegate in registration order."""
        for each in self.delegates:
            each.analyse(commit_hash, commit_date)
class MetricsForCommit:
    """Per-commit metric counts, grouped by source directory then metric name."""
    def __init__(self, date, commit):
        self.date = date
        self.commit = commit
        self.src_dirs = {}  # {src_dir: {metric: int count}}
    def add_record(self, src_dir, metric, count):
        """Record one count (coerced to int) under src_dir/metric."""
        self.src_dirs.setdefault(src_dir, {})[metric] = int(count)
    def merge(self, other_by_date_count):
        """Absorb another record for the *same* commit, overwriting per-dir counts."""
        if other_by_date_count.commit != self.commit:
            raise Exception('Can only merge records with same commit')
        for src_dir, counts in other_by_date_count.src_dirs.items():
            self.src_dirs[src_dir] = counts
def generate_commit_list(location_for_files, git_repo):
    """Return (hash, date) tuples for the repo, dumping the raw git log to
    <location_for_files>/commits.out along the way."""
    return git_repo.commits(location_for_files + "/commits.out")
def line_counts(location_for_results, sample_rate, src_dirs, git_dir, working_dir):
    """Walk the repo history, sampling every `sample_rate`-th commit, count
    lines of code in each source directory at that commit, and write the
    results as a TSV table. Restores HEAD when done."""
    data = open(location_for_results + "/line_count_by_time.tsv", 'w')
    git_repo = GitRepo(git_dir, working_dir, Executor())
    commit_list = generate_commit_list(location_for_results, git_repo)
    # remember where we started so the work tree can be restored afterwards
    head = git_repo.current_head()
    store = TsvFormattingStore()
    delegate = CompositeAnalyser([LinesOfCodeAnalyser(src_dir, RUNNING_FROM, store) for src_dir in src_dirs])
    skipping_analyser = SkippingAnalyser(skipping_commits = sample_rate, delegate_analyser = delegate, git_repo = git_repo)
    for commit in commit_list:
        date = commit[1]
        git_commit = commit[0]
        skipping_analyser.analyse(git_commit, date)
    data.write(store.as_csv())
    # put the repository back where it was before all the hard resets
    print "Resetting to " + head
    git_repo.hard_reset(head)
    print data.name
    data.close()
def to_gnuplot(data_table):
    """Build a gnuplot 'plot' command, one series per data column of the
    TSV table produced by line_counts()."""
    columns = data_table.split('\n')[0].split('\t')[1:]
    clauses = []
    # gnuplot column numbering starts at 4 here: the first 3 hold the date
    for idx, column in enumerate(columns, 4):
        clauses.append('"line_count_by_time.tsv" using 1:%d title "%s", ' % (idx, column))
    return 'plot ' + ''.join(clauses)
def execution_path(filename):
    """Return the directory this script lives in.

    NOTE(review): the `filename` parameter is unused; the path is derived
    from the *caller's* stack frame via sys._getframe(1), then stripped of
    the trailing 'run.sh' component that was joined on.
    """
    execution_path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))), 'run.sh')
    path_to_run = os.path.abspath(execution_path)
    if path_to_run.endswith('run.sh'):
        index_of_run = len(path_to_run) - 6  # drop the 6-character 'run.sh' suffix
        path_to_run = path_to_run[:index_of_run]
    print "Using " + path_to_run
    return path_to_run
# Resolved once at import time; used to locate the bundled tools/ directory.
RUNNING_FROM = execution_path('run.sh')
def pwd():
    """Current working directory, obtained via the shell's pwd command."""
    return Executor().execute('pwd').read().strip()
def main(argv=None):
    """Parse command-line options and run the line-count sampling."""
    if argv is None:
        argv = sys.argv
    parser = OptionParser()
    parser.add_option("-r", "--results_dir", action="store", dest="result_dir", type="string", default=".", help="Location where results will be stored")
    parser.add_option("-s", "--source_dir", action="store", dest="src_dirs", type="string", default="src", help="A comma seperated list of directories to parse")
    parser.add_option("-f", "--frequency_of_sample", action="store", dest="sample_rate", default=100, type="int", help="How often should a sample be made")
    parser.add_option("-g", "--git_repo_dir", action="store", dest="git_repo_dir", default=pwd()+'/.git', type="string", help="The directory containing the .git file")
    parser.add_option("-w", "--working_dir", action="store", dest="working_dir", default=pwd(), type="string", help="Where will files be checked out to for line counts etc")
    (options, args) = parser.parse_args(argv)
    results_dir = options.result_dir
    sample_rate = options.sample_rate
    src_dirs_str = options.src_dirs
    git_dir = options.git_repo_dir
    working_dir = options.working_dir
    print "Using a sample rate of " + str(sample_rate) + " reading from files " + str(src_dirs_str)
    line_counts(results_dir, sample_rate, src_dirs_str.split(','), git_dir, working_dir)
if __name__ == "__main__":
    main()
| [
"sam.newman@gmail.com"
] | sam.newman@gmail.com |
0af6d31d824d1671f1f2e28c84dde481428dc968 | 212c850d2984328054b080f72db090bf5ba18565 | /quedadas/migrations/0017_auto_20171114_1057.py | 6278271873586ac76f97df755d9f3215c20b717f | [
"MIT"
] | permissive | fevsea/meet-Run-Server | e00713de3d11a8c0cca62d011e83f1ee909e8b6d | 48454a4665f55da019334271641c514df231f177 | refs/heads/master | 2021-09-03T23:22:10.628092 | 2018-01-11T16:57:04 | 2018-01-11T21:31:18 | 106,397,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-14 10:57
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reverses the Tracking<->Meeting link: the FK moves from
    Tracking.meeting to Meeting.tracking (nullable; deleting the Tracking
    row cascades to the Meeting)."""
    dependencies = [
        ('quedadas', '0016_auto_20171114_1054'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='tracking',
            name='meeting',
        ),
        # NOTE(review): the removed column's data is dropped; no data
        # migration carries existing tracking->meeting links to the new field.
        migrations.AddField(
            model_name='meeting',
            name='tracking',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='meeting',
                                    to='quedadas.Tracking'),
        ),
    ]
| [
"alejandro.a.es@ieee.org"
] | alejandro.a.es@ieee.org |
4d2f8636e8f7b48a2b80aa2fb5f54d89710099bf | c21544f1495d06d356bfcc64e61c829ecb3c27c4 | /manage.py | 06ad930aebdeb2a97a5d072026fd8155ac4aa684 | [
"BSD-3-Clause"
] | permissive | ulysseswolf/flaskbb | da0eae930e62981ef51da32fa870db38ad566c8a | f4b86a6c63f1c59096bd1f315977715cd847aa65 | refs/heads/master | 2020-12-07T00:37:42.294478 | 2014-01-18T12:53:39 | 2014-01-18T12:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | """
flaskbb.manage
~~~~~~~~~~~~~~~~~~~~
This script provides some easy to use commands for
creating the database with or without some sample content.
You can also run the development server with it.
Just type `python manage.py` to see the full list of commands.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from flask import current_app
from flask.ext.script import Manager, Shell, Server
from flaskbb import create_app
from flaskbb.extensions import db
from flaskbb.utils.populate import (create_test_data, create_admin_user,
create_welcome_forum, create_default_groups)
# Use the development configuration if available
try:
    from flaskbb.configs.development import DevelopmentConfig as Config
except ImportError:
    # fall back to the shipped defaults when no dev config is present
    from flaskbb.configs.default import DefaultConfig as Config
app = create_app(Config)
manager = Manager(app)
# Run local server on localhost:8080
manager.add_command("runserver", Server("localhost", port=8080))
# Add interactive project shell
def make_shell_context():
    """Objects pre-imported into the interactive `shell` session."""
    return dict(app=current_app, db=db)
manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
def initdb():
    """
    Creates the database tables. Nothing is dropped and no content is added.
    """
    db.create_all()
@manager.command
def createall():
    """
    Creates the database with some example content.

    WARNING: drops every existing table first - for testing only.
    """
    # Just for testing purposes
    db.drop_all()
    db.create_all()
    create_test_data()
@manager.command
def create_admin():
    """
    Creates the admin user (tables are created first if missing).
    """
    db.create_all()
    create_admin_user()
@manager.command
def create_default_data():
    """
    This should be created by every flaskbb installation: the default
    user groups and the welcome forum.
    """
    db.create_all()
    create_default_groups()
    create_welcome_forum()
if __name__ == "__main__":
    manager.run()
| [
"sh4nks7@gmail.com"
] | sh4nks7@gmail.com |
6043cf5b177769272f8511ae9b3b903c5f67eae1 | 752670319eefd704c631d15115fa494476201be4 | /Voice Assistant Project.py | 3d082e7b27553fa0cc8aaba8b46e8ec4d2df11b3 | [] | no_license | VedantZope/Data-Analyst-Voice-Assistant | 05e11c9060e454060092916894c62b0a907e5518 | 8b89e5f8636c3d17e3e4428ba1ecd851faa0a222 | refs/heads/main | 2023-08-15T12:37:50.497452 | 2021-09-19T09:26:53 | 2021-09-19T09:26:53 | 408,083,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,747 | py | import pyttsx3
import speech_recognition as sr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Text-to-speech engine ('sapi5' is the Windows speech API driver).
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# NOTE(review): assumes at least two installed voices; voices[1] raises
# IndexError on systems with a single voice.
engine.setProperty('voice', voices[1].id)
def speak(audio):
    """Speak `audio` aloud, blocking until playback finishes (uses the
    module-level pyttsx3 engine configured above)."""
    engine.say(audio)
    engine.runAndWait()
def file_name():
    # It takes a filename as voice input and returns it as a string.
    # NOTE(review): the spoken name must match the CSV file's name exactly
    # (including extension) for the loaders below to find it - confirm.
    speak("which file do you want to open")
    name=takeCommand()
    return(str(name))
def takecommandnum():
    """Capture a spoken number and return it as an int.

    Accepts the words "zero" through "ten" (returning their index) or a
    string of digits (when the recogniser already heard e.g. "7").

    Bug fix: the original `for` loop had an `else` on the first comparison,
    so it returned int(text) on the very first iteration whenever the word
    was not "zero" - every word from "one" to "ten" raised ValueError.
    Membership is now checked against the whole list first.

    NOTE(review): still raises ValueError when recognition fails and
    takeCommand() returns the literal string "None", as before.
    """
    t = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']
    text = takeCommand()
    if text in t:
        return t.index(text)
    return int(text)
def takeCommand():
    # It takes microphone input from the user and returns string output
    # (speech to text), lowercased.
    # NOTE: returns the literal string "None" (not the None object) when
    # recognition fails - callers compare substrings, so this is benign.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        # r.pause_threshold = 0.8
        r.energy_threshold = 300
        audio = r.listen(source)
    try:
        print("Recognizing...")
        # online Google recogniser, Indian English
        query = r.recognize_google(audio, language='en-in')
    except Exception:
        print("Say that again please...")
        return "None"
    return (query.lower())
# Ask which CSV to open and load it both as a raw array and a DataFrame.
x=file_name()
data_np = np.genfromtxt(x, delimiter=",", filling_values=0)
data_pd = pd.read_csv(x, delimiter=",")
print(x+" opened")
speak(x+" opened")
# Main voice-command loop: keeps listening until the user says "exit".
while(1):
    # speak("I am voice Assistant")
    query=takeCommand()
    print(query)
    if "print data" in query or "show data" in query:
        print(data_pd)
    elif "show info" in query:
        print(data_pd.info())
    # NOTE(review): `and` binds tighter than `or`, so saying just "axis one"
    # (without "sum") also triggers this branch; same for "axis zero" below.
    elif "sum" in query and "axis 1" in query or "axis one" in query:
        x=data_pd.sum(axis=1)
        print(x)
        speak("do you want to plot its graph")
        y=takeCommand()
        if "yes" in y:
            x.plot()
            plt.show()
    elif "sum" in query and "axis 0" in query or "axis zero" in query:
        x = data_pd.sum(axis=0)
        print(x)
        speak("do you want to plot its graph")
        y = takeCommand()
        if "yes" in y:
            x.plot()
            plt.show()
    elif "size" in query and "array" in query:
        print(str(np.size(data_np)))
    elif "shape" in query or "dimension" in query and "array" in query:
        print(str(np.shape(data_np)))
    elif "sort the array" in query:
        speak("along which column do you want to sort the array")
        n=takecommandnum()
        print("Along column ",n)
        print(data_np[data_np[:,n].argsort()])
    elif "exit" in query:
        break
| [
"noreply@github.com"
] | VedantZope.noreply@github.com |
69361db72960261bacdefb16a340ba34ed1fe1e1 | c7605815ca4bc36682bd1dcce41514c1700e144c | /database/mysql_tmp.py | 27e6262ed3038bdf8e4142417c93d7d7213263fb | [] | no_license | xiaochenxigh/mart_strategy | 0f3bcb17da39e26d14afb9572b8b613f3e5c6307 | c2bd6dcd33b31c7f63357fd11b7d1b171571a00f | refs/heads/master | 2023-02-19T02:11:48.883896 | 2021-01-23T09:25:13 | 2021-01-23T09:25:13 | 332,171,466 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,114 | py | import pymysql
# username : adil
# password : helloyyj
class DataBaseHandle(object):
    '''Thin convenience wrapper around a single PyMySQL connection.

    The connection is opened eagerly in __init__. Write helpers
    (insert/delete/update) commit on success and roll back on failure;
    selectDb returns all rows, or None on error.
    '''

    def __init__(self, host, username, password, database, port):
        '''Store the connection parameters and open the connection.'''
        self.host = host
        self.username = username
        self.password = password
        self.database = database
        self.port = port
        # BUG FIX: PyMySQL 1.0 removed positional arguments to connect();
        # keyword arguments work on every version.
        self.db = pymysql.connect(host=self.host, user=self.username,
                                  password=self.password,
                                  database=self.database, port=self.port,
                                  charset='utf8')

    def _executeWrite(self, sql):
        '''Run a writing statement: commit on success, print the error and
        roll back on failure. Shared by insertDB/deleteDB/updateDb.'''
        # self.cursor is kept as an attribute for backward compatibility
        # with the original implementation.
        self.cursor = self.db.cursor()
        try:
            self.cursor.execute(sql)
            self.db.commit()
        except Exception as e:
            # Report and roll back so the connection stays usable.
            print(e)
            self.db.rollback()
        finally:
            self.cursor.close()

    def insertDB(self, sql):
        '''Insert rows (commits on success, rolls back on failure).'''
        self._executeWrite(sql)

    def deleteDB(self, sql):
        '''Delete rows (commits on success, rolls back on failure).'''
        self._executeWrite(sql)

    def updateDb(self, sql):
        '''Update rows (commits on success, rolls back on failure).'''
        self._executeWrite(sql)

    def selectDb(self, sql):
        '''Run a query and return all rows, or None if the query failed.'''
        self.cursor = self.db.cursor()
        try:
            self.cursor.execute(sql)
            data = self.cursor.fetchall()  # all records as a tuple of rows
            return data
        except Exception:
            print('Error: unable to fetch data')
        finally:
            self.cursor.close()

    def closeDb(self):
        '''Close the underlying database connection.'''
        self.db.close()
DbHandle = DataBaseHandle(host='127.0.0.1',username='root',password='258369147@9012db',database='lhcoin',port=3306) | [
"ph@xdeMacBook-Pro.local"
] | ph@xdeMacBook-Pro.local |
0db2aa9ff306478ee3e5479f7c42bd343136846d | 795f0081004920c15c178c43b00432cb8e7ca586 | /controller/src/object_detection.py | 3d2d3f1083a94ede6cb0ff9622c6d4a24be2a5ba | [] | no_license | 60alex60/ECE140aLab6 | e6e9985a07e5615a5678d817cdfb031802322425 | f966af1d7aa87ab9f602bd3ad3f4cdea13ee7421 | refs/heads/master | 2023-04-05T15:31:52.014565 | 2021-03-05T05:41:31 | 2021-03-05T05:41:31 | 353,224,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | import numpy as np
import cv2
import time
class ImgProc():
    """YOLOv4-tiny object detector built on OpenCV's DNN module.

    Loads the network weights/config and the COCO class names from the
    ``object_detection/`` directory at construction time.
    """

    def __init__(self):
        # read pre-trained model and config file
        self.net = cv2.dnn.readNet("object_detection/yolov4-tiny.weights", "object_detection/yolov4-tiny.cfg")
        # read class names from text file
        self.classes = None
        with open("object_detection/coco.names", 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]
        # generate different colors for different classes (one RGB row per class)
        self.COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3))

    # function to get the output layer names
    # in the architecture
    def get_output_layers(self, net):
        """Return the names of the network's unconnected output layers."""
        layer_names = net.getLayerNames()
        # NOTE(review): the i[0] indexing assumes getUnconnectedOutLayers()
        # returns nested arrays (OpenCV < 4.5.4); newer versions return a
        # flat array -- confirm against the installed cv2 version.
        output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        return output_layers

    # function to draw bounding box on the detected object with class name
    def draw_bounding_box(self, img, class_id, confidence, x, y, x_plus_w, y_plus_h):
        """Draw one labelled, class-colored box on img (modified in place)."""
        label = str(self.classes[class_id])
        color = self.COLORS[class_id]
        cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
        cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    def detect_objects(self, img):
        """Run detection on a BGR image.

        Returns (detections, frame) where detections is a list of
        (class_name, (center_x, center_y), [x, y, w, h]) tuples and frame
        is a copy of img with the surviving boxes drawn on it.
        """
        W = img.shape[1]
        H = img.shape[0]
        # create input blob (network input size; normalization maps 0-255 to 0-1)
        sz = (416, 416) # (224,224)
        normalization = 1.0 / 255.0
        blob = cv2.dnn.blobFromImage(img, normalization, sz, (0, 0, 0), True, crop=False)
        # set input blob for the network
        self.net.setInput(blob)
        # run inference through the network
        # and gather predictions from output layers
        outs = self.net.forward(self.get_output_layers(self.net))
        # initialization
        class_ids = []
        confidences = []
        boxes = []
        centroids = []
        conf_threshold = 0.3
        nms_threshold = 0.1
        # For each detetion from each output layer get the confidence, class id,
        # bounding box params and ignore weak detections (confidence <= conf_threshold)
        for out in outs:
            for detection in out:
                # detection layout: [cx, cy, w, h, objectness, per-class scores...]
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > conf_threshold:
                    # box coords come back normalized; scale to pixels
                    center_x = int(detection[0] * W)
                    center_y = int(detection[1] * H)
                    w = int(detection[2] * W)
                    h = int(detection[3] * H)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    class_ids.append(class_id)
                    confidences.append(float(confidence))
                    boxes.append([x, y, w, h])
                    centroids.append((center_x, center_y))
        # Apply non-max suppression to prevent duplicate detections
        indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
        # Go through the detections remaining after NMS and draw bounding boxes
        detections = []
        frame = img.copy()
        for i in indices:
            # NOTE(review): i[0] assumes NMSBoxes returns nested indices
            # (older OpenCV); newer versions return a flat array -- confirm.
            i = i[0]
            box = boxes[i]
            x = box[0]
            y = box[1]
            w = box[2]
            h = box[3]
            self.draw_bounding_box(frame, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h))
            detections.append((self.classes[class_ids[i]], centroids[i], box))
        print("Detected Objects: ", detections)
        return detections, frame
if __name__ == "__main__":
    # Smoke test: run the detector once on a local sample image.
    img = cv2.imread('sample_img.png')
    imgProc = ImgProc()
    imgProc.detect_objects(img)
| [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
876845373eb204f104b260f7792200ec917014bd | 215607ba31f74f419d9fc68c92db1a846de9cc6c | /1st question.py | 560095e3e0bf5e8fd2e7c6c15ec4f85d99f8a805 | [] | no_license | imazshaikh/Hello-world | 7b1ae5242344b6862b855412fb42ef94685fb81f | 6f300e64661605ec69333bf41ba39633ab2b5fbd | refs/heads/main | 2023-02-11T02:37:16.204115 | 2021-01-06T10:33:41 | 2021-01-06T10:33:41 | 327,209,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #this is how file open and read.
# Open the file for reading and writing; 'r+' requires the file to exist.
file1 = open('imaz.txt', 'r+')
text_to_read = file1.read()
print(text_to_read)
# We can also print the file name, mode and so on.
print(file1.name)  # BUG FIX: was `print(file.name)` -- `file` was never defined
print(file1.mode)
# If we want to write to the file. write() requires the text as an argument;
# after read() the position is at end-of-file, so this appends.
file1.write("appended text")  # BUG FIX: write() with no argument raises TypeError
# Close the file when done (a `with` block would do this automatically).
file1.close()
| [
"noreply@github.com"
] | imazshaikh.noreply@github.com |
424ce452a1e89bb996661c681f3a0a2a1d8679df | bf0ba9a1eeb443e047977163153e9db92dcd5b0b | /bin/jupyter-console | 67dfe773428f6c612bc31c8d91049fd054f387cd | [] | no_license | shmcminn/fec-api-totals-sen | 56feb6e88d4870335f954b4c1ae832bd2dcb7e99 | 2ca42596d3a1d719612163e131ebc4e0bbf470fb | refs/heads/master | 2021-07-13T17:33:01.881418 | 2017-10-16T16:33:27 | 2017-10-16T16:33:27 | 107,152,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/Users/smcminn/Documents/graphics/house-sen-fec-q3/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_console.app import main
if __name__ == '__main__':
    # Console-script shim: strip the "-script.py"/".exe" suffix that
    # setuptools wrappers add to argv[0], then hand off to the app and
    # propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"gillianroberts@cqrollcall.com"
] | gillianroberts@cqrollcall.com | |
c34f2a7b96c08a36e7696da9db963ad2291fc6b2 | db9f495efb23e3f4d80b0a67af81df57eef8f5a7 | /manage.py | c4194db63ee667f1709132546f90071914fa3769 | [] | no_license | antonionavarro88/my-first-blog | 132d538a1acc28ba27eed156f5882edb3035d9de | 391bf3325b482ac5019d0ea8a42d20dbbd21a746 | refs/heads/master | 2021-05-20T08:50:52.860835 | 2020-03-31T19:53:19 | 2020-03-31T19:53:19 | 252,135,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility."""
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'General.settings')
    try:
        from django.core import management
    except ImportError as err:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from err
    management.execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Dispatch the management command given on the command line.
    main()
| [
"silent2h@hotmail.com"
] | silent2h@hotmail.com |
bfabb2739c171793041ee29b0c6b4b331220b17b | d60e74dae2c4bcef6bc7c8faea51dc6b245de42f | /package/inference/mc/population.py | 3e105cf0362f917f410e330bcc5f023dc5518596 | [] | no_license | tloredo/inference | 37664ef62317f32ad5ab25c56ead1c49bfc91045 | 215de4e93b5cf79a1e9f380047b4db92bfeaf45c | refs/heads/master | 2021-09-09T06:24:16.690338 | 2021-09-01T21:03:52 | 2021-09-01T21:03:52 | 142,254,094 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,441 | py | """
Classes and functions for describing and sampling discrete populations.
"""
# TODO: Distinguish sizes from weights (normalized).
__author__ = "Tom Loredo"
from numpy import array, random
from _ppssampler import _ppssampler, equalprob
from _ppssampler import set_rng_state, get_rng_state
from inference.utils.pl import pl
# try:
# import pylab as pl
# except ImportError:
# pl = None
__all__ = ['Population', 'Population1D']
class Population(object):
    """A discrete population that can be sampled with or without replacement.

    The population is either equiprobable (``weights`` omitted) or weighted
    for probability-proportional-to-size (PPS) sampling. The heavy lifting
    is delegated to the ``_ppssampler`` extension module; sampler state is
    initialized lazily on first use.
    """

    def __init__(self, items=None, weights=None):
        """Define the population.

        items may be a sequence of members or an int (members become
        range indices); weights, if given, are the member sizes.
        """
        if items is None and weights is None:
            # Robustness fix: the original fell through to int(None) here.
            raise ValueError('Must provide items and/or weights!')
        if weights is None:  # The equal probability case
            self.equiprob = True
            try:  # items is a sequence
                self.npopn = len(items)
                self.items = items
            except TypeError:  # items is an int
                self.npopn = int(items)
                self.items = list(range(self.npopn))
        elif items is None:  # Use indices as items
            self.equiprob = False
            self.npopn = len(weights)
            self.items = list(range(self.npopn))
            self.weights = array(weights, float)
        else:  # Use a list of items *and* weights
            self.equiprob = False
            self.npopn = len(weights)
            if len(items) != self.npopn:
                raise ValueError('Lengths of items & weights must match!')
            self.items = items
            self.weights = array(weights, float)
        # Lazy-initialization flags for the C sampler and Sampford tables.
        self.did_init = False
        self.did_Sampford_init = False
        self.did_Sampford_tables = False

    def sample(self, nsamp):
        """
        Return a set of nsamp samples from the population, sampled with
        replacement.
        """
        # *** Implement equiprob case.
        if self.equiprob:
            raise NotImplementedError('Awaiting code...')
        if not self.did_init:
            self.sampler = _ppssampler(self.weights)
            self.did_init = True
        # Track the RNG state within the sampler, to update NumPy's RNG state.
        # Internally we only use the MT state; any extra state for cached
        # normal or other samples can just be copied.
        rng_state = random.get_state()
        mt_state, extra_state = rng_state[:3], rng_state[3:]
        set_rng_state(*mt_state)  # *** modify to handle full rng state
        indices = self.sampler.sample(nsamp)
        new_state = list(get_rng_state())
        new_state.extend(extra_state)
        random.set_state(new_state)
        return [self.items[i] for i in indices]

    def max_subset(self):
        """
        Return the maximum sample size for PPS sampling without replacement.

        The limiting size arises because PPS sampling without replacement
        requires nsamp*(max normalized weight) <= 1. If this is violated
        for the desired sample size, you may consider trimming the large
        weight members from the population and including them in every
        sample (of course, they will all have inclusion probability of
        unity, regardless of size).
        """
        if self.did_Sampford_init:
            return int(1./self.max_wt)
        else:
            return int(sum(self.weights)/self.weights.max())

    def subset_pps(self, nsamp):
        """
        Return a sample of nsamp distinct items from the population, sampled
        without replacement with probability proportional to size (PPS)
        according to Sampford's sampling scheme.
        """
        # Copy the whole population if nsamp = npopn.
        if nsamp == self.npopn:
            return [item for item in self.items]
        set_rng_state(*random.get_state())
        if self.equiprob:
            # BUG FIX: `arange` was never imported; build the index pool
            # with the already-imported numpy `array` instead.
            pool = array(range(self.npopn))
            indices = equalprob(nsamp, pool)
        else:
            # This part of setup has to be done before any sampling.
            if not self.did_init:
                print('Initing ppssampler...')
                self.sampler = _ppssampler(self.weights)
                self.did_init = True
            # This part has to be done before any sampling w/o replacement.
            if not self.did_Sampford_init:
                print('Initing wts...')
                self.sort_indices, self.sort_wts, self.tot_wt = \
                    self.sampler.prepwts(self.weights)
                self.max_wt = self.sort_wts[0]/self.tot_wt  # Max wt, normed
                self.nsamp = 0
                self.did_Sampford_init = True
                self.did_Sampford_tables = False
            # This part has to be done when sample size changes.
            if self.nsamp != nsamp:
                print('Initing ratios...')
                if nsamp > self.npopn:
                    raise ValueError('nsamp larger than population size!')
                if nsamp*self.max_wt > 1:
                    raise ValueError('Sample size too large for PPS sampling!')
                self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
                self.did_Sampford_tables = False
                self.nsamp = nsamp
            self.ntry, sindices = self.sampler.samplenr()
            indices = [self.sort_indices[i] for i in sindices]
        result = [self.items[i] for i in indices]
        random.set_state(get_rng_state())
        return result

    def subset_pps5(self, nsamp):
        """
        Return a sample of nsamp distinct items from the population, sampled
        without replacement with probability proportional to size (PPS)
        according to Sampford's sampling scheme.

        5-table lookup samplers are used within Sampford's algorithm to
        accelerate the sampling for large populations.
        """
        # Copy the whole population if nsamp = npopn.
        if nsamp == self.npopn:
            return [item for item in self.items]
        set_rng_state(*random.get_state())
        if self.equiprob:
            # BUG FIX: `arange` was never imported; build the index pool
            # with the already-imported numpy `array` instead.
            pool = array(range(self.npopn))
            indices = equalprob(nsamp, pool)
        else:
            # This part of setup has to be done before any sampling.
            if not self.did_init:
                print('Initing ppssampler...')
                self.sampler = _ppssampler(self.weights)
                self.did_init = True
            # This part has to be done before any sampling w/o replacement.
            if not self.did_Sampford_init:
                print('Initing wts...')
                self.sort_indices, self.sort_wts, self.tot_wt = \
                    self.sampler.prepwts(self.weights)
                self.max_wt = self.sort_wts[0]/self.tot_wt  # Max wt, normed
                self.nsamp = 0
                self.did_Sampford_init = True
            # This part has to be done when sample size changes.
            if self.nsamp != nsamp:
                print('Initing ratios...')
                if nsamp > self.npopn:
                    raise ValueError('nsamp larger than population size!')
                if nsamp*self.max_wt > 1:
                    raise ValueError('Sample size too large for PPS sampling!')
                self.sampler.prepratios(nsamp, self.sort_wts, self.tot_wt)
                self.sampler.prepratiotables()
                self.did_Sampford_tables = True
                self.nsamp = nsamp
            # This may happen if subset_pps is called before subset_pps5.
            if not self.did_Sampford_tables:
                print('Initing ratio tables...')
                self.sampler.prepratiotables()
                self.did_Sampford_tables = True
            self.ntry, indices = self.sampler.samplenr5()
            # Note the 5-table version returns unsorted indices.
            # indices = [self.sort_indices[i] for i in sindices]
        result = [self.items[i] for i in indices]
        random.set_state(get_rng_state())
        return result
class Population1D(Population):
    """
    A Population object specialized for populations indexed by a
    single (1-D) real-valued quantity that gives the "size" of
    each member.
    """

    def __init__(self, vals, weights, err=None):
        """Sort members by value and build the CDF / descending hazard."""
        # Generalization: accept any sequence for vals (the original
        # required a numpy array and crashed on plain lists).
        vals = array(vals)
        indices = vals.argsort()
        self.vals = vals[indices].copy()
        self.weights = array(weights)[indices].copy()
        if err is None:  # idiom fix: was `err == None`
            self.err = None
        else:
            self.err = array(err)[indices].copy()
        Population.__init__(self, self.vals, self.weights)
        self.cdf = self.weights.cumsum()
        # Hazard = reversed cumulative weight (descending CDF).
        self.hazard = self.cdf[::-1].copy()

    def haz_pts(self, start=None, end=None):
        """
        Return arrays of points specifying the hazard dist'n over the range
        [start, end]. The range must fully span the range of
        detected values. Also return arrays of points specifying
        error bars, if defined on creation.
        """
        if start is None:
            start = self.vals[0]
        if end is None:
            end = self.vals[-1]
        if start > self.vals[0] or end < self.vals[-1]:
            raise ValueError('Range must span the range of sampled values!')
        # Start the descending CDF.
        absc, ords = [start], [1.]
        # Add pairs of points for each uncensored value, defining jumps.
        for x, p in zip(self.vals, self.hazard):
            absc.extend([x, x])
            ords.extend([ords[-1], p])
        # The last step is zero.
        absc.append(end)
        ords.append(0.)
        if self.err is None:
            return array(absc), array(ords)
        else:
            # For error bars, just copy the stored errors in the middle of
            # the CDF bins.
            eabsc = []
            for i in range(len(self.vals)-1):
                eabsc.append(.5*(self.vals[i]+self.vals[i+1]))
            eabsc.append(.5*(self.vals[-1]+end))
            return array(absc), array(ords), array(eabsc), self.hazard.copy(), \
                self.err.copy()

    def plot(self, start=None, end=None):
        """
        Plot the hazard over the range [start,end], which must span
        the range of uncensored values.
        """
        if not pl:
            raise RuntimeError('Cannot plot without pylab!')
        if start is None:
            start = self.vals[0]
        if end is None:
            end = self.vals[-1]
        if self.err is None:
            a, o = self.haz_pts(start, end)
        else:
            a, o, ea, eo, ee = self.haz_pts(start, end)
        pl.plot(a, o, 'b-', linewidth=2)
        if self.err is not None:
            pl.errorbar(ea, eo, ee, fmt='o', markersize=0)
| [
"loredo@astro.cornell.edu"
] | loredo@astro.cornell.edu |
fc65babef9b7d7077b94f35d2c17bcd73e6ea202 | ac305c6739541e84857e297f8eb1b19417978548 | /78.py | 669cd4df43e886cd23adf6230f54103530d8dd28 | [] | no_license | imhardikj/git_test | d6608d6c02e0bc454f9dd31ffbbc5704a7046a61 | 43f0de2e9ac09ecd4fdfee27879fd8ae354a0685 | refs/heads/master | 2020-03-27T21:56:46.394739 | 2018-09-03T11:27:58 | 2018-09-03T11:27:58 | 147,189,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | def greet_user(username):
"""Display a simple greeting."""
print("Hello, " + username.title() + "!")
greet_user('jesse')
| [
"noreply@github.com"
] | imhardikj.noreply@github.com |
a1c94e91b147f898f704cf3ee932bb26814d55d0 | ecd0df3f6de68e13724dc5a8548ebcc2cfd6ee7c | /consts.py | c4ea8dc15e048557bb95b537ddf3f35206b2b889 | [
"MIT"
] | permissive | ardieb/cryptoAI | 7771545ba3b33b7dcc465addf2bfa9790f17cacb | 025c5ba20e4d4399c8da2a1f9bac0dc875a90306 | refs/heads/master | 2020-06-28T02:04:41.131740 | 2019-08-03T03:08:53 | 2019-08-03T03:08:53 | 200,114,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | """
File for keeping track of constants
"""
# Ticker symbols of the tracked currencies, in their canonical order.
_TICKERS = (
    "ADA ADX AE AGI AION ALGO AMB ANKR APPC ARDR "
    "ARK ARN AST ATOM BAT BCC BCD BCH BCN BCPT "
    "BGBP BLZ BNB BNT BQX BRD BSV BTC BTCB BTG "
    "BTS BTT CDT CELR CHAT CLOAK CMT CND CVC DASH "
    "DATA DCR DENT DGD DLT DNT DOCK DOGE DUSK EDO "
    "ELF ENG ENJ EOS ERD ETC ETH EVX FET FTM "
    "FUEL FUN GAS GNT GO GRS GTO GVT GXS HC "
    "HOT HSR ICN ICX INS IOST IOTA IOTX KEY KMD "
    "KNC LEND LINK LOOM LRC LSK LTC LUN MANA MATIC "
    "MCO MDA MFT MITH MOD MTH MTL NANO NAS NAV "
    "NCASH NEBL NEO NPXS NULS NXS OAX OMG ONE ONG "
    "ONT OST PAX PHB PHX PIVX POA POE POLY POWR "
    "PPT QKC QLC QSP QTUM RCN RDN REN REP REQ "
    "RLC RPX RVN SALT SC SKY SNGLS SNM SNT STEEM "
    "STORJ STORM STRAT SUB SYS TFUEL THETA TNB TNT TRIG "
    "TRX TUSD USDC USDS USDSB USDT VEN VET VIA VIB "
    "VIBE WABI WAN WAVES WINGS WPR WTC XEM XLM XMR "
    "XRP XVG XZC YOYOW ZEC ZEN ZIL ZRX"
)

# Each ticker maps to its own fresh (mutable) empty list.
CURRENCIES = {ticker: [] for ticker in _TICKERS.split()}
| [
"amb556@cornell.edu"
] | amb556@cornell.edu |
adcd55399186347b3f54024b81f47175edbe59e6 | 23a0c46a29e551c662ec91fa3c248a0f717b2688 | /060_permutation_sequence/Solution2/Solution.py | 7691aa46872f49cac89224e3f8254a4689d9115e | [] | no_license | zcybupt/leetcode | 429e752f6b0210af7c89afd866bb170062fe27f0 | d897b493dbc7b23d35be7400266cffcc2735efdd | refs/heads/master | 2023-01-12T20:26:39.874101 | 2020-11-15T07:51:18 | 2020-11-15T07:51:18 | 284,205,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | class Solution:
def __init__(self):
self.facts = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
def get_fact(self, n: int) -> int:
if n < 10: return self.facts[n]
return n * self.get_fact(n - 1)
def getPermutation(self, n: int, k: int) -> str:
result = []
is_visited = [False] * n
k -= 1
for i in range(n):
tmp_fact = self.get_fact(n - 1 - i)
cnt = k // tmp_fact
k %= tmp_fact
for j in range(n):
if is_visited[j]: continue
if cnt == 0:
is_visited[j] = True
result.append(str(j + 1))
break
cnt -= 1
return ''.join(result)
if __name__ == '__main__':
    # Demo: the 9th permutation of "1234" is "2314".
    print(Solution().getPermutation(4, 9))
| [
"jp2016213431@qmul.ac.uk"
] | jp2016213431@qmul.ac.uk |
c679d745de553d8ef304cc8892703af5771d4ad6 | c6686a1d656f6d372bdf3d1989500125023eec84 | /pyGPs/Optimization/minimize.py | 24bdc739b949c10771464df5575a8e005b9570e2 | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | gusmaogabriels/pyGPs | 665a790175fd2f457f415b27c8f26e240a6ea10d | 7e6c40f7e6086ede6c5d754ec8a0797f3cc81654 | refs/heads/master | 2021-01-15T18:22:53.792546 | 2015-07-08T20:37:25 | 2015-07-08T20:37:25 | 40,375,713 | 1 | 0 | null | 2015-08-07T18:46:45 | 2015-08-07T18:46:45 | null | UTF-8 | Python | false | false | 8,684 | py |
#===============================================================================
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
#
# This file contains a Python version of Carl Rasmussen's Matlab-function
# minimize.m
#
# minimize.m is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
# Python adaptation by Roland Memisevic 2008.
# updates by Shan Huang 2013
#
#
# The following is the original copyright notice that comes with the
# function minimize.m
# (from http://www.kyb.tuebingen.mpg.de/bs/people/carl/code/minimize/Copyright):
# Rasmussen
#
# "(C) Copyright 1999 - 2006, Carl Edward Rasmussen
#
# Permission is granted for anyone to copy, use, or modify these
# programs and accompanying documents for purposes of research or
# education, provided this copyright notice is retained, and note is
# made of any changes that have been made.
#
# These programs and documents are distributed without any warranty,
# express or implied. As the programs were written for research
# purposes only, they have not been tested to the degree that would be
# advisable in any important application. All use of these programs is
# entirely at the user's own risk."
#===============================================================================
from numpy import dot, isinf, isnan, any, sqrt, isreal, real, nan, inf
def run(f, X, args=(), length=None, red=1.0, verbose=False):
    '''
    Unconstrained gradient-based optimization by nonlinear conjugate
    gradients (Polack-Ribiere updates with Wolfe-Powell line searches).
    A straightforward Python translation of Carl Rasmussen's minimize.m.

    Parameters
    ----------
    f : callable
        Objective; ``f(X, *args)`` must return ``(value, gradient)``.
    X : ndarray
        Starting point.
    args : tuple
        Extra arguments passed through to ``f``.
    length : int (required)
        If positive, the maximum number of line searches; if negative,
        minus the maximum number of function evaluations.
    red : float
        Expected reduction used to scale the very first step.
    verbose : bool
        If True, print a newline when finished.

    Returns
    -------
    (X, fX, i) : the solution found, the list of function values, and the
    iteration count -- or None if ``f`` produced a non-finite value or
    gradient (NOTE(review): the Matlab original raises instead so the
    line search can bisect; confirm callers expect None here).
    '''
    INT = 0.1   # don't reevaluate within 0.1 of the limit of the current bracket
    EXT = 3.0   # extrapolate maximum 3 times the current step-size
    MAX = 20    # max 20 function evaluations per line search
    RATIO = 10  # maximum allowed slope ratio
    # SIG and RHO are the constants controlling the Wolfe-Powell conditions.
    # SIG is the maximum allowed absolute ratio between previous and new
    # slopes (derivatives in the search direction); RHO is the minimum
    # allowed fraction of the expected slope. Constants must satisfy
    # 0 < RHO < SIG < 1.
    SIG = 0.1
    RHO = SIG/2
    SMALL = 10.**-16  # minimize.m uses matlab's realmin

    i = 0          # zero the run length counter
    ls_failed = 0  # no previous line search has failed
    result = f(X, *args)
    f0 = result[0]  # get function value and gradient
    df0 = result[1]
    fX = [f0]
    i = i + (length < 0)  # count epochs?!
    s = -df0
    d0 = -dot(s, s)  # initial search direction (steepest) and slope
    x3 = red/(1.0-d0)  # initial step is red/(|s|+1)

    while i < abs(length):  # while not finished
        i = i + (length > 0)  # count iterations?!
        X0 = X; F0 = f0; dF0 = df0  # make a copy of current values
        if length > 0:
            M = MAX
        else:
            M = min(MAX, -length-i)
        # --- Extrapolation phase of the line search ---
        while 1:  # keep extrapolating as long as necessary
            x2 = 0; f2 = f0; d2 = d0; f3 = f0; df3 = df0
            success = 0
            while (not success) and (M > 0):
                try:
                    M = M - 1; i = i + (length < 0)  # count epochs?!
                    result3 = f(X+x3*s, *args)
                    f3 = result3[0]
                    df3 = result3[1]
                    if isnan(f3) or isinf(f3) or any(isnan(df3)+isinf(df3)):
                        return
                    success = 1
                except Exception:  # catch any error which occured in f
                    # (narrowed from a bare except so Ctrl-C still works)
                    x3 = (x2+x3)/2  # bisect and try again
            if f3 < F0:
                X0 = X+x3*s; F0 = f3; dF0 = df3  # keep best values
            d3 = dot(df3, s)  # new slope
            if d3 > SIG*d0 or f3 > f0+x3*RHO*d0 or M == 0:
                # are we done extrapolating?
                break
            x1 = x2; f1 = f2; d1 = d2  # move point 2 to point 1
            x2 = x3; f2 = f3; d2 = d3  # move point 3 to point 2
            A = 6*(f1-f2)+3*(d2+d1)*(x2-x1)  # make cubic extrapolation
            B = 3*(f2-f1)-(2*d1+d2)*(x2-x1)
            Z = B+sqrt(complex(B*B-A*d1*(x2-x1)))
            if Z != 0.0:
                x3 = x1-d1*(x2-x1)**2/Z  # num. error possible, ok!
            else:
                x3 = inf
            if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0):
                # num prob | wrong sign?
                x3 = x2*EXT  # extrapolate maximum amount
            elif x3 > x2*EXT:  # new point beyond extrapolation limit?
                x3 = x2*EXT  # extrapolate maximum amount
            elif x3 < x2+INT*(x2-x1):  # new point too close to previous point?
                x3 = x2+INT*(x2-x1)
            x3 = real(x3)
        # --- Interpolation phase of the line search ---
        # NOTE(review): f4/x4/d4 are only set by the first branch below;
        # the loop-entry conditions normally guarantee that branch runs
        # first, mirroring minimize.m -- confirm for pathological inputs.
        while (abs(d3) > -SIG*d0 or f3 > f0+x3*RHO*d0) and M > 0:
            # keep interpolating
            if (d3 > 0) or (f3 > f0+x3*RHO*d0):  # choose subinterval
                x4 = x3; f4 = f3; d4 = d3  # move point 3 to point 4
            else:
                x2 = x3; f2 = f3; d2 = d3  # move point 3 to point 2
            if f4 > f0:
                x3 = x2-(0.5*d2*(x4-x2)**2)/(f4-f2-d2*(x4-x2))
                # quadratic interpolation
            else:
                A = 6*(f2-f4)/(x4-x2)+3*(d4+d2)  # cubic interpolation
                B = 3*(f4-f2)-(2*d2+d4)*(x4-x2)
                if A != 0:
                    x3 = x2+(sqrt(B*B-A*d2*(x4-x2)**2)-B)/A
                    # num. error possible, ok!
                else:
                    x3 = inf
            if isnan(x3) or isinf(x3):
                x3 = (x2+x4)/2  # if we had a numerical problem then bisect
            x3 = max(min(x3, x4-INT*(x4-x2)), x2+INT*(x4-x2))
            # don't accept too close
            result3 = f(X+x3*s, *args)
            f3 = result3[0]
            df3 = result3[1]
            if f3 < F0:
                X0 = X+x3*s; F0 = f3; dF0 = df3  # keep best values
            M = M - 1; i = i + (length < 0)  # count epochs?!
            d3 = dot(df3, s)  # new slope
        # --- Accept or reject the step, update the CG direction ---
        if abs(d3) < -SIG*d0 and f3 < f0+x3*RHO*d0:  # if line search succeeded
            X = X+x3*s; f0 = f3; fX.append(f0)  # update variables
            s = (dot(df3, df3)-dot(df0, df3))/dot(df0, df0)*s - df3
            # Polack-Ribiere CG direction
            df0 = df3  # swap derivatives
            d3 = d0; d0 = dot(df0, s)
            if d0 > 0:  # new slope must be negative
                s = -df0; d0 = -dot(s, s)  # otherwise use steepest direction
            x3 = x3 * min(RATIO, d3/(d0-SMALL))  # slope ratio but max RATIO
            ls_failed = 0  # this line search did not fail
        else:
            X = X0; f0 = F0; df0 = dF0  # restore best point so far
            if ls_failed or (i > abs(length)):  # line search failed twice in a row
                break  # or we ran out of time, so we give up
            s = -df0; d0 = -dot(s, s)  # try steepest
            x3 = 1/(1-d0)
            ls_failed = 1  # this line search failed
    # BUG FIX: was the Python-2 statement `print "\n"` (a syntax error on
    # Python 3); the parenthesized form behaves identically on both.
    if verbose: print("\n")
    return X, fX, i
| [
"schan.huang@gmail.com"
] | schan.huang@gmail.com |
a28c0f031d0d273937621fa6e71b8321e69a8ec7 | fcc5c2dc7edf543c37cfc88bfe37580ea290f4f6 | /poll/settings.py | 8000af01ad17de1a2f9ce4760d4614171b66eac1 | [] | no_license | EMILY0227-png/poll | 7ed595a4d9be27acd13581e023657061da510a06 | 290b3e06137967ef48d78b0b4d86863b66ff1b5e | refs/heads/main | 2023-01-30T00:10:04.973089 | 2020-12-15T08:24:45 | 2020-12-15T08:24:45 | 321,601,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,066 | py | """
Django settings for poll project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c1_hjz!$r(5xl+c#a%f!cu8oz@w6yp7y5gkvj6tswrh#h7u7fz'
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment before deploying.

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): with DEBUG = False, ALLOWED_HOSTS must list the served
# host names or Django will reject every request.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'poll.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'poll.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'zh-hant'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"emilyw514ss@gmail.com"
] | emilyw514ss@gmail.com |
6bd7f8a855f1494d8fb69aa10287b7cc9d29e207 | 7867b9cb9ad676ca6823fa1298754491608a01d1 | /bluedon-iperf3-auto-test/iperf3-jresult-parser.py | 69914d42e1a8982910a74608e9b05c4bfb0c1d38 | [] | no_license | ekeyme/code-piece | bbe021f9443ccbf9aec1ab3fa5d0e5abdf8c9b99 | 9777d9173ee1772a0d7ae680cbf4f22ad50358ec | refs/heads/master | 2021-01-17T12:07:11.314698 | 2018-08-30T15:56:42 | 2018-08-30T15:56:42 | 84,057,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,010 | py | #!/usr/bin/env python2.7
import sys
import json
class TestResult(object):
"""Class containing iperf3 test results.
:param text: The raw result from libiperf as text
:param json: The raw result from libiperf asjson/dict
:param error: Error captured during test, None if all ok
:param time: Start time
:param timesecs: Start time in seconds
:param system_info: System info
:param version: Iperf Version
:param local_host: Local host ip
:param local_port: Local port number
:param remote_host: Remote host ip
:param remote_port: Remote port number
:param reverse: Test ran in reverse direction
:param protocol: 'TCP' or 'UDP'
:param num_streams: Number of test streams
:param blksize:
:param omit:
:param duration: Test duration in seconds
:param local_cpu_total: The local total CPU load
:param local_cpu_user: The local user CPU load
:param local_cpu_system: The local system CPU load
:param remote_cpu_total: The remote total CPU load
:param remote_cpu_user: The remote user CPU load
:param remote_cpu_system: The remote system CPU load
TCP test specific
:param tcp_mss_default:
:param retransmits: amount of retransmits (Only returned from client)
:param sent_bytes: Sent bytes
:param sent_bps: Sent bits per second
:param sent_kbps: sent kilobits per second
:param sent_Mbps: Sent Megabits per second
:param sent_kB_s: Sent kiloBytes per second
:param sent_MB_s: Sent MegaBytes per second
:param received_bytes: Received bytes
:param received_bps: Received bits per second
:param received_kbps: Received kilobits per second
:param received_Mbps: Received Megabits per second
:param received_kB_s: Received kiloBytes per second
:param received_MB_s: Received MegaBytes per second
UDP test specific
:param bytes:
:param bps:
:param jitter_ms:
:param kbps:
:param Mbps:
:param kB_s:
:param MB_s:
:param packets:
:param lost_packets:
:param lost_percent:
:param seconds:
"""
def __init__(self, result):
"""Initialise TestResult
:param result: raw json output from :class:`Client` and :class:`Server`
"""
# The full result data
self.text = result
self.json = json.loads(result)
if 'error' in self.json:
self.error = self.json['error']
else:
self.error = None
# start time
self.time = self.json['start']['timestamp']['time']
self.timesecs = self.json['start']['timestamp']['timesecs']
# generic info
self.system_info = self.json['start']['system_info']
self.version = self.json['start']['version']
# connection details
connection_details = self.json['start']['connected'][0]
self.local_host = connection_details['local_host']
self.local_port = connection_details['local_port']
self.remote_host = connection_details['remote_host']
self.remote_port = connection_details['remote_port']
# test setup
self.tcp_mss_default = self.json['start'].get('tcp_mss_default')
self.protocol = self.json['start']['test_start']['protocol']
self.num_streams = self.json['start']['test_start']['num_streams']
self.blksize = self.json['start']['test_start']['blksize']
self.omit = self.json['start']['test_start']['omit']
self.duration = self.json['start']['test_start']['duration']
# system performance
cpu_utilization_perc = self.json['end']['cpu_utilization_percent']
self.local_cpu_total = cpu_utilization_perc['host_total']
self.local_cpu_user = cpu_utilization_perc['host_user']
self.local_cpu_system = cpu_utilization_perc['host_system']
self.remote_cpu_total = cpu_utilization_perc['remote_total']
self.remote_cpu_user = cpu_utilization_perc['remote_user']
self.remote_cpu_system = cpu_utilization_perc['remote_system']
# TCP specific test results
if self.protocol == 'TCP':
sent_json = self.json['end']['sum_sent']
self.sent_bytes = sent_json['bytes']
self.sent_bps = sent_json['bits_per_second']
recv_json = self.json['end']['sum_received']
self.received_bytes = recv_json['bytes']
self.received_bps = recv_json['bits_per_second']
# Bits are measured in 10**3 terms
# Bytes are measured in 2**10 terms
# kbps = Kilobits per second
# Mbps = Megabits per second
# kB_s = kiloBytes per second
# MB_s = MegaBytes per second
self.sent_kbps = self.sent_bps / 1000
self.sent_Mbps = self.sent_kbps / 1000
self.sent_kB_s = self.sent_bps / (8 * 1024)
self.sent_MB_s = self.sent_kB_s / 1024
self.received_kbps = self.received_bps / 1000
self.received_Mbps = self.received_kbps / 1000
self.received_kB_s = self.received_bps / (8 * 1024)
self.received_MB_s = self.received_kB_s / 1024
# retransmits only returned from client
self.retransmits = sent_json.get('retransmits')
# UDP specific test results
elif self.protocol == 'UDP':
self.bytes = self.json['end']['sum']['bytes']
self.bps = self.json['end']['sum']['bits_per_second']
self.jitter_ms = self.json['end']['sum']['jitter_ms']
self.kbps = self.bps / 1000
self.Mbps = self.kbps / 1000
self.kB_s = self.kbps / (8 * 1024)
self.MB_s = self.Mbps / 1024
self.packets = self.json['end']['sum']['packets']
self.lost_packets = self.json['end']['sum']['lost_packets']
self.lost_percent = self.json['end']['sum']['lost_percent']
self.seconds = self.json['end']['sum']['seconds']
@property
def reverse(self):
if self.json['start']['test_start']['reverse']:
return True
else:
return False
@property
def type(self):
if 'connecting_to' in self.json['start']:
return 'client'
else:
return 'server'
def __repr__(self):
"""Print the result as received from iperf3"""
return self.text
if __name__ == '__main__':
def print2(s):
sys.stdout.write(s)
raw_s = sys.stdin.read()
if not raw_s:
sys.stderr.write("json format error: %s" % raw_s)
sys.exit(1)
user_protocol = sys.argv[1].upper()
result = TestResult(raw_s)
if result.error:
sys.stderr.write("error: %s\n" % (result.error,))
sys.exit(1)
if user_protocol != result.protocol:
sys.stderr.write("user_protocol != protocol\n")
sys.exit(1)
direction = '->' if result.type == 'client' \
and not result.reverse else '<-'
print2("%s%s%s\t%s\t%s\t%s\t%s\t" % (result.local_host,
direction,
result.remote_host,
user_protocol,
result.time,
result.local_cpu_total,
result.remote_cpu_total))
if user_protocol == 'TCP':
print("%.2f\t%.2f" % (result.sent_bytes/(2**20), result.sent_Mbps))
elif user_protocol == 'UDP':
print("%.2f\t%.2f\t%.2f%%" % (result.bytes/(2**20),
result.Mbps, result.lost_percent))
else:
sys.stderr.write("unsupported protocol: %s" % user_protocol)
sys.exit(1) | [
"ekeyme@gmail.com"
] | ekeyme@gmail.com |
e4373ae6e178468000ed7a0b8f5e5aba48950f4d | a2bbc6bc8492a486541ef6a19e97be619cbc1ccc | /benten/langserver/server.py | cf36b096aee2bfe3ad6a20a2322bb6da6f484e98 | [
"Apache-2.0"
] | permissive | stain/benten | 67eb675c96169a7f2802833cdbb669b3a272794e | 40440d36025e0b27b8dfa6752aa76b15e7abc0d1 | refs/heads/master | 2020-09-24T07:36:17.084414 | 2019-11-27T16:15:09 | 2019-11-27T16:15:09 | 225,703,316 | 0 | 0 | Apache-2.0 | 2019-12-03T19:53:07 | 2019-12-03T19:53:07 | null | UTF-8 | Python | false | false | 7,032 | py | """
Expected requests
"initialize": self.serve_initialize,
"initialized": self.serve_ignore,
"textDocument/didOpen": self.serve_doc_did_open,
"textDocument/didChange": self.serve_doc_did_change,
"textDocument/completion": self.serve_completion,
"textDocument/hover": self.serve_hover,
"textDocument/codeAction": self.serve_available_commands,
"textDocument/implementation":
"textDocument/definition": self.serve_definition,
"textDocument/xdefinition": self.serve_x_definition,
"textDocument/references": self.serve_references,
"workspace/xreferences": self.serve_x_references,
"textDocument/documentSymbol": self.serve_document_symbols,
"workspace/symbol": self.serve_symbols,
"workspace/xpackages": self.serve_x_packages,
"workspace/xdependencies": self.serve_x_dependencies,
"$/cancelRequest": self.serve_ignore,
"shutdown": self.serve_ignore,
"exit": self.serve_exit,
"""
# Copyright (c) 2019 Seven Bridges. See LICENSE
from enum import IntEnum
from .lspobjects import to_dict
from .base import CWLLangServerBase, JSONRPC2Error, ServerError, LSPErrCode
from .fileoperation import FileOperation
from .definition import Definition
from .completion import Completion
from .documentsymbol import DocumentSymbol
from .hover import Hover
import logging
logger = logging.getLogger(__name__)
logger.propagate = True
logging.getLogger("benten.langserver.jsonrpc").propagate = False
class TextDocumentSyncKind(IntEnum):
_None = 0
Full = 1
Incremental = 2
class LangServer(
Hover,
DocumentSymbol,
Completion,
Definition,
FileOperation,
CWLLangServerBase):
def run(self):
while self.running:
try:
request = self.conn.read_message()
self.handle(request)
except EOFError:
break
except Exception as e:
logger.error("Unexpected error: %s", e, exc_info=True)
# Request message:
# {
# "jsonrpc": "2.0",
# "id": 1, # <---------- if this field is missing, it's a notification
# and should not receive a response. Otherwise,
# the response should carry this id for accounting
# purposes
# "method": "...",
# "params": {
# ...
# }
# }
def handle(self, client_query):
logger.info("Client query: {}".format(client_query.get("method")))
is_a_request = "id" in client_query
if self.premature_request(client_query, is_a_request):
return
if self.duplicate_initialization(client_query, is_a_request):
return
try:
response = to_dict(self._dispatch(client_query))
if is_a_request:
self.conn.write_response(client_query["id"], response)
except ServerError as e:
logger.error(e.server_error_message)
let_client_know_of_errors = False
if let_client_know_of_errors:
self.conn.write_error(
client_query["id"],
code=e.json_rpc_error.code,
message=str(e.json_rpc_error.message),
data=e.json_rpc_error.data)
def premature_request(self, client_query, is_a_request):
if not self.initialization_request_received and \
client_query.get("method", None) not in ["initialize", "exit"]:
logger.warning("Client sent a request/notification without initializing")
if is_a_request:
self.conn.write_error(
client_query["id"],
code=LSPErrCode.ServerNotInitialized,
message="",
data={})
return True
else:
return False
def duplicate_initialization(self, client_query, is_a_request):
if self.initialization_request_received and client_query.get("method", None) == "initialize":
logger.warning("Client sent duplicate initialization")
if is_a_request:
self.conn.write_error(
client_query["id"],
code=LSPErrCode.InvalidRequest,
message="Client sent duplicate initialization",
data={})
return True
else:
return False
def _dispatch(self, client_query):
# textDocument/didOpen -> serve_textDocument_didOpen
method_name = "serve_" + client_query.get("method", "noMethod").replace("/", "_")
try:
f = getattr(self, method_name)
except AttributeError as e:
f = self.serve_unknown
return f(client_query)
@staticmethod
def serve_noMethod(client_query):
msg = "Method not specified"
raise ServerError(
server_error_message=msg,
json_rpc_error=JSONRPC2Error(
code=LSPErrCode.MethodNotFound,
message=msg))
@staticmethod
def serve_unknown(client_query):
msg = "Unknown method: {}".format(client_query["method"])
raise ServerError(
server_error_message=msg,
json_rpc_error=JSONRPC2Error(
code=LSPErrCode.MethodNotFound,
message=msg))
def serve_shutdown(self, client_query):
logging.shutdown()
self.running = False
# https://microsoft.github.io/language-server-protocol/specification#initialize
def serve_initialize(self, client_query):
self.initialization_request_received = True
self.client_capabilities = client_query.get("capabilities", {})
logger.debug("InitOpts: {}".format(client_query))
return {
"capabilities": {
"textDocumentSync": TextDocumentSyncKind.Full,
# Avoid complexity of incremental updates for now
"completionProvider": {
"resolveProvider": True,
"triggerCharacters": [".", "/"]
},
"hoverProvider": True,
"definitionProvider": True,
"referencesProvider": True,
"documentSymbolProvider": True,
"workspaceSymbolProvider": True,
"streaming": True,
"codeActionProvider": {
"codeActionKinds": ["source"]
},
"workspace": {
"workspaceFolders": {
"supported": True,
"changeNotifications": True
}
},
# https://github.com/sourcegraph/language-server-protocol/blob/master/extension-files.md#files-extensions-to-lsp
# This is not in the spec yet
"xfilesProvider": True
}
}
@staticmethod
def serve_initialized(client_query):
return {}
| [
"kaushik.ghose@sbgenomics.com"
] | kaushik.ghose@sbgenomics.com |
c8e7c04a12164aa50d57d08f389ae9cca8ae4c2e | 461cf8c3ec51b1fd22d7a6d0b8cdb3fb42e4bc21 | /blender_simply.py | 3c464dc27230c3d28ef085d48567cdfc6076aff3 | [] | no_license | jurryt/ob_sim | bd8112454e407ab3ec397e0770e25c3e4d868633 | 82f5e7e4d5ced33ee5e00290a1afe42e124b1224 | refs/heads/master | 2020-03-07T21:47:53.071651 | 2018-04-08T06:20:22 | 2018-04-08T06:20:22 | 127,737,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | import bpy
#
class ModalTimerOperator(bpy.types.Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.modal_timer_operator"
bl_label = "Modal Timer Operator"
_timer = None
def modal(self, context, event):
if event.type == 'ESC':
return self.cancel(context)
if event.type == 'TIMER':
# change theme color, silly!
color = context.user_preferences.themes[0].view_3d.space.gradients.high_gradient
color.s = 1.0
color.h += 0.01
return {'PASS_THROUGH'}
def execute(self, context):
self._timer = context.window_manager.event_timer_add(0.1, context.window)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
context.window_manager.event_timer_remove(self._timer)
return {'CANCELLED'}
def register():
bpy.utils.register_class(ModalTimerOperator)
def unregister():
bpy.utils.unregister_class(ModalTimerOperator)
if __name__ == "__main__":
register()
# test call
bpy.ops.wm.modal_timer_operator() | [
"jurryt@dds.nl"
] | jurryt@dds.nl |
ce74a24931cde7f4726a9a179a84cb9f7ed45f3e | 4c0303e789e3e76ee0625b930243086bd8fa3386 | /squares_parallel.py | bb46d291c25ac247817611def6c0b73f7bfd4a72 | [
"BSD-3-Clause"
] | permissive | Kondziowy/PythonBasicTraining | 5ab4290b34a58f47316e338cc8a56c9a10cadbe3 | b7f98a4263faee6f94179a0786ba7eb2dd1e2191 | refs/heads/master | 2020-03-24T19:30:32.519397 | 2018-08-28T06:50:47 | 2018-08-28T06:50:47 | 142,930,552 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import multiprocessing
import time
def compute_squares(number):
return number ** 2
# Python idiom - the code below will not get run if you import the module
if __name__ == '__main__':
pool = multiprocessing.Pool(20)
result = pool.map(compute_squares, range(1000))
print(result)
| [
"noreply@github.com"
] | Kondziowy.noreply@github.com |
29e2e0fd96b2a035419294cf74219036d118ef31 | 7a79a3e14849558df5042a50b3ba635c10e6f0c8 | /m1b/db/main.py | cf093f0617d1b64a90749f23633ff17708613366 | [] | no_license | tysm/m1b_charts | 1257c3d246285090993c876ef366b2e2ffe94a43 | ca722cddd1b9f1e7303295a32af0d5cf40fdaa2a | refs/heads/master | 2020-06-13T00:26:50.486768 | 2019-06-30T09:14:49 | 2019-06-30T09:14:49 | 194,473,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from m1b.db.models import db
from m1b.db.models import DB_TABLES
if __name__ == "__main__":
db.connect()
db.create_tables(DB_TABLES)
| [
"ysm.thalles@gmail.com"
] | ysm.thalles@gmail.com |
c08eab177e914832df54713a4be70bfb13909b79 | 31c1e32dd4215fd0ef7456925453424f98a0fc18 | /generate_self_play_data.py | e3717a2923a737b360b78c1afed67a815c2dc1e7 | [] | no_license | JianqiaoAirport/Gomoku_v2 | 941cd37ff979c32306353a6c52cdb53b977a479a | 04768169649a2515a277aec860c793016512a9a1 | refs/heads/master | 2021-05-09T03:47:09.816023 | 2018-04-09T06:14:08 | 2018-04-09T06:14:08 | 119,251,401 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,287 | py | import game_logic as gl
import play
import time
import random
import numpy as np
import pandas as pd
import p_v_mcts_player
import logging
class GenerateSelfPlayData:
def __init__(self, self_play_game_logic):
self.self_play_game_logic = self_play_game_logic
self.play_record = []
def generate_self_play_data(self, player1, player2, number_of_games=2, numbuer_of_samples_in_each_game=1):
'''
:param number: 局数
:param sess:
:param p_v_neural_network:
:return: 很多棋谱,对局结果(列向量),下一步棋的走法
'''
start_time = time.time()
winner, plane_record, action_list, turn = self.self_play_game_logic.play(player1, player2)
end_time = time.time()
logging.info(end_time - start_time)
print(end_time - start_time)
# winner不能直接喂给神经网络,如果当时是白棋走,得把winner反一下得到z再作为神经网络的z
z, arr, y_ = self.select_and_generate_data(winner, plane_record, action_list, turn)
plane_records = arr
game_result = z
for k in range(1, numbuer_of_samples_in_each_game):
z, arr, y__ = self.select_and_generate_data(winner, plane_record, action_list, turn)
plane_records = np.concatenate((plane_records, arr))
y_ = np.concatenate((y_, y__))
game_result = np.concatenate((game_result, z))
# 注意,上面改了for循环下面的也得改
for i in range(1, number_of_games):
start_time = time.time()
player1.refresh()
player2.refresh()
winner, plane_record, action_list, turn = self.self_play_game_logic.play(player1, player2)
end_time = time.time()
logging.info(end_time - start_time)
print(end_time - start_time)
for k in range(numbuer_of_samples_in_each_game):
z, arr, y__ = self.select_and_generate_data(winner, plane_record, action_list, turn)
plane_records = np.concatenate((plane_records, arr))
y_ = np.concatenate((y_, y__))
game_result = np.concatenate((game_result, z))
# q.put((plane_records, game_result.T, y_))
return plane_records, game_result, y_
def select_and_generate_data(self, winner, plane_record, action_list, turn):
'''
turn: 一共走了多少步,注意:不是步数加1,selfplay最后有turn+=1,但这个判断胜负的方法中,如果比赛结束,current_trun会减1
注意: monte_carlo_tree_search 中也有类似的定义,这边如果要改,那边也得改
'''
size = plane_record.shape[1]
situation = random.randint(0, turn-1) # 走了situation步之后的局面
if situation % 2 == 1: # 最后一步的回合计数是奇数,说明黑棋刚走完,下一步该白棋走
z = -winner
else:
z = winner
result = np.array([[z]])
y_ = np.zeros(size**2, dtype=np.float32)
arr1 = np.zeros((size, size), dtype=np.float32)
arr2 = np.zeros((size, size), dtype=np.float32)
if situation % 2 == 1: # 最后一步的回合计数是奇数,说明黑棋刚走完,下一步该白棋走
arr3 = np.zeros((size, size), dtype=np.float32)
for i in range(size):
for j in range(size):
if plane_record[1][i][j] <= situation:
if plane_record[0][i][j] == -1:
arr1[i][j] = 1
elif plane_record[0][i][j] == 1:
arr2[i][j] = 1
elif plane_record[1][i][j] == situation+1:
y_[i * size + j] = 1 # 找出下一步棋在哪儿,返回一个类似 one_hot_key的向量,注意,后来不用这个东西了
else:
arr3 = np.ones((size, size), dtype=np.float32)
for i in range(size):
for j in range(size):
if plane_record[1][i][j] <= situation:
if plane_record[0][i][j] == 1:
arr1[i][j] = 1
elif plane_record[0][i][j] == -1:
arr2[i][j] = 1
elif plane_record[1][i][j] == situation+1:
y_[i * size + j] = 1
arr = np.concatenate((np.array([arr1]), np.array([arr2])))
arr = np.concatenate((arr, np.array([arr3])))
board = arr.copy()
board = board.swapaxes(0, 1)
board = board.swapaxes(1, 2)
board = np.array([board])
action_probability_distribution = action_list[situation]
action_matrix = action_probability_distribution.reshape((size, size))
for i in range(3): # 旋转90°,一共3次
arr_data_augment_board1 = arr.copy()
arr_data_augment_board2 = arr.copy() # 旋转90°之前,先左右翻一下
arr_data_augment_act1 = action_matrix.copy()
arr_data_augment_act2 = action_matrix.copy()
arr_data_augment_act1 = np.rot90(m=arr_data_augment_act1, k=i + 1)
arr_data_augment_act2 = np.fliplr(arr_data_augment_act2)
arr_data_augment_act2 = np.rot90(m=arr_data_augment_act2, k=i + 1)
for j in range(2): #分别对arr1,2进行操作
arr_data_augment_board1[j] = np.rot90(m=arr_data_augment_board1[j], k=i+1)
arr_data_augment_board2[j] = np.fliplr(arr_data_augment_board2[j])
arr_data_augment_board2[j] = np.rot90(m=arr_data_augment_board2[j], k=i+1)
arr_data_augment_board1 = arr_data_augment_board1.swapaxes(0, 1)
arr_data_augment_board1 = arr_data_augment_board1.swapaxes(1, 2)
arr_data_augment_board1 = np.array([arr_data_augment_board1])
board = np.concatenate((board, arr_data_augment_board1))
arr_data_augment_board2 = arr_data_augment_board2.swapaxes(0, 1)
arr_data_augment_board2 = arr_data_augment_board2.swapaxes(1, 2)
arr_data_augment_board2 = np.array([arr_data_augment_board2])
board = np.concatenate((board, arr_data_augment_board2))
action_probability_distribution = np.concatenate((action_probability_distribution, np.array([arr_data_augment_act1.reshape(size ** 2)])))
action_probability_distribution = np.concatenate((action_probability_distribution, np.array([arr_data_augment_act2.reshape(size ** 2)])))
result = np.concatenate((result, np.array([[z]])))
result = np.concatenate((result, np.array([[z]])))
arr_data_augment_board = arr.copy()
arr_data_augment_act = action_matrix.copy()
for j in range(2):
arr_data_augment_board[j] = np.fliplr(arr_data_augment_board[j])
arr_data_augment_board = arr_data_augment_board.swapaxes(0, 1)
arr_data_augment_board = arr_data_augment_board.swapaxes(1, 2)
arr_data_augment_act = np.fliplr(arr_data_augment_act)
board = np.concatenate((board, np.array([arr_data_augment_board])))
action_probability_distribution = np.concatenate((action_probability_distribution, np.array([arr_data_augment_act.reshape(size ** 2)])))
result = np.concatenate((result, np.array([[z]])))
return result, board, action_probability_distribution
if __name__ == "__main__":
import p_v_network
import play
self_play_game = play.PlayLogic(plane_size=15)
data_generator = GenerateSelfPlayData(self_play_game)
p_v_network = p_v_network.P_V_Network()
root1 = p_v_mcts_player.MCTSNode(gl.GameLogic(plane_size=15), father_edge=None, p_v_network=p_v_network)
root2 = p_v_mcts_player.MCTSNode(gl.GameLogic(plane_size=15), father_edge=None, p_v_network=p_v_network)
player1 = p_v_mcts_player.MCTSPlayer(root=root1, p_v_network=p_v_network, max_simulation=5)
player2 = p_v_mcts_player.MCTSPlayer(root=root2, p_v_network=p_v_network, max_simulation=5)
arr, result, y_ = data_generator.generate_self_play_data(player1, player2, number_of_games=2, numbuer_of_samples_in_each_game=8)
print(arr.shape, result.shape, y_.shape)
| [
"rommeld@foxmail.com"
] | rommeld@foxmail.com |
7d9afcd7b97cc3012318e9793d7249e323e8df72 | 7726c0b8a493294fb75031de1b3258b8e4687ea6 | /py2.1.5.py | 16fcfd158c9da2c3f89f9adec5fc323afc83a604 | [] | no_license | Sabarikn/python_project | dd59a87161a3648a5eb46c0c3baa91ad02be3b1c | 1d74d1dbe2f7548fda92c446a835620f529bc011 | refs/heads/master | 2021-09-08T12:35:22.181014 | 2018-03-09T17:38:54 | 2018-03-09T17:38:54 | 106,112,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | def product(x):
prod=1
for z in x:
prod *= z
return prod
def fact(n):
return product(range(1,n+1))
print fact(5)
print fact(0)
| [
"123sabari456@gmail.com"
] | 123sabari456@gmail.com |
b92c943549a132c92ed17f40a08639a3e024897f | 106983cf0b8df622f514ecff2bb2fa4c794c9dac | /Misc/Raspberry Pi Things/Motors/stringComparison.py | c9d425c4e5b4eeabd02957268eb17c72dcf90889 | [] | no_license | michael5486/Senior-Design | 2d9ae521c637abf7c0825f85b32752ad61c62744 | 6b6c78bed5f20582a9753a9c10020c709d6b6e53 | refs/heads/master | 2021-01-19T09:58:35.378164 | 2017-05-26T17:17:13 | 2017-05-26T17:17:13 | 67,556,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | testVar = None
run = True
while run == 1:
testVar = raw_input("Ask user for something.\n")
if testVar == "exit":
run = False
print "System Exiting..."
else:
print testVar | [
"michael5486@gmail.com"
] | michael5486@gmail.com |
bf6fc0e2b6e4401de6c67d0c2d6061680f4ded35 | bf6252f1d8f8723f2771ffe50988a0263b137c05 | /workflow/daxgen.py | 8e8cfff12c8d922ef5822b1e8e2d0b2158435a57 | [] | no_license | papajim/HIC-HQ-Workflow | 3fb12267170a434447de69084332ae854551fbe1 | 84386c7cd19d682b1d2397040a7236c2a10f358f | refs/heads/master | 2021-06-12T19:32:45.373185 | 2018-10-08T16:49:16 | 2018-10-08T16:49:16 | 148,705,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | #!/usr/bin/env python
import sys
import os
import pwd
import time
from Pegasus.DAX3 import *
# The name of the DAX file is the first argument
if len(sys.argv) != 2:
sys.stderr.write("Usage: %s DAXFILE\n" % sys.argv[0])
sys.exit(1)
daxfile = sys.argv[1]
USER = pwd.getpwuid(os.getuid())[0]
dax = ADAG("hic_wf")
dax.metadata("name", "HIC")
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())
events_exec = Executable("run_events")
# if you need multiple runs just add the job in a for loop
# replace XYZ with the unique identifier
# and change the name of the input and args files
# eg.
args_conf = File("args.conf")
results_in = File("Results.tar.gz")
for i in range(10):
results_out = File("Results_"+str(i)+".tar.gz")
hic_job = Job("run_wrapper")
hic_job.addArguments(args_conf, str(i))
hic_job.uses(events_exec, link=Link.INPUT)
hic_job.uses(args_conf, link=Link.INPUT)
hic_job.uses(results_in, link=Link.INPUT)
hic_job.uses(results_out, link=Link.OUTPUT, transfer=True, register=False)
dax.addJob(hic_job)
# end of loop
f = open(daxfile,"w")
dax.writeXML(f)
f.close()
print "Generated dax %s" % daxfile
| [
"georgpap@isi.edu"
] | georgpap@isi.edu |
e95071e80c32aad85e8deab669e2b07b0a105fde | 741434652fdd39c674fefa975bcb5b59534440de | /globus_parser.py | 39d1ecbb167d833e764e76e713e48b726b5e0fe8 | [] | no_license | SalavatUsen/parser | 1a4702071e5f767376ba6aaaf5a60df79ffc50e1 | 29a6e89a30f7c529bcf46f807497755891919287 | refs/heads/master | 2023-02-22T19:51:58.257730 | 2021-01-23T08:46:45 | 2021-01-23T08:46:45 | 330,156,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import psycopg2
import requests
from bs4 import BeautifulSoup as bs
connection = psycopg2.connect(
dbname="parsing",
host="localhost",
user="postgres",
password="usi1103",
port=5432
)
cursor = connection.cursor()
# globus_page = requests.get(
# url='https://globus-online.kg/catalog/myaso_ptitsa_ryba/'
# )
# data = bs(globus_page.text, 'html.parser')
# view_showcase = data.find("div", attrs={"id": "view-showcase"})
# all_cards = view_showcase.find_all("div", class_='list-showcase__part-main')
# for card in all_cards:
# image_link = card.find('div', class_='list-showcase__picture').a.img.get('src')
# product_name = card.find('div', class_ = 'list-showcase__name-rating').a.text
# price = card.find("div", class_ = 'list-showcase__prices').find('span', class_='c-prices__value js-prices_pdv_ГЛОБУС Розничная').text
globus_vegetables_page = requests.get(
url='https://globus-online.kg/catalog/ovoshchi_frukty_orekhi_zelen/'
)
data_1 = bs(globus_vegetables_page.text, 'html.parser')
view_showcase = data_1.find("div", attrs={"id": "view-showcase"})
all_cards = view_showcase.find_all("div", class_='list-showcase__part-main')
for card in all_cards:
image_link = card.find('div', class_='list-showcase__picture').a.img.get('src')
product_name = card.find('div', class_ = 'list-showcase__name-rating').a.text
price = card.find("div", class_ = 'list-showcase__prices').find('span', class_='c-prices__value js-prices_pdv_ГЛОБУС Розничная').text
# cursor.execute(
# f'INSERT INTO vegetables(image, product_name, price) VALUES(\'{image_link}\', \'{product_name}\', \'{price}\');'
# )
# connection.commit()
# connection.close()
| [
"usenakunov20010103@gmail.com"
] | usenakunov20010103@gmail.com |
5b9c1aae3f26483755e82ecbe9dbc62f68a649ff | 9a343c495459e79dc408a102730bcaeac7fa8886 | /chapter9/SuperMattAdmin/ModelForm/urls.py | e330642ce8f8b5b0fcd8f30304a21a71719bd6f6 | [
"MIT"
] | permissive | MMingLeung/Python_Study | 62d3ae92bf6760de0804aa5792f53fb3799486a2 | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | refs/heads/master | 2022-12-27T12:53:05.186800 | 2018-03-07T04:34:36 | 2018-03-07T04:34:36 | 92,124,981 | 3 | 1 | MIT | 2021-06-10T18:35:33 | 2017-05-23T03:28:52 | JavaScript | UTF-8 | Python | false | false | 1,316 | py | """ModelForm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from app01 import views
from supermatt.service import test_v1
urlpatterns = [
# url(r'^admin/', admin.site.urls),
# include 如果参数是模块路径,导入模块找urlpatterns变量,获取列表
# url(r'^test/$', include('app01.urls')),
# 可以这样写
# url(r'^test/$', ([
# url(r'^test/', views.test),
# url(r'^test/', views.test),
# url(r'^test/', views.test),
# ],'app_name','name_space')),
url(r'^su/', test_v1.site.urls),
url(r'^test/', views.test),
url(r'^test2/', views.test2),
]
| [
"mingmingleung1991@gmail.com"
] | mingmingleung1991@gmail.com |
e142b94d6e5bd7ef5f4acda8123ad1994ca85d97 | ae2e5f44c770ca09e8a1a17ea235cf6367aa2f81 | /10.py | 7c3b3a749f9143d3dbd5dd84d7801b18455279d9 | [] | no_license | Xiaopoangangang/pc | fc46f15acc8f9646ec22c1a429beea96f5a1a2bb | d366ecf04df52770fa8b9d29e524842eb364a450 | refs/heads/master | 2021-01-22T07:58:25.020731 | 2017-09-04T05:33:51 | 2017-09-04T05:33:51 | 102,322,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from download import download
import
for page in itertools.count(1):
url = 'http://example.webscraping.com/view/-%d' % page
html = download(url)
if html = download(url)
break
else :
#success
pass
| [
"noreply@github.com"
] | Xiaopoangangang.noreply@github.com |
d1a383e4179706e362da39f7923776488f57e7e8 | be4d758ad43643594074d23a03202996de7a0e0f | /AngelaPythy/urls.py | 60c8413ace34f945c4e8995adfd2501b2d4540d1 | [] | no_license | SoowiiXie/lineBotPython | 0e145bf955f9cc1b47d7e72d09015344368bc022 | 4f0cae22e0c31bdc6c1179e2662e622c16308fae | refs/heads/master | 2022-12-23T22:59:33.469778 | 2020-05-12T02:57:26 | 2020-05-12T02:57:26 | 240,881,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | """AngelaPythy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from PythyAPI import views
urlpatterns = [
url('^callback', views.callback),
path('admin/', admin.site.urls),
]
| [
"e64009117@gs.ncku.edu.tw"
] | e64009117@gs.ncku.edu.tw |
4891c44698f85df5a56bf67e1dae425ed6795398 | 089eb1eb820ffa392fb6a6c40c7a8c9f48d4e4cd | /Django_exercise1_1/blog/migrations/0003_auto_20201223_2232.py | b48df8f10470e626aba142b23f47da7e1e2f1546 | [] | no_license | MohammadMahdiKarimKhani1998/Django_Maktabsharif | 75e28b958740b419795154f7f6ec6978fa85fed0 | f441cf8d4f34fa9187f6ad1bb3f3e3961a6ff38e | refs/heads/master | 2023-02-08T12:39:48.047107 | 2021-01-01T11:44:53 | 2021-01-01T11:44:53 | 322,036,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Generated by Django 3.1.3 on 2020-12-23 19:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20201216_1919'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.post', verbose_name='Post'),
),
]
| [
"m.mahdi.k.khani@gmail.com"
] | m.mahdi.k.khani@gmail.com |
dd51a2a3d3f4237402f227537634c543a66170a8 | 58e672bf96dd79d08b123c99ef437ffefc651db2 | /accounts/decorators.py | 5e5bf00d2686f5043c8351b087039e725c482055 | [] | no_license | 04bikash/crm-live | 4666099984fdf88c884af7193633704558263d37 | e54a915c52df7aea7485fe019a12ef3ec3952deb | refs/heads/master | 2023-04-01T02:20:14.275217 | 2021-04-08T18:20:37 | 2021-04-08T18:20:37 | 356,892,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | from django.http import HttpResponse
from django.shortcuts import redirect
def unauthenticated_user(view_func):
    """Decorator: run the view only for anonymous users.

    A user who is already logged in is redirected to 'home' instead.
    """
    def wrapper_func(request, *args, **kwargs):
        if not request.user.is_authenticated:
            return view_func(request, *args, **kwargs)
        return redirect('home')
    return wrapper_func
def allowed_users(allowed_roles=()):
    """Decorator factory: allow the view only when the user's first group
    name is in `allowed_roles`; everyone else gets a plain "not authorized"
    HttpResponse.

    Fixed: the default was a mutable list (`[]`, flake8 B006); a tuple
    default behaves identically for the `in` membership test and cannot be
    accidentally shared/mutated across calls.
    """
    def decorator(view_func):
        def wrapper_func(request, *args, **kwargs):
            group = None
            if request.user.groups.exists():
                # Only the user's first group is consulted.
                group = request.user.groups.all()[0].name
            if group in allowed_roles:
                return view_func(request, *args, **kwargs)
            return HttpResponse('You are not authorized to view this page')
        return wrapper_func
    return decorator
def admin_only(view_func):
    """Decorator: route by group — 'customer' goes to the user page, 'admin'
    runs the wrapped view, and any other user is sent to 'user-page' too.

    Fixed: the original body ended with a stray expression `r` (a botched
    attempt to comment out `return redirect('user-page')` — the '#' landed
    one character too late), which raised NameError for any user who was
    neither customer nor admin.  The redirect the commented-out code clearly
    intended is restored.
    """
    def wrapper_function(request, *args, **kwargs):
        group = None
        if request.user.groups.exists():
            group = request.user.groups.all()[0].name
        if group == 'customer':
            return redirect('user-page')
        if group == 'admin':
            return view_func(request, *args, **kwargs)
        return redirect('user-page')
    return wrapper_function
"akrsingh12@gmail.com"
] | akrsingh12@gmail.com |
a14a08ab5d69e160bff8619e5fa0c565a6878d76 | 03b30e760f571e309ab1539edbc24ce0ff47c141 | /cyly/test1.py | 0ee1339e5bbd9f9f590df225b40d8211bac483c9 | [] | no_license | latata666/newcoder | 548f32ab3acd75b592ce7f7b399ecdf340e747d8 | e9206ab924899a2985bece312777e3b5d55c6f60 | refs/heads/master | 2022-12-12T22:46:03.255804 | 2020-08-31T02:27:17 | 2020-08-31T02:27:17 | 263,274,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | # -*- coding: utf-8 -*-
# @Time : 2020/5/15 10:41
# @Author : Mamamooo
# @Site :
# @File : test1.py
# @Software: PyCharm
"""
"""
import logging
# NOTE(review): this mirrors the multiple-handler example from the Python
# logging cookbook; the two info messages below still mention
# "auxiliary_module" even though no such module is used in this file.
logger = logging.getLogger('spam_application')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level (ERROR and above only)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create one shared formatter and attach it to both handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logger.info('creating an instance of auxiliary_module.Auxiliary')
logger.info('done with auxiliary_module.some_function()')
"aslovc@gmail.com"
] | aslovc@gmail.com |
242d537ee3c789baff1fb2466e57c5584c6d39c2 | ffebdaad64b2d096c94c865e282aa7d7d6abe7ac | /JobsScraperBrno.py | d969101a363e619dc5b346fe0ee1630739d623ba | [] | no_license | erlep/JobsCzScraper | ded17957c70859942f05d55d89dd1655a0ab09b0 | 528a154948a3d1243fe8933ddb742cf3de551603 | refs/heads/main | 2023-08-16T05:10:30.643317 | 2021-10-04T06:28:21 | 2021-10-04T06:28:21 | 348,390,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | # Stahne "Python Brno" z jobs.cz a ulozi do Xls
# dle predlohy: scrapeindeed - https://github.com/jhnwr/scrapeindeed
# https://www.jobs.cz/prace/brno/?q%5B%5D=python&employer=direct&page=1&locality%5Bradius%5D=10
# page 1 - 4
import requests
from bs4 import BeautifulSoup
import pandas as pd
def extract(page):
    """Fetch one jobs.cz result page for 'python' jobs in Brno.

    Returns the page parsed as BeautifulSoup.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'}
    url = f'https://www.jobs.cz/prace/brno/?q%5B%5D=python&employer=direct&page={page}&locality%5Bradius%5D=10'
    # Fixed: headers must be passed as a keyword — requests.get(url, headers)
    # binds the dict to the second positional parameter `params` (query-string
    # data), so the User-Agent header was never actually sent.  Also removed
    # a dead assignment of an unused indeed.com URL.
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')
    return soup
def transform(soup):
    """Extract job postings from one parsed jobs.cz result page.

    Appends one dict per listing to the module-level `joblist`; fields that
    are missing on a given listing default to ''.  Returns nothing.
    """
    divs = soup.find_all('div', class_='standalone search-list__item')
    for item in divs:
        # print(item)
        # A listing without a usable title link is skipped entirely.
        try:
            title = item.find('a').text.strip()
        except:
            continue
        if (title == ''):
            continue
        try:
            company = item.find('span', class_='search-list__secondary-info--label').text.strip()
        except:
            company = ''
        try:
            salary = item.find('span', class_='search-list__tags__label search-list__tags__salary--label').text.strip()
        except:
            salary = ''
        href = '' + item.find('a').get("href") + ''
        # Human-readable "added" label shown on the listing.
        try:
            dtm = item.find('span', class_='label-added').text.strip()
        except:
            dtm = ''
        # Machine-readable timestamp from the data attribute.
        try:
            datum = item.find('span', {"class": "label-added", "data-label-added-valid-from": True})['data-label-added-valid-from']
        except:
            datum = ''
        # Timestamp looks like 2021-03-02T01:44:18+01:00 — drop the "T" and the "+01:00" offset.
        datum = datum.replace("+01:00", "").replace("T", " ")
        # Assemble the job record ('Kdy' = Czech for "when").
        job = {
            'Title': title,
            'Company': company,
            'Salary': salary,
            'Kdy': dtm,
            'Date Add': datum,
            # 'item': item,
            'Link': href,
        }
        joblist.append(job)
    return
# Scrape result pages 1..5 and collect every listing into `joblist`.
joblist = []
for i in range(1, 6, 1):
    print(f'Getting page, {i}')
    c = extract(i)
    transform(c)
# De-duplicate (listings can repeat across pages) and export both formats.
df = pd.DataFrame(joblist)
df.drop_duplicates(inplace=True)
df.to_excel('zzJobsPython-Brno.xlsx', index=False)
df.to_csv('zzJobsPython-Brno.csv', index=False)
print('OkDone.')
| [
"egix@centrum.cz"
] | egix@centrum.cz |
c67957df0b4b4b611ef1365fe46ccdda87f33190 | 63527bc0de4991578a1c69dfc1523d9df9655878 | /apps/message/migrations/0002_auto_20190216_2239.py | 0e98aa1b0b385f36e27053214c8ab9f99c22e4df | [
"MIT"
] | permissive | SparksFly8/Django_message_board | aa06ec34a6535bdd67a907fa9d6c525e88c6e33b | e6d745882ecd14eed308a7a60689a5e832a9c0ec | refs/heads/master | 2020-04-23T06:23:23.868253 | 2019-04-05T10:21:09 | 2019-04-05T10:21:09 | 170,971,487 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | # Generated by Django 2.1.7 on 2019-02-16 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: replaces UserMessage's auto `id` with a CharField
    # primary key `object_id`, and relaxes the `name` field.
    # NOTE(review): removing `id` and adding `object_id` with default ''
    # would collide on pre-existing rows (duplicate primary keys) — this
    # presumes the table was empty when the migration ran.

    dependencies = [
        ('message', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='usermessage',
            name='id',
        ),
        migrations.AddField(
            model_name='usermessage',
            name='object_id',
            field=models.CharField(default='', max_length=50, primary_key=True, serialize=False, verbose_name='主键'),
        ),
        migrations.AlterField(
            model_name='usermessage',
            name='name',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='用户名'),
        ),
    ]
| [
"shiliang6402@foxmail.com"
] | shiliang6402@foxmail.com |
5be4e1baaef1f83e8d562f6093cc77e2b90ce622 | 83dd8b6cd62c63d15dc36634cee1ecf6b55b734e | /mary.py | a600ff49d2aa8d1399ac380b4e2c57caed82965e | [] | no_license | srilekha30/Python_Assignment | 52b359fe2943cdfca70acf1c25c4ae035c6dd76e | bbdfe0ca16ad6b03f39532f83ccaf48b6327f853 | refs/heads/master | 2020-07-01T15:37:56.760090 | 2019-08-08T08:25:38 | 2019-08-08T08:25:38 | 201,213,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | word1="veda"
# Python 2 script: `print` is used below as a statement.
word2="is"
word3="a"
word4="good"
word5="girl"
print word1+" "+word2+" " +word3+" " +word4+" " +word5
| [
"srielkha.n16@iiits.in"
] | srielkha.n16@iiits.in |
184565941211efa0dc2b813dea37bf3a3f72bd59 | 582ce3cf75f28f3c30c9f14d9f1c48b8a39752f2 | /8_next_node_in_bt.py | e9b4107b438af3978af8c55e5eb3d28c78641702 | [] | no_license | Mountain-AI/python-to-the-offer | 03c7c5706a6503e8e23065a89dce706c9c29e0af | 88045c7618ad84cbbd38dff00be763bccaaa3fad | refs/heads/master | 2023-03-02T17:01:38.923131 | 2018-11-07T03:49:39 | 2018-11-07T03:49:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | # -*- coding:utf-8 -*-
class TreeLinkNode:
    """Binary-tree node that also carries a parent pointer in `next`."""
    def __init__(self, x):
        self.val = x
        # Children and parent all start unset.
        self.left = self.right = self.next = None
class Solution:
    def GetNext(self, pNode):
        """Return the in-order successor of `pNode`, or None.

        Nodes carry a parent pointer in `next`; a None result means `pNode`
        is the last node of the in-order traversal.
        """
        # Case 1: with a right subtree, the successor is its leftmost node.
        if pNode.right:
            succ = pNode.right
            while succ.left:
                succ = succ.left
            return succ
        # Case 2: otherwise climb until the current node is its parent's
        # left child; that parent is the successor.  Running out of parents
        # means there is no successor.
        node = pNode
        while node.next:
            if node.next.left == node:
                return node.next
            node = node.next
        return None
| [
"devezhang@gmail.com"
] | devezhang@gmail.com |
1acf5e6c4372032602302fd52156f39fd34bcd21 | 157d8ce9b1af229d03dbfe93db740f328de1d33d | /predict_test_labels.py | b0c77c03396e8838a7296b31b6b67e206c80d4eb | [] | no_license | YIKU8800/TF_W2V | 111aea554f85df30a97b1f5d6c322992eb1636c3 | 7d23ac87e5aa888be0d75bbe668f3e1bc8ca78bc | refs/heads/master | 2020-03-15T15:29:24.261257 | 2018-05-05T09:06:56 | 2018-05-05T09:06:56 | 132,213,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,745 | py |
# coding: utf-8
# In[240]:
from PIL import Image
import numpy as np
from chainer import Variable, FunctionSet, optimizers,serializers,cuda
import chainer.functions as F
from collections import Counter
np.random.seed(0)
import sklearn.metrics
import chainer
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# In[241]:
def load_image(path, width=0, height=0):
    """Load an image from `path` as an RGB array shaped (width, height, 3).

    With the default width/height the image's own dimensions are used and
    the reshape is a no-op.  NOTE(review): this function is shadowed later
    in the file by a second load_image(filepath) used for the model input
    pipeline.
    """
    img = Image.open(path).convert('RGB')
    data = np.array(img)
    if width == 0:
        width = data.shape[0]
        height = data.shape[1]
    return data.reshape(width, height, 3)
# In[242]:
def gen_test_co():
    """Parse corel5k_test_list.txt.

    Returns:
        (test_co, test_idx): test_co maps a directory name to the list of
        image ids under it; test_idx maps each full 'dir/id' line to its
        0-based position in the file.
    """
    test_co = {}
    test_idx = {}
    with open('corel5k_test_list.txt') as listing:
        for position, raw in enumerate(listing):
            entry = raw.strip('\n')
            test_idx[entry] = position
            parts = entry.split('/')
            test_co.setdefault(parts[0], []).append(parts[1])
    return test_co, test_idx
def load_labels():
    """Map each label word in corel5k_words.txt to its 0-based index.

    Fixed: the file handle is now closed deterministically (`with`), and the
    brittle hard-coded `del line[260]` — which assumed exactly 260 words plus
    a trailing newline and raised IndexError otherwise — is replaced by
    stripping any trailing blank entries left by the final newline.
    """
    with open("corel5k_words.txt") as f:
        words = f.read().split('\n')
    # Drop empty entries produced by the file's trailing newline.
    while words and words[-1] == '':
        words.pop()
    return {word: i for i, word in enumerate(words)}
labelidx = load_labels()
def load_gt_labels():
    """Read ground_truth_labels.txt into a list of 260-dim 0/1 numpy vectors.

    Each line is a '|'-separated list of label words; indices come from the
    module-level `labelidx` mapping.  A word missing from `labelidx` (or a
    blank line) raises KeyError.
    """
    gt_labels = []
    with open("ground_truth_labels.txt") as f:
        for line in f:
            t = line.strip('\n')
            labels = np.zeros(260)
            for label in (t.split('|')):
                idx = labelidx[label]
                labels[idx] = 1
            gt_labels.append(labels)
    return gt_labels
gt_labels = load_gt_labels()
# In[243]:
def get_test_img_idx(test_idx, idir, iid):
    """Look up the flat test-set index of image `idir/iid` in `test_idx`."""
    return test_idx['{}/{}'.format(idir, iid)]
test_co, test_idx = gen_test_co()
# In[244]:
def load_labels():
    """Return the corel5k label words as a list (index-aligned with the
    earlier dict-returning load_labels, which this redefinition shadows;
    `labelidx` was captured before this point so both coexist at runtime).

    Fixed: the file handle is closed deterministically (`with`), and the
    brittle hard-coded `del line[260]` is replaced by stripping trailing
    blank entries left by the file's final newline.
    """
    with open("corel5k_words.txt") as f:
        words = f.read().split('\n')
    while words and words[-1] == '':
        words.pop()
    return words
labels = load_labels()
def load_image(filepath):
    """Load one image as a float32 NCHW batch of size 1 for the network.

    Resizes to the module-level `im_size`, scales pixel values into [0, 1],
    and transposes HWC -> CHW.  NOTE(review): this redefinition shadows the
    earlier load_image(path, width, height).
    """
    x_train = [np.array(Image.open(filepath).resize((im_size,im_size)))]
    x_train=np.array(x_train)
    x_train=x_train.astype(np.int32)/255.0
    x_train=np.transpose(x_train,(0,3,1,2))
    x_train=x_train.astype(np.float32)
    return x_train
y_test = np.load('y_test.npy')
y_test = y_test.astype(np.int32)
# In[245]:
def get_origin_labels(idir, iid):
    """Return the stored y_test 0/1 label vector for test image idir/iid."""
    idx = get_test_img_idx(test_idx, idir, iid)
    return y_test[idx]
# idx from 0
def get_gt_labels(idx):
    """Return the idx-th (0-based) vector from the module-level gt_labels."""
    return gt_labels[idx]
def get_orgin_label_str(idir, iid):
    """Return the image's y_test labels joined with '|' ('' when none).

    (The typo in the function name is kept for existing callers.)
    """
    idx = get_test_img_idx(test_idx, idir, iid)
    present = [labels[i] for i in range(0, 260) if y_test[idx][i] == 1]
    return '|'.join(present)
# In[246]:
im_size=127
threshold = 0.1
model_path = {
'without' : 'model/paper/without.model',
#'word2vec' : 'model/ohw2v_model_0.0000050000_1.00_100.model',}
#'word2vec' : 'model/ohw2v_model_0.0000050000_1.00_300_5.model',}
'word2vec' : 'model/ohw2v_model_0.0000050000_1.00_1000.model',}
without_model = FunctionSet(conv1=F.Convolution2D(3, 96, 11, stride=4),
bn1=F.BatchNormalization(96),
conv2=F.Convolution2D(96, 256, 5, pad=2),
bn2=F.BatchNormalization(256),
conv3=F.Convolution2D(256, 384, 3, pad=1),
conv4=F.Convolution2D(384, 384, 3, pad=1),
conv5=F.Convolution2D(384, 256, 3, pad=1),
fc6=F.Linear(2304,1024),
fc7=F.Linear(1024, 260))
serializers.load_npz(model_path['without'], without_model)
word2vec_model = FunctionSet(conv1=F.Convolution2D(3, 96, 11, stride=4),
bn1=F.BatchNormalization(96),
conv2=F.Convolution2D(96, 256, 5, pad=2),
bn2=F.BatchNormalization(256),
conv3=F.Convolution2D(256, 384, 3, pad=1),
conv4=F.Convolution2D(384, 384, 3, pad=1),
conv5=F.Convolution2D(384, 256, 3, pad=1),
fc6=F.Linear(2304,1024),
fc7=F.Linear(1024, 260))
serializers.load_npz(model_path['word2vec'], word2vec_model)
def predict(model, x_data):
    """Run the AlexNet-style forward pass and return per-label sigmoid scores.

    `x_data` is a float32 NCHW batch (see load_image).  The returned
    chainer Variable holds values in (0, 1) for each of the 260 labels —
    sigmoid rather than softmax because this is a multi-label task.
    """
    #x = Variable(cuda.to_gpu(x_data))
    x = Variable(x_data)
    h=F.max_pooling_2d(F.relu(F.local_response_normalization(model.conv1(x))),3,stride=2)
    h=F.max_pooling_2d(F.relu(F.local_response_normalization(model.conv2(h))),3,stride=2)
    h=F.relu(model.conv3(h))
    h=F.relu(model.conv4(h))
    h=F.max_pooling_2d(F.relu(model.conv5(h)),3,stride=2)
    h=F.relu(model.fc6(h))
    y = model.fc7(h)
    y_f=F.sigmoid(y)
    return y_f
def predict_labels(model, image_path, mode = 1):
    """Predict a 260-dim 0/1 label vector (int) for one image.

    mode == 1: mark every label whose sigmoid score exceeds 0.1.
    otherwise: mark the six highest-scoring labels unconditionally.
    """
    p_labels = np.zeros(260)
    xdata = load_image(image_path)
    y_f = predict(model, xdata)
    label_prob = y_f.data[0, :]
    limit = 0
    # argsort is ascending; the `i = -i - 1` trick below walks it from the
    # highest probability down.
    idxsort = label_prob.argsort()
    for i in range(len(idxsort)):
        i = -i - 1
        prob = label_prob[idxsort[i]]
        if mode == 1:
            if prob > 0.1:
                p_labels[idxsort[i]] = 1
        else:
            p_labels[idxsort[i]] = 1
            limit = limit + 1
            # `limit > 5` fires after the sixth label has been added.
            if limit > 5:
                break
        #if prob > 0.1:
            #p_labels[idxsort[i]] = 1
    return p_labels.astype(int)
def _tp(gt_labels, p_labels):
tp = 0
for i in range(81):
if gt_labels[i] == 1 and p_labels[i] == 1:
tp += 1
return tp
def _fp(gt_labels, p_labels):
fp = 0
for i in range(81):
if gt_labels[i] == 0 and p_labels[i] == 1:
fp += 1
return fp
def _fn(gt_labels, p_labels):
fn = 0
for i in range(81):
if gt_labels[i] == 1 and p_labels[i] == 0:
fn += 1
return fn
def macro_f1(gt_labels, p_labels):
    """F1 score of one prediction vector against its ground truth.

    Uses the sibling _tp/_fp/_fn counters; the 1e-6 epsilons keep every
    division defined when a class is empty.
    """
    tp = _tp(gt_labels, p_labels)
    fp = _fp(gt_labels, p_labels)
    fn = _fn(gt_labels, p_labels)
    mprecision = tp / (tp + fp + 0.000001)
    mrecall = tp / (tp + fn + 0.000001)
    f1_score = 2*(mprecision)*(mrecall)/(mprecision+mrecall+0.000001)
    return f1_score
def cm_precision_recall(prediction, truth):
    """Score one binary label vector against its ground truth.

    Args:
        prediction: vector of predicted 0/1 labels
        truth: vector of ground-truth 0/1 labels
    Returns:
        the F1 score (scalar); small epsilons guard the divisions when a
        class is empty.  (Despite the name, only F1 is returned — the
        confusion counts are internal.)
    """
    counts = Counter()
    # Key each pair by (truth is positive, prediction is positive).
    for gold, pred in zip(truth, prediction):
        counts[gold == 1, pred == 1] += 1
    # Order: [TP, TN, FP, FN]
    cm = np.array([counts[True, True], counts[False, False],
                   counts[False, True], counts[True, False]])
    precision = cm[0] / (cm[0] + cm[2] + 0.000001)
    recall = cm[0] / (cm[0] + cm[3] + 0.000001)
    return 2 * precision * recall / (precision + recall + 0.000001)
print(cm_precision_recall(np.array([0,0,0,0]), np.array([0,0,1,1])))
print(sklearn.metrics.f1_score(np.array([0,0,0,0]), np.array([0,0,1,1])))
prediction = np.array([[0,0,1,0], [0,0,1,0], [1,0,1,0]])
truth = np.array([[0,0,1,0],[0,0,1,1], [0,1,0,1]])
total = 0
total += cm_precision_recall(np.array([0,0,1,0]), np.array([0,0,1,0]))
total += cm_precision_recall(np.array([0,0,1,0]), np.array([0,0,1,1]))
total += cm_precision_recall(np.array([1,0,1,0]), np.array([0,1,0,1]))
print(total / 3)
#print(sklearn.metrics.f1_score(truth, prediction, average='binary'))
# In[247]:
test_co, test_idx = gen_test_co()
def calc_f1_score(model, mode=1):
    """Average per-image F1 score of `model` over the whole test set.

    Iterates every test image listed in the module-level `test_co`, predicts
    its labels with predict_labels(), and averages sklearn's binary F1
    against the stored y_test vectors.  Raises ZeroDivisionError if the
    test set is empty.
    """
    total_score = 0
    inum = 0
    for idir in test_co.keys():
        i = 0
        for iid in test_co[idir]:
            val = []  # NOTE(review): unused, kept from earlier experiments
            i += 1
            #if i > 2:
                #break
            inum += 1
            #print("calc......", inum)
            idx = get_test_img_idx(test_idx, idir, iid)  # NOTE(review): unused here
            path = '%s/%s.jpeg' % (idir, iid)
            gt_labels = get_origin_labels(idir, iid)
            #gt_labels = get_gt_labels(inum - 1)
            p_labels = predict_labels(model, path, mode)
            #f1_score = cm_precision_recall(p_labels, gt_labels)
            f1_score = sklearn.metrics.f1_score(gt_labels, p_labels)
            total_score += f1_score
    return total_score / inum
#f1_score = calc_f1_score(without_model, 2)
#print(f1_score)
#f1_score = calc_f1_score(word2vec_model, 2)
#print(f1_score)
# In[ ]:
| [
"yu.zhao.cs@gmail.com"
] | yu.zhao.cs@gmail.com |
710e9beb1670418cd048ecca8fd1069ed2ff32dd | 4b6960a8c0daa3bb8827946c2d7eeb38800c252e | /05_django/blog/blogger/models.py | 474504651854e34d47e54ad86979559575657d08 | [] | no_license | IamLAM/Python | becb5672e529fecfeb1edd884c55ceb56befe502 | adcb7af1fc46e4ce0059f6a9d23a43b7a0a0a2b9 | refs/heads/master | 2021-06-01T22:38:13.554405 | 2020-03-09T20:36:49 | 2020-03-09T20:36:49 | 135,672,708 | 3 | 7 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # Create your models here.
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title | [
"ingeniero@luismonge.com.mx"
] | ingeniero@luismonge.com.mx |
86091fcb70b761ae90f0b7ef98b4c0a7f318036d | 4dec407d81d91abf7295126d558df41616948991 | /crawl_dangi.py | eba11914db76f6dbde220dfcb30d74f5c38651e5 | [] | no_license | JOJEONGHA/finance_crawling | 8573455783c8ee7371693db455021c9ade9afdfd | 2b6b57b01e201a4483ac68fbef023b14955d66de | refs/heads/master | 2022-12-18T02:05:21.559484 | 2020-09-23T15:24:41 | 2020-09-23T15:24:41 | 286,010,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,236 | py | from bs4 import BeautifulSoup
from selenium import webdriver
import requests
import time
# Pull the total number of result pages: fetch page 1 of the theme listing
# and count the pagination anchors.
url = "https://finance.naver.com/sise/theme.nhn?&page=1"
html_doc = requests.get(url)
html_doc = html_doc.text
soup = BeautifulSoup(html_doc,"html.parser")
tags = soup.select("table.Nnavi a")
page_num = len(tags) - 1 # number of pages (the -1 presumably discounts a nav link — verify)
# Walk every listing page, every theme on the page, and every stock in the
# theme, opening the company's financial page in a Selenium-driven Chrome.
for i in range(page_num):
    link = "https://finance.naver.com/sise/theme.nhn?&page=" + str(i+1)
    # url by atag in a page
    html_doc = requests.get(link)
    html_doc = html_doc.text
    soup = BeautifulSoup(html_doc,"html.parser")
    tags = soup.select("table.theme td.col_type1 a")
    for j in tags:
        print("< " + j.text + " >") # print the theme name
        link = "https://finance.naver.com/" + j.attrs['href']
        #url by atag in a thema
        html_doc = requests.get(link)
        html_doc = html_doc.text
        soup = BeautifulSoup(html_doc,"html.parser")
        tags = soup.select("table.type_5 .name_area a")
        # One event >>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        for k in tags:
            # k tag has company name >>>>>>>>>>>>>
            link = k.attrs['href']
            # financial-statement URLs for this stock code
            code = link.split("=")[1]
            link = "https://navercomp.wisereport.co.kr/v2/company/c1030001.aspx?cmp_cd=" + code + "&cn="
            link_test = "https://navercomp.wisereport.co.kr/v2/company/c1010001.aspx?cmp_cd=" + code + "&cn="
            # extract whether net income (당기순이익) is positive or negative
            # NOTE(review): a new Chrome instance is launched per stock and
            # never closed — browser.quit() is missing (resource leak).
            browser = webdriver.Chrome('./webdriver_84/chromedriver')
            browser.get(link_test)
            ybtn = browser.find_element_by_css_selector('#cns_Tab21') # annual tab
            qbtn = browser.find_element_by_css_selector('#cns_Tab22') # quarterly tab
            qbtn.click()
            # #TDdCYnFkT0 .gHead01 tbody tr
            soup = BeautifulSoup(browser.page_source, "html.parser")
            ni = soup.select("#TDdCYnFkT0 .gHead01 tbody tr")
            # print(">>>>>>>>>>>>>>")
            print(type(ni))
            print(ni)
            for n in ni:
                # print(">>>>>>>>>>>>>>")
                tag = n.select(".txt")
                # print(type(tag))
                # print(tag)
                # if tag[0].text == "당기순이익":
                #     tag = n.select(".num")
                #     nums = ""
                #     for m in range(5):
                #         num = tag[m].text
                #         num = num.replace(",","")
                #         if float(num) < 0:
                #             break
                #         nums = nums + " " + num
                #         # passing the last item without a break means net
                #         # income was positive in every period
                #         if m == 4:
                #             print("종목이름 : " + k.text) # stock name
                #             print("당기순이익 : " + nums) # 5 periods of net income
                #             break
            # browser.get(link)
            # soup = BeautifulSoup(browser.page_source, "html.parser")
            # ni = soup.select(".lvl1")
            # for n in ni:
            #     tag = n.select(".txt")
            #     if tag[0].attrs['title'] == "당기순이익":
            #         tag = n.select(".num")
            #         nums = ""
            #         for m in range(5):
            #             num = tag[m].text
            #             num = num.replace(",","")
            #             if float(num) < 0:
            #                 break
            #             nums = nums + " " + num
            #             # same "all periods positive" check as above
            #             if m == 4:
            #                 print("종목이름 : " + k.text) # stock name
            #                 print("당기순이익 : " + nums) # 5 periods of net income
            #                 break
# TODO (translated from the original Korean notes):
# - print based on whether each quarter's net income over the past year was profit or loss
# - print based on presence of CB/BW (convertible / bond-with-warrant issues)
# - print market capitalization
# - save results to a text file instead of printing
| [
"wosd1233@gmail.com"
] | wosd1233@gmail.com |
afb29b7ac67452e78d90f8085452a196faca8900 | b20f4ae6d059ad630d4f0292e4c9510761c782c0 | /randomizer.py | 97f5b1b41773a175a305263fa69b949b606d5d84 | [] | no_license | jamesrkiger/team_randomizer | 07c1a7d0a2c6cd42a60820ee559b90937eed5c12 | 60a8fdf10f3758381bb6e19b29397126ee3ffd0d | refs/heads/main | 2023-04-22T20:55:33.777260 | 2021-05-09T00:16:39 | 2021-05-09T00:16:39 | 365,595,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | import random
roster_init = "Linus, Cletus, Clement, Sixtus, Cornelius, Cyprian, Lawrence, Chrysogonus, John, Paul, Cosmas, Damian, Stephen, Matthias, Barnabas, Ignatius, Alexander, Marcellinus, Peter, Felicitas, Perpetua, Agatha, Lucy, Agnes, Cecilia, Anastasia"
print ("Please review the roster and see if anyone is absent:\n" + roster_init + "\n")
print ("Enter the names of any absent students as they appear above, separated by commas and spaces. If no one is absent, press enter.\n")
absentees = str(input())
roster = roster_init.split(", ")
#remove absentees, if any
if (len(absentees) > 0):
absentees = absentees.split(", ")
for name in absentees:
roster.remove(name)
student_num = len(roster)
print ("There are currently " + str(student_num) + " students here today. How many teams would you like to create?.\n")
number_of_teams = int(input())
# Base team size, plus how many teams must absorb one extra member.
default_team_size = len(roster) // number_of_teams
team_remainder = len(roster) % number_of_teams
#generate an array of team sizes (I think this could also be done without a loop using numpy split)
team_sizes = []
for team in range(number_of_teams):
    team_sizes.append(default_team_size)
    # The first `team_remainder` teams each get one extra member.
    if (team_remainder > 0):
        team_sizes[team] += 1
        team_remainder -= 1
# Randomly shuffle the roster.  Fixed: the previous hand-rolled swap loop
# drew random.randint(0, name + 1), whose upper bound is *inclusive*, so on
# the final iteration it could index one past the end of the list and raise
# IndexError (and the resulting shuffle was not uniform anyway).
random.shuffle(roster)
#print out a team from the beginning of the roster, then delete those names from the roster
print()
for team in range(number_of_teams):
print ("Team " + str(team + 1))
for name in range(team_sizes[team]):
print(roster[name])
del roster[:team_sizes[team]]
print()
| [
"james.kiger@gmail.com"
] | james.kiger@gmail.com |
8d48f6966f15f7eaa7500ea26eede3edc21cf852 | f7844a828b7e1507b733fa01a0a563987ae77ad6 | /cpu-usage.py | 7860347dba573fe85a8e1adff417515f4f9a3518 | [] | no_license | emshilton/python | d0f8627314f5366bce6d94214f72ad66f737329d | 43ae51a4d2dc47d79e43ebcd798fbffba5b2d163 | refs/heads/master | 2021-01-19T09:33:00.100777 | 2017-04-12T09:43:35 | 2017-04-12T09:43:35 | 87,765,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #This program allows you to output your computer's current CPU usage in a readable format
# psutil is a third-party library for retrieving information about running
# processes and system utilization.  (Python 2 script: `print` statements.)
import psutil
# Every attribute of psutil.cpu_times() is the cumulative number of seconds
# the CPU has spent in a given mode.
# User: time spent by normal processes executing in user mode.
userCPU = psutil.cpu_times().user
if userCPU < 60:
    print 'Time spent in normal processes: ', userCPU , 'sec'
else:
    print 'Time spent in normal processes: ', str(round(userCPU/60, 2)) , 'mins'
# Idle: time spent doing nothing.
idleCPU = psutil.cpu_times().idle
if idleCPU < 60:
    print 'Time spent doing nothing: ', idleCPU , 'sec'
else:
    print 'Time spent doing nothing: ', str(round(idleCPU/60, 2)) , 'mins'
# System: time spent by processes executing in kernel mode.
# NOTE(review): each cpu_times() call takes a fresh snapshot, so the three
# figures are not sampled at the same instant.
systemCPU = psutil.cpu_times().system
if systemCPU < 60:
    print 'Time spent executing in kernel mode: ', systemCPU , 'sec'
else:
    print 'Time spent executing in kernel mode: ', str(round(systemCPU/60, 2)) , 'mins'
| [
"emshilton@gmail.com"
] | emshilton@gmail.com |
c95ef0eb91bcbc64732b734d36f348d30ca813a2 | ff4523d04f7fdde704c56cfe1bf94686f3585d44 | /misc/remove_bpe.py | 12ae96addf2f9ec2323abbdf43ae85a72a652578 | [] | no_license | cshanbo/eval-scripts | b8c39209906a2d8534bfc2186c331281ac49c0f1 | 71c083dcfcda6580d267724a32477c62b4fa7f56 | refs/heads/master | 2021-01-20T04:16:17.055133 | 2017-05-22T02:07:38 | 2017-05-22T02:07:38 | 89,666,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import re
# Undo BPE segmentation on stdin (Python 2: xrange / print statement).
# Tokens ending in "@@" are subword pieces to be glued to the next token;
# the special case below keeps a space before digits and '|'.
for line in sys.stdin:
    line = line.strip()
    snt = line.split(' ')
    output = ''
    for i in xrange(len(snt)):
        if snt[i].endswith('@@'):
            # if the following word starts with a digit, we don't remove the space key
            if i + 1 < len(snt) and (snt[i + 1][0].isdigit() or snt[i + 1][0] == '|'):
                output += snt[i].replace('@@', ' ')
            else:
                output += snt[i].replace('@@', '')
        else:
            output += snt[i] + ' '
    print output
| [
"cshanbO@gmail.com"
] | cshanbO@gmail.com |
20488d1c0036f1572da86e8bdee91e307b3be22c | 80ddd81c8675c08fd44acdddef71d465e770f35f | /src/server/Bundle/Bundle/wsgi.py | 3342f4dd22b5345efdda065a5b8768b222a856cb | [
"Apache-2.0"
] | permissive | Sailer43/Bundle_Web | 8f316f0f1ffa2c38e9cd0cd908ba0b92029c6e87 | 17b2aea2a01bffd25a74506caa8a52dc3849c520 | refs/heads/master | 2021-09-15T02:14:58.841714 | 2018-05-24T22:52:11 | 2018-05-24T22:52:11 | 107,778,041 | 0 | 0 | null | 2017-10-22T12:07:38 | 2017-10-21T12:25:14 | HTML | UTF-8 | Python | false | false | 390 | py | """
WSGI config for Bundle project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Bundle.settings")
application = get_wsgi_application()
| [
"sitaochen@sitaos-air.attlocal.net"
] | sitaochen@sitaos-air.attlocal.net |
59bdee0e78e33a55ce9c679c57b0ee8dddd45b85 | 117954f010bfc97bf11dd40bf85783c331c7d1c6 | /src/syncproc.py | 64ab13d7416e1eb48e1eef96b1e268c7cfc43f55 | [] | no_license | bridgecrew-perf4/aws-textract-terraform | bc04b458ea0803cf8b6b89fc82156f4a1a756ee7 | 8fa2aa115275280b23f84871587ec91cc818fa80 | refs/heads/main | 2023-04-09T12:38:06.486998 | 2021-04-22T11:41:07 | 2021-04-22T11:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | import boto3
from decimal import Decimal
import json
import os
from helper import AwsHelper, S3Helper, DynamoDBHelper
from og import OutputGenerator
import datastore
def callTextract(bucketName, objectName, detectText, detectForms, detectTables):
    """Run Amazon Textract synchronously on an S3 object.

    Uses DetectDocumentText when neither forms nor tables are requested,
    otherwise AnalyzeDocument with the matching FeatureTypes.  `detectText`
    is accepted for interface symmetry but does not change which call is
    made.
    """
    document = {
        'S3Object': {
            'Bucket': bucketName,
            'Name': objectName
        }
    }
    textract = AwsHelper().getClient('textract')
    if detectForms or detectTables:
        features = []
        if detectTables:
            features.append("TABLES")
        if detectForms:
            features.append("FORMS")
        return textract.analyze_document(Document=document, FeatureTypes=features)
    return textract.detect_document_text(Document=document)
def processImage(documentId, features, bucketName, outputBucket, objectName, outputTable):
    """Run Textract on one image, generate output, and mark the document done.

    `features` is a list of strings; only "Forms"/"Tables" change which
    Textract API is used — "Text" alone selects plain text detection.
    """
    detectText = "Text" in features
    detectForms = "Forms" in features
    detectTables = "Tables" in features
    response = callTextract(bucketName, objectName, detectText, detectForms, detectTables)
    dynamodb = AwsHelper().getResource("dynamodb")
    ddb = dynamodb.Table(outputTable)
    print("Generating output for DocumentId: {}".format(documentId))
    # OutputGenerator presumably persists the parsed response — confirm in og.py.
    opg = OutputGenerator(documentId, response, outputBucket, objectName, detectForms, detectTables, ddb)
    opg.run()
    print("DocumentId: {}".format(documentId))
    ds = datastore.DocumentStore(outputTable)
    ds.markDocumentComplete(documentId)
# --------------- Main handler ------------------
def processRequest(request):
    """Validate the request dict and run Textract processing for it.

    Returns a dict shaped like an API Gateway/Lambda response.  When a
    required field is falsy, nothing is processed and the body is empty —
    NOTE(review): the statusCode is still 200 in that case; confirm intended.
    """
    output = ""
    print("request: {}".format(request))
    bucketName = request['bucketName']
    objectName = request['objectName']
    features = request['features']
    documentId = request['documentId']
    outputBucket = request['outputBucket']
    outputTable = request['outputTable']
    if(documentId and bucketName and objectName and features):
        print("DocumentId: {}, features: {}, Object: {}/{}".format(documentId, features, bucketName, objectName))
        processImage(documentId, features, bucketName, outputBucket, objectName, outputTable)
        output = "Document: {}, features: {}, Object: {}/{} processed.".format(documentId, features, bucketName, objectName)
        print(output)
    return {
        'statusCode': 200,
        'body': output
    }
def lambda_handler(event, context):
    """SQS-triggered Lambda entry point.

    Parses the first queued record's JSON body, merges in the output
    location from the environment, and delegates to processRequest().
    """
    print("event: {}".format(event))
    message = json.loads(event['Records'][0]['body'])
    print("Message: {}".format(message))
    request = {
        "documentId": message['documentId'],
        "bucketName": message['bucketName'],
        "objectName": message['objectName'],
        "features": message['features'],
        "outputTable": os.environ['OUTPUT_TABLE'],
        "outputBucket": os.environ['OUTPUT_BUCKET'],
    }
    return processRequest(request)
"eddrichang@gmail.com"
] | eddrichang@gmail.com |
856e9c0036931f4496353a90a125e2e2e94829e8 | 95aa5a5c10ad18195d7f92e37265d9dff06debe6 | /synapse/tools/server.py | 55efd5ecbd42ae28f7f5bd3ea0aa56bb306baf18 | [
"Apache-2.0"
] | permissive | drstrng/synapse | 2679f7c23221ad7d8fd2fbb4745bdcd5275843da | 3901f17601821aa0e8b6de4de434309d465fbba2 | refs/heads/master | 2021-01-17T22:02:29.833824 | 2015-09-01T18:56:57 | 2015-09-01T18:56:57 | 40,968,669 | 0 | 0 | null | 2015-08-18T11:40:51 | 2015-08-18T11:40:50 | Python | UTF-8 | Python | false | false | 1,358 | py | import sys
import argparse
import importlib
import synapse.link as s_link
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
def main(argv):
    """Parse CLI args, share any requested cortexes, bind link servers, wait.

    Returns None (exit status 0) on normal shutdown; returns early with an
    error message if --initmod names a module without initDaemon().
    """
    p = argparse.ArgumentParser(prog='server')
    p.add_argument('--initmod',help='python module name for daemon init callback')
    p.add_argument('--cortex', action='append', default=[], help='cortex name,url to share for RMI')
    p.add_argument('linkurl',nargs='+',help='link urls to bind/listen')

    opts = p.parse_args(argv)

    daemon = s_daemon.Daemon()

    # possibly load/share a cortex or two
    for nameurl in opts.cortex:
        name,url = nameurl.split(',',1)
        core = s_cortex.openurl(url)
        daemon.addSharedObject(name,core)

    # fire up requested link servers
    for url in opts.linkurl:
        link = s_link.chopLinkUrl(url)
        daemon.runLinkServer(link)

    if opts.initmod:
        mod = importlib.import_module(opts.initmod)
        meth = getattr(mod,'initDaemon',None)
        if meth is None:
            # Fixed: the module name was never interpolated — the literal
            # "%s" placeholder used to be printed.
            print('error: initmod (%s) has no initDaemon() function!' % opts.initmod)
            return

        # call back the daemon init module
        meth(daemon)

    try:
        daemon.wait()
    except KeyboardInterrupt as e:
        print('ctrl-c caught: shutting down')
        daemon.fini()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
"invisigoth@kenshoto.com"
] | invisigoth@kenshoto.com |
b50cad052ae86d391634b686391f8e2707578c4f | bbf7eeaa891be95f83ffa4369f72a545183fd7f5 | /dashboard/dev_settings_test.py | 6c5d37dc8ccd5641d98566923b26c6a96dd06fa6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | digirati-co-uk/elife-dashboard-fork | c52c744ec96e13b2d1997f27107b4cab3f468780 | 9b949d4a5907ccf11d65b705e8ff7ba017e59333 | refs/heads/master | 2021-04-03T02:07:28.184201 | 2018-03-09T16:29:36 | 2018-03-09T16:29:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # Dashboard setting
application_port = 5000
preview_base_url = 'https://elifesciences.org/'
# Article scheduler settings
article_scheduler_url = 'http://localhost:8000/schedule/v1/article_scheduled_status/'
article_schedule_publication_url = 'http://localhost:8000/schedule/v1/schedule_article_publication/'
article_schedule_range_url = ''
# SQS settings
sqs_region = 'eu-west-1'
event_monitor_queue = 'event-property-incoming-queue'
workflow_starter_queue = 'workflow-starter-queue'
event_queue_pool_size = 5
event_queue_message_count = 5
# Logging
log_level = "DEBUG"
log_file = "dashboard_test.log"
process_queue_log_file = "process_queue.log"
# Database
database = 'elifemonitortest'
host = 'localhost'
port = 5432
user = 'root'
password = ''
| [
"jenniferstrej@gmail.com"
] | jenniferstrej@gmail.com |
2c23a5b23600445d7f086063d298e0d769839238 | 405af31dc9e4cb28ac58dc6411f1ff4958013382 | /jsession/middleware.py | e889fd63a091349eff1a8afb1b347b7281a21490 | [] | no_license | dpnova/django-jsession | cbf18b65862b17a20c233205a01d85c32ae0a62a | 533e69fcafe3884157eff65f9321e0a70217134c | refs/heads/master | 2020-04-14T19:33:57.401538 | 2013-06-15T12:15:19 | 2013-06-15T12:15:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,053 | py | from django.conf import settings
import json, base64, time
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class JSessionStore(object):
def __init__(self, data):
if data:
self.data = json.loads(base64.b64decode(data))
else:
self.data = {}
self.modified = False
def __setitem__(self, key, value):
self.data[key] = value
self.modified = True
def __getitem__(self, key):
return self.data.__getitem__(key)
def __delitem__(self, key):
self.data.__delitem__(key)
self.modified = True
def __iter__(self):
return self.data.__iter__()
def get(self, key, default=None):
try:
return self.data[key]
except KeyError:
return default
def dumps(self):
resp = base64.b64encode(json.dumps(self.data))
return resp
def empty(self):
self.data = {}
self.modified = True
class JSessionMiddleware(object):
def process_request(self, request):
data = request.COOKIES.get("jsession", None)
request.jsession = JSessionStore(data)
def process_response(self, request, response):
try:
modified = request.jsession.modified
except AttributeError:
pass
else:
patch_vary_headers(response, ('Cookie',))
if modified:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# request.jsession.save()
response.set_cookie("jsession",
request.jsession.dumps(), max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=None,
httponly=False)
return response
| [
"davidnovakovic@gmail.com"
] | davidnovakovic@gmail.com |
83fb210fa070a1486e7d0d70933f5079a00249e4 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES2/EXT/sRGB.py | 8f6425aa57802b0bcff7a9f104b0879ff0ac08bc | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.GLES2 import _errors
_EXTENSION_NAME = 'GLES2_EXT_sRGB'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_EXT_sRGB', error_checker=_errors._error_checker)
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT = _C('GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT', 0x8210)
GL_SRGB8_ALPHA8_EXT = _C('GL_SRGB8_ALPHA8_EXT', 0x8C43)
GL_SRGB_ALPHA_EXT = _C('GL_SRGB_ALPHA_EXT', 0x8C42)
GL_SRGB_EXT=_C('GL_SRGB_EXT',0x8C40)
| [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
84550dd6e9116faa27a59f519d51bbef057578e2 | 82fe749a3713252f5cbd15b0f6732f7d56d8efd6 | /demo1/MyWindowClass/MyWindow.py | ec187fb7ce96890474ad79d77011e469931f38b9 | [] | no_license | exialk2012/PyQt5Project | 6270b88c1018af92b3944ab8a0d23bd450257a47 | ce9c36a6103808a24e516db23c5d15eb34a6ec3f | refs/heads/master | 2020-06-16T05:57:56.132814 | 2019-07-27T03:46:45 | 2019-07-27T03:46:45 | 195,495,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from PyQt5.QtWidgets import QWidget, QLCDNumber
from PyQt5.QtGui import QIcon
class MyWindow(QWidget):
def __init__(self):
super().__init__()
self.uiInit()
def uiInit(self):
self.value = 0
self.setGeometry(300, 300, 300, 300)
self.setWindowTitle('不错的窗口')
self.setWindowIcon(QIcon('icon.ico'))
self.lcd = QLCDNumber(self)
self.show()
def mousePressEvent(self, e):
self.value += 1
self.lcd.display(self.value)
| [
"398404360@qq.com"
] | 398404360@qq.com |
84e151f44bc686b2b4aa6eba4d74e6e4a7dcf89a | f2d92c10deaa084250935530bfb179f8ece3b41a | /cifo/util/MO_utils.py | 12149bb25bf7302cf6f584f3f935c854eefb929e | [] | no_license | daveai/GA_Library | 24474963207a42e78f6e585c90d8c811cc4f74a5 | 2bc6a8b0e5fe5d7bd92fac5cb83208e6203af4aa | refs/heads/master | 2022-07-13T17:26:14.894368 | 2020-02-03T10:54:27 | 2020-02-03T10:54:27 | 237,943,102 | 1 | 0 | null | 2021-03-20T02:48:14 | 2020-02-03T10:48:42 | Python | UTF-8 | Python | false | false | 3,018 | py | import numpy as np
# From https://github.com/haris989/NSGA-II
def fast_non_dominated_sort(values1, values2):
S = [[] for i in range(0, len(values1))]
front = [[]]
n = [0 for i in range(0, len(values1))]
rank = [0 for i in range(0, len(values1))]
for p in range(0, len(values1)):
S[p] = []
n[p] = 0
for q in range(0, len(values1)):
if (
(values1[p] < values1[q] and values2[p] > values2[q])
or (values1[p] <= values1[q] and values2[p] > values2[q])
or (values1[p] < values1[q] and values2[p] >= values2[q])
):
# if (values1[p] > values1[q] and values2[p] > values2[q]) or (values1[p] >= values1[q] and values2[p] > values2[q]) or (values1[p] > values1[q] and values2[p] >= values2[q]):
if q not in S[p]:
S[p].append(q)
elif (
(values1[q] < values1[p] and values2[q] > values2[p])
or (values1[q] <= values1[p] and values2[q] > values2[p])
or (values1[q] < values1[p] and values2[q] >= values2[p])
):
# elif (values1[q] > values1[p] and values2[q] > values2[p]) or (values1[q] >= values1[p] and values2[q] > values2[p]) or (values1[q] > values1[p] and values2[q] >= values2[p]):
n[p] = n[p] + 1
if n[p] == 0:
rank[p] = 0
if p not in front[0]:
front[0].append(p)
i = 0
while front[i] != []:
Q = []
for p in front[i]:
for q in S[p]:
n[q] = n[q] - 1
if n[q] == 0:
rank[q] = i + 1
if q not in Q:
Q.append(q)
i = i + 1
front.append(Q)
del front[len(front) - 1]
return front
# Function to calculate crowding distance
def crowding_distance(values1, values2, front):
distance = [0 for i in range(0, len(front))]
sorted1 = sort_by_values(front, values1[:])
sorted2 = sort_by_values(front, values2[:])
distance[0] = np.Inf
distance[len(front) - 1] = np.Inf
for k in range(1, len(front) - 1):
distance[k] = distance[k] + (
values1[sorted1[k + 1]] - values2[sorted1[k - 1]]
) / (max(values1) - min(values1))
for k in range(1, len(front) - 1):
distance[k] = distance[k] + (
values1[sorted2[k + 1]] - values2[sorted2[k - 1]]
) / (max(values2) - min(values2))
return distance
# Function to sort by values
def sort_by_values(list1, values):
sorted_list = []
while len(sorted_list) != len(list1):
if index_of(min(values), values) in list1:
sorted_list.append(index_of(min(values), values))
values[index_of(min(values), values)] = np.math.inf
return sorted_list
# Function to find index of list
def index_of(a, list):
for i in range(0, len(list)):
if list[i] == a:
return i
return -1
| [
"noreply@github.com"
] | daveai.noreply@github.com |
b718b85f3fd17a4ef08123a96ee1f9b38221b47d | cb07529e3526b61033d08e44cb00dded18ea82ed | /app/forms.py | 7c943d83c1246bdee7d08dc40692c13b2c2163f7 | [] | no_license | piyush0810/cov19stats | 56bff241a6af85c9d58a6f6bf9f3dd0376cf203e | fa0318a9b239991f4ad9c03129f2c6d8788a454f | refs/heads/master | 2022-12-15T15:57:51.476979 | 2020-09-13T09:37:32 | 2020-09-13T09:37:32 | 295,119,819 | 0 | 0 | null | 2020-09-13T09:37:02 | 2020-09-13T09:37:01 | null | UTF-8 | Python | false | false | 284 | py | from django import forms
class Contact(forms.Form):
sender = forms.CharField(label='Name', max_length=30)
subject = forms.CharField(label='Subject', max_length=30)
email = forms.EmailField(label='Email', max_length=30)
message = forms.CharField(widget=forms.Textarea) | [
"kumar.55@iitj.ac.in"
] | kumar.55@iitj.ac.in |
9f5c3c2ae5e766b560ed33641653dcbbff6eedfb | b2fef77e77f77b6cfd83da4ec2f89cbe73330844 | /tests/test_integration_lazy_samples.py | c365616bc84723bb5c8013fdcf4d08ed6e238a58 | [
"Apache-2.0"
] | permissive | Project-MONAI/MONAI | 8ef2593cc5fd1cd16e13464f927fe563fe3f5bac | e48c3e2c741fa3fc705c4425d17ac4a5afac6c47 | refs/heads/dev | 2023-09-02T00:21:04.532596 | 2023-09-01T06:46:45 | 2023-09-01T06:46:45 | 214,485,001 | 4,805 | 996 | Apache-2.0 | 2023-09-14T15:19:30 | 2019-10-11T16:41:38 | Python | UTF-8 | Python | false | false | 9,193 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import shutil
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
import monai.transforms as mt
from monai.data import create_test_image_3d, decollate_batch
from monai.transforms.utils import has_status_keys
from monai.utils import TraceStatusKeys, set_determinism
from tests.utils import HAS_CUPY, DistTestCase, SkipIfBeforePyTorchVersion, skip_if_quick
def _no_op(x):
return x
def run_training_test(root_dir, device="cuda:0", cachedataset=0, readers=(None, None), num_workers=4, lazy=True):
print(f"test case: {locals()}")
images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])]
device = "cuda:0" if HAS_CUPY and torch.cuda.is_available() else "cpu" # mode 0 and cuda requires CUPY
num_workers = 0 if torch.cuda.is_available() else num_workers
# define transforms for image and segmentation
lazy_kwargs = {
"img": {"mode": "bilinear", "device": device, "padding_mode": "border", "dtype": torch.float32},
"seg": {"mode": 0, "device": device, "padding_mode": "nearest", "dtype": torch.uint8},
}
train_transforms = mt.Compose(
[
mt.LoadImaged(keys=["img", "seg"], reader=readers[0], image_only=True),
mt.EnsureChannelFirstd(keys=["img", "seg"]),
mt.Spacingd(
keys=["img", "seg"],
pixdim=[1.2, 0.8, 0.7],
mode=["bilinear", 0],
padding_mode=("border", "nearest"),
dtype=np.float32,
),
mt.Orientationd(keys=["img", "seg"], axcodes="ARS"),
mt.RandRotate90d(keys=["img", "seg"], prob=1.0, spatial_axes=(1, 2)),
mt.ScaleIntensityd(keys="img"),
mt.ApplyPendingd(keys=["seg"]),
mt.RandCropByPosNegLabeld(
keys=["img", "seg"], label_key="seg", spatial_size=[76, 82, 80], pos=1, neg=1, num_samples=4
),
mt.RandRotate90d(keys=["img", "seg"], prob=0.8, spatial_axes=(0, 2)),
mt.RandZoomd(
keys=["img", "seg"], prob=1.0, min_zoom=1.0, max_zoom=1.0, mode=("trilinear", 0), keep_size=True
),
mt.ResizeWithPadOrCropD(keys=["img", "seg"], spatial_size=[80, 72, 80]),
mt.Rotated(keys=["img", "seg"], angle=[np.pi / 2, np.pi / 2, 0], mode="nearest", keep_size=False),
mt.Lambdad(keys=["img"], func=_no_op),
],
lazy=lazy,
overrides=lazy_kwargs,
log_stats=num_workers > 0,
)
# create a training data loader
if cachedataset == 2:
train_ds = monai.data.CacheDataset(
data=train_files, transform=train_transforms, cache_rate=0.8, runtime_cache=False, num_workers=0
)
elif cachedataset == 3:
train_ds = monai.data.LMDBDataset(data=train_files, transform=train_transforms, cache_dir=root_dir)
else:
train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
# create UNet, DiceLoss and Adam optimizer
model = monai.networks.nets.UNet(
spatial_dims=3, in_channels=1, out_channels=1, channels=(2, 2, 2, 2), strides=(2, 2, 2), num_res_units=2
).to(device)
optimizer = torch.optim.Adam(model.parameters(), 5e-4)
loss_function = monai.losses.DiceLoss(sigmoid=True)
saver = mt.SaveImage(
output_dir=os.path.join(root_dir, "output"),
dtype=np.float32,
output_ext=".nii.gz",
output_postfix=f"seg_{lazy}_{num_workers}",
mode="bilinear",
resample=False,
separate_folder=False,
print_log=False,
)
inverter = mt.Invertd(
keys="seg", orig_keys="img", transform=mt.Compose(train_transforms.transforms[-5:]), to_tensor=True
)
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
_g = torch.Generator()
_g.manual_seed(0)
set_determinism(0)
train_loader = monai.data.DataLoader(
train_ds, batch_size=1, shuffle=True, num_workers=num_workers, generator=_g, persistent_workers=num_workers > 0
)
all_coords = set()
batch_data = None
for epoch in range(5):
print("-" * 10)
print(f"Epoch {epoch + 1}/5")
for step, batch_data in enumerate(train_loader, start=1):
inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_len = len(train_ds) // train_loader.batch_size
print(f"{step}/{epoch_len}, train_loss:{loss.item():0.4f}")
for item, in_img, in_seg in zip(outputs, inputs, labels): # this decollates the batch, pt 1.9+
item.copy_meta_from(in_img)
np.testing.assert_array_equal(item.pending_operations, [])
np.testing.assert_array_equal(in_seg.pending_operations, [])
ops = [0]
if len(item.applied_operations) > 1:
found = False
for idx, n in enumerate(item.applied_operations): # noqa
if n["class"] == "RandCropByPosNegLabel":
found = True
break
if found:
ops = item.applied_operations[idx]["extra_info"]["extra_info"]["cropped"]
img_name = os.path.basename(item.meta["filename_or_obj"])
coords = f"{img_name} - {ops}"
print(coords)
# np.testing.assert_allclose(coords in all_coords, False)
all_coords.add(coords)
saver(item) # just testing the saving
saver(in_img)
saver(in_seg)
invertible, reasons = has_status_keys(batch_data, TraceStatusKeys.PENDING_DURING_APPLY)
inverted = [inverter(b_data) for b_data in decollate_batch(batch_data)] # expecting no error
return ops
@skip_if_quick
@SkipIfBeforePyTorchVersion((1, 11))
class IntegrationLazyResampling(DistTestCase):
def setUp(self):
monai.config.print_config()
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(3):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"seg{i:d}.nii.gz"))
self.device = "cuda:0" if torch.cuda.is_available() else "cpu:0"
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
def train_and_infer(self, idx=0):
results = []
_readers = (None, None)
_w = 2
if idx == 1:
_readers = ("itkreader", "itkreader")
_w = 1
elif idx == 2:
_readers = ("itkreader", "nibabelreader")
_w = 0
results_expected = run_training_test(
self.data_dir, device=self.device, cachedataset=0, readers=_readers, num_workers=_w, lazy=False
)
results = run_training_test(
self.data_dir, device=self.device, cachedataset=idx, readers=_readers, num_workers=_w, lazy=True
)
self.assertFalse(np.allclose(results, [0]))
self.assertFalse(np.allclose(results_expected, [0]))
np.testing.assert_allclose(results, results_expected)
lazy_files = glob(os.path.join(self.data_dir, "output", "*_True_*.nii.gz"))
regular_files = glob(os.path.join(self.data_dir, "output", "*_False_*.nii.gz"))
diffs = []
for a, b in zip(sorted(lazy_files), sorted(regular_files)):
img_lazy = mt.LoadImage(image_only=True)(a)
img_regular = mt.LoadImage(image_only=True)(b)
diff = np.size(img_lazy) - np.sum(np.isclose(img_lazy, img_regular, atol=1e-4))
diff_rate = diff / np.size(img_lazy)
diffs.append(diff_rate)
np.testing.assert_allclose(diff_rate, 0.0, atol=0.03)
print("volume diff:", diffs)
def test_training(self):
for i in range(4):
self.train_and_infer(i)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | Project-MONAI.noreply@github.com |
d7ba91ca1c5145138d5f15a7cb3b92c86bb0fdeb | b196bfc09b5bc917502603f0ddff608a916283d9 | /Clásica/Code/Vigenere.py | 60a73a9ab45d52b74b367899c1399d03765fda37 | [] | no_license | RaquelNeedsCoffee/Cripto | 2f9ea1e15712bf022f6d044278514a46bd68665d | 7fcc426374dfe9ec80596ddf9319d380b4182d39 | refs/heads/master | 2022-05-12T02:09:23.657781 | 2017-11-28T17:43:33 | 2017-11-28T17:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from itertools import permutations
text_path = './2017_09_21_17_19_17_raquel.leandra.perez.Vigenere'
text = open(text_path, 'r')
decrypted_path = './Vigenere/decrypded'
text = list(text)[0]
print(len(text))
alphabetsize = 26
def add(c, x):
if c.islower():
return chr((ord(c) - x - ord('a')) % alphabetsize + ord('a'))
else:
return chr((ord(c) - x - ord('A')) % alphabetsize + ord('A'))
def vigenere_decrypt(cipher_text, key):
chars = [c for c in cipher_text]
result = ''
keysize = len(key)
j = 0
for i in range(0, len(chars)):
if j == keysize:
j = 0
if chars[i].isalpha():
result += add(chars[i], key[j])
j += 1
else:
result += chars[i]
return result
"""Testing that the algorithm works
key = 'LEMON'
numkey = [ord(c) - ord('A') for c in key]
charkey = [chr(c + ord('A')) for c in numkey]
cipther_text = 'LXFOPV EF RNHR'
print(vigenere_decrypt(cipther_text,numkey))
"""
# Brute force to find the key
keyword = 'Machine'
for i in permutations(range(0, 26), 10):
decrypted = vigenere_decrypt(text, i)
if keyword in decrypted:
decrypted_path = './Vigenere/decrypted' + str(i)
decrypted_text = open(decrypted_path, 'w')
decrypted_text.write(decrypted)
decrypted_text.close()
key = [3, 8, 12, 4, 13, 18, 8, 14, 13, 18]
# key = dimensions
| [
"raquelpa93@gmail.com"
] | raquelpa93@gmail.com |
ce7936e9279838ce773a5d1c8ec644b1ab44048f | ce55c319f5a78b69fefc63595d433864a2e531b5 | /前后端分离-vue-DRF/Projects-lenongke/LNK/apps/users/signals.py | 43a056d613cf142a3a112d980f3a8db7cfac5f0d | [] | no_license | Suijng/1809_data | a072c875e8746190e3b715e53f1afe3323f4666b | 45f8a57089f5c30ccc1a3cddb03b76dc95355417 | refs/heads/master | 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 | HTML | UTF-8 | Python | false | false | 579 | py | # post_save Django中的model对象保存后,自动触发
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
# 获取用户模型
User = get_user_model()
@receiver(post_save,sender=User) # 监控用户User模型
def create_user(sender,instance=None,created=False,**kwargs):
# created: 表示是否已经创建
if created:
# 获取用户的密码
password = instance.password
# 加密
instance.set_password(password)
# 保存
instance.save() | [
"1627765913@qq.com"
] | 1627765913@qq.com |
b0d930eb9f2168bf87140aee5198b748f406c242 | c8765ddf12feb275a308cb70edcbca468fd25152 | /mlsrc/controllers/d_persons.py | d20eb605ad1bb0c85d3edef868f776fe62c706e2 | [] | no_license | driscollis/MediaLocker | d1291ad2f08a94600744034a14815861ace25c63 | 7f76e897ace3aa3c5664a10a8f1bb39ec23b20ec | refs/heads/master | 2023-08-12T07:44:51.723637 | 2020-09-28T15:00:25 | 2020-09-28T15:00:25 | 299,340,858 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | # -*- coding: utf-8 -*-#
#!/usr/bin/env python
"""The books controller"""
import logging
import functools
import wx
import base_list as bList
import d_person as dPerson
import mlsrc.libui.commonDlgs as commonDlgs
import mlsrc.libui.olvgroup as olvg
import mlsrc.libui.olvdefs as olvd
import mlsrc.models as db
from mlsrc.mypub import pub, pTopics
########################################################################
class Persons(bList.BaseList):
def __init__(self, parent, **kwds):
"""The controller for persons"""
self.view = wx.Dialog(parent, wx.ID_ANY,
style=wx.DEFAULT_DIALOG_STYLE,
title="Maintain person information")
self.view.SetName("dPersons")
super(Persons, self).__init__(view=self.view, model="Person", **kwds)
scats = ["Person", ]
self.initDlgListBase(scats)
#----------------------------------------------------------------------
def onAddRecord(self, event):
"""
Add a record to the database
"""
dbItem = getattr(db, self.modelName) ()
dlg = dPerson.Person(self.view, dbItem).view
dlg.ShowModal()
dlg.Destroy()
#----------------------------------------------------------------------
def onEditRecord(self, event):
"""
Edit a record
"""
selectedRow = self.theOlv.getList().GetSelectedObject()
if selectedRow == None:
commonDlgs.showMessageDlg("No row selected!", "Error")
return
dlg = dPerson.Person(self.view, selectedRow, title="Modify",
addModus=False).view
dlg.ShowModal()
dlg.Destroy()
#----------------------------------------------------------------------
def searchRecords(self, filterChoice, keyword):
"""
Searches the database based on the filter chosen and the keyword
given by the user
"""
session = wx.GetApp().session
model = getattr(db, self.modelName)
result = None
if filterChoice == "Person":
qry = session.query(model)
logging.debug(qry)
result = qry.filter(db.Person.full_name.contains('%s' % keyword))
result = result.all()
logging.debug(result)
return result
#----------------------------------------------------------------------
if __name__ == '__main__':
import mlsrc.base_app as base_app
app = base_app.BaseApp(redirect=False)
dlg = Persons(None).view
try:
dlg.ShowModal()
finally:
dlg.Destroy()
app.MainLoop()
| [
"noreply@github.com"
] | driscollis.noreply@github.com |
ddb5c9ec57d4341d04f67594d61a799199b5ca2e | ae60ca55bfdca62e22081190bd392fadd49af544 | /4_SNP/ex2_3_extract_SNP_data.py | ffde34b6da6a992cba48e1fe388debf2f120c491 | [] | no_license | St3451/Structural_Bioinformatics | 7b154500c3dc2d384f0397098cbc811503a183af | 16cce85142f1549cb0d909819185a1cc0c837d29 | refs/heads/master | 2022-11-18T04:35:54.041023 | 2020-07-19T16:00:23 | 2020-07-19T16:00:23 | 237,039,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,934 | py | ### Exercise 2
# Using the base pair probability information of the affected region find the base pair in the wild type sequence with the
# highest probability and compare it to the probability of the same base pair position in the mutant version.
def file_to_matrix(text_file):
"""
Take a text tile and return
a list of list (matrix)
"""
file_opened = open(text_file, "r")
lst_matrix = []
for line in file_opened.readlines():
line = line.rstrip()
lst_matrix.append(line.split())
return lst_matrix
def find_max_top_matrix_triangle(matrice):
"""
Take a matrix (list of list) as input,
find the maximum of its top right triangle
and the value of the same cell in the botton
left triangle
"""
WT_max = 0
WT_max_cell = ()
for row, elem in enumerate(matrice): # row is the position and element is the row_list that contain the values
for col, value in enumerate(elem): # the position in the row_list are the columns. With enumerate() the first element is the index and the second is the element itself
if row < col: # This condition ensure to be in the top right triangle of the matrix
if float(value) > WT_max:
WT_max = float(value)
WT_max_cell = (row, col)
MT_relat_value = float(lst_matrix[WT_max_cell[1]][WT_max_cell[0]]) # I just invert the coordinate (col, row) instead of (row, col)
return WT_max_cell, WT_max, MT_relat_value
lst_matrix = file_to_matrix("dot_plot_matrix.txt")
WT_max_cell = find_max_top_matrix_triangle(lst_matrix)[0]
WT_max = find_max_top_matrix_triangle(lst_matrix)[1]
MT_relat_value = find_max_top_matrix_triangle(lst_matrix)[2]
print("\nWT max cell (+1, +1): (" + str(WT_max_cell[0]) + ", " + str(WT_max_cell[1]) + ")")
print("WT max value: " + str(WT_max))
print("MT relative position value: " + str(MT_relat_value) + "\n")
### Exercise 3
# In the local region with altered RNA structure find the number of base pairs with pair probabilities
# higher then 0.5 and 0.8. Do you see a consistent pattern?
def find_cell_with_higher_prob(matrice):
"""
Take a bp prob. matrix as an input and return
a list of all the values of the cells with a
probability higher than threeshold
"""
wt_08plus = []
wt_05plus = []
mt_08plus = []
mt_05plus = []
for row, elem in enumerate(matrice): # row is the position and element is the row_list that contain the values
for col, value in enumerate(elem): # the position in the row_list are the columns
if row < col: # Check in wild type triangle (top right triangle)
if float(value) > 0.5:
wt_05plus.append(float(value))
if float(value) > 0.8:
wt_08plus.append(float(value))
if row > col: # Check in mutant triangle
if float(value) > 0.5:
mt_05plus.append(float(value))
if float(value) > 0.8:
mt_08plus.append(float(value))
return wt_08plus, wt_05plus, mt_08plus, mt_05plus
wt_08plus = find_cell_with_higher_prob(lst_matrix)[0]
wt_05plus = find_cell_with_higher_prob(lst_matrix)[1]
mt_08plus = find_cell_with_higher_prob(lst_matrix)[2]
mt_05plus = find_cell_with_higher_prob(lst_matrix)[3]
ratio_MT_WT08 = len(mt_08plus)/len(wt_08plus)
ratio_MT_WT05 = len(mt_05plus)/len(wt_05plus)
print("WT higher than 0.8: " + str(len(wt_08plus)))
print("WT higher than 0.5: " + str(len(wt_05plus)))
print("Mutant higher than 0.8: " + str(len(mt_08plus)))
print("Mutant higher than 0.5: " + str(len(mt_05plus)))
print("Ratio MT/WT bp with prob higher than 0.8: " + str(ratio_MT_WT08))
print("Ratio MT/WT bp with prob higher than 0.5: " + str(ratio_MT_WT05))
| [
"stefano.pellegrini26@gmail.com"
] | stefano.pellegrini26@gmail.com |
1cde822e75ef087eea418d6ecbe2730f1e3e70a5 | db95ce1dbbc3d8cd632efbe876488b77f7093ae6 | /AE_IV/Main.py | e2145c80bb19740abb7d7506f4277acb744e5ac6 | [] | no_license | LANCEREN/InverseNet | a490e887bc56400783f21d89f8075398cdc97c47 | ab847d2e6772cdefad210ce1eeca576f8f3cefac | refs/heads/main | 2023-05-07T16:44:58.203396 | 2021-05-10T15:35:29 | 2021-05-10T15:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | import jax
import numpy.random as npr
import pdb
from Utils import display_multiple_img, get_data
from Utils_Models import get_model
from Train import Trainer
if __name__ == '__main__':
rng = jax.random.PRNGKey(-1)
rng_batch = npr.RandomState(0)
X, dX, z_ref = get_data()
#display_multiple_img({'P_I: ': X[:, 0], 'dP_I: ': dX[:, 0], 'z_P_I: ': z_ref[:, 0], 'dz_P_I: ': z_ref[:, 2],
# 'Q_I: ': X[:, 1], 'dQ_I: ': dX[:, 1], 'z_Q_I: ': z_ref[:, 1], 'dz_Q_I: ': z_ref[:, 3]}, 0,
# 'X_real', rows=2, cols=4)
model_name = 'AE'
if(model_name == 'AE'):
hyper_params = {'lr': 0.001, 'epochs': 50, 'batch_size': 128, 'z_latent': 20, # eta1 = dx
'eta1': 10.0, 'eta2': 1e-3, 'eta3': 10e-5, 'x_dim': 159} # eta2 = dz
# eta3 = regularization
elif(model_name == 'IV'):
hyper_params = {'lr': 0.001, 'epochs': 110, 'batch_size': 128, 'z_latent': 20,
'eta1': 10.0, 'eta2': 1e-4, 'eta3': 1e-5, 'alpha': 0.050, 'steps_inner': 10}
else:
raise NameError('Wrong model name')
model = get_model(model_name, hyper_params, rng)
step_sample = 50
trainer = Trainer(model, hyper_params, step_sample, shuffle=False)
z_pred, x_pred = trainer.fit(X, dX, z_ref, rng_batch)
pdb.set_trace()
| [
"mmiller96@uni-bremen.de"
] | mmiller96@uni-bremen.de |
5f133ea18f13f267f1730fb04c3a6d648aedd954 | d96674845b48a5d20a16778a0c2779a2240cf7fa | /fifth_app/forms.py | d06375edc2d1ba08650ca18c098520f07098facc | [] | no_license | jackyhu289/fifth-project | 5565c45ab3267704ad65a23d7e395ccee6b6cbe2 | cc79defc76f8776e99599cc7ffcacf37b23ae864 | refs/heads/master | 2022-04-06T06:59:11.860778 | 2020-02-15T02:20:27 | 2020-02-15T02:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from django import forms
from django.contrib.auth.models import User
from fifth_app.models import UserProfileInfo
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileInfoForm(forms.ModelForm):
class Meta:
model = UserProfileInfo
fields = ('portfolio_site', 'profile_pic') | [
"TheDomesticUser@gmail.com"
] | TheDomesticUser@gmail.com |
96695f21a9cf41615a3de64b126798b0cf0d26b6 | 3b5dbf2720ab78a100a28cda1948778f2ae9ba7d | /OUr2c_evt.py | 4e1662f1fed0dedffbdd011d03be50787ea00a9d | [] | no_license | gasti1p/OU_Experiment | efbeebf2bf0b60943b958d3024f1c14458012cc6 | ecc9d8b0a1f30f264d9dd05c4d00b0f89d0ed398 | refs/heads/master | 2021-01-05T01:31:51.217202 | 2020-02-16T04:48:28 | 2020-02-16T04:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | import numpy as np
import os, glob, sys, h5py
# Convert one run's raw ASCII .evt segment files into a single HDF5 file
# ("evt<run>.h5"), then build per-detector arrays gated on the routing word.
# Usage: python OUr2c_evt.py <run_number>  (reads from evt_data/evt<run>/)
# NOTE: Python 2 script (uses xrange).
run_number = sys.argv[-1]
path='evt_data/evt{}/'.format(run_number) #change path to reflect data location
# One file family per recorded quantity; each family is split into numbered
# segment files on disk ("<name>.000", "<name>.001", ...).
evt_filenames = ["bci","dtime","ev_route","ev_tof","ev_psd","ev_es","ev_ea","ev_eea","ev_e"]
# ------------------
# A) FILE CONVERTER
# ------------------
# Reflow each raw file into one 8-character value per line so np.loadtxt can
# parse it later; the header of the first family ("bci") also carries a
# segment stop time collected for section B.
sys.stdout.write("\nConverting raw files for run {}...".format(run_number))
sys.stdout.flush()
stop_times = []
for name in evt_filenames:
    segment = 0
    for filename in sorted(glob.glob(os.path.join(path, '{}.*'.format(name)))): #use commands below for all ev_e.xxx files in path
        with open(path + '{}_{}.dat'.format(name,segment),'w') as fw: #open file to write the final result
            with open(filename, 'r') as f:
                header1 = f.readline() #getting rid of header...
                # Last 7 chars of the header are parsed below as HH, MM and a
                # seconds field -- presumably an HHMMSS-style stamp; confirm.
                if name==evt_filenames[0]: stop_times.append(header1[-7:])
                n=8 #we need to read every n=8 characters
                for line in f:
                    line=line.strip('\n') #remove end of line character
                    line=line.strip('\t') #remove begin of line character if it exists
                    for i in range(0,len(line),n): #loop over each line
                        fw.write(line[i:i+n]) #write 8 characters to file
                        fw.write("\n ") #type end of line character \n to make columns
            f.close()  # redundant: the with-blocks already close both files
        fw.close()
        segment +=1
sys.stdout.write("\rConverting raw files for run {}... DONE".format(run_number))
# ------------------
# B) SEGMENTS INFO
# ------------------
# Turn consecutive stop times into per-segment durations in seconds.
sys.stdout.write("\nCollecting segments info for run {}...".format(run_number))
sys.stdout.flush()
durations = []
for i in xrange(1,len(np.array(stop_times)),1):
    start = int(stop_times[i-1][:2])*3600 + int(stop_times[i-1][2:4])*60 + int(stop_times[i-1][-3:])
    stop = int(stop_times[i][:2])*3600 + int(stop_times[i][2:4])*60 + int(stop_times[i][-3:])
    if stop-start>0: durations.append(stop-start)
    # NOTE(review): for a midnight wrap (stop < start) the elapsed time is
    # (24*3600 - start) + stop; this computes (24*3600 - stop) + start -- verify.
    elif stop-start<0: durations.append((24*3600-stop)+start)
    else: durations.append(0.0)
# The first segment has no predecessor; estimate its duration as the mean of
# the first ten measured ones.
durations.insert(0, np.mean(durations[:10]))
sys.stdout.write("\rCollecting segments info for run {}... DONE".format(run_number))
# -----------------------
# C) CREATE BINARY FILE
# -----------------------
# Load every reflowed .dat segment into "evt<run>.h5" under /raw/all_files/
# and delete the intermediate text file afterwards.
sys.stdout.write("\nCreate binary file for run {}...".format(run_number))
sys.stdout.flush()
h5data=h5py.File("evt{}.h5".format(run_number),'w')
for name in evt_filenames:
    for filename in sorted(glob.glob(os.path.join(path, '{}_*'.format(name)))):
        dset = np.loadtxt(filename)
        leaf_name = os.path.basename(filename)[:-4]  # strip the ".dat" suffix
        h5data.create_dataset("/raw/all_files/{}".format(leaf_name),data=dset,compression="lzf")
        os.remove(filename)
h5data.create_dataset("/raw/all_files/durations",data=durations,compression="lzf")
h5data.close()
sys.stdout.write("\rCreate binary file for run {}... DONE".format(run_number))
# -------------------------------
# D) CREATE BASIC ARRAYS
# -------------------------------
# Split each per-event quantity into per-detector arrays using the routing
# word (route==1 -> LENDA, 4 -> LS, 8 -> monitor, per the Gate* names), and
# sum the bci/dtime scalers over all segments.
sys.stdout.write("\nCreate basic arrays for run {}...".format(run_number))
sys.stdout.flush()
_filenames_ = ["ev_tof","ev_psd","ev_es","ev_ea","ev_eea","ev_e"]
segments = np.array(durations)
h5data = h5py.File("evt{}.h5".format(run_number),'a')
for name in _filenames_:
    list_lenda = []
    list_ls = []
    list_monitor = []
    if name==_filenames_[0]: list_route = []
    for i in xrange(0,len(np.array(segments)),1):
        route_raw = h5data["/raw/all_files/ev_route_{}".format(i)]
        route = route_raw[...]
        if name==_filenames_[0]: list_route.append(route)
        raw_file = h5data["/raw/all_files/{}_{}".format(name,i)]
        file = raw_file[...]  # shadows the builtin "file" (harmless here)
        GateLENDA = np.flatnonzero(route==1.0)
        GateLS = np.flatnonzero(route==4.0)
        GateMONITOR = np.flatnonzero(route==8.0)
        list_lenda.append(file[GateLENDA])
        list_ls.append(file[GateLS])
        list_monitor.append(file[GateMONITOR])
    # Flatten the per-segment lists into one dataset per detector.
    if name==_filenames_[0]: h5data.create_dataset("raw/route",data=[j for i in list_route for j in i],compression="lzf")
    h5data.create_dataset("raw/lenda/{}".format(name),data=[j for i in list_lenda for j in i],compression="lzf")
    h5data.create_dataset("raw/ls/{}".format(name),data=[j for i in list_ls for j in i],compression="lzf")
    h5data.create_dataset("raw/monitor/{}".format(name),data=[j for i in list_monitor for j in i],compression="lzf")
# Element-wise sum of the "bci" scaler over all segments (beam current
# integrator, presumably -- name only).
bci_tot = np.array(h5data["/raw/all_files/bci_0"][...])
for i in xrange(1,len(np.array(segments)),1):
    bci_raw = h5data["/raw/all_files/bci_{}".format(i)]
    bci_tot = bci_tot + np.array(bci_raw[...])
h5data.create_dataset("raw/bci",data=bci_tot)
# Same element-wise sum for the "dtime" scaler (dead time, presumably).
dtime_tot = np.array(h5data["/raw/all_files/dtime_0"][...])
for i in xrange(1,len(np.array(segments)),1):
    dtime_raw = h5data["/raw/all_files/dtime_{}".format(i)]
    dtime_tot = dtime_tot + np.array(dtime_raw[...])
h5data.create_dataset("raw/dtime",data=dtime_tot)
h5data.close()
sys.stdout.write("\rCreate basic arrays for run {}... DONE\n\n".format(run_number))
| [
"noreply@github.com"
] | gasti1p.noreply@github.com |
b745100a558fbeee4b7b6de159e4049b197a4794 | a0187b92331b7300fbe5ba98469c92155fe80e7b | /Dictionary/Sets.py | 1b5e7d5a752af9d90909d5ab4ddc85fe8e985863 | [] | no_license | avi651/PythonBasics | a39187f03ef54374623286424e20ac7a08ae0d4f | 07e8276f47554fc48ab69650c90d526c4cfe66ff | refs/heads/master | 2020-04-10T19:22:24.893977 | 2018-12-16T18:15:31 | 2018-12-16T18:15:31 | 160,411,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | farm_animals = {"sheep","cow","hen"}
# Show the farm set, then each member on its own line.
print(farm_animals)
for beast in farm_animals:
    print(beast)
print("*" * 40)  # visual separator
# Same contents either way: set() constructor and {...} literal are equivalent.
wild_animals = {"Lion", "Tiger", "Panther", "Elephant", "Hare"}
print(wild_animals)
farm_animals.add("horse")
farm_animals.add("horse")  # second add is a no-op: sets ignore duplicates
| [
"noreply@github.com"
] | avi651.noreply@github.com |
d936ff5cd2190d9db59b8db0ba9da86e4fbdb753 | 6e70a4371308fb0eebfb0f7cbb240a2ae413c7bd | /config/2017-09-14-Run3538.py | 0fb363e4dec9c547d8b82579de24ec561dab689b | [] | no_license | crogan/MMFE8_DAQ | ed44d63debc906239e258834f3247679a5a86059 | 3037d6c160a04b2ea082d987235eb48d8f77e1f7 | refs/heads/master | 2020-09-12T11:52:10.111512 | 2018-02-21T22:42:05 | 2018-02-21T22:42:05 | 66,023,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,386 | py | class MMFEConfig:
pass
# MMFE8 boards on the network, keyed by the last octet of their IP address.
ips = [118, 120, 119, 111,
       106, 107, 101, 105]
mmfes = {}
# Per-board list of VMM indices whose configuration should be loaded.
configload = {}
for ip in ips:
    configload[ip] = range(8)
configload[111] = []                  # board 111: load nothing
configload[120] = [0, 1, 2, 3, 6, 7]  # boards with excluded VMMs
configload[101] = [0, 1, 2, 4, 5, 6, 7]
for i, ip in enumerate(ips):
    mmfes[ip] = MMFEConfig()
    mmfes[ip].ip = ip
    mmfes[ip].i = i  # position in the ips list (was ips.index(ip), O(n) per pass)
    mmfes[ip].TestPulseDAC = 120
    mmfes[ip].ST = {}
    # FIX: the original compared with "ip is not 120" / "ip is 120".
    # Identity comparison with int literals only works via CPython's
    # small-int cache (and is a SyntaxWarning since 3.8) -- use ==/!=.
    if ip != 120 and ip != 101:
        mmfes[ip].vmmis = range(8)
        mmfes[ip].EnableReadout = range(8)
    elif ip == 120:
        mmfes[ip].vmmis = [0, 1, 2, 3, 6, 7]
        mmfes[ip].EnableReadout = [0, 1, 2, 3, 6, 7]
    else:
        mmfes[ip].vmmis = [0, 1, 2, 4, 5, 6, 7]
        mmfes[ip].EnableReadout = [0, 1, 2, 4, 5, 6, 7]
    mmfes[ip].ConfigureLoad = configload[ip]
    mmfes[ip].ExternalTrigger = True
    mmfes[ip].isExtPulseTrig = False
    mmfes[ip].PeakingTime = 0
    mmfes[ip].mon = {}
    mmfes[ip].ThresholdDACs = {0: 220, 1: 220, 2: 220, 3: 220,
                               4: 220, 5: 220, 6: 220, 7: 220}
# masking
# SM: per-VMM lists of channels to exclude, keyed by VMM index 0-7
# (presumably "strip mask" -- the acronym is not defined in this file).
mmfes[118].SM = {0: [1],
                 1: [],
                 2: [1, 6, 27, 31, 56],
                 3: [],
                 4: [1, 6, 15, 21, 23],
                 5: [],
                 6: [1, 2, 3, 4],
                 7: [1]}
mmfes[120].SM = {0: [13, 16, 40, 41],
                 1: [46],
                 2: [1, 20, 21, 27, 38, 40, 46, 50, 51, 53],
                 3: [3, 5, 20, 22, 25, 29, 31, 34, 51, 53, 64],
                 4: [],
                 5: [],
                 6: [1, 40],
                 7: [1, 10, 12, 21, 22, 28, 29, 34]}
mmfes[120].SM[7].extend(range(35,65))  # also mask channels 35-64 on VMM 7
# Per-VMM threshold overrides (the default set above is 220 everywhere).
mmfes[120].ThresholdDACs[1] = 240
mmfes[120].ThresholdDACs[3] = 240
mmfes[120].ThresholdDACs[6] = 240
mmfes[120].ThresholdDACs[7] = 240
mmfes[119].SM = {0: [37],
                 1: [1, 3, 6, 17, 24, 29, 31, 36, 38, 44, 46],
                 2: [],
                 3: [55],
                 4: [],
                 5: [2],
                 6: [11, 34, 48, 52],
                 7: [64]}
mmfes[119].ThresholdDACs[1] = 240
mmfes[106].SM = {0: [1, 3],
                 1: [1],
                 2: [3],
                 3: [],
                 4: [],
                 5: [],
                 6: [1, 2, 3, 27],
                 7: [1, 2, 3, 19, 20]}
mmfes[107].SM = {0: [],
                 1: [16, 18, 26, 29, 40, 42, 63],
                 2: [1],
                 3: [1, 2, 3],
                 4: [1, 5, 28, 61],
                 5: [],
                 6: [1, 59, 62],
                 7: []}
mmfes[101].SM = {0: [1, 3],
                 1: [3, 5],
                 2: [],
                 3: [],
                 4: [2, 3, 4],
                 5: [1],
                 6: [],
                 7: []}
mmfes[101].SM[2] = range(29,65)  # replace VMM 2's mask with channels 29-64
mmfes[105].SM = {0: [],
                 1: [],
                 2: [2],
                 3: [1],
                 4: [1, 2, 3, 4, 52],
                 5: [1],
                 6: [1, 2],
                 7: []}
mmfes[111].SM = {0: [],
                 1: [],
                 2: [],
                 3: [],
                 4: [],
                 5: [],
                 6: [],
                 7: []}
# Collect the per-board configs in the same order as the ips list.
cfgs = [mmfes[ip] for ip in ips]
| [
"alexander.tuna@gmail.com"
] | alexander.tuna@gmail.com |
99bdd4f3712583d0eca467f97b1c076141596f60 | 7edafb8e10c31bffd12420a4cee61d0a841fd226 | /YunluFramework/public/handle/renmai/RENMAIHANDLE5.py | c53467af7c700dc0002bde67039eec60351ee5c0 | [] | no_license | xiao2912008572/Appium | ca11d2cf82f9dcc051e9b719eb09f862f07621bf | 3931957a8ae9b4ee2acc13ae4aba0ba46b6d842b | refs/heads/master | 2021-01-21T12:27:36.243484 | 2018-09-12T09:25:35 | 2018-09-12T09:25:35 | 102,071,447 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,492 | py | __author__ = 'Administrator'
from YunluFramework.public.handle.renmai.RENMAIHANDLE4 import RENMAIHANDLE4
class RENMAIHANDLE5(RENMAIHANDLE4):
    """Page-object handles, level 5: contacts ("renmai") screens.

    Each method wraps a single UI action (click / clicks / send_keys) on a
    locator attribute inherited from the lower-level handle classes; ``self.p``
    is presumably the shared action helper -- confirm in the base classes.
    """
    # ===== [PAGE4] Contacts home > search > label list > enter group chat > settings: RMSY_search_label_groupchat_menu_setting =====
    # Locator: settings > group avatar: click
    def RMSY_search_label_groupchat_menu_setting_grouphead_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu_setting_grouphead)
    # Locator: settings > back: click
    def RMSY_search_label_groupchat_menu_setting_back_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu_setting_back)
    # Locator: settings > group name: click
    def RMSY_search_label_groupchat_menu_setting_groupname_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu_setting_groupname)
    # ===== [PAGE4] Contacts home > search > label list > enter group chat > heat settings: RMSY_search_label_groupchat_menu_heatsetting =====
    # (Note: the locator attributes below use a double underscore, "menu__heatsetting".)
    # Locator: heat settings > back: click
    def RMSY_search_label_groupchat_menu_heatsetting_back_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_back)
    # Locator: heat settings > message: click
    def RMSY_search_label_groupchat_menu_heatsetting_msg_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_msg)
    # Locator: heat settings > floating bubble: click
    def RMSY_search_label_groupchat_menu_heatsetting_bubble_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_bubble)
    # Locator: heat settings > vibration: click
    def RMSY_search_label_groupchat_menu_heatsetting_shock_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_shock)
    # Locator: heat settings > ringtone: click
    def RMSY_search_label_groupchat_menu_heatsetting_bell_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_bell)
    # Locator: heat settings > confirm: click
    def RMSY_search_label_groupchat_menu_heatsetting_confirm_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_confirm)
    # Locator: heat settings > period: click
    def RMSY_search_label_groupchat_menu_heatsetting_period_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_period)
    # Locator: heat settings > time slot: click
    def RMSY_search_label_groupchat_menu_heatsetting_time_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_menu__heatsetting_time)
    # ===== [PAGE4] Contacts home > search > label list > enter group chat > group button: RMSY_search_label_groupchat_groupbtn =====
    # Locator: group button > back: click
    def RMSY_search_label_groupchat_groupbtn_back_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_groupbtn_back)
    # Locator: group button > contacts list: click the n-th entry
    def RMSY_search_label_groupchat_groupbtn_Contacts_click(self, n):
        return self.p.clicks(self.RMSY_search_label_groupchat_groupbtn_Contacts,n)
    # Locator: group button > message input box: type the given text
    def RMSY_search_label_groupchat_groupbtn_msginput_sendkeys(self, msg):
        return self.p.send_keys(self.RMSY_search_label_groupchat_groupbtn_msginput, msg)
    # Locator: group button > message button: click
    def RMSY_search_label_groupchat_groupbtn_msgbtn_click(self):
        return self.p.click(self.RMSY_search_label_groupchat_groupbtn_msgbtn)
    # ===== [PAGE3] Contacts home > tap contact > open main menu > heat settings > one-to-one conversation > period: RMSY_contacts_menu_heatsetting_p2pconversation_period =====
    # Locator: period > back: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_back_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_back)
    # Locator: period > every day: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_everyday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_everyday)
    # Locator: period > workdays: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_workday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_workday)
    # Locator: period > holidays: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_holiday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_holiday)
    # Locator: period > pick a date: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_selectday_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_selectday)
    # Locator: period > save: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_period_save_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_period_save)
    # ===== [PAGE3] Contacts home > tap contact > open main menu > heat settings > one-to-one conversation > time slot: RMSY_contacts_menu_heatsetting_p2pconversation_time =====
    # Locator: time slot > confirm: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_time_confirm_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_time_confirm)
    # Locator: time slot > cancel: click
    def RMSY_contacts_menu_heatsetting_p2pconversation_time_cancel_click(self):
        return self.p.click(self.RMSY_contacts_menu_heatsetting_p2pconversation_time_cancel)
    # ===== [PAGE3] Contacts home > tap contact > message > heat settings > period: RMSY_contacts_msg_menu_heatsetting_period =====
    # Locator: period > back: click
    def RMSY_contacts_msg_menu_heatsetting_period_back_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_back)
    # Locator: period > every day: click
    def RMSY_contacts_msg_menu_heatsetting_period_everyday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_everyday)
    # Locator: period > workdays: click
    def RMSY_contacts_msg_menu_heatsetting_period_workday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_workday)
    # Locator: period > holidays: click
    def RMSY_contacts_msg_menu_heatsetting_period_holiday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_holiday)
    # Locator: period > pick a date: click
    def RMSY_contacts_msg_menu_heatsetting_period_selectday_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_selectday)
    # Locator: period > save: click
    def RMSY_contacts_msg_menu_heatsetting_period_save_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_period_save)
    # ===== [PAGE4] Contacts home > tap contact > message > heat settings > time slot: RMSY_contacts_msg_menu_heatsetting_time =====
    # Locator: time slot > confirm: click
    def RMSY_contacts_msg_menu_heatsetting_time_confirm_click(self):
        return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_time_confirm)
    # Locator: time slot > cancel: click
    def RMSY_contacts_msg_menu_heatsetting_time_cancel_click(self):
return self.p.click(self.RMSY_contacts_msg_menu_heatsetting_time_cancel) | [
"291008572@qq.com"
] | 291008572@qq.com |
c74eb5bd52247b730a59e99e72bbe73dff621644 | 081792dd1ab498c012db72fb71c0a4789722547a | /ch10_errorExample.py | 9cd7bf666e78ee5d5b76a78aadd0a6686cceeed7 | [] | no_license | Elmlea/pythonAuto | 5c0ae48ec6a851f1d15ff0129e353b1cdf2db5ec | bd366ac4794ce302c19fa6bf18a4f3689a51ee74 | refs/heads/master | 2020-12-10T01:13:03.084395 | 2020-05-14T14:17:37 | 2020-05-14T14:17:37 | 233,467,273 | 0 | 0 | null | 2020-01-23T23:33:33 | 2020-01-12T22:19:07 | Python | UTF-8 | Python | false | false | 95 | py | def spam():
bacon()
def bacon():
    """Always raise, so the traceback shows the spam() -> bacon() call chain."""
    raise Exception("This is the error message.")
spam()  # deliberately uncaught: demonstrates the traceback of the raise above
| [
"mark.wharry@gmail.com"
] | mark.wharry@gmail.com |
e032063e3a93480ea33ee428cbfbac54d8a1aa16 | 3beefb1ab5eaaab622e736dbd257795cec4c49fa | /Python3/P3_2.py | c89ae914a55c709186700b51256e7d351489e661 | [] | no_license | mkalapala/PythonCoursera | c8a94014281f16dbb2e8afd2a1efdfd4e4c84287 | 601ba1894cf486a8d9b6b8285b332da38a95f9d4 | refs/heads/master | 2021-06-14T01:25:54.316606 | 2017-03-14T01:03:46 | 2017-03-14T01:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | """Exploring the HyperText Transport Protocol
You are to retrieve the following document using the HTTP protocol in a way that you can examine the HTTP Response headers."""
# Speak HTTP/1.0 over a raw TCP socket so the response headers are visible
# alongside the body.
# NOTE: Python 2 code -- "print data;" is the py2 print statement, and
# send() is given a str (py3 would require bytes).
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4, TCP
mysock.connect(('www.py4inf.com', 80))
# HTTP/1.0 GET with an absolute URL; the blank line ("\n\n") ends the request.
mysock.send('GET http://www.pythonlearn.com/code/intro-short.txt HTTP/1.0\n\n')
while True:
    data = mysock.recv(512)   # read the response in chunks of up to 512 bytes
    if ( len(data) < 1 ) :    # empty read: the server closed the connection
        break
    print data;
mysock.close() | [
"noreply@github.com"
] | mkalapala.noreply@github.com |
c54474960393e315ce4c20c12727518a962feff2 | 766cc83bb5098d2fa8a88cf28b2d854d884b2118 | /app_servers.py | cfb6ff2d2d7f111e15025673c3eb78115066b2d4 | [] | no_license | XiangHan2018/wsgi_test | f7923276b0c95e0c66064beecc006b6087cb30a5 | 7a09d098e4ce7583219ee4eabb4d821b2d6594fe | refs/heads/master | 2020-11-28T20:36:29.027068 | 2020-02-03T09:38:04 | 2020-02-03T09:38:04 | 229,914,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | from wsgiref.simple_server import make_server
from APP import application
from app_cors import Http_split
# Wrap the login WSGI app in the Http_split middleware.
# NOTE(review): Http_split's behavior is defined in app_cors, not visible here.
app = Http_split(application.login)
# wsgiref's development-only server; serve_forever() blocks until interrupted.
server = make_server('localhost', 8080, app)
server.serve_forever()
| [
"XiangHan2018@hotmail.com"
] | XiangHan2018@hotmail.com |
8fcdd55cdac2fe7087a27487828c3c7b4369f056 | 4f91a0369f96eb6aa556125b7d6f2bce8e1a7c73 | /pics/views.py | 6d1d2ca7f932934874c5096d5a63532ca455c880 | [
"MIT"
] | permissive | DerrickOdhiambo/Personal-Gallery | bc2b4ab0058dd979c14bf0b42aa56ff0bff84a5c | b64fdf79e1fa2df56fb07aafa6cf2f09a9dc69bb | refs/heads/master | 2022-12-26T22:52:31.582839 | 2020-10-13T05:20:12 | 2020-10-13T05:20:12 | 302,565,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | from django.shortcuts import render
from .models import Image
def index(request):
    """Render the gallery landing page with the stored images."""
    # NOTE(review): ``all_images`` is referenced WITHOUT parentheses, unlike
    # ``Image.search_image_by_category(...)`` below -- if it is a method this
    # passes the bound method itself to the template; confirm that is intended.
    display_images = Image.all_images
    return render(request, 'index.html', {'display_images': display_images})
def search_by_category(request):
    """Render the search page with images matching the ``q`` query parameter.

    FIX: the original returned ``None`` (a Django server error, since every
    view must return an HttpResponse) when ``q`` was absent or empty; fall
    back to an empty result set so a response is always returned.
    """
    search_term = request.GET.get("q", "")
    if search_term:
        searched_images = Image.search_image_by_category(search_term)
    else:
        searched_images = []  # no (or empty) query: show nothing rather than crash
    message = f"{search_term}"
    return render(request, 'search.html', {'searched_images': searched_images, 'message': message})
def filter_by_location(request, location_id):
    """Render the search page with images filtered by *location_id*."""
    searched_images = Image.filter_by_location(location_id)
    # The displayed message is hard-coded to "1" (the original built it from
    # a throwaway search_term variable).
    message = '1'
    return render(
        request,
        'search.html',
        {'searched_images': searched_images, 'message': message},
    )
| [
"odhiamboderrick56@gmail.com"
] | odhiamboderrick56@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.