blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b455ffb9affb2046007a05d596c0edf937f68f83 | 2474f8f990c9921afa158eef0b97a32fba952fe6 | /automation/automation.py | 866cf551755c53875ebd5d8a40c76e316b493782 | [] | no_license | anas-abusaif/Automation | 344ff5c071e98d300e48a23a7ecb77355e926097 | bbe3879b115c1afb009b689f254ed0b4ce9f2fb9 | refs/heads/master | 2023-08-23T00:53:15.808560 | 2021-11-03T18:44:11 | 2021-11-03T18:44:11 | 424,241,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | from os import replace
import re
# Extract phone numbers and e-mail addresses from a plain-text contact dump.
with open('potential-contacts.txt') as file:
    # Rebind `file` to the whole text; the findall() calls below scan this string.
    file = file.read()
# Three accepted phone shapes: "(xxx)…xxx-xxxx", "xxx-xxx-xxxx", "xxx-xxxx".
phone_numbers = re.findall(r'[(]+[0-9]+[)]?-?[0-9]{3}-?[0-9]{4}|[\d]{3}-[\d]{3}-[\d]{4}|[\d]{3}-[\d]{4}', file)
# Deduplicate before normalising.
phone_numbers = set(phone_numbers)
cleaned_phone_numbers = []
# Short (7-digit) numbers get the default 206 area code prepended.
for i in phone_numbers:
    if len(i) < 12:
        cleaned_phone_numbers.append('206-' + i)
# Full "xxx-xxx-xxxx" matches are already in the target format.
for i in phone_numbers:
    if len(i) == 12:
        cleaned_phone_numbers.append(i)
# Parenthesised numbers are rewritten to dashed form; the slice skips index 4
# (the ')').  NOTE(review): i[5:] assumes a separator right after ')' -- a
# match like "(425)890-7654" loses a digit; verify against the input data.
for i in phone_numbers:
    if i[0] == '(':
        cleaned_phone_numbers.append(i[1:4] + '-' + i[5:])
with open('phone_numbers.txt', 'w') as result_file:
    for i in cleaned_phone_numbers:
        result_file.write(f'{i}\n')
# Standard e-mail pattern; deduplicated like the phone numbers.
emails = re.findall(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', file)
emails = set(emails)
with open('emails.txt', 'w') as emails_file:
for i in emails:
emails_file.write(f'{i}\n') | [
"anasabusief@gmail.com"
] | anasabusief@gmail.com |
b97d1f05cee577fa55fa3374f1bb423691aaa88f | 83d4d660d7d5ceab8d39e1604909498124bcdbcc | /boatgod/lora.py | 38ca1c938fd8cbcc3bc6911ee8efe2f47af456ad | [] | no_license | kivsiak/boatgod-test | 36262f1872ad4ff259753299613f8d766df7b757 | 931014d129098bde39d91fc75f9e098f1a7c31e7 | refs/heads/master | 2020-04-16T23:57:45.371224 | 2019-01-22T19:39:39 | 2019-01-22T19:39:39 | 166,032,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,546 | py | import asyncio
import aiopubsub
from boatgod.hub import lora_publisher
from boatgod.nmea2000 import create_rmp_message, create_voltage_message
class LoraProtocol(asyncio.Protocol):
    """asyncio protocol that decodes framed sensor packets from the LoRa
    radio link and republishes the decoded readings on the pub/sub hub.

    Wire framing, as implemented by data_received():
      * 0xAA starts a new frame (decoder state is reset),
      * 0xBB is an escape marker: the NEXT byte is XOR-ed with 0xFF,
      * the first payload byte is the announced frame length,
      * the byte after the payload is an additive checksum (mod 256).
    """

    # Raw ADC counts -> volts.  NOTE(review): constant looks empirically
    # calibrated (1.7636.../100) -- confirm against the sender's ADC scaling.
    VOLTAGE_MULTIPLIER = 1.7636363636363637 / 100
    # Multiplier applied to the raw rotation counter.
    RPM_CALIBRATION = 1

    def __init__(self):
        self.result = []     # decoded bytes of the frame in progress
        self.inverse = 0x00  # XOR mask for the next byte (0xFF after a 0xBB escape)
        self.state = 0       # 0 = waiting for frame start, 1 = inside a frame
        self.point = 0       # index of the next byte within the frame
        self.len = 0         # payload length announced by the frame header
        self.crc = 0         # running additive checksum
        self.transport = None

    def connection_made(self, transport):
        # Keep the transport handle; nothing is written back to the radio here.
        self.transport = transport
        print('port opened', transport)

    def data_received(self, pck):
        """Feed raw serial bytes through the de-framing state machine."""
        for a in pck:
            if a == 0xAA:
                # Frame start marker: reset all decoder state.
                self.result = []
                self.state = 1
                self.point = 0
                self.len = 0
                self.inverse = 0x00
                self.crc = 0
                continue
            if a == 0xBB:
                # Escape marker: un-mask the next byte with XOR 0xFF.
                self.inverse = 0xFF
                continue
            if self.state == 0:
                # Bytes outside a frame are discarded.
                continue
            b = a ^ self.inverse
            self.inverse = 0
            if self.point == 0:
                # First in-frame byte carries the payload length.
                self.len = b
            if self.point > self.len:
                # Byte past the payload is the checksum; a mismatch drops the frame.
                if b != (self.crc & 255):
                    self.state = 0
                    continue
                # Deliver the payload (without the leading length byte).
                # NOTE(review): state is not reset here; the next 0xAA starts
                # the following frame.
                self.on_message(self.result[1:])
            self.crc += b
            self.result.append(b)
            self.point += 1

    def connection_lost(self, exc):
        pass

    def on_message(self, msg):
        """Dispatch one decoded frame by its command byte (msg[3])."""
        cmd = msg[3]
        if cmd == 0x02:  # voltage + RPM report (comment translated from Russian)
            # Both fields are little-endian 16-bit values.
            v = int.from_bytes(msg[6:8], "little") * LoraProtocol.VOLTAGE_MULTIPLIER
            # Scaled by 60 -- presumably per-second counts to RPM; confirm.
            rpm = int.from_bytes(msg[4:6], "little") * 60 * LoraProtocol.RPM_CALIBRATION
            lora_publisher.publish(aiopubsub.Key('obj', 'voltage'), v)
            lora_publisher.publish(aiopubsub.Key('obj', 'rpm'), rpm)
            # Mirror both readings onto the NMEA2000 message stream.
            lora_publisher.publish(aiopubsub.Key('message', 'nmea2000'),
                                   create_rmp_message(rpm))
            lora_publisher.publish(aiopubsub.Key('message', 'nmea2000'),
                                   create_voltage_message(v))
        if cmd == 0x03:  # reed switch / leak / battery report (translated)
            lora_publisher.publish(aiopubsub.Key('obj', 'flood'),
                                   dict(water=msg[4],
                                        door=msg[5],
                                        battery=msg[6],
                                        ))
| [
"kivsiak@gmail.com"
] | kivsiak@gmail.com |
1c8db9717d1a853fd31944ddc0e85e2c055d7437 | 7126788c74425ca9c8681cac2f0dc29b1d7b04ee | /Assignment 3/bayes.py | 21b08f94d2761212fe98ccbc62faf0da5b21fc5c | [] | no_license | LukasZhornyak/Intro-to-Machine-Learning | 7d18ea80783432e0845dba8edbc9c72bf5a1adf4 | aed9c2bd40d1dae6f65f259847a3454a6452f518 | refs/heads/master | 2021-09-10T11:33:36.636396 | 2018-03-25T18:41:49 | 2018-03-25T18:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | import numpy as np
def naive_bayes(data):
    """Fit the naive-Bayes classifier by tuning its smoothing prior.

    Runs gradient descent over [m, p] (pseudo-count and prior), starting
    from m=10, p=0.5, then rebuilds the final log-odds parameter vector
    from the optimised prior on the training split.

    data -- sequence whose element 0 is the (features, labels) training
            pair consumed by build_bayes_param / bayes_gradient.
            TODO confirm full layout against the caller.
    Returns (param, prior).
    """
    prior = gradient_descent(data, [], np.array([10.] + [0.5]), bayes_gradient)
    param = build_bayes_param(data[0], prior[1:], prior[0])
    return param, prior
def conditional_probability(data, labels, p, m):
    """Laplace-smoothed per-feature statistics for the two classes.

    Returns (p_fake, p_real): feature probabilities for the positive
    (labels truthy) and negative classes, each smoothed with pseudo-count
    m weighted by prior p.
    """
    positive = labels
    negative = np.logical_not(labels)
    smoothing = p * m
    p_fake = (data.T * positive + smoothing) / (np.sum(positive) + m)
    p_real = (data.T * negative + smoothing) / (np.sum(negative) + m)
    return p_fake, p_real
def bayes_is_fake(param, data):
    """True where the model's log-odds score is positive (classified fake)."""
    score = eval_bayes(param, data)
    return score > 0
def eval_bayes(param, data):
    """Log-odds score: bias term param[0] plus per-feature contributions."""
    bias = param[0]
    weights = param[1:]
    return bias + data * weights
def cross_entropy(yh, y):
    """Total binary cross-entropy of raw scores yh against 0/1 targets y.

    A sigmoid converts scores to probabilities first.  Not numerically
    stabilised, so extreme scores can overflow the logs.
    """
    sigmoid = 1 / (1 + np.exp(-yh))
    losses = y * np.log(sigmoid) + (1 - y) * np.log(1 - sigmoid)
    return -np.sum(losses)
def build_bayes_param(data, p, m):
    """Build the naive-Bayes log-odds parameter vector [bias, weights...].

    data -- (features, labels) training pair.
    p, m -- smoothing prior and pseudo-count for conditional_probability.
    """
    p_fake, p_real = conditional_probability(data[0], data[1], p, m)
    n = len(p_fake)
    param = np.empty(n + 1)
    # Bias = class-prior log-odds plus the summed "feature absent" log-ratios.
    # NOTE(review): the prior ratio uses n = len(p_fake) (feature count) where
    # a sample count seems intended -- only equivalent when the matrix is
    # square; verify against the data layout.
    param[0] = np.log(np.sum(data[1]).astype(float) / (n - np.sum(data[1]))) \
        + np.sum(np.log((1 - p_fake) / (1 - p_real)))
    # Per-feature weight: "present" log-ratio minus the "absent" log-ratio.
    param[1:] = np.log(p_fake / p_real) - np.log((1 - p_fake) / (1 - p_real))
    return param
def bayes_gradient(data, labels, p, h=1e-5):
    """Finite-difference gradient of the validation loss w.r.t. p.

    p packs [m, prior...].  Each component is nudged by h in turn; the
    parameter vector is refit on the training split data[0] and the change
    in cross-entropy on the validation split data[1] is measured.
    """
    param = build_bayes_param(data[0], p[1:], p[0])
    y0 = cross_entropy(eval_bayes(param, data[1][0]), data[1][1])
    dp = np.empty_like(p)
    # One uniform loop covers every component, including the pseudo-count
    # at index 0 (the original spelled that case out separately).
    for i in range(len(p)):
        nudged = p.copy()
        nudged[i] = nudged[i] + h
        dparam = build_bayes_param(data[0], nudged[1:], nudged[0])
        dp[i] = (cross_entropy(eval_bayes(dparam, data[1][0]),
                               data[1][1]) - y0) / h
    return dp
def gradient_descent(data, labels, parameters, gradient, learning_rate=1e-5,
                     epsilon=4e-5, max_iter=1e5):
    """Plain gradient descent.

    Repeats parameters -= learning_rate * gradient(data, labels, parameters)
    until the step size drops below epsilon or max_iter steps have run.
    The caller's array is left untouched; an updated copy is returned.
    """
    parameters = parameters.copy()
    previous = np.zeros_like(parameters)
    steps = 0
    while np.linalg.norm(parameters - previous) > epsilon and steps < max_iter:
        previous = parameters.copy()
        parameters -= gradient(data, labels, parameters) * learning_rate
        steps += 1
    return parameters
| [
"CainRose@users.noreply.github.com"
] | CainRose@users.noreply.github.com |
b338b9a751885a042160b09b6f62936a8b9e65c6 | b63f6a17a6d3b3dcd80780718f4b43af82f34a6f | /get_area_data.py | 8cd2be760a23f80c00b2d65e98566b677e104a7a | [] | no_license | spang/mountainproject | 813acdb03081fdced775d7a82aa688629fdd7212 | 828542968ab43cc909e912303d6392cb6a2e1a0e | refs/heads/master | 2016-09-05T17:51:39.081058 | 2013-03-16T04:45:24 | 2013-03-16T04:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | #!/usr/bin/python3
import requests
import sys, os
# Output directory for the downloaded area packages.
DATADIR = 'data'

# Mountain Project area name -> numeric area id used in the S3 object keys.
areas = dict(spain="106111770", gunks="105798167", leavenworth="105790610",
             mazama="106112166", index="105790635", crowhill="105905492",
             quincy="105908121", cathedral="105908823",
             whitehorse="105909079", seneca="105861910")
def aws_url(area_id):
    """Return the S3 download URL of an area's gzipped data package."""
    base = 'http://s3.amazonaws.com/MobilePackages/'
    return base + area_id + '.gz'
def aws_img_url(area_id):
    """Return the S3 download URL of an area's image tarball."""
    base = 'http://s3.amazonaws.com/MobilePackages/'
    return base + area_id + '_img.tgz'
def _download(url, fname):
    """Fetch url and store the body under DATADIR/fname; log failures to stderr.

    Improvements over the original inline code: the duplicated request/save
    sequence (data package vs. image tarball) is factored into this helper,
    and the output file is opened with `with` so the handle is always closed.
    """
    r = requests.get(url)
    if r.ok:
        with open(os.path.join(DATADIR, fname), 'wb') as out:
            out.write(r.content)
        print("got file", fname)
    else:
        print("error getting file", fname, file=sys.stderr)

if not os.path.exists(DATADIR):
    os.makedirs(DATADIR)

for name, area_id in areas.items():
    # Each area ships as a data package plus a separate image tarball.
    _download(aws_url(area_id), name + "-" + area_id + '.gz')
    _download(aws_img_url(area_id), name + "-" + area_id + '_img.tgz')
| [
"christine@spang.cc"
] | christine@spang.cc |
f2019d1e9561228cb0481b67234bdf4a1540de14 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/simcore-sdk/src/simcore_sdk/node_data/__init__.py | 595d6f1ed7c62d9dc3319d2cc38f81966604b3ec | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 27 | py | from . import data_manager
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
5f021c7f67037101485a78987bd462e9077c3f9a | 45dd427ec7450d2fac6fe2454f54a130b509b634 | /homework_3/preparation2.py | f45b9c53cbbbda4b2d028ec030b01ce3a6e5a699 | [] | no_license | weka511/smac | 702fe183e3e73889ec663bc1d75bcac07ebb94b5 | 0b257092ff68058fda1d152d5ea8050feeab6fe2 | refs/heads/master | 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import os, random
# Load the 3-disk configuration from file if one exists, otherwise start
# from a fresh random configuration in the unit box.
filename = 'disk_configuration.txt'
if os.path.isfile(filename):
    f = open(filename, 'r')
    L = []
    for line in f:
        # Each line stores one disk centre as "x y".
        a, b = line.split()
        L.append([float(a), float(b)])
    f.close()
    print ('starting from file', filename)
else:
    L = []
    for k in range(3):
        L.append([random.uniform(0.0, 1.0), random.uniform(0.0, 1.0)])
    print ('starting from a new random configuration')
# Overwrite the first x-coordinate with a value outside the unit box --
# presumably to exercise downstream validation; confirm the exercise intent.
L[0][0] = 3.3
# Persist the configuration back to disk, one disk per line.
f = open(filename, 'w')
for a in L:
    f.write(str(a[0]) + ' ' + str(a[1]) + '\n')
f.close() | [
"simon@greenweaves.nz"
] | simon@greenweaves.nz |
dcf1b8da0e24589c36e224719499d07a0cf14ac6 | ab11640874d7f7eb6c6c44ecadf0022368fd3d30 | /ppm.py | 0a2936220a56bda68cb0ba41af36762844c0711b | [] | no_license | bsdphk/BSTJ_reformat | 074d44d86cb0fccd25e47be5ffc2199c910640bf | 9e72421ed110a582f67cd94727573da9b68c4ed2 | refs/heads/master | 2021-01-25T10:11:42.752665 | 2013-01-23T09:44:26 | 2013-01-23T09:44:26 | 7,771,692 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,280 | py |
from __future__ import print_function
import mmap
import os
import sys
class ppm(object):
    """Memory-mapped binary PPM ("P6") image, readable or writable.

    Write mode creates an all-white image of the given size; read mode maps
    an existing file read-only.  In both modes the P6 header is then parsed
    to set self.x / self.y / self.d and self.o (offset of the first pixel
    byte).  Pixel access goes straight through the mmap, so writes hit the
    file immediately.

    NOTE(review): written for Python 2 -- str assignment into the mmap and
    `self.m[:2] == "P6"` would need bytes handling under Python 3.
    """

    def __init__(self, fn, a = "r", x = None, y = None):
        assert a == "r" or a == "w"
        if a == "w":
            self.wr = True
            # A freshly created image needs explicit, positive dimensions.
            assert type(x) == int
            assert type(y) == int
            assert x > 0
            assert y > 0
        else:
            self.wr = False
        if self.wr:
            self.fi = open(fn, "w+b")
            self.fi.truncate(0)
            # 19 header bytes ("P6\n%5d %5d\n%3d\n") + 3 bytes per pixel.
            self.fi.truncate(19 + 3 * x * y)
            self.m = mmap.mmap(self.fi.fileno(), 0 )
            s = "P6\n%5d %5d\n%3d\n" % (x, y, 255)
            self.m[0:len(s)] = s
            # Fill every pixel with white (255, 255, 255).
            self.m[len(s):] = str(bytearray((255,255,255)) * (x * y))
        else:
            self.fi = open(fn, "rb")
            self.m = mmap.mmap(self.fi.fileno(),
                               0, prot=mmap.PROT_READ)
        # Parse the P6 header (also in write mode, where it was just
        # written): type line, "x y" line, depth line; '#' comments skipped.
        assert self.m[:2] == "P6"
        o = 0
        n = 0
        while True:
            x = self.m.find("\n", o, o + 100)
            assert x >= -1
            s = self.m[o:x]
            o = x + 1
            if s[0] == '#':
                continue
            if n == 0:
                self.type = s
            elif n == 1:
                s = s.split()
                self.x = int(s[0])
                self.y = int(s[1])
            elif n == 2:
                self.d = int(s)
                # Offset of the first pixel byte, used by all accessors.
                self.o = o
                break
            n += 1
        # Lazy per-axis brightness histograms, populated by hist().
        self.xhis = None
        self.yhis = None
        self.fn = fn

    def __repr__(self):
        return "<P %dx%d %s>" % (self.x, self.y, self.fn)

    def rdpx(self, x, y):
        # Return the (r, g, b) bytes of one pixel.
        i = self.o + 3 * (y * self.x + x)
        return bytearray(self.m[i:i+3])

    def wrpx(self, x, y, r, g, b):
        # Write one pixel; out-of-range coordinates are reported, not fatal.
        assert self.wr
        if y >= self.y:
            print("WRPX hi y", self.y, y)
            return
        if x >= self.x:
            print("WRPX hi x", self.x, x)
            return
        i = self.o + 3 * (y * self.x + x)
        self.m[i:i+3] = str(bytearray((r,g,b)))

    def clone(self, fn):
        # Create a same-size writable copy of this image's pixel data.
        o = ppm(fn, "w", self.x, self.y)
        o.m[o.o:] = self.m[self.o:]
        return o

    def hist(self):
        # Fill self.yhis with the mean of all channel bytes per row.
        # NOTE(review): the per-column accumulation into lx is commented out,
        # so self.xhis ends up all zeros -- looks deliberately disabled.
        self.yhis = list()
        lx = list([0] * (self.x * 3))
        for y in range(0, self.y):
            o = self.o + y * self.x * 3
            w = self.x * 3
            v = bytearray(self.m[o:o+w])
            self.yhis.append(sum(v)/float(w))
            #for i in range(len(v)):
            #    lx[i] += v[i]
        self.xhis = list()
        for x in range(0, self.x):
            self.xhis.append(sum(lx[x * 3:x*3+3]) / (3 * self.y))

    def put_rect(self, xlo, ylo, r):
        # Blit an iterable of row byte-strings into the image at (xlo, ylo).
        for b in r:
            o = self.o + ylo * self.x * 3 + xlo * 3
            self.m[o:o+len(b)] = str(b)
            ylo += 1
class rect(object):
    """Rectangular window [xlo, xhi) x [ylo, yhi) into a ppm image, with
    per-row / per-column statistics helpers used for scan analysis."""

    def __init__(self, parent, xlo = 0, ylo = 0, xhi = None, yhi = None):
        self.p= parent
        self.xlo = xlo
        self.ylo = ylo
        # Missing bounds default to the full parent image.
        if xhi == None:
            xhi = parent.x
        self.xhi = xhi
        if yhi == None:
            yhi = parent.y
        self.yhi = yhi
        # Free-form classification tag, set later via set_typ().
        self.typ = None

    def set_typ(self, typ):
        self.typ = typ

    def outline(self, o, r, g, b):
        # Draw a two-pixel-wide border of colour (r, g, b) into image o.
        for x in range(self.xlo, self.xhi - 1):
            o.wrpx(x, self.ylo, r, g, b)
            o.wrpx(x, self.ylo + 1, r, g, b)
            o.wrpx(x, self.yhi - 2, r, g, b)
            o.wrpx(x, self.yhi - 1, r, g, b)
        for y in range(self.ylo, self.yhi - 1):
            o.wrpx(self.xlo, y, r, g, b)
            o.wrpx(self.xlo + 1, y, r, g, b)
            o.wrpx(self.xhi - 2, y, r, g, b)
            o.wrpx(self.xhi - 1, y, r, g, b)

    def yavg(self):
        # Per-row mean over all channel bytes inside the window.
        l = list()
        w= (self.xhi - self.xlo) * 3
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            a = sum(bytearray(self.p.m[a0:a0 + w]))
            a /= float(w)
            l.append(a)
        return l

    def ymin(self):
        # Per-row minimum channel byte.
        l = list()
        w= (self.xhi - self.xlo) * 3
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            a = min(bytearray(self.p.m[a0:a0 + w]))
            l.append(a)
        return l

    def ymax(self):
        # Per-row maximum channel byte.
        l = list()
        w= (self.xhi - self.xlo) * 3
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            a = max(bytearray(self.p.m[a0:a0 + w]))
            l.append(a)
        return l

    def xmin(self):
        # Per-column minimum, sampling one byte per pixel (b[i * 3]).
        w= (self.xhi - self.xlo)
        l = [255] * w
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            b = bytearray(self.p.m[a0:a0 + w * 3])
            for i in range(w):
                l[i] = min(l[i], b[i * 3])
        return l

    def xmax(self):
        # Per-column maximum, sampling one byte per pixel (b[i * 3]).
        w= (self.xhi - self.xlo)
        l = [0] * w
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            b = bytearray(self.p.m[a0:a0 + w * 3])
            for i in range(w):
                l[i] = max(l[i], b[i * 3])
        return l

    def xavg(self):
        # Per-column mean, sampling one byte per pixel (b[i * 3]).
        w= (self.xhi - self.xlo)
        l = [0] * w
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            b = bytearray(self.p.m[a0:a0 + w * 3])
            for i in range(w):
                l[i] += b[i * 3]
        for i in range(w):
            l[i] /= float(self.yhi - self.ylo)
        return l

    def ydens(self, lo = 64, hi = 192):
        # Per-row counts of "dark" (< lo) and "bright" (> hi) samples.
        # NOTE(review): indexes b[i], unlike xmin/xmax which sample b[i * 3];
        # this covers only the first w bytes of each row -- verify intent.
        w= (self.xhi - self.xlo)
        h= (self.yhi - self.ylo)
        dl = [0] * h
        dh = [0] * h
        for y in range(h):
            a0 = self.p.o + (self.xlo + (self.ylo + y) * self.p.x) * 3
            b = bytearray(self.p.m[a0:a0 + w * 3])
            for i in range(w):
                v = b[i]
                if v < lo:
                    dl[y] += 1
                elif v > hi:
                    dh[y] += 1
        return dl, dh

    def hist(self):
        # 256-bin histogram of one byte per pixel (b[i * 3]) over the window.
        w= (self.xhi - self.xlo)
        h= (self.yhi - self.ylo)
        hh = [0] * 256
        for y in range(h):
            a0 = self.p.o + (self.xlo + (self.ylo + y) * self.p.x) * 3
            b = bytearray(self.p.m[a0:a0 + w * 3])
            for i in range(w):
                v = b[i * 3]
                hh[v] += 1
        return hh

    def __iter__(self):
        # Yield the raw bytes of each row inside the window.
        w= (self.xhi - self.xlo)
        for y in range(self.ylo, self.yhi):
            a0 = self.p.o + (self.xlo + y * self.p.x) * 3
            yield bytearray(self.p.m[a0:a0 + w * 3])

    def __repr__(self):
        return "<R %dx%d+%d+%d>" % (
            self.xhi - self.xlo,
            self.yhi - self.ylo,
            self.xlo, self.ylo
        )
| [
"phk@FreeBSD.org"
] | phk@FreeBSD.org |
3a9096dc1ec53ed83ea0a5980a79dd6aca7fe133 | 33277474b13e97876c18337c963717efee3ed5f9 | /Testing_Assignment/login.py | dc79d9e382c8545f7e0a5b080ae3d1e801cd7cb4 | [] | no_license | Ramya2902/nbny6 | b693f5b9c03b3c821c65f14996a278fec452fd6c | 0c605fb789c6a582870c16f89113ff53b1570221 | refs/heads/master | 2020-07-10T19:19:03.123939 | 2019-09-26T13:31:08 | 2019-09-26T13:31:08 | 204,345,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | def username():
username='Ramya';
return username
| [
"nbny6@mail.missouri.edu"
] | nbny6@mail.missouri.edu |
6e7cb657f766e088b1c0fb3cbe8754948b3991a6 | c3175f2b482691fbfcb9adc391b4d45b6f17b09d | /PyOhio_2019/examples/pyscript_example.py | 0b49ed87ee1a875552f07f3411e05bb70e6a9b23 | [
"MIT"
] | permissive | python-cmd2/talks | 27abff4566c6545c00ad59c701831694224b4ccf | 64547778e12d8a457812bd8034d3c9d74edff407 | refs/heads/master | 2023-08-28T20:45:01.123085 | 2021-03-29T20:44:36 | 2021-03-29T20:44:36 | 197,960,510 | 2 | 3 | MIT | 2022-01-21T20:03:37 | 2019-07-20T17:14:51 | Python | UTF-8 | Python | false | false | 3,750 | py | #!/usr/bin/env python
# coding=utf-8
"""A sample application for how Python scripting can provide conditional control flow of a cmd2 application"""
import os
import cmd2
from cmd2 import style
class CmdLineApp(cmd2.Cmd):
    """ Example cmd2 application to showcase conditional control flow in Python scripting within cmd2 aps. """

    # NOTE: do_* docstrings below are rendered by cmd2 as the commands'
    # help text, so they are runtime-visible and left untouched.

    def __init__(self):
        # Enable the optional ipy command if IPython is installed by setting use_ipython=True
        super().__init__(use_ipython=True)
        self._set_prompt()
        self.intro = 'Built-in Python scripting is a killer feature ...'

        # Add cwd accessor to Python environment used by pyscripts
        self.py_locals['cwd'] = self.cwd

    def _set_prompt(self):
        """Set prompt so it displays the current working directory."""
        self._cwd = os.getcwd()
        self.prompt = style('{!r} $ '.format(self._cwd), fg='cyan')

    def postcmd(self, stop: bool, line: str) -> bool:
        """Hook method executed just after a command dispatch is finished.

        :param stop: if True, the command has indicated the application should exit
        :param line: the command line text for this command
        :return: if this is True, the application will exit after this command and the postloop() will run
        """
        # The second string below is a stray expression statement (not the
        # docstring); kept as-is since removing it is a code change.
        """Override this so prompt always displays cwd."""
        self._set_prompt()
        return stop

    def cwd(self):
        """Read-only property used by the pyscript to obtain cwd"""
        return self._cwd

    @cmd2.with_argument_list
    def do_cd(self, arglist):
        """Change directory.
        Usage:
            cd <new_dir>
        """
        # Expect 1 argument, the directory to change to
        if not arglist or len(arglist) != 1:
            self.perror("cd requires exactly 1 argument")
            self.do_help('cd')
            return

        # Convert relative paths to absolute paths
        path = os.path.abspath(os.path.expanduser(arglist[0]))

        # Make sure the directory exists, is a directory, and we have read access
        out = ''
        err = None
        data = None
        if not os.path.isdir(path):
            err = '{!r} is not a directory'.format(path)
        elif not os.access(path, os.R_OK):
            err = 'You do not have read access to {!r}'.format(path)
        else:
            try:
                os.chdir(path)
            except Exception as ex:
                err = '{}'.format(ex)
            else:
                out = 'Successfully changed directory to {!r}\n'.format(path)
                self.stdout.write(out)
                data = path

        if err:
            self.perror(err)
        # Expose the outcome to pyscripts: new path on success, None on failure.
        self.last_result = data

    # Enable tab completion for cd command
    def complete_cd(self, text, line, begidx, endidx):
        return self.path_complete(text, line, begidx, endidx, path_filter=os.path.isdir)

    # Class-level argparse parser driving the dir command's flags.
    dir_parser = cmd2.Cmd2ArgumentParser()
    dir_parser.add_argument('-l', '--long', action='store_true', help="display in long format with one item per line")

    @cmd2.with_argparser_and_unknown_args(dir_parser)
    def do_dir(self, args, unknown):
        """List contents of current directory."""
        # No arguments for this command
        if unknown:
            self.perror("dir does not take any positional arguments:")
            self.do_help('dir')
            return

        # Get the contents as a list
        contents = os.listdir(self._cwd)

        # Long format prints one entry per line; default is space-separated.
        fmt = '{} '
        if args.long:
            fmt = '{}\n'
        for f in contents:
            self.stdout.write(fmt.format(f))
        self.stdout.write('\n')
        # Expose the directory listing to pyscripts.
        self.last_result = contents
if __name__ == '__main__':
    # Run the interactive command loop and propagate its exit code.
    import sys
    app = CmdLineApp()
    sys.exit(app.cmdloop())
| [
"todd.leonhardt@gmail.com"
] | todd.leonhardt@gmail.com |
59484598a05007d2cfa6633a4f83be87423d45ff | d73f795654dad6ac5cc813319e1cc0c57981420a | /Robot.py | 07d6756ef6752f82a5afe576ebff98932cd0c7fd | [] | no_license | sehyun-hwang/finger_inverse_kinematic | 9545d1a1765659bd60f724ad3bedb6d8eddc38c6 | 842fd7db8314ed05c214b938d510219b03d2c207 | refs/heads/main | 2023-04-10T17:59:08.335452 | 2021-04-25T05:57:11 | 2021-04-25T05:57:11 | 361,333,102 | 1 | 0 | null | 2021-04-25T04:46:37 | 2021-04-25T04:46:36 | null | UTF-8 | Python | false | false | 3,329 | py | import traceback
import numpy as np
from env import Env
from ddpg import DDPG
import config
from os import getppid, getenv
from os.path import isfile
from random import random, randrange
from socketio import Client as IO, ClientNamespace as Namespace
from socket import gethostname as Host
from base64 import b64decode
import json
import numpy as np
# Hyper-parameters arrive base64-encoded in the PARAMS env var ('e30=' is '{}').
PARAMS = json.loads(b64decode(getenv("PARAMS", 'e30=')))
print(PARAMS)

# Path of the persisted DDPG model weights.
MODEL_PATH = 'Model'
def default(obj):
    """json.dumps fallback encoder: make numpy arrays serialisable.

    Used via json.dumps(..., default=default); any type we do not know how
    to encode is rejected with TypeError, as the json protocol requires.
    """
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    #if isinstance(value, date):
    #    return value.replace(tzinfo=timezone.utc).timestamp()
    # Bug fix: the original raised via `type(obj) + '...'`, which itself
    # raises TypeError (type + str) and masks the intended message.
    raise TypeError(str(type(obj)) + ' is not serializable')
class JSON:
    """Minimal json-module facade whose dumps() understands numpy arrays."""

    @staticmethod
    def dumps(obj, *args, **kwargs):
        # Inject the numpy-aware fallback encoder on every call.
        return json.dumps(obj, *args, default=default, **kwargs)

    @staticmethod
    def loads(*args, **kwargs):
        return json.loads(*args, **kwargs)
# Socket.IO namespace used for all of this app's events.
namespace = '/Container'
"""
class CustomNamespace(Namespace):
def on_test(self, data, callback=Callback):
print('test', data)
return ["OK", 123]
def reset(self, reset_rotation=True):
print('reset')
return self.get_state()
def step(self, action):
return s, r, done
def get_state(self):
return np.array([x1, y1, x2, y2, x3, y3, xt, yt])
"""
# Shared Socket.IO client; the JSON codec is extended to serialise numpy arrays.
io = IO(
    json=JSON(), #logger=True, engineio_logger=True
)
# Shorthand: emit a 'Container' event carrying the positional args as a list.
Emit = lambda *args: io.emit('Container', list(args), namespace=namespace)
def On(value, key=''):
    """Register `value` as a Socket.IO event handler.

    value -- either a plain callable (invoked directly with the payload) or
             an object: then the first payload element names the method to
             dispatch to.
    key   -- event name; defaults to the function's or the value's type name.
    Exceptions from the handler are caught and returned to the caller as
    {"error": ..., "stack": ...} instead of crashing the event loop.
    """
    global io, namespace
    isFn = callable(value)
    if not key:
        key = value.__name__ if isFn else type(value).__name__
    print(key)

    @io.on(key, namespace=namespace)
    def handler(data):
        # Object dispatch: data[0] is the method name.
        fn = value if isFn else getattr(value, data[0])
        # A trailing dict payload element becomes **kwargs (popped off data).
        kwargs = data.pop() if len(data) and isinstance(data[-1], dict) else {}
        args = data[0 if isFn else 1:]
        print(fn.__name__, args, kwargs)
        try:
            result = fn(*args, **kwargs)
            print(result)
        except BaseException as error:
            print(error)
            result = {"error": repr(error), "stack": traceback.format_exc()}
        return result
def Learn(var):
    """One training tick: decay exploration variance and run a DDPG update.

    var -- current exploration variance; decayed (and the model trained)
           only once the replay buffer has filled.  Returns the variance.
    Relies on the module-level `model` built in the __main__ block.
    """
    print(model.memory_counter, model.memory_size)
    # Periodic checkpointing, currently disabled:
    # if model.memory_counter % 1000:
    #     print('Model not saved')
    # else:
    #     model.save_model(MODEL_PATH)
    #     print('Model saved')
    if model.memory_counter > model.memory_size:
        var *= .9995
        model.learn()
    return var
def main():
    """Connect the shared Socket.IO client to the control server and join
    the 'robot' room."""
    global io
    io.sleep(1)
    host = Host()
    # Dev machine (hostname contains 'hwangsehyun') talks to localhost;
    # anything else goes to express.network.  Port 8080 only when running
    # under a normal parent process (getppid() > 2).
    host = 'http://' + ('localhost' if 'hwangsehyun' in host else 'express.network') + \
        (':8080' if getppid() > 2 else '') + f"?Container={host.replace('.network', '')}&KeepAlive=1"
    print('Host:', host)
    io.connect(host + namespace,
               transports=['websocket'],
               namespaces=[namespace])
    #io.register_namespace(CustomNamespace(namespace))
    # NOTE(review): clearing the namespace handler right after connecting
    # appears intentional (dispatch is done manually via On) -- confirm.
    io.namespaces[namespace] = None
    io.emit('Room', 'robot', namespace=namespace)
    print('Connected')
    #get_event_loop().run_until_complete(self.main())
if __name__ == '__main__':
    # Build the DDPG agent sized by the PARAMS env var, restoring weights
    # from a previous run when available.
    model = DDPG(PARAMS['Actions'], PARAMS['States'], 1)
    if isfile(MODEL_PATH):
        model.load_model(MODEL_PATH)
        print('Model loaded')
    # Expose the model's methods and the Learn tick to remote callers.
    On(model, "model")
    On(Learn)
    main()
| [
"hwanghyun3@gmail.com"
] | hwanghyun3@gmail.com |
a13687d1aa7eb47c3995dfc3c77080e5e6e62c5c | e66ab5cb1b2db67f42e1b8beaa7b9ab4b163311e | /Binary-search-tree.py | 6d5223e8a6a8889d4775e7a30cf86a840ceb0af7 | [] | no_license | Noobpolad/The-binary-search-tree | c0938f2195a42516a4249d90d61994fd9954a6a8 | 7afd38da22b794001d99c790b1a4e7fdb0717398 | refs/heads/master | 2021-08-19T19:43:05.356698 | 2017-11-27T08:40:24 | 2017-11-27T08:40:24 | 112,168,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,921 | py | class theBinarySearchThree:
def __init__(self, value):
    # A node stores an int value plus parent/left/right links (all None
    # until the tree wires them up).
    self.setValue(value)
    self.setParent(None)
    self.setLeft(None)
    self.setRight(None)
def getLeft(self):
    # Left child node (None when absent).
    return self._left
def setLeft(self, value):
    # Replace the left child link.
    self._left = value
def getRight(self):
    # Right child node (None when absent).
    return self._right
def setRight(self, value):
    # Replace the right child link.
    self._right = value
def getParent(self):
    # Parent node (None for the root).
    return self._parent
def setParent(self, value):
    # Replace the parent link.
    self._parent = value
def setValue(self, value):
    # Store the node's key, coerced to int (raises for non-numeric input).
    self._v = int(value)
def getValue(self):
    # The node's integer key.
    return self._v
class operationsWithBST:
    """Binary search tree operations over theBinarySearchThree nodes:
    insert, search, delete (via DNFLS / DNFRS) and the three traversals."""

    def __init__(self):
        self._root = None

    def insert(self, value):
        """Insert a new node at its BST position (root if tree is empty).

        NOTE(review): a duplicate value matches neither the '>' nor the '<'
        branch, so `while 1` never terminates -- verify duplicates are
        excluded by the caller.
        """
        if self._root != None:
            cur = theBinarySearchThree(value)
            new = self._root
            while 1:
                if cur.getValue() > new.getValue() and new.getRight() == None:
                    new.setRight(cur)
                    cur.setParent(new)
                    break
                elif cur.getValue() > new.getValue() and new.getRight() != None:
                    new = new.getRight()
                if cur.getValue() < new.getValue() and new.getLeft() == None:
                    new.setLeft(cur)
                    cur.setParent(new)
                    break
                elif cur.getValue() < new.getValue() and new.getLeft() != None:
                    new = new.getLeft()
        else:
            self._root = theBinarySearchThree(value)

    def search(self, element):
        """Print whether `element` exists, by walking from the root."""
        new = self._root
        found = False
        while new != None:
            if element > new.getValue():
                new = new.getRight()
            elif element < new.getValue():
                new = new.getLeft()
            else:
                print("The element " + str(element) + " exists in the tree")
                found = True
                break
        if found == False:
            print("The element doesn't exist in the tree")

    def delete(self, element):
        """Delete `element`, dispatching on which side of the root it lies.

        NOTE(review): if the element is absent, the walk reaches None and
        the next getValue() raises AttributeError -- confirm callers only
        delete existing keys.
        """
        new = self._root
        while 1:
            if element > new.getValue():
                new = new.getRight()
            elif element < new.getValue():
                new = new.getLeft()
            else:
                break
        if element == new.getValue() and element <= self._root.getValue():
            self.DNFLS(new)
        elif element == new.getValue() and element >= self._root.getValue():
            self.DNFRS(new)

    # Delete the node from the left subtree or root
    def DNFLS(self, new):
        # NOTE(review): the `new` parameter is immediately overwritten with
        # the root, so this variant always starts from self._root.
        new = self._root
        while 1:
            if new.getRight() != None and new.getLeft() == None:
                # Only a right child: pull its value/links up into `new`.
                new.setValue(new.getRight().getValue())
                if new.getRight().getLeft() != None:
                    new.getRight().getLeft().setParent(new)
                    new.setLeft(new.getRight().getLeft())
                if new.getRight().getRight() != None:
                    new.getRight().getRight().setParent(new)
                    new.setRight(new.getRight().getRight())
                else:
                    new.setRight(None)
                break
            elif new.getLeft() != None and new.getRight() == None:
                # Only a left child: mirror of the case above.
                new.setValue(new.getLeft().getValue())
                if new.getLeft().getRight() != None:
                    new.getLeft().getRight().setParent(new)
                    new.setRight(new.getLeft().getRight())
                if new.getLeft().getLeft() != None:
                    new.getLeft().getLeft().setParent(new)
                    new.setLeft(new.getLeft().getLeft())
                else:
                    new.setLeft(None)
                break
            elif new.getLeft() != None and new.getRight() != None:
                # Two children: replace with the in-order successor.
                if new.getRight().getLeft() == None:
                    new.setValue(new.getRight().getValue())
                    if new.getRight().getRight() == None:
                        new.setRight(None)
                        break
                    else:
                        new.getRight().getRight().setParent(new)
                        new.setRight(new.getRight().getRight())
                        break
                elif new.getRight().getLeft() != None:
                    # Walk to the leftmost node of the right subtree.
                    # NOTE(review): after this inner loop breaks, the outer
                    # `while 1` iterates again with `new` unchanged -- verify
                    # this terminates for deeper trees.
                    cur = new.getRight().getLeft()
                    while 1:
                        if cur.getLeft() != None:
                            cur = cur.getLeft()
                        elif cur.getLeft() == None and cur.getRight() == None:
                            new.setValue(cur.getValue())
                            cur.getParent().setLeft(None)
                            break
                        elif cur.getLeft() == None and cur.getRight() != None:
                            new.setValue(cur.getValue())
                            cur.getRight().setParent(cur.getParent())
                            cur.getParent().setLeft(cur.getRight())
                            break
            elif new.getLeft() == None and new.getRight() == None and new.getParent().getLeft() == new:
                # Leaf node: just unhook it from its parent.
                new.getParent().setLeft(None)
                break
            elif new.getLeft() == None and new.getRight() == None and new.getParent().getRight() == new:
                new.getParent().setRight(None)
                break

    # Delete the node from the right subtree or root
    def DNFRS(self, new):
        # Same cases as DNFLS, but operating on the node passed in.
        while 1:
            if new.getRight() != None and new.getLeft() == None:
                new.setValue(new.getRight().getValue())
                if new.getRight().getLeft() != None:
                    new.getRight().getLeft().setParent(new)
                    new.setLeft(new.getRight().getLeft())
                if new.getRight().getRight() != None:
                    new.getRight().getRight().setParent(new)
                    new.setRight(new.getRight().getRight())
                else:
                    new.setRight(None)
                break
            elif new.getLeft() != None and new.getRight() == None:
                new.setValue(new.getLeft().getValue())
                if new.getLeft().getRight() != None:
                    new.getLeft().getRight().setParent(new)
                    new.setRight(new.getLeft().getRight())
                if new.getLeft().getLeft() != None:
                    new.getLeft().getLeft().setParent(new)
                    new.setLeft(new.getLeft().getLeft())
                else:
                    new.setLeft(None)
                break
            elif new.getLeft() != None and new.getRight() != None:
                if new.getRight().getLeft() == None:
                    new.setValue(new.getRight().getValue())
                    if new.getRight().getRight() == None:
                        new.setRight(None)
                        break
                    else:
                        new.getRight().getRight().setParent(new)
                        new.setRight(new.getRight().getRight())
                        break
                elif new.getRight().getLeft() != None:
                    cur = new.getRight().getLeft()
                    while 1:
                        if cur.getLeft() != None:
                            cur = cur.getLeft()
                        elif cur.getLeft() == None and cur.getRight() == None:
                            new.setValue(cur.getValue())
                            cur.getParent().setLeft(None)
                            break
                        elif cur.getLeft() == None and cur.getRight() != None:
                            new.setValue(cur.getValue())
                            cur.getRight().setParent(cur.getParent())
                            cur.getParent().setLeft(cur.getRight())
                            break
            if new.getLeft() == None and new.getRight() == None and new.getParent().getLeft() == new:
                new.getParent().setLeft(None)
                break
            elif new.getLeft() == None and new.getRight() == None and new.getParent().getRight() == new:
                new.getParent().setRight(None)
                break

    def printInorder(self):
        self.inorder(self._root)

    def inorder(self, root):
        # Left, node, right: prints keys in ascending order.
        if root != None:
            self.inorder(root.getLeft())
            print(root.getValue())
            self.inorder(root.getRight())

    def printPostorder(self):
        self.postorder(self._root)

    def postorder(self, root):
        # Left, right, node.
        if root != None:
            self.postorder(root.getLeft())
            self.postorder(root.getRight())
            print(root.getValue())

    def printPreorder(self):
        self.preorder(self._root)

    def preorder(self, root):
        # Node, left, right.
        if root != None:
            print(root.getValue())
            self.preorder(root.getLeft())
            self.preorder(root.getRight())
"noreply@github.com"
] | Noobpolad.noreply@github.com |
66cd7f817921e5ec588b73767556d27295813b11 | 430d1f21f366dd2dc85c26edcb1f923470ff6bae | /books/tests.py | dcef51c48e6139714044d3066b523a3dc9c7958c | [] | no_license | nsinner1/django_crud | fc4456c0d5904713a46b8fbdc245a3e80cdef67d | eace4af71f9ebe4436df5bf5852e1ebc037ee024 | refs/heads/master | 2022-12-12T18:34:14.214211 | 2020-08-30T00:35:05 | 2020-08-30T00:35:05 | 291,348,866 | 0 | 1 | null | 2020-08-30T00:35:06 | 2020-08-29T21:04:10 | Python | UTF-8 | Python | false | false | 1,731 | py | from django.test import SimpleTestCase, TestCase
from django.urls import reverse
from .models import Books
from django.contrib.auth import get_user_model
# Create your tests here.
class BooksTest(SimpleTestCase):
    """Smoke tests for the home page (no database needed)."""

    def test_homepage_status(self):
        self.help_status_code('home')

    def help_status_code(self, url_name):
        # Shared helper: the named URL must resolve and return HTTP 200.
        url = reverse(url_name)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_homepage_template(self):
        self.check_template_used('home', 'home.html')

    def check_template_used(self, url_name, template):
        # Shared helper: both the base layout and the page template render.
        url = reverse(url_name)
        response = self.client.get(url)
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateUsed(response, template)
class BooksTest2(TestCase):
    """Model-level tests for Books, backed by a test database."""

    def setUp(self):
        # One user and one book record shared by every test method.
        self.user = get_user_model().objects.create_user(
            username = 'test',
            password = 'pass',
        )
        self.books = Books.objects.create(
            title='Test Title',
            author=self.user,
            body='Test Description',
        )
        self.books.save()
        self.book_record = Books.objects.get(pk=1)

    def test_model_content(self):
        self.assertEqual(self.book_record, self.books)

    def test_model_name(self):
        self.assertEqual(self.book_record.title, self.books.title)

    def test_create_redirect_home(self):
        # NOTE(review): the name says "redirect" but the assertion expects
        # 405 (method not allowed) -- confirm which behaviour is intended.
        response = self.client.post(reverse('home'),{
            'title' : 'Test Title',
            'author' : self.user,
            'body' : 'Test Description',
            }
            , follow=True)
        self.assertEqual(response.status_code, 405)
        # NOTE(review): assertTemplateUsed without a response argument
        # returns a context manager that is never entered, so this line
        # asserts nothing -- verify.
        self.assertTemplateUsed('home.html')
"nataliesinner@hotmail.com"
] | nataliesinner@hotmail.com |
988a53fed87c4d15c1bffbed597674dd7197ec2b | 88350153e766641e999969860eef8f1617d85487 | /list.py | f5e02b2e7d3b0afbb53462ad348ab28b886ab668 | [] | no_license | alirezaghd/assignment-2 | 6cef5875192ab9ea5997b8abe435ac224c12d08d | b3242f5111661d0d403038ac60e4790f7b8eb788 | refs/heads/main | 2023-04-02T11:03:03.045744 | 2021-04-04T13:18:11 | 2021-04-04T13:18:11 | 354,391,390 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | list_1 = []
# Read 20 integers from stdin and collect their squares in list_1.
for i in range(20):
    list_1.append(int(input()) ** 2)
print(list_1)
max_number = max(list_1)
min_number = min(list_1)
print("Max number in the list :" , max_number,"Min number in the list :" , min_number) | [
"noreply@github.com"
] | alirezaghd.noreply@github.com |
ac3e9c697e353b693c7f8c8310a98068050b8172 | b25485391a8a2007c31cd98555855b517cc68a64 | /examples/src/dbnd_examples/tutorial_syntax/T60_task_that_calls_other_tasks.py | 4e531bd99f3753db413843c5526d5528de64f9e8 | [
"Apache-2.0"
] | permissive | ipattarapong/dbnd | 5a2bcbf1752bf8f38ad83e83401226967fee1aa6 | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | refs/heads/master | 2022-12-14T06:45:40.347424 | 2020-09-17T18:12:08 | 2020-09-17T18:12:08 | 299,219,747 | 0 | 0 | Apache-2.0 | 2020-09-28T07:07:42 | 2020-09-28T07:07:41 | null | UTF-8 | Python | false | false | 536 | py | import pandas as pd
from dbnd import task
@task
def func_return_df():
return pd.DataFrame(data=[[3, 1]], columns=["c1", "c2"])
@task
def func_gets(df):
return df
@task
def func_pipeline(p: int):
df = pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"])
d1 = func_gets(df)
d2 = func_gets(d1)
return d2
@task
def func_pipeline2(p: int):
df = func_return_df()
d1 = func_gets(df)
return d1
if __name__ == "__main__":
import os
os.environ["DBND__TRACKING"] = "true"
func_pipeline2(4)
| [
"evgeny.shulman@databand.ai"
] | evgeny.shulman@databand.ai |
41c60a17f34b955aee0c20c1b60fb8ef5e1c0dc5 | 74a7f74373c90dd42056cad4625e53c582591525 | /补充练习/ex56.py | 1a2c056b0b9dc0661194f3203b88df13bdf4c644 | [] | no_license | ligj1706/learn-python-hard | 93c144efa3485d380a263319a9a3bc2838667802 | 1befe6f75d144fa39c7c1a180d857da2c0321ce3 | refs/heads/master | 2021-10-22T00:17:46.483403 | 2019-03-07T08:02:17 | 2019-03-07T08:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
#coding:utf-8
# 参数收集,解决参数个数的不确定性问题,*表示多个参数,?表示一个参数
def func(x, *arg):
print x
result = x
print arg
for i in arg:
result += i
return result
print func(1, 2, 3, 4, 5, 6, 7, 8, 9)
# 收集字典的数据
def foo(**kargs):
print kargs
foo(a = 1, b = 2, c = 3, d = 4)
# 综合练习
def foo(x, y ,z ,*arg, **kargs):
print x
print y
print z
print arg
print kargs
foo(1, 2, 3)
foo(1, 2, 3, 4, 5)
foo(1, 2, 3, 4, 5, a = 'lgsir')
foo(1, 2, 3, 4, 5, B, a = 'lgsiir')
| [
"noreply@github.com"
] | ligj1706.noreply@github.com |
10aa510c9322b23c32f858faf3870fbb96d1e089 | 6a8cd4c644ceb0ff1560d716805647830b8cefdd | /sqlite/query.py | 1b771994acaf6f6ddf93a46ba465baa9b54619a5 | [] | no_license | yasmeen/softdev1 | 1af216687f8e262439e33c08cdb99393b9c3ed23 | 52e2abd9945b349010810821645ee09ac98445f5 | refs/heads/master | 2021-05-30T08:21:14.685997 | 2015-12-08T19:59:45 | 2015-12-08T19:59:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | import sqlite3
conn = sqlite3.connect("demo.db")
c = conn.cursor()
q = """
SELECT people.name, classes.name, grade
FROM people,classes
WHERE people.id=classes.id and grade > 90
"""
result = c.execute(q)
for r in result:
print r
print r[0]
| [
"y4smeen@gmail.com"
] | y4smeen@gmail.com |
507b10cc2abdf0221e52199c3bc4d62ac33d5927 | 6ce3c461c0c2664be25f2bfa6730e83e49dfb7d3 | /pandas/readcsvwithcomments.py | 82ff4b45f95d2d9dcb900e13c2c34700f5022d47 | [] | no_license | kiranshashiny/PythonCodeSamples | 6fa6dddde75de300791a90c1ceeb912bad1541a0 | 6bfc462d4bbbac48c2c4883334e73f8262e9a524 | refs/heads/master | 2020-03-11T17:41:08.450514 | 2018-05-07T07:56:17 | 2018-05-07T07:56:17 | 130,154,132 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | import pandas as pd
df = pd.read_csv('HON.csv')
#This prints everything to everything.
# This reads in the column names as well, and assigns the column names
df = pd.read_csv('HON.csv', names=['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume' ], comment='#')
#This prints as a Series in Pandas. one after another
print ( df['Date'])
# This prints as a numpy array. All in one line.
print ( df['Date'].values )
print ("Printing the Open now " )
print ( df['Open'].values )
| [
"kiranshashiny@gmail.com"
] | kiranshashiny@gmail.com |
fc9090a299eff24d6bebd74667e559516adaa406 | d999fe799e960620ba855568e8c0072405b0c858 | /blog.py | 0c6cfd1892add0a1c28b8067dabce9e00d2de9fb | [] | no_license | EndLife/web_py | 8bdf8b7eb0118db9e72319166221312e8944a139 | 9a5a5d7e868a4eb2e21f847d9a35445ee88d1882 | refs/heads/master | 2021-01-22T19:22:14.189733 | 2017-03-16T12:41:06 | 2017-03-16T12:41:06 | 85,194,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | # coding:utf-8
""" Basic blog using webpy 0.3 """
import web
import model
### Url mappings
urls = (
'/', 'Index',
'/view/(\d+)', 'View',
'/new', 'New',
'/delete/(\d+)', 'Delete',
'/edit/(\d+)', 'Edit',
)
### Templates
t_globals = {
'datestr': web.datestr
}
render = web.template.render('templates', base='base', globals=t_globals)
class Index:
def GET(self):
""" Show page """
posts = model.get_posts()
return render.index(posts)
class View:
def GET(self, id):
""" View single post """
post = model.get_post(int(id))
return render.view(post)
class New:
form = web.form.Form(
web.form.Textbox('title', web.form.notnull,
size=30,
description="Post title:"),
web.form.Textarea('content', web.form.notnull,
rows=30, cols=80,
description="Post content:"),
web.form.Button('Post entry'),
)
def GET(self):
form = self.form()
return render.new(form)
def POST(self):
form = self.form()
if not form.validates():
return render.new(form)
model.new_post(form.d.title, form.d.content)
raise web.seeother('/')
class Delete:
def POST(self, id):
model.del_post(int(id))
raise web.seeother('/')
class Edit:
def GET(self, id):
post = model.get_post(int(id))
form = New.form()
form.fill(post)
return render.edit(post, form)
def POST(self, id):
form = New.form()
post = model.get_post(int(id))
if not form.validates():
return render.edit(post, form)
model.update_post(int(id), form.d.title, form.d.content)
raise web.seeother('/')
app = web.application(urls, globals())
if __name__ == '__main__':
app.run() | [
"noreply@github.com"
] | EndLife.noreply@github.com |
99539357d5acc666009e193e9326a3a724aac63e | f33fa29aca343cf152b6ec4907d3db01046a44e3 | /Shop/admin.py | 07d563c61c54ce0ce1e7f4753639f58cd897e242 | [] | no_license | deepakkumarcse/Machine_Test | bf8028a146eac9990eff5515b6577f30f22a1703 | 677355d35f5adeace3916271e684d5f6617ea0aa | refs/heads/master | 2023-05-12T15:02:34.619573 | 2021-05-29T11:55:53 | 2021-05-29T11:55:53 | 371,959,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from django.contrib import admin
from .models import Category, Tag, Product
class CategoryAdmin(admin.ModelAdmin):
list_display = ('pk', 'name',)
search_fields = ['pk', 'name']
class TagAdmin(admin.ModelAdmin):
list_display = ('pk', 'name',)
search_fields = ['pk', 'name']
class ProductAdmin(admin.ModelAdmin):
list_display = ('pk', 'name', 'category')
search_fields = ['pk', 'tags', 'category']
admin.site.register(Category, CategoryAdmin)
admin.site.register(Tag, TagAdmin)
admin.site.register(Product, ProductAdmin)
| [
"deepakcse82@gmail.com"
] | deepakcse82@gmail.com |
db3ac07a8db13d186a143de360f1bba62bf3cb70 | cfc1ccfe578b58653cb39c42658c626a11f53603 | /ann_train.py | 31e303a642f8f6fa918a8f02ecb2027d43ebecb0 | [] | no_license | kkawesum/deep_learning_udemy | 4a7bf9ce24dd9a334b45085b665b3c6c124e9329 | 4d9d62ebfae55176e83b23ce3b7d5644b29c6a98 | refs/heads/master | 2020-06-25T21:55:53.177476 | 2019-08-03T06:05:11 | 2019-08-03T06:05:11 | 199,432,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from process import get_data
def y2indicator(y,K): #func to get the indicator matrix
N=len(y)
ind=np.zeros((N,K))
for i in range(N):
ind[i,y[i]] = 1
return ind
X,Y=get_data()
X,Y=shuffle(X,Y)
Y=Y.astype(np.int32)
M = 5
D = X.shape[1]
K = len(set(Y))
#preparing the training set
Xtrain = X[:-100]
Ytrain= Y[:-100]
Ytrain_ind=y2indicator(Ytrain,K)
#preparing test set
Xtest = X[-100:]
Ytest= Y[-100:]
Ytest_ind=y2indicator(Ytest,K)
W1 = np.random.randn(D,M)
b1 = np.zeros(M)
W2 =np.random.randn(M,K)
b2 = np.zeros(K)
def softmax(a):
expA = np.exp(a)
return expA / expA.sum(axis=1,keepdims=True)
#we create a forward func for creating a NN
def forward(X,W1,b1,W2,b2):
Z=np.tanh(X.dot(W1)+ b1) # tanh is the activation func
return softmax(Z.dot(W2)+ b2),Z #Z will be used for derivative
#P_Y_given_X = forward(X,W1,b1,W2,b2)
#predictions= np.argmax(P_Y_given_X,axis=1)
def classification_rate(Y,P):
return np.mean(Y==P)
def predict(P_Y_given_X):
return np.argmax(P_Y_given_X,axis=1)
def cross_entropy(T,pY):
return -np.mean(T*np.log(pY))
train_costs=[]
test_costs=[]
learning_rate = 0.001
for i in range(10000):
pYtrain, Ztrain=forward(Xtrain,W1,b1,W2,b2)
pYtest,Ztest=forward(Xtest,W1,b1,W2,b2)
ctrain = cross_entropy(Ytrain_ind,pYtrain)
ctest = cross_entropy(Ytest_ind,pYtest)
train_costs.append(ctrain)
test_costs.append(ctest)
W2 -= learning_rate*Ztrain.T.dot(pYtrain-Ytrain_ind)
b2 -= learning_rate*(pYtrain - Ytrain_ind).sum(axis=0)
dZ=(pYtrain-Ytrain_ind).dot(W2.T)*(1-Ztrain*Ztrain)
W1 -= learning_rate*Xtrain.T.dot(dZ)
b1 -= learning_rate*dZ.sum(axis=0)
if i % 1000==0:
print(i,ctrain,ctest)
print("training classification rate:",classification_rate(Ytrain,predict(pYtrain)))
print("testing classification rate:",classification_rate(Ytest,predict(pYtest)))
legend1, = plt.plot(train_costs, label ="train costs")
legend2, = plt.plot(test_costs,label="test costs")
plt.legend([legend1,legend2])
plt.show()
| [
"noreply@github.com"
] | kkawesum.noreply@github.com |
8c7333fb6b2ec5e9196c6bbb79efdc339563b3a9 | 5b6a8e4b54744f3e2b430ef0368dfa4d99b8cb6f | /Caffe/data_prep.py | 5f1dd30ec3dc85384f393815dfcf619d986add95 | [] | no_license | Dipanjana2802/Dipanjana | f84e4ca3a0c296fd5870f2c060fb56faa4f750c8 | 5969fa0f7634078b828bf87c58c8a00e012d8add | refs/heads/master | 2020-04-01T04:54:38.741766 | 2020-02-10T21:48:35 | 2020-02-10T21:48:35 | 152,771,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,561 | py | import os
import numpy as np
import time
import shutil
#import cPickle
import random
from PIL import Image, ImageOps
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
def shuffle_in_unison(a, b):
# courtsey http://stackoverflow.com/users/190280/josh-bleecher-snyder
assert len(a) == len(b)
shuffled_a = np.empty(a.shape, dtype=a.dtype)
shuffled_b = np.empty(b.shape, dtype=b.dtype)
permutation = np.random.permutation(len(a))
for old_index, new_index in enumerate(permutation):
shuffled_a[new_index] = a[old_index]
shuffled_b[new_index] = b[old_index]
return shuffled_a, shuffled_b
def move_files(input, output):
'''
Input: folder with dataset, where every class is in separate folder
Output: all images, in format class_number.jpg; output path should be absolute
'''
index = -1
for root, dirs, files in os.walk(input):
path = root.split('/')
print ('Working with path ', path)
print ('Path index ', index)
filenum = 0
for file in files:
fileName, fileExtension = os.path.splitext(file)
if fileExtension == '.jpg' or fileExtension == '.JPG':
full_path = '<path to images>' + path[9] + '/'+ file
if (os.path.isfile(full_path)):
file = str(index) + '_' + path[1] + str(filenum) + fileExtension
print (output + '/' + file)
shutil.copy(full_path, output + '/' + file)
else:
print('No file')
filenum += 1
index += 1
def create_text_file(input_path, outpath, percentage):
'''
Creating train.txt and val.txt for feeding Caffe
'''
images, labels = [], []
os.chdir(input_path)
for item in os.listdir('.'):
if not os.path.isfile(os.path.join('.', item)):
continue
try:
label = int(item.split('_')[0])
images.append(item)
labels.append(label)
except:
continue
images = np.array(images)
labels = np.array(labels)
images, labels = shuffle_in_unison(images, labels)
im_length = len(images)
im_labels = len(labels)
#print('image length: {}'.format(im_length))
#print('image length type: {}'.format(type(im_length)))
#print('image label: {}'.format(im_labels))
#print('image label type: {}'.format(type(im_labels)))
train_size = int(im_length*percentage)
X_train = images[0:train_size]
y_train = labels[0:train_size]
X_test = images[train_size:]
y_test = labels[train_size:]
os.chdir(outpath)
print('The current directory for output is: {}'.format(os.getcwd()))
trainfile = open("train.txt", "w")
#trainfile.write('Hello Train')
for i, l in zip(X_train, y_train):
trainfile.write(i + " " + str(l) + "\n")
testfile = open("val.txt", "w")
for i, l in zip(X_test, y_test):
testfile.write(i + " " + str(l) + "\n")
trainfile.close()
testfile.close()
def main():
caffe_path = '<path to images>'
new_path = '<path to a temp folder>'
output_path = '<desired output location>'
move_files(caffe_path, new_path)
create_text_file(new_path, output_path, 0.85)
main()
| [
"facebook.dips@gmail.com"
] | facebook.dips@gmail.com |
34697d981a1930c023a733316b6aae02fc072f70 | 792af5a9c7e1e02d596a989bfbe10d1f14f4e266 | /log/migrations/0002_auto_20170130_1322.py | 64f0bafe3f95d5191baa72a0baef9198e6d5f2a0 | [] | no_license | martisr/authtest | d3963a223d2932cef4a52dddcaf82180026f125f | 5f9d2ba4c3f7bed3add7c7058a80c7f8d4cc5638 | refs/heads/master | 2020-05-23T10:16:31.243107 | 2017-03-01T08:41:47 | 2017-03-01T08:41:47 | 80,410,657 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-30 11:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('log', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='step',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='step',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='step',
name='insertion_date',
field=models.DateTimeField(),
),
]
| [
"mart.israel@hotmail.com"
] | mart.israel@hotmail.com |
d23276959cbd7b4c92f24e3ab99d6bf350ea31b5 | 5dda5fbf4947b009a5f7eac49874faa285881e49 | /problem1.py | 689fa48d992ba84b1750c789637635548db01445 | [
"MIT"
] | permissive | mpUrban/python_problems | 9e9aed8e85c063d410291fecfc0b13b289c98ae3 | d8c67a33119dce3a558bb1e76b9d3595932ddfa8 | refs/heads/master | 2020-04-24T04:37:52.884110 | 2019-02-24T14:37:44 | 2019-02-24T14:37:44 | 171,709,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # written in VS Code with jupyter extension
#https://simpleprogrammer.com/programming-interview-questions/
# How do you find the missing number in a given integer array of 1 to 100?
# assume only 1 missing number
# missing number can be zeroed or removed from array
#%%
startArray = 1
stopArray = 100
#%%
n = startArray + stopArray - 1
#%%
sequenceArray = list(range(startArray, (stopArray + 1)))
#%%
testArray = sequenceArray.copy()
testArray[4] = 0 #zeroing fifth element
#%%
sumArray = n * (startArray + stopArray) / 2
sumArray
#%%
sumTest = sum(testArray)
sumTest
#%%
answer = sumArray - sumTest
print('The missing number is: ' + str(answer)) | [
"urban22@gmail.com"
] | urban22@gmail.com |
0433b7395eb3ee539638aa14dcbbba5e10ae531f | d4c74a8001451840f3efb87f15856cdb9d5e9eb6 | /tools/nntool/importer/tflite/tflite_schema_head/LSHProjectionType.py | 74c91fa75b8252a125fff614d55c912984ec71eb | [
"Apache-2.0"
] | permissive | danieldennett/gap_sdk | 3e4b4d187f03a28a761b08aed36a5e6a06f48e8d | 5667c899025a3a152dbf91e5c18e5b3e422d4ea6 | refs/heads/master | 2020-12-19T19:17:40.083131 | 2020-02-27T14:51:48 | 2020-02-27T14:51:48 | 235,814,026 | 1 | 0 | Apache-2.0 | 2020-01-23T14:39:59 | 2020-01-23T14:39:58 | null | UTF-8 | Python | false | false | 182 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite_schema_head
class LSHProjectionType(object):
UNKNOWN = 0
SPARSE = 1
DENSE = 2
| [
"zhouxinge@live.nl"
] | zhouxinge@live.nl |
50101cfa3131859ea40e1a4287fcdf2b918a6912 | ba9d4ab1a1153ecd91ed280722b84e19f2a34810 | /src/postprocessing.py | 38aa62c3068f271ae3fa05afe046d75d569d7e42 | [] | no_license | tatigabru/nfl_impact_detection | 43ce79bdbbcf059317007963ac330d6abeb1188b | 743d822412811344ecb0c25232e5076f404f56b8 | refs/heads/master | 2023-03-21T00:17:38.599208 | 2021-03-18T15:34:02 | 2021-03-18T15:34:02 | 319,380,921 | 0 | 0 | null | 2021-03-11T15:51:37 | 2020-12-07T16:37:35 | Jupyter Notebook | UTF-8 | Python | false | false | 10,201 | py | """
Postprocessing
"""
# Imports
from typing import List, Dict, Optional, Tuple, Union, Type, Callable
from collections import defaultdict, namedtuple
import cv2
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.optimize import linear_sum_assignment
from scipy.ndimage.filters import maximum_filter
def add_bottom_right(df):
df["right"] = df["left"] + df["width"]
df["bottom"] = df["top"] + df["height"]
return df
def box_pair_distance(bbox1, bbox2):
bbox1 = [float(x) for x in bbox1]
bbox2 = [float(x) for x in bbox2]
(x0_1, y0_1, x1_1, y1_1) = bbox1
(x0_2, y0_2, x1_2, y1_2) = bbox2
x_1 = (x1_1 + x0_1) / 2
x_2 = (x1_2 + x0_2) / 2
y_1 = (y1_1 + y0_1) / 2
y_2 = (y1_2 + y0_2) / 2
print(x_1, x_2, y_1, y_2)
# get Eucledian distance
dist = (x_2 - x_1) ** 2 + (y_2 - y_1) ** 2
print(np.sqrt(dist))
return np.sqrt(dist)
def box_pair_iou(bbox1, bbox2):
bbox1 = [float(x) for x in bbox1]
bbox2 = [float(x) for x in bbox2]
(x0_1, y0_1, x1_1, y1_1) = bbox1
(x0_2, y0_2, x1_2, y1_2) = bbox2
# get the overlap rectangle
overlap_x0 = max(x0_1, x0_2)
overlap_y0 = max(y0_1, y0_2)
overlap_x1 = min(x1_1, x1_2)
overlap_y1 = min(y1_1, y1_2)
# check if there is an overlap
if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
return 0
# if yes, calculate the ratio of the overlap to each ROI size and the unified size
size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
size_union = size_1 + size_2 - size_intersection
return size_intersection / size_union
def track_boxes_centers(videodf, dist=1, dist_thresh=0.8):
# most simple algorithm for tracking boxes
# based on distance and hungarian algorithm
track = 0
n = len(videodf)
inds = list(videodf.index)
frames = [-1000] + sorted(videodf["frame"].unique().tolist())
ind2box = dict(
zip(inds, videodf[["left", "top", "right", "bottom"]].values.tolist())
)
ind2track = {}
for f, frame in enumerate(frames[1:]):
cur_inds = list(videodf[videodf["frame"] == frame].index)
assigned_cur_inds = []
if frame - frames[f] <= dist:
prev_inds = list(videodf[videodf["frame"] == frames[f]].index)
cost_matrix = np.ones((len(cur_inds), len(prev_inds)))
for i, ind1 in enumerate(cur_inds):
for j, ind2 in enumerate(prev_inds):
box1 = ind2box[ind1]
box2 = ind2box[ind2]
a = box_pair_distance(box1, box2)
###
# TO DO
# multiply by coefficient proportional frame - frames[f]
print(f"Distance boxes: {a}")
###
# dist_thresh = dist_thresh*(1 + (frame - frames[f])*0.2)
cost_matrix[i, j] = a / dist_thresh if a < dist_thresh else 1
row_is, col_js = linear_sum_assignment(cost_matrix)
# assigned_cur_inds = [cur_inds[i] for i in row_is]
for i, j in zip(row_is, col_js):
if cost_matrix[i, j] < 1:
ind2track[cur_inds[i]] = ind2track[prev_inds[j]]
assigned_cur_inds.append(cur_inds[i])
not_assigned_cur_inds = list(set(cur_inds) - set(assigned_cur_inds))
for ind in not_assigned_cur_inds:
ind2track[ind] = track
track += 1
tracks = [ind2track[ind] for ind in inds]
# print(f'tracks: {tracks}')
return tracks
def track_boxes(videodf, dist=1, iou_thresh=0.8):
# most simple algorithm for tracking boxes
# based on iou and hungarian algorithm
track = 0
n = len(videodf)
inds = list(videodf.index)
frames = [-1000] + sorted(videodf["frame"].unique().tolist())
ind2box = dict(
zip(inds, videodf[["left", "top", "right", "bottom"]].values.tolist())
)
ind2track = {}
for f, frame in enumerate(frames[1:]):
cur_inds = list(videodf[videodf["frame"] == frame].index)
assigned_cur_inds = []
if frame - frames[f] <= dist:
prev_inds = list(videodf[videodf["frame"] == frames[f]].index)
cost_matrix = np.ones((len(cur_inds), len(prev_inds)))
for i, ind1 in enumerate(cur_inds):
for j, ind2 in enumerate(prev_inds):
box1 = ind2box[ind1]
box2 = ind2box[ind2]
a = box_pair_iou(box1, box2)
###
# print(f'IoU boxes: {a}')
###
cost_matrix[i, j] = 1 - a if a > iou_thresh else 1
row_is, col_js = linear_sum_assignment(cost_matrix)
# assigned_cur_inds = [cur_inds[i] for i in row_is]
for i, j in zip(row_is, col_js):
if cost_matrix[i, j] < 1:
ind2track[cur_inds[i]] = ind2track[prev_inds[j]]
assigned_cur_inds.append(cur_inds[i])
not_assigned_cur_inds = list(set(cur_inds) - set(assigned_cur_inds))
for ind in not_assigned_cur_inds:
ind2track[ind] = track
track += 1
tracks = [ind2track[ind] for ind in inds]
# print(f'tracks: {tracks}')
return tracks
def add_tracking(df, dist=1, iou_thresh=0.8) -> pd.DataFrame:
# add tracking data for boxes. each box gets track id
df = add_bottom_right(df)
df["track"] = -1
videos = df["video"].unique()
for video in videos:
# print(f'Video: {video}')
videodf = df[df["video"] == video]
tracks = track_boxes(videodf, dist=dist, iou_thresh=iou_thresh)
df.loc[list(videodf.index), "track"] = tracks
return df
def add_tracking_centers(df, dist=1, dist_thresh=0.8) -> pd.DataFrame:
# add tracking data for boxes. each box gets track id
df = add_bottom_right(df)
df["track"] = -1
videos = df["video"].unique()
for video in videos:
# print(f'Video: {video}')
videodf = df[df["video"] == video]
tracks = track_boxes_centers(videodf, dist=dist, dist_thresh=dist_thresh)
df.loc[list(videodf.index), "track"] = tracks
return df
def keep_maximums(df, iou_thresh=0.35, dist=2) -> pd.DataFrame:
# track boxes across frames and keep only box with maximum score
df = add_tracking(df, dist=dist, iou_thresh=iou_thresh)
df = df.sort_values(["video", "track", "scores"], ascending=False).drop_duplicates(
["video", "track"]
)
return df
def keep_maximums_cent(df, dist_thresh=0.35, dist=2) -> pd.DataFrame:
# track boxes across frames and keep only box with maximum score
df = add_tracking_centers(df, dist=dist, dist_thresh=dist_thresh)
df = df.sort_values(["video", "track", "scores"], ascending=False).drop_duplicates(
["video", "track"]
)
return df
def keep_mean_frame(df, iou_thresh=0.35, dist=2) -> pd.DataFrame:
df = add_tracking(df, dist=dist, iou_thresh=iou_thresh)
keepdf = df.groupby(["video", "track"]).mean()["frame"].astype(int).reset_index()
df = df.merge(keepdf, on=["video", "track", "frame"])
return df
def test_keep_maximums(df, iou_thresh=0.35, dist=2):
"""
make a test dataframe, using both false positives and dummy samples
video,frame,left,width,top,scores,height,right,bottom
57906_000718_Endzone.mp4,45,962,20,285,0.8837890625,19,982,304
57906_000718_Endzone.mp4,47,967,23,287,0.87890625,28,990,315
57906_000718_Sideline.mp4,243,652,9,326,0.466064453125,9,661,335
57906_000718_Sideline.mp4,244,656,9,329,0.55810546875,9,665,338
dummy,1,652,20,326,0.466064453125,12,672,338
dummy,2,656,20,329,0.55810546875,19,676,348
dummy,3,659,20,329,0.55810546875,20,679,349
dummy,4,665,20,331,0.55810546875,19,685,350
dummy,5,670,20,335,0.55810546875,21,690,356
dummy,6,671,20,337,0.55810546875,19,691,356
dummy,7,677,20,333,0.55810546875,20,697,353
"""
df_new = keep_maximums(df, iou_thresh=iou_thresh, dist=dist)
print(f"Processed dataframe: \n{df_new.head(10)}")
# check we have left 3 tracks
if df_new.track.count() != 3:
print(f"Not right tracks, {df_new.track.values}")
# assert df_new.track.count() == 3
def test_centers_track(df, dist_thresh=0.35, dist=7):
"""
make a test dataframe, using both false positives and dummy samples
video,frame,left,width,top,scores,height,right,bottom
57906_000718_Endzone.mp4,45,962,20,285,0.8837890625,19,982,304
57906_000718_Endzone.mp4,47,967,23,287,0.87890625,28,990,315
57906_000718_Sideline.mp4,243,652,9,326,0.466064453125,9,661,335
57906_000718_Sideline.mp4,244,656,9,329,0.55810546875,9,665,338
dummy,1,652,20,326,0.466064453125,12,672,338
dummy,2,656,20,329,0.55810546875,19,676,348
dummy,3,659,20,329,0.55810546875,20,679,349
dummy,4,665,20,331,0.55810546875,19,685,350
dummy,5,670,20,335,0.55810546875,21,690,356
dummy,6,671,20,337,0.55810546875,19,691,356
dummy,7,677,20,333,0.55810546875,20,697,353
"""
df_new = keep_maximums_cent(df, dist_thresh=dist_thresh, dist=dist)
print(f"Processed dataframe: \n{df_new.head(10)}")
# check we have left 3 tracks
if df_new.track.count() != 3:
print(f"Not right tracks, {df_new.track.values}")
# assert df_new.track.count() == 3
if __name__ == "__main__":
df = pd.read_csv("../../preds/sample.csv")
print(f"Initial dataframe: \n{df.head(11)}")
dist = 7
num = 0
dist_threshholds = [2, 4, 6, 8, 10, 13, 16, 20]
for dist_thresh in dist_threshholds:
num += 1
print(f"\n EXPERIMENT {num}: distance = {dist}, dist thres = {dist_thresh}")
test_centers_track(df, dist_thresh=dist_thresh, dist=dist)
# iou_threshholds = [0.15, 0.2, 0.25, 0.3, 0.35, 0.4]
# for iou_thresh in iou_threshholds:
# num += 1
# print(f'\n EXPERIMENT {num}: distance = {dist}, IoU thres = {iou_thresh}')
# test_keep_maximums(df, iou_thresh=iou_thresh, dist=dist)
| [
"travellingtati@gmail.com"
] | travellingtati@gmail.com |
0a71040bfcdb1d515bdaece21e9bb5308f9f63e7 | 7cd279b2adba0b25d7781488a86e11c801fbd9d7 | /server.py | 53e5c5aa36c33f26deea8833256d46b9311f8a51 | [] | no_license | ramonus/bchain | 11d9bf09162885b8a22a9ea1a8fcf84b8ecbce04 | 40e7a7fdbada21226967d94f10d9e257b82d48ba | refs/heads/master | 2020-05-23T20:21:52.715477 | 2019-05-23T00:23:19 | 2019-05-23T00:26:23 | 186,928,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,398 | py | import hashlib, json, time, uuid, argparse
from flask import Flask, jsonify, request, render_template
from blockchain import Blockchain
from wallet_utils import create_wallet, save_wallet
import threading
parser = argparse.ArgumentParser()
parser.add_argument("-p","--port",default=5000, type=int, help="Port to run node on")
args = parser.parse_args()
# Instantiate our node
app = Flask(__name__)
# Generate globally unique address for this node
node_identifier = str(uuid.uuid4()).replace("-","")
# Instantiate Blockchain
blockchain = Blockchain(port=args.port, uid=node_identifier)
@app.route("/mine",methods=['GET'])
def mine():
"""
GET request to try to mine a block.
"""
if not blockchain.mining:
threading.Thread(target=blockchain.mine).start()
return jsonify(True), 200
else:
return jsonify(False), 200
# # Call function to mine a block
# mined_block = blockchain.mine()
# # Check if it worked
# if mined_block is not False:
# # If it's not False
# msg = "New block mined"
# error = []
# data = mined_block
# s = 201
# else:
# # If it's False
# msg = "Error mining block"
# error = ["Unknown error"]
# data = None
# s = 401
# else:
# msg = "Node is mining!"
# error = ["Node already mining"]
# data = None
# s = 401
# # Create response
# response = {
# 'message': msg,
# 'error': error,
# 'data': data,
# }
# return jsonify(response), s
@app.route("/transactions/add",methods=['POST'])
def add_transaction():
"""
Adds a new transaction to the current_transactions list if valid throught a POST request.
"""
tr = json.loads(request.get_data().decode())
print("Adding transaction:",tr['hash'])
state = blockchain.is_valid_chain()
state = blockchain.update_state(state, blockchain.current_transactions)
if blockchain.is_valid_transaction(state,tr):
blockchain.update_transaction(tr)
print("Added transaction:",tr['hash'])
return jsonify(tr['hash']), 201
else:
print("Couldn't add. Invalid transaction:",tr['hash'])
return jsonify(False), 401
@app.route("/transactions/new",methods=['POST'])
def new_transaction():
"""
This method will listen for a POST request to /transactions/new and expect data ['wallet', 'recipient', 'amount']
"""
# Read json string
values = json.loads(request.get_data().decode())
print("Values:",values)
# Setup error and message lists
error = []
msg = []
# Check that the required fields are in POST'ed data
required = ['wallet', 'recipient', 'amount']
# Get values
try:
wallet = values['wallet']
recipient = values['recipient']
amount = values['amount']
# Create transaction
t = blockchain.create_transaction(wallet, recipient, amount)
# Compute state
state = blockchain.is_valid_chain()
state = blockchain.update_state(state, blockchain.current_transactions)
# Check transaction validity
if blockchain.is_valid_transaction(state, t):
blockchain.update_transaction(t)
msg = "Done"
else:
msg = "Not enough funds, maybe some are reserved"
error.append("Not enough funds")
except KeyError:
error.append("Invalid input")
# Create response
response = {
'message': msg,
'error': error,
}
return jsonify(response), 201
@app.route("/transactions",methods=['GET'])
def transactions():
"""
GET request to view all pending transactions.
"""
return jsonify(blockchain.current_transactions), 200
@app.route("/transactions/hash",methods=['GET'])
def get_transaction_hash():
"""
GET request to view all pending transactions hash in a list.
"""
# Get state
state = blockchain.is_valid_chain()
# Get all transactions hash
hashes = [t['hash'] for t in blockchain.current_transactions]
return jsonify(hashes), 200
@app.route("/transactions/length",methods=['GET'])
def transactions_length():
"""
GET request to view pending transactions length.
"""
# Create response
resp = {
"length": len(blockchain.current_transactions),
}
return jsonify(resp), 200
@app.route("/transaction/<hash>")
def get_transaction(hash):
"""
GET request to retrive a single transaction given a hash.
"""
tra = [i for i in blockchain.current_transactions if i['hash']==hash]
if len(tra)==1:
return jsonify(tra[0]), 200
elif len(tra)==0:
# Create response
resp = {
"error":"No transaction found with hash: "+hash
}
return jsonify(resp), 200
else:
# Create response
resp = {
"error":"Error, multiple transactions found!",
}
return jsonify(resp), 200
@app.route("/transactions/resolve",methods=['GET'])
def resolve_transactions():
threading.Thread(target=blockchain.resolve_transactions_all).start()
return "resolve transactions started", 201
@app.route("/transactions/clean",methods=['GET'])
def clean_transactions():
blockchain.clean_transactions()
return "Done",201
@app.route("/nodes",methods=["GET"])
def get_nodes():
"""
GET request to view all current nodes.
"""
return jsonify(blockchain.nodes), 200
@app.route("/nodes/resolve",methods=['GET'])
def resolve_node():
threading.Thread(target=blockchain.resolve_chains).start()
return "resolve chains started", 201
@app.route("/nodes/add",methods=['POST'])
def add_node():
"""
POST request to add a new node.
"""
node = request.get_data().decode()
if blockchain.is_valid_node(node):
blockchain.add_node(node)
return jsonify(True), 200
else:
return jsonify(False), 401
@app.route("/nodes/discover",methods=['GET'])
def discover_nodes():
threading.Thread(target=blockchain.discover_nodes).start()
return "Discovery started", 201
@app.route("/chain",methods=['GET'])
def full_chain():
"""
GET request to view full chain.
"""
return jsonify(blockchain.chain), 200
@app.route("/chain/add",methods=['POST'])
def add_block():
b = json.loads(request.get_data().decode())
if blockchain.is_valid_next_block(blockchain.last_block, b):
blockchain.update_chain(b)
return jsonify(b['hash']), 201
elif request.headers.get("port",None) is not None:
node = "http://"+request.remote_addr+":"+str(request.headers.get("port"))
updated = blockchain.resolve_chain(node)
if updated:
return jsonify("Chain updated"), 201
else:
try:
r = requests.post(node,headers={"port": str(args.port)},data=json.dumps(blockchain.last_block))
except:
pass
return jsonify("Chain not updated"), 401
@app.route("/chain/length",methods=['GET'])
def chain_length():
"""
GET request to view full chain's length.
"""
# Create response
resp = {
"length": len(blockchain.chain)
}
return jsonify(resp), 200
@app.route("/chain/last",methods=['GET'])
def last_block():
"""
GET request to view the last block on node's chain.
"""
return jsonify(blockchain.last_block), 200
@app.route("/working",methods=['GET'])
def working():
resp = {
"chains": blockchain.resolving_chains,
"transactions": blockchain.resolving_transactions,
"mining": blockchain.mining,
}
return jsonify(resp), 200
@app.route("/state",methods=['GET'])
def state():
    """
    GET request to view the current state in the main chain.

    NOTE(review): the return value of blockchain.is_valid_chain() is used
    here as the computed ledger state - confirm that is its contract.
    """
    # Get state
    state = blockchain.is_valid_chain()
    return jsonify(state), 200
@app.route("/state/all",methods=['GET'])
def state_all():
    """
    GET request to view the current state including pending transactions.

    Computes the confirmed-chain state, then overlays the not-yet-mined
    transactions on top of it.
    """
    # Get state
    state = blockchain.is_valid_chain()
    # Update with pending transactions
    state = blockchain.update_state(state, blockchain.current_transactions)
    return jsonify(state), 200
@app.route("/uid",methods=['GET'])
def get_uid():
    """GET request returning this node's unique identifier string (200)."""
    return node_identifier, 200
@app.route("/mining",methods=['GET'])
def mining():
    """GET request returning whether this node is currently mining (200)."""
    return jsonify(blockchain.mining), 200
"""
This section will be a test gui to simplify debugging
"""
@app.route("/")
def root():
    """Debug GUI: render the landing page showing this node's wallet."""
    return render_template('index.html',wallet=blockchain.wallet)
@app.route("/new_transaction")
def ntransaction():
    """Debug GUI: render the new-transaction entry form."""
    return render_template('newt_gui.html')
@app.route("/get_wallets")
def get_wallets():
    """Return every saved wallet found in the local 'wallets' directory."""
    from pathlib import Path
    wallet_dir = Path("wallets")
    wallets = [
        {"name": wallet_path.stem, "wallet": json.loads(wallet_path.read_text())}
        for wallet_path in wallet_dir.iterdir()
    ]
    return jsonify(wallets), 200
@app.route("/new_wallet", methods=['GET'])
def new_wallet():
    """
    GET request that creates a brand-new wallet.

    Returns {"wallet": <wallet dict>} with status 201.
    """
    w = create_wallet()
    resp = {
        "wallet": w,
    }
    return jsonify(resp), 201
@app.route("/add_node")
def add_node_gui():
    """Debug GUI: render the add-node form."""
    return render_template("add_node.html")
if __name__=="__main__":
    # NOTE(review): debug=True combined with a 0.0.0.0 bind exposes the
    # Werkzeug debugger to the whole network - disable outside local testing.
    app.run(host='0.0.0.0',port=args.port, debug=True)
| [
"ralmatob@gmail.com"
] | ralmatob@gmail.com |
b25ab28352bded45f847834fe4876b3e3fc1face | 5fca74e404e88c648e1fe8d51377ad1833857251 | /mysite/member/migrations/0001_initial.py | f3d39274a04ed3d4d29b01bff8239584b72f0bc2 | [] | no_license | Minsik113/AI_course-using-Django | 9b5c237664665577cb9f630c87dd188d2eb76ab4 | fc1a65979e6b02c0f8e115a03a5d9a1998a27071 | refs/heads/main | 2023-06-05T14:44:21.517079 | 2021-06-30T02:52:24 | 2021-06-30T02:52:24 | 380,938,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # Generated by Django 2.2.5 on 2021-06-30 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('user_id', models.CharField(max_length=50, primary_key=True, serialize=False)),
('user_pw', models.CharField(max_length=50)),
('user_name', models.CharField(max_length=50)),
('c_date', models.DateTimeField()),
],
),
]
| [
"hyunicho62@gmail.com"
] | hyunicho62@gmail.com |
d1168c3686164b89c8348988a414cae9247e0dd4 | d3a325a344867f95440f0a40884f6162f4463517 | /raspi-firmware/graph_peaks.py | 708705b3505b1cc1dc385f49c2658eb4c96f8d25 | [] | no_license | schivuk/snorlax | b9d2bd44c6368e83bc4a37380ecaf52972fa68a6 | fd0ffedf6c61fc0d7c5c4270e40fe9fff7b37589 | refs/heads/master | 2021-01-20T11:41:23.502392 | 2016-05-10T02:31:24 | 2016-05-10T02:31:24 | 51,566,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,001 | py | import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks_cwt
import random
peak_threshold = 4
def acc_algorithm(vals, threshold):
    """
    Detect accelerometer peaks.

    An index is a peak when its sample differs from the mean of an adjacent
    10-sample window (the next 10 samples when available, and/or the
    previous 10 samples when available) by strictly more than ``threshold``.

    The original four near-identical loops used ``xrange`` (Python 2 only);
    they are collapsed into two loops with |mean - value| comparisons, which
    are logically equivalent to the paired >/< checks.

    :param vals: sequence of accelerometer readings
    :param threshold: minimum absolute deviation from the window mean
    :return: set of peak indices (empty when len(vals) <= 10 or no peaks)
    """
    num_vals = len(vals)
    peaks = set()
    # Compare each sample against the mean of the 10 samples after it.
    for i in range(num_vals - 10):
        next_ten_average = sum(vals[i:i + 10]) / 10
        if abs(next_ten_average - vals[i]) > threshold:
            peaks.add(i)
    # Compare each sample against the mean of the 10 samples before it.
    for i in range(10, num_vals):
        prev_ten_average = sum(vals[i - 10:i]) / 10
        if abs(prev_ten_average - vals[i]) > threshold:
            peaks.add(i)
    return peaks
def mic_algorithm(vals, threshold):
    """
    Detect microphone peaks.

    An index is a peak when its sample deviates from the overall mean by
    strictly more than ``threshold``.

    Fixes vs. the original: the Python-2 ``print average`` debug statement
    (a SyntaxError under Python 3) is removed, and empty input no longer
    raises ZeroDivisionError.

    :param vals: sequence of microphone readings
    :param threshold: minimum absolute deviation from the mean
    :return: set of peak indices (empty for empty input)
    """
    if not vals:
        return set()
    average = sum(vals) / len(vals)
    return {i for i, val in enumerate(vals) if abs(val - average) > threshold}
# NOTE(review): this script is Python 2 only (it uses xrange below);
# port to range() before running under Python 3.
filename = 'accAndMicrophoneQuick.txt'
with open(filename) as f:
    mic_vals = []
    x_vals = []
    y_vals = []
    z_vals = []
    timestamps = []
    for line in f.readlines():
        # Each line: mic,x,y,z,timestamp; fields may carry NUL padding.
        mic,x,y,z,timestamp = line.strip('\n').split(',')
        mic = mic.lstrip('\x00')
        x = x.lstrip('\x00')
        y = y.lstrip('\x00')
        z = z.lstrip('\x00')
        mic_vals.append(int(mic))
        x_vals.append(int(x))
        y_vals.append(int(y))
        z_vals.append(int(z))
        timestamps.append(timestamp)
# NOTE(review): the timestamps parsed from the file are discarded here and
# replaced with sample indices 1..N - confirm that is intentional.
timestamps = range(1,len(x_vals)+1)
x_peaks = acc_algorithm(x_vals,4)
y_peaks = acc_algorithm(y_vals,4)
z_peaks = acc_algorithm(z_vals,4)
mic_peaks = mic_algorithm(mic_vals,100)
# Four stacked subplots: each sample plotted red if flagged as a peak,
# green otherwise.
plt.subplot(4,1,1)
plt.title('x vs. Time')
for i in xrange(len(x_vals)):
    if i in x_peaks:
        plt.scatter(timestamps[i], x_vals[i], color='red')
    else:
        plt.scatter(timestamps[i], x_vals[i], color='green')
plt.subplot(4,1,2)
plt.title('y vs. Time')
for i in xrange(len(y_vals)):
    if i in y_peaks:
        plt.scatter(timestamps[i], y_vals[i], color='red')
    else:
        plt.scatter(timestamps[i], y_vals[i], color='green')
plt.subplot(4,1,3)
plt.title('z vs. Time')
for i in xrange(len(z_vals)):
    if i in z_peaks:
        plt.scatter(timestamps[i], z_vals[i], color='red')
    else:
        plt.scatter(timestamps[i], z_vals[i], color='green')
plt.subplot(4,1,4)
# NOTE(review): 'Microhpone' typo below is a runtime string (plot title),
# left unchanged here.
plt.title('Microhpone vs. Time')
for i in xrange(len(mic_vals)):
    if i in mic_peaks:
        plt.scatter(timestamps[i], mic_vals[i], color='red')
    else:
        plt.scatter(timestamps[i], mic_vals[i], color='green')
plt.show()
"qiu.benson@gmail.com"
] | qiu.benson@gmail.com |
3c80a21b57ae2707f9a66aef9c33445e8683c9ce | 95a27dd31260802da3768064c9614ce4f6ca4797 | /Scripts/MLP_GenCode_112.py | 72a351e7ac8a3dc76101c0b9c9b6d8c96346f104 | [
"MIT"
] | permissive | ShepherdCode/Soars2021 | 7ee34227076ea424ad42f4727205141b69c78bb9 | ab4f304eaa09e52d260152397a6c53d7a05457da | refs/heads/main | 2023-07-09T05:28:56.993951 | 2021-08-18T14:16:57 | 2021-08-18T14:16:57 | 364,885,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,618 | py | #!/usr/bin/env python
# coding: utf-8
# # MLP GenCode
# MLP_GenCode_trying to fix bugs.
# NEURONS=128 and K={1,2,3}.
#
# In[14]:
import time
def show_time():
    """Print the current local wall-clock time as 'YYYY-MM-DD HH:MM:SS TZ'."""
    local_now = time.localtime(time.time())
    print(time.strftime('%Y-%m-%d %H:%M:%S %Z', local_now))
# In[15]:
# Dataset sizing and model hyperparameters for this run.
PC_TRAINS=8000  # protein-coding sequences used for training
NC_TRAINS=8000  # non-coding sequences used for training
PC_TESTS=8000
NC_TESTS=8000
PC_LENS=(200,99000)
NC_LENS=(200,99000)
# NOTE(review): the two assignments below immediately override the
# (200,99000) ranges above - the earlier pair is dead code.
PC_LENS=(200,4000)  # (min,max) sequence-length filter for mRNA
NC_LENS=(200,4000)  # (min,max) sequence-length filter for lncRNA
MAX_K = 3  # count K-mers for K=1..3
INPUT_SHAPE=(None,84) # 4^3 + 4^2 + 4^1
NEURONS=128  # width of each hidden Dense layer
DROP_RATE=0.01  # dropout rate after each hidden layer
EPOCHS=1000 # 200
SPLITS=5  # KFold splits (only FOLDS of them are actually trained)
FOLDS=1 # make this 5 for serious testing
# In[16]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Flatten,TimeDistributed
from keras.losses import BinaryCrossentropy
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
# In[17]:
import sys
IN_COLAB = False
try:
from google.colab import drive
IN_COLAB = True
except:
pass
if IN_COLAB:
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/GenCodeTools.py')
with open('GenCodeTools.py', 'w') as f:
f.write(r.text)
from GenCodeTools import GenCodeLoader
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/KmerTools.py')
with open('KmerTools.py', 'w') as f:
f.write(r.text)
from KmerTools import KmerTools
else:
print("CoLab not working. On my PC, use relative paths.")
DATAPATH='data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.GenCodeTools import GenCodeLoader
from SimTools.KmerTools import KmerTools
BESTMODELPATH=DATAPATH+"BestModel-112" # saved on cloud instance and lost after logout
LASTMODELPATH=DATAPATH+"LastModel-112" # saved on Google Drive but requires login
# ## Data Load
# Restrict mRNA to those transcripts with a recognized ORF.
# In[18]:
PC_FILENAME='gencode.v26.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v26.lncRNA_transcripts.fa.gz'
PC_FILENAME='gencode.v38.pc_transcripts.fa.gz'
NC_FILENAME='gencode.v38.lncRNA_transcripts.fa.gz'
PC_FULLPATH=DATAPATH+PC_FILENAME
NC_FULLPATH=DATAPATH+NC_FILENAME
# In[19]:
loader=GenCodeLoader()
loader.set_label(1)
loader.set_check_utr(False)
pcdf=loader.load_file(PC_FULLPATH)
print("PC seqs loaded:",len(pcdf))
loader.set_label(0)
loader.set_check_utr(False)
ncdf=loader.load_file(NC_FULLPATH)
print("NC seqs loaded:",len(ncdf))
show_time()
# ## Data Prep
# In[20]:
def dataframe_length_filter(df, low_high):
    """Return only the rows whose 'seqlen' value lies within [low, high]."""
    low, high = low_high
    # Vectorized boolean mask: far faster than iterating and dropping rows.
    in_range = (df['seqlen'] >= low) & (df['seqlen'] <= high)
    return df[in_range]
def dataframe_extract_sequence(df):
    """Return the 'sequence' column as a plain Python list."""
    return list(df['sequence'])
# Filter both classes by length, then keep only the raw sequence strings.
pc_all = dataframe_extract_sequence(
    dataframe_length_filter(pcdf,PC_LENS))
nc_all = dataframe_extract_sequence(
    dataframe_length_filter(ncdf,NC_LENS))
show_time()
print("PC seqs pass filter:",len(pc_all))
print("NC seqs pass filter:",len(nc_all))
# Garbage collection to reduce RAM footprint
pcdf=None
ncdf=None
# In[21]:
# Any portion of a shuffled list is a random selection
# NOTE(review): nothing above actually shuffles pc_all/nc_all, so these
# slices are file-order prefixes, not random samples - confirm intent.
pc_train=pc_all[:PC_TRAINS]
nc_train=nc_all[:NC_TRAINS]
pc_test=pc_all[PC_TRAINS:PC_TRAINS+PC_TESTS]
# NOTE(review): the slice below uses PC_TESTS (not NC_TESTS); harmless while
# both are 8000, but a latent bug if the constants ever diverge.
nc_test=nc_all[NC_TRAINS:NC_TRAINS+PC_TESTS]
print("PC train, NC train:",len(pc_train),len(nc_train))
print("PC test, NC test:",len(pc_test),len(nc_test))
# Garbage collection
pc_all=None
nc_all=None
print("First PC train",pc_train[0])
print("First PC test",pc_test[0])
# In[22]:
def prepare_x_and_y(seqs1, seqs0):
    """
    Interleave positive and negative sequences with their labels.

    Builds parallel arrays in which even positions hold seqs0 items
    (label 0) and odd positions hold seqs1 items (label 1), so training
    batches see alternating classes.

    Assumes len(seqs1) == len(seqs0); with unequal lengths the trailing
    extra items keep the plain concatenation order instead of interleaving.

    Fixes vs. the original: the unreachable sklearn shuffle code after the
    return (plus its "bug?" debug comments) and the unused ``total``
    variable are removed; reachable behavior is unchanged.

    :param seqs1: positive-class sequences (label 1)
    :param seqs0: negative-class sequences (label 0)
    :return: (sequence ndarray, int8 label ndarray)
    """
    len1 = len(seqs1)
    len0 = len(seqs0)
    L1 = np.ones(len1, dtype=np.int8)
    L0 = np.zeros(len0, dtype=np.int8)
    S1 = np.asarray(seqs1)
    S0 = np.asarray(seqs0)
    all_labels = np.concatenate((L1, L0))
    all_seqs = np.concatenate((S1, S0))
    # Overwrite the concatenated layout in place with the interleaved one.
    for i in range(len0):
        all_labels[i * 2] = L0[i]
        all_seqs[i * 2] = S0[i]
        all_labels[i * 2 + 1] = L1[i]
        all_seqs[i * 2 + 1] = S1[i]
    return all_seqs, all_labels
Xseq,y=prepare_x_and_y(pc_train,nc_train)
print(Xseq[:3])
print(y[:3])
# Tests:
show_time()
# In[23]:
def seqs_to_kmer_freqs(seqs,max_K):
    """
    Convert each sequence to a vector of K-mer frequencies for K=1..max_K.

    :param seqs: iterable of nucleotide sequence strings
    :param max_K: largest K-mer size counted directly (smaller Ks harvested)
    :return: 2D ndarray with one frequency vector per input sequence
    """
    tool = KmerTools() # from SimTools
    empty = tool.make_dict_upto_K(max_K)
    collection = []
    for seq in seqs:
        counts = empty
        # NOTE(review): `counts` aliases the single `empty` dict on every
        # iteration; if update_count_one_K mutates its argument in place,
        # counts would accumulate across sequences instead of resetting.
        # Confirm against KmerTools before trusting per-sequence output.
        # Last param should be True when using Harvester.
        counts = tool.update_count_one_K(counts,max_K,seq,True)
        # Given counts for K=3, Harvester fills in counts for K=1,2.
        counts = tool.harvest_counts_from_K(counts,max_K)
        fdict = tool.count_to_frequency(counts,max_K)
        freqs = list(fdict.values())
        collection.append(freqs)
    return np.asarray(collection)
Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K)
show_time()
# ## Neural network
# In[24]:
def make_DNN():
    """
    Build and compile the 2-hidden-layer sigmoid MLP binary classifier.

    Reads module-level hyperparameters (NEURONS, DROP_RATE, INPUT_SHAPE).

    :return: a compiled, built Keras Sequential model
    """
    dt=np.float32
    print("make_DNN")
    print("input shape:",INPUT_SHAPE)
    dnn = Sequential()
    dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt)) # relu doesn't work as well
    dnn.add(Dropout(DROP_RATE))
    dnn.add(Dense(NEURONS,activation="sigmoid",dtype=dt))
    dnn.add(Dropout(DROP_RATE))
    # Single sigmoid output: probability of the positive (coding) class.
    dnn.add(Dense(1,activation="sigmoid",dtype=dt))
    dnn.compile(optimizer='adam',  # adadelta doesn't work as well
                loss=BinaryCrossentropy(from_logits=False),
                metrics=['accuracy'])   # add to default metrics=loss
    dnn.build(input_shape=INPUT_SHAPE)
    return dnn
model = make_DNN()
print(model.summary())
# In[25]:
def do_cross_validation(X,y):
    """
    Train the MLP with K-fold cross-validation and plot each fold's history.

    Splits X/y with KFold(SPLITS, shuffle=True) but only trains the first
    FOLDS folds; a fresh model is built per fold so folds don't leak into
    each other. Plots loss/accuracy curves per fold via matplotlib.

    Fixes vs. the original: the accidental double assignment
    ``model = model = make_DNN()`` and the unused ``cv_scores`` list are
    removed; behavior is otherwise unchanged.

    :param X: feature matrix (indexable by KFold index arrays)
    :param y: label vector aligned with X
    :return: the model trained in the last executed fold (parameters at end
             of training), or None if FOLDS == 0
    """
    fold=0
    #mycallbacks = [ModelCheckpoint(
    #    filepath=MODELPATH, save_best_only=True,
    #    monitor='val_accuracy', mode='max')]
    # When shuffle=True, the valid indices are a random subset.
    splitter = KFold(n_splits=SPLITS,shuffle=True)
    model = None
    for train_index,valid_index in splitter.split(X):
        if fold < FOLDS:
            fold += 1
            X_train=X[train_index] # inputs for training
            y_train=y[train_index] # labels for training
            X_valid=X[valid_index] # inputs for validation
            y_valid=y[valid_index] # labels for validation
            print("MODEL")
            # Call constructor on each CV. Else, continually improves the same model.
            model = make_DNN()
            print("FIT") # model.fit() implements learning
            start_time=time.time()
            history=model.fit(X_train, y_train,
                    epochs=EPOCHS,
                    verbose=1,  # ascii art while learning
                    # callbacks=mycallbacks, # called at end of each epoch
                    validation_data=(X_valid,y_valid))
            end_time=time.time()
            elapsed_time=(end_time-start_time)
            print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
            # print(history.history.keys()) # all these keys will be shown in figure
            pd.DataFrame(history.history).plot(figsize=(8,5))
            plt.grid(True)
            plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale
            plt.show()
    return model  # parameters at end of training
# In[26]:
show_time()
last_model = do_cross_validation(Xfrq,y)
# In[27]:
def show_test_AUC(model,X,y):
    """
    Plot the ROC curve of `model` on (X, y) against a no-skill baseline.

    The no-skill baseline predicts 0 for every sample; both AUCs are shown
    in the legend and the model AUC is printed as a percentage.

    :param model: fitted Keras model exposing predict()
    :param X: feature matrix
    :param y: true binary labels
    """
    ns_probs = [0 for _ in range(len(y))]
    bm_probs = model.predict(X)
    ns_auc = roc_auc_score(y, ns_probs)
    bm_auc = roc_auc_score(y, bm_probs)
    ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
    bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
    plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
    plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
    plt.title('ROC')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend()
    plt.show()
    print("%s: %.2f%%" %('AUC',bm_auc*100.0))
def show_test_accuracy(model,X,y):
    """Evaluate `model` on (X, y) and print its accuracy as a percentage."""
    scores = model.evaluate(X, y, verbose=0)
    # scores[1] is the 'accuracy' metric added at compile time.
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# In[28]:
# Evaluate the trained model on the training split (sanity check).
print("Accuracy on training data.")
print("Prepare...")
show_time()
Xseq,y=prepare_x_and_y(pc_train,nc_train)
print("Extract K-mer features...")
show_time()
Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K)
print("Plot...")
show_time()
show_test_AUC(last_model,Xfrq,y)
show_test_accuracy(last_model,Xfrq,y)
show_time()
# In[29]:
# Evaluate the trained model on the held-out test split.
print("Accuracy on test data.")
print("Prepare...")
show_time()
Xseq,y=prepare_x_and_y(pc_test,nc_test)
print("Extract K-mer features...")
show_time()
Xfrq=seqs_to_kmer_freqs(Xseq,MAX_K)
print("Plot...")
show_time()
show_test_AUC(last_model,Xfrq,y)
show_test_accuracy(last_model,Xfrq,y)
show_time()
| [
"jmill02@shepherd.edu"
] | jmill02@shepherd.edu |
1f98e74eef835ca6a17c0f6a2081205ba2b18a15 | c41069e0cb4105c4092853e60de6bf116b332e70 | /resaspy/__init__.py | a5fdbf6faf8e137b9e35b31325e2ee25d4a95bd9 | [
"MIT"
] | permissive | ar90n/resaspy | 5a4e7789dc24f412e1f1f929fb491f349abe90f1 | 58d140ad1e61478ab8f3993676bd0c97ad43ae18 | refs/heads/master | 2021-01-11T10:13:13.712176 | 2017-05-10T14:17:57 | 2017-05-10T14:17:57 | 78,554,641 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # -*- coding: utf-8 -*-
"""
resaspy is a simple utility for RESAS api(https://opendata.resas-portal.go.jp).
usage:
>>> from resaspy import Resaspy
>>> resas = Resaspy( key )
>>> r = resas.prefectures()
>>> r.result
:copyright: (c) 2016 by Masahiro Wada.
:license: MIT, see LICENSE for more details.
"""
__title__ = 'resaspy'
__version__ = '0.2.1'
__build__ = 0x021204
__author__ = 'Masahiro Wada'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Masahiro Wada'
from .resaspy import Resaspy
| [
"argon.argon.argon@gmail.com"
] | argon.argon.argon@gmail.com |
c246c6158c0b505b6b3d9d926557418c0bf0e3ff | cd8d3543b3fff3a5b986ce4d5601f92b6ecbfcf3 | /src/util/post.py | 53a2aa352402e4d05f86e57c0a887c2acb693f8d | [
"MIT"
] | permissive | jsrdzhk/noctorro_test | 29da81ddfa79299ec193bec73ef453aaed157d58 | 2b449c91a15beb654c3373c7d60b1ab129129496 | refs/heads/master | 2023-01-01T01:37:03.542252 | 2020-10-16T03:01:09 | 2020-10-16T03:01:09 | 304,245,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | import requests
from requests import Response
class PostUtil:
    """Thin helper around ``requests`` for issuing POST requests."""

    @staticmethod
    def post_form_data(url: str, data: dict) -> Response:
        """Send *data* to *url* as a form-encoded POST and return the response."""
        response = requests.post(url, data)
        return response
"zhanghaokang@antiy.cn"
] | zhanghaokang@antiy.cn |
0c40d54de503d81995b68e2d44827f6f5f07f80b | ce58cc8dadf22239222cd921a7f3b66efe71d47b | /animal_production/wizard/visit_report_wizard.py | c70e60c1e2091d88c0b28ce8398b51539a526df0 | [] | no_license | odooCode11/napata | 62e03281c9e36834aab9d2ee4b3a31ef30271e04 | e4ebaa341cef04247bd0f828e82fdd2ee5da4459 | refs/heads/main | 2023-03-09T11:56:45.405751 | 2021-02-20T18:11:19 | 2021-02-20T18:11:19 | 340,721,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,156 | py | from odoo import models, fields, api
class VisitReportWizard(models.TransientModel):
_name = 'visit.report.wizard'
_description = 'Visits Report'
PARAMETERS = [
('general', 'General'),
('period_only', 'Period Only'),
('period_state', 'Period & State'),
('period_locality', 'Period & Locality'),
('period_area', 'Period & Area'),
('period_department', 'Period & Department'),
('period_visit_type', 'Period & Order & Type'),
]
state_id = fields.Many2one('states', string='State')
locality_id = fields.Many2one('localities', string='Locality')
area_id = fields.Many2one('areas', string='Area')
department_id = fields.Many2one('departments', string='Department')
visit_type = fields.Selection([('I', 'Initial Visit'),
('p', 'Permission'),
('O', 'Re Permission')],
string='Visit Type')
from_date = fields.Date(string="From", default=fields.Date.today())
to_date = fields.Date(string="To", default=fields.Date.today())
parameter_id = fields.Selection(PARAMETERS, default='general', string='Select Report Parameters')
def get_report(self):
data = {
'ids': self.ids,
'model': self._name,
'form': {
'params': self.parameter_id,
'state_id': self.state_id.id,
'locality_id': self.locality_id.id,
'area_id': self.area_id.id,
'department_id': self.department_id.id,
'visit_type': self.visit_type,
'state_name': self.state_id.name,
'locality_name': self.locality_id.name,
'area_name': self.area_id.name,
'department_name': self.department_id.name,
'from_date': fields.Date.from_string(self.from_date),
'to_date': fields.Date.from_string(self.to_date),
},
}
return self.env.ref('animal_production.visit_report').report_action(self, data=data)
class VisitReport(models.AbstractModel):
    # Report model backing the 'visit_template' QWeb report: builds the
    # record set and header values from the wizard's payload.
    _name = 'report.animal_production.visit_template'
    @api.model
    def _get_report_values(self, docids, data=None):
        """
        Build the rendering context for the visits report.

        Translates the wizard's chosen parameter set into a search domain
        over the 'visits' model, then assembles header values (names,
        period, Arabic visit-type label) for the template.

        Fixes vs. the original: the 'period_visit_type' branch filtered the
        date range on the ``visit_type`` field instead of ``visit_date``
        (so the date filter never matched); the redundant initial
        search([]) in the 'general' branch (immediately overwritten below)
        is removed.
        """
        params = data['form']['params']
        state_id = data['form']['state_id']
        locality_id = data['form']['locality_id']
        area_id = data['form']['area_id']
        department_id = data['form']['department_id']
        state_name = data['form']['state_name']
        locality_name = data['form']['locality_name']
        area_name = data['form']['area_name']
        department_name = data['form']['department_name']
        visit_type = data['form']['visit_type']
        from_date = data['form']['from_date']
        to_date = data['form']['to_date']
        domain = []
        # 'general' keeps the empty domain (all visits).
        if params == 'period_only':
            domain.append(('visit_date', '>=', from_date))
            domain.append(('visit_date', '<=', to_date))
        elif params == 'period_state':
            domain.append(('state_id', '=', state_id))
            domain.append(('visit_date', '>=', from_date))
            domain.append(('visit_date', '<=', to_date))
        elif params == 'period_locality':
            domain.append(('locality_id', '=', locality_id))
            domain.append(('visit_date', '>=', from_date))
            domain.append(('visit_date', '<=', to_date))
        elif params == 'period_area':
            domain.append(('area_id', '=', area_id))
            domain.append(('visit_date', '>=', from_date))
            domain.append(('visit_date', '<=', to_date))
        elif params == 'period_department':
            domain.append(('department_id', '=', department_id))
            domain.append(('visit_date', '>=', from_date))
            domain.append(('visit_date', '<=', to_date))
        elif params == 'period_visit_type':
            domain.append(('visit_type', '=', visit_type))
            # Bug fix: the date bounds must apply to visit_date.
            domain.append(('visit_date', '>=', from_date))
            domain.append(('visit_date', '<=', to_date))
        docs = self.env['visits'].search(domain)
        # NOTE(review): header names are taken from the first matching
        # 'orders' record using the visits domain - confirm the 'orders'
        # model shares these field names.
        rec = self.env['orders'].search(domain, limit=1)
        state = rec.state_id.name
        locality = rec.locality_id.name
        area = rec.area_id.name
        # Arabic label for the selected visit type.
        if visit_type == 'I':
            visit = 'زيارة مبدئية'
        elif visit_type == 'p':
            visit = 'زيارة تصديق'
        else:
            visit = 'زيارة تجديد تصديق'
        return {
            'doc_ids': data['ids'],
            'doc_model': data['model'],
            'params': params,
            'state_name': state_name,
            'locality_name': locality_name,
            'area_name': area_name,
            'department_name': department_name,
            'visit': visit,
            'state': state,
            'locality': locality,
            'area': area,
            'from_date': from_date,
            'to_date': to_date,
            'docs': docs,
        }
"odoocode11@gmai.com"
] | odoocode11@gmai.com |
6a97d871b8ed8a49afb5c0e20a56bf9f9b38ed3a | 5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba | /algorithm-study/codewars/Quarter_of_the_year.py | 96e4279a8205373084cf8d50623834bef2b00e87 | [] | no_license | namujinju/study-note | 4271b4248b3c4ac1b96ef1da484d86569a030762 | 790b21e5318a326e434dc836f5f678a608037a8c | refs/heads/master | 2023-02-04T13:25:55.418896 | 2020-12-26T10:47:11 | 2020-12-26T10:47:11 | 275,279,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | def quarter_of(month):
return (month - 1) // 3 + 1
| [
"59328810+namujinju@users.noreply.github.com"
] | 59328810+namujinju@users.noreply.github.com |
e5cf31ac6e55eedafb5b7255682ba74d6d2ccfc5 | b502a61dae00f9fbfed7a89b693ba9352e016756 | /Python/findAstring.py | f60a778cd837d25228e5dd5c6162ae6b2b9def57 | [] | no_license | VIJAYAYERUVA/100DaysOfCode | 4971fadd8a9583a79a3b66723db91d9d0b1cfd2a | 637bfd559e0a50181902cc31cfe062de20615b53 | refs/heads/main | 2023-03-27T06:06:14.725721 | 2021-03-27T22:09:41 | 2021-03-27T22:09:41 | 322,189,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # https://www.hackerrank.com/challenges/find-a-string/problem
def count_substring(string, sub_string):
c = 0
for i in range(0, len(string) - len(sub_string) + 1):
if string[i:i + len(sub_string)] == sub_string:
c += 1
return c
if __name__ == '__main__':
    # Read the haystack and the needle from stdin, then print the
    # overlap-aware occurrence count (HackerRank "Find a string").
    string = input().strip()
    sub_string = input().strip()
    count = count_substring(string, sub_string)
    print(count)
| [
"VIJAYAYERUVA@users.noreply.github.com"
] | VIJAYAYERUVA@users.noreply.github.com |
4da11dc3aeb5eb44e23efaa874600210f5727f5c | 6faa263efd75c11650b59ba08267b43f6e46be20 | /main.py | a0942a2fbcfffe38d25f1ef7c324100fdb6117d6 | [] | no_license | inzapp/c-yolo | 18c572cdbd9fa0b94d96449dda37522bacd22385 | f4de418bad7ed81cc48fa8377a56283a6c5f16f2 | refs/heads/master | 2023-04-14T21:49:02.194069 | 2021-04-21T06:28:29 | 2021-04-21T06:28:29 | 340,872,149 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,695 | py | """
Authors : inzapp
Github url : https://github.com/inzapp/c-yolo
Copyright 2021 inzapp Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"),
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from yolo import Yolo
if __name__ == '__main__':
    """
    Train model using fit method.
    train_image_path:
        The path to the directory where the training data is located.
        There must be images and labels in this directory.
        The image and label must have the same file name and not be in different directories.
    input_shape:
        (height, width, channel) format of model input size
        If the channel is 1, train with a gray image, otherwise train with a color image.
    batch_size:
        2 batch is recommended.
    lr:
        Learning rate value while training. 1e-3 ~ 1e-4 is recommended.
    epochs:
        Epochs value.
    curriculum_epochs:
        Epochs to pre-reduce the loss to the confidence and bounding box channel before starting the training.
    validation_split:
        The percentage of data that will be used as validation data.
    validation_image_path:
        Use this parameter if the validation data is in a different path from the training data.
    training_view:
        During training, the image is forwarded in real time, showing the results are shown.
        False if training is on a server system without IO equipment.
    mixed_float16_training:
        Train faster and consume less memory using both 32bit and 16bit floating point types during training.
    use_map_callback:
        It behaves similarly to ModelCheckpoint callback,
        but it stores models with higher mAP value by calculating the mAP of the validation data per each epoch.
    """
    # NOTE(review): the dataset path below is a hard-coded Windows path;
    # adjust it per machine before running.
    model = Yolo()
    model.fit(
        train_image_path=r'C:\inz\train_data\coco_2017',
        model_name='coco_2017_416_416_3',
        input_shape=(416, 416, 3),
        batch_size=2,
        lr=1e-3,
        epochs=1000,
        curriculum_epochs=1,
        validation_split=0.2,
        training_view=True,
        mixed_float16_training=True,
        use_map_callback=True)
    # Evaluate on the validation split after training completes.
    model.evaluate()
"inzapp@naver.com"
] | inzapp@naver.com |
30d1e191fd39fe9da315d8edc408653ef79ab813 | 9baa9f1bedf7bc973f26ab37c9b3046824b80ca7 | /venv-bck/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 1940bcf576151f37297b97d9ebafec8007ab3c80 | [] | no_license | shakthydoss/suriyan | 58774fc5de1de0a9f9975c2ee3a98900e0a5dff4 | 8e39eb2e65cc6c6551fc165b422b46d598cc54b8 | refs/heads/master | 2020-04-12T05:36:59.957153 | 2017-01-08T06:12:13 | 2017-01-08T06:12:13 | 59,631,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,507 | py | from __future__ import absolute_import
import time
from socket import _GLOBAL_DEFAULT_TIMEOUT
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
    """
    Return the current wall-clock time in seconds.

    Kept as a standalone helper so unit tests can monkeypatch it.
    """
    now = time.time()
    return now
class Timeout(object):
    """ Timeout configuration.
    Timeouts can be defined as a default for a pool::
        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')
    Or per-request (which overrides the default for the pool)::
        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
    Timeouts can be disabled by setting all the parameters to ``None``::
        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/, timeout=no_timeout)
    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.
        Defaults to None.
    :type total: integer, float, or None
    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.
    :type connect: integer, float, or None
    :param read:
        The maximum amount of time to wait between consecutive
        read operations for a response from the server. Omitting
        the parameter will default the read timeout to the system
        default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.
    :type read: integer, float, or None
    .. note::
        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response.
        For example, Python's DNS resolver does not obey the timeout specified
        on the socket. Other factors that can affect total request time include
        high CPU load, high swap, the program running at a low priority level,
        or other behaviors.
        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not trigger, even though the request will take
        several minutes to complete.
        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """
    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        # Set by start_connect(); used to compute time left for reads.
        self._start_connect = None
    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)
    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.
        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If the type is not an integer or a float, or if it
            is a numeric value less than zero.
        """
        if value is _Default:
            return cls.DEFAULT_TIMEOUT
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value
        # NOTE(review): bool passes float() here (True behaves as 1 second);
        # later urllib3 versions reject bools explicitly.
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError:  # Python 3
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        return value
    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.
        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.
        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)
    def clone(self):
        """ Create a copy of the timeout object
        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.
        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)
    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect
    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.
        :return: Elapsed time.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect
    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.
        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.
        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total
        # Both set: the stricter (smaller) bound wins.
        return min(self._connect, self.total)
    @property
    def read_timeout(self):
        """ Get the value for the read timeout.
        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.
        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.
        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Remaining budget after connect, capped by the read timeout;
            # clamped at 0 so an exhausted budget never goes negative.
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
"shakthydoss@gmail.com"
] | shakthydoss@gmail.com |
3288d26e331955d77c11f56c860ccabb2886f6ea | 381f1c8e04273067a10037b0ffd38b17ebd83f75 | /search/graphicsDisplay.py | b31496a1435c14710c032890da3b7e5cc69376fa | [] | no_license | joshuaburkhart/CIS_571 | 8ca89a67ca2db6628ccd72ab65fbf5237068439a | c54768acfffbabe85ce39c42673fe58dab5b895a | refs/heads/master | 2021-01-18T21:09:29.368579 | 2016-04-27T16:45:50 | 2016-04-27T16:45:50 | 12,277,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,362 | py | # graphicsDisplay.py
# ------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from graphicsUtils import *
import math, time
from game import Directions
###########################
# GRAPHICS DISPLAY CODE #
###########################
# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley.
# Some code from a Pacman implementation by LiveWires, and used / modified with permission.
DEFAULT_GRID_SIZE = 30.0
INFO_PANE_HEIGHT = 35
BACKGROUND_COLOR = formatColor(0, 0, 0)
WALL_COLOR = formatColor(0.0 / 255.0, 51.0 / 255.0, 255.0 / 255.0)
INFO_PANE_COLOR = formatColor(.4, .4, 0)
SCORE_COLOR = formatColor(.9, .9, .9)
PACMAN_OUTLINE_WIDTH = 2
PACMAN_CAPTURE_OUTLINE_WIDTH = 4
GHOST_COLORS = []
GHOST_COLORS.append(formatColor(.9, 0, 0)) # Red
GHOST_COLORS.append(formatColor(0, .3, .9)) # Blue
GHOST_COLORS.append(formatColor(.98, .41, .07)) # Orange
GHOST_COLORS.append(formatColor(.1, .75, .7)) # Green
GHOST_COLORS.append(formatColor(1.0, 0.6, 0.0)) # Yellow
GHOST_COLORS.append(formatColor(.4, 0.13, 0.91)) # Purple
TEAM_COLORS = GHOST_COLORS[:2]
GHOST_SHAPE = [
(0, 0.3),
(0.25, 0.75),
(0.5, 0.3),
(0.75, 0.75),
(0.75, -0.5),
(0.5, -0.75),
(-0.5, -0.75),
(-0.75, -0.5),
(-0.75, 0.75),
(-0.5, 0.3),
(-0.25, 0.75)
]
GHOST_SIZE = 0.65
SCARED_COLOR = formatColor(1, 1, 1)
GHOST_VEC_COLORS = map(colorToVector, GHOST_COLORS)
PACMAN_COLOR = formatColor(255.0 / 255.0, 255.0 / 255.0, 61.0 / 255)
PACMAN_SCALE = 0.5
# pacman_speed = 0.25
# Food
FOOD_COLOR = formatColor(1, 1, 1)
FOOD_SIZE = 0.1
# Laser
LASER_COLOR = formatColor(1, 0, 0)
LASER_SIZE = 0.02
# Capsule graphics
CAPSULE_COLOR = formatColor(1, 1, 1)
CAPSULE_SIZE = 0.25
# Drawing walls
WALL_RADIUS = 0.15
class InfoPane:
    """Score / status strip rendered below the maze grid.

    Owns the Tk text items for the running score, optional ghost-distance
    readouts, and (in capture mode) the team banner.
    """

    def __init__(self, layout, gridSize):
        # Geometry is derived from the maze layout so the pane spans the
        # full window width and sits just under the last grid row.
        self.gridSize = gridSize
        self.width = (layout.width) * gridSize
        self.base = (layout.height + 1) * gridSize
        self.height = INFO_PANE_HEIGHT
        self.fontSize = 24
        self.textColor = PACMAN_COLOR
        self.drawPane()

    def toScreen(self, pos, y=None):
        """
        Translates a point relative from the bottom left of the info pane.
        Accepts either a (x, y) pair as ``pos``, or separate ``pos``/``y``
        scalars.
        """
        if y == None:
            x, y = pos
        else:
            x = pos
        x = self.gridSize + x  # Margin
        y = self.base + y
        return x, y

    def drawPane(self):
        # Initial score text; mutated in place by updateScore().
        self.scoreText = text(self.toScreen(0, 0), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")

    def initializeGhostDistances(self, distances):
        # Lay the ghost-distance readouts across the right half of the pane,
        # shrinking the font on narrow layouts so they still fit.
        self.ghostDistanceText = []
        size = 20
        if self.width < 240:
            size = 12
        if self.width < 160:
            size = 10
        for i, d in enumerate(distances):
            t = text(self.toScreen(self.width / 2 + self.width / 8 * i, 0), GHOST_COLORS[i + 1], d, "Times", size, "bold")
            self.ghostDistanceText.append(t)

    def updateScore(self, score):
        changeText(self.scoreText, "SCORE: % 4d" % score)

    def setTeam(self, isBlue):
        # BUG FIX: the local string used to be named ``text``, shadowing the
        # graphicsUtils.text() function; the call on the next line then
        # raised "TypeError: 'str' object is not callable".
        label = "RED TEAM"
        if isBlue: label = "BLUE TEAM"
        self.teamText = text(self.toScreen(300, 0), self.textColor, label, "Times", self.fontSize, "bold")

    def updateGhostDistances(self, distances):
        if len(distances) == 0: return
        # Lazily create the text items on first use, then edit them in place.
        if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
        else:
            for i, d in enumerate(distances):
                changeText(self.ghostDistanceText[i], d)

    # The icon/message hooks below are intentionally no-ops in this pane;
    # alternate displays may override them.
    def drawGhost(self):
        pass

    def drawPacman(self):
        pass

    def drawWarning(self):
        pass

    def clearIcon(self):
        pass

    def updateMessage(self, message):
        pass

    def clearMessage(self):
        pass
class PacmanGraphics:
    """Main Tk-based display for the Pacman game.

    Draws the static maze (walls, food, capsules), creates per-agent image
    handles, and mutates/moves those handles in place as the game state
    advances.  Note: this module is Python 2 code (see the ``print``
    statement in animatePacman).
    """

    def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
        # ``zoom`` scales every grid cell; ``frameTime`` throttles animation
        # (negative means "wait for a keypress each frame" — see
        # animatePacman); ``capture`` switches to capture-the-flag colors.
        self.have_window = 0
        self.currentGhostImages = {}
        self.pacmanImage = None
        self.zoom = zoom
        self.gridSize = DEFAULT_GRID_SIZE * zoom
        self.capture = capture
        self.frameTime = frameTime

    def initialize(self, state, isBlue=False):
        """Open the window and draw the initial game state."""
        self.isBlue = isBlue
        self.startGraphics(state)
        # self.drawDistributions(state)
        self.distributionImages = None  # Initialized lazily
        self.drawStaticObjects(state)
        self.drawAgentObjects(state)
        # Information
        self.previousState = state

    def startGraphics(self, state):
        # Caches the layout dimensions and creates the window + info pane.
        self.layout = state.layout
        layout = self.layout
        self.width = layout.width
        self.height = layout.height
        self.make_window(self.width, self.height)
        self.infoPane = InfoPane(layout, self.gridSize)
        self.currentState = layout

    def drawDistributions(self, state):
        # One background square per grid cell; recolored later by
        # updateDistributions() to visualize an agent's beliefs.
        walls = state.layout.walls
        dist = []
        for x in range(walls.width):
            distx = []
            dist.append(distx)
            for y in range(walls.height):
                (screen_x, screen_y) = self.to_screen((x, y))
                block = square((screen_x, screen_y),
                               0.5 * self.gridSize,
                               color=BACKGROUND_COLOR,
                               filled=1, behind=2)
                distx.append(block)
        self.distributionImages = dist

    def drawStaticObjects(self, state):
        # Walls, food and capsules never move; draw once and keep handles.
        layout = self.layout
        self.drawWalls(layout.walls)
        self.food = self.drawFood(layout.food)
        self.capsules = self.drawCapsules(layout.capsules)
        refresh()

    def drawAgentObjects(self, state):
        self.agentImages = []  # (agentState, image)
        for index, agent in enumerate(state.agentStates):
            if agent.isPacman:
                image = self.drawPacman(agent, index)
                self.agentImages.append((agent, image))
            else:
                image = self.drawGhost(agent, index)
                self.agentImages.append((agent, image))
        refresh()

    def swapImages(self, agentIndex, newState):
        """
        Changes an image from a ghost to a pacman or vis versa (for capture)
        """
        prevState, prevImage = self.agentImages[agentIndex]
        for item in prevImage: remove_from_screen(item)
        if newState.isPacman:
            image = self.drawPacman(newState, agentIndex)
            self.agentImages[agentIndex] = (newState, image)
        else:
            image = self.drawGhost(newState, agentIndex)
            self.agentImages[agentIndex] = (newState, image)
        refresh()

    def update(self, newState):
        """Advance the display by one game step (one agent moved)."""
        agentIndex = newState._agentMoved
        agentState = newState.agentStates[agentIndex]
        # Capture mode: an agent crossing the midline changes type.
        if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
        prevState, prevImage = self.agentImages[agentIndex]
        if agentState.isPacman:
            self.animatePacman(agentState, prevState, prevImage)
        else:
            self.moveGhost(agentState, agentIndex, prevState, prevImage)
        self.agentImages[agentIndex] = (agentState, prevImage)
        if newState._foodEaten != None:
            self.removeFood(newState._foodEaten, self.food)
        if newState._capsuleEaten != None:
            self.removeCapsule(newState._capsuleEaten, self.capsules)
        self.infoPane.updateScore(newState.score)
        if 'ghostDistances' in dir(newState):
            self.infoPane.updateGhostDistances(newState.ghostDistances)

    def make_window(self, width, height):
        # One cell of margin on every side, plus room for the info pane.
        grid_width = (width - 1) * self.gridSize
        grid_height = (height - 1) * self.gridSize
        screen_width = 2 * self.gridSize + grid_width
        screen_height = 2 * self.gridSize + grid_height + INFO_PANE_HEIGHT
        begin_graphics(screen_width,
                       screen_height,
                       BACKGROUND_COLOR,
                       "CS188 Pacman")

    def drawPacman(self, pacman, index):
        # Returns a one-element list so pacman's image has the same
        # list-of-parts shape as a ghost's.
        position = self.getPosition(pacman)
        screen_point = self.to_screen(position)
        endpoints = self.getEndpoints(self.getDirection(pacman))
        width = PACMAN_OUTLINE_WIDTH
        outlineColor = PACMAN_COLOR
        fillColor = PACMAN_COLOR
        if self.capture:
            # Capture mode colors pacman by team instead of classic yellow.
            outlineColor = TEAM_COLORS[index % 2]
            fillColor = GHOST_COLORS[index]
            width = PACMAN_CAPTURE_OUTLINE_WIDTH
        return [circle(screen_point, PACMAN_SCALE * self.gridSize,
                       fillColor=fillColor, outlineColor=outlineColor,
                       endpoints=endpoints,
                       width=width)]

    def getEndpoints(self, direction, position=(0, 0)):
        # Mouth arc endpoints (degrees).  The opening oscillates with the
        # fractional part of the grid position, so the mouth opens and
        # closes as pacman moves between cells.
        x, y = position
        pos = x - int(x) + y - int(y)
        width = 30 + 80 * math.sin(math.pi * pos)
        delta = width / 2
        if (direction == 'West'):
            endpoints = (180 + delta, 180 - delta)
        elif (direction == 'North'):
            endpoints = (90 + delta, 90 - delta)
        elif (direction == 'South'):
            endpoints = (270 + delta, 270 - delta)
        else:
            endpoints = (0 + delta, 0 - delta)
        return endpoints

    def movePacman(self, position, direction, image):
        screenPosition = self.to_screen(position)
        endpoints = self.getEndpoints(direction, position)
        r = PACMAN_SCALE * self.gridSize
        moveCircle(image[0], screenPosition, r, endpoints)
        refresh()

    def animatePacman(self, pacman, prevPacman, image):
        if self.frameTime < 0:
            # Step-through mode: advance one frame per keypress;
            # pressing "q" switches to continuous playback.
            print 'Press any key to step forward, "q" to play'
            keys = wait_for_keys()
            if 'q' in keys:
                self.frameTime = 0.1
        if self.frameTime > 0.01 or self.frameTime < 0:
            start = time.time()
            fx, fy = self.getPosition(prevPacman)
            px, py = self.getPosition(pacman)
            frames = 4.0
            # Linearly interpolate between the old and new positions.
            for i in range(1, int(frames) + 1):
                pos = px * i / frames + fx * (frames - i) / frames, py * i / frames + fy * (frames - i) / frames
                self.movePacman(pos, self.getDirection(pacman), image)
                refresh()
                sleep(abs(self.frameTime) / frames)
        else:
            self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
        refresh()

    def getGhostColor(self, ghost, ghostIndex):
        # Scared ghosts are drawn white regardless of identity.
        if ghost.scaredTimer > 0:
            return SCARED_COLOR
        else:
            return GHOST_COLORS[ghostIndex]

    def drawGhost(self, ghost, agentIndex):
        # Body polygon plus four circles (two eyes, two pupils); the pupils
        # are offset in the direction of travel.
        pos = self.getPosition(ghost)
        dir = self.getDirection(ghost)
        (screen_x, screen_y) = (self.to_screen(pos))
        coords = []
        for (x, y) in GHOST_SHAPE:
            coords.append((x * self.gridSize * GHOST_SIZE + screen_x, y * self.gridSize * GHOST_SIZE + screen_y))
        colour = self.getGhostColor(ghost, agentIndex)
        body = polygon(coords, colour, filled=1)
        WHITE = formatColor(1.0, 1.0, 1.0)
        BLACK = formatColor(0.0, 0.0, 0.0)
        dx = 0
        dy = 0
        if dir == 'North':
            dy = -0.2
        if dir == 'South':
            dy = 0.2
        if dir == 'East':
            dx = 0.2
        if dir == 'West':
            dx = -0.2
        leftEye = circle((screen_x + self.gridSize * GHOST_SIZE * (-0.3 + dx / 1.5), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy / 1.5)), self.gridSize * GHOST_SIZE * 0.2, WHITE, WHITE)
        rightEye = circle((screen_x + self.gridSize * GHOST_SIZE * (0.3 + dx / 1.5), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy / 1.5)), self.gridSize * GHOST_SIZE * 0.2, WHITE, WHITE)
        leftPupil = circle((screen_x + self.gridSize * GHOST_SIZE * (-0.3 + dx), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy)), self.gridSize * GHOST_SIZE * 0.08, BLACK, BLACK)
        rightPupil = circle((screen_x + self.gridSize * GHOST_SIZE * (0.3 + dx), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy)), self.gridSize * GHOST_SIZE * 0.08, BLACK, BLACK)
        ghostImageParts = []
        ghostImageParts.append(body)
        ghostImageParts.append(leftEye)
        ghostImageParts.append(rightEye)
        ghostImageParts.append(leftPupil)
        ghostImageParts.append(rightPupil)
        return ghostImageParts

    def moveEyes(self, pos, dir, eyes):
        # ``eyes`` is the [leftEye, rightEye, leftPupil, rightPupil] slice
        # of a ghost's image parts; mirrors the geometry in drawGhost.
        (screen_x, screen_y) = (self.to_screen(pos))
        dx = 0
        dy = 0
        if dir == 'North':
            dy = -0.2
        if dir == 'South':
            dy = 0.2
        if dir == 'East':
            dx = 0.2
        if dir == 'West':
            dx = -0.2
        moveCircle(eyes[0], (screen_x + self.gridSize * GHOST_SIZE * (-0.3 + dx / 1.5), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy / 1.5)), self.gridSize * GHOST_SIZE * 0.2)
        moveCircle(eyes[1], (screen_x + self.gridSize * GHOST_SIZE * (0.3 + dx / 1.5), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy / 1.5)), self.gridSize * GHOST_SIZE * 0.2)
        moveCircle(eyes[2], (screen_x + self.gridSize * GHOST_SIZE * (-0.3 + dx), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy)), self.gridSize * GHOST_SIZE * 0.08)
        moveCircle(eyes[3], (screen_x + self.gridSize * GHOST_SIZE * (0.3 + dx), screen_y - self.gridSize * GHOST_SIZE * (0.3 - dy)), self.gridSize * GHOST_SIZE * 0.08)

    def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
        # Translate the whole image by the screen-space delta, then recolor
        # the body (scared state may have changed) and reposition the eyes.
        old_x, old_y = self.to_screen(self.getPosition(prevGhost))
        new_x, new_y = self.to_screen(self.getPosition(ghost))
        delta = new_x - old_x, new_y - old_y
        for ghostImagePart in ghostImageParts:
            move_by(ghostImagePart, delta)
        refresh()
        if ghost.scaredTimer > 0:
            color = SCARED_COLOR
        else:
            color = GHOST_COLORS[ghostIndex]
        edit(ghostImageParts[0], ('fill', color), ('outline', color))
        self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
        refresh()

    def getPosition(self, agentState):
        # Agents with no configuration (e.g. just eaten in capture mode)
        # are parked far off-screen.
        if agentState.configuration == None: return (-1000, -1000)
        return agentState.getPosition()

    def getDirection(self, agentState):
        if agentState.configuration == None: return Directions.STOP
        return agentState.configuration.getDirection()

    def finish(self):
        end_graphics()

    def to_screen(self, point):
        # Grid coordinates -> pixel coordinates (y axis flipped, one-cell
        # margin on the left).
        (x, y) = point
        # y = self.height - y
        x = (x + 1) * self.gridSize
        y = (self.height - y) * self.gridSize
        return (x, y)

    # Fixes some TK issue with off-center circles
    def to_screen2(self, point):
        (x, y) = point
        # y = self.height - y
        x = (x + 1) * self.gridSize
        y = (self.height - y) * self.gridSize
        return (x, y)

    def drawWalls(self, wallMatrix):
        # Each wall cell is drawn as four quadrants; the shape of each
        # quadrant (inner corner, straight edge, outer corner) depends on
        # which of the eight neighbouring cells are also walls.
        wallColor = WALL_COLOR
        for xNum, x in enumerate(wallMatrix):
            if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
            if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
            for yNum, cell in enumerate(x):
                if cell:  # There's a wall here
                    pos = (xNum, yNum)
                    screen = self.to_screen(pos)
                    screen2 = self.to_screen2(pos)
                    # draw each quadrant of the square based on adjacent walls
                    wIsWall = self.isWall(xNum - 1, yNum, wallMatrix)
                    eIsWall = self.isWall(xNum + 1, yNum, wallMatrix)
                    nIsWall = self.isWall(xNum, yNum + 1, wallMatrix)
                    sIsWall = self.isWall(xNum, yNum - 1, wallMatrix)
                    nwIsWall = self.isWall(xNum - 1, yNum + 1, wallMatrix)
                    swIsWall = self.isWall(xNum - 1, yNum - 1, wallMatrix)
                    neIsWall = self.isWall(xNum + 1, yNum + 1, wallMatrix)
                    seIsWall = self.isWall(xNum + 1, yNum - 1, wallMatrix)
                    # NE quadrant
                    if (not nIsWall) and (not eIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0, 91), 'arc')
                    if (nIsWall) and (not eIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize * WALL_RADIUS, 0)), add(screen, (self.gridSize * WALL_RADIUS, self.gridSize * (-0.5) - 1)), wallColor)
                    if (not nIsWall) and (eIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize * (-1) * WALL_RADIUS)), add(screen, (self.gridSize * 0.5 + 1, self.gridSize * (-1) * WALL_RADIUS)), wallColor)
                    if (nIsWall) and (eIsWall) and (not neIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize * 2 * WALL_RADIUS, self.gridSize * (-2) * WALL_RADIUS)), WALL_RADIUS * self.gridSize - 1, wallColor, wallColor, (180, 271), 'arc')
                        line(add(screen, (self.gridSize * 2 * WALL_RADIUS - 1, self.gridSize * (-1) * WALL_RADIUS)), add(screen, (self.gridSize * 0.5 + 1, self.gridSize * (-1) * WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize * WALL_RADIUS, self.gridSize * (-2) * WALL_RADIUS + 1)), add(screen, (self.gridSize * WALL_RADIUS, self.gridSize * (-0.5))), wallColor)
                    # NW quadrant
                    if (not nIsWall) and (not wIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90, 181), 'arc')
                    if (nIsWall) and (not wIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize * (-1) * WALL_RADIUS, 0)), add(screen, (self.gridSize * (-1) * WALL_RADIUS, self.gridSize * (-0.5) - 1)), wallColor)
                    if (not nIsWall) and (wIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize * (-1) * WALL_RADIUS)), add(screen, (self.gridSize * (-0.5) - 1, self.gridSize * (-1) * WALL_RADIUS)), wallColor)
                    if (nIsWall) and (wIsWall) and (not nwIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize * (-2) * WALL_RADIUS, self.gridSize * (-2) * WALL_RADIUS)), WALL_RADIUS * self.gridSize - 1, wallColor, wallColor, (270, 361), 'arc')
                        line(add(screen, (self.gridSize * (-2) * WALL_RADIUS + 1, self.gridSize * (-1) * WALL_RADIUS)), add(screen, (self.gridSize * (-0.5), self.gridSize * (-1) * WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize * (-1) * WALL_RADIUS, self.gridSize * (-2) * WALL_RADIUS + 1)), add(screen, (self.gridSize * (-1) * WALL_RADIUS, self.gridSize * (-0.5))), wallColor)
                    # SE quadrant
                    if (not sIsWall) and (not eIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270, 361), 'arc')
                    if (sIsWall) and (not eIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize * WALL_RADIUS, 0)), add(screen, (self.gridSize * WALL_RADIUS, self.gridSize * (0.5) + 1)), wallColor)
                    if (not sIsWall) and (eIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize * (1) * WALL_RADIUS)), add(screen, (self.gridSize * 0.5 + 1, self.gridSize * (1) * WALL_RADIUS)), wallColor)
                    if (sIsWall) and (eIsWall) and (not seIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize * 2 * WALL_RADIUS, self.gridSize * (2) * WALL_RADIUS)), WALL_RADIUS * self.gridSize - 1, wallColor, wallColor, (90, 181), 'arc')
                        line(add(screen, (self.gridSize * 2 * WALL_RADIUS - 1, self.gridSize * (1) * WALL_RADIUS)), add(screen, (self.gridSize * 0.5, self.gridSize * (1) * WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize * WALL_RADIUS, self.gridSize * (2) * WALL_RADIUS - 1)), add(screen, (self.gridSize * WALL_RADIUS, self.gridSize * (0.5))), wallColor)
                    # SW quadrant
                    if (not sIsWall) and (not wIsWall):
                        # inner circle
                        circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180, 271), 'arc')
                    if (sIsWall) and (not wIsWall):
                        # vertical line
                        line(add(screen, (self.gridSize * (-1) * WALL_RADIUS, 0)), add(screen, (self.gridSize * (-1) * WALL_RADIUS, self.gridSize * (0.5) + 1)), wallColor)
                    if (not sIsWall) and (wIsWall):
                        # horizontal line
                        line(add(screen, (0, self.gridSize * (1) * WALL_RADIUS)), add(screen, (self.gridSize * (-0.5) - 1, self.gridSize * (1) * WALL_RADIUS)), wallColor)
                    if (sIsWall) and (wIsWall) and (not swIsWall):
                        # outer circle
                        circle(add(screen2, (self.gridSize * (-2) * WALL_RADIUS, self.gridSize * (2) * WALL_RADIUS)), WALL_RADIUS * self.gridSize - 1, wallColor, wallColor, (0, 91), 'arc')
                        line(add(screen, (self.gridSize * (-2) * WALL_RADIUS + 1, self.gridSize * (1) * WALL_RADIUS)), add(screen, (self.gridSize * (-0.5), self.gridSize * (1) * WALL_RADIUS)), wallColor)
                        line(add(screen, (self.gridSize * (-1) * WALL_RADIUS, self.gridSize * (2) * WALL_RADIUS - 1)), add(screen, (self.gridSize * (-1) * WALL_RADIUS, self.gridSize * (0.5))), wallColor)

    def isWall(self, x, y, walls):
        # Out-of-bounds cells count as open space.
        if x < 0 or y < 0:
            return False
        if x >= walls.width or y >= walls.height:
            return False
        return walls[x][y]

    def drawFood(self, foodMatrix):
        # Returns a grid-shaped list of circle handles (None where no food)
        # so removeFood() can index by cell.
        foodImages = []
        color = FOOD_COLOR
        for xNum, x in enumerate(foodMatrix):
            if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0]
            if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1]
            imageRow = []
            foodImages.append(imageRow)
            for yNum, cell in enumerate(x):
                if cell:  # There's food here
                    screen = self.to_screen((xNum, yNum))
                    dot = circle(screen,
                                 FOOD_SIZE * self.gridSize,
                                 outlineColor=color, fillColor=color,
                                 width=1)
                    imageRow.append(dot)
                else:
                    imageRow.append(None)
        return foodImages

    def drawCapsules(self, capsules):
        # Keyed by capsule position so removeCapsule() can look them up.
        capsuleImages = {}
        for capsule in capsules:
            (screen_x, screen_y) = self.to_screen(capsule)
            dot = circle((screen_x, screen_y),
                         CAPSULE_SIZE * self.gridSize,
                         outlineColor=CAPSULE_COLOR,
                         fillColor=CAPSULE_COLOR,
                         width=1)
            capsuleImages[capsule] = dot
        return capsuleImages

    def removeFood(self, cell, foodImages):
        x, y = cell
        remove_from_screen(foodImages[x][y])

    def removeCapsule(self, cell, capsuleImages):
        x, y = cell
        remove_from_screen(capsuleImages[(x, y)])

    def drawExpandedCells(self, cells):
        """
        Draws an overlay of expanded grid positions for search agents
        """
        n = float(len(cells))
        baseColor = [1.0, 0.0, 0.0]
        self.clearExpandedCells()
        self.expandedCells = []
        for k, cell in enumerate(cells):
            screenPos = self.to_screen(cell)
            # Earlier-expanded cells are brighter; later ones fade out.
            cellColor = formatColor(*[(n - k) * c * .5 / n + .25 for c in baseColor])
            block = square(screenPos,
                           0.5 * self.gridSize,
                           color=cellColor,
                           filled=1, behind=2)
            self.expandedCells.append(block)
            if self.frameTime < 0:
                refresh()

    def clearExpandedCells(self):
        if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
            for cell in self.expandedCells:
                remove_from_screen(cell)

    def updateDistributions(self, distributions):
        "Draws an agent's belief distributions"
        if self.distributionImages == None:
            self.drawDistributions(self.previousState)
        for x in range(len(self.distributionImages)):
            for y in range(len(self.distributionImages[0])):
                image = self.distributionImages[x][y]
                weights = [dist[ (x, y) ] for dist in distributions]
                if sum(weights) != 0:
                    pass
                # Fog of war
                # Blend each ghost's color into the cell, scaled by its
                # belief weight (the ** .3 exponent boosts small weights
                # so faint beliefs are still visible).
                color = [0.0, 0.0, 0.0]
                colors = GHOST_VEC_COLORS[1:]  # With Pacman
                if self.capture: colors = GHOST_VEC_COLORS
                for weight, gcolor in zip(weights, colors):
                    color = [min(1.0, c + 0.95 * g * weight ** .3) for c, g in zip(color, gcolor)]
                changeColor(image, formatColor(*color))
        refresh()
class FirstPersonPacmanGraphics(PacmanGraphics):
    """Display variant for the partially observable game: ghosts can be
    hidden unless the state marks them visible (``showGhosts``)."""

    def __init__(self, zoom=1.0, showGhosts=True, capture=False, frameTime=0):
        PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
        self.showGhosts = showGhosts
        self.capture = capture

    def initialize(self, state, isBlue=False):
        self.isBlue = isBlue
        PacmanGraphics.startGraphics(self, state)
        # Initialize distribution images
        walls = state.layout.walls
        dist = []
        self.layout = state.layout
        # Draw the rest
        self.distributionImages = None  # initialize lazily
        self.drawStaticObjects(state)
        self.drawAgentObjects(state)
        # Information
        self.previousState = state

    def lookAhead(self, config, state):
        # NOTE(review): when moving, this redraws visible ghosts and nulls
        # out the rest; the leading ``pass`` looks vestigial — confirm the
        # drawing statements are intentionally inside the else branch.
        if config.getDirection() == 'Stop':
            return
        else:
            pass
            # Draw relevant ghosts
            allGhosts = state.getGhostStates()
            visibleGhosts = state.getVisibleGhosts()
            for i, ghost in enumerate(allGhosts):
                if ghost in visibleGhosts:
                    self.drawGhost(ghost, i)
                else:
                    self.currentGhostImages[i] = None

    def getGhostColor(self, ghost, ghostIndex):
        # First-person mode never shows the scared (white) color.
        return GHOST_COLORS[ghostIndex]

    def getPosition(self, ghostState):
        # Hide non-visible ghosts (above the bottom row) far off-screen.
        if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
            return (-1000, -1000)
        else:
            return PacmanGraphics.getPosition(self, ghostState)
def add(x, y):
    """Component-wise sum of the first two components of ``x`` and ``y``
    (screen points/offsets), returned as a tuple."""
    first = x[0] + y[0]
    second = x[1] + y[1]
    return (first, second)
# Saving graphical output
# -----------------------
# Note: to make an animated gif from this postscript output, try the command:
# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif
# convert is part of imagemagick (freeware)
SAVE_POSTSCRIPT = False
POSTSCRIPT_OUTPUT_DIR = 'frames'
FRAME_NUMBER = 0
import os
def saveFrame():
    "Saves the current graphical output as a postscript file"
    # No-op unless SAVE_POSTSCRIPT has been switched on.  Each call dumps
    # the current canvas to POSTSCRIPT_OUTPUT_DIR/frame_<n>.ps with a
    # monotonically increasing frame counter (see the convert command in
    # the comment above for turning the frames into an animated gif).
    global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
    if not SAVE_POSTSCRIPT: return
    if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR)
    name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
    FRAME_NUMBER += 1
    writePostscript(name)  # writes the current canvas
| [
"burkhart.joshua@gmail.com"
] | burkhart.joshua@gmail.com |
8f56751503b8046acd00b9acbea5a38ce489adf4 | a24a02af6809ec58eadbd94238c59f463a094022 | /tests/test_dafsa.py | 335c85f098e609dbb2ea9e093ad88d7108730bdc | [
"MIT"
] | permissive | tresoldi/dafsa | d7302e6760cc81ede7c61db07c803604db5a033f | 6feda5b2142142e973cd9fb0dd986f6bd329cb45 | refs/heads/master | 2023-03-09T13:17:18.169871 | 2023-02-18T08:16:39 | 2023-02-18T08:16:39 | 221,056,792 | 21 | 2 | MIT | 2023-02-18T08:16:08 | 2019-11-11T19:42:41 | Python | UTF-8 | Python | false | false | 6,191 | py | """
test_dafsa
==========
Tests for the `dafsa` package.
"""
# Import Python libraries
import sys
import tempfile
import unittest
# Import the library itself
import dafsa
def test_trigger():
    """Sanity check that the test suite is collected and executed at all."""
    one = 1
    assert one == one
def test_dummy():
    """The library's placeholder entry point must return 42."""
    result = dafsa.dummy()
    assert result == 42
OLD_TEST = """
class TestNode(unittest.TestCase):
def test_node(self):
# Missing ID
with self.assertRaises(TypeError):
node = dafsa.dafsa.DAFSANode()
# Create nodes for testing
node_a = dafsa.dafsa.DAFSANode(0)
node_b = dafsa.dafsa.DAFSANode(1)
node_c = dafsa.dafsa.DAFSANode(13)
node_d = dafsa.dafsa.DAFSANode(14)
node_b.final = True
node_c.edges["x"] = dafsa.dafsa.DAFSAEdge(node_b, 2)
node_d.edges["x"] = dafsa.dafsa.DAFSAEdge(node_b, 1)
# __str__ and __repr__ assertions
if not str(node_a) == "":
raise AssertionError()
if not str(node_b) == "":
raise AssertionError()
if not str(node_c) == "x|1":
raise AssertionError()
if not str(node_d) == "x|1":
raise AssertionError()
if not repr(node_a) == "0()":
raise AssertionError
if not repr(node_b) == "F()":
raise AssertionError
if not repr(node_c) == "n(#1/0:<x>/2)":
raise AssertionError
if not repr(node_d) == "n(#1/0:<x>/1)":
raise AssertionError
# __eq__ assertions
if node_a == node_b:
raise AssertionError
if not node_c == node_d:
raise AssertionError
if not node_a != node_c:
raise AssertionError
# __gt__ assertions
if not node_a < node_c:
raise AssertionError
if not node_d > node_b:
raise AssertionError
# __hash__ assertions, follow _str__ for now
if not hash(node_a) == hash(node_b):
raise AssertionError
if not hash(node_c) == hash(node_d):
raise AssertionError
if not hash(node_a) != hash(node_c):
raise AssertionError
# repr_hash
assert node_a.repr_hash() != node_b.repr_hash()
class TestEdge(unittest.TestCase):
def test_edge(self):
# Missing node
with self.assertRaises(TypeError):
edge_a = dafsa.dafsa.DAFSAEdge()
# Wrong type
with self.assertRaises(TypeError):
edge_a = dafsa.dafsa.DAFSAEdge(1)
# Create nodes for testing
node_a = dafsa.dafsa.DAFSANode(15)
node_a.final = True
node_b = dafsa.dafsa.DAFSANode(16)
# Create edges
edge_a = dafsa.dafsa.DAFSAEdge(node_a)
edge_b = dafsa.dafsa.DAFSAEdge(node_a, 2)
edge_c = dafsa.dafsa.DAFSAEdge(node_b)
# __str__ assertions
if not str(edge_a) == "{node_id: 15, weight: 0}":
raise AssertionError
if not str(edge_b) == "{node_id: 15, weight: 2}":
raise AssertionError
if not str(edge_c) == "{node_id: 16, weight: 0}":
raise AssertionError
# __repr__ assertions
assert repr(edge_a) == "{node: <F()>, weight: 0}"
assert repr(edge_b) == "{node: <F()>, weight: 2}"
assert repr(edge_c) == "{node: <n()>, weight: 0}"
# hashes
assert hash(edge_a) != edge_a.repr_hash()
assert hash(edge_b) != edge_b.repr_hash()
assert hash(edge_c) != edge_c.repr_hash()
class TestDAFSA(unittest.TestCase):
def test_hardcoded(self):
seqs = [
"tap",
"taps",
"top",
"tops",
"dib",
"dibs",
"tapping",
"dibbing",
]
# build object, without and with joining
dafsa_obj_a = dafsa.DAFSA(seqs)
dafsa_obj_b = dafsa.DAFSA(seqs, condense=True)
def test_full_test(self):
# Load strings from file
filename = dafsa.utils.RESOURCE_DIR / "ciura.txt"
with open(filename.as_posix()) as handler:
strings = [line.strip() for line in handler]
# build object
dafsa_obj_a = dafsa.DAFSA(strings)
dafsa_obj_b = dafsa.DAFSA(strings, join_trans=True)
# don't print
text = str(dafsa_obj_a)
text = str(dafsa_obj_b)
# simple checks
assert dafsa_obj_a.lookup("den") is None
assert dafsa_obj_b.lookup("den") is None
assert dafsa_obj_a.lookup("deny") is not None
assert dafsa_obj_b.lookup("deny") is not None
assert dafsa_obj_a.lookup("dafsa") is None
assert dafsa_obj_b.lookup("dafsa") is None
def test_to_figure(self):
# Load strings from file
filename = dafsa.utils.RESOURCE_DIR / "ciura.txt"
with open(filename.as_posix()) as handler:
strings = [line.strip() for line in handler]
# build object
dafsa_obj = dafsa.DAFSA(strings)
# Get a temporary filename (on Unix, it can be reused)
handler = tempfile.NamedTemporaryFile()
output_filename = "%s.png" % handler.name
handler.close()
# Test
# TODO: revert once fixed
# dafsa_obj.write_figure(output_filename)
def test_to_graph(self):
# Load strings from file
filename = dafsa.utils.RESOURCE_DIR / "ciura.txt"
with open(filename.as_posix()) as handler:
strings = [line.strip() for line in handler]
# build object
dafsa_obj = dafsa.DAFSA(strings)
# Run function
# TODO: assert results
dafsa_obj.to_graph()
def test_to_gml(self):
# Load strings from file
filename = dafsa.utils.RESOURCE_DIR / "ciura.txt"
with open(filename.as_posix()) as handler:
strings = [line.strip() for line in handler]
# build object
dafsa_obj = dafsa.DAFSA(strings)
# Run function
# TODO: assert results
# Get a temporary filename (on Unix, it can be reused)
handler = tempfile.NamedTemporaryFile()
output_filename = "%s.png" % handler.name
handler.close()
dafsa_obj.write_gml(output_filename)
"""
| [
"tresoldi@shh.mpg.de"
] | tresoldi@shh.mpg.de |
d30358f0846d56cb0ee45b769401deee0df440a7 | 528097895ef768469f8bc3f6cdbe95b83fcbfedb | /allauth1/urls.py | 92992741695bba24837e5e0aed756233be62fac2 | [] | no_license | SHUKLA123/Neighbourhood | ce705b65986d184ab9c7eff366dad66a4e0cb2fb | 8c461cf26957f7860ba1fc330174c75c2c669265 | refs/heads/master | 2022-04-04T07:34:28.190397 | 2019-12-26T18:33:53 | 2019-12-26T18:33:53 | 230,300,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,624 | py | """allauth1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from testapp import views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import include
#For Registration and signup
from django.contrib.auth import views as auth_views
from users import views
from bussiness import views as as_views
from comment import views as as_views1
#comment
from django.views.generic import TemplateView
urlpatterns = [
    url(r'^friendship/', include('friendship.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'home/', views.home, name='home'),
    url(r'tweet/', views.tweet_view, name='tweet'),
    url(r'^accounts/', include('allauth.urls')),
    # Registration, login and logout
    url(r'^$', views.register, name='register'),
    url(r'login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    url(r'logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
    url(r'password-reset/', auth_views.PasswordResetView.as_view(template_name='password_reset.html'), name='password_reset'),
    url(r'^password_reset/done/', auth_views.PasswordResetDoneView.as_view(template_name='password_reset_done.html'), name='password_reset_done'),
    # BUG FIX: url() takes a regular expression, but this route previously
    # used the path()-style '<uidb64>/<token>/' converter syntax, which a
    # regex never matches — so every confirmation link from the password
    # reset e-mail returned 404.  Replaced with the standard Django 1.11
    # named-group pattern for uidb64/token.
    url(r'^password-reset-confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'),
        name='password_reset_confirm'),
    # Search bar
    url(r'^search/$', views.search, name='search'),
    url(r'^user/follow/$', views.user_follow, name='user_follow'),
    # User profile (NOTE(review): the '.' before \w+ looks accidental —
    # kept to preserve current matching; verify against profile links)
    url(r'^user/(?P<username>.\w+)/$', views.ProfileView),
    url(r'^people', views.follow_people, name='people'),
    url(r'^post/', views.post_list, name='post'),
    url(r'^like/$', views.image_like, name='like'),
    # Group chat
    url('chat/', include('chat.urls')),
    # Business listings
    url(r'^buss_name/', as_views.bussiness_form, name='buss_make'),
    url(r'^buss/', as_views.display, name='buss'),
    url(r'^nearby/', as_views.nearby, name='nearby'),
    url(r'^user/(?P<user_id>\d+)/$', as_views.request_user),
    # Neighbourhood news / problems (comments app)
    url(r'^news/(?P<pincode>\d+)/$', as_views1.news),
    url(r'^problem/$', as_views1.write_problem, name='problem'),
    url(r'^preply/$', as_views1.write_reply, name='problem-reply'),
    # Tweet comments
    url(r'^comment/$', views.write_comment, name='comment'),
    url(r'^reply/$', views.write_reply, name='reply'),
    # Map
    url(r'^map/', views.map),
    # Address update
    url(r'^address_update/', views.AddressUpdateView, name='address_update'),
    # Events
    url(r'^event-like/', views.event_like, name='eventlike'),
    url(r'^event/', views.event_create, name='event'),
]

# Serve uploaded media through Django only while DEBUG is on.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"noreply@github.com"
] | SHUKLA123.noreply@github.com |
13df24ff882324e906c7419f48b270497bbc0ed9 | e80169e5d4243034b3d2c1bc346c41b024151644 | /cut_CSV.py | ccb7fbb0654a2e5ffa2192cbfc3969650bdc2754 | [] | no_license | sidownbusdriver/UHI-Scripts | 55476bbdd11e4dbff5547f63c2a842564dea439c | f472bd4d0e99ed81a8e880a0f67742b7abbf1442 | refs/heads/master | 2020-12-02T22:34:44.642490 | 2017-07-03T21:46:30 | 2017-07-03T21:46:30 | 96,152,188 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,469 | py | import numpy as np
import matplotlib as mpl
from scipy import ndimage
import matplotlib.pyplot as plt
import datetime as dt
import csv
# Call in the file
f1 = '/data/ahardin/New_York/NewYork_2007.csv'
data = np.recfromtxt(f1, unpack=True, dtype=None, names=True, delimiter=',')
#data1 = np.genfromtxt(StringIO(f1), skip_header=1, delimiter=',')
# Variables
date = data['Observation_Date']
station = data['Station_ID']
lat = data['Latitude']
lon = data['Longitude']
elev = data['Elevation']
temp = data['Outdoor_Temperature']
rh = data['Humidity']
pres = data['Pressure']
ws = data['Wind_Speed']
wd = data['Wind_Direction']
avg_ws = data['Average_Wind_Speed']
avg_wd = data['Average_Wind_Direction']
temp_rate = data['Out__Temp__Rate']
rain = data['Daily_Rainfall']
rain_rate = data['Rainfall_Rate']
in_temp = data['Indoor_Temperature']
#print date[8]
# Chamnge dates to datetime object
dates = [dt.datetime.strptime(t, '%m/%d/%Y %I:%M:%S %p') for t in date]
#print dates[8]
begin_date = dt.datetime(2007, 5, 1, 0, 0, 0)
end_date = dt.datetime(2007, 10, 1, 0, 0, 0)
#begin_date = dt.datetime.strptime('2008/5/1 12:00:00 AM', '%Y/%m/%d %I:%M:%S %p')
#end_date = dt.datetime.strptime('2008/10/1 12:00:00 AM', '%Y/%m/%d %I:%M:%S %p')
valid_date = []
valid_station = []
valid_lat = []
valid_lon = []
valid_elev = []
valid_temp = []
valid_rh = []
valid_pres = []
valid_ws = []
valid_wd = []
valid_avg_ws = []
valid_avg_wd = []
valid_temp_rate = []
valid_rain = []
valid_rain_rate = []
valid_in_temp = []
for i in range(len(dates)):
if dates[i] >= begin_date and dates[i] <= end_date:
valid_date.append(date[i])
valid_station.append(station[i])
valid_lat.append(lat[i])
valid_lon.append(lon[i])
valid_elev.append(elev[i])
valid_temp.append(temp[i])
valid_rh.append(rh[i])
valid_pres.append(pres[i])
valid_ws.append(ws[i])
valid_wd.append(wd[i])
valid_avg_ws.append(avg_ws[i])
valid_avg_wd.append(avg_wd[i])
valid_temp_rate.append(temp_rate[i])
valid_rain.append(rain[i])
valid_rain_rate.append(rain_rate[i])
valid_in_temp.append(in_temp[i])
#valid_date = np.vstack(valid_date)
#print np.shape(valid_rh)
# Put valid data into cloumns
valid_data = np.column_stack(( valid_station, valid_lat, valid_lon, valid_elev, valid_date, valid_temp, valid_rh, valid_pres, valid_ws, valid_wd, valid_avg_ws, valid_avg_wd, valid_temp_rate, valid_rain, valid_rain_rate, valid_in_temp))
#valid_data = np.array([valid_station, valid_lat, valid_lon, valid_elev,valid_date, valid_temp, valid_rh, valid_pres, valid_ws, valid_wd, valid_avg_ws, valid_avg_wd, valid_temp_rate, valid_rain, valid_rain_rate, valid_in_temp])
#print valid_data[0,:]
#print np.shape(valid_data)
# Save valid data as a file
head = ['Station_ID','Latitude','Longitude','Elevation','Observation_Date','Outdoor_Temperature','Humidity','Pressure','Wind_Speed','Wind_Direction','Average_Wind_Speed','Average_Wind_Direction','Out_Temp_Rate','Daily_Rainfall','Rainfall_Rate','Indoor_Temperature']
np.savetxt('/data/ahardin/New_York/cut_2007_NewYork.csv', valid_data, fmt='%s', header='Station_ID,Latitude,Longitude,Elevation,Observation_Date,Outdoor_Temperature,Humidity,Pressure,Wind_Speed,Wind_Direction,Average_Wind_Speed,Average_Wind_Direction,Out_Temp_Rate,Daily_Rainfall,Rainfall_Rate,Indoor_Temperature', delimiter=',')
'''
# Delete dates outside of threshold
#data = data[~(dates.date[:] >= begin_date.date)]
#data = data[~(dates.date[:] < end_date.date)]
#print data
# Save as a CSV file
head = ['Station_ID','Latitude','Longitude','Elevation','Observation_Date','Outdoor_Temperature','Humidity','Pressure','Wind_Speed','Wind_Direction','Average_Wind_Speed','Average_Wind_Direction','Out__Temp__Rate','Humidity_Rate','Pressure_Rate','Hourly_Gust','Daily_Rainfall','Rainfall_Rate','Auxillary_Temperature','Aux__Tmp_Rate','Indoor_Temperature','Ind__Tmp__Rate,Light','Light_Rate']
#np.savetxt('cut_2008_Boston.csv', valid_data, header=head, delimiter=',')
with open('cut_2008_Boston.csv', 'wt') as g:
writer = csv.writer(g)
writer.writerow( ('Station_ID','Latitude','Longitude','Elevation','Observation_Date','Outdoor_Temperature','Humidity','Pressure','Wind_Speed','Wind_Direction','Average_Wind_Speed','Average_Wind_Direction','Out__Temp__Rate','Humidity_Rate','Pressure_Rate','Hourly_Gust','Daily_Rainfall','Rainfall_Rate','Auxillary_Temperature','Aux__Tmp_Rate','Indoor_Temperature','Ind__Tmp__Rate,Light','Light_Rate') )
writer.writerow(a)
'''
| [
"noreply@github.com"
] | sidownbusdriver.noreply@github.com |
5215d7aee1f79545124b261740052b0b5e5af4fb | f44c0ae69de99da4ca248d7ec00f6a56d84f649d | /src/parameters_combinator/ListOfParams.py | 6446198df70ded58d37de5c8a074601cd06bedf7 | [
"BSD-3-Clause"
] | permissive | birlrobotics/parameters_combinator | 641f7701e4b7429fb68be524de97e1eb2e514fa6 | 6f4f1ed838867c3d6b692b57d5895c18320394e9 | refs/heads/master | 2021-01-25T14:10:20.852853 | 2018-07-09T06:44:11 | 2018-07-09T06:44:11 | 123,659,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py |
class ListOfParams(list):
    """Marker subclass of ``list``.

    NOTE(review): carries no behavior of its own; presumably used so the
    parameters-combinator code can distinguish "a list of alternative
    parameter values" from an ordinary list via isinstance() -- confirm
    against the callers.
    """
    pass
| [
"sk.law.lsq@gmail.com"
] | sk.law.lsq@gmail.com |
56620c15d1d974188b6ff057d1009fc205b0c77b | 587e2fc104a484c60aa2fab01c3cdc1d2a330778 | /Cryptography/crypto.py | 5fd14d3a944506262fe0f82063a012388ab1ff82 | [] | no_license | tlittle2/Kattis-Solutions-Python | 7b3c379f9b394b8b944377c1329332e483ccfcf3 | 92ed9987b81e3276d2a03252177f1f45b98388e4 | refs/heads/main | 2022-10-10T07:51:33.431519 | 2022-09-30T22:29:12 | 2022-09-30T22:29:12 | 216,254,189 | 0 | 2 | null | 2022-09-30T18:27:47 | 2019-10-19T18:42:47 | Python | UTF-8 | Python | false | false | 735 | py | #!/usr/bin/env python3
from string import ascii_uppercase as upp
def main():
    """Read a message and a key from stdin and dump per-character
    Vigenere-style decryption steps (Kattis 'Cryptography' exercise).

    NOTE(review): this prints intermediate debugging output rather than a
    final decoded string, so the solution looks unfinished.
    """
    message = str(input())
    # Work on the message as a list of single characters.
    mLst = [str(i) for i in message]
    key = str(input())
    kLst = [str(i) for i in key]
    # Shift the message right by len(key): drop one char from the tail and
    # push the corresponding key char onto the front, walking the key backwards.
    for i in range(len(kLst)-1, -1, -1):
        mLst.pop(len(mLst)-1)
        mLst.insert(0,kLst[i])
    print("new word: {}".format(mLst))
    print(len(mLst))
    # For every character past the injected key, subtract the (cyclically
    # repeated) key letter's alphabet index from the message letter's index.
    for i in range(len(kLst), len(mLst)):
        print("{} {} ".format(i, mLst[i]))
        print("{} - {}".format(upp.index(mLst[i]), upp.index(kLst[i % len(kLst)])))
        # NOTE(review): `new` can be negative; upp[new] then wraps via
        # Python's negative indexing, which happens to act like mod 26 --
        # confirm that is the intended behavior.
        new = upp.index(mLst[i]) - upp.index(kLst[i % len(kLst)])
        print("new Letter: {}".format(upp[new]))
        print()


if __name__ == "__main__":
    main()
"noreply@github.com"
] | tlittle2.noreply@github.com |
f73f59d3cacd10432bbaf47dccbf81cc1630967d | 0a7bdc823de7a97571e7440cd8ac09a07d9fde04 | /site-packages/azure/storage/queueservice.py | f665fca3713260f70a187dd47e4234a5e988a263 | [] | no_license | jacalata/optimization | 0b7790975f8f1d18bc0ec01d3fb91c5a5926630d | 78ba79e6a557bd15b0166c68038b3f9ac0e4af3f | refs/heads/master | 2020-05-17T08:30:41.485057 | 2014-01-28T05:48:16 | 2014-01-28T05:48:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,714 | py | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import urllib2
from azure.storage import *
from azure.storage.storageclient import _StorageClient
from azure.storage import (_update_storage_queue_header)
from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
_dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class QueueService(_StorageClient):
    '''
    This is the main class managing queue resources.

    Every public method builds an HTTPRequest, signs it with the shared-key
    queue headers (_update_storage_queue_header) and performs it against the
    account's Queue service REST endpoint.

    account_name: your storage account name, required for all operations.
    account_key: your storage account key, required for all operations.
    '''

    def __init__(self, account_name=None, account_key=None, protocol='http', host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST):
        return super(QueueService, self).__init__(account_name, account_key, protocol, host_base, dev_host)

    def get_queue_service_properties(self, timeout=None):
        '''
        Gets the properties of a storage account's Queue Service, including Windows Azure
        Storage Analytics.

        timeout: Optional. The timeout parameter is expressed in seconds. For example, the
        		following value sets a timeout of 30 seconds for the request: timeout=30

        Returns a StorageServiceProperties instance parsed from the response.
        '''
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/?restype=service&comp=properties'
        request.query = [('timeout', _int_or_none(timeout))]
        # Rewrites path/query for the local development storage emulator when
        # self.use_local_storage is set.
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_response(response, StorageServiceProperties)

    def list_queues(self, prefix=None, marker=None, maxresults=None, include=None):
        '''
        Lists all of the queues in a given storage account.

        prefix: Optional. Filters the results to queues whose names begin with it.
        marker: Optional. Continuation token from a previous, truncated listing.
        maxresults: Optional. Maximum number of queues to return.
        include: Optional. Set to 'metadata' to return queue metadata as well.
        '''
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/?comp=list'
        request.query = [
            ('prefix', _str_or_none(prefix)),
            ('marker', _str_or_none(marker)),
            ('maxresults', _int_or_none(maxresults)),
            ('include', _str_or_none(include))
            ]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_enum_results_list(response, QueueEnumResults, "Queues", Queue)

    def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=False):
        '''
        Creates a queue under the given account.

        queue_name: name of the queue.
        x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
        		with the queue as metadata.
        fail_on_exist: specify whether to throw an exception when the queue exists.

        Returns True if the queue was created, False when it already existed
        and fail_on_exist is False.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + ''
        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        if not fail_on_exist:
            try:
                response = self._perform_request(request)
                # 204 No Content means the queue already existed with the
                # same metadata -- report "not created".
                if response.status == HTTP_RESPONSE_NO_CONTENT:
                    return False
                return True
            except WindowsAzureError as e:
                _dont_fail_on_exist(e)
                return False
        else:
            response = self._perform_request(request)
            if response.status == HTTP_RESPONSE_NO_CONTENT:
                # FIX: the original raised WindowsAzureConflictError(azure._ERROR_CONFLICT),
                # but no name `azure` is bound in this module (only names
                # imported *from* azure), so that line raised NameError
                # instead of the intended conflict error.
                raise WindowsAzureConflictError('Conflict: a queue with the same name already exists.')
            return True

    def delete_queue(self, queue_name, fail_not_exist=False):
        '''
        Permanently deletes the specified queue.

        queue_name: name of the queue.
        fail_not_exist: specify whether to throw an exception when the queue doesn't exist.

        Returns True on success, False when the queue did not exist and
        fail_not_exist is False.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'DELETE'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + ''
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        if not fail_not_exist:
            try:
                self._perform_request(request)
                return True
            except WindowsAzureError as e:
                _dont_fail_not_exist(e)
                return False
        else:
            self._perform_request(request)
            return True

    def get_queue_metadata(self, queue_name):
        '''
        Retrieves user-defined metadata and queue properties on the specified queue.
        Metadata is associated with the queue as name-value pairs.

        queue_name: name of the queue.

        Returns a dict of the 'x-ms-meta'-prefixed response headers.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '?comp=metadata'
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_response_for_dict_prefix(response, prefix='x-ms-meta')

    def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
        '''
        Sets user-defined metadata on the specified queue. Metadata is associated
        with the queue as name-value pairs.

        queue_name: name of the queue.
        x_ms_meta_name_values: Optional. A dict containing name-value pairs to associate
        		with the queue as metadata.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '?comp=metadata'
        request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

    def put_message(self, queue_name, message_text, visibilitytimeout=None, messagettl=None):
        '''
        Adds a new message to the back of the message queue. A visibility timeout can
        also be specified to make the message invisible until the visibility timeout
        expires. A message must be in a format that can be included in an XML request
        with UTF-8 encoding. The encoded message can be up to 64KB in size for versions
        2011-08-18 and newer, or 8KB in size for previous versions.

        queue_name: name of the queue.
        message_text: text of the message to enqueue; XML-escaped before sending.
        visibilitytimeout: Optional. If specified, the request must be made using an
        		x-ms-version of 2011-08-18 or newer.
        messagettl: Optional. Specifies the time-to-live interval for the message,
        		in seconds. The maximum time-to-live allowed is 7 days. If this parameter
        		is omitted, the default time-to-live is 7 days.
        '''
        _validate_not_none('queue_name', queue_name)
        _validate_not_none('message_text', message_text)
        request = HTTPRequest()
        request.method = 'POST'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '/messages'
        request.query = [
            ('visibilitytimeout', _str_or_none(visibilitytimeout)),
            ('messagettl', _str_or_none(messagettl))
            ]
        request.body = _get_request_body('<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \
</QueueMessage>')
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

    def get_messages(self, queue_name, numofmessages=None, visibilitytimeout=None):
        '''
        Retrieves one or more messages from the front of the queue.

        queue_name: name of the queue.
        numofmessages: Optional. A nonzero integer value that specifies the number of
        		messages to retrieve from the queue, up to a maximum of 32. If fewer are
        		visible, the visible messages are returned. By default, a single message
        		is retrieved from the queue with this operation.
        visibilitytimeout: Specifies the new visibility timeout value, in
        		seconds, relative to server time. The new value must be larger than or
        		equal to 1 second, and cannot be larger than 7 days, or larger than 2
        		hours on REST protocol versions prior to version 2011-08-18. The visibility
        		timeout of a message can be set to a value later than the expiry time.

        Returns a QueueMessagesList parsed from the response.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '/messages'
        request.query = [
            ('numofmessages', _str_or_none(numofmessages)),
            ('visibilitytimeout', _str_or_none(visibilitytimeout))
            ]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_response(response, QueueMessagesList)

    def peek_messages(self, queue_name, numofmessages=None):
        '''
        Retrieves one or more messages from the front of the queue, but does not alter
        the visibility of the message.

        queue_name: name of the queue.
        numofmessages: Optional. A nonzero integer value that specifies the number of
        		messages to peek from the queue, up to a maximum of 32. By default,
        		a single message is peeked from the queue with this operation.

        Returns a QueueMessagesList parsed from the response.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'GET'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '/messages?peekonly=true'
        request.query = [('numofmessages', _str_or_none(numofmessages))]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_response(response, QueueMessagesList)

    def delete_message(self, queue_name, message_id, popreceipt):
        '''
        Deletes the specified message.

        queue_name: name of the queue.
        message_id: id of the message to delete.
        popreceipt: Required. A valid pop receipt value returned from an earlier call
        		to the Get Messages or Update Message operation.
        '''
        _validate_not_none('queue_name', queue_name)
        _validate_not_none('message_id', message_id)
        _validate_not_none('popreceipt', popreceipt)
        request = HTTPRequest()
        request.method = 'DELETE'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '/messages/' + _str(message_id) + ''
        request.query = [('popreceipt', _str_or_none(popreceipt))]
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

    def clear_messages(self, queue_name):
        '''
        Deletes all messages from the specified queue.

        queue_name: name of the queue.
        '''
        _validate_not_none('queue_name', queue_name)
        request = HTTPRequest()
        request.method = 'DELETE'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '/messages'
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

    def update_message(self, queue_name, message_id, message_text, popreceipt, visibilitytimeout):
        '''
        Updates the visibility timeout of a message. You can also use this
        operation to update the contents of a message.

        queue_name: name of the queue.
        message_id: id of the message to update.
        message_text: new text for the message; XML-escaped before sending.
        popreceipt: Required. A valid pop receipt value returned from an earlier call
        		to the Get Messages or Update Message operation.
        visibilitytimeout: Required. Specifies the new visibility timeout value, in
        		seconds, relative to server time. The new value must be larger than or
        		equal to 0, and cannot be larger than 7 days. The visibility timeout
        		of a message cannot be set to a value later than the expiry time. A
        		message can be updated until it has been deleted or has expired.

        Returns a dict with the 'x-ms-popreceipt' and 'x-ms-time-next-visible'
        response headers.
        '''
        _validate_not_none('queue_name', queue_name)
        _validate_not_none('message_id', message_id)
        _validate_not_none('message_text', message_text)
        _validate_not_none('popreceipt', popreceipt)
        _validate_not_none('visibilitytimeout', visibilitytimeout)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/' + _str(queue_name) + '/messages/' + _str(message_id) + ''
        request.query = [
            ('popreceipt', _str_or_none(popreceipt)),
            ('visibilitytimeout', _str_or_none(visibilitytimeout))
            ]
        request.body = _get_request_body('<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
    <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \
</QueueMessage>')
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)

        return _parse_response_for_dict_filter(response, filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])

    def set_queue_service_properties(self, storage_service_properties, timeout=None):
        '''
        Sets the properties of a storage account's Queue service, including Windows Azure
        Storage Analytics.

        storage_service_properties: a StorageServiceProperties object.
        timeout: Optional. The timeout parameter is expressed in seconds.
        '''
        _validate_not_none('storage_service_properties', storage_service_properties)
        request = HTTPRequest()
        request.method = 'PUT'
        request.host = self._get_host()
        request.path = '/?restype=service&comp=properties'
        request.query = [('timeout', _int_or_none(timeout))]
        request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
        request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
        request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
        response = self._perform_request(request)
| [
"jacalata@gmail.com"
] | jacalata@gmail.com |
5376359526eb1ac0de52a283b309692922b54864 | 74a01e6a22fe7c6b552e2ffb9f92d9671c54aa20 | /bpb/parser/pdf.py | fb7471eb62cbce5bdbd4260bce0c4ba579fa4d16 | [] | no_license | snagwuk/blog_post_bot_cli | 549805ba988c3753185111575ba759566c7ea17f | 29e6c6e9e7c48e5ad7c9b4dda26e56226c683290 | refs/heads/master | 2022-03-27T01:05:44.441712 | 2020-01-05T01:00:54 | 2020-01-05T01:00:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # modules for
import PyPDF2
import pprint
# Minimal PyPDF2 demo: open a PDF, report its page count, and grab page 0.
# NOTE(review): the file handle is never closed (use a `with` block), and
# pprint on the page object prints the object itself, not the extracted
# text -- that would require pageObj.extractText(). Confirm which was meant.
pdfFileObj = open('../data/test.pdf', 'rb')
# Reader over the opened binary stream.
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# Number of pages in the document.
print(pdfReader.numPages)
# First page (0-indexed).
pageObj = pdfReader.getPage(0)
# Dump the page object for inspection.
pprint.pprint(pageObj)
"pjt3591oo@gmail.com"
] | pjt3591oo@gmail.com |
bf18ccf06edc02e98f278157db148c8745dfb6ae | c56779d6ea4ae0043caa4b6cec88b35e326e8961 | /web_development/app.py | 82a61546e5909b3609c6f2285bf425e86b4005ca | [] | no_license | zxh2135645/BE223A | 40f7d3a099f34be8a29d8a43b058b5e7f522cac9 | 3c3cd6050b7a4d76fd23b22ac0553e3e4704b880 | refs/heads/master | 2021-09-14T06:53:17.122259 | 2018-05-09T03:33:15 | 2018-05-09T03:33:15 | 110,093,725 | 0 | 1 | null | 2017-12-05T00:41:55 | 2017-11-09T09:22:11 | Jupyter Notebook | UTF-8 | Python | false | false | 1,149 | py | # -*- coding: utf-8 -*-
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from pandas_datareader import data as web
from datetime import datetime as dt
# Single-page Dash app: a stock-ticker dropdown wired to a price graph.
app = dash.Dash('Hello World')

# Static layout: a dropdown of tickers plus an (initially empty) graph that
# the callback below fills in whenever the selection changes.
app.layout = html.Div([
    dcc.Dropdown(
        id='my-dropdown',
        options=[
            {'label': 'Coke', 'value': 'COKE'},
            {'label': 'Tesla', 'value': 'TSLA'},
            {'label': 'Apple', 'value': 'AAPL'}
        ],
        value='COKE'  # default selection on page load
    ),
    dcc.Graph(id='my-graph')
], style={'width': '500'})
@app.callback(Output('my-graph', 'figure'), [Input('my-dropdown', 'value')])
def update_graph(selected_dropdown_value):
    """Re-fetch closing prices for the selected ticker and rebuild the figure.

    Fired whenever the 'my-dropdown' value changes; returns a Plotly figure
    dict for the 'my-graph' component.
    """
    # NOTE(review): the Yahoo endpoint of pandas-datareader has been
    # repeatedly broken/deprecated -- confirm it still works where deployed.
    df = web.DataReader(
        selected_dropdown_value,
        'yahoo',
        dt(2017, 1, 1),  # fixed start of the plotted window
        dt.now(),
    )
    print(df)  # NOTE(review): leftover debug output, printed on every callback
    return {
        'data': [{
            'x': df.index,
            'y': df.Close
        }],
        'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}
    }
# Pull in the standard Dash demo stylesheet from codepen.
app.css.append_css({'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})

if __name__ == '__main__':
    # Development server only; use a production WSGI server for deployment.
    app.run_server()
| [
"zxh2135645@gmail.com"
] | zxh2135645@gmail.com |
69e64077be97c782e455563333f9f0aaafc67fca | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-ims/huaweicloudsdkims/v2/model/list_image_tags_response.py | 76a1f6343bbb44bb9fa8a53ef623e27886720b43 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListImageTagsResponse(SdkResponse):
    # NOTE: auto-generated SDK model (huaweicloudsdkims); keep hand edits
    # minimal so the file can be regenerated from the API definition.

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    # OpenAPI type of each model attribute.
    openapi_types = {
        'tags': 'list[ResourceTag]'
    }

    # JSON key used on the wire for each model attribute.
    attribute_map = {
        'tags': 'tags'
    }

    def __init__(self, tags=None):
        """ListImageTagsResponse - a model defined in huaweicloud sdk"""
        
        super(ListImageTagsResponse, self).__init__()

        self._tags = None
        self.discriminator = None

        if tags is not None:
            self.tags = tags

    @property
    def tags(self):
        """Gets the tags of this ListImageTagsResponse.

        Tag list.

        :return: The tags of this ListImageTagsResponse.
        :rtype: list[ResourceTag]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this ListImageTagsResponse.

        Tag list.

        :param tags: The tags of this ListImageTagsResponse.
        :type: list[ResourceTag]
        """
        self._tags = tags

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        # NOTE(review): simplejson is a third-party dependency; the stdlib
        # `json` module would also work here -- confirm before changing, as
        # other generated modules in this SDK use the same import.
        import simplejson as json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListImageTagsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
440db0c44f8330f6cdd323170112e02a5febbb91 | d3ebdc92a4766e5105278c5ce05e627ec78ef026 | /app/emaskjp/migrations/0004_auto_20200423_0932.py | 1271cf1f5d86a66443e25e4e7932dd5c65f46307 | [] | no_license | jun-JUNJUN/emaskjp | 5377fedcc5c8e7c61b8bb4640dab21f0862b530d | 27fbe408f2eb5e69b8cc85441901d429103bd774 | refs/heads/master | 2023-08-15T05:47:15.062899 | 2020-05-05T06:05:18 | 2020-05-05T06:05:18 | 266,265,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # Generated by Django 3.0.3 on 2020-04-23 09:32
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` on 2020-04-23. Do not hand-edit an
    # applied migration -- create a new one instead.

    dependencies = [
        ('emaskjp', '0003_auto_20200423_0905'),
    ]

    operations = [
        # entity_name: enforce a minimum length of 2 characters; the verbose
        # name is Japanese for "medical institution name".
        migrations.AlterField(
            model_name='entity',
            name='entity_name',
            field=models.CharField(max_length=200, validators=[django.core.validators.MinLengthValidator(2)], verbose_name='医療機関名'),
        ),
        # zip_code: allow blank, max 8 chars (presumably the Japanese
        # NNN-NNNN postal format -- confirm).
        migrations.AlterField(
            model_name='entity',
            name='zip_code',
            field=models.CharField(blank=True, max_length=8, verbose_name='zip_code'),
        ),
    ]
"donkun77jp@gmail.com"
] | donkun77jp@gmail.com |
46ee00e658ceb974d6e469f67b024421c60f50ec | 70695894ffa9abe7f7d56787159aa9c80e55b343 | /backend/backend/config.prod.py | a3d7accf529500f88825d4c8c259fb9743c6d8c3 | [] | no_license | cmihai/chat | afecb98dd3643289000c9887cc95f9eff8ad3098 | c3f8cebedaff9fc6922e8314b55f144d4ce0c034 | refs/heads/master | 2020-03-31T08:39:52.258866 | 2018-10-08T13:31:31 | 2018-10-08T13:31:37 | 152,067,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | DB_PATH = '/data/messages.db'
| [
"cmihai@users.noreply.github.com"
] | cmihai@users.noreply.github.com |
6d93b0cd78292a61ae919edfa5a15e96fa5f6f6a | c9697437c292df7fefd68559fdd9636066bdb2f1 | /dev/potentials/sinc_pulse_from_number_of_cycles.py | 0b1d64c026d7f66d3afbc925237681fad25c3cd4 | [] | no_license | JoshKarpel/ionization | ebdb387483a9bc3fdb52818ab8e897e562ffcc67 | 3056df523ee90147d262b0e8bfaaef6f2678ea11 | refs/heads/master | 2021-03-24T13:03:57.469388 | 2020-04-06T03:37:04 | 2020-04-06T03:37:04 | 62,348,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | #!/usr/bin/env python
import logging
import os
import numpy as np
import simulacra as si
import simulacra.units as u
FILE_NAME = os.path.splitext(os.path.basename(__file__))[0]
OUT_DIR = os.path.join(os.getcwd(), "out", FILE_NAME)
LOGMAN = si.utils.LogManager("simulacra", "ionization", stdout_level=logging.DEBUG)
PLOT_KWARGS = dict(target_dir=OUT_DIR, img_format="png", fig_dpi_scale=6)
if __name__ == "__main__":
    # FIX: `ion` was used below but never imported anywhere in this script,
    # so it crashed with a NameError on the first pulse construction.
    # NOTE(review): assumes the project package is importable as
    # `ionization` (the LogManager above is configured for that logger
    # name) -- confirm the package/alias.
    import ionization as ion

    with LOGMAN as logger:
        # Build one sinc pulse per requested carrier-cycle count, all with a
        # 100 asec pulse width and a pi/2 carrier phase.
        number_of_cycles = [0.51, 1, 2, 3]
        nc_pulses = [
            (
                nc,
                ion.potentials.SincPulse.from_number_of_cycles(
                    pulse_width=100 * u.asec, number_of_cycles=nc, phase=u.pi / 2
                ),
            )
            for nc in number_of_cycles
        ]

        # note that you actually get twice as many carrier cycles as you specify
        # in the "center" because the center of the sinc is twice as wide as a
        # pulse width (it's double-sided)
        tb = 1  # plot window half-width, in units of the pulse width
        for nc, pulse in nc_pulses:
            print(pulse.info())

            times = np.linspace(-tb * pulse.pulse_width, tb * pulse.pulse_width, 10000)

            # Overlay the bare carrier wave against the full pulse field so
            # the cycle count is visible by eye.
            si.vis.xy_plot(
                f"Nc={nc}",
                times,
                pulse.amplitude * np.cos((pulse.omega_carrier * times) + pulse.phase),
                pulse.get_electric_field_amplitude(times),
                line_labels=["carrier", "pulse"],
                line_kwargs=[{"linestyle": "--"}, None],
                x_unit=pulse.pulse_width,
                y_unit=pulse.amplitude,
                **PLOT_KWARGS,
            )
"josh.karpel@gmail.com"
] | josh.karpel@gmail.com |
7804532b391fe43fc82acd7a3ec0e8164b758671 | 54bdcc4aeae0c15ecda2f662adebb47733f0c8fb | /CO1/4.py | 03e2458ddb2cbcff8d8d71779f97274657a86efe | [] | no_license | AswinP2711/Python_Programs | ed1b8f77e18964143a42aa9721ed25b56be8f136 | 698929db2d751bd80a77059483c7a32a1e352c8c | refs/heads/master | 2023-03-28T23:06:06.014344 | 2021-03-23T12:32:22 | 2021-03-23T12:32:22 | 344,063,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | s = str(input("Enter a line of text : "))
counts = dict()
words = s.split()
for word in words:
if word in counts:
counts[word] += 1
else:
counts[word] = 1
print(counts) | [
"aswinp2711@gmail.com"
] | aswinp2711@gmail.com |
151392182417b31d3dd7cb2a6d0fcfa253fee301 | 436177bf038f9941f67e351796668700ffd1cef2 | /venv/Lib/site-packages/sklearn/linear_model/__init__.py | 796b13e6c63d51def5a559c6a79836627fc25551 | [] | no_license | python019/matplotlib_simple | 4359d35f174cd2946d96da4d086026661c3d1f9c | 32e9a8e773f9423153d73811f69822f9567e6de4 | refs/heads/main | 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,952 | py | """
The :mod:`sklearn.linear_model` module implements a variety of linear models.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
| [
"82611064+python019@users.noreply.github.com"
] | 82611064+python019@users.noreply.github.com |
2c0f5451d92c6e2a7cc8ed70a45bb1ef821960c6 | cc83eb3318f8e15b68e3a6e3033c384ce851f497 | /VoiceD/googleRead.py | 34c1da244586f49d13cabb7a81d6205aecbf2e68 | [] | no_license | san7nu/Python_codes | bee04fd2865d0cf86aeb600f3039d8387a04f03f | f3aca72f27aa17c5853633f39657363c621e11d8 | refs/heads/master | 2020-03-22T17:17:55.393692 | 2019-05-07T13:24:25 | 2019-05-07T13:24:25 | 140,386,596 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import speech_recognition as sr
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
print("Say something!")
audio = r.listen(source)
try:
print("Google Speech Recognition thinks you said #" + r.recognize_google(audio))
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e)) | [
"4fun.san@gmail.com"
] | 4fun.san@gmail.com |
6f4834c15b3b1e191d3392fc4e083bce9dd8c3ee | 454a4c4b070f8e7c312a0641dd8431b5b169f716 | /cart/context_processors.py | f2ea95c45c62f5e362979784485dc5120cd38add | [] | no_license | adamdyderski/Online-shop | c6ed7736b319402866887ebd51938c5483af10c9 | a52a358122911239cd2390494dcbd8981568a9d6 | refs/heads/master | 2021-04-28T03:03:30.135701 | 2018-02-21T09:43:09 | 2018-02-21T09:43:09 | 122,127,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py |
def cart_processor(request):
cart = request.session.get('cart', {})
return {'cart_count': len(cart)}
| [
"adamdyderski94@gmail.com"
] | adamdyderski94@gmail.com |
ccbb02c3cf0ac4b9e9da7e4142bf9b2deecd73fd | c7a932e28a1a1dbc70c05c62caa43ce6cb691686 | /fabric/service/monitor/promethues/prometheus.py | 13d3ebd36fcfa08a10fc933ae20e580484cc043f | [] | no_license | Martians/deploy | 9c2c9a9b0e4431e965960aada0f40df6a34b2e09 | 6fd3f892edd7a12aa69d92f357cc52932df86d9c | refs/heads/master | 2022-01-09T03:29:13.948962 | 2019-04-29T05:15:40 | 2019-04-29T05:15:40 | 112,311,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,554 | py | # coding=utf-8
from invoke import task
from common import *
import system
class LocalConfig(LocalBase):
""" 默认配置
"""
def __init__(self):
LocalBase.__init__(self, 'prometheus')
self.source = 'https://github.com/prometheus/prometheus/releases/download/v2.6.0/prometheus-2.6.0.linux-amd64.tar.gz'
self.config = 'prometheus.yml'
self.node_source = 'https://github.com/prometheus/node_exporter/releases/download/v0.17.0/node_exporter-0.17.0.linux-amd64.tar.gz'
self.node_name = 'node_exporter'
self.node_port = 9100
self.node_config = 'node.yaml'
self.client_config = 'client.yaml'
self.alert = 'https://github.com/prometheus/alertmanager/releases/download/v0.16.0-beta.0/alertmanager-0.16.0-beta.0.linux-amd64.tar.gz'
""" 提供个默认参数
该变量定义在头部,这样在函数的默认参数中,也可以使用了
"""
local = LocalConfig()
""" fab install-server
fab install-node
fab start-server
fab start-node
"""
@task
def install_server(c):
c = hosts.one()
download(c, local.name, source=local.source)
""" 安装包下载后,到master上进行解压
"""
scp(c, hosts.get(0), package(), dest=local.temp)
unpack(conn(0), local.name, path=package(local.temp))
config_server(conn(0))
def config_server(c):
sed.path(os.path.join(local.base, local.config))
""" 配置文件
"""
file_sd_node = """
- job_name: 'node'
file_sd_configs:
- files:
- '{node}'""".format(node=local.node_config)
file_sd_client = """
- job_name: 'client'
scrape_interval: 1s
file_sd_configs:
- files:
- '{client}'""".format(client=local.client_config)
sed.append(c, file_sd_node)
sed.append(c, file_sd_client)
sed.append(c, ' - "*_rules.yml"', 'rule_files:')
""" file service discovery
"""
with c.cd(local.base):
c.run("""echo '
- labels:
type: 'node'
targets:' > {node}""".format(node=local.node_config))
c.run("""echo '
- labels:
type: 'client'
targets:' > {client}""".format(client=local.client_config))
@task
def help(c):
c = conn(c, True)
system.help(c, '''
monitor node: {base}/{node}
monitor client: {base}/{client}
monitor rules; {base}/*_rules.yaml\n'''.format(base=local.base, node=local.node_config, client=local.client_config), 'config')
@task
def install_node(c):
c = hosts.one()
download(c, local.node_name, source=local.node_source)
copy_pack(c, dest=local.temp)
hosts.execute('sudo rm -rf /opt/*{}*'.format(local.node_name))
for index in hosts.lists():
unpack(hosts.conn(index), local.node_name, path=package(local.temp))
config_server_node(c)
def config_server_node(c):
c = hosts.conn(0)
append = ''
for host in hosts.lists(index=False):
append += " - '{}:{}'\n".format(host.host, local.node_port)
sed.path(os.path.join(local.base, local.node_config))
sed.append(c, append)
@task
def start_server(c):
c = hosts.conn(0)
c.run(system.nohup('cd {}; nohup ./prometheus --config.file={}'
.format(local.base, local.config), nohup=''), pty=True)
@task
def stop_server(c):
c = hosts.conn(0)
c.run('{}'.format(system.kills('prometheus', string=True)))
@task
def start_node(c):
system.start(local.node_name, system.nohup('cd {}; nohup ./node_exporter --web.listen-address=":{}"'
.format(base(local.node_name), local.node_port), nohup=''), pty=True)
@task
def stop_node(c):
system.stop(local.node_name)
@task
def clean(c):
stop_server(c)
stop_node(c)
system.clean('/opt/{}, /opt/{}'.format(local.name, local.node_name))
@task
def install_alert(c):
pass
# hosts.execute('sudo rm -rf /opt/*kafka*')
#
# for index in hosts.lists():
# unpack(hosts.conn(index), local.name, path=package(local.temp))
@task
def help(c):
c = conn(c, True)
system.help(c, '''
http://192.168.0.81:9090
fab install-server
fab start-server
node:
http://192.168.0.81:9100
fab install-node
fab start-node
''', 'server')
# install_server(conn(0))
# install_node(conn(0))
# start_server(conn(0))
# stop(conn(0))
# clean(conn(0))
# start_node(conn(0)) | [
"liudong@daowoo.com"
] | liudong@daowoo.com |
53eb69b3f36faa7820b1029d5787c4e03dc5bcf3 | e442f513d5bc640d93637a6f41b98427b3d5d392 | /queue_test.py | e98ebed4986f82b577ec2bbd7d9bff7e8ec7b09b | [] | no_license | CNBlackJ/mobot | e9b9e04234e3220f4c94c9f52176f443005eb709 | 2b674eeead564820bae31c8f3a3ed1df08d71c92 | refs/heads/master | 2020-04-08T14:17:53.727413 | 2018-12-03T07:14:31 | 2018-12-03T07:14:31 | 159,430,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | #!/usr/bin/python3
# -*- coding:utf8 -*-
import multiprocessing
import os, time, random
# 写数据进程执行的代码:
def write(q):
print('Process to write: %s' % os.getpid())
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# 读数据进程执行的代码:
def read(q):
print('Process to read: %s' % os.getpid())
while True:
value = q.get(True)
print('Get %s from queue.' % value)
if __name__=='__main__':
# 父进程创建Queue,并传给各个子进程:
q = multiprocessing.Queue()
pw = multiprocessing.Process(target=write, args=(q,))
pr = multiprocessing.Process(target=read, args=(q,))
# 启动子进程pw,写入:
pw.start()
# 启动子进程pr,读取:
pr.start()
# 等待pw结束:
pw.join()
# pr进程里是死循环,无法等待其结束,只能强行终止:
pr.terminate() | [
"yes.heng@icloud.com"
] | yes.heng@icloud.com |
4ff68446a3e28c7bc77b5fc15b631b5a27957f7d | 7ac0f072065b1ed322dc804f3bfc741dd935e7c0 | /HW2/Q5_train.py | 4c58641a77fa35a3d5ff7cdb2d64d068a044648c | [] | no_license | zhu849/NCKU-image-process | 34e36ade1e8a4b103e19daa44dde93dc8297e158 | f62f41dd2a580b67fc073697b348732206fd8cdc | refs/heads/main | 2023-09-01T01:17:25.101178 | 2021-10-26T11:26:06 | 2021-10-26T11:26:06 | 380,796,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,179 | py | import random
import os
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, merge, Input
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils import np_utils
from keras.models import model_from_json,load_model, Model
from keras import backend as K
from keras.preprocessing import image
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras import layers
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input
from datetime import datetime
%matplotlib inline
# random print 16 imgs in terminal with plt
def random_print():
x, y = train_generator.next()
plt.figure(figsize=(16, 8))
for i, (img, label) in enumerate(zip(x, y)):
plt.subplot(3, 6, i+1)
if label == 1:
plt.title('dog')
else:
plt.title('cat')
plt.axis('off')
plt.imshow(img, interpolation="nearest")
# 定義一個 identity block,輸入和輸出維度相同,可串聯,用於加深網路
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# default stride = 1
x = Convolution2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Convolution2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
# 定義一個會重複的捲積結構 - conv Block,輸入和輸出維度不同,不可串聯,用於改變網路維度
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
Note that from stage 3, the first conv layer at main path is with strides=(2,2)
And the shortcut should have strides=(2,2) as well
"""
# 分別解出各個 filter 的值
filters1, filters2, filters3 = filters
# 選擇捲積使用的軸
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
# 為新定義的層統一名稱
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Convolution2D(filters1, (1, 1), strides=strides, name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Convolution2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
# 經歷兩層後將捲積和 short cut 結合後再輸出
x = Convolution2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
# 捷徑路線
shortcut = Convolution2D(filters3, (1, 1), strides=strides, name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
# H(x) = F(x) + shortcut
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
if __name__ == "__main__":
# 定義要處理影像的大小參數
image_width = 224
image_height = 224
image_size = (image_width, image_height)
# 讀取資料集 && 驗證集
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(
'./Train', # this is the target directory
target_size=image_size, # all images will be resized to 224x224
batch_size=4, # 一次讀16張
class_mode='binary') # 格式是二進位檔案
validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
validation_generator = validation_datagen.flow_from_directory(
'./Validation', # this is the target directory
target_size=image_size, # all images will be resized to 224x224
batch_size=4,
class_mode='binary')
#這subfunc可以確認是否正確讀到資料集
#random_print()
### 利用 conv Block 和 identity_block 建構 resNet50 架構 ###
## conv1
# 定義 input shape = (None, 224, 224, 3)
img_input = Input(shape=(image_width, image_height, 3)) # Input() is used to instantiate a Keras tensor.
af_padding = ZeroPadding2D((3, 3))(img_input) # Zero-padding layer for 2D input , size will be (None, 230, 230, 3)
# padding 的目的應該是因為用 7*7 去做捲積?
# 因為 strides = 2, 所以 conv1 size = (None, 115, 115, 64) , format:(rows, cols, filters)
conv1 = Convolution2D(filters=64, kernel_size=(7,7), padding="same", strides=(2,2), name='conv1', data_format='channels_last')(af_padding)
# 每次捲積後都需要做一次 Batch Normorlization, 做完後大小相同,僅是將內容值平滑化, axis = 3 表示用第三個當作 "軸" 做操作
BN_conv1 = BatchNormalization(axis=3, name='bn_conv1')(conv1)
#BN_conv1 = conv1
# 將輸入透過 relu 函數轉換, 輸出是相同 size
relu_conv1 = Activation('relu')(BN_conv1)
## stage2
# 從3*3方格內找到最大值代表一格,且每次 Stride 為2,所以 col & row 變成一半,size = (Noen, 58, 58, 64)
maxPool_conv1 = MaxPooling2D (pool_size=(3, 3), strides=(2, 2), padding="same")(relu_conv1)
# conv2 return size = (None, 29, 29, 256)
conv2_a = conv_block(maxPool_conv1, 3, [64, 64, 256], stage=2, block='a')
conv2_b = identity_block(conv2_a, 3, [64, 64, 256], stage=2, block='b')
conv2_c = identity_block(conv2_b, 3, [64, 64, 256], stage=2, block='c')
## stage3
# conv3 return size = (None, 15, 15, 512)
conv3_a = conv_block (conv2_c, 3, [128, 128, 512], stage=3, block='a')
conv3_b = identity_block(conv3_a, 3, [128, 128, 512], stage=3, block='b')
conv3_c = identity_block(conv3_b, 3, [128, 128, 512], stage=3, block='c')
conv3_d = identity_block(conv3_c, 3, [128, 128, 512], stage=3, block='d')
## stage4
# conv4 return size = (None, 8, 8, 1024)
conv4_a = conv_block (conv3_d, 3, [256, 256, 1024], stage=4, block='a')
conv4_b = identity_block(conv4_a, 3, [256, 256, 1024], stage=4, block='b')
conv4_c = identity_block(conv4_b, 3, [256, 256, 1024], stage=4, block='c')
conv4_d = identity_block(conv4_c, 3, [256, 256, 1024], stage=4, block='d')
conv4_e = identity_block(conv4_d, 3, [256, 256, 1024], stage=4, block='e')
conv4_f = identity_block(conv4_e, 3, [256, 256, 1024], stage=4, block='f')
## stage5
# conv5 return size = (None, 4, 4, 2048)
conv5_a = conv_block (conv4_f, 3, [512, 512, 2048], stage=5, block='a')
conv5_b = identity_block(conv5_a, 3, [512, 512, 2048], stage=5, block='b')
conv5_c = identity_block(conv5_b, 3, [512, 512, 2048], stage=5, block='c')
#construct model
base_model = Model(img_input, conv5_c)
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
base_model.load_weights(weights_path)
# add top layer to ResNet-50
x = AveragePooling2D((7, 7), name='avg_pool', padding = "same")(base_model.output)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid', name='output')(x)
model = Model(base_model.input, x)
#model.summary()
top_num = 4
for layer in model.layers[:-top_num]:
layer.trainable = False
for layer in model.layers[-top_num:]:
layer.trainable = True
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
best_model = ModelCheckpoint("resnet_best.h5", monitor='val_accuracy', verbose=0, save_best_only=True)
model.fit_generator(
train_generator,
epochs=8,
validation_data=validation_generator,
callbacks=[best_model,TensorBoard(log_dir='./logs', histogram_freq=1,update_freq=1000)])
with open('resnet.json', 'w') as f:
f.write(model.to_json())
| [
"66557634+zhu849@users.noreply.github.com"
] | 66557634+zhu849@users.noreply.github.com |
2646cbca549f16adcee90198326548ef2b7dbfe0 | a3bf1597881b56be605e4fb4482a6c99213f3cef | /robin/wsgi.py | a2ab64974a7706a5ffd1a2070522bdfb3bd929ed | [
"Apache-2.0"
] | permissive | oneprice/microbot | dd7a27f62cabc293fddad0bf175bcacaf78ab350 | dbd6417c70d8e20d37e2ca585e85a97a4c8a4144 | refs/heads/master | 2021-07-20T15:34:01.033737 | 2017-10-27T02:31:35 | 2017-10-27T02:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import os, sys
sys.path.append('/home/pi/Django')
#sys.path.append('/home/pi/Django/robin')
sys.path.append('/home/pi/Django/myvenv/lib/python3.5/site-packages')
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "robin.settings")
application = get_wsgi_application()
| [
"noreply@github.com"
] | oneprice.noreply@github.com |
797987fe548a6f7c7c46884932412b3e90e8bc1a | 119437adb7830659307c18b79a9cc3f6bfc6fe40 | /onnx_model_serving/onnx_model_predict.py | 95a0f36ce1f8192ebe4a598455dc1cc4eb833cee | [] | no_license | percent4/PyTorch_Learning | 478bec35422cdc66bf41b4258e29fbcb6d24f60c | 24184d49032c9c9a68142aff89dabe33adc17b52 | refs/heads/master | 2023-03-31T03:01:19.372830 | 2023-03-17T17:02:39 | 2023-03-17T17:02:39 | 171,400,828 | 16 | 7 | null | 2023-09-02T08:53:26 | 2019-02-19T03:47:41 | Jupyter Notebook | UTF-8 | Python | false | false | 730 | py | # -*- coding: utf-8 -*-
# @Time : 2021/2/3 20:09
# @Author : Jclian91
# @File : onnx_model_predict.py
# @Place : Yangpu, Shanghai
import onnxruntime
import torch
import numpy as np
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
ort_session = onnxruntime.InferenceSession("iris.onnx")
# compute ONNX Runtime output prediction
x = torch.Tensor([[6.4, 2.8, 5.6, 2.1]])
print("input size: ", to_numpy(x).shape)
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
print(ort_outs[0])
print("Exported model has been tested with ONNXRuntime, and the result looks good!") | [
"1137061634@qq.com"
] | 1137061634@qq.com |
dd72aaa603df01d3ea5567d1cef9146d0c699c55 | 9a3064cd9a2712575ba57936566cd824c18cb8cd | /scripts/m4.py | 8d339e0a34914c20aacf1302d3311f139b4c5f42 | [] | no_license | chengu1993/cs445 | a6ad71aa056e17003e5e030e5437af5c4140d3c7 | 44ba34cff325342411d0ff8c2a8020a4233785b3 | refs/heads/master | 2021-01-01T19:26:33.306907 | 2017-07-27T22:54:13 | 2017-07-27T22:54:13 | 98,586,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | #import numpy as np
import pandas as pd
#from matplotlib import pyplot as plt
from sklearn import svm, tree
from sklearn.cross_validation import cross_val_score
from sklearn.naive_bayes import GaussianNB
import pydotplus
#from sklearn.externals.six import StringIO
#from IPython.display import Image
#import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn import decomposition
# import data
raw_data = pd.read_csv('out.csv', header=0)
# target data set
target = raw_data['FGM']
# train data set
train_data = raw_data[['SHOT_DIST', 'FINAL_MARGIN', 'PERIOD',
'SHOT_CLOCK', 'DRIBBLES', 'CLOSE_DEF_DIST', 'DEFENSE_LEVEL', 'OFFENSE_LEVEL']]
tree_clf = tree.DecisionTreeClassifier()
tree_clf = tree_clf.fit(train_data, target)
dot_data = tree.export_graphviz(tree_clf, out_file=None,
feature_names=list(train_data.columns.values),
class_names=["missed", "made"],
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("decision_tree_data.pdf") | [
"zijun@Zijuns-MBP.wireless.yale.internal"
] | zijun@Zijuns-MBP.wireless.yale.internal |
a3ad6fec8b7b991839d4265cfb3f8f96df862df6 | 7b270cf5f9d0a3e26b5afd758563c6cff73a5248 | /comportamentais/iterator/canal/canal/canais/__init__.py | 89401268f00e0cba0c575756511cf3b8a786a561 | [] | no_license | reginaldosantarosa/DesignPatterns | 10810672d3831e562ec636a5f66bd709c797ca34 | bec4247f52b8d2e1fe41c570408816a5d4b22608 | refs/heads/master | 2020-04-04T06:54:19.757054 | 2018-01-04T03:06:05 | 2018-01-04T03:06:05 | 155,761,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from canal.canais.filmes import Filme
from canal.canais.esportes import Esporte
| [
"victorhad@gmail.com"
] | victorhad@gmail.com |
c0bc193c0ca45d24c0490317457e0038ba7a2b66 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0701-0800/0701-Insert into a Binary Search Tree/0701-Insert into a Binary Search Tree.py | fe9dea06abb24db8df133f5a1db2ab1c7bbf15c4 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 617 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
inode = TreeNode(val)
if root is None:
return inode
node = root
while node:
prev = node
if node.val < val:
node = node.right
else:
node = node.left
if prev.val < val:
prev.right = inode
else:
prev.left = inode
return root | [
"jiadaizhao@gmail.com"
] | jiadaizhao@gmail.com |
dd7d9f4672f5b1e4cbc5d0e3a459f1cdd8689023 | d331eb807a5e9424fed05a25fd1dfa4b680bddce | /resources/解析单词.py | 54e23d92837007182038df07ad17663f76b1f41d | [] | no_license | ahuinee/challengewords | b8fd9a49cb1b0fff0683a0b8b80e138190ef9696 | 11bfcafe89a546b8d0c37b3982e6cb044ca80bd5 | refs/heads/master | 2023-08-07T19:25:29.598013 | 2021-09-17T08:11:22 | 2021-09-17T08:11:22 | 345,714,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | import openpyxl #xlsx解析
import re # 正则
import json # JSON
# 文件名
fileName = "单词.xlsx"
# 以只读模式打开工作簿
wb = openpyxl.load_workbook(fileName, read_only=True)
# 获取workbook中所有的表格sheet
sheets = wb.get_sheet_names()
# 班级与书册
volumeRE = re.compile("(.*)(上册|下册)")
# 单元匹配
unitRE = re.compile(".*单元")
# 单词匹配
wordRE_EN = re.compile("[a-zA-Z]")
# 年级
gradeList = []
# 书册
volumeList = []
# 循环遍历所有sheet len(sheets)
for i in range(len(sheets)):
# 每个sheet 年级和书册
sheet = wb.get_sheet_by_name(sheets[i])
# 年级与书册
st = volumeRE.match(sheet.title)
if st:
gradeName = st.group(1) # 年级
volumeName = st.group(2) # 书册
volumeDict = {'volumeName':volumeName,'units':[]}
# 年级
if volumeName == '上册':
gradeDict = {'gradeName':gradeName,'volumes':[]}
volumeList = gradeDict['volumes']
gradeList.append(gradeDict)
# 书册
volumeList.append(volumeDict)
# 单元数组
unitList = volumeDict['units']
unitDict = None
for c in range(1, sheet.max_column+1):
for r in range(1,sheet.max_row+1):
# 每个cell的内容
cellValue = sheet.cell(r, c).value
if cellValue :
cellValue = str(cellValue)
# 单元
if unitRE.match(cellValue):
unit = {
"unitName":cellValue,
"words": []
}
unitDict = unit
unitList.append(unit)
continue
# 过滤掉English
if wordRE_EN.match(cellValue):
continue
# 单词 (仅中文)
if unitDict :
word_en = str(sheet.cell(r, c+1).value) # 取中文对应掉英语单词
unitDict['words'].append({'chinese':cellValue,'english':word_en})
# 将json对象写入到文件
with open('words.json', 'w') as dump_f:
json.dump(gradeList,dump_f,indent=4,ensure_ascii=False)
dump_f.close()
| [
"xiahesong@sz.hitrontech.com"
] | xiahesong@sz.hitrontech.com |
8739c5eab034f23e4b46d81dca13f9f755c4c200 | db113789a0b0afa7b511d9df6ebc9928639badda | /save_pb.py | ecc6dcbc703e981de5a4e129a5caeedbb50bc698 | [] | no_license | zhaohb/tensorflow_demo | c89357090560453b0fe5e9db5cf21d2685f902f7 | 9d8fc9368a2983cea478bc5974777f39787d913c | refs/heads/master | 2020-05-20T12:23:04.938362 | 2019-05-08T09:13:07 | 2019-05-08T09:13:07 | 185,571,475 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | #!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import os
from tensorflow.python.framework import graph_util
def save_mode_pb(pb_file_path):
x = tf.placeholder(tf.int32, name='x')
y = tf.placeholder(tf.int32, name='y')
b = tf.Variable(1, name='b')
xy = tf.multiply(x, y)
# 这里的输出需要加上name属性
op = tf.add(xy, b, name='op_to_store')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
path = os.path.dirname(os.path.abspath(pb_file_path))
if os.path.isdir(path) is False:
os.makedirs(path)
# convert_variables_to_constants 需要指定output_node_names,list(),可以多个
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['op_to_store'])
with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
f.write(constant_graph.SerializeToString())
# test
feed_dict = {x: 2, y: 3}
print(sess.run(op, feed_dict))
save_mode_pb("./model.pb")
| [
"zhaohongbocloud@gmail.com"
] | zhaohongbocloud@gmail.com |
db0abd0731c1c469315f6d765e74347f5c886476 | 5780917b2279dbe60d5205b2052d523b0dfd86e1 | /app.py | ccb69fa57980e40450347d7f1a8f476121a1ea7b | [] | no_license | agungd3v/myassistant | 9f5a7a74056fb14c41395cf578e9169c2c4b5d68 | 2ff472bf92366a4b86c767025ca6641a040ec46e | refs/heads/master | 2023-07-17T16:48:14.016160 | 2021-08-14T13:37:17 | 2021-08-14T13:37:17 | 394,739,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | from modules import speak, takeCommand, openBrowser, playMusic
WAKE = "buddy"
MUSIC = "music"
YOUTUBE = "youtube"
CAN = "what you can"
while True:
query = takeCommand().lower()
if query.count(WAKE) > 0:
speak("hi, do you need help ?")
while True:
query = takeCommand().lower()
if query.count(CAN) > 0:
speak("I can read documents")
speak("I can open a web in the browser")
speak("I can play a music")
speak("I can send an email")
if query.count(YOUTUBE) > 0:
openBrowser("youtube.com")
if query.count(MUSIC) > 0:
playMusic()
if "stop" in query or "maybe everything for now is enough" in query:
speak("OK. Just call me, if there's anything you need. Byebye")
break
| [
"mailtotuyul1@gmail.com"
] | mailtotuyul1@gmail.com |
7bc53a93671804c0f4d9e619a80a21474a43e4a5 | 12d7fcf617b47380316b0121504a8725e1766e98 | /funzioni_iot/views.py | 86bdee34c390b6b2899d496942223e2f323e8284 | [] | no_license | gordongekko67/testrepository | 0a824d02c1b302321ced264e1e8167d5cce907f2 | 18a2fcfd8129d808973590dc7ed78f8206eae5a6 | refs/heads/main | 2023-04-04T08:11:01.554259 | 2021-04-01T10:32:31 | 2021-04-01T10:32:31 | 353,658,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,227 | py | from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.mail import send_mail
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic import TemplateView
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.utils import timezone
import datetime
import requests
from .models import Titoli2, Giornalista, PublishedManager, Cliente
from funzioni_iot.forms import FormContatto, FormTitoli, TitoliModelForm, FormRegistrazioneUser
from funzioni_iot.serializers import ClienteSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
import io
def titoli_list(request):
    """Render the paginated list of Titoli2 posts, 3 per page.

    Reads the requested page number from the ``page`` query parameter;
    out-of-range or non-integer values fall back to the first/last page.
    """
    # Bug fix: the original ran `Titoli2.objects.all()` twice and bound the
    # first result to `posts`, which was immediately overwritten below.
    object_list = Titoli2.objects.all()
    paginator = Paginator(object_list, 3)  # 3 posts in each page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer deliver the first page
        posts = paginator.page(1)
    except EmptyPage:
        # If page is out of range deliver last page of results
        posts = paginator.page(paginator.num_pages)
    return render(request, 'blog/post/list.html', {'page': page, 'posts': posts})
def clienti_list(request):
    """Render the paginated list of Cliente records, 3 per page.

    Reads the requested page number from the ``page`` query parameter;
    out-of-range or non-integer values fall back to the first/last page.
    """
    # Bug fix: the original bound an unused `cli` variable and ran the same
    # `Cliente.objects.all()` query twice; one queryset is enough.
    object_list = Cliente.objects.all()
    paginator = Paginator(object_list, 3)  # 3 records in each page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer deliver the first page
        posts = paginator.page(1)
    except EmptyPage:
        # If page is out of range deliver last page of results
        posts = paginator.page(paginator.num_pages)
    return render(request, 'blog/post/listcli.html', {'page': page, 'posts': posts})
def titoli_detail(request, year, month, day, post):
    """Show the detail page for one published post matched by slug and date."""
    lookup = {
        'slug': post,
        'status': 'published',
        'publish__year': year,
        'publish__month': month,
        'publish__day': day,
    }
    # 404 if no published post matches the slug/date combination.
    post = get_object_or_404(Titoli2, **lookup)
    return render(request, 'blog/post/detail.html', {'post': post})
def invio_mail(request):
    """Send a fixed test e-mail and confirm it as a plain-text response."""
    recipients = ['ensa77@yahoo.com']
    send_mail(
        'Django mail',
        'This e-mail was sent with Django.',
        'enrico.saccheggiani@gmail.com',
        recipients,
        fail_silently=False,
    )
    return HttpResponse('invio mail in Django', content_type='text/plain')
def homep(request):
    """Render the site home page."""
    template_name = "index.html"
    return render(request, template_name)
def prova_django(request):
    """Return a fixed plain-text sanity-check message."""
    return HttpResponse('prova visualizzazione dati django', content_type='text/plain')
def hellocontattaci(request):
    """Show the contact form; on a valid POST, echo the data and thank the user.

    GET renders an empty form; an invalid POST re-renders the bound form
    (so field errors are displayed); a valid POST returns a thank-you page.
    """
    if request.method != 'POST':
        # First visit: show an empty contact form.
        return render(request, "contattaci.html", {"form": FormContatto()})
    form = FormContatto(request.POST)
    if not form.is_valid():
        # Re-display the bound form so validation errors are shown.
        return render(request, "contattaci.html", {"form": form})
    print("il form e' valido")
    print("NomE ", form.cleaned_data["nome"])
    print("Cognome ", form.cleaned_data["cognome"])
    return HttpResponse("<h1> Grazie per averci contattato </h1>")
def crea_titoli(request):
    """Create a Titoli2 row from a posted TitoliModelForm.

    On success the full Titoli2 listing is rendered; a GET (or an invalid
    POST) shows the creation form again.
    """
    if request.method == 'POST':
        form = TitoliModelForm(request.POST)
        if form.is_valid():
            # Persist the new row (the unused `new_titolo` binding was dropped).
            form.save()
            titolo = Titoli2.objects.all()
            context = {"titoli": titolo}
            return render(request, 'blog/post/homepage2.html', context)
    else:
        form = TitoliModelForm()
    context = {"form": form}
    return render(request, "institoli.html", context)
def mod_titoli(request):
    """Update the Titoli2 row identified by POST['pk'], then list all rows.

    Timestamps are refreshed to "now" and codmintit2/codmaxtit2 are reset to
    hard-coded bounds.  NOTE(review): `autor` is read from the POST but never
    used -- confirm whether an author field update is missing.
    """
    if request.method == 'POST':
        pk1 = request.POST.get("pk")
        print(pk1)
        codtit = request.POST.get("codtit2")
        codslugtit = request.POST.get("codslugtit2")
        isin = request.POST.get("isin")
        body = request.POST.get("body")
        autor = request.POST.get("autor")
        Titoli2.objects.filter(pk=pk1).update(codtit2=codtit, codslugtit2=codslugtit, codisintit2=isin, codbodytit2=body, codpublishtit2=datetime.datetime.now(
        ), codcreatedtit2=datetime.datetime.now(), codupdatedtit2=datetime.datetime.now(), codmintit2=1, codmaxtit2=10)
    titolo = Titoli2.objects.all()
    context = {"titoli": titolo}
    return render(request, 'blog/post/homepage2.html', context)
def can_titoli(request):
    """Delete the Titoli2 row named by POST['pk'], then re-render the listing."""
    if request.method == 'POST':
        target = Titoli2.objects.get(id=request.POST.get("pk"))
        target.delete()
    remaining = Titoli2.objects.all()
    return render(request, 'blog/post/homepage2.html', {"titoli": remaining})
def visuatitoli(request):
    """Render the contact page.

    BUG FIX: the original passed an undefined name `context` to render(),
    raising NameError on every request.  An empty context is supplied
    instead; add template variables here if the page needs them.
    """
    return render(request, "contattaci.html", {})
def registrazione(request):
    """Register a new user, authenticate and log them in, then redirect.

    On GET (or an invalid POST) the empty registration form is rendered.
    NOTE(review): `login` is shadowed by the view function of the same name
    below in this module -- if django.contrib.auth.login was intended here,
    rename one of the two.
    """
    if request.method == 'POST':
        form = FormRegistrazioneUser(request.POST)
        if form.is_valid():
            username = form.cleaned_data["username"]
            email = form.cleaned_data["email"]
            password = form.cleaned_data["password"]
            User.objects.create_user(
                username=username, password=password, email=email)
            user = authenticate(username=username, password=password)
            login(request, user)
            # BUG FIX: the original returned the HttpResponseRedirect *class*
            # instead of an instance.  Redirect to the site root; adjust the
            # target URL if another landing page is wanted.
            return HttpResponseRedirect("/")
    else:
        form = FormRegistrazioneUser()
    context = {"form": form}
    return render(request, "registrazione.html", context)
def form2(request):
    """Return the 'scelta.html' template object in an HttpResponse.

    NOTE(review): the Template object is handed to HttpResponse without
    calling template.render(); confirm that serving the unrendered template
    is intended (homeiot() below renders the same page via render()).
    """
    template = loader.get_template('scelta.html')
    return HttpResponse(template)
def homeiot(request):
    """Render the IoT choice page."""
    return render(request, "scelta.html")
def base(request):
    """Render the bare base template (layout preview)."""
    return render(request, "base.html")
def titoli2(request):
    """Placeholder view: return a fixed plain-text message."""
    msg = f'prova django Today is '
    return HttpResponse(msg, content_type='text/plain')
def home(request):
    """Return the names of every Giornalista row as a plain-text list."""
    names = [giornalista.nome for giornalista in Giornalista.objects.all()]
    response = str(names)
    print(response)
    return HttpResponse(response, content_type='text/plain')
def homeTitoly(request):
    """List every Titoli2 row on the homepage template."""
    titolo = Titoli2.objects.all()
    context = {"titoli": titolo}
    return render(request, 'blog/post/homepage2.html', context)
def titoloDetailView(request, pk):
    """Show a single Titoli2 row by primary key.

    NOTE(review): Titoli2.objects.get raises DoesNotExist (an HTTP 500) for
    an unknown pk; get_object_or_404 would return a 404 instead.
    """
    titolo = Titoli2.objects.get(pk=pk)
    context = {"titoli": titolo}
    return render(request, 'blog/post/titolo_detail.html', context)
# CBV Class Based Views
# Documentazione ufficiale
class TitoloDetailViewCB(DetailView):
    """Class-based detail view for a single Titoli2 row."""
    model = Titoli2
    template_name = "titolo_detail.html"
class AboutView(TemplateView):
    """Static about page."""
    template_name = "blog/post/about2.html"
class Titoli_list_view(ListView):
    """Class-based listing of all Titoli2 rows."""
    model = Titoli2
    template_name = "lista_titoli.html"
class Clienti_list_view(ListView):
    """Class-based listing of all Cliente rows."""
    model = Cliente
    template_name = "lista_clienti.html"
def login(request):
    """Validate a posted LoginForm and re-render the login page.

    NOTE(review): this view shadows any `login` imported from
    django.contrib.auth in this module -- registrazione() above calls
    login(request, user), which would resolve to this view.  Consider
    renaming the view.
    """
    username = "not logged in"
    if request.method == "POST":
        # Get the posted form
        MyLoginForm = LoginForm(request.POST)
        if MyLoginForm.is_valid():
            username = MyLoginForm.cleaned_data['username']
    else:
        # BUG FIX: the original instantiated `Loginform` (wrong
        # capitalisation), raising NameError on every GET request.
        MyLoginForm = LoginForm()
    return render(request, 'login.html', {"username": username})
def logout_enrico(request):
    """Log the current user out and show the logged-out page."""
    logout(request)
    return render(request, 'logged_out.html')
def chiamata_request(request):
    """Fetch the latest exchange rates and echo the parsed JSON as text.

    NOTE(review): `msg` is a dict; HttpResponse will iterate it (emitting
    only the keys) rather than serialising the JSON -- confirm whether
    JsonResponse or str(msg) was intended.
    """
    print("chiamata request")
    r = requests.get('https://api.exchangeratesapi.io/latest')
    print(r.status_code)
    print(r.headers['content-type'])
    print(r.encoding)
    print(r.text)
    msg = r.json()
    print(msg)
    return HttpResponse(msg, content_type='text/plain')
def chiamata_request_payload(request):
    """Fetch USD->GBP exchange rates (query-string payload) and echo them.

    NOTE(review): as in chiamata_request(), a dict is passed to
    HttpResponse -- confirm whether JsonResponse was intended.
    """
    print("chiamata request con payload ")
    payload = {'base': 'USD', 'symbols': 'GBP'}
    r = requests.get('https://api.exchangeratesapi.io/latest', params=payload)
    print(r.status_code)
    print(r.headers['content-type'])
    print(r.encoding)
    print(r.text)
    msg = r.json()
    print(msg)
    return HttpResponse(msg, content_type='text/plain')
def risposta_rest(request):
    """Return every Titoli2 row (all fields) as a JSON response.

    NOTE(review): `data1`, a two-field projection, is built but never used;
    only `data2` is serialised.
    """
    titoli = Titoli2.objects.all()
    data1 = {"titoli": list(titoli.values("codtit2", "codbodytit2"))}
    data2 = {"titoli": list(titoli.values())}
    response = JsonResponse(data2)
    return response
@api_view(['GET'])
def cliente_collection(request):
    """DRF endpoint: serialise all Cliente rows and return them as JSON.

    The serialised payload is also rendered to bytes and printed for
    debugging.  The triple-quoted block below is dead example code showing
    the reverse (parse-and-save) direction; it is never executed.
    """
    cli = Cliente.objects.all()
    serializer = ClienteSerializer(cli, many=True)
    print(serializer.data)
    content = JSONRenderer().render(serializer.data)
    print(content)
    #
    """
    stream = io.BytesIO(content)
    data = JSONParser().parse(stream)
    serializer = ClienteSerializer(data=data)
    #serializer.is_valid()
    # True
    serializer.validated_data
    # OrderedDict([('title', ''), ('code', 'print("hello, world")\n'), ('linenos', False), ('language', 'python'), ('style', 'friendly')])
    serializer.save()
    """
    return Response(serializer.data)
| [
"ensa77@yahoo.com"
] | ensa77@yahoo.com |
9a324685e1167bb07e78868dd6ef4f71fd3a1487 | 24ef41c369162f4d7ec3576f2d6128c2cbc2598e | /pigpio_ranger.py | 8c78b2312c11c3fe0bbb1be5706a68c1347d6499 | [] | no_license | dausi15/WallFollower | 4303e9f0bdff4768349a3ae5fa0083c32402db61 | 2b3299548d72c385374ef8fdfd25012516ac9404 | refs/heads/master | 2020-09-24T06:48:35.489191 | 2019-12-03T15:51:37 | 2019-12-03T15:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,659 | py | import time
import pigpio
class ranger:
    """
    Sonar ranger driven through pigpio, for modules with separate trigger
    and echo pins.

    A pulse on the trigger initiates the sonar ping and shortly afterwards
    a sonar pulse is transmitted and the echo pin goes high.  The echo pin
    stays high until a sonar echo is received (or the response times out).
    The time between the high and low edges indicates the sonar round-trip
    time.
    """

    def __init__(self, pi, trigger, echo):
        """
        Instantiate with the pigpio.pi() instance to use and the GPIO
        numbers connected to the trigger and echo pins.
        """
        self.pi = pi
        self._trig = trigger
        self._echo = echo

        self._ping = False       # set by the callback when a reading completes
        self._high = None        # tick of the echo rising edge, if seen
        self._time = None        # last measured round-trip time (microseconds)
        self._triggered = False  # a trigger pulse has been sent

        # Remember the original pin modes so cancel() can restore them.
        self._trig_mode = pi.get_mode(self._trig)
        self._echo_mode = pi.get_mode(self._echo)

        pi.set_mode(self._trig, pigpio.OUTPUT)
        pi.set_mode(self._echo, pigpio.INPUT)

        # BUG FIX: the original assigned both callbacks to the same
        # attribute (self._cb), so the trigger callback could never be
        # cancelled and kept firing after cancel().  Keep one handle each.
        self._cb_trig = pi.callback(self._trig, pigpio.EITHER_EDGE, self._cbf)
        self._cb_echo = pi.callback(self._echo, pigpio.EITHER_EDGE, self._cbf)

        self._inited = True

    def _cbf(self, gpio, level, tick):
        """Edge callback shared by the trigger and echo GPIOs."""
        if gpio == self._trig:
            if level == 0:  # trigger sent
                self._triggered = True
                self._high = None
        else:
            if self._triggered:
                if level == 1:
                    self._high = tick
                else:
                    if self._high is not None:
                        self._time = tick - self._high
                        self._high = None
                        self._ping = True

    def read(self):
        """
        Trigger a reading and return the sonar round-trip time in
        microseconds (round trip cms = round trip time / 1000000.0 * 34030).
        Returns 20000 if no echo arrives within 5 seconds, or None if the
        ranger has been cancelled.
        """
        if self._inited:
            self._ping = False
            self.pi.gpio_trigger(self._trig)
            start = time.time()
            while not self._ping:
                if (time.time() - start) > 5.0:
                    return 20000
                time.sleep(0.001)
            return self._time
        else:
            return None

    def cancel(self):
        """Cancel the ranger and restore the GPIOs to their original modes."""
        if self._inited:
            self._inited = False
            # BUG FIX (see __init__): cancel *both* edge callbacks.
            self._cb_trig.cancel()
            self._cb_echo.cancel()
            self.pi.set_mode(self._trig, self._trig_mode)
            self.pi.set_mode(self._echo, self._echo_mode)
| [
"marcndkk@gmail.com"
] | marcndkk@gmail.com |
a2463c9f471f00343aeea8d410921d81c32a9458 | 4ab6c6037c8b0643f0052400ecc5e5776ae7d9b4 | /psd_test_code/psd_env/bin/f2py3 | d19ba86e97d56f6526daacadbe5df5f3e6943847 | [] | no_license | swarupovo/psd_generator | 315f10e11f148844fe6b553e8e21faee8f6c2c1d | 1cccd273d16ae19ddb90960aa41330ca8fad63e2 | refs/heads/master | 2020-06-15T23:10:50.887483 | 2019-07-05T13:57:36 | 2019-07-05T13:57:36 | 195,416,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | #!/home/webskitters/Desktop/certificate/psd_test_code/psd_env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for NumPy's f2py (Fortran-to-Python
# interface generator).  Created by the installer; not meant to be edited.
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.py"/".exe") from argv[0]
    # before delegating to the real entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"swarup.adhikary@webskitters.com"
] | swarup.adhikary@webskitters.com | |
c0a4b1ecee5eb7705fb4d6c81545e651d56f3071 | d36c4c882089b9b81e6e3b6323eeb9c43f5160a9 | /7KYU/Square Area Inside Circle/solution.py | dead9b201402be6e5751806d9e7f0d05e24b1f5d | [] | no_license | stuartstein777/CodeWars | a6fdc2fa6c4fcf209986e939698d8075345dd16f | d8b449a16c04a9b883c4b5e272cc90a4e6d8a2e6 | refs/heads/master | 2023-08-27T20:32:49.018950 | 2023-08-24T23:23:29 | 2023-08-24T23:23:29 | 233,281,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import math
def square_area_to_circle(size):
    """Given the area of a square, return (rounded to 8 decimal places) the
    area of the circle whose diameter equals the square's side length."""
    half_side = math.sqrt(size) / 2
    return round(math.pi * (half_side * half_side), 8)
"qmstuart@gmail.com"
] | qmstuart@gmail.com |
ef14e05b00b14f120326d7133682265e3176e41e | 93a613f09d564a1d45ecc01b54b73745ce2850b7 | /majora2/migrations/0023_biosampleartifact_secondary_accession.py | 0d98165508518f2dfdfd9b53251418ed78c4a31c | [] | no_license | pythseq/majora | fa17c77fa8a916c688fd2b40744d768dd851b99b | 40b918d32b4061cddee5f7279f97e70eb894623d | refs/heads/master | 2022-12-23T20:09:41.233844 | 2020-09-28T18:18:42 | 2020-09-28T18:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.2.10 on 2020-03-22 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable free-text `secondary_accession` field to
    BiosampleArtifact (auto-generated by Django's makemigrations)."""
    dependencies = [
        ('majora2', '0022_auto_20200322_1616'),
    ]
    operations = [
        migrations.AddField(
            model_name='biosampleartifact',
            name='secondary_accession',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
    ]
| [
"samstudio8@gmail.com"
] | samstudio8@gmail.com |
43255b07c55ba8aa33bb1da1bc2e3805ee24f9ec | 00518d41d893c016a81280f1a578ab36224cbd0c | /sqlEngine.py | 15e06f3bca1bcd63bdda2d7d7f33a16153984550 | [] | no_license | JahnaviN/miniSqlEngine | 0c4e488ca80ff28e8e344c0ae4ffd7672c7a55c7 | 3c183c1aa7672ac6e8c660202a8450420d65fb6c | refs/heads/master | 2021-01-10T16:31:00.520308 | 2015-12-11T21:14:39 | 2015-12-11T21:14:39 | 47,120,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,329 | py | # Syntax:
# select * from <tableName>
# select aggregate(column) from <tableName>
# select <colnames> from <tableName> [ colnames = seperated only by , and no extra spaces]
# select distinct(colName) from <tableName>
# select distinct <colnames> from <tableName>
# select <colNames> from <tableName> where <conditions> [ seperated by space Ex: a = 1 and b = 2]
# select * from <tableNames>
# select <colNames> from <tableNames>
# select <colnames> from <tableNames> where <join-condition>
import csv
import sys
import re
from collections import OrderedDict
def main():
    """Entry point: load table metadata, then run the query given as the
    first command-line argument (Python 2 script)."""
    dictionary = {}
    readMetadata(dictionary)
    processQuery(str(sys.argv[1]),dictionary)
def readMetadata(dictionary):
    """Populate `dictionary` with table -> [column names] from metadata.txt.

    The file delimits each table with <begin_table>/<end_table>; the first
    line after <begin_table> is the table name, the following lines (until
    <end_table>) are its column names.
    """
    f = open('./metadata.txt','r')
    check = 0
    for line in f:
        if line.strip() == "<begin_table>":
            check = 1
            continue
        if check == 1:
            # First line after <begin_table>: the table name.
            tableName = line.strip()
            dictionary[tableName] = [];
            check = 0
            continue
        if not line.strip() == '<end_table>':
            dictionary[tableName].append(line.strip());
def processQuery(query,dictionary):
    """Parse a SELECT query string and dispatch to the matching executor.

    Routing order: WHERE on one table -> processWhere; WHERE on several
    tables -> processWhereJoin; several tables -> join; DISTINCT column
    list -> distinctMany; a single "func(col)" column -> aggregate
    (which also handles distinct(col)); otherwise -> selectColumns.
    Exits the process on malformed syntax or unknown table names.
    """
    query = (re.sub(' +',' ',query)).strip();
    if "from" in query:
        obj1 = query.split('from');
    else:
        sys.exit("Incorrect Syntax")
    obj1[0] = (re.sub(' +',' ',obj1[0])).strip();
    if "select" not in obj1[0].lower():
        sys.exit("Incorrect Syntax")
    # Everything after "select " is the column list (possibly "distinct ...").
    object1 = obj1[0][7:]
    object1 = (re.sub(' +',' ',object1)).strip();
    l = []
    l.append("select")
    if "distinct" in object1 and "distinct(" not in object1:
        object1 = object1[9:]
        l.append("distinct")
    l.append(object1)
    object1 = l
    # select distinct List<colnames> from <table>
    object3 = ""
    if "distinct" in object1[1] and "distinct(" not in object1[1]:
        object3 = object1[1];
        object3 = (re.sub(' +',' ',object3)).strip()
        object1[1] = object1[2]
    colStr = object1[1];
    colStr = (re.sub(' +',' ',colStr)).strip()
    columnNames = colStr.split(',');
    for i in columnNames:
        columnNames[columnNames.index(i)] = (re.sub(' +',' ',i)).strip();
    obj1[1] = (re.sub(' +',' ',obj1[1])).strip();
    # Split the FROM clause into table list and optional WHERE condition.
    object2 = obj1[1].split('where');
    tableStr = object2[0]
    tableStr = (re.sub(' +',' ',tableStr)).strip();
    tableNames = tableStr.split(',')
    for i in tableNames:
        tableNames[tableNames.index(i)] = (re.sub(' +',' ',i)).strip();
    for i in tableNames:
        if i not in dictionary.keys():
            sys.exit("Table not found")
    if len(object2) > 1 and len(tableNames) == 1:
        object2[1] = (re.sub(' +',' ',object2[1])).strip();
        processWhere(object2[1],columnNames,tableNames,dictionary)
        return
    elif len(object2) > 1 and len(tableNames) > 1:
        object2[1] = (re.sub(' +',' ',object2[1])).strip();
        processWhereJoin(object2[1],columnNames,tableNames,dictionary)
        return
    if(len(tableNames) > 1):
        join(columnNames,tableNames,dictionary)
        return
    if object3 == "distinct":
        distinctMany(columnNames,tableNames,dictionary)
        return
    if len(columnNames) == 1:
        #aggregate -- Assuming (len(columnNames) == 1) i.e aggregate function
        for col in columnNames:
            if '(' in col and ')' in col:
                funcName = ""
                colName = ""
                a1 = col.split('(');
                funcName = (re.sub(' +',' ',a1[0])).strip()
                colName = (re.sub(' +',' ',a1[1].split(')')[0])).strip()
                aggregate(funcName,colName,tableNames[0],dictionary)
                return
            elif '(' in col or ')' in col:
                sys.exit("Syntax error")
    selectColumns(columnNames,tableNames,dictionary);
def processWhere(whereStr,columnNames,tableNames,dictionary):
    """Single-table SELECT with a WHERE clause: print matching rows.

    The condition is rebuilt per row by evaluate() and run through eval().
    SECURITY NOTE(review): eval() on a user-supplied WHERE string executes
    arbitrary Python -- acceptable only for a trusted course assignment.
    """
    a = whereStr.split(" ")
    # print a
    if(len(columnNames) == 1 and columnNames[0] == '*'):
        columnNames = dictionary[tableNames[0]]
    printHeader(columnNames,tableNames,dictionary)
    tName = tableNames[0] + '.csv'
    fileData = []
    readFile(tName,fileData)
    check = 0
    for data in fileData:
        string = evaluate(a,tableNames,dictionary,data)
        for col in columnNames:
            if eval(string):
                check = 1
                print data[dictionary[tableNames[0]].index(col)],
        if check == 1:
            check = 0
            print
def evaluate(a,tableNames,dictionary,data):
    """Build a Python boolean expression string for one data row.

    Each WHERE token is mapped: '=' -> '==', a known column name -> that
    row's value, and/or -> lowercased operators; anything else is copied
    verbatim.  The caller eval()s the result.
    """
    string = ""
    for i in a:
        # print i
        if i == '=':
            string += i*2
        elif i in dictionary[tableNames[0]] :
            string += data[dictionary[tableNames[0]].index(i)]
        elif i.lower() == 'and' or i.lower() == 'or':
            string += ' ' + i.lower() + ' '
        else:
            string += i
    # print string
    return string
def processWhereJoin(whereStr,columnNames,tableNames,dictionary):
    """Two-table SELECT with a WHERE (join) condition: print matching rows.

    Builds the cartesian product of the two tables, registers temporary
    schema entries "sample" (qualified names) and "test" (bare names) in
    `dictionary`, then filters rows via evaluate()/eval().
    NOTE(review): "test" is never deleted afterwards (only "sample" is),
    and tableNames is mutated in place -- confirm callers tolerate this.
    """
    tableNames.reverse()
    l1 = []
    l2 = []
    readFile(tableNames[0] + '.csv',l1)
    readFile(tableNames[1] + '.csv',l2)
    fileData = []
    for item1 in l1:
        for item2 in l2:
            fileData.append(item2 + item1)
    # dictionary["sample"] = dictionary[b] + dictionary[a]
    dictionary["sample"] = []
    for i in dictionary[tableNames[1]]:
        dictionary["sample"].append(tableNames[1] + '.' + i)
    for i in dictionary[tableNames[0]]:
        dictionary["sample"].append(tableNames[0] + '.' + i)
    dictionary["test"] = dictionary[tableNames[1]] + dictionary[tableNames[0]]
    tableNames.remove(tableNames[0])
    tableNames.remove(tableNames[0])
    tableNames.insert(0,"sample")
    if(len(columnNames) == 1 and columnNames[0] == '*'):
        columnNames = dictionary[tableNames[0]]
    # print header
    for i in columnNames:
        print i,
    print
    a = whereStr.split(" ")
    # check = 0
    # for data in fileData:
    #     string = evaluate(a,tableNames,dictionary,data)
    #     for col in columnNames:
    #         if eval(string):
    #             check = 1
    #             print data[dictionary[tableNames[0]].index(col)],
    #     if check == 1:
    #         check = 0
    #         print
    check = 0
    for data in fileData:
        string = evaluate(a,tableNames,dictionary,data)
        for col in columnNames:
            if eval(string):
                check = 1
                if '.' in col:
                    print data[dictionary[tableNames[0]].index(col)],
                else:
                    print data[dictionary["test"].index(col)],
        if check == 1:
            check = 0
            print
    del dictionary['sample']
def selectColumns(columnNames,tableNames,dictionary):
    """Plain single-table SELECT: validate the columns and print every row.

    '*' expands to all columns of the first table; unknown columns exit.
    """
    if len(columnNames) == 1 and columnNames[0] == '*':
        columnNames = dictionary[tableNames[0]]
    for i in columnNames:
        if i not in dictionary[tableNames[0]]:
            sys.exit("error")
    printHeader(columnNames,tableNames,dictionary)
    tName = tableNames[0] + '.csv'
    fileData = []
    readFile(tName,fileData)
    printData(fileData,columnNames,tableNames,dictionary)
def aggregate(func,columnName,tableName,dictionary):
    """Apply max/min/sum/avg/distinct to one integer column and print it.

    The column values are parsed as int; avg uses integer division
    (Python 2 `/`).  Unknown function names print an error message.
    """
    if columnName == '*':
        sys.exit("error")
    if columnName not in dictionary[tableName]:
        sys.exit("error")
    tName = tableName + '.csv'
    fileData = []
    readFile(tName,fileData)
    colList = []
    for data in fileData:
        colList.append(int(data[dictionary[tableName].index(columnName)]))
    if func.lower() == 'max':
        print max(colList)
    elif func.lower() == 'min':
        print min(colList)
    elif func.lower() == 'sum':
        print sum(colList)
    elif func.lower() == 'avg':
        print sum(colList)/len(colList)
    elif func.lower() == 'distinct':
        distinct(colList,columnName,tableName,dictionary);
    else :
        print "ERROR"
        print "Unknown function : ", '"' + func + '"'
def distinct(colList,columnName,tableName,dictionary):
    """Print the distinct values of one column, preserving first-seen order
    (OrderedDict.fromkeys de-duplicates without re-sorting)."""
    print "OUTPUT :"
    string = tableName + '.' + columnName
    print string
    colList = list(OrderedDict.fromkeys(colList))
    for col in range(len(colList)):
        print colList[col]
def distinctMany(columnNames,tableNames,dictionary):
    """SELECT DISTINCT over several columns: print each not-yet-seen value.

    NOTE(review): `temp` tracks individual cell values (not whole rows), so
    this de-duplicates per value rather than per row combination -- confirm
    that is the intended DISTINCT semantics.
    """
    printHeader(columnNames,tableNames,dictionary)
    temp = []
    check = 0
    for tab in tableNames:
        tName = tab + '.csv'
        with open(tName,'rb') as f:
            reader = csv.reader(f)
            for row in reader:
                for col in columnNames:
                    x = row[dictionary[tableNames[0]].index(col)]
                    if x not in temp:
                        temp.append(x)
                        check =1
                        print x,
                if check == 1 :
                    check = 0
                    print
def join(columnNames,tableNames,dictionary):
    """Unconditioned multi-table SELECT: print the cartesian product.

    Registers temporary schema entries "sample" (qualified column names)
    and "test" (bare names) in `dictionary`, mirroring processWhereJoin().
    NOTE(review): unlike processWhereJoin(), neither temporary key is
    removed afterwards (the cleanup below is commented out).
    """
    tableNames.reverse()
    l1 = []
    l2 = []
    readFile(tableNames[0] + '.csv',l1)
    readFile(tableNames[1] + '.csv',l2)
    fileData = []
    for item1 in l1:
        for item2 in l2:
            fileData.append(item2 + item1)
    # dictionary["sample"] = dictionary[b] + dictionary[a]
    dictionary["sample"] = []
    for i in dictionary[tableNames[1]]:
        dictionary["sample"].append(tableNames[1] + '.' + i)
    for i in dictionary[tableNames[0]]:
        dictionary["sample"].append(tableNames[0] + '.' + i)
    dictionary["test"] = dictionary[tableNames[1]] + dictionary[tableNames[0]]
    # print dictionary["test"]
    tableNames.remove(tableNames[0])
    tableNames.remove(tableNames[0])
    tableNames.insert(0,"sample")
    if(len(columnNames) == 1 and columnNames[0] == '*'):
        columnNames = dictionary[tableNames[0]]
    # print header
    for i in columnNames:
        print i,
    print
    # printData(fileData,columnNames,tableNames,dictionary)
    for data in fileData:
        for col in columnNames:
            if '.' in col:
                print data[dictionary[tableNames[0]].index(col)],
            else:
                print data[dictionary["test"].index(col)],
        print
    # del dictionary[tableNames[0]]
def printHeader(columnNames,tableNames,dictionary):
    """Print "OUTPUT :" and a comma-separated header of table.column names,
    qualifying each requested column with the table that owns it."""
    print "OUTPUT : "
    # Table headers
    string = ""
    for col in columnNames:
        for tab in tableNames:
            if col in dictionary[tab]:
                if not string == "":
                    string += ','
                string += tab + '.' + col
    print string
def printData(fileData,columnNames,tableNames,dictionary):
    """Print the requested columns of every row, one row per line."""
    for data in fileData:
        for col in columnNames:
            print data[dictionary[tableNames[0]].index(col)],
        print
def readFile(tName,fileData):
    """Append every CSV row of file `tName` to the `fileData` list
    (opened in binary mode, as Python 2's csv module expects)."""
    with open(tName,'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            fileData.append(row)
# Script entry point: usage  python sqlEngine.py "<query>"
if __name__ == "__main__":
    main()
"jahnavi@Jahnavi.Nukireddy"
] | jahnavi@Jahnavi.Nukireddy |
a4192251a1f0165bc9861caa80f4688fd57d879e | 3b81dfbacf97918d36fb5accbcef0b610378e1a8 | /python-basic/item/shoot/02-老蒋开枪设计类,创建对象.py | 141feaad46a116d7abd5069ed6c48ff39f865cf1 | [] | no_license | XiaoFei-97/the-way-to-python | 11706f0845f56246ba8ea0df8ff34e622bbdad2d | 3667a24f4f4238998e9c6ed42cdc49c68881a529 | refs/heads/master | 2020-03-21T06:46:36.939073 | 2018-06-23T03:51:11 | 2018-06-23T03:51:11 | 138,241,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | class Person(object):
"""人的类"""
def __init__(self,name):
super(Person,self).__init__()
self.name = name
class Gun(object):
"""枪的类"""
def __init__(self,name):
super(Gun,self).__init__()
self.name = name #用来记录枪的类型
class Danjia(object):
"""弹夹的类"""
def __init__(self,max_num):
super(Gun,self).__init__()
self.max_num = max_num #用来录弹夹的容量
class Zidan(object):
"""子弹的类"""
def __init__(self,shanghai):
super(Zidan,self).__init__()
self.shanghai = shanghai #用来记录子弹的杀伤力
def main():
    '''Drives the overall program flow (stub: steps 2 and 6-9 below are not implemented yet)'''
    pass
    # 1. create the shooter object
    laojiang = Person("老蒋")
    # 2. create an enemy
    # 3. create a bullet object
    zidan = Zidan(20)
    # 4. create a magazine object
    danjia = Danjia(30)
    # 5. create the gun object
    ak47 = Gun("AK47")
    # 6. load the bullet into the magazine
    # 7. load the magazine into the gun
    # 8. the shooter picks up the gun
    # 9. the shooter fires at the enemy
# BUG FIX: the original read `if __name__="__main__":` -- assignment instead
# of comparison, which is a SyntaxError and made the whole file unparseable.
if __name__ == "__main__":
    main()
| [
"jack_970124@163.com"
] | jack_970124@163.com |
96781964961a6b8473dc819f30a615209b263664 | 82fbbcef99c345d7c7acae5c6e5a2f01eea956bf | /sif_embedding_perso.py | 956e1a0fd8e1b7a3fd3d68cd2be465498738d1a2 | [
"MIT"
] | permissive | woctezuma/steam-descriptions | 06ae1cc602f06fbb789e55297d69eebbed265c99 | 16e694dfa565dd84acf1f5007bb8dde90f45a2a8 | refs/heads/master | 2023-08-09T22:19:04.354387 | 2023-02-06T14:03:43 | 2023-02-06T14:03:43 | 189,773,250 | 1 | 0 | MIT | 2023-09-14T16:24:17 | 2019-06-01T20:06:35 | Python | UTF-8 | Python | false | false | 11,203 | py | # Objective: learn a Word2Vec model, then build a sentence embedding based on a weighted average of word embeddings.
# References:
# [1] Sanjeev Arora, Yingyu Liang, Tengyu Ma, "A Simple but Tough-to-Beat Baseline for Sentence Embeddings", 2016.
# [2] Jiaqi Mu, Pramod Viswanath, All-but-the-Top: Simple and Effective Postprocessing for Word Representations, 2018.
import logging
import math
import multiprocessing
import random
import numpy as np
import spacy
from gensim.corpora import Dictionary
from gensim.models import Word2Vec
from benchmark_utils import load_benchmarked_app_ids, print_ranking
from hard_coded_ground_truth import compute_retrieval_score, plot_retrieval_scores
from sentence_models import filter_out_words_not_in_vocabulary
from SIF_embedding import remove_pc
from steam_spy_based_ground_truth import (
compute_retrieval_score_based_on_sharing_genres,
compute_retrieval_score_based_on_sharing_tags,
)
from universal_sentence_encoder import perform_knn_search_with_app_ids_as_input
from utils import load_game_names, load_tokens
def retrieve_similar_store_descriptions(
    compute_from_scratch=True,
    use_unit_vectors=False,
    alpha=1e-3,  # in SIF weighting scheme, parameter in the range [3e-5, 3e-3]
    num_removed_components_for_sentence_vectors=0,  # in SIF weighting scheme
    pre_process_word_vectors=False,
    num_removed_components_for_word_vectors=0,
    count_words_out_of_vocabulary=True,
    use_idf_weights=True,
    shuffle_corpus=True,
    use_glove_with_spacy=True,
    use_cosine_similarity=True,
    num_neighbors=10,
    no_below=5,  # only relevant with Word2Vec
    no_above=0.5,  # only relevant with Word2Vec
    only_print_banners=True,
):
    """Embed every Steam store description as a weighted average of word
    vectors (SIF-style), then retrieve the nearest neighbors of benchmark
    appIDs and score the retrieval.

    When `compute_from_scratch` is True the sentence-embedding matrix is
    built (from self-trained Word2Vec or spaCy GloVe vectors, per
    `use_glove_with_spacy`) and cached in 'data/X.npy'; otherwise that file
    is loaded.  Returns a tuple (retrieval_score, retrieval_score_by_genre,
    retrieval_score_by_tag).
    """
    logging.basicConfig(
        format='%(asctime)s : %(levelname)s : %(message)s',
        level=logging.INFO,
    )

    game_names, _ = load_game_names(include_genres=False, include_categories=False)

    steam_tokens = load_tokens()

    documents = list(steam_tokens.values())

    if shuffle_corpus:
        # Useful for Doc2Vec in 'doc2vec_model.py'. It might be useful for other methods.
        random.shuffle(documents)

    if compute_from_scratch:
        if not use_glove_with_spacy:
            # Use self-trained Word2Vec vectors
            dct = Dictionary(documents)
            print(f'Dictionary size (before trimming): {len(dct)}')

            dct.filter_extremes(no_below=no_below, no_above=no_above)
            print(f'Dictionary size (after trimming): {len(dct)}')

            model = Word2Vec(documents, workers=multiprocessing.cpu_count())

            wv = model.wv

        else:
            # Use pre-trained GloVe vectors loaded from spaCy
            # Reference: https://spacy.io/models/en#en_vectors_web_lg
            spacy_model_name = (
                'en_vectors_web_lg'  # either 'en_core_web_lg' or 'en_vectors_web_lg'
            )
            nlp = spacy.load(spacy_model_name)
            wv = nlp.vocab

        if pre_process_word_vectors:
            # Jiaqi Mu, Pramod Viswanath, All-but-the-Top: Simple and Effective Postprocessing for Word Representations,
            # in: ICLR 2018 conference.
            # Reference: https://openreview.net/forum?id=HkuGJ3kCb

            if use_glove_with_spacy:
                wv.vectors.data -= np.array(wv.vectors.data).mean(axis=0)

                if num_removed_components_for_word_vectors > 0:
                    wv.vectors.data = remove_pc(
                        wv.vectors.data,
                        npc=num_removed_components_for_word_vectors,
                    )

            else:
                wv.vectors -= np.array(wv.vectors).mean(axis=0)

                if num_removed_components_for_word_vectors > 0:
                    wv.vectors = remove_pc(
                        wv.vectors,
                        npc=num_removed_components_for_word_vectors,
                    )

                wv.init_sims()

        if use_unit_vectors and not use_glove_with_spacy:
            # Pre-computations of unit word vectors, which replace the unnormalized word vectors. A priori not required
            # here, because another part of the code takes care of it. A fortiori not required when using spaCy.
            wv.init_sims(
                replace=True,
            )  # TODO IMPORTANT choose whether to normalize vectors

        index2word_set = set(wv.index2word) if not use_glove_with_spacy else None

        num_games = len(steam_tokens)

        word_counter = {}
        document_per_word_counter = {}

        counter = 0
        # First pass over the corpus: word frequencies and document counts,
        # needed for the IDF / SIF weights below.
        for app_id in steam_tokens:
            counter += 1

            if (counter % 1000) == 0:
                print(
                    '[{}/{}] appID = {} ({})'.format(
                        counter,
                        num_games,
                        app_id,
                        game_names[app_id],
                    ),
                )

            reference_sentence = steam_tokens[app_id]
            if not count_words_out_of_vocabulary:
                # This has an impact on the value of 'total_counter'.
                reference_sentence = filter_out_words_not_in_vocabulary(
                    reference_sentence,
                    index2word_set,
                    wv,
                )

            for word in reference_sentence:
                try:
                    word_counter[word] += 1
                except KeyError:
                    word_counter[word] = 1

            for word in set(reference_sentence):
                try:
                    document_per_word_counter[word] += 1
                except KeyError:
                    document_per_word_counter[word] = 1

        total_counter = sum(word_counter.values())

        # Inverse Document Frequency (IDF)
        idf = {}
        for word in document_per_word_counter:
            idf[word] = math.log(
                (1 + num_games) / (1 + document_per_word_counter[word]),
            )

        # Word frequency. Caveat: over the whole corpus!
        word_frequency = {}
        for word in word_counter:
            word_frequency[word] = word_counter[word] / total_counter

        sentence_vector = {}

        if not use_glove_with_spacy:
            word_vector_length = wv.vector_size
        else:
            word_vector_length = wv.vectors_length

        X = np.zeros([num_games, word_vector_length])

        counter = 0
        # Second pass: build one weighted-average sentence vector per game.
        for i, app_id in enumerate(steam_tokens.keys()):
            counter += 1

            if (counter % 1000) == 0:
                print(
                    '[{}/{}] appID = {} ({})'.format(
                        counter,
                        num_games,
                        app_id,
                        game_names[app_id],
                    ),
                )

            reference_sentence = steam_tokens[app_id]
            num_words_in_reference_sentence = len(reference_sentence)

            reference_sentence = filter_out_words_not_in_vocabulary(
                reference_sentence,
                index2word_set,
                wv,
            )
            if not count_words_out_of_vocabulary:
                # NB: Out-of-vocabulary words are not counted in https://stackoverflow.com/a/35092200
                num_words_in_reference_sentence = len(reference_sentence)

            weighted_vector = np.zeros(word_vector_length)

            for word in reference_sentence:
                if use_idf_weights:
                    weight = idf[word]
                else:
                    weight = alpha / (alpha + word_frequency[word])

                # TODO IMPORTANT Why use the normalized word vectors instead of the raw word vectors?
                if not use_glove_with_spacy:
                    if use_unit_vectors:
                        # Reference: https://github.com/RaRe-Technologies/movie-plots-by-genre
                        word_vector = wv.vectors_norm[wv.vocab[word].index]
                    else:
                        word_vector = wv.vectors[wv.vocab[word].index]
                else:
                    word_vector = wv.get_vector(word)
                    if use_unit_vectors:
                        word_vector_norm = wv[word].vector_norm
                        if word_vector_norm > 0:
                            word_vector = word_vector / word_vector_norm

                weighted_vector += weight * word_vector

            if len(reference_sentence) > 0:
                sentence_vector[app_id] = (
                    weighted_vector / num_words_in_reference_sentence
                )
            else:
                sentence_vector[app_id] = weighted_vector

            X[i, :] = sentence_vector[app_id]

        # Reference: https://stackoverflow.com/a/11620982
        X = np.where(np.isfinite(X), X, 0)

        print('Saving the sentence embedding.')
        np.save('data/X.npy', X)

    else:
        print('Loading the sentence embedding.')
        X = np.load('data/X.npy', mmap_mode='r')

    if num_removed_components_for_sentence_vectors > 0:
        X = remove_pc(X, npc=num_removed_components_for_sentence_vectors)

    app_ids = [int(app_id) for app_id in steam_tokens]

    query_app_ids = load_benchmarked_app_ids(append_hard_coded_app_ids=True)

    matches_as_app_ids = perform_knn_search_with_app_ids_as_input(
        query_app_ids,
        label_database=X,
        app_ids=app_ids,
        use_cosine_similarity=use_cosine_similarity,
        num_neighbors=num_neighbors,
    )

    print_ranking(
        query_app_ids,
        matches_as_app_ids,
        num_elements_displayed=num_neighbors,
        only_print_banners=only_print_banners,
    )

    retrieval_score = compute_retrieval_score(
        query_app_ids,
        matches_as_app_ids,
        num_elements_displayed=num_neighbors,
        verbose=False,
    )

    retrieval_score_by_genre = compute_retrieval_score_based_on_sharing_genres(
        query_app_ids,
        matches_as_app_ids,
        num_elements_displayed=num_neighbors,
        verbose=False,
    )

    retrieval_score_by_tag = compute_retrieval_score_based_on_sharing_tags(
        query_app_ids,
        matches_as_app_ids,
        num_elements_displayed=num_neighbors,
        verbose=False,
    )

    return retrieval_score, retrieval_score_by_genre, retrieval_score_by_tag
def main():
    """Build the cached embedding once, then sweep the number of removed
    sentence components (0, 5, 10, 15) and plot the retrieval scores."""
    # Initialize 'data/X.npy'
    retrieve_similar_store_descriptions(compute_from_scratch=True)

    # Try different values for the number of sentence components to remove.
    # NB: 'data/X.npy' will be read from the disk, which avoids redundant computations.
    scores = {}
    genre_scores = {}
    tag_scores = {}
    for i in range(0, 20, 5):
        print(f'num_removed_components_for_sentence_vectors = {i}')
        scores[i], genre_scores[i], tag_scores[i] = retrieve_similar_store_descriptions(
            compute_from_scratch=False,
            num_removed_components_for_sentence_vectors=i,
        )
    print(scores)
    print(genre_scores)
    print(tag_scores)

    plot_retrieval_scores(scores)
    plot_retrieval_scores(genre_scores)
    plot_retrieval_scores(tag_scores)

    return
if __name__ == '__main__':
main()
| [
"woctezuma@users.noreply.github.com"
] | woctezuma@users.noreply.github.com |
aee8fe622623403d160b84a20a514c268c8c9447 | 8a36c91678850c0563e4b8afdf4b2f37dce61f86 | /nupack_utils.py | d2dab0e777ea13fae7fe9557e4e873312c080132 | [] | no_license | zchen15/pseudoknot_scanner | d245bb88c24991a8a8f48bf747342525de9de860 | 4e0e092fe24be2a1cfab75efbf563b5c43994dc4 | refs/heads/main | 2023-01-01T21:22:21.478499 | 2020-10-24T04:47:24 | 2020-10-24T04:47:24 | 306,806,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,430 | py | """
2016-02-11, Zhewei Chen
Updated nupack_utils.py with wrapper for complexes executable
This file contains utility functions to enable easy interfacing
between Python and calls to the NUPACK core executables. It additionally
contains utility scripts for converting structures from dot-paren
notation to pair lists.
"""
from __future__ import print_function # for Python 2/3 compatibility
import os
import subprocess
import time
import warnings
import numpy as np
from scipy import sparse
import pandas as pd
# ############################################################### #
def energy(sequence, structure, T=37.0, material='rna1995', prefix=None, NUPACKHOME=None, delete_files=True):
    """
    Return the free energy (kcal/mol) of `sequence` folded into `structure`.

    Arguments:
    sequence -- string of nucleic acid base codes
    structure -- dot-paren string of the same length as sequence

    Keyword Arguments:
    T -- temperature in degrees Celsius (default 37.0)
    material -- parameter set name (default 'rna1995')
    prefix -- file prefix used for the executable's I/O files (default None)
    NUPACKHOME -- location of the NUPACK installation (default None)
    delete_files -- remove prefix.in / prefix.out afterwards (default True)

    Return:
    The energy of the sequence in the given secondary structure.
    """
    # Validate / normalize the inputs.
    home = check_nupackhome(NUPACKHOME)
    material = check_material(material)
    prefix = check_prefix(prefix, 'energy')
    in_path = prefix + '.in'
    out_path = prefix + '.out'

    # The executable expects a two-line input: sequence, then structure.
    with open(in_path, 'w') as infile:
        infile.write('%s\n%s\n' % (sequence, structure))

    # Run NUPACK's `energy`, capturing stdout into the output file.
    cmd = [home + '/bin/energy',
           '-T', str(T),
           '-material', material,
           prefix]
    with open(out_path, 'w') as outfile:
        subprocess.check_call(cmd, stdout=outfile)

    # The output file contains the energy value plus '%'-prefixed comments.
    result = float(np.loadtxt(out_path, comments='%'))

    if delete_files:
        subprocess.check_call(['rm', '-f', in_path, out_path])

    return result
# ############################################################### #
# ############################################################### #
def prob(sequence, structure, T=37.0, material='rna1995', prefix=None, NUPACKHOME=None, delete_files=True):
    """
    Calculate the probability of a nucleic acid sequence adopting a particular
    secondary structure.

    Arguments:
    sequence -- A string of nucleic acid base codes
    structure -- A string in dot-paren format of the same length as sequence

    Keyword Arguments:
    T -- Temperature in degrees Celsius (default 37.0)
    material -- The material parameters file to use (default 'rna1995')
    prefix -- The file prefix to be used when calling the prob executable
              (default None)
    NUPACKHOME -- The file location of the NUPACK installation (default None)
    delete_files -- A flag to remove the prefix.out and prefix.in files (default
                    True)

    Return:
    The probability of the nucleic acid sequence adopting the given secondary
    structure

    Raises:
    subprocess.CalledProcessError if the NUPACK executable exits nonzero.
    """
    # Validate inputs and pick unique scratch-file names
    nupack_home = check_nupackhome(NUPACKHOME)
    material = check_material(material)
    prefix = check_prefix(prefix, 'prob')
    input_file = prefix + '.in'
    output_file = prefix + '.out'
    # Write the two-line input file NUPACK's prob executable expects
    # (context manager replaces the original bare open/close pair)
    with open(input_file, 'w') as f:
        f.write('%s\n%s\n' % (sequence, structure))
    # Run NUPACK's prob executable, capturing stdout in the output file.
    # The redundant close() that followed the with-block has been removed.
    args = [nupack_home + '/bin/prob',
            '-T', str(T),
            '-material', material,
            prefix]
    with open(output_file, 'w') as outfile:
        subprocess.check_call(args, stdout=outfile)
    # Parse the single probability value ('%' marks NUPACK comment lines)
    pr = float(np.loadtxt(output_file, comments='%'))
    # Remove scratch files if requested; os.remove is portable, unlike
    # shelling out to 'rm' (both files were just created, so they exist)
    if delete_files:
        os.remove(input_file)
        os.remove(output_file)
    return pr
# ############################################################### #
# ############################################################### #
def mfe(sequence, T=37.0, material='rna1995', prefix=None, NUPACKHOME=None, delete_files=True):
    """
    Calculate the MFE and MFE structure of a nucleic acid sequence.

    Argument:
    sequence -- A string of nucleic acid base codes

    Keyword Arguments:
    T -- Temperature in degrees Celsius (default 37.0)
    material -- The material parameters file to use (default 'rna1995')
    prefix -- The file prefix to be used when calling the mfe executable
              (default None)
    NUPACKHOME -- The file location of the NUPACK installation (default None)
    delete_files -- A flag to remove the prefix.mfe and prefix.in files
                    (default True)

    Return:
    A (structure, energy) tuple: the MFE structure in dot-paren format and the
    MFE in kcal/mol. If there are multiple MFE structures, only one is
    returned.
    """
    # Check all inputs
    nupack_home = check_nupackhome(NUPACKHOME)
    material = check_material(material)
    prefix = check_prefix(prefix, 'mfe')
    input_file = prefix + '.in'
    output_file = prefix + '.mfe'
    # Make the input file (a single line holding the sequence)
    f = open(input_file, 'w')
    f.write(sequence + '\n')
    f.close()
    # Run NUPACK's mfe executable; it writes its result to prefix.mfe
    args = [nupack_home + '/bin/mfe',
            '-T', str(T),
            '-material', material,
            prefix]
    subprocess.check_call(args)
    # Parse the output. Rows 1 and 2 of the non-comment lines hold the MFE
    # and the dot-paren structure (row 0 is presumably the sequence length
    # -- confirm against the NUPACK .mfe file format).
    # NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed
    # in 2.0 (replacement: on_bad_lines='skip'); confirm the pinned pandas
    # version before upgrading.
    df = pd.read_csv(output_file, header=None, comment='%', names=['col'],
                     error_bad_lines=False)
    en = float(df.col[1])
    struct = df.col[2]
    # Remove files if requested ('rm -f' tolerates already-missing files)
    if delete_files:
        subprocess.check_call(['rm', '-f', input_file, output_file])
    return struct, en
# ############################################################### #
# ############################################################### #
def pairs(sequence, T=37.0, material='rna1995', prefix=None, NUPACKHOME=None, delete_files=True):
    """
    Calculate the probabilities of all possible base pairs of a nucleic acid
    sequence over the ensemble of unpseudoknotted structures.

    Argument:
    sequence -- A string of nucleic acid base codes

    Keyword Arguments:
    T -- Temperature in degrees Celsius (default 37.0)
    material -- The material parameters file to use (default 'rna1995')
    prefix -- The file prefix to be used when calling the pairs executable
              (default None)
    NUPACKHOME -- The file location of the NUPACK installation (default None)
    delete_files -- A flag to remove the prefix.ppairs and prefix.in files
                    (default True)

    Return:
    The pair probabilities matrix right-augmented with the unpaired
    probabilities.

    Notes:
    Pseudoknots are not allowed.
    Pair probability matrices are symmetric, by definition, except for the
    unpaired probability column.
    """
    # Check all inputs.
    # BUGFIX: the auto-generated prefix was labeled 'mfe' (copy-paste from
    # mfe()); scratch files for this calculation are now labeled 'pairs'.
    nupack_home = check_nupackhome(NUPACKHOME)
    material = check_material(material)
    prefix = check_prefix(prefix, 'pairs')
    input_file = prefix + '.in'
    output_file = prefix + '.ppairs'
    # Make the input file (a single line holding the sequence)
    with open(input_file, 'w') as f:
        f.write(sequence + '\n')
    # Run NUPACK's pairs executable; it writes its result to prefix.ppairs
    args = [nupack_home + '/bin/pairs',
            '-T', str(T),
            '-material', material,
            prefix]
    subprocess.check_call(args)
    # Parse the output
    df = pd.read_csv(output_file, header=None, comment='%',
                     names=['i', 'j', 'p'], delimiter='\t')
    # Build a sparse pair probabilities matrix with indexing starting at 0.
    # Row 0 of the table carries the number of bases in column i; rows 1 and
    # up are (i, j, probability) triples with 1-based indices.
    P = sparse.csc_matrix((df.p[1:], (df.i[1:] - 1, df.j[1:] - 1)),
                          shape=(int(df.i[0]), int(df.i[0])+1))
    # Fill in lower triangle (ignore sparse efficiency warning)
    # Note: don't have to worry about diagonal; it's necessarily zero
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        P[:, :-1] = P[:, :-1] + P[:, :-1].transpose()
    # Remove scratch files if requested; os.remove is portable, unlike
    # shelling out to 'rm' (both files were just created, so they exist)
    if delete_files:
        os.remove(input_file)
        os.remove(output_file)
    return np.array(P.todense())
# ############################################################### #
# ############################################################### #
def dotparens_2_pairlist(structure):
    """
    Convert a dot-paren structure into a list of pairs called pairlist
    and an array, plist, whose entry at index i is the base paired to base i.

    Argument:
    structure -- A string in dot-paren format

    Return:
    pairlist -- An array of ordered pairs for each base pair
    plist -- An array such that plist[i] is paired with i

    Notes:
    Array indexing in Python is zero-based.
    plist[i] is -1 if base i is unpaired.
    This only works for single-stranded structures, and not complexes with
    multiple strands.

    Example:
    structure = '.(((...)))'
    pairlist = np.array([[1,9],[2,8],[3,7]])
    plist = np.array([-1,9,8,7,-1,-1,-1,3,2,1])
    """
    pairlist = []
    # Stack of indices of currently-unmatched left brackets. Each right
    # bracket necessarily pairs with the most recent unmatched left bracket,
    # so pushing on '(' and popping on ')' recovers every pair.
    leftlist = []
    for ind, char in enumerate(structure):
        if char == '(':
            leftlist.append(ind)
        elif char == ')':
            # pop() removes the matching bracket (the LAST stack entry) in
            # O(1); the original remove(leftlist[-1]) scanned the whole
            # stack and its comment wrongly claimed the FIRST entry was cut.
            pairlist.append([leftlist.pop(), ind])
    pairlist.sort()
    # Build plist: -1 marks unpaired bases, otherwise the partner's index
    plist = [-1] * len(structure)
    for left, right in pairlist:
        plist[left] = right
        plist[right] = left
    return np.array(pairlist), np.array(plist)
# ############################################################### #
# ############################################################### #
def complexes(sequence, max_complex_size, Pairs=False, Ordered=False, custom_complex=None, T=37.0, material='rna1995', prefix=None, NUPACKHOME=None, delete_files=True):
    """
    Calculate the partition functions of all strand complexes up to a
    specified size.

    Arguments:
    sequence -- array of strings containing sequences of nucleic acid base codes
    max_complex_size -- max size each strand will bind to each other.
                        Dimer = 2, Trimer = 3, etc...

    Keyword Arguments:
    Pairs -- also compute base-pair probabilities (default False)
    Ordered -- distinguish ordered complexes (default False)
    custom_complex -- array of strings specifying additional custom complexes
                      to calculate (default None)
    T -- Temperature in degrees Celsius (default 37.0)
    material -- The material parameters file to use (default 'rna1995')
    prefix -- The file prefix to be used when calling the complexes executable
              (default None)
    NUPACKHOME -- The file location of the NUPACK installation (default None)
    delete_files -- A flag to remove the NUPACK input/output files
                    (default True)

    Return:
    out_cx -- a 2D array of complex free energies. When both Pairs and
              Ordered are set, also returns out_pp (per-complex pair
              probability arrays) and out_key (the ordered-complex key).
    """
    # Check all inputs and derive the scratch-file names NUPACK will use
    nupack_home = check_nupackhome(NUPACKHOME)
    material = check_material(material)
    prefix = check_prefix(prefix, 'complexes')
    input_file = prefix + '.in'
    input_file2 = prefix + '.list'
    output_file = prefix + '.cx'
    output_file2 = prefix + '.cx-epairs'
    output_file3 = prefix + '.ocx'
    output_file4 = prefix + '.ocx-epairs'
    output_file5 = prefix + '.ocx-key'
    output_file6 = prefix + '.ocx-ppairs'
    # Write input sequences: strand count, one sequence per line, then the
    # maximum complex size (context managers replace bare open/close pairs)
    with open(input_file, 'w') as f:
        f.write(str(len(sequence)) + '\n')
        for strand in sequence:
            f.write(strand + '\n')
        f.write(str(max_complex_size) + '\n')
    # Write the .list file for custom complexes (always created, possibly
    # empty, matching the original behavior).
    # BUGFIX: compare against None with 'is not', not '!='.
    with open(input_file2, 'w') as f:
        if custom_complex is not None:
            for entry in custom_complex:
                f.write(entry + '\n')
    # Run NUPACK's complexes executable with flags matching the request
    if Pairs and Ordered:
        args = [nupack_home + '/bin/complexes','-T', str(T), '-material', material,'-dangles','none','-ordered','-pairs', prefix]
    elif Pairs:
        args = [nupack_home + '/bin/complexes','-T', str(T), '-material', material, '-pairs', prefix]
    else:
        args = [nupack_home + '/bin/complexes','-T', str(T), '-material', material,'-dangles','none', prefix]
    subprocess.check_call(args)
    # Parse the output files for data
    out_cx = BuildBlockArrayByFile(output_file, len(sequence))
    if Pairs and Ordered:
        out_key, out_pp = getCXPPairs(prefix, sequence)
    # Remove scratch files if requested. Not every output exists for every
    # flag combination, so check before removing (matches 'rm -f' semantics
    # while staying portable).
    if delete_files:
        for path in (input_file, input_file2, output_file, output_file2,
                     output_file3, output_file4, output_file5, output_file6):
            if os.path.isfile(path):
                os.remove(path)
    if Pairs and Ordered:
        return out_cx, out_pp, out_key
    else:
        return out_cx
# For building complexes array for large files
def BuildBlockArrayByFile(filename, N):
    """
    Parse a NUPACK .cx file into an N x (N+1) array of complex free energies.

    Arguments:
    filename -- path to the .cx output file
    N -- number of strands in the calculation

    Return:
    out -- N x (N+1) numpy array; out[a-1][b] holds the free energy for the
           strand indices (a, b) returned by getIndex (b == 0 marks a
           single-strand entry, so no mirrored assignment is made).
    """
    out = np.zeros([N, N + 1])
    count = 0
    # Context manager fixes the original's never-closed file handle
    with open(filename, 'r') as f:
        for line in f:
            l = line.strip().split()
            if not l:
                # tolerate blank lines (the original crashed on them)
                continue
            # Skip NUPACK comment lines, which start with '%'.
            # BUGFIX: the original used "is not '%'" -- an identity
            # comparison against a string literal (a SyntaxWarning in
            # Python 3.8+); use value inequality instead.
            if l[0][0] != '%':
                # format is same for all lines, so know energy already at this point
                ene = float(l[-1])
                # convert all entries in list except energy to ints
                l = [int(x) for x in l[1:-1]]
                a, b = getIndex(l)
                out[a-1][b] = ene
                if b != 0:
                    out[b-1][a] = ene
                count += 1
                # low-granularity progress output for very large files
                if count % 10000 == 0:
                    print('Building block array', count)
    return out
# Get index of interacting blocks and return index locations
def getIndex(data):
    """Locate the strands participating in a complex row.

    *data* is a list of per-strand copy counts. Returns a pair of 1-based
    positions (a, b): the first and second strand whose count is 1, or the
    same position twice when a count of 2 (homodimer) is seen. Unfilled
    slots stay 0, so a monomer row comes back as (a, 0).
    """
    a = b = 0
    for pos, copies in enumerate(data):
        if copies == 1:
            # first hit fills a, second fills b
            if a < 1:
                a = pos + 1
            elif b < 1:
                b = pos + 1
        elif copies == 2:
            # two copies of the same strand: both indices point here
            a = b = pos + 1
        # stop as soon as both indices are known (small speedup)
        if a > 0 and b > 0:
            break
    return a, b
# Get complex pair probabilities from ocx-ppairs file, works for complex size 2 only
def getCXPPairs(filename,seq):
    """Parse NUPACK's .ocx-key and .ocx-ppairs outputs for a complexes run.

    Arguments:
    filename -- the prefix used for the NUPACK run (files <prefix>.ocx-ppairs
                and <prefix>.ocx-key are read)
    seq -- the strand sequences (accepted for symmetry with complexes();
           not referenced in the body)

    Return:
    ocxkey -- numpy array of [complex id, strand, strand-or-0] rows
    ocxppairs -- numpy array of per-complex pair-probability matrices

    Notes:
    The header comment says this works for complex size 2 only.
    NOTE(review): stacking matrices of different sizes via np.array() builds
    a ragged array, which newer numpy rejects without dtype=object -- confirm
    the pinned numpy version.
    """
    file1=filename+'.ocx-ppairs'
    file2=filename+'.ocx-key'
    # Obtain ocx key of complex size 2
    ocxkey=[]
    f=open(file2,'r')
    for line in f:
        l = line.strip().split()
        if l[0]!='%':
            # 3-column rows describe a single-strand entry; pad with 0
            if len(l)<4:
                ocxkey.append([int(l[0]),int(l[2]),0])
            else:
                ocxkey.append([int(l[0]),int(l[2]),int(l[3])])
    ocxkey=np.array(ocxkey)
    # Obtain pair probabilities data. The file is a sequence of sections:
    # a '% complex...' marker line, then the matrix size, then (i, j, prob)
    # rows; newArray tracks whether we are waiting for the size line.
    f=open(file1,'r')
    ocxppairs=[]
    out=[]
    cx=0
    maxN=0
    newArray=0
    count=0
    for line in f:
        l = line.strip().split()
        if l!=[]:
            if newArray==1 and len(l)==1:
                # size line: start a fresh (maxN x maxN+1) matrix; the last
                # column accumulates the "unpaired" entries (j > maxN below)
                maxN=int(l[0])
                out=np.zeros([maxN,maxN+1])
                newArray=0
            elif newArray==0 and len(l)==2 and 'complex' in l[1]:
                # new-complex marker: bank the finished matrix (the very
                # first append is the placeholder list, dropped at the end)
                newArray=1
                ocxppairs.append(out)
            elif l[0]!='%' and len(l)==3:
                # Obtain values
                prob = float(l[-1])
                a = int(l[0])
                b = int(l[1])
                # j beyond the matrix size means "unpaired"; store in col 0
                if b>maxN:
                    b=0
                out[a-1][b]=prob
                # mirror into the symmetric position for real pairs
                if b!=0:
                    out[b-1][a]=prob
                # low granularity progress output
                if count%10000==0:
                    print('CX PPairs parsing line',count)
                count+=1
    # bank the final complex's matrix, then drop the leading placeholder
    ocxppairs.append(out)
    ocxppairs=np.array(ocxppairs[1:])
    return ocxkey, ocxppairs
# get mfe structure array from file
def getMFEPairs(filename):
    """Parse a NUPACK ordered-complex MFE file into pair matrices.

    Argument:
    filename -- path to the NUPACK output file listing MFE base pairs per
                complex

    Return:
    out_mfe -- list of (N x N+1) numpy 0/1 matrices, one per complex, with
               out[a-1][b] == 1 when bases a and b are paired in the MFE
               structure
    """
    f=open(filename,'r')
    out_mfe=[]
    out=[]
    new=0
    for line in f:
        l = line.strip().split()
        if l!=[]:
            # '% ...complex...' comment marks the start of a new section
            if l[0]=='%' and len(l)>1 and 'complex' in l[1]:
                new=1
            elif l[0]!='%' and new==1 and len(l)==1:
                # size line: bank the previous matrix (first append is the
                # placeholder list, dropped below) and start a fresh one
                new=0
                N=int(l[0])
                out_mfe.append(out)
                out=np.zeros([N,N+1])
            elif l[0]!='%' and len(l)==2:
                # (i, j) pair row with 1-based indices; mark symmetrically
                a=int(l[0])
                b=int(l[1])
                out[a-1][b] = 1
                out[b-1][a] = 1
    # bank the final matrix, then drop the leading placeholder entry
    out_mfe.append(out)
    out_mfe=out_mfe[1:]
    return out_mfe
# ############################################################### #
# MISC useful functions
# Count frequency of A,T,G,C in sequence
def CalcATGC(seq):
    """
    Count the frequency of A, T, G, C in each sequence.

    Argument:
    seq -- iterable of sequence strings

    Return:
    N x 4 numpy array; row i holds [#A, #T, #G, #C] for seq[i].
    Characters other than uppercase A/T/G/C are ignored, exactly as in the
    original per-character loop.
    """
    N = len(seq)
    out = np.zeros([N, 4])
    for i in range(N):
        s = seq[i]
        # str.count runs in C; this replaces the hand-rolled character loop
        out[i] = [s.count('A'), s.count('T'), s.count('G'), s.count('C')]
    return out
# ############################################################### #
def check_prefix(prefix=None, calc_style=''):
    """Validate or generate a file prefix for a NUPACK calculation.

    If *prefix* is supplied, raise ValueError when any scratch file with
    that prefix already exists; otherwise return it unchanged. If no
    prefix is supplied, build a timestamp-based one tagged with
    *calc_style*, appending a numeric suffix until it is unused.
    """
    suffixes = ('.in', '.out', '.mfe', '.ppairs')

    def taken(candidate):
        # True when any NUPACK scratch file already uses this prefix
        return any(os.path.isfile(candidate + s) for s in suffixes)

    if prefix is not None:
        if taken(prefix):
            raise ValueError('Files with specified prefix already exist.')
        return prefix
    # No prefix supplied: derive one from the current time plus the
    # calculation tag, then disambiguate with a counter if needed.
    prefix = time.strftime('%Y-%m-%d-%H_%M_%S_') + str(calc_style)
    base = prefix
    counter = 0
    while taken(prefix):
        prefix = base + '_%08d' % counter
        counter += 1
    return prefix
# ############################################################### #
def check_nupackhome(user_supplied_dir=None):
    """
    Validate or generate a string with the NUPACKHOME directory path.

    Keyword Argument:
    user_supplied_dir -- explicit NUPACK install directory; when None the
                         NUPACKHOME environment variable is consulted

    Return:
    The NUPACK home directory with any trailing slash stripped.

    Raises:
    RuntimeError if NUPACKHOME is unset (when needed) or the directory does
    not contain compiled NUPACK executables.
    """
    if user_supplied_dir is None:
        try:
            nupackhome = os.environ['NUPACKHOME']
        except KeyError:
            raise RuntimeError('NUPACKHOME environment variable not set.')
    else:
        # BUGFIX: the original used "elif user_supplied_dir.endswith('/')",
        # so a user-supplied path WITHOUT a trailing slash left nupackhome
        # unassigned and crashed with UnboundLocalError below.
        nupackhome = user_supplied_dir
        if nupackhome.endswith('/'):
            # Strip trailing slash if there is one
            nupackhome = nupackhome[:-1]
    # Make sure NUPACK looks ok and has executables
    if os.path.isdir(nupackhome + '/bin') \
            and os.path.isfile(nupackhome + '/bin/mfe') \
            and os.path.isfile(nupackhome + '/bin/pairs') \
            and os.path.isfile(nupackhome + '/bin/energy') \
            and os.path.isfile(nupackhome + '/bin/prob'):
        return nupackhome
    else:
        raise RuntimeError('NUPACK not compiled in %s.' % nupackhome)
def check_material(material):
    """
    Check material input.

    Argument:
    material -- one of 'rna1999', 'rna1995', or 'dna1998'

    Return:
    The validated material string.

    Raises:
    ValueError for any other value.

    Notes:
    The 'rna1999' parameters will not work with temperatures other than 37.0
    degrees C.
    """
    if material not in ['rna1999', 'dna1998', 'rna1995']:
        print('!! Improper material parameter. Allowed values are:')
        # BUGFIX: the original wrote ''rna1999'' etc., which Python treats
        # as adjacent string literals and concatenates, so the intended
        # quotes never appeared in the message.
        print("!! 'rna1999', 'rna1995', 'dna1998'.")
        raise ValueError('Improper material parameter: ' + str(material))
    else:
        return material
| [
"zchen@caltech.edu"
] | zchen@caltech.edu |
93fcf60be9475d9cd490935255c7a9803947da13 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/coghq/DistributedTriggerAI.py | 374f8cd57b81b637e18ee7e8befda3be1dea203f | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 422 | py |
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
import DistributedSwitchAI
class DistributedTriggerAI(DistributedSwitchAI.DistributedSwitchAI):
    """
    DistributedTriggerAI class: The server side representation
    of a Cog HQ trigger. This is the object that remembers what the
    trigger is doing. The DistributedTrigger is the client side
    version.
    """
    # All behavior is inherited from DistributedSwitchAI; this subclass only
    # exists to give the distributed-object system a distinct AI class name
    # for triggers, mirroring the client-side DistributedTrigger.
    pass
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
94d19d1919340743e72d4ebb192343c2b15a4bb0 | ecb7156e958d10ceb57c66406fb37e59c96c7adf | /Leetcode Exercise/Leetcode234_Palindrome Linked List/mySolution.py | dbf19308c870dfebb7d2d431d79233914dcedce8 | [] | no_license | chenshanghao/RestartJobHunting | b53141be1cfb8713ae7f65f02428cbe51ea741db | 25e5e7be2d584faaf26242f4f6d6328f0a6dc4d4 | refs/heads/master | 2020-07-27T17:39:58.756787 | 2019-10-18T06:27:27 | 2019-10-18T06:27:27 | 209,175,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        """Return True if the singly linked list starting at *head* reads
        the same forwards and backwards.

        Strategy: find the middle with slow/fast pointers, reverse the
        second half in place, then compare the two halves node by node.
        NOTE: the second half is left reversed -- the list is not restored.
        Runs in O(n) time and O(1) extra space.
        """
        # Empty list or single node is trivially a palindrome
        if not head or not head.next:
            return True
        slow, fast = head, head
        # fast advances two nodes per one of slow; when fast hits the end,
        # slow sits at the last node of the first half
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        # step into the second half and reverse it
        slow = self.reserveLinkedList(slow.next) if False else slow
        slow = slow.next
        slow = self.reserveLinkedList(slow)
        # Walk both halves in lockstep. The reversed half is never longer
        # than the front half, so for odd lengths the middle node is simply
        # skipped -- which is correct for palindrome checking.
        while slow:
            if slow.val != head.val:
                return False
            slow = slow.next
            head = head.next
        return True
    def reserveLinkedList(self, head):
        """Reverse a singly linked list and return the new head.

        (Name kept as-is for compatibility -- 'reserve' is presumably a
        typo for 'reverse'.)
        """
        if not head or not head.next:
            return head
        # Head-insertion reversal: repeatedly splice the next source node
        # in right after the dummy, which reverses the order.
        dummy = ListNode(-1)
        while(head):
            tmp = head
            head = head.next
            tmp.next = dummy.next
            dummy.next = tmp
        return dummy.next
| [
"21551021@zju.edu.cn"
] | 21551021@zju.edu.cn |
4922816df820e6cd6e13b69cbe2a4082fa22d185 | a969a8f859e3f13c5bcfc5f7392da85b2e225bfa | /SLiM_simulations_review/slim_genetree_to_vcf_norescale_toIndAnc_backAnc.py | d7c04f3cf062c73fb936aebaaa2a3ebcc59d73fe | [] | no_license | Schumerlab/hybridization_review | 4977d95d93c451e20c212227887334bb17457f6f | 69a398b89365cc069c6856d990c2b74293b52486 | refs/heads/master | 2023-08-07T00:23:24.853939 | 2021-06-02T22:34:41 | 2021-06-02T22:34:41 | 373,316,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,010 | py | __author__ = 'Quinn'
# Post-process a SLiM tree sequence: overlay neutral mutations, export a VCF
# for a random subsample of present-day individuals from populations p1-p5,
# then trace p5 sample lineages back through each marginal tree to score
# how many of each individual's two genomes derive from p3 (pop index 2).
# Commented-out lines are the author's profiling/debug probes, kept as-is.
import msprime, pyslim
import numpy as np
import argparse
from time import perf_counter
# --- command-line interface ---
parser = argparse.ArgumentParser(description="For F4 simulations convert SLiM tree output into a VCF")
parser.add_argument('--out', help="Output prefix, required", required=True)
parser.add_argument('--input', help="Input tree, required", required=True)
parser.add_argument('--numInds', help="Number of individuals to subset from each pop")
parser.set_defaults(numInds=10)
args = parser.parse_args()
outputPrefix = args.out
inputFile = args.input
subNum = int(args.numInds)
totalStart = perf_counter()
start = perf_counter()
# Load the SLiM-produced tree sequence and simplify it
ts = pyslim.load(inputFile).simplify()
end = perf_counter()
#print("Load = " + str(end - start) + " sec")
start = perf_counter()
# Overlay neutral mutations (fixed seed for reproducibility); the ancestry
# tracing below uses the un-mutated ts, the VCF uses mut_ts
mut_ts = msprime.mutate(ts, rate=1.2e-8, random_seed=1, keep=True, model=msprime.InfiniteSites(alphabet=1))
#mut_ts = pyslim.SlimTreeSequence(msprime.mutate(ts, rate=1e-5, keep=True))
end = perf_counter()
#print("Mutate = " + str(end - start) + " sec")
numTrees = ts.num_trees
numInds = mut_ts.num_individuals
numPops = mut_ts.num_populations
# Population index i corresponds to label popList[i] (so p3 is index 2,
# p5 is index 4)
popList = ["p1","p2","p3","p4","p5"]
# Collect the IDs of present-day (time == 0) individuals per population
popIndDict = {}
for pop in popList:
    popIndDict[pop] = []
for i in range(0,numInds):
    indID = ts.individual(i).id
    indPop = ts.individual(i).population
    popName = popList[indPop]
    if ts.individual(i).time == 0.0:
        popIndDict[popName].append(indID)
# Randomly subsample subNum individuals from each population
subPopDict = {}
for j in popIndDict:
    #print(f"We have {len(popIndDict[j])} individuals in the {j} population.")
    subPopDict[j] = np.random.choice(popIndDict[j], size=subNum, replace=False)
# Write the individual manifest (vcf label, population, SLiM pedigree id)
# and record the order used for the VCF columns
indivlist = []
indivnames = []
with open(outputPrefix +"_sim_individuals.txt", "w") as indfile:
    #indfile.writelines("\t".join(["vcf_label", "tskit_id", "slim_id", "popNum", "popName"]) + "\n")
    for pop in popList:
        for i in subPopDict[pop]:
            indivlist.append(i)
            ind = mut_ts.individual(i)
            vcf_label = pop + "_" + str(ind.id)
            indivnames.append(vcf_label)
            data = [vcf_label, pop, str(ts.individual(i).metadata.pedigree_id)]
            indfile.writelines("\t".join(data) + "\n")
# Export genotypes for the subsampled individuals
with open(outputPrefix + "_sim_genotypes.vcf", "w") as vcffile:
    mut_ts.write_vcf(vcffile, individuals=indivlist, individual_names=indivnames)
#start = perf_counter()
#p5time = 1
#p3time = 1
#for x in ts.nodes():
#    if x.population == 4:
#        if int(x.time) > p5time:
#            p5time = x.time + 1
#    elif x.population == 3:
#        if int(x.time) > p3time:
#            p3time = x.time + 1
#was_p3 = [x.id for x in ts.nodes() if (x.population == 2 and x.time == p5time)]
#was_p3 = [x.id for x in ts.nodes() if (x.population == 2)]
#end = perf_counter()
#print("Was p3 = " + str(end - start) + " sec")
# Map each sampled p5 individual to its (two) genome nodes
samp_inds = subPopDict['p5']
start = perf_counter()
indNodes = {}
wNodes = list()
for ind in samp_inds:
    nodes = list(ts.individual(ind).nodes)
    indNodes[ind] = list(ts.individual(ind).nodes)
    for node in nodes:
        #print(ts.nodes(node).population)
        wNodes.append(node)
#samp_nodes = np.concatenate([ind.nodes for ind in ts.individuals() if ind.id in samp_inds]).tolist()
end = perf_counter()
#print("Sample nodes = " + str(end - start) + " sec")
# Output table: one row per marginal tree (chr, start, end, then a
# 0/1/2 p3-ancestry genotype per sampled p5 individual)
outputName = outputPrefix + "_p5_indAnc.txt"
outFile = open(outputName, 'w')
header = "chr\tstart\tend"
for indName in subPopDict['p5']:
    header = header + "\tp5_" + str(indName)
outFile.write(header + "\n")
#outFile.close()
print("Number of Trees: " + str(numTrees))
count = 0
outStr = ""
for tree in ts.trees():
    loopStart = perf_counter()
    #start = perf_counter()
    #outFile = open(outputName, 'a+')
    count += 1
    # tree.interval gives (left, right) genomic coordinates for this tree
    interval = tree.interval
    #print(interval)
    #outFile.write("1")
    outStr += "1"
    for i in interval:
        #outFile.write("\t" + str(i))
        outStr += "\t" + str(i)
    for ind in samp_inds:
        sampGeno = 0
        for indNode in indNodes[ind]:
            # Walk up the tree while the lineage is still in p5 (index 4);
            # the first ancestor outside p5 decides the ancestry source.
            # NOTE(review): assumes every p5 lineage leaves p5 before the
            # root is reached -- confirm for these simulations.
            pop = tree.population(indNode)
            node = indNode
            countBack = 0
            while pop == 4:
                parentNode = tree.parent(node)
                parentPop = tree.population(parentNode)
                if parentPop == pop:
                    pop = parentPop
                    node = parentNode
                    countBack += 1
                else:
                    pop = parentPop
                    #print(ts.node(parentNode))
            # Population index 2 is p3: count this genome as p3-derived
            if pop == 2:
                sampGeno += 1
        #outFile.write("\t" + str(sampGeno))
        outStr += "\t" + str(sampGeno)
    #outFile.write("\n")
    outStr += "\n"
    #outFile.close()
    loopEnd = perf_counter()
    #print("Tree parse = " + str(loopEnd - loopStart) + " sec")
    #print("At tree: " + str(count) + " of " + str(numTrees))
    #print("Total run time: " + str(loopEnd - totalStart) + " sec")
# Single buffered write of the whole ancestry table
#outFile = open(outputName, 'a+')
outFile.write(outStr)
outFile.close()
totalEnd = perf_counter()
print(outputPrefix + " Total run time: " + str(totalEnd - totalStart) + " sec")
| [
"mollyschumer@BIO-C02YR1A0LVDM.local"
] | mollyschumer@BIO-C02YR1A0LVDM.local |
c324290754311f05f0b020770073134bfd94dbd0 | dc7a373c3d4f2968907f84bcde96855a56249015 | /autoconnect.py | 2eb8241305bd096ee996cab0608b177b5c4cc251 | [
"MIT"
] | permissive | lminy/thesis-scripts | f5be7801ddb108cc986765e74aeb73a1ad7900b6 | 7cfad8b04360c6797f112593cab02159bfde63db | refs/heads/master | 2021-01-02T09:04:06.448375 | 2017-08-02T15:40:07 | 2017-08-02T15:40:07 | 99,132,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,818 | py | #!/usr/bin/env python
# Requirements :
# sudo pip install netifaces
# sudo apt-get install wireless-tools
# Wi-Fi network to keep associated, and the wireless interface to manage
SSID = 'ssid'
INTERFACE = 'wlan0'
import subprocess
import re
import time
import logging
# Log connection events with timestamps to a file next to the script
logger = logging.getLogger("AutoConnect")
logger.setLevel(logging.INFO)
fh = logging.FileHandler("autoconnect.log")
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info("Starting autoconnect")
#################################
########### FUNCTIONS ###########
#################################
def run(command):
    """Execute *command* and return its combined stdout/stderr as a string.

    NOTE: command.split() does not honor shell quoting; arguments must not
    contain spaces.
    """
    proc = subprocess.Popen(command.split(),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    captured = iter(proc.stdout.readline, b'')
    return ''.join(captured)
def run_bg(command):
    """Launch *command* in the background without waiting for it to finish."""
    subprocess.Popen(command.split())
def get_ip(interface):
    """Return the IPv4 address currently assigned to *interface*.

    Raises ValueError (from netifaces) for an unknown interface and
    KeyError when the interface has no IPv4 address.
    """
    import netifaces as ni
    # A single ifaddresses() call suffices; the original called it twice
    # and discarded the first result.
    return ni.ifaddresses(interface)[ni.AF_INET][0]['addr']
def print_line(text):  # https://stackoverflow.com/a/3249684/5955402
    """Overwrite the current terminal line with *text* (no newline).

    Uses a carriage return so repeated calls update a status line in place.
    """
    from sys import stdout
    # The original also imported time.sleep here but never used it; removed.
    stdout.write("\r%s " % text)
    stdout.flush()
from sys import stdout
from time import sleep
stdout.write("\n%s\n" % text)
logger.info(text)
def is_connected(interface) :
"""
# Version 1 : AP Could be out of range and iwconfig can still see it connected...
output = run("sudo iwconfig %s" % interface)
if re.search(SSID, output) :
return True
else :
return False
"""
# Version 2 : better than 1 but not perfect
output = run("sudo iwconfig %s" % interface)
if re.search('Not-Associated', output) :
return False
else :
return True
# Take the interface out of NetworkManager's hands so iwconfig owns it,
# and make sure the radio is up. (NOTE: this file is Python 2 -- the main
# loop below uses print statements.)
run("sudo service network-manager stop")
run("sudo ifconfig wlan0 up")
############################
########### MAIN ###########
############################
print 'AUTO-CONNECT on %s' % SSID
# was_connected tracks the previous poll so we only announce transitions
was_connected = False
if is_connected(INTERFACE) :
    println("Already connected. IP=%s" % get_ip(INTERFACE))
    was_connected = True
# Poll once per second forever, reconnecting whenever the link drops
while True :
    if is_connected(INTERFACE) :
        print_line("Connected")
        was_connected = True
    else :
        if was_connected :
            println("Disconnected")
        print_line("Connecting...")
        # Associate with the configured SSID; iwconfig prints nothing on
        # success, so empty output means the association was accepted
        error = run("sudo iwconfig wlan0 essid %s" % SSID)
        if(len(error)==0) :
            #print "Connected!"
            # Request a DHCP lease in the background, give it a second,
            # then kill the client so it does not linger
            run_bg("sudo dhclient wlan0")
            time.sleep(1)
            run("sudo killall dhclient")
            #if "RTNETLINK answers: File exists" in error or error == "" :
            #else :
            #    print "Error while getting the IP (dhclient) : " + error
            if is_connected(INTERFACE) :
                println("Connected successfully! IP : " + get_ip(INTERFACE))
                was_connected = True
            else :
                print_line("Failed to connect")
                was_connected = False
        else :
            print "Error while connecting : " + error
    time.sleep(1)
| [
"laurent.miny@hotmail.com"
] | laurent.miny@hotmail.com |
36bb45aabfcceb44fb37a15fe408059273425236 | 28d3597f54e03dcf4dfbd86b4557ebe0872e9330 | /pollster/urls.py | cd796b78f3314e273746ef4c592450fb9eb98d50 | [] | no_license | wenchnoob/django_demo | 439ecde19a2a2a20fdab6d8376fd240efaf38590 | 3fb05cb38d2faffd502a4072b70a4ae722898c63 | refs/heads/master | 2023-07-07T06:15:47.877769 | 2021-08-12T23:19:25 | 2021-08-12T23:19:25 | 395,465,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | """pollster URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route order: project pages at the site root, the polls app under /polls/,
# and the Django admin under /admin/.
urlpatterns = [
    path('', include('pages.urls')),
    path('polls/', include('polls.urls')),
    path('admin/', admin.site.urls),
]
| [
"wcdutreuil@gmail.com"
] | wcdutreuil@gmail.com |
2a5b6ea2fdb883878cc91d9cdc2864c4be3c2418 | c3a70d03ecb42f9d0e67ca887171402e8a9f610a | /image_thumbnail.py | 691440fbb9636d34406ffc9036ea0010e26851c4 | [] | no_license | Fabio-Ottaviani-Dev/image-upload-and-thumbnail | 1335586d39cdf5d7fe7dd2f0e73bd35c102fc014 | 8237879aa12871495f7a98c8c3ce48f9cf8d4b49 | refs/heads/master | 2022-12-01T22:09:08.070326 | 2020-07-18T19:59:11 | 2020-07-18T19:59:11 | 252,869,702 | 0 | 0 | null | 2022-11-22T05:50:02 | 2020-04-04T00:01:36 | Python | UTF-8 | Python | false | false | 2,305 | py | import os, random
from datetime import datetime
from flask import jsonify
from werkzeug.utils import secure_filename
from PIL import Image
# ----------------------------------------------------------------------------
class imageThumbnail:
    """Handle a validated image upload and produce a resized copy plus a
    thumbnail, replying with Flask JSON responses."""

    def __init__(self):
        # Destination directory for uploads and generated images
        self.UPLOAD_FOLDER = 'image'
        # Lowercase extensions accepted by allowed_file() (set literal
        # replaces the dated set([...]) form)
        self.ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
        self.IMAGE_SIZE = 925, 617      # W | H
        self.THUMBNAIL_SIZE = 260, 195  # W | H

    def allowed_file(self, filename):
        """Return True when *filename* has an allowed image extension."""
        return '.' in filename and filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS

    def get_unique_filename(self):
        """Build a collision-resistant basename from the current time plus a
        random integer (no extension)."""
        random_int = random.randint(0, 99999999)
        datetime_now = datetime.now()
        return datetime_now.strftime('%m%d%Y-%H%M%S-%f{}'.format(random_int))

    def upload(self, file):
        """Save an uploaded file and hand it to thumbnail().

        Returns a (json, status) pair: thumbnail()'s result on success, or a
        400 response when the file is missing or has a disallowed type.
        """
        if file and self.allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(self.UPLOAD_FOLDER, filename))
            return self.thumbnail(filename)
        return jsonify({
            'success' : False,
            'message' : 'File type not allowed, the allowed file types are: png, jpg, jpeg.'
        }), 400

    def thumbnail(self, filename):
        """Create a resized JPEG plus an '_ico' thumbnail, then delete the
        original upload. Returns a (json, status) pair."""
        # os.path.join replaces '/'-concatenation for platform safety
        source_path = os.path.join(self.UPLOAD_FOLDER, filename)
        dest_path = os.path.join(self.UPLOAD_FOLDER, self.get_unique_filename())
        image = Image.open(source_path)
        image_rgb = image.convert('RGB')
        # Image.LANCZOS is the modern name for Image.ANTIALIAS (identical
        # filter since Pillow 2.7; the ANTIALIAS alias was removed in 10.0)
        image_rgb.thumbnail(self.IMAGE_SIZE, Image.LANCZOS)
        image_rgb.save(dest_path + '.jpg', 'JPEG', quality=95)
        # Shrink the already-reduced image again for the icon size
        image_rgb.thumbnail(self.THUMBNAIL_SIZE, Image.LANCZOS)
        image_rgb.save(dest_path + '_ico.jpg', 'JPEG', quality=95)
        try:
            os.remove(source_path)
            return jsonify({
                'success': True,
                'message' : 'The required image has been upload and resize - operation successfully completed'
            }), 200
        except OSError as e:
            return jsonify({
                'success': False,
                'message' : 'Error: {} - {}'.format(e.filename, e.strerror)
            }), 500
| [
"F0Dev"
] | F0Dev |
f16f65d40aa4deb5a70bedca0900c9a5ad0c7832 | 9cc92316b675eda133ed2d17c2630728b12f3140 | /polls/urls.py | d539b022d2404d855a45d8b8981bd96e44f97fdd | [] | no_license | MelinaLaura/Polls-App | 43f683de9ae57c04ea3a0c7fcbc2ab49b9dc486b | b391e25316137d677f2205c1593dd65b49fed979 | refs/heads/main | 2023-05-03T22:35:32.362593 | 2021-05-18T13:54:43 | 2021-05-18T13:54:43 | 367,315,529 | 0 | 0 | null | 2021-05-18T13:54:44 | 2021-05-14T09:34:23 | Python | UTF-8 | Python | false | false | 366 | py | from django.urls import path
from . import views
app_name = 'polls'
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
"83724903+MelinaLaura@users.noreply.github.com"
] | 83724903+MelinaLaura@users.noreply.github.com |
b20f8e143e88c992748a2c5e0536af1c718347ce | 709a8e53c3cc19c9197d4a6614f445a849c912b1 | /efd_exam/soil/soil_temp_solver.py | d76057173851050a7f94fc24928d9d0ae71a3126 | [] | no_license | adair-kovac/efd_exam | 441bdaf9b9049060474760fb9a4625dd4e06fe36 | eb64522dcebe8d1b489a9696d75731a0c5b12564 | refs/heads/main | 2023-04-04T21:16:00.785525 | 2021-04-12T19:16:40 | 2021-04-12T19:16:40 | 357,051,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py | '''
We solve the heat equation:
dT/dt = alpha * d^2 T/dz^2
using Euler's method.
Our conditions are:
I.C. - T(t=0, z)
B.C. - T(z -> infinity) [this is the same for all t]
B.C. - T(t, z = 1.5)
'''
import numpy as np
from soil import soil_data_loader as data_loader
from soil import finite_difference
from soil import soil_temp_plot
# Soil thermal diffusivity (m^2/s) -- the alpha in the module's heat equation
alpha = .4e-6
# Sensor depths in centimetres, matching the data's "T-<depth>" columns
observation_depths_str = ["1.6", "3.9", "5.8", "8.5", "10.4", "15", "25"]
def main():
    """Solve the 1-D heat equation over the soil column by Euler stepping
    and plot model vs. observed temperatures."""
    matrix, observation_depths, observation_times, original_data = initialize_data()
    # March interior depths forward in time with the explicit finite
    # difference; column 0 (surface BC) and the last column (deep BC) are
    # fixed by initialize_data().
    for j in range(1, len(observation_depths_str)):
        for i in range(1, len(observation_times)):
            matrix[i][j] = finite_difference.temp_at_next_time(matrix, observation_times,
                                                               observation_depths, i-1, j,
                                                               alpha, matrix[i-1][j])
    # Label the model's extra deep-boundary column as 30 cm for plotting
    observation_depths_str_model = observation_depths_str + ["30"]
    time_column = original_data["seconds_since"]
    soil_temp_plot.plot(time_column, matrix, observation_depths_str_model, "Finite Difference Numerical Solution")
    columns = ["T-" + depth for depth in observation_depths_str]
    soil_temp_plot.plot(time_column, original_data[columns], observation_depths_str, "Actual Data")
    # Drop the model's boundary column so shapes match the observations
    model_data_dim = matrix[:, :-1]
    soil_temp_plot.plot(time_column, original_data[columns] - model_data_dim, observation_depths_str,
                        "Actual Minus Modelled")
    soil_temp_plot.contour_plot(observation_depths[:-1], time_column, original_data[columns],
                                "Actual Temperature by Depth and Time")
def initialize_data():
    """Load observations and build the (time x depth) temperature matrix.

    Returns:
        matrix: ndarray of shape (n_times, n_depths + 1); column 0 is the
            surface boundary condition over time, the last column is the
            constant deep boundary condition, row 0 is the initial condition.
        observation_depths: depths in meters, with the 0.3 m BC depth appended.
        observation_times: the "seconds_since" column of the loaded data.
        data: the raw observation DataFrame.
    """
    observation_depths = [float(x)/100 for x in observation_depths_str]  # convert cm to m
    data = data_loader.load_data()
    observation_times = data["seconds_since"]
    # Surface (1.6 cm) temperature over time is the top boundary condition.
    # Vectorized replacement for the original row-by-row iterrows() loop.
    surface_boundary_temperatures_by_time = [float(v) for v in data["T-1.6"]]
    deep_temperature = np.average(data["T-25"])
    # `.iloc[0]` (indexer access) instead of the non-standard `.iloc()[0]` call.
    initial_temperatures_by_depth = [data.iloc[0]["T-" + depth] for depth in observation_depths_str]
    num_rows = len(observation_times)
    num_columns = len(initial_temperatures_by_depth) + 1
    observation_depths.append(.3)  # Adding a final row for the temperature at depth BC
    matrix = np.zeros((num_rows, num_columns))
    matrix[:, 0] = surface_boundary_temperatures_by_time
    initial_temperatures_by_depth.append(deep_temperature)
    matrix[0] = initial_temperatures_by_depth
    matrix[:, -1] = deep_temperature
    return matrix, observation_depths, observation_times, data
if __name__=="__main__":
main() | [
"u1334098@utah.edu"
] | u1334098@utah.edu |
de543dd2d61597a1e0284af168a9d1d2d12c2484 | 9172b47b48c04baff8bc5422d59761deeac39ea2 | /MainIoT/urls.py | 312f28e4441ace64fdb053a28db51f8020fddc07 | [] | no_license | choredarck/WebPageIoTdi2020 | 9b453ef22975eece6ff975203a056269938c9333 | 91176c873ef275c24cc517c776046fe8dc4e83a2 | refs/heads/master | 2023-01-04T19:08:44.953144 | 2020-11-05T09:05:23 | 2020-11-05T09:05:23 | 310,321,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | """MainIoT URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from chartsensor import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    # Simple demo endpoints served by the chartsensor app.
    path('saludo/', views.saludo, name= "que-onda"),
    path('fecha/', views.dameFecha, name= "fecha"),
    #path('edades/<int:edad>/<int:agno>', calculaEdad),
    path('', views.Index.as_view(), name="Index"),
    # JSON endpoints consumed by the chart front-end.
    path('api/data/', views.get_data, name="api-data"),
    path('api/chart/data/', views.ChartData.as_view()),
] +static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) | [
"luis-nuro@live.com.mx"
] | luis-nuro@live.com.mx |
8fd13d52b6fca8fb65d98acb253969f4ef423a0c | 295a119fa0b4a4aabf3634e32309442c6b305e30 | /GFAR_python/kgrec_dataset.py | 6090f0a9474429e63965678c55c2d30f84208863 | [] | no_license | mesutkaya/recsys2020 | 3d8dd6a709a2868521e8e177ded1b58da6991968 | 8a8c7088bebc3309b8517f62248386ea7be39776 | refs/heads/master | 2022-12-05T14:53:22.865842 | 2020-08-21T10:22:57 | 2020-08-21T10:22:57 | 282,897,625 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,401 | py | import os
import pandas as pd
from sklearn.model_selection import train_test_split
import random
'''
KGRec - music dataset = https://www.upf.edu/web/mtg/kgrec
By using 'implicit_lf_dataset.csv' it creates 5 random splits and train validation and test sets
'''
WORKING_DIR = os.getcwd()
MAIN_DIR = os.path.abspath(os.path.join(WORKING_DIR, os.pardir))
DATA_DIR = os.path.join(MAIN_DIR, "data/kgrec/")
OUT_DIR = DATA_DIR
def get_count(tp, id):
    """Return the play-count (number of ratings) per unique value of column *id*."""
    grouped = tp[[id, 'rating']].groupby(id, as_index=False)
    return grouped.size()
def print_stats(tp):
    """Print the number of triplets, users, songs and the sparsity level of *tp*."""
    n_triplets = tp.shape[0]
    n_users = get_count(tp, 'userId').shape[0]
    n_songs = get_count(tp, 'itemId').shape[0]
    sparsity_level = float(n_triplets) / (n_users * n_songs)
    print("There are %d triplets from %d users and %d songs (sparsity level %.3f%%)" % (n_triplets,
                                                                                        n_users,
                                                                                        n_songs,
                                                                                        sparsity_level * 100))
def numerize(tp, user2id, movie2id):
    """Replace the raw user/item ids in *tp* (in place) with integer codes.

    Returns the same DataFrame for convenience.
    """
    tp['userId'] = [user2id[raw_uid] for raw_uid in tp['userId']]
    tp['itemId'] = [movie2id[raw_sid] for raw_sid in tp['itemId']]
    return tp
def filter_triplets(tp, min_uc=20, min_sc=1):
    """Filter *tp* so every song has >= *min_sc* listeners and every user >= *min_uc* songs."""
    # First pass: keep only songs listened to by at least `min_sc` users.
    song_counts = get_count(tp, 'itemId')
    tp = tp[tp['itemId'].isin(song_counts.index[song_counts >= min_sc])]
    # Second pass: keep only users with at least `min_uc` songs.  Afterwards a
    # few songs may fall back below `min_sc`, but only a small proportion does.
    user_counts = get_count(tp, 'userId')
    tp = tp[tp['userId'].isin(user_counts.index[user_counts >= min_uc])]
    return tp
# Load the raw implicit-feedback triplets and report statistics before/after filtering.
raw_data = pd.read_csv(os.path.join(DATA_DIR, 'implicit_lf_dataset.csv'), header=0, sep='\t')
print_stats(raw_data)
raw_data = filter_triplets(raw_data, min_uc=20, min_sc=1)
print_stats(raw_data)
# Map the string ids to unique incremental integer ids for both users and songs
usercount, songcount = get_count(raw_data, 'userId'), get_count(raw_data, 'itemId')
unique_uid = usercount.index
unique_sid = songcount.index
song2id = dict((sid, i) for (i, sid) in enumerate(unique_sid))
user2id = dict((uid, i) for (i, uid) in enumerate(unique_uid))
# Persist the integer id spaces, one id per line.
# NOTE(review): the explicit f.close() calls below are redundant inside `with`.
with open(os.path.join(OUT_DIR, 'users.txt'), 'w') as f:
    for uid in unique_uid:
        f.write('%s\n' % user2id[uid])
    f.close()
with open(os.path.join(OUT_DIR, 'items.txt'), 'w') as f:
    for sid in unique_sid:
        f.write('%s\n' % song2id[sid])
    f.close()
# Fixed master seed so the five per-fold seeds are reproducible.
random.seed(2812020)
# Create train/validation/test sets, five different random splits
for i in range(1, 6):
    FOLD_DIR = os.path.join(OUT_DIR, str(i))
    if not os.path.exists(FOLD_DIR):
        os.makedirs(FOLD_DIR)
    seed = random.randint(0, 1000000)
    print("seed : " + str(seed))
    # 60/20/20 split overall: 20% test, then 25% of the remaining 80% as validation.
    train_validation, test = train_test_split(raw_data, test_size=0.2, stratify=raw_data.userId, shuffle=True,
                                              random_state=seed)
    train, validation = train_test_split(train_validation, test_size=0.25, stratify=train_validation.userId,
                                         shuffle=True,
                                         random_state=seed)
    # numerize() mutates each split in place, replacing raw ids with integer codes.
    tv_tp = numerize(train_validation, user2id, song2id)
    tv_tp.to_csv(os.path.join(FOLD_DIR, 'train.csv'), index=False, header=False)
    train_tp = numerize(train, user2id, song2id)
    train_tp.to_csv(os.path.join(FOLD_DIR, 't.csv'), index=False, header=False)
    test_tp = numerize(test, user2id, song2id)
    test_tp.to_csv(os.path.join(FOLD_DIR, 'test.csv'), index=False, header=False)
    vad_tp = numerize(validation, user2id, song2id)
    vad_tp.to_csv(os.path.join(FOLD_DIR, 'val.csv'), index=False, header=False)
# Since we mapped the IDs, save the corresponding ratings.
raw_data = numerize(raw_data, user2id, song2id)
raw_data.to_csv(os.path.join(DATA_DIR, 'ratings.csv'), index=False, header=False) | [
"mesutt.kayaa@gmail.com"
] | mesutt.kayaa@gmail.com |
7fc045062d1d679bc74cc6bd4c75f09c7eccaacd | d4eec8dafdf95084189316dfbc774d0b6ae21463 | /bcs-app/backend/apps/configuration/yaml_mode/views.py | b834138be79fee180860850707b420bcdb547d9f | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | dd-guo/bk-bcs-saas | 8b9411a22cee9c7982595ff4860720e603dbfaa9 | 45d69d9a72039fbb4f05638785af7dcbc1c075e4 | refs/heads/master | 2020-12-01T04:03:22.626481 | 2019-12-27T06:10:51 | 2019-12-27T06:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,828 | py | # -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import json
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.renderers import BrowsableAPIRenderer
from . import serializers, init_tpls
from .deployer import DeployController
from .release import ReleaseData, ReleaseDataProcessor
from backend.apps.datalog.utils import create_data_project, create_and_start_standard_data_flow
from backend.apps.configuration.mixins import TemplatePermission
from backend.apps.configuration.models import get_template_by_project_and_id
from backend.apps.configuration.showversion.serializers import GetShowVersionSLZ, GetLatestShowVersionSLZ
from backend.components import paas_cc
from backend.utils.error_codes import error_codes
from backend.utils.renderers import BKAPIRenderer
class InitialTemplatesViewSet(viewsets.ViewSet):
    """Read-only endpoint returning the built-in initial templates."""

    renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)

    def get_initial_templates(self, request, project_id):
        templates = init_tpls.get_initial_templates()
        return Response(templates)
class YamlTemplateViewSet(viewsets.ViewSet, TemplatePermission):
    """CRUD-style endpoints for YAML-mode configuration templates."""
    renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)
    def _template_data(self, request, **kwargs):
        # Merge extra identifiers (e.g. project_id) into the request payload
        # so serializers receive a single flat dict.
        template_data = request.data or {}
        template_data.update(**kwargs)
        return template_data
    def create_template(self, request, project_id):
        """
        Create a new yaml template (with its first show version).

        request.data = {
            'name': '',
            'desc': '',
            'show_version': {
                'name': '',
            }
            'template_files': [{
                'resource_name': 'Deployment',
                'files': [{'name': 'nginx.yaml', 'content': 'Kind:Deployment', 'action': 'create'}]
            }]
        }
        """
        data = self._template_data(request, project_id=project_id)
        serializer = serializers.CreateTemplateSLZ(data=data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        template = serializer.save()
        return Response({'template_id': template.id})
    def update_template(self, request, project_id, template_id):
        """
        Update an existing yaml template, producing a new show version.

        request.data = {
            'name': '',
            'desc': '',
            'show_version': {
                'name': '',
                'show_version_id': '',
            }
            'template_files': [{
                'resource_name': 'Deployment',
                'files': [{'name': 'nginx.yaml', 'content': 'Kind:Deployment', 'action': 'update', 'id': 3}]
            }]
        }
        """
        template = get_template_by_project_and_id(project_id, template_id)
        data = self._template_data(request, project_id=project_id)
        serializer = serializers.UpdateTemplateSLZ(template, data=data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        template = serializer.save()
        return Response({'template_id': template.id})
    def get_template_by_show_version(self, request, project_id, template_id, show_version_id):
        # Retrieve the template files pinned to a specific show version.
        serializer = GetShowVersionSLZ(data=self.kwargs)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        template = validated_data['template']
        self.can_view_template(request, template)
        # File contents are included unless the query string says 'false'.
        with_file_content = request.query_params.get('with_file_content')
        with_file_content = False if with_file_content == 'false' else True
        serializer = serializers.GetTemplateFilesSLZ(
            validated_data, context={'with_file_content': with_file_content}
        )
        return Response(serializer.data)
    def get_template(self, request, project_id, template_id):
        # Retrieve the template files of the latest show version, always with content.
        serializer = GetLatestShowVersionSLZ(data=self.kwargs)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        template = validated_data['template']
        self.can_view_template(request, template)
        serializer = serializers.GetTemplateFilesSLZ(
            validated_data, context={'with_file_content': True}
        )
        return Response(serializer.data)
class TemplateReleaseViewSet(viewsets.ViewSet, TemplatePermission):
    """Preview or deploy a yaml template show version into a namespace."""
    renderer_classes = (BKAPIRenderer, BrowsableAPIRenderer)
    def _request_data(self, request, project_id, template_id, show_version_id):
        # Attach the URL path identifiers to the payload under 'show_version'.
        request_data = request.data or {}
        show_version = {
            'show_version_id': show_version_id,
            'template_id': template_id,
            'project_id': project_id
        }
        request_data['show_version'] = show_version
        return request_data
    # TODO use resources module function
    def _get_namespace_info(self, access_token, project_id, namespace_id):
        # Look up namespace details via paas_cc; non-zero code means failure.
        resp = paas_cc.get_namespace(access_token, project_id, namespace_id)
        if resp.get('code') != 0:
            raise error_codes.APIError(f"get namespace(id:{namespace_id}) info error: {resp.get('message')}")
        return resp.get('data')
    def _raw_release_data(self, project_id, initial_data):
        # Assemble the un-processed release payload from validated input.
        show_version = initial_data['show_version']
        namespace_info = self._get_namespace_info(
            self.request.user.token.access_token, project_id, initial_data['namespace_id']
        )
        raw_release_data = ReleaseData(
            project_id=project_id,
            namespace_info=namespace_info,
            show_version=show_version['show_version'],
            template_files=initial_data['template_files']
        )
        return raw_release_data
    def preview_or_apply(self, request, project_id, template_id, show_version_id):
        """
        Render the release (preview) or deploy it, depending on 'is_preview'.

        request.data = {
            'is_preview': True,
            'namespace_id': 'test',
            'template_files': [{
                'resource_name': 'Deployment',
                'files': [{'name': 'nginx.yaml', 'id': 3}]
            }]
        }
        """
        data = self._request_data(request, project_id, template_id, show_version_id)
        serializer = serializers.TemplateReleaseSLZ(data=data)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        template = validated_data['show_version']['template']
        self.can_use_template(request, template)
        # Create the project info in the data platform.
        username = request.user.username
        cc_app_id = request.project.cc_app_id
        english_name = request.project.english_name
        create_data_project(username, project_id, cc_app_id, english_name)
        # Create/start the standard log collection task.
        create_and_start_standard_data_flow(username, project_id, cc_app_id)
        processor = ReleaseDataProcessor(
            user=self.request.user, raw_release_data=self._raw_release_data(project_id, validated_data)
        )
        release_data = processor.release_data()
        # Preview mode: return the rendered files without deploying anything.
        if validated_data['is_preview']:
            return Response(release_data.template_files)
        controller = DeployController(
            user=self.request.user,
            release_data=release_data
        )
        controller.apply()
        return Response()
| [
"gejun.coolfriend@gmail.com"
] | gejun.coolfriend@gmail.com |
892bfb3c653774d571cee594977f41fa0d804b1c | e4b241d2d730a3c1cba5723837ec294be8307e4e | /split_data_train_validation_test.py | 27210e73848a8633d187c8fd67913dee59eef35a | [] | no_license | ruhan/toyslim | 7219002a8eead4d338ba1fa4c38a145a41bb696e | 008abbac61246d1845d58afda44584be88b72bda | refs/heads/master | 2021-01-15T11:12:22.578488 | 2018-05-23T15:10:33 | 2018-05-23T15:10:33 | 22,106,072 | 40 | 15 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from util import split_train_validation_test
import sys
split_train_validation_test(sys.argv[1])
| [
"ruhanbidart@gmail.com"
] | ruhanbidart@gmail.com |
00fa4d011176e57511ded5ed70adff09c00870ef | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/linear_model/plot_ard.py | d372542275a23bab2e67a592ff0f450684f6bdcd | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,909 | py | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for ARD
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from mrex.linear_model import ARDRegression, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
# Fixed seed so the simulated data set is reproducible.
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the ARD Regression
# An ordinary least squares model is fitted alongside for comparison.
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
# #############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
         label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
         label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
# Log-scaled histogram: the ARD prior concentrates most weights near zero.
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], np.full(len(relevant_features), 5.),
            color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
# Objective trace recorded because compute_score=True was passed above.
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
    """Return sqrt(x)*sin(x) plus standard-normal noise scaled by *noise_amount*."""
    clean_signal = np.sqrt(x) * np.sin(x)
    jitter = np.random.normal(0, 1, len(x))
    return clean_signal + noise_amount * jitter
# One-dimensional regression with a degree-10 polynomial (Vandermonde) expansion.
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=1)
clf_poly = ARDRegression(threshold_lambda=1e5)
clf_poly.fit(np.vander(X, degree), y)
# Predict slightly beyond the training range [0, 10] to show growing uncertainty.
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
             label="Polynomial ARD", linewidth=2)
plt.plot(X_plot, y_plot, color='gold', linewidth=2,
         label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
b0fc4f4a0ac9f56f67e06fa5df02898d6212fef3 | 585bac463cb1919ac697391ff130bbced73d6307 | /40_CombinationSumII/solution.py | 04afdc7ee5801e8baa42abe0630dd12e4380d840 | [] | no_license | llgeek/leetcode | ce236cf3d3e3084933a7a4a5e8c7766f7f407285 | 4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c | refs/heads/master | 2021-01-22T23:44:13.318127 | 2020-03-11T00:59:05 | 2020-03-11T00:59:05 | 85,667,214 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | """
use Counter
"""
from typing import List
from collections import Counter
class Solution:
    """LeetCode 40: unique combinations of candidates summing to target.

    Duplicate candidate values are collapsed with a Counter so that each
    multiset of values is produced at most once.
    """

    def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
        # (value, multiplicity) pairs, in ascending value order.
        pool = sorted(Counter(candidates).items(), key=lambda kv: kv[0])
        results = []
        chosen = []

        def explore(remaining, idx):
            # A remainder of zero means `chosen` is a complete combination.
            if remaining == 0:
                results.append(chosen[:])
                return
            if idx >= len(pool):
                return
            # Branch: take zero copies of pool[idx] and move on.
            explore(remaining, idx + 1)
            value, multiplicity = pool[idx]
            # Branches: take 1..max_copies copies, recursing after each append.
            max_copies = min(remaining // value, multiplicity)
            taken = 0
            while taken < max_copies:
                taken += 1
                chosen.append(value)
                remaining -= value
                explore(remaining, idx + 1)
            # Undo every appended copy before returning to the caller.
            while taken > 0:
                taken -= 1
                chosen.pop()

        explore(target, 0)
        return results
# Ad-hoc smoke test; the commented pairs are an alternative input/target case.
if __name__ == "__main__":
    candidates = [10,1,2,7,6,1,5]
    # candidates = [2,5,2,1,2]
    target = 8
    # target = 5
    sol = Solution()
print(sol.combinationSum2(candidates, target)) | [
"lchen@matterport.com"
] | lchen@matterport.com |
e2e17ddf83490a92b43c3b1fb5c4eca695f5ce34 | 9a1a4d69a1139f97d1ac11da6335718a37d6fe65 | /move_detect/camera_detect.py | e174f677c7baa8f16d916001ad5ae650502f54d8 | [] | no_license | Miura55/cv_code | 46a5f561ed0640a9530a39dce50d28ba946bd0d6 | 5a132c2d646e46a1061598c9ae94eadb3e20d8b3 | refs/heads/master | 2021-07-06T03:28:20.232263 | 2021-01-10T17:22:22 | 2021-01-10T17:22:22 | 220,672,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | import cv2
cap = cv2.VideoCapture(0)
# Running (exponentially weighted) average of past frames, used as the background.
avg = None
while True:
    # Grab one frame at a time.
    ret, frame = cap.read()
    if not ret:
        break
    # Convert to grayscale.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Use the first frame as the reference for comparison.
    if avg is None:
        avg = gray.copy().astype("float")
        continue
    # Compute the difference between the current frame and the running average.
    cv2.accumulateWeighted(gray, avg, 0.6)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    # Threshold the delta image to keep only significant changes.
    thresh = cv2.threshold(frameDelta, 3, 255, cv2.THRESH_BINARY)[1]
    # Outline the thresholded (changed) regions with contours.
    contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    frame = cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
    # Display the result.
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(30)
    # ESC (key code 27) quits the loop.
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows() | [
"acordion.piano@gmail.com"
] | acordion.piano@gmail.com |
19112e29a384c727f21c12f77a78ac6d016e0339 | 920e4a204c3b5a6c625e6634f14f7a704b4ef778 | /http_response_body_test.py | 773e2f705db6cdf91e760b4ed732e723e867f663 | [] | no_license | jason967/ChatBot | 23c6ad852bfa6cf166b5703569f315e2f2df35ab | 8b76d728a0fb7b6fe9810853377924d6c42a9d71 | refs/heads/master | 2022-11-19T05:53:48.987076 | 2020-07-17T07:40:09 | 2020-07-17T07:40:09 | 280,165,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | from flask import Flask
app = Flask(__name__)
@app.route('/json/object')
def get_json_object():
    """Return a minimal JSON object containing a fixed id."""
    return {"id": 1}
@app.route('/json/list')
def get_json_list():
    """Return a JSON object wrapping a fixed list of integers."""
    # Renamed from `list`: the original name shadowed the built-in type.
    values = [1, 2, 3, 4, 5]
    response = {"list": values}
    return response
@app.route('/sensor/data/list')
def get_sensor_data_list():
    """Return a JSON object with a fixed list of two sensor readings."""
    data1 = {"device_id": "LED01", "data": "on", "datetime": "20190731 00:12:47"}
    data2 = {"device_id": "LED02", "data": "off", "datetime": "20190731 00:58:01"}
    # Renamed from `list`: the original name shadowed the built-in type.
    readings = [data1, data2]
    response = {"data_list": readings}
    return response
if __name__ == '__main__':
app.run(debug=True) | [
"jason967@naver.com"
] | jason967@naver.com |
46f1492e0079cbd9e43a52216150bcb80318ccfe | 2feaddc19de5490a1b55af08079d7e1d866f4c2d | /test/includes/common.py | b4fad481873ade2896fdf63bd350d326132c9932 | [
"BSD-3-Clause"
] | permissive | drakkar-lig/walt-python-packages | 4beba93394da306550a54313800bb455b8652e81 | 2e487767c697aded22ba3e08b26964b45e154559 | refs/heads/master | 2023-09-04T10:53:48.768130 | 2023-09-01T08:05:11 | 2023-09-01T08:05:11 | 24,328,535 | 6 | 3 | BSD-3-Clause | 2023-09-01T08:12:36 | 2014-09-22T12:56:10 | Python | UTF-8 | Python | false | false | 1,764 | py | import os
import sys
from pathlib import Path
TEST_IMAGE_URL = "hub:eduble/pc-x86-64-test-suite"
def test_suite_image():
    """Return the suite's image name, memoized in /tmp across processes."""
    marker = Path("/tmp/test_suite_image")
    # The first caller stamps the file with its pid; later callers reuse it.
    if not marker.exists():
        marker.write_text(f"pc-x86-64-test-suite-{os.getpid()}\n")
    return marker.read_text().strip()
def test_suite_node():
    """Return the suite's node name, memoized in /tmp across processes."""
    marker = Path("/tmp/test_suite_node")
    # The first caller stamps the file with its pid; later callers reuse it.
    if not marker.exists():
        marker.write_text(f"testnode-{os.getpid()}\n")
    return marker.read_text().strip()
def test_create_vnode():
    """Create the suite's virtual node through the walt API and return it."""
    from walt.client import api
    expected_name = test_suite_node()
    created = api.nodes.create_vnode(expected_name)
    # The API must report the node under the requested name.
    assert created.name == expected_name
    assert expected_name in api.nodes.get_nodes()
    return created
# Shared mutable state describing how the test harness was invoked.
TEST_CONTEXT = {}
def set_py_test_mode(mode, num_test=0):
    """Record the harness mode ('describe' or 'run') and the target test index."""
    TEST_CONTEXT.update(mode=mode, num_test=int(num_test))
def define_test(s):
    """Decorator factory driving the test harness.

    In 'describe' mode: print this test's index and description *s*, then
    return a decorator that discards the function.  In 'run' mode: run the
    decorated function only when the countdown in TEST_CONTEXT hits zero.
    """
    mode = TEST_CONTEXT["mode"]
    if mode == "describe":
        print(TEST_CONTEXT["num_test"], s)
        TEST_CONTEXT["num_test"] += 1
        def decorate(f):
            pass
    elif mode == "run":
        if TEST_CONTEXT["num_test"] == 0:
            def decorate(f):
                f()
        else:
            TEST_CONTEXT["num_test"] -= 1
            def decorate(f):
                pass
    return decorate
def skip_test(reason):
    """Record *reason* in the suite tmp dir and abort the test with exit code 1."""
    tmp_dir = Path(os.environ["TESTSUITE_TMP_DIR"])
    (tmp_dir / "skipped").write_text(reason)
    sys.exit(1)
def get_first_items(item_set, n_items, item_label):
    """Return the first *n_items* of *item_set* (a single item when n_items == 1).

    Skips the whole test when the iterable yields fewer than *n_items* values.
    """
    source = iter(item_set)
    collected = []
    try:
        while len(collected) < n_items:
            collected.append(next(source))
    except StopIteration:
        skip_test(f"requires at least two {item_label}s")
    if n_items == 1:
        return collected[0]
    return tuple(collected)
| [
"etienne.duble@imag.fr"
] | etienne.duble@imag.fr |
56223a3e9f7a42ca9c71a1e8022f48dc3ad0b795 | 10d0aa9386d7032f7b7bdd8db9666292331e90de | /PageLocators/shanghupage_locator.py | c867013c05f1ecbf4c5313585d7d5a40421c2d67 | [] | no_license | wangweimin110/UI_AUTO | f188446277f0d10a4da9d2ff50de20990fc59178 | a992c36f34e24eb5200af00d565fc3ad7459e702 | refs/heads/master | 2022-12-05T17:54:51.181778 | 2020-08-27T15:52:22 | 2020-08-27T15:52:22 | 289,970,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # -*- coding: utf-8 -*-
from selenium.webdriver.common.by import By
class ShanghuPageLocator:
    '''Element locators for the merchant ("shanghu") management page.'''
    # "Merchant management" menu entry
    business_management = (By.XPATH,'//span[text()="商户管理"]')
    # "Merchant contract maintenance" menu entry
    mcm = (By.XPATH,"//span[text()='商户签约维护']")
    # "Merchant number" query input field
    merchant_id = (By.XPATH,'//*[@id="nuiPageLoad280query_id"]/input')
| [
"1391691574@qq.com"
] | 1391691574@qq.com |
dc636fddcd61807fbfe299b988f3c61a5dfaa317 | 155497bf1cad4dd8ea3db387323df6cf938c6b19 | /02/lone_sum.py | ff2288f79138c1a4dfc77a05d76e67ef3be4d3db | [] | no_license | gp70/homework | 45a9fb22eb67958173a20fa30c73f1305c8bffc4 | 404b3444573998eac5042fbfe60cdd33b384420a | refs/heads/master | 2021-01-23T21:22:52.596980 | 2017-12-06T18:41:21 | 2017-12-06T18:41:21 | 102,896,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | def lone_sum(a, b, c):
sum = a + b + c
if a==b:
sum = sum - 2*a
if b==c:
sum = sum - 2*b
if a==c:
sum = sum - 2*c
if a==b==c:
sum = 0
return sum
| [
"gareth.petterson70@myhunter.cuny.edu"
] | gareth.petterson70@myhunter.cuny.edu |
b25cf15c6c2a589558393f10e01a8f63da93d570 | 6201152c40c52f8fb8f5789727661f63884fb5b8 | /xception/xception.py | 9c003ecd0d68cea1be7594e17207c5825031111b | [] | no_license | huuthai37/twostreamUCF11 | 4c7280f2a6778289bc28f9fa956c66959c158e33 | aa5689ab384c69704313d42bc1f0a7d277475168 | refs/heads/master | 2020-03-08T05:11:03.240535 | 2018-04-26T17:56:01 | 2018-04-26T17:56:01 | 127,941,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | # -*- coding: utf-8 -*-
"""Xception V1 model for Keras.
"""
from keras.models import Model
from keras import layers
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Conv2D
from keras.layers import SeparableConv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Dropout
from keras.layers import GlobalMaxPooling2D
def XceptionFix(include_top=True, input_shape=None, classes=1000, weights=None, drop_rate=0.5):
    """Build an Xception V1 convolutional network with a configurable head.

    Args:
        include_top: when True, append global average pooling, dropout and a
            softmax classification layer on top of the convolutional base.
        input_shape: optional input shape tuple; defaults to (299, 299, 3).
        classes: number of output classes for the classification head.
        weights: optional path to a weights file loaded into the model.
        drop_rate: dropout rate applied before the final Dense layer.
    """
    if input_shape is None:
        img_input = Input((299,299,3))
    else:
        img_input = Input(input_shape)
    # ---- Entry flow: two plain convolutions, then three residual
    # separable-convolution blocks with strided 1x1 shortcut projections.
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(64, (3, 3), use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    residual = Conv2D(256, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    residual = Conv2D(728, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    # ---- Middle flow: 8 identical residual blocks of three separable convs.
    # (The original code also built an unused `prefix` name string here.)
    for _ in range(8):
        residual = x
        x = Activation('relu')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False)(x)
        x = BatchNormalization()(x)
        x = layers.add([x, residual])
    # ---- Exit flow: one strided residual block, then two wider separable convs.
    residual = Conv2D(1024, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Optional classification head.
    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dropout(drop_rate)(x)
        x = Dense(classes, activation='softmax')(x)
    model = Model(img_input, x)
    # Optionally initialize from a saved weights file.
    if weights is not None:
        model.load_weights(weights)
return model | [
"huuthai37@gmail.com"
] | huuthai37@gmail.com |
2f4b614f6938af9db813b67d3ccf8c651d7fcff7 | dbcfd020a5c6008f7db96acd17eb183557821863 | /1.py | 3588b32b87e4484b8f9cb3acf510c75af3bf65ea | [] | no_license | vadim788/homework | acbfe7c75849cb77847f5802b9646ec70349299a | 009d5dc4cf4b471f780aa6573cf948b15ba1b3e6 | refs/heads/master | 2020-04-13T10:49:41.763131 | 2018-12-26T09:56:58 | 2018-12-26T09:56:58 | 163,154,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | def matri(rows):
matri = []
for i in range(0,rows):
matri.append(list(map(int, input().rstrip().split())))
return matri
# Read "rows cols" from the user (the prompt is in Ukrainian); only the row count is used.
dimension = (input("Введіть кількість рядків та стовпчиків: ")).split(" ")
# NOTE(review): this rebinds the name `matri` from the function to its result.
matri = matri(int(dimension[0]))
# Largest element over the whole matrix.
mx = max(map(max, matri))
# Locate the (row i, column j) position of the first occurrence of the maximum.
for i, e in enumerate(matri):
    try:
        j = e.index(mx)
        break
    except ValueError:
        pass
print(i,j) | [
"vadim743@icloud.com"
] | vadim743@icloud.com |
4b683ad281548db46b49c58011469c5752cda59a | 2f2b0b1761ff7c83d289e439f15520b2cfb27dad | /hw3_KNN_Boosting/classify3.py | e57d79a8624dabf6ab4525222c0d9246f1276a04 | [] | no_license | shuowenwei/Intro2MachineLearning_CS475 | 823b0dc3f0852eb9714a6d5c3d43ecbde928e1b3 | 107d12dc2643165e470ea566ec0b39ff7aed74d0 | refs/heads/master | 2021-01-24T10:15:20.792707 | 2016-11-28T04:59:13 | 2016-11-28T04:59:13 | 70,026,671 | 1 | 2 | null | 2016-11-16T03:25:00 | 2016-10-05T03:17:18 | Python | UTF-8 | Python | false | false | 15,473 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 07 11:29:12 2016
@author: mygao
"""
import os
import argparse
import sys
import pickle
import numpy
from cs475_types import ClassificationLabel, FeatureVector, Instance, Predictor, InstanceKnn
def load_data(filename):
    """Parse a libsvm-style data file into a list of Instance objects.

    Each non-blank line looks like ``<label> <index>:<value> ...``.
    Zero-valued features are dropped from the sparse FeatureVector.

    Raises:
        ValueError: if a label, index, or value cannot be parsed.
    """
    instances = []
    with open(filename) as reader:
        for line in reader:
            if len(line.strip()) == 0:
                continue
            # First token is the class label; the rest are index:value pairs.
            tokens = line.split(" ")
            label_string = tokens[0]
            try:
                int_label = int(label_string)
            except ValueError:
                raise ValueError("Unable to convert " + label_string + " to integer.")
            label = ClassificationLabel(int_label)
            feature_vector = FeatureVector()
            for token in tokens[1:]:
                parts = token.split(":")
                try:
                    index = int(parts[0])
                except ValueError:
                    raise ValueError("Unable to convert index " + parts[0] + " to integer.")
                try:
                    value = float(parts[1])
                except ValueError:
                    raise ValueError("Unable to convert value " + parts[1] + " to float.")
                # Sparse storage: only keep non-zero features.
                if value != 0.0:
                    feature_vector.add(index, value)
            instances.append(Instance(feature_vector, label))
    return instances
def get_args():
    """Define and parse the harness command-line interface.

    Returns the parsed namespace after check_args() has validated
    mode-dependent requirements.
    """
    parser = argparse.ArgumentParser(description="This is the main test harness for your algorithms.")
    # Core harness options.
    parser.add_argument("--data", type=str, required=True, help="The data to use for training or testing.")
    parser.add_argument("--mode", type=str, required=True, choices=["train", "test"],
                        help="Operating mode: train or test.")
    parser.add_argument("--model-file", type=str, required=True,
                        help="The name of the model file to create/load.")
    parser.add_argument("--predictions-file", type=str, help="The predictions file to create.")
    parser.add_argument("--algorithm", type=str, help="The name of the algorithm for training.")
    # Algorithm hyper-parameters (each with a sensible default).
    parser.add_argument("--online-learning-rate", type=float, help="The learning rate for perceptton",
                        default=1.0)
    parser.add_argument("--online-training-iterations", type=int,
                        help="The number of training iternations for online methods.",
                        default=5)
    parser.add_argument("--pegasos-lambda", type=float, help="The regularization parameter for Pegasos.",
                        default=1e-4)
    parser.add_argument("--knn", type=int, help="The value of K for KNN classification.",
                        default=5)
    parser.add_argument("--num-boosting-iterations", type=int, help="The value of boosting iteratons to run.",
                        default=10)
    args = parser.parse_args()
    check_args(args)
    return args
def check_args(args):
    """Enforce mode-dependent requirements on the parsed CLI arguments.

    Raises:
        Exception: if --algorithm is missing in train mode, or if
            --predictions-file is missing / the --model-file path does
            not exist in test mode.
    """
    if args.mode.lower() == "train":
        if args.algorithm is None:
            raise Exception("--algorithm should be specified in mode \"train\"")
    else:
        # Bug fix: the original message blamed --algorithm even though the
        # flag actually being checked here is --predictions-file.
        if args.predictions_file is None:
            raise Exception("--predictions-file should be specified in mode \"test\"")
        if not os.path.exists(args.model_file):
            raise Exception("model file specified by --model-file does not exist.")
####################
def EuclideanDistance(feature_vector1, feature_vector2, maxFeature):
    """Return the L2 distance between two sparse feature vectors.

    Each vector is densified into a length-``maxFeature`` numpy array;
    feature indices are 1-based, hence the ``index - 1`` offset.
    """
    dense1 = numpy.zeros(maxFeature)
    dense2 = numpy.zeros(maxFeature)
    for index, value in feature_vector1._data:
        dense1[index - 1] = value
    for index, value in feature_vector2._data:
        dense2[index - 1] = value
    return numpy.linalg.norm(dense1 - dense2)
def knn_GetNeighbors(instances, testInstance, k, maxFeature):
    """Return the k training Instances closest to testInstance.

    Distance is Euclidean over the densified feature space; ties keep
    the original training-set order (stable sort).
    """
    scored = []
    for example in instances:
        dist = EuclideanDistance(example._feature_vector, testInstance._feature_vector, maxFeature)
        scored.append(InstanceKnn(dist, example._feature_vector, example._label))
    scored.sort(key=lambda item: item._dist)
    # Indexing (rather than slicing) preserves the original IndexError
    # if k exceeds the number of training instances.
    neighbors = []
    for rank in range(k):
        item = scored[rank]
        neighbors.append(Instance(item._feature_vector, item._label))
    return neighbors
def dwKnn_GetNeighbors(instances, testInstance, k, maxFeature):
    """Return the k nearest neighbors, keeping each neighbor's distance
    so the caller can apply inverse-distance weighting.

    NOTE(review): this builds each neighbor as ``Instance(dist,
    feature_vector, label)`` (three arguments), while ``Instance`` is
    constructed elsewhere with two; confirm against cs475_types whether
    ``InstanceKnn`` was intended here.
    """
    scored = []
    for example in instances:
        dist = EuclideanDistance(example._feature_vector, testInstance._feature_vector, maxFeature)
        scored.append(InstanceKnn(dist, example._feature_vector, example._label))
    scored.sort(key=lambda item: item._dist)
    neighbors = []
    for rank in range(k):
        item = scored[rank]
        neighbors.append(Instance(item._dist, item._feature_vector, item._label))
    return neighbors
def knn_GetLabel(neighbors):
    """Majority-vote label among neighbors.

    Ties go to the label seen earliest in the neighbor list (same
    stable tie-break as sorting vote counts descending).
    """
    votes = {}
    for neighbor in neighbors:
        key = str(neighbor._label)
        votes[key] = votes.get(key, 0) + 1
    # max() returns the first key with the highest count, matching the
    # stable descending sort of the original implementation.
    return int(max(votes, key=votes.get))
def dwKnn_GetLabel(neighbors):
    """Distance-weighted vote: each neighbor contributes 1 / (d^2 + 1).

    Closer neighbors therefore dominate; ties keep the earliest label.
    """
    votes = {}
    for neighbor in neighbors:
        key = str(neighbor._label)
        votes[key] = votes.get(key, 0.0) + 1.0 / (neighbor._dist ** 2 + 1)
    return int(max(votes, key=votes.get))
##################
class knn(Predictor):
    """K-nearest-neighbors classifier with uniform (majority) voting."""

    def train(self, instances, k):
        """Memorize the training set; KNN has no explicit fitting step."""
        self._instances = instances
        self._k = k
        # Densification width for distance computation.
        self._maxFeature = ComputeMaxFeature(instances)

    def predict(self, instance):
        """Predict by majority vote over the k nearest training points."""
        neighbors = knn_GetNeighbors(self._instances, instance, self._k, self._maxFeature)
        return knn_GetLabel(neighbors)
###########################
class distanceWeighted_knn(Predictor):
    """KNN classifier whose votes are weighted by inverse squared distance."""

    def train(self, instances, k):
        """Memorize the training set and the feature-space width."""
        self._instances = instances
        self._k = k
        self._maxFeature = ComputeMaxFeature(instances)

    def predict(self, instance):
        """Predict via distance-weighted vote over the k nearest points."""
        neighbors = dwKnn_GetNeighbors(self._instances, instance, self._k, self._maxFeature)
        return dwKnn_GetLabel(neighbors)
###############
### this function finds the x_{kj} for every sample, j feature
def getFeatureValues(instances, feature_index):
    """Return the dense column of values for one feature, one per instance.

    Instances missing the feature read as 0; if a feature index repeats
    within an instance, the last occurrence wins (same scan as before).
    """
    column = []
    for inst in instances:
        value = 0
        for idx, val in inst._feature_vector._data:
            if idx == feature_index:
                value = val
        column.append(value)
    return column
######## this function finds the h_{j,c} value given x_{jk}
def hjc(xkjList, cutoff):
    """Decision-stump output: 1 where the feature value exceeds cutoff, else 0."""
    return [int(value > cutoff) for value in xkjList]
#### this function finds the best cutoff c in feature j
def htj(instances, weights, feature_index):
    """Find the best stump cutoff for a single feature.

    Candidate cutoffs are the midpoints between consecutive sorted
    feature values.  Returns ``[feature_index, cutoff, weighted_error,
    stump_predictions]`` minimizing the weighted training error.
    Relies on the module-global ``sampleSize`` set by main().
    """
    column = getFeatureValues(instances, feature_index)
    ordered = sorted(column)
    cutoffList = [0.5 * (ordered[i] + ordered[i + 1]) for i in range(sampleSize - 1)]
    # Deduplicate identical midpoints (repeated feature values).
    cutoffList = list(set(cutoffList))
    best_error = float("inf")
    best = []
    for cutoff in cutoffList:
        predictions = hjc(column, cutoff)
        weighted_error = 0
        for i in range(sampleSize):
            # String comparison mirrors how labels are compared elsewhere.
            weighted_error += weights[i] * int(str(predictions[i]) != str(instances[i]._label))
        if weighted_error < best_error:
            best_error = weighted_error
            best = [feature_index, cutoff, weighted_error, predictions]
    return best
##########this function finds the best j,c
def ht(instances, weights):
    """Best decision stump over all features.

    Scans every feature with htj() and keeps the stump with the lowest
    weighted error.  Relies on the module-global ``maxFeature``.
    """
    best_error = float("inf")
    best = []
    for j in range(maxFeature):
        candidate = htj(instances, weights, j + 1)
        if candidate[2] < best_error:
            best = candidate
            best_error = candidate[2]
    return best
# convert integer labels when needed and codes more reader friendly
def labelConvert(label):
    """Map between {0,1} dataset labels and {-1,+1} boosting labels.

    1 -> 1, 0 -> -1, -1 -> 0; accepts ints or their string forms.
    Returns None for any other input (same fall-through as before).
    """
    mapping = {1: 1, '1': 1, 0: -1, '0': -1, -1: 0, '-1': 0}
    return mapping.get(label)
#############################
class adaboost(Predictor):
    """AdaBoost over depth-1 decision stumps (see htj/ht helpers).

    Relies on the module globals ``sampleSize`` and ``iterations`` that
    main() assigns before training begins.
    """

    def __init__(self):
        # Start from the uniform distribution over training examples.
        self._weights = [1.0 / sampleSize] * sampleSize
        # Each entry: [alpha, feature_index, cutoff, weighted_error].
        self._res = []

    def train(self, instances):
        """Run boosting rounds, returning the list of learned stumps."""
        import math
        for _round in range(iterations):
            stump = ht(instances, self._weights)
            eps = stump[2]
            predictions = stump[3]
            # Stop early once the best stump is (numerically) perfect.
            if eps <= 0.000001:
                break
            if eps == 1:
                alpha = -float("inf")
            else:
                alpha = 0.5 * math.log((1 - eps) / eps)
            # Re-weight: shrink correctly classified examples, grow mistakes.
            unnormalized = [0] * sampleSize
            for i in range(sampleSize):
                true_label = labelConvert(int(str(instances[i]._label)))
                unnormalized[i] = self._weights[i] * numpy.exp(
                    -alpha * true_label * labelConvert(predictions[i]))
            total = sum(unnormalized)
            self._weights = [w / total for w in unnormalized]
            self._res.append([alpha, stump[0], stump[1], stump[2]])
        return self._res

    def predict(self, instance):
        """Weighted vote of all learned stumps; ties (and no stumps) -> 0."""
        score0 = 0
        score1 = 0
        for alpha, feature_index, cutoff, _err in self._res:
            fired = 0
            for idx, value in instance._feature_vector._data:
                if idx == feature_index and value > cutoff:
                    fired = 1
            if fired == 0:
                score0 = score0 + alpha
            else:
                score1 = score1 + alpha
        if score0 >= score1:
            return 0
        else:
            return 1
def train(instances, algorithm, k):
    """Dispatch to the requested learner and return the fitted Predictor.

    ``k`` is only used by the KNN variants; adaboost reads its
    hyper-parameters from module globals.  Unknown algorithm names fall
    through and return None (original behavior).
    """
    if algorithm == "knn":
        model = knn()
        model.train(instances, k)
        return model
    if algorithm == "distance_knn":
        model = distanceWeighted_knn()
        model.train(instances, k)
        return model
    if algorithm == "adaboost":
        model = adaboost()
        model.train(instances)
        return model
####################
def write_predictions(predictor, instances, predictions_file):
    """Write one predicted label per line, in instance order.

    Raises:
        Exception: wrapping any IOError hit while opening/writing.
    """
    try:
        with open(predictions_file, 'w') as writer:
            for instance in instances:
                writer.write(str(predictor.predict(instance)))
                writer.write('\n')
    except IOError:
        raise Exception("Exception while opening/writing file for writing predicted labels: " + predictions_file)
def ComputeMaxFeature(instances):
    """Return the highest feature index seen across all instances (0 if none)."""
    highest = 0
    for inst in instances:
        for idx, _val in inst._feature_vector._data:
            highest = max(highest, idx)
    return highest
def main():
    """CLI entry point: train a model to a pickle, or load one and predict.

    Sets the module globals (maxFeature, sampleSize, iterations) that
    the boosting/KNN code depends on before training.
    """
    args = get_args()
    global maxFeature
    global sampleSize
    global iterations
    iterations = args.num_boosting_iterations
    mode = args.mode.lower()
    if mode == "train":
        # Load the training data and record the globals the learners use.
        instances = load_data(args.data)
        maxFeature = ComputeMaxFeature(instances)
        sampleSize = len(instances)
        # Fit the requested model and persist it.
        predictor = train(instances, args.algorithm, args.knn)
        try:
            with open(args.model_file, 'wb') as writer:
                pickle.dump(predictor, writer)
        except IOError:
            raise Exception("Exception while writing to the model file.")
        except pickle.PickleError:
            raise Exception("Exception while dumping pickle.")
    elif mode == "test":
        # Load the evaluation data and the previously trained model.
        instances = load_data(args.data)
        predictor = None
        try:
            with open(args.model_file, 'rb') as reader:
                predictor = pickle.load(reader)
        except IOError:
            raise Exception("Exception while reading the model file.")
        except pickle.PickleError:
            raise Exception("Exception while loading pickle.")
        write_predictions(predictor, instances, args.predictions_file)
    else:
        raise Exception("Unrecognized mode.")
# Run the CLI harness only when executed directly as a script.
if __name__ == "__main__":
    main()
#python classify3.py --mode train --algorithm knn --model-file speech.mc.knn.model --data speech.mc.train
#python classify3.py --mode test --model-file speech.mc.knn.model --data speech.mc.dev --predictions-file speech.mc.dev.predictions
#python classify3.py --mode train --algorithm distance_knn --model-file easy.distance_knn.model --data easy.train
#python classify3.py --mode test --model-file easy.distance_knn.model --data easy.dev --predictions-file easy.dev.predictions
#python classify3.py --mode train --algorithm adaboost --model-file easy.adaboost.model --data easy.train
#python classify3.py --mode test --model-file easy.adaboost.model --data easy.dev --predictions-file easy.dev.predictions
#python classify3.py --mode train --algorithm adaboost --model-file easy.adaboost.model --data easy.train --num-boosting-iterations 10
#python classify3.py --mode test --model-file easy.adaboost.model --data easy.dev --predictions-file easy.dev.predictions
#python compute_accuracy.py easy.dev easy.dev.predictions
| [
"weisw9@gmail.com"
] | weisw9@gmail.com |
afce090b08d1a7012052ef2f6514c20bc7a9a2bf | 91a2c3c2a8813470485b740f04be01ec01c7656b | /DBPierreP/env/lib/python3.6/io.py | 44cc5fc03b2ee11bf6a3c70cb55258cc1c9d2971 | [] | no_license | piquemalpierre/DBPierreP | 399786bee4d111759ee300512d3fe06b3b104a0a | 411d0a4ca6f051de686b67f8bae3ab9cea6e889d | refs/heads/master | 2020-03-22T03:02:22.011413 | 2018-07-04T14:49:32 | 2018-07-04T14:49:32 | 139,409,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /Users/pierre/anaconda3/lib/python3.6/io.py | [
"p.piquemal@hotmail.fr"
] | p.piquemal@hotmail.fr |
bd2966e9056c67dfe6f46c61d7e98a57c1775964 | 804012f2d450cf4d63ed86f994f6f21f8ac14fe8 | /venv/bin/pip3.6 | 1059069331caabe24b6394a3a2d2c421f59f316a | [
"MIT"
] | permissive | YoUNG824/DDPG | 6d8b60044025c0d681432b239d1a6143275dac07 | 1b8ae2d9687107cb587f6cd81112e75e779bf575 | refs/heads/master | 2020-12-06T14:40:08.181364 | 2020-01-08T05:45:36 | 2020-01-08T05:45:36 | 232,487,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | 6 | #!/Users/wxy/Documents/DDPG/DDPG/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
# Auto-generated setuptools console-script shim: normalize argv[0] by
# stripping the "-script.py(w)" / ".exe" suffix, then exit with the
# return value of pip's registered 'pip3.6' entry point.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')())
| [
"wxy@XUEYANG-WANG.local"
] | wxy@XUEYANG-WANG.local |
324a2174c7ebc34906abae67f0ba03c2095abe94 | 92a22a2e256ceeda4e3aef8fad3a5559172c9710 | /RS_PYSpark_estimatePi.py | dbf4205976f7c179c706ebaa3cced3dd14559ed0 | [
"Unlicense"
] | permissive | BrooksIan/EstimatingPi | 4e47e8322b2203178bed0afc07c6a3f4326794d2 | a58f2cc3207ea42c8464129a3ad38c4047bd791a | refs/heads/master | 2022-12-19T09:11:25.329040 | 2020-09-09T16:37:04 | 2020-09-09T16:37:04 | 292,111,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # # Estimating $\pi$
#
# This PySpark example shows you how to estimate $\pi$ in parallel
# using Randon Sampleing integration.
from __future__ import print_function
import sys
import time
import cdsw
from random import random
from operator import add

# Connect to Spark by creating a Spark session
from pyspark.sql import SparkSession

# Defaults: one million samples per partition, two partitions.
defaultVal = 1000000
defaultParitions = 2
n_val = defaultVal

# Establish Spark Connection
spark = SparkSession \
    .builder \
    .appName("PythonPi") \
    .getOrCreate()

# Optional CLI override for the per-partition sample count.
if (len(sys.argv) - 1) > 0:
    if sys.argv[1] is not None:
        n_val = int(sys.argv[1])

# Start Timer
# NOTE(review): process_time() measures local CPU time only; the Spark
# job runs on executors, so a wall-clock timer may have been intended.
startTime = time.process_time()

partitions = defaultParitions
n = n_val * partitions


def f(_):
    """One Monte Carlo trial: 1 if a uniform point in [-1,1]^2 lies in the unit circle."""
    x = random() * 2 - 1
    y = random() * 2 - 1
    return 1 if x ** 2 + y ** 2 < 1 else 0


# Count hits across all partitions; pi ~= 4 * hits / trials.
count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
PiEst = 4.0 * count / n
print("Pi is estimated at %0.8f" % (PiEst))

# Stop Timer
stopTime = time.process_time()
elapsedTime = stopTime - startTime
print("Elapsed Process Time: %0.8f" % (elapsedTime))

# Return Paramaters to CDSW User Interface
cdsw.track_metric("NumIters", n_val)
cdsw.track_metric("PiEst", PiEst)
cdsw.track_metric("ProcTime", elapsedTime)

# Stop Spark Connection
spark.stop()
"brooks_ian@yahoo.com"
] | brooks_ian@yahoo.com |
000c1ebab7161995ba2a7f947ebcf545cd414d7d | 6b5431368cb046167d71c1f865506b8175127400 | /challenges/estimando-o-valor-de-pi-1/tests.py | 620927474107e9a20a9a8627a9b42bd69d3f8c26 | [] | no_license | Insper/design-de-software-exercicios | e142f4824a57c80f063d617ace0caa0be746521e | 3b77f0fb1bc3d76bb99ea318ac6a5a423df2d310 | refs/heads/master | 2023-07-03T12:21:36.088136 | 2021-08-04T16:18:03 | 2021-08-04T16:18:03 | 294,813,936 | 0 | 1 | null | 2021-08-04T16:18:04 | 2020-09-11T21:17:24 | Python | UTF-8 | Python | false | false | 617 | py | from strtest import str_test
class TestCase(str_test.TestCaseWrapper):
TIMEOUT = 2
def test_1(self):
for n in [1, 2, 3, 4, 10, 100, 1000, 10000]:
s = 0
for i in range(1, n + 1):
s += 6 / (i**2)
esperado = s**0.5
obtido = self.function(n)
msg = 'Não funcionou para n={0}. Esperado={1}. Obtido={2}'.format(
n, esperado, obtido)
if abs(obtido - s) < 0.01:
msg += ' Será que você não se esqueceu da raíz quadrada?'
self.assertAlmostEqual(esperado, obtido, msg=msg)
| [
"andrew.kurauchi@gmail.com"
] | andrew.kurauchi@gmail.com |
f2d0269e1a162457d910241459f384571d16809c | facbb3b45a8dacbf3e60647d3f6d561eb99141e7 | /mysite/mysite/settings.py | ece98285bc2d000b6d912498fee733569ffc4d43 | [] | no_license | MichaelAndreyeshchev/blockchain-donations-app | e91e26ef7fabffeff39e23bbbf71e4ce024808d1 | 4e459c17ae9a7d3d21cc256c772bfc9b279e5380 | refs/heads/main | 2023-07-14T22:05:26.162577 | 2021-09-01T20:06:26 | 2021-09-01T20:06:26 | 385,263,917 | 6 | 6 | null | 2021-07-27T15:09:13 | 2021-07-12T13:50:24 | Python | UTF-8 | Python | false | false | 3,413 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-vf8+3(m4z_jr+zc52vcwj-z=m4#1096%#-f3t8%sh2cf=yie55'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            Path(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database — local PostgreSQL.
# NOTE(review): credentials are hard-coded; move them to environment
# variables before deploying anywhere shared.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'cryptonate_database',
        'USER': 'admin',
        'PASSWORD': 'abc123',
        'HOST': 'localhost',
        'PORT': '',
    }
}

# Password validation
AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]

# Internationalization
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'

# Default primary key field type
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"michaelandrev@gmail.com"
] | michaelandrev@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.