seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
34266922439 | import grpc
import service_pb2
import service_pb2_grpc
def run():
    """Call MyService.MyMethod on the local gRPC server and print the result."""
    # Server address; an insecure channel is fine for local development.
    channel = grpc.insecure_channel('localhost:50051')
    stub = service_pb2_grpc.MyServiceStub(channel)
    request = service_pb2.Request(id=1, par_1=5, par_2=4)
    reply = stub.MyMethod(request)
    print(reply.result)


if __name__ == '__main__':
    run()
| noemabbbg/factorial | factorial/hz chto/client1.py | client1.py | py | 371 | python | en | code | 0 | github-code | 90 |
if __name__ == '__main__':
    # For each test case, print the values with duplicates removed while
    # preserving their first-occurrence order.
    t = int(input())
    for _ in range(t):
        _n = int(input())  # the length is given but never needed explicitly
        seen = set()
        deduped = []
        for value in map(int, input().split()):
            if value not in seen:
                seen.add(value)
                deduped.append(value)
        print(' '.join(str(v) for v in deduped))
| dkarthicks27/ML_Database | codeforces/restore_permutation.py | restore_permutation.py | py | 341 | python | en | code | 0 | github-code | 90 |
from collections import Counter


def _verdict(values, total):
    """Return True when the sequence satisfies the puzzle's conditions."""
    counts = Counter(values)
    parity = 0
    for v in values:
        parity ^= v
    if counts[0] == total:
        return True
    if total % 3 != 0:
        return False
    if counts[0] == int(total // 3) and len(counts) == 2:
        return True
    return len(counts) == 3 and parity == 0


N = int(input())
A = list(map(int, input().split()))
print('Yes' if _verdict(A, N) else 'No')
27159902164 | data_a = [2,3,4,5,6]
data_b = [data_a, 3,5,6,8,9]
print(f'''
Data a = {data_a}
Data b = {data_b} <- Nested data a in list data b
''')
customer01 = ['John Wick', 35, "London"]
customer02 = ['Blondie', 32, 'Los Angeles']
customer03 = ['Tarantino', 38, 'Las Vegas']
customers = [customer01, customer02, customer03]
print(customers)
for customer in customers:
print(f'''
Customer Data
Name : {customer[0]}
Age : {customer[1]}
Adress: {customer[2]}
''')
customer_copy = customers.copy()
customer02[0]= "Rhed Bustamante"
print(f'''
{customers}
{customer_copy}
''') | susilo-hidayat/Latihan | 28. NESTED_LIST.py | 28. NESTED_LIST.py | py | 590 | python | en | code | 0 | github-code | 90 |
35854918370 | from __future__ import annotations
import collections
import itertools
import json
import shutil
import os
from collections.abc import Callable, Iterable
from pathlib import Path as P
from typing import Optional
import click
import requests
import yaml
# pylint: disable=redefined-builtin
from requests.exceptions import ConnectionError, HTTPError
from url_normalize import url_normalize
from kapitan import cached
from kapitan import targets as kapitan_targets
from kapitan import defaults
from kapitan.cached import reset_cache as reset_reclass_cache
from kapitan.refs.base import RefController, PlainRef
from kapitan.refs.secrets.vaultkv import VaultBackend
from kapitan.resources import inventory_reclass
from commodore import __install_dir__
from commodore.config import Config
# Stand-in for kapitan's parsed CLI arguments; an instance is stored into
# cached.args["compile"] before compiling targets. Kapitan presumably reads
# these attributes from there rather than from function arguments — confirm
# against the pinned kapitan version before changing.
ArgumentCache = collections.namedtuple(
    "ArgumentCache",
    [
        "inventory_path",
        "yaml_multiline_string_style",
        "yaml_dump_null_as_empty",
    ],
)
class FakeVaultBackend(VaultBackend):
    """Vault ref backend that never contacts Vault.

    Every ref lookup simply returns a PlainRef for the requested path,
    which lets the catalog compile without access to the real secrets
    backend (see the `fake_refs` flag in kapitan_compile).
    """

    def __init__(self):
        "init FakeVaultBackend ref backend type"
        super().__init__(None)

    def __getitem__(self, ref_path):
        # Return a plain (unresolved) ref instead of going through Vault.
        return PlainRef(ref_path)
class ApiError(Exception):
    """Raised for Lieutenant API failures (connection, JSON parse or HTTP errors)."""

    pass
class IndentedListDumper(yaml.Dumper):
    """
    Dumper which preserves indentation of list items by overriding indentless.
    """
    def increase_indent(self, flow=False, *args, **kwargs):
        # Force indentless=False so block sequence items are indented
        # relative to their parent key instead of sitting flush with it.
        return super().increase_indent(flow=flow, indentless=False)
def yaml_load(file):
    """Parse `file` as a single-document YAML file and return the document."""
    with open(file, "r", encoding="utf-8") as stream:
        return yaml.safe_load(stream)
def yaml_load_all(file):
    """Parse `file` as multi-document YAML and return the documents as a list."""
    with open(file, "r", encoding="utf-8") as stream:
        return list(yaml.safe_load_all(stream))
def _represent_str(dumper, data):
    """Represent a Python string as a YAML scalar.

    Registered via yaml.add_representer(str, _represent_str); strings which
    contain newline characters are emitted as block scalars ("|") with the
    final newline stripped, everything else uses the default style.
    """
    style = "|" if "\n" in data else None
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style)
def yaml_dump(obj, file):
    """Serialize `obj` to `file` as a single YAML document.

    Installs the multiline-aware string representer and uses the
    indentation-preserving dumper.
    """
    yaml.add_representer(str, _represent_str)
    with open(file, "w", encoding="utf-8") as stream:
        yaml.dump(obj, stream, Dumper=IndentedListDumper)
def yaml_dump_all(obj, file):
    """Serialize `obj` (an iterable of documents) to `file` as multi-document YAML.

    Installs the multiline-aware string representer and uses the
    indentation-preserving dumper.
    """
    yaml.add_representer(str, _represent_str)
    with open(file, "w", encoding="utf-8") as stream:
        yaml.dump_all(obj, stream, Dumper=IndentedListDumper)
def lieutenant_query(api_url, api_token, api_endpoint, api_id, params=None):
    """Query the Lieutenant API and return the parsed JSON response.

    :param api_url: base URL of the Lieutenant instance.
    :param api_token: bearer token used for authentication.
    :param api_endpoint: API endpoint (e.g. "clusters").
    :param api_id: object id appended to the endpoint.
    :param params: optional query parameters passed to requests.
    :raises ApiError: on connection failure, unparseable JSON, or an HTTP
        error status (including the API-provided "reason" when present).
    """
    # Use None as the default instead of a mutable `{}` default argument
    # (shared across calls — flagged by flake8-bugbear B006).
    if params is None:
        params = {}
    try:
        r = requests.get(
            url_normalize(f"{api_url}/{api_endpoint}/{api_id}"),
            headers={"Authorization": f"Bearer {api_token}"},
            params=params,
        )
    except ConnectionError as e:
        raise ApiError(f"Unable to connect to Lieutenant at {api_url}") from e
    try:
        resp = json.loads(r.text)
    except json.JSONDecodeError as e:
        raise ApiError("Client error: Unable to parse JSON") from e
    try:
        r.raise_for_status()
    except HTTPError as e:
        extra_msg = "."
        if r.status_code >= 400:
            # Prefer the API's own error reason over the generic HTTPError text.
            if "reason" in resp:
                extra_msg = f": {resp['reason']}"
            else:
                extra_msg = f": {e}"
        raise ApiError(f"API returned {r.status_code}{extra_msg}") from e
    return resp
def _verbose_rmtree(tree, *args, **kwargs):
    """shutil.rmtree drop-in that announces which tree is being deleted."""
    message = f" > deleting {tree}/"
    click.echo(message)
    shutil.rmtree(tree, *args, **kwargs)
def clean_working_tree(config: Config):
    """Remove all directories Commodore generates while compiling a catalog."""
    # Annotating rmtree as a naked Callable keeps mypy from complaining about
    # the slightly different signatures of _verbose_rmtree and shutil.rmtree.
    rmtree: Callable = _verbose_rmtree if config.debug else shutil.rmtree
    click.secho("Cleaning working tree", bold=True)
    for directory in (
        config.inventory.inventory_dir,
        config.inventory.lib_dir,
        config.inventory.libs_dir,
        config.inventory.output_dir,
        config.catalog_dir,
    ):
        rmtree(directory, ignore_errors=True)
# pylint: disable=too-many-arguments
def kapitan_compile(
    config: Config,
    targets: Iterable[str],
    output_dir: Optional[P] = None,
    search_paths=None,
    fake_refs=False,
    reveal=False,
):
    """Compile the given kapitan targets.

    :param config: global Commodore config (work dir, refs dir, fetch flags).
    :param targets: names of the kapitan targets to compile.
    :param output_dir: output location; defaults to the config's work dir.
    :param search_paths: extra kapitan search paths; the work dir and the
        Commodore install dir are always appended.
    :param fake_refs: resolve all secret refs through FakeVaultBackend
        instead of a real Vault.
    :param reveal: passed through to kapitan's `reveal` compile option.
    """
    if not output_dir:
        output_dir = config.work_dir
    if not search_paths:
        search_paths = []
    search_paths = search_paths + [
        config.work_dir,
        __install_dir__,
    ]
    reset_reclass_cache()
    refController = RefController(config.refs_dir)
    if fake_refs:
        refController.register_backend(FakeVaultBackend())
    click.secho("Compiling catalog...", bold=True)
    # NOTE(review): kapitan appears to read these values from cached.args
    # rather than from compile_targets() arguments — confirm against the
    # pinned kapitan version before changing or removing this.
    cached.args["compile"] = ArgumentCache(
        inventory_path=config.inventory.inventory_dir,
        yaml_multiline_string_style="literal",
        yaml_dump_null_as_empty=False,
    )
    kapitan_targets.compile_targets(
        inventory_path=config.inventory.inventory_dir,
        search_paths=search_paths,
        output_path=output_dir,
        targets=targets,
        parallel=4,
        labels=None,
        ref_controller=refController,
        verbose=config.trace,
        prune=False,
        indent=2,
        reveal=reveal,
        cache=False,
        cache_paths=None,
        fetch=config.fetch_dependencies,
        # We always want to force-fetch when we want to fetch dependencies
        force_fetch=config.fetch_dependencies,
        validate=False,
        schemas_path=config.work_dir / "schemas",
        jinja2_filters=defaults.DEFAULT_JINJA2_FILTERS_PATH,
    )
def kapitan_inventory(
    config: Config, key: str = "nodes", ignore_class_notfound: bool = False
) -> dict:
    """
    Reset the reclass cache, render the inventory, and return its `key` entry.
    """
    reset_reclass_cache()
    rendered = inventory_reclass(
        config.inventory.inventory_dir, ignore_class_notfound=ignore_class_notfound
    )
    return rendered[key]
def rm_tree_contents(basedir):
    """
    Delete everything inside directory `basedir` while keeping the directory
    itself (and any hidden, dot-prefixed entries) in place.
    """
    base = P(basedir)
    if not base.is_dir():
        raise ValueError("Expected directory as argument")
    for entry in base.glob("*"):
        # pathlib's glob doesn't filter hidden files, skip them here
        if entry.name.startswith("."):
            continue
        if entry.is_dir():
            shutil.rmtree(entry)
        else:
            os.unlink(entry)
# pylint: disable=unsubscriptable-object
def relsymlink(src: P, dest_dir: P, dest_name: Optional[str] = None):
    """Create a relative symlink to `src` inside `dest_dir`.

    The link is named `dest_name` (defaulting to the source's file name) and
    points at `src` via a path relative to `dest_dir`. An existing file or
    link with the same name is replaced.
    """
    name = src.name if dest_name is None else dest_name
    # pathlib's relative_to() isn't suitable for this use case, since it only
    # works for dropping a path's prefix according to the documentation. See
    # https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.relative_to
    link_src = os.path.relpath(src, start=dest_dir)
    link_dst = dest_dir / name
    if not P(src).exists():
        raise click.ClickException(
            f"Can't link {link_src} to {link_dst}. Source does not exist."
        )
    # Replace any existing entry, including dangling symlinks (exists() is
    # False for those, hence the extra is_symlink() check).
    if link_dst.exists() or link_dst.is_symlink():
        os.remove(link_dst)
    os.symlink(link_src, link_dst)
def sliding_window(iterable, n):
    """Yield successive n-tuples from `iterable`, sliding one step at a time.

    sliding_window('ABCDEFG', 4) -> ABCD BCDE CDEF DEFG
    Yields nothing when the iterable holds fewer than n items.
    """
    it = iter(iterable)
    window = collections.deque(maxlen=n)
    for _ in range(n):
        try:
            window.append(next(it))
        except StopIteration:
            return
    yield tuple(window)
    for item in it:
        window.append(item)
        yield tuple(window)
| projectsyn/commodore | commodore/helpers.py | helpers.py | py | 7,958 | python | en | code | 43 | github-code | 90 |
33371789976 | from face import base
import argparse
import cv2
import numpy as np
model = None
def do_recognize(rimg):
    """Return the face feature vector for `rimg` (an image loaded via cv2.imread).

    Relies on the module-level `model` (a face.base.FaceModel) being
    initialised in the __main__ block below before this is called.
    """
    # bbox is unused here; presumably get_input() returns the preprocessed
    # face crop plus its bounding box — confirm in face.base.
    img, bbox = model.get_input(rimg)
    f1 = model.get_feature(img)
    return f1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='/home/fish/work/insightface/models/model-r100-ii/model,0',
help='path to load model.')
parser.add_argument('--ga-model', default='/home/fish/work/insightface/models/gamodel-r50/model,0',
help='path to load model.')
# parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--cpu', default=0, type=int, help='cpu id')
parser.add_argument('--det', default=0, type=int,
help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
args = parser.parse_args()
model = base.FaceModel(args)
rimg = cv2.imread('../images/upload_image/Trump.jpeg')
f1 = do_recognize(rimg)
rimg = cv2.imread('../images/upload_image/Fish.jpg')
f2 = do_recognize(rimg)
rimg = cv2.imread('../images/upload_image/Trump1.jpeg')
f3 = do_recognize(rimg)
dist = np.sum(np.square(f1 - f2))
print(dist)
dist = np.sum(np.square(f1 - f3))
print(dist)
sim = np.dot(f1, f2.T)
print(sim)
sim = np.dot(f1, f3.T)
print(sim)
# print(f1[0:10])
# gender, age = model.get_ga(img)
# print(gender)
# print(age)
# f2 = model.get_feature(img)
# print(f2)
# import os
# for root, dirs, files in os.walk("./upload_image", topdown=False):
# for name in files:
# path = os.path.join(root, name)
# print(path)
# rimg = cv2.imread(path)
# rimg = rotate_img(rimg)
# st = time.time()
# bbox, points = model.get_det(rimg)
# end = time.time()
# print(bbox)
# print(end - st)
# while True:
# img = model.get_input(rimg)
# st = time.time()
# bbox, points = model.get_det(rimg)
# end = time.time()
#
# print(end - st)
# print((int(bbox[0][0]), int(bbox[0][1])), (int(bbox[0][2]), int(bbox[0][3])))
# cv2.rectangle(rimg, (int(bbox[0][0]), int(bbox[0][1])), (int(bbox[0][2]), int(bbox[0][3])), (0,0,255))
# cv2.rectangle(rimg, (100, 100), (200, 200), (0,255,0))
# cv2.rectangle(rimg, (int(bbox[1][0]), int(bbox[1][1])), (int(bbox[1][2]), int(bbox[1][3])), (0,255,0))
# cv2.rectangle(rimg, (int(bbox[2][0]), int(bbox[2][1])), (int(bbox[2][2]), int(bbox[2][3])), (255,0,0))
#
# cv2.imshow("test", rimg)
#
# cv2.waitKey(0)
# sys.exit(0)
# img = cv2.imread('/raid5data/dplearn/megaface/facescrubr/112x112/Tom_Hanks/Tom_Hanks_54733.png')
# f2 = model.get_feature(img)
# dist = np.sum(np.square(f1-f2))
# print(dist)
# sim = np.dot(f1, f2.T)
# print(sim)
# #diff = np.subtract(source_feature, target_feature)
# #dist = np.sum(np.square(diff),1)
| Li-Fish/Web-FaceRecognize | trash/test.py | test.py | py | 3,328 | python | en | code | 2 | github-code | 90 |
32647413320 | #!/usr/bin/python3
#coding=utf-8
import re
import os
import bs4
import time
import json
import pytube
import requests
from pytube import exceptions
from urllib.parse import urlparse
from urllib.parse import unquote
ua = "Mozilla/5.0 (Linux; Android 6.0.1; SM-G532G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.120 Mobile Safari/537.36"
for i in ['/video','/video/YouTube','/video/Facebook','/video/Instagram','/video/XNXX','/video/Like','/video/Snack Video']:
try:
os.mkdir('/sdcard'+i)
except FileExistsError:
pass
def File_size(path):
    """Return the size of the file at `path` as a human-readable string.

    The size is scaled to B/KB/MB/GB/TB/PB with two decimal places
    ("%3.2f" formatting). Returns None when `path` is not a regular file,
    matching the original behaviour callers rely on.
    """
    if not os.path.isfile(path):
        return None
    byte = os.stat(path).st_size
    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
        if byte <= 1024.0:
            return "%3.2f %s" % (byte, unit)
        byte /= 1024.0
    # Previously the loop fell through and returned None for sizes beyond
    # the TB range; report petabytes instead.
    return "%3.2f %s" % (byte, 'PB')
def YouTube():
try:
url = input ("[+] Enter URL : ")
yt = pytube.YouTube(url)
title = yt.title
print ("\n[✓] Author : "+yt.author)
print ("[✓] Title : "+title)
print ("[✓] Views : "+str(yt.views))
res = input("\n[+] Choose Resolution\n\n[H] High Resolution\n[L] Low Resolution\n\n[?] Select : ").upper()
reso = yt.streams.get_highest_resolution() if res == 'H' else yt.streams.first()
reso = reso.url
req = requests.get(reso, stream = True)
save = os.path.join('/sdcard','video','YouTube',yt.video_id + '.mp4')
with open(save,'wb') as file:
print ("[!] Downloading Video...")
for data in req.iter_content(chunk_size=1024):
file.write(data)
print ("\n[✓] Download Complete")
print ("[✓] File Name : "+os.path.basename(save))
print ("[✓] File Size : "+File_size(save))
print ("[✓] File Path : "+os.path.realpath(save))
input ("\n[+] Press Enter To Go Back")
Main()
except exceptions.RegexMatchError:
print ("\n[!] Invalid URL!")
input ("[+] Press Enter To Go Back")
Main()
except exceptions.VideoUnavailable:
print ("\n[!] Video Not Found!")
input ("[+] Press Enter To Go Back")
Main()
def Facebook():
try:
url = input("[+] Enter URL : ")
host = urlparse(url).netloc
if host in ['www.facebook.com','mbasic.facebook.com','m.facebook.com']:
url = url.replace('m.facebook','mbasic.facebook').replace('www.facebook','mbasic.facebook')
a = requests.get(url)
if 'video_redirect' in a.text:
b = unquote(a.text.split('?src=')[1].split('"')[0])
c = re.findall('<title>(.*?)<\/title>',a.text)[0]
au = c.split(' - ')[0]
print ("\n[✓] Author : "+au)
print ("[✓] Title : "+c.split(' - ')[1].replace('| Facebook',''))
lanjut = input("\n[?] Download Video [Y/n] ").upper()
if lanjut == 'Y':
save = os.path.join('/sdcard','video','Facebook',c.split(' - ')[1] + '.mp4')
with open(save,'wb') as file:
print ("[!] Downloading Video...")
d = requests.get(b,stream = True)
for data in d.iter_content(chunk_size=1024):
file.write(data)
print ("\n[✓] Download Complete")
print ("[✓] File Name : "+os.path.basename(save))
print ("[✓] File Size : "+File_size(save))
print ("[✓] File Path : "+os.path.realpath(save))
input ("\n[+] Press Enter To Go Back")
Main()
else:
time.sleep(0.5) ; Main()
else:
print ("\n[!] Video Not Found!")
input ("[+] Press Enter To Go Back")
Main()
else:
print ("\n[!] Invalid URL")
input ("[+] Press Enter To Go Back")
Main()
except IndexError:
print ("\n[!] Error")
input ("[+] Press Enter To Go Back")
Main()
def Instagram():
try:
url = input("[+] Enter URL : ")
host = urlparse(url).netloc
if host in ['www.instagram.com']:
a = requests.get(url,params = {'__a':'1'},headers = {'user-agent':ua})
b = json.loads(a.text)['graphql']['shortcode_media']
if b['is_video']:
print ("\n[✓] Author : "+b['owner']['username'])
print ("[✓] Title : "+str(b['title']))
print ("[✓] Views : "+str(b['video_view_count']))
lanjut = input ("\n[?] Download Video [Y/n] ").upper()
if lanjut == 'Y':
save = os.path.join('/sdcard','video','Instagram',b['id'] + '.mp4')
with open(save,'wb') as file:
print ("[!] Downloading Video...")
c = requests.get(b['video_url'],stream = True)
for data in c.iter_content(chunk_size=1024):
file.write(data)
print ("\n[✓] Download Complete")
print ("[✓] File Name : "+os.path.basename(save))
print ("[✓] File Size : "+File_size(save))
print ("[✓] File Path : "+os.path.realpath(save))
input ("\n[+] Press Enter To Go Back")
Main()
else:
time.sleep(0.5) ; Main()
else:
print ("\n[!] Video Not Found")
input ("[+] Press Enter To Go Back")
Main()
else:
print ("\n[!] Invalid URL")
input ("[+] Press Enter To Go Back")
Main()
except KeyError:
print ("\n[!] Error")
input ("[+] Press Enter To Go Back")
Main()
def xnxx():
try:
print ("[!] Please Turn On VPN Before Continue\n")
url = input('[+] Enter URL : ')
host = urlparse(url).netloc
if host in ['www.xnxx.com']:
a = requests.get(url).text
if 'View Low Qual' in a and 'View High Qual' in a:
title = re.findall('<title>(.*?)<\/title>',a)[0].replace('- XNXX.COM','')
views = bs4.BeautifulSoup(a,'html.parser').find(class_="metadata").text.replace('\n','').replace('\t','').split('-')[2]
rating = bs4.BeautifulSoup(a,'html.parser').find(class_='rating-box').text
print ("\n[✓] Title : "+title)
print ("[✓] Views : "+views)
print ("[✓] Rating : "+rating)
res = input("\n[+] Choose Resolution\n\n[H] High Resolution\n[L] Low Resolution\n\n[?] Select : ").upper()
html = bs4.BeautifulSoup(a,'html.parser')
if res == 'H':
url = html.find('a',string = 'View High Qual')['href']
else:
url = html.find('a',string = 'View Low Qual')['href']
save = os.path.join('/sdcard','video','XNXX',title + '.mp4')
with open(save,'wb') as file:
print ("[!] Downloading Video...")
r = requests.get(url,stream = True)
for data in r.iter_content(chunk_size=1024):
file.write(data)
print ("\n[✓] Download Complete")
print ("[✓] File Name : "+os.path.basename(save))
print ("[✓] File Size : "+File_size(save))
print ("[✓] File Path : "+os.path.realpath(save))
input ("\n[+] Press Enter To Go Back")
Main()
else:
print ("\n[!] Video Not Found")
input ("[+] Press Enter To Go Back")
Main()
else:
print ("\n[!] Invalid URL")
input ("[+] Press Enter To Go Back")
Main()
except TypeError:
print ("\n[!] Error")
input ("[+] Press Enter To Go Back")
Main()
except requests.exceptions.SSLError:
print ("\n[!] Connection Error")
input ("[+] Press Enter To Go Back")
Main()
def like():
try:
url = input("[+] Enter URL : ")
host = urlparse(url).netloc
if host in ['likee.video']:
a = requests.get(url,headers = {'User-Agent':ua}).text
b = bs4.BeautifulSoup(a,'html.parser').find('script',type = 'application/ld+json').contents[0]
c = json.loads(b)
print ("\n[✓] Author: "+c['creator']['name'])
print ("[✓] Title : "+c['name'])
print ("[✓] Upload Date : "+c['uploadDate'])
lanjut = input ("\n[?] Download Video [Y/n] ").upper()
if lanjut == 'Y':
save = os.path.join('/sdcard','video','Like',re.findall('[0-9]+',c['url'])[0] + '.mp4')
mp4 = urlparse(c['contentUrl'])._replace(scheme = 'https').geturl()
with open(save,'wb') as file:
print ("[!] Downloading Video...")
r = requests.get(mp4,stream = True)
for data in r.iter_content(chunk_size=1024):
file.write(data)
print ("\n[✓] Download Complete")
print ("[✓] File Name : "+os.path.basename(save))
print ("[✓] File Size : "+File_size(save))
print ("[✓] File Path : "+os.path.realpath(save))
input ("\n[+] Press Enter To Go Back")
Main()
else:
time.sleep(0.5) ; Main()
else:
print ("\n[!] Invalid URL")
input ("[+] Press Enter To Go Back")
Main()
except KeyError:
print ("\n[!] Error")
input ("[+] Press Enter To Go Back")
Main()
def SnackVideo():
try:
url = input("[+] Enter URL : ")
host = urlparse(url).netloc
if host in ['www.snackvideo.com']:
header = {'Host':'www.snackvideo.com',
'sec-ch-ua':'" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'sec-ch-ua-mobile':'?1',
'upgrade-insecure-requests':'1',
'user-agent':ua,
'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site':'none',
'sec-fetch-mode':'navigate',
'sec-fetch-user':'?1',
'sec-fetch-dest':'document',
'accept-encoding':'gzip, deflate, br',
'accept-language':'en-GB,en;q=0.9'
}
a = requests.get(url,headers = header).text
b = bs4.BeautifulSoup(a,'html.parser').find('script',type = 'application/json',id = '__NEXT_DATA__').contents[0]
c = json.loads(b)
#print (c)
print ("\n[✓] Author : "+c['props']['pageProps']['videoList'][0]['userName'])
print ("[✓] Title : "+c['props']['pageProps']['videoList'][0]['caption'])
print ("[✓] Platfrom : "+c['props']['pageProps']['platform'])
lanjut = input ("\n[?] Download Video [Y/n] ").upper()
if lanjut == 'Y':
save = os.path.join('/sdcard/','video','Snack Video',c['props']['pageProps']['videoList'][0]['caption'] + '.mp4')
with open(save,'wb') as file:
r = requests.get(c['props']['pageProps']['videoList'][0]['src'],stream = True)
for data in r.iter_content(chunk_size=1024):
file.write(data)
print ("\n[✓] Download Complete")
print ("[✓] File Name : "+os.path.basename(save))
print ("[✓] File Size : "+File_size(save))
print ("[✓] File Path : "+os.path.realpath(save))
input ("\n[+] Press Enter To Go Back")
Main()
else:
time.sleep(0.5) ; Main()
else:
print ("\n[!] Invalid URL")
input ("[+] Press Enter To Go Back")
Main()
except KeyError:
print ("\n[!] Error")
input ("[+] Press Enter To Go Back")
Main()
def Main():
os.system('clear')
try:
pilih = int(input("[+] SELAMAT DATANG BWANG [+]\n\n[1] YouTube\n[2] Facebook\n[3] Instagram\n[4] XNXX\n[5] Like\n[6] Snack Video\n[0] Keluar\n\n[?] Pilih : "))
if pilih == 1:
YouTube()
elif pilih == 2:
Facebook()
elif pilih == 3:
Instagram()
elif pilih == 4:
xnxx()
elif pilih == 5:
like()
elif pilih == 6:
SnackVideo()
elif pilih == 0:
os.abort()
else:
raise ValueError
except ValueError:
print ("[!] Input Tidak Valid :(")
time.sleep(1.5)
Main()
except KeyboardInterrupt:
exit("\n[!] Exit")
except EOFError:
os.abort()
except requests.exceptions.ConnectionError:
print ("\n[!] No Connection")
exit("[!] Exit!")
except requests.exceptions.Timeout:
print ("\n[!] The request timed out")
exit("[!] Exit!")
except requests.exceptions.ConnectTimeout:
print ("\n[!] The request timed out while trying to connect to the remote server")
exit("[!] Exit!")
except Exception as err:
print ("\n[!] "+str(err))
exit("[!] Exit!")
if __name__ == "__main__":
Main()
| MR-X-junior/Download | main.py | main.py | py | 11,857 | python | en | code | 0 | github-code | 90 |
# Constructive problem: build a connected graph on n vertices in which exactly
# k vertex pairs are left non-adjacent (each such pair presumably required to
# be at distance 2 — reachable through vertex 1; confirm against the original
# problem statement).
n,k=map(int,input().split())
# A star centred on vertex 1 leaves C(n-1, 2) = (n-1)(n-2)/2 non-adjacent
# pairs, which is the maximum achievable, so any larger k is impossible.
if k>(n-2)*(n-1)//2:
    print(-1)
    exit()
elif k==(n-2)*(n-1)//2:
    # The bare star already matches k exactly: n-1 edges from vertex 1.
    print(n-1)
    for i in range(n-1):
        print(1,i+2)
    exit()
# Start from the star, then connect leaf pairs (a, b) one by one; every added
# edge reduces the non-adjacent pair count by exactly one until it equals k.
ans=[]
for i in range(n-1):
    ans.append((1,i+2))
cnt=(n-2)*(n-1)//2
a=2
b=3
while cnt>k:
    cnt-=1
    ans.append((a,b))
    b+=1
    if b==n+1:
        # Exhausted partners for `a`; move on to the next leaf.
        b=a+2
        a+=1
print(len(ans))
for x in ans:
    print(*x)
# NOTE(review): a modulus of 1 maps every residue to 0, so COMB() (which
# reduces mod MOD) always returns 0 and COMBinitialize() zeroes out the
# fac/finv/inv tables. main() only uses the Pascal-triangle COMB_2 path,
# which ignores MOD, so this is currently harmless — confirm before ever
# calling COMB()/COMBinitialize().
MOD = 1
m = 100
COMB_table = [[0]*(m+1) for _ in range(m+1)]
fac = [0] * m
finv = [0] * m
inv = [0] * m
def COMBinitialize(m):
fac[0] = 1
finv[0] = 1
if m > 1:
fac[1] = 1
finv[1] = 1
inv[1] = 1
for i in range(2, m):
fac[i] = fac[i-1] * i % MOD
inv[i] = MOD - inv[MOD % i] * (MOD // i) % MOD
finv[i] = finv[i - 1] * inv[i] % MOD
def COMB(n, k):
if n < k:
return 0
if n < 0 or k < 0:
return 0
return fac[n] * (finv[k] * finv[n - k] % MOD) % MOD
def make_COMB_table(m):
for i in range(m+1):
for j in range(i+1):
if j == 0 or j == i:
COMB_table[i][j] = 1
else:
COMB_table[i][j] = COMB_table[i-1][j-1] + COMB_table[i-1][j]
def COMB_2(n, k):
if n < k:
return 0
if n < 0 or k < 0:
return 0
return COMB_table[n][k]
make_COMB_table(m)
def main():
from collections import Counter
N, A, B = (int(i) for i in input().split())
V = [int(i) for i in input().split()]
V.sort(reverse=True)
ans = sum(v for v in V[:A])/A
c = Counter(V)
c_ans = Counter(V[:A])
cnt = 0
if V[0] != V[A-1]:
cnt = COMB_2(c[V[A-1]], c_ans[V[A-1]])
else:
for i in range(A, B+1):
cnt += COMB_2(c[V[0]], i)
print(ans)
print(cnt)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03776/s345334248.py | s345334248.py | py | 1,418 | python | en | code | 0 | github-code | 90 |
from collections import deque
import sys
# Queue command simulator (Baekjoon 10845, per the file name): supports
# "push X", "pop", "size", "empty", "front" and "back", printing one line
# of output per query command.
n = int(sys.stdin.readline())
q = deque()
for i in range(n):
    command = sys.stdin.readline().split()
    if len(command) == 2:
        # Two tokens means "push X"; all other commands are single words.
        q.append(int(command[1]))
    else:
        if command[0] == 'front':
            if q:
                print(q[0])
            else:
                print(-1)
        elif command[0] == 'back':
            if q:
                print(q[-1])
            else:
                print(-1)
        elif command[0] == 'size':
            print(len(q))
        elif command[0] == 'empty':
            if q:
                print(0)
            else:
                print(1)
        elif command[0] == 'pop':
            if q:
                x = q.popleft()
                print(x)
            else:
                print(-1)
# (Translated from Korean:) I missed that output must be printed one line at
# a time only for the commands that ask for it — I printed on push too,
# wasted time and got it wrong! Read the problem statement carefully.
class employee:
    """Toy class demonstrating class-level attributes and classmethods."""

    company = "APPLE"

    def show(self):
        """Print the employee's name together with the current company."""
        print(f"the name of the employee is {self.name} and he is working in {self.company}")

    @classmethod
    def change_company(cls, newcomapny):
        """Rebind the class-level `company`, affecting all employees."""
        cls.company = newcomapny
e1=employee()
e1.name="raghu"
e1.show()
e2=employee()
e2.name="ramesh"
e2.show()
e2.change_company("TESLA")
e2.show()
print(employee.company)
| rohit9098singh/python_programming | ch29_1_classmethod.py | ch29_1_classmethod.py | py | 389 | python | en | code | 0 | github-code | 90 |
20438393717 | import requests, os, sys, collections, time, urllib.parse
from datetime import datetime
token_path = os.path.expanduser("~/.youtrack-token")
if not os.path.exists(token_path):
print("Please follow the instructions at https://www.jetbrains.com/help/youtrack/devportal/authentication-with-permanent-token.html to obtain a YouTrack permanent token")
print("and save the token to the .youtrack_token file in your home directory.")
sys.exit(1)
if len(sys.argv) < 3:
print("Usage:")
print(" Vote distribution by time: python3 youtrack-vote-distribution.py <server> [month] <issue ID>")
print(" Recently top voted issues: python3 youtrack-vote-distribution.py <server> report <output file> <query>")
sys.exit(1)
YOUTRACK_API = sys.argv[1] + '/api'
token = open(token_path).readline().strip()
headers = {
'Authorization': 'Bearer ' + token,
'Accept': 'application/json'
}
def youtrack_request(request):
    """GET `request` (a path below the API root) and return the parsed JSON.

    Retries forever on connection errors (10s back-off); the unconditional
    2 second sleep before every call acts as a crude rate limit against the
    YouTrack server.
    """
    while True:
        try:
            time.sleep(2)
            return requests.get(YOUTRACK_API + request, headers=headers).json()
        except requests.exceptions.ConnectionError as e:
            print(e)
            time.sleep(10)
def collect_vote_timestamps(issue_id):
vote_timestamps = {}
r = youtrack_request(f'/issues/{issue_id}/activities?fields=timestamp,author(login),added,removed,category&categories=VotersCategory')
for vote in r:
voter = vote['author']['login']
if vote['added']:
vote_timestamps[voter] = datetime.fromtimestamp(vote['timestamp'] // 1000)
else:
if voter in vote_timestamps: del vote_timestamps[voter]
return vote_timestamps
def collect_vote_timestamps_recursive(issue_id):
result = collect_vote_timestamps(issue_id)
link_types = youtrack_request(f'/issues/{issue_id}/links?fields=linkType(name),issues(idReadable)')
for link_type in link_types:
if link_type['linkType']['name'] == 'Duplicate':
for issue in link_type['issues']:
duplicate_id = issue['idReadable']
issue_details = youtrack_request(f'/issues/{duplicate_id}?fields=reporter(login),created')
result[issue_details['reporter']['login']] = datetime.fromtimestamp(issue_details['created'] // 1000)
result.update(collect_vote_timestamps(duplicate_id))
return result
def distribution_per_year(votes, include_month = False):
    """Count votes per year (or per "year.month") from a voter->datetime map.

    Returns (key, count) pairs in first-encountered order; keys are ints
    (years) or "year.month" strings when include_month is set.
    """
    tally = collections.Counter(
        f'{when.year}.{when.month}' if include_month else when.year
        for when in votes.values()
    )
    return list(tally.items())
def extract_custom_field(issue, name):
    """Return the value name of the issue's custom field called `name`.

    A present field with a null value yields 'Unspecified'; a missing field
    yields None (implicit).
    """
    for field in issue['customFields']:
        if field['projectCustomField']['field']['name'] != name:
            continue
        value = field['value']
        return value['name'] if value else 'Unspecified'
def query_issues(query):
result = []
issues = youtrack_request(f'/issues?fields=idReadable,summary,votes,customFields(projectCustomField(field(name)),value(name))&$top=500&query={query} order by:votes')
for issue in issues:
issue_id = issue['idReadable']
subsystem = extract_custom_field(issue, 'Subsystem')
result.append((issue_id, issue['summary'], issue['votes'], subsystem))
return result
def top_voted_issues_per_subsystem(issues):
    """Group issues by subsystem, keeping only those voted on this year.

    :param issues: (issue_id, summary, votes, subsystem) tuples as produced
        by query_issues.
    :returns: dict mapping subsystem -> list of (issue_id, summary,
        votes_this_year), sorted by this year's votes descending.

    Fixes two shadowing issues in the original: the inner loop variable
    `votes` clobbered the outer tuple element of the same name, and `list`
    shadowed the builtin.
    """
    this_year = datetime.now().year
    top_per_subsystem = {}
    for issue_id, summary, total_votes, subsystem in issues:
        vote_distribution = distribution_per_year(collect_vote_timestamps_recursive(issue_id))
        votes_this_year = 0
        for year, year_votes in vote_distribution:
            if year == this_year:
                votes_this_year = year_votes
        if not votes_this_year:
            continue
        print(f'{issue_id} {summary}: {votes_this_year}')
        top_per_subsystem.setdefault(subsystem, []).append(
            (issue_id, summary, votes_this_year))
    # Order each subsystem's issues by this year's vote count, descending.
    for issue_list in top_per_subsystem.values():
        issue_list.sort(key=lambda entry: -entry[2])
    return top_per_subsystem
issue_id = sys.argv[2]
if issue_id == 'report':
report_file = open(sys.argv[3], "w")
issues = query_issues(' '.join([urllib.parse.quote_plus(arg) for arg in sys.argv[4:]]))
top_per_subsystem = top_voted_issues_per_subsystem(issues)
subsystems = list(top_per_subsystem.keys())
subsystems.sort()
for subsystem in subsystems:
issues = top_per_subsystem[subsystem]
print(f"## Subsystem: {subsystem}", file=report_file)
print("| Issue | Votes |", file=report_file)
print("| --- | --- |", file=report_file)
for issue_id, summary, votes in issues:
print(f"| {issue_id} | {votes} |", file=report_file)
print("", file=report_file)
else:
include_month = False
if issue_id == 'month':
issue_id = sys.argv[3]
include_month = True
print(distribution_per_year(collect_vote_timestamps_recursive(issue_id), include_month))
| yole/youtrack-vote-distribution | youtrack-vote-distribution.py | youtrack-vote-distribution.py | py | 4,985 | python | en | code | 0 | github-code | 90 |
42092030226 | import math
import pygame as pygame
from pygame.math import Vector2
from src.discrete_fourier_transform import discrete_fourier_transform
from src.settings import Settings
from src.signal_generator import SignalGenerator
class FourierSeries:
    """Pygame app that redraws a signal from its discrete Fourier transform.

    Two epicycle chains are drawn: one drives the x coordinate and a second
    one, rotated by pi/2, drives the y coordinate; combining their tips
    traces out the reconstructed signal.
    """

    def __init__(self):
        pygame.init()
        pygame.event.set_allowed([pygame.QUIT])
        self.clock = pygame.time.Clock()
        self.settings = Settings()
        self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height),
                                              flags=pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.NOFRAME)
        self.signal_generator = SignalGenerator()
        # generate_signal() yields a pair of coordinate sequences (x, y).
        self.x_signal = self.signal_generator.generate_signal()[0]
        self.y_signal = self.signal_generator.generate_signal()[1]
        self.fourierX = discrete_fourier_transform(self.x_signal)
        self.fourierY = discrete_fourier_transform(self.y_signal)
        self.time = 0  # phase within the current 2*pi cycle
        self.signal = []  # traced points, newest first

    def _check_events(self):
        """Drain pending pygame events and shut pygame down on QUIT."""
        for event in pygame.event.get():
            # BUG FIX: compare the event's type. The original
            # `event == pygame.QUIT` compared an Event object to an int
            # constant, which is never true, so QUIT events were ignored.
            if event.type == pygame.QUIT:
                pygame.quit()

    def _set_position(self):
        """Advance and draw both epicycle chains; return their tip vectors."""
        vector_x = self._draw_fourier(self.settings.screen_width * 5 / 7,
                                      self.settings.screen_height / 5, 0, self.fourierX)
        vector_y = self._draw_fourier(self.settings.screen_width / 7,
                                      self.settings.screen_height / 2, math.pi / 2, self.fourierY)
        # Traced point: x from the horizontal chain, y from the rotated one.
        vector = Vector2(vector_x.x, vector_y.y)
        return {'vector_x': vector_x, 'vector_y': vector_y, 'vector': vector}

    def _draw_fourier(self, x, y, rotation, fourier):
        """Draw one epicycle chain starting at (x, y) and return its tip.

        `fourier` is a list of dicts with 'freq', 'amp' and 'phase' keys (as
        produced by discrete_fourier_transform); `rotation` rotates the whole
        chain (pi/2 for the y-axis chain).
        """
        for i in range(len(fourier)):
            # tracking x, y coordinates
            prev_x = x
            prev_y = y
            freq = fourier[i].get('freq')
            radius = fourier[i].get('amp')
            phase = fourier[i].get('phase')
            x += radius * math.cos(freq * self.time + phase + rotation)
            y += radius * math.sin(freq * self.time + phase + rotation)
            pygame.draw.circle(self.screen,
                               self.settings.circle_color,
                               self.settings.translate.__add__(Vector2(prev_x, prev_y)),
                               radius, 1)
            pygame.draw.line(self.screen, self.settings.line_color,
                             self.settings.translate.__add__(Vector2(prev_x, prev_y)),
                             self.settings.translate.__add__(Vector2(x, y)), 1)
        return Vector2(x, y)

    def _draw_signal(self, surface, signal, color):
        """Draw the traced signal as a polyline on `surface`."""
        for i in range(len(signal)-1):
            # pygame.draw.circle(self.screen, self.settings.line_color,
            #                    self.settings.translate.__add__(Vector2(self.signal[i].x, self.signal[i].y)),1)
            pygame.draw.line(surface, color,
                             (signal[i].x, signal[i].y), (signal[i+1].x, signal[i+1].y))

    def _draw_position_lines(self, surface, vector_x, vector_y, vector, color):
        """Draw guide lines from each chain's tip to the traced point.

        NOTE(review): unlike _draw_fourier, these lines (and _draw_signal)
        are drawn WITHOUT self.settings.translate applied — confirm whether
        that offset mismatch is intentional. This method also resets the
        cycle below, a side effect kept here to preserve behaviour.
        """
        pygame.draw.line(surface, color, (vector_x.x, vector_x.y), (vector.x, vector.y))
        pygame.draw.line(surface, color, (vector_y.x, vector_y.y), (vector.x, vector.y))
        # epicycles control: restart the trace after one full 2*pi cycle
        if self.time > math.pi * 2:
            self.time = 0
            self.signal = []

    def _draw(self):
        """Render one frame: epicycles, guide lines and the traced signal."""
        vectors = self._set_position()
        vector_x = vectors['vector_x']
        vector_y = vectors['vector_y']
        vector = vectors['vector']
        self.signal.insert(0, vector)
        # drawing section
        self._draw_position_lines(self.screen, vector_x, vector_y, vector, self.settings.line_color)
        self._draw_signal(self.screen, self.signal, self.settings.line_color)
        # One full cycle spans len(fourierY) frames.
        dt = 2 * math.pi / len(self.fourierY)
        self.time += dt

    def run(self):
        """Main loop: poll events, redraw everything, cap at 60 FPS."""
        while 1:
            self._check_events()
            self.screen.fill(self.settings.bg_color)
            self._draw()
            self.clock.tick(60)
            pygame.display.flip()
# Script entry point: build the animation and run the event loop.
if __name__ == '__main__':
    fs = FourierSeries()
    fs.run()
| lukaszmichalskii/Fourier-Series | src/fourier_series.py | fourier_series.py | py | 4,134 | python | en | code | 0 | github-code | 90 |
12844545310 | """Add on delete cascade to selection options to allow for deletion of filter types
Revision ID: 9cec67ca7bb0
Revises: e04509401aff
Create Date: 2020-01-23 19:06:09.695080
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9cec67ca7bb0'
down_revision = 'e04509401aff'
branch_labels = None
depends_on = None
def upgrade():
    """Recreate the selection_option -> filter_type FK with ON DELETE CASCADE."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('selection_option_filter_type_id_fkey', 'selection_option', type_='foreignkey')
    op.create_foreign_key(None, 'selection_option', 'filter_type', ['filter_type_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original FK without the cascade behaviour."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drops the unnamed FK created in upgrade(); passing None
    # relies on the backend supporting constraint lookup by definition.
    op.drop_constraint(None, 'selection_option', type_='foreignkey')
    op.create_foreign_key('selection_option_filter_type_id_fkey', 'selection_option', 'filter_type', ['filter_type_id'], ['id'])
    # ### end Alembic commands ###
| akash-cis/PROJECTS | socialai/WebApp-API-develop/migrations/versions/2020-01-23-19-06_9cec67ca7bb0_add_on_delete_cascade_to_selection_.py | 2020-01-23-19-06_9cec67ca7bb0_add_on_delete_cascade_to_selection_.py | py | 1,018 | python | en | code | 0 | github-code | 90 |
15230174426 | from django.shortcuts import render,redirect
from django.contrib.auth.models import User, auth
from .models import Bus,Reservation,Contact
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request,'index.html')
def register(request):
    """Create a new user account from POSTed credentials.

    Duplicate username/email attempts are only reported on the server
    console; in every POST case the client is redirected to the login
    page.  The original returned a response only on the success branch,
    so a duplicate username/email made the view return None (a 500 in
    Django).
    """
    if request.method == "POST":
        if User.objects.filter(username=request.POST['username']).exists():
            print("User Already Exists")
        elif User.objects.filter(email=request.POST['email']).exists():
            print("Email Already Existed")
        else:
            # create_user hashes the password before storing it.
            u = User.objects.create_user(username=request.POST['username'],
                                         email=request.POST['email'],
                                         password=request.POST['password'])
            u.save()
        # Always return a response (fixes the None return on duplicates).
        return redirect('login')
    else:
        return render(request, 'register.html')
def login(request):
    """Authenticate the user and start a session.

    On success redirects to the dashboard; on failure redirects back to
    the login page after printing to the console.
    NOTE(review): the GET branch renders 'index.html' rather than a
    dedicated login template — confirm this is intended.
    """
    if request.method == "POST":
        user = auth.authenticate(username=request.POST['username'],
                                 password=request.POST['password'])
        if user is not None:
            auth.login(request, user)
            return redirect('dashboard')
        else:
            print('Invalid Credentials')
            return redirect('login')
    else:
        return render(request, 'index.html')
def logout(request):
    """End the current session and return to the landing page."""
    auth.logout(request)
    return redirect('index')


def dashboard(request):
    """List all buses on the routes/buses page."""
    buses = Bus.objects.all()
    context = {
        'buses': buses
    }
    return render(request,'routes-buses.html', context)


def reservation(request, bus_id):
    """Show the booking form for one bus (404 if it does not exist)."""
    bus = Bus.objects.get(pk=bus_id)
    context = {'bus': bus}
    return render(request, 'book-bus.html', context)
def my_reservation(request):
    """Create (POST) or display (GET) the user's reservation.

    POST stores a new Reservation from the submitted booking form and
    echoes it back; GET looks up the reservation whose ``username``
    matches the logged-in user.
    NOTE(review): the GET lookup compares a CharField to ``request.user``
    and uses .get(), so multiple reservations per user would raise
    MultipleObjectsReturned — confirm uniqueness is enforced upstream.
    """
    if request.method == 'POST':
        username = request.POST.get('name')
        email = request.POST.get('email')
        phone = request.POST.get('phone')
        adults = request.POST.get('adults')
        childrens = request.POST.get('childrens')
        total_fare = request.POST.get('total')
        bus_name = request.POST.get('bus_name')
        route = request.POST.get('route')
        bus_type = request.POST.get('type')
        duration = request.POST.get('duration')
        #store for future reference
        reservation = Reservation(
            username=username,
            email=email,
            phone=phone,
            adults=adults,
            childrens=childrens,
            total_fare=total_fare,
            bus_name=bus_name,
            route=route,
            bus_type=bus_type,
            duration=duration
        )
        reservation.save()
        # Create a dictionary to pass the data to the template
        context = {
            'username': username,
            'email': email,
            'phone': phone,
            'adults': adults,
            'childrens': childrens,
            'total_fare': total_fare,
            'bus_name': bus_name,
            'route': route,
            'bus_type': bus_type,
            'duration': duration
        }
        return render(request, 'my-reservation.html', context)
    else:
        try:
            reservation = Reservation.objects.get(username = request.user)
            context = {
                'username': reservation.username,
                'email': reservation.email,
                'phone': reservation.phone,
                'adults': reservation.adults,
                'childrens': reservation.childrens,
                'total_fare': reservation.total_fare,
                'bus_name': reservation.bus_name,
                'route': reservation.route,
                'bus_type': reservation.bus_type,
                'duration': reservation.duration
            }
        except Reservation.DoesNotExist:
            # No reservation yet: render the page with an empty context.
            context = {}
        return render(request, 'my-reservation.html', context)
def contactus(request):
    """Store a contact-form submission (POST) or show the form (GET)."""
    if request.method == 'POST':
        # Contact.objects.create already persists; the extra save() below
        # is redundant but harmless.
        data = Contact.objects.create(name=request.POST['name'],
                                      email=request.POST['email'],
                                      subject=request.POST['subject'],
                                      message=request.POST['message'])
        data.save()
        return redirect('index')
    else:
        return render(request, 'contact-us.html')
42280134007 | from __future__ import print_function, absolute_import
import underworld.function as fn
from underworld.scaling import non_dimensionalise as nd
from underworld.scaling import units as u
class Density(object):
    """Base class for density models.

    Subclasses set `temperatureField` / `pressureField` before calling
    `effective_density`.
    """
    def __init__(self):
        self.temperatureField = None
        self.pressureField = None
        self.name = None
class ConstantDensity(Density):

    def __init__(self, reference_density):
        """Constant density function

        Parameters
        ----------

            reference_density : density

        Returns
        -------

            An UWGeodynamics Constant Density object
        """
        self.reference_density = reference_density
        # Store the non-dimensionalised value once for reuse.
        self._density = nd(reference_density)
        self.name = "Constant ({0})".format(str(reference_density))

    def effective_density(self):
        """Return the (constant) density as an underworld Function."""
        return fn.Function.convert(self._density)
class LinearDensity(Density):

    def __init__(self, reference_density, thermalExpansivity=3e-5 / u.kelvin,
                 reference_temperature=273.15 * u.degK, beta=0. / u.pascal,
                 reference_pressure=0. * u.pascal):
        """ The LinearDensity function calculates:
            density = rho0 * (1 + (beta * deltaP) - (alpha * deltaT))
            where deltaP is the difference between P and the reference P,
            and deltaT is the difference between T and the reference T

        Parameters
        ----------

            reference_density : reference density
            thermalExpansivity : thermal expansivity of the material at the
                                 temperature of reference.
            reference_temperature : reference temperature
            beta : coefficient of compressibility
            reference_pressure : reference pressure

        Returns
        -------

            An UWGeodynamics Linear Density object.
        """

        super(LinearDensity, self).__init__()

        self.name = "Linear (ref: {0})".format(str(reference_density))
        self.reference_density = reference_density
        self.reference_temperature = reference_temperature
        self.thermalExpansivity = thermalExpansivity
        self.reference_pressure = reference_pressure
        # Pre-compute the non-dimensionalised coefficients.
        self._alpha = nd(thermalExpansivity)
        self._beta = nd(beta)
        self._Tref = nd(reference_temperature)
        self._Pref = nd(reference_pressure)

    def effective_density(self):
        """calculate effective_density based
           on PT conditions"""

        density = nd(self.reference_density)

        # Temperature dependency
        if not self.temperatureField:
            raise RuntimeError("No temperatureField found!")

        t_term = self._alpha * (self.temperatureField - self._Tref)

        # Pressure dependency
        if not self.pressureField:
            raise RuntimeError("No pressureField found!")

        p_term = self._beta * (self.pressureField - self._Pref)

        # rho = rho0 * (1 + beta*deltaP - alpha*deltaT)
        return density * (1.0 + p_term - t_term)
| underworldcode/underworld2 | underworld/UWGeodynamics/_density.py | _density.py | py | 2,927 | python | en | code | 140 | github-code | 90 |
18515756719 | N = int(input())
# Count non-overlapping adjacent equal pairs: when arr[i] == arr[i-1],
# count one pair and skip the next comparison so an element is never
# part of two pairs.
arr = list(map(int, input().split()))
ans = 0
i = 1
while i < N:
    if arr[i] == arr[i - 1]:
        ans += 1
        i += 2  # skip the partner of the pair just counted
    else:
        i += 1
print(ans)
8500375615 | import discord
from discord.ext import commands
class Utility_av(commands.Cog):
    """Cog exposing an avatar-lookup command."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def av(self, ctx, user : discord.Member=None):
        '''avatar command'''
        # No argument: show the invoking user's own avatar.
        if user == None:
            _embed = discord.Embed(title=f"{ctx.author}", color=discord.Colour.blue())
            _embed.set_image(url=ctx.author.avatar_url)
            await ctx.send(ctx.author.mention, embed=_embed)
            return True
        else:
            # The converter normally guarantees a Member; the fallback
            # message below only fires for unexpected argument types.
            if isinstance(user, discord.member.Member):
                _embed = discord.Embed(title=f"{user}", color=discord.Colour.blue())
                _embed.set_image(url=user.avatar_url)
                await ctx.send(ctx.author.mention, embed=_embed)
                return True
            await ctx.send(f"Couldn't find the user as `{user}`")
def setup(client):
    # discord.py extension entry point: register the cog on load.
    client.add_cog(Utility_av(client))
18259620299 | from collections import defaultdict
# Count substrings of S (as decimal numbers) divisible by the prime P.
N, P = map(int, input().split())
S = input().strip()[::-1]  # reversed so index i is the 10^i digit
if P in [2, 5]:
    # 2 and 5 divide a number iff they divide its last digit, so each
    # qualifying digit contributes one substring per possible start.
    ans = 0
    for r in range(N):
        if int(S[r]) % P == 0:
            ans += N - r
    print(ans)
    exit()
# Otherwise use prefix remainders: a substring is divisible by P exactly
# when two prefix sums share the same remainder mod P.
cum = [0] * (N + 1)
for i in range(N):
    now = int(S[i]) * pow(10, i, P)
    cum[i + 1] = (cum[i] + now) % P
cnt = defaultdict(int)
for _cum in cum:
    cnt[_cum] += 1
ans = 0
for k, v in cnt.items():
    # Choose any two equal-remainder prefixes.
    ans += v * (v - 1) // 2
print(ans)
def main():
    """Answer range queries counting numbers n with both n and (n+1)//2 prime.

    A sieve over [0, 100005) plus a prefix-count table makes each
    [l, r] query O(1).
    """
    import sys
    def input(): return sys.stdin.readline().rstrip()

    max_n = 100005
    # Sieve of Eratosthenes.
    is_prime = [True]*max_n
    is_prime[0], is_prime[1] = False, False
    i = 2
    while i*i < max_n:
        if is_prime[i]:
            k = 2
            while i*k < max_n:
                is_prime[i*k] = False
                k+= 1
        i+= 1

    # table[i]: count of qualifying numbers in (2, i].
    table = [0]*max_n
    for i in range(3, max_n):
        table[i] = table[i-1]
        if is_prime[i] and is_prime[(i+1)//2]:
            table[i] += 1

    q = int(input())
    for i in range(q):
        l, r = map(int, input().split())
        # table difference excludes l itself, so re-check l explicitly.
        tmp = table[r]-table[l]
        if is_prime[l] and is_prime[(l+1)//2]:
            tmp += 1
        print(tmp)


if __name__ == '__main__':
    main()
72999725738 | import tkinter as tk
from tkinter import ttk

root = tk.Tk()

combo1 = ttk.Combobox(root, values=['Option 1', 'Option 2', 'Option 3'])
combo1.pack()

# Second combobox starts disabled until combo1 has a selection.
combo2 = ttk.Combobox(root, state='disabled')
combo2.pack()

def enable_combo2(event):
    # Selecting in combo1 unlocks combo2 and populates its options.
    combo2['state'] = 'readonly'
    combo2['values'] = ['Suboption 1', 'Suboption 2', 'Suboption 3']

combo1.bind('<<ComboboxSelected>>', enable_combo2)

root.mainloop()
class TreeNode:
    """Plain binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Offer17:
    """Offer 17: decide whether tree B occurs as a substructure of tree A.

    By convention an empty tree is never considered a substructure.
    """

    def HasSubtree(self, pRoot1, pRoot2):
        """Return True when pRoot2 matches some subtree rooted in pRoot1."""
        if pRoot1 is None or pRoot2 is None:
            return False
        return (self._matches(pRoot1, pRoot2)
                or self.HasSubtree(pRoot1.left, pRoot2)
                or self.HasSubtree(pRoot1.right, pRoot2))

    def _matches(self, a, b):
        """True if tree b is reproduced node-for-node starting at a."""
        if b is None:
            # b exhausted: everything required so far matched.
            return True
        if a is None or a.val != b.val:
            return False
        return self._matches(a.left, b.left) and self._matches(a.right, b.right)
35525619176 | # https://www.hackerrank.com/challenges/re-group-groups/problem
"""
group()
A group() expression returns one or more subgroups of the match
groups()
A groups() expression returns a tuple containing all the subgroups of the match
groupdict()
A groupdict() expression returns a dictionary containing all the named subgroups
of the match, keyed by the subgroup name
"""
import re

# Match an alphanumeric character that is immediately repeated at least
# once; group(1) is that character.
pattern = r'([a-zA-Z0-9])\1+'
# NOTE(review): raw_input means this is Python 2 code; use input() on Python 3.
S = raw_input().strip()
matches = re.search(pattern, S)
# Print the first repeated character, or -1 when none exists.
print(matches.group(1) if matches else -1)
| urianchang/Algorithms | HackerRank/Algorithms/Python/Regex_and_Parsing/group_groups_groupdict.py | group_groups_groupdict.py | py | 552 | python | en | code | 17 | github-code | 90 |
4949468020 | import functools
import math
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
import util
from variable import LatentVariable
LABEL_REAL, LABEL_FAKE = 1, 0


class AdversarialLoss:
    """Mean binary cross-entropy against an all-real / all-fake target."""

    def __init__(self):
        self.loss = nn.BCELoss(reduction="mean")
        self.device = util.current_device()

    def __call__(self, y_hat: torch.Tensor, label: int):
        """Return mean BCE between y_hat and a constant tensor of `label`.

        Raises if label is not LABEL_REAL (1) or LABEL_FAKE (0).
        """
        if label not in [LABEL_REAL, LABEL_FAKE]:
            raise Exception("Invalid label is passed to adversarial loss")

        # BUG FIX: torch.full infers an integer dtype from an int fill
        # value, and BCELoss rejects integer targets — fill with a float
        # so the target dtype matches y_hat.
        y_true = torch.full(y_hat.size(), float(label), device=self.device)
        return self.loss(y_hat, y_true)
class InfoGANLoss:
    """Mutual-information (Q-network) loss over InfoGAN latent codes."""

    def __init__(self, latent_vars: Dict[str, LatentVariable]):
        self.latent_vars = latent_vars
        # Discrete codes use cross-entropy; continuous codes use a
        # Gaussian negative log-likelihood.
        self.discrete_loss = nn.CrossEntropyLoss()
        self.continuous_loss = NormalNLLLoss()
        self.device = util.current_device()

    def __call__(
        self, cs_hat: Dict[str, torch.Tensor], cs_true: Dict[str, torch.Tensor]
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """Return (total weighted loss, per-variable loss breakdown)."""
        if cs_hat.keys() != cs_true.keys():
            raise Exception("The keys of cs_hat is different from cs_true")

        losses: List[torch.Tensor] = []
        details: Dict[str, torch.Tensor] = {}
        for key in cs_hat.keys():
            c_hat, c_true = cs_hat[key], cs_true[key]

            if self.latent_vars[key].prob_name == "categorical":
                # loss for discrete variable: targets are the argmax of
                # the one-hot c_true.
                _, targets = c_true.max(dim=1)
                loss = self.discrete_loss(c_hat, targets)
            elif self.latent_vars[key].prob_name == "normal":
                # loss for continuous variable: c_hat packs [mean, ln_var].
                dim: int = self.latent_vars[key].dim
                mean, ln_var = c_hat[:, :dim], c_hat[:, dim:]
                loss = self.continuous_loss(c_true, mean, ln_var)
            # NOTE(review): if prob_name is neither value, `loss` is
            # unbound here — confirm upstream validation.

            loss = loss * self.latent_vars[key].params["weight"]
            details[key] = loss
            losses.append(loss)

        return functools.reduce(lambda x, y: x + y, losses), details
class NormalNLLLoss:
    """Mean negative log-likelihood of x under diagonal N(mean, exp(ln_var))."""

    def __call__(
        self, x: torch.Tensor, mean: torch.Tensor, ln_var: torch.Tensor
    ) -> torch.Tensor:
        diff = x - mean
        # 0.5*(ln_var + log(2*pi)) is the normalisation term; exp(-ln_var)
        # is the precision weighting the squared residual.
        nll = 0.5 * (ln_var + math.log(2 * math.pi) + diff * diff * torch.exp(-ln_var))
        return torch.mean(nll)
| raahii/infogan-pytorch | src/loss.py | loss.py | py | 2,385 | python | en | code | 14 | github-code | 90 |
39311885789 | """
Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right to left for the next level and alternate between).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
from collections import deque
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Zigzag (spiral) level-order traversal of a binary tree."""

    def zigzagLevelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        result = [[root.val]]
        current = [root]
        # The level after the root runs right-to-left, then alternates.
        left_to_right = False
        while current:
            nxt = []
            for node in current:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            if nxt:
                ordered = nxt if left_to_right else nxt[::-1]
                result.append([n.val for n in ordered])
            current = nxt
            left_to_right = not left_to_right
        return result
| at3103/Leetcode | 103_Binary Tree Zigzag Level Order Traversal.py | 103_Binary Tree Zigzag Level Order Traversal.py | py | 1,349 | python | en | code | 0 | github-code | 90 |
456329564 | import cv2
import numpy as np

# Hard-coded input image path; adjust for other machines.
PATH = '/home/felipe/Imagens/ball.png'

frame = cv2.imread(PATH)
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

# Detect circles via the Hough gradient method and outline them in green.
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 260, param1=30, param2=65, minRadius=0, maxRadius=0)
if circles is not None:
    for x, y, r in circles[0]:
        cv2.circle(frame,(x,y),r,(0,255,0),2)

cv2.imshow('Xamaa',frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
517585627 | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
from django.utils import timezone
from .models import Genre, Item, Order
from django.db.models import Sum
import re
from django.db.models import Q
# Create your views here.
def normalize_query(query_string,
                    findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
                    normspace=re.compile(r'\s{2,}').sub):
    """Split a raw query string into individual keywords.

    Quoted phrases are kept together as one term, and internal runs of
    whitespace inside a term are collapsed to a single space.

    >>> normalize_query('  some random  words "with   quotes  " and   spaces')
    ['some', 'random', 'words', 'with quotes', 'and', 'spaces']
    """
    terms = []
    for quoted, bare in findterms(query_string):
        term = (quoted or bare).strip()
        terms.append(normspace(' ', term))
    return terms
def get_query(query_string, search_fields):
    ''' Returns a query, that is a combination of Q objects. That combination
        aims to search keywords within a model by testing the given search fields.

        Each keyword must match at least one field (OR across fields), and
        all keywords must match (AND across terms).
    '''
    query = None # Query to search for every search term
    terms = normalize_query(query_string)
    for term in terms:
        or_query = None # Query to search for a given term in each field
        for field_name in search_fields:
            # Case-insensitive containment test on this field.
            q = Q(**{"%s__icontains" % field_name: term})
            if or_query is None:
                or_query = q
            else:
                or_query = or_query | q
        if query is None:
            query = or_query
        else:
            query = query & or_query
    return query
def indexView(request):
    """Landing page; on POST, turn the current cart into an Order.

    All in-cart items are summed into the order total and then released
    from the cart.  NOTE(review): the POST branch trusts 'mail'/'dir' to
    be present and does no validation — confirm the form guarantees this.
    """
    genre_list = Genre.objects.order_by('pub_date')[:4]
    context = {'genre_list': genre_list}
    if request.POST:
        mail = request.POST['mail']
        address = request.POST['dir']    # renamed: `dir` shadowed a builtin
        order_id = mail + "," + address  # renamed: `id` shadowed a builtin
        item_list = Item.objects.filter(inCart=True)
        total_price = sum(item.price for item in item_list)
        Order.objects.create(ordererMail=mail, ordererDir=address,
                             orderPrice=total_price, orderId=order_id)
        # Empty the cart now that the order is recorded.
        for item in item_list:
            item.inCart = False
            item.save()
    return render(request, 'shop/index.html', context)
def genreView(request, genre_id):
    """Show one genre with up to four of its items (404 on bad id)."""
    genre = get_object_or_404(Genre, id=genre_id)
    genre_list = Genre.objects.order_by('pub_date')[:4]
    item_list = Item.objects.filter(genre__id = genre_id).order_by('pub_date')[:4]
    context = {'genre_list': genre_list, 'item_list': item_list, 'actgenre': genre}
    return render(request, 'shop/genre.html', context)


def itemView(request, genre_id, item_id):
    # Echar un ojo a esto ya que deberia buscar genero y dentro del genero el item
    # (original note: should look up the genre and then the item within it —
    # currently the item is fetched by id alone, ignoring genre_id).
    genre_list = Genre.objects.order_by('pub_date')[:4]
    item = get_object_or_404(Item, id=item_id)
    context = {'genre_list': genre_list, 'item': item}
    return render(request, 'shop/detail.html', context)
def cartView(request, item_id=None):
    """Show the cart; when item_id is given, toggle that item in/out first."""
    if (item_id):
        addedItem = Item.objects.get(id = item_id)
        addedItem.inCart = not addedItem.inCart
        addedItem.save()
    genre_list = Genre.objects.order_by('pub_date')[:4]
    item_list = Item.objects.filter(inCart = True).order_by('pub_date')
    # Total of everything currently in the cart.
    totalPrice = 0
    for item in item_list:
        totalPrice += item.price
    context = {'genre_list': genre_list, 'item_list': item_list, 'total': totalPrice}
    return render(request, 'shop/cart.html', context)
def search(request):
    """Full-text-ish search over item text/id and genre text/id."""
    genre_list = Genre.objects.order_by('pub_date')[:4]
    item_list = None
    # Only search when the box contains non-whitespace input.
    if ('searchbox' in request.GET) and request.GET['searchbox'].strip():
        query_string = request.GET['searchbox']
        search_fields = ['text', 'id', 'genre__text', 'genre__id']
        entry_query = get_query(query_string, search_fields)
        item_list = Item.objects.filter(entry_query).order_by('pub_date')
    context = {'genre_list': genre_list, 'item_list': item_list}
    return render(request, 'shop/search.html', context)
242828482 | # 문제 20 : 몫과 나머지
# 공백으로 구분하여 두 숫자가 주어진다.
# 첫 번째 숫자로 두 번째 숫자를 나누었을 때 그 몫과 나머지를 공백으로 구분하여 출력하시오.
data = list(map(int, input().split()))
result = data[0] // data[1]
left = data[0] % data[1]
print(result, left)
| null | code/20.py | 20.py | py | 330 | python | en | code | null | code-starcoder2 | 51 |
146379151 | #!/usr/bin/env python3
from pprint import pprint  # NOTE(review): unused in this script — candidate for removal

a = 5
b = 2

# Pick the larger value with a conditional expression.  (Translated from
# the original German note:) this is more compact than separate if/else
# statements, but somewhat less readable.
larger = a if a > b else b  # renamed from `max`, which shadowed the builtin
print(larger)
| null | python-test7.py | python-test7.py | py | 196 | python | en | code | null | code-starcoder2 | 51 |
41247363 | import requests
from bs4 import BeautifulSoup
def aliexpress(product,budget):
    """Crawl AliExpress search results for `product`, printing each
    product's title and link page by page.

    NOTE(review): `budget` is accepted but never used — confirm whether
    price filtering was intended.
    """
    pages=1
    max_pages=2
    url='http://www.aliexpress.com/wholesale?catId=0&initiative_id=SB_20170821004256&SearchText='+str(product)
    while(pages<=max_pages):
        print("Page no. = " +str(pages))
        print("Page link = " + str(url))
        pages+=1
        source_code=requests.get(url)
        text=source_code.text
        soup=BeautifulSoup(text,'lxml')
        # Follow the "next page" link and update the page count from it.
        for page in soup.findAll('div',{'class' : 'ui-pagination-navi util-left'}):
            for link in page.findAll('a',{'class' : 'page-next ui-pagination-next'}):
                print('Im inside the loop')
                url=link.get('href')
                max_pages=link.text
                url='https:' + str(url)
                print(url)
            break
        # Extract title and link for every listed product on this page.
        for product in soup.findAll('li',{'class' : ['list-item list-item-first ','list-item ']}):
            for info in product.findAll('div',{'class' : 'info'}):
                for details in info.findAll('a',{'class' : 'history-item product '}):
                    link=details.get('href')
                    title=details.get('title')
                    print('\nName of the product : ' + str(title))
                    print('Product link :'+str(link)+'\n')
373881030 | from .SVM import SVM
from .DecisionTree import DecisionTree
# Registry mapping model names to their classes; extend when adding models.
model_list = {
    "SVM": SVM,
    "DecisionTree": DecisionTree,
}
def get_model(model_name, conf):
    """Look up a registered model class by name and instantiate it.

    Parameters
    ----------
    model_name : key into ``model_list`` (e.g. "SVM", "DecisionTree").
    conf : configuration object forwarded to the model constructor.

    Raises
    ------
    NotImplementedError
        If ``model_name`` is not registered.  The message now lists the
        known names (the original raised without any context).
    """
    try:
        model_cls = model_list[model_name]
    except KeyError:
        raise NotImplementedError(
            "Unknown model '{}'; available: {}".format(model_name, sorted(model_list))
        )
    return model_cls(conf)
| null | model/__init__.py | __init__.py | py | 290 | python | en | code | null | code-starcoder2 | 51 |
327864145 | """empty message
Revision ID: f49f08e77ed5
Revises: f484298d9b7b
Create Date: 2019-03-26 22:27:17.726994
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f49f08e77ed5'
down_revision = 'f484298d9b7b'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable boolean `finished` column to the task table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task', sa.Column('finished', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Remove the `finished` column from the task table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('task', 'finished')
    # ### end Alembic commands ###
| null | migrations/versions/f49f08e77ed5_.py | f49f08e77ed5_.py | py | 652 | python | en | code | null | code-starcoder2 | 51 |
327716316 | import mock
import pytest
from squeaknode.config.config import SqueaknodeConfig
from squeaknode.core.lightning_address import LightningAddressHostPort
from squeaknode.core.squeak_controller import SqueakController
from squeaknode.core.squeak_core import SqueakCore
from squeaknode.core.squeak_peer import SqueakPeer
from squeaknode.db.squeak_db import SqueakDb
from squeaknode.node.squeak_rate_limiter import SqueakRateLimiter
from squeaknode.node.squeak_whitelist import SqueakWhitelist
@pytest.fixture
def config():
    """SqueaknodeConfig with defaults (network = testnet)."""
    squeaknode_config = SqueaknodeConfig()
    squeaknode_config.read()
    return squeaknode_config


@pytest.fixture
def regtest_config():
    """SqueaknodeConfig overridden to the regtest network."""
    squeaknode_config = SqueaknodeConfig(
        dict_config={'core': {'network': 'regtest'}}
    )
    squeaknode_config.read()
    return squeaknode_config
@pytest.fixture
def squeak_db():
    """Mocked SqueakDb (no real database behind it)."""
    # return SqueakDb(None, None, None)
    return mock.Mock(spec=SqueakDb)


@pytest.fixture
def squeak_core():
    """Mocked SqueakCore."""
    return mock.Mock(spec=SqueakCore)


@pytest.fixture
def lightning_host_port():
    """Sample lightning node address."""
    return LightningAddressHostPort(host="my_lightning_host", port=8765)


@pytest.fixture
def price_msat():
    """Sample price in millisatoshis."""
    return 777


@pytest.fixture
def max_squeaks_per_address_per_hour():
    """Sample per-address hourly rate limit."""
    return 5000


@pytest.fixture
def squeak_whitelist():
    """Mocked SqueakWhitelist."""
    return mock.Mock(spec=SqueakWhitelist)


@pytest.fixture
def squeak_rate_limiter():
    """Mocked SqueakRateLimiter."""
    return mock.Mock(spec=SqueakRateLimiter)
@pytest.fixture
def squeak_controller(
    squeak_db,
    squeak_core,
    squeak_whitelist,
    squeak_rate_limiter,
    config,
):
    """SqueakController wired with mocked collaborators and default config."""
    return SqueakController(
        squeak_db,
        squeak_core,
        squeak_whitelist,
        squeak_rate_limiter,
        config,
    )
@pytest.fixture
def regtest_squeak_controller(
    squeak_db,
    squeak_core,
    squeak_whitelist,
    squeak_rate_limiter,
    regtest_config,
):
    """SqueakController wired with mocks and the regtest config."""
    return SqueakController(
        squeak_db,
        squeak_core,
        squeak_whitelist,
        squeak_rate_limiter,
        regtest_config,
    )
def test_nothing():
    """Smoke test: the module and its fixtures import cleanly."""
    assert True


def test_get_buy_offer(squeak_controller):
    """Controller exposes a get_buy_offer attribute."""
    assert squeak_controller.get_buy_offer is not None


def test_get_network_default(squeak_controller):
    """Default config reports the testnet network."""
    assert squeak_controller.get_network() == "testnet"


def test_get_network_regtest(regtest_squeak_controller):
    """Regtest config reports the regtest network."""
    assert regtest_squeak_controller.get_network() == "regtest"
# def test_get_network_regtest(config, squeak_controller):
# # with mock.patch.object(Config, 'squeaknode_network', new_callable=mock.PropertyMock) as mock_config:
# # mock_config.return_value = 'regtest'
# config.squeaknode_network = "regtest"
# print(config.squeaknode_network)
# assert squeak_controller.get_network() == "regtest"
def test_create_peer(squeak_db, squeak_controller):
    """create_peer inserts a new peer row with sharing flags off."""
    squeak_controller.create_peer(
        "fake_peer_name",
        "fake_host",
        5678,
    )
    squeak_db.insert_peer.assert_called_with(
        SqueakPeer(
            peer_id=None,
            peer_name="fake_peer_name",
            host="fake_host",
            port=5678,
            uploading=False,
            downloading=False,
        )
    )
def test_create_peer_default_port(config, squeak_db, squeak_controller):
    """A port of 0 falls back to the configured default RPC port."""
    squeak_controller.create_peer(
        "fake_peer_name",
        "fake_host",
        0,
    )
    squeak_db.insert_peer.assert_called_with(
        SqueakPeer(
            peer_id=None,
            peer_name="fake_peer_name",
            host="fake_host",
            port=config.core.default_peer_rpc_port,
            uploading=False,
            downloading=False,
        )
    )
| null | tests/core/test_squeak_controller.py | test_squeak_controller.py | py | 3,619 | python | en | code | null | code-starcoder2 | 51 |
139945641 | class Solution:
    def oddEvenList(self, head: ListNode) -> ListNode:
        """Rearrange a singly linked list so all odd-indexed nodes
        (1-based) precede all even-indexed nodes, preserving relative
        order within each group.  O(n) time, O(1) extra space.
        """
        cnt = 1
        # Dummy heads for the odd and even sublists being built.
        odd, even = ListNode(0), ListNode(0)
        firsteven = even
        cur = ListNode(0, head)
        while cur.next:
            cur = cur.next
            if cnt%2:
                odd.next = cur
                odd = odd.next
            else:
                even.next = cur
                even = even.next
            cnt += 1
        #
        # 1. break the even from the last odd
        # 2. connect odd to first even
        #
        even.next = None
        odd.next = firsteven.next
        return head
283641680 | import math
def prime(value):
    """Return True when value is a prime number, False otherwise.

    Trial division by 2, 3 and then odd candidates up to sqrt(value).
    The original version wrongly reported 1 as prime, rejected the
    primes 2 and 3 outright, and stopped the trial loop one candidate
    early (so squares like 25 passed as prime).
    """
    if value < 2:
        return False
    if value in (2, 3):
        return True
    if value % 2 == 0 or value % 3 == 0:
        return False
    # Remaining candidates: odd numbers up to and including sqrt(value).
    for i in range(5, int(math.sqrt(value)) + 1, 2):
        if value % i == 0:
            return False
    return True
# Read one integer from stdin and report (in Russian) whether it is prime.
if prime(int(input())):
    print("Число простое")  # "The number is prime"
else:
    print("Число составное")  # "The number is composite"
259617514 | import sys
input = sys.stdin.readline

sensor = int(input())
base = int(input())
coord = list(map(int, input().split()))
coord.sort()

# If there are at least as many base stations as sensors, place one at
# each sensor position -> total distance 0.
if sensor <= base:
    print(0)
    sys.exit()

dist = []
# Gaps between each pair of adjacent sensors.
for i in range(1, sensor):
    dist.append(coord[i]-coord[i-1])
dist.sort(reverse=True)

# Split the line into `base` groups by removing the base-1 largest gaps;
# the answer is the sum of the remaining gaps.
for i in range(base-1):
    dist.pop(0)
print(sum(dist))
356051032 | import time
import traceback, sys
import random
from statistics import mean
from statistics import median
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
QApplication.setAttribute(Qt.AA_Use96Dpi) # This fixes the scaling issue in Windows
# Sets (0 = OR set, 1 = AND set, 2 = XOR set, 3 = IF->THEN set, 4 = IFF set)
sets = [
[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]],
[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]],
[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]],
[[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 1]],
[[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]],
]
class Environment:
def __init__(self):
self.ta = [] # The Tsetlin Machine
self.X = [] # X input
self.y = 0 # y input
self.s = 3.9 # s - used for rewards
self.num_states = 10 # number of states (leave at 10 since the GUI has been hardcoded for that value)
self.num_rounds = 100000 # Maximum number of rounds to play before giving up
self.verbose = 0 # Determines whether the environment will print
self.operation = 1 # The current set being worked on
self.turbo = 0 # Deactivates time delay from reward/punishments
self.noise = 0 # Toggles noise on/off
self.running = 0 # Whether the environment is running
self.round = -1 # The current round number, set to -1 if inactive (for GUI purposes)
self.message = '' # Message displayed in lower right corner of the GUI
self.s_stats = '' # String for the statistics shown in the GUI
self.passed_test = [0] * 4 # Used in the truth table displayed in the lower left corner of the GUI
self.stats = [] # Bookkeeping about the current operation
self.game = 0 # How many games have been played with this operation
# Initialize the Tsetlin Automata here (for the GUI)
self.ta = [[Tsetlin(self.num_states) for x in range(4)] for y in range(2)]
    def set_X(self, X):
        """Store the current example's inputs [x1, x2]."""
        self.X = X

    def set_y(self, y):
        """Store the current example's target output."""
        self.y = y

    def pause(self):
        """Short delay between steps unless turbo mode is active."""
        if not self.turbo:
            time.sleep(0.025)
    def setup_tsetlin(self):  # Load current example into the Tsetlin Automata
        """Copy the current inputs into each automaton's literal value;
        columns x >= 2 hold the negated literals.

        NOTE(review): reads the module-level `env`, not self — confirm
        this coupling is intended.
        """
        for y in range(2):
            for x in range(4):
                self.ta[y][x].value = env.X[x % 2]
                if x >= 2:
                    self.ta[y][x].value = int(not (self.ta[y][x].value))

    def get_tsetlin_state(self, x):
        """Return the state of the automaton at flat index x (row-major 2x4)."""
        return self.ta[int(x/4)][x % 4].state
    def eval_clause(self, disjunctive):  # Returns the sum of our conjunctive clauses, or the disjunction
        """Evaluate the two conjunctive clauses over the current literals.

        With disjunctive truthy, return clause0 OR clause1; otherwise
        return clause0 + clause1 (0, 1 or 2).
        """
        clauses = [1, 1]  # An empty conjunctive clause is always true
        for y in range(2):
            for x in range(4):
                if self.ta[y][x].included():
                    clauses[y] &= self.ta[y][x].value
        if disjunctive:  # If disjunctive is set, we OR the two clauses together
            return clauses[0] | clauses[1]
        else:  # Otherwise, we add the two clauses together
            return clauses[0] + clauses[1]
    def feedback(self, type):  # Gives Type I or II Feedback to our literals
        """Apply probabilistic Type I (type == 1) or Type II feedback to
        every automaton, with probabilities derived from self.s.
        """
        for y in range(2):
            for x in range(4):
                r = random.random()
                if type == 1:  # ** Type I Feedback **
                    if self.eval_clause(1):  # Target clause evaluates to 1
                        if self.ta[y][x].included():
                            if self.ta[y][x].value:  # > included, literal 1
                                if r < (1 / self.s):
                                    pass
                                if r < ((self.s - 1) / self.s):
                                    self.ta[y][x].reward()  # >> reward
                        else:
                            if self.ta[y][x].value:  # > not included, literal 1
                                if r < (1 / self.s):
                                    pass
                                elif r < ((self.s - 1) / self.s):
                                    self.ta[y][x].penalize()  # >> penalty
                            else:  # > not included, literal 0
                                if r < (1 / self.s):
                                    self.ta[y][x].reward()  # >> reward
                    else:  # Target clause evaluates to 0
                        if self.ta[y][x].included():
                            if self.ta[y][x].value:  # > included, literal 1
                                if r < (1 / self.s):
                                    self.ta[y][x].penalize()  # >> penalty
                            else:  # > included, literal 0
                                if r < (1 / self.s):
                                    self.ta[y][x].penalize()  # >> penalty
                        else:
                            if self.ta[y][x].value:  # > not included, literal 1
                                if r < (1 / self.s):
                                    self.ta[y][x].reward()  # >> reward
                            else:  # > not included, literal 0
                                if r < (1 / self.s):
                                    self.ta[y][x].reward()  # >> reward
                else:  # ** Type II Feedback **
                    # Penalize only excluded, zero-valued literals when the
                    # clause wrongly evaluates to 1.
                    if self.eval_clause(1):  # > target clause evaluates to 1
                        if not self.ta[y][x].included() \
                                and not self.ta[y][x].value:
                            self.ta[y][x].penalize()  # >> penalty
def get_literal(self, counter, type):
s_literal = [" X₁ ", " X₂ ", "¬X₁ ", "¬X₂ "]
s_literal_alt = [" X1 ", " X2 ", "!X1 ", "!X2 "]
if type:
return s_literal_alt[counter % 4]
return s_literal[counter % 4]
def is_included(self, counter):
if self.ta[int(counter / 4)][counter % 4].included():
return 1
else:
return 0
def get_conjunction(self, counter):
y = int(counter / 4)
_x = counter % 4
if not self.ta[y][_x].included():
return 0
num_literals = []
y = int(counter / 4)
for x in range(4):
if self.ta[y][x].included():
num_literals.append(x)
for x in range(0, counter % 4 + 1):
if x in num_literals:
num_literals.remove(x)
if len(num_literals):
return 1
else:
return 0
    def playGame(self):
        """Run one full training game.

        Re-initialises the eight automata, then trains for up to
        ``self.num_rounds`` rounds on (optionally noisy) examples drawn from
        the selected operation's truth table, stopping early once the learned
        disjunction-of-conjunctions matches the whole table.  Results are
        published through ``self.message`` / ``self.stats`` for the GUI.
        """
        self.game += 1
        # Reset the Tsetlin Automata, and give them their states
        self.ta = [[Tsetlin(self.num_states) for x in range(4)] for y in range(2)]
        # Used for GUI truth table
        self.passed_test = [0, 0, 0, 0]
        for round in range(self.num_rounds):
            self.round = round
            self.message = ''
            if not self.running:  # Escape if user decided to stop
                self.message = 'Cancelled'
                break
            example = sets[self.operation][random.randint(0, 3)]  # Get a random example each round
            # example = sets[self.operation][round % 4] # Go through the examples in order
            self.set_X([example[0], example[1]])
            self.set_y(example[2])
            if self.noise and random.random() < 0.4:  # Add 40% noise to the dataset
                r = random.randint(0, 2)
                if r == 2:
                    # flip the label (1 in 3 noisy cases)
                    self.y = int(not self.y)  # Cast the value as int to avoid it showing as True/False in the GUI
                else:
                    # otherwise flip one of the two inputs
                    self.X[r] = int(not self.X[r])
            # Give the Tsetlin Automata their respective values for this example
            self.setup_tsetlin()
            while self.eval_clause(1) != self.y:  # If this formula is different from y
                if self.y and not (self.eval_clause(0)):  # and y = 1 and the sum of the conjunctive clauses is 0
                    self.feedback(1)  # then give Type I Feedback
                    self.message = "Type I Feedback"
                elif not self.y and self.eval_clause(0):  # Otherwise, if y = 0 and sum of the conjunctive clauses > 0
                    self.feedback(2)  # then give Type II Feedback
                    self.message = "Type II Feedback"
            valid = True  # Now, let us check if the current formula passed the entire truth table, if so, we can stop
            for i in range(4):
                example = sets[self.operation][i]
                self.set_X([example[0], example[1]])
                self.set_y(example[2])
                self.setup_tsetlin()
                if self.eval_clause(1) == self.y:
                    self.passed_test[i] = 1
                else:
                    self.passed_test[i] = 0
                    valid = False
            if valid:
                break
        # End round
        if not self.message == 'Cancelled':
            self.message = "Solved in " + f"{env.round + 1:,}" + " rounds"
            self.stats.append(self.round + 1)
class Tsetlin:
    """A two-action Tsetlin automaton with *n* states per action.

    States 1..n choose the "exclude" action, states n+1..2n choose
    "include".  Rewards deepen the current choice, penalties move the
    automaton toward (and eventually across) the decision boundary.
    """

    def __init__(self, n):
        self.n = n  # n is the number of states per action
        # Start just on either side of the boundary, chosen at random
        self.state = random.choice([self.n, self.n + 1])
        self.value = 0

    def included(self):
        """True when the automaton currently chooses the "include" action."""
        return self.state > self.n

    def reward(self):
        """Reinforce the current action by moving away from the boundary."""
        if 1 < self.state <= self.n:
            self.state -= 1
        elif self.n < self.state < 2 * self.n:
            self.state += 1
        env.pause()

    def penalize(self):
        """Weaken the current action by moving toward the boundary."""
        if self.state <= self.n:
            self.state += 1
        else:
            self.state -= 1
        env.pause()
# ==========================================================================
# GUI
# ==========================================================================
class myCanvas(QWidget):
    """Widget that paints the automata state diagram, the learned clause,
    the truth-table checklist and the run statistics on every frame."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Fixed 800x600 drawing surface matching the main window
        self.setGeometry(0, 0, 800, 600)
    def paintEvent(self, e):
        painter = QPainter()
        painter.begin(self)
        self.drawCanvas(painter)
        painter.end()
    def drawCanvas(self, painter):
        """Render the whole scene onto *painter*."""
        painter.setRenderHint(QPainter.Antialiasing)
        pen = QPen()
        pen.setColor(QColor('white'))
        painter.setPen(pen)
        myWidth = 2  # The width of the lines
        y_shift = 40  # vertical offset applied to the state diagram
        font = QFont('Helvetica Lt Std', 16)
        font.setPixelSize(21)
        # NOTE(review): QFontMetrics.width() is deprecated in newer Qt5;
        # horizontalAdvance() is the replacement — confirm target Qt version.
        fm = QFontMetrics(font)
        painter.setFont(font)
        # White background, then red brush for the "exclude" state boxes
        painter.setBrush(QColor(255, 255, 255, 255))
        painter.drawRect(0, 0, 800, 600)
        painter.setBrush(QColor(255, 0, 0, 255))
        pen.setWidth(myWidth)
        pen.setColor(QColor('black'))
        painter.setPen(pen)
        painter.drawLine(QPoint(391, 40 + y_shift), QPoint(391, 375 + y_shift))  # Vertical Line
        painter.setPen(QPen(Qt.black, myWidth, Qt.DashLine))
        painter.drawLine(QPoint(0, 205 + y_shift), QPoint(777, 205 + y_shift))  # Dashed Horizontal Line
        painter.setPen(QPen(Qt.black, myWidth, Qt.SolidLine))
        if env.round > -1:
            s_round = f"{env.round + 1:,}"
            painter.drawText(0, 25, 'Game ' + str(env.game) + ', Round ' + s_round)
        counter = 0
        text_y = 400 + y_shift
        # Odd columns x = 1,3,...,15 each hold one automaton (8 in total)
        for x in range(17):
            if x % 2:
                state = env.get_tsetlin_state(counter)
                if state <= env.num_states:
                    painter.setBrush(QColor(255, 0, 0, 255))  # red: "exclude" side
                else:
                    painter.setBrush(QColor(0, 0, 255, 255))  # blue: "include" side
                painter.drawRoundedRect(x * 46, (20 - state) * 15 + y_shift + 40, 46, 46, 12, 12)
                pen.setColor(QColor('white'))
                painter.setPen(pen)
                text_width = fm.width(str(state))
                painter.drawText(x * 46 + (23 - text_width/2), (20 - state) * 15 + y_shift + 70, str(state))
                pen.setColor(QColor('black'))
                painter.setPen(pen)
                # Draw/Print the conjunctive clauses
                if not env.is_included(counter):
                    pen.setColor(QColor('lightGray'))  # grey out excluded literals
                    painter.setPen(pen)
                text_width = fm.width(env.get_literal(counter, 0))
                painter.drawText(x * 46 + (23 - text_width/2), text_y, env.get_literal(counter, 0))
                pen.setColor(QColor('black'))
                painter.setPen(pen)
                if counter % 4 < 3:
                    # Grey the '^' connector when no included literal follows
                    if not env.get_conjunction(counter):
                        pen.setColor(QColor('lightGray'))
                        painter.setPen(pen)
                    text_width = fm.width("^")
                    painter.drawText(x * 46 + 46 + (23 - text_width/2), text_y, "^")
                    pen.setColor(QColor('black'))
                    painter.setPen(pen)
                counter += 1
        # End drawing states
        painter.drawText(40, text_y, "(")
        painter.drawText(777 - 40, text_y, ")")
        painter.drawText(373, text_y, ") v (")
        if env.round > -1:
            painter.drawText(0, 50, env.s_stats)
        ops = ["OR", "AND", "XOR", "IF→THEN", "IFF"]
        text = "Running " + ops[env.operation] + " set"
        text_width = fm.width(text)
        painter.drawText(775 - text_width, 25, text)
        text = "X = [" + str(env.X[0]) + ", " + str(env.X[1]) + "], y = " + str(env.y)
        text_width = fm.width(text)
        painter.drawText(775 - text_width, 50, text)
        pen.setColor(QColor('lightGray'))
        painter.setPen(pen)
        text = env.message
        text_width = fm.width(text)
        painter.drawText(775 - text_width, 540, text)
        # Truth-table checklist: one line per example, ✓ when currently solved
        for i in range(4):
            check = "✗"
            if env.passed_test[i]:
                check = "✓"
            example = sets[env.operation][i]
            text = str(example[0]) + " " + str(example[1]) + " | " + str(example[2]) + " " + check
            painter.drawText(0, 480 + 20 * i, text)
        painter.drawLine(QPoint(38, 465), QPoint(38, 540))
        pen.setColor(QColor('black'))
        painter.setPen(pen)
    def _trigger_refresh(self):
        # Called by the window's QTimer: schedule a repaint
        self.update()
def reset_environment():
    """Reset round/game counters and clear the collected statistics."""
    env.round = -1
    env.game = 0
    env.stats = []  # drop accumulated per-game round counts
    env.s_stats = ''
class Window(QWidget):
    """Main application window.

    Top row holds the operation buttons (acting as an exclusive radio
    group), the noise / fast-forward toggles and the Run button; below sits
    the drawing canvas, repainted ~60 times per second by a QTimer.  Games
    run on a QThreadPool so the GUI stays responsive.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.threadpool = QThreadPool()
        print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
        self.timer = QTimer()
        # ~60 FPS repaint.  NOTE(review): setInterval() expects int
        # milliseconds; 1000/60 is a float — confirm PyQt coerces it.
        self.timer.setInterval(1000 / 60)
        self.timer.timeout.connect(self.recurring_timer)
        self.timer.start()
        p = self.palette()
        p.setColor(self.backgroundRole(), Qt.white)
        self.setPalette(p)
        grid_layout = QGridLayout()
        self.setLayout(grid_layout)
        # OR Button
        self.or_button = QPushButton("OR")
        self.or_button.setCheckable(True)
        self.or_button.clicked[bool].connect(self.setOperation)
        grid_layout.addWidget(self.or_button, 0, 0)  # row, column (y, x)
        # AND Button
        self.and_button = QPushButton("AND")
        self.and_button.setCheckable(True)
        self.and_button.toggle()  # AND starts selected
        self.and_button.clicked[bool].connect(self.setOperation)
        grid_layout.addWidget(self.and_button, 0, 1)
        # XOR Button
        self.xor_button = QPushButton("XOR")
        self.xor_button.setCheckable(True)
        grid_layout.addWidget(self.xor_button, 0, 2)
        self.xor_button.clicked[bool].connect(self.setOperation)
        # IF->THEN Button
        self.ifthen_button = QPushButton("IF→THEN")
        self.ifthen_button.setCheckable(True)
        self.ifthen_button.clicked[bool].connect(self.setOperation)
        grid_layout.addWidget(self.ifthen_button, 0, 3)
        # IFF Button
        self.iff_button = QPushButton("IFF")
        self.iff_button.setCheckable(True)
        grid_layout.addWidget(self.iff_button, 0, 4)
        self.iff_button.clicked[bool].connect(self.setOperation)
        # Noise Button
        self.noise_button = QPushButton("Noise")
        self.noise_button.setCheckable(True)
        grid_layout.addWidget(self.noise_button, 0, 6)
        self.noise_button.clicked[bool].connect(self.toggleNoise)
        # Fast Forward Button
        self.ff_button = QPushButton("⯈⯈")
        self.ff_button.setCheckable(True)
        grid_layout.addWidget(self.ff_button, 0, 7)
        self.ff_button.clicked[bool].connect(self.toggleSpeed)
        # Run Button
        self.run_button = QPushButton("Run")
        self.run_button.setCheckable(True)
        grid_layout.addWidget(self.run_button, 0, 8)
        self.run_button.clicked[bool].connect(self.toggleRunning)
        self.canvas = myCanvas()
        grid_layout.addWidget(self.canvas, 1, 0, 5, 9)
        self.setGeometry(600, 400, 800, 600)
        self.setWindowTitle('IKT440 | Assignment 2')
    def recurring_timer(self):
        self.canvas._trigger_refresh()  # This is our time-step, and updates the window every 1/60 second
    def progress_fn(self, n):
        print("Done.")
    def execute_this_fn(self, progress_callback):
        # Worker entry point: runs one full training game off the GUI thread
        env.playGame()
        return "Done."
    def print_output(self, s):
        print(s)
    def thread_complete(self):
        """Runs when a game thread finishes: untoggle Run, re-enable the
        operation buttons and publish min/max/mean/median statistics."""
        print("Thread complete.")
        if self.run_button.isChecked():
            self.run_button.toggle()
            env.running = not env.running
        self.or_button.setEnabled(True)
        self.and_button.setEnabled(True)
        self.xor_button.setEnabled(True)
        self.ifthen_button.setEnabled(True)
        self.iff_button.setEnabled(True)
        env.s_stats = f"min = {min(env.stats):,} | max = {max(env.stats):,} | r̄ = {mean(env.stats):,.0f}" \
                      f" | median = {median(env.stats):,.0f}"
    def toggleNoise(self, pressed):
        env.noise = not env.noise
        reset_environment()
    def toggleSpeed(self, pressed):
        env.turbo = not env.turbo
    def toggleRunning(self, pressed):
        """Start or stop a game; operation buttons are locked while running."""
        env.running = not env.running
        if env.running:
            self.or_button.setEnabled(False)
            self.and_button.setEnabled(False)
            self.xor_button.setEnabled(False)
            self.ifthen_button.setEnabled(False)
            self.iff_button.setEnabled(False)
            worker = Worker(self.execute_this_fn)  # Any other args, kwargs are passed to the run function
            worker.signals.result.connect(self.print_output)
            worker.signals.finished.connect(self.thread_complete)
            worker.signals.progress.connect(self.progress_fn)
            self.threadpool.start(worker)  # Execute
        else:
            self.or_button.setEnabled(True)
            self.and_button.setEnabled(True)
            self.xor_button.setEnabled(True)
            self.ifthen_button.setEnabled(True)
            self.iff_button.setEnabled(True)
    def setOperation(self):
        """Emulate an exclusive radio group: press the clicked button,
        release every other one, then select the matching truth table."""
        source = self.sender()
        if not self.or_button.isChecked() and source.text() == "OR":
            self.or_button.toggle()
        if not self.and_button.isChecked() and source.text() == "AND":
            self.and_button.toggle()
        if not self.xor_button.isChecked() and source.text() == "XOR":
            self.xor_button.toggle()
        if not self.ifthen_button.isChecked() and source.text() == "IF→THEN":
            self.ifthen_button.toggle()
        if not self.iff_button.isChecked() and source.text() == "IFF":
            self.iff_button.toggle()
        if self.or_button.isChecked() and source.text() != "OR":
            self.or_button.toggle()
        elif self.and_button.isChecked() and source.text() != "AND":
            self.and_button.toggle()
        elif self.xor_button.isChecked() and source.text() != "XOR":
            self.xor_button.toggle()
        elif self.ifthen_button.isChecked() and source.text() != "IF→THEN":
            self.ifthen_button.toggle()
        elif self.iff_button.isChecked() and source.text() != "IFF":
            self.iff_button.toggle()
        # Map the pressed button to the index into `sets`
        if source.text() == "OR":
            env.operation = 0
        elif source.text() == "AND":
            env.operation = 1
        elif source.text() == "XOR":
            env.operation = 2
        elif source.text() == "IF→THEN":
            env.operation = 3
        else:
            env.operation = 4
        reset_environment()
# ==========================================================================
# Multithreading
# ==========================================================================
class WorkerSignals(QObject):
    """Signals a Worker can emit: finished (no data), error (exctype, value,
    formatted traceback), result (return value of the job), progress (int)."""
    finished = pyqtSignal()
    error = pyqtSignal(tuple)
    result = pyqtSignal(object)
    progress = pyqtSignal(int)
class Worker(QRunnable):
    """Generic QRunnable that executes ``fn(*args, **kwargs)`` on a thread
    pool and reports the outcome through :class:`WorkerSignals`.

    A ``progress_callback`` keyword (the ``progress`` signal) is injected
    into *fn*'s kwargs, so *fn* must accept that parameter.
    """

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        # Store constructor arguments (re-used for processing)
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()
        # Add the callback to our kwargs
        self.kwargs['progress_callback'] = self.signals.progress

    @pyqtSlot()
    def run(self):
        """Execute fn; emit result or error, then always emit finished."""
        # Retrieve args/kwargs here; and fire processing using them
        try:
            result = self.fn(*self.args, **self.kwargs)
        # Fixed: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary exceptions.
        except Exception:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(result)  # Return the result of the processing
        finally:
            self.signals.finished.emit()  # Done
# Application entry point (module level): build the shared environment,
# create the main window and start the Qt event loop.
env = Environment()
app = QApplication([])
# Fixed: the original rebound the name `Window` to the instance
# (`Window = Window()`), shadowing the class; use a distinct variable.
main_window = Window()
main_window.setGeometry(0, 0, 800, 600)
main_window.show()
app.exec_()
| null | Tsetlin_GUI.py | Tsetlin_GUI.py | py | 23,671 | python | en | code | null | code-starcoder2 | 51 |
639110471 | '''
fcn.py 的任务
1. 实现双线插值
2. 实现FCN本人
(人 •͈ᴗ•͈) ۶♡♡
'''
import numpy as np
import torch
from torch import nn
from torchvision import models
#双插
def Bilinear_interpolation(src, new_size):
    '''
    使用双线性插值方法放大图像 (resize an image with bilinear interpolation)
    params:
        src(np.ndarray): input image, shape (H, W, 3)
        new_size(tuple): target size (dst_h, dst_w)
    ret:
        dst(np.ndarray): resized image, dtype uint8
    '''
    dst_h, dst_w = new_size  # target height/width
    src_h, src_w = src.shape[:2]  # source height/width
    # Same size requested: just return a copy
    if src_h == dst_h and src_w == dst_w:
        return src.copy()
    scale_x = float(src_w) / dst_w
    # Fixed: was `src_H` (NameError) in the original
    scale_y = float(src_h) / dst_h
    # Fixed: dtype was np.int8 (range -128..127); pixel values need uint8
    dst = np.zeros((dst_h, dst_w, 3), dtype=np.uint8)
    # Iterate channels, then every destination pixel
    for n in range(3):
        for dst_y in range(dst_h):
            for dst_x in range(dst_w):
                # Center-aligned source coordinate: src + 0.5 = (dst + 0.5) * scale
                # (the 0.5 offset aligns pixel centers; see
                # https://www.cnblogs.com/kk17/p/9989984.html)
                src_x = (dst_x + 0.5) * scale_x - 0.5
                src_y = (dst_y + 0.5) * scale_y - 0.5
                # The four neighbouring source pixels, clamped to the image
                src_x_0 = int(np.floor(src_x))
                src_y_0 = int(np.floor(src_y))
                src_x_1 = min(src_x_0 + 1, src_w - 1)
                src_y_1 = min(src_y_0 + 1, src_h - 1)
                # Fractional position inside the 2x2 neighbourhood
                tx = src_x - np.floor(src_x)
                ty = src_y - np.floor(src_y)
                src_x_0 = max(src_x_0, 0)
                src_y_0 = max(src_y_0, 0)
                # Completed: interpolate horizontally on both rows, then
                # vertically (the original computed the neighbour indices
                # but never produced or returned the resized image).
                top = (1 - tx) * src[src_y_0, src_x_0, n] + tx * src[src_y_0, src_x_1, n]
                bottom = (1 - tx) * src[src_y_1, src_x_0, n] + tx * src[src_y_1, src_x_1, n]
                dst[dst_y, dst_x, n] = int((1 - ty) * top + ty * bottom)
    return dst
'''
初始化反卷积核
'''
def bilinear_kernel(in_channels, out_channels, kernel_size):
    """Build a transposed-convolution weight tensor that performs bilinear
    upsampling.

    Returns a float32 torch tensor of shape
    (in_channels, out_channels, kernel_size, kernel_size) whose diagonal
    (i, i) slices hold the separable bilinear filter; off-diagonal slices
    are zero.  Note: the fancy-indexed assignment requires
    in_channels == out_channels.
    """
    factor = (kernel_size + 1) // 2
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    # Separable 1-D triangle ramp; the 2-D filter is its outer product
    coords = np.arange(kernel_size)
    ramp = 1 - np.abs(coords - center) / factor
    bilinear_filter = np.outer(ramp, ramp)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float32)
    weight[range(in_channels), range(out_channels), :, :] = bilinear_filter
    return torch.from_numpy(weight)
# VGG16-BN backbone used by FCN below; pretrained=False means the weights
# are randomly initialised (no download of ImageNet weights).
pretrained_net = models.vgg16_bn(pretrained=False)
#FCN本人 对应fcn.png
#FCN本人 对应fcn.png
class FCN(nn.Module):
    """FCN-8s-style semantic-segmentation network on a VGG16-BN backbone.

    The backbone's ``features`` sequence is split into five stages at its
    pooling boundaries; stage-3 and stage-4 outputs are fused back in as
    skip connections before an 8x bilinear-initialised upsampling restores
    the input resolution.  The slice indices assume torchvision's
    ``vgg16_bn`` layer layout.
    """
    def __init__(self, num_classes):
        super().__init__()  # on super() in torch see: https://blog.csdn.net/genous110/article/details/90105497
        # Backbone split into five stages for the skip connections
        self.stage1 = pretrained_net.features[:7]
        self.stage2 = pretrained_net.features[7:14]
        self.stage3 = pretrained_net.features[14:24]
        self.stage4 = pretrained_net.features[24:34]
        self.stage5 = pretrained_net.features[34:]
        # 1x1 convs projecting feature maps to per-class score maps
        self.scores1 = nn.Conv2d(512, num_classes, 1)
        self.scores2 = nn.Conv2d(512, num_classes, 1)
        # NOTE(review): scores3 is defined but never used in forward()
        self.scores3 = nn.Conv2d(128, num_classes, 1)
        self.conv_trans1 = nn.Conv2d(512, 256, 1)
        self.conv_trans2 = nn.Conv2d(256, num_classes, 1)
        # Transposed convs initialised as bilinear upsamplers (see bilinear_kernel)
        self.upsample_8x = nn.ConvTranspose2d(num_classes, num_classes, 16, 8, 4, bias=False)
        self.upsample_8x.weight.data = bilinear_kernel(num_classes, num_classes, 16)
        self.upsample_2x_1 = nn.ConvTranspose2d(512, 512, 4, 2, 1, bias=False)
        self.upsample_2x_1.weight.data = bilinear_kernel(512, 512, 4)
        self.upsample_2x_2 = nn.ConvTranspose2d(256, 256, 4, 2, 1, bias=False)
        self.upsample_2x_2.weight.data = bilinear_kernel(256, 256, 4)
    def forward(self, x):
        s1 = self.stage1(x)
        s2 = self.stage2(s1)
        s3 = self.stage3(s2)
        s4 = self.stage4(s3)
        s5 = self.stage5(s4)
        # NOTE(review): scores1/scores2 are computed but never used or
        # returned — confirm whether auxiliary outputs were intended.
        scores1 = self.scores1(s5)
        s5 = self.upsample_2x_1(s5)
        add1 = s5 + s4  # fuse 2x-upsampled stage5 with the stage4 skip
        scores2 = self.scores2(add1)
        add1 = self.conv_trans1(add1)
        add1 = self.upsample_2x_2(add1)
        add2 = add1 + s3  # fuse with the stage3 skip
        output = self.conv_trans2(add2)
        output = self.upsample_8x(output)  # 8x upsample back to input resolution
        return output
if __name__ == "__main__":
    # Smoke test: push one random 352x480 RGB image through a 12-class FCN.
    # (Fixed: dataset-extraction residue fused onto the final print line
    # made the original statement malformed.)
    rgb = torch.randn(1, 3, 352, 480)
    net = FCN(12)
    out = net(rgb)
    print('喵喵喵喵喵喵喵喵---------------')
    print(out.shape)
53594229 | import setuptools
from distutils.core import Extension
# Long description for PyPI comes from the README.  Fixed: read with an
# explicit encoding — the platform default (e.g. cp1252 on Windows) can
# fail on a UTF-8 README.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="codesnap",
    version="0.0.4",
    author="Tian Gao",
    author_email="gaogaotiantian@hotmail.com",
    description="A profiling tool that can visualize python code in flame graph",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/gaogaotiantian/codesnap",
    packages=setuptools.find_packages("src"),
    package_dir={"": "src"},
    # Ship the HTML/JS/CSS viewer assets alongside the package
    package_data={
        "codesnap": [
            "html/*.js",
            "html/*.css",
            "html/*.html"
        ]
    },
    # C extension implementing the low-level trace hooks
    ext_modules=[
        Extension(
            "codesnap.snaptrace",
            sources=[
                "src/codesnap/modules/snaptrace.c",
            ]
        )
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Quality Assurance",
    ],
    python_requires=">=3.5",
)
| null | setup.py | setup.py | py | 1,134 | python | en | code | null | code-starcoder2 | 51 |
628842232 | import os
import pickle
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_model import ARIMA
def forecast(ser, start_date, end_date):
    """Forecast the total price of a stay with an ARIMA model.

    Fits ARIMA(9, 2, 6) on all prices in *ser* observed before *start_date*
    and forecasts one value per day of the stay.  Returns a tuple of
    (total forecasted price, matplotlib figure showing the forecast with
    its confidence interval).
    """
    # Fit model to data before requested date
    history = ser[ser.index.date < start_date]
    # Fixed: load the persisted start parameters inside a context manager so
    # the file handle is closed, and open read-only ("rb+" write access was
    # never needed).
    with open(os.path.join("models", "ARIMA_params.pkl"), "rb") as params_file:
        arima_params = pickle.load(params_file)
    model = ARIMA(history, order=(9, 2, 6))
    results = model.fit(arima_params)
    # Calculate how many values we need to forecast (one per night)
    duration = (end_date - start_date).days
    predictions = results.forecast(duration)[0]
    # Create plot of forecasted values with confidence interval
    month = timedelta(days=31)
    fig, ax = plt.subplots(figsize=(10, 5))
    fig.suptitle("Airbnb Price Forecasts")
    plt.ylabel("Price($)")
    plot_start = start_date - 2 * month
    plot_end = end_date + month
    ax.plot(ser[(ser.index.date >= plot_start) & (ser.index.date <= plot_end)], c="r")
    results.plot_predict(plot_start, plot_end, ax=ax)
    ax.lines.pop(2)  # drop the duplicate history line plot_predict adds
    # Return computed price and the plot
    return np.sum(predictions), fig
180396234 | import base64
import os
import sublime
import queue
from threading import Thread
from .session import Session
from . import formatter
from .client import Client
from ..log import log
def done(response):
    """True when an nREPL *response* signals completion (status == ["done"])."""
    status = response.get("status")
    return status == ["done"]
def b64encode_file(path):
    """Read *path* as bytes and return its Base64 encoding as a str."""
    with open(path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
class Repl(object):
    """Drives a Tutkain nREPL connection.

    On ``go()`` the server is described; if it supports sideloading, the
    Tutkain middleware sources are provided on demand and a plugin and a
    user session are cloned with the middleware active; otherwise plain
    sessions are cloned.  Responses flow through a daemon formatting loop
    into ``printq`` (printable output) and ``tapq`` (tap> values).
    """
    def __init__(self, window, host, port, options={"print_capabilities": True}):
        # NOTE(review): mutable default for `options` is shared across calls;
        # `window` is accepted but never stored — confirm both are intended.
        self.client = Client(host, port).go()
        self.printq = queue.Queue()
        self.tapq = queue.Queue()
        self.options = options
    def create_session(self, owner, capabilities, response):
        """Register the session id announced in a clone *response* under *owner*."""
        new_session_id = response["new-session"]
        new_session = Session(new_session_id, self.client)
        new_session.info = capabilities
        self.client.register_session(owner, new_session)
        return new_session
    def create_sessions(self, session, response):
        """Clone a "plugin" and a "user" session derived from *session*."""
        capabilities = response
        session.info = capabilities
        if self.options.get("print_capabilities"):
            session.output(response)
        session.send(
            {"op": "clone", "session": session.id},
            handler=lambda response: done(response)
            and self.create_session("plugin", capabilities, response),
        )
        session.send(
            {"op": "clone", "session": session.id},
            handler=lambda response: done(response)
            and self.create_session("user", capabilities, response),
        )
    def handle_sideloader_provide_response(self, session, response):
        # Surface servers that ask for files we never offered
        if "status" in response and "unexpected-provide" in response["status"]:
            name = response["name"]
            session.output({"err": f"unexpected provide: {name}\n"})
    def sideloader_provide(self, session, response):
        """Answer a sideloader lookup with the requested file (or empty)."""
        if "name" in response:
            name = response["name"]
            op = {
                "id": response["id"],
                "op": "sideloader-provide",
                "type": response["type"],
                "name": name,
            }
            path = os.path.join(sublime.packages_path(), "tutkain/clojure/src", name)
            if os.path.isfile(path):
                log.debug({"event": "sideloader/provide", "path": path})
                op["content"] = b64encode_file(path)
            else:
                # Empty content tells the server we don't have this resource
                op["content"] = ""
            session.send(
                op,
                handler=lambda response: self.handle_sideloader_provide_response(
                    session, response
                ),
            )
    def describe(self, session):
        """Ask the server for its capabilities, then clone the sessions."""
        def handler(response):
            if done(response):
                self.start_formatter({"newline_on_done": False})
                self.create_sessions(session, response)
        session.send({"op": "describe"}, handler=handler)
    def add_tap(self, session):
        # Install Tutkain's tap> listener, then continue with describe
        session.send(
            {"op": "tutkain/add-tap"},
            handler=lambda response: done(response) and self.describe(session),
        )
    def add_middleware(self, session, response):
        """Install Tutkain middleware; fall back to plain sessions on error."""
        if done(response):
            session.send(
                {
                    "op": "add-middleware",
                    "middleware": [
                        "tutkain.nrepl.middleware.test/wrap-test",
                        "tutkain.nrepl.middleware.tap/wrap-tap",
                    ],
                },
                handler=lambda response: done(response) and self.add_tap(session),
            )
        elif "err" in response:
            session.output(response)
            session.output(
                {
                    "err": """*** [Tutkain] Sideloading failed. See error message above for details. Some features are unavailable. ***\n"""
                }
            )
            session.send(
                {"op": "clone"},
                handler=lambda response: done(response)
                and self.initialize_without_sideloader(session.info, response),
            )
    def sideload(self, session):
        """Start the sideloader and trigger the eval that pulls middleware in."""
        session.send(
            {"op": "sideloader-start"},
            handler=lambda response: self.sideloader_provide(session, response),
        )
        session.send(
            {"op": "eval", "code": """(require 'tutkain.nrepl.util.pprint)"""},
            pprint=False,
            handler=lambda response: self.add_middleware(session, response),
        )
    def start_formatter(self, settings):
        """Start the daemon thread that formats incoming responses."""
        format_loop = Thread(
            daemon=True,
            target=formatter.format_loop,
            args=(
                self.client.recvq,
                self.printq,
                self.tapq,
                settings,
            ),
        )
        format_loop.name = "tutkain.connection.format_loop"
        format_loop.start()
    def initialize_without_sideloader(self, capabilities, response):
        """Fallback path for servers without sideloading support."""
        session = self.create_session("plugin", capabilities, response)
        if self.options.get("print_capabilities"):
            session.output(capabilities)
        def handler(response):
            if done(response):
                self.start_formatter({"newline_on_done": True})
                self.create_session("user", capabilities, response)
        # Send the clone op via the client instead of the plugin session because some servers do
        # not support sending the op via the session.
        self.client.send({"op": "clone"}, handler=handler)
    def initialize_sessions(self, capabilities, response):
        # Prefer sideloading when the server advertises the op
        if "sideloader-start" in capabilities["ops"]:
            session = self.create_session("sideloader", capabilities, response)
            self.sideload(session)
        else:
            self.initialize_without_sideloader(capabilities, response)
    def clone(self, capabilities):
        self.client.send(
            {"op": "clone"},
            handler=lambda response: done(response)
            and self.initialize_sessions(capabilities, response),
        )
    def go(self):
        """Kick off the handshake (describe -> clone -> sessions); returns self."""
        self.client.send(
            {"op": "describe"},
            handler=lambda response: done(response) and self.clone(response),
        )
        return self
| null | src/repl/__init__.py | __init__.py | py | 6,193 | python | en | code | null | code-starcoder2 | 51 |
249862627 | import json, socket
from modules import query, response, data_structures, cashing, tools
ADDRESS = ("127.0.0.1", 53)  # local interface/port the server listens on (53 = standard DNS)
CASH_FILE = "cash.json"  # persisted cache file ("cash" is the codebase's spelling of "cache")
# Root DNS servers used as the starting point for iterative resolution
ROOT_SERVERS = (('199.9.14.201', 53),
                ('198.41.0.4', 53),
                ('199.7.91.13', 53))
Q_TYPES = [1, 2]  # supported query types: 1 = A, 2 = NS
class DNSServer:
    """Small caching DNS server for A (1) and NS (2) queries.

    Cache misses are resolved either iteratively from the root servers
    (label by label) or recursively through a configured forwarder,
    depending on the ``iterative`` flag.
    """
    def __init__(self, forwarder_addr, cash=None, iterative=True):
        self.forwarder = forwarder_addr
        self.id = 1 # sequential ids make cache-poisoning attacks easier (author's note, translated)
        self.cash = cash if cash else cashing.Cash()
        self.iterative = iterative
    def execute(self):
        """Main loop: receive UDP queries on ADDRESS and send back answers."""
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as server:
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(ADDRESS)
            server.settimeout(2)  # periodic timeout keeps the loop interruptible
            while True:
                try:
                    data, addr = server.recvfrom(1024)
                except socket.timeout:
                    continue
                resp = self.process_query(data)
                if not resp:
                    continue  # unsupported query type — silently dropped
                resp_b = response.ResponseHandler.make_response(resp)
                server.sendto(resp_b, addr)
    def process_query(self, data):
        """Parse a raw query; answer from cache, resolve, or return None."""
        q_in = query.QueryHandler.parse_query(data)
        q_in_id = q_in.header.id
        url = q_in.question.url
        q_type = q_in.question.q_type
        if q_type not in Q_TYPES:
            return None
        cash_value = self.cash.get_answer(url, q_type)
        print(cash_value)  # NOTE(review): debug print left in — consider logging instead
        if cash_value:
            return self.construct_response(url, q_type, q_in_id, cash_value)
        if self.iterative:
            return self.get_response_iterative(url, q_type, q_in_id)
        else:
            return self.get_response_recurs(url, q_type, q_in_id)
    def get_response_iterative(self, url, q_type, q_in_id):
        """Resolve from the root down, querying NS servers one label at a time."""
        labels = tools.Tools.get_label_list(url)
        current_ns_servers = ROOT_SERVERS
        # i walks from the TLD down to -1, where -1 means the full name
        for i in range(len(labels) - 1, -2, -1):
            current_url = '.'.join(labels[i:]) if i != -1 else url
            q_out = self.construct_query(current_url, 2 if i != -1 else q_type)
            q_out_b = query.QueryHandler.make_query(q_out)
            for server in current_ns_servers:
                data = self.send_query_get_response(q_out_b, server)
                if data:
                    break
            # NOTE(review): if no server answered, `data` keeps the last falsy
            # reply (or is unbound for an empty server list) and the parse
            # below fails — confirm the intended error handling.
            server_resp = response.ResponseHandler.parse_response(data)
            if server_resp.header.flags.rcode != 0:
                return self.construct_response_with_error(q_in_id, server_resp.header.flags.rcode)
            self.cash_response(server_resp)
            if i != -1:
                answer = self.cash.get_answer(current_url, 2)
                print(answer)  # NOTE(review): debug print
                if answer:
                    # follow at most three of the freshly learned NS servers
                    current_ns_servers = [(ns, 53) for ns in self.cash.get_answer(current_url, 2)][:3]
            else:
                print(current_url, q_type)  # NOTE(review): debug print
                answer = self.cash.get_answer(current_url, q_type)
                return self.construct_response(url, q_type, q_in_id, answer)
    def get_response_recurs(self, url, q_type, q_in_id):
        """Resolve by delegating the whole query to the configured forwarder."""
        q_out = self.construct_query(url, q_type)
        q_out_b = query.QueryHandler.make_query(q_out)
        data = self.send_query_get_response(q_out_b, self.forwarder)
        if not data:
            return None
        forwarder_resp = response.ResponseHandler.parse_response(data)
        if forwarder_resp.header.flags.rcode != 0:
            return self.construct_response_with_error(q_in_id, forwarder_resp.header.flags.rcode)
        self.cash_response(forwarder_resp)
        cash_value = self.cash.get_answer(url, q_type)
        if not cash_value:
            cash_value = []
        return self.construct_response(url, q_type, q_in_id, cash_value)
    def send_query_get_response(self, query_b, address):
        """Fire *query_b* at *address* over UDP and return the raw reply.

        NOTE(review): no timeout is set, so recvfrom can block indefinitely
        if the remote server never answers — confirm this is acceptable.
        """
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client:
            client.sendto(query_b, address)
            return client.recvfrom(1024)[0]
    def cash_response(self, resp):
        """Cache every resource record of *resp* and persist the whole cache."""
        for rr in resp.answers_rrs + resp.authority_rrs + resp.additional_rrs:
            self.cash.add_record(rr)
        # NOTE(review): rewrites the entire cache file on every response
        with open(CASH_FILE, 'wt') as f:
            json.dump(self.cash.make_serializable(), f)
    def construct_response(self, url, q_type, q_id, answers_list):
        """Build a positive response carrying *answers_list* (TTL 60s each)."""
        flags = data_structures.Flags(1, 0, 1, 0, 0)
        header = data_structures.Header(q_id, flags, 1, len(answers_list), 0, 0)
        question = data_structures.Question(url, q_type)
        answers = []
        for answer in answers_list:
            answers.append(data_structures.ResourceRecord(url, q_type, 60, answer))
        return response.Response(header, question, answers, [], [])
    def construct_response_with_error(self, q_id, error):
        """Build an answerless response whose rcode carries *error*."""
        flags = data_structures.Flags(1, 0, 1, 0, error)
        header = data_structures.Header(q_id, flags, 0, 0, 0, 0)
        return response.Response(header, None, [], [], [])
    def construct_query(self, url, q_type):
        """Build an outgoing query for *url*/*q_type* using our own id."""
        flags = data_structures.Flags(0, 0, 1, 0, 0)
        header = data_structures.Header(self.id, flags, 1, 0, 0, 0)
        question = data_structures.Question(url, q_type)
        return query.Query(header, question)
def main():
    """Read the forwarder address and the cached records, then start the server."""
    with open('config.txt', 'rt') as g:
        lines = g.readlines()
    # Fixed: readlines() keeps the trailing newline, which made the
    # forwarder address invalid — strip surrounding whitespace.
    addr = lines[0].strip()
    forwarder = (addr, 53)
    with open(CASH_FILE, 'rt') as f:
        cash_j = json.load(f)
    cash = cashing.Cash.get_cash_from_json(cash_j)
    server = DNSServer(forwarder, cash, iterative=True)
    server.execute()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the server; exit quietly
        pass
| null | dns_server/dns_server.py | dns_server.py | py | 5,852 | python | en | code | null | code-starcoder2 | 51 |
540271277 | # 支払い金額を求める
# Compute the bill total (comments translated from Japanese)
# Unit prices (yen)
beer_v = 200
otumami_v = 100
yakitori_v = 100
# Quantities
beer_c = 2
otumami_c = 1
yakitori_c = 2
# Discount rate applied to yakitori
yakitori_rate = 0.2
# Loyalty points to spend (1 point = 1 yen)
point = 150
# Calculation
beer_sum = beer_v * beer_c
otumami_sum = otumami_v * otumami_c
yakitori_sum = yakitori_v * (1 - yakitori_rate) * yakitori_c
payment = beer_sum + otumami_sum + yakitori_sum - point
# Show the result (output text reads "total payment <amount> yen")
print("支払金額", payment, "円")
| null | src/task20180705.py | task20180705.py | py | 452 | python | en | code | null | code-starcoder2 | 51 |
44372395 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging.handlers
import os
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import classification_report
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
from creat_train_dataset import ImageCreatTrainDataset
from files_tools import save_json_file
# --- Logging: rotate daily into log/model1.log and echo to the console ---
PYTHON_LOGGER = logging.getLogger(__name__)
if not os.path.exists("log"):
    os.mkdir("log")
HDLR = logging.handlers.TimedRotatingFileHandler("log/model1.log",
                                                 when="midnight", backupCount=60)
STREAM_HDLR = logging.StreamHandler()
FORMATTER = logging.Formatter("%(asctime)s %(filename)s [%(levelname)s] %(message)s")
HDLR.setFormatter(FORMATTER)
STREAM_HDLR.setFormatter(FORMATTER)
PYTHON_LOGGER.addHandler(HDLR)
PYTHON_LOGGER.addHandler(STREAM_HDLR)
PYTHON_LOGGER.setLevel(logging.DEBUG)

# Absolute path to the folder location of this python file
FOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
DATASET = os.path.join(FOLDER_ABSOLUTE_PATH, "dog_cat_dataset")
# --- Hyperparameters ---
IMG_DIM = 32
EPOCHS = 20
BATCH_SIZE = 32
LEARNING_RATE = 0.01

# --- Load and split the image dataset ---
dataset = ImageCreatTrainDataset(DATASET, IMG_DIM)
dataset.load_dataset()
train_x, train_y = dataset.get_train_data()
test_x, test_y = dataset.get_test_data()
labels, nb_labels = dataset.get_labels()

# --- Build a simple fully-connected classifier over the flattened pixels ---
PYTHON_LOGGER.info("First layer dim: {}".format(IMG_DIM * IMG_DIM * 3))
model = Sequential()
model.add(Flatten(input_shape=(IMG_DIM, IMG_DIM, 3)))
model.add(Dense(IMG_DIM * IMG_DIM * 3, activation="relu"))
model.add(Dense(nb_labels, activation="softmax"))
loss = "categorical_crossentropy" if nb_labels > 2 else "binary_crossentropy"
sgd = SGD(LEARNING_RATE)
model.compile(loss=loss, optimizer=sgd, metrics=["accuracy"])
# NOTE(review): the test split doubles as the validation set here, so the
# reported val_* metrics are not an unbiased estimate — confirm intended.
H = model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=EPOCHS, batch_size=BATCH_SIZE)
# evaluate the network
PYTHON_LOGGER.info("Evaluating network")
predictions = model.predict(test_x, batch_size=BATCH_SIZE)
print(classification_report(test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=labels))
# plot the training loss and accuracy
range_plot = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(range_plot, H.history["loss"], label="train_loss", color='red')
plt.plot(range_plot, H.history["val_loss"], label="val_loss", color='green')
plt.plot(range_plot, H.history["accuracy"], label="train_acc", color='blue')
plt.plot(range_plot, H.history["val_accuracy"], label="val_acc", color='pink')
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()
# Persist the label metadata and the trained weights
save_json_file({"img_dim": IMG_DIM, "labels": labels}, "model_1.json")
model.save("model_1.h5")
| null | model1.py | model1.py | py | 2,854 | python | en | code | null | code-starcoder2 | 51 |
130172641 | #coding:utf-8
#Author : Crgig Richards
#Created : 2016.9.6
#Description : display current directory and subdirectories size
import os
directory = '.'  # Set the variable directory to be the current directory
dir_size = 0  # Init file size (accumulated bytes)
# Conversion factors from bytes to each display unit
# (fixed typo: "Megabyets" -> "Megabytes")
fsizedicr = {"Bytes": 1,
             'Kilobytes': float(1)/1024, 'Megabytes': float(1)/(1024*1024), 'Gigabytes': float(1)/(1024*1024*1024)}
# Walk the tree and sum every file's size
for (path, dirs, files) in os.walk(directory):
    for file in files:
        filename = os.path.join(path, file)
        dir_size += os.path.getsize(filename)
for key in fsizedicr:
    # Fixed: the size and the unit were joined with an empty string — use a space
    print("Folder Size: " + str(round(fsizedicr[key]*dir_size, 2)) + ' ' + key)
| null | display_file_size.py | display_file_size.py | py | 649 | python | en | code | null | code-starcoder2 | 51 |
557736386 | import time
import os
import threading
# total time is 60 * 2
total_time = 30
alarm_1 = 25
alarm_2 = 20
class Alarm(threading.Thread):
    """Background thread that waits for a wall-clock time (hour:minute) and
    then announces the alarm.

    Call start() to launch, just_die() to request a clean shutdown.
    """

    def __init__(self, hours, minutes):
        super(Alarm, self).__init__()
        self.hours = int(hours)
        self.minutes = int(minutes)
        self.keep_running = True  # cleared by just_die() to stop the loop

    def run(self):
        try:
            while self.keep_running:
                now = time.localtime()
                if now.tm_hour == self.hours and now.tm_min == self.minutes:
                    print("ALARM NOW!")
                    # NOTE(review): os.popen runs the string as a shell command;
                    # "voltage.mp3" only plays if the OS associates it — confirm.
                    os.popen("voltage.mp3")
                    return
                # Sleep one minute total, but in 1-second slices so that
                # just_die() takes effect quickly (the original slept 60 s in
                # one call and ignored the stop flag for up to a minute).
                for _ in range(60):
                    if not self.keep_running:
                        return
                    time.sleep(1)
        except Exception:
            # Narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; still best-effort, never raises.
            return

    def just_die(self):
        """Ask the alarm thread to exit at its next 1-second check."""
        self.keep_running = False
# Countdown loop: once per wall-clock second print the remaining time as
# MM:SS, appending " <n> ALARM" when the seconds field matches alarm_1/alarm_2.
start = int(time.time())
end = start + total_time
next_second = start + 1
current = start
while current < end:
    current = int(time.time())
    if current == next_second:
        remaining = end - current
        current_minutes = remaining // 60
        current_seconds = remaining % 60
        # {:02d} zero-pads exactly like the original manual "0" + str(...) logic.
        timer_string = "{:02d}:{:02d}".format(current_minutes, current_seconds)
        if current_seconds == alarm_1:
            timer_string = timer_string + " " + str(alarm_1) + " ALARM"
        if current_seconds == alarm_2:
            timer_string = timer_string + " " + str(alarm_2) + " ALARM"
        print(timer_string)
        next_second += 1
    # The original busy-waited and pegged a CPU core; a short sleep keeps the
    # 1-second resolution (polls ~20x/s) while idling the CPU.
    time.sleep(0.05)
| null | Python/basictimer.py | basictimer.py | py | 1,690 | python | en | code | null | code-starcoder2 | 51 |
610188336 | #!/usr/bin/env python
try:
import tkinter
from tkinter import ttk
from tkinter import *
except ImportError:
import Tkinter
from Tkinter import ttk
from Tkinter import *
import cv2
import PIL.Image, PIL.ImageTk
import numpy as np
from keras.models import model_from_json
from keras.preprocessing import image
# load model
# Architecture comes from the JSON export, weights from the HDF5 file;
# both must exist next to this script.
model = model_from_json(open("fer.json", "r").read())
# load weights
model.load_weights('fer.h5')
# Haar cascade used to locate faces before classifying their expression.
face_haar_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
class App:
    """Tkinter window that shows annotated webcam frames from MyVideoCapture.

    __init__ builds the whole UI and then blocks in mainloop(); update()
    reschedules itself every `delay` milliseconds via window.after().
    """
    def __init__(self, window, window_title, video_source=0):
        self.window = window
        self.window.title(window_title)
        self.video_source = video_source
        # open video source (by default this will try to open the computer webcam)
        self.vid = MyVideoCapture(self.video_source)
        # Outer container; frame1 holds the title strip and the three canvases.
        frame0 = Frame(self.window, width=800, height=600, bd=1)
        frame0.pack()
        frame1 = Frame(frame0, bd=2, relief=RAISED)
        frame1.pack(expand=1, fill=X, pady=10, padx=5)
        # Yellow title strip across the top of frame1.
        canvas1 = Canvas(frame1, bg='yellow', width=800, height=20)
        canvas1.pack()
        # Live-video canvas: update() draws each webcam frame here.
        self.canvas = tkinter.Canvas(frame1, width=400, height=300)
        self.canvas.pack(padx=5, pady=10, side=tkinter.LEFT, anchor=NW)
        canvas1.create_text(400, 10, text='NonLutte - Facial Expression Recognition App', font=('verdana', 20, 'bold'))
        # canvas2/canvas3 are static placeholders for additional feeds.
        canvas2 = Canvas(frame1, bg='gray', width=400, height=300)
        canvas2.create_text(75, 20, text='Video feed unavailable', font=('verdana', 10, 'bold'))
        canvas2.pack(padx=5, pady=10, side=tkinter.LEFT)
        canvas3 = Canvas(frame1, bg='gray', width=400, height=300)
        canvas3.create_text(75, 20, text='Video feed unavailable', font=('verdana', 10, 'bold'))
        canvas3.pack(padx=5, pady=10, side=tkinter.LEFT, anchor=SW)
        # canvas4 = Canvas(frame1, bg='gray', width=400, height=300)
        # canvas4.pack(padx=5, pady=10, side=tkinter.RIGHT, anchor=SE)
        frame1.pack(expand=1, fill=X, pady=10, padx=5)
        #
        # # Create a canvas that can fit the above video source size
        # #self.canvas = tkinter.Canvas(window, width = self.vid.width, height = self.vid.height)
        # self.canvas = tkinter.Canvas(window, width = 800, height = 600)
        btn = tkinter.Button(self.window, text="Close", command=self.window.destroy)
        btn.pack(side="bottom", padx=10, pady=10)
        self.pb = ttk.Progressbar(self.window, orient="horizontal", length=750, mode="determinate", value=0)
        self.pb.pack()
        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 15
        self.update()
        self.window.mainloop()
    def update(self):
        """Fetch one annotated frame, draw it, and reschedule this method."""
        # Get a frame from the video source
        ret, frame = self.vid.get_expression()
        if ret:
            # Keep a reference on self so Tk does not garbage-collect the image.
            self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image = self.photo, anchor = tkinter.NW)
        # Progress bar shows a random value — a placeholder, not a real metric.
        self.pb['value'] = float(np.random.randint(0, 100 + 1))
        self.window.after(self.delay, self.update)
class MyVideoCapture:
    """Wraps a cv2.VideoCapture and returns webcam frames annotated with a
    bounding box and the predicted facial expression for each detected face.
    """

    def __init__(self, video_source=0):
        # Open the video source
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def get_expression(self):
        """Grab one frame, annotate detected faces, and return
        (ret, rgb_frame_resized_to_400x300).

        Returns (False, None) if the capture fails, matching the caller's
        `if ret:` check in App.update().
        """
        # Fix: reuse the capture opened in __init__ instead of opening a new
        # cv2.VideoCapture(0) on every call — the original leaked one device
        # handle per frame and never used self.vid at all.
        ret, test_img = self.vid.read()
        if not ret:
            # Original looped forever on read failure; report it instead.
            return (False, None)
        gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)
        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
            # NOTE(review): slice mixes w and h (y:y+w, x:x+h) — only correct
            # for square detections; confirm intended before changing.
            roi_gray = gray_img[y:y + w, x:x + h]  # cropping region of interest i.e. face area from image
            roi_gray = cv2.resize(roi_gray, (48, 48))
            img_pixels = image.img_to_array(roi_gray)
            img_pixels = np.expand_dims(img_pixels, axis=0)
            img_pixels /= 255
            predictions = model.predict(img_pixels)
            # find max indexed array
            max_index = np.argmax(predictions[0])
            emotions = ('anger', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
            predicted_emotion = emotions[max_index]
            cv2.putText(test_img, predicted_emotion, (int(x+20), int(y-20)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        resized_img = cv2.resize(test_img, (400, 300))
        return (ret, cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB))

    # Release the video source when the object is destroyed
    def __del__(self):
        if self.vid.isOpened():
            self.vid.release()
# Create the Tk root window and hand it to App, which builds the UI and
# blocks in mainloop() until the window is closed.
App(tkinter.Tk(), "NonLutte - Facial Expression Recognition App")
| null | VisualAI_EmotionDetection_Final_APP.py | VisualAI_EmotionDetection_Final_APP.py | py | 5,039 | python | en | code | null | code-starcoder2 | 51 |
1065438 | from turtle import Screen
from paddle import Paddle
# Set up the Pong playfield and bind paddle controls, then run the draw loop.
screen = Screen()
screen.bgcolor("black")
screen.setup(height=600, width=800)
screen.title("Pong")
# tracer(0) disables auto-refresh; screen.update() below redraws manually.
screen.tracer(0)
# Right paddle at x=+350, left paddle at x=-350.
r_paddle = Paddle((350, 0))
l_paddle = Paddle((-350, 0))
screen.listen()
# NOTE(review): w/s drive the RIGHT paddle and arrow keys the LEFT one —
# this looks swapped versus the usual convention; confirm intended.
screen.onkey(r_paddle.go_up, "w")
screen.onkey(r_paddle.go_down, "s")
screen.onkey(l_paddle.go_up, "Up")
screen.onkey(l_paddle.go_down, "Down")
game_on = True
# game_on is never set False, so this loop redraws until the window closes.
while game_on:
    screen.update()
screen.exitonclick()
415624995 | import scrapy
from scrapy_splash import SplashRequest
from bs4 import BeautifulSoup
# 爬取地址http://45.76.194.124/news/1.html#
class NewsSpider(scrapy.Spider):
    """Renders http://45.76.194.124/news/1.html# through Splash and extracts
    the rows of the #NewsList table into news_table.

    Each row yields [date, title, article_url, section_url, ...] depending on
    how many <a> tags the row's cells contain.
    """
    name = "onetwo"
    start_urls = [
        "http://45.76.194.124/news/1.html#",
    ]

    def start_requests(self):
        # SplashRequest renders the JS-driven page; wait 10 s for it to load.
        for url in self.start_urls:
            yield SplashRequest(url
                                , self.parse
                                , args={'wait': '10'}
                                # , endpoint='render.json'
                                )

    def parse(self, response):
        content = response.text
        # Fix: write the rendered page in one call instead of looping over the
        # string character-by-character, and use context managers so both file
        # handles are closed (the original passed an open() straight into
        # BeautifulSoup and leaked it).
        with open("124.html", 'w+', encoding="utf-8") as f:
            f.write(content)
        with open("124.html", "r+", encoding="utf-8") as f:
            soup = BeautifulSoup(f, 'html.parser')
        result = soup.find_all("td", {"id": "NewsList"})
        soup1 = BeautifulSoup(str(result[0]), "html.parser")
        news_table = []
        for k, i in enumerate(soup1.find_all("tr")):
            news_info = []
            for j in i.find_all("td"):
                news_info.append(j.text)
                for m in j.find_all("a"):
                    news_info.append(m.get("href"))
            news_table.append(news_info)
        news_table.pop(0)  # the first row is empty — drop it
        print(news_table)
        """
        news_table的其中一条数据
        ['2018-03-21 21:02:40', 'Compliance and Your Data Center', 'https://www.infosecurity-magazine.com:443/blogs/compliance-data-center/', 'https://www.infosecurity-magazine.com/news/']
        """
| null | spiders/newsone.py | newsone.py | py | 1,759 | python | en | code | null | code-starcoder2 | 51 |
368504799 | def get_data_loader(data_dir, batch_size, num_workers):
    # Build ImageNet train/val gluon DataLoaders plus a helper that splits a
    # batch across a list of contexts. Returns (train_data, val_data, batch_fn).
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    # Augmentation strengths for color jitter and PCA lighting noise.
    jitter_param = 0.4
    lighting_param = 0.1
    def batch_fn(batch, ctx):
        # Split data/label along the batch axis so each context gets a shard.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        return (data, label)
    # Training: random crop/flip/jitter/lighting; validation: resize + center crop.
    transform_train = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomFlipLeftRight(), transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param, saturation=jitter_param), transforms.RandomLighting(lighting_param), transforms.ToTensor(), normalize])
    transform_test = transforms.Compose([transforms.Resize(256, keep_ratio=True), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
    train_data = gluon.data.DataLoader(imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train), batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
    val_data = gluon.data.DataLoader(imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test), batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return (train_data, val_data, batch_fn)
619354040 | import matplotlib.pyplot as plt
# Scatter plot of n vs n**2 for n in 1..999, color-mapped by the square value.
values = list(range(1, 1000))
# List comprehension over the already-built list instead of list(generator)
# over a duplicated range (flake8-comprehensions C400).
squares = [v ** 2 for v in values]
plt.scatter(values, squares, s=40, c=squares, cmap=plt.cm.Blues)
plt.title('Squares of Numbers', fontsize=24)
plt.xlabel('Numbers', fontsize=14)
plt.ylabel('Squares', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)
# Axis bounds leave headroom above the max value (999**2 = 998001).
plt.axis([0, 1100, 0, 1100000])
plt.show()
339957247 | from MyUtil import MyUtil as MyUtil
from ElasticNodes import ElasticNodes
from MySingletons import MyDevice
import numpy as np
import torch
# class ReverseLayerFunction(torch.autograd.Function):
# @staticmethod
# def forward(self, x, alpha=1.0):
# self.alpha = alpha
#
# return x.view_as(x)
#
# @staticmethod
# def backward(self, grad_output):
# output = grad_output.neg() * self.alpha
#
# return output, None
class NeuralNetwork(ElasticNodes):
    """Elastic feed-forward network (torch) whose hidden layers can grow and
    prune nodes online.

    Layer sizes come from `layers` ([input, hidden..., output]); hidden layers
    use sigmoid, the output layer softmax + cross-entropy by default. Training
    is manual SGD with momentum (no torch.optim). Growing/pruning decisions in
    width_adaptation_stepwise() use running bias/variance statistics inherited
    from ElasticNodes.
    """
    # Per-hidden-layer weight/bias tensors and their momentum buffers; the
    # output layer has its own output_* counterparts.
    layers = None
    layer_value = None
    output_layer_value = None
    weight = None
    bias = None
    momentum = None
    bias_momentum = None
    output_weight = None
    output_bias = None
    output_momentum = None
    output_bias_momentum = None
    activation_function = None
    output_activation_function = None
    loss_function = None
    learning_rate = 0.01
    momentum_rate = 0.95
    error_value = None
    loss_value = None
    classification_rate = None
    misclassified = None
    output_beta = None
    output_beta_decreasing_factor = None
    # Expected value of the last hidden layer (and its square), cached by
    # compute_expected_values(); exposed via the Eh/Eh2 properties.
    __Eh = None
    __Eh2 = None
    @property
    def number_hidden_layers(self):
        # layers includes input and output, hence the -2.
        return len(self.layers) - 2
    @property
    def input_size(self):
        return self.layers[0]
    @property
    def output_size(self):
        return self.layers[-1]
    @property
    def output(self):
        return self.output_layer_value
    @property
    def raw_output(self):
        # torch.return_types.max: .values are winning scores, .indices classes.
        return torch.max(self.output, axis=1)
    @property
    def outputed_classes(self):
        return torch.argmax(self.output, axis=1)
    @property
    def residual_error(self):
        return 1 - self.raw_output.values
    # Enumerated constants (sequential ints) for activations / losses / pruning.
    ACTIVATION_FUNCTION_AFFINE = 1
    ACTIVATION_FUNCTION_SIGMOID = ACTIVATION_FUNCTION_AFFINE + 1
    ACTIVATION_FUNCTION_TANH = ACTIVATION_FUNCTION_SIGMOID + 1
    ACTIVATION_FUNCTION_RELU = ACTIVATION_FUNCTION_TANH + 1
    ACTIVATION_FUNCTION_LINEAR = ACTIVATION_FUNCTION_RELU + 1
    ACTIVATION_FUNCTION_SOFTMAX = ACTIVATION_FUNCTION_LINEAR + 1
    ACTIVATION_FUNCTION_REVERSE_LAYER = ACTIVATION_FUNCTION_SOFTMAX + 1
    LOSS_FUNCTION_MSE = ACTIVATION_FUNCTION_REVERSE_LAYER + 1
    LOSS_FUNCTION_CROSS_ENTROPY = LOSS_FUNCTION_MSE + 1
    PRUNE_NODE_STRATEGY_SINGLE = LOSS_FUNCTION_CROSS_ENTROPY + 1
    PRUNE_NODE_STRATEGY_MULTIPLE = PRUNE_NODE_STRATEGY_SINGLE + 1
    def __init__(self, layers: list, init_weights: bool = True):
        """Build per-layer tensors; Xavier-init them unless init_weights=False
        (then weights stay None and momentum is disabled)."""
        self.layers = layers
        self.weight = []
        self.bias = []
        self.momentum = []
        self.bias_momentum = []
        self.activation_function = []
        for i in range(self.number_hidden_layers):
            nodes_before = layers[i]
            nodes_after = layers[i + 1]
            if init_weights:
                self.weight.append(self.xavier_weight_initialization(nodes_after, nodes_before))
                self.bias.append(self.xavier_weight_initialization(1, nodes_after))
                self.momentum.append(torch.zeros(self.weight[i].shape, dtype=torch.float, device=MyDevice().get()))
                self.bias_momentum.append(torch.zeros(self.bias[i].shape, dtype=torch.float, device=MyDevice().get()))
            else:
                self.weight.append(None)
                self.bias.append(None)
                self.momentum.append(None)
                self.bias_momentum.append(None)
                self.momentum_rate = 0
            self.activation_function.append(self.ACTIVATION_FUNCTION_SIGMOID)
        if init_weights:
            nodes_before = layers[-2]
            nodes_after = layers[-1]
            self.output_weight = self.xavier_weight_initialization(nodes_after, nodes_before)
            self.output_bias = self.xavier_weight_initialization(1, nodes_after)
            self.output_momentum = torch.zeros(self.output_weight.shape, dtype=torch.float, device=MyDevice().get())
            self.output_bias_momentum = torch.zeros(self.output_bias.shape, dtype=torch.float, device=MyDevice().get())
        else:
            self.output_weight = None
            self.output_bias = None
            self.output_momentum = None
            self.output_bias_momentum = None
            self.momentum_rate = 0
        self.output_activation_function = self.ACTIVATION_FUNCTION_SOFTMAX
        self.loss_function = self.LOSS_FUNCTION_CROSS_ENTROPY
        ElasticNodes.__init__(self, len(self.layers))
    ##### Weight initializations #####
    def xavier_weight_initialization(self, n_out: int, n_in: int, uniform: bool = False):
        """Return an (n_out, n_in) Xavier-initialized tensor with grad enabled.
        NOTE(review): the uniform branch calls the deprecated non-in-place
        torch.nn.init.xavier_uniform (no trailing underscore)."""
        if uniform:
            return torch.nn.init.xavier_uniform(tensor=torch.zeros(int(n_out), int(n_in), dtype=torch.float,
                                                                   requires_grad=True, device=MyDevice().get()))
        return torch.nn.init.xavier_normal_(tensor=torch.zeros(int(n_out), int(n_in), dtype=torch.float,
                                                               requires_grad=True, device=MyDevice().get()))
    def he_weight_initialization(self, n_out, n_in, shape=None):
        """He (Kaiming) normal init as a NumPy array (not a torch tensor)."""
        #TODO
        mean = 0.0
        sigma = np.sqrt(2 / n_in)
        if shape is None:
            shape = (n_out, n_in)
        return np.random.normal(mean, sigma, shape)
    ##### Noise #####
    def masking_noise(self, x: torch.tensor, noise_ratio: float = 0.0):
        # Zero each element independently with probability noise_ratio.
        return x.detach().masked_fill(torch.rand(x.shape, device=MyDevice().get()) <= noise_ratio, 0)
    ##### Activation functions #####
    @staticmethod
    def sigmoid(z: torch.tensor):
        return torch.sigmoid(z)
    @staticmethod
    def tanh(z):
        return torch.tanh(z)
    @staticmethod
    def relu(z):
        return torch.nn.functional.relu(z)
    @staticmethod
    def linear(layer_value: torch.tensor, weight: torch.tensor, bias: torch.tensor):
        # y = layer_value @ weight.T + bias (torch linear convention).
        return torch.nn.functional.linear(layer_value, weight, bias)
    @staticmethod
    def softmax(z, axis: int = 1):
        return torch.nn.functional.softmax(z, dim=axis)
    def reset_grad(self):
        """Detach every weight/bias from the old graph and re-enable grads,
        so each training step builds a fresh autograd graph."""
        for i in range(self.number_hidden_layers):
            self.weight[i] = self.weight[i].detach()
            self.bias[i] = self.bias[i].detach()
            self.weight[i].requires_grad = True
            self.bias[i].requires_grad = True
        self.output_weight = self.output_weight.detach()
        self.output_bias = self.output_bias.detach()
        self.output_weight.requires_grad = True
        self.output_bias.requires_grad = True
    def feedforward(self, x: torch.Tensor, y: torch.Tensor, train: bool = False):
        """Forward pass then error/loss computation; returns self (fluent)."""
        return self.forward_pass(x, train=train).calculate_error(y)
    def backpropagate(self):
        # Populates .grad on all weights/biases via autograd.
        self.loss_value.backward()
        return self
    def test(self, x: torch.Tensor, y: torch.Tensor, is_beta_updatable: bool = False):
        """Evaluate on (x, y), updating misclassified / classification_rate."""
        self.feedforward(x=x, y=y)
        m = y.shape[0]
        true_classes = torch.argmax(y, axis=1)
        self.misclassified = torch.sum(torch.ne(self.outputed_classes, true_classes)).item()
        self.classification_rate = 1 - self.misclassified / m
        if is_beta_updatable:
            # NOTE(review): this branch looks broken — `self.true_classes` is
            # not set anywhere (only the local `true_classes` above), the
            # output is 2-D so max(axis=2) should fail, and np.max's second
            # positional argument is `axis`, not a floor value (builtin
            # max(..., 0) seems intended). Confirm before relying on it.
            class_label = self.output_layer_value.max(axis=2)
            for i in range(m):
                if self.true_classes[i] == class_label[i]:
                    self.output_beta = np.max(self.output_beta * self.output_beta_decreasing_factor, 0)
                    self.output_beta_decreasing_factor = np.max(self.output_beta_decreasing_factor - 0.01, 0)
                else:
                    self.output_beta = max(self.output_beta * (1 + self.output_beta_decreasing_factor), 1)
                    self.output_beta_decreasing_factor = max(self.output_beta_decreasing_factor + 0.01, 1)
        return self
    def train(self, x: torch.Tensor, y: torch.Tensor, weight_no: int = None, is_neg_grad: bool = False):
        """One SGD step. weight_no=None updates every layer (output first);
        otherwise only that layer. is_neg_grad flips the gradient sign
        (used e.g. for adversarial/reverse objectives)."""
        self.feedforward(x=x, y=y, train=True).backpropagate()
        if weight_no is None:
            for weight_no in range(self.number_hidden_layers, -1, -1):
                self.update_weight(weight_no=weight_no, is_neg_grad=is_neg_grad)
        else:
            self.update_weight(weight_no=weight_no, is_neg_grad=is_neg_grad)
    def update_weight(self, weight_no: int, is_neg_grad: bool = False):
        """Apply SGD(+momentum) to one layer; indices >= number_hidden_layers
        address the output layer."""
        if weight_no >= self.number_hidden_layers:
            dW: torch.Tensor = self.learning_rate * self.output_weight.grad
            db: torch.Tensor = self.learning_rate * self.output_bias.grad
            if self.momentum_rate > 0:
                self.output_momentum: torch.Tensor = self.momentum_rate * self.output_momentum + dW
                self.output_bias_momentum: torch.Tensor = self.momentum_rate * self.output_bias_momentum + db
                dW: torch.Tensor = self.output_momentum
                db: torch.Tensor = self.output_bias_momentum
            if is_neg_grad:
                self.output_weight: torch.Tensor = self.output_weight - dW.neg()
                self.output_bias: torch.Tensor = self.output_bias - db.neg()
            else:
                self.output_weight: torch.Tensor = self.output_weight - dW
                self.output_bias: torch.Tensor = self.output_bias - db
        else:
            dW: torch.Tensor = self.learning_rate * self.weight[weight_no].grad
            db: torch.Tensor = self.learning_rate * self.bias[weight_no].grad
            if self.momentum_rate > 0:
                self.momentum[weight_no]: torch.Tensor = self.momentum_rate * self.momentum[weight_no] + dW
                self.bias_momentum[weight_no]: torch.Tensor = self.momentum_rate * self.bias_momentum[weight_no] + db
                dW: torch.Tensor = self.momentum[weight_no]
                db: torch.Tensor = self.bias_momentum[weight_no]
            if is_neg_grad:
                self.weight[weight_no]: torch.Tensor = self.weight[weight_no] - dW.neg()
                self.bias[weight_no]: torch.Tensor = self.bias[weight_no] - db.neg()
            else:
                self.weight[weight_no]: torch.Tensor = self.weight[weight_no] - dW
                self.bias[weight_no]: torch.Tensor = self.bias[weight_no] - db
    def forward_pass(self, x: torch.Tensor, train: bool = False):
        """Propagate x through all hidden layers then the output layer,
        dispatching on the per-layer activation constants. Stores every
        intermediate activation in self.layer_value."""
        if train:
            self.reset_grad()
        self.layer_value = []
        self.layer_value.append(x)
        for i in range(self.number_hidden_layers):
            if self.activation_function[i] == self.ACTIVATION_FUNCTION_AFFINE:
                self.layer_value.append(self.linear(self.layer_value[i], self.weight[i], self.bias[i]))
            elif self.activation_function[i] == self.ACTIVATION_FUNCTION_SIGMOID:
                self.layer_value.append(self.sigmoid(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))
            elif self.activation_function[i] == self.ACTIVATION_FUNCTION_TANH:
                self.layer_value.append(self.tanh(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))
            elif self.activation_function[i] == self.ACTIVATION_FUNCTION_RELU:
                self.layer_value.append(self.relu(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))
            elif self.activation_function[i] == self.ACTIVATION_FUNCTION_LINEAR:
                raise TypeError('Not implemented')
            elif self.activation_function[i] == self.ACTIVATION_FUNCTION_SOFTMAX:
                self.layer_value.append(self.softmax(self.linear(self.layer_value[i], self.weight[i], self.bias[i])))
            elif self.activation_function[i] == self.ACTIVATION_FUNCTION_REVERSE_LAYER:
                # NOTE(review): self.reverse_layer is not defined anywhere in
                # this class (ReverseLayerFunction above is commented out), so
                # this branch would raise AttributeError if selected.
                self.layer_value.append(self.reverse_layer(self.layer_value[i]))
        if self.output_activation_function == self.ACTIVATION_FUNCTION_AFFINE:
            self.output_layer_value = self.linear(self.layer_value[-1], self.output_weight, self.output_bias)
        elif self.output_activation_function == self.ACTIVATION_FUNCTION_SIGMOID:
            self.output_layer_value = self.sigmoid(self.linear(self.layer_value[-1], self.output_weight, self.output_bias))
        elif self.output_activation_function == self.ACTIVATION_FUNCTION_TANH:
            self.output_layer_value = self.tanh(self.linear(self.layer_value[-1], self.output_weight, self.output_bias))
        elif self.output_activation_function == self.ACTIVATION_FUNCTION_RELU:
            self.output_layer_value = self.relu(self.linear(self.layer_value[-1], self.output_weight, self.output_bias))
        elif self.output_activation_function == self.ACTIVATION_FUNCTION_SOFTMAX:
            self.output_layer_value = self.softmax(self.linear(self.layer_value[-1], self.output_weight, self.output_bias), axis=1)
        elif self.output_activation_function == self.ACTIVATION_FUNCTION_REVERSE_LAYER:
            self.output_layer_value = self.reverse_layer(self.layer_value[-1])
        return self
    def calculate_error(self, y: torch.tensor):
        """Compute raw error (y - output) plus the configured scalar loss.
        NOTE(review): cross_entropy is fed the already-softmaxed output, i.e.
        softmax is applied twice on that path — confirm intended."""
        self.error_value = y - self.output_layer_value
        if self.loss_function == self.LOSS_FUNCTION_MSE:
            self.loss_value = torch.nn.functional.mse_loss(self.output_layer_value, y)
        elif self.loss_function == self.LOSS_FUNCTION_CROSS_ENTROPY:
            self.loss_value = torch.nn.functional.cross_entropy(self.output_layer_value, torch.argmax(y, 1))
        return self
    def compute_expected_values(self, in_place: bool = False):
        """Update running input statistics and the cached Eh/Eh2 expectations.
        NOTE(review): the in_place parameter is unused."""
        self.data_mean, self.data_variance, self.data_standard_deviation = \
            MyUtil.recursive_mean_standard_deviation(self.layer_value[0],
                                                     self.data_mean,
                                                     self.data_variance,
                                                     self.number_samples_feed)
        self.Eh, self.Eh2 = self.compute_inbound_expected_values()
    def compute_inbound_expected_values(self, number_hidden_layer: int = None):
        """Recursively propagate the expected activation (probit of the input
        statistics) up to `number_hidden_layer`; returns (Eh, Eh**2)."""
        nhl = number_hidden_layer  # readability
        if nhl is None:
            nhl = self.number_hidden_layers - 1
        if nhl == 0:
            inference, center, std = (1, self.data_mean, self.data_standard_deviation)
            py = MyUtil.probit(center, std)
            Eh = inference * self.sigmoid(self.linear(self.weight[0], py, self.bias[0].T))
        else:
            Eh, _ = self.compute_inbound_expected_values(number_hidden_layer=nhl - 1)
            weight, bias = (self.weight[nhl], self.bias[nhl]) if nhl < self.number_hidden_layers + 1 else (self.output_weight, self.output_bias)
            Eh = self.sigmoid(self.linear(weight, Eh.T, bias.T))
        return Eh, Eh ** 2
    @property
    def Eh(self):
        # Cached expected last-hidden-layer activation (name-mangled storage).
        return self.__Eh
    @Eh.setter
    def Eh(self, value: torch.tensor):
        self.__Eh = value
    @property
    def Eh2(self):
        return self.__Eh2
    @Eh2.setter
    def Eh2(self, value: torch.tensor):
        self.__Eh2 = value
    @property
    def Ey(self):
        # Expected network output given Eh.
        return self.softmax(self.linear(self.output_weight, self.Eh.T, self.output_bias.T), axis=0)
    @property
    def Ey2(self):
        return self.softmax(self.linear(self.output_weight, self.Eh2.T, self.output_bias.T), axis=0)
    @property
    def network_variance(self):
        # Var ~= E[y^2] - E[y]^2, collapsed to a scalar by a Frobenius norm.
        return MyUtil.frobenius_norm(self.Ey2 - self.Ey ** 2)
    def compute_bias(self, y):
        return MyUtil.frobenius_norm((self.Ey.T - y) ** 2)
    def width_adaptation_stepwise(self, y, prune_strategy: int = None):
        """One online step of the grow/prune controller for the deepest
        hidden layer: updates running bias/variance statistics, their running
        minima, and sets self.growable / self.prunable."""
        if prune_strategy is None:
            prune_strategy = self.PRUNE_NODE_STRATEGY_MULTIPLE
        nhl: int = self.number_hidden_layers
        self.number_samples_feed = self.number_samples_feed + 1
        self.number_samples_layer[nhl] = self.number_samples_layer[nhl] + 1
        self.compute_expected_values()
        self.bias_mean[nhl], self.bias_variance[nhl], self.bias_standard_deviation[nhl] = \
            MyUtil.recursive_mean_standard_deviation(self.compute_bias(y),
                                                     self.bias_mean[nhl],
                                                     self.bias_variance[nhl],
                                                     self.number_samples_feed)
        self.var_mean[nhl], self.var_variance[nhl], self.var_standard_deviation[nhl] = \
            MyUtil.recursive_mean_standard_deviation(self.network_variance,
                                                     self.var_mean[nhl],
                                                     self.var_variance[nhl],
                                                     self.number_samples_feed)
        # Reset the running minima right after a growth event (and on the
        # first sample); otherwise keep tracking the minimum seen so far.
        if self.number_samples_layer[nhl] <= 1 or self.growable[nhl]:
            self.minimum_bias_mean[nhl] = self.bias_mean[nhl]
            self.minimum_bias_standard_deviation[nhl] = self.bias_standard_deviation[nhl]
        else:
            self.minimum_bias_mean[nhl] = np.min([self.minimum_bias_mean[nhl], self.bias_mean[nhl]])
            self.minimum_bias_standard_deviation[nhl] = np.min([self.minimum_bias_standard_deviation[nhl], self.bias_standard_deviation[nhl]])
        if self.number_samples_layer[nhl] <= self.input_size + 1 or self.prunable[nhl][0] != -1:
            self.minimum_var_mean[nhl] = self.var_mean[nhl]
            self.minimum_var_standard_deviation[nhl] = self.var_standard_deviation[nhl]
        else:
            self.minimum_var_mean[nhl] = np.min([self.minimum_var_mean[nhl], self.var_mean[nhl]])
            self.minimum_var_standard_deviation[nhl] = np.min([self.minimum_var_standard_deviation[nhl], self.var_standard_deviation[nhl]])
        self.BIAS.append(self.bias_mean[nhl])
        self.VAR.append(self.var_mean[nhl])
        # Looser thresholds for 512-class outputs (per comment: STL or CIFAR).
        if self.output_size == 512:  # STL or CIFAR
            alpha_1 = 1.45
            alpha_2 = 0.95
        else:
            alpha_1 = 1.25
            alpha_2 = 0.75
        self.growable[nhl] = self.is_growable(self.compute_bias(y), alpha_1, alpha_2)
        self.prunable[nhl] = self.is_prunable(prune_strategy, 2 * alpha_1, 2 * alpha_2)
    def is_growable(self, bias: torch.tensor, alpha_1: float = 1.25, alpha_2: float = 0.75):
        """Grow when the current bias statistic exceeds an adaptive threshold
        above its historical minimum (drift-style test)."""
        nhl = self.number_hidden_layers  # readability
        current = self.bias_mean[nhl] + self.bias_standard_deviation[nhl]
        biased_min = self.minimum_bias_mean[nhl] \
                     + (alpha_1 * torch.exp(-bias) + alpha_2) * self.minimum_bias_standard_deviation[nhl]
        if self.number_samples_layer[nhl] > 1 and current >= biased_min:
            return True
        return False
    def is_prunable(self, prune_strategy: int = None, alpha_1: float = 2.5, alpha_2: float = 1.5):
        """Return node index/indices to prune, or [-1] when nothing qualifies.
        SINGLE picks the least-expected-activation node; MULTIPLE prunes all
        nodes whose expectation falls below mean - variance."""
        if prune_strategy is None:
            prune_strategy = self.PRUNE_NODE_STRATEGY_MULTIPLE
        nhl = self.number_hidden_layers  # readability
        current = self.var_mean[nhl] + self.var_standard_deviation[nhl]
        biased_min = self.minimum_var_mean[nhl] \
                     + (alpha_1 * torch.exp(-self.network_variance) + alpha_2) * self.minimum_var_standard_deviation[nhl]
        if not self.growable[nhl] \
                and self.layers[nhl] > 1 \
                and self.number_samples_layer[nhl] > self.input_size + 1 \
                and current >= biased_min:
            if prune_strategy == self.PRUNE_NODE_STRATEGY_SINGLE:
                return torch.argmin(self.Eh)
            elif prune_strategy == self.PRUNE_NODE_STRATEGY_MULTIPLE:
                nodes_to_prune = torch.where(self.Eh < torch.abs(torch.mean(self.Eh) - torch.var(self.Eh)))
                if len(nodes_to_prune[0]):
                    return nodes_to_prune[0]
                else:
                    return torch.argmin(self.Eh)
        return [-1]
    def grow_node(self, layer_number: int):
        """Add one node to `layer_number`: a new row+bias in the incoming
        weight matrix and a new column in the outgoing one."""
        self.layers[layer_number] += 1
        if layer_number >= 0:
            self.grow_weight_row(layer_number - 1)
            self.grow_bias(layer_number - 1)
        if layer_number <= self.number_hidden_layers:
            self.grow_weight_column(layer_number)
    def grow_weight_row(self, layer_number: int):
        # Append a Xavier-initialized row (and zero momentum row) to the
        # weight matrix feeding the grown layer; index past the hidden list
        # addresses the output layer.
        def add_element(tensor_data: torch.tensor, momentum_tensor_data: torch.tensor, n_out: int):
            tensor_data = torch.cat((tensor_data, self.xavier_weight_initialization(1, n_out)), axis=0)
            momentum_tensor_data = torch.cat((momentum_tensor_data, torch.zeros(1, n_out, dtype=torch.float, device=MyDevice().get())), axis=0)
            return tensor_data, momentum_tensor_data
        if layer_number >= len(self.weight):
            [_, n_out] = self.output_weight.shape
            self.output_weight, self.output_momentum = add_element(self.output_weight, self.output_momentum, n_out)
        else:
            [_, n_out] = self.weight[layer_number].shape
            self.weight[layer_number], self.momentum[layer_number] = add_element(self.weight[layer_number], self.momentum[layer_number], n_out)
    def grow_weight_column(self, layer_number: int):
        # Append a column so the next layer accepts the new node's output.
        def add_element(tensor_data: torch.tensor, momentum_tensor_data: torch.tensor, n_out: int):
            tensor_data = torch.cat((tensor_data, self.xavier_weight_initialization(n_out, 1)), axis=1)
            momentum_tensor_data = torch.cat((momentum_tensor_data, torch.zeros(n_out, 1, dtype=torch.float, device=MyDevice().get())), axis=1)
            return tensor_data, momentum_tensor_data
        if layer_number >= len(self.weight):
            [n_out, _] = self.output_weight.shape
            self.output_weight, self.output_momentum = add_element(self.output_weight, self.output_momentum, n_out)
        else:
            [n_out, _] = self.weight[layer_number].shape
            self.weight[layer_number], self.momentum[layer_number] = add_element(self.weight[layer_number], self.momentum[layer_number], n_out)
    def grow_bias(self, layer_number):
        # Append one bias entry (and zero momentum entry) for the new node.
        def add_element(tensor_data: torch.tensor, momentum_tensor_data: torch.tensor, n_out: int):
            tensor_data = torch.cat((tensor_data, self.xavier_weight_initialization(1, n_out)), axis=1)
            momentum_tensor_data = torch.cat((momentum_tensor_data, torch.zeros(1, n_out, dtype=torch.float, device=MyDevice().get())), axis=1)
            return tensor_data, momentum_tensor_data
        if layer_number >= len(self.bias):
            [n_out, _] = self.output_bias.shape
            self.output_bias, self.output_bias_momentum = add_element(self.output_bias, self.output_bias_momentum, n_out)
        else:
            [n_out, _] = self.bias[layer_number].shape
            self.bias[layer_number], self.bias_momentum[layer_number] = add_element(self.bias[layer_number], self.bias_momentum[layer_number], n_out)
        pass
    def prune_node(self, layer_number: int, node_number: int):
        """Inverse of grow_node: drop the node's incoming row+bias and its
        outgoing column."""
        self.layers[layer_number] -= 1
        if layer_number >= 0:
            self.prune_weight_row(layer_number - 1, node_number)
            self.prune_bias(layer_number - 1, node_number)
        if layer_number <= self.number_hidden_layers:
            self.prune_weight_column(layer_number, node_number)
    def prune_weight_row(self, layer_number: int, node_number: int):
        # Remove the n-th row from the weight (and momentum) matrix.
        def remove_nth_row(tensor_data: torch.tensor, n: int):
            return torch.cat([tensor_data[:n], tensor_data[n+1:]])
        if layer_number >= len(self.weight):
            self.output_weight = remove_nth_row(self.output_weight, node_number)
            self.output_momentum = remove_nth_row(self.output_momentum, node_number)
        else:
            self.weight[layer_number] = remove_nth_row(self.weight[layer_number], node_number)
            self.momentum[layer_number] = remove_nth_row(self.momentum[layer_number], node_number)
    def prune_weight_column(self, layer_number: int, node_number: int):
        # Remove the n-th column by transposing, dropping the row, transposing back.
        def remove_nth_column(weight_tensor: torch.tensor, n: int):
            return torch.cat([weight_tensor.T[:n], weight_tensor.T[n+1:]]).T
        if layer_number >= len(self.weight):
            self.output_weight = remove_nth_column(self.output_weight, node_number)
            self.output_momentum = remove_nth_column(self.output_momentum, node_number)
        else:
            self.weight[layer_number] = remove_nth_column(self.weight[layer_number], node_number)
            self.momentum[layer_number] = remove_nth_column(self.momentum[layer_number], node_number)
    def prune_bias(self, layer_number: int, node_number: int):
        # Remove the n-th bias element, keeping the (1, n) row-vector shape.
        def remove_nth_element(bias_tensor: torch.tensor, n: int):
            bias_tensor = torch.cat([bias_tensor[0][:n], bias_tensor[0][n+1:]])
            return bias_tensor.view(1, bias_tensor.shape[0])
        if layer_number >= len(self.bias):
            self.output_bias = remove_nth_element(self.output_bias, node_number)
            self.output_bias_momentum = remove_nth_element(self.output_bias_momentum, node_number)
        else:
            self.bias[layer_number] = remove_nth_element(self.bias[layer_number], node_number)
            self.bias_momentum[layer_number] = remove_nth_element(self.bias_momentum[layer_number], node_number)
12236195 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime
from django.utils import timezone
# from django.utils.html import mark_safe
from .thumbs import ImageWithThumbsField
class Department(models.Model):
    """An academic department, identified by a short code and a display name."""
    dept_code = models.CharField(max_length=3)   # short code, e.g. a 3-letter abbreviation
    dept_name = models.CharField(max_length=50)  # human-readable department name
    def __str__(self):
        return self.dept_name
class Course(models.Model):
    """A course offered by exactly one Department (FK cascades on delete)."""
    course_code = models.CharField(max_length=6)
    course_name = models.CharField(max_length=50)
    dept_fk = models.ForeignKey(Department, on_delete=models.CASCADE)
    #dept_fk = models.ManyToManyField(Department, on_delete=models.SET_NULL)
    # First positional arg is the verbose name shown in forms/admin.
    course_desc = models.TextField('Course Description',max_length=100)
    def __str__(self):
        return self.course_name
class Student(models.Model):
    """Profile data for a student, optionally linked to a Django auth User.

    Name/email fields duplicate the linked ``User`` fields; nothing in this
    model keeps them in sync (see the commented-out signal code below).
    """
    user = models.ForeignKey(User,on_delete=models.CASCADE, null=True, blank=True)
    # course_fk = models.ManyToManyField(Course) #, on_delete=models.CASCADE)
    # dept_fk = models.ForeignKey(Department, on_delete=models.CASCADE,null=True,blank=True)
    birth_date = models.DateField(null=True, blank=True)
    # NOTE(review): IntegerField overflows for long phone numbers and drops
    # leading zeros — a CharField is the usual choice; confirm before changing.
    phone_no = models.IntegerField(default=0)
    firstname = models.CharField(max_length=20, null=True, blank=True)
    lastname = models.CharField(max_length=20, null=True, blank=True)
    email = models.EmailField(max_length=50, null=True, blank=True)
    # def assign_things(self)
    # user.first_name = self.firstname
    # user.last_name = self.lastname
    # user.email = self.email
    # def __str__(self):
    # return self.user.first_name + self.user.last_name

# Disabled experiments at auto-creating a Student whenever a User is created:
# def create_profile(sender,**kwargs):
# if kwargs['created']:
# user_profile=Student.objects.create(user=kwargs['instance'])
# post_save.connect(create_profile,sender=User)
# @receiver(post_save, sender=User)
# def create_user_profile(sender, instance, created, **kwargs):
# if created:
# Student.objects.create(user=instance)
# @receiver(post_save, sender=User)
# def save_user_profile(sender, instance, **kwargs):
# instance.profile.save()
# class QuestionBank(models.Model):
# question_fk = models.ForeignKey('Course', Course, on_delete=models.CASCADE)
# def __str__(self):
# return self.course_fk.course_code
class Exam(models.Model):
    """An exam belonging to a Course, with a time limit per attempt."""
    exam_name = models.CharField(max_length=40)
    course_fk = models.ForeignKey(Course, verbose_name='Course', on_delete=models.CASCADE, null=True, blank=True)
    # question_fk = models.ManyToManyField(Question)
    time_limit = models.DurationField()
    # Stamped once on creation and never editable afterwards.
    pub_date = models.DateTimeField('Date Published', auto_now_add=True, editable=False)

    def was_published_recently(self):
        """True if the exam was published within the last 24 hours
        (and not in the future)."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    # Admin list-display metadata for the method above.
    was_published_recently.short_description = 'Recently Published?'
    was_published_recently.boolean = True
    was_published_recently.admin_order_field = 'pub_date'

    def __str__(self):
        return self.exam_name
class Question(models.Model):
    """A question (text plus image) that can appear in many Exams."""
    qn_text = models.TextField('Question Description',max_length=200)
    # Custom field that also generates 125x125 and 300x200 thumbnails.
    qn_image = ImageWithThumbsField('Question Image', upload_to='img/', sizes=((125,125),(300,200)))
    # qn_bank = models.ForeignKey(QuestionBank, on_delete=models.CASCADE, verbose_name='IN QNbank')
    exams = models.ManyToManyField(Exam)
    course_fk = models.ForeignKey(Course, verbose_name='Course', on_delete=models.CASCADE, null=True, blank=True)
    pub_date = models.DateTimeField('date published', auto_now_add=True, editable=False)
    # correct_choice = models.ForeignKey(Choice)

    def __str__(self):
        # Truncate to the first 20 characters for readable admin listings.
        return self.qn_text[:20]
    # def image_tag(self):
    # from django.utils.html import escape
    # return u'<img src="%s" />' % escape(self.qn_image)
    # image_tag.short_description = 'Image'
    # image_tag.allow_tags = True
    # def image_img(self):
    # if self.image:
    # return mark_safe('<img src="%s" />' % self.qn_image.url_125x125)
    # else:
    # return '(No image)'
    # image_img.short_description = 'Thumb'
    # def image_tag(self):
    # return mark_safe('<img src="%s" width="150" height="150" alt="Question Image">' % (self.qn_image))

    def was_published_recently(self):
        """True if the question was published within the last 24 hours."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    # Admin list-display metadata for the method above.
    was_published_recently.short_description = 'Recently Published?'
    was_published_recently.boolean = True
    was_published_recently.admin_order_field = 'pub_date'
class Choice(models.Model):
    """One answer option for a Question; ``is_correct`` marks the key."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    is_correct = models.BooleanField('Correct Answer', default=False)

    def __str__(self):
        return self.choice_text
class Result(models.Model):
    """A student's recorded answer for an exam."""
    exam_fk = models.ForeignKey(Exam, on_delete=models.CASCADE)
    student_fk = models.ForeignKey(Student, on_delete=models.CASCADE,null=True,blank=True)
    # question = models.ForeignKey(Question, on_delete=models.CASCADE)
    # NOTE(review): field is named "choice" but points at Question, not
    # Choice — looks like a copy-paste slip; confirm intended target model.
    choice = models.ForeignKey(Question, on_delete=models.CASCADE)

    def __str__(self):
        # Raises AttributeError when student_fk is NULL (it is nullable).
        return self.student_fk.user.username
| null | exam_system/stud_app/models.py | models.py | py | 5,322 | python | en | code | null | code-starcoder2 | 51 |
509230436 |
from xai.brain.wordbase.nouns._mothball import _MOTHBALL
#calss header
class _MOTHBALLED(_MOTHBALL, ):
    """Generated noun entry for "mothballed", derived from "mothball"."""

    def __init__(self):
        # Let the base word class set up its shared state first.
        super().__init__()
        # Identity of this derived word form.
        self.name = "MOTHBALLED"
        self.basic = "mothball"
        self.specie = 'nouns'
        # No pre-computed JSON payload for this entry.
        self.jsondata = {}
| null | xai/brain/wordbase/nouns/_mothballed.py | _mothballed.py | py | 254 | python | en | code | null | code-starcoder2 | 51 |
from __future__ import division
import sys
import argparse
# External executables invoked through os.system(); edit these if the
# tools are not available on $PATH.
wgsim_path = "wgsim"
bedtools_path = "bedtools"
samtools_path = "samtools"
def rounder(x, y):
    """Round ``x`` to the nearest multiple of ``y``.

    Ties follow Python's built-in round() (banker's rounding on Python 3).
    """
    nearest_multiple = round(x / float(y))
    return int(nearest_multiple) * y
class SmartFormatter(argparse.HelpFormatter):
    """Help formatter that preserves explicit newlines in 'R|'-prefixed text.

    Help strings beginning with ``R|`` are split on the author's own line
    breaks (like RawTextHelpFormatter); everything else is wrapped normally.
    """

    def _split_lines(self, text, width):
        if not text.startswith('R|'):
            # Default argparse word-wrapping for ordinary help strings.
            return super()._split_lines(text, width)
        # Raw mode: drop the marker and honour the embedded newlines.
        return text[2:].splitlines()
# ---------------------------------------------------------------------------
# Command-line interface: one sub-command per dudeML mode.
# ---------------------------------------------------------------------------
parser=argparse.ArgumentParser(description='Predict CNVs using dudeML')
parser._positionals.title = 'possible modes (enter \'python3 dudeml.py modeName -h\' for modeName\'s help message'
subparsers = parser.add_subparsers(help='sub-command help')
# Sub-parsers, one per pipeline stage / utility mode.
parser_1 = subparsers.add_parser('predict', help='Predict CNVs in sample based on training classifier including ploidy or frequency of CNV.')
parser_2 = subparsers.add_parser('classify', help='Train a classifier based on a provided training set.')
parser_3 = subparsers.add_parser('winStat', help='Calculate average coverage of windows for a number of bases, given the window size, relative to the chromosomes average coverage.')
parser_4 = subparsers.add_parser('winStatExtra', help='Find averaged coverage of windows, based on previously estimated median coverage.')
parser_5 = subparsers.add_parser('fvecSample', help='Format sample/test file to create sets of windows to analyse as a features vector.')
parser_6 = subparsers.add_parser('fvecTrain', help='Format training file to ID windows with structural variants and create sets of windows to train as a features vector.')
parser_7 = subparsers.add_parser('subTrain', help='Subsample training file for quicker training of the predictor, can subsample a fraction (0.0-1.0) or a number (1-N).')
parser_8 = subparsers.add_parser('simChr', help='Simulate chromosomes containing duplications and deletions using the output of simCNV.')
parser_9 = subparsers.add_parser('simCNV', help='Simulate coordinates of duplications and deletions for multiple chromosomes, which can be combined later.')
parser_10 = subparsers.add_parser('recreateTotal', help='Create the total file from known CNVs for CNV chromosome simulation.')
parser_11 = subparsers.add_parser('covSummary', help='Summarise coverage by chromosome in coverage bedfile.')
parser_12 = subparsers.add_parser('simReads', help='Following simChr, uses WGsim to simulate reads across chromosomes.')
parser_13 = subparsers.add_parser('summarize', help='For a predictions file of known duplications and deletions, finds the number of correctly and falsely identified CNVs.')
parser_14 = subparsers.add_parser('ROC', help='If CNVs are known, works out the rate of true/false positives for given dataset (generated in fvecTrain) and classifier (generated in classify).')
parser_15 = subparsers.add_parser('quantify', help='Quantify CNVs across multiple samples mapped to the same reference.')
# predict: apply a trained classifier (or folder of classifiers) to a sample.
parser_1.add_argument('-i','--INPUT',help='Input bed file, generated by winStat and fvecSample.', required=True)
parser_1.add_argument('-o','--OUTPUT',help='Output file in bed format containing predicted CNVs.', required=True)
parser_1.add_argument('-t','--TRAIN',help='Training file or folder, generated by classify function.', required=True)
parser_1.set_defaults(mode='predict')
# classify: fit a classifier to a labelled training set.
parser_2.add_argument('-i','--INPUT',help='Input bed file, generated by fvecTrain.', required=True)
parser_2.add_argument('-o','--OUTPUT',help='Output training file in binary format.', required=True)
parser_2.add_argument('-m','--MODEL',help='Type of classifier used, can be set as follows: "CNN" - Convolutional Neural Network, "DTC" - Decision Tree Classifier, "ETC100" - Extra Trees Classifier (100 estimators), "ETC500" - Extra Trees Classifier (500 estimators), "RFC100" - Random Forest Classifier (100 estimators), "RFC500" - Random Forest Classifier (500 estimators).' ,choices=["CNN","DTC","ETC100","ETC500","RFC100","RFC500"],default="RFC100")
parser_2.set_defaults(mode='classify')
# winStat: per-window coverage statistics normalised by chromosome median.
parser_3.add_argument('-i','--INPUT',help='Input bed file, generated by genomeCoverageBed.', required=True)
parser_3.add_argument('-o','--OUTPUT',help='Output bed file summarizing stats in windows.', required=True)
parser_3.add_argument("-w",'--WINDOW_SIZE',help="The window size chosen to detect CNVs across.",type=int, default=50)
parser_3.add_argument("-s",'--STEP_SIZE',help="The step size chosen to detect CNVs across.",type=int, default=50)
parser_3.add_argument("-sum","--SUMMARY",help="Summary of coverages file",type=str)
parser_3.add_argument("-chr",'--CHROMOSOME',help="Bedfile of chromosomes to estimate statistics over with start and end of chromosomes.",type=str)
parser_3.set_defaults(mode='winStat')
# winStatExtra: like winStat but with an externally supplied coverage value.
parser_4.add_argument('-i','--INPUT',help='Input bed file, generated by genomeCoverageBed.', required=True)
parser_4.add_argument('-o','--OUTPUT',help='Output bed file summarizing stats in windows.', required=True)
parser_4.add_argument('-cov','--COVERAGE',help='Coverage to standardize by.', required=True)
parser_4.add_argument("-w",'--WINDOW_SIZE',help="The window size chosen to detect CNVs across.",type=int, default=50)
parser_4.add_argument("-s",'--STEP_SIZE',help="The step size chosen to detect CNVs across.",type=int, default=50)
parser_4.add_argument("-chr",'--CHROMOSOME',help="List of chromosomes to estimate statistics for. Can be a single chromosome, a comma seperated list or a file, with a chromosome on each line.",type=str)
parser_4.set_defaults(mode='winStatExtra')
# fvecSample: build feature vectors (focal window + neighbours) for a sample.
parser_5.add_argument("-i",'--INPUT',help="Input file in bed format, containing stats on each window, generated by winStat.",required=True)
parser_5.add_argument("-o",'--OUTPUT',help="Output file in bed format, containing stats on focal window and surrounding windows.",required=True)
parser_5.add_argument("-TE",'--TE',help="Bed or GFF file containing repeat locations in genome.")
parser_5.add_argument("-id",'--ID',help="ID of sample analysed.",type=str,default="NA")
parser_5.add_argument("-d",'--DIRECTORY',help="Directory to write output files to.",type=str,default="")
parser_5.add_argument("-windows",'--WINDOWS',help="Number of windows around focal window to include.",type=int,default=5)
parser_5.add_argument("-w",'--WINDOW_SIZE',help="Window size (bp).",type=int,default=50)
parser_5.add_argument("-s",'--STEP_SIZE',help="Step size (bp).",type=int, default=50)
parser_5.add_argument("-c",'--CUTOFF',help="Ignore windows with a higher proportion of masked positions than the cut off.",type=float, default=0.01)
parser_5.set_defaults(mode='fvecSample')
# fvecTrain: same as fvecSample but labels windows with known CNV state.
parser_6.add_argument("-i",'--INPUT',help="Input file in bed format, containing stats on each window, generated by winStat.",required=True)
parser_6.add_argument("-o",'--OUTPUT',help="Output file in bed format, containing stats on focal window and surrounding windows.",required=True)
parser_6.add_argument("-TE",'--TE',help="Bed or GFF file containing repeat locations in genome.")
parser_6.add_argument("-dels","--DELETION",help="Bed file containing known deletion locations.",required=True)
parser_6.add_argument("-dups",'--DUPLICATION',help="Bed file containing known duplication locations.",required=True)
parser_6.add_argument("-d",'--DIRECTORY',help="Directory to write output files to.",type=str,default="")
parser_6.add_argument("-windows",'--WINDOWS',help="Number of windows around focal window to include.",type=int,default=5)
parser_6.add_argument("-w",'--WINDOW_SIZE',help="Window size (bp).",type=int,default=50)
parser_6.add_argument("-s",'--STEP_SIZE',help="Step size (bp).",type=int, default=50)
parser_6.add_argument("-c",'--CUTOFF',help="Ignore windows with more masked positions than the cut off.",type=float, default=0.01)
parser_6.set_defaults(mode='fvecTrain')
# subTrain: downsample the training set.
parser_7.add_argument("-i",'--INPUT',help="Input bed file containing training windows.",required=True)
parser_7.add_argument("-o",'--OUTPUT',help="Output subsampled bed file containing training windows",required=True)
parser_7.add_argument("-N","--NUMBER",help="Number of samples to extract (1+) or fraction to downsample to (0-0.99).",type=float,required=True)
parser_7.set_defaults(mode='subTrain')
# simChr: rebuild chromosomes with the simulated CNVs spliced in.
parser_8.add_argument('-fasta',"--FASTA",help='Fasta file containing chromosomes to simulate CNVs in.', required=True)
parser_8.add_argument('-cnvBed',help='Bed file containing loci for CNVs to simulate.', required=True)
parser_8.add_argument("-id",'--ID',help="ID to label output files.",type=str,default="NA")
parser_8.add_argument("-d",'--DIRECTORY',help="Directory to write output files to.",type=str,default="")
parser_8.set_defaults(mode='simChr')
# simCNV: draw random duplication/deletion coordinates.
parser_9.add_argument("-fasta","--FASTA", required=True,help="Fasta file containing chromosomes to simulate CNVs in.")
parser_9.add_argument("-CNV",help="Number of duplications and deletions to simulate per megabase.",type=int,default=50)
parser_9.add_argument("-CNVsize",help="Mean size of CNV, size determined in a poisson distribution.",type=int,default=1000)
parser_9.add_argument("-delLength",help="Mean length of deletions to simulate.",type=int,default=1000)
parser_9.add_argument("-dupLength",help="Mean length of duplications to simulate.",type=int,default=1000)
parser_9.add_argument("-N","--NUMBER",help="Ploidy of chromosomes to simulate CNVs on.",type=int,default=1)
parser_9.add_argument("-d",'--DIRECTORY',help="Directory to write output files to.",type=str,default="")
parser_9.add_argument("-c",'--CUTOFF',help="Ignore windows with a higher proportion of masked positions than the cut off.",type=float, default=0.01)
parser_9.add_argument("-TE",'--TE',help="Bed or GFF file containing repeat locations in genome.")
parser_9.set_defaults(mode='simCNV')
# recreateTotal: merge known CNV beds back into a per-window "total" file.
parser_10.add_argument("-fasta","--FASTA",help="Fasta file containing chromosomes to simulate CNVs in.", required=True)
parser_10.add_argument("-dels","--DELETION",help="Bed file containing deletion loci.", required=True)
parser_10.add_argument("-dups",'--DUPLICATION',help="Bed file containing duplication loci", required=True)
parser_10.add_argument("-o",'--OUTPUT',help="Output file containing windows with and without CNVs.", required=True)
parser_10.add_argument("-d",'--DIRECTORY',help="Directory to write output files to.",type=str,default="")
parser_10.set_defaults(mode='recreateTotal')
# covSummary: per-chromosome coverage summary.
parser_11.add_argument("-i",'--INPUT',required=True,help="Bed file generated by genomeCoverageBed.")
parser_11.add_argument("-chr",'--CHROMOSOME',help="List of chromosomes to summarize.")
parser_11.add_argument("-sum","--SUMMARY",help="Summary file to output.")
parser_11.set_defaults(mode='covSummary')
# simReads: wgsim read simulation over the simulated chromosomes.
parser_12.add_argument("-fasta","--FASTA",help="Fasta sequence to simulate reads for.",required=True)
parser_12.add_argument("-cov",'--COVERAGE',help="Coverage of sample to simulate reads for.",type=int,default=10)
parser_12.add_argument("-d",'--DIRECTORY',help="Directory to write output files to.",type=str,default="")
parser_12.add_argument("-id",'--ID',help="ID to label output files.",type=str,default="NA")
parser_12.add_argument("-RL",'--READ_LENGTH',help="Read Length (bp).",type=int,default=100)
parser_12.add_argument("-chr",'--CHROMOSOME',help="List of chromosomes to estimate statistics for.",type=str)
parser_12.add_argument("-se",'--SE',help="Simulate single end reads instead of paired end reads.",type=bool,default=False)
parser_12.set_defaults(mode='simReads')
# summarize: benchmark predictions against known CNVs.
parser_13.add_argument("-i",'--INPUT',help="Input file containing predicted CNVs, generated by predict function",required=True)
parser_13.add_argument("-o",'--OUTPUT',help="Summary bed file.",required=True)
parser_13.add_argument("-c",'--CUTOFF',help="Confidence cutoff, CNVs below this value are removed.",type=float,default=0.0)
parser_13.add_argument("-w",'--WINDOW_SIZE',help="Window size (bp).",type=int,default=50)
parser_13.add_argument("-dups",'--DUPLICATION',help="Bed file containing duplication loci.")
parser_13.add_argument("-dels","--DELETION",help="Bed file containing deletion loci.")
parser_13.add_argument("-id",'--ID',help="ID to label output files.",type=str,default="NA")
parser_13.set_defaults(mode='summarize')
# ROC: true/false-positive rates of a classifier on labelled data.
parser_14.add_argument("-i",'--INPUT',help="Input bed file, generated by fvecTrain.",required=True)
parser_14.add_argument("-o",'--OUTPUT',help="File containing false-positive and true-positive rates for duplications and deletions.",required=True)
parser_14.add_argument('-t','--TRAIN',help='Training file or folder, generated by classify function.', required=True)
parser_14.set_defaults(mode='ROC')
# quantify: aggregate CNV calls across multiple samples.
parser_15.add_argument("-i",'--INPUT',help="List of prediction files to quantify CNVs over.",required=True)
parser_15.add_argument("-o",'--OUTPUT',help="File to output CNV windows to.",required=True)
parser_15.add_argument("-gff",'--GFF',help="GFF containing genes or other factor to identify if CNVs are present in each factor.")
parser_15.add_argument("-c",'--CUTOFF',help="Confidence cutoff, CNVs below this value are removed.",type=float,default=0.5)
parser_15.add_argument("-w",'--WINDOW_SIZE',help="Window size (bp).",type=int,default=50)
parser_15.set_defaults(mode='quantify')
# parser_14.add_argument('-foo', '--foo', action='store_true')
# parser_14.set_defaults(mode='readme')
# Legacy flat options kept for backwards compatibility with the old
# "-f modeName" invocation style (pre-sub-command interface).
parser.add_argument("-f",'--FUNCTION',help="The function which will be used within the script, the options are: predict, winStat, simCNV, simChr, fvecTrain, fvecSample, recreateTotal, covSummary, winStatExtra, subTrain,summarize",type=str)
parser.add_argument("-d",'--DIRECTORY',help="Path to export simulated files such as beds containing deletions & duplications or simulated fasta")
parser.add_argument("-id",'--ID',help="The sample ID",type=str, default="NA")
parser.add_argument("-i",'--INPUT',help="The input file across the various functions, may differ in format",type=str)
parser.add_argument("-o",'--OUTPUT',help="The output file across the various functions, may differ in format",type=str)
parser.add_argument('-quiet','--QUIET', help="If set, does not print any messages.", action='store_true')
# With no arguments at all, show the full help instead of failing.
if len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)
args = parser.parse_args()
argsDict = vars(args)
function=args.FUNCTION
"""
files required for input, a training file with the coverages and std dev of different classes
an input bed file with coverages by window
an output bedfile
"""
# --- mode: predict --------------------------------------------------------
# Applies a previously trained classifier pair (CNV class + ploidy) to a
# sample feature-vector file. args.TRAIN may be a single classifier file or
# a directory of classifiers, in which case the calls are bootstrapped and
# the consensus (mode) state is reported.
if argsDict['mode'] in ['predict'] or function == "predict":
    """
    input file is in the following format:
    CHROMOSOME	START	END	STRAIN	COV-5	COV-4	COV-3	COV-2	COV-1	COV	COV+1	COV+2	COV+3	COV+4	COV+5	SD-5	SD-4	SD-3	SD-2	SD-1	SD	SD+1	SD+2	SD+3	SD+4	SD+5
    Where COV is the average coverage of a window, up to 5 up and downstrain of the focal window, and SD is the standard deviation of coverage in each window
    e.g.
    2L	8000	8249	N	1.073	0.902	1.085	0.927	0.976	1.024	1	1.049	1.183	1.122	0.951	0.141	0.11	0.152	0.067	0.093	0.198	0.163	0.126	0.111	0.117	0.302
    output file is in the following format:
    CHROMOSOME	START	END	STRAIN	MEDIAN_COV	PREDICTED_CNV	PROBABILITY	PREDICTED_PLOIDY	PROBABILITY
    e.g.
    2L	8000	8249	N	1.024	N	1.0	1	1.0
    """
    import pandas as pd
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.datasets import make_classification
    # NOTE(review): sklearn.externals.joblib was removed in scikit-learn
    # 0.23+; modern installs need "import joblib" instead — confirm the
    # pinned sklearn version before changing.
    from sklearn.externals import joblib
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.ensemble import ExtraTreesClassifier
    import os
    if os.path.isfile(args.TRAIN) == True:
        # Single classifier pair: <TRAIN> predicts CNV class, <TRAIN>2
        # predicts ploidy/copy number.
        if args.QUIET == False:
            print("Classifying over a single training set")
        clf = joblib.load(args.TRAIN)
        clf2 = joblib.load(args.TRAIN + "2")
        input = args.INPUT
        test_in = pd.read_csv(args.INPUT,header=None,sep="\t")
        output = args.OUTPUT
        # Drop the coordinate/ID columns; the rest are the feature vector.
        test_in2 = test_in.drop(test_in[[0,1,2,3]], axis=1)
        test_Y = []
        test_in2.columns = list(range(0,len(test_in2.columns)))
        test_in2_y = []
        test_in2_yA = []
        test_in2_y2 = []
        test_in2_yA2 = []
        if args.QUIET == False:
            print("Classifying windows")
        test_in2_y.extend(list(clf.predict(test_in2)))
        test_in2_y2.extend(list(clf2.predict(test_in2)))
        # Report the probability of the winning class for each window.
        test_in2_yA.extend(list(pd.DataFrame(clf.predict_proba(test_in2),columns=None).max(axis=1)))
        test_in2_yA2.extend(list(pd.DataFrame(clf2.predict_proba(test_in2),columns=None).max(axis=1)))
        # NOTE(review): (len(...)-4)/2 is float division under Python 3 and
        # would fail as a column index; works on Python 2 (__future__ division
        # is imported, so possibly intended as integer position) — verify.
        out_df = pd.DataFrame({"chr":list(test_in[0]), "start":list(test_in[1]), "end":list(test_in[2]), "ID":list(test_in[3]), "coverage":list(test_in2[(len(test_in2.columns)-4)/2]) ,"CNV":test_in2_y,"CNVprob":test_in2_yA,"ploidy":test_in2_y2,"ploidyprob":test_in2_yA2})
        out_df.to_csv(output,sep="\t",index =False,header=None)
    elif os.path.isfile(args.TRAIN) == False and os.path.isdir(args.TRAIN) == True:
        # Directory of classifier pairs: predict with each, then take the
        # per-window modal call and the fraction of classifiers agreeing.
        if args.QUIET == False:
            print("Bootstrapping over multiple training sets")
        pathe = args.TRAIN
        if pathe.endswith("/") == False:
            pathe += "/"
        out_bs_1 = pd.DataFrame(columns=[0])
        out_bs_2 = pd.DataFrame(columns=[0])
        count = 0
        test_in = pd.read_csv(args.INPUT,header=None,sep="\t")
        output = args.OUTPUT
        test_in2 = test_in.drop(test_in[[0,1,2,3]], axis=1)
        test_Y = []
        test_in2.columns = list(range(0,len(test_in2.columns)))
        for d,s,f in os.walk(pathe):
            for inf in f:
                # Only use files that come as a <name>/<name>2 pair.
                if os.path.isfile(pathe + inf) == True and os.path.isfile(pathe + inf + "2") == True:
                    if args.QUIET == False:
                        print("Processing classifier " + str(count+1))
                    clf = joblib.load(pathe + inf)
                    clf2 = joblib.load(pathe + inf + "2")
                    out_bs_1[count] = list(clf.predict(test_in2))
                    out_bs_2[count] = list(clf2.predict(test_in2))
                    count += 1
        if args.QUIET == False:
            print("Estimating consensus states")
        # Modal call across classifiers, and the agreement fraction as the
        # reported probability.
        bs_1 = list(out_bs_1.mode(axis=1)[0])
        bs_1_prob = list(out_bs_1[out_bs_1 == bs_1].count(axis='columns')/float(len(out_bs_1.columns)))
        bs_2 = list(out_bs_2.mode(axis=1)[0])
        bs_2_prob = list(out_bs_2[out_bs_2 == bs_2].count(axis='columns')/float(len(out_bs_2.columns)))
        # NOTE(review): coverage column index here is (len/4)-1, which
        # disagrees with the (len-4)/2 formula used in the single-classifier
        # branch above — confirm which is intended.
        out_df = pd.DataFrame({"chr":list(test_in[0]), "start":list(test_in[1]), "end":list(test_in[2]), "ID":list(test_in[3]), "coverage":list(test_in2[(len(test_in2.columns)/4)-1]) ,"CNV":bs_1,"CNVprob":bs_1_prob,"ploidy":bs_2,"ploidyprob":bs_2_prob})
        out_df.to_csv(output,sep="\t",index =False,header=None)
# --- mode: classify -------------------------------------------------------
# Trains two classifiers on a labelled fvecTrain file and pickles them:
# args.OUTPUT (CNV class, model chosen by -m) and args.OUTPUT + "2"
# (copy number / frequency, always a 100-tree random forest).
elif argsDict['mode'] in ['classify'] or function == "classify":
    import pandas as pd
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.datasets import make_classification
    # NOTE(review): sklearn.externals.joblib was removed in scikit-learn
    # 0.23+; modern installs need "import joblib" — confirm pinned version.
    from sklearn.externals import joblib
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.ensemble import ExtraTreesClassifier
    # Maps the -m/--MODEL choice onto an unfitted estimator.
    models = {"RFC100":RandomForestClassifier(n_estimators=100), "RFC500":RandomForestClassifier(n_estimators=500), "CNN":MLPClassifier(), "ETC100":ExtraTreesClassifier(n_estimators=100), "ETC500":ExtraTreesClassifier(n_estimators=500), "DTC":DecisionTreeClassifier()}
    training_in = pd.read_csv(args.INPUT,header=None,sep="\t")
    # Columns 0-4 are chrom/start/end/class/copy-number; the rest are features.
    X = training_in.drop(training_in[[0,1,2,3,4]], axis=1)
    X.columns = list(range(0,len(X.columns)))
    # Column 3 holds the CNV class label (N/Dup/Del).
    Y = list(training_in[3])
    clf = models[args.MODEL]
    clf.fit(X,Y)
    # Column 4 holds the copy-number label; stringified for classification.
    Y2 = list(map(str,list(training_in[4])))
    clf2 = RandomForestClassifier(n_estimators=100)
    clf2.fit(X,Y2)
    joblib.dump(clf, args.OUTPUT)
    joblib.dump(clf2, args.OUTPUT + "2")
    if args.QUIET == False:
        print("Classifier Trained")
# --- mode: winStat --------------------------------------------------------
# Computes per-base coverage with bedtools genomecov, normalises each base
# by its chromosome's median (of non-zero) coverage, then summarises the
# relative coverage in sliding windows (median/IQR/mean/SD per window).
elif argsDict['mode'] in ['winStat'] or function == "winStat":
    import pandas as pd
    import numpy as np
    import scipy.stats
    import os
    """
    input is generated by genomeCoverageBed -d in the following format:
    CHR	POS	COVERAGE
    Following that, per chromosome, find the median coverage of covered bases.
    Can find median for all chromosomes or a specified set of them, one chromosome ID per line.
    """
    os.system(bedtools_path + " genomecov -d -ibam " + args.INPUT + " > dudeml_temp_covsperbase.bed")
    if args.QUIET == False:
        print("Calculating median coverage")
    test = pd.read_table("dudeml_temp_covsperbase.bed",header=None)
    covs_median = {}
    splits_median = {}
    for line in open(args.CHROMOSOME):
        i = line.split()[0].rstrip()
        # Median of non-zero coverage restricted to chromosome i.
        # NOTE(review): the two boolean masks here have different lengths
        # (the first filter shortens the Series); relies on pandas index
        # alignment — verify against the pandas version in use.
        covs_median[i] = test[2][test[2] != 0][test[0] == i].median()
        print(i,covs_median[i])
    if args.SUMMARY is not None:
        # Optionally persist the per-chromosome medians for winStatExtra.
        out = open(args.SUMMARY,"w")
        for i in covs_median:
            out.write(i + "\t" + str(covs_median[i]) + "\n")
        out.close()
    if args.QUIET == False:
        print("Calculating relative median coverage per window")
    chr_stats = []
    count = 0
    "function takes in a pandas dataframe column and outputs a dataframe containing the start and end of window, as well as window coverage median and standard deviation"
    def rolling_with_step(chr,s, window, step):
        # Build a 2-D index matrix so every row is one window's positions,
        # then compute the summary statistics vectorised over rows.
        vert_idx_list = np.arange(1, s.size - window, step)
        hori_idx_list = np.arange(window)
        A, B = np.meshgrid(hori_idx_list, vert_idx_list)
        idx_array = A + B
        x_array = s.values[idx_array]
        # NOTE(review): idx is computed but never used below.
        idx = list(s.index[vert_idx_list + (int(window))])
        med = list(np.around(list(map(np.median, x_array)),4))
        intq = list(np.around(list(map(scipy.stats.iqr, x_array)),4))
        means = list(np.around(list(map(np.mean, x_array)),4))
        std = list(np.around(list(map(np.std, x_array)),4))
        return pd.DataFrame({"chr":chr,"start":vert_idx_list,"end":vert_idx_list + window,"med":med,"iqr":intq,"mean":means,"std":std})
    out_df = pd.DataFrame(columns=["chr","start","end","med","iqr","mean","std"])
    """
    For each chromosome, divide each base by the chromosome median (or total median).
    Following that, finds the median and standard deviation for windows of a given size
    """
    for i in covs_median:
        test_chrs = test[test[0] == i]
        # Per-base coverage relative to the chromosome median.
        test_chrs_3 = test_chrs[2]/covs_median[i]
        wins_step = rolling_with_step(i,test_chrs_3,args.WINDOW_SIZE-1,args.STEP_SIZE)
        if args.QUIET == False:
            print("Chromosome " + str(i) + " processed")
        out_df = pd.concat([out_df,wins_step])
    out_df['chr']=out_df['chr'].astype(str)
    out_df['start']=out_df['start'].astype(int)
    out_df['end']=out_df['end'].astype(int)
    out_df.to_csv(args.OUTPUT,sep="\t",index =False,columns=None,header=None)
    os.remove("dudeml_temp_covsperbase.bed")
# --- mode: simChr ---------------------------------------------------------
# Rebuilds each chromosome sequence with the simulated CNVs applied:
# "normal" segments are copied once, "del" segments skipped, and "dup"
# segments repeated according to their copy number (last bed column).
elif argsDict['mode'] in ['simChr'] or function == "simChr":
    import pandas as pd
    import numpy as np
    pathOut = args.DIRECTORY
    if pathOut != "" and pathOut.endswith("/") == False:
        pathOut += "/"
    from Bio import SeqIO
    import os
    # Work on a copy of the reference so the original fasta is untouched.
    os.system("cp " + args.FASTA + " " + pathOut + args.ID + "_noCNV.fa")
    #os.system("maskFastaFromBed -fi " + args.FASTA + " -bed " + args.TE + " -fo " + pathOut + args.ID + "_noCNV.fa")
    # NOTE: local names chr/chr2 shadow nothing harmful here but are
    # reference-sequence and rebuilt-sequence maps keyed by chromosome ID.
    chrs = []
    chr = {}
    chr2 = {}
    for r in SeqIO.parse(open(pathOut + args.ID + "_noCNV.fa"),"fasta"):
        chrs.append(r.id)
        chr[r.id] = str(r.seq)
        chr2[r.id] = ""
    for line in open(args.cnvBed):
        if line.split()[3].rstrip() == "normal":
            chr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]
        elif line.split()[3].rstrip() == "del":
            # Deleted segment: contribute nothing to the rebuilt sequence.
            pass
        elif line.split()[3].rstrip() == "dup":
            if float(line.split()[-1].rstrip()) > 1.5:
                # Copy number > 1.5: repeat the segment int(copy) times.
                for v in range(0,int(line.split()[-1].rstrip())):
                    chr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]
            else:
                # Low-frequency duplication: emit the segment twice.
                chr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]
                chr2[line.split()[0]] += chr[line.split()[0]][int(line.split()[1]):int(line.split()[2])]
    # One output fasta per chromosome.
    for i in chrs:
        out = open(pathOut + i + "_" + args.ID + "_CNV.fa","w")
        out.write(">" + i + "\n" + chr2[i] + "\n")
        out.close()
    os.remove(pathOut + args.ID + "_noCNV.fa")
# --- mode: fvecTrain ------------------------------------------------------
# Labels coverage windows with their known CNV state (from the dup/del bed
# files) and emits training feature vectors: focal window coordinates,
# class, copy number, plus the stats of +/- args.WINDOWS flanking windows.
elif argsDict['mode'] in ['fvecTrain'] or function == "fvecTrain":
    import os
    import pandas as pd
    import numpy as np
    import math
    from shutil import copyfile
    pathOut = args.DIRECTORY
    if pathOut != "" and pathOut.endswith("/") == False:
        pathOut += "/"
    # Round coordinates out/in to whole window boundaries.
    def roundup(x):
        return int(math.ceil(x / args.WINDOW_SIZE)) * args.WINDOW_SIZE
    def rounddown(x):
        return int(math.floor(x / args.WINDOW_SIZE)) * args.WINDOW_SIZE
    """If ignoring TEs is required, due to their inherit weirdness with split reads/coverage, this removes windows with TE sequences."""
    if args.TE is not None:
        os.system(bedtools_path + " intersect -v -wa -a "+ args.INPUT + " -b " + args.TE + " -f " + str(args.CUTOFF) + " > "+ pathOut + "dudeml_temp.bed")
    elif args.TE is None:
        copyfile(args.INPUT, pathOut + "dudeml_temp.bed")
    del_cp = {}
    dup_cp = {}
    dup_temp_1 = open("dup_temp_1.bed","w")
    del_temp_1 = open("del_temp_1.bed","w")
    """Reformat deletion and duplication windows to find overlapping windows with"""
    for line in open(args.DUPLICATION):
        line = line.rstrip()
        # Effective copy number = freq * copies + (1 - freq) * 1.
        cp = str((float(line.split()[5])*float(line.split()[4])) + ((1-float(line.split()[4])) * 1))
        dup_temp_1.write("\t".join([line.split()[0],str(rounddown(int(line.split()[1]))),str(roundup(int(line.split()[2]))),cp]) + "\n")
    for line in open(args.DELETION):
        line = line.rstrip()
        cp = str((float(line.split()[5])*float(line.split()[4])) + ((1-float(line.split()[4])) * 1))
        del_temp_1.write("\t".join([line.split()[0],str(rounddown(int(line.split()[1]))),str(roundup(int(line.split()[2]))),cp]) + "\n")
    dup_temp_1.close()
    del_temp_1.close()
    # Tile each CNV interval into analysis windows, carrying the copy number
    # through as the source tag (-i src).
    os.system(bedtools_path + " makewindows -b dup_temp_1.bed -w " + str(args.WINDOW_SIZE) + " -s " + str(args.STEP_SIZE) + " -i src > dup_temp_2.bed")
    os.system(bedtools_path + " makewindows -b del_temp_1.bed -w " + str(args.WINDOW_SIZE) + " -s " + str(args.STEP_SIZE) + " -i src > del_temp_2.bed")
    # Lookup: "chrom\tstart+1\tend" -> copy number.
    for line in open("dup_temp_2.bed"):
        dup_cp[line.split()[0] + "\t" + str(int(line.split()[1]) + 1) + "\t" + line.split()[2]] = line.split()[3]
    for line in open("del_temp_2.bed"):
        del_cp[line.split()[0] + "\t" + str(int(line.split()[1]) + 1) + "\t" + line.split()[2]] = line.split()[3]
    # Annotate every coverage window with its class (dup/del/N) + copy number.
    out = open(pathOut + "dudeml_temp2.bed","w")
    for line in open(pathOut + "dudeml_temp.bed"):
        copy = "N"
        line = line.rstrip()
        liner = line.split()
        if line.split()[0] + "\t" + line.split()[1] + "\t" + str(int(line.split()[2])) in dup_cp:
            out.write("\t".join([liner[0],liner[1],liner[2],"dup",dup_cp[line.split()[0] + "\t" + line.split()[1] + "\t" + str(int(line.split()[2]))], "\t".join(line.split()[3:])]) + "\n")
        elif line.split()[0] + "\t" + line.split()[1] + "\t" + str(int(line.split()[2])) in del_cp:
            out.write("\t".join([liner[0],liner[1],liner[2],"del",del_cp[line.split()[0] + "\t" + line.split()[1] + "\t" + str(int(line.split()[2]))], "\t".join(line.split()[3:])]) + "\n")
        else:
            # Only keep rows with the expected column counts as "normal".
            if len(liner) == 5 or len(liner) == 7 or len(liner) == 8:
                out.write("\t".join([liner[0],liner[1],liner[2],"N","1.0", "\t".join(line.split()[3:])]) + "\n")
    out.close()
    # Step size between consecutive windows (defaults to the window size).
    v=args.WINDOW_SIZE
    if args.STEP_SIZE is not None:
        v=int(args.STEP_SIZE)
    elif args.STEP_SIZE is None:
        v=int(args.WINDOW_SIZE)
    # Ring buffer of the last 2*WINDOWS+1 windows; focal one in the middle.
    window_pos = [[0,1,2,3,4,5]] * ((2*args.WINDOWS) + 1)
    output = open(args.OUTPUT,"w")
    count = 0
    for line in open(pathOut + "dudeml_temp2.bed"):
        count += 1
        if count % 100000 == 0:
            if args.QUIET == False:
                print(int(count),"windows processed")
        window_pos += [window_pos.pop(0)]
        window_pos[(2*args.WINDOWS)] = line.rstrip().split()
        class_ud = "N"
        # All buffered windows must be on the same chromosome.
        if len(list(set([item[0] for item in window_pos]))) == 1:
            if window_pos[args.WINDOWS][3] == "dup" or window_pos[args.WINDOWS][3] == "Dup":
                class_ud = "Dup"
            elif window_pos[args.WINDOWS][3] == "del" or window_pos[args.WINDOWS][3] == "Del":
                class_ud = "Del"
            # And they must form an unbroken run of step-size-spaced starts.
            cc = 0
            cv = 0
            for k in window_pos:
                if int(k[1]) == int(window_pos[args.WINDOWS][1]) - (v*(args.WINDOWS - cc)):
                    cv += 1
                cc += 1
            if cv == len(window_pos):
                cq = [str(window_pos[args.WINDOWS][0]),str(window_pos[args.WINDOWS][1]), str(window_pos[args.WINDOWS][2]), class_ud,str(window_pos[args.WINDOWS][4])]
                # Append the four stat columns of every buffered window.
                for k in window_pos:
                    cq.append(str(k[5]))
                    cq.append(str(k[6]))
                    cq.append(str(k[7]))
                    cq.append(str(k[8]))
                output.write("\t".join(cq) + "\n")
    output.close()
    # Clean up all intermediate files.
    os.remove("dudeml_temp.bed")
    os.remove("dudeml_temp2.bed")
    os.remove("dup_temp_1.bed")
    os.remove("del_temp_1.bed")
    os.remove("dup_temp_2.bed")
    os.remove("del_temp_2.bed")
elif argsDict['mode'] in ['fvecSample'] or function == "fvecSample":
import os
import pandas as pd
import numpy as np
import gzip
from shutil import copyfile
pathOut = args.DIRECTORY
if pathOut != "" and pathOut.endswith("/") == False:
pathOut += "/"
test = pd.read_csv(args.INPUT,header=None,sep="\t")
if args.OUTPUT.endswith(".gz"):
output = open(args.OUTPUT.rstrip(".gz"), 'w')
else:
output = open(args.OUTPUT,"w")
if args.TE is not None:
os.system(bedtools_path + " intersect -v -wa -a "+ args.INPUT + " -b " + args.TE + " -f " + str(args.CUTOFF) + " > "+ pathOut + "dudeml_temp.bed")
elif args.TE is None:
copyfile(args.INPUT, pathOut + "dudeml_temp.bed")
v=args.WINDOW_SIZE
if args.STEP_SIZE is not None:
v=int(args.STEP_SIZE)
elif args.STEP_SIZE is None:
v=int(args.WINDOW_SIZE)
window_pos = [[0,1,2,3,4,5]] * ((2*args.WINDOWS) + 1)
count = 0
for line in open(pathOut + "dudeml_temp.bed"):
count += 1
if count % 100000 == 0:
if args.QUIET == False:
print(int(count),"windows processed")
window_pos += [window_pos.pop(0)]
window_pos[(2*args.WINDOWS)] = line.rstrip().split()
if len(list(set([item[0] for item in window_pos]))) == 1:
cc = 0
cv = 0
for k in window_pos:
if int(k[1]) == int(window_pos[args.WINDOWS][1]) - (v*(args.WINDOWS- cc)):
cv += 1
cc += 1
if cv == len(window_pos):
cq = [str(window_pos[args.WINDOWS][0]),str(window_pos[args.WINDOWS][1]), str(window_pos[args.WINDOWS][2]), str(args.ID)]
for k in window_pos:
cq.append(str(k[3]))
cq.append(str(k[4]))
cq.append(str(k[5]))
cq.append(str(k[6]))
output.write("\t".join(cq) + "\n")
if args.OUTPUT.endswith(".gz"):
os.system("gzip " + args.OUTPUT.rstrip(".gz"))
os.remove(pathOut + "dudeml_temp.bed")
elif argsDict['mode'] in ['simCNV'] or function == "simCNV":
	# simCNV: simulate duplication/deletion coordinates across a FASTA genome,
	# assign each CNV a population frequency and (for dups) a copy number, and
	# write per-sample dup/del/normal/total BED files.
	import pandas as pd
	import numpy as np
	from Bio import SeqIO
	import random
	import os
	df_del = pd.DataFrame(columns = [1,2,3,4])
	df_dup = pd.DataFrame(columns = [1,2,3,4])
	pathOut = args.DIRECTORY
	if pathOut != "" and pathOut.endswith("/") == False:
		pathOut += "/"
	out = open(pathOut + "chrs.bed","w")
	if args.QUIET == False:
		print("Generating duplication and deletion coordinates")
	for r in SeqIO.parse(open(args.FASTA),"fasta"):
		# One BED line per chromosome (1-based start, full length).
		out.write("\t".join([r.id,"1",str(len(str(r.seq)))]) + "\n")
		dup_lengths = []
		del_lengths = []
		# args.CNV = CNVs per megabase; scale by chromosome length.
		cnv_count = round((len(str(r.seq))/1000000)*args.CNV)
		# Draw CNV lengths from a normal distribution, rejecting lengths <= 50 bp.
		while len(dup_lengths) < cnv_count:
			x = round(np.random.normal(args.dupLength, args.CNVsize, 1)[0])
			if x > 50:
				dup_lengths.append(x)
		while len(del_lengths) < cnv_count:
			x = round(np.random.normal(args.delLength, args.CNVsize, 1)[0])
			if x > 50:
				del_lengths.append(x)
		dup_start = list(np.random.randint(len(str(r.seq)), size=(1, cnv_count))[0])
		del_start = list(np.random.randint(len(str(r.seq)), size=(1, cnv_count))[0])
		dup_ends = list(map(int,[a + b for a, b in zip(dup_start, dup_lengths)]))
		del_ends = list(map(int,[a + b for a, b in zip(del_start, del_lengths)]))
		dups = pd.DataFrame({1:[r.id]*cnv_count,2:dup_start,3:dup_ends,4:dup_lengths})
		dels = pd.DataFrame({1:[r.id]*cnv_count,2:del_start,3:del_ends,4:del_lengths})
		# NOTE(review): DataFrame.append was removed in pandas 2.x; would need
		# pd.concat on a modern pandas -- confirm the targeted pandas version.
		df_dup = df_dup.append(dups)
		df_del = df_del.append(dels)
	out.close()
	df_dup.to_csv(pathOut + "dup.bed",header=False,index=False,sep="\t")
	df_del.to_csv(pathOut + "del.bed",header=False,index=False,sep="\t")
	# Sort and merge overlapping simulated CNVs.
	os.system(bedtools_path + " sort -i " + pathOut + "dup.bed | " + bedtools_path + " merge -i stdin > " + pathOut + "dup2.bed")
	os.system(bedtools_path + " sort -i " + pathOut + "del.bed | " + bedtools_path + " merge -i stdin > " + pathOut + "del2.bed")
	# Optionally drop CNVs overlapping repeats/TEs.
	if args.TE is not None:
		os.system(bedtools_path + " intersect -v -wa -a "+ pathOut + "del2.bed -b " + args.TE + " -f " + str(args.CUTOFF) + " > "+ pathOut + "del3.bed")
		os.system(bedtools_path + " intersect -v -wa -a "+ pathOut + "dup2.bed -b " + args.TE + " -f " + str(args.CUTOFF) + " > "+ pathOut + "dup3.bed")
	elif args.TE is None:
		os.system("cp "+ pathOut + "del2.bed "+ pathOut + "del3.bed")
		os.system("cp "+ pathOut + "dup2.bed "+ pathOut + "dup3.bed")
	# Remove dups that overlap dels and vice versa so calls are unambiguous.
	os.system(bedtools_path + " intersect -wa -v -a " + pathOut + "dup3.bed -b " + pathOut + "del3.bed > " + pathOut + "dup4.bed")
	os.system(bedtools_path + " intersect -wa -v -a " + pathOut + "del3.bed -b " + pathOut + "dup3.bed > " + pathOut + "del4.bed")
	# NOTE(review): the next four lines are redundant -- no_chrs/chr_freq are
	# rebuilt (with rounding) immediately below; this first pass is dead work.
	no_chrs = list(range(1, int(args.NUMBER)+1))
	chr_freq = {}
	for i in no_chrs:
		chr_freq[i] = i/args.NUMBER
	no_chrs = list(range(1, int(args.NUMBER)+1))
	chr_freq = {}
	if args.QUIET == False:
		print("Generating duplication and deletion frequencies")
	# chr_freq[i] = i/NUMBER: the population frequency assigned to sample i.
	for i in no_chrs:
		chr_freq[i] = round(i/args.NUMBER,3)
	for i in ["del","dup"]:
		out = open(pathOut + str(i) + "5.bed","w")
		for line in open(pathOut + i + "4.bed"):
			if i == "del":
				num = random.randint(1,args.NUMBER)
				out.write(line.rstrip() + "\tdel\t" + str(chr_freq[num]) + "\t0\n")
			elif i == "dup":
				num = random.randint(1,args.NUMBER)
				count = np.random.choice([2,3,4,5,6,7,8,9,10], 1, p=[0.5, 0.1, 0.1, 0.05, 0.05,0.05,0.05,0.05,0.05])[0]
				freqs = num/args.NUMBER
				# Population-mean copy number for this dup.
				cp = (count*freqs) + ((1-freqs) * 1)
				# NOTE(review): this loop never recomputes cp, so if it were
				# ever entered it would spin forever; with count >= 2 and
				# freqs > 0, cp > 1 always holds, so the loop is currently
				# unreachable -- but it should recompute cp (and freqs) from
				# the redrawn num/count if the guard is ever meant to fire.
				while cp == 1.0:
					num = random.randint(1,args.NUMBER)
					count = np.random.choice([2,3,4,5,6,7,8,9,10], 1, p=[0.5, 0.1, 0.1, 0.05, 0.05,0.05,0.05,0.05,0.05])[0]
				out.write(line.rstrip() + "\tdup\t" + str(chr_freq[num]) + "\t" + str(count) + "\n")
		out.close()
		# Sample j carries every CNV whose frequency >= chr_freq[j].
		for j in chr_freq:
			out = open(pathOut + i + "." + str(j) + ".bed","w")
			for line in open(pathOut + i + "5.bed"):
				if float(line.split()[4]) >= chr_freq[j]:
					out.write(line)
			out.close()
	if args.QUIET == False:
		print("Removing overlaps, generating total file")
	for i in no_chrs:
		print("Creating bedfiles for sample " + str(i))
		# "normal" = every 5 bp window not covered by a dup or del in sample i.
		os.system("bedtools makewindows -b " + pathOut + "chrs.bed -w 5 > " + pathOut + "normal." + str(i) + ".bed")
		os.system(bedtools_path + " intersect -v -wa -a " + pathOut + "normal." + str(i) + ".bed -b " + pathOut + "dup." + str(i) + ".bed | " + bedtools_path + " intersect -v -wa -a stdin -b " + pathOut + "del." + str(i) + ".bed | " + bedtools_path + " sort -i stdin | " + bedtools_path + " merge -i stdin > " + pathOut + "normal2." + str(i) + ".bed")
		out = open(pathOut + "normal3." + str(i) + ".bed","w")
		for line in open(pathOut + "normal2." + str(i) + ".bed"):
			out.write(line.rstrip() + "\tnormal\t1\t1\n")
		out.close()
		os.system("cat " + pathOut + "normal3." + str(i) + ".bed " + pathOut + "dup." + str(i) + ".bed " + pathOut + "del." + str(i) + ".bed | " + bedtools_path + " sort -i stdin > " + pathOut + "total." + str(i) + ".bed")
		os.remove(pathOut + "normal3." + str(i) + ".bed")
		os.remove(pathOut + "normal2." + str(i) + ".bed")
		os.remove(pathOut + "normal." + str(i) + ".bed")
	# Clean up intermediates.
	os.remove(pathOut + "del.bed")
	os.remove(pathOut + "del2.bed")
	os.remove(pathOut + "del3.bed")
	os.remove(pathOut + "del4.bed")
	os.remove(pathOut + "del5.bed")
	os.remove(pathOut + "dup.bed")
	os.remove(pathOut + "dup2.bed")
	os.remove(pathOut + "dup3.bed")
	os.remove(pathOut + "dup4.bed")
	os.remove(pathOut + "dup5.bed")
	os.remove(pathOut + "chrs.bed")
elif argsDict['mode'] in ['recreateTotal'] or function == "recreateTotal":
import pandas as pd
import numpy as np
from Bio import SeqIO
import random
import os
out = open(pathOut + "chrs.bed","w")
for r in SeqIO.parse(open(args.FASTA),"fasta"):
out.write("\t".join([r.id,"1",str(len(str(r.seq)))]) + "\n")
out.close()
if args.QUIET == False:
print("recreating bedfiles for sample")
pathOut = args.DIRECTORY
if pathOut != "" and pathOut.endswith("/") == False:
pathOut += "/"
os.system("bedtools makewindows -b " + pathOut + "chrs.bed -w 3 > " + pathOut + "normal.bed")
os.system(bedtools_path + " intersect -v -wa -a " + pathOut + "normal." + str(i) + ".bed -b " + args.DUPLICATION + " | " + bedtools_path + " intersect -v -wa -a stdin -b " + args.DELETION + " | " + bedtools_path + " sort -i stdin | " + bedtools_path + " merge -i stdin > " + pathOut + "normal2.bed")
out = open(pathOut + "normal3.bed","w")
for line in open(pathOut + "normal2.bed"):
out.write(line.rstrip() + "\tnormal\t1\t1\n")
out.close()
os.system("cat " + pathOut + "normal3.bed " + args.DUPLICATION + " " + args.DELETION + " | " + bedtools_path + " sort -i stdin > " + args.OUTPUT)
os.remove(pathOut + "normal3.bed")
os.remove(pathOut + "normal2.bed")
os.remove(pathOut + "normal.bed")
elif argsDict['mode'] in ['covSummary'] or function == "covSummary":
test = pd.read_csv(args.INPUT,header=None,sep="\t")
covs_median = {}
covs_std = {}
covs_mean = {}
if args.CHROMOSOME is None:
chrs = list(test[0].unique())
for i in chrs:
test2 = test[2][test[2] != 0][test[0] == i]
covs_median[i] = test2[2].median()
covs_mean[i] = test2[2].mean()
covs_std[i] = test2[2].std()
print("\t".join(list(map(str,i,covs_median[i],covs_mean[i],covs_std[i]))))
elif args.CHROMOSOME is not None:
for line in open(args.CHROMOSOME):
i = line.split()[0].rstrip()
test2 = test[2][test[2] != 0][test[0] == i]
covs_median[i] = test2[2].median()
covs_mean[i] = test2[2].mean()
covs_std[i] = test2[2].std()
print(i,covs_median[i],covs_mean[i],covs_std[i])
covs_median["total"] = test[2][test[2] != 0].median()
covs_mean["total"] = test[2][test[2] != 0].mean()
covs_std["total"] = test[2][test[2] != 0].std()
if args.QUIET == False:
print("total",covs_median["total"],covs_mean["total"],covs_std["total"])
if(isset(args.SUMMARY)):
out = open(args.SUMMARY,"w")
for i in covs_median:
if args.QUIET == False:
print("\t".join(list(map(str,i,covs_median[i],covs_mean[i],covs_std[i]))))
out.write("\t".join(list(map(str,i,covs_median[i],covs_mean[i],covs_std[i]))) + "\n")
out.close()
elif argsDict['mode'] in ['winStatExtra']:
	# winStatExtra: compute rolling median/std of normalized coverage
	# (column 2 / COVERAGE) in windows of WINDOW_SIZE rows stepped by
	# STEP_SIZE rows, per chromosome.
	import pandas as pd
	import numpy as np
	cov = float(args.COVERAGE)
	test = pd.read_csv(args.INPUT,header=None,sep="\t")
	v=100
	if args.STEP_SIZE is not None:
		v=int(args.STEP_SIZE)
	elif args.STEP_SIZE is None:
		v=int(args.WINDOW_SIZE)
	def rolling_with_step(chr,s, window, step):
		"""Rolling median/std over Series *s* with the given window/step.

		Builds a (num_windows x window) index matrix via meshgrid so all
		windows are extracted in one vectorized fancy-index, then reduces
		each row with np.median/np.std.

		NOTE(review): "start"/"end" are positional row offsets within the
		chromosome's rows, not genomic coordinates -- confirm downstream
		consumers expect that. The parameter name "chr" shadows the builtin.
		"""
		vert_idx_list = np.arange(0, s.size - window, step)
		hori_idx_list = np.arange(window)
		A, B = np.meshgrid(hori_idx_list, vert_idx_list)
		idx_array = A + B
		x_array = s.values[idx_array]
		# NOTE(review): "idx" is computed but never used.
		idx = list(s.index[vert_idx_list + (int(window))])
		med = list(np.around(list(map(np.median, x_array)),4))
		std = list(np.around(list(map(np.std, x_array)),4))
		return pd.DataFrame({"chr":chr,"start":vert_idx_list,"end":vert_idx_list+window,"med":med,"std":std})
	out_df = pd.DataFrame(columns=["chr","start","end","med","std"])
	if args.CHROMOSOME is None:
		# Process every chromosome present in the input.
		chrs = list(test[0].unique())
		for i in chrs:
			test_chrs = test[test[0] == i]
			#test_chrs[3] = test_chrs[2]
			test_chrs_3 = test_chrs[2]/cov
			wins_step = rolling_with_step(i,test_chrs_3,args.WINDOW_SIZE,v)
			out_df = pd.concat([out_df,wins_step])
	elif args.CHROMOSOME is not None:
		# Restrict to the chromosomes listed in the CHROMOSOME file.
		chrs = []
		for line in open(args.CHROMOSOME):
			chrs.append(line.split()[0].rstrip())
		for i in chrs:
			test_chrs = test[test[0] == i]
			test_chrs_3 = test_chrs[2]/cov
			wins_step = rolling_with_step(i,test_chrs_3,args.WINDOW_SIZE,v)
			out_df = pd.concat([out_df,wins_step])
	# Strip any stray literal "\n" artifacts before writing.
	out_df = out_df.replace(r'\\n','', regex=True)
	out_df.to_csv(args.OUTPUT,sep="\t",index =False,columns=None,header=None)
elif argsDict['mode'] in ['subTrain'] or function == "subTrain":
import pandas as pd
import numpy as np
if args.NUMBER < 1.0:
fract = float(args.NUMBER)
test = pd.read_csv(args.INPUT,header=None,sep="\t")
out_df = pd.DataFrame(columns=test.columns)
dict_types = test[3].value_counts().to_dict()
for i in dict_types:
if dict_types[i] * fract < 10000.0:
subwin = test[test[3] ==i]
out_df = pd.concat([out_df,subwin])
elif dict_types[i] * fract > 10000.0:
subwin = test[test[3] ==i].sample(replace = True, frac = fract)
out_df = pd.concat([out_df,subwin])
elif args.NUMBER > 1:
count = int(args.NUMBER)
test = pd.read_csv(args.INPUT,header=None,sep="\t")
out_df = pd.DataFrame(columns=test.columns)
dict_types = test[3].value_counts().to_dict()
for i in dict_types:
subwin = test[test[3] ==i].sample(replace = True, n = count)
out_df = pd.concat([out_df,subwin])
out_df = out_df.round(3)
out_df.to_csv(args.OUTPUT,sep="\t",index =False,columns=None,header=None)
elif argsDict['mode'] in ['simReads'] or function == "simReads":
	# simReads: simulate sequencing reads with wgsim from the per-chromosome
	# CNV FASTAs ("<chr>_<ID>_CNV.fa") at the requested coverage, then
	# concatenate per-chromosome fastqs into one (pair of) sample fastq(s).
	from Bio import SeqIO
	import os
	cov = args.COVERAGE
	pathOut = args.DIRECTORY
	if pathOut != "" and pathOut.endswith("/") == False:
		pathOut += "/"
	chr_lens = {}
	if args.SE == False:
		# Paired-end mode: two reads per fragment, so len/(2*RL) fragments per 1x.
		for r in SeqIO.parse(open(args.FASTA),"fasta"):
			chr_lens[r.id] = len(str(r.seq))
		if args.CHROMOSOME is not None:
			for line in open(args.CHROMOSOME,"r"):
				# NOTE: loop variable "chr" shadows the builtin of that name.
				chr = line.split()[0].rstrip()
				reads = round(chr_lens[chr]/(2*int(args.READ_LENGTH)))*int(cov)
				os.system(wgsim_path + " -N " + str(reads) + " -1 " + str(args.READ_LENGTH) + " -2 " + str(args.READ_LENGTH) + " " + pathOut + chr + "_" + args.ID + "_CNV.fa " + pathOut + chr + "_1.fq " + pathOut + chr + "_2.fq > stdout")
			for line in open(args.CHROMOSOME,"r"):
				chr = line.split()[0].rstrip()
				os.system("cat " + pathOut + chr + "_1.fq >> " + pathOut + args.ID + "_" + str(args.COVERAGE) + "_1.fq")
				os.system("cat " + pathOut + chr + "_2.fq >> " + pathOut + args.ID + "_" + str(args.COVERAGE) + "_2.fq")
				os.remove(pathOut + chr + "_1.fq")
				os.remove(pathOut + chr + "_2.fq")
		elif args.CHROMOSOME is None:
			for chr in chr_lens:
				reads = round(chr_lens[chr]/(2*int(args.READ_LENGTH)))*int(cov)
				os.system(wgsim_path + " -N " + str(reads) + " -1 " + str(args.READ_LENGTH) + " -2 " + str(args.READ_LENGTH) + " " + pathOut + chr + "_" + args.ID + "_CNV.fa " + pathOut + chr + "_1.fq " + pathOut + chr + "_2.fq > stdout")
			for chr in chr_lens:
				os.system("cat " + pathOut + chr + "_1.fq >> " + pathOut + args.ID + "_" + str(args.COVERAGE) + "_1.fq")
				os.system("cat " + pathOut + chr + "_2.fq >> " + pathOut + args.ID + "_" + str(args.COVERAGE) + "_2.fq")
				os.remove(pathOut + chr + "_1.fq")
				os.remove(pathOut + chr + "_2.fq")
	elif args.SE == True:
		# Single-end mode: one read per fragment, so len/RL reads per 1x.
		for r in SeqIO.parse(open(args.FASTA),"fasta"):
			chr_lens[r.id] = len(str(r.seq))
		if args.CHROMOSOME is not None:
			for line in open(args.CHROMOSOME,"r"):
				chr = line.split()[0].rstrip()
				reads = round(chr_lens[chr]/(int(args.READ_LENGTH)))*int(cov)
				os.system(wgsim_path + " -N " + str(reads) + " -1 " + str(args.READ_LENGTH) + " " + pathOut + chr + "_" + args.ID + "_CNV.fa " + pathOut + chr + ".fq /dev/null > stdout")
			for line in open(args.CHROMOSOME,"r"):
				chr = line.split()[0].rstrip()
				os.system("cat " + pathOut + chr + ".fq >> " + pathOut + args.ID + "_" + str(args.COVERAGE) + ".fq")
				os.remove(pathOut + chr + ".fq")
		elif args.CHROMOSOME is None:
			for chr in chr_lens:
				# NOTE(review): this single-end path divides by 2*READ_LENGTH,
				# while the single-end path above divides by READ_LENGTH only --
				# one of the two halves the effective coverage. Confirm which
				# formula is intended and make them consistent.
				reads = round(chr_lens[chr]/(2*int(args.READ_LENGTH)))*int(cov)
				os.system(wgsim_path + " -N " + str(reads) + " -1 " + str(args.READ_LENGTH) + " " + pathOut + chr + "_" + args.ID + "_CNV.fa " + pathOut + chr + ".fq /dev/null > stdout")
			for chr in chr_lens:
				os.system("cat " + pathOut + chr + ".fq >> " + pathOut + args.ID + "_" + str(args.COVERAGE) + ".fq")
				os.remove(pathOut + chr + ".fq")
elif argsDict['mode'] in ['summarize'] or function == "summarize":
	# summarize: merge adjacent Del/Dup window calls (within WINDOW_SIZE bp)
	# into CNV intervals; if truth BEDs (DELETION/DUPLICATION) are supplied,
	# additionally label every interval True-Positive / False-Positive /
	# False-Negative before filtering by the probability CUTOFF (column 5).
	# NOTE(review): sys, math and shutil are imported but unused here, and
	# "non_temp_total.bed" is created but never read or removed.
	import os
	import sys
	import math
	import shutil
	os.system("grep -w 'Del' " + args.INPUT + " | " + bedtools_path + " sort -i stdin | " + bedtools_path + " merge -c 4,6,7,8,9 -o distinct,mode,mode,mode,mode -d " + str(args.WINDOW_SIZE) + " -i stdin > del_temp_total.bed")
	os.system("grep -w 'Dup' " + args.INPUT + " | " + bedtools_path + " sort -i stdin | " + bedtools_path + " merge -c 4,6,7,8,9 -o distinct,mode,mode,mode,mode -d " + str(args.WINDOW_SIZE) + " -i stdin > dup_temp_total.bed")
	os.system("grep -v 'Dup' " + args.INPUT + " | grep -v 'Del' > non_temp_total.bed")
	if args.DELETION is not None and args.DUPLICATION is not None:
		# Truth sets supplied: classify calls against them.
		os.system(bedtools_path + " intersect -wa -wb -a " + args.DELETION + " -b del_temp_total.bed > Del_temp_True-Positive.bed")
		os.system(bedtools_path + " intersect -wa -wb -a " + args.DUPLICATION + " -b dup_temp_total.bed > Dup_temp_True-Positive.bed")
		os.system(bedtools_path + " intersect -wa -v -a " + args.DELETION + " -b del_temp_total.bed > Del_temp_False-Negative.bed")
		os.system(bedtools_path + " intersect -wa -v -a " + args.DUPLICATION + " -b dup_temp_total.bed > Dup_temp_False-Negative.bed")
		os.system(bedtools_path + " intersect -wa -v -a del_temp_total.bed -b " + args.DELETION + " > Del_temp_False-Positive.bed")
		os.system(bedtools_path + " intersect -wa -v -a dup_temp_total.bed -b " + args.DUPLICATION + " > Dup_temp_False-Positive.bed")
		for i in ["Del","Dup"]:
			# False negatives come from the truth file, so synthesize the call
			# columns (ID, type, prob 1.0, NA copy-number, prob 1.0).
			out = open(i + "_temp_False-Negative2.bed", "w")
			for line in open(i + "_temp_False-Negative.bed"):
				out.write("\t".join([line.split()[0],line.split()[1],line.split()[2],args.ID,i,"1.0","NA","1.0","False-Negative"]) + "\n")
			out.close()
			out = open(i + "_temp_False-Positive2.bed", "w")
			for line in open(i + "_temp_False-Positive.bed"):
				out.write(line.rstrip() + "\tFalse-Positive\n")
			out.close()
			os.system(bedtools_path + " sort -i " + i + "_temp_True-Positive.bed | " + bedtools_path + " merge -c 10,11,12,13,14 -o distinct,mode,mode,mode,mode -i stdin > " + i + "_temp_True-Positive2.bed")
			out = open(i + "_temp_True-Positive3.bed","w")
			for line in open(i + "_temp_True-Positive2.bed"):
				out.write(line.rstrip() + "\tTrue-Positive\n")
			out.close()
		os.system("cat Del_temp_True-Positive3.bed Dup_temp_True-Positive3.bed Dup_temp_False-Positive2.bed Del_temp_False-Positive2.bed Del_temp_False-Negative2.bed Dup_temp_False-Negative2.bed | " + bedtools_path + " sort -i stdin > total_sum_temp.bed")
		# Keep only calls whose probability (column 5) exceeds the cutoff.
		out = open(args.OUTPUT,"w")
		for line in open("total_sum_temp.bed"):
			if float(line.split()[5]) > args.CUTOFF:
				out.write(line)
		out.close()
		for k in ["dup_temp_total.bed","del_temp_total.bed","Dup_temp_True-Positive.bed","Del_temp_True-Positive.bed","Del_temp_False-Negative.bed","Dup_temp_False-Negative.bed","Del_temp_False-Positive.bed","Dup_temp_False-Positive.bed","Dup_temp_True-Positive2.bed","Del_temp_True-Positive2.bed","Del_temp_False-Negative2.bed","Dup_temp_False-Negative2.bed","Del_temp_False-Positive2.bed","Dup_temp_False-Positive2.bed","Dup_temp_True-Positive3.bed","Del_temp_True-Positive3.bed","total_sum_temp.bed"]:
			os.remove(k)
	elif args.DELETION is None and args.DUPLICATION is None:
		# No truth sets: just merge, filter by probability, and clean up.
		os.system("cat dup_temp_total.bed del_temp_total.bed | " + bedtools_path + " sort -i stdin > total_sum_temp.bed")
		out = open(args.OUTPUT,"w")
		for line in open("total_sum_temp.bed"):
			if float(line.split()[5]) > args.CUTOFF:
				out.write(line)
		out.close()
		os.remove("dup_temp_total.bed")
		os.remove("del_temp_total.bed")
		os.remove("total_sum_temp.bed")
if argsDict['mode'] in ['ROC'] or function == "ROC":
	# ROC: load a trained classifier and, for each CNV class (Del/Dup) versus
	# the normal ("N") windows, compute an ROC curve (fpr/tpr) and write both
	# curves to OUTPUT.
	# NOTE(review): sklearn.externals.joblib and DataFrame.as_matrix() were
	# removed in newer sklearn/pandas releases -- this branch appears to
	# target older library versions; confirm before upgrading dependencies.
	import pandas as pd
	import numpy as np
	from sklearn.ensemble import RandomForestClassifier
	from sklearn.datasets import make_classification
	from sklearn.externals import joblib
	import os
	from itertools import cycle
	from sklearn import svm, datasets
	from sklearn.metrics import roc_curve, auc
	from scipy import interp
	from sklearn import metrics
	from sklearn.tree import DecisionTreeClassifier
	from sklearn.neural_network import MLPClassifier
	from sklearn.ensemble import ExtraTreesClassifier
	# NOTE(review): "models" is constructed but never used in this branch.
	models = {"RFC100":RandomForestClassifier(n_estimators=100), "RFC500":RandomForestClassifier(n_estimators=500), "CNN":MLPClassifier(), "ETC100":ExtraTreesClassifier(n_estimators=100), "ETC500":ExtraTreesClassifier(n_estimators=500), "DTC":DecisionTreeClassifier()}
	training_in = pd.read_csv(args.INPUT,header=None,sep="\t")
	clf = joblib.load(args.TRAIN)
	out_df = pd.DataFrame(columns=["type","fpr","tpr"])
	for i in ["Del","Dup"]:
		# Build a binary problem: class i (label 1) vs normal "N" (label 2).
		training_in_subA = training_in[training_in[3] == "N" ]
		training_in_subB = training_in[training_in[3] == i]
		training_in_subC = pd.concat([training_in_subA,training_in_subB])
		# Drop the 5 metadata columns and renumber the feature columns from 0.
		training_in_sub2 = training_in_subC.drop(training_in_subC[[0,1,2,3,4]], axis=1)
		training_in_sub2.columns = list(range(0,len(training_in_sub2.columns)))
		# NOTE(review): chained indexing assignment -- triggers
		# SettingWithCopyWarning on modern pandas; .loc would be safer.
		training_in_subC[3][training_in_subC[3] == "N"] = 2
		training_in_subC[3][training_in_subC[3] == i] = 1
		training_in_sub_prob = np.array(list(clf.predict_proba(training_in_sub2)[:, 1]))
		sub_in = np.array(list(training_in_subC[3].as_matrix()))
		fpr, tpr, threshold = roc_curve(sub_in, training_in_sub_prob, pos_label=2)
		sub_list = pd.DataFrame({"type":i,"fpr":list(fpr),"tpr":list(tpr)})
		out_df = pd.concat([out_df,sub_list])
	out_df.to_csv(args.OUTPUT,sep="\t",index =False)
if argsDict['mode'] in ['quantify'] or function == "quantify":
	# quantify: combine many per-sample CNV classification files (listed, one
	# path per line, in INPUT) into population-level tables of how many
	# samples carry each CNV state / copy number, optionally per gene (GFF).
	import pandas as pd
	import os
	import shutil
	def myround(x, base=args.WINDOW_SIZE):
		"""Round x to the nearest multiple of the window size.

		NOTE(review): appears unused in this branch -- confirm before removing.
		"""
		return base * round(x/base)
	def factor_counts_gff(row):
		"""Count per gene row how many samples were called N / Del / Dup.

		Columns 0-3 are chr/start/end/gene metadata; per-sample calls start
		at column 4, hence iloc[4:].
		"""
		row_counts = []
		t = row.iloc[4:].value_counts()
		row_counts.append(row[0])
		row_counts.append(row[1])
		row_counts.append(row[2])
		row_counts.append(row[3])
		row_counts.append(sum(t[t.index == "N"]))
		row_counts.append(sum(t[t.index == "Del"]))
		row_counts.append(sum(t[t.index == "Dup"]))
		return(row_counts)
	def copy_counts_gff(row):
		"""Count per gene row how many samples carry each copy number.

		Copy numbers 0-9 get their own bin; >= 10 is binned together.
		Same 4 metadata columns (chr/start/end/gene) as factor_counts_gff.
		"""
		row_counts = []
		t = row.iloc[4:].value_counts()
		row_counts.append(row[0])
		row_counts.append(row[1])
		row_counts.append(row[2])
		row_counts.append(row[3])
		row_counts.append(sum(t[t.index == 0.0]))
		row_counts.append(sum(t[t.index == 1.0]))
		row_counts.append(sum(t[t.index == 2.0]))
		row_counts.append(sum(t[t.index == 3.0]))
		row_counts.append(sum(t[t.index == 4.0]))
		row_counts.append(sum(t[t.index == 5.0]))
		row_counts.append(sum(t[t.index == 6.0]))
		row_counts.append(sum(t[t.index == 7.0]))
		row_counts.append(sum(t[t.index == 8.0]))
		row_counts.append(sum(t[t.index == 9.0]))
		row_counts.append(sum(t[t.index >= 10.0]))
		return(row_counts)
	if args.GFF is not None:
		# Gene-based mode: intersect each sample's calls with the GFF, reduce
		# per-gene with bedtools merge, then accumulate one column per sample
		# into the combined copy-number (comb_CP) and call (comb_CN) tables.
		comb_CN = pd.DataFrame(columns=["chr","start","end","gene"])
		comb_CP = pd.DataFrame(columns=["chr","start","end","gene"])
		count = 1
		for line in open(args.INPUT,"r"):
			print("processing " + line.rstrip())
			# Columns: chr, gff start/end, sample ID, CNV call, call prob,
			# copy number, copy-number prob.
			os.system(bedtools_path + """ intersect -wa -wb -a """ + args.GFF + """ -b """ + line.rstrip() + """ | awk -F "\t" '{print $1"\t"$4"\t"$5"\t"$13"\t"$15"\t"$16"\t"$17"\t"$18}' > dudeml_temp1.bed""")
			# Extract the gene ID from the GFF attribute string.
			os.system(bedtools_path + """ intersect -wa -wb -a """ + args.GFF + """ -b """ + line.rstrip() + """ | awk -F "ID=" '{print $2}' | awk -F ";" '{print $1}' | awk -F "-mRNA-1" '{print $1}' > dudeml_temp2.bed""")
			os.system("paste dudeml_temp1.bed dudeml_temp2.bed > dudeml_temp3.bed")
			os.mkdir('tempDir_bed')
			df = pd.read_csv("dudeml_temp3.bed",header = None,sep="\t")
			# Split into one file per gene so each gene can be merged separately.
			df_grouped = df.groupby(8)
			for index, group in df_grouped:
				group.to_csv("tempDir_bed/" + index,sep="\t",index =False,header=False)
			# Collapse each gene's windows into one row (mode of calls,
			# median of probabilities / copy numbers).
			os.system("""for file in tempDir_bed/*; do """ + bedtools_path + """ sort -i ${file} | """ + bedtools_path + """ merge -i stdin -c 4,5,6,7,8,9 -o distinct,mode,median,mode,median,distinct >> dudeml_temp4.bed; done""")
			shutil.rmtree("tempDir_bed/")
			os.system(bedtools_path + " sort -i dudeml_temp4.bed > dudeml_temp5.bed")
			os.remove("dudeml_temp4.bed")
			df = pd.read_csv("dudeml_temp5.bed",header = None,sep="\t")
			df.columns = ["chr","start","end","strain","CNV","CNVprob","CP","CPprob","gene"]
			# Demote low-confidence Dup/Del calls back to normal.
			df.loc[(df['CNV'] == "Dup") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = "N"
			df.loc[(df['CNV'] == "Del") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = "N"
			comb_CN['chr'] = df['chr']
			comb_CN['start'] = df['start']
			comb_CN['end'] = df['end']
			comb_CN['gene'] = df['gene']
			comb_CP['chr'] = df['chr']
			comb_CP['start'] = df['start']
			comb_CP['end'] = df['end']
			comb_CP['gene'] = df['gene']
			# Name the sample column after the strain if present, else number it.
			if pd.isnull(df['strain'][0]) == False:
				comb_CP[str(df['strain'][0])] = df["CP"]
				comb_CN[str(df['strain'][0])] = df["CNV"]
				count += 1
			elif pd.isnull(df['strain'][0]) == True:
				comb_CP[str(count)] = df["CP"]
				comb_CN[str(count)] = df["CNV"]
				count += 1
		comb_CP.to_csv(args.OUTPUT + ".copy_raw.txt",sep="\t",index =False)
		comb_CN.to_csv(args.OUTPUT + ".factor_raw.txt",sep="\t",index =False)
		print("Quantify CNVs in each window.")
		# Reduce each gene row across samples into per-state counts.
		comb_CP2 = comb_CP.apply(copy_counts_gff, axis=1)
		comb_CN2 = comb_CN.apply(factor_counts_gff, axis=1)
		comb_CP3 = pd.DataFrame(comb_CP2)
		comb_CN3 = pd.DataFrame(comb_CN2)
		comb_CP4 = pd.DataFrame()
		comb_CN4 = pd.DataFrame()
		comb_CN4[["chr","start","end","gene","N","Del","Dup"]] = pd.DataFrame(comb_CN3[0].values.tolist(), index= comb_CN3.index)
		comb_CP4[["chr","start","end","gene","0.0","1.0","2.0","3.0","4.0","5.0","6.0","7.0","8.0","9.0",">=10.0"]] = pd.DataFrame(comb_CP3[0].values.tolist(), index= comb_CP3.index)
		comb_CP4.to_csv(args.OUTPUT + ".copy.txt",sep="\t",index =False)
		comb_CN4.to_csv(args.OUTPUT + ".factor.txt",sep="\t",index =False)
		os.remove("dudeml_temp1.bed")
		os.remove("dudeml_temp2.bed")
		os.remove("dudeml_temp3.bed")
		os.remove("dudeml_temp5.bed")
elif args.GFF is None:
def copy_counts(row):
row_counts = []
t = row.iloc[2:].value_counts()
row_counts.append(row[0])
row_counts.append(row[1])
row_counts.append(row[2])
row_counts.append(sum(t[t.index == 0.0]))
row_counts.append(sum(t[t.index == 1.0]))
row_counts.append(sum(t[t.index == 2.0]))
row_counts.append(sum(t[t.index == 3.0]))
row_counts.append(sum(t[t.index == 4.0]))
row_counts.append(sum(t[t.index >= 5.0]))
return(row_counts)
def factor_counts(row):
row_counts = []
t = row.iloc[2:].value_counts()
row_counts.append(row[0])
row_counts.append(row[1])
row_counts.append(row[2])
row_counts.append(sum(t[t.index == "N"]))
row_counts.append(sum(t[t.index == "Del"]))
row_counts.append(sum(t[t.index == "Dup"]))
return(row_counts)
comb_CN = pd.DataFrame(columns=["chr","start","end"])
comb_CP = pd.DataFrame(columns=["chr","start","end"])
count = 1
for line in open(args.INPUT,"r"):
print("processing " + line.rstrip())
df = pd.read_csv(line.rstrip(),header = None,sep="\t")
df.columns = ["chr","start","end","strain","cov","CNV","CNVprob","CP","CPprob"]
df.loc[(df['CNV'] == "Dup") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = "N"
df.loc[(df['CNV'] == "Del") & (df['CNVprob'] < args.CUTOFF), ['CNV']] = "N"
comb_CN['chr'] = df['chr']
comb_CN['start'] = df['start']
comb_CN['end'] = df['end']
comb_CP['chr'] = df['chr']
comb_CP['start'] = df['start']
comb_CP['end'] = df['end']
if pd.isnull(df['strain'][0]) == False:
comb_CP[str(df['strain'][0])] = df["CP"]
comb_CN[str(df['strain'][0])] = df["CNV"]
count += 1
elif pd.isnull(df['strain'][0]) == True:
comb_CP[str(count)] = df["CP"]
comb_CN[str(count)] = df["CNV"]
count += 1
print("Quantify CNVs in each window.")
comb_CP2 = comb_CP.apply(copy_counts, axis=1)
comb_CN2 = comb_CN.apply(factor_counts, axis=1)
comb_CP3 = pd.DataFrame(comb_CP2)
comb_CN3 = pd.DataFrame(comb_CN2)
comb_CP4 = pd.DataFrame()
comb_CP4[["chr","start","end","0","1.0","2.0","3.0","4.0",">=5.0"]] = pd.DataFrame(comb_CN3[0].values.tolist(), index= comb_CN3.index)
comb_CN4 = pd.DataFrame()
comb_CN4[["chr","start","end","N","Del","Dup"]] = pd.DataFrame(comb_CN3[0].values.tolist(), index= comb_CN3.index)
comb_CN4 = comb_CN4.loc[comb_CN4['Del'] != 0 or comb_CN4['Dup'] != 0]
comb_CP4 = comb_CP4.loc[comb_CN4['Del'] != 0 or comb_CN4['Dup'] != 0]
comb_CP4.to_csv(args.OUTPUT + ".copy",sep="\t",index =False)
comb_CN4.to_csv(args.OUTPUT + ".factor",sep="\t",index =False)
| null | dudeML.py | dudeML.py | py | 57,536 | python | en | code | null | code-starcoder2 | 51 |
157601671 | import keras
import numpy as np
from PIL import Image
import os
import matplotlib.pyplot as plt
from keras.utils import plot_model
MODEL_PATH = 'LENET-5CNN.h5'
PIC_FOLDER = 'C:/Users/Hsinyao/Desktop//Keras/pic/'
def preprocess_image(IMG):
    """Load an image file and convert it to a binarized MNIST-style tensor.

    The image is resized to 28x28, converted to greyscale, inverted
    (255 - v, so dark strokes become bright), and binarized with a
    threshold of 25.

    Returns a float array of shape (1, 28, 28, 1) with values 0.0 or 1.0,
    ready for model.predict().
    """
    img = Image.open(IMG)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS there); kept as-is for the Pillow version in use.
    img = img.resize((28, 28), Image.ANTIALIAS)
    im_arr = np.array(img.convert('L'))
    # Vectorized replacement of the original per-pixel double loop:
    # invert, threshold at 25, and scale to {0.0, 1.0} in one pass.
    im_arr = np.where(255 - im_arr > 25, 1.0, 0.0)
    return im_arr.reshape((1, 28, 28, 1))
def predict(IMG_FOLDER):
    """Classify every .png in IMG_FOLDER with the saved LeNet-5 model.

    Prints the argmax class index for each image.
    """
    # BUGFIX/perf: load the model (and render its architecture diagram)
    # ONCE, instead of reloading it from disk for every single image as
    # the original loop did.
    model = keras.models.load_model(MODEL_PATH)
    plot_model(model, to_file='HsinyaoCNN.png', show_layer_names=True, show_shapes=True)
    filenames = os.listdir(IMG_FOLDER)
    for filename in filenames:
        if filename.split('.')[-1] == 'png':
            img_array = preprocess_image(IMG_FOLDER + filename)
            predict_value = model.predict(img_array)
            print(np.argmax(predict_value))
# Script entry point: classify every PNG in PIC_FOLDER.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so that
# importing this module does not immediately trigger prediction.
predict(PIC_FOLDER)
650013833 | #!/usr/bin/python3
# searches in a dir for a filename: recursive search
# os_walk searches in the whole dir, including subdirs, returning with a join the
# complete path/filename :)
import os
import sys
import subprocess
def find_files(filename, search_path):
    """Recursively search *search_path* for files named *filename*.

    Walks the tree top-down and returns the full path of every file whose
    name matches exactly, one entry per directory that contains one.
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(search_path)
        if filename in filenames
    ]
def _read_required(prompt, empty_msg):
    """Prompt repeatedly until the user types a non-empty string.

    Prints *empty_msg* and retries on empty input; exits the program on
    Ctrl+C (or a ValueError from input). Shared by main() for both the
    filename and root-directory prompts, which previously duplicated this
    loop verbatim.
    """
    while True:
        try:
            value = input(prompt)
            if not value:
                print(empty_msg)
                continue
            return value
        except ValueError as e:
            print(e)
            sys.exit()
        except KeyboardInterrupt:
            print("You pressed Ctrl+C")
            sys.exit()


def main():
    """Interactively ask for a filename and a root directory, then print
    every matching path found by find_files()."""
    # Clear the screen
    subprocess.call('clear', shell=True)
    # Clear buffer
    sys.stdout.flush()
    filename = _read_required("Enter filename to search: ",
                              "You haven't type anything as filename, try again...")
    print("File to be searched", filename)
    rootDir = _read_required("Enter the root directory ( e.g.: /tmp or . ) : ",
                             "You haven't specified a path, try again...")
    print("Dir to search is", rootDir)
    print(find_files(filename, rootDir))
if __name__ == "__main__":
main()
| null | python_practices/bash2python_scripting/find_file_after_walk_dir.py | find_file_after_walk_dir.py | py | 1,663 | python | en | code | null | code-starcoder2 | 51 |
248374868 | #!/usr/bin/python3
import json
import flask
import random
import os
import ankura
import time
import pickle
from tqdm import tqdm
import sys
import tempfile
import threading
# Flask app serving the interactive topic-modeling UI from ./static.
app = flask.Flask(__name__, static_url_path='')

# Accumulates (anchor_tokens, accuracy) pairs for every /topics request.
user_data = list()

# Dataset is selected by the first CLI argument (see the branches below).
dataset_name = sys.argv[1]
# Corpus split sizes and anchor-topic hyperparameters.
train_size = 10000
test_size = 500
number_of_topics = 50
label_weight = 1
smoothing = 0

# Map the dataset name to its label attribute and ankura corpus loader.
# NOTE(review): an unrecognized dataset name leaves attr_name/corpus
# undefined, causing a NameError later -- consider failing fast here.
if sys.argv[1]=='newsgroups':
    attr_name = 'coarse_newsgroup'
    corpus = ankura.corpus.newsgroups()
elif sys.argv[1]=='yelp':
    attr_name = 'binary_rating'
    corpus = ankura.corpus.yelp()
elif sys.argv[1]=='tripadvisor':
    attr_name = 'label'
    corpus = ankura.corpus.tripadvisor()
elif sys.argv[1]=='amazon':
    attr_name = 'binary_rating'
    corpus = ankura.corpus.amazon()
def calculate_user_data_accuracy(user_data, Q, test_corpus, train_corpus, attr_name):
    """Recompute logistic-regression accuracy for each recorded anchor set.

    Each entry of *user_data* is a (anchor_tokens, free_classifier_accuracy)
    pair recorded by /topics; for each one, rebuild the tandem anchors and
    print the free-classifier accuracy next to the (slower) logistic
    regression accuracy.
    """
    for index, data in enumerate(user_data):
        tokens, free_accuracy = data[0], data[1]
        vectors = ankura.anchor.tandem_anchors(tokens, Q, corpus)
        lr_accuracy = ankura.validate.anchor_accuracy(Q, vectors, test_corpus, train_corpus, attr_name)
        print('Instance', index, 'Free Classifier Accuracy:', free_accuracy, 'Logistic Regression Accuracy:', lr_accuracy)
@ankura.util.pickle_cache(sys.argv[1] + '.pickle')
def load_data():
    """Build (and disk-cache) everything expensive the app needs at startup.

    Splits the corpus into train/test, builds the labeled co-occurrence
    matrix Q, and derives Gram-Schmidt anchors from it. The pickle_cache
    decorator persists the result to '<dataset>.pickle' so subsequent runs
    skip the computation entirely.

    Returns a 9-tuple:
        Q, labels, train_ids, train_corpus, test_ids, test_corpus,
        gs_anchor_vectors, gs_anchor_indices, gs_anchor_tokens
    """
    split = ankura.pipeline.test_train_split(corpus, num_train=train_size, num_test=test_size, return_ids=True)
    (train_ids, train_corpus), (test_ids, test_corpus) = split
    Q, labels = ankura.anchor.build_labeled_cooccurrence(corpus, attr_name, train_ids,
                                                         label_weight=label_weight, smoothing=smoothing)
    # Default anchors: Gram-Schmidt selection over Q's rows.
    gs_anchor_indices = ankura.anchor.gram_schmidt_anchors(corpus, Q, k=number_of_topics, return_indices=True)
    gs_anchor_vectors = Q[gs_anchor_indices]
    # Each anchor is a singleton token list so it matches the multi-token
    # tandem-anchor format used by /topics.
    gs_anchor_tokens = [[corpus.vocabulary[index]] for index in gs_anchor_indices]
    return Q, labels, train_ids, train_corpus, test_ids, test_corpus, gs_anchor_vectors, gs_anchor_indices, gs_anchor_tokens
# Compute (or load from the pickle cache) all startup state at import time.
Q, labels, train_ids, train_corpus, test_ids, test_corpus, gs_anchor_vectors, gs_anchor_indices, gs_anchor_tokens = load_data()
@app.route('/')
def serve_itm():
    """Serve the single-page UI from the static folder."""
    return app.send_static_file('index.html')
@app.route('/vocab')
def get_vocab():
    """Return the corpus vocabulary as JSON (used for anchor autocomplete)."""
    return flask.jsonify(vocab=corpus.vocabulary)
@app.route('/finished', methods=['GET', 'POST'])
def finish():
    """Persist the session's user_data and kick off accuracy evaluation.

    Pickles the accumulated (anchors, accuracy) pairs to a uniquely named
    file under FinalAnchors/<dataset>/, then starts a background thread
    that recomputes logistic-regression accuracy for every instance (slow,
    so it must not block the HTTP response).
    """
    directory = os.path.join('FinalAnchors', sys.argv[1])
    # Create the output directory if it does not exist yet.
    try:
        os.makedirs(directory)
    except FileExistsError:
        pass
    # delete=False keeps the file around after the handle is dropped; the
    # tempfile machinery only provides a collision-free name here.
    pickle.dump(user_data, tempfile.NamedTemporaryFile(mode='wb',
                                                       delete=False,
                                                       prefix=sys.argv[1],
                                                       suffix='.pickle',
                                                       dir=directory,
                                                       ))
    t = threading.Thread(target=calculate_user_data_accuracy, args=(user_data, Q, test_corpus, train_corpus, attr_name,))
    t.start()
    return 'OK'
@app.route('/topics')
def topic_request():
    """Recover topics for the requested anchors (or the Gram-Schmidt defaults),
    classify the test corpus with the free classifier, record the accuracy,
    and return anchors/topics/accuracy as JSON.  Timing of each stage is
    printed to stdout."""
    raw_anchors = flask.request.args.get('anchors')
    start=time.time()
    # No anchors supplied -> fall back to the precomputed Gram-Schmidt ones.
    if raw_anchors is None:
        anchor_tokens, anchor_vectors = gs_anchor_tokens, gs_anchor_vectors
    else:
        anchor_tokens = json.loads(raw_anchors)
        anchor_vectors = ankura.anchor.tandem_anchors(anchor_tokens, Q, corpus)
    print('***tadem_anchors:', time.time()-start)
    start=time.time()
    C, topics = ankura.anchor.recover_topics(Q, anchor_vectors, epsilon=1e-5, get_c=True)
    print('C SHAPE :', C.shape)
    print('***recover_topics:', time.time()-start)
    start=time.time()
    # Only the vocabulary rows of the topic matrix go into the summary.
    topic_summary = ankura.topic.topic_summary(topics[:len(corpus.vocabulary)], corpus)
    print('***topic_summary:', time.time()-start)
    start=time.time()
    classifier = ankura.topic.free_classifier_dream(corpus, attr_name, labeled_docs=train_ids, topics=topics, C=C, labels=labels)
    print('***Get Classifier:', time.time()-start)
    contingency = ankura.validate.Contingency()
    start=time.time()
    # Tally gold-vs-predicted labels over the held-out test documents.
    for doc in test_corpus.documents:
        gold = doc.metadata[attr_name]
        pred = classifier(doc)
        contingency[gold, pred] += 1
    print('***Classify:', time.time()-start)
    print('***Accuracy:', contingency.accuracy())
    # Remember this interaction for the /finished endpoint.
    user_data.append((anchor_tokens, contingency.accuracy()))
    return flask.jsonify(anchors=anchor_tokens,
                         topics=topic_summary,
                         accuracy=contingency.accuracy())
if __name__ == '__main__':
    # Optional second CLI argument overrides the default port.
    if len(sys.argv)>2:
        port = int(sys.argv[2])
    else:
        port=5000
    app.run(debug=True, host='0.0.0.0', port=port)
| null | tbuie.py | tbuie.py | py | 4,660 | python | en | code | null | code-starcoder2 | 51 |
200021098 | import logging
from threading import Thread, Event
class Job(Thread):
    """Background thread that calls *execute(*args, **kwargs)* repeatedly.

    The call fires once immediately when *run_on_start* is true, then again
    every *interval* (a datetime.timedelta) until stop() is called.
    """
    def __init__(self, interval, run_on_start, execute, *args, **kwargs):
        Thread.__init__(self)
        # Event doubles as both the stop flag and the interval timer:
        # Event.wait(timeout) sleeps until set() or until the timeout elapses.
        self.stopped = Event()
        self.interval = interval
        self.run_on_start = run_on_start
        self.execute = execute
        self.args = args
        self.kwargs = kwargs
        self.logger = logging.getLogger('timeloop')
    def stop(self):
        """Signal the loop to exit and block until the thread finishes."""
        self.stopped.set()
        self.join()
    def run(self):
        """Thread body: optional immediate run, then the timed loop."""
        if self.run_on_start:
            self.logger.info("Executing on start: {}".format(self.execute))
            self.execute(*self.args, **self.kwargs)
        # wait() returns True once stop() sets the event, ending the loop.
        while not self.stopped.wait(self.interval.total_seconds()):
            self.logger.info("Executing on interval: {}".format(self.execute))
            self.execute(*self.args, **self.kwargs)
| null | timeloop/job.py | job.py | py | 865 | python | en | code | null | code-starcoder2 | 51 |
def Vigener(openText, key, whatDo):
    """Encrypt or decrypt *openText* with a Vigenere cipher keyed by *key*.

    The alphabet (English or Russian) is chosen from the first key character;
    *whatDo* == "Шифруем" encrypts, anything else decrypts.  Characters not
    in the alphabet are passed through unchanged and do not consume key
    letters.  Returns 0 if the key contains out-of-alphabet characters.
    """
    alpha = {0: 'abcdefghijklmnopqrstuvwxyz',
             1: 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'}
    openText = openText.lower()
    direction = 1 if whatDo == "Шифруем" else -1
    # ASCII first key character selects the Latin alphabet, otherwise Cyrillic.
    letters = alpha[0] if ord(key[0]) <= 127 else alpha[1]
    size = len(letters)
    key_codes, key_len, bad_key = encode(key, letters)
    if bad_key:
        return 0
    text_codes, _, skipped = encode(openText, letters)
    # Shift every in-alphabet character; the key index advances only for
    # in-alphabet characters, so punctuation does not consume key letters.
    shifted = [(code + direction * key_codes[pos % key_len]) % size
               for pos, code in enumerate(text_codes)]
    out = []
    next_shifted = iter(shifted)
    for idx in range(len(openText)):
        if skipped and skipped[0][0] == idx:
            # Out-of-alphabet character: emit it verbatim.
            out.append(skipped[0][1])
            del skipped[0]
        else:
            out.append(letters[next(next_shifted)])
    return ''.join(out)
def encode(Text, alpha):
    """Map *Text* onto alphabet indices.

    Returns (codes, count, skipped) where *codes* are the indices of the
    in-alphabet characters, *count* is len(codes), and *skipped* lists
    [position, character] pairs for characters outside *alpha*.
    """
    code = []
    no_alph = []
    for pos, ch in enumerate(Text):
        if ch in alpha:
            code.append(alpha.index(ch))
        else:
            no_alph.append([pos, ch])
    return code, len(code), no_alph
| null | Vigenеre.py | Vigenеre.py | py | 1,320 | python | en | code | null | code-starcoder2 | 51 |
46971736 | from tfmodel.model import PFNet, Transformer, DummyNet
import tensorflow as tf
import tensorflow_probability
import tensorflow_addons as tfa
import pickle
import numpy as np
import os
from sklearn.model_selection import train_test_split
import sys
import glob
import io
import os
import yaml
import uuid
import matplotlib
import matplotlib.pyplot as plt
import sklearn
import kerastuner as kt
from argparse import Namespace
import time
import json
import random
class PFNetLoss:
    """Combined classification + regression loss for the particle-flow net.

    The dense prediction/target tensors pack [class logits | charge |
    momentum components] along the last axis; the separate_* helpers slice
    them apart.  Classification uses sigmoid focal loss, charge and momentum
    use (weighted) MSE.
    """
    # NOTE: the momentum_loss_coefs default is now a tuple -- the original
    # mutable-list default was shared across instances (classic Python
    # pitfall); tf.constant accepts either.
    def __init__(self, num_input_classes, num_output_classes, classification_loss_coef=1.0, charge_loss_coef=1e-3, momentum_loss_coef=1.0, momentum_loss_coefs=(1.0, 1.0, 1.0)):
        self.num_input_classes = num_input_classes
        self.num_output_classes = num_output_classes
        self.momentum_loss_coef = momentum_loss_coef
        self.momentum_loss_coefs = tf.constant(momentum_loss_coefs)
        self.charge_loss_coef = charge_loss_coef
        self.classification_loss_coef = classification_loss_coef
        # Focal-loss focusing parameter.
        self.gamma = 10.0
    def mse_unreduced(self, true, pred):
        """Elementwise squared error (no reduction)."""
        return tf.math.pow(true-pred,2)
    def separate_prediction(self, y_pred):
        """Slice a prediction tensor into (id_logits, charge, momentum)."""
        N = self.num_output_classes
        pred_id_logits = y_pred[:, :, :N]
        pred_charge = y_pred[:, :, N:N+1]
        pred_momentum = y_pred[:, :, N+1:]
        return pred_id_logits, pred_charge, pred_momentum
    def separate_truth(self, y_true):
        """Slice a target tensor into (id, charge, momentum)."""
        true_id = tf.cast(y_true[:, :, :1], tf.int32)
        true_charge = y_true[:, :, 1:2]
        true_momentum = y_true[:, :, 2:]
        return true_id, true_charge, true_momentum
    def loss_components(self, y_true, y_pred):
        """Return (cls_loss, momentum_loss_sum, charge_loss, momentum_loss_split)."""
        pred_id_logits, pred_charge, pred_momentum = self.separate_prediction(y_pred)
        pred_id = tf.cast(tf.argmax(pred_id_logits, axis=-1), tf.int32)
        true_id, true_charge, true_momentum = self.separate_truth(y_true)
        true_id_onehot = tf.one_hot(tf.cast(true_id, tf.int32), depth=self.num_output_classes)
        #l1 = tf.nn.softmax_cross_entropy_with_logits(true_id_onehot, pred_id_logits)*self.classification_loss_coef
        l1 = tfa.losses.sigmoid_focal_crossentropy(tf.squeeze(true_id_onehot, [2]), pred_id_logits, from_logits=False, gamma=self.gamma)*self.classification_loss_coef
        # Per-component momentum loss, scaled globally and per component.
        l2 = self.mse_unreduced(true_momentum, pred_momentum) * self.momentum_loss_coef * self.momentum_loss_coefs
        l2s = tf.reduce_sum(l2, axis=-1)
        l3 = self.charge_loss_coef*self.mse_unreduced(true_charge, pred_charge)[:, :, 0]
        return l1, l2s, l3, l2
    def my_loss_full(self, y_true, y_pred):
        """Total loss: classification + momentum + charge."""
        l1, l2, l3, _ = self.loss_components(y_true, y_pred)
        loss = l1 + l2 + l3
        return loss
    def my_loss_cls(self, y_true, y_pred):
        """Classification-only loss."""
        l1, l2, l3, _ = self.loss_components(y_true, y_pred)
        loss = l1
        return loss
    def my_loss_reg(self, y_true, y_pred):
        """Charge-regression-only loss."""
        l1, l2, l3, _ = self.loss_components(y_true, y_pred)
        loss = l3
        return loss
def plot_confusion_matrix(cm):
    """Render a confusion matrix as a matplotlib figure."""
    fig = plt.figure(figsize=(5,5))
    plt.imshow(cm, cmap="Blues")
    plt.title("Reconstructed PID (normed to gen)")
    plt.xlabel("MLPF PID")
    plt.ylabel("Gen PID")
    plt.colorbar()
    plt.tight_layout()
    return fig
def plot_regression(val_x, val_y, var_name, rng):
    """2D histogram of predicted vs. generated values for one variable."""
    fig = plt.figure(figsize=(5,5))
    plt.hist2d(
        val_x,
        val_y,
        bins=(rng, rng),
        cmap="Blues",
        #norm=matplotlib.colors.LogNorm()
    );
    plt.xlabel("Gen {}".format(var_name))
    plt.ylabel("MLPF {}".format(var_name))
    return fig
def plot_multiplicity(num_pred, num_true):
    """Overlayed bar chart of per-class particle counts (true then pred)."""
    fig = plt.figure(figsize=(5,5))
    xs = np.arange(len(num_pred))
    plt.bar(xs, num_true, alpha=0.8)
    plt.bar(xs, num_pred, alpha=0.8)
    plt.xticks(xs)
    return fig
def plot_num_particle(num_pred, num_true, pid):
    """Scatter of per-event predicted vs. true multiplicity for one PID."""
    fig = plt.figure(figsize=(5,5))
    plt.scatter(num_true, num_pred)
    plt.title("particle id {}".format(pid))
    plt.xlabel("num true")
    plt.ylabel("num pred")
    # Square axes spanning the shared min/max so the diagonal is meaningful.
    a = min(np.min(num_true), np.min(num_pred))
    b = max(np.max(num_true), np.max(num_pred))
    plt.xlim(a, b)
    plt.ylim(a, b)
    return fig
def plot_to_image(figure):
    """
    Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.
    """
    buf = io.BytesIO()
    # Use plt.savefig to save the plot to a PNG in memory.
    plt.savefig(buf, format='png')
    plt.close(figure)
    buf.seek(0)
    # Decode into a 4-channel image tensor with a leading batch dimension,
    # as expected by tf.summary.image.
    image = tf.image.decode_png(buf.getvalue(), channels=4)
    image = tf.expand_dims(image, 0)
    return image
def plot_distributions(val_x, val_y, var_name, rng):
    """Overlayed normalized histograms of gen vs. MLPF values."""
    fig = plt.figure(figsize=(5,5))
    plt.hist(val_x, bins=rng, density=True, histtype="step", lw=2, label="gen");
    plt.hist(val_y, bins=rng, density=True, histtype="step", lw=2, label="MLPF");
    plt.xlabel(var_name)
    plt.legend(loc="best", frameon=False)
    plt.ylim(0,1.5)
    return fig
def plot_particles(y_pred, y_true, pid=1):
    #Ground truth vs model prediction particles
    # Eta-phi scatter of the first event; marker size scales with column 2
    # (pt), circles = truth, squares = prediction.
    fig = plt.figure(figsize=(10,10))
    ev = y_true[0, :]
    msk = ev[:, 0] == pid
    plt.scatter(ev[msk, 3], np.arctan2(ev[msk, 4], ev[msk, 5]), s=2*ev[msk, 2], marker="o", alpha=0.5)
    ev = y_pred[0, :]
    msk = ev[:, 0] == pid
    plt.scatter(ev[msk, 3], np.arctan2(ev[msk, 4], ev[msk, 5]), s=2*ev[msk, 2], marker="s", alpha=0.5)
    plt.xlabel("eta")
    plt.ylabel("phi")
    plt.xlim(-5,5)
    plt.ylim(-4,4)
    return fig
class ConfusionMatrixValidation:
    """Per-epoch validation hook: runs the model on a held-out slice, writes
    loss components to a JSON file, and logs confusion-matrix / regression /
    multiplicity figures plus scalar losses to TensorBoard.

    Fix vs. the original: the "Confusion Matrix Normed" image was logged
    twice per epoch (duplicated statement); it is now logged once.
    """
    def __init__(self, X_test, y_test, loss_cls, outdir, model, num_input_classes, num_output_classes, file_writer_cm):
        self.X_test = X_test
        self.y_test = y_test
        self.loss_cls = loss_cls
        self.outdir = outdir
        self.model = model
        self.num_input_classes = num_input_classes
        self.num_output_classes = num_output_classes
        self.file_writer_cm = file_writer_cm
    def log_confusion_matrix(self, epoch, logs):
        """Evaluate, persist loss components, and emit TensorBoard figures."""
        outdir = self.outdir
        model = self.model
        X_test = self.X_test
        y_test = self.y_test
        test_pred = model.predict(X_test, batch_size=5)
        # Mask out zero-padded input elements.
        msk = X_test[:, :, 0] != 0
        if isinstance(test_pred, tuple):
            test_pred = tf.concat(list(test_pred), axis=-1)
        l1, l2, l3, l2_r = self.loss_cls.loss_components(y_test, test_pred)
        # Dump scalar loss components alongside the Keras logs.
        logs["epoch"] = int(epoch)
        logs["l1"] = float(tf.reduce_mean(l1).numpy())
        logs["l2"] = float(tf.reduce_mean(l2).numpy())
        logs["l2_split"] = [float(x) for x in tf.reduce_mean(l2_r, axis=[0,1])]
        logs["l3"] = float(tf.reduce_mean(l3).numpy())
        with open("{}/logs_{}.json".format(outdir, epoch), "w") as fi:
            json.dump(logs, fi)
        # Replace the class-logit slice with the argmax class id.
        test_pred_id = np.argmax(test_pred[:, :, :self.num_output_classes], axis=-1)
        counts_pred = np.unique(test_pred_id, return_counts=True)
        test_pred = np.concatenate([np.expand_dims(test_pred_id, axis=-1), test_pred[:, :, self.num_output_classes:]], axis=-1)
        cm = sklearn.metrics.confusion_matrix(
            y_test[msk][:, 0].astype(np.int64).flatten(),
            test_pred[msk][:, 0].flatten(), labels=list(range(self.num_output_classes)))
        cm_normed = sklearn.metrics.confusion_matrix(
            y_test[msk][:, 0].astype(np.int64).flatten(),
            test_pred[msk][:, 0].flatten(), labels=list(range(self.num_output_classes)), normalize="true")
        num_pred = np.sum(cm, axis=0)
        num_true = np.sum(cm, axis=1)
        figure = plot_confusion_matrix(cm)
        cm_image = plot_to_image(figure)
        figure = plot_confusion_matrix(cm_normed)
        cm_image_normed = plot_to_image(figure)
        # Restrict regression plots to elements that are particles in both
        # the prediction and the truth.
        msk = (test_pred[:, :, 0]!=0) & (y_test[:, :, 0]!=0)
        ch_true = y_test[msk, 1].flatten()
        ch_pred = test_pred[msk, 1].flatten()
        figure = plot_regression(ch_true, ch_pred, "charge", np.linspace(-2, 2, 100))
        ch_image = plot_to_image(figure)
        figure = plot_multiplicity(num_pred, num_true)
        n_image = plot_to_image(figure)
        images_mult = []
        for icls in range(self.num_output_classes):
            n_pred = np.sum(test_pred[:, :, 0]==icls, axis=1)
            n_true = np.sum(y_test[:, :, 0]==icls, axis=1)
            figure = plot_num_particle(n_pred, n_true, icls)
            images_mult.append(plot_to_image(figure))
        images = {}
        for ireg in range(l2_r.shape[-1]):
            reg_true = y_test[msk, 2+ireg].flatten()
            reg_pred = test_pred[msk, 2+ireg].flatten()
            figure = plot_regression(reg_true, reg_pred, "reg {}".format(ireg), np.linspace(np.mean(reg_true) - 3*np.std(reg_true), np.mean(reg_true) + 3*np.std(reg_true), 100))
            images[ireg] = plot_to_image(figure)
        with self.file_writer_cm.as_default():
            tf.summary.image("Confusion Matrix", cm_image, step=epoch)
            tf.summary.image("Confusion Matrix Normed", cm_image_normed, step=epoch)
            tf.summary.image("charge regression", ch_image, step=epoch)
            tf.summary.image("particle multiplicity", n_image, step=epoch)
            for icls, img in enumerate(images_mult):
                tf.summary.image("npart {}".format(icls), img, step=epoch)
            for ireg in images.keys():
                tf.summary.image("regression {}".format(ireg), images[ireg], step=epoch)
            tf.summary.scalar("loss_cls", tf.reduce_mean(l1), step=epoch)
            for i in range(l2_r.shape[-1]):
                tf.summary.scalar("loss_reg_{}".format(i), tf.reduce_mean(l2_r[:, :, i]), step=epoch)
            # Diagonal of the normalized matrix is the per-class recall.
            for i in range(cm_normed.shape[0]):
                tf.summary.scalar("acc_cls_{}".format(i), cm_normed[i, i], step=epoch)
            tf.summary.scalar("loss_chg", tf.reduce_mean(l3), step=epoch)
def prepare_callbacks(model, outdir):
    """Build the standard training callbacks: TensorBoard logging,
    terminate-on-NaN, and per-epoch weight checkpoints under *outdir*."""
    callbacks = []
    tb = tf.keras.callbacks.TensorBoard(
        log_dir=outdir, histogram_freq=1, write_graph=False, write_images=False,
        update_freq='epoch',
        #profile_batch=(10,90),
        profile_batch=0,
    )
    tb.set_model(model)
    callbacks += [tb]
    terminate_cb = tf.keras.callbacks.TerminateOnNaN()
    callbacks += [terminate_cb]
    # Checkpoint file names encode epoch and validation loss.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=outdir + "/weights-{epoch:02d}-{val_loss:.6f}.hdf5",
        save_weights_only=True,
        verbose=0
    )
    cp_callback.set_model(model)
    callbacks += [cp_callback]
    return callbacks
def get_rundir(base='experiments'):
    """Return the path of the next sequential run directory under *base*.

    Existing entries named ``run_NN`` determine the next number
    (``run_01`` when none exist).  *base* is created if missing; the run
    directory itself is not created.

    Fix vs. the original: entries that do not follow the ``run_`` naming
    scheme (stray files, .DS_Store, ...) previously crashed the int() parse;
    they are now ignored.
    """
    if not os.path.exists(base):
        os.makedirs(base)
    run_numbers = [
        int(name.split('run_')[1])
        for name in os.listdir(base)
        if name.startswith('run_')
    ]
    run_number = max(run_numbers) + 1 if run_numbers else 1
    return '{}/run_{:02d}'.format(base, run_number)
def compute_weights_invsqrt(X, y, w):
    """Dataset-map function: inverse-sqrt sample weights, zeroed on padding."""
    wn = tf.cast(tf.shape(w)[-1], tf.float32)/tf.sqrt(w)
    # Zero out zero-padded input elements.
    wn *= tf.cast(X[:, 0]!=0, tf.float32)
    #wn /= tf.reduce_sum(wn)
    return X, y, wn
def compute_weights_none(X, y, w):
    """Dataset-map function: uniform weights, zeroed on padding."""
    wn = tf.ones_like(w)
    wn *= tf.cast(X[:, 0]!=0, tf.float32)
    return X, y, wn
# Lookup used by main() via config['setup']['sample_weights'].
weight_functions = {
    "inverse_sqrt": compute_weights_invsqrt,
    "none": compute_weights_none,
}
def scale_outputs(X,y,w):
    """Standardize targets by subtracting out_m and dividing by out_s."""
    # NOTE(review): out_m and out_s are not defined anywhere in this file's
    # visible scope -- presumably module-level mean/std arrays set elsewhere.
    # Calling this as-is would raise NameError; confirm before use.
    ynew = y-out_m
    ynew = ynew/out_s
    return X, ynew, w
def targets_multi_output(num_output_classes):
    """Return a dataset-map function that splits the dense target tensor into
    the per-head dict expected by the multi-output models (one-hot class id
    plus one slice per regression target)."""
    def func(X, y, w):
        return X, {
            "cls": tf.one_hot(tf.cast(y[:, :, 0], tf.int32), num_output_classes),
            "charge": y[:, :, 1:2],
            "pt": y[:, :, 2:3],
            "eta": y[:, :, 3:4],
            "sin_phi": y[:, :, 4:5],
            "cos_phi": y[:, :, 5:6],
            "energy": y[:, :, 6:7],
        }, w
    return func
def make_model(config, dtype):
    """Instantiate the network named by config['parameters']['model'].

    Raises:
        KeyError: if the model type is not 'gnn', 'transformer' or 'dense'.
    """
    model = config['parameters']['model']
    if model == 'gnn':
        return make_gnn(config, dtype)
    elif model == 'transformer':
        return make_transformer(config, dtype)
    elif model == 'dense':
        return make_dense(config, dtype)
    raise KeyError("Unknown model type {}".format(model))
def make_gnn(config, dtype):
    """Build a PFNet graph network from config['parameters']."""
    activation = getattr(tf.nn, config['parameters']['activation'])
    # Config keys forwarded verbatim to the PFNet constructor.
    parameters = [
        'bin_size',
        'num_convs_id',
        'num_convs_reg',
        'num_hidden_id_enc',
        'num_hidden_id_dec',
        'num_hidden_reg_enc',
        'num_hidden_reg_dec',
        'num_neighbors',
        'hidden_dim_id',
        'hidden_dim_reg',
        'dist_mult',
        'distance_dim',
        'dropout',
        'skip_connection'
    ]
    kwargs = {par: config['parameters'][par] for par in parameters}
    model = PFNet(
        multi_output=config["setup"]["multi_output"],
        num_input_classes=config["dataset"]["num_input_classes"],
        num_output_classes=config["dataset"]["num_output_classes"],
        num_momentum_outputs=config["dataset"]["num_momentum_outputs"],
        activation=activation,
        **kwargs
    )
    return model
def make_transformer(config, dtype):
    """Build a Transformer model from config['parameters']."""
    parameters = [
        'num_layers', 'd_model', 'num_heads', 'dff', 'support', 'dropout'
    ]
    kwargs = {par: config['parameters'][par] for par in parameters}
    model = Transformer(
        multi_output=config["setup"]["multi_output"],
        num_input_classes=config["dataset"]["num_input_classes"],
        num_output_classes=config["dataset"]["num_output_classes"],
        num_momentum_outputs=config["dataset"]["num_momentum_outputs"],
        dtype=dtype,
        **kwargs
    )
    return model
def make_dense(config, dtype):
    """Build the trivial dense baseline model."""
    model = DummyNet(
        num_input_classes=config["dataset"]["num_input_classes"],
        num_output_classes=config["dataset"]["num_output_classes"],
        num_momentum_outputs=config["dataset"]["num_momentum_outputs"],
    )
    return model
def eval_model(X, ygen, ycand, model, config, outdir, global_batch_size):
    """Run inference on X and save inputs, generator/candidate targets and
    predictions (argmax class id + regression outputs, plus the raw class
    scores) to ``{outdir}/pred.npz`` for offline analysis."""
    import scipy
    y_pred = model.predict(X, batch_size=global_batch_size)
    y_pred_raw_ids = y_pred[:, :, :config["dataset"]["num_output_classes"]]
    #softmax score must be over a threshold 0.6 to call it a particle (prefer low fake rate to high efficiency)
    # y_pred_id_sm = scipy.special.softmax(y_pred_raw_ids, axis=-1)
    # y_pred_id_sm[y_pred_id_sm < 0.] = 0.0
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the supported spelling and is equivalent here.
    msk = np.ones(y_pred_raw_ids.shape, dtype=bool)
    #Use thresholds for charged and neutral hadrons based on matching the DelphesPF fake rate
    # msk[y_pred_id_sm[:, :, 1] < 0.8, 1] = 0
    # msk[y_pred_id_sm[:, :, 2] < 0.025, 2] = 0
    y_pred_raw_ids = y_pred_raw_ids*msk
    y_pred_id = np.argmax(y_pred_raw_ids, axis=-1)
    # Prepend the argmax class id to the regression outputs.
    y_pred_id = np.concatenate([np.expand_dims(y_pred_id, axis=-1), y_pred[:, :, config["dataset"]["num_output_classes"]:]], axis=-1)
    np_outfile = "{}/pred.npz".format(outdir)
    print("saving output to {}".format(np_outfile))
    np.savez(np_outfile, X=X, ygen=ygen, ycand=ycand, ypred=y_pred_id, ypred_raw=y_pred_raw_ids)
def freeze_model(model, config, outdir):
    """Export the model as a frozen inference graph (constants folded,
    training nodes removed) in both binary and text protobuf form under
    ``{outdir}/model_frozen``."""
    # Wrap the model in a concrete function with a fully dynamic batch/event
    # dimension so the frozen graph accepts variable-size inputs.
    full_model = tf.function(lambda x: model(x, training=False))
    full_model = full_model.get_concrete_function(
        tf.TensorSpec((None, None, config["dataset"]["num_input_features"]), tf.float32))
    from tensorflow.python.framework import convert_to_constants
    frozen_func = convert_to_constants.convert_variables_to_constants_v2(full_model)
    graph = tf.compat.v1.graph_util.remove_training_nodes(frozen_func.graph.as_graph_def())
    tf.io.write_graph(graph_or_graph_def=graph,
                      logdir="{}/model_frozen".format(outdir),
                      name="frozen_graph.pb",
                      as_text=False)
    tf.io.write_graph(graph_or_graph_def=graph,
                      logdir="{}/model_frozen".format(outdir),
                      name="frozen_graph.pbtxt",
                      as_text=True)
class FlattenedCategoricalAccuracy(tf.keras.metrics.CategoricalAccuracy):
    """Categorical accuracy computed after flattening batch and element
    dimensions into one, so padded event tensors are scored elementwise."""
    # The use_weights flag is accepted but not used by this implementation.
    def __init__(self, use_weights=False, **kwargs):
        super(FlattenedCategoricalAccuracy, self).__init__(**kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None):
        #flatten the batch dimension
        _y_true = tf.reshape(y_true, (tf.shape(y_true)[0]*tf.shape(y_true)[1], tf.shape(y_true)[2]))
        _y_pred = tf.reshape(y_pred, (tf.shape(y_pred)[0]*tf.shape(y_pred)[1], tf.shape(y_pred)[2]))
        super(FlattenedCategoricalAccuracy, self).update_state(_y_true, _y_pred, None)
class FlattenedMeanIoU(tf.keras.metrics.MeanIoU):
    """Mean IoU computed after flattening batch and element dimensions,
    mirroring FlattenedCategoricalAccuracy."""
    # The use_weights flag is accepted but not used by this implementation.
    def __init__(self, use_weights=False, **kwargs):
        super(FlattenedMeanIoU, self).__init__(**kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None):
        #flatten the batch dimension
        _y_true = tf.reshape(y_true, (tf.shape(y_true)[0]*tf.shape(y_true)[1], tf.shape(y_true)[2]))
        _y_pred = tf.reshape(y_pred, (tf.shape(y_pred)[0]*tf.shape(y_pred)[1], tf.shape(y_pred)[2]))
        super(FlattenedMeanIoU, self).update_state(_y_true, _y_pred, None)
class LearningRateLoggingCallback(tf.keras.callbacks.Callback):
    """Logs the optimizer's decayed learning rate to TensorBoard each epoch."""
    # def __init__(self, opt, **kwargs):
    #     super(LearningRateLoggingCallback, self).__init__(**kwargs)
    #     self.opt = opt
    def on_epoch_end(self, epoch, numpy_logs):
        # _decayed_lr evaluates the LR schedule at the current step.
        lr = self.model.optimizer._decayed_lr(tf.float32).numpy()
        tf.summary.scalar('learning rate', data=lr, step=epoch)
def main(args, yaml_path, config):
    """Entry point dispatching on ``args.action``.

    ``data``  -- preprocess raw files into TFRecords and exit.
    ``train`` -- fit the model, checkpointing into a fresh experiments dir.
    ``eval``  -- run validation inference and freeze the trained graph.
    ``time``  -- benchmark inference latency on synthetic events.

    Fix vs. the original: the learning-rate schedule is created before the
    dtype branch; the float16 path previously referenced ``lr_schedule``
    before it was assigned (NameError).
    """
    # Switch off multi-output for the evaluation for backwards compatibility.
    multi_output = True
    if args.action == "eval":
        multi_output = False
    tf.config.run_functions_eagerly(config['tensorflow']['eager'])
    from tfmodel.data import Dataset
    cds = config["dataset"]
    dataset_def = Dataset(
        num_input_features=int(cds["num_input_features"]),
        num_output_features=int(cds["num_output_features"]),
        padded_num_elem_size=int(cds["padded_num_elem_size"]),
        raw_path=cds.get("raw_path", None),
        raw_files=cds.get("raw_files", None),
        processed_path=cds["processed_path"],
        validation_file_path=cds["validation_file_path"],
        schema=cds["schema"]
    )
    if args.action == "data":
        dataset_def.process(
            config["dataset"]["num_files_per_chunk"]
        )
        return
    global_batch_size = config['setup']['batch_size']
    config['setup']['multi_output'] = multi_output
    # Unique run name: config file stem + short uuid.
    model_name = os.path.splitext(os.path.basename(yaml_path))[0] + "-" + str(uuid.uuid4())[:8]
    print("model_name=", model_name)
    tfr_files = sorted(glob.glob(dataset_def.processed_path))
    if len(tfr_files) == 0:
        raise Exception("Could not find any files in {}".format(dataset_def.processed_path))
    random.shuffle(tfr_files)
    dataset = tf.data.TFRecordDataset(tfr_files).map(dataset_def.parse_tfr_element, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # TFRecords do not store a length; count events by iterating once.
    num_events = 0
    for i in dataset:
        num_events += 1
    print("dataset loaded, len={}".format(num_events))
    n_train = config['setup']['num_events_train']
    n_test = config['setup']['num_events_test']
    n_epochs = config['setup']['num_epochs']
    weight_func = weight_functions[config['setup']['sample_weights']]
    assert n_train + n_test <= num_events
    # Padded shapes for the (X, y, w) triple.
    ps = (
        tf.TensorShape([dataset_def.padded_num_elem_size, dataset_def.num_input_features]),
        tf.TensorShape([dataset_def.padded_num_elem_size, dataset_def.num_output_features]),
        tf.TensorShape([dataset_def.padded_num_elem_size, ])
    )
    ds_train = dataset.take(n_train).map(weight_func).padded_batch(global_batch_size, padded_shapes=ps)
    ds_test = dataset.skip(n_train).take(n_test).map(weight_func).padded_batch(global_batch_size, padded_shapes=ps)
    if multi_output:
        ds_train = ds_train.map(targets_multi_output(config['dataset']['num_output_classes']))
        ds_test = ds_test.map(targets_multi_output(config['dataset']['num_output_classes']))
    ds_train_r = ds_train.repeat(n_epochs)
    ds_test_r = ds_test.repeat(n_epochs)
    # CLI weights override the config; resuming reuses the weights' directory.
    weights = config['setup']['weights']
    if args.weights:
        weights = args.weights
    if weights is None:
        outdir = 'experiments/{}'.format(model_name)
        if os.path.isdir(outdir):
            print("Output directory exists: {}".format(outdir), file=sys.stderr)
            sys.exit(1)
    else:
        outdir = os.path.dirname(weights)
    # Pick a distribution strategy from CUDA_VISIBLE_DEVICES; fall back to CPU.
    try:
        gpus = [int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")]
        num_gpus = len(gpus)
        print("num_gpus=", num_gpus)
        if num_gpus > 1:
            strategy = tf.distribute.MirroredStrategy()
            global_batch_size = num_gpus * global_batch_size
        else:
            strategy = tf.distribute.OneDeviceStrategy("gpu:0")
    except Exception as e:
        print("fallback to CPU", e)
        strategy = tf.distribute.OneDeviceStrategy("cpu")
        num_gpus = 0
    # Scale the base learning rate with the (possibly multi-GPU) batch size.
    actual_lr = global_batch_size*float(config['setup']['lr'])
    Xs = []
    ygens = []
    ycands = []
    # For faster loading during training, only use the first validation file.
    if args.action == "train":
        dataset_def.val_filelist = dataset_def.val_filelist[:1]
    for fi in dataset_def.val_filelist[:10]:
        print(fi)
        X, ygen, ycand = dataset_def.prepare_data(fi)
        Xs.append(np.concatenate(X))
        ygens.append(np.concatenate(ygen))
        ycands.append(np.concatenate(ycand))
    X_val = np.concatenate(Xs)
    ygen_val = np.concatenate(ygens)
    ycand_val = np.concatenate(ycands)
    with strategy.scope():
        # Create the schedule before the dtype branch so both the float16
        # and float32 optimizers can use it (the original float16 branch
        # used lr_schedule before assignment).
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            actual_lr,
            decay_steps=1000,
            decay_rate=0.99,
            staircase=True
        )
        if config['setup']['dtype'] == 'float16':
            if multi_output:
                raise Exception("float16 and multi_output are not supported at the same time")
            model_dtype = tf.dtypes.float16
            from tensorflow.keras.mixed_precision import experimental as mixed_precision
            policy = mixed_precision.Policy('mixed_float16')
            mixed_precision.set_policy(policy)
            opt = mixed_precision.LossScaleOptimizer(
                tf.keras.optimizers.Adam(learning_rate=lr_schedule),
                loss_scale="dynamic"
            )
        else:
            model_dtype = tf.dtypes.float32
            opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
        #if config['setup']['multi_output']:
        #    from tfmodel.PCGrad_tf import PCGrad
        #    opt = PCGrad(tf.compat.v1.train.AdamOptimizer(actual_lr))
        if args.action=="train" or args.action=="eval":
            model = make_model(config, model_dtype)
            # One loss head per regression target plus the categorical PID.
            model.compile(
                loss={
                    "cls": tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                    "charge": tf.keras.losses.MeanSquaredError(),
                    "pt": tf.keras.losses.MeanSquaredLogarithmicError(),
                    "eta": tf.keras.losses.MeanSquaredError(),
                    "sin_phi": tf.keras.losses.MeanSquaredError(),
                    "cos_phi": tf.keras.losses.MeanSquaredError(),
                    "energy": tf.keras.losses.MeanSquaredLogarithmicError(),
                },
                optimizer=opt,
                sample_weight_mode='temporal',
                loss_weights={
                    "cls": config["dataset"]["classification_loss_coef"],
                    "charge": config["dataset"]["charge_loss_coef"],
                    "pt": config["dataset"]["pt_loss_coef"],
                    "eta": config["dataset"]["eta_loss_coef"],
                    "sin_phi": config["dataset"]["sin_phi_loss_coef"],
                    "cos_phi": config["dataset"]["cos_phi_loss_coef"],
                    "energy": config["dataset"]["energy_loss_coef"],
                },
                metrics={
                    "cls": [
                        FlattenedCategoricalAccuracy(name="acc_unweighted", dtype=tf.float64),
                    ]
                }
            )
            # Evaluate the model once to build the layers.
            print(X_val.shape)
            model(tf.cast(X_val[:5], model_dtype))
            model.summary()
            initial_epoch = 0
            if weights:
                model.load_weights(weights)
                # Checkpoints are named weights-{epoch:02d}-{val_loss}.hdf5.
                initial_epoch = int(weights.split("/")[-1].split("-")[1])
        if args.action=="train":
            callbacks = prepare_callbacks(
                model, outdir
            )
            callbacks.append(LearningRateLoggingCallback())
            fit_result = model.fit(
                ds_train_r, validation_data=ds_test_r, epochs=initial_epoch+n_epochs, callbacks=callbacks,
                steps_per_epoch=n_train//global_batch_size, validation_steps=n_test//global_batch_size,
                initial_epoch=initial_epoch
            )
            with open("{}/history_{}.json".format(outdir, initial_epoch), "w") as fi:
                json.dump(fit_result.history, fi)
            model.save(outdir + "/model_full", save_format="tf")
        if args.action=="eval":
            eval_model(X_val, ygen_val, ycand_val, model, config, outdir, global_batch_size)
            freeze_model(model, config, outdir)
        if args.action=="time":
            # Benchmark inference over a grid of event sizes and batch sizes
            # with random inputs; results are appended to synthetic_timing.json.
            synthetic_timing_data = []
            for iteration in range(config["timing"]["num_iter"]):
                numev = config["timing"]["num_ev"]
                for evsize in [128*10, 128*20, 128*30, 128*40, 128*50, 128*60, 128*70, 128*80, 128*90, 128*100]:
                    for batch_size in [1,2,3,4]:
                        x = np.random.randn(batch_size, evsize, config["dataset"]["num_input_features"]).astype(np.float32)
                        model = make_model(config, model_dtype)
                        model(x)
                        if weights:
                            model.load_weights(weights)
                        t0 = time.time()
                        for i in range(numev//batch_size):
                            model(x)
                        t1 = time.time()
                        dt = t1 - t0
                        time_per_event = 1000.0*(dt / numev)
                        synthetic_timing_data.append(
                            [{"iteration": iteration, "batch_size": batch_size, "event_size": evsize, "time_per_event": time_per_event}])
                        print("Synthetic random data: batch_size={} event_size={}, time={:.2f} ms/ev".format(batch_size, evsize, time_per_event))
            with open("{}/synthetic_timing.json".format(outdir), "w") as fi:
                json.dump(synthetic_timing_data, fi)
| null | mlpf/tfmodel/model_setup.py | model_setup.py | py | 27,341 | python | en | code | null | code-starcoder2 | 51 |
512743474 | # _*_ coding:utf-8 _*_
# redis未授权检测脚本 单线程版
# 使用环境;
# 1.Python 3.8.10
# 2.python安装redis和func_timeout
#
# windows环境:管理员身份
# pip3 install func_timeout
# pip3 install redis
# Linux环境: sudo easy_install redis
# sudo easy_install func_timeout
#=========================================================
# 使用命令
# url.txt导入的目标,格式为 IP:端口 示例 119.45.56.123:6379
# python.exe ./redisOneThread.py
#
import redis,time
from func_timeout import func_set_timeout
import func_timeout
# Input target list (one "ip:port" per line) and output file for hits.
file="./url.txt"
success_save_filename="./success_redisOneThread.txt"
# Parsed [ip, port] pairs, filled by readfile().
redis_row_list=[]
# Read the target file line by line.
def readfile(file):
    """Parse *file* (one ``ip:port`` target per line) into the module-level
    ``redis_row_list`` as ``[ip, port]`` pairs."""
    # The context manager guarantees the handle is closed even on error;
    # the original shadowed the parameter with the handle and only closed
    # it on the happy path.
    with open(file) as fh:
        for line in fh:
            redis_row_list.append(line.replace("\n", "").split(":", 1))
# Append a result line to a report file.
def writefile(filename,context):
    """Append *context* to *filename*, creating the file if needed."""
    # with-block replaces the manual open/close, so the handle is released
    # even if write() raises.
    with open(filename, "a") as fo:
        fo.write(context)
# Probe each target with redis INFO to detect unauthenticated access.
def redisSendFifo():
    """For every [ip, port] in redis_row_list, run checkTimeOut(); record
    targets whose INFO reply contains 'redis_build_id' (i.e. no auth
    required) and log connection timeouts to ./chaoshi.txt."""
    for line in redis_row_list:
        print("准备检测:"+line[0])
        try:
            r=checkTimeOut(line)
            # INFO succeeded without AUTH -> unauthenticated instance.
            if "redis_build_id" in r:
                writefile(success_save_filename,line[0]+":"+line[1]+"\n")
                print(line[0]+":"+line[1]+" 存在未授权漏洞")
        except func_timeout.exceptions.FunctionTimedOut:
            writefile("./chaoshi.txt",line[0]+":"+line[1]+"\n")
            print('执行函数超时')
# The function that actually sends the probe.
@func_set_timeout(5)# hard 5-second limit on the whole call
def checkTimeOut(line):
    """Connect to redis at line[0]:line[1] and return its INFO dict, or the
    string "error" on any connection/protocol failure."""
    try:
        r=redis.Redis(host=line[0], port=line[1], db=0,socket_connect_timeout=3)
        return r.info()
    # NOTE(review): bare except deliberately swallows every failure as a
    # best-effort "error" sentinel; the caller only substring-checks it.
    except :
        return "error"
# Entry point: load targets, then scan them sequentially.
if __name__ == '__main__':
    readfile(file)
    redisSendFifo()
| null | redisOneThread.py | redisOneThread.py | py | 2,111 | python | en | code | null | code-starcoder2 | 51 |
300980711 | import base64
from datetime import timedelta
import logging
import time
import uuid
import warnings
import httpx
from ably.types.capability import Capability
from ably.types.tokendetails import TokenDetails
from ably.types.tokenrequest import TokenRequest
from ably.util.exceptions import AblyException, IncompatibleClientIdException
__all__ = ["Auth"]
log = logging.getLogger(__name__)
class Auth:
    class Method:
        # Authentication mechanism identifiers used by Auth.__auth_mechanism.
        BASIC = "BASIC"
        TOKEN = "TOKEN"
    def __init__(self, ably, options):
        """Select and set up the auth mechanism (basic or token) from *options*.

        Basic auth is chosen when a key secret is available and token auth is
        not forced; otherwise token auth is configured from whichever of
        auth_callback / auth_url / key / token / token_details is present.

        Raises:
            ValueError: if use_token_auth is False without a key, or if no
                token-auth credential source is provided.
        """
        self.__ably = ably
        self.__auth_options = options
        # Prefer the client_id embedded in supplied token details.
        if options.token_details:
            self.__client_id = options.token_details.client_id
        else:
            self.__client_id = options.client_id
        self.__client_id_validated = False
        self.__basic_credentials = None
        self.__auth_params = None
        self.__token_details = None
        # Offset between local clock and server time, filled lazily.
        self.__time_offset = None
        must_use_token_auth = options.use_token_auth is True
        must_not_use_token_auth = options.use_token_auth is False
        can_use_basic_auth = options.key_secret is not None
        if not must_use_token_auth and can_use_basic_auth:
            # We have the key, no need to authenticate the client
            # default to using basic auth
            log.debug("anonymous, using basic auth")
            self.__auth_mechanism = Auth.Method.BASIC
            basic_key = "%s:%s" % (options.key_name, options.key_secret)
            basic_key = base64.b64encode(basic_key.encode('utf-8'))
            self.__basic_credentials = basic_key.decode('ascii')
            return
        elif must_not_use_token_auth and not can_use_basic_auth:
            raise ValueError('If use_token_auth is False you must provide a key')
        # Using token auth
        self.__auth_mechanism = Auth.Method.TOKEN
        if options.token_details:
            self.__token_details = options.token_details
        elif options.auth_token:
            self.__token_details = TokenDetails(token=options.auth_token)
        else:
            self.__token_details = None
        if options.auth_callback:
            log.debug("using token auth with auth_callback")
        elif options.auth_url:
            log.debug("using token auth with auth_url")
        elif options.key_secret:
            log.debug("using token auth with client-side signing")
        elif options.auth_token:
            log.debug("using token auth with supplied token only")
        elif options.token_details:
            log.debug("using token auth with supplied token_details")
        else:
            raise ValueError("Can't authenticate via token, must provide "
                             "auth_callback, auth_url, key, token or a TokenDetail")
    async def __authorize_when_necessary(self, token_params=None, auth_options=None, force=False):
        """Ensure a valid token is held, requesting a new one when *force* is
        set or the cached token has expired; switches the instance to token
        auth and returns the current TokenDetails."""
        self.__auth_mechanism = Auth.Method.TOKEN
        # Persist supplied token_params (minus timestamp) as the new defaults.
        if token_params is None:
            token_params = dict(self.auth_options.default_token_params)
        else:
            self.auth_options.default_token_params = dict(token_params)
            self.auth_options.default_token_params.pop('timestamp', None)
        if auth_options is not None:
            self.auth_options.replace(auth_options)
        auth_options = dict(self.auth_options.auth_options)
        if self.client_id is not None:
            token_params['client_id'] = self.client_id
        token_details = self.__token_details
        # Reuse the cached token when it is still valid and not forced out.
        if not force and not self.token_details_has_expired():
            log.debug("using cached token; expires = %d",
                      token_details.expires)
            return token_details
        self.__token_details = await self.request_token(token_params, **auth_options)
        self._configure_client_id(self.__token_details.client_id)
        return self.__token_details
    def token_details_has_expired(self):
        """Return True if no token is held or the held token is within the
        expiry buffer of its expiration time (corrected by the known
        local/server time offset)."""
        token_details = self.__token_details
        if token_details is None:
            return True
        # A token without an expiry is treated as never expiring.
        expires = token_details.expires
        if expires is None:
            return False
        timestamp = self._timestamp()
        if self.__time_offset:
            timestamp += self.__time_offset
        return expires < timestamp + token_details.TOKEN_EXPIRY_BUFFER
    async def authorize(self, token_params=None, auth_options=None):
        """Force acquisition of a new token and return its TokenDetails."""
        return await self.__authorize_when_necessary(token_params, auth_options, force=True)
    async def authorise(self, *args, **kwargs):
        """Deprecated spelling of authorize(); kept for backwards compatibility."""
        warnings.warn(
            "authorise is deprecated and will be removed in v2.0, please use authorize",
            DeprecationWarning)
        return await self.authorize(*args, **kwargs)
async def request_token(self, token_params=None,
# auth_options
key_name=None, key_secret=None, auth_callback=None,
auth_url=None, auth_method=None, auth_headers=None,
auth_params=None, query_time=None):
token_params = token_params or {}
token_params = dict(self.auth_options.default_token_params,
**token_params)
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
log.debug("Auth callback: %s" % auth_callback)
log.debug("Auth options: %s" % self.auth_options)
if query_time is None:
query_time = self.auth_options.query_time
query_time = bool(query_time)
auth_callback = auth_callback or self.auth_options.auth_callback
auth_url = auth_url or self.auth_options.auth_url
auth_params = auth_params or self.auth_options.auth_params or {}
auth_method = (auth_method or self.auth_options.auth_method).upper()
auth_headers = auth_headers or self.auth_options.auth_headers or {}
log.debug("Token Params: %s" % token_params)
if auth_callback:
log.debug("using token auth with authCallback")
token_request = await auth_callback(token_params)
elif auth_url:
log.debug("using token auth with authUrl")
token_request = await self.token_request_from_auth_url(
auth_method, auth_url, token_params, auth_headers, auth_params)
else:
token_request = await self.create_token_request(
token_params, key_name=key_name, key_secret=key_secret,
query_time=query_time)
if isinstance(token_request, TokenDetails):
return token_request
elif isinstance(token_request, dict) and 'issued' in token_request:
return TokenDetails.from_dict(token_request)
elif isinstance(token_request, dict):
token_request = TokenRequest.from_json(token_request)
elif isinstance(token_request, str):
return TokenDetails(token=token_request)
token_path = "/keys/%s/requestToken" % token_request.key_name
response = await self.ably.http.post(
token_path,
headers=auth_headers,
body=token_request.to_dict(),
skip_auth=True
)
AblyException.raise_for_response(response)
response_dict = response.to_native()
log.debug("Token: %s" % str(response_dict.get("token")))
return TokenDetails.from_dict(response_dict)
async def create_token_request(self, token_params=None,
key_name=None, key_secret=None, query_time=None):
token_params = token_params or {}
token_request = {}
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
if not key_name or not key_secret:
log.debug('key_name or key_secret blank')
raise AblyException("No key specified: no means to generate a token", 401, 40101)
token_request['key_name'] = key_name
if token_params.get('timestamp'):
token_request['timestamp'] = token_params['timestamp']
else:
if query_time is None:
query_time = self.auth_options.query_time
if query_time:
if self.__time_offset is None:
server_time = await self.ably.time()
local_time = self._timestamp()
self.__time_offset = server_time - local_time
token_request['timestamp'] = server_time
else:
local_time = self._timestamp()
token_request['timestamp'] = local_time + self.__time_offset
else:
token_request['timestamp'] = self._timestamp()
token_request['timestamp'] = int(token_request['timestamp'])
ttl = token_params.get('ttl')
if ttl is not None:
if isinstance(ttl, timedelta):
ttl = ttl.total_seconds() * 1000
token_request['ttl'] = int(ttl)
capability = token_params.get('capability')
if capability is not None:
token_request['capability'] = str(Capability(capability))
token_request["client_id"] = (
token_params.get('client_id') or self.client_id)
# Note: There is no expectation that the client
# specifies the nonce; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes
token_request["nonce"] = token_params.get('nonce') or self._random_nonce()
token_request = TokenRequest(**token_request)
if token_params.get('mac') is None:
# Note: There is no expectation that the client
# specifies the mac; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes.
token_request.sign_request(key_secret.encode('utf8'))
else:
token_request.mac = token_params['mac']
return token_request
@property
def ably(self):
return self.__ably
@property
def auth_mechanism(self):
return self.__auth_mechanism
@property
def auth_options(self):
return self.__auth_options
@property
def auth_params(self):
return self.__auth_params
@property
def basic_credentials(self):
return self.__basic_credentials
@property
def token_credentials(self):
if self.__token_details:
token = self.__token_details.token
token_key = base64.b64encode(token.encode('utf-8'))
return token_key.decode('ascii')
@property
def token_details(self):
return self.__token_details
@property
def client_id(self):
return self.__client_id
@property
def time_offset(self):
return self.__time_offset
def _configure_client_id(self, new_client_id):
# If new client ID from Ably is a wildcard, but preconfigured clientId is set,
# then keep the existing clientId
if self.client_id != '*' and new_client_id == '*':
self.__client_id_validated = True
return
# If client_id is defined and not a wildcard, prevent it changing, this is not supported
if self.client_id is not None and self.client_id != '*' and new_client_id != self.client_id:
raise IncompatibleClientIdException(
"Client ID is immutable once configured for a client. "
"Client ID cannot be changed to '{}'".format(new_client_id), 400, 40012)
self.__client_id_validated = True
self.__client_id = new_client_id
def can_assume_client_id(self, assumed_client_id):
if self.__client_id_validated:
return self.client_id == '*' or self.client_id == assumed_client_id
elif self.client_id is None or self.client_id == '*':
return True # client ID is unknown
else:
return self.client_id == assumed_client_id
async def _get_auth_headers(self):
if self.__auth_mechanism == Auth.Method.BASIC:
# RSA7e2
if self.client_id:
return {
'Authorization': 'Basic %s' % self.basic_credentials,
'X-Ably-ClientId': base64.b64encode(self.client_id.encode('utf-8'))
}
return {
'Authorization': 'Basic %s' % self.basic_credentials,
}
else:
await self.__authorize_when_necessary()
return {
'Authorization': 'Bearer %s' % self.token_credentials,
}
def _timestamp(self):
"""Returns the local time in milliseconds since the unix epoch"""
return int(time.time() * 1000)
def _random_nonce(self):
return uuid.uuid4().hex[:16]
async def token_request_from_auth_url(self, method, url, token_params, headers, auth_params):
body = None
params = None
if method == 'GET':
body = {}
params = dict(auth_params, **token_params)
elif method == 'POST':
params = {}
body = dict(auth_params, **token_params)
from ably.http.http import Response
async with httpx.AsyncClient(http2=True) as client:
resp = await client.request(method=method, url=url, headers=headers, params=params, data=body)
response = Response(resp)
AblyException.raise_for_response(response)
try:
token_request = response.to_native()
except ValueError:
token_request = response.text
return token_request
| null | ably/rest/auth.py | auth.py | py | 13,689 | python | en | code | null | code-starcoder2 | 51 |
603279648 | import numpy as np
from numpy import linalg
def compute_stats(m, w):
"""
m: 1-D array
w: 2-D array
"""
s_i = m
A = -w
for i in range(w.shape[0]):
A[i][i] = 1 / (1 - m[i] * m[i])
A_inv = linalg.inv(A)
s_i_s_j = np.dot(m.reshape(len(m), 1), m.reshape(1, len(m))) + A_inv
return s_i, s_i_s_j
| null | boltzmann/linear_respone.py | linear_respone.py | py | 340 | python | en | code | null | code-starcoder2 | 51 |
452662021 | from pathlib import Path
import torch
from torch import nn
from torch.nn.modules import Module
from typing import TypeVar, Callable, Tuple, Optional, Any, Mapping
from model import EAST
Model = TypeVar("Model", bound=Module)
# @dataclass
# class LoadedModel(Generic[Model]):
# model: Model
# device: torch.device
def load_east_model(
serialized_model: Path, pretrained: bool = True, set_eval: bool = True
) -> Tuple[EAST, torch.device]:
return load_model(
serialized_model, model_init=lambda: EAST(pretrained), set_eval=set_eval,
)
def get_torch_device(cuda_device_num: int = 0) -> torch.device:
return torch.device(
f"cuda:{cuda_device_num}" if torch.cuda.is_available() else "cpu"
)
def load_model(
serialized_model: Path,
model_init: Callable[[], Model],
set_eval: bool = True,
cuda_device_num: int = 0,
) -> Tuple[Model, torch.device]:
device = torch.device(
f"cuda:{cuda_device_num}" if torch.cuda.is_available() else "cpu"
)
model = model_init().to(device)
model.load_state_dict(
torch.load(str(serialized_model.absolute()), map_location=device)
)
if set_eval:
model.eval()
return model, device
class EarlyStopping:
"""Early stopping regularization. Use :func:`observe_step` on each model training epoch.
Source: https://github.com/Bjarten/early-stopping-pytorch/blob/master/pytorchtools.py
"""
def __init__(
self,
model_name_prefix: str,
lower_is_better: bool,
patience: int = 7,
verbose: bool = False,
delta: float = 0.0,
) -> None:
"""Performs field assignment with the supplied parameters and initializes internal state.
Args:
model_name_prefix: Name for model
lower_is_better: If `True`, lower values of the validation metric are better.
Otherwise, larger values are considered an improvement.
patience: How long to wait after last time validation metric improved.
verbose: If True, prints a message for each validation metric improvement.
delta: Minimum change in the monitored quantity to qualify as an improvement.
"""
self.model_name_prefix = model_name_prefix
self.lower_is_better = lower_is_better
self.patience = patience
self.verbose = verbose
self.delta = delta
self.reset()
def reset(self) -> None:
"""Sets all mutable state to initial conditions.
NOTE: MUTATION: Initializes `counter`, `early_stop`, `best_val_metric`, `checkpoint_num`,
`best_name`.
"""
self.counter = 0
self.early_stop = False
self.best_val_metric: Optional[float] = None
self.checkpoint_num = 0
self.best_name = ""
def __call__(self, *args) -> bool:
"""Alias for :func:`observe_step` and then returns whether or not the
early stopping criterion was hit.
"""
self.observe_step(*args)
return self.early_stop
def observe_step(self, val_metric: float, model: nn.Module) -> None:
"""Observe the validation metric on the `model` for a discrete training step.
NOTE: MUTATION: Potentially updates `counter`, `best_score`, `early_stop`,
`best_val_metric`, `checkpoint_num`.
"""
if self.early_stop:
if self.verbose:
print(
f"Cannot observe step. Already stopped early.\n{self.saved_info()}"
)
elif self.loss_improvement(val_metric):
self.save_checkpoint(val_metric, model)
else:
self.increment()
def loss_improvement(self, val_metric: float) -> bool:
"""Evaluates to `True` iff `val_metric` is an improvement on the best observed validation metric.
`False` otherwise.
"""
return self.best_val_metric is None or (
# e.g. new loss is lower than the best & the improvement threshold
(val_metric < self.best_val_metric - self.delta)
if self.lower_is_better
else (val_metric > self.best_val_metric + self.delta)
# e.g. new accuracy is higher than the best & the improvement threshold
)
def save_checkpoint(self, val_loss: float, model: nn.Module) -> None:
"""Checkpoints model. Use when `val_loss` is an improvement.
NOTE: MUTATION: Sets `best_val_metric`, `best_score` to neg. val loss, resets `counter`,
and increments `checkpoint_num`.
"""
if self.verbose:
if self.best_val_metric is None:
print(
"Initial observation. "
f"Setting best validation metric to '{val_loss:.6f}' "
f"for checkpoint '{self.checkpoint_num}'"
)
else:
print(
f"Validation metric improvement ({self.best_val_metric:.6f} --> {val_loss:.6f}). "
f"Saving model for checkpoint '{self.checkpoint_num}'..."
)
filename = self.checkpoint_name()
torch.save(model.state_dict(), filename)
self.best_name = filename
self.best_val_metric = val_loss
self.counter = 0
self.checkpoint_num += 1
def checkpoint_name(self) -> str:
"""Current filename for model when it is checkpointed next.
"""
return f"{self.model_name_prefix}--{self.checkpoint_num}_checkpoint.pth"
def increment(self) -> None:
"""Increment internal counters due to observing a training step without an improvement of validation loss.
Sets `early_stop` to `True` iff the incrementing the `counter` here exceeds the `patience` threshold.
NOTE: MUTATION: Increments `counter`, potentially sets `early_stop`.
"""
self.counter += 1
if self.verbose:
print(f"EarlyStopping counter: {self.counter} out of {self.patience}")
if self.counter >= self.patience:
self.early_stop = True
if self.verbose:
print(f"Stopped early. {self.saved_info()}")
def saved_info(self) -> str:
"""Human-readable logging string of the current minimum validation loss and checkpoint model filename.
"""
return f"Best validation metric '{self.best_val_metric:.6f}' saved as '{self.best_name}'"
| null | reusable.py | reusable.py | py | 6,499 | python | en | code | null | code-starcoder2 | 51 |
155289661 | import unittest
import solutions.maximum_width_of_binary_tree.index as main
from solutions._class.tree_node import TreeNode, createTreeNode
class Test(unittest.TestCase):
def test_widthOfBinaryTree(self):
test_patterns = [
([0, 0, 0, 0, None, None, 0, None, None, None, 0], 4),
([1, 3, 2, 5, 3, None, 9], 4),
([0], 1),
]
for i, (arg, expected) in enumerate(test_patterns):
with self.subTest(test=i):
s = main.Solution()
tree: TreeNode = createTreeNode(arg)
self.assertEqual(s.widthOfBinaryTree(tree), expected)
if __name__ == '__main__':
unittest.main()
| null | solutions/maximum_width_of_binary_tree/test.py | test.py | py | 687 | python | en | code | null | code-starcoder2 | 51 |
230847347 |
import os
from collections import Counter
from operator import itemgetter
from classification import getModel
from classification import getTrTWContext
DATA = os.environ['data']
def eval_instances():
instance_file = os.path.join(DATA, 'twitter/self_reveal/user_pool0.csv')
filtered_file = os.path.join(DATA, 'twitter/self_reveal/user_pool2.csv')
first_model = getModel()
fout = open(filtered_file, 'w')
for line in open(instance_file):
user_id, target = line.rstrip('\n').split('\t')
context = getTrTWContext(user_id)
if context is None:
continue
weight = 1
score = first_model.eval(context, target)
if score > .25:
fout.write(user_id + '\t' + target + '\n')
fout.close()
if __name__ == "__main__":
eval_instances()
| null | bigdata/reclassification.py | reclassification.py | py | 836 | python | en | code | null | code-starcoder2 | 51 |
86864985 | class Decode:
def __init__(self, codedText : str = "", key : str = ""):
self.__codedText = codedText
self.__key = key
self.__decodedText = ""
def decode(self):
self.decodeXor()
self.decodeCesar()
return self.__decodedText
def decodeCesar(self):
decodedText = ""
key = int(self.__key[len(self.__key) - 1])
for i in range(len(self.__codedText)):
char = self.__codedText[i] # Toma el caracter i
if ord(char) - key < 32 and chr(ord(char) - key) != '\n': # Comprueba limite de ASCII para evitar errores (espacio), si da el ASCII de salto de linea, no se hace
char = chr(ord(char) + 95) # El desplazamiento llegará por el límite superior
char = chr(ord(char) - key) # Convierte el caracter a ascii, realiza el desplazamiento hacia atrás y lo regresa a caracter
decodedText = decodedText + char
self.__decodedText = decodedText
def decodeXor(self):
codedText = self.__codedText
decodedText = ""
key = self.__key
key = key[:-1]
self.__key = self.__key[len(self.__key) - 1]
temp = ""
for i in range(len(codedText)):
temp = chr(ord(codedText[i]) ^ ord(key[i]))
decodedText = decodedText + temp
temp = ""
self.__codedText = decodedText
self.__decodedText = decodedText | null | Practica_4/decode.py | decode.py | py | 1,487 | python | en | code | null | code-starcoder2 | 51 |
169059872 | i = 0
n = ['white', 'white', 'black', 'white', 'black', 'white', 'white', 'white', 'black', 'black']
# n = ['white', 'white', 'black', 'white', 'black']
answers = []
success_min = len(n) - 1
def find_parity(known):
print("Нийт малгай: ", n)
print("Мэдэгдэж байгаа: " + str(known))
no_white = known.count('white')
print ("Нийт цагаан малгайны тоо: " + str(no_white))
if no_white % 2 == 0:
parity = 'even'
else:
parity = 'odd'
print("---"*30)
return parity
def my_hat(n, i, success_min):
incorrect = 0
correct = 0
while i < len(n):
known = n[i+1:]
known_parity = find_parity(known)
if i == 0:
print("Хамгийн эхний хүн:",i+1)
if known_parity == 'even':
guess = 'white'
else:
guess = 'black'
elif i == 1:
print(i+1,"дах хүн:")
if current_parity != known_parity:
if guess == 'white':
guess = 'black'
else:
guess = 'white'
else:
print( "current_parity == known_parity")
else:
print(i+1,"дах хүн:")
past = answers[1:]
new_known = past + known
last = answers[-1]
print("last",last)
known_parity = find_parity(new_known)
print("known_parity:",known_parity)
print("current_parity:",current_parity)
if current_parity != known_parity:
if last == 'white':
guess = 'black'
else:
guess = 'white'
current_parity = known_parity
answers.append(guess)
print(" ТААСАН ХАРИУЛТ : " + guess)
if guess == n[i]:
correct += 1
print("ЗӨВ ХАРИУЛТЫН НИЙТ ТОО: " , str(correct))
else:
incorrect += 1
print( "БУРУУ ХАРИУЛТЫН НИЙТ ТОО:" , str(incorrect))
i += 1
print ("minimum needed to succeed: " , str(success_min))
return correct >= success_min
print ("Passed? " + str(my_hat(n, i, success_min)))
"""
Explain:
1. The first man counts the only white hats.
2. Then he can say “I’m white” if the total quantity of white hats is “even”. If it isn't even he can say “I’m black”. His chance is 50% 50%. He can mistake.
3. The second guy must check 2 conditions. The last total quantity of white hats is not equal to now total quantity of white hats. Second is, he can say “I’m black” if the first guy’s guess was “black”. If isn’t black he can say “I’m white”. It will be good. Because he knows the total quantity of white hats.
4. The third guy can say “I’m black” if the second guy's guess is “white”. If it isn’t “white”, he is the “white”.
5. The next guys are the same as the third guy.
"""
"""
1. Хамгийн эхний хүн буюу 10 дах хүн өмнөх бүх хүнийхээ цагаан өнгөтэй малгайг тоолно.
2. Хэрвээ нийт тоолсон цагаан малгай нь сондгой байвал "ЦАГААН" үгүй байвал "ХАР" гэж таана.
Энэ хүн нь л зөвхөн алдах эрхтэй ба боломж 50% 50% тай байна.
3. Дараагийн хүн нь өмнөх хүн нь "ЦАГААН" гэж таасан бол
"ХАР" гэж таана үгүй бол "ЦАГААН" гэж таана.
Өмнөх цагаан малгайны нийт тоо одоо байгаа цагаан малгайны тооноос өөр
байгаад өмнөх хүний хариулт цагаан байсан бол хар үгүй бол цагаан
гэж хариулна
"""
| null | WhiteOrBlackHats.py | WhiteOrBlackHats.py | py | 3,970 | python | en | code | null | code-starcoder2 | 51 |
412799970 | import numpy as np
from statistics import mode
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
class Ensamble:
# Constructor para inicializar datos
def __init__(self):
# Clasificadores
self.clf_1: KNeighborsClassifier = None
self.clf_2: GaussianNB = None
self.clf_3: MLPClassifier = None
# ¿Utilizar PCA en clf1?
self.pca_1: bool = False
# ¿Utilizar PCA en clf1?
self.pca_2: bool = False
# Predicción final
self.pred_final: list = []
self.clases: list = []
# Resultados
self.resultados: list = []
# Inicializar primer clasificador
def k_vecinos(self, n_vecinos: int, pca: bool = False):
self.clf_1 = KNeighborsClassifier(n_neighbors=n_vecinos)
self.pca_1 = pca
# Inicializar segundo clasificador
def nativa_Bayes(self, pca: bool = False):
self.clf_2 = GaussianNB()
self.pca_2 = pca
# Inicializar tercer clasificador
def red_neuronal(self, capas_ocultas: str,
activacion: str, solucionador: str,
alfa: float, max_itr: int):
layer_sizes: list = capas_ocultas.split(',')
for i in range(len(layer_sizes)):
layer_sizes[i] = int(layer_sizes[i])
hls = tuple(layer_sizes)
self.clf_3 = MLPClassifier(hidden_layer_sizes=hls,
activation=activacion,
solver=solucionador, alpha=alfa,
max_iter=max_itr)
# Entrenar clasificadores
def fit(self, X, y, pca_X):
# Decidir si se desea utilizar el espacio PCA
if self.pca_1:
self.clf_1.fit(pca_X, y)
else:
self.clf_1.fit(X, y)
if self.pca_2:
self.clf_2.fit(pca_X, y)
else:
self.clf_2.fit(X, y)
self.clf_3.fit(X, y)
# Ensamble
def ensamble_votacion(self, X, y, pca_X):
# Decidir si se desea utilizar el espacio PCA
if self.pca_1:
pred_1 = self.clf_1.predict(pca_X)
else:
pred_1 = self.clf_1.predict(X)
if self.pca_2:
pred_2 = self.clf_2.predict(pca_X)
else:
pred_2 = self.clf_2.predict(X)
pred_3 = self.clf_3.predict(X)
# Ensamble de votación: se toma la clase
# con más frecuencia gracias a la función mode()
prediccion = np.array([])
for i in range(0, len(X)):
prediccion = np.append(prediccion, mode([pred_1[i], pred_2[i], pred_3[i]]))
# Guardar los datos para la predicción final
for j in range(0, len(X)):
self.pred_final.append(int(prediccion[j]))
# Guardar la puntuación (parcial) del resultado
self.resultados.append(self.puntuacion(y, prediccion))
@staticmethod
def puntuacion(y, p):
count = 0
for i in range(0, len(y)):
if y[i] == p[i]:
count += 1
score = (count * 100) / len(y)
return score
# Limpiar datos de colecciones
def __clear_data(self):
self.pred_final.clear()
self.clases.clear()
self.resultados.clear()
# Validación cruzada (kfold)
def validacion_cruzada(self, n_splits: int, X, y, pca_X):
self.__clear_data()
# Inicializar kfold con el número de divisiones
kf = KFold(n_splits=n_splits)
# Datos de entrenamiento y prueba
# con los que no se utilizará PCA
X_train: list = []
X_test: list = []
y_train: list = []
y_test: list = []
# Datos de entrenamiento y prueba
# con los que se utilizará PCA
pca_X_train: list = []
pca_X_test: list = []
# Llenar datos de entrenamiento y prueba
for train_i, test_i in kf.split(X, y):
X_train.append(X[train_i])
X_test.append(X[test_i])
y_train.append(y[train_i])
y_test.append(y[test_i])
y_tt = y[test_i]
# Guardar datos para comparación final
for i in range(0, len(y_tt)):
self.clases.append(y_tt[i])
# Llenar datos de entrenamiento y prueba con PCA
for train_index, test_index in kf.split(pca_X, y):
pca_X_train.append(X[train_index])
pca_X_test.append(X[test_index])
# Entrenamiento y ensamble por pliegue de la validación
for i in range(0, len(X_train)):
self.fit(X_train[i], y_train[i], pca_X_train[i])
self.ensamble_votacion(X_test[i], y_test[i], pca_X_test[i])
# Retornar la puntuación promedio
return self.puntuacion(self.clases, self.pred_final)
| null | Ensamble.py | Ensamble.py | py | 4,906 | python | en | code | null | code-starcoder2 | 51 |
440408189 | # =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
from maskgen.tool_set import getMilliSecondsAndFrameCount
import cv2
from maskgen.algorithms.optical_flow import smartAddFrames
from maskgen.tool_set import getDurationStringFromMilliseconds
"""
Returns the start and end time of the frames added
"""
def transform(img,source,target,**kwargs):
start_time = getMilliSecondsAndFrameCount(kwargs['Start Time']) if 'Start Time' in kwargs else (0,1)
end_time = getMilliSecondsAndFrameCount(kwargs['End Time']) if 'End Time' in kwargs else None
frames_add = int(kwargs['Frames to Add']) if 'Frames to Add' in kwargs else None
if frames_add is not None:
end_time = (start_time[0],start_time[1] + frames_add - 1)
codec = (kwargs['codec']) if 'codec' in kwargs else 'XVID'
add_frames, end_time_millis = smartAddFrames(source, target,
start_time,
end_time,
codec=codec,
direction=kwargs['Direction'] if 'Direction' in kwargs else 'forward')
if start_time[0] > 0:
et = getDurationStringFromMilliseconds(end_time_millis)
else:
et = str(int(start_time[1]) + int(add_frames) - 1)
return {'Start Time':str(kwargs['Start Time']),
'End Time': et,
'Frames to Add': int(add_frames),
'Method': 'Pixel Motion',
'Algorithm':'Farneback',
'scale':0.8,
'levels':7,
'winsize':15,
'iterations': 3,
'poly_n':7,
'poly_sigma':1.5,
'Vector Detail':100},None
def suffix():
return '.avi'
def operation():
return {'name':'TimeAlterationWarp',
'category':'TimeAlteration',
'description':'Insert frames using optical flow given a starting point and desired end time.',
'software':'OpenCV',
'version':cv2.__version__,
'arguments': {
'Frames to Add': {
'type': 'int[0:100000000]',
'defaultvalue': 1,
'description':'Number of frames since Start Time. overrides or in lieu of an End Time.'
},
'Direction': {
'type': 'list',
'values':['forward','backward'],
'defaultvalue': 'forward',
'description': 'Direction of flow.'
},
'codec': {
'type': 'list',
'values': ['MPEG','XVID','AVC1','HFYU'],
'defaultvalue': 'XVID',
'description': 'Codec of output video.'
}
},
'transitions': [
'video.video'
]
}
| null | plugins/FlowDrivenVideoTimeWarp/__init__.py | __init__.py | py | 3,055 | python | en | code | null | code-starcoder2 | 51 |
170060026 | from cities_format import city_format
print("\n\tEnter 'q' at any time to quit.")
while True:
city = input("\nInsert city : ")
if city == 'q':
break
country = input("Insert country : ")
if country == 'q':
break
population = input("Insert population (Empty if not) : ")
if population == 'q':
break
full_name = city_format(city, country, population)
print("\n\t Neatly formatted city name: " + full_name + ". ")
| null | src/cities.py | cities.py | py | 469 | python | en | code | null | code-starcoder2 | 51 |
15963227 | import unittest
import zserio
from testutils import getZserioApi
class VariableArrayVarUIntTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "array_types.zs").variable_array_varuint
def testBitSizeOf(self):
numElements = 33
compoundArray = [self.api.TestStructure.fromFields(i, "Name" + str(i)) for i in range(numElements)]
variableArray = self.api.VariableArray.fromFields(numElements, compoundArray)
bitPosition = 2
numOneNumberIndexes = 10
expectedBitSize = (1 + numElements * (4 + 7) - numOneNumberIndexes) * 8
self.assertEqual(expectedBitSize, variableArray.bitSizeOf(bitPosition))
def testInitializeOffsets(self):
numElements = 33
compoundArray = [self.api.TestStructure.fromFields(i, "Name" + str(i)) for i in range(numElements)]
variableArray = self.api.VariableArray.fromFields(numElements, compoundArray)
bitPosition = 2
numOneNumberIndexes = 10
expectedEndBitPosition = bitPosition + (1 + numElements * (4 + 7) - numOneNumberIndexes) * 8
self.assertEqual(expectedEndBitPosition, variableArray.initializeOffsets(bitPosition))
def testRead(self):
numElements = 59
writer = zserio.BitStreamWriter()
VariableArrayVarUIntTest._writeVariableArrayToStream(writer, numElements)
reader = zserio.BitStreamReader(writer.getByteArray())
variableArray = self.api.VariableArray.fromReader(reader)
self.assertEqual(numElements, variableArray.getNumElements())
compoundArray = variableArray.getCompoundArray()
self.assertEqual(numElements, len(compoundArray))
for i in range(numElements):
testStructure = compoundArray[i]
self.assertEqual(i, testStructure.getId())
self.assertTrue(testStructure.getName() == "Name" + str(i))
def testWrite(self):
numElements = 33
compoundArray = [self.api.TestStructure.fromFields(i, "Name" + str(i)) for i in range(numElements)]
variableArray = self.api.VariableArray.fromFields(numElements, compoundArray)
writer = zserio.BitStreamWriter()
variableArray.write(writer)
reader = zserio.BitStreamReader(writer.getByteArray())
readVariableArray = self.api.VariableArray.fromReader(reader)
self.assertEqual(numElements, readVariableArray.getNumElements())
readCompoundArray = readVariableArray.getCompoundArray()
self.assertEqual(numElements, len(readCompoundArray))
for i in range(numElements):
readTestStructure = readCompoundArray[i]
self.assertEqual(i, readTestStructure.getId())
self.assertTrue(readTestStructure.getName() == "Name" + str(i))
def testWriteWrongArray(self):
numElements = 33
compoundArray = [self.api.TestStructure.fromFields(i, "Name" + str(i)) for i in range(numElements)]
variableArray = self.api.VariableArray.fromFields(numElements + 1, compoundArray)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
variableArray.write(writer)
@staticmethod
def _writeVariableArrayToStream(writer, numElements):
writer.writeBits(numElements, 8)
for i in range(numElements):
writer.writeBits(i, 32)
writer.writeString("Name" + str(i))
| null | test/language/array_types/python/VariableArrayVarUInt.py | VariableArrayVarUInt.py | py | 3,442 | python | en | code | null | code-starcoder2 | 51 |
394048038 | #!/usr/bin/env python
import pandas as pd
import sys
import os
import argparse
import math
import os
import altair as alt
import pandas as pd
import numpy as np
import yaml
import glob
from yaml import Loader, Dumper
def generic_df_reader(args):
    """Read the user-supplied table (csv/tsv/npz) into a pandas DataFrame.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``input`` (path), ``sep`` ("auto" or an explicit
        separator), ``header`` (bool) and ``index`` (bool).

    Returns
    -------
    pandas.DataFrame
    """
    if args.input.split(".")[-1] == "npz":
        # bug fix: previously this loaded the hard-coded file 'result.npz'
        # instead of the user-supplied input path
        npz = np.load(args.input)
        df = pd.DataFrame(npz['matrix'])
        df.columns = npz['labels']
        return df
    if args.sep == "auto":
        args.sep = guess_sep(args.input)
    # header=0 uses the first row as column names (same as pandas' default
    # 'infer' for plain files); header=None means the file has no header row.
    header = 0 if args.header else None
    index_col = 0 if args.index else None
    return pd.read_csv(args.input, sep=args.sep, header=header, index_col=index_col)
def guess_sep(x):
    """Guess the field separator of a text table from its first line.

    Returns "," or "\\t" depending on which one splits the first line
    into more fields; when the counts tie, ask the user to provide the
    separator explicitly and exit the program.
    """
    with open(x) as handle:
        for line in handle:
            n_commas = len(line.strip().split(","))
            n_tabs = len(line.strip().split("\t"))
            if n_commas > n_tabs:
                return ","
            if n_tabs > n_commas:
                return "\t"
            # ambiguous first line: cannot decide automatically
            print("Can't determine the separator. Please input manually")
            exit()
def zoom_bar(data, zoom_bar_color_by, zoom_bar_title, zoom_width, zoom_bar_x_col, zoom_bar_x_order, color_min_v, color_max_v):
    """Build the grey one-row heatmap used as an interactive zoom bar.

    Parameters
    ----------
    data : pandas.DataFrame
        Long-format table containing the x column and the metric column.
    zoom_bar_color_by : str
        Column in ``data`` whose values drive the grey color scale.
    zoom_bar_title : str
        Legend title for the zoom bar.
    zoom_width : int
        Chart width in pixels.
    zoom_bar_x_col : str
        Column in ``data`` used for the x axis (ordinal).
    zoom_bar_x_order : list
        Explicit sort order for the x axis.
    color_min_v, color_max_v : float
        Color scale domain.

    Returns
    -------
    (altair.Chart, altair.Selection)
        The zoom-bar chart and the interval brush bound to it; the brush
        is meant to be reused as a filter on the main heatmap.
    """
    brush = alt.selection_interval(
        encodings=['x'],
        mark=alt.BrushConfig(stroke='black', strokeWidth=2))
    x_encoding = alt.X(f'{zoom_bar_x_col}:O', sort=zoom_bar_x_order)
    grey_scale = alt.Scale(scheme='greys', domain=[color_min_v, color_max_v])
    legend = alt.Legend(orient='left',
                        labelFontSize=15,
                        titleFontSize=16,
                        title=zoom_bar_title)
    color_encoding = alt.Color(zoom_bar_color_by, scale=grey_scale, legend=legend)
    chart = (alt.Chart(data)
             .mark_rect()
             .encode(x=x_encoding, color=color_encoding)
             .add_selection(brush)
             .properties(width=zoom_width, title='zoom bar'))
    return chart, brush
def DMS_heatmaps(data, tooltips, heatmap_color_by, heatmap_x_col, heatmap_x_order, heatmap_y_col, heatmap_y_order, color_min_v, color_max_v, heatmap_star_annotation_col, heatmap_height, zoom_brush):
    """Build the main DMS heatmap as a three-layer altair chart.

    Layers (bottom to top):
      * value layer  -- rects colored by ``heatmap_color_by`` with a
        mouse-over highlight and tooltips,
      * null layer   -- semi-transparent grey rects for missing values
        (without it, missing data renders white),
      * star layer   -- text annotations from ``heatmap_star_annotation_col``.

    Parameters
    ----------
    data : pandas.DataFrame
        Long-format table with x, y, value and annotation columns.
    tooltips : list
        Column names to show on mouse hover.
    heatmap_color_by : str
        Quantitative column driving the diverging color scale.
    heatmap_x_col, heatmap_x_order : str, list
        X-axis column and its explicit sort order.
    heatmap_y_col, heatmap_y_order : str, list
        Y-axis column and its explicit sort order.
    color_min_v, color_max_v : float
        Color scale domain.
    heatmap_star_annotation_col : str
        Column whose text (e.g. "*") is drawn on top of the cells.
    heatmap_height : int
        Chart height in pixels.
    zoom_brush : altair.Selection
        Interval selection from the zoom bar, applied as an x filter.

    Returns
    -------
    altair.Chart
    """
    # highlight the cell currently under the mouse
    hover = alt.selection_single(on='mouseover', empty='none')

    shared_x = alt.X(f'{heatmap_x_col}:O',
                     sort=heatmap_x_order,
                     axis=alt.Axis(titleFontSize=15))
    shared_y = alt.Y(f'{heatmap_y_col}:O',
                     sort=heatmap_y_order,
                     axis=alt.Axis(labelFontSize=12, titleFontSize=15))
    base = alt.Chart(data).encode(x=shared_x, y=shared_y)

    value_scale = alt.Scale(
        range=["#0505ff", '#afecfa', "#fafafa", "#fff6c2", "#fc0303"],
        type="linear",
        exponent=4,
        domain=[color_min_v, color_max_v])
    value_color = alt.Color(heatmap_color_by,
                            type='quantitative',
                            scale=value_scale,
                            legend=alt.Legend(orient='left', gradientLength=100))
    value_layer = base.mark_rect().encode(
        color=value_color,
        stroke=alt.value('black'),
        strokeWidth=alt.condition(hover, alt.value(2), alt.value(0)),
        tooltip=tooltips)

    star_layer = base.mark_text(color='black').encode(
        text=f'{heatmap_star_annotation_col}:N')

    null_layer = (base
                  .mark_rect()
                  .transform_filter(f"!isValid(datum.{heatmap_color_by})")
                  .mark_rect(opacity=0.5)
                  .encode(alt.Color(f'{heatmap_color_by}:N',
                                    scale=alt.Scale(scheme='greys'),
                                    legend=None)))

    combined = value_layer + null_layer + star_layer
    return (combined
            .interactive()
            .add_selection(hover)          # mouse-over highlighting
            .transform_filter(zoom_brush)  # restrict x range to the zoom brush
            .properties(height=heatmap_height,
                        title=heatmap_color_by.replace('_', ' ')))
def my_args():
mainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
mainParser.add_argument('-f',"--input", help="data table to be plot",required=True)
mainParser.add_argument('-o',"--output", help="output visualization html file",required=True)
mainParser.add_argument("--reformat_config", help="reformat data table",default=None)
mainParser.add_argument('--header', help="data table has header", action='store_true')
mainParser.add_argument('--index', help="data table has index", action='store_true')
mainParser.add_argument('--sep', help="data table separator", default="auto")
# mainParser.add_argument('-s',"--sample_list", help="table rows, a list of samples, these are supposed to be folder names, one column",required=True)
# mainParser.add_argument('-f','--feature_list', help="table columns, map file name to specific feature name",required=True)
# mainParser.add_argument('--softlinks', help=argparse.SUPPRESS,default="")
# mainParser.add_argument('--treatment_bam', help=argparse.SUPPRESS)
# mainParser.add_argument('--port', help=argparse.SUPPRESS)
##------- add parameters above ---------------------
args = mainParser.parse_args()
return args
def parse_file_kasey(f):
df = pd.read_csv(f,sep="\t")
df['pos'] = [x[:-1] for x in df.mutation]
df['pos2'] = [int(x[1:-1]) for x in df.mutation]
df['mutant'] = [x[-1] for x in df.mutation]
df['sig'] = df.apply(lambda r:abs(r.log2FoldChange)>1 and r.BF,axis=1)
df['BF'] = df.BF.map({True:"*",False:""})
df.sig = df.pos.map(df.groupby("pos")['sig'].sum().to_dict())
return df
def get_plot_parameters(f):
if not os.path.isfile(f):
print (f"{f} not exist")
exit()
return yaml.load(open(f),Loader=Loader)
args = my_args()
if args.reformat_config == "kasey":
df = parse_file_kasey(args.input)
args.reformat_config = "/home/yli11/HemTools/share/misc/interactive_heatmap.kasey.yaml"
else:
df = generic_df_reader(args)
# plot parameters and pre-process some variables, such as x-order
plot_parameters = get_plot_parameters(args.reformat_config)
# print (plot_parameters)
globals().update(plot_parameters)
# print(globals())
# print (tooltips)
tooltips = tooltips.split(",")
zoom_bar_x_order,ascending = zoom_bar_x_order.split(",")
zoom_bar_x_order = df.sort_values(zoom_bar_x_order,ascending=int(ascending)).drop_duplicates(zoom_bar_x_col)[zoom_bar_x_col].tolist()
heatmap_x_order,ascending = heatmap_x_order.split(",")
heatmap_x_order = df.sort_values(heatmap_x_order,ascending=int(ascending)).drop_duplicates(heatmap_x_col)[heatmap_x_col].tolist()
heatmap_y_order,ascending = heatmap_y_order.split(",")
heatmap_y_order = df.sort_values(heatmap_y_order,ascending=int(ascending)).drop_duplicates(heatmap_y_col)[heatmap_y_col].tolist()
if heatmap_star_annotation_col=="":
df['empty'] = ""
heatmap_star_annotation_col = "empty"
# main functions
zoom,zoom_brush = zoom_bar(df, zoom_bar_color_by, zoom_bar_title,zoom_width,zoom_bar_x_col,zoom_bar_x_order,zoom_bar_color_min_v,zoom_bar_color_max_v)
expression = DMS_heatmaps(df, tooltips,heatmap_color_by,heatmap_x_col,heatmap_x_order,heatmap_y_col,heatmap_y_order,heatmap_color_min_v,heatmap_color_max_v,heatmap_star_annotation_col,heatmap_height,zoom_brush)
# save chart
chart = (alt.vconcat(zoom, expression, spacing=0)
.configure_title(anchor='start',
fontSize=20))
chart.save(args.output)
| null | bin/interactive_heatmap.py | interactive_heatmap.py | py | 8,164 | python | en | code | null | code-starcoder2 | 51 |
497589186 | #
#-*- coding: utf-8 -*-
#
# -------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
import math
import numpy as np
def solveForComponents(fc, pm, kphi, kvco, N, gamma, loop_type='passive2'):
"""
:Parameters:
loop_type (str) -
* passive2 - 2nd order passive
* passive3 - 3rd order passive
* passive4 - 4th order passive
* active2 - 2nd order active
* active3 - 3rd order active
* active4 - 4th order active
fc (float) - 0dB crossover frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (1.024 default)
"""
if loop_type == 'passive2':
pll = PllSecondOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
elif loop_type == 'passive3':
pll = PllThirdOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
elif loop_type == 'passive4':
pll = PllFourthOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
return d
class PllSecondOrderPassive( object ):
""" The 2nd order passive phase locked loop object
"""
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.024)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
def calc_components(self):
""" return a dict with the component values """
d = {}
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma)
d['t2'] = self.calc_t2(self.fc,
d['t1'],
self.gamma)
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'])
d['c1'] = self.calc_c1(d['a0'],
d['t1'],
d['t2'])
d['c2'] = self.calc_c2(d['a0'],
d['c1'])
d['r2'] = self.calc_r2(d['c2'],
d['t2'])
d['a1'] = self.calc_a1(d['c1'],
d['c2'],
d['r2'])
d['a2'] = 0
d['a3'] = 0
d['r3'] = 0
d['r4'] = 0
d['c3'] = 0
d['c4'] = 0
d['t3'] = 0
d['t4'] = 0
return d
def calc_t1(self, fc, pm, gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (default=1.024)
"""
omega_c = 2*np.pi*fc
phi = np.pi*pm/180
t1 = (np.sqrt(((1+gamma)**2)*(np.tan(phi))**2 + 4*gamma) - (1+gamma)*np.tan(phi)) / (2*omega_c)
return t1
def calc_t2(self, fc, t1, gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
t1 (float) - time constant t1 in seconds
gamma (float) - optimization factor (default=1.024)
"""
omega_c = 2*np.pi*fc
return gamma/((omega_c**2)*t1)
def calc_a0(self, kphi, kvco, N, fc, t1, t2):
"""
:Parameters:
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
fc (float) - 0dB crossover frequency in Hz
t1 (float) - time constant t1 in seconds
t2 (float) - time constant t2 in seconds
"""
omega_c = 2*np.pi*fc
x = (kphi*kvco)/(N*omega_c**2)
y_num = np.sqrt(1+(omega_c**2)*(t2**2))
y_den = np.sqrt(1+(omega_c**2)*(t1**2))
a0 = x*y_num/y_den
return a0
def calc_c1(self, a0, t1, t2):
"""
:Parameters:
a0 (float) - loop filter coefficient
t1 (float) - time constant t1 in seconds
(t2 (float) - time constant t2 in seconds
"""
return a0*t1/t2
def calc_c2(self, a0, c1):
"""
:Parameters:
a0 (float) - loop filter coefficient
c1 (float) - capacitor in Farads
"""
return a0-c1
def calc_r2(self, c2, t2):
"""
:Parameters:
c2 (float) - capacitor in Farads
t2 (float) - time constant t2 in seconds
"""
return t2/c2
def calc_a1(self, c1, c2, r2):
"""
:Parameters:
c1 (float) - capacitor in Farads
c2 (float) - capacitor in Farads
r2 (float) - resistor in Ohms
"""
return c1*c2*r2
class PllThirdOrderPassive( PllSecondOrderPassive ):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.136,
t31=0.6):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.136)
t31 (float) - ratio of T3 to T1 (default=0.6)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.t31 = t31
def calc_components(self):
""" return a dict with the component values """
d = {}
omega_c = 2*np.pi*self.fc
# solve for time constants
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma)
d['t3'] = d['t1']*self.t31
d['t2'] = self.gamma/( (omega_c**2)*(d['t1'] + d['t3'] ) )
# solve for coefficients
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'],
d['t3'])
d['a1'] = d['a0']*(d['t1'] + d['t3'])
d['a2'] = d['a0']*d['t1']*d['t3']
# solve for components
d['c1'] = self.calc_c1(d['a0'],
d['a1'],
d['a2'],
d['t2'])
d['c3'] = self.calc_c3( d['a0'],
d['a1'],
d['a2'],
d['t2'],
d['c1'] )
d['c2'] = d['a0'] - d['c1'] - d['c3']
d['r2'] = d['t2']/d['c2']
d['r3'] = d['a2']/(d['c1']*d['c3']*d['t2'])
d['t4'] = 0
d['a3'] = 0
d['r4'] = 0
d['c4'] = 0
return d
def calc_c3( self,
a0,
a1,
a2,
t2,
c1 ):
return ( -(t2**2)*(c1**2) + t2*a1*c1 - a2*a0 )/( (t2**2)*c1 - a2 )
def calc_c1( self,
a0,
a1,
a2,
t2 ):
return (a2/(t2**2))*(1 + np.sqrt(1 + (t2/a2)*(t2*a0 - a1) ) )
def calc_a0( self,
kphi,
kvco,
N,
fc,
t1,
t2,
t3 ):
omega_c = 2*np.pi*fc
k1 = kphi*kvco/((omega_c**2)*(N))
k2 = np.sqrt( (1+(omega_c*t2)**2)/((1+(omega_c*t1)**2)*(1+(omega_c*t3)**2) ) )
return k1*k2
def calc_t1(self,
fc,
pm,
gamma,
t31=0.6,
num_iters=100):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (1.136)
num_iters (int) - number of times to loop
"""
a = 1e-15 # initial guess for a
b = 1.0 # initial guess for b
fa = self.func_t1(a,fc,pm,t31=t31,gamma=gamma)
fb = self.func_t1(b,fc,pm,t31=t31,gamma=gamma)
for i in range(num_iters):
guess = (a+b)/2
if (self.func_t1(guess,fc,pm,t31=t31,gamma=gamma) < 0):
b = guess
else:
a = guess
return guess
def func_t1(self,
x,
fc,
pm,
t31=0.6,
gamma=1.136):
""" simulate t1. This function is used to
numerically solve for T1.
Equation 22.31 in Dean Banerjee's Book
:Parameters:
x (float) - guess at t1
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
t31 (float) - ratio of t3 to t1
gamma (float) - optimization factor (1.136)
:Returns:
updated value for t1 based on guess (float)
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
val = np.arctan( gamma/(omega_c*x*(1+t31)) ) - \
np.arctan( omega_c*x ) - \
np.arctan( omega_c*x*t31 ) - phi
return val
def test4thOrderPassive( t31=0.4, t43=0.4 ):
fc = 10e3
pm = 47.8
kphi = 4e-3
kvco = 20e6
fout = 900e6
fpfd = 200e3
N = float(fout)/fpfd
fstart = 10
fstop = 100e6
ptsPerDec = 100
fref = 10e6
R = int(fref/fpfd)
# R = 1
pll = PllFourthOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=1.115,
t31=t31,
t43=t43)
d = pll.calc_components()
# return d
flt = {
'c1':d['c1'],
'c2':d['c2'],
'c3':d['c3'],
'c4':d['c4'],
'r2':d['r2'],
'r3':d['r3'],
'r4':d['r4'],
'flt_type':"passive"
}
f,g,p,fz,pz,ref_cl,vco_cl = simulatePll( fstart,
fstop,
ptsPerDec,
kphi,
kvco,
N,
R,
filt=flt)
return d, fz, pz
class PllFourthOrderPassive( PllSecondOrderPassive ):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.115,
t31=0.107,
t43=0.107):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.115)
t31 (float) - ratio of T3 to T1 (default=0.4)
t43 (float) - ratio of T4 to T3 (default=0.4)
note: for a realizable solution, t31 + t43 <= 1
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.t31 = t31
self.t43 = t43
def calc_components(self):
""" return a dict with the component values """
d = {}
omega_c = 2*np.pi*self.fc
# solve for time constants
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma,
t31=self.t31,
t43=self.t43)
# d['t1'] = 4.0685e-6
# print( 't1 = ' + str(d['t1']) )
d['t3'] = d['t1']*self.t31
d['t4'] = d['t1']*self.t31*self.t43
d['t2'] = self.gamma/( (omega_c**2)*(d['t1'] + d['t3'] + d['t4'] ) )
# solve for coefficients
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'],
d['t3'],
d['t4'])
d['a1'] = d['a0']*(d['t1'] + d['t3'] + d['t4'])
d['a2'] = d['a0']*(d['t1']*d['t3'] + d['t1']*d['t4'] + d['t3']*d['t4'])
d['a3'] = d['a0']*d['t1']*d['t3']*d['t4']
c1_t3, r3_t3 = self.calc_c1_r3(d['a0'],d['t1'],d['t2'],d['t3'])
c1_t4, r3_t4 = self.calc_c1_r3(d['a0'],d['t1'],d['t2'],d['t4'])
d['c1'] = (c1_t3 + c1_t4)/2
d['r3'] = (r3_t3 + r3_t4)/2
d['c2'], d['c3'] = self.calc_c2_c3( d['a0'],
d['a1'],
d['a2'],
d['a3'],
d['t2'],
d['r3'],
d['c1'] )
d['c4'] = d['a0']- d['c1']- d['c2'] - d['c3']
d['r2'] = d['t2']/d['c2']
d['r4'] = d['a3']/(d['t2']*d['r3']*d['c1']*d['c3']*d['c4'])
return d
def calc_c2_c3( self,
a0,
a1,
a2,
a3,
t2,
r3,
c1 ):
k0 = (a2/a3) - 1.0/t2 - 1.0/(c1*r3) - (a0 - c1)*t2*r3*c1/a3
k1 = a1 - t2*a0 - a3/(t2*r3*c1) - (a0 - c1)*r3*c1
a = a3/((t2*c1)**2)
b = t2 + r3*(c1 - a0) + (a3/(t2*c1))*((1.0/t2) - k0)
c = k1 - (k0*a3)/t2
c2 = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
c3 = (t2*a3*c1)/(r3*(k0*t2*a3*c1 - c2*(a3 - r3*((t2*c1)**2))))
return c2, c3
def calc_c1_r3( self,
a0,
t1,
t2,
tpole):
a1_t = a0*(t1+tpole)
a2_t = a0*t1*tpole
c1_t = (a2_t/(t2**2))*(1 + np.sqrt(1 + (t2/a2_t)*(t2*a0 - a1_t)) )
c3_t = (-1*(t2**2)*(c1_t**2) + t2*a1_t*c1_t - a2_t*a0)/((t2**2)*c1_t - a2_t)
r3_t = a2_t/(c1_t*c3_t*t2)
return c1_t, r3_t
def calc_a0( self,
kphi,
kvco,
N,
fc,
t1,
t2,
t3,
t4):
omega_c = 2*np.pi*fc
k1 = kphi*kvco/((omega_c**2)*(N))
k2 = np.sqrt(
(1+(omega_c*t2)**2)/((1+(omega_c*t1)**2)*(1+(omega_c*t3)**2)*(1+(omega_c*t4)**2) )
)
return k1*k2
def calc_t1(self,
fc,
pm,
gamma,
t31=0.4,
t43=0.4,
num_iters=100):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (1.136)
num_iters (int) - number of times to loop
"""
a = 1e-15 # initial guess for a
b = 1.0 # initial guess for b
fa = self.func_t1(a,fc,pm,t31=t31,t43=t43,gamma=gamma)
fb = self.func_t1(b,fc,pm,t31=t31,t43=t43,gamma=gamma)
for i in range(num_iters):
guess = (a+b)/2
if (self.func_t1(guess,fc,pm,t31=t31,t43=t43,gamma=gamma) < 0):
b = guess
else:
a = guess
return guess
def func_t1(self,
x,
fc,
pm,
t31=0.4,
t43=0.4,
gamma=1.136):
""" simulate t1. This function is used to
numerically solve for T1.
Equation 22.31 in Dean Banerjee's Book
:Parameters:
x (float) - guess at t1
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
t31 (float) - ratio of t3 to t1
gamma (float) - optimization factor (1.136)
:Returns:
updated value for t1 based on guess (float)
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
val = np.arctan( gamma/(omega_c*x*(1+t31)) ) - \
np.arctan( omega_c*x ) - \
np.arctan( omega_c*x*t31 ) -\
np.arctan( omega_c*x*t31*t43 ) - phi
return val
class PllFourthOrderPassive2(PllSecondOrderPassive):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.115):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.115)
t31 (float) - ratio of T3 to T1 (default=0.4)
t43 (float) - ratio of T4 to T3 (default=0.4)
note: for a realizable solution, t31 + t43 <= 1
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.pole3 = fc*30
self.pole4 = fc*10
def calc_t1(self,
fc,
pm,
gamma,
num_iters=100):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (1.136)
num_iters (int) - number of times to loop
"""
a = 1e-15 # initial guess for a
b = 1.0 # initial guess for b
fa = self.func_t1(a,fc,pm,gamma=gamma)
fb = self.func_t1(b,fc,pm,gamma=gamma)
for i in range(num_iters):
guess = (a+b)/2
if (self.func_t1(guess,fc,pm,gamma=gamma) < 0):
b = guess
else:
a = guess
return guess
def func_t1(self,
t1,
fc,
pm,
gamma=1.115):
""" simulate t1. This function is used to
numerically solve for T1.
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
t3 = 1.0/self.pole3
t4 = 1.0/self.pole4
# val = np.arctan2( 1.0, ( (omega_c)*(t1*t3*t4) )/gamma ) - \
# np.arctan2( 1.0, 1.0/omega_c*t1 ) - \
# np.arctan2( 1.0, 1.0/omega_c*t3 ) - \
# np.croarctan2( 1.0, 1.0/omega_c*t1*t4 ) - phi
val = np.arctan( gamma/( (omega_c)*(t1*t3*t4) ) ) - \
np.arctan( omega_c*t1 ) - \
np.arctan( omega_c*t3 ) - \
np.arctan( omega_c*t1*t4 ) - phi
return val
def calc_components(self):
""" return a dict with the component values """
d = {}
omega_c = 2*np.pi*self.fc
d['pole3'] = self.pole3
d['pole4'] = self.pole4
# solve for time constants
d['t1'] = self.calc_t1( self.fc,
self.pm,
gamma=self.gamma )
d['pole1'] = 1.0/d['t1']
d['t3'] = 1.0/self.pole3
d['t4'] = 1.0/self.pole4
d['t2'] = self.gamma/( (omega_c**2)*(d['t1'] + d['t3'] + d['t4'] ) )
d['zero'] = 1.0/d['t2']
# solve for coefficients
# d['a0'] = self.calc_a0(self.kphi,
# self.kvco,
# self.N,
# self.fc,
# d['t1'],
# d['t2'],
# d['t3'],
# d['t4'])
# d['a1'] = d['a0']*(d['t1'] + d['t3'] + d['t4'])
# d['a2'] = d['a0']*(d['t1']*d['t3'] + d['t1']*d['t4'] + d['t3']*d['t4'])
# d['a3'] = d['a0']*d['t1']*d['t3']*d['t4']
# c1_t3, r3_t3 = self.calc_c1_r3(d['a0'],d['t1'],d['t2'],d['t3'])
# c1_t4, r3_t4 = self.calc_c1_r3(d['a0'],d['t1'],d['t2'],d['t4'])
# d['c1'] = (c1_t3 + c1_t4)/2
# d['r3'] = (r3_t3 + r3_t4)/2
# d['c2'], d['c3'] = self.calc_c2_c3( d['a0'],
# d['a1'],
# d['a2'],
# d['a3'],
# d['t2'],
# d['r3'],
# d['c1'] )
# d['c4'] = d['a0']- d['c1']- d['c2'] - d['c3']
# d['r2'] = d['t2']/d['c2']
# d['r4'] = d['a3']/(d['t2']*d['r3']*d['c1']*d['c3']*d['c4'])
return d
def callSimulatePll( d ):
"""
"""
fstart = d['fstart']
fstop = d['fstop']
ptsPerDec = d['ptsPerDec']
kphi = d['kphi']
kvco = d['kvco']
N = d['N']
R = d['R']
flt_type = d['flt_type']
c1 = d['c1']
c2 = d['c2']
c3 = d['c3']
c4 = d['c4']
r2 = d['r2']
r3 = d['r3']
r4 = d['r4']
flt = {
'c1':c1,
'c2':c2,
'c3':c3,
'c4':c4,
'r2':r2,
'r3':r3,
'r4':r4,
'flt_type':flt_type
}
f, g, p, fz, pz, ref_cl, vco_cl = simulatePll( fstart,
fstop,
ptsPerDec,
kphi,
kvco,
N,
R,
filt=flt )
d = { 'freqs':f,
'gains':g,
'phases':p,
'fzero': fz,
'pzero': pz,
'ref_cl': ref_cl,
'vco_cl': vco_cl,
}
return d
def getInterpolatedFzeroPzero( f, g, p ):
""" look at the points of f, g and p surrounding where
g crosses zero and interpolate f and p at 0
"""
ndx = getIndexZeroDbCrossover( g )
f_zero_db = None
g_zero_db = None
p_zero_db = None
if ndx != None:
f_zero_db = f[ndx]
g_zero_db = g[ndx]
p_zero_db = p[ndx]
newf = f[ndx-1:ndx+1]
newp = p[ndx-1:ndx+1]
newg = g[ndx-1:ndx+1]
mg = (newg[1] - newg[0])/(newf[1] - newf[0])
mp = (newp[1] - newp[0])/(newf[1] - newf[0])
fz = newf[0] - (newg[0]/mg)
deltaf = fz - newf[0] # distance from newf[0] to 0db crossover
pz = 180 + mp*deltaf + newp[0]
return fz, pz
def getIndexZeroDbCrossover( g ):
for i in range(len(g)):
if g[i] <= 0:
return i
return None
def simulatePll( fstart,
fstop,
ptsPerDec,
kphi,
kvco,
N,
R,
filt=None,
coeffs=None ):
""" simulate an arbitrary phase-locked loop using either
filter coefficients or component values. return 3 lists:
f (frequencies), g_ol (open-loop gain), phases (open-loop phases)
"""
f = get_freq_points_per_decade(fstart, fstop, ptsPerDec)
if coeffs == None:
c1 = filt['c1']
c2 = filt['c2']
r2 = filt['r2']
if 'r3' not in filt.keys():
r3 = 0
c3 = 0
else:
c3 = filt['c3']
r3 = filt['r3']
if 'r4' not in filt.keys():
r4 = 0
c4 = 0
else:
c4 = filt['c4']
r4 = filt['r4']
coeffs = calculateCoefficients( c1=c1,
c2=c2,
c3=c3,
c4=c4,
r2=r2,
r3=r3,
r4=r4,
flt_type=filt['flt_type'])
a = coeffs
t2 = filt['r2']*filt['c2']
if len(a) == 2:
# 2nd order
a.append(0)
a.append(0)
elif len(a) == 3:
# 3rd order
a.append(0)
else:
pass
# loop filter impedance
# z = (1 + s*t2)/(s*(a[3]*s**3 + a[2]*s**2 + a[1]*s + a[0]))
z = calculateZ( f,
t2,
a[0],
a[1],
a[2],
a[3] )
# G(s)
# g = kphi*kvco*z/s
g = calculateG( f, z, kphi, kvco )
# # Open-loop gain
g_ol = g/N
g_ol_db = 10*np.log10(np.absolute(g_ol))
# ph_ol = 180 + np.unwrap(np.angle(g_ol))*180/np.pi
ph_ol = np.unwrap(np.angle(g_ol))*180/np.pi
# # Closed-loop reference transfer gain
cl_r = (1.0/R)*(g/(1+g/N))
cl_r_db = 20*np.log10(np.absolute(cl_r))
# # Closed-loop VCO transfer gain
cl_vco = 1.0/(1+g/N)
cl_vco_db = 20*np.log10(np.absolute(cl_vco))
# convert gains and phases to lists
# cannot return numpy array to javascript
g = []
p = []
g.extend(g_ol_db)
p.extend(ph_ol)
fz, pz = getInterpolatedFzeroPzero( f, g, p )
ref_cl = []
vco_cl = []
ref_cl.extend(cl_r_db)
vco_cl.extend(cl_vco_db)
return f, g, p, fz, pz, ref_cl, vco_cl
def interp_semilogx(x, y, num_points):
""" return a paired list of values each with length num_points where
the values are linearly interpolated with the x axis in the log scale.
Essentially, given arrays x and y, increase the resolution of to num_points
Parameters:
x (list) - x values (frequencies)
y (list) - y values (phase noise or gain in dB)
Note: x and y have a semilog X relationship.
Returns:
tuple of lists (freqs, values)
"""
# first, log-ify the x axis
log_x = []
for item in x:
log_x.append(math.log10(item)) # x_new, y_new = interp_linear(log_x, y, x_interp)
xmin = min(log_x)
xmax = max(log_x)
f_log = linspace(xmin, xmax, num_points)
y_interp = []
x_log = []
for x_val in x:
x_log.append(math.log10(x_val))
f = []
for xx in f_log:
f.append(10**(xx))
y_temp = interp_linear(x_log, y, xx)
y_interp.append(y_temp[1])
# f = [xx**(f_log) for xx in f_log]
return f, y_interp
# # return x_new, y_new
# return log_x, y
# # x_new, y_new = interp_linear(log_x, y, x_interp)
# # return x_new, y_new
# x_new, y_new = interp_linear(x, y, x_interp)
# return x_new, y_new
def linspace(a, b, num_points):
""" return a list of linearly spaced values
between a and b having num_points points
"""
inc = (float(b) - float(a))/(num_points-1)
ret_ar = []
for i in range(num_points):
ret_ar.append(a + i*inc)
return ret_ar
def interp_linear(x, y, x_interp):
""" linearly interpolate between two points with the
Parameters
x (list) - x values
y (list) - y values
Returns
tuple (x, y) where x is x_interp and y is the
interpolated y value
"""
if len(x) != len(y):
raise ValueError('x and y arrays need to be the same length')
x_interp = float(x_interp)
if x_interp < x[0]: # x_interp is below the lowest point in x array
# find the first slope and interpolate below
m = (y[1]-y[0])/(x[1]-x[0])
y_interp = (x_interp - x[0])*m + y[0]
return x_interp, y_interp
elif x_interp > x[-1]: # x_interp is above the highest point in x array
# find the last slope and interpolate above
m = (y[-1]-y[-2])/(x[-1]-x[-2])
y_interp = (x_interp - x[-1])*m + y[-1]
return x_interp, y_interp
else: # x_interp is between 2 points in array
for n in range(1,len(x)):
if x[n] > x_interp:
j = n
i = n-1
break
elif x[n] == x_interp:
return x[n], y[n]
m = (y[j]-y[i])/(x[j]-x[i])
y_interp = (x_interp - x[i])*m + y[i]
return x_interp, y_interp
def get_freq_points_per_decade(fstart, fstop, ptsPerDec):
""" return an array of frequencies starting at the
nearest decade of 10 from fstart and ending at the
nearest decade of 10 at fstop. Each decade has
ptsPerDec tpoints.
:Arguments:
fstart (float)
fstop (float)
ptsPerDec (int)
"""
fstart = float(fstart)
fstop = float(fstop)
ptsPerDec = int(ptsPerDec)
num_decades = round(math.log10(fstop/fstart)/math.log10(10),0)
ar = []
istart = int(math.log10(fstart)/math.log10(10))
ar.append(10**istart)
for i in range(istart,int(num_decades)+1):
newDec = 10**i
nextDec = 10**(i+1)
inc = float((nextDec - newDec))/float(ptsPerDec-1)
for j in range(1,ptsPerDec):
val = newDec + j*inc
ar.append(float(val))
return ar
def simulatePhaseNoise2( f,
refPn,
vcoPn,
pllFom,
kphi,
kvco,
fpfd,
N,
R,
filt=None,
coeffs=None,
numPts=1000):
""" simulate an arbitrary phase-locked loop using either
filter coefficients or component values. return 3 lists:
f (frequencies), g_ol (open-loop gain), phases (open-loop phases)
"""
f = np.array(f)
refPn = np.array(refPn)
vcoPn = np.array(vcoPn)
if coeffs == None:
c1 = filt['c1']
c2 = filt['c2']
r2 = filt['r2']
if 'r3' not in filt.keys():
r3 = 0
c3 = 0
else:
c3 = filt['c3']
r3 = filt['r3']
if 'r4' not in filt.keys():
r4 = 0
c4 = 0
else:
c4 = filt['c4']
r4 = filt['r4']
coeffs = calculateCoefficients( c1=c1,
c2=c2,
c3=c3,
c4=c4,
r2=r2,
r3=r3,
r4=r4,
flt_type=filt['flt_type'])
a = coeffs
t2 = filt['r2']*filt['c2']
if len(a) == 2:
# 2nd order
a.append(0)
a.append(0)
elif len(a) == 3:
# 3rd order
a.append(0)
else:
pass
# get smoothed curves for each phase noise component
freq, vcoPn = interp_semilogx(f, vcoPn, num_points=numPts)
freq, refPn = interp_semilogx(f, refPn, num_points=numPts)
# loop filter impedance
z = calculateZ(freq,
t2,
a[0],
a[1],
a[2],
a[3])
# G(s)
g = calculateG(freq, z, kphi, kvco)
# # Closed-loop reference transfer gain
cl_r = (1.0/R)*(g/(1+g/N))
cl_r_db = 20*np.log10(np.absolute(cl_r))
refPnOut = refPn + cl_r_db
refPn = []
refPn.extend( refPnOut )
cl_ic = (g/(1+g/N))
cl_ic_db = 20*np.log10(np.absolute(cl_ic))
icPnOut = pllFom + 10*np.log10(fpfd) + cl_ic_db
icPn = []
icPn.extend( icPnOut )
# # Closed-loop VCO transfer gain
cl_vco = 1.0/(1+g/N)
cl_vco_db = 20*np.log10(np.absolute(cl_vco))
vcoPnOut = vcoPn + cl_vco_db
vcoPn = []
vcoPn.extend( vcoPnOut )
compPn = []
for i in range(len(freq)):
compPn.append(power_sum([refPnOut[i],
vcoPnOut[i],
icPnOut[i] ]))
return freq, refPn, vcoPn, icPn, compPn
def simulatePhaseNoise(f,
refPn,
vcoPn,
pllFom,
pllFlicker,
kphi,
kvco,
fpfd,
N,
R,
filt=None,
coeffs=None):
""" simulate an arbitrary phase-locked loop using either
filter coefficients or component values. return 3 lists:
f (frequencies), g_ol (open-loop gain), phases (open-loop phases)
"""
if coeffs == None:
c1 = filt['c1']
c2 = filt['c2']
r2 = filt['r2']
if 'r3' not in filt.keys():
r3 = 0
c3 = 0
else:
c3 = filt['c3']
r3 = filt['r3']
if 'r4' not in filt.keys():
r4 = 0
c4 = 0
else:
c4 = filt['c4']
r4 = filt['r4']
coeffs = calculateCoefficients( c1=c1,
c2=c2,
c3=c3,
c4=c4,
r2=r2,
r3=r3,
r4=r4,
flt_type=filt['flt_type'])
a = coeffs
t2 = filt['r2']*filt['c2']
if len(a) == 2:
# 2nd order
a.append(0)
a.append(0)
elif len(a) == 3:
# 3rd order
a.append(0)
else:
pass
# loop filter impedance
z = calculateZ( f,
t2,
a[0],
a[1],
a[2],
a[3] )
# G(s)
g = calculateG( f, z, kphi, kvco )
# # Closed-loop reference transfer gain
cl_r = (1.0/R)*(g/(1+g/N))
cl_r_db = 20*np.log10(np.absolute(cl_r))
refPnOut = refPn + cl_r_db
refPn = []
refPn.extend( refPnOut )
cl_ic = (g/(1+g/N))
cl_ic_db = 20*np.log10(np.absolute(cl_ic))
icPnOut = pllFom + 10*np.log10(fpfd) + cl_ic_db
icPn = []
icPn.extend( icPnOut )
icFlickerOut = pllFlicker + 20*np.log10(fpfd) - 10*np.log10(f) + cl_ic_db
icFlick = []
icFlick.extend( icFlickerOut )
# # Closed-loop VCO transfer gain
cl_vco = 1.0/(1+g/N)
cl_vco_db = 20*np.log10(np.absolute(cl_vco))
vcoPnOut = vcoPn + cl_vco_db
vcoPn = []
vcoPn.extend( vcoPnOut )
compPn = []
for i in range(len(f)):
compPn.append(power_sum( [ refPnOut[i],
vcoPnOut[i],
icPnOut[i],
icFlickerOut[i] ] ))
return f, refPn, vcoPn, icPn, icFlick, compPn
def calculateCoefficients( c1=0,
c2=0,
c3=0,
c4=0,
r2=0,
r3=0,
r4=0,
flt_type='passive'):
""" return loop filter coeffiencients as list
a[0] = a0, a[1] = a1, etc.
"""
a = []
if flt_type == 'passive':
a.append( c1 + c2 + c3 + c4 )
a.append( c2*r2*(c1 + c3 + c4) + r3*(c1 + c2)*(c3 + c4) +\
c4*r4*(c1 + c2 + c3) )
a.append( c1*c2*r2*r3*(c3 + c4) +\
c4*r4*(c2*c3*r3 + c1*c3*r3 + c1*c2*r2 + c2*c3*r2) )
else:
a.append(c1 + c2)
a.append( (c1*c2*r2) + (c1 + c2) * (c3*r3 + c4*r4 + c4*r3) )
a.append( c3*c4*r3*r4 * (c1 + c2) + c1*c2*r2*(c3*r3 + c4*r4 + c4*r3) )
a.append(c1*c2*c3*c4*r2*r3*r4)
return a
def calculateZ(f, t2, a0, a1, a2=0, a3=0):
""" given the frequency array and the filter coefficients,
return Z(s) as a np.array()
"""
s = np.array(f)*2*math.pi*1j ####################
z = (1 + s*t2)/(s*(a3*s**3 + a2*s**2 + a1*s + a0))
return z
def calculateG(f, z, kphi, kvco):
""" given the loop filter impedance, kphi and kvco, return G(s)
"""
s = np.array(f)*2*math.pi*1j ###########
g = kphi*kvco*z/s
return g
def power_sum( pdb_lst ):
""" take a list of powers in dBm, add them
in the linear domain and return the sum
in log
"""
sum_lin = 0
for pdb in pdb_lst:
sum_lin += 10**(float(pdb)/10)*1e-3
return 10*math.log10(sum_lin/1e-3)
def getInterpolatedPhaseNoise(freq_list,
pn_list,
num_pts=1000):
"""
"""
f, pns = interp_semilogx(freq_list, pn_list, num_points=num_pts )
d = { 'freqs':f,
'pns':pns,
}
return d
| null | pll_calcs.py | pll_calcs.py | py | 38,887 | python | en | code | null | code-starcoder2 | 51 |
97753791 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from collections import namedtuple
import numpy as np
from scipy.stats import norm
from sklearn.neighbors import KDTree
from cgpm.cgpm import CGpm
from cgpm.utils import data as du
from cgpm.utils import general as gu
# Named pair of a `simulate` function and a `logpdf` function representing one
# local model; consumers invoke these via attribute access (m.simulate, m.logpdf).
LocalGpm = namedtuple('LocalGpm', ['simulate', 'logpdf'])
class MultivariateKnn(CGpm):
"""Multivariate K-Nearest-Neighbors builds local statistical models on a
per-query basis.
Algorithm for simulate(rowid, targets, constraints) and
logpdf(rowid, targets, constraints):
- Find K nearest neighbors to `rowid` based only on the `constraints`.
- For each nearest neighbor k = 1,...,K
- Find M nearest neighbors of k (called the locality of k) based
on both the `constraints` and `targets` dimensions.
- For each target variable q \in target:
- Learn a primitive univariate CGPM, using the dimension q of
the M neighbors in the locality of k.
- Return a product CGPM G_k representing locality k.
Overall CGPM G = (1/K)*G_1 + ... + (1/K)*G_K is a simple-weighted
mixture of the product CGPM learned in each locality.
This "locality-based" algorithm is designed to capture the dependence
between the target variables, rather than assume that all the target
variables are independent conditioned on the constraints. Github ticket #133
will support selecting either the independent or locality-based versions of
KNN.
"""
def __init__(self, outputs, inputs, K=None, M=None, distargs=None,
params=None, rng=None):
# Input validation.
self._validate_init(outputs, inputs, K, M, distargs, params, rng)
# Default arguments.
if params is None:
params = {}
if rng is None:
rng = gu.gen_rng(1)
if M is None:
M = K
# Build the object.
self.rng = rng
# Varible indexes.
self.outputs = outputs
self.inputs = []
# Distargs.
self.stattypes = distargs['outputs']['stattypes']
self.statargs = distargs['outputs']['statargs']
self.levels = {
o: self.statargs[i]['k']
for i, o in enumerate(outputs) if self.stattypes[i] != 'numerical'
}
# Dataset.
self.data = OrderedDict()
self.N = 0
# Ordering of the chain.
self.ordering = list(self.rng.permutation(self.outputs))
# Number of nearest neighbors.
self.K = K
self.M = M
def incorporate(self, rowid, observation, inputs=None):
self._validate_incorporate(rowid, observation, inputs)
# Incorporate observed variables.
x = [observation.get(q, np.nan) for q in self.outputs]
# Update dataset and counts.
self.data[rowid] = x
self.N += 1
def unincorporate(self, rowid):
self._validate_unincorporate(rowid)
del self.data[rowid]
self.N -= 1
    def logpdf(self, rowid, targets, constraints=None, inputs=None):
        """Return the log density of `targets` given `constraints`.

        The density is the uniform-weight average of the local product
        models fit in each of the K neighborhoods of the constraints.
        Raises ValueError if `inputs` is given or if no constraints are
        available (the neighbor search requires at least one).
        """
        constraints = self.populate_constraints(rowid, targets, constraints)
        # XXX Disable logpdf queries without constraints.
        if inputs:
            raise ValueError('Prohibited inputs: %s' % (inputs,))
        if not constraints:
            raise ValueError('Provide at least one constraint: %s'
                % (constraints,))
        self._validate_simulate_logpdf(rowid, targets, constraints)
        # Retrieve the dataset and neighborhoods.
        dataset, neighborhoods = self._find_neighborhoods(targets, constraints)
        models = [self._create_local_model_joint(targets, dataset[n])
            for n in neighborhoods]
        # Compute logpdf in each neighborhood and simple average:
        # logsumexp(lp) - log(K) is the log of the mean density.
        lp = [m.logpdf(targets) for m in models]
        return gu.logsumexp(lp) - np.log(len(models))
    def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
        """Simulate `targets` given `constraints`.

        Returns a single sample dict when N is None, otherwise a list of
        N sample dicts. With constraints, sampling draws a neighborhood
        model uniformly and then samples it; without constraints it falls
        back to resampling one variable (see _simulate_fallback).
        """
        if inputs:
            raise ValueError('Prohibited inputs: %s' % (inputs,))
        N_sim = 1 if N is None else N
        constraints = self.populate_constraints(rowid, targets, constraints)
        self._validate_simulate_logpdf(rowid, targets, constraints, N_sim)
        if constraints:
            # Retrieve the dataset and neighborhoods.
            dataset, neighborhoods = self._find_neighborhoods(
                targets, constraints)
            models = [self._create_local_model_joint(targets, dataset[n])
                for n in neighborhoods]
            # Sample the models.
            indices = self.rng.choice(len(models), size=N_sim)
            # Sample from each model.
            sampled_models = [models[i] for i in indices]
            results = [m.simulate(targets) for m in sampled_models]
        else:
            results = self._simulate_fallback(rowid, targets, N_sim)
        assert len(results) == N_sim
        return results[0] if N is None else results
    def _simulate_fallback(self, rowid, targets, N):
        """Simulate `targets` when no constraints exist, by resampling the
        variables not in `targets` (or the first target, if all variables
        are targeted) from the dataset and recursing into simulate.

        NOTE(review): this relies on Python 2 `zip` returning a list --
        on Python 3 `dict(e)` would exhaust the zip iterator before
        `gu.merged(s, e)` reuses it. Confirm before porting.
        """
        # Fallback: if there is no such constraints to resample from, then
        # resample the first variable.
        merged = len(targets) == len(self.outputs)
        targets_dummy = [o for o in self.outputs if o not in targets]
        if merged:
            assert not targets_dummy
            targets_dummy = [targets[0]]
            targets = targets[1:]
        dataset = self._dataset(targets_dummy)
        indices = self.rng.choice(len(dataset), size=N)
        constraints = [zip(targets_dummy, dataset[i]) for i in indices]
        results = [self.simulate(rowid, targets, dict(e)) for e in constraints]
        # Make sure to add back the resampled first target variable to results.
        if merged:
            results = [gu.merged(s, e) for s, e in zip(results, constraints)]
        return results
    def logpdf_score(self):
        """Not implemented for KNN; returns None."""
        pass
    def transition(self, N=None):
        # KNN memorizes the data and has no latent state to transition,
        # so this is a no-op.
        return
# --------------------------------------------------------------------------
# Internal.
    def _find_neighborhoods(self, targets, constraints):
        """Find the K neighborhoods of `constraints` in the dataset.

        Returns a pair (D_targets, neighborhoods) where D_targets holds
        the target columns of the rows fully observed on both targets and
        constraints, and neighborhoods is a list of K index arrays, each
        giving the (up to M) locality members of one nearest neighbor.
        """
        if not constraints:
            raise ValueError('No constraints in neighbor search.')
        if any(np.isnan(v) for v in constraints.values()):
            raise ValueError('Nan constraints in neighbor search.')
        # Extract the targets, constraints from the dataset.
        lookup = list(targets) + list(constraints)
        D = self._dataset(lookup)
        # Not enough neighbors: crash for now. Workarounds include:
        # (i) reduce K, (ii) randomly drop constraints, (iii) impute dataset.
        if len(D) < self.K:
            raise ValueError('Not enough neighbors: %s'
                % ((targets, constraints),))
        # Code the dataset with Euclidean embedding (dummy-code categoricals).
        N = len(targets)
        D_qr_code = self._dummy_code(D[:,:N], lookup[:N])
        D_ev_code = self._dummy_code(D[:,N:], lookup[N:])
        D_code = np.column_stack((D_qr_code, D_ev_code))
        # Run nearest neighbor search on the constraints only.
        constraints_code = self._dummy_code(
            [constraints.values()], constraints.keys())
        dist, neighbors = KDTree(D_ev_code).query(constraints_code, k=len(D))
        # Check for equidistant neighbors and possibly extend the search:
        # break ties at the K-th distance by sampling without replacement.
        valid = [i for i, d in enumerate(dist[0]) if d <= dist[0][self.K-1]]
        if self.K < len(valid):
            neighbors = self.rng.choice(neighbors[0][valid],
                replace=False, size=self.K)
        else:
            neighbors = neighbors[0][:self.K]
        # For each neighbor, find its nearest M on the full lookup set.
        _, ex = KDTree(D_code).query(D_code[neighbors], k=min(self.M, self.K))
        # Return the dataset and the list of neighborhoods.
        return D[:,:len(targets)], ex
    def _create_local_model_joint(self, targets, dataset):
        """Build a product CGPM over `targets` from one locality's rows.

        Each target gets an independent primitive model (Gaussian for
        numerical, empirical categorical otherwise) fit on its column of
        `dataset`; the joint logpdf is the sum of the marginals.
        Uses dict.iteritems, so this is Python 2 only.
        """
        assert all(q in self.outputs for q in targets)
        assert dataset.shape[1] == len(targets)
        # Dispatch on the stattype of each target variable.
        lookup = {
            'numerical': self._create_local_model_numerical,
            'categorical': self._create_local_model_categorical,
            'nominal': self._create_local_model_categorical,
        }
        models = {
            q: lookup[self.stattypes[self.outputs.index(q)]](q, dataset[:,i])
            for i, q in enumerate(targets)}
        simulate = lambda q, N=None: {c: models[c].simulate(N) for c in q}
        logpdf = lambda q: sum(models[c].logpdf(x) for c,x in q.iteritems())
        return LocalGpm(simulate, logpdf)
def _create_local_model_numerical(self, q, locality):
assert q not in self.levels
(mu, std) = (np.mean(locality), max(np.std(locality), .01))
simulate = lambda N=None: self.rng.normal(mu, std, size=N)
logpdf = lambda x: norm.logpdf(x, mu, std)
return LocalGpm(simulate, logpdf)
    def _create_local_model_categorical(self, q, locality):
        """Fit an empirical categorical model to `locality` for variable `q`.

        Probabilities are the normalized category counts over the
        locality; levels absent from the locality get probability zero
        (and hence logpdf of -inf via np.log(0)).
        """
        assert q in self.levels
        assert all(0 <= l < self.levels[q] for l in locality)
        counts = np.bincount(locality.astype(int), minlength=self.levels[q])
        p = counts / np.sum(counts, dtype=float)
        simulate = lambda N: self.rng.choice(self.levels[q], p=p, size=N)
        logpdf = lambda x: np.log(p[x])
        return LocalGpm(simulate, logpdf)
    def _dummy_code(self, D, variables):
        """Dummy-code the categorical columns of data matrix `D`.

        `variables` names the columns of D; columns appearing in
        self.levels are one-hot encoded via du.dummy_code, numerical-only
        data is returned unchanged.
        """
        levels = {variables.index(l): self.levels[l]
            for l in variables if l in self.levels}
        return D if not levels\
            else np.asarray([du.dummy_code(r, levels) for r in D])
    def _dataset(self, outputs):
        """Return the rows fully observed on `outputs`, as a numpy array
        with one column per requested output (rows with any NaN dropped).

        NOTE(review): np.asarray(self.data.values()) relies on Python 2
        dict.values() returning a list.
        """
        indexes = [self.outputs.index(q) for q in outputs]
        X = np.asarray(self.data.values())[:,indexes]
        return X[~np.any(np.isnan(X), axis=1)]
def _stattypes(self, outputs):
indexes = [self.outputs.index(q) for q in outputs]
return [self.stattypes[i] for i in indexes]
    def populate_constraints(self, rowid, targets, constraints):
        """Augment `constraints` with the observed values of `rowid`.

        If `rowid` has been incorporated, every non-NaN value of the row
        that is neither a target nor already constrained is added as a
        constraint. Returns the (possibly new) constraints dict.
        """
        if constraints is None:
            constraints = {}
        if rowid in self.data:
            values = self.data[rowid]
            assert len(values) == len(self.outputs)
            observations = {
                output : value
                for output, value in zip(self.outputs, values)
                if not np.isnan(value)
                    and output not in targets
                    and output not in constraints
            }
            constraints = gu.merged(constraints, observations)
        return constraints
def get_params(self):
return {}
def get_distargs(self):
return {
'outputs': {
'stattypes': self.stattypes,
'statargs': self.statargs,
},
'K': self.K,
'M': self.M,
}
    @staticmethod
    def name():
        """Return the string identifier of this CGPM type."""
        return 'multivariate_knn'
# --------------------------------------------------------------------------
# Validation.
    def _validate_init(self, outputs, inputs, K, M, distargs, params, rng):
        """Validate constructor arguments; raise ValueError on any problem.

        Uses xrange, so this module is Python 2 only.
        """
        # No inputs allowed.
        if inputs:
            raise ValueError('KNN rejects inputs: %s.' % inputs)
        # At least one output.
        if len(outputs) < 2:
            raise ValueError('KNN needs >= 2 outputs: %s.' % outputs)
        # Unique outputs.
        if len(set(outputs)) != len(outputs):
            raise ValueError('Duplicate outputs: %s.' % outputs)
        # Ensure outputs in distargs.
        if not distargs or 'outputs' not in distargs:
            raise ValueError('Missing distargs: %s.' % distargs)
        # Ensure K is positive.
        if K is None or K < 1:
            raise ValueError('Invalid K for nearest neighbors: %s.' % K)
        # Ensure stattypes and statargs in distargs['outputs]'
        if 'stattypes' not in distargs['outputs']\
                or 'statargs' not in distargs['outputs']:
            raise ValueError('Missing output stattypes: %s.' % distargs)
        # Ensure stattypes correct length.
        if len(distargs['outputs']['stattypes']) != len(outputs):
            raise ValueError('Wrong number of stattypes: %s.' % distargs)
        # Ensure statargs correct length.
        if len(distargs['outputs']['statargs']) != len(outputs):
            raise ValueError('Wrong number of statargs: %s.' % distargs)
        # Ensure number of categories provided as k.
        if any('k' not in distargs['outputs']['statargs'][i]
                for i in xrange(len(outputs))
                if distargs['outputs']['stattypes'][i] != 'numerical'):
            raise ValueError('Missing number of categories k: %s' % distargs)
    def _validate_simulate_logpdf(self, rowid, targets, constraints, N=None):
        """Validate a simulate/logpdf query; raise ValueError on problems.

        Uses dict.itervalues, so this module is Python 2 only.
        """
        # No invalid number of samples.
        if N is not None and N <= 0:
            raise ValueError('Unknown number of samples: %s.' % N)
        # At least K observations before we can do K nearest neighbors.
        if self.N < self.K:
            raise ValueError('MultivariateKnn needs >= %d observations: %d'
                % (self.K, self.N))
        # Need targets.
        if not targets:
            raise ValueError('No targets specified: %s.' % targets)
        # All targets in outputs.
        if any(q not in self.outputs for q in targets):
            raise ValueError('Unknown variables in targets: %s, %s'
                % (self.outputs, targets))
        # All constraints in outputs.
        if any(e not in self.outputs for e in constraints):
            raise ValueError('Unknown variables in constraints: %s,%s'
                % (self.outputs, constraints))
        # No duplicate variables in targets and constraints.
        if any(q in constraints for q in targets):
            raise ValueError('Duplicate variable in targets/constraints: %s %s'
                % (targets, constraints))
        # Check for a nan in constraints.
        if any(np.isnan(v) for v in constraints.itervalues()):
            raise ValueError('Nan value in constraints: %s.' % constraints)
        # Check for a nan in targets (only when targets is a dict of values).
        if isinstance(targets, dict)\
                and any(np.isnan(v) for v in targets.itervalues()):
            raise ValueError('Nan value in targets: %s.' % targets)
    def _validate_incorporate(self, rowid, observation, inputs):
        """Validate an incorporate call; raise ValueError on any problem."""
        # No duplicate observation.
        if rowid in self.data:
            raise ValueError('Already observed: %d.' % rowid)
        # No inputs.
        if inputs:
            raise ValueError('No inputs allowed: %s.' % inputs)
        # Missing observation.
        if not observation:
            raise ValueError('No observation specified: %s.' % observation)
        # No unknown variables.
        if any(q not in self.outputs for q in observation):
            raise ValueError('Unknown variables: (%s,%s).'
                % (observation, self.outputs))
    def _validate_unincorporate(self, rowid):
        """Raise ValueError unless `rowid` has been incorporated."""
        if rowid not in self.data:
            raise ValueError('No such observation: %d.' % rowid)
# --------------------------------------------------------------------------
# Serialization.
    def to_metadata(self):
        """Serialize this CGPM to a plain dict (see from_metadata).

        NOTE(review): metadata['data'] = self.data.items() relies on
        Python 2 dict.items() returning a list.
        """
        metadata = dict()
        metadata['outputs'] = self.outputs
        metadata['inputs'] = self.inputs
        metadata['distargs'] = self.get_distargs()
        metadata['N'] = self.N
        metadata['data'] = self.data.items()
        metadata['params'] = dict()
        # Import path + class name used by the deserialization machinery.
        metadata['factory'] = ('cgpm.knn.mvknn', 'MultivariateKnn')
        return metadata
    @classmethod
    def from_metadata(cls, metadata, rng=None):
        """Reconstruct a MultivariateKnn from to_metadata() output."""
        if rng is None:
            rng = gu.gen_rng(0)
        knn = cls(
            outputs=metadata['outputs'],
            inputs=metadata['inputs'],
            K=metadata['distargs']['K'],    # Pending migration to **kwargs
            M=metadata['distargs']['M'],
            distargs=metadata['distargs'],
            params=metadata['params'],
            rng=rng)
        # Restore the dataset and its count directly (bypasses incorporate).
        knn.data = OrderedDict(metadata['data'])
        knn.N = metadata['N']
        return knn
| null | src/knn/mvknn.py | mvknn.py | py | 16,692 | python | en | code | null | code-starcoder2 | 51 |
404992545 | import unittest
from common import logger,login_token,base
from data.readexcel import ExcelUtil
data = ExcelUtil("MembershipSubscription").dict_data()
class Detailsofpayment(unittest.TestCase):
    """API test: fetch member withdrawal (payout) details, using row 4 of
    the 'MembershipSubscription' sheet as the request fixture."""
    def setUp(self):
        self.log = logger.Log()
    def test_details_of_payment(self):
        '''Fetch member withdrawal details.'''
        route = data[4]["route"]
        url = "".join(base.get_url(route))
        # Authenticate once and attach the token to the request headers.
        token = login_token.login().get_token()
        # NOTE(review): eval() on spreadsheet-provided text executes
        # arbitrary code; ast.literal_eval would be safer -- confirm the
        # sheet is trusted.
        header = eval(data[4]["header"])
        header["token"] = token
        kwargs = {"json": token, "headers": header}
        Method = data[4]["method"]
        resp = base.get_response(url,Method,**kwargs)
        self.log.info("--------start--------")
        # Expected substring comes from the same fixture row.
        self.assertIn(data[4]["expect"], resp.text, msg="失败原因:%s not in %s" % (data[4]["expect"], resp.text))
        self.log.info("------test is pass------")
        self.log.info("---------end---------")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
224795286 | import math, random
def getFinalList(N, n, input_tuple, pc1, pc2, pc3):
    """Interleave three ranked candidate lists into one list of up to N items.

    input_tuple -- (i1, i2, i3), three ranked candidate lists.
    pc1, pc2, pc3 -- relative weights of the three lists; they are
        normalized so each list gets a proportional share of the N slots.
    N -- total number of slots; n -- page size (items are shuffled
        within each page so each page mixes the three sources).

    Returns the combined list. If a source list runs short, its share is
    backfilled from the remaining sources (the long if/else ladder below).
    """
    i1, i2, i3 = input_tuple
    f1 = []
    f2 = []
    f3 = []
    # BUG FIX: normalize all three weights against the *original* total.
    # The previous code divided sequentially (pc1 first), so pc2 and pc3
    # were divided by a total that already contained the normalized pc1,
    # skewing the share of slots given to each list.
    total = pc1 + pc2 + pc3
    pc1 = pc1 / total
    pc2 = pc2 / total
    pc3 = pc3 / total
    # Slot quotas per bucket; the third takes whatever remains of N.
    ceil1 = math.ceil(N*pc1)
    ceil2 = math.ceil(N*pc2)
    ceil3 = N-ceil1-ceil2
    if (len(i1) >= ceil1):
        #fill bucket 1 to the brim with only i1 elements
        f1 += i1[:ceil1]
        #f1 is full now with only i1 elements, i2 and i3 are untouched
        #now fill f2 (and f3) with i2 and i3 elems
        if (len(i2) >= ceil2):
            #fill f2 to the brim with only i2 elements
            f2 += i2[:ceil2]
            #f2 also is full now with only i2 elements, i3 is untouched
            #now fill f3 with i3 elements
            if (len(i3) >= ceil3):
                #fill f3 to the brim with i3 elems
                f3 += i3[:ceil3]
            else:
                #fill as many as are available
                f3 += i3
        else:
            #fill as many i2 elems as available
            f2 += i2
            #and fill remaining space in f2 with same no of initial i3 elements, (if also available)
            rem2 = ceil2-len(f2)
            if (len(i3) >= rem2):
                #fill remaining space in f2 with i3 elems
                f2.extend(i3[:rem2])
                #now f2 also is full
                #now move to f3 and fill with remaining i3 elems
                f3 += i3[rem2:ceil3]
            else:
                #fill as many i3 elems as availabe
                f2.extend(i3)
    else:
        #fill bucket 1 with as many i1 elems as available
        f1 += i1
        #fill remainng space in bucket 1 with b2 and b3 elements with their respective proportion
        pcn2 = pc2_new = pc2 / (pc2+pc3)
        pcn3 = pc3_new = pc3 / (pc2+pc3)
        rem1 = ceil1-len(i1)
        ceiln2 = ceil2_new = math.ceil(pcn2*rem1)
        ceiln3 = rem1-ceiln2
        if (len(i2) >= ceiln2):
            f1.extend(i2[:ceiln2])
            if (len(i3) >= ceiln3):
                f1.extend(i3[:ceiln3])
                if (len(i2[ceiln2:]) >= ceil2):
                    f2.extend(i2[ceiln2:ceiln2+ceil2])
                    if (len(i3[ceiln3:]) >= ceil3):
                        f3.extend(i3[ceiln3:ceiln3+ceil3])
                    else:
                        f3.extend(i3[ceiln3:])
                else:
                    f2.extend(i2[ceiln2:])
                    rem2 = ceil2-len(f2)
                    if (len(i3[ceiln3:]) >= rem2):
                        f2.extend(i3[ceiln3:ceiln3+rem2])
                        if (len(i3[ceiln3+rem2:]) >= ceil3):
                            f3.extend(i3[ceiln3+rem2:ceiln3+rem2+ceil3])
                        else:
                            f3.extend(i3[ceiln3+rem2:])
                    else:
                        f2.extend(i3[ceiln3:])
            else:
                f1.extend(i3)
                if (len(i2[ceiln2:]) >= ceil2):
                    f2.extend(i2[ceiln2:ceiln2+ceil2])
                else:
                    f2.extend(i2[ceiln2:])
        else:
            f1.extend(i2)
            if len(i3) >= (ceiln2-len(i2)):
                f1.extend(i3[:ceiln2-len(i2)])
                if (len(i3[ceiln2-len(i2):]) >= ceiln3):
                    f1.extend(i3[ceiln2-len(i2):ceiln2-len(i2)+ceiln3])
                    if (len(i3[ceiln2-len(i2)+ceiln3:]) >= ceil2):
                        f2.extend(i3[ceiln2-len(i2)+ceiln3:ceiln2-len(i2)+ceiln3+ceil2])
                        if (len(i3[ceiln2-len(i2)+ceiln3+ceil2:]) >= ceil3):
                            f3.extend(i3[ceiln2-len(i2)+ceiln3+ceil2:ceiln2-len(i2)+ceiln3+ceil2+ceil3])
                        else:
                            f3.extend(i3[ceiln2-len(i2)+ceiln3+ceil2:])
                    else:
                        f2.extend(i3[ceiln2-len(i2)+ceiln3:])
                else:
                    f1.extend(i3[ceiln2-len(i2):])
            else:
                f1.extend(i3[:])
    # Page-wise merge: take each bucket's proportional share per page and
    # shuffle within the page so sources are mixed.
    f = []
    roof1 = math.ceil((len(f1)/N)*n)
    roof2 = math.ceil((len(f2)/N)*n)
    roof3 = n-roof1-roof2
    pages = math.ceil(N/n)
    for page in range(pages):
        temp = f1[:roof1] + f2[:roof2] + f3[:roof3]
        random.shuffle(temp)
        f = f + temp
        f1 = f1[roof1:]
        f2 = f2[roof2:]
        f3 = f3[roof3:]
    return f
| null | app/flaskapp/recommend/final_freelancers/bucketing.py | bucketing.py | py | 4,668 | python | en | code | null | code-starcoder2 | 51 |
493716852 | #!/usr/bin/env python
from prettytable import PrettyTable
import subprocess
import json
def shell(cmd):
    """Run `cmd` through the shell and return its (stdout, stderr) bytes."""
    proc = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return stdout, stderr
def get_nodes():
    """Return the baremetal node list parsed from the openstack CLI JSON."""
    cmd = "openstack baremetal node list --long -f json"
    out, err = shell(cmd)
    if err:
        print('%s error!!:%s' % (cmd, err))
    return json.loads(out)
def get_ports():
    """Return the baremetal port list parsed from the openstack CLI JSON."""
    cmd = "openstack baremetal port list --long -f json"
    out, err = shell(cmd)
    if err:
        print('%s error!!:%s' % (cmd, err))
    return json.loads(out)
def port_node_maps():
    """Print a table mapping each node's IPMI address to its switch ports.

    Joins the baremetal node list with the port list on the node UUID and
    prints (ipmi_address, mac, switch_id, port_id) rows sorted by
    ipmi_address.
    """
    nodes = get_nodes()
    ports = get_ports()
    tbl = PrettyTable(["ipmi_address", "mac", "switch_id", "port_id"])
    # node UUID -> IPMI address, for joining with the port list.
    node_ipmis = {}
    for node in nodes:
        uuid = node["UUID"]
        ipmi_address = node["Driver Info"]["ipmi_address"]
        node_ipmis[uuid] = ipmi_address
    for port in ports:
        uuid = port["Node UUID"]
        ipmi_address = node_ipmis[uuid]
        mac = port["Address"]
        switch_id = port["Local Link Connection"]["switch_id"]
        port_id = port["Local Link Connection"]["port_id"]
        tbl.add_row([ipmi_address, mac, switch_id, port_id])
    print(tbl.get_string(sortby="ipmi_address"))
# Allow running this script directly.
if __name__ == '__main__':
    port_node_maps()
| null | ironic/ironic_list/ironic_node_port_list.py | ironic_node_port_list.py | py | 1,376 | python | en | code | null | code-starcoder2 | 51 |
429046196 | from django import forms
from .models import \
PartnerSet, \
TransactionSet, \
ProductSet, \
ProductItem
class NewClientForm(forms.ModelForm):
    """ModelForm for creating a PartnerSet (client) with its name and code."""
    class Meta:
        model = PartnerSet
        fields = ('name', 'code')
class NewTransactionForm(forms.ModelForm):
    """ModelForm for a transaction line.

    NOTE(review): despite the name, Meta.model is ProductItem (not
    TransactionSet) -- confirm this is intentional.
    """
    class Meta:
        model = ProductItem
        # items = ProductItem.objects.all()
        fields = ('code', 'name')
        # fields = ('m1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9', 'm10',
        #           'w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'w8', 'w9', 'w10')
        # widgets = {
        #     'myfield': forms.TextInput(attrs={'class': 'myfieldclass'}),
        # }
class NewStorageForm(forms.ModelForm):
    # NOTE(review): this ModelForm declares no Meta, so instantiating it
    # raises "ModelForm has no model class specified" -- confirm intent.
    name = forms.CharField(initial='Storage name')
    _fields = [name]
    # NOTE(review): this queries the database at class-definition (import)
    # time, and forms.CharField accepts no `name` keyword, so building
    # these fields raises TypeError. Dynamic fields are normally added in
    # __init__ via self.fields[...]. Flagging rather than rewriting since
    # the intended model/schema is not visible here.
    items = ProductItem.objects.all()
    for item in items:
        item_field = forms.CharField(max_length=6, name=item.code, label=item.code)
        _fields.append(item_field)
    fields = tuple(_fields)
| null | app/forms.py | forms.py | py | 1,004 | python | en | code | null | code-starcoder2 | 51 |
121065528 | '''
Diciamo che un dizionario d rappresenta un albero (e lo indichiamo come dizionario-albero)
se ciascuna chiave di d e' un identificativo di un nodo dell'albero e l'attributo della chiave e' la lista
(eventualmente vuota) degli identificativi dei figli del nodo. Gli identificativi dei nodi
all'interno delle liste sono in ordine lessicografico crescente.
Ecco un esempio di dizionario d che rappresenta un dizionario-albero
d={
'a':['b'],
'b':['c','d'],
'c':['i'],
'd':['e','l'],
'e':['f','g','h'],
'f':[],
'g':[],
'h':[],
'i':[],
'l':[]
}
L'albero rappresentato da d e'
'a'
|
_____________'b'____________
| |
'c' ________'d'_______
| | |
'i' _______'e'_______ 'l'
| | |
'f' 'g' 'h'
|
'i'
Implementare le seguenti funzioni:
1)
la funzione genera_sottoalbero(fnome,x,fout) che, presi:
- il nome di un file json contenente un dizionario-albero d (fonome)
- un identificativo x
- il nome di un file json (fout)
produce il dizionario-albero che rappresenta il sottoalbero radicato
nell'identificativo x che si ottiene dal dizionario-albero d.
Il dizionario-albero ottenuto va registrato nel file fout.
Se l'identificativo x non e' tra i nodi di d allora il dizionario-albero prodotto
deve essere vuoto.
Ad esempio se fnome contiene il dizionario-albero d allora dopo l'esecuzione di
genera_sottoalbero(fname,'d',fout)
il file fout conterra' il dizionario
{'f': [], 'g': [], 'h': [], 'e': ['f', 'g', 'h'], 'l': [], 'd': ['e', 'l']}
2)
la funzione cancella_sottoalbero(fnome,x,fout) che, presi:
- il nome di un file json contenente un dizionario-albero d (fonome)
- un identificativo x
- il nome di un file json (fout)
ricava da d il sottoalbero radicato in x e lo salva nel file fout.
Se x non e' presente tra le chiavi di d allora il dizionario-albero d non viene modificato.
Ad esempio se fnome contiene il dizionario-albero d allora dopo l'esecuzione di
cancella_sottoalbero(fname,'d',fout)
il file fout conterra' il dizionario
{'a': ['b'], 'b': ['c'], 'c': ['i'], 'i':[]}
3)
la funzione dizionario_livelli(fnome, fout) che, presi:
- il nome di un file json contenente un dizionario-albero d (fonome)
- il nome di un file json (fout)
costruisce il dizionario che ha come chiavi i livelli del dizionario-albero d. L'attributo di una
chiave di valore x e' la lista degli identificativi dei nodi che si trovano a livello x nell'albero rappresentato da d.
La lista è ordinata lessicograficamente ed in modo crescente.
Il dizionario cosi' costruito va registrato nel file fout.
Ad esempio se fnome contiene il dizionario-albero d allora dopo l'esecuzione di
dizionario_livelli(fname,fout)
il file fout conterra' il dizionario
{0: ['a'], 1: ['b'], 2: ['c', 'd'], 3: ['e','i','l'], 4: ['f', 'g', 'h']}
4)
la funzione dizionario_gradi_antenati(fnome,y,fout) che, presi:
- il nome di un file json contenente un dizionario-albero d (fonome)
- un intero y
- il nome di un file json (fout)
costuisce il dizionario che ha come chiavi gli identificativi dei nodi dell'albero
rappresentato dal dizionario-albero d, Attributo di una chiave di valore x e' il numero
di antenati di grado y che ha il nodo con identificativo x nell'albero.
Registra il dizionario costruito nel file fout.
Ad esempio se fnome contiene il dizionario-albero d allora dopo l'esecuzione di
dizionario_gradi_antenati(fnome,2,fout)
il file fout conterra' il dizionario
{'a': 0, 'b': 0, 'c': 1, 'd': 1, 'e': 2, 'f': 2, 'g': 2, 'h': 2, 'i': 1, 'l': 2}
AVVERTENZE: non usare caratteri non ASCII, come le lettere accentate; non
importare moduli che non sono nella libreria standard.
'''
import json
def genera_sottoalbero(fnome, x, fout):
    '''Salva in `fout` il sottoalbero (come dizionario-albero) radicato in `x`.

    Reads the dictionary-tree from json file `fnome`, extracts the subtree
    rooted at node `x`, and writes it to json file `fout`. If `x` is not a
    node of the tree, an empty dictionary is written -- the exercise text
    requires this, while the original code raised KeyError. Files are
    opened with `with` so handles are always closed.
    '''
    with open(fnome) as infile:
        diz = json.load(infile)
    diz2 = {}
    if x in diz:
        # Iterative depth-first walk; self-contained (no sibling helper).
        stack = [x]
        while stack:
            nodo = stack.pop()
            if nodo in diz2:
                continue
            # A node stored as None counts as having no children.
            figli = diz[nodo] if diz[nodo] is not None else []
            diz2[nodo] = figli
            stack.extend(figli)
    with open(fout, 'w') as outfile:
        json.dump(diz2, outfile)
def estrapola(diz, v):
    '''Restituisce il dizionario-albero del sottoalbero radicato in `v`.

    Recursively collects node `v` and all of its descendants from the
    dictionary-tree `diz` into a new dictionary. A node stored as None is
    mapped to an empty child list.
    '''
    figli = diz[v]
    if figli is None:
        risultato = {v: []}
    else:
        risultato = {v: figli}
    for figlio in figli:
        risultato.update(estrapola(diz, figlio))
    return risultato
def cancella_sottoalbero(fnome,x,fout):
    '''Salva in `fout` il dizionario-albero letto da `fnome` privato del
    sottoalbero radicato in `x`.

    If `x` is not in the tree, the result equals the original tree.
    NOTE(review): the input file handle is never closed explicitly.
    '''
    json_data=open(fnome).read()
    diz=json.loads(json_data)
    # The rebuild starts from the root (first key of the dict).
    radice=trovaradice(diz)
    diz2=elimina_albero(diz,x,radice)
    with open(fout,'w') as outfile:
        json.dump(diz2,outfile)
def trovaradice(diz):
    '''Restituisce la radice: la prima chiave del dizionario-albero.

    Relies on dict insertion order (Python 3.7+): the root is assumed to
    have been inserted first.
    '''
    for chiave in diz:
        radice = chiave
        break
    return radice
def elimina_albero(diz,x,nodo):
    '''Ricostruisce il dizionario-albero radicato in `nodo` escludendo il
    sottoalbero radicato in `x`.

    WARNING: mutates `diz` in place -- `x` is removed from its parent's
    child list via list.remove.
    '''
    valore=diz[nodo]
    if valore!=[]:
        diz2={nodo:valore}
        # Drop the subtree root from this node's children, if present.
        if x in valore:
            valore.remove(x)
        for el in valore:
            diz3=elimina_albero(diz,x,el)
            diz2.update(diz3)
    elif valore==[]:
        diz2={nodo:[]}
    return diz2
def dizionario_livelli(fnome,fout):
    '''Salva in `fout` il dizionario livello -> lista ordinata dei nodi.

    Builds, from the dictionary-tree in json file `fnome`, the dict
    mapping each depth level to the lexicographically sorted list of node
    ids at that level, and writes it to json file `fout`.
    NOTE(review): json.dump converts the integer level keys to strings.
    '''
    lista2=[]
    with open(fnome,'r') as file:
        diz=json.load(file)
    #print(diz)
    radice=trovaradice(diz) #ok
    i=0
    # Collect (depth, node) pairs, then group them by depth.
    lista=livello(diz,radice,lista2,i)
    massimo=trovamassimo(lista)
    diz2=analisi(lista,massimo)
    with open(fout,'w') as outfile:
        json.dump(diz2,outfile)
def livello(diz,nodo,lista2,i):
    '''Appende a `lista2` le coppie (profondita', nodo) del sottoalbero
    radicato in `nodo`, con `nodo` a profondita' `i`; restituisce la lista.
    '''
    valore=diz[nodo]
    if valore is None:
        # BUG FIX: the original called lista2.append(i,nodo), a TypeError
        # since list.append takes one argument. Record the pair instead.
        lista2.append((i,nodo))
    else:
        lista2.append((i,nodo))
        i=i+1
        for figlio in valore:
            lista2=livello(diz,figlio,lista2,i)
    return lista2
def trovamassimo(lista):
    '''Restituisce la massima profondita' tra le coppie (livello, nodo).'''
    return max(profondita for profondita, _ in lista)
def analisi(lista,massimo):
    '''Costruisce il dizionario livello -> lista ordinata dei nodi.

    For each depth x in 0..massimo, collects the node ids paired with x
    in `lista`, sorted lexicographically.
    '''
    return {
        x: sorted(nodo for profondita, nodo in lista if profondita == x)
        for x in range(massimo + 1)
    }
def dizionario_gradi_antenati(fnome,y,fout):
    '''Salva in `fout` il dizionario nodo -> numero di antenati di grado `y`.

    Reads the dictionary-tree from json file `fnome`, counts for every
    node how many of its ancestors have exactly `y` children, and writes
    the resulting dict to json file `fout`.
    '''
    lista2=[]
    i=0
    k=0
    with open(fnome,'r') as file:
        diz=json.load(file)
    radice=trovaradice(diz)
    # Collect (node, count) pairs, then turn them into a dict.
    lista=antenati(diz,radice,lista2,i,y,k)
    diz=analisi2(lista)
    with open(fout,'w') as outfile:
        json.dump(diz,outfile)
def analisi2(lista):
    '''Converte la lista di coppie (nodo, conteggio) in un dizionario.

    Pairs are inserted in ascending order of count, so the dict iteration
    order follows the counts; the resulting mapping is unchanged.
    '''
    coppie_ordinate = sorted(lista, key=lambda coppia: coppia[1])
    return dict(coppie_ordinate)
def antenati(diz, nodo, lista2, i, y, k):
    '''Appende a `lista2` le coppie (nodo, numero di antenati di grado y).

    `k` counts how many nodes on the path from the root to `nodo`
    (inclusive of those above) have exactly `y` children; `i` tracks the
    depth and is kept only for interface compatibility.
    '''
    figli = diz[nodo]
    if figli is None:
        return lista2
    lista2.append((nodo, k))
    # Children inherit k, incremented if this node itself has degree y.
    if len(figli) == y:
        k = k + 1
    for figlio in figli:
        lista2 = antenati(diz, figlio, lista2, i + 1, y, k)
    return lista2
return lista2 | null | students/1800408/homework04/program01.py | program01.py | py | 7,256 | python | en | code | null | code-starcoder2 | 51 |
216013651 | from address import Address
from customer import Customer
from transaction import Transaction
from utility import get_current_date, get_current_time, global_customer_map, global_transactions, global_branches, \
send_message
class Account(object):
    """
    Maintains a structure for all accounts
    :param str account_number: account_number of account
    :param int balance: starting balance of account
    :param Customer customer: associated customer
    :param int max_transaction_amount: maximum transaction amount allowed
    :param str branch_code: branch associated with account
    """
    def __init__(self, account_number, balance, customer, max_transaction_amount, branch_code):
        """
        Initialisation function for Account class
        """
        self.account_number = account_number
        self.balance = balance
        self.customer = customer
        self.max_transaction_amount = max_transaction_amount
        self.branch_code = branch_code
    def __str__(self):
        """
        :return printable string for an object of Account class
        :rtype str
        """
        return str(
            f'Account Number: {self.account_number}\nCustomer ID: {self.customer.customer_id}\nBalance'
            f' INR{str(self.balance)}\nMaximum Transaction Amount{str(self.max_transaction_amount)}\nBranch Code'
            f'{self.branch_code}')
    def input_account(self):
        """
        Input function to take values from the user and assign it to an object of Account class.

        Prompts for an existing/new customer, the maximum transaction
        amount, the opening balance, and a valid branch code, then builds
        the account number, registers the account with the customer, logs
        a creation transaction and notifies the customer by message.
        """
        while True:
            ch = input('Existing customer? (Y/N): ')
            # For existing customers, adds a new account to the customer.active_accounts dictionary
            if ch.upper() == 'Y':
                existing_customer_id = input('Existing Customer ID: ')
                if existing_customer_id in global_customer_map:
                    print(f'Customer found. Adding account to customer ID #{existing_customer_id}')
                    self.customer = global_customer_map[existing_customer_id]
                    self.customer.active_accounts_number += 1
                    break
                else:
                    print('Customer ID does not exist. Recheck ID or register as a new customer.')
            elif ch.upper() == 'N':
                # For new customers, creates a new customer then adds a new account to the customer.active_accounts
                # dictionary
                self.customer = Customer('', '', Address('', '', '', '', '', '', '', ''), '', '', 0, '', {})
                self.customer.input_customer()
                self.customer.active_accounts_number += 1
                break
        while True:
            try:
                self.max_transaction_amount = int(input('Maximum Transaction Amount: '))
                break
            except ValueError:
                print('\nInvalid Value\n')
        while True:
            try:
                self.balance = int(input('Initial Balance: '))
                break
            except ValueError:
                print('\nInvalid Value\n')
        while True:
            branch_code = input('Branch Code: ')
            if branch_code in global_branches:
                break
            else:
                print('\nInvalid Branch Code\n')
        # Account number layout: customer ID + branch code + 2-digit
        # per-customer account counter.
        self.account_number = str(
            self.customer.customer_id + branch_code + str("%02d" % self.customer.active_accounts_number))
        self.customer.active_accounts[self.account_number] = self
        print(f'Account created successfully! Account ID: {self.account_number}')
        # Add creation of account to transactions log
        global_transactions.append(
            Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),
                        self.get_branch_code(), 'NA', 0, self.balance,
                        f'Account {self.account_number} created successfully!'))
        send_message(
            f'Greetings from Bank XXX!\nYour Customer ID {self.customer.customer_id}\nYour Account Number '
            f'{self.account_number}.\nBalance INR{self.balance}\nYour account has been created successfully.',
            self.customer.phone_number)
    def delete_account(self, pop_from_list):
        """
        Delete function to delete an object of Account class.

        :param bool pop_from_list: when truthy, also removes the account
            from the customer's active_accounts dict (pass a falsy value
            when the caller is already iterating/clearing that dict).
        """
        # Add deletion of account to transactions log
        global_transactions.append(
            Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),
                        self.get_branch_code(), 'NA', self.balance, 0,
                        f'Account {self.account_number} deleted successfully!'))
        self.customer.active_accounts_number -= 1
        if pop_from_list:
            self.customer.active_accounts.pop(self.account_number)
        print(f'Account {str(self.account_number)} deleted successfully! Closing Balance: INR{str(self.balance)}')
        send_message(
            f'Greetings from Bank XXX!\nYour Customer ID {self.customer.customer_id}\nYour Account Number '
            f'{self.account_number}.\nYour account has been deleted successfully.', self.customer.phone_number)
    def modify_account(self):
        """
        Modify function to modify an object of Account class.

        Currently only the maximum transaction amount can be changed; the
        change is logged and the customer is notified.
        """
        modify_account_list = ['1. Modify Maximum Transaction Amount']
        for i in modify_account_list:
            print('\t' + i)
        print()
        ch = input('Command: ')
        if ch == '1':
            while True:
                try:
                    self.max_transaction_amount = int(input('New Maximum Transaction Amount: '))
                    break
                except ValueError:
                    print('\nInvalid Value\n')
            global_transactions.append(
                Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),
                            self.get_branch_code(), 0, self.balance, self.balance,
                            'Maximum Transaction Amount modified successfully!'))
            send_message(
                f'Greetings from Bank XXX!\nYour Customer ID {self.customer.customer_id}\nYour Account Number '
                f'{self.account_number}.\nYour account has been modified successfully.', self.customer.phone_number)
    def deposit(self, amount):
        """
        Deposit function to deposit money into account.

        Validates that `amount` is positive and within the per-account
        maximum before crediting; logs the transaction and notifies the
        customer.
        """
        if int(amount) <= 0:
            # Validation rule: Amount is negative
            print('Invalid amount. Please enter positive values.\nTransaction aborted!')
        elif int(amount) > self.max_transaction_amount:
            # Validation rule: Amount is more than maximum set by the customer
            print('Amount entered is more than the maximum.\nTransaction aborted!')
        else:
            self.balance += int(amount)
            # Add deposit transaction to transactions log
            global_transactions.append(
                Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),
                            self.get_branch_code(), amount, str(int(self.balance) - int(amount)), self.balance,
                            f'{str(amount)} deposited successfully!'))
            send_message(
                f'Greetings from Bank XXX!\nYour Customer ID {self.customer.customer_id}.\nYou have deposited '
                f'{str(amount)} from Account #{self.account_number}\nClosing Balance: INR{self.balance}',
                self.customer.phone_number)
    def withdraw(self, amount):
        """
        Withdraw function to withdraw money from account.

        Validates that `amount` is positive, within the per-account
        maximum, and covered by the balance before debiting; logs the
        transaction and notifies the customer.
        """
        if int(amount) <= 0:
            # Validation rule: Amount is negative
            print('Invalid amount. Please enter positive values.\nTransaction aborted!')
        elif int(amount) > self.max_transaction_amount:
            # Validation rule: Amount is more than maximum set by the customer
            print('Amount entered is more than the maximum.\nTransaction aborted!')
        elif int(amount) > self.balance:
            # Validation rule: Amount is more than current balance
            print('Amount entered is more than balance.\nTransaction aborted!')
        else:
            self.balance -= int(amount)
            # Add withdrawal transaction to transactions log
            global_transactions.append(
                Transaction(self.customer.customer_id, self.account_number, get_current_date(), get_current_time(),
                            self.get_branch_code(), amount, str(int(self.balance) + int(amount)), str(self.balance),
                            f'{str(amount)} withdrawn successfully!'))
            send_message(
                f'Greetings from Bank XXX!\nYour Customer ID {self.customer.customer_id}.\nYou have withdrawn '
                f'{str(amount)} from Account #{self.account_number}\nClosing Balance: INR{self.balance}',
                self.customer.phone_number)
    def get_branch_code(self):
        """
        :return branch_code of the account, substring[4:8]
        :rtype str
        """
        return self.account_number[4:8]
| null | account.py | account.py | py | 9,212 | python | en | code | null | code-starcoder2 | 51 |
69936569 | # -*- coding: utf-8 -*-
# Copyright 2018 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import base64
import datetime
import json
import logging
from babel.dates import format_datetime, get_timezone
from types import NoneType
from google.appengine.api import taskqueue
from google.appengine.ext import db, ndb
from google.appengine.ext.deferred import deferred
from mcfw.consts import MISSING
from mcfw.properties import azzert
from mcfw.rpc import arguments, returns
from rogerthat.bizz.app import get_app
from rogerthat.bizz.service import re_index
from rogerthat.consts import SCHEDULED_QUEUE
from rogerthat.dal import parent_ndb_key
from rogerthat.dal.service import get_service_identity, get_default_service_identity
from rogerthat.models import App, Image
from rogerthat.models.news import NewsItem, NewsItemImage
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.rpc.users import get_current_session
from rogerthat.service.api import app, news
from rogerthat.to.news import NewsActionButtonTO, NewsTargetAudienceTO, NewsFeedNameTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils import now, channel
from rogerthat.utils.service import get_service_identity_tuple, get_service_user_from_service_identity_user
from rogerthat.utils.transactions import run_in_xg_transaction
from shop.bizz import update_regiomanager_statistic, get_payed
from shop.business.legal_entities import get_vat_pct
from shop.constants import STORE_MANAGER
from shop.dal import get_customer
from shop.exceptions import NoCreditCardException, AppNotFoundException
from shop.models import Contact, Product, RegioManagerTeam, Order, OrderNumber, OrderItem, Charge
from shop.to import OrderItemTO
from solutions import translate as common_translate
from solutions.common import SOLUTION_COMMON
from solutions.common.bizz import SolutionModule, OrganizationType, facebook, twitter
from solutions.common.bizz.cityapp import get_apps_in_country_count
from solutions.common.bizz.service import get_inbox_message_sender_details, new_inbox_message, \
send_inbox_message_update, send_message_updates
from solutions.common.dal import get_solution_settings
from solutions.common.dal.cityapp import get_cityapp_profile, get_service_user_for_city
from solutions.common.models import SolutionInboxMessage, SolutionScheduledBroadcast
from solutions.common.models.budget import Budget
from solutions.common.models.news import NewsCoupon, SolutionNewsItem, NewsSettings, NewsSettingsTags, NewsReview
from solutions.common.restapi.store import generate_and_put_order_pdf_and_send_mail
from solutions.common.to.news import SponsoredNewsItemCount, NewsBroadcastItemTO, NewsBroadcastItemListTO, \
NewsStatsTO, NewsAppTO
from solutions.flex import SOLUTION_FLEX
FREE_SPONSORED_ITEMS_PER_APP = 5
SPONSOR_DAYS = 7
class AllNewsSentToReviewWarning(BusinessException):
    """Raised by put_news_item when every selected app required city review,
    so nothing was published directly (see put_news_item)."""
    pass
@returns(NewsBroadcastItemListTO)
@arguments(cursor=unicode, service_identity=unicode, tag=unicode)
def get_news(cursor=None, service_identity=None, tag=None):
    """Return a page of at most 5 news items for the current service.

    Args:
        cursor (unicode): opaque paging cursor from a previous call, or None.
        service_identity (unicode): identity to list news for (None = default).
        tag (unicode): news feed tag; falls back to u'news' when missing/empty.

    Returns:
        NewsBroadcastItemListTO: the page plus the next cursor; each item is
            enriched with its facebook/twitter broadcast flags when a
            scheduled broadcast exists for it.
    """
    if not tag or tag is MISSING:
        tag = u'news'
    news_list = news.list_news(cursor, 5, service_identity, tag=tag)
    result = NewsBroadcastItemListTO()
    result.result = []
    result.cursor = news_list.cursor
    for news_item in news_list.result:
        # A SolutionScheduledBroadcast stores whether this item should also be
        # posted on facebook/twitter; surface those flags on the returned TO.
        scheduled_item = get_scheduled_broadcast(news_item.id)
        if scheduled_item:
            on_facebook = scheduled_item.broadcast_on_facebook
            on_twitter = scheduled_item.broadcast_on_twitter
            result_item = NewsBroadcastItemTO.from_news_item_to(news_item, on_facebook, on_twitter)
        else:
            result_item = NewsBroadcastItemTO.from_news_item_to(news_item)
        result.result.append(result_item)
    return result
@returns(NewsStatsTO)
@arguments(news_id=(int, long), service_identity=unicode)
def get_news_statistics(news_id, service_identity=None):
    """Return a single news item together with the apps it has statistics for.

    Args:
        news_id (long): id of the news item.
        service_identity (unicode): owning identity (None = default).

    Returns:
        NewsStatsTO: the item plus one NewsAppTO per app in its statistics.
    """
    news_item = news.get(news_id, service_identity, True)
    # Batch-fetch the App models for every app the item gathered statistics in.
    apps_rpc = db.get([App.create_key(s.app_id) for s in news_item.statistics])
    result = NewsStatsTO(news_item=NewsBroadcastItemTO.from_news_item_to(news_item))
    result.apps = [NewsAppTO.from_model(model) for model in apps_rpc]
    return result
def _save_coupon_news_id(news_item_id, coupon):
"""
Args:
news_item_id (int)
coupon (NewsCoupon)
"""
coupon.news_id = news_item_id
coupon.put()
def _app_uses_custom_organization_types(language):
    """Return True when the app defines a translation for any organization type.

    Args:
        language (unicode): language code to fetch the app translations for.

    Returns:
        bool: True if at least one organization-type translation key has a
            non-empty translated value, False otherwise.
    """
    translations = {t.key: t.value for t in app.get_translations(language)}
    if not translations:
        return False
    org_type_keys = OrganizationType.get_translation_keys().values()
    return any(translations.get(key) for key in org_type_keys)
def get_regional_apps_of_item(news_item, default_app_id):
    """Return the app ids of *news_item* that count as regional apps.

    Regional apps are all target apps except the service's default app and
    the system-wide OSA loyalty / Rogerthat apps.

    Args:
        news_item (NewsItem): the (published) news item.
        default_app_id (unicode): default app id of the service identity.

    Returns:
        list of unicode: regional app ids (possibly empty).
    """
    excluded = (App.APP_ID_OSA_LOYALTY, App.APP_ID_ROGERTHAT, default_app_id)
    return [app_id for app_id in news_item.app_ids if app_id not in excluded]
@ndb.transactional()
def create_regional_news_item(news_item, regional_apps, service_user, service_identity, paid=False):
    # type: (NewsItem, list[unicode], users.User, unicode, bool) -> SolutionNewsItem
    """Create or update the SolutionNewsItem bookkeeping entity for a regional news item.

    Runs inside an ndb transaction. The item is marked paid when the caller
    says so, or when the service carries the FREE_REGIONAL_NEWS tag.
    """
    sln_item_key = SolutionNewsItem.create_key(news_item.id, service_user)
    settings_key = NewsSettings.create_key(service_user, service_identity)
    sln_item, news_settings = ndb.get_multi([sln_item_key, settings_key])  # type: (SolutionNewsItem, NewsSettings)
    if not sln_item:
        sln_item = SolutionNewsItem(key=sln_item_key)
    # Prefer the scheduled publish time over the creation timestamp.
    if news_item.scheduled_at:
        publish_time = news_item.scheduled_at
    else:
        publish_time = news_item.timestamp
    sln_item.publish_time = publish_time
    sln_item.app_ids = regional_apps
    sln_item.service_identity = service_identity
    if paid or news_settings and NewsSettingsTags.FREE_REGIONAL_NEWS in news_settings.tags:
        sln_item.paid = True
    sln_item.put()
    return sln_item
def check_budget(service_user, service_identity):
    """Raise when the service cannot pay for regional news.

    Services tagged FREE_REGIONAL_NEWS are exempt; every other service needs
    a Budget entity with a positive balance.

    Raises:
        BusinessException: with reason 'insufficient_budget'.
    """
    keys = [Budget.create_key(service_user), NewsSettings.create_key(service_user, service_identity)]
    budget, news_settings = ndb.get_multi(keys)  # type: (Budget, NewsSettings)
    if not news_settings or NewsSettingsTags.FREE_REGIONAL_NEWS not in news_settings.tags:
        if not budget or budget.balance <= 0:
            raise BusinessException('insufficient_budget')
def publish_item(service_identity_user, app_id, host, is_free_regional_news, order_items, coupon,
                 should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token, **kwargs):
    """Publish (create or update) a news item and run all its side effects.

    Side effects, inside one cross-group transaction: link/update the QR
    coupon, create and pay the order for sponsored items, and defer the
    regional-news bookkeeping. Afterwards the item is posted to facebook /
    twitter, either immediately or via a scheduled task.

    Args:
        service_identity_user (users.User): publishing service identity.
        app_id (unicode): default app id of the service identity.
        host (unicode): request host (used when extending facebook tokens).
        is_free_regional_news (bool): True when regional news must not consume budget.
        order_items (list of OrderItemTO): items to charge (sponsored news), may be empty.
        coupon (NewsCoupon): pre-created coupon or None.
        should_save_coupon (bool): True to link *coupon* to the new item.
        broadcast_on_facebook (bool)
        broadcast_on_twitter (bool)
        facebook_access_token (unicode)
        **kwargs: forwarded to news.publish (title, message, app_ids, ...).

    Returns:
        NewsBroadcastItemTO

    Raises:
        BusinessException: e.g. 'insufficient_budget' for regional news.
    """
    service_user, identity = get_service_identity_tuple(service_identity_user)
    news_id = kwargs.get('news_id')
    sticky = kwargs.pop('sticky', False)
    # On update the type is popped (not forwarded to news.publish); on create
    # it is left in kwargs and forwarded.
    if news_id:
        news_type = kwargs.pop('news_type')
    else:
        news_type = kwargs.get('news_type')
    qr_code_caption = kwargs.get('qr_code_caption')
    scheduled_at = kwargs.get('scheduled_at')

    def trans():
        news_item = news.publish(accept_missing=True, sticky=sticky, **kwargs)
        if should_save_coupon:
            _save_coupon_news_id(news_item.id, coupon)
        elif news_type == NewsItem.TYPE_QR_CODE and qr_code_caption is not MISSING and qr_code_caption and news_id:
            # Updating an existing QR item: keep the coupon caption in sync.
            news_coupon = NewsCoupon.get_by_news_id(service_identity_user, news_id)
            if news_coupon:
                news_coupon.content = qr_code_caption
                news_coupon.put()
            else:
                logging.warn('Not updating qr_code_caption for non-existing coupon for news with id %d',
                             news_id)
        if order_items:
            create_and_pay_news_order(service_user, news_item.id, order_items)
        regional_apps = get_regional_apps_of_item(news_item, app_id)
        if regional_apps:
            if not news_id and not is_free_regional_news:
                # check for budget on creation only
                check_budget(service_user, identity)
            deferred.defer(create_regional_news_item, news_item, regional_apps, service_user, identity,
                           paid=is_free_regional_news, _transactional=True)
        return news_item
    try:
        news_item = run_in_xg_transaction(trans)
        if broadcast_on_facebook or broadcast_on_twitter:
            if scheduled_at is not MISSING and scheduled_at > 0:
                schedule_post_to_social_media(service_user, host, broadcast_on_facebook,
                                              broadcast_on_twitter, facebook_access_token,
                                              news_item.id, scheduled_at)
            else:
                post_to_social_media(service_user, broadcast_on_facebook,
                                     broadcast_on_twitter, facebook_access_token,
                                     news_item.id)
        return NewsBroadcastItemTO.from_news_item_to(news_item, broadcast_on_facebook, broadcast_on_twitter)
    except:
        # Publishing failed: remove the now-orphaned coupon, then re-raise.
        if should_save_coupon:
            db.delete_async(coupon)
        raise
def get_news_review_message(lang, timezone, header=None, **data):
    """Build the human-readable inbox text describing a news review request.

    Args:
        lang (unicode): language for the translated labels.
        timezone (unicode): timezone name used to format the scheduled time.
        header (unicode): first line; defaults to the 'news_review_requested'
            translation when None.
        **data: news item payload (title, message, action_buttons and
            optionally scheduled_at).

    Returns:
        unicode: multi-line message text.
    """
    def trans(term, *args, **kwargs):
        return common_translate(lang, SOLUTION_COMMON, unicode(term), *args, **kwargs)

    message = u'{}\n\n'.format(header or trans('news_review_requested'))
    message += u'{}: {}\n'.format(trans('message-title'), data['title'])
    message += u'{}: {}\n'.format(trans('inbox-message'), data['message'])
    action_buttons = [
        '{}'.format(button.caption) for button in data['action_buttons']
    ]
    message += u'{}: {}\n'.format(trans('action_button'), ','.join(action_buttons))
    scheduled_at = data.get('scheduled_at')
    if scheduled_at:
        # scheduled_at is a UTC epoch timestamp; render it in the caller's timezone.
        d = datetime.datetime.utcfromtimestamp(scheduled_at)
        date_str = format_datetime(d, locale=lang, tzinfo=get_timezone(timezone))
        message += u'{}\n'.format(trans('scheduled_for_datetime', datetime=date_str))
    return message
def store_image(image_data):
    """Decode a base64 data-URI string and store it as an Image entity.

    Args:
        image_data (unicode): data URI of the form
            ``data:<mime>;base64,<payload>`` (exactly one comma expected).

    Returns:
        Image: the stored image entity.
    """
    _, payload = image_data.split(',')
    entity = Image(blob=base64.b64decode(payload))
    entity.put()
    return entity
def send_news_review_message(sln_settings, sender_service, review_key, image_url, **data):
    """Create a NEWS_REVIEW inbox message for the city service and push a dashboard update.

    Args:
        sln_settings (SolutionSettings): settings of the reviewing (city) service.
        sender_service (users.User): service that submitted the news for review.
        review_key (unicode): urlsafe key of the NewsReview entity.
        image_url (unicode): public url of the attached image, or None.
        **data: news item payload, see get_news_review_message.

    Returns:
        unicode: datastore key of the created inbox message.
    """
    msg = get_news_review_message(sln_settings.main_language, sln_settings.timezone, **data)
    sender_user_details = get_inbox_message_sender_details(sender_service)
    picture_urls = []
    if image_url:
        picture_urls.append(image_url)
    message = new_inbox_message(
        sln_settings, msg, service_identity=None,
        category=SolutionInboxMessage.CATEGORY_NEWS_REVIEW,
        category_key=review_key,
        user_details=sender_user_details,
        picture_urls=picture_urls)
    send_message_updates(sln_settings, u'solutions.common.news.review.update', message)
    return unicode(message.key())
def send_news_for_review(city_service, service_identity_user, app_id, host, is_free_regional_news, order_items, coupon,
                         should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token,
                         **kwargs):
    """Capture a publish request on a NewsReview entity and ask the city service for approval.

    Every parameter needed to publish is stored on the review so that
    publish_item_from_review can replay it unchanged once approved.
    """
    key = NewsReview.create_key(city_service)
    # NOTE(review): the review key depends only on the city service, so a new
    # submission appears to replace a pending one -- confirm this is intended.
    review = key.get() or NewsReview(key=key)
    review.service_identity_user = service_identity_user
    review.app_id = app_id
    review.host = host
    review.is_free_regional_news = is_free_regional_news
    review.order_items = order_items
    review.coupon_id = coupon and coupon.id
    review.broadcast_on_facebook = broadcast_on_facebook
    review.broadcast_on_twitter = broadcast_on_twitter
    review.facebook_access_token = facebook_access_token
    review.data = kwargs
    image_url = None
    if kwargs['image']:
        # Persist the uploaded image separately so the reviewer can preview it.
        image = store_image(kwargs['image'])
        review.image_id = image.id
        image_url = u'/unauthenticated/image/%d' % review.image_id
    sln_settings = get_solution_settings(city_service)
    sender_service, _ = get_service_identity_tuple(service_identity_user)
    review.inbox_message_key = send_news_review_message(
        sln_settings, sender_service, unicode(key), image_url, **kwargs)
    review.put()
@returns()
@arguments(review_key=unicode, reason=unicode)
def send_news_review_reply(review_key, reason):
    """Send the city's review feedback *reason* back to the submitting service's inbox.

    Silently does nothing when the review no longer exists.
    """
    review = ndb.Key(urlsafe=review_key).get()
    if review:
        service_user, identity = get_service_identity_tuple(review.service_identity_user)
        sln_settings = get_solution_settings(service_user)
        # Re-render the original review message with *reason* as its header.
        review_msg = get_news_review_message(sln_settings.main_language, sln_settings.timezone, reason, **review.data)
        sender_user_details = get_inbox_message_sender_details(review.parent_service_user)
        message = new_inbox_message(sln_settings, review_msg, service_identity=identity,
                                    user_details=sender_user_details)
        send_inbox_message_update(sln_settings, message, service_identity=identity)
@returns(NewsBroadcastItemTO)
@arguments(review_key=unicode)
def publish_item_from_review(review_key):
    """Publish a reviewed news item using the parameters captured on the review.

    Also marks the related inbox message read/trashed and deletes the review
    (including its stored image) afterwards.

    Returns:
        NewsBroadcastItemTO

    Raises:
        BusinessException: when the review no longer exists.
    """
    review = ndb.Key(urlsafe=review_key).get()
    if not review:
        raise BusinessException('review item is not found!')
    coupon = review.coupon_id and NewsCoupon.get_by_id(review.coupon_id)
    should_save_coupon = bool(coupon)
    service_user, _ = get_service_identity_tuple(review.service_identity_user)
    # Publish on behalf of the original submitting service, not the city.
    with users.set_user(service_user):
        item = publish_item(
            review.service_identity_user, review.app_id, review.host, review.is_free_regional_news,
            review.order_items, coupon, should_save_coupon, review.broadcast_on_facebook, review.broadcast_on_twitter,
            review.facebook_access_token, **review.data)
    inbox_message = SolutionInboxMessage.get(review.inbox_message_key)
    if inbox_message:
        inbox_message.read = True
        inbox_message.trashed = True
        inbox_message.put()
        sln_settings = get_solution_settings(review.parent_service_user)
        send_inbox_message_update(sln_settings, inbox_message)
    if review.image_id:
        Image.get_by_id(review.image_id).key.delete()
    review.key.delete()
    return item
@returns(NewsBroadcastItemTO)
@arguments(service_identity_user=users.User, title=unicode, message=unicode, broadcast_type=unicode, sponsored=bool,
           image=unicode, action_button=(NoneType, NewsActionButtonTO), order_items=(NoneType, [OrderItemTO]),
           news_type=(int, long), qr_code_caption=unicode, app_ids=[unicode], scheduled_at=(int, long),
           news_id=(NoneType, int, long), broadcast_on_facebook=bool, broadcast_on_twitter=bool,
           facebook_access_token=unicode, target_audience=NewsTargetAudienceTO, role_ids=[(int, long)], host=unicode,
           tag=unicode)
def put_news_item(service_identity_user, title, message, broadcast_type, sponsored, image, action_button, order_items,
                  news_type, qr_code_caption, app_ids, scheduled_at, news_id=None, broadcast_on_facebook=False,
                  broadcast_on_twitter=False, facebook_access_token=None, target_audience=None, role_ids=None,
                  host=None, tag=None):
    """
    Creates a news item first then processes the payment if necessary (not necessary for non-promoted posts).
    If the payment was unsuccessful it will be retried in a deferred task.

    Args:
        service_identity_user (users.User)
        title (unicode)
        message (unicode)
        broadcast_type (unicode)
        sponsored (bool)
        image (unicode)
        action_button (NewsActionButtonTO)
        order_items (list of OrderItemTO)
        news_type (int)
        qr_code_caption (unicode)
        app_ids (list of unicode)
        scheduled_at (long)
        news_id (long): id of the news item to update. When not provided a new news item will be created.
        broadcast_on_facebook (bool)
        broadcast_on_twitter (bool)
        facebook_access_token (unicode): user or page access token
        target_audience (NewsTargetAudienceTO)
        role_ids (list of long) the list of role ids to filter sending the news to their members
        host (unicode): host of the api request (used for social media apps)
        tag(unicode)

    Returns:
        news_item (NewsBroadcastItemTO)

    Raises:
        BusinessException: invalid product/news type, or no apps selected.
        AllNewsSentToReviewWarning: when every selected app required review.
    """
    NEWS_TAG = u'news'
    if not order_items or order_items is MISSING:
        order_items = []
    if not tag or tag is MISSING:
        tag = NEWS_TAG
    if news_type == NewsItem.TYPE_QR_CODE:
        # QR/coupon news requires the loyalty module; caption defaults to the title.
        sln_settings = get_solution_settings(get_service_user_from_service_identity_user(service_identity_user))
        azzert(SolutionModule.LOYALTY in sln_settings.modules)
        qr_code_caption = MISSING.default(qr_code_caption, title)
    sponsored_until = None
    should_save_coupon = news_type == NewsItem.TYPE_QR_CODE and not news_id
    sponsored_app_ids = set()
    si = get_service_identity(service_identity_user)
    # Validate the order items: only news-promotion products are allowed, at
    # most one per app; the billed count is the item's reach in that app.
    for order_item in reversed(order_items):
        if order_item.product == Product.PRODUCT_NEWS_PROMOTION and sponsored:
            azzert(order_item.app_id)
            azzert(order_item.app_id not in sponsored_app_ids)
            sponsored_app_ids.add(order_item.app_id)
            order_item.count = get_sponsored_news_count_in_app(service_identity_user, order_item.app_id).count
        else:
            raise BusinessException('Invalid product %s' % order_item.product)
    if not news_id and not app_ids:
        raise BusinessException('Please select at least one app to publish this news in')
    if sponsored:
        sponsored_until_date = datetime.datetime.utcnow() + datetime.timedelta(days=SPONSOR_DAYS)
        sponsored_until = long(sponsored_until_date.strftime('%s'))
        # for sponsored news that is free in certain apps no order item is given, so add it here
        sponsored_counts = get_sponsored_news_count(service_identity_user, app_ids)
        for sponsored_count in sponsored_counts:
            if sponsored_count.remaining_free != 0 and sponsored_count.app_id in app_ids:
                sponsored_app_ids.add(sponsored_count.app_id)
        # Sponsored news is only published in the apps that are paid for / free.
        app_ids = list(sponsored_app_ids)
    service_user, identity = get_service_identity_tuple(service_identity_user)
    default_app = get_app(si.defaultAppId)
    if App.APP_ID_ROGERTHAT in si.appIds and App.APP_ID_ROGERTHAT not in app_ids:
        app_ids.append(App.APP_ID_ROGERTHAT)
    if default_app.demo and App.APP_ID_ROGERTHAT in app_ids:
        app_ids.remove(App.APP_ID_ROGERTHAT)
    # feed_names maps app_id -> the (regional/tagged) feed the item goes into.
    feed_names = {}
    if is_regional_news_enabled(default_app):
        if tag == NEWS_TAG:
            if default_app.demo:
                # For demo apps the following rules count
                # Extra apps selected --> post in REGIONAL NEWS in the demo app
                # No extra apps selected --> post in LOCAL NEWS in the demo app
                if len(app_ids) == 1 and app_ids[0] == default_app.app_id:
                    pass  # LOCAL NEWS
                else:
                    feed_names[default_app.app_id] = NewsFeedNameTO(
                        default_app.app_id, u'regional_news')  # REGIONAL NEWS
                    app_ids = [default_app.app_id]
            else:
                for app_id in app_ids:
                    if app_id not in (si.app_id, App.APP_ID_ROGERTHAT):
                        feed_names[app_id] = NewsFeedNameTO(app_id, u'regional_news')
        else:
            if default_app.demo:
                feed_names[default_app.app_id] = NewsFeedNameTO(default_app.app_id, tag)
            else:
                for app_id in app_ids:
                    feed_names[app_id] = NewsFeedNameTO(app_id, tag)
    # Arguments forwarded to news.publish via publish_item.
    kwargs = {
        'sticky_until': sponsored_until,
        'message': message,
        'broadcast_type': broadcast_type,
        'service_identity': identity,
        'news_id': news_id,
        'news_type': news_type,
        'image': image,
        'scheduled_at': scheduled_at,
        'target_audience': target_audience,
        'role_ids': role_ids,
        'tags': [tag],
    }
    if news_type == NewsItem.TYPE_QR_CODE:
        if should_save_coupon:
            # Create the coupon up front; its id becomes the QR payload.
            def trans():
                coupon = NewsCoupon(
                    parent=NewsCoupon.create_parent_key(service_identity_user),
                    content=qr_code_caption
                )
                coupon.put()
                return coupon
            coupon = db.run_in_transaction(trans)
            kwargs['qr_code_content'] = u'%s' % json.dumps({'c': coupon.id})
        kwargs['qr_code_caption'] = qr_code_caption
    elif news_type == NewsItem.TYPE_NORMAL:
        kwargs.update({
            'action_buttons': [action_button] if action_button else [],
            'title': title
        })
    else:
        raise BusinessException('Invalid news type')
    # Strip arguments the caller did not supply (py2: items() is a list, so
    # deleting while iterating is safe here).
    for key, value in kwargs.items():
        if value is MISSING:
            del kwargs[key]

    current_session = get_current_session()
    # Regional news is free when published from the shop backend or a demo app.
    is_free_regional_news = (current_session and current_session.shop) or default_app.demo

    if sponsored:
        sticky = True
    else:
        # News from city services is always sticky, unless the app uses
        # custom (translated) organization types.
        customer = get_customer(service_user)
        if customer and customer.organization_type == OrganizationType.CITY and \
                not _app_uses_custom_organization_types(customer.language):
            sticky = True
            if kwargs['sticky_until'] is None:
                kwargs['sticky_until'] = now()
        else:
            sticky = False
    kwargs['sticky'] = sticky

    if not should_save_coupon:
        coupon = None
    new_app_ids = list(app_ids)
    if not news_id:
        # check for city-enabled news review
        for app_id in app_ids:
            city_service = get_service_user_for_city(app_id)
            if city_service and city_service != service_user:
                city_app_profile = get_cityapp_profile(city_service)
                if city_app_profile.review_news:
                    # create a city review for this app
                    city_kwargs = kwargs.copy()
                    city_kwargs['app_ids'] = [app_id]
                    city_kwargs['feed_names'] = feed_names.get(app_id, [])
                    send_news_for_review(
                        city_service, service_identity_user, app_id, host, is_free_regional_news, order_items,
                        coupon, should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token,
                        **city_kwargs)
                    # remove from current feed
                    new_app_ids.remove(app_id)
                    if feed_names and app_id in feed_names:
                        del feed_names[app_id]
        if new_app_ids == [App.APP_ID_ROGERTHAT] or (not new_app_ids and len(app_ids) > 0):
            raise AllNewsSentToReviewWarning(u'news_review_all_sent_to_review')
    # for the rest
    kwargs['feed_names'] = feed_names.values()
    kwargs['app_ids'] = new_app_ids
    with users.set_user(service_user):
        return publish_item(
            service_identity_user, si.app_id, host, is_free_regional_news, order_items,
            coupon, should_save_coupon, broadcast_on_facebook, broadcast_on_twitter, facebook_access_token, **kwargs)
@returns()
@arguments(service_user=users.User, on_facebook=bool, on_twitter=bool,
           facebook_access_token=unicode, news_id=(int, long))
def post_to_social_media(service_user, on_facebook, on_twitter,
                         facebook_access_token, news_id):
    """Post a news item's title + message (and image, if any) to facebook and/or twitter.

    Logs a warning and returns without posting when the item no longer exists
    or is a QR-coupon item (coupons are never broadcast).
    """
    news_item = NewsItem.get_by_id(news_id)
    if not news_item:
        logging.warn('Cannot post to social media, news item does not exist')
        return
    if news_item.type == NewsItem.TYPE_QR_CODE:
        logging.warn('Cannot post to social media for a coupon news type')
        return
    message = news_item.title + '\n' + news_item.message
    image_content = None
    if news_item.image_id:
        news_item_image = NewsItemImage.get_by_id(news_item.image_id)
        if news_item_image:
            image_content = news_item_image.image
    # Facebook needs a valid access token; twitter credentials are resolved
    # from the service user inside update_twitter_status.
    if on_facebook and facebook_access_token:
        facebook.post_to_facebook(facebook_access_token, message, image_content)
    if on_twitter:
        media = []
        if image_content:
            media.append(image_content)
        twitter.update_twitter_status(service_user, message, media)
def post_to_social_media_scheduled(str_key):
    """Deferred-task entry point: run a scheduled social-media broadcast, then delete its record.

    Args:
        str_key (str): datastore key string of the SolutionScheduledBroadcast.
    """
    scheduled_broadcast = SolutionScheduledBroadcast.get(str_key)
    if not scheduled_broadcast or scheduled_broadcast.deleted:
        # Broadcast was cancelled or already removed; nothing to do.
        return
    news_id = scheduled_broadcast.news_id
    on_facebook = scheduled_broadcast.broadcast_on_facebook
    on_twitter = scheduled_broadcast.broadcast_on_twitter
    facebook_access_token = scheduled_broadcast.facebook_access_token
    service_user = scheduled_broadcast.service_user
    with users.set_user(service_user):
        post_to_social_media(service_user, on_facebook, on_twitter,
                             facebook_access_token, news_id)
    scheduled_broadcast.delete()
def get_scheduled_broadcast(news_item_id, service_user=None, create=False):
    """Fetch the SolutionScheduledBroadcast for a news item.

    Args:
        news_item_id (long): id of the news item.
        service_user (users.User): owner; defaults to the current user.
        create (bool): when True and no entity exists, return a fresh
            (not yet saved) one instead of None.

    Returns:
        SolutionScheduledBroadcast or None
    """
    if service_user is None:
        service_user = users.get_current_user()
    key = SolutionScheduledBroadcast.create_key(news_item_id,
                                                service_user,
                                                SOLUTION_FLEX)
    scheduled_broadcast = db.get(key)
    if not scheduled_broadcast and create:
        scheduled_broadcast = SolutionScheduledBroadcast(key=key)
    return scheduled_broadcast
def schedule_post_to_social_media(service_user, host, on_facebook, on_twitter,
                                  facebook_access_token, news_id, scheduled_at):
    """Create or reschedule the deferred task that posts a news item to social media.

    Replaces any previously scheduled task for the same news item. For
    facebook, the access token is extended first so it is still valid when
    the task fires (extension failures are logged, not fatal).

    Args:
        service_user (users.User)
        host (unicode): request host, used for the facebook token exchange.
        on_facebook (bool)
        on_twitter (bool)
        facebook_access_token (unicode): may be None when one was stored before.
        news_id (long)
        scheduled_at (long): UTC epoch timestamp; values < 1 are ignored.

    Raises:
        ValueError: facebook was requested but no token is available.
    """
    if scheduled_at < 1:
        return
    scheduled_broadcast = get_scheduled_broadcast(news_id, service_user, create=True)
    if scheduled_broadcast.timestamp == scheduled_at:
        # Already scheduled for this exact time; nothing to do.
        return
    if on_facebook:
        if not facebook_access_token:
            if scheduled_broadcast.facebook_access_token:
                facebook_access_token = scheduled_broadcast.facebook_access_token
            else:
                raise ValueError('facebook access token is not provided, %s, news id: %d' % (service_user, news_id))
        # try to extend facebook access token first
        try:
            facebook_access_token = facebook.extend_access_token(host, facebook_access_token)
        except:
            logging.error('Cannot get an extended facebook access token', exc_info=True)
    if scheduled_broadcast.scheduled_task_name:
        # remove the old scheduled task
        task_name = str(scheduled_broadcast.scheduled_task_name)
        taskqueue.Queue(SCHEDULED_QUEUE).delete_tasks_by_name(task_name)
    scheduled_broadcast.timestamp = scheduled_at
    scheduled_broadcast.broadcast_on_facebook = on_facebook
    scheduled_broadcast.broadcast_on_twitter = on_twitter
    scheduled_broadcast.facebook_access_token = facebook_access_token
    scheduled_broadcast.news_id = news_id
    task = deferred.defer(post_to_social_media_scheduled,
                          scheduled_broadcast.key_str,
                          _countdown=scheduled_at - now(),
                          _queue=SCHEDULED_QUEUE,
                          _transactional=db.is_in_transaction())
    scheduled_broadcast.scheduled_task_name = task.name
    scheduled_broadcast.put()
@returns()
@arguments(service_user=users.User, news_item_id=(int, long), order_items_to=[OrderItemTO])
def create_and_pay_news_order(service_user, news_item_id, order_items_to):
    """
    Creates an order, orderitems, charge and executes the charge. Should be executed in a transaction.

    Args:
        service_user (users.User)
        news_item_id (long)
        order_items_to (list of OrderItemTO)

    Raises:
        NoCreditCardException
        ProductNotFoundException
    """
    # Customer/contact lookups must escape the surrounding transaction
    # (they live in other entity groups), hence @db.non_transactional.
    @db.non_transactional
    def _get_customer():
        return get_customer(service_user)

    @db.non_transactional
    def _get_contact():
        return Contact.get_one(customer)

    customer = _get_customer()
    azzert(customer)
    contact = _get_contact()
    azzert(contact)
    if not customer.stripe_valid:
        raise NoCreditCardException(customer)
    news_product_key = Product.create_key(Product.PRODUCT_NEWS_PROMOTION)
    rmt_key = RegioManagerTeam.create_key(customer.team_id)
    news_promotion_product, team = db.get((news_product_key, rmt_key))
    azzert(news_promotion_product)
    azzert(team)
    new_order_key = Order.create_key(customer.id, OrderNumber.next(team.legal_entity_key))
    vat_pct = get_vat_pct(customer, team)
    # Only news-promotion items are valid; price each at the current product price.
    total_amount = 0
    for order_item in order_items_to:
        if order_item.product == Product.PRODUCT_NEWS_PROMOTION:
            total_amount += news_promotion_product.price * order_item.count
            order_item.price = news_promotion_product.price
        else:
            raise BusinessException('Invalid product \'%s\'' % order_item.product)
    vat = int(round(vat_pct * total_amount / 100))
    total_amount_vat_incl = int(round(total_amount + vat))
    now_ = now()
    to_put = []
    order = Order(
        key=new_order_key,
        date=now_,
        amount=total_amount,
        vat_pct=vat_pct,
        vat=vat,
        total_amount=total_amount_vat_incl,
        contact_id=contact.id,
        status=Order.STATUS_SIGNED,
        is_subscription_order=False,
        is_subscription_extension_order=False,
        date_signed=now_,
        manager=STORE_MANAGER,
        team_id=team.id
    )
    to_put.append(order)
    azzert(order.total_amount >= 0)
    for item in order_items_to:
        order_item = OrderItem(
            parent=new_order_key,
            number=item.number,
            product_code=item.product,
            count=item.count,
            comment=item.comment,
            price=item.price
        )
        order_item.app_id = item.app_id
        if order_item.product_code == Product.PRODUCT_NEWS_PROMOTION:
            # Link the billed promotion back to the news item it promotes.
            order_item.news_item_id = news_item_id
        to_put.append(order_item)
    db.put(to_put)
    # Not sure if this is necessary
    deferred.defer(generate_and_put_order_pdf_and_send_mail, customer, new_order_key, service_user,
                   _transactional=True)
    # No need for signing here, immediately create a charge.
    charge = Charge(parent=new_order_key)
    charge.date = now()
    charge.type = Charge.TYPE_ORDER_DELIVERY
    charge.amount = order.amount
    charge.vat_pct = order.vat_pct
    charge.vat = order.vat
    charge.total_amount = order.total_amount
    charge.manager = order.manager
    charge.team_id = order.team_id
    charge.status = Charge.STATUS_PENDING
    charge.date_executed = now()
    charge.currency_code = team.legal_entity.currency_code
    charge.put()
    # Update the regiomanager statistics so these kind of orders show up in the monthly statistics
    # NOTE(review): amount / 100 suggests amounts are stored in cents -- verify.
    deferred.defer(update_regiomanager_statistic, gained_value=order.amount / 100,
                   manager=order.manager, _transactional=True)
    # charge the credit card
    if charge.total_amount > 0:
        get_payed(customer.id, order, charge)
    else:
        # Nothing to pay (fully free): mark the charge executed immediately.
        charge.status = Charge.STATUS_EXECUTED
        charge.date_executed = now()
        charge.put()
    channel.send_message(service_user, 'common.billing.orders.update')
def delete_news(news_id):
    """Delete the news item with the given id via the news service API."""
    news.delete(news_id)
@returns(SponsoredNewsItemCount)
@arguments(service_identity_user=users.User, app_id=unicode)
def get_sponsored_news_count_in_app(service_identity_user, app_id):
    """Return the summed reach of the last sticky news items in an app, plus
    the number of free sponsored items the service still has there.

    Args:
        service_identity_user (users.User)
        app_id (unicode)

    Returns:
        SponsoredNewsItemCount: count is the summed reach of the last
            FREE_SPONSORED_ITEMS_PER_APP sticky items (0 while free items
            remain); remaining free slots are reported alongside.
    """
    news_items = NewsItem.list_sticky_by_sender_in_app(service_identity_user, app_id).fetch(
        FREE_SPONSORED_ITEMS_PER_APP)
    count = 0
    # Only start counting reach once the free quota is fully used.
    if len(news_items) == FREE_SPONSORED_ITEMS_PER_APP:
        for news_item in news_items:
            # NOTE(review): statistics is indexed by app_id here but iterated
            # as a sequence in get_news_statistics -- confirm
            # NewsItem.statistics supports both access patterns.
            item_stats = news_item.statistics[app_id]
            if item_stats:
                count += item_stats.reached_total
    remaining_free_items = FREE_SPONSORED_ITEMS_PER_APP - len(news_items)
    return SponsoredNewsItemCount(app_id, count, remaining_free_items)
@returns([SponsoredNewsItemCount])
@arguments(service_identity_user=users.User, app_ids=[unicode])
def get_sponsored_news_count(service_identity_user, app_ids):
    """
    Calculate price for a news in every app, based on the average reach of the last five news items.
    First five news items in an app should be free.

    Args:
        service_identity_user (users.User)
        app_ids (list of unicode)

    Returns:
        things (list of SponsoredNewsItemCount)
    """
    # NOTE(review): near-duplicate of get_sponsored_news_count_in_app; the only
    # difference is that the reach is averaged here (int(count / 5)) instead of
    # summed -- consider extracting the shared lookup into one helper.
    price_per_apps = []
    for app_id in app_ids:
        news_items = NewsItem.list_sticky_by_sender_in_app(service_identity_user, app_id).fetch(
            FREE_SPONSORED_ITEMS_PER_APP)
        count = 0
        # Only start counting reach once the free quota is fully used.
        if len(news_items) == FREE_SPONSORED_ITEMS_PER_APP:
            for news_item in news_items:
                item_stats = news_item.statistics[app_id]
                if item_stats:
                    count += item_stats.reached_total
        remaining_free_items = FREE_SPONSORED_ITEMS_PER_APP - len(news_items)
        price_per_apps.append(SponsoredNewsItemCount(app_id, int(count / 5), remaining_free_items))
    return price_per_apps
def is_regional_news_enabled(app_model):
    # type: (App) -> bool
    """Return True when regional news is available for *app_model*.

    All ``osa-`` apps have it; other apps qualify only when they are city
    apps and their country hosts more than one app.
    """
    app_id = app_model.app_id
    if app_id.startswith('osa-'):
        return True
    if app_model.type != App.APP_TYPE_CITY_APP:
        return False
    country_code = app_id.split('-')[0].lower()
    return get_apps_in_country_count(country_code) > 1
def get_news_reviews(service_user):
    """Return an ndb query for all NewsReview entities owned by *service_user* (a city service)."""
    parent_key = parent_ndb_key(service_user, SOLUTION_COMMON)
    return NewsReview.query(ancestor=parent_key)
| null | src/solutions/common/bizz/news.py | news.py | py | 34,441 | python | en | code | null | code-starcoder2 | 51 |
64147603 | from flask import Flask
try:
from flask import Blueprint
except ImportError:
# Blueprints only available starting with 0.7,
# fall back to old Modules otherwise.
Blueprint = None
from flask import Module
from flaskext.assets import Environment, Bundle
class TestUrlAndDirectory(object):
    """By default, the 'url' and 'directory' settings of webassets are
    not used in Flask-Assets; that is, the values are automatically
    handled based on the configuration of the Flask app and the modules
    used.

    The user can disable the automatic handling by setting these values
    if he needs to for some reason.

    Let's test the different scenarios to ensure everything works.
    """

    def setup(self):
        # Build an app plus one module/blueprint, each with its own static path,
        # so url/directory resolution can be exercised for both.
        self.app = Flask(__name__, static_path='/app_static')
        import test_module
        if not Blueprint:
            self.module = Module(test_module.__name__, name='module',
                                 static_path='/mod_static')
            self.app.register_module(self.module)
        else:
            self.blueprint = Blueprint('module', test_module.__name__,
                                       static_url_path='/mod_static',
                                       static_folder='static')
            self.app.register_blueprint(self.blueprint)
        self.env = Environment(self.app)

    # NOTE(review): missing the 'test_' prefix, so test runners will not pick
    # this method up -- rename to test_config_values_not_set_by_default to
    # enable it. Also, assert_raises is not imported in this file (presumably
    # from nose.tools) -- verify before enabling.
    def config_values_not_set_by_default(self):
        assert not 'directory' in self.env.config
        assert not 'url' in self.env.config
        assert_raises(KeyError, self.env.config.__getitem__, 'directory')
        assert_raises(KeyError, self.env.config.__getitem__, 'url')

    def test_directory_auto(self):
        """Test how we handle file references if no root 'directory' is
        configured manually.
        """
        assert not 'directory' in self.env.config
        root = self.app.root_path
        assert Bundle('foo').get_files(self.env) == [root + '/static/foo']
        # Modules prefixes in paths are handled specifically.
        assert Bundle('module/bar').get_files(self.env) == [root + '/test_module/static/bar']
        # Prefixes that aren't valid module names are just considered
        # subfolders of the main app.
        assert Bundle('nomodule/bar').get_files(self.env) == [root + '/static/nomodule/bar']
        # In case the name of a app-level subfolder conflicts with a
        # module name, you can always use this hack:
        assert Bundle('./module/bar').get_files(self.env) == [root + '/static/module/bar']

    def test_directory_custom(self):
        """A custom root directory is configured."""
        self.env.directory = '/tmp'
        assert Bundle('foo').get_files(self.env) == ['/tmp/foo']
        # We do not recognize references to modules.
        assert Bundle('module/bar').get_files(self.env) == ['/tmp/module/bar']

    def test_url_auto(self):
        """Test how urls are generated if no 'url' is configured manually.
        """
        assert not 'url' in self.env.config
        assert Bundle('foo').urls(self.env) == ['/app_static/foo']
        # Urls for files that point to a module use that module's url prefix.
        assert Bundle('module/bar').urls(self.env) == ['/mod_static/bar']
        # Try with a prefix that's not actually a valid module
        assert Bundle('nomodule/bar').urls(self.env) == ['/app_static/nomodule/bar']

    def test_url_custom(self):
        """A custom root url is configured."""
        self.env.url = '/media'
        assert Bundle('foo').urls(self.env) == ['/media/foo']
        # We do not recognize references to modules.
        assert Bundle('module/bar').urls(self.env) == ['/media/module/bar']

    def test_existing_request_object_used(self):
        """[Regression] Check for a bug where the url generation code of
        Flask-Assets always added a dummy test request to the context stack,
        instead of using the existing one if there is one.

        We test this by making the context define a custom SCRIPT_NAME
        prefix, and then we check if it affects the generated urls, as
        it should.
        """
        with self.app.test_request_context(
            '/', environ_overrides={'SCRIPT_NAME': '/yourapp'}):
            assert Bundle('foo').urls(self.env) == ['/yourapp/app_static/foo']
| null | tests/test_integration.py | test_integration.py | py | 4,296 | python | en | code | null | code-starcoder2 | 51 |
408478612 | from flask import Flask, redirect, render_template, request, url_for, send_from_directory
from datetime import datetime
from contact import Contact
from user import User
from database import database
import os
import logging
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico')
@app.route('/')
def home():
return render_template('home.html')
@app.route('/register', methods=['GET'])
def register_get():
return render_template('register.html')
@app.route('/register', methods=['POST'])
def register_post():
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
values = (
None,
request.form['username'],
request.form['number'],
User.hash_password(request.form['password']),
timestamp
)
# i = User(*values).create()
i = database.create_user(User(*values))
# if i == 0:
# return redirect('/register')
return redirect('/')
@app.route('/login', methods=['GET'])
def login_get():
return render_template('login.html')
@app.route('/login', methods=['POST'])
def login_post():
username = request.form['username']
password = request.form['password']
user = database.get_user_by_name(username)
# number = User.find_by_number(auth)
if user is not None:
if user.verify_password(password) is True:
return redirect(url_for('display_contacts', user_id=user.get_id()))
else:
return 'Unsuccessful login'
# return redirect('/login')
# elif number is not None:
# if number.verify_password(password) is True:
# return redirect(url_for('display_contacts', user_id=number.id))
# else:
# return redirect('/login')
# TODO upgrade this
return 'No such user found. Have you registered?'
@app.route('/contacts/<int:user_id>', methods=['GET'])
def display_contacts(user_id):
user = database.get_user_by_id(user_id)
ping(user)
contacts = database.get_contacts_by_user_id(user)
if contacts is None:
app.logger.info('No contacts with this user id')
return render_template('contacts.html', user_id=user.get_id())
elif contacts is False:
app.logger.error('Error getting contact by user id')
return 'Error getting contact by user id'
app.logger.info('At least 1 contact with this user id exists')
return render_template('contacts.html', contacts=contacts, user_id=user.get_id())
@app.route('/contacts/<int:user_id>/create', methods=['GET'])
def create_contact_get(user_id):
user = database.get_user_by_id(user_id)
ping(user)
return render_template('create_contact.html', user_id=user.get_id())
@app.route('/contacts/<int:user_id>/create', methods=['POST'])
def create_contact_post(user_id):
user = database.get_user_by_id(user_id)
ping(user)
values = (None, request.form['Name'], request.form['Number'], request.form['Note'], user.get_id())
database.create_contact(Contact(*values))
return redirect(url_for('display_contacts', user_id=user.get_id()))
@app.route('/contacts/<int:user_id>/<int:contact_id>', methods=['GET'])
def display_contact(user_id, contact_id):
user = database.get_user_by_id(user_id)
ping(user)
contact = database.get_contact_by_id(contact_id)
if contact is None:
app.logger.error('No contact with this id.')
return render_template('contact.html', user_id=user.get_id(), contact=contact)
@app.route('/contacts/<int:user_id>/<int:contact_id>', methods=['POST'])
def update_contact(user_id, contact_id):
user = database.get_user_by_id(user_id)
ping(user)
contact = database.get_contact_by_id(contact_id)
try:
if request.form['Update_button'] is not None:
values = (contact_id, request.form['Name'], request.form['Number'], request.form['Note'], user.get_id())
database.update_contact(Contact(*values))
except KeyError:
app.logger.info('KeyError exception encountered when updating contact.')
try:
if request.form['Delete_button'] is not None:
database.delete_contact(database.get_contact_by_id(contact_id))
except KeyError:
app.logger.error('KeyError exception encountered when deleting contact.')
except:
app.logger.error('Unidentified exception encountered when deleting contact.')
except:
app.logger.info('Unidentified exception encountered when updating contact.')
return redirect(url_for('display_contacts', user_id=user.get_id()))
@app.route('/contacts/<int:user_id>/myinfo')
def display_user_info(user_id):
user = database.get_user_by_id(user_id)
if user is None or False:
# TODO check if we ever enter here
return 'error'
username = user.get_name()
number = user.get_number()
return render_template('user_info.html', user=user, username=username, number=number)
def ping(user):
database.ping(user)
if __name__ == "__main__":
app.run()
| null | app.py | app.py | py | 5,125 | python | en | code | null | code-starcoder2 | 51 |
225592973 | splash = '''
888 888 888 .d888 888888b. 888
888 o 888 888 d88P" 888 "88b 888
888 d8b 888 888 888 888 .88P 888
888 d888b 888 .d88b. 888d888 .d88b. 888 888 888 .d88b. 888 888888 8888888K. .d88b. 888888
888d88888b888 d8P Y8b 888P" d8P Y8b 888 888 888 d88""88b 888 888 888 "Y88b d88""88b 888
88888P Y88888 88888888 888 88888888 888 888 888 888 888 888 888 888 888 888 888 888
8888P Y8888 Y8b. 888 Y8b. Y88b 888 d88P Y88..88P 888 888 888 d88P Y88..88P Y88b.
888P Y888 "Y8888 888 "Y8888 "Y8888888P" "Y88P" 888 888 8888888P" "Y88P" "Y888
- = https://github.com/werewolves-devs/werewolf_bot = -
'''
splashes = [
'Now with 100% less JavaScript',
'I made it, we *HAVE* to use it',
'Standards? What are they?',
'Nah, we don\'t use libraries here.',
'The mailbox system is a \'good idea\'',
'Leaking tokens is fun!',
'Let\'s just shove everything into main.py, who still does organization in 2018',
'Works on my machine',
'Always use a database. What\'s a JSON?',
'Powered by Electricity',
'Who still writes docs in 2018?',
"First normal form? What does that mean?",
"By using a relational database but with nonrelational practices we get the worst of both worlds!",
"I haven\'t paid attention or read any comments, therefor it\'s impossible to understand!",
"Don\'t use that! Oh, you\'re asking why? Well... just don\'t it.",
"I don\'t wanna explain, just Google it.",
"What are cogs?",
"This is MY project. You\'re just freeloaders.",
"You've got three weeks to fix EVERYTHING.",
"No-one agrees? Too bad! My idea it is.",
"The next version will be written in Java only!"
]
import discord
import random
import asyncio
# Import config data
import story_time.cc_creation as creation_messages
from config import welcome_channel, game_master, dead_participant, frozen_participant, administrator
from config import ww_prefix as prefix
from management.db import db_set, db_get
from interpretation.ww_head import process
from interpretation.polls import count_votes
import config
import management.db as db
client = discord.Client()
def get_role(server_roles, target_id):
for each in server_roles:
if each.id == target_id:
return each
return None
async def remove_all_game_roles(member):
for role in member.roles:
if role.id == config.frozen_participant:
await member.remove_roles(role, reason="Updating CC permissions")
if role.id == config.dead_participant:
await member.remove_roles(role, reason="Updating CC permissions")
if role.id == config.suspended:
await member.remove_roles(role, reason="Updating CC permissions")
# Whenever a message is sent.
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
gamelog_channel = client.get_channel(int(config.game_log))
botspam_channel = client.get_channel(int(config.bot_spam))
storytime_channel = client.get_channel(int(config.story_time))
# Check if the message author has the Game Master role
isGameMaster = False
if message.guild == gamelog_channel.guild:
if game_master in [y.id for y in message.guild.get_member(message.author.id).roles]:
isGameMaster = True
isAdmin = False
if message.guild == gamelog_channel.guild:
if administrator in [y.id for y in message.guild.get_member(message.author.id).roles]:
isAdmin = True
result = process(message,isGameMaster,isAdmin)
temp_msg = []
for mailbox in result:
if mailbox.evaluate_polls == True:
for poll in db.get_all_polls():
# poll.msg_table -> list of message ids
# poll.blamed -> name of killer
# poll.purpose -> the reason of the kill
poll_channel = client.get_channel(int(poll.channel))
if poll_channel == None:
await botspam_channel.send("We got a problem! Could you send these results to the appropriate channel, please?")
poll_channel = botspam_channel
user_table = []
for msg in poll.msg_table:
poll_msg = await poll_channel.get_message(msg)
for emoji in poll_msg.reactions:
users = await emoji.users().flatten()
for person in users:
if db.isParticipant(person.id):
user_table.append([person.id,emoji.emoji])
log, result, chosen_emoji = count_votes(user_table,poll.purpose)
await gamelog_channel.send(log)
await poll_channel.send(result)
chosen_one = db.emoji_to_player(chosen_emoji)
if chosen_emoji != '' and chosen_one != None:
if poll.purpose == 'lynch':
db.add_kill(chosen_one,'Innocent')
elif poll.purpose == 'Mayor':
# TODO: give Mayor role and add data to dynamic.json
pass
elif poll.purpose == 'Reporter':
# TODO: give Reporter role and add data to dynamic.json
pass
elif poll.purpose == 'wolf':
db.add_kill(chosen_one,'Werewolf',db.random_wolf())
elif poll.purpose == 'cult':
db.add_kill(chosen_one,'Cult Leader',db.random_cult())
elif poll.purpose == 'thing':
# TODO: kill poor victim
pass
for element in mailbox.gamelog:
msg = await gamelog_channel.send(element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
for element in mailbox.botspam:
msg = await botspam_channel.send(element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
for element in mailbox.storytime:
msg = await storytime_channel.send(element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
for element in mailbox.answer:
msg = await message.channel.send(element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
for element in mailbox.channel:
if element.embed:
if element.destination == "spam":
msg = await botspam_channel.send(embed=element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
else:
msg = await client.get_channel(int(element.destination)).send(embed=element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
else:
msg = await client.get_channel(int(element.destination)).send(element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
for element in mailbox.player:
member = client.get_user(element.destination)
if member == None:
await message.channel.send("Couldn't send a DM to <@{}>!".format(element.destination))
await botspam_channel.send("<@{}> has attempted to send a DM to <@{}>, but failed, because we couldn't find the specified user via `get_user`.".format(message.author.id,element.destination))
else:
msg = await member.send(element.content)
for emoji in element.reactions:
await msg.add_reaction(emoji)
if element.temporary == True:
temp_msg.append(msg)
for element in mailbox.oldchannels:
# element.channel - channel to be edited;
# element.victim - person's permission to be changed;
# element.number - type of setting to set to:
# 0 - no access (no view, no type)
# 1 - access (view + type)
# 2 - frozen (view, no type)
# 3 - abducted (no view, no type)
# 4 - dead (dead role?)
# 0 -> read = False
# 1 -> read = True
# 2 -> give frozen (if they don't have it yet)
# 3 -> read = False
# 4 -> give dead role + remove participant role
# 5 -> mute
# 6 -> also mute, no read
channel = client.get_channel(element.channel)
user = client.get_user(element.victim)
main_guild = botspam_channel.guild
member = main_guild.get_member(element.victim)
await remove_all_game_roles(member)
if element.number == 0:
await channel.set_permissions(user, read_messages=False, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.participant), reason="Updating CC Permissions")
elif element.number == 1:
await channel.set_permissions(user, read_messages=True, send_messages=True)
await member.add_roles(get_role(main_guild.roles, config.participant), reason="Updating CC Permissions")
elif element.number == 2:
await channel.set_permissions(user, read_messages=True, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.frozen_participant), reason="Updating CC Permissions")
elif element.number == 3:
await channel.set_permissions(user, read_messages=False, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.participant), reason="Updating CC Permissions")
elif element.number == 4:
await channel.set_permissions(user, read_messages=True, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.dead_participant), reason="Updating CC Permissions")
elif element.number == 5:
await channel.set_permissions(user, read_messages=True, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.participant), reason="Updating CC Permissions")
elif element.number == 6:
await channel.set_permissions(user, read_messages=False, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.participant), reason="Updating CC Permissions")
elif element.number == 7:
await channel.set_permissions(user, read_messages=False, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.dead_participant), reason="Updating CC Permissions")
elif element.number == 8:
await channel.set_permissions(user, read_messages=False, send_messages=False)
await member.add_roles(get_role(main_guild.roles, config.suspended), reason="Updating CC Permissions")
else:
await msg.channel.send('Something went wrong! Please contact a Game Master.')
return
if db.isParticipant(element.victim,True,True):
db.set_user_in_channel(element.channel,element.victim,element.number)
for element in mailbox.newchannels:
# element.name - name of the channel;
# element.owner - owner of the channel;
# element.members - members of the channel
# element.settlers - members for whom this shall become their home channel
#
# @Participant - no view + type
# @dead Participant - view + no type
# @everyone - no view + no type
# All you need to do is create a channel where only the channel owner has access.
# The other members are given access through another Mailbox.
# You could make the work easier if you also posted a cc channel message already over here.
if ' ' not in element.name:
main_guild = botspam_channel.guild # Find the guild we're in
if element.owner not in element.members:
element.members.append(element.owner)
for buddy in element.settlers:
if buddy not in element.members:
msg = """**Warning:** I'm adding settlers to a channel!\nThis is should not be a problem, \
but it does at least indicate a flaw in the bot's code. Please, report this to the Game Masters!"""
await client.get_channel(message.channel).send(msg)
element.members.append(buddy)
viewers = []
frozones = []
abductees = []
deadies = []
for user in element.members:
member = main_guild.get_member(user)
if member == None:
await message.author.send("It doesn't seem like <@{}> is part of the server! I am sorry, I can't add them to your **conspiracy channel**.".format(user))
elif db.isParticipant(user,False,True) == True:
if int(db_get(user,'abducted')) == 1:
abductees.append(member)
elif int(db_get(user,'frozen')) == 1:
frozones.append(member)
elif db.isParticipant(user,False,False) == False:
deadies.append(member)
else:
viewers.append(member)
else:
deadies.append(member)
intro_msg = creation_messages.cc_intro([v.id for v in viewers])
# Role objects (based on ID)
roles = main_guild.roles # Roles from the guild
game_master_role = discord.utils.find(lambda r: r.id == game_master, roles)
default_permissions = {
main_guild.default_role: discord.PermissionOverwrite(read_messages=False,send_messages=False),
game_master_role: discord.PermissionOverwrite(read_messages=True,send_messages=True),
client.user: discord.PermissionOverwrite(read_messages=True,send_messages=True),
**{
member: discord.PermissionOverwrite(read_messages=True,send_messages=True) for member in viewers
},
**{
member: discord.PermissionOverwrite(read_messages=True,send_messages=False) for member in frozones
},
**{
member: discord.PermissionOverwrite(read_messages=True,send_messages=False) for member in deadies
}
}
# Create a new category if needed
if db.get_category() == None:
category = await main_guild.create_category('CC part {}'.format(db.count_categories()), reason='It seems like we couldn\'t use our previous category! Don\'t worry, I just created a new one.')
db.add_category(category.id)
else:
category = main_guild.get_channel(db.get_category())
try:
# Create the text channel
reason_msg = 'CC requested by ' + message.author.name
channel = await main_guild.create_text_channel(
name="s{}_{}".format(config.season,element.name),
category=category,
overwrites=default_permissions,
reason=reason_msg)
db.add_channel(channel.id,element.owner)
await channel.send(intro_msg)
# Set all access rules in the database
for member in viewers:
db.set_user_in_channel(channel.id,member.id,1)
for member in frozones:
db.set_user_in_channel(channel.id,member.id,2)
for member in abductees:
db.set_user_in_channel(channel.id,member.id,3)
for member in deadies:
if db.isParticipant(member.id,True,True) == True:
db.set_user_in_channel(channel.id,member.id,4)
except Exception as e: # Catch any thrown exceptions and send an error to the user.
await message.channel.send('It seems like I\'ve encountered an error! Please let the Game Masters know about this!')
await botspam_channel.send("Oi, Game Masters! I got a problem concerning channel creation for ya to fix.")
await botspam_channel.send(e)
raise e # Send the full log to Buddy1913 and his sketchy VM.
# Give the settlers their own happy little residence
for buddy in element.settlers:
db_set(buddy,"channel",channel.id)
else:
"""This should not happen, but we'll use it, to prevent the bot from purposely causing an error
everytime someone attempts to create a channel that contains spaces. 'cause believe me,
that happens ALL the time."""
msg = await message.channel.send("I\'m terribly sorry, but you can\'t use spaces in your channel name. Try again!")
temp_msg.append(msg)
for element in mailbox.polls:
# element.channel
# element.purpose
# element.user_id
# element.description
msg = element.description + '\n'
emoji_table = []
msg_table = []
i = 0
for user in db.poll_list():
if db.isParticipant(int(user[0])):
i += 1
msg += user[1] + " - <@" + str(user[0]) + "> "
if int(user[2]) + int(user[3]) > 0:
if int(user[2]) == 1:
msg += "**[FROZEN]** "
if int(user[3]) == 1:
msg += "**[ABDUCTED] **"
else:
emoji_table.append(user[1])
if i % 20 == 19:
msg = await client.get_channel(element.channel).send(msg)
for emoji in emoji_table:
await msg.add_reaction(emoji)
msg_table.append(msg)
msg = ''
else:
msg += '\n'
if msg != '':
msg = await client.get_channel(element.channel).send(msg)
for emoji in emoji_table:
await msg.add_reaction(emoji)
msg_table.append(msg)
db.add_poll(msg_table,element.purpose,element.channel,element.user_id)
await botspam_channel.send("A poll has been created in <#{}>!".format(element.channel))
for element in mailbox.deletecategories:
id = element.channel
category = client.get_channel(id)
if category != None:
bot_message = await message.channel.send('Please react with 👍 to confirm deletion of category `' + category.name + '`.\n\nNote: This action will irrevirsibly delete all channels contained within the specified category. Please use with discretion.')
await bot_message.add_reaction('👍')
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '👍'
try:
reaction, user = await client.wait_for('reaction_add', timeout=30.0, check=check)
except asyncio.TimeoutError:
await message.channel.send('Confirmation timed out.')
else:
await message.channel.send('Ok, I\'ll get right on that.\n\n*This might take some time.*')
for channel in category.channels:
await channel.delete()
await category.delete()
await message.channel.send('\n:thumbsup: Channels and category deleted')
else:
await message.channel.send('Sorry, I couldn\'t find that category.')
# Delete all temporary messages after "five" seconds.
await asyncio.sleep(120)
for msg in temp_msg:
await msg.delete()
# Whenever the bot regains his connection with the Discord API.
@client.event
async def on_ready():
print(' --> Logged in as')
print(' | > ' + client.user.name)
print(' | > ' + str(client.user.id))
await client.get_channel(welcome_channel).send('Beep boop! I just went online!')
print(splash)
print(' --> "' + random.choice(splashes) + '"')
print(' --> Please wait whilst we connect to the Discord API...')
try:
client.run(config.TOKEN)
except:
print(' | > Error logging in. Check your token is valid and you are connected to the Internet.')
| null | main.py | main.py | py | 22,318 | python | en | code | null | code-starcoder2 | 51 |
322285138 | import csv
import re
rf = open('story.csv', 'r')
wf1 = open('genre.csv', 'w')
wf2 = open('genre_of_story.csv', 'w')
def get_names(string):
filtered1 = re.sub(r'\([^)]*\)', '', string)
filtered2 = re.sub(r'\[[^)]*\]', '', filtered1)
filtered3 = re.sub(r'\{[^)]*\}', '', filtered2)
filtered4 = re.sub(r'\d+', '', filtered3)
names = set()
for name in filtered4.split(';'):
filtered = re.sub(r'[?|.|\[|\]|\{|\}|\(|\)|\"]', '', name).strip().title()
names.add(filtered)
return filter(None, names)
reader = csv.reader(rf, delimiter=',', quoting=csv.QUOTE_NONE)
next(reader, None)
writer1 = csv.writer(wf1)
writer2 = csv.writer(wf2)
names = {}
n = 0
for row in reader:
for name in get_names(row[10]):
if name not in names:
n = n+1
names[name] = n
writer1.writerow((n,name))
writer2.writerow((row[0], names[name]))
| null | create_genres.py | create_genres.py | py | 914 | python | en | code | null | code-starcoder2 | 51 |
406594349 | """
Unit tests for Sample class
"""
import unittest
import sys
import os
from pathlib import Path
import numpy as np
import pandas as pd
sys.path.append(os.path.abspath('../..'))
from flowkit import Sample, transforms
data1_fcs_path = 'examples/gate_ref/data1.fcs'
data1_sample = Sample(data1_fcs_path)
xform_logicle = transforms.LogicleTransform('logicle', param_t=10000, param_w=0.5, param_m=4.5, param_a=0)
xform_biex1 = transforms.WSPBiexTransform('neg0', width=-100.0, negative=0.0)
xform_biex2 = transforms.WSPBiexTransform('neg1', width=-100.0, negative=1.0)
class SampleTestCase(unittest.TestCase):
"""Tests for loading FCS files as Sample objects"""
def test_load_from_fcs_file_path(self):
"""Test creating Sample object from an FCS file path"""
fcs_file_path = "examples/test_data_2d_01.fcs"
sample = Sample(fcs_path_or_data=fcs_file_path)
self.assertIsInstance(sample, Sample)
def test_load_from_pathlib(self):
"""Test creating Sample object from a pathlib Path object"""
fcs_file_path = "examples/test_data_2d_01.fcs"
path = Path(fcs_file_path)
sample = Sample(fcs_path_or_data=path)
self.assertIsInstance(sample, Sample)
def test_load_from_numpy_array(self):
npy_file_path = "examples/test_comp_example.npy"
channels = [
'FSC-A', 'FSC-W', 'SSC-A',
'Ax488-A', 'PE-A', 'PE-TR-A',
'PerCP-Cy55-A', 'PE-Cy7-A', 'Ax647-A',
'Ax700-A', 'Ax750-A', 'PacBlu-A',
'Qdot525-A', 'PacOrange-A', 'Qdot605-A',
'Qdot655-A', 'Qdot705-A', 'Time'
]
npy_data = np.fromfile(npy_file_path)
sample = Sample(
npy_data,
channel_labels=channels
)
self.assertIsInstance(sample, Sample)
def test_load_from_pandas_multi_index(self):
sample_orig = Sample("examples/100715.fcs")
pnn_orig = sample_orig.pnn_labels
pns_orig = sample_orig.pns_labels
df = sample_orig.as_dataframe(source='orig')
sample_new = Sample(df)
pnn_new = sample_new.pnn_labels
pns_new = sample_new.pns_labels
self.assertListEqual(pnn_orig, pnn_new)
self.assertListEqual(pns_orig, pns_new)
def test_load_from_unsupported_object(self):
"""Test Sample constructor raises ValueError loading an unsupported object"""
self.assertRaises(ValueError, Sample, object())
def test_comp_matrix_from_csv(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = "examples/comp_complete_example.csv"
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
self.assertIsNotNone(sample._comp_events)
def test_clearing_comp_events(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = "examples/comp_complete_example.csv"
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
sample.apply_compensation(None)
self.assertIsNone(sample._comp_events)
def test_comp_matrix_from_pathlib_path(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
self.assertIsNotNone(sample._comp_events)
def test_get_metadata(self):
"""Test Sample method get_metadata"""
fcs_file_path = "examples/test_data_2d_01.fcs"
sample = Sample(fcs_path_or_data=fcs_file_path)
meta = sample.get_metadata()
self.assertEqual(len(meta), 20)
self.assertEqual(meta['p1n'], 'channel_A')
@staticmethod
def test_get_channel_index_by_channel_number_int():
chan_number = data1_sample.get_channel_index(1)
np.testing.assert_equal(0, chan_number)
def test_get_channel_index_fails_by_chan_number_0(self):
# chan numbers are indexed at 1, not 0
self.assertRaises(ValueError, data1_sample.get_channel_index, 0)
def test_get_channel_index_fails(self):
# give an unsupported list as the arg
self.assertRaises(ValueError, data1_sample.get_channel_index, [0, 1])
@staticmethod
def test_get_channel_data_raw():
data_idx_0 = data1_sample.get_channel_data(0, source='raw')
np.testing.assert_equal(data1_sample._raw_events[:, 0], data_idx_0)
@staticmethod
def test_get_channel_data_comp():
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
data_idx_6 = sample.get_channel_data(6, source='comp')
np.testing.assert_equal(sample._comp_events[:, 6], data_idx_6)
@staticmethod
def test_get_channel_data_xform():
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
sample.apply_transform(xform_logicle)
data_idx_6 = sample.get_channel_data(6, source='xform')
np.testing.assert_equal(sample._transformed_events[:, 6], data_idx_6)
def test_get_channel_data_subsample_fails(self):
self.assertRaises(
ValueError,
data1_sample.get_channel_data,
0,
source='raw',
subsample=True
)
def test_get_channel_data_subsample(self):
sample = Sample(data1_fcs_path)
sample.subsample_events(500)
data_idx_6 = sample.get_channel_data(6, source='raw', subsample=True)
self.assertEqual(len(data_idx_6), 500)
def test_get_subsampled_orig_events(self):
sample = Sample(data1_fcs_path)
sample.subsample_events(500)
events = sample.get_orig_events(subsample=True)
self.assertEqual(events.shape[0], 500)
def test_get_subsampled_raw_events(self):
sample = Sample(data1_fcs_path)
sample.subsample_events(500)
events = sample.get_raw_events(subsample=True)
self.assertEqual(events.shape[0], 500)
def test_get_subsampled_comp_events(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
sample.subsample_events(500)
events = sample.get_comp_events(subsample=True)
self.assertEqual(events.shape[0], 500)
def test_get_subsampled_xform_events(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
sample.apply_transform(xform_logicle)
sample.subsample_events(500)
events = sample.get_transformed_events(subsample=True)
self.assertEqual(events.shape[0], 500)
def test_get_comp_events_if_no_comp(self):
fcs_file_path = "examples/test_comp_example.fcs"
sample = Sample(
fcs_path_or_data=fcs_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
comp_events = sample.get_comp_events()
self.assertIsNone(comp_events)
def test_get_transformed_events_if_no_xform(self):
fcs_file_path = "examples/test_comp_example.fcs"
sample = Sample(
fcs_path_or_data=fcs_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
xform_events = sample.get_transformed_events()
self.assertIsNone(xform_events)
@staticmethod
def test_get_transformed_events_exclude_scatter():
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
sample.apply_transform(xform_logicle, include_scatter=False)
fsc_a_index = sample.get_channel_index('FSC-A')
data_fsc_a = sample.get_channel_data(fsc_a_index, source='xform')
np.testing.assert_equal(sample._raw_events[:, fsc_a_index], data_fsc_a)
def test_get_transformed_events_include_scatter(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = Path("examples/comp_complete_example.csv")
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
sample.apply_transform(xform_logicle, include_scatter=True)
fsc_a_index = sample.get_channel_index('FSC-A')
data_fsc_a_xform = sample.get_channel_data(fsc_a_index, source='xform')
data_fsc_a_raw = sample.get_channel_data(fsc_a_index, source='raw')
np.testing.assert_equal(sample._transformed_events[:, fsc_a_index], data_fsc_a_xform)
self.assertEqual(data_fsc_a_raw[0], 118103.25)
self.assertEqual(round(data_fsc_a_xform[0], 3), 1.238)
def test_get_events_as_data_frame_xform(self):
data1_sample.apply_transform(xform_logicle)
df = data1_sample.as_dataframe(source='xform')
self.assertIsInstance(df, pd.DataFrame)
np.testing.assert_equal(df.values, data1_sample.get_transformed_events())
def test_get_events_as_data_frame_comp(self):
fcs_file_path = "examples/test_comp_example.fcs"
comp_file_path = "examples/comp_complete_example.csv"
sample = Sample(
fcs_path_or_data=fcs_file_path,
compensation=comp_file_path,
ignore_offset_error=True # sample has off by 1 data offset
)
df = sample.as_dataframe(source='comp')
self.assertIsInstance(df, pd.DataFrame)
np.testing.assert_equal(df.values, sample.get_comp_events())
def test_get_events_as_data_frame_raw(self):
df = data1_sample.as_dataframe(source='raw')
self.assertIsInstance(df, pd.DataFrame)
np.testing.assert_equal(df.values, data1_sample.get_raw_events())
def test_get_events_as_data_frame_orig(self):
df = data1_sample.as_dataframe(source='orig')
self.assertIsInstance(df, pd.DataFrame)
np.testing.assert_equal(df.values, data1_sample.get_orig_events())
def test_get_events_as_data_frame_column_order(self):
orig_col_order = ['FSC-H', 'SSC-H', 'FL1-H', 'FL2-H', 'FL3-H', 'FL2-A', 'FL4-H', 'Time']
new_col_order = ['FSC-H', 'SSC-H', 'FL1-H', 'FL2-H', 'FL2-A', 'FL3-H', 'FL4-H', 'Time']
col_to_check = 'FL2-A'
df = data1_sample.as_dataframe(source='raw')
df_reorder = data1_sample.as_dataframe(source='raw', col_order=new_col_order)
self.assertListEqual(list(df.columns.get_level_values(0)), orig_col_order)
self.assertListEqual(list(df_reorder.columns.get_level_values(0)), new_col_order)
np.testing.assert_equal(df[col_to_check].values, df_reorder[col_to_check])
def test_get_events_as_data_frame_new_column_names(self):
    """col_names replaces the DataFrame column labels wholesale."""
    new_cols = ['FSC-H', 'SSC-H', 'FLR1-H', 'FLR2-H', 'FLR3-H', 'FLR2-A', 'FLR4-H', 'Time']
    df = data1_sample.as_dataframe(source='raw', col_names=new_cols)
    self.assertListEqual(list(df.columns), new_cols)
@staticmethod
def test_fully_custom_transform():
    """A per-channel transform dict matches a single global transform only on
    channels that were given the same transform (FL3-H differs on purpose)."""
    sample1 = Sample(fcs_path_or_data=data1_fcs_path)
    sample2 = Sample(fcs_path_or_data=data1_fcs_path)
    # FL3-H gets a different biex transform in the per-channel dict.
    custom_xforms = {
        'FL1-H': xform_biex1,
        'FL2-H': xform_biex1,
        'FL3-H': xform_biex2,
        'FL2-A': xform_biex1,
        'FL4-H': xform_biex1
    }
    sample1.apply_transform(xform_biex1)
    sample2.apply_transform(custom_xforms)
    fl2_idx = sample1.get_channel_index('FL2-H')
    fl3_idx = sample1.get_channel_index('FL3-H')
    s1_fl2 = sample1.get_channel_data(fl2_idx, source='xform')
    s2_fl2 = sample2.get_channel_data(fl2_idx, source='xform')
    s1_fl3 = sample1.get_channel_data(fl3_idx, source='xform')
    s2_fl3 = sample2.get_channel_data(fl3_idx, source='xform')
    # Same transform -> identical output; different transform -> must differ.
    np.testing.assert_equal(s1_fl2, s2_fl2)
    np.testing.assert_raises(AssertionError, np.testing.assert_equal, s1_fl3, s2_fl3)
def test_create_fcs(self):
    """export(..., source='comp') round-trips compensated events through a new FCS file."""
    fcs_file_path = "examples/test_comp_example.fcs"
    comp_file_path = Path("examples/comp_complete_example.csv")
    sample = Sample(
        fcs_path_or_data=fcs_file_path,
        compensation=comp_file_path,
        ignore_offset_error=True  # sample has off by 1 data offset
    )
    sample.export("test_fcs_export.fcs", source='comp', directory="examples")
    exported_fcs_file = "examples/test_fcs_export.fcs"
    exported_sample = Sample(fcs_path_or_data=exported_fcs_file)
    os.unlink(exported_fcs_file)  # clean up the temp export before asserting
    self.assertIsInstance(exported_sample, Sample)
    # TODO: Excluding time channel here, as the difference was nearly 0.01. Need to investigate why the
    #   exported comp data isn't exactly equal
    np.testing.assert_almost_equal(sample._comp_events[:, :-1], exported_sample._raw_events[:, :-1], decimal=3)
def test_create_csv(self):
    """export(..., source='comp') round-trips compensated events through a CSV file."""
    fcs_file_path = "examples/test_comp_example.fcs"
    comp_file_path = Path("examples/comp_complete_example.csv")
    sample = Sample(
        fcs_path_or_data=fcs_file_path,
        compensation=comp_file_path,
        ignore_offset_error=True  # sample has off by 1 data offset
    )
    sample.export("test_fcs_export.csv", source='comp', directory="examples")
    exported_csv_file = "examples/test_fcs_export.csv"
    exported_df = pd.read_csv(exported_csv_file)
    exported_sample = Sample(exported_df)
    os.unlink(exported_csv_file)  # clean up the temp export before asserting
    self.assertIsInstance(exported_sample, Sample)
    # TODO: Need to investigate why the exported comp data isn't exactly equal
    np.testing.assert_almost_equal(sample._comp_events[:, :], exported_sample._raw_events[:, :], decimal=3)
def test_filter_negative_scatter(self):
    """filter_negative_scatter removes negative-scatter events from the subsample
    only when reapply_subsample=True."""
    # there are 2 negative SSC-A events in this file (of 65016 total events)
    fcs_file_path = "examples/100715.fcs"
    sample = Sample(fcs_path_or_data=fcs_file_path)
    sample.subsample_events(50000)
    sample.filter_negative_scatter(reapply_subsample=False)
    # using the default seed, the 2 negative events are in the subsample
    common_idx = np.intersect1d(sample.subsample_indices, sample.negative_scatter_indices)
    self.assertEqual(len(common_idx), 2)
    sample.filter_negative_scatter(reapply_subsample=True)
    common_idx = np.intersect1d(sample.subsample_indices, sample.negative_scatter_indices)
    self.assertEqual(len(common_idx), 0)
    self.assertEqual(sample.negative_scatter_indices.shape[0], 2)
def test_filter_anomalous_events(self):
    """filter_anomalous_events removes flagged events from the subsample
    only when reapply_subsample=True."""
    # there are 2 negative SSC-A events in this file (of 65016 total events)
    fcs_file_path = "examples/100715.fcs"
    sample = Sample(fcs_path_or_data=fcs_file_path)
    sample.subsample_events(50000)
    sample.filter_anomalous_events(reapply_subsample=False)
    # using the default seed, some anomalous events land in the subsample
    common_idx = np.intersect1d(sample.subsample_indices, sample.anomalous_indices)
    self.assertGreater(len(common_idx), 0)
    sample.filter_anomalous_events(reapply_subsample=True)
    common_idx = np.intersect1d(sample.subsample_indices, sample.anomalous_indices)
    self.assertEqual(len(common_idx), 0)
    self.assertGreater(sample.anomalous_indices.shape[0], 0)
| null | flowkit/tests/sample_tests.py | sample_tests.py | py | 16,804 | python | en | code | null | code-starcoder2 | 51 |
477814710 | import itertools
import numpy as np
from list_rotations import list_rotations
def get_combinations(coordinate_system, point_to_reference_corner_of_cube, cube_parts, shape, dimension):
    """Enumerate every placement of every cube part, in every rotation, at every
    integer offset inside `shape`, accumulated onto `coordinate_system`.

    Returns a list of arrays, one per combination of placements.
    NOTE(review): the result count grows multiplicatively per cube part
    (positions x rotations), so this explodes quickly for many parts.
    Nesting was reconstructed from a whitespace-mangled source — confirm.
    """
    combinations = [coordinate_system]
    for cube_part in cube_parts:
        combinations_new = []
        for combination in combinations:
            # Every integer offset of the part inside the cube's shape.
            for index in itertools.product(*[range(0, n) for n in shape]):
                position_in_cube = np.add(point_to_reference_corner_of_cube, np.array(index))
                # Embed the part into a zeroed copy of the full reference array.
                cube_part_in_reference_array = np.zeros(coordinate_system.shape)
                cube_part_in_reference_array[(slice(position_in_cube[0], position_in_cube[0] + cube_part.shape[0]),
                                              slice(position_in_cube[1], position_in_cube[1] + cube_part.shape[1]),
                                              slice(position_in_cube[2], position_in_cube[2] + cube_part.shape[2]))] \
                    += cube_part
                # Try every rotation of the embedded part.
                for rotated_cube_part in list_rotations(cube_part_in_reference_array, dimension):
                    combination_with_rotated_cube_part = combination + rotated_cube_part
                    combinations_new.append(combination_with_rotated_cube_part)
        combinations = combinations_new
    return combinations
| null | combinations.py | combinations.py | py | 1,295 | python | en | code | null | code-starcoder2 | 51 |
404989492 | from commands import _embedMessage, _mongoFunctions
async def edit_due_date_message(client):
    """Refresh the due-date embed message in every registered guild.

    For each guild returned by the database: purge expired due dates, then
    rebuild the schedule embed for every stream in that guild.
    """
    guild_list = _mongoFunctions.get_guilds_information()
    for guild_info in guild_list:
        # Fixed: the original scanned key/value pairs into *globals*, which
        # silently reused stale ids if a record ever lacked a key.
        guild_id = guild_info['guild_id']
        channel_id = guild_info['channel_id']
        update_due_dates(guild_id)
        guild = client.get_guild(guild_id)
        courses = _mongoFunctions.get_list_of_courses(guild_id)
        for stream in _mongoFunctions.get_list_of_streams(guild_id):
            await edit_schedule_embed(stream, courses, guild_id, guild, channel_id)
async def edit_schedule_embed(stream, courses, guild_id, guild, channel_id):
    """Rebuild and apply the upcoming-due-dates embed for one stream.

    Fetches the existing message identified in the database, rebuilds the
    embed from all upcoming due dates per course, and edits it in place.
    """
    # Emoji per due-date type; anything unknown falls back to :placard:.
    emoji_by_type = {
        "Assignment": ":pushpin:",
        "Test": ":bulb:",
        "Exam": ":pen_ballpoint:",
        "Project": ":books:",
        "Quiz": ":pencil:",
    }
    channel = guild.get_channel(channel_id)
    message_id = _mongoFunctions.get_due_date_channel_id(guild_id, stream)
    msg = await channel.fetch_message(message_id)
    message_embed = _embedMessage.create("Upcoming Due Dates for Stream " + str(stream), "", "blue")
    for course in courses:
        due_dates = _mongoFunctions.get_all_upcoming_due_dates(guild_id, stream, course)
        for due_date in due_dates:
            emoji = emoji_by_type.get(due_date['type'], ":placard:")
            # Include the time-of-day only when the record carries one.
            if due_date['time_included']:
                date_text = due_date['date'].strftime("%m/%d/%Y, %H:%M:%S")
            else:
                date_text = due_date['date'].strftime("%m/%d/%Y")
            current_due_date = " **Type:** " + due_date['type'].rjust(10) + " **Date:** " + date_text.rjust(10) + '\n'
            # The first entry of a course carries the bold course header.
            if due_date == due_dates[0]:
                title = "**" + course + "**\n" + emoji + " " + due_date['title']
            else:
                title = emoji + " " + due_date['title']
            message_embed.add_field(name=title, value=current_due_date, inline=False)
    await msg.edit(embed=message_embed)
def update_due_dates(guild_id):
    """Purge due dates whose date has already passed for this guild."""
    _mongoFunctions.remove_due_dates_passed(guild_id)
| null | commands/_dueDateMessage.py | _dueDateMessage.py | py | 2,471 | python | en | code | null | code-starcoder2 | 51 |
620982439 | # uncompyle6 version 3.7.4
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/sensors/pubsub_sensor.py
# Compiled at: 2019-09-11 03:47:34
# Size of source mod 2**32: 4319 bytes
from airflow.contrib.hooks.gcp_pubsub_hook import PubSubHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class PubSubPullSensor(BaseSensorOperator):
    """Pulls messages from a PubSub subscription and passes them through XCom.

    This sensor operator will pull up to ``max_messages`` messages from the
    specified PubSub subscription. When the subscription returns messages,
    the poke method's criteria will be fulfilled and the messages will be
    returned from the operator and passed through XCom for downstream tasks.

    If ``ack_messages`` is set to True, messages will be immediately
    acknowledged before being returned, otherwise, downstream tasks will be
    responsible for acknowledging them.

    ``project`` and ``subscription`` are templated so you can use
    variables in them.
    """
    template_fields = ['project', 'subscription']
    ui_color = '#ff7f50'

    @apply_defaults
    def __init__(self, project, subscription, max_messages=5, return_immediately=False,
                 ack_messages=False, gcp_conn_id='google_cloud_default', delegate_to=None,
                 *args, **kwargs):
        """
        :param project: the GCP project ID for the subscription (templated)
        :type project: str
        :param subscription: the Pub/Sub subscription name. Do not include the
            full subscription path.
        :type subscription: str
        :param max_messages: The maximum number of messages to retrieve per
            PubSub pull request
        :type max_messages: int
        :param return_immediately: If True, instruct the PubSub API to return
            immediately if no messages are available for delivery.
        :type return_immediately: bool
        :param ack_messages: If True, each message will be acknowledged
            immediately rather than by any downstream tasks
        :type ack_messages: bool
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: str
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: str
        """
        # Decompiler artifact fixed: call super() directly instead of
        # `(super(...).__init__)(*args, **kwargs)`.
        super(PubSubPullSensor, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.project = project
        self.subscription = subscription
        self.max_messages = max_messages
        self.return_immediately = return_immediately
        self.ack_messages = ack_messages
        self._messages = None

    def execute(self, context):
        """Run the sensor loop, then return the pulled messages for XCom."""
        super(PubSubPullSensor, self).execute(context)
        return self._messages

    def poke(self, context):
        """Pull messages; truthy return (non-empty list) satisfies the sensor."""
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
        self._messages = hook.pull(self.project, self.subscription,
                                   self.max_messages, self.return_immediately)
        # Fixed: the decompiled source tested `self.ack_messages` twice in a
        # redundant nested `if`; a single combined condition is equivalent.
        if self._messages and self.ack_messages:
            ack_ids = [m['ackId'] for m in self._messages if m.get('ackId')]
            hook.acknowledge(self.project, self.subscription, ack_ids)
        return self._messages
190613859 | import numpy as np
import torch
class PrototypicalBatchSampler(object):
    """Yield episodic batches of dataset indices for prototypical networks.

    Each episode samples ``num_way`` classes from ``class_idxs`` and, for each
    sampled class, ``num_support + num_query`` example indices.
    """

    def __init__(self, labels, class_idxs, num_way, num_support, num_query, num_episode):
        super(PrototypicalBatchSampler, self).__init__()
        self.class_idxs = class_idxs
        self.num_way = num_way
        self.num_sample = num_support + num_query
        self.num_episode = num_episode
        self.classes, self.counts = np.unique(labels, return_counts=True)
        # Index table: row c holds the dataset indices whose label is c, padded
        # with NaN up to the largest class size. np.full replaces the original
        # `np.empty(..., dtype=int) * np.nan`, which relied on multiplying
        # uninitialized memory to obtain NaNs.
        self.indices = np.full((len(self.classes), max(self.counts)), np.nan)
        for idx, label in enumerate(labels):
            # NOTE(review): assumes labels are already 0..n_classes-1 — confirm.
            class_idx = label
            self.indices[class_idx, np.argwhere(np.isnan(self.indices[class_idx]))[0]] = idx
        # (A stray, unused per-episode sampling statement that ended __init__
        # in the original has been removed; sampling happens in __iter__.)

    def __iter__(self):
        """Yield ``num_episode`` flat index batches of size num_way * num_sample."""
        for episode in range(self.num_episode):
            batch_size = self.num_way * self.num_sample
            batch = np.zeros(batch_size, dtype=int)
            # Randomly pick num_way classes for this episode.
            class_idxs = self.class_idxs[torch.randperm(len(self.class_idxs))[:self.num_way]]
            for i, c_idx in enumerate(class_idxs):
                c_size = int(self.counts[c_idx])
                s_idxs = torch.randperm(c_size)[:self.num_sample]
                batch[i * self.num_sample:(i + 1) * self.num_sample] = self.indices[c_idx][s_idxs]
            yield batch

    def __len__(self):
        return self.num_episode
344261979 | from socket import *
# Simple interactive client: relay each typed line to the server and echo the
# reply, until the user enters 'q'.
client = socket()
client.connect(('127.0.0.1', 8000))
while True:
    message = input('->')
    if message == 'q':
        break
    client.send(message.encode())
    data = client.recv(1024)
    print("recieved from server: " + str(data.decode()))
client.close()
| null | lesson Threads/hw3_client.py | hw3_client.py | py | 257 | python | en | code | null | code-starcoder2 | 51 |
616105448 | import pywt
import numpy as np
import tensorflow as tf
#from tensorflow.contrib import rnn
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
def entry():
    """Run the full pipeline: load, denoise, split, encode, predict, score."""
    X_fill = load_data("train_filled.csv")
    X_wv = denoise(X_fill)
    X_train, Y_train, X_test, Y_test = split(X_wv)
    Y_sae, Y_sae_test = stackedAutoencoders(X_train, X_test)
    # Fixed: the model function is myLSTM; the bare name LSTM resolves to the
    # imported keras.layers.LSTM class and crashed here with wrong arguments.
    Y_hat, Y_hat_train = myLSTM(Y_sae, Y_train, Y_sae_test)
    accuracy_test = metric(Y_hat, Y_test)
    accuracy_train = metric(Y_hat_train, Y_train)
    print("Training Set Accuracy: " + str(accuracy_train*100) + "%")
    print("Test Set Accuracy: " + str(accuracy_test*100) + "%")
#loads data
def load_data(filename):
    """Load a comma-separated numeric file into a NumPy array."""
    data = np.loadtxt(filename, delimiter=',')
    return data
#applies wavelet transform
def denoise(X):
    """Wavelet-denoise the middle feature block of X.

    Columns 0-27 and the trailing 64 columns pass through unchanged; columns
    28-146 are replaced by their level-1 Haar wavelet coefficients
    (60 approximation + 60 detail values = 120 columns).
    """
    m, n = X.shape
    # Pass-through blocks, copied vectorized instead of element by element.
    first_part = X[:, :28].copy()
    # BUG FIX: the original copied columns 0-63 here (duplicating the range
    # already in first_part) instead of the block after the wavelet region.
    # TODO(review): confirm the trailing 64 columns are the intended "third part".
    third_part = X[:, n - 64:].copy()
    wav = pywt.Wavelet('haar')
    D = np.zeros((m, 120))
    for i, xi in enumerate(X):
        cA, cD = pywt.wavedec(xi[28:147], wav, mode='symmetric', level=1)
        D[i][:] = np.concatenate((np.array(cA), np.array(cD)))
    return np.concatenate((first_part, D, third_part), axis=1)
#splits data into X train, Y train, X test, Y test
def split(X_raw, n_train=30000, n_test=10000):
    """Shuffle rows in place and split into train/test feature/label blocks.

    Column 0 is dropped (row id), columns 1-147 are features and columns
    148-209 are labels. Returns (X_train, Y_train, X_test, Y_test), each
    transposed to (features, examples).

    Generalized: the original hard-coded 30000/10000 and crashed for any
    other row count; sizes are now defaulted parameters. The element-wise
    Python copy loops are replaced by equivalent (much faster) slicing.
    """
    np.random.shuffle(X_raw)
    train = X_raw[:n_train]
    test = X_raw[n_train:n_train + n_test]
    X_train = train[:, 1:148].copy()
    Y_train = train[:, 148:210].copy()
    X_test = test[:, 1:148].copy()
    Y_test = test[:, 148:210].copy()
    return X_train.T, Y_train.T, X_test.T, Y_test.T
# Trains the stacked Autoencoders and then passes both X_train and X_test
# into the SAE for next steps. 147->74->50->74->147
def stackedAutoencoders(X_input_train, X_input_test):
    """Train a 147->74->50->74->147 stacked autoencoder (TF1 graph API) and
    return the 50-dim bottleneck encodings of the train and test matrices.

    Inputs are laid out features-by-examples (147 x N).
    NOTE(review): although num_epoch/batch_size are defined, `train` is run
    exactly once on the full training matrix — confirm whether an
    epoch/mini-batch loop was intended.
    """
    # Define parameters
    num_examples = 30000
    num_inputs = 147
    num_hid1 = 74
    num_hid2 = 50
    num_hid3 = num_hid1
    num_output = num_inputs
    lr = 0.01
    actf = tf.nn.relu
    num_epoch = 1
    batch_size = 200
    # Create inputs (fixed example counts: 30000 train, 10000 test)
    X = tf.placeholder(tf.float32, shape=[num_inputs, 30000])
    X_test = tf.placeholder(tf.float32, shape=[num_inputs, 10000])
    # Define variables (weights are (out, in) because data is column-major)
    W1 = tf.get_variable("W1", [74, 147], initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.get_variable("b1", [74, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [50, 74], initializer=tf.contrib.layers.xavier_initializer())
    b2 = tf.get_variable("b2", [50, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [74, 50], initializer=tf.contrib.layers.xavier_initializer())
    b3 = tf.get_variable("b3", [74, 1], initializer=tf.zeros_initializer())
    W4 = tf.get_variable("W4", [147, 74], initializer=tf.contrib.layers.xavier_initializer())
    b4 = tf.get_variable("b4", [147, 1], initializer=tf.zeros_initializer())
    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3, "W4": W4, "b4": b4}
    # Encoder/decoder stack for the training placeholder.
    hid_layer1_train = actf(tf.matmul(W1, X)+b1)
    hid_layer2_train = actf(tf.matmul(W2, hid_layer1_train)+b2)
    hid_layer3_train = actf(tf.matmul(W3, hid_layer2_train)+b3)
    output_layer = actf(tf.matmul(W4, hid_layer3_train)+b4)
    # Encoder only for the test placeholder (bottleneck output).
    hid_layer1_test = actf(tf.matmul(W1, X_test)+b1)
    hid_layer2_test = actf(tf.matmul(W2, hid_layer1_test)+b2)
    # Reconstruction loss.
    loss = tf.reduce_mean(tf.square(output_layer-X))
    optimizer = tf.train.AdamOptimizer(lr)
    train = optimizer.minimize(loss)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        sess.run(train, feed_dict={X:X_input_train})
        y_sae_train = sess.run(hid_layer2_train, feed_dict={X:X_input_train})
        y_sae_test = sess.run(hid_layer2_test, feed_dict={X_test:X_input_test})
    return y_sae_train, y_sae_test
# Creating LSTM
def myLSTM(X, Y, X_test):
    """Fit a Keras regressor on (X, Y) and predict for X_test and X.

    Inputs are features-by-examples, hence the ``.T`` before fit/predict.
    Returns (predictions for X_test, predictions for X).
    NOTE(review): despite the name, the model adds only Dropout layers, empty
    Sequential() sub-models and a Dense(62) output — no LSTM layer. Confirm
    the intended architecture before relying on this.
    """
    # Dropout parameter
    drop = 0.1
    # Initialising the RNN
    regressor = Sequential()
    # Adding some Dropout regularisation and more RNN layers
    regressor.add(Dropout(drop))
    regressor.add(Sequential())
    regressor.add(Dropout(drop))
    regressor.add(Sequential())
    regressor.add(Dropout(drop))
    # Adding the output layer
    regressor.add(Dense(62))
    # Compiling the RNN
    regressor.compile(optimizer='adam', loss='mean_squared_error')
    # Fitting the RNN to the Training set
    regressor.fit(X.T, Y.T, epochs=25, batch_size=200)
    Y_hat = regressor.predict(X_test.T)
    Y_hat_train = regressor.predict(X.T)
    return Y_hat, Y_hat_train
#calculates accuracy of our model
def metric(Y_hat, Y):
    """Directional accuracy: fraction of entries where sign(Y_hat.T) == sign(Y)."""
    predicted_signs = np.sign(Y_hat.T)
    true_signs = np.sign(Y)
    matches = np.equal(predicted_signs, true_signs)
    total_entries = matches.shape[0] * matches.shape[1]
    return float(np.sum(matches)) / total_entries
if __name__ == "__main__":
entry()
| null | src/app.py | app.py | py | 5,486 | python | en | code | null | code-starcoder2 | 51 |
310358936 | from pico2d import *
import game_framework
name = "game_function"
class EndMessage:
    """End-of-game overlay: shows the end screen, animates the rabbit up,
    then displays the score box and quits shortly after."""
    # Class-level sprite cache (shared across instances) and animation state.
    image = None
    times_up = None
    rabbit = None
    box = None
    draw_sign = False

    def __init__(self):
        self.font = load_font('resource/210하얀바람B.ttf')
        self.timer = 0       # SDL tick when the end screen first appeared
        self.bye_timer = 0   # ms elapsed since the score box appeared
        # End-screen background position.
        self.x = 302
        self.y = 445
        # Rabbit and score-box sprite positions.
        self.rabbitX = 302
        self.rabbitY = 460
        self.boxX = 302
        self.boxY = 460
        if EndMessage.image == None:
            EndMessage.image = load_image('resource/end_screen.jpg')
        if EndMessage.times_up == None :
            EndMessage.times_up = load_image('resource/end.png')

    def update(self, frame_time):
        # Latch the moment the end screen first appears.
        if self.draw_sign == False :
            self.timer = SDL_GetTicks()
            self.draw_sign = True
        # After 3s: raise the rabbit; once it is fully up, place the score box
        # and start the goodbye timer.
        if(SDL_GetTicks() - self.timer >3000) :
            if (self.rabbitY < 580) :
                self.rabbitY += 1
            else :
                self.boxX = 400
                self.boxY = 620
                self.bye_timer = SDL_GetTicks() - self.timer-3000

    def draw(self,akoo):
        if (self.draw_sign == True) :
            self.image.draw(self.x,self.y)
            self.times_up.clip_draw(400,291-270, 70, 160, self.rabbitX, self.rabbitY)
            self.times_up.clip_draw(400,291-100, 130, 100, self.boxX, self.boxY)
            # Score is shown for 3s, then a goodbye message; quit 2s later.
            if (self.bye_timer < 3000) :
                self.font.draw(self.boxX-20,self.boxY+20,("score"),color=(0,0,0))
                self.font.draw(self.boxX-23,self.boxY-10,("%5d")%akoo.score,color=(0,0,0))
            else :
                self.font.draw(self.boxX-15,self.boxY,("Bye!"),color=(0,0,0))
            if (self.bye_timer > 5000) :
                game_framework.quit()
            # "Time's up" banner drawn last, on top.
            # NOTE(review): nesting reconstructed from a whitespace-mangled
            # source — confirm this line belongs inside the draw_sign branch.
            self.times_up.clip_draw(0,291-250, 350, 250, self.x, self.y)
class Countdown :
    """On-screen countdown shown during the final five seconds of play."""
    image = None       # class-level sprite cache
    draw_sign = False  # whether the countdown should currently be drawn

    def __init__(self) :
        self.x = 302
        self.y = 445
        if Countdown.image == None:
            Countdown.image = load_image('resource/countdown.png')

    def update(self,frame_time,akoo):
        # Only show the countdown while the remaining time is 0..5 seconds.
        if (0 <= akoo.time and akoo.time <= 5) :
            self.draw_sign = True
        else:
            self.draw_sign = False

    def draw(self,akoo):
        if(self.draw_sign == True) :
            # Sprite sheet holds the digits 100px apart; pick by remaining time.
            self.image.clip_draw((5 - akoo.time)*100,0, 100, 100, self.x, self.y)
180981316 | import markovify
import sys
def make_markov():
    """Build a newline-delimited Markov chain model from the tweet corpus file."""
    with open('tweet-corpus.txt','r') as corpus:
        model = markovify.NewlineText(corpus.read())
    return model
def tweet(model, length=140, out=sys.stdout):
    """Write one generated sentence (at most *length* chars) to *out*.

    Fixed: make_short_sentence returns None when it cannot build a sentence;
    the original then raised TypeError on `None + '\\n'`. We now skip writing.
    """
    sentence = model.make_short_sentence(length)
    if sentence is None:
        return
    out.write(sentence + '\n')
def generate_tweets(model, length=140):
    """Yield an endless stream of sentences generated from *model*."""
    make = model.make_short_sentence
    while True:
        yield make(length)
if __name__ == '__main__':
model = make_markov()
for _ in range(10):
tweet(model) | null | markov.py | markov.py | py | 458 | python | en | code | null | code-starcoder2 | 51 |
234500338 | from sklearn.datasets import make_circles
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
def rbf_kernel_pca(X, gamma, n_components):
    """RBF-kernel principal component analysis.

    X: (n_samples, n_features) data matrix.
    gamma: RBF kernel width parameter.
    n_components: number of projected components to return.
    Returns the (n_samples, n_components) projections onto the top kernel
    principal components.
    """
    # Pairwise squared Euclidean distances: condensed vector, then square matrix.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    # Symmetric RBF kernel matrix. (np.exp: `scipy.exp` was removed upstream.)
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix in the implicit feature space.
    N = len(K)
    one_N = np.ones((N, N)) / N
    K = K - one_N.dot(K) - K.dot(one_N) + one_N.dot(K).dot(one_N)
    # eigh returns eigenpairs in ascending order; take the top n_components.
    eigvals, eigvecs = eigh(K)
    # Fixed: pass a list (not a generator) to np.column_stack — generators are
    # rejected by modern NumPy.
    alphas = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])
    return alphas
# Demo: two concentric circles (1000 points, 500 per class — hence the
# hard-coded 500 in the 1-D strip plots below).
X, y = make_circles(n_samples = 1000, random_state = 123, noise = 0.1, factor = 0.2)
plt.scatter(X[y==0, 0], X[y==0, 1], color = 'red', marker = '^', alpha = 0.5)
plt.scatter(X[y==1, 0], X[y==1, 1], color = 'blue', marker = 'o', alpha = 0.5)
plt.show()
#Let's start with the standard PCA approach to compare it with the results of the RBF
#kernel PCA:
scikit_pca = PCA(n_components = 2)
X_spca = scikit_pca.fit_transform(X)
# Left: 2-D projection; right: classes collapsed onto PC1 only.
fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(7,3))
ax[0].scatter(X_spca[y==0, 0], X_spca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y==1, 0], X_spca[y==1, 1], color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y==0, 0], np.zeros((500,1))+0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y==1, 0], np.zeros((500,1))-0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
#Given an appropriate value for gamma , let's see if we are luckier using the RBF kernel
#PCA implementation:
X_kpca = rbf_kernel_pca(X, gamma = 15, n_components = 2)
fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(7,3))
ax[0].scatter(X_kpca[y==0, 0], X_kpca[y==0, 1], color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y==1, 0], X_kpca[y==1, 1], color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y==0, 0], np.zeros((500,1))+0.02, color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y==1, 0], np.zeros((500,1))-0.02, color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
| null | chapter5/chapter5_ex5.py | chapter5_ex5.py | py | 3,047 | python | en | code | null | code-starcoder2 | 50 |
259492790 | from data_science_tools.aoi_selection_tool import AoiPipeline
from data_science_tools.aoi_selection_tool.sqlalchemy_wrappers import BaseWrapper
from data_science_tools.aoi_selection_tool.frontend_components import *
def main():
    """Build an attributes AOI pipeline (gable roofs, rating -1, selected
    property-use codes) and print the generated SQL without executing it."""
    primary = Primary(
        kind="attributes",
        imagery_refresh="2018-02",
        optional_cols=["state", "yearbuilt"],
    )
    cape = CapeAttributes(
        constraints={
            "cape_roof_condition_rating": ["-1"],
            "cape_roof_geometry": "gable",
        }
    )
    attom = Attom(
        constraints={"propertyusestandardized":
                     ["382", "383", "385"]}
    )
    pipeline = AoiPipeline([primary, cape, attom])
    # query_only=True: generate SQL but do not run it against the warehouse.
    _ = pipeline.run(n_samples=1000, query_only=True, fetch_geometries=False)
    print(pipeline.sql_query[0])
def main2():
    """Build an AOI pipeline for largest structures with wood shake/shingle
    roof-material codes and print the generated SQL."""
    # We want a pool of attribute geometries that have roofmaterial wood shake shingle
    primary = Primary(
        kind='attributes',
        imagery_refresh='2018-02'
    )
    cape = CapeAttributes(
        constraints={
            "num_attom_matches": 1,
            "largest_structure": True
        }
    )
    attom = Attom(
        constraints={
            "roofmaterial": ['103', '127', '135', '136',
                             '137', '138', '139', '140', '142']
        }
    )
    pipeline = AoiPipeline([primary, cape, attom])
    raw_sql, df = pipeline.run(n_samples=1000, query_only=True, fetch_geometries=True)
    print(raw_sql)
def main3():
    """Reproduce the balanced per-state stratified query that previously
    exhausted warehouse resources (generation only; not executed)."""
    # This query was giving resources exhausted for Jonathan - improve this !!!
    components = [
        Primary(kind="primary_roof", imagery_refresh='2018-02', optional_cols=["parcel_geometry_id"]),
        Attom(constraints={
            "propertyusestandardized": ["382", "383", "385"]
        },
        ),
        Stratify(columns=['state'], balanced=True, objects_per_strata=100)
    ]
    pipeline = AoiPipeline()
    pipeline.add_components(components)
    pipeline.run(query_only=True, fetch_geometries=False)
    # print(query)
def test():
    """Variant of main3 with unbalanced stratification; prints every generated
    SQL statement for inspection."""
    # This query was giving resources exhausted for Jonathan - improve this !!!
    components = [
        Primary(kind="primary_roof",
                imagery_refresh='2018-02',
                optional_cols=["parcel_geometry_id"]),
        Attom(constraints={
            "propertyusestandardized": ["382", "383", "385"]
        }),
        # Stratify(columns=['state'], balanced=True, objects_per_strata=100)
        Stratify(columns=['state'], balanced=False)
    ]
    pipeline = AoiPipeline()
    pipeline.add_components(components)
    _ = pipeline.run(n_samples=1000, query_only=True, fetch_geometries=True)
    qrs = pipeline.sql_query
    for qr in qrs:
        print(qr)
    # print(pipeline.sql_query[0])
if __name__ == "__main__":
main()
# test()
| null | scratch_1.py | scratch_1.py | py | 2,873 | python | en | code | null | code-starcoder2 | 50 |
51382394 | import cv2
import torch
import numpy as np
import global_vars
import models
from filters import skinMask,greyMask
from models import load_model, predict_gesture
from utils import *
class recognizer:
    """Wraps the gesture CNN: crops/segments a hand region and classifies it."""

    def __init__(self):
        # CNN classifier; move it to the GPU when CUDA is available.
        self.model = load_model()
        # Fixed: self.gpu was only assigned inside the CUDA branch, so
        # recognize() raised AttributeError on CPU-only machines.
        self.gpu = torch.cuda.is_available()
        if self.gpu:
            self.model.cuda()
        self.prediction_frequency = 10  # predict once every 10 images
        self.prediction_count = 0
        self.camera_height = 300
        self.camera_width = 300

    def get_hand_img(self, raw_img, x, y, fix=True):
        """Cut out and skin-mask the hand region of an image.

        raw_img: ndarray image (assumed HxWx3 — TODO confirm).
        x, y: right-wrist coordinate.
        With fix=True (the default) the crop origin is the constant (350, 300);
        otherwise the window is placed relative to the wrist and clamped to
        the image bounds.
        """
        if not fix:
            if x - self.camera_width // 2 < 0:
                x0 = 0
            elif x + self.camera_width // 2 > raw_img.shape[1]:
                x0 = raw_img.shape[1] - self.camera_width
            else:
                # NOTE(review): the guards above use camera_width // 2 — this
                # probably should too to center the window; confirm intent.
                x0 = x - self.camera_width
            if y - self.camera_height * 2 < 0:
                y0 = 0
            else:
                # Fixed: the original computed y0 from x instead of y.
                y0 = y - self.camera_height * 2
        else:
            x0, y0 = 350, 300
        # img = greyMask(raw_img, x0, y0, self.camera_width, self.camera_height)
        img = skinMask(raw_img, x0, y0, self.camera_width, self.camera_height)
        return img

    def recognize(self, img):
        """Run the CNN on a prepared hand image and return the predicted gesture."""
        gesture = predict_gesture(self.model, img,
                                  self.gpu, verbose=True)
        return gesture
| null | realtime_gesture_recog/recog.py | recog.py | py | 1,655 | python | en | code | null | code-starcoder2 | 50 |
45343168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# !@Time : 2021/4/14 下午3:57
# !@Author : miracleyin @email: miracleyin@live.com
# !@File : inference.py
import json
import csv
from pathlib import Path
from tqdm.notebook import tqdm
import torch
from torch.utils.data import DataLoader
from datasets import InferenceDataset, inference_collate_batch
from model.model import Classifier
def parse_args():
    """Return the fixed inference configuration as a dict."""
    data_dir = "./Dataset"
    model_path = "./model.ckpt"
    output_path = "./output.csv"
    return {
        "data_dir": data_dir,
        "model_path": model_path,
        "output_path": output_path,
    }
def main(
    data_dir,
    model_path,
    output_path,
):
    """Main function."""
    # Run inference on GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"[Info]: Use {device} now!")
    # Speaker id <-> name mapping shipped alongside the dataset.
    mapping_path = Path(data_dir) / "mapping.json"
    mapping = json.load(mapping_path.open())
    dataset = InferenceDataset(data_dir)
    dataloader = DataLoader(
        dataset,
        batch_size=1,
        shuffle=False,
        drop_last=False,
        num_workers=8,
        collate_fn=inference_collate_batch,
    )
    print(f"[Info]: Finish loading data!", flush=True)
    speaker_num = len(mapping["id2speaker"])
    model = Classifier(n_spks=speaker_num).to(device)
    model.load_state_dict(torch.load(model_path))
    model.eval()
    print(f"[Info]: Finish creating model!", flush=True)
    # Predict a speaker for every utterance; collect (id, speaker) CSV rows.
    results = [["Id", "Category"]]
    for feat_paths, mels in tqdm(dataloader):
        with torch.no_grad():
            mels = mels.to(device)
            outs = model(mels)
            preds = outs.argmax(1).cpu().numpy()
            for feat_path, pred in zip(feat_paths, preds):
                results.append([feat_path, mapping["id2speaker"][str(pred)]])
    with open(output_path, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(results)
if __name__ == "__main__":
main(**parse_args())
| null | inference.py | inference.py | py | 1,948 | python | en | code | null | code-starcoder2 | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.