max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
venv/Exercises/ex041.py | jonassignoreti/Python-CursoemVideo | 0 | 6621751 | '''A confederação Nacional de Natação precisa de um programa que leia
o ano de nascimento de um atleta e mostre sua categoria, de acordo com a idade:
-Até 9 anos: MIRIM;
-Até 14 anos: INFANTIL;
-Até 19 anos: JUNIOR;
-Até 20 Anos: SÊNIOR;
-Acima: MASTER.'''
from datetime import date

# Read the athlete's data; age is derived from the birth year only,
# which is what the exercise statement asks for (month/day ignored).
nome = str(input('Digite o nome do atleta: ')).strip().capitalize()
ano_nascimento = int(input('Digite o ano de nascimento do atleta: '))
idade = date.today().year - ano_nascimento
# Category thresholds per the module docstring: 9/14/19/20, then MASTER.
if idade <= 9:
    print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mMIRIM\033[m'.format(nome, idade))
elif idade <= 14:
    print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mINFANTIL\033[m'.format(nome, idade))
elif idade <= 19:
    print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mJUNIOR\033[m'.format(nome, idade))
elif idade <= 20:  # Fix: the spec in the module docstring says SÊNIOR up to 20, the code used 25.
    print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mSÊNIOR\033[m'.format(nome, idade))
else:
    print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mMASTER\033[m'.format(nome, idade))
| '''A confederação Nacional de Natação precisa de um programa que leia
o ano de nascimento de um atleta e mostre sua categoria, de acordo com a idade:
-Até 9 anos: MIRIM;
-Até 14 anos: INFANTIL;
-Até 19 anos: JUNIOR;
-Até 20 Anos: SÊNIOR;
-Acima: MASTER.'''
from datetime import date
nome = str(input('Digite o nome do atleta: ')).strip().capitalize()
ano_nascimento = int(input('Digite o ano de nascimento do atleta: '))
idade = date.today().year - ano_nascimento
if idade <= 9:
print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mMIRIM\033[m'.format(nome, idade))
elif idade <= 14:
print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mINFANTIL\033[m'.format(nome, idade))
elif idade <= 19:
print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mJUNIOR\033[m'.format(nome, idade))
elif idade <= 25:
print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mSÊNIOR\033[m'.format(nome, idade))
else:
print('O Atleta {} tem {} anos de idade, portanto é da categoria \033[36mMASTER\033[m'.format(nome, idade))
| pt | 0.967542 | A confederação Nacional de Natação precisa de um programa que leia o ano de nascimento de um atleta e mostre sua categoria, de acordo com a idade: -Até 9 anos: MIRIM; -Até 14 anos: INFANTIL; -Até 19 anos: JUNIOR; -Até 20 Anos: SÊNIOR; -Acima: MASTER. | 4.231564 | 4 |
ProjectEuler.Problem.037.py | jihunroh/ProjectEuler-Python | 0 | 6621752 | from ProjectEulerCommons.Base import *
from ProjectEulerCommons.PrimeNumbers import generate_prime, is_prime
def is_truncatable_prime(n):
    """Return True if every left- and right-truncation of *n* is prime.

    Single-digit primes (2, 3, 5, 7) are excluded, as required by
    Project Euler problem 37.  Callers are expected to pass primes;
    only the truncations themselves are validated here.
    """
    if n in (2, 3, 5, 7):
        return False
    digits = str(n)
    truncations = [int(digits[i:]) for i in range(1, len(digits))] \
                + [int(digits[:j]) for j in range(1, len(digits))]
    # Fix: the original loop rebound the parameter ``n`` while iterating
    # (accidental shadowing); all() avoids that and short-circuits the same.
    return all(is_prime(t) for t in truncations)
# Project Euler 37 guarantees exactly eleven truncatable primes exist;
# sum them straight off the generator (the original materialized a
# pointless intermediate list inside sum()).
Answer(
    sum(islice(filter(is_truncatable_prime, generate_prime()), 11))
)
"""
------------------------------------------------
ProjectEuler.Problem.037.py
The Answer is: 748317
Time Elasped: 7.705394268035889sec
------------------------------------------------
"""
| from ProjectEulerCommons.Base import *
from ProjectEulerCommons.PrimeNumbers import generate_prime, is_prime
def is_truncatable_prime(n):
if n in [2, 3, 5, 7]:
return False
numberlist = [int(str(n)[i:]) for i in range(1, len(str(n)))] + [int(str(n)[:j]) for j in range(1, len(str(n)))]
for n in numberlist:
if not is_prime(n):
return False
return True
Answer(
sum([num for num in islice(filter(is_truncatable_prime, generate_prime()), 11)])
)
"""
------------------------------------------------
ProjectEuler.Problem.037.py
The Answer is: 748317
Time Elasped: 7.705394268035889sec
------------------------------------------------
"""
| en | 0.185917 | ------------------------------------------------ ProjectEuler.Problem.037.py The Answer is: 748317 Time Elasped: 7.705394268035889sec ------------------------------------------------ | 3.299545 | 3 |
apps/utils.py | dalelotts/insta360-auto-converter | 1 | 6621753 | from email.mime.text import MIMEText
from datetime import datetime
from datetime import date
import os
import logging
from logging.handlers import RotatingFileHandler
import smtplib
from configparser import ConfigParser
import time
# Directory that receives the rotating application logs; created on import.
log_dir = '/insta360-auto-converter-data/logs'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
logger = logging.getLogger('insta360-auto-converter-logger')
# One file per day; rotated when it reaches 50 MiB, keeping 5 backups.
logFile = '{}/insta360-auto-converter-logger-'.format(log_dir) + time.strftime("%Y%m%d") + '.log'
handler = RotatingFileHandler(logFile, mode='a', maxBytes=50 * 1024 * 1024,
                              backupCount=5, encoding=None, delay=False)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# Application configuration (Gmail credentials etc.) loaded at import time.
config = ConfigParser()
config.read("/insta360-auto-converter-data/configs.txt")
def log(content, mail_out=False):
    """Log *content* with a timestamp to the rotating logger and stdout.

    When mail_out is True the entry is logged at ERROR level and the raw
    content is additionally e-mailed to the configured recipient via
    send_mail().
    """
    log_content = '[{}] {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), content)
    if mail_out:
        logger.error(log_content)
    else:
        logger.info(log_content)
    # Mirror every entry to stdout (e.g. for container logs).
    print(log_content)
    if mail_out:
        send_mail(config["GMAIL_INFO"]["error_mail_to"], 'insta360-auto-converter Job Failed', content)
def silentremove(filename):
    """Delete *filename*, whether it is a file or an empty directory,
    silently ignoring any failure (missing path, permissions, ...).

    Fix: the original wrapped os.remove() and os.rmdir() in a single
    bare-except try block, so for a directory path os.remove() raised
    first and os.rmdir() was never reached -- directories were never
    removed.  Each call is now suppressed independently, and only
    OSError (not every exception) is swallowed.
    """
    try:
        os.remove(filename)
    except OSError:
        pass
    try:
        os.rmdir(filename)
    except OSError:
        pass
def send_mail(to, subject, body):
    """Send a plain-text e-mail through Gmail's SSL SMTP endpoint.

    Credentials are read from the [GMAIL_INFO] section of the config
    file loaded at module import.  Any failure is logged via log()
    rather than raised, so callers never see SMTP errors.
    """
    s = config["GMAIL_INFO"]["pass"]  # account / app password
    gmail_user = config["GMAIL_INFO"]["id"]
    sent_from = gmail_user
    mime = MIMEText(body, "plain", "utf-8")
    mime["Subject"] = subject
    mime["From"] = config["GMAIL_INFO"]["id"]
    mime["To"] = to
    try:
        server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
        server.ehlo()
        server.login(gmail_user, s)
        server.sendmail(sent_from, to, mime.as_string())
        server.close()
        log('Email sent!')
    except Exception as e:
        # Broad catch is deliberate: mail delivery is best-effort.
        log('Send mail error: {}'.format(e))
 | from email.mime.text import MIMEText
from datetime import datetime
from datetime import date
import os
import logging
from logging.handlers import RotatingFileHandler
import smtplib
from configparser import ConfigParser
import time
log_dir = '/insta360-auto-converter-data/logs'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logger = logging.getLogger('insta360-auto-converter-logger')
logFile = '{}/insta360-auto-converter-logger-'.format(log_dir) + time.strftime("%Y%m%d") + '.log'
handler = RotatingFileHandler(logFile, mode='a', maxBytes=50 * 1024 * 1024,
backupCount=5, encoding=None, delay=False)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
config = ConfigParser()
config.read("/insta360-auto-converter-data/configs.txt")
def log(content, mail_out=False):
log_content = '[{}] {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), content)
if mail_out:
logger.error(log_content)
else:
logger.info(log_content)
print(log_content)
if mail_out:
send_mail(config["GMAIL_INFO"]["error_mail_to"], 'insta360-auto-converter Job Failed', content)
def silentremove(filename):
try:
os.remove(filename)
os.rmdir(filename)
except:
pass
def send_mail(to, subject, body):
s = config["GMAIL_INFO"]["pass"]
gmail_user = config["GMAIL_INFO"]["id"]
sent_from = gmail_user
mime = MIMEText(body, "plain", "utf-8")
mime["Subject"] = subject
mime["From"] = config["GMAIL_INFO"]["id"]
mime["To"] = to
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, s)
server.sendmail(sent_from, to, mime.as_string())
server.close()
log('Email sent!')
except Exception as e:
log('Send mail error: {}'.format(e)) | none | 1 | 2.532584 | 3 | |
worker/k.py | hoshimaemi/XZZ | 29 | 6621754 | from zzcore import StdAns
import requests
class Ans(StdAns):
    """Command handler that replies with a random kemomimi picture card."""

    def GETMSG(self):
        """Build the CQ XML share-card message for the chat reply.

        Fix: the original caught exceptions from Kemomimi() with a bare
        except that only called print(), then fell through to
        ``if status == 200`` with ``status`` undefined, raising
        NameError; failures now return the error message instead.
        """
        try:
            picurl, status = Kemomimi()
        except Exception:
            return '图库丢了哦,不是咱的问题呀!'
        if status != 200:
            return '图库丢了哦,不是咱的问题呀!'
        # NOTE(review): picurl is fetched but the card below hard-codes a
        # gitee mirror URL -- presumably a stable fallback; confirm intent.
        msg = f'[CQ:xml,data=<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n' \
              f'<msg serviceID="1"\n' \
              f'action="web" url="\n' \
              f'https://brx86.gitee.io/kemomimi/202.jpg">\n' \
              f'<item><title>ケモミミちゃん:</title><summary>Kemomimi酱来了~</summary><picture cover="https://brx86.gitee.io/kemomimi/202.jpg"/></item>\n' \
              f'</msg> ' \
              f']'
        return msg
def Kemomimi():
    """Resolve the random-image endpoint and return (final_url, status_code).

    Fix: the original issued two separate GET requests (one for the
    redirected URL, one for the status code), doubling latency and
    potentially describing two different images; a single response
    provides both values consistently.
    """
    url = "http://api.aya1.xyz:6/random0.php"
    response = requests.get(url)
    # .url is the address after following redirects.
    return response.url, response.status_code
| from zzcore import StdAns
import requests
class Ans(StdAns):
def GETMSG(self):
try:
picurl, status = Kemomimi()
except:
print()
msg = ''
if status == 200:
# 显示图标
# msg += f'[CQ:xml,data=<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' \
# f'<msg serviceID="1">' \
# f'<item><title>来喽!!</title></item>' \
# f'<source name="K!" icon="{picurl}" action="web" appid="-1" />' \
# f'</msg>' \
# f']'
msg = f'[CQ:xml,data=<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n' \
f'<msg serviceID="1"\n' \
f'action="web" url="\n' \
f'https://brx86.gitee.io/kemomimi/202.jpg">\n' \
f'<item><title>ケモミミちゃん:</title><summary>Kemomimi酱来了~</summary><picture cover="https://brx86.gitee.io/kemomimi/202.jpg"/></item>\n' \
f'</msg> ' \
f']'
# 显示卡片图片
# msg += f'[CQ:cardimage,file={picurl},maxheight=200]'
else:
msg += '图库丢了哦,不是咱的问题呀!'
return msg
def Kemomimi():
url = "http://api.aya1.xyz:6/random0.php"
# 获取重定向后的地址
imgurl = requests.get(url).url
status = requests.get(url).status_code
# print(imgurl)\
return imgurl, status
| en | 0.149854 | # 显示图标 # msg += f'[CQ:xml,data=<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' \ # f'<msg serviceID="1">' \ # f'<item><title>来喽!!</title></item>' \ # f'<source name="K!" icon="{picurl}" action="web" appid="-1" />' \ # f'</msg>' \ # f']' # 显示卡片图片 # msg += f'[CQ:cardimage,file={picurl},maxheight=200]' # 获取重定向后的地址 # print(imgurl)\ | 2.383299 | 2 |
src/config.py | loovien/meida-downloader | 1 | 6621755 | <filename>src/config.py
# -*- coding: utf-8 -*-
# website: https://loovien.github.io
# author: luowen<<EMAIL>>
# time: 2018/9/29 21:41
# desc:
from typing import Any, Optional, Union
configs = {
    "queue_size": 10000,
    "download_thread_num": 3,
    "output": "./output",
    "timeout": 5,
}


def setting_get(key: str) -> Optional[Union[str, int, dict, tuple, list]]:
    """Look up a dot-separated *key* path in the module-level ``configs``.

    Returns the value at the path, or None when any segment is missing
    or an intermediate value is not a dict.

    Fix: the original declared ``global configs`` and rebound the module
    config dict to each nested level it traversed, permanently
    destroying the configuration after the first nested lookup; this
    version walks a local cursor instead, leaving ``configs`` intact.
    """
    target: Any = configs
    for segment in key.split("."):
        if not isinstance(target, dict):
            return None
        target = target.get(segment)
        if target is None:
            return None
    return target
| <filename>src/config.py
# -*- coding: utf-8 -*-
# website: https://loovien.github.io
# author: luowen<<EMAIL>>
# time: 2018/9/29 21:41
# desc:
from typing import Any, Optional, Union
configs = {
"queue_size": 10000,
"download_thread_num": 3,
"output": "./output",
"timeout": 5,
}
def setting_get(key: str) -> Optional[Union[str, int, dict, tuple, list]]:
global configs
items = key.split(".")
target = None
for cursor in range(0, len(items)):
key_seg = items[cursor]
target = configs.get(key_seg, None)
if isinstance(target, dict) or isinstance(target, tuple) or isinstance(target, list):
configs = target
continue
return target
return target
| en | 0.316088 | # -*- coding: utf-8 -*- # website: https://loovien.github.io # author: luowen<<EMAIL>> # time: 2018/9/29 21:41 # desc: | 2.208565 | 2 |
phanas/nascopy.py | lesaint/phanas_destkop | 0 | 6621756 | import logging
import os
import subprocess
import sys
from pathlib import Path
class NasCopy:
    """Runs an external NAS-copy shell script configured under the
    ``nascopy.script_path`` key of the application config."""

    # NOTE: class attributes shared as defaults; the instance shadows
    # __script_path when __load_nascopyscript_path() succeeds.
    __logger = logging.getLogger("nascopy")
    __script_path = None

    def __init__(self, config):
        # Validation result is intentionally ignored here; callers probe
        # should_nascopy() before running.
        self.__load_nascopyscript_path(config)

    def __load_nascopyscript_path(self, config):
        """Validate *config* and cache the executable script path.

        Returns True when a usable script path was found and stored,
        False otherwise (every rejection reason is logged).
        """
        nascopy_name = "nascopy"
        script_path_name = "script_path"
        if not config:
            self.__logger.info("no config")
            return False
        if not nascopy_name in config:
            self.__logger.info("config does not contain %s", nascopy_name)
            return False
        nascopy_config = config[nascopy_name]
        if not isinstance(nascopy_config, dict) or not script_path_name in nascopy_config:
            self.__logger.info("'%s' is not an object or does not contain name '%s'", nascopy_name, script_path_name)
            return False
        script_path_str = nascopy_config[script_path_name]
        if not isinstance(script_path_str, str) or not script_path_str:
            self.__logger.info("%s is not a string", script_path_name)
            return False
        script_path = Path(script_path_str)
        if not script_path.is_file():
            self.__logger.error("script %s can not be found", script_path)
            return False
        if not os.access(script_path, os.X_OK):
            self.__logger.error("script %s is not executable", script_path)
            return False
        self.__logger.info("nascopy script: %s", script_path)
        self.__script_path = script_path
        return True

    def should_nascopy(self):
        # A truthy path means configuration validated successfully.
        if self.__script_path:
            return True
        return False

    def do_nascopy(self):
        """Execute the configured script, streaming its output to the log.

        Returns (True, None) on success or (False, message) when the
        script exits non-zero.
        """
        command = [ self.__script_path ]
        # stderr is merged into stdout so a single reader loop suffices.
        proc = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, universal_newlines = True)
        def check_std(std, loglevel):
            # Drain available lines; readline() returns '' at EOF.
            while True:
                output = std.readline()
                if output:
                    self.__logger.log(loglevel, output.strip())
                else:
                    break
        def check_io():
            check_std(proc.stdout, logging.INFO)
        # Poll until the child exits, forwarding its output as it arrives.
        while proc.poll() is None:
            check_io()
        proc.wait()
        if proc.returncode != 0:
            return False, "nascopy script had an error. Check the logs"
        return True, None
def run(config):
    """Module entry point: build a NasCopy from *config* and execute it
    when a script is configured, logging any failure message."""
    log = logging.getLogger("nascopy")
    log.info("NAS Copy started")
    copier = NasCopy(config)
    if not copier.should_nascopy():
        log.info("NAS copy is not configured")
    else:
        ok, msg = copier.do_nascopy()
        if not ok:
            log.error(msg)
logger.info("NAS copy to Phanas done") | import logging
import os
import subprocess
import sys
from pathlib import Path
class NasCopy:
__logger = logging.getLogger("nascopy")
__script_path = None
def __init__(self, config):
self.__load_nascopyscript_path(config)
def __load_nascopyscript_path(self, config):
nascopy_name = "nascopy"
script_path_name = "script_path"
if not config:
self.__logger.info("no config")
return False
if not nascopy_name in config:
self.__logger.info("config does not contain %s", nascopy_name)
return False
nascopy_config = config[nascopy_name]
if not isinstance(nascopy_config, dict) or not script_path_name in nascopy_config:
self.__logger.info("'%s' is not an object or does not contain name '%s'", nascopy_name, script_path_name)
return False
script_path_str = nascopy_config[script_path_name]
if not isinstance(script_path_str, str) or not script_path_str:
self.__logger.info("%s is not a string", script_path_name)
return False
script_path = Path(script_path_str)
if not script_path.is_file():
self.__logger.error("script %s can not be found", script_path)
return False
if not os.access(script_path, os.X_OK):
self.__logger.error("script %s is not executable", script_path)
return False
self.__logger.info("nascopy script: %s", script_path)
self.__script_path = script_path
return True
def should_nascopy(self):
if self.__script_path:
return True
return False
def do_nascopy(self):
command = [ self.__script_path ]
proc = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, universal_newlines = True)
def check_std(std, loglevel):
while True:
output = std.readline()
if output:
self.__logger.log(loglevel, output.strip())
else:
break
def check_io():
check_std(proc.stdout, logging.INFO)
while proc.poll() is None:
check_io()
proc.wait()
if proc.returncode != 0:
return False, "nascopy script had an error. Check the logs"
return True, None
def run(config):
logger = logging.getLogger("nascopy")
logger.info("NAS Copy started")
nascopy = NasCopy(config)
if nascopy.should_nascopy():
status, msg = nascopy.do_nascopy()
if not status:
logger.error(msg)
else:
logger.info("NAS copy is not configured")
logger.info("NAS copy to Phanas done") | none | 1 | 2.620104 | 3 | |
setup.py | rapid7/lockex | 4 | 6621757 | <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
# Packaging definition for the "lockex" CLI: acquires a ZooKeeper lock,
# then executes the wrapped command (console entry point ``lockex``).
setup(name="lockex",
      version="0.3",
      description="Get lock from zookeeper and execute",
      packages=find_packages(exclude=["__pycache__"]),
      # Runtime dependencies are fully pinned for reproducible installs.
      install_requires=['click==6.2', 'python_gflags==2.0', 'kazoo==2.2.1', 'psutil==4.1.0', 'future==0.15.2'],
      setup_requires=['flake8==2.5.4'],
      tests_require=['tox==2.3.1', 'pytest==2.6.3', 'testfixtures==4.9.1', 'mock==1.0.1'],
      entry_points={'console_scripts': ['lockex = lockex.execute:execute']},
      extras_require=dict(test=['testfixtures'],),
      license='BSD',)
| <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
setup(name="lockex",
version="0.3",
description="Get lock from zookeeper and execute",
packages=find_packages(exclude=["__pycache__"]),
install_requires=['click==6.2', 'python_gflags==2.0', 'kazoo==2.2.1', 'psutil==4.1.0', 'future==0.15.2'],
setup_requires=['flake8==2.5.4'],
tests_require=['tox==2.3.1', 'pytest==2.6.3', 'testfixtures==4.9.1', 'mock==1.0.1'],
entry_points={'console_scripts': ['lockex = lockex.execute:execute']},
extras_require=dict(test=['testfixtures'],),
license='BSD',)
| none | 1 | 1.374098 | 1 | |
backend/db/PopulateData.py | billsong0407/Bike4Joy | 0 | 6621758 | <reponame>billsong0407/Bike4Joy<filename>backend/db/PopulateData.py
# The purpose of this script is to initialize the database and populating the initial location data
# it was ran before the server deployment
# it is not used in the local and deployed server at any time
# it has no impacts on the live website interactions
import json
import mysql.connector
from mysql.connector import Error
if __name__ == "__main__" :
# Load database credentials
with open("../config.json") as file:
credentials = json.load(file)
_host = credentials['DB_ADDRESS']
_port = credentials["DB_PORT"]
_database = credentials["DB_NAME"]
_username = credentials["DB_USERNAME"]
_password = credentials["DB_PASSWORD"]
# Load location data from the json file
with open("./locations.json") as file:
raw_data = json.load(file)
locations = raw_data['parkingData']
try:
connection = mysql.connector.connect(host=_host,
port=_port,
database=_database,
user=_username,
password=_password)
print("Connected")
cursor = connection.cursor()
cursor.execute("SHOW TABLES;")
tables = cursor.fetchall()
if tables and "LOCATIONS" in list(tables[0]):
cursor.execute("DELETE FROM LOCATIONS") # reset LOCATION TABLE
# cursor.execute("DROP TABLE LOCATIONS")
print("TABLE LOCATION DELETED")
INSERT_QUERY = """
INSERT INTO LOCATIONS (address, postalCode, parkingType, capacity, lat, lng, bikeSize, yearInstalled)
VALUES
"""
# Populate table
for location in locations:
properties = location["properties"]
address = properties["ADDRESS_FULL"]
postal_code = properties["POSTAL_CODE"]
parking_type = properties["PARKING_TYPE"]
capacity = properties["BICYCLE_CAPACITY"]
bike_size = properties["SIZE_M"]
year_installed = properties["YEAR_INSTALLED"]
geometry = location["geometry"]["coordinates"]
lat = geometry[1]
lng = geometry[0]
INSERT_QUERY += f'("{address}", "{postal_code}", "{parking_type}", {capacity}, {lat}, {lng}, {bike_size}, {year_installed}),\n'
cursor.execute(INSERT_QUERY.rstrip().rstrip(','))
connection.commit()
print("ALL LOCATION DATA LOADED INTO BIKE4JOY DATABASE")
INSERT_QUERY = """
INSERT INTO USERS(name, email, userPassword) VALUES
("<NAME>", "<EMAIL>", "fronkfronk123"),
("<NAME>", "<EMAIL>", "<PASSWORD>"),
("<NAME>", "<EMAIL>", "bik4joy")
"""
cursor.execute(INSERT_QUERY.rstrip())
connection.commit()
INSERT_QUERY = """
INSERT INTO REVIEWS(image, video, comment, rating, user_id) VALUES
(\"\", \"\", "Good Place", \"★★★★★\", 1),
(\"https://ride4boybucket.s3.us-east-2.amazonaws.com/2021-12-01-11-30-29-rack.jpg\", \"\", "Excellent Environment", \"★★★★\", 2),
(\"\", \"\", "Looks pretty safe to park", \"★★★\", 3)
"""
cursor.execute(INSERT_QUERY.rstrip())
connection.commit()
INSERT_QUERY = """
INSERT INTO REVIEW_TO_LOCATION(loc_id, rev_id) VALUES
(2, 1),
(2, 2),
(2, 3)
"""
cursor.execute(INSERT_QUERY.rstrip())
connection.commit()
if connection.is_connected():
connection.cursor().close()
connection.close()
print("EXITING DATABASE")
except Error as e:
print(e) | # The purpose of this script is to initialize the database and populating the initial location data
# it was ran before the server deployment
# it is not used in the local and deployed server at any time
# it has no impacts on the live website interactions
import json
import mysql.connector
from mysql.connector import Error
if __name__ == "__main__" :
# Load database credentials
with open("../config.json") as file:
credentials = json.load(file)
_host = credentials['DB_ADDRESS']
_port = credentials["DB_PORT"]
_database = credentials["DB_NAME"]
_username = credentials["DB_USERNAME"]
_password = credentials["DB_PASSWORD"]
# Load location data from the json file
with open("./locations.json") as file:
raw_data = json.load(file)
locations = raw_data['parkingData']
try:
connection = mysql.connector.connect(host=_host,
port=_port,
database=_database,
user=_username,
password=_password)
print("Connected")
cursor = connection.cursor()
cursor.execute("SHOW TABLES;")
tables = cursor.fetchall()
if tables and "LOCATIONS" in list(tables[0]):
cursor.execute("DELETE FROM LOCATIONS") # reset LOCATION TABLE
# cursor.execute("DROP TABLE LOCATIONS")
print("TABLE LOCATION DELETED")
INSERT_QUERY = """
INSERT INTO LOCATIONS (address, postalCode, parkingType, capacity, lat, lng, bikeSize, yearInstalled)
VALUES
"""
# Populate table
for location in locations:
properties = location["properties"]
address = properties["ADDRESS_FULL"]
postal_code = properties["POSTAL_CODE"]
parking_type = properties["PARKING_TYPE"]
capacity = properties["BICYCLE_CAPACITY"]
bike_size = properties["SIZE_M"]
year_installed = properties["YEAR_INSTALLED"]
geometry = location["geometry"]["coordinates"]
lat = geometry[1]
lng = geometry[0]
INSERT_QUERY += f'("{address}", "{postal_code}", "{parking_type}", {capacity}, {lat}, {lng}, {bike_size}, {year_installed}),\n'
cursor.execute(INSERT_QUERY.rstrip().rstrip(','))
connection.commit()
print("ALL LOCATION DATA LOADED INTO BIKE4JOY DATABASE")
INSERT_QUERY = """
INSERT INTO USERS(name, email, userPassword) VALUES
("<NAME>", "<EMAIL>", "fronkfronk123"),
("<NAME>", "<EMAIL>", "<PASSWORD>"),
("<NAME>", "<EMAIL>", "bik4joy")
"""
cursor.execute(INSERT_QUERY.rstrip())
connection.commit()
INSERT_QUERY = """
INSERT INTO REVIEWS(image, video, comment, rating, user_id) VALUES
(\"\", \"\", "Good Place", \"★★★★★\", 1),
(\"https://ride4boybucket.s3.us-east-2.amazonaws.com/2021-12-01-11-30-29-rack.jpg\", \"\", "Excellent Environment", \"★★★★\", 2),
(\"\", \"\", "Looks pretty safe to park", \"★★★\", 3)
"""
cursor.execute(INSERT_QUERY.rstrip())
connection.commit()
INSERT_QUERY = """
INSERT INTO REVIEW_TO_LOCATION(loc_id, rev_id) VALUES
(2, 1),
(2, 2),
(2, 3)
"""
cursor.execute(INSERT_QUERY.rstrip())
connection.commit()
if connection.is_connected():
connection.cursor().close()
connection.close()
print("EXITING DATABASE")
except Error as e:
print(e) | en | 0.778999 | # The purpose of this script is to initialize the database and populating the initial location data # it was ran before the server deployment # it is not used in the local and deployed server at any time # it has no impacts on the live website interactions # Load database credentials # Load location data from the json file # reset LOCATION TABLE # cursor.execute("DROP TABLE LOCATIONS") INSERT INTO LOCATIONS (address, postalCode, parkingType, capacity, lat, lng, bikeSize, yearInstalled) VALUES # Populate table INSERT INTO USERS(name, email, userPassword) VALUES ("<NAME>", "<EMAIL>", "fronkfronk123"), ("<NAME>", "<EMAIL>", "<PASSWORD>"), ("<NAME>", "<EMAIL>", "bik4joy") INSERT INTO REVIEWS(image, video, comment, rating, user_id) VALUES (\"\", \"\", "Good Place", \"★★★★★\", 1), (\"https://ride4boybucket.s3.us-east-2.amazonaws.com/2021-12-01-11-30-29-rack.jpg\", \"\", "Excellent Environment", \"★★★★\", 2), (\"\", \"\", "Looks pretty safe to park", \"★★★\", 3) INSERT INTO REVIEW_TO_LOCATION(loc_id, rev_id) VALUES (2, 1), (2, 2), (2, 3) | 2.935267 | 3 |
autoPyTorch/pipeline/nodes/resampling_strategy_selector.py | thomascherickal/Auto-PyTorch | 1 | 6621759 | import numpy as np
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
from autoPyTorch.pipeline.base.pipeline_node import PipelineNode
from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.components.preprocessing.resampling_base import ResamplingMethodNone, ResamplingMethodBase, TargetSizeStrategyBase
from sklearn.preprocessing import OneHotEncoder
import ConfigSpace
import ConfigSpace.hyperparameters as CSH
from autoPyTorch.utils.configspace_wrapper import ConfigWrapper
import logging
class ResamplingStrategySelector(PipelineNode):
    """Pipeline node that re-balances the training set for classification.

    Combines a configurable over-sampling method, under-sampling method
    and target-size strategy; all three are selected via sampled
    hyperparameters from registries populated by the add_* methods.
    """

    def __init__(self):
        super(ResamplingStrategySelector, self).__init__()
        # Registries mapping config names to resampling classes.
        self.over_sampling_methods = dict()
        self.add_over_sampling_method('none', ResamplingMethodNone)
        self.under_sampling_methods = dict()
        self.add_under_sampling_method('none', ResamplingMethodNone)
        # A 'none' target size strategy disables resampling entirely.
        self.target_size_strategies = {'none': None}
        self.logger = logging.getLogger('autonet')

    def fit(self, hyperparameter_config, X_train, Y_train):
        """Resample (X_train, Y_train) per the sampled hyperparameters.

        Y_train is expected one-hot encoded; labels are decoded for the
        resampling step and re-encoded before returning.  Returns an
        empty dict (no changes) when the target size strategy is 'none'.
        """
        hyperparameter_config = ConfigWrapper(self.get_name(), hyperparameter_config)
        if hyperparameter_config['target_size_strategy'] == 'none':
            return dict()
        over_sampling_method = self.over_sampling_methods[hyperparameter_config['over_sampling_method']](
            ConfigWrapper(hyperparameter_config['over_sampling_method'], hyperparameter_config)
        )
        under_sampling_method = self.under_sampling_methods[hyperparameter_config['under_sampling_method']](
            ConfigWrapper(hyperparameter_config['under_sampling_method'], hyperparameter_config)
        )
        target_size_strategy = self.target_size_strategies[hyperparameter_config['target_size_strategy']]()
        # Decode one-hot targets back to integer class labels.
        y = np.argmax(Y_train, axis=1).astype(int)
        ohe = OneHotEncoder(categories="auto", sparse=False)
        ohe.fit(y.reshape((-1, 1)))
        over_sampling_target_size = target_size_strategy.over_sample_strategy(y)
        under_sampling_target_size = target_size_strategy.under_sample_strategy(y)
        self.logger.debug("Distribution before resample: " + str(np.unique(y, return_counts=True)[1]))
        # Over-sample minorities first, then under-sample majorities.
        X_train, y = over_sampling_method.resample(X_train, y, over_sampling_target_size)
        X_train, y = under_sampling_method.resample(X_train, y, under_sampling_target_size)
        self.logger.debug("Distribution after resample: " + str(np.unique(y, return_counts=True)[1]))
        return {'X_train': X_train, 'Y_train': ohe.transform(y.reshape((-1, 1)))}

    def add_over_sampling_method(self, name, resampling_method):
        """Register an over-sampling method under *name*.

        Arguments:
            name {string} -- name of the method for definition in config
            resampling_method {class} -- subclass of ResamplingMethodBase
        """
        if (not issubclass(resampling_method, ResamplingMethodBase)):
            raise ValueError("Resampling method must be subclass of ResamplingMethodBase")
        self.over_sampling_methods[name] = resampling_method

    def add_under_sampling_method(self, name, resampling_method):
        """Register an under-sampling method under *name*.

        Arguments:
            name {string} -- name of the method for definition in config
            resampling_method {class} -- subclass of ResamplingMethodBase
        """
        if (not issubclass(resampling_method, ResamplingMethodBase)):
            raise ValueError("Resampling method must be subclass of ResamplingMethodBase")
        self.under_sampling_methods[name] = resampling_method

    def add_target_size_strategy(self, name, target_size_strategy):
        """Register a target-size strategy under *name*.

        Arguments:
            name {string} -- name of the strategy for definition in config
            target_size_strategy {class} -- subclass of TargetSizeStrategyBase
        """
        if (not issubclass(target_size_strategy, TargetSizeStrategyBase)):
            raise ValueError("Resampling method must be subclass of TargetSizeStrategyBase")
        self.target_size_strategies[name] = target_size_strategy

    def remove_over_sampling_method(self, name):
        # Raises KeyError if *name* was never registered.
        del self.over_sampling_methods[name]

    def remove_under_sampling_method(self, name):
        del self.under_sampling_methods[name]

    def remove_target_size_strategy(self, name):
        del self.target_size_strategies[name]

    def get_pipeline_config_options(self):
        # Pipeline-level options restrict which registered methods may be
        # sampled; defaults allow everything currently registered.
        options = [
            ConfigOption(name="over_sampling_methods", default=list(self.over_sampling_methods.keys()), type=str, list=True, choices=list(self.over_sampling_methods.keys())),
            ConfigOption(name="under_sampling_methods", default=list(self.under_sampling_methods.keys()), type=str, list=True, choices=list(self.under_sampling_methods.keys())),
            ConfigOption(name="target_size_strategies", default=list(self.target_size_strategies.keys()), type=str, list=True, choices=list(self.target_size_strategies.keys())),
        ]
        return options

    def get_hyperparameter_search_space(self, **pipeline_config):
        """Build the ConfigSpace for this node.

        Each method's sub-space is made conditional on the categorical
        selector hyperparameters so that only the chosen method's
        parameters are active in any sampled configuration.
        """
        pipeline_config = self.pipeline.get_pipeline_config(**pipeline_config)
        cs = ConfigSpace.ConfigurationSpace()
        possible_over_sampling_methods = set(pipeline_config["over_sampling_methods"]).intersection(self.over_sampling_methods.keys())
        possible_under_sampling_methods = set(pipeline_config["under_sampling_methods"]).intersection(self.under_sampling_methods.keys())
        possible_target_size_strategies = set(pipeline_config["target_size_strategies"]).intersection(self.target_size_strategies.keys())
        selector_over_sampling = cs.add_hyperparameter(CSH.CategoricalHyperparameter("over_sampling_method", possible_over_sampling_methods))
        selector_under_sampling = cs.add_hyperparameter(CSH.CategoricalHyperparameter("under_sampling_method", possible_under_sampling_methods))
        cs.add_hyperparameter(CSH.CategoricalHyperparameter("target_size_strategy", possible_target_size_strategies))
        for method_name, method_type in self.over_sampling_methods.items():
            if method_name not in possible_over_sampling_methods:
                continue
            method_cs = method_type.get_hyperparameter_search_space()
            cs.add_configuration_space( prefix=method_name, configuration_space=method_cs, delimiter=ConfigWrapper.delimiter,
                parent_hyperparameter={'parent': selector_over_sampling, 'value': method_name})
        for method_name, method_type in self.under_sampling_methods.items():
            if method_name not in possible_under_sampling_methods:
                continue
            method_cs = method_type.get_hyperparameter_search_space()
            cs.add_configuration_space( prefix=method_name, configuration_space=method_cs, delimiter=ConfigWrapper.delimiter,
                parent_hyperparameter={'parent': selector_under_sampling, 'value': method_name})
        return self._apply_user_updates(cs)
 | import numpy as np
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
from autoPyTorch.pipeline.base.pipeline_node import PipelineNode
from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.components.preprocessing.resampling_base import ResamplingMethodNone, ResamplingMethodBase, TargetSizeStrategyBase
from sklearn.preprocessing import OneHotEncoder
import ConfigSpace
import ConfigSpace.hyperparameters as CSH
from autoPyTorch.utils.configspace_wrapper import ConfigWrapper
import logging
class ResamplingStrategySelector(PipelineNode):
    """Pipeline node that rebalances the training data.

    Keeps three registries (over-sampling methods, under-sampling methods,
    target-size strategies) and, during fit, applies the combination chosen
    by the sampled hyperparameter configuration.
    """

    def __init__(self):
        super(ResamplingStrategySelector, self).__init__()
        # name -> resampling method class; 'none' is always available.
        self.over_sampling_methods = dict()
        self.add_over_sampling_method('none', ResamplingMethodNone)
        self.under_sampling_methods = dict()
        self.add_under_sampling_method('none', ResamplingMethodNone)
        # 'none' maps to None and disables resampling entirely (see fit()).
        self.target_size_strategies = {'none': None}
        self.logger = logging.getLogger('autonet')

    def fit(self, hyperparameter_config, X_train, Y_train):
        """Resample (X_train, Y_train) per the sampled hyperparameters.

        Returns an empty dict (leave data unchanged) when the target size
        strategy is 'none'; otherwise returns the resampled X_train and the
        one-hot re-encoded Y_train.
        """
        hyperparameter_config = ConfigWrapper(self.get_name(), hyperparameter_config)
        if hyperparameter_config['target_size_strategy'] == 'none':
            return dict()
        over_sampling_method = self.over_sampling_methods[hyperparameter_config['over_sampling_method']](
            ConfigWrapper(hyperparameter_config['over_sampling_method'], hyperparameter_config)
        )
        under_sampling_method = self.under_sampling_methods[hyperparameter_config['under_sampling_method']](
            ConfigWrapper(hyperparameter_config['under_sampling_method'], hyperparameter_config)
        )
        target_size_strategy = self.target_size_strategies[hyperparameter_config['target_size_strategy']]()
        # Collapse one-hot labels to class indices for the resamplers
        # (assumes Y_train is one-hot encoded -- TODO confirm with caller).
        y = np.argmax(Y_train, axis=1).astype(int)
        ohe = OneHotEncoder(categories="auto", sparse=False)
        ohe.fit(y.reshape((-1, 1)))
        over_sampling_target_size = target_size_strategy.over_sample_strategy(y)
        under_sampling_target_size = target_size_strategy.under_sample_strategy(y)
        self.logger.debug("Distribution before resample: " + str(np.unique(y, return_counts=True)[1]))
        X_train, y = over_sampling_method.resample(X_train, y, over_sampling_target_size)
        X_train, y = under_sampling_method.resample(X_train, y, under_sampling_target_size)
        self.logger.debug("Distribution after resample: " + str(np.unique(y, return_counts=True)[1]))
        # Re-encode labels as one-hot before handing them back to the pipeline.
        return {'X_train': X_train, 'Y_train': ohe.transform(y.reshape((-1, 1)))}

    def add_over_sampling_method(self, name, resampling_method):
        """Register an over-sampling method.

        Arguments:
            name {string} -- name of the method for definition in config
            resampling_method {class} -- subclass of ResamplingMethodBase
        """
        if (not issubclass(resampling_method, ResamplingMethodBase)):
            raise ValueError("Resampling method must be subclass of ResamplingMethodBase")
        self.over_sampling_methods[name] = resampling_method

    def add_under_sampling_method(self, name, resampling_method):
        """Register an under-sampling method.

        Arguments:
            name {string} -- name of the method for definition in config
            resampling_method {class} -- subclass of ResamplingMethodBase
        """
        if (not issubclass(resampling_method, ResamplingMethodBase)):
            raise ValueError("Resampling method must be subclass of ResamplingMethodBase")
        self.under_sampling_methods[name] = resampling_method

    def add_target_size_strategy(self, name, target_size_strategy):
        """Register a target-size strategy.

        Arguments:
            name {string} -- name of the strategy for definition in config
            target_size_strategy {class} -- subclass of TargetSizeStrategyBase
        """
        if (not issubclass(target_size_strategy, TargetSizeStrategyBase)):
            raise ValueError("Resampling method must be subclass of TargetSizeStrategyBase")
        self.target_size_strategies[name] = target_size_strategy

    def remove_over_sampling_method(self, name):
        # Raises KeyError if `name` was never registered.
        del self.over_sampling_methods[name]

    def remove_under_sampling_method(self, name):
        # Raises KeyError if `name` was never registered.
        del self.under_sampling_methods[name]

    def remove_target_size_strategy(self, name):
        # Raises KeyError if `name` was never registered.
        del self.target_size_strategies[name]

    def get_pipeline_config_options(self):
        """Expose the registered method/strategy names as pipeline config options."""
        options = [
            ConfigOption(name="over_sampling_methods", default=list(self.over_sampling_methods.keys()), type=str, list=True, choices=list(self.over_sampling_methods.keys())),
            ConfigOption(name="under_sampling_methods", default=list(self.under_sampling_methods.keys()), type=str, list=True, choices=list(self.under_sampling_methods.keys())),
            ConfigOption(name="target_size_strategies", default=list(self.target_size_strategies.keys()), type=str, list=True, choices=list(self.target_size_strategies.keys())),
        ]
        return options

    def get_hyperparameter_search_space(self, **pipeline_config):
        """Build the ConfigSpace for this node.

        Only methods both registered here and allowed by the pipeline config
        are selectable; each method's own sub-space is attached as a child of
        the corresponding selector hyperparameter.
        """
        pipeline_config = self.pipeline.get_pipeline_config(**pipeline_config)
        cs = ConfigSpace.ConfigurationSpace()
        possible_over_sampling_methods = set(pipeline_config["over_sampling_methods"]).intersection(self.over_sampling_methods.keys())
        possible_under_sampling_methods = set(pipeline_config["under_sampling_methods"]).intersection(self.under_sampling_methods.keys())
        possible_target_size_strategies = set(pipeline_config["target_size_strategies"]).intersection(self.target_size_strategies.keys())
        selector_over_sampling = cs.add_hyperparameter(CSH.CategoricalHyperparameter("over_sampling_method", possible_over_sampling_methods))
        selector_under_sampling = cs.add_hyperparameter(CSH.CategoricalHyperparameter("under_sampling_method", possible_under_sampling_methods))
        cs.add_hyperparameter(CSH.CategoricalHyperparameter("target_size_strategy", possible_target_size_strategies))
        for method_name, method_type in self.over_sampling_methods.items():
            if method_name not in possible_over_sampling_methods:
                continue
            method_cs = method_type.get_hyperparameter_search_space()
            cs.add_configuration_space( prefix=method_name, configuration_space=method_cs, delimiter=ConfigWrapper.delimiter,
                parent_hyperparameter={'parent': selector_over_sampling, 'value': method_name})
        for method_name, method_type in self.under_sampling_methods.items():
            if method_name not in possible_under_sampling_methods:
                continue
            method_cs = method_type.get_hyperparameter_search_space()
            cs.add_configuration_space( prefix=method_name, configuration_space=method_cs, delimiter=ConfigWrapper.delimiter,
                parent_hyperparameter={'parent': selector_under_sampling, 'value': method_name})
        return self._apply_user_updates(cs)
SCNN-Tensorflow/cable-detection-model/tools/draw_output_picture2.py | gokiri/Cable_detection | 1 | 6621760 | <gh_stars>1-10
"""
This is to draw output cable(from prob maps) lines using linear regression, the number of lines is up to 2
"""
import cv2
import argparse
import numpy as np
from sklearn.linear_model import LinearRegression
def init_args():
    """Build the command line parser and return the parsed arguments.

    :return: argparse.Namespace with image_path, probmap_path and filename
    """
    arg_parser = argparse.ArgumentParser()
    for flag, help_text in (
            ('--image_path', 'The image path or the src image save dir'),
            ('--probmap_path', 'The probmap path '),
            ('--filename', None)):
        arg_parser.add_argument(flag, type=str, help=help_text)
    return arg_parser.parse_args()
def get_middle_max(list_):
    """Return the middle index among all positions whose value is >= the mean.

    Used to locate the central column of the bright band in one row of a
    probability map.
    """
    threshold = np.mean(list_)
    above = [idx for idx, value in enumerate(list_) if value >= threshold]
    return above[len(above) // 2]
def judge(dataset_info_file):
    """Sum the four existence flags on the first line of a .exist.txt file.

    Returns the number of detected cable lines (0-4); returns None when the
    file is empty (the loop body never runs).
    """
    with open(dataset_info_file, 'r') as exist_file:
        for line in exist_file:
            flags = line.strip(' ').split()
            return sum(int(flags[col]) for col in range(4))
if __name__ == '__main__':
    # Parse CLI arguments (image path, prob-map path, output filename).
    args = init_args()

    x1_, y1_, points1 = [], [], []
    x2_, y2_, points2 = [], [], []

    # The existence file lives next to the prob map, e.g.
    # .../1_22_1_1_avg.png -> .../1_22_1.exist.txt
    dataset_info_file = args.probmap_path[:-10] + '.exist.txt'
    num_lines = judge(dataset_info_file)

    if num_lines == 1:
        img1 = cv2.imread(args.image_path, cv2.IMREAD_COLOR)  # source image
        img2 = cv2.imread(args.probmap_path, 0)               # prob map, grayscale
        img2_ = cv2.resize(img2, (640, 360))
        # Sample the brightest column every 20 rows to collect line points.
        for i in range(18):
            y1_.append(20 * i)
            x1_.append(get_middle_max(img2_[20 * i, :]))
            points1.append([x1_[i], y1_[i]])
        rows, cols = img1.shape[:2]
        # Fit a straight line through the sampled points (least squares).
        [vx, vy, x, y] = cv2.fitLine(np.array(points1, dtype=np.int32), cv2.DIST_L2, 0, 0.01, 0.01)
        righty = int((y * vx / vy) + x)
        lefty = int((x - (rows - y) * vx / vy))
        res = cv2.line(img1, (lefty, 0), (righty, rows - 1), (255, 0, 140), 2)
        cv2.imwrite("/Users/wutong/Desktop/uploads/saved/" + args.filename, res)
    elif num_lines == 2:
        img1 = cv2.imread(args.image_path, cv2.IMREAD_COLOR)
        img2 = cv2.imread(args.probmap_path, 0)
        img2_ = cv2.resize(img2, (640, 360))
        # Derive the second prob-map path: ..._1_avg.png -> ..._2_avg.png
        path_ = args.probmap_path[:-9] + "2" + args.probmap_path[-8:]
        # BUGFIX: read the *second* prob map from path_ and resize img3
        # (previously re-read args.probmap_path and resized img2 again,
        # so both fitted lines came from the same map).
        img3 = cv2.imread(path_, 0)
        img3_ = cv2.resize(img3, (640, 360))
        for i in range(18):
            y1_.append(20 * i)
            x1_.append(get_middle_max(img2_[20 * i, :320]))    # left half -> line 1
            points1.append([x1_[i], y1_[i]])
            y2_.append(20 * i)
            x2_.append(get_middle_max(img3_[20 * i, 320:]))    # right half -> line 2
            points2.append([320 + x2_[i], y2_[i]])
        rows, cols = img1.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(np.array(points1, dtype=np.int32), cv2.DIST_L2, 0, 0.01, 0.01)
        righty = int((y * vx / vy) + x)
        lefty = int((x - (rows - y) * vx / vy))
        res = cv2.line(img1, (lefty, 0), (righty, rows - 1), (255, 0, 140), 2)
        [vx, vy, x, y] = cv2.fitLine(np.array(points2, dtype=np.int32), cv2.DIST_L2, 0, 0.01, 0.01)
        righty = int((y * vx / vy) + x)
        lefty = int((x - (rows - y) * vx / vy))
        res_ = cv2.line(res, (lefty, 0), (righty, rows - 1), (255, 0, 140), 2)
        cv2.imwrite("/Users/wutong/Desktop/uploads/saved/" + args.filename, res_)
"""
This is to draw output cable(from prob maps) lines using linear regression, the number of lines is up to 2
"""
import cv2
import argparse
import numpy as np
from sklearn.linear_model import LinearRegression
def init_args():
    """Build the command line parser and return the parsed arguments.

    :return: argparse.Namespace with image_path, probmap_path and filename
    """
    arg_parser = argparse.ArgumentParser()
    for flag, help_text in (
            ('--image_path', 'The image path or the src image save dir'),
            ('--probmap_path', 'The probmap path '),
            ('--filename', None)):
        arg_parser.add_argument(flag, type=str, help=help_text)
    return arg_parser.parse_args()
def get_middle_max(list_):
    """Return the middle index among all positions whose value is >= the mean.

    Used to locate the central column of the bright band in one row of a
    probability map.
    """
    threshold = np.mean(list_)
    above = [idx for idx, value in enumerate(list_) if value >= threshold]
    return above[len(above) // 2]
def judge(dataset_info_file):
    """Sum the four existence flags on the first line of a .exist.txt file.

    Returns the number of detected cable lines (0-4); returns None when the
    file is empty (the loop body never runs).
    """
    with open(dataset_info_file, 'r') as exist_file:
        for line in exist_file:
            flags = line.strip(' ').split()
            return sum(int(flags[col]) for col in range(4))
if __name__ == '__main__':
    # Parse CLI arguments (image path, prob-map path, output filename).
    args = init_args()

    x1_, y1_, points1 = [], [], []
    x2_, y2_, points2 = [], [], []

    # The existence file lives next to the prob map, e.g.
    # .../1_22_1_1_avg.png -> .../1_22_1.exist.txt
    dataset_info_file = args.probmap_path[:-10] + '.exist.txt'
    num_lines = judge(dataset_info_file)

    if num_lines == 1:
        img1 = cv2.imread(args.image_path, cv2.IMREAD_COLOR)  # source image
        img2 = cv2.imread(args.probmap_path, 0)               # prob map, grayscale
        img2_ = cv2.resize(img2, (640, 360))
        # Sample the brightest column every 20 rows to collect line points.
        for i in range(18):
            y1_.append(20 * i)
            x1_.append(get_middle_max(img2_[20 * i, :]))
            points1.append([x1_[i], y1_[i]])
        rows, cols = img1.shape[:2]
        # Fit a straight line through the sampled points (least squares).
        [vx, vy, x, y] = cv2.fitLine(np.array(points1, dtype=np.int32), cv2.DIST_L2, 0, 0.01, 0.01)
        righty = int((y * vx / vy) + x)
        lefty = int((x - (rows - y) * vx / vy))
        res = cv2.line(img1, (lefty, 0), (righty, rows - 1), (255, 0, 140), 2)
        cv2.imwrite("/Users/wutong/Desktop/uploads/saved/" + args.filename, res)
    elif num_lines == 2:
        img1 = cv2.imread(args.image_path, cv2.IMREAD_COLOR)
        img2 = cv2.imread(args.probmap_path, 0)
        img2_ = cv2.resize(img2, (640, 360))
        # Derive the second prob-map path: ..._1_avg.png -> ..._2_avg.png
        path_ = args.probmap_path[:-9] + "2" + args.probmap_path[-8:]
        # BUGFIX: read the *second* prob map from path_ and resize img3
        # (previously re-read args.probmap_path and resized img2 again,
        # so both fitted lines came from the same map).
        img3 = cv2.imread(path_, 0)
        img3_ = cv2.resize(img3, (640, 360))
        for i in range(18):
            y1_.append(20 * i)
            x1_.append(get_middle_max(img2_[20 * i, :320]))    # left half -> line 1
            points1.append([x1_[i], y1_[i]])
            y2_.append(20 * i)
            x2_.append(get_middle_max(img3_[20 * i, 320:]))    # right half -> line 2
            points2.append([320 + x2_[i], y2_[i]])
        rows, cols = img1.shape[:2]
        [vx, vy, x, y] = cv2.fitLine(np.array(points1, dtype=np.int32), cv2.DIST_L2, 0, 0.01, 0.01)
        righty = int((y * vx / vy) + x)
        lefty = int((x - (rows - y) * vx / vy))
        res = cv2.line(img1, (lefty, 0), (righty, rows - 1), (255, 0, 140), 2)
        [vx, vy, x, y] = cv2.fitLine(np.array(points2, dtype=np.int32), cv2.DIST_L2, 0, 0.01, 0.01)
        righty = int((y * vx / vy) + x)
        lefty = int((x - (rows - y) * vx / vy))
        res_ = cv2.line(res, (lefty, 0), (righty, rows - 1), (255, 0, 140), 2)
        cv2.imwrite("/Users/wutong/Desktop/uploads/saved/" + args.filename, res_)
cassandra_backups/snapshotting.py | eliran-lusha/cassandra_backups_ek | 0 | 6621761 | <reponame>eliran-lusha/cassandra_backups_ek
from __future__ import (absolute_import, print_function)
import time
import json
import logging
from distutils.util import strtobool
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
from datetime import datetime
from fabric.api import (env, execute, hide, run, sudo)
from fabric.context_managers import settings
from fabric.operations import local
from cassandra_backups.utils import get_s3_connection_host, nice_local
class Snapshot(object):
    """Metadata describing one Cassandra snapshot stored on S3.

    A snapshot is identified by its name (the UTC date of creation), the
    hosts it covers, the keyspaces/table it backs up, and the S3 base path
    it lives under.  Data is laid out as:

        s3_bucket:/<base_path>/<snapshot_name>/<node-hostname>/...

    and each snapshot is represented on S3 by its manifest.json, which
    makes incremental backups straightforward.
    """
    SNAPSHOT_TIMESTAMP_FORMAT = '%Y%m%d'

    def __init__(self, base_path, s3_bucket, hosts, keyspaces, table):
        self.s3_bucket = s3_bucket
        self.name = self.make_snapshot_name()
        self.hosts = hosts
        self.keyspaces = keyspaces
        self.table = table
        self._base_path = base_path

    def dump_manifest_file(self):
        """Serialize this snapshot's metadata to a JSON manifest string."""
        return json.dumps(dict(
            name=self.name,
            base_path=self._base_path,
            hosts=self.hosts,
            keyspaces=self.keyspaces,
            table=self.table,
        ))

    @staticmethod
    def load_manifest_file(data, s3_bucket):
        """Rebuild a Snapshot from a manifest JSON string."""
        meta = json.loads(data)
        restored = Snapshot(
            meta['base_path'], s3_bucket, meta['hosts'],
            meta['keyspaces'], meta['table'])
        restored.name = meta['name']
        return restored

    @property
    def base_path(self):
        """Full S3 prefix for this snapshot: <base_path>/<name>."""
        return "{0}/{1}".format(self._base_path, self.name)

    @staticmethod
    def make_snapshot_name():
        """Default snapshot name: today's UTC date as YYYYMMDD."""
        return datetime.utcnow().strftime(Snapshot.SNAPSHOT_TIMESTAMP_FORMAT)

    def unix_time_name(self):
        """Snapshot name parsed back to epoch milliseconds (for ordering)."""
        parsed = datetime.strptime(self.name, self.SNAPSHOT_TIMESTAMP_FORMAT)
        return time.mktime(parsed.timetuple()) * 1000

    def __cmp__(self, other):
        # Python 2 ordering hook: newer snapshots compare greater.
        return self.unix_time_name() - other.unix_time_name()

    def __repr__(self):
        return self.name

    __str__ = __repr__
class RestoreWorker(object):
    """Fetches a snapshot from S3 and restores it onto a Cassandra node.

    The actual download/restore is delegated to the external
    ``cassandra-backups-agent fetch`` command, executed locally or remotely
    (optionally under sudo) depending on use_local / use_sudo.
    """
    def __init__(self, aws_access_key_id, aws_secret_access_key, s3_bucket_region, snapshot,
                 cassandra_tools_bin_dir, restore_dir, use_sudo, use_local):
        # snapshot: a Snapshot instance describing what to restore.
        self.aws_secret_access_key = aws_secret_access_key
        self.aws_access_key_id = aws_access_key_id
        # Resolve the region-specific S3 endpoint host once.
        self.s3_host = get_s3_connection_host(s3_bucket_region)
        self.snapshot = snapshot
        self.cassandra_tools_bin_dir = cassandra_tools_bin_dir
        self.restore_dir = restore_dir
        self.use_sudo = use_sudo
        self.use_local = use_local

    def restore(self, keyspace):
        """Restore `keyspace` from the snapshot onto the current fabric host.

        Builds the agent's fetch command line and runs it via
        local / sudo / run according to the configured flags.
        """
        logging.info("Restoring keyspace=%(keyspace)s to host %(host)s ,\
            " % dict(keyspace=keyspace, host=env.host_string))
        restore_command = "cassandra-backups-agent " \
                          "fetch " \
                          "--keyspace=%(keyspace)s " \
                          "--snapshot-path=%(snapshot_path)s " \
                          "--aws-access-key-id=%(aws_access_key_id)s " \
                          "--aws-secret-access-key=%(aws_secret_access_key)s " \
                          "--s3-host=%(s3_host)s " \
                          "--s3-bucket-name=%(s3_bucket_name)s " \
                          "--host=%(host)s " \
                          "--cassandra-tools-bin-dir=%(cassandra_tools_bin_dir)s " \
                          "--restore-dir=%(restore_dir)s "
        cmd = restore_command % dict(
            keyspace=keyspace,
            snapshot_path=self.snapshot.base_path,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
            s3_host=self.s3_host,
            s3_bucket_name=self.snapshot.s3_bucket,
            host=env.host_string,
            cassandra_tools_bin_dir=self.cassandra_tools_bin_dir,
            restore_dir=self.restore_dir,
        )
        # Dispatch: local shell (optionally sudo) vs remote fabric run/sudo.
        if self.use_local and self.use_sudo:
            local("sudo " + cmd)
        elif self.use_local:
            local(cmd)
        elif self.use_sudo:
            sudo(cmd)
        else:
            run(cmd)
class BackupWorker(object):
    """
    Backup process is split in these steps:
        - requests cassandra to create new backups
        - uploads backup files to S3
        - clears backup files from nodes
        - updates backup meta information

    When performing a new snapshot the manifest of the snapshot is
    uploaded to S3 for later use.
    Snapshot's manifest path:
        /<snapshot_base_path>/<snapshot_name>/manifest.json

    Every time a backup is done a description of the current ring is
    saved next to the snapshot manifest file.
    """
    def __init__(self, aws_secret_access_key,
                 aws_access_key_id, s3_bucket_region, s3_ssenc,
                 s3_connection_host, cassandra_conf_path, use_sudo, use_local,
                 cassandra_tools_bin_dir, cqlsh_user, cqlsh_password,
                 backup_schema, buffer_size, exclude_tables, rate_limit, quiet, nice,
                 connection_pool_size=12, reduced_redundancy=False):
        self.aws_secret_access_key = aws_secret_access_key
        self.aws_access_key_id = aws_access_key_id
        self.s3_bucket_region = s3_bucket_region
        self.s3_ssenc = s3_ssenc
        self.s3_connection_host = s3_connection_host
        self.cassandra_conf_path = cassandra_conf_path
        self.nodetool_path = "{!s}/nodetool".format(cassandra_tools_bin_dir)
        self.cqlsh_path = "{!s}/cqlsh".format(cassandra_tools_bin_dir)
        self.cqlsh_user = cqlsh_user
        # BUGFIX: the password assignment had been replaced by a placeholder,
        # which was a syntax error and dropped the credential entirely.
        self.cqlsh_password = cqlsh_password
        self.backup_schema = backup_schema
        self.connection_pool_size = connection_pool_size
        self.buffer_size = buffer_size
        self.reduced_redundancy = reduced_redundancy
        self.rate_limit = rate_limit
        self.quiet = quiet
        self.nice = nice
        # use_sudo/use_local may arrive as strings from the CLI layer;
        # normalize to booleans (basestring: this file targets Python 2).
        if isinstance(use_sudo, basestring):
            self.use_sudo = bool(strtobool(use_sudo))
        else:
            self.use_sudo = use_sudo
        if isinstance(use_local, basestring):
            self.use_local = bool(strtobool(use_local))
        else:
            self.use_local = use_local
        self.exclude_tables = exclude_tables

    def execute_cmd(self, cmd):
        """Run `cmd` locally or remotely, optionally under sudo/nice."""
        if self.use_local and self.use_sudo:
            return nice_local('sudo ' + cmd, nice=self.nice)
        elif self.use_local:
            return nice_local(cmd, nice=self.nice)
        elif self.use_sudo:
            return sudo(cmd)
        else:
            return run(cmd)

    def get_current_node_hostname(self):
        """Hostname of the node the current fabric task runs on."""
        return env.host_string

    def upload_node_backups(self, snapshot, incremental_backups):
        """Create the upload manifest on the node, then upload its files to S3.

        Files land under <snapshot.base_path>/<node-hostname>/...
        """
        prefix = '/'.join(snapshot.base_path.split('/') + [self.get_current_node_hostname()])
        manifest_path = '/tmp/backupmanifest'
        manifest_command = "cassandra-backups-agent " \
                           "%(incremental_backups)s create-upload-manifest " \
                           "--manifest_path=%(manifest_path)s " \
                           "--snapshot_name=%(snapshot_name)s " \
                           "--snapshot_keyspaces=%(snapshot_keyspaces)s " \
                           "--snapshot_table=%(snapshot_table)s " \
                           "--conf_path=%(conf_path)s " \
                           "--exclude_tables=%(exclude_tables)s"
        cmd = manifest_command % dict(
            manifest_path=manifest_path,
            snapshot_name=snapshot.name,
            snapshot_keyspaces=','.join(snapshot.keyspaces or ''),
            snapshot_table=snapshot.table,
            conf_path=self.cassandra_conf_path,
            exclude_tables=self.exclude_tables,
            incremental_backups=incremental_backups and '--incremental_backups' or ''
        )
        self.execute_cmd(cmd)
        upload_command = "cassandra-backups-agent %(incremental_backups)s " \
                         "put " \
                         "--s3-bucket-name=%(bucket)s " \
                         "--s3-bucket-region=%(s3_bucket_region)s %(s3_ssenc)s " \
                         "--s3-base-path=%(prefix)s " \
                         "--manifest=%(manifest)s " \
                         "--bufsize=%(bufsize)s " \
                         "--concurrency=4"
        # Optional flags, appended only when configured.
        if self.reduced_redundancy:
            upload_command += " --reduced-redundancy"
        if self.rate_limit > 0:
            upload_command += " --rate-limit=%(rate_limit)s"
        if self.quiet:
            upload_command += " --quiet"
        if self.aws_access_key_id and self.aws_secret_access_key:
            upload_command += " --aws-access-key-id=%(key)s " \
                              "--aws-secret-access-key=%(secret)s"
        cmd = upload_command % dict(
            bucket=snapshot.s3_bucket,
            s3_bucket_region=self.s3_bucket_region,
            s3_ssenc=self.s3_ssenc and '--s3-ssenc' or '',
            prefix=prefix,
            key=self.aws_access_key_id,
            secret=self.aws_secret_access_key,
            manifest=manifest_path,
            bufsize=self.buffer_size,
            rate_limit=self.rate_limit,
            incremental_backups=incremental_backups and '--incremental_backups' or ''
        )
        self.execute_cmd(cmd)

    def snapshot(self, snapshot):
        """
        Perform a full snapshot: create, upload, clear on-node data, then
        write ring description, manifest and (optionally) schema to S3.
        """
        logging.info("Create {!r} snapshot".format(snapshot))
        try:
            self.start_cluster_backup(snapshot, incremental_backups=False)
            self.upload_cluster_backups(snapshot, incremental_backups=False)
        finally:
            # Always free the on-node snapshot files, even if upload failed.
            self.clear_cluster_snapshot(snapshot)
        self.write_ring_description(snapshot)
        self.write_snapshot_manifest(snapshot)
        if self.backup_schema:
            self.write_schema(snapshot)

    def update_snapshot(self, snapshot):
        """Upload data changed since :snapshot was done (incremental backup)."""
        logging.info("Update {!r} snapshot".format(snapshot))
        self.start_cluster_backup(snapshot, incremental_backups=True)
        self.upload_cluster_backups(snapshot, incremental_backups=True)
        self.write_ring_description(snapshot)
        if self.backup_schema:
            self.write_schema(snapshot)

    def get_ring_description(self):
        """Return `nodetool ring` output captured from the first host."""
        with settings(host_string=env.hosts[0]):
            with hide('output'):
                ring_description = self.execute_cmd(self.nodetool_path + ' ring')
        return ring_description

    def get_keyspace_schema(self, keyspace=None):
        """Return the CQL schema of `keyspace` (or of everything) via cqlsh."""
        if self.cqlsh_user and self.cqlsh_password:
            auth = "-u {!s} -p {!s}".format(self.cqlsh_user, self.cqlsh_password)
        else:
            auth = ""
        with settings(host_string=env.hosts[0]):
            with hide('output'):
                cmd = "{!s} {!s} -e 'DESCRIBE SCHEMA;'".format(
                    self.cqlsh_path, auth)
                if keyspace:
                    cmd = "{!s} -k {!s} {!s} -e 'DESCRIBE KEYSPACE {!s};'".format(
                        self.cqlsh_path, keyspace, auth, keyspace)
                output = self.execute_cmd(cmd)
        return output

    def write_on_S3(self, bucket_name, path, content):
        """Write `content` to s3://bucket_name/path."""
        conn = S3Connection(
            self.aws_access_key_id,
            self.aws_secret_access_key,
            host=self.s3_connection_host)
        bucket = conn.get_bucket(bucket_name, validate=False)
        key = bucket.new_key(path)
        key.set_contents_from_string(content)

    def write_ring_description(self, snapshot):
        """Save the current ring layout next to the snapshot manifest."""
        logging.info("Writing ring description")
        content = self.get_ring_description()
        ring_path = '/'.join([snapshot.base_path, 'ring'])
        self.write_on_S3(snapshot.s3_bucket, ring_path, content)

    def write_schema(self, snapshot):
        """Save the CQL schema (per keyspace, or global) next to the manifest."""
        if snapshot.keyspaces:
            for ks in snapshot.keyspaces:
                logging.info("Writing schema for keyspace {!s}".format(ks))
                content = self.get_keyspace_schema(ks)
                schema_path = '/'.join(
                    [snapshot.base_path, "schema_{!s}.cql".format(ks)])
                self.write_on_S3(snapshot.s3_bucket, schema_path, content)
        else:
            logging.info("Writing schema for all keyspaces")
            content = self.get_keyspace_schema()
            schema_path = '/'.join([snapshot.base_path, "schema.cql"])
            self.write_on_S3(snapshot.s3_bucket, schema_path, content)

    def write_snapshot_manifest(self, snapshot):
        """Upload the snapshot's manifest.json to S3."""
        content = snapshot.dump_manifest_file()
        manifest_path = '/'.join([snapshot.base_path, 'manifest.json'])
        self.write_on_S3(snapshot.s3_bucket, manifest_path, content)

    def start_cluster_backup(self, snapshot, incremental_backups=False):
        """Trigger snapshot/flush on every node in parallel."""
        logging.info("Creating snapshots")
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.node_start_backup, snapshot, incremental_backups)

    def node_start_backup(self, snapshot, incremental_backups):
        """Runs the snapshot (or flush, for incrementals) command on a node."""
        def hide_exec_cmd(cmd):
            # Keep fabric's per-command chatter out of the logs.
            with hide('running', 'stdout', 'stderr'):
                self.execute_cmd(cmd)

        if incremental_backups:
            backup_command = "%(nodetool)s flush %(keyspace)s %(tables)s"
            if snapshot.keyspaces:
                # flush can only take one keyspace at a time.
                for keyspace in snapshot.keyspaces:
                    cmd = backup_command % dict(
                        nodetool=self.nodetool_path,
                        keyspace=keyspace,
                        tables=snapshot.table or ''
                    )
                    hide_exec_cmd(cmd)
            else:
                # If no keyspace then can't provide a table either.
                cmd = backup_command % dict(
                    nodetool=self.nodetool_path,
                    keyspace='',
                    tables=''
                )
                hide_exec_cmd(cmd)
        else:
            backup_command = "%(nodetool)s snapshot %(table_param)s \
                -t %(snapshot)s %(keyspaces)s"
            if snapshot.table:
                # Only one keyspace can be specified along with a column family.
                table_param = "-cf {!s}".format(snapshot.table)
                for keyspace in snapshot.keyspaces:
                    cmd = backup_command % dict(
                        nodetool=self.nodetool_path,
                        table_param=table_param,
                        snapshot=snapshot.name,
                        keyspaces=keyspace
                    )
                    hide_exec_cmd(cmd)
            else:
                cmd = backup_command % dict(
                    nodetool=self.nodetool_path,
                    table_param='',
                    snapshot=snapshot.name,
                    keyspaces=' '.join(snapshot.keyspaces or '')
                )
                hide_exec_cmd(cmd)

    def upload_cluster_backups(self, snapshot, incremental_backups):
        """Upload each node's backup files to S3, in parallel."""
        logging.info("Uploading backups")
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.upload_node_backups, snapshot, incremental_backups)

    def clear_cluster_snapshot(self, snapshot):
        """Remove on-node snapshot files on every node, in parallel."""
        logging.info("Clearing snapshots")
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.clear_node_snapshot, snapshot)

    def clear_node_snapshot(self, snapshot):
        """Cleans up snapshots from a cassandra node."""
        clear_command = '%(nodetool)s clearsnapshot -t "%(snapshot)s"'
        cmd = clear_command % dict(
            nodetool=self.nodetool_path,
            snapshot=snapshot.name
        )
        self.execute_cmd(cmd)
class SnapshotCollection(object):
    """Lazy, S3-backed collection of Snapshot objects under a base path.

    Snapshots are discovered by listing <base_path>/<name>/ prefixes and
    loading each manifest.json; the listing is fetched once and cached.
    """
    def __init__(
            self, aws_access_key_id,
            aws_secret_access_key, base_path, s3_bucket, s3_connection_host):
        self.s3_connection_host = s3_connection_host
        self.s3_bucket = s3_bucket
        self.base_path = base_path
        # Cache; populated on first access by _read_s3().
        self.snapshots = None
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key

    def _read_s3(self):
        """Populate self.snapshots from S3 (no-op if already loaded).

        Skips snapshot dirs with a missing manifest.json or an unparsable
        manifest; the result is sorted newest-first.
        """
        if self.snapshots:
            return
        conn = S3Connection(self.aws_access_key_id, self.aws_secret_access_key, host=self.s3_connection_host)
        bucket = conn.get_bucket(self.s3_bucket, validate=False)
        self.snapshots = []
        prefix = self.base_path
        if not self.base_path.endswith('/'):
            prefix = "{!s}/".format(self.base_path)
        snap_paths = [snap.name for snap in bucket.list(
            prefix=prefix, delimiter='/')]
        # Remove the root dir from the list since it won't have a manifest file.
        snap_paths = [x for x in snap_paths if x != prefix]
        for snap_path in snap_paths:
            mkey = Key(bucket)
            manifest_path = '/'.join([snap_path, 'manifest.json'])
            mkey.key = manifest_path
            try:
                manifest_data = mkey.get_contents_as_string()
            except S3ResponseError as e:  # manifest.json not found.
                logging.warn("Response: {!r} manifest_path: {!r}".format(
                    e.message, manifest_path))
                continue
            try:
                self.snapshots.append(
                    Snapshot.load_manifest_file(manifest_data, self.s3_bucket))
            except Exception as e:  # Invalid json format.
                logging.error("Parsing manifest.json failed. {!r}".format(
                    e.message))
                continue
        self.snapshots = sorted(self.snapshots, reverse=True)

    def get_snapshot_by_name(self, name):
        # Returns the matching snapshot, or a false-y value when none match
        # (Python 2 filter returns a list, so this indexes safely).
        snapshots = filter(lambda s: s.name == name, self)
        return snapshots and snapshots[0]

    def get_latest(self):
        # List is sorted newest-first by _read_s3().
        self._read_s3()
        return self.snapshots[0]

    def get_snapshot_for(self, hosts, keyspaces, table, name):
        """Returns the most recent compatible snapshot"""
        # Compatible means hosts, keyspaces, table AND name all match
        # exactly; returns None when nothing matches.
        for snapshot in self:
            if snapshot.hosts != hosts:
                continue
            if snapshot.keyspaces != keyspaces:
                continue
            if snapshot.table != table:
                continue
            if snapshot.name != name:
                continue
            return snapshot

    def __iter__(self):
        self._read_s3()
        return iter(self.snapshots)
from __future__ import (absolute_import, print_function)
import time
import json
import logging
from distutils.util import strtobool
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
from datetime import datetime
from fabric.api import (env, execute, hide, run, sudo)
from fabric.context_managers import settings
from fabric.operations import local
from cassandra_backups.utils import get_s3_connection_host, nice_local
class Snapshot(object):
    """Metadata describing one Cassandra snapshot stored on S3.

    A snapshot is identified by its name (the UTC date of creation), the
    hosts it covers, the keyspaces/table it backs up, and the S3 base path
    it lives under.  Data is laid out as:

        s3_bucket:/<base_path>/<snapshot_name>/<node-hostname>/...

    and each snapshot is represented on S3 by its manifest.json, which
    makes incremental backups straightforward.
    """
    SNAPSHOT_TIMESTAMP_FORMAT = '%Y%m%d'

    def __init__(self, base_path, s3_bucket, hosts, keyspaces, table):
        self.s3_bucket = s3_bucket
        self.name = self.make_snapshot_name()
        self.hosts = hosts
        self.keyspaces = keyspaces
        self.table = table
        self._base_path = base_path

    def dump_manifest_file(self):
        """Serialize this snapshot's metadata to a JSON manifest string."""
        return json.dumps(dict(
            name=self.name,
            base_path=self._base_path,
            hosts=self.hosts,
            keyspaces=self.keyspaces,
            table=self.table,
        ))

    @staticmethod
    def load_manifest_file(data, s3_bucket):
        """Rebuild a Snapshot from a manifest JSON string."""
        meta = json.loads(data)
        restored = Snapshot(
            meta['base_path'], s3_bucket, meta['hosts'],
            meta['keyspaces'], meta['table'])
        restored.name = meta['name']
        return restored

    @property
    def base_path(self):
        """Full S3 prefix for this snapshot: <base_path>/<name>."""
        return "{0}/{1}".format(self._base_path, self.name)

    @staticmethod
    def make_snapshot_name():
        """Default snapshot name: today's UTC date as YYYYMMDD."""
        return datetime.utcnow().strftime(Snapshot.SNAPSHOT_TIMESTAMP_FORMAT)

    def unix_time_name(self):
        """Snapshot name parsed back to epoch milliseconds (for ordering)."""
        parsed = datetime.strptime(self.name, self.SNAPSHOT_TIMESTAMP_FORMAT)
        return time.mktime(parsed.timetuple()) * 1000

    def __cmp__(self, other):
        # Python 2 ordering hook: newer snapshots compare greater.
        return self.unix_time_name() - other.unix_time_name()

    def __repr__(self):
        return self.name

    __str__ = __repr__
class RestoreWorker(object):
    """Fetches a snapshot from S3 and restores it onto a Cassandra node.

    The actual download/restore is delegated to the external
    ``cassandra-backups-agent fetch`` command, executed locally or remotely
    (optionally under sudo) depending on use_local / use_sudo.
    """
    def __init__(self, aws_access_key_id, aws_secret_access_key, s3_bucket_region, snapshot,
                 cassandra_tools_bin_dir, restore_dir, use_sudo, use_local):
        # snapshot: a Snapshot instance describing what to restore.
        self.aws_secret_access_key = aws_secret_access_key
        self.aws_access_key_id = aws_access_key_id
        # Resolve the region-specific S3 endpoint host once.
        self.s3_host = get_s3_connection_host(s3_bucket_region)
        self.snapshot = snapshot
        self.cassandra_tools_bin_dir = cassandra_tools_bin_dir
        self.restore_dir = restore_dir
        self.use_sudo = use_sudo
        self.use_local = use_local

    def restore(self, keyspace):
        """Restore `keyspace` from the snapshot onto the current fabric host.

        Builds the agent's fetch command line and runs it via
        local / sudo / run according to the configured flags.
        """
        logging.info("Restoring keyspace=%(keyspace)s to host %(host)s ,\
            " % dict(keyspace=keyspace, host=env.host_string))
        restore_command = "cassandra-backups-agent " \
                          "fetch " \
                          "--keyspace=%(keyspace)s " \
                          "--snapshot-path=%(snapshot_path)s " \
                          "--aws-access-key-id=%(aws_access_key_id)s " \
                          "--aws-secret-access-key=%(aws_secret_access_key)s " \
                          "--s3-host=%(s3_host)s " \
                          "--s3-bucket-name=%(s3_bucket_name)s " \
                          "--host=%(host)s " \
                          "--cassandra-tools-bin-dir=%(cassandra_tools_bin_dir)s " \
                          "--restore-dir=%(restore_dir)s "
        cmd = restore_command % dict(
            keyspace=keyspace,
            snapshot_path=self.snapshot.base_path,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
            s3_host=self.s3_host,
            s3_bucket_name=self.snapshot.s3_bucket,
            host=env.host_string,
            cassandra_tools_bin_dir=self.cassandra_tools_bin_dir,
            restore_dir=self.restore_dir,
        )
        # Dispatch: local shell (optionally sudo) vs remote fabric run/sudo.
        if self.use_local and self.use_sudo:
            local("sudo " + cmd)
        elif self.use_local:
            local(cmd)
        elif self.use_sudo:
            sudo(cmd)
        else:
            run(cmd)
class BackupWorker(object):
    """
    Backup process is split in this steps:
    - requests cassandra to create new backups
    - uploads backup files to S3
    - clears backup files from nodes
    - updates backup meta information

    When performing a new snapshot the manifest of the snapshot is
    uploaded to S3 for later use.
    Snapshot's manifest path:
    /<snapshot_base_path>/<snapshot_name>/manifest.json

    Every time a backup is done a description of the current ring is
    saved next to the snapshot manifest file
    """

    def __init__(self, aws_secret_access_key,
                 aws_access_key_id, s3_bucket_region, s3_ssenc,
                 s3_connection_host, cassandra_conf_path, use_sudo, use_local,
                 cassandra_tools_bin_dir, cqlsh_user, cqlsh_password,
                 backup_schema, buffer_size, exclude_tables, rate_limit, quiet, nice,
                 connection_pool_size=12, reduced_redundancy=False):
        self.aws_secret_access_key = aws_secret_access_key
        self.aws_access_key_id = aws_access_key_id
        self.s3_bucket_region = s3_bucket_region
        self.s3_ssenc = s3_ssenc
        self.s3_connection_host = s3_connection_host
        self.cassandra_conf_path = cassandra_conf_path
        self.nodetool_path = "{!s}/nodetool".format(cassandra_tools_bin_dir)
        self.cqlsh_path = "{!s}/cqlsh".format(cassandra_tools_bin_dir)
        self.cqlsh_user = cqlsh_user
        # BUG FIX: this assignment previously held an undefined redaction
        # placeholder; store the actual constructor argument.
        self.cqlsh_password = cqlsh_password
        self.backup_schema = backup_schema
        self.connection_pool_size = connection_pool_size
        self.buffer_size = buffer_size
        self.reduced_redundancy = reduced_redundancy
        self.rate_limit = rate_limit
        self.quiet = quiet
        self.nice = nice
        # Python 2: CLI flags may arrive as strings ('yes'/'no'/'1'/'0').
        if isinstance(use_sudo, basestring):
            self.use_sudo = bool(strtobool(use_sudo))
        else:
            self.use_sudo = use_sudo
        if isinstance(use_local, basestring):
            self.use_local = bool(strtobool(use_local))
        else:
            self.use_local = use_local
        self.exclude_tables = exclude_tables

    def execute_cmd(self, cmd):
        """Run *cmd* locally or remotely, with or without sudo, honoring nice."""
        if self.use_local and self.use_sudo:
            return nice_local('sudo ' + cmd, nice=self.nice)
        elif self.use_local:
            return nice_local(cmd, nice=self.nice)
        elif self.use_sudo:
            return sudo(cmd)
        else:
            return run(cmd)

    def get_current_node_hostname(self):
        # Fabric's current target host names the node's S3 sub-folder.
        return env.host_string

    def upload_node_backups(self, snapshot, incremental_backups):
        """Create an upload manifest on the node, then push its files to S3."""
        prefix = '/'.join(snapshot.base_path.split('/') + [self.get_current_node_hostname()])
        manifest_path = '/tmp/backupmanifest'
        manifest_command = "cassandra-backups-agent " \
                           "%(incremental_backups)s create-upload-manifest " \
                           "--manifest_path=%(manifest_path)s " \
                           "--snapshot_name=%(snapshot_name)s " \
                           "--snapshot_keyspaces=%(snapshot_keyspaces)s " \
                           "--snapshot_table=%(snapshot_table)s " \
                           "--conf_path=%(conf_path)s " \
                           "--exclude_tables=%(exclude_tables)s"
        cmd = manifest_command % dict(
            manifest_path=manifest_path,
            snapshot_name=snapshot.name,
            snapshot_keyspaces=','.join(snapshot.keyspaces or ''),
            snapshot_table=snapshot.table,
            conf_path=self.cassandra_conf_path,
            exclude_tables=self.exclude_tables,
            incremental_backups=incremental_backups and '--incremental_backups' or ''
        )
        self.execute_cmd(cmd)
        upload_command = "cassandra-backups-agent %(incremental_backups)s " \
                         "put " \
                         "--s3-bucket-name=%(bucket)s " \
                         "--s3-bucket-region=%(s3_bucket_region)s %(s3_ssenc)s " \
                         "--s3-base-path=%(prefix)s " \
                         "--manifest=%(manifest)s " \
                         "--bufsize=%(bufsize)s " \
                         "--concurrency=4"
        # Optional flags appended before interpolation.
        if self.reduced_redundancy:
            upload_command += " --reduced-redundancy"
        if self.rate_limit > 0:
            upload_command += " --rate-limit=%(rate_limit)s"
        if self.quiet:
            upload_command += " --quiet"
        if self.aws_access_key_id and self.aws_secret_access_key:
            upload_command += " --aws-access-key-id=%(key)s " \
                              "--aws-secret-access-key=%(secret)s"
        cmd = upload_command % dict(
            bucket=snapshot.s3_bucket,
            s3_bucket_region=self.s3_bucket_region,
            s3_ssenc=self.s3_ssenc and '--s3-ssenc' or '',
            prefix=prefix,
            key=self.aws_access_key_id,
            secret=self.aws_secret_access_key,
            manifest=manifest_path,
            bufsize=self.buffer_size,
            rate_limit=self.rate_limit,
            incremental_backups=incremental_backups and '--incremental_backups' or ''
        )
        self.execute_cmd(cmd)

    def snapshot(self, snapshot):
        """
        Perform a snapshot
        """
        logging.info("Create {!r} snapshot".format(snapshot))
        try:
            self.start_cluster_backup(snapshot, incremental_backups=False)
            self.upload_cluster_backups(snapshot, incremental_backups=False)
        finally:
            # Always free node-local snapshot files, even if the upload failed.
            self.clear_cluster_snapshot(snapshot)
        self.write_ring_description(snapshot)
        self.write_snapshot_manifest(snapshot)
        if self.backup_schema:
            self.write_schema(snapshot)

    def update_snapshot(self, snapshot):
        """Updates backup data changed since :snapshot was done"""
        logging.info("Update {!r} snapshot".format(snapshot))
        self.start_cluster_backup(snapshot, incremental_backups=True)
        self.upload_cluster_backups(snapshot, incremental_backups=True)
        self.write_ring_description(snapshot)
        if self.backup_schema:
            self.write_schema(snapshot)

    def get_ring_description(self):
        """Capture ``nodetool ring`` output from the first cluster host."""
        with settings(host_string=env.hosts[0]):
            with hide('output'):
                ring_description = self.execute_cmd(self.nodetool_path + ' ring')
        return ring_description

    def get_keyspace_schema(self, keyspace=None):
        """DESCRIBE the whole schema, or a single keyspace, via cqlsh."""
        if self.cqlsh_user and self.cqlsh_password:
            auth = "-u {!s} -p {!s}".format(self.cqlsh_user, self.cqlsh_password)
        else:
            auth = ""
        with settings(host_string=env.hosts[0]):
            with hide('output'):
                cmd = "{!s} {!s} -e 'DESCRIBE SCHEMA;'".format(
                    self.cqlsh_path, auth)
                if keyspace:
                    cmd = "{!s} -k {!s} {!s} -e 'DESCRIBE KEYSPACE {!s};'".format(
                        self.cqlsh_path, keyspace, auth, keyspace)
                output = self.execute_cmd(cmd)
        return output

    def write_on_S3(self, bucket_name, path, content):
        """Store *content* as the object *path* in the given bucket."""
        conn = S3Connection(
            self.aws_access_key_id,
            self.aws_secret_access_key,
            host=self.s3_connection_host)
        bucket = conn.get_bucket(bucket_name, validate=False)
        key = bucket.new_key(path)
        key.set_contents_from_string(content)

    def write_ring_description(self, snapshot):
        """Save the current ring layout next to the snapshot manifest."""
        logging.info("Writing ring description")
        content = self.get_ring_description()
        ring_path = '/'.join([snapshot.base_path, 'ring'])
        self.write_on_S3(snapshot.s3_bucket, ring_path, content)

    def write_schema(self, snapshot):
        """Save CQL schema dump(s) next to the snapshot manifest."""
        if snapshot.keyspaces:
            for ks in snapshot.keyspaces:
                logging.info("Writing schema for keyspace {!s}".format(ks))
                content = self.get_keyspace_schema(ks)
                schema_path = '/'.join(
                    [snapshot.base_path, "schema_{!s}.cql".format(ks)])
                self.write_on_S3(snapshot.s3_bucket, schema_path, content)
        else:
            logging.info("Writing schema for all keyspaces")
            content = self.get_keyspace_schema()
            schema_path = '/'.join([snapshot.base_path, "schema.cql"])
            self.write_on_S3(snapshot.s3_bucket, schema_path, content)

    def write_snapshot_manifest(self, snapshot):
        """Upload the snapshot's JSON manifest to S3."""
        content = snapshot.dump_manifest_file()
        manifest_path = '/'.join([snapshot.base_path, 'manifest.json'])
        self.write_on_S3(snapshot.s3_bucket, manifest_path, content)

    def start_cluster_backup(self, snapshot, incremental_backups=False):
        """Trigger nodetool snapshot/flush on every node in parallel."""
        logging.info("Creating snapshots")
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.node_start_backup, snapshot, incremental_backups)

    def node_start_backup(self, snapshot, incremental_backups):
        """Runs snapshot command on a cassandra node"""
        def hide_exec_cmd(cmd):
            # Suppress fabric's chatter for these bulk commands.
            with hide('running', 'stdout', 'stderr'):
                self.execute_cmd(cmd)
        if incremental_backups:
            backup_command = "%(nodetool)s flush %(keyspace)s %(tables)s"
            if snapshot.keyspaces:
                # flush can only take one keyspace at a time.
                for keyspace in snapshot.keyspaces:
                    cmd = backup_command % dict(
                        nodetool=self.nodetool_path,
                        keyspace=keyspace,
                        tables=snapshot.table or ''
                    )
                    hide_exec_cmd(cmd)
            else:
                # If no keyspace then can't provide a table either.
                cmd = backup_command % dict(
                    nodetool=self.nodetool_path,
                    keyspace='',
                    tables=''
                )
                hide_exec_cmd(cmd)
        else:
            backup_command = "%(nodetool)s snapshot %(table_param)s \
-t %(snapshot)s %(keyspaces)s"
            if snapshot.table:
                # Only one keyspace can be specified along with a column family.
                table_param = "-cf {!s}".format(snapshot.table)
                for keyspace in snapshot.keyspaces:
                    cmd = backup_command % dict(
                        nodetool=self.nodetool_path,
                        table_param=table_param,
                        snapshot=snapshot.name,
                        keyspaces=keyspace
                    )
                    hide_exec_cmd(cmd)
            else:
                cmd = backup_command % dict(
                    nodetool=self.nodetool_path,
                    table_param='',
                    snapshot=snapshot.name,
                    keyspaces=' '.join(snapshot.keyspaces or '')
                )
                hide_exec_cmd(cmd)

    def upload_cluster_backups(self, snapshot, incremental_backups):
        """Upload every node's backup files to S3 in parallel."""
        logging.info("Uploading backups")
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.upload_node_backups, snapshot, incremental_backups)

    def clear_cluster_snapshot(self, snapshot):
        """Delete on-node snapshot files across the cluster in parallel."""
        logging.info("Clearing snapshots")
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.clear_node_snapshot, snapshot)

    def clear_node_snapshot(self, snapshot):
        """Cleans up snapshots from a cassandra node"""
        clear_command = '%(nodetool)s clearsnapshot -t "%(snapshot)s"'
        cmd = clear_command % dict(
            nodetool=self.nodetool_path,
            snapshot=snapshot.name
        )
        self.execute_cmd(cmd)
class SnapshotCollection(object):
    """Lazy view over the snapshots stored under ``base_path`` in an S3 bucket.

    The bucket is only listed on first access; results are cached in
    ``self.snapshots``, sorted newest-first.
    """

    def __init__(
            self, aws_access_key_id,
            aws_secret_access_key, base_path, s3_bucket, s3_connection_host):
        self.s3_connection_host = s3_connection_host
        self.s3_bucket = s3_bucket
        self.base_path = base_path
        # None means "not yet fetched"; populated by _read_s3().
        self.snapshots = None
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key

    def _read_s3(self):
        """Populate self.snapshots from manifest files found on S3 (once)."""
        if self.snapshots:
            return
        conn = S3Connection(self.aws_access_key_id, self.aws_secret_access_key, host=self.s3_connection_host)
        bucket = conn.get_bucket(self.s3_bucket, validate=False)
        self.snapshots = []
        prefix = self.base_path
        if not self.base_path.endswith('/'):
            prefix = "{!s}/".format(self.base_path)
        snap_paths = [snap.name for snap in bucket.list(
            prefix=prefix, delimiter='/')]
        # Remove the root dir from the list since it won't have a manifest file.
        snap_paths = [x for x in snap_paths if x != prefix]
        for snap_path in snap_paths:
            mkey = Key(bucket)
            manifest_path = '/'.join([snap_path, 'manifest.json'])
            mkey.key = manifest_path
            try:
                manifest_data = mkey.get_contents_as_string()
            except S3ResponseError as e:  # manifest.json not found.
                logging.warn("Response: {!r} manifest_path: {!r}".format(
                    e.message, manifest_path))
                continue
            try:
                self.snapshots.append(
                    Snapshot.load_manifest_file(manifest_data, self.s3_bucket))
            except Exception as e:  # Invalid json format.
                logging.error("Parsing manifest.json failed. {!r}".format(
                    e.message))
                continue
        self.snapshots = sorted(self.snapshots, reverse=True)

    def get_snapshot_by_name(self, name):
        """Return the snapshot named *name*, or a falsy value ([]) if absent."""
        # List comprehension instead of ``filter``: on Python 3 a filter
        # object is always truthy and not subscriptable, which broke this.
        matches = [s for s in self if s.name == name]
        return matches and matches[0]

    def get_latest(self):
        """Most recent snapshot (IndexError if the collection is empty)."""
        self._read_s3()
        return self.snapshots[0]

    def get_snapshot_for(self, hosts, keyspaces, table, name):
        """Returns the most recent compatible snapshot (or None)."""
        for snapshot in self:
            if snapshot.hosts != hosts:
                continue
            if snapshot.keyspaces != keyspaces:
                continue
            if snapshot.table != table:
                continue
            if snapshot.name != name:
                continue
            return snapshot

    def __iter__(self):
        self._read_s3()
        return iter(self.snapshots)
fishery_model/gear_library.py | bkuczenski/scoping-gear-losses | 0 | 6621762 | <gh_stars>0
import os
from unit_gears.model_library import GearModelLibrary, MODELS_DIR
from .quantile_catch_effort_intensity import quantile_reg_models
CUSTOM_GEARS = os.path.abspath(os.path.join(os.path.dirname(__file__), 'gear_data'))
LOAD_FAMILIES = (
'Sala 2019', # trawl meta-model
'Deshpande 2020'
)
SAMPLE_FAMILIES = {
'seiners': ['Avadi 2014.json', 'Laissane 2011.json', 'Laso 2017.json', 'Pravin 2016.json', 'Soldo 2019.json'],
'trawlers': ['Hoffman 2009.json', 'Thrane 2006.json', 'Watanabe 2016.json'],
'driftnets': ['Akyol 2012.json'],
'drifting-longlines': ['Gabr 2012.json'],
'set-nets': ['Grimaldo 2019.json'],
}
TAU = ('0.5', '0.6', '0.7', '0.8', '0.9')
def gml_init(tau=TAU):
    """Assemble a GearModelLibrary: load the built-in families, the bundled
    gear data, and build quantile regression models for each tau."""
    library = GearModelLibrary(verbose=False)
    for family in LOAD_FAMILIES:
        library.load_family(MODELS_DIR, family)
    library.load_path(CUSTOM_GEARS, verbose=True)
    for quantile in tau:
        print('Building regression models for tau=%s' % quantile)
        quantile_reg_models(library, tau=quantile)
    return library
| import os
from unit_gears.model_library import GearModelLibrary, MODELS_DIR
from .quantile_catch_effort_intensity import quantile_reg_models
CUSTOM_GEARS = os.path.abspath(os.path.join(os.path.dirname(__file__), 'gear_data'))
LOAD_FAMILIES = (
'Sala 2019', # trawl meta-model
'Deshpande 2020'
)
SAMPLE_FAMILIES = {
'seiners': ['Avadi 2014.json', 'Laissane 2011.json', 'Laso 2017.json', 'Pravin 2016.json', 'Soldo 2019.json'],
'trawlers': ['Hoffman 2009.json', 'Thrane 2006.json', 'Watanabe 2016.json'],
'driftnets': ['Akyol 2012.json'],
'drifting-longlines': ['Gabr 2012.json'],
'set-nets': ['Grimaldo 2019.json'],
}
TAU = ('0.5', '0.6', '0.7', '0.8', '0.9')
def gml_init(tau=TAU):
_gml = GearModelLibrary(verbose=False)
for family in LOAD_FAMILIES:
_gml.load_family(MODELS_DIR, family)
_gml.load_path(CUSTOM_GEARS, verbose=True)
for t in tau:
print('Building regression models for tau=%s' % t)
quantile_reg_models(_gml, tau=t)
return _gml | pt | 0.108206 | # trawl meta-model | 1.903424 | 2 |
roboticstoolbox/models/ETS/GenericSeven.py | Russ76/robotics-toolbox-python | 0 | 6621763 | #!/usr/bin/env python
import numpy as np
from roboticstoolbox.robot.ET import ET
from roboticstoolbox.robot.ETS import ETS
from roboticstoolbox.robot.ERobot import ERobot
from roboticstoolbox.robot.Link import Link
class GenericSeven(ERobot):
    """
    Create model of a generic seven degree-of-freedom robot

    robot = GenericSeven() creates a robot object. This robot is represented
    using the elementary transform sequence (ETS).
    """

    def __init__(self):
        # Every segment is 0.5 m; joints alternate Rz and Ry axes.
        # (Unused unit-conversion locals and dead commented-out code removed.)
        link_length = 0.5

        l0 = Link(ET.tz(link_length) * ET.Rz(), name="link0", parent=None)
        l1 = Link(ETS(ET.Ry()), name="link1", parent=l0)
        l2 = Link(ET.tz(link_length) * ET.Rz(), name="link2", parent=l1)
        l3 = Link(ETS(ET.Ry()), name="link3", parent=l2)
        l4 = Link(ET.tz(link_length) * ET.Rz(), name="link4", parent=l3)
        l5 = Link(ETS(ET.Ry()), name="link5", parent=l4)
        l6 = Link(ET.tx(link_length) * ET.Rz(), name="link6", parent=l5)
        ee = Link(ETS(ET.tz(-link_length)), name="ee", parent=l6)
        elinks = [l0, l1, l2, l3, l4, l5, l6, ee]

        super(GenericSeven, self).__init__(
            elinks, name="Generic Seven", manufacturer="Jesse's Imagination"
        )

        # Named configurations: a 'ready' pose and the all-zero pose.
        self.qr = np.array([0, -0.3, 0, -2.2, 0, 2.0, np.pi / 4])
        self.qz = np.zeros(7)

        self.addconfiguration("qr", self.qr)
        self.addconfiguration("qz", self.qz)
self.addconfiguration("qz", self.qz)
| #!/usr/bin/env python
import numpy as np
from roboticstoolbox.robot.ET import ET
from roboticstoolbox.robot.ETS import ETS
from roboticstoolbox.robot.ERobot import ERobot
from roboticstoolbox.robot.Link import Link
class GenericSeven(ERobot):
"""
Create model of a generic seven degree-of-freedom robot
robot = GenericSeven() creates a robot object. This robot is represented
using the elementary transform sequence (ETS).
"""
def __init__(self):
deg = np.pi / 180
mm = 1e-3
tool_offset = (103) * mm
link_length = 0.5
l0 = Link(ET.tz(link_length) * ET.Rz(), name="link0", parent=None)
l1 = Link(ETS(ET.Ry()), name="link1", parent=l0)
l2 = Link(ET.tz(link_length) * ET.Rz(), name="link2", parent=l1)
l3 = Link(ETS(ET.Ry()), name="link3", parent=l2)
l4 = Link(ET.tz(link_length) * ET.Rz(), name="link4", parent=l3)
l5 = Link(ETS(ET.Ry()), name="link5", parent=l4)
l6 = Link(ET.tx(link_length) * ET.Rz(), name="link6", parent=l5)
ee = Link(ETS(ET.tz(-link_length)), name="ee", parent=l6)
elinks = [l0, l1, l2, l3, l4, l5, l6, ee]
# elinks = [l0, l1, l2, l3, l4, l5, ee]
super(GenericSeven, self).__init__(
elinks, name="Generic Seven", manufacturer="Jesse's Imagination"
)
self.qr = np.array([0, -0.3, 0, -2.2, 0, 2.0, np.pi / 4])
self.qz = np.zeros(7)
self.addconfiguration("qr", self.qr)
self.addconfiguration("qz", self.qz)
| en | 0.737259 | #!/usr/bin/env python Create model of a generic seven degree-of-freedom robot robot = GenericSeven() creates a robot object. This robot is represented using the elementary transform sequence (ETS). # elinks = [l0, l1, l2, l3, l4, l5, ee] | 2.52197 | 3 |
zmei/views.py | zmei-framework/zmei-utils | 1 | 6621764 | from django.views.generic.base import View, ContextMixin, TemplateResponseMixin
class ImproperlyConfigured(Exception):
    """Configuration error marker (mirrors Django's exception of the same
    name; not raised anywhere in the visible code)."""
    pass
class _Data(object):
    """Attribute-bag over a dict; ``+`` merges in another mapping and
    returns a new _Data (right-hand side wins on key clashes)."""

    def __init__(self, data=None):
        self.__dict__.update(data or {})

    def __add__(self, data):
        merged = dict(self.__dict__)
        merged.update(data)
        return _Data(merged)
class ZmeiDataViewMixin(ContextMixin, View):
    """View mixin that computes a per-request data dict once (cached on the
    instance) and merges it into the template context."""

    _data = None

    def get_data(self, url, request, inherited):
        """Hook for subclasses: return extra context values."""
        return {}

    def _get_data(self):
        # Compute lazily; a falsy (e.g. empty) result is recomputed on the
        # next call, matching the original truthiness-based caching.
        if not self._data:
            url_obj = type('url', (object,), self.kwargs)
            self._data = self.get_data(url=url_obj, request=self.request, inherited=False)
        return self._data

    def get_context_data(self, **kwargs):
        merged = dict(super().get_context_data(**kwargs))
        merged.update(self._get_data())
        return merged
class CrudView(TemplateResponseMixin):
    # A CRUD sub-view does not render itself: it hands its context back to
    # the multiplexer, which embeds it in the page-level context.
    def render_to_response(self, context, **response_kwargs):
        return context
class CrudMultiplexerView(TemplateResponseMixin, ContextMixin, View):
    """Hosts several CrudView sub-views on one page.

    GET renders every sub-view's context under ``context['crud']``;
    POST is routed to the single sub-view named by the ``_form`` field.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.crud_views = {}
        # Instantiate each configured CRUD view once, keyed by its name.
        for cls in self.get_crud_views():
            crud = cls(*args, **kwargs)
            self.crud_views[crud.name] = crud

    def get_crud_views(self):
        """Subclasses override to list the CrudView classes to multiplex."""
        return ()

    def get(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        context['crud'] = {}
        for crud in self.crud_views.values():
            self.populate_crud(args, crud, kwargs, request)
            # CrudView.render_to_response returns its context dict.
            context['crud'][crud.name] = crud.get(request, *args, **kwargs)
        return self.render_to_response(context)

    def populate_crud(self, args, crud, kwargs, request):
        # Mirror request state onto the child view (as Django's View.setup
        # would for a directly-dispatched view).
        crud.request = request
        crud.args = args
        crud.kwargs = kwargs

    def post(self, request, *args, **kwargs):
        form_name = request.POST.get('_form')
        crud = self.crud_views.get(form_name)
        # BUG FIX: guard *before* populate_crud — an unknown '_form' used to
        # crash with AttributeError on None instead of returning 405.
        if not crud:
            return self.http_method_not_allowed(request, *args, **kwargs)
        self.populate_crud(args, crud, kwargs, request)
        return crud.post(request, *args, **kwargs)
| from django.views.generic.base import View, ContextMixin, TemplateResponseMixin
class ImproperlyConfigured(Exception):
pass
class _Data(object):
def __init__(self, data=None):
self.__dict__.update(data or {})
def __add__(self, data):
return _Data({**self.__dict__, **data})
class ZmeiDataViewMixin(ContextMixin, View):
_data = None
def get_data(self, url, request, inherited):
return {}
def _get_data(self):
if not self._data:
self._data = self.get_data(
url=type('url', (object,), self.kwargs),
request=self.request,
inherited=False
)
return self._data
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
return {**context_data, **self._get_data()}
class CrudView(TemplateResponseMixin):
def render_to_response(self, context, **response_kwargs):
return context
class CrudMultiplexerView(TemplateResponseMixin, ContextMixin, View):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.crud_views = {}
for cls in self.get_crud_views():
crud = cls(*args, **kwargs)
self.crud_views[crud.name] = crud
def get_crud_views(self):
return ()
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['crud'] = {}
for crud in self.crud_views.values():
self.populate_crud(args, crud, kwargs, request)
context['crud'][crud.name] = crud.get(request, *args, **kwargs)
return self.render_to_response(context)
def populate_crud(self, args, crud, kwargs, request):
crud.request = request
crud.args = args
crud.kwargs = kwargs
def post(self, request, *args, **kwargs):
form_name = request.POST.get('_form')
crud = self.crud_views.get(form_name)
self.populate_crud(args, crud, kwargs, request)
if not crud:
return self.http_method_not_allowed(request, *args, **kwargs)
return crud.post(request, *args, **kwargs)
| none | 1 | 1.925082 | 2 | |
setup.py | brightway-lca/bw_io | 0 | 6621765 | from setuptools import setup, find_packages
from os import path

# Absolute directory of this setup.py, used to locate the README below.
here = path.abspath(path.dirname(__file__))

requirements = [
    "bw_projects",
    'appdirs',
    'peewee',
    'stats_arrays',
    'wrapt',
]

test_requirements = ['pytest']

# Read the version tuple from bw_io/version.py without importing bw_io
# (importing would pull in its runtime dependencies at build time).
v_temp = {}
with open("bw_io/version.py") as fp:
    exec(fp.read(), v_temp)
version = ".".join((str(x) for x in v_temp['version']))

setup(
    name='bw_io',
    version=version,
    packages=find_packages(exclude=['tests']),
    author='<NAME>',
    author_email='<EMAIL>',
    license="NewBSD 3-clause; LICENSE",
    # Only if you have non-python data (CSV, etc.). Might need to change the directory name as well.
    # package_data={'your_name_here': package_files(os.path.join('bw_io', 'data'))},
    install_requires=requirements,
    tests_require=requirements + test_requirements,
    url="https://github.com/brightway-lca/bw_io",
    long_description_content_type='text/markdown',
    long_description=open(path.join(here, "README.md")).read(),
    description='I/O functions for Brightway framework',
    classifiers=[
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Mathematics',
    ],
)
| from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
requirements = [
"bw_projects",
'appdirs',
'peewee',
'stats_arrays',
'wrapt',
]
test_requirements = ['pytest']
v_temp = {}
with open("bw_io/version.py") as fp:
exec(fp.read(), v_temp)
version = ".".join((str(x) for x in v_temp['version']))
setup(
name='bw_io',
version=version,
packages=find_packages(exclude=['tests']),
author='<NAME>',
author_email='<EMAIL>',
license="NewBSD 3-clause; LICENSE",
# Only if you have non-python data (CSV, etc.). Might need to change the directory name as well.
# package_data={'your_name_here': package_files(os.path.join('bw_io', 'data'))},
install_requires=requirements,
tests_require=requirements + test_requirements,
url="https://github.com/brightway-lca/bw_io",
long_description_content_type='text/markdown',
long_description=open(path.join(here, "README.md")).read(),
description='I/O functions for Brightway framework',
classifiers=[
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
],
)
| en | 0.711163 | # Only if you have non-python data (CSV, etc.). Might need to change the directory name as well. # package_data={'your_name_here': package_files(os.path.join('bw_io', 'data'))}, | 1.696367 | 2 |
homehub/api/management/commands/reboot_system.py | stricoff92/homehub | 0 | 6621766 |
import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """``manage.py reboot_system``: schedule a host reboot ~2 seconds out."""
    def handle(self, *args, **kwargs):
        # This is pretty janky, ok it's very janky...
        # NOTE(review): shell string via os.system; no user input is
        # interpolated, but subprocess would be the cleaner tool here.
        os.system("nohup sleep 2 && reboot > /dev/null & disown")
|
import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **kwargs):
# This is pretty janky, ok it's very janky...
os.system("nohup sleep 2 && reboot > /dev/null & disown")
| en | 0.969085 | # This is pretty janky, ok it's very janky... | 1.986362 | 2 |
widgets/gui_text.py | ilil01/qdt | 29 | 6621767 | __all__ = [
"GUIText"
, "READONLY"
]
# based on http://tkinter.unpythonic.net/wiki/ReadOnlyText
from six.moves.tkinter import (
Text,
NORMAL
)
READONLY = "readonly"
from sys import (
version_info
)
try:
if version_info[0] < 3 or version_info[0] == 3 and version_info[1] <= 5:
from idlelib.WidgetRedirector import (
WidgetRedirector
)
else:
from idlelib.redirector import (
WidgetRedirector
)
except ImportError as e:
raise ImportError("Cannot import from idlelib. Try:"
" sudo apt-get install idle-python%d.%d" % version_info[:2]
)
def _break(*_args, **_kwargs):
    """Tk event handler that swallows the event by returning the 'break'
    sentinel (stops further Tk event processing)."""
    return "break"
class GUIText(Text):
    """
    TODO:
        * READONLY state support for config, configure, _configure e.t.c.
        * returning READONLY by config, configure, _configure e.t.c.
        * switching back to NORMAL, DISABLED
    """
    def __init__(self, master, **kw):
        # Translate the pseudo-state READONLY into Tk's NORMAL, remembering
        # that interactive editing must be blocked below.
        read_only = False
        try:
            state = kw["state"]
        except:
            pass
        else:
            if state == READONLY:
                read_only = True
                kw["state"] = NORMAL
        Text.__init__(self, master, **kw)
        self.redirector = WidgetRedirector(self)
        if read_only:
            self.__read_only = True
            """ Note that software editing is still possible by calling those
            "insert" and "delete". """
            # Redirect widget-level insert/delete to a handler returning
            # "break", so keyboard/mouse edits are silently ignored.
            self.insert = self.redirector.register("insert", _break)
            self.delete = self.redirector.register("delete", _break)
| __all__ = [
"GUIText"
, "READONLY"
]
# based on http://tkinter.unpythonic.net/wiki/ReadOnlyText
from six.moves.tkinter import (
Text,
NORMAL
)
READONLY = "readonly"
from sys import (
version_info
)
try:
if version_info[0] < 3 or version_info[0] == 3 and version_info[1] <= 5:
from idlelib.WidgetRedirector import (
WidgetRedirector
)
else:
from idlelib.redirector import (
WidgetRedirector
)
except ImportError as e:
raise ImportError("Cannot import from idlelib. Try:"
" sudo apt-get install idle-python%d.%d" % version_info[:2]
)
def _break(*args, **kw):
return "break"
class GUIText(Text):
"""
TODO:
* READONLY state support for config, configure, _configure e.t.c.
* returning READONLY by config, configure, _configure e.t.c.
* switching back to NORMAL, DISABLED
"""
def __init__(self, master, **kw):
read_only = False
try:
state = kw["state"]
except:
pass
else:
if state == READONLY:
read_only = True
kw["state"] = NORMAL
Text.__init__(self, master, **kw)
self.redirector = WidgetRedirector(self)
if read_only:
self.__read_only = True
""" Note that software editing is still possible by calling those
"insert" and "delete". """
self.insert = self.redirector.register("insert", _break)
self.delete = self.redirector.register("delete", _break)
| en | 0.711666 | # based on http://tkinter.unpythonic.net/wiki/ReadOnlyText TODO: * READONLY state support for config, configure, _configure e.t.c. * returning READONLY by config, configure, _configure e.t.c. * switching back to NORMAL, DISABLED Note that software editing is still possible by calling those "insert" and "delete". | 2.610134 | 3 |
hw6/6.2.py | ArtemNikolaev/gb-hw | 0 | 6621768 | <gh_stars>0
# https://github.com/ArtemNikolaev/gb-hw/issues/39
class Road:
    """Rectangular road surface; mass() gives the total asphalt weight."""

    def __init__(self, length, width):
        self._length = length
        self._width = width

    def mass(self, as_mass, as_height):
        """Total mass = area * unit asphalt mass * layer height."""
        area = self._length * self._width
        return area * as_mass * as_height
road = Road(20, 5000)
print(road.mass(25, 5))
| # https://github.com/ArtemNikolaev/gb-hw/issues/39
class Road:
def __init__(self, length, width):
self._length = length
self._width = width
def mass(self, as_mass, as_height):
return self._length * self._width * as_mass * as_height
road = Road(20, 5000)
print(road.mass(25, 5)) | en | 0.623167 | # https://github.com/ArtemNikolaev/gb-hw/issues/39 | 3.548349 | 4 |
references/video_classification/.ipynb_checkpoints/timecycle-checkpoint.py | ajabri/vision | 1 | 6621769 | import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.ndimage.filters import gaussian_filter
import torchvision
import resnet as resnet3d
import resnet2d
import itertools
import time
import numpy as np
import cv2
import visdom
import utils
from matplotlib import cm
color = cm.get_cmap('winter')
class Identity(nn.Module):
    """No-op module: constructor arguments are accepted and ignored, and
    forward() returns its input unchanged."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, x):
        return x
def unfold_time(x, T):
    """Split the leading (N*T) batch dim into separate (N, T) dims."""
    batch = int(x.shape[0] / T)
    return x.view(batch, T, *x.shape[1:])
def fold_time(x):
    """Merge the leading (N, T) dims back into a single (N*T) batch dim."""
    n, t = x.shape[0], x.shape[1]
    return x.view(n * t, *x.shape[2:])
class UnfoldTime(nn.Module):
    """Module form of unfold_time with a fixed clip length T."""

    def __init__(self, T):
        super(UnfoldTime, self).__init__()
        self.T = T

    def forward(self, x):
        batch = int(x.shape[0] / self.T)
        return x.view(batch, self.T, *x.shape[1:])
class FoldTime(nn.Module):
    """Module form of fold_time; T is kept for symmetry with UnfoldTime."""

    def __init__(self, T):
        super(FoldTime, self).__init__()
        self.T = T

    def forward(self, x):
        n, t = x.shape[0], x.shape[1]
        return x.view(n * t, *x.shape[2:])
class TimeCycle(nn.Module):
def __init__(self, args=None):
    super(TimeCycle, self).__init__()

    # self.resnet = resnet3d.r3d_18(pretrained=False)
    self.resnet = resnet3d.r2d_10()
    # self.resnet = resnet3d.r2d_18(pretrained=True)

    # Strip the classifier head; only convolutional features are used.
    self.resnet.fc, self.resnet.avgpool, self.resnet.layer4 = None, None, None

    self.infer_dims()
    # self.resnet_nchan = self.resnet.

    # Two 1x1x1-conv projection heads on top of the backbone features.
    self.selfsim_head = self.make_head([self.enc_hid_dim, 2*self.enc_hid_dim, self.enc_hid_dim])
    self.context_head = self.make_head([self.enc_hid_dim, 2*self.enc_hid_dim, self.enc_hid_dim])

    # assuming no fc pre-training
    for m in self.modules():
        if isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.zero_()

    # self.kldv = torch.nn.KLDivLoss(reduction="batchmean")
    self.kldv = torch.nn.KLDivLoss(reduction="batchmean")
    self.xent = torch.nn.CrossEntropyLoss(reduction="none")

    # Sigma for the Gaussian-smoothed targets; cache of built targets.
    self.target_temp = 1
    self._targets = {}

    # Loss weights / regularization knobs come from the CLI args when given.
    if args is not None:
        self.kldv_coef = args.kldv_coef
        self.xent_coef = args.xent_coef
        self.zero_diagonal = args.zero_diagonal
        self.dropout_rate = args.dropout
    else:
        self.kldv_coef = 0
        self.xent_coef = 0
        self.zero_diagonal = 0
        self.dropout_rate = 0

    self.dropout = torch.nn.Dropout(p=self.dropout_rate, inplace=False)

    # NOTE(review): opens a live Visdom connection (port 8095) at model
    # construction time -- confirm this is intended outside training runs.
    self.viz = visdom.Visdom(port=8095, env='%s_%s' % (args.name if args is not None else 'test', ''))  #int(time.time())))
    self.viz.close()
def infer_dims(self):
# if '2D' in str(type(self.resnet.conv1)):
dummy = torch.Tensor(1, 3, 1, 224, 224)
# else:
# dummy = torch.Tensor(1, 3, 224, 224)
dummy_out = self.resnet(dummy)
self.enc_hid_dim = dummy_out.shape[1]
# import pdb; pdb.set_trace()
def make_head(self, dims):
head = []
for d1, d2 in zip(dims, dims[1:]):
h = nn.Conv3d(d1, d2, kernel_size=1, bias=True)
nn.init.kaiming_normal_(h.weight, mode='fan_out', nonlinearity='relu')
head += [h, nn.LeakyReLU(0.1)]
head = nn.Sequential(*head)
return head
def make_smooth_target_2d(self, H, W):
import time
t1 = time.time()
I = torch.eye(H*W).float()
Is = []
for _I in I:
_I = gaussian_filter(_I.view(H, W).numpy(), sigma=self.target_temp)
_I = F.softmax(torch.from_numpy(_I).log().view(-1))
Is.append(_I)
I = torch.stack(Is)
print('made target ', H, W, time.time()-t1)
return I
def compute_affinity(self, x1, x2, do_dropout=True):
N, C, T, H, W = x1.shape
# assert x1.shape == x2.shape
# assuming xs: N, C, 1, H, W
x1 = x1.transpose(3, 4).contiguous() # for the inlier counter
x1_flat = x1.view(x1.size(0), x1.size(1), -1)
x1_flat = x1_flat.transpose(1, 2)
x2_flat = x2.transpose(3, 4).contiguous().view(x2.size(0), x2.size(1), -1)
# import pdb; pdb.set_trace()
A = torch.matmul(x1_flat, x2_flat)
A = torch.div(A, C**0.5)
# A = torch.div(A, 1/C**0.5)
if do_dropout:
x1_flat, x2_flat = F.dropout(x1_flat, p=0.5), F.dropout(x2_flat, p=0.5)
# import pdb; pdb.set_trace()
# if self.dropout_rate > 0:
# A = self.dropout(A)
if self.zero_diagonal:
A[:, torch.eye(A.shape[1]).long().cuda()] = 0
# A =
# import pdb; pdb.set_trace()
# A12 = A.view(A.size(0), 1, H * H, W, W)
# A21 = A.view(A.size(0), 1, H, H, W * W)
# A12 = F.softmax(A, dim=2)
# A21 = F.softmax(A.transpose(1, 2), dim=2)
A1, A2 = A, A.transpose(1, 2).clone()
if do_dropout:
A1, A2 = self.dropout(A1), self.dropout(A2)
# A1, A2 = self.dropout(A), self.dropout(A.transpose(1, 2))
A1 = F.softmax(A1, dim=2)
A2 = F.softmax(A2, dim=2)
AA = torch.matmul(A2, A1)
# import pdb; pdb.set_trace()
log_AA = torch.log(AA + 1e-20)
return A, AA, log_AA
def forward_affinity(self, x1, x2, encode=False):
'''
For computing similarity of things in X1 w.r.t X2
As in, will return (n x H1*W1 x H2 x W2) sized affinity object
'''
if encode:
x1 = self.resnet(x1)
x2 = self.resnet(x2)
N, C, T, H1, W1 = x1.shape
H2, W2 = x2.shape[-2:]
A, AA, log_AA = self.compute_affinity(x1, x2)
A = A.view(*A.shape[:2], H2, W2)
return A
def forward_encoder(self, x):
return self.resnet(x)
def forward(self, x, just_feats=False):
ff = self.resnet(x)
ff = self.selfsim_head(ff)
# ff = F.normalize(ff, p=2, dim=1)
N, C, T, _H, _W = ff.shape
_h, _w = _H // 4, _W // 4
xents = torch.tensor([0.]).cuda()
kldvs = torch.tensor([0.]).cuda()
accur = torch.tensor([0.]).cuda()
L = len(list(itertools.combinations(range(T), 2)))
for (t1, t2) in itertools.combinations(range(T), 2):
x1, x2 = ff[:, :, t1:t1+1, _h:-_h, _w:-_w].contiguous(), ff[:, :, t2:t2+1, _h:-_h, _w:-_w].contiguous()
#ff[:, :, t2:t2+1, 2*_h:-2*_h, 2*_w:-2*_w].contiguous()
# x1, x2 = ff[:, :, t1:t1+1, _h:-_h, _w:-_w].contiguous(), ff[:, :, t2:t2+1, 2*_h:-2*_h, 2*_w:-2*_w].contiguous()
# x1, x2 = ff[:, :, t1:t1+1, _H//2-_h:_H//2+_h, _W//2-_w:_W//2+_w].contiguous(), \
# ff[:, :, t2:t2+1, _H//2-_h:_H//2+_h, _W//2-_w:_W//2+_w].contiguous()
H, W = x2.shape[-2:]
# import pdb; pdb.set_trace()
if H*W not in self._targets:
self._targets[H*W] = self.make_smooth_target_2d(H, W)
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2)
target = torch.arange(AA.shape[1])[None].repeat(AA.shape[0], 1)
target = (target).view(-1).cuda()
# import pdb; pdb.set_trace()
log_AA = log_AA.view(-1, log_AA.shape[1])
# Cross Entropy
if self.xent_coef > 0:
_xent_loss = self.xent(log_AA, target)
xents += _xent_loss.mean()
# import pdb; pdb.set_trace()
# print((torch.argmax(log_AA, dim=-1) == target).sum())
accur += (torch.argmax(log_AA, dim=-1) == target).float().mean()
# KL Div with Smoothed 2D Targets
if self.kldv_coef > 0:
I = self._targets[H*W][None].repeat(N, 1, 1).view(-1, A.shape[-1]).cuda()
kldv_loss = self.kldv(log_AA, I)
# print(kldv_loss, log_AA.min(), AA.min(), A.min())
kldvs += kldv_loss
# import pdb; pdb.set_trace()
# self.viz.images()
# _AA = AA.view(-1, H * W, H, W)
if np.random.random() < 0.003:
self.viz.text('%s %s' % (t1, t2), opts=dict(height=1, width=10000))
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2, do_dropout=False)
log_AA = log_AA.view(-1, log_AA.shape[1])
_xent_loss = self.xent(log_AA, target)
_AA = AA.view(-1, H * W, H, W)
_A = A.view(*A.shape[:2], x1.shape[-1], -1)
u, v = utils.compute_flow(_A[0:1])
flows = torch.stack([u, v], dim=-1).cpu().numpy()
flows = utils.draw_hsv(flows[0])
# import pdb; pdb.set_trace()
flows = cv2.resize(flows, (256, 256))
self.viz.image((flows).transpose(2, 0, 1))
# flows = [cv2.resize(flow.clip(min=0).astype(np.uint8), (256, 256)) for flow in flows]
# self.viz.image((flows[0]).transpose(2, 0, 1))
# import time
# time.sleep(0.1)
# import pdb; pdb.set_trace()
xx = _xent_loss[:H*W]
xx -= xx.min()
xx /= xx.max()
# xx = color(xx.detach().cpu().numpy())
_img = torch.stack([x[0, :, t1], x[0, :, t2]])
_img -= _img.min()
_img /= _img.max()
self.viz.images(_img)
pca_ff = utils.pca_feats(torch.stack([ff[0, :, t1], ff[0, :, t2]]).detach().cpu())
pca_ff = utils.make_gif(pca_ff, outname=None)
self.viz.images(pca_ff.transpose(0, -1, 1, 2))
img_grid = [cv2.resize(aa, (50,50), interpolation=cv2.INTER_NEAREST)[None] for aa in _AA[0, :, :, :, None].cpu().detach().numpy()]
img_grid = [img_grid[_].repeat(3, 0) * np.array(color(xx[_].item()))[:3, None, None] for _ in range(H*W)]
img_grid = [img_grid[_] / img_grid[_].max() for _ in range(H*W)]
img_grid = torch.from_numpy(np.array(img_grid))
img_grid = torchvision.utils.make_grid(img_grid, nrow=H, padding=1, pad_value=1)
# img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
self.viz.images(img_grid)
return ff, self.xent_coef * (xents/L), self.kldv_coef * (kldvs/L), accur/L
# return dict(x=ff, xent_loss=xents, kldv_loss=kldvs)
def forward2(self, x):
iH, iW = x.shape[-2:]
_ih, _iw = iH // 6, iW // 6
base, query = x[:, :, 0:2], x[:, :, -1:, iH//2-_ih:iH//2+_ih, iW//2-_iw:iW//2+_iw]
# import pdb; pdb.set_trace()
X1, X2 = self.resnet(base), self.resnet(query)
# ff = self.selfsim_head(ff)
# ff = F.normalize(ff, p=2, dim=1)
N, C = X1.shape[:2]
# _h, _w = _H // 10, _W // 10
xents = torch.tensor([0.]).cuda()
kldvs = torch.tensor([0.]).cuda()
accur = torch.tensor([0.]).cuda()
# L = len(list(itertools.combinations(range(T), 2)))
# for (t1, t2) in itertools.combinations(range(T), 2):
L = 1
for _ in range(L):
x1, x2 = X1[:, :, 0:1], X2
H, W = x2.shape[-2:]
if H*W not in self._targets:
self._targets[H*W] = self.make_smooth_target_2d(H, W)
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2)
target = torch.arange(AA.shape[1])[None].repeat(AA.shape[0], 1)
target = (target).view(-1).cuda()
# import pdb; pdb.set_trace()
log_AA = log_AA.view(-1, log_AA.shape[1])
# Cross Entropy
if self.xent_coef > 0:
_xent_loss = self.xent(log_AA, target)
xents += _xent_loss.mean()
# import pdb; pdb.set_trace()
# print((torch.argmax(log_AA, dim=-1) == target).sum())
accur += (torch.argmax(log_AA, dim=-1) == target).float().mean()
# KL Div with Smoothed 2D Targets
if self.kldv_coef > 0:
I = self._targets[H*W][None].repeat(N, 1, 1).view(-1, A.shape[-1]).cuda()
kldv_loss = self.kldv(log_AA, I)
# print(kldv_loss, log_AA.min(), AA.min(), A.min())
kldvs += kldv_loss
# import pdb; pdb.set_trace()
# self.viz.images()
# _AA = AA.view(-1, H * W, H, W)
if np.random.random() < 0.01:
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2, do_dropout=False)
log_AA = log_AA.view(-1, log_AA.shape[1])
_xent_loss = self.xent(log_AA, target)
_AA = AA.view(-1, H * W, H, W)
import pdb; pdb.set_trace()
xx = _xent_loss[:H*W]
xx -= xx.min()
xx /= xx.max()
# xx = color(xx.detach().cpu().numpy())
_img = torch.stack([x[0, :, 0], x[0, :, -1]])
_img -= _img.min()
_img /= _img.max()
self.viz.text('%s %s' % (0, -1), opts=dict(height=1, width=10000))
self.viz.images(_img)
# import pdb; pdb.set_trace()
pca_ff = utils.pca_feats(X1[0, :].transpose(0, 1).detach().cpu())
pca_ff = utils.make_gif(pca_ff, outname=None)
self.viz.images(pca_ff.transpose(0, -1, 1, 2))
pca_ff = utils.pca_feats(X2[0, :].transpose(0, 1).detach().cpu())
pca_ff = utils.make_gif(pca_ff, outname=None)
self.viz.image(pca_ff.transpose(0, -1, 1, 2)[0])
img_grid = [cv2.resize(aa, (50,50), interpolation=cv2.INTER_NEAREST)[None] for aa in _AA[0, :, :, :, None].cpu().detach().numpy()]
img_grid = [img_grid[_].repeat(3, 0) * np.array(color(xx[_].item()))[:3, None, None] for _ in range(H*W)]
img_grid = [img_grid[_] / img_grid[_].max() for _ in range(H*W)]
img_grid = torch.from_numpy(np.array(img_grid))
img_grid = torchvision.utils.make_grid(img_grid, nrow=H, padding=1, pad_value=1)
# img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
self.viz.images(img_grid)
return x1, self.xent_coef * (xents/L), self.kldv_coef * (kldvs/L), accur/L
| import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.ndimage.filters import gaussian_filter
import torchvision
import resnet as resnet3d
import resnet2d
import itertools
import time
import numpy as np
import cv2
import visdom
import utils
from matplotlib import cm
color = cm.get_cmap('winter')
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x):
return x
def unfold_time(x, T):
return x.view(int(x.shape[0] / T), T, *x.shape[1:])
def fold_time(x):
return x.view(x.shape[0] * x.shape[1], *x.shape[2:])
class UnfoldTime(nn.Module):
def __init__(self, T):
super(UnfoldTime, self).__init__()
self.T = T
def forward(self, x):
return x.view(int(x.shape[0] / self.T), self.T, *x.shape[1:])
class FoldTime(nn.Module):
def __init__(self, T):
super(FoldTime, self).__init__()
self.T = T
def forward(self, x):
return x.view(x.shape[0] * x.shape[1], *x.shape[2:])
class TimeCycle(nn.Module):
def __init__(self, args=None):
super(TimeCycle, self).__init__()
# self.resnet = resnet3d.r3d_18(pretrained=False)
self.resnet = resnet3d.r2d_10()
# self.resnet = resnet3d.r2d_18(pretrained=True)
self.resnet.fc, self.resnet.avgpool, self.resnet.layer4 = None, None, None
self.infer_dims()
# self.resnet_nchan = self.resnet.
self.selfsim_head = self.make_head([self.enc_hid_dim, 2*self.enc_hid_dim, self.enc_hid_dim])
self.context_head = self.make_head([self.enc_hid_dim, 2*self.enc_hid_dim, self.enc_hid_dim])
# assuming no fc pre-training
for m in self.modules():
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
# self.kldv = torch.nn.KLDivLoss(reduction="batchmean")
self.kldv = torch.nn.KLDivLoss(reduction="batchmean")
self.xent = torch.nn.CrossEntropyLoss(reduction="none")
self.target_temp = 1
self._targets = {}
if args is not None:
self.kldv_coef = args.kldv_coef
self.xent_coef = args.xent_coef
self.zero_diagonal = args.zero_diagonal
self.dropout_rate = args.dropout
else:
self.kldv_coef = 0
self.xent_coef = 0
self.zero_diagonal = 0
self.dropout_rate = 0
self.dropout = torch.nn.Dropout(p=self.dropout_rate, inplace=False)
self.viz = visdom.Visdom(port=8095, env='%s_%s' % (args.name if args is not None else 'test', '')) #int(time.time())))
self.viz.close()
def infer_dims(self):
# if '2D' in str(type(self.resnet.conv1)):
dummy = torch.Tensor(1, 3, 1, 224, 224)
# else:
# dummy = torch.Tensor(1, 3, 224, 224)
dummy_out = self.resnet(dummy)
self.enc_hid_dim = dummy_out.shape[1]
# import pdb; pdb.set_trace()
def make_head(self, dims):
head = []
for d1, d2 in zip(dims, dims[1:]):
h = nn.Conv3d(d1, d2, kernel_size=1, bias=True)
nn.init.kaiming_normal_(h.weight, mode='fan_out', nonlinearity='relu')
head += [h, nn.LeakyReLU(0.1)]
head = nn.Sequential(*head)
return head
def make_smooth_target_2d(self, H, W):
import time
t1 = time.time()
I = torch.eye(H*W).float()
Is = []
for _I in I:
_I = gaussian_filter(_I.view(H, W).numpy(), sigma=self.target_temp)
_I = F.softmax(torch.from_numpy(_I).log().view(-1))
Is.append(_I)
I = torch.stack(Is)
print('made target ', H, W, time.time()-t1)
return I
def compute_affinity(self, x1, x2, do_dropout=True):
N, C, T, H, W = x1.shape
# assert x1.shape == x2.shape
# assuming xs: N, C, 1, H, W
x1 = x1.transpose(3, 4).contiguous() # for the inlier counter
x1_flat = x1.view(x1.size(0), x1.size(1), -1)
x1_flat = x1_flat.transpose(1, 2)
x2_flat = x2.transpose(3, 4).contiguous().view(x2.size(0), x2.size(1), -1)
# import pdb; pdb.set_trace()
A = torch.matmul(x1_flat, x2_flat)
A = torch.div(A, C**0.5)
# A = torch.div(A, 1/C**0.5)
if do_dropout:
x1_flat, x2_flat = F.dropout(x1_flat, p=0.5), F.dropout(x2_flat, p=0.5)
# import pdb; pdb.set_trace()
# if self.dropout_rate > 0:
# A = self.dropout(A)
if self.zero_diagonal:
A[:, torch.eye(A.shape[1]).long().cuda()] = 0
# A =
# import pdb; pdb.set_trace()
# A12 = A.view(A.size(0), 1, H * H, W, W)
# A21 = A.view(A.size(0), 1, H, H, W * W)
# A12 = F.softmax(A, dim=2)
# A21 = F.softmax(A.transpose(1, 2), dim=2)
A1, A2 = A, A.transpose(1, 2).clone()
if do_dropout:
A1, A2 = self.dropout(A1), self.dropout(A2)
# A1, A2 = self.dropout(A), self.dropout(A.transpose(1, 2))
A1 = F.softmax(A1, dim=2)
A2 = F.softmax(A2, dim=2)
AA = torch.matmul(A2, A1)
# import pdb; pdb.set_trace()
log_AA = torch.log(AA + 1e-20)
return A, AA, log_AA
def forward_affinity(self, x1, x2, encode=False):
'''
For computing similarity of things in X1 w.r.t X2
As in, will return (n x H1*W1 x H2 x W2) sized affinity object
'''
if encode:
x1 = self.resnet(x1)
x2 = self.resnet(x2)
N, C, T, H1, W1 = x1.shape
H2, W2 = x2.shape[-2:]
A, AA, log_AA = self.compute_affinity(x1, x2)
A = A.view(*A.shape[:2], H2, W2)
return A
def forward_encoder(self, x):
return self.resnet(x)
def forward(self, x, just_feats=False):
ff = self.resnet(x)
ff = self.selfsim_head(ff)
# ff = F.normalize(ff, p=2, dim=1)
N, C, T, _H, _W = ff.shape
_h, _w = _H // 4, _W // 4
xents = torch.tensor([0.]).cuda()
kldvs = torch.tensor([0.]).cuda()
accur = torch.tensor([0.]).cuda()
L = len(list(itertools.combinations(range(T), 2)))
for (t1, t2) in itertools.combinations(range(T), 2):
x1, x2 = ff[:, :, t1:t1+1, _h:-_h, _w:-_w].contiguous(), ff[:, :, t2:t2+1, _h:-_h, _w:-_w].contiguous()
#ff[:, :, t2:t2+1, 2*_h:-2*_h, 2*_w:-2*_w].contiguous()
# x1, x2 = ff[:, :, t1:t1+1, _h:-_h, _w:-_w].contiguous(), ff[:, :, t2:t2+1, 2*_h:-2*_h, 2*_w:-2*_w].contiguous()
# x1, x2 = ff[:, :, t1:t1+1, _H//2-_h:_H//2+_h, _W//2-_w:_W//2+_w].contiguous(), \
# ff[:, :, t2:t2+1, _H//2-_h:_H//2+_h, _W//2-_w:_W//2+_w].contiguous()
H, W = x2.shape[-2:]
# import pdb; pdb.set_trace()
if H*W not in self._targets:
self._targets[H*W] = self.make_smooth_target_2d(H, W)
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2)
target = torch.arange(AA.shape[1])[None].repeat(AA.shape[0], 1)
target = (target).view(-1).cuda()
# import pdb; pdb.set_trace()
log_AA = log_AA.view(-1, log_AA.shape[1])
# Cross Entropy
if self.xent_coef > 0:
_xent_loss = self.xent(log_AA, target)
xents += _xent_loss.mean()
# import pdb; pdb.set_trace()
# print((torch.argmax(log_AA, dim=-1) == target).sum())
accur += (torch.argmax(log_AA, dim=-1) == target).float().mean()
# KL Div with Smoothed 2D Targets
if self.kldv_coef > 0:
I = self._targets[H*W][None].repeat(N, 1, 1).view(-1, A.shape[-1]).cuda()
kldv_loss = self.kldv(log_AA, I)
# print(kldv_loss, log_AA.min(), AA.min(), A.min())
kldvs += kldv_loss
# import pdb; pdb.set_trace()
# self.viz.images()
# _AA = AA.view(-1, H * W, H, W)
if np.random.random() < 0.003:
self.viz.text('%s %s' % (t1, t2), opts=dict(height=1, width=10000))
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2, do_dropout=False)
log_AA = log_AA.view(-1, log_AA.shape[1])
_xent_loss = self.xent(log_AA, target)
_AA = AA.view(-1, H * W, H, W)
_A = A.view(*A.shape[:2], x1.shape[-1], -1)
u, v = utils.compute_flow(_A[0:1])
flows = torch.stack([u, v], dim=-1).cpu().numpy()
flows = utils.draw_hsv(flows[0])
# import pdb; pdb.set_trace()
flows = cv2.resize(flows, (256, 256))
self.viz.image((flows).transpose(2, 0, 1))
# flows = [cv2.resize(flow.clip(min=0).astype(np.uint8), (256, 256)) for flow in flows]
# self.viz.image((flows[0]).transpose(2, 0, 1))
# import time
# time.sleep(0.1)
# import pdb; pdb.set_trace()
xx = _xent_loss[:H*W]
xx -= xx.min()
xx /= xx.max()
# xx = color(xx.detach().cpu().numpy())
_img = torch.stack([x[0, :, t1], x[0, :, t2]])
_img -= _img.min()
_img /= _img.max()
self.viz.images(_img)
pca_ff = utils.pca_feats(torch.stack([ff[0, :, t1], ff[0, :, t2]]).detach().cpu())
pca_ff = utils.make_gif(pca_ff, outname=None)
self.viz.images(pca_ff.transpose(0, -1, 1, 2))
img_grid = [cv2.resize(aa, (50,50), interpolation=cv2.INTER_NEAREST)[None] for aa in _AA[0, :, :, :, None].cpu().detach().numpy()]
img_grid = [img_grid[_].repeat(3, 0) * np.array(color(xx[_].item()))[:3, None, None] for _ in range(H*W)]
img_grid = [img_grid[_] / img_grid[_].max() for _ in range(H*W)]
img_grid = torch.from_numpy(np.array(img_grid))
img_grid = torchvision.utils.make_grid(img_grid, nrow=H, padding=1, pad_value=1)
# img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
self.viz.images(img_grid)
return ff, self.xent_coef * (xents/L), self.kldv_coef * (kldvs/L), accur/L
# return dict(x=ff, xent_loss=xents, kldv_loss=kldvs)
def forward2(self, x):
iH, iW = x.shape[-2:]
_ih, _iw = iH // 6, iW // 6
base, query = x[:, :, 0:2], x[:, :, -1:, iH//2-_ih:iH//2+_ih, iW//2-_iw:iW//2+_iw]
# import pdb; pdb.set_trace()
X1, X2 = self.resnet(base), self.resnet(query)
# ff = self.selfsim_head(ff)
# ff = F.normalize(ff, p=2, dim=1)
N, C = X1.shape[:2]
# _h, _w = _H // 10, _W // 10
xents = torch.tensor([0.]).cuda()
kldvs = torch.tensor([0.]).cuda()
accur = torch.tensor([0.]).cuda()
# L = len(list(itertools.combinations(range(T), 2)))
# for (t1, t2) in itertools.combinations(range(T), 2):
L = 1
for _ in range(L):
x1, x2 = X1[:, :, 0:1], X2
H, W = x2.shape[-2:]
if H*W not in self._targets:
self._targets[H*W] = self.make_smooth_target_2d(H, W)
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2)
target = torch.arange(AA.shape[1])[None].repeat(AA.shape[0], 1)
target = (target).view(-1).cuda()
# import pdb; pdb.set_trace()
log_AA = log_AA.view(-1, log_AA.shape[1])
# Cross Entropy
if self.xent_coef > 0:
_xent_loss = self.xent(log_AA, target)
xents += _xent_loss.mean()
# import pdb; pdb.set_trace()
# print((torch.argmax(log_AA, dim=-1) == target).sum())
accur += (torch.argmax(log_AA, dim=-1) == target).float().mean()
# KL Div with Smoothed 2D Targets
if self.kldv_coef > 0:
I = self._targets[H*W][None].repeat(N, 1, 1).view(-1, A.shape[-1]).cuda()
kldv_loss = self.kldv(log_AA, I)
# print(kldv_loss, log_AA.min(), AA.min(), A.min())
kldvs += kldv_loss
# import pdb; pdb.set_trace()
# self.viz.images()
# _AA = AA.view(-1, H * W, H, W)
if np.random.random() < 0.01:
# Self similarity
A, AA, log_AA = self.compute_affinity(x1, x2, do_dropout=False)
log_AA = log_AA.view(-1, log_AA.shape[1])
_xent_loss = self.xent(log_AA, target)
_AA = AA.view(-1, H * W, H, W)
import pdb; pdb.set_trace()
xx = _xent_loss[:H*W]
xx -= xx.min()
xx /= xx.max()
# xx = color(xx.detach().cpu().numpy())
_img = torch.stack([x[0, :, 0], x[0, :, -1]])
_img -= _img.min()
_img /= _img.max()
self.viz.text('%s %s' % (0, -1), opts=dict(height=1, width=10000))
self.viz.images(_img)
# import pdb; pdb.set_trace()
pca_ff = utils.pca_feats(X1[0, :].transpose(0, 1).detach().cpu())
pca_ff = utils.make_gif(pca_ff, outname=None)
self.viz.images(pca_ff.transpose(0, -1, 1, 2))
pca_ff = utils.pca_feats(X2[0, :].transpose(0, 1).detach().cpu())
pca_ff = utils.make_gif(pca_ff, outname=None)
self.viz.image(pca_ff.transpose(0, -1, 1, 2)[0])
img_grid = [cv2.resize(aa, (50,50), interpolation=cv2.INTER_NEAREST)[None] for aa in _AA[0, :, :, :, None].cpu().detach().numpy()]
img_grid = [img_grid[_].repeat(3, 0) * np.array(color(xx[_].item()))[:3, None, None] for _ in range(H*W)]
img_grid = [img_grid[_] / img_grid[_].max() for _ in range(H*W)]
img_grid = torch.from_numpy(np.array(img_grid))
img_grid = torchvision.utils.make_grid(img_grid, nrow=H, padding=1, pad_value=1)
# img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
self.viz.images(img_grid)
return x1, self.xent_coef * (xents/L), self.kldv_coef * (kldvs/L), accur/L
| en | 0.387455 | # self.resnet = resnet3d.r3d_18(pretrained=False) # self.resnet = resnet3d.r2d_18(pretrained=True) # self.resnet_nchan = self.resnet. # assuming no fc pre-training # self.kldv = torch.nn.KLDivLoss(reduction="batchmean") #int(time.time()))) # if '2D' in str(type(self.resnet.conv1)): # else: # dummy = torch.Tensor(1, 3, 224, 224) # import pdb; pdb.set_trace() # assert x1.shape == x2.shape # assuming xs: N, C, 1, H, W # for the inlier counter # import pdb; pdb.set_trace() # A = torch.div(A, 1/C**0.5) # import pdb; pdb.set_trace() # if self.dropout_rate > 0: # A = self.dropout(A) # A = # import pdb; pdb.set_trace() # A12 = A.view(A.size(0), 1, H * H, W, W) # A21 = A.view(A.size(0), 1, H, H, W * W) # A12 = F.softmax(A, dim=2) # A21 = F.softmax(A.transpose(1, 2), dim=2) # A1, A2 = self.dropout(A), self.dropout(A.transpose(1, 2)) # import pdb; pdb.set_trace() For computing similarity of things in X1 w.r.t X2 As in, will return (n x H1*W1 x H2 x W2) sized affinity object # ff = F.normalize(ff, p=2, dim=1) #ff[:, :, t2:t2+1, 2*_h:-2*_h, 2*_w:-2*_w].contiguous() # x1, x2 = ff[:, :, t1:t1+1, _h:-_h, _w:-_w].contiguous(), ff[:, :, t2:t2+1, 2*_h:-2*_h, 2*_w:-2*_w].contiguous() # x1, x2 = ff[:, :, t1:t1+1, _H//2-_h:_H//2+_h, _W//2-_w:_W//2+_w].contiguous(), \ # ff[:, :, t2:t2+1, _H//2-_h:_H//2+_h, _W//2-_w:_W//2+_w].contiguous() # import pdb; pdb.set_trace() # Self similarity # import pdb; pdb.set_trace() # Cross Entropy # import pdb; pdb.set_trace() # print((torch.argmax(log_AA, dim=-1) == target).sum()) # KL Div with Smoothed 2D Targets # print(kldv_loss, log_AA.min(), AA.min(), A.min()) # import pdb; pdb.set_trace() # self.viz.images() # _AA = AA.view(-1, H * W, H, W) # Self similarity # import pdb; pdb.set_trace() # flows = [cv2.resize(flow.clip(min=0).astype(np.uint8), (256, 256)) for flow in flows] # self.viz.image((flows[0]).transpose(2, 0, 1)) # import time # time.sleep(0.1) # import pdb; pdb.set_trace() # xx = color(xx.detach().cpu().numpy()) # 
img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1) # return dict(x=ff, xent_loss=xents, kldv_loss=kldvs) # import pdb; pdb.set_trace() # ff = self.selfsim_head(ff) # ff = F.normalize(ff, p=2, dim=1) # _h, _w = _H // 10, _W // 10 # L = len(list(itertools.combinations(range(T), 2))) # for (t1, t2) in itertools.combinations(range(T), 2): # Self similarity # import pdb; pdb.set_trace() # Cross Entropy # import pdb; pdb.set_trace() # print((torch.argmax(log_AA, dim=-1) == target).sum()) # KL Div with Smoothed 2D Targets # print(kldv_loss, log_AA.min(), AA.min(), A.min()) # import pdb; pdb.set_trace() # self.viz.images() # _AA = AA.view(-1, H * W, H, W) # Self similarity # xx = color(xx.detach().cpu().numpy()) # import pdb; pdb.set_trace() # img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1) | 2.212837 | 2 |
contentcuration/contentcuration/viewsets/task.py | kollivier/studio | 0 | 6621770 | <reponame>kollivier/studio
from django.conf import settings
from django.db.models import Exists
from django.db.models import OuterRef
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import UUIDFilter
from rest_framework.permissions import IsAuthenticated
from contentcuration.celery import app
from contentcuration.models import Channel
from contentcuration.models import Task
from contentcuration.models import User
from contentcuration.viewsets.base import DestroyModelMixin
from contentcuration.viewsets.base import ReadOnlyValuesViewset
from contentcuration.viewsets.base import RequiredFilterSet
class TaskFilter(RequiredFilterSet):
    """Filter Tasks by the channel they affect.

    Only tasks whose ``metadata.affects.channel`` refers to a channel the
    requesting user can see (editable, view-only, public, or a pending
    editor invite) are returned.
    """
    channel = UUIDFilter(method="filter_channel")

    def filter_channel(self, queryset, name, value):
        """Return tasks affecting channel *value*, or an empty queryset
        when the channel is not visible to the requesting user.

        *value* is a UUID; tasks store the channel id as a hex string in
        their JSON metadata, hence the ``value.hex`` comparison below.
        """
        # NOTE(review): is_anonymous is called as a method, which matches
        # older Django versions; on modern Django it is a property — confirm
        # the Django version pinned by this project.
        user_id = not self.request.user.is_anonymous() and self.request.user.id
        user_email = not self.request.user.is_anonymous() and self.request.user.email
        user_queryset = User.objects.filter(id=user_id)
        # Annotate each channel with whether this user can edit or view it.
        channel_queryset = Channel.objects.annotate(
            edit=Exists(user_queryset.filter(editable_channels=OuterRef("id"))),
            view=Exists(user_queryset.filter(view_only_channels=OuterRef("id"))),
        )
        # Visible = editable, viewable, public (not deleted), or the user
        # has a pending editor invitation by email.
        channel_queryset = channel_queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(
                id__in=Channel.objects.filter(deleted=False)
                .filter(Q(public=True) | Q(pending_editors__email=user_email))
                .values_list("id", flat=True)
                .distinct()
            )
        )
        if channel_queryset.filter(id=value).exists():
            return queryset.filter(metadata__affects__channel=value.hex)
        return queryset.none()

    class Meta:
        model = Task
        fields = ("channel",)
class TaskViewSet(ReadOnlyValuesViewset, DestroyModelMixin):
    """Read-only API over asynchronous Tasks, with delete support.

    Deleting a task also revokes the underlying Celery task.  Live Celery
    state (status/progress) is overlaid onto each serialized row in
    :meth:`consolidate`.
    """
    queryset = Task.objects.all()
    permission_classes = [IsAuthenticated]
    filter_backends = (DjangoFilterBackend,)
    filter_class = TaskFilter
    # Tasks are addressed by their Celery task_id, not the DB primary key.
    lookup_field = "task_id"
    # Fields serialized for each task row.
    values = (
        "task_id",
        "task_type",
        "created",
        "status",
        "is_progress_tracking",
        "user_id",
        "metadata",
    )
    field_map = {"user": "user_id"}

    @classmethod
    def id_attr(cls):
        """Identifier attribute used by the values-viewset machinery."""
        return "task_id"

    def perform_destroy(self, instance):
        # TODO: Add logic to delete the Celery task using app.control.revoke(). This will require some extensive
        # testing to ensure terminating in-progress tasks will not put the db in an indeterminate state.
        app.control.revoke(instance.task_id, terminate=True)
        instance.delete()

    def get_edit_queryset(self):
        """Users may only modify (delete) their own tasks."""
        return Task.objects.filter(user=self.request.user)

    def consolidate(self, items, queryset):
        """Overlay live Celery status/progress onto serialized task rows.

        Skipped under CELERY_TASK_ALWAYS_EAGER (e.g. tests), where there is
        no broker/result backend to query.
        """
        if not settings.CELERY_TASK_ALWAYS_EAGER:
            for item in items:
                result = app.AsyncResult(item["task_id"])
                if result and result.status:
                    item["status"] = result.status
                if "progress" not in item["metadata"]:
                    # Just flagging this, but this appears to be the correct way to get task metadata,
                    # even though the API is marked as private.
                    meta = result._get_task_meta()
                    if (
                        meta
                        and "result" in meta
                        and meta["result"]
                        and not isinstance(meta["result"], Exception)
                        and "progress" in meta["result"]
                    ):
                        item["metadata"]["progress"] = meta["result"]["progress"]
                    else:
                        item["metadata"]["progress"] = None
                # Surface the affected channel id (if any) as a top-level field.
                item["channel"] = (
                    item.get("metadata", {}).get("affects", {}).get("channel")
                )
        return items
| from django.conf import settings
from django.db.models import Exists
from django.db.models import OuterRef
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import UUIDFilter
from rest_framework.permissions import IsAuthenticated
from contentcuration.celery import app
from contentcuration.models import Channel
from contentcuration.models import Task
from contentcuration.models import User
from contentcuration.viewsets.base import DestroyModelMixin
from contentcuration.viewsets.base import ReadOnlyValuesViewset
from contentcuration.viewsets.base import RequiredFilterSet
class TaskFilter(RequiredFilterSet):
channel = UUIDFilter(method="filter_channel")
def filter_channel(self, queryset, name, value):
user_id = not self.request.user.is_anonymous() and self.request.user.id
user_email = not self.request.user.is_anonymous() and self.request.user.email
user_queryset = User.objects.filter(id=user_id)
channel_queryset = Channel.objects.annotate(
edit=Exists(user_queryset.filter(editable_channels=OuterRef("id"))),
view=Exists(user_queryset.filter(view_only_channels=OuterRef("id"))),
)
channel_queryset = channel_queryset.filter(
Q(view=True)
| Q(edit=True)
| Q(
id__in=Channel.objects.filter(deleted=False)
.filter(Q(public=True) | Q(pending_editors__email=user_email))
.values_list("id", flat=True)
.distinct()
)
)
if channel_queryset.filter(id=value).exists():
return queryset.filter(metadata__affects__channel=value.hex)
return queryset.none()
class Meta:
model = Task
fields = ("channel",)
class TaskViewSet(ReadOnlyValuesViewset, DestroyModelMixin):
queryset = Task.objects.all()
permission_classes = [IsAuthenticated]
filter_backends = (DjangoFilterBackend,)
filter_class = TaskFilter
lookup_field = "task_id"
values = (
"task_id",
"task_type",
"created",
"status",
"is_progress_tracking",
"user_id",
"metadata",
)
field_map = {"user": "user_id"}
@classmethod
def id_attr(cls):
return "task_id"
def perform_destroy(self, instance):
# TODO: Add logic to delete the Celery task using app.control.revoke(). This will require some extensive
# testing to ensure terminating in-progress tasks will not put the db in an indeterminate state.
app.control.revoke(instance.task_id, terminate=True)
instance.delete()
def get_edit_queryset(self):
return Task.objects.filter(user=self.request.user)
def consolidate(self, items, queryset):
if not settings.CELERY_TASK_ALWAYS_EAGER:
for item in items:
result = app.AsyncResult(item["task_id"])
if result and result.status:
item["status"] = result.status
if "progress" not in item["metadata"]:
# Just flagging this, but this appears to be the correct way to get task metadata,
# even though the API is marked as private.
meta = result._get_task_meta()
if (
meta
and "result" in meta
and meta["result"]
and not isinstance(meta["result"], Exception)
and "progress" in meta["result"]
):
item["metadata"]["progress"] = meta["result"]["progress"]
else:
item["metadata"]["progress"] = None
item["channel"] = (
item.get("metadata", {}).get("affects", {}).get("channel")
)
return items | en | 0.882163 | # TODO: Add logic to delete the Celery task using app.control.revoke(). This will require some extensive # testing to ensure terminating in-progress tasks will not put the db in an indeterminate state. # Just flagging this, but this appears to be the correct way to get task metadata, # even though the API is marked as private. | 1.990731 | 2 |
setup.py | tastatham/ons_geoportal | 0 | 6621771 | from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md, decoded as UTF-8.

    The explicit encoding avoids UnicodeDecodeError / mojibake on platforms
    whose locale default is not UTF-8 (e.g. Windows cp1252).
    """
    with open("README.md", encoding="utf-8") as f:
        return f.read()
install_requires = ["geopandas==0.10.2", "requests== 2.27.1"]
setup(
name="ons_geoportal",
version="0.1",
description="Python tools for downloading geospatial vector data from \
the Open Geography Portal",
long_description=(readme()),
long_description_content_type="text/markdown",
url="https://github.com/tastatham/ons_geoportal",
author="<NAME>",
author_email="<EMAIL>",
keywords="download open geography portal, data, geopandas",
license="MIT",
packages=find_packages(exclude=["tests"]),
install_requires=install_requires,
include_package_data=False,
)
| from setuptools import setup, find_packages
def readme():
with open("README.md") as f:
return f.read()
install_requires = ["geopandas==0.10.2", "requests== 2.27.1"]
setup(
name="ons_geoportal",
version="0.1",
description="Python tools for downloading geospatial vector data from \
the Open Geography Portal",
long_description=(readme()),
long_description_content_type="text/markdown",
url="https://github.com/tastatham/ons_geoportal",
author="<NAME>",
author_email="<EMAIL>",
keywords="download open geography portal, data, geopandas",
license="MIT",
packages=find_packages(exclude=["tests"]),
install_requires=install_requires,
include_package_data=False,
)
| none | 1 | 1.650855 | 2 | |
mljar/client/prediction_download.py | mljar/mljar-api-python | 42 | 6621772 | <gh_stars>10-100
import os
import uuid
import tempfile
import pandas as pd
from .base import MljarHttpClient
from ..exceptions import PredictionDownloadException
from ..log import logger
class PredictionDownloadClient(MljarHttpClient):
    '''
    Client to get predictions from MLJAR.
    '''
    def __init__(self):
        self.url = "/download/prediction/"
        super(PredictionDownloadClient, self).__init__()

    def download(self, prediction_hid):
        """Download prediction *prediction_hid* and return it as a DataFrame.

        Streams the CSV response into a uniquely-named temporary file,
        parses it with pandas, and removes the file in all cases (the
        original only removed it on success, leaking the temp file when
        ``pd.read_csv`` raised).

        Raises:
            PredictionDownloadException: on any download or parse failure.
        """
        response = self.request("POST", self.url, data={"prediction_id": prediction_hid}, parse_json=False)
        tmp_file = os.path.join(tempfile.gettempdir(), 'mljar_prediction_' + str(uuid.uuid4()) + '.csv')
        try:
            with open(tmp_file, 'wb') as f:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
            pred = pd.read_csv(tmp_file)
        except Exception as e:
            raise PredictionDownloadException(str(e))
        finally:
            # Always clean up the temp file; ignore it if it was never created.
            try:
                os.remove(tmp_file)
            except OSError:
                pass
        return pred
| import os
import uuid
import tempfile
import pandas as pd
from .base import MljarHttpClient
from ..exceptions import PredictionDownloadException
from ..log import logger
class PredictionDownloadClient(MljarHttpClient):
'''
Client to get predictions from MLJAR.
'''
def __init__(self):
self.url = "/download/prediction/"
super(PredictionDownloadClient, self).__init__()
def download(self, prediction_hid):
response = self.request("POST", self.url, data = {"prediction_id": prediction_hid}, parse_json=False)
pred = None
try:
tmp_file = os.path.join(tempfile.gettempdir(), 'mljar_prediction_' + str(uuid.uuid4()) + '.csv')
with open(tmp_file, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
pred = pd.read_csv(tmp_file)
os.remove(tmp_file)
except Exception as e:
raise PredictionDownloadException(str(e))
return pred | en | 0.83072 | Client to get predictions from MLJAR. # filter out keep-alive new chunks | 2.550497 | 3 |
Python3/Projects/RockPaperScissors/rock_paper_scissors_v3.py | norbertosanchezdichi/TIL | 0 | 6621773 | from random import choice
player_wins = 0
computer_wins = 0
while player_wins < 2 or computer_wins < 2:
print(f'Player Score: {player_wins} Computer Score: {computer_wins}')
print('Rock...')
print('Paper...')
print('Scissors...')
player1 = input('Player 1, make your move: ').lower()
player2 = choice(['rock', 'paper', 'scissors'])
print(f'Computer plays {player2}')
if player1 == player2:
print('Its a TIE!')
elif player1 == 'rock' and player2 == 'scissors':
print('Player 1 WINS!')
player_wins += 1
elif player1 == 'paper' and player2 == 'rock':
print('Player 1 WINS!')
player_wins += 1
elif player1 == 'scissors' and player2 == 'paper':
print('Player 1 WINS!')
player_wins += 1
else:
print('Computer WINS!')
computer_wins += 1
if player_wins > computer_wins:
print('CONGRATULATIONS!')
else:
print('Better luck next time!')
print(f'FINAL SCORE: Player Score: {player_wins} Computer Score: {computer_wins}')
| from random import choice
player_wins = 0
computer_wins = 0
while player_wins < 2 or computer_wins < 2:
print(f'Player Score: {player_wins} Computer Score: {computer_wins}')
print('Rock...')
print('Paper...')
print('Scissors...')
player1 = input('Player 1, make your move: ').lower()
player2 = choice(['rock', 'paper', 'scissors'])
print(f'Computer plays {player2}')
if player1 == player2:
print('Its a TIE!')
elif player1 == 'rock' and player2 == 'scissors':
print('Player 1 WINS!')
player_wins += 1
elif player1 == 'paper' and player2 == 'rock':
print('Player 1 WINS!')
player_wins += 1
elif player1 == 'scissors' and player2 == 'paper':
print('Player 1 WINS!')
player_wins += 1
else:
print('Computer WINS!')
computer_wins += 1
if player_wins > computer_wins:
print('CONGRATULATIONS!')
else:
print('Better luck next time!')
print(f'FINAL SCORE: Player Score: {player_wins} Computer Score: {computer_wins}')
| none | 1 | 4.200277 | 4 | |
machine_learning_basics/regression_example.py | AndreiRoibu/basic_numpy | 0 | 6621774 | <reponame>AndreiRoibu/basic_numpy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Data Source: https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise
dataFrame = pd.read_csv('../large_files/airfoil_self_noise.dat', sep='\t', header=None)
print(dataFrame.head())
print(dataFrame.info())
input_data = dataFrame[[0,1,2,3,4]].values
target_data = dataFrame[5].values
X_train, X_test, y_train, y_test = train_test_split(input_data, target_data, test_size=0.33)
model = LinearRegression()
model.fit(X_train, y_train)
# Here we evaluate the data
print("Train Score Linear Regression:", model.score(X_train, y_train))
print("Test Score Linear Regression:", model.score(X_test, y_test))
# predictions = model.predict(X_test)
# print("Predictions:", predictions)
print('-------------------------')
model2 = RandomForestRegressor()
model2.fit(X_train, y_train)
print("Train Score Random Forest:", model2.score(X_train, y_train))
print("Test Score Random Forest:", model2.score(X_test, y_test))
print('-------------------------')
input_scaler = StandardScaler()
X_train_scaled = input_scaler.fit_transform(X_train)
X_test_scaled = input_scaler.transform(X_test)
output_scaler = StandardScaler()
y_train_scaled = output_scaler.fit_transform(np.expand_dims(y_train, -1)).ravel()
y_test_scaled = output_scaler.fit_transform(np.expand_dims(y_test, -1)).ravel()
model3 = MLPRegressor(max_iter=500)
model3.fit(X_train_scaled, y_train_scaled)
print("Train Score MLP:", model3.score(X_train_scaled, y_train_scaled))
print("Test Score MLP:", model3.score(X_test_scaled, y_test_scaled)) | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Data Source: https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise
dataFrame = pd.read_csv('../large_files/airfoil_self_noise.dat', sep='\t', header=None)
print(dataFrame.head())
print(dataFrame.info())
input_data = dataFrame[[0,1,2,3,4]].values
target_data = dataFrame[5].values
X_train, X_test, y_train, y_test = train_test_split(input_data, target_data, test_size=0.33)
model = LinearRegression()
model.fit(X_train, y_train)
# Here we evaluate the data
print("Train Score Linear Regression:", model.score(X_train, y_train))
print("Test Score Linear Regression:", model.score(X_test, y_test))
# predictions = model.predict(X_test)
# print("Predictions:", predictions)
print('-------------------------')
model2 = RandomForestRegressor()
model2.fit(X_train, y_train)
print("Train Score Random Forest:", model2.score(X_train, y_train))
print("Test Score Random Forest:", model2.score(X_test, y_test))
print('-------------------------')
input_scaler = StandardScaler()
X_train_scaled = input_scaler.fit_transform(X_train)
X_test_scaled = input_scaler.transform(X_test)
output_scaler = StandardScaler()
y_train_scaled = output_scaler.fit_transform(np.expand_dims(y_train, -1)).ravel()
y_test_scaled = output_scaler.fit_transform(np.expand_dims(y_test, -1)).ravel()
model3 = MLPRegressor(max_iter=500)
model3.fit(X_train_scaled, y_train_scaled)
print("Train Score MLP:", model3.score(X_train_scaled, y_train_scaled))
print("Test Score MLP:", model3.score(X_test_scaled, y_test_scaled)) | en | 0.719658 | # Data Source: https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise # Here we evaluate the data # predictions = model.predict(X_test) # print("Predictions:", predictions) | 3.066221 | 3 |
utypes.py | null-sleep/ulisp | 0 | 6621775 | <reponame>null-sleep/ulisp
import re
class List(list):
def __repr__(self):
exrep = str()
for item in self.__iter__():
exrep += "{}, ".format(item)
return "(" + exrep[:-2] + ")"
class String(str):
pass
class Hash(dict):
def __setitem__(self, k, v):
"""Raises an exception if key is repeated during definition"""
if k in self.keys():
raise ValueError("Key is already present") # Make into a Syntax error??
else:
return super(Hash, self).__setitem__(k, v)
def _make_hash(key_value_pair_list):
# Resolves list of key-value pairs into a dictionary
hash_map = Hash()
for i in range(0, len(key_value_pair_list), 2):
if type(key_value_pair_list[i]) in [String, Symbol, str]:
hash_map[key_value_pair_list[i]] = key_value_pair_list[i+1]
else:
raise Exception('Invalid key: {} provided'.format(key_value_pair_list[i]))
return hash_map
class Symbol(str):
def __repr__(self):
value = self.__str__()
return "Symbol<{}> ".format(value)
def _make_symbol(symbol_type, value):
return Symbol(symbol_type, value)
class Quote():
def __init__(self, value):
self.value = value
def __repr__(self):
value = str(self.value)
return "Quote({})".format(value )# Behaves similar to a string
def _lambda(ast, params, env, Eval, Env):
# ast is the ast of the body of the function
def function(*args):
return Eval(ast, Env(env, params, List(args)))
function.__meta__ = None
function.__ast__ = ast
# resolution of args to list handled in function call
function.__gen_env__ = lambda args: Env(env, params, args)
return function
| import re
class List(list):
def __repr__(self):
exrep = str()
for item in self.__iter__():
exrep += "{}, ".format(item)
return "(" + exrep[:-2] + ")"
class String(str):
pass
class Hash(dict):
def __setitem__(self, k, v):
"""Raises an exception if key is repeated during definition"""
if k in self.keys():
raise ValueError("Key is already present") # Make into a Syntax error??
else:
return super(Hash, self).__setitem__(k, v)
def _make_hash(key_value_pair_list):
# Resolves list of key-value pairs into a dictionary
hash_map = Hash()
for i in range(0, len(key_value_pair_list), 2):
if type(key_value_pair_list[i]) in [String, Symbol, str]:
hash_map[key_value_pair_list[i]] = key_value_pair_list[i+1]
else:
raise Exception('Invalid key: {} provided'.format(key_value_pair_list[i]))
return hash_map
class Symbol(str):
def __repr__(self):
value = self.__str__()
return "Symbol<{}> ".format(value)
def _make_symbol(symbol_type, value):
return Symbol(symbol_type, value)
class Quote():
def __init__(self, value):
self.value = value
def __repr__(self):
value = str(self.value)
return "Quote({})".format(value )# Behaves similar to a string
def _lambda(ast, params, env, Eval, Env):
# ast is the ast of the body of the function
def function(*args):
return Eval(ast, Env(env, params, List(args)))
function.__meta__ = None
function.__ast__ = ast
# resolution of args to list handled in function call
function.__gen_env__ = lambda args: Env(env, params, args)
return function | en | 0.826471 | Raises an exception if key is repeated during definition # Make into a Syntax error?? # Resolves list of key-value pairs into a dictionary # Behaves similar to a string # ast is the ast of the body of the function # resolution of args to list handled in function call | 3.58036 | 4 |
2021/05.py | bernikr/advent-of-code | 1 | 6621776 | import math
import re
from collections import defaultdict
from aoc_utils import Vec
from aocd import get_data
def simplify(c):
return c / math.gcd(*c)
def count_overlaps(inp, include_diagonals):
lines_per_point = defaultdict(lambda: 0)
for start, end in inp:
d = simplify(end - start)
if all(c != 0 for c in d) and not include_diagonals:
continue
lines_per_point[start] += 1
pos = start
while pos != end:
pos += d
lines_per_point[pos] += 1
return sum(n >= 2 for n in lines_per_point.values())
def part1(inp):
return count_overlaps(inp, False)
def part2(inp):
return count_overlaps(inp, True)
if __name__ == '__main__':
data = get_data(day=5, year=2021)
inp = [(Vec(a, b), Vec(c, d)) for a, b, c, d in
((tuple(map(int, a))) for a in
(re.match(r'(\d+),(\d+) -> (\d+),(\d+)', l).groups() for l in data.splitlines()))]
print(part1(inp))
print(part2(inp))
| import math
import re
from collections import defaultdict
from aoc_utils import Vec
from aocd import get_data
def simplify(c):
return c / math.gcd(*c)
def count_overlaps(inp, include_diagonals):
lines_per_point = defaultdict(lambda: 0)
for start, end in inp:
d = simplify(end - start)
if all(c != 0 for c in d) and not include_diagonals:
continue
lines_per_point[start] += 1
pos = start
while pos != end:
pos += d
lines_per_point[pos] += 1
return sum(n >= 2 for n in lines_per_point.values())
def part1(inp):
return count_overlaps(inp, False)
def part2(inp):
return count_overlaps(inp, True)
if __name__ == '__main__':
data = get_data(day=5, year=2021)
inp = [(Vec(a, b), Vec(c, d)) for a, b, c, d in
((tuple(map(int, a))) for a in
(re.match(r'(\d+),(\d+) -> (\d+),(\d+)', l).groups() for l in data.splitlines()))]
print(part1(inp))
print(part2(inp))
| none | 1 | 3.108714 | 3 | |
store/tests/tests_viewset_Membership.py | MelanieFJNR/Blitz-API | 3 | 6621777 | <reponame>MelanieFJNR/Blitz-API
import json
from datetime import timedelta
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from blitz_api.factories import UserFactory, AdminFactory
from blitz_api.models import AcademicLevel
from blitz_api.services import remove_translation_fields
from ..models import Membership
User = get_user_model()
class MembershipTests(APITestCase):
@classmethod
def setUpClass(cls):
super(MembershipTests, cls).setUpClass()
cls.client = APIClient()
cls.user = UserFactory()
cls.admin = AdminFactory()
cls.academic_level = AcademicLevel.objects.create(
name="University"
)
cls.membership = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
price=50,
available=True,
duration=timedelta(days=365),
)
cls.membership.academic_levels.set([cls.academic_level])
cls.membership_unavailable = Membership.objects.create(
name="pending_membership",
details="todo",
price=50,
available=False,
duration=timedelta(days=365),
)
cls.membership_unavailable.academic_levels.set([cls.academic_level])
def test_create(self):
"""
Ensure we can create a membership if user has permission.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "advanced_membership",
'details': "3-Year student membership",
'available': True,
'price': 125,
'duration': timedelta(days=365*3),
'academic_levels': [reverse(
'academiclevel-detail', args=[self.academic_level.id]
)],
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
response_data = remove_translation_fields(json.loads(response.content))
del response_data['url']
del response_data['id']
content = {
'available': True,
'details': '3-Year student membership',
'duration': '1095 00:00:00',
'name': 'advanced_membership',
'price': '125.00',
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(
response_data,
content
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_without_permission(self):
"""
Ensure we can't create a membership if user has no permission.
"""
self.client.force_authenticate(user=self.user)
data = {
'name': "advanced_membership",
'details': "3-Year student membership",
'price': 125,
'duration': timedelta(days=365*3),
'academic_levels': [reverse(
'academiclevel-detail', args=[self.academic_level.id]
)],
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'detail': 'You do not have permission to perform this action.'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_missing_field(self):
"""
Ensure we can't create a membership when required field are missing.
"""
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'duration': ['This field is required.'],
'price': ['This field is required.'],
'available': ['This field is required.'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_null_field(self):
"""
Ensure we can't create a membership when required field are null.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': None,
'details': None,
'price': None,
'duration': None,
'available': None,
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'duration': ['This field may not be null.'],
'name': ['This field may not be null.'],
'price': ['This field may not be null.'],
'available': ['This field may not be null.'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_field(self):
"""
Ensure we can't create a membership when required field are invalid.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "",
'details': "",
'price': "",
'duration': "invalid",
'academic_levels': "invalid",
'available': "invalid",
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'academic_levels': [
'Expected a list of items but got type "str".'
],
'duration': [
'Duration has wrong format. Use one of these formats instead: '
'[DD] [HH:[MM:]]ss[.uuuuuu].'
],
'name': ['This field may not be blank.'],
'price': ['A valid number is required.'],
'available': ['Must be a valid boolean.'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update(self):
"""
Ensure we can update a membership.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "basic_membership_updated",
'details': "1-Year student membership",
'price': 10,
'duration': timedelta(days=365),
'available': True,
# ManytoMany relationship not required for some reasons.
# Needs investigtion.
# 'academic_levels': [reverse(
# 'academiclevel-detail', args=[self.academic_level.id]
# )],
}
response = self.client.put(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
data,
format='json',
)
content = {
'available': True,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership_updated',
'price': '10.00',
'url': 'http://testserver/memberships/' + str(self.membership.id),
'id': self.membership.id,
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_as_admin(self):
"""
Ensure we can make a membership unavailable as an admin.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
membership = self.membership
membership.refresh_from_db()
self.assertEqual(
response.status_code, status.HTTP_204_NO_CONTENT
)
self.assertFalse(self.membership.available)
def test_delete_as_user(self):
"""
Ensure that a user can't make a membership unavailable.
"""
self.client.force_authenticate(user=self.user)
response = self.client.delete(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
self.assertEqual(
response.status_code, status.HTTP_403_FORBIDDEN
)
def test_delete_inexistent(self):
"""
Ensure that deleting a non-existent membership does nothing.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'membership-detail',
kwargs={'pk': 999},
),
)
self.assertEqual(
response.status_code, status.HTTP_204_NO_CONTENT
)
def test_list(self):
"""
Ensure we can list available memberships as an unauthenticated user.
"""
response = self.client.get(
reverse('membership-list'),
format='json',
)
data = json.loads(response.content)
content = {
'count': 1,
'next': None,
'previous': None,
'results': [{
'available': True,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' +
str(self.membership.id),
'id': self.membership.id,
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_as_admin(self):
"""
Ensure we can list all memberships as an admin.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse('membership-list'),
format='json',
)
data = remove_translation_fields(json.loads(response.content))
data['results'] = [
remove_translation_fields(m) for m in data['results']
]
content = {
'count': 2,
'next': None,
'previous': None,
'results': [{
'available': True,
'id': self.membership.id,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' +
str(self.membership.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}, {
'available': False,
'id': self.membership_unavailable.id,
'details': 'todo',
'duration': '365 00:00:00',
'name': 'pending_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' +
str(self.membership_unavailable.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read(self):
"""
Ensure we can read a membership as an unauthenticated user.
"""
response = self.client.get(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
content = {
'available': True,
'id': self.membership.id,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' + str(self.membership.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_as_admin(self):
"""
Ensure we can read a membership as an unauthenticated user.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
content = {
'available': True,
'id': self.membership.id,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' + str(self.membership.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_non_existent(self):
"""
Ensure we get not found when asking for a membership that doesn't
exist.
"""
response = self.client.get(
reverse(
'membership-detail',
kwargs={'pk': 999},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| import json
from datetime import timedelta
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from blitz_api.factories import UserFactory, AdminFactory
from blitz_api.models import AcademicLevel
from blitz_api.services import remove_translation_fields
from ..models import Membership
User = get_user_model()
class MembershipTests(APITestCase):
@classmethod
def setUpClass(cls):
super(MembershipTests, cls).setUpClass()
cls.client = APIClient()
cls.user = UserFactory()
cls.admin = AdminFactory()
cls.academic_level = AcademicLevel.objects.create(
name="University"
)
cls.membership = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
price=50,
available=True,
duration=timedelta(days=365),
)
cls.membership.academic_levels.set([cls.academic_level])
cls.membership_unavailable = Membership.objects.create(
name="pending_membership",
details="todo",
price=50,
available=False,
duration=timedelta(days=365),
)
cls.membership_unavailable.academic_levels.set([cls.academic_level])
def test_create(self):
"""
Ensure we can create a membership if user has permission.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "advanced_membership",
'details': "3-Year student membership",
'available': True,
'price': 125,
'duration': timedelta(days=365*3),
'academic_levels': [reverse(
'academiclevel-detail', args=[self.academic_level.id]
)],
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
response_data = remove_translation_fields(json.loads(response.content))
del response_data['url']
del response_data['id']
content = {
'available': True,
'details': '3-Year student membership',
'duration': '1095 00:00:00',
'name': 'advanced_membership',
'price': '125.00',
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(
response_data,
content
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_without_permission(self):
"""
Ensure we can't create a membership if user has no permission.
"""
self.client.force_authenticate(user=self.user)
data = {
'name': "advanced_membership",
'details': "3-Year student membership",
'price': 125,
'duration': timedelta(days=365*3),
'academic_levels': [reverse(
'academiclevel-detail', args=[self.academic_level.id]
)],
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'detail': 'You do not have permission to perform this action.'
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_missing_field(self):
"""
Ensure we can't create a membership when required field are missing.
"""
self.client.force_authenticate(user=self.admin)
data = {}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'duration': ['This field is required.'],
'price': ['This field is required.'],
'available': ['This field is required.'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_null_field(self):
"""
Ensure we can't create a membership when required field are null.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': None,
'details': None,
'price': None,
'duration': None,
'available': None,
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'duration': ['This field may not be null.'],
'name': ['This field may not be null.'],
'price': ['This field may not be null.'],
'available': ['This field may not be null.'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_invalid_field(self):
"""
Ensure we can't create a membership when required field are invalid.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "",
'details': "",
'price': "",
'duration': "invalid",
'academic_levels': "invalid",
'available': "invalid",
}
response = self.client.post(
reverse('membership-list'),
data,
format='json',
)
content = {
'academic_levels': [
'Expected a list of items but got type "str".'
],
'duration': [
'Duration has wrong format. Use one of these formats instead: '
'[DD] [HH:[MM:]]ss[.uuuuuu].'
],
'name': ['This field may not be blank.'],
'price': ['A valid number is required.'],
'available': ['Must be a valid boolean.'],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update(self):
"""
Ensure we can update a membership.
"""
self.client.force_authenticate(user=self.admin)
data = {
'name': "basic_membership_updated",
'details': "1-Year student membership",
'price': 10,
'duration': timedelta(days=365),
'available': True,
# ManytoMany relationship not required for some reasons.
# Needs investigtion.
# 'academic_levels': [reverse(
# 'academiclevel-detail', args=[self.academic_level.id]
# )],
}
response = self.client.put(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
data,
format='json',
)
content = {
'available': True,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership_updated',
'price': '10.00',
'url': 'http://testserver/memberships/' + str(self.membership.id),
'id': self.membership.id,
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_as_admin(self):
"""
Ensure we can make a membership unavailable as an admin.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
membership = self.membership
membership.refresh_from_db()
self.assertEqual(
response.status_code, status.HTTP_204_NO_CONTENT
)
self.assertFalse(self.membership.available)
def test_delete_as_user(self):
"""
Ensure that a user can't make a membership unavailable.
"""
self.client.force_authenticate(user=self.user)
response = self.client.delete(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
self.assertEqual(
response.status_code, status.HTTP_403_FORBIDDEN
)
def test_delete_inexistent(self):
"""
Ensure that deleting a non-existent membership does nothing.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.delete(
reverse(
'membership-detail',
kwargs={'pk': 999},
),
)
self.assertEqual(
response.status_code, status.HTTP_204_NO_CONTENT
)
def test_list(self):
"""
Ensure we can list available memberships as an unauthenticated user.
"""
response = self.client.get(
reverse('membership-list'),
format='json',
)
data = json.loads(response.content)
content = {
'count': 1,
'next': None,
'previous': None,
'results': [{
'available': True,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' +
str(self.membership.id),
'id': self.membership.id,
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_as_admin(self):
"""
Ensure we can list all memberships as an admin.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse('membership-list'),
format='json',
)
data = remove_translation_fields(json.loads(response.content))
data['results'] = [
remove_translation_fields(m) for m in data['results']
]
content = {
'count': 2,
'next': None,
'previous': None,
'results': [{
'available': True,
'id': self.membership.id,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' +
str(self.membership.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}, {
'available': False,
'id': self.membership_unavailable.id,
'details': 'todo',
'duration': '365 00:00:00',
'name': 'pending_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' +
str(self.membership_unavailable.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}]
}
self.assertEqual(data, content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read(self):
"""
Ensure we can read a membership as an unauthenticated user.
"""
response = self.client.get(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
content = {
'available': True,
'id': self.membership.id,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' + str(self.membership.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_as_admin(self):
"""
Ensure we can read a membership as an unauthenticated user.
"""
self.client.force_authenticate(user=self.admin)
response = self.client.get(
reverse(
'membership-detail',
kwargs={'pk': self.membership.id},
),
)
content = {
'available': True,
'id': self.membership.id,
'details': '1-Year student membership',
'duration': '365 00:00:00',
'name': 'basic_membership',
'price': '50.00',
'url': 'http://testserver/memberships/' + str(self.membership.id),
'academic_levels': ['http://testserver/academic_levels/' +
str(self.academic_level.id)],
'available_on_product_types': [],
'available_on_products': [],
'options': [],
}
self.assertEqual(
remove_translation_fields(json.loads(response.content)),
content
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_read_non_existent(self):
"""
Ensure we get not found when asking for a membership that doesn't
exist.
"""
response = self.client.get(
reverse(
'membership-detail',
kwargs={'pk': 999},
),
)
content = {'detail': 'Not found.'}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) | en | 0.909507 | Ensure we can create a membership if user has permission. Ensure we can't create a membership if user has no permission. Ensure we can't create a membership when required field are missing. Ensure we can't create a membership when required field are null. Ensure we can't create a membership when required field are invalid. Ensure we can update a membership. # ManytoMany relationship not required for some reasons. # Needs investigtion. # 'academic_levels': [reverse( # 'academiclevel-detail', args=[self.academic_level.id] # )], Ensure we can make a membership unavailable as an admin. Ensure that a user can't make a membership unavailable. Ensure that deleting a non-existent membership does nothing. Ensure we can list available memberships as an unauthenticated user. Ensure we can list all memberships as an admin. Ensure we can read a membership as an unauthenticated user. Ensure we can read a membership as an unauthenticated user. Ensure we get not found when asking for a membership that doesn't exist. | 1.941399 | 2 |
plotly/validators/layout/slider/step/_value.py | faezs/plotly.py | 2 | 6621778 | <reponame>faezs/plotly.py<filename>plotly/validators/layout/slider/step/_value.py<gh_stars>1-10
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated plotly validator for the string `value` property of
    layout.slider.step."""

    def __init__(
        self, plotly_name='value', parent_name='layout.slider.step', **kwargs
    ):
        super(ValueValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='arraydraw',  # changing this value triggers an array redraw
            role='info',
            **kwargs
        )
| import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
    """Auto-generated plotly validator for the string `value` property of
    layout.slider.step."""

    def __init__(
        self, plotly_name='value', parent_name='layout.slider.step', **kwargs
    ):
        super(ValueValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='arraydraw',  # changing this value triggers an array redraw
            role='info',
            **kwargs
        )
day12/main.py | Jachoooo/AdventOfCode2021 | 0 | 6621779 | import time
from functools import lru_cache
FILENAME="input.txt"
if FILENAME=="test.txt":
D=True
else:
D=False
starttime=time.perf_counter()
class node:
    """A cave in the puzzle graph: a name plus the set of neighbouring cave names."""

    def __init__(self, name) -> None:
        """Create an isolated cave called *name* with no neighbours yet."""
        self.name, self.connected = name, set()

    def add_ngbr(self, name):
        """Record an edge to the cave called *name* (duplicates are ignored)."""
        self.connected.add(name)
@lru_cache(maxsize=None)
def search1(visited):
    # Part-1 depth-first search: big caves (upper-case names) may be revisited
    # freely, small caves at most once.  `visited` is the comma-joined path so
    # far; complete paths are accumulated in the module-level `paths` set.
    # NOTE(review): every recursive call carries a unique path prefix, so
    # lru_cache never hits and only costs memory -- confirm it is wanted.
    global nodes
    global paths
    global D
    vislist=visited.split(',')
    curNode=vislist[-1]  # last element of the path is the current cave
    if D:print('[1]Searching in ',nodes[curNode].connected)
    for ngbr in nodes[curNode].connected:
        if ngbr=='end':
            if D:print(visited+','+'end')
            paths.add(visited+','+'end')
            continue
        if ngbr.isupper():
            # big cave: always allowed to re-enter
            search1(visited+','+ngbr)
        if (not ngbr.isupper()) and not(ngbr in vislist):
            # small cave: only if it is not already on the current path
            search1(visited+','+ngbr)
@lru_cache(maxsize=None)
def search2(visited,sm_cave):
    # Part-2 depth-first search: identical to search1 except that the single
    # privileged small cave `sm_cave` may be visited twice.  Complete paths go
    # into the module-level set `paths2`, which also de-duplicates paths found
    # under different choices of sm_cave.
    global nodes
    global paths2
    global D
    vislist=visited.split(',')
    curNode=vislist[-1]
    if sm_cave in vislist:
        # erase one earlier visit of the privileged cave from the local copy
        # so the membership test below lets it through a second time
        vislist.remove(sm_cave)
    if D:print('[2]Searching in ',nodes[curNode].connected)
    for ngbr in nodes[curNode].connected:
        if ngbr=='end':
            if D:print(visited+','+'end')
            paths2.add(visited+','+'end')
            continue
        if ngbr.isupper():
            search2(visited+','+ngbr,sm_cave)
        if (not ngbr.isupper()) and not(ngbr in vislist):
            search2(visited+','+ngbr,sm_cave)
# --- main script: build the cave graph, run both searches, print results ---
paths=set()    # complete paths found by part 1
paths2=set()   # complete paths found by part 2 (set de-duplicates)
nodes={}       # cave name -> node instance
print("\u001b[2J\u001b[0;0H")  # ANSI: clear screen, move cursor to top-left
with open(FILENAME,'r') as file:
    for line in file:
        # each input line is an undirected edge "a-b"; register both ends
        node1,node2=line.strip().split('-')
        n1=node(node1)
        n2=node(node2)
        if node1 in nodes.keys():
            nodes[node1].add_ngbr(node2)
        else:
            nodes[node1]=n1
            nodes[node1].add_ngbr(node2)
        if node2 in nodes.keys():
            nodes[node2].add_ngbr(node1)
        else:
            nodes[node2]=n2
            nodes[node2].add_ngbr(node1)
search1('start')
print('[Part 1] Done!')
# count the small caves (everything that is not upper-case, start, or end)
i=0
for elem in nodes.values():
    if not(elem.name.isupper() or (elem.name in ['start','end'])):i+=1
# part 2: rerun the search once per small cave, allowing that cave twice
j=0
for elem in nodes.values():
    if elem.name.isupper() or (elem.name in ['start','end']):
        continue
    else:
        search2('start',elem.name)
        j+=1
        print('[Part 2]',int(j*100/i),'%')  # progress indicator
print(f"Part 1 result = {len(paths)}")
print(f"Part 2 result = {len(paths2)}")
print("Done in {:.6f} s".format(time.perf_counter()-starttime))
| import time
from functools import lru_cache
FILENAME="input.txt"
if FILENAME=="test.txt":
D=True
else:
D=False
starttime=time.perf_counter()
class node:
def __init__(self,name) -> None:
self.name=name
self.connected=set()
def add_ngbr(self,name):
self.connected.add(name)
@lru_cache(maxsize=None)
def search1(visited):
global nodes
global paths
global D
vislist=visited.split(',')
curNode=vislist[-1]
if D:print('[1]Searching in ',nodes[curNode].connected)
for ngbr in nodes[curNode].connected:
if ngbr=='end':
if D:print(visited+','+'end')
paths.add(visited+','+'end')
continue
if ngbr.isupper():
search1(visited+','+ngbr)
if (not ngbr.isupper()) and not(ngbr in vislist):
search1(visited+','+ngbr)
@lru_cache(maxsize=None)
def search2(visited,sm_cave):
global nodes
global paths2
global D
vislist=visited.split(',')
curNode=vislist[-1]
if sm_cave in vislist:
vislist.remove(sm_cave)
if D:print('[2]Searching in ',nodes[curNode].connected)
for ngbr in nodes[curNode].connected:
if ngbr=='end':
if D:print(visited+','+'end')
paths2.add(visited+','+'end')
continue
if ngbr.isupper():
search2(visited+','+ngbr,sm_cave)
if (not ngbr.isupper()) and not(ngbr in vislist):
search2(visited+','+ngbr,sm_cave)
paths=set()
paths2=set()
nodes={}
print("\u001b[2J\u001b[0;0H")
with open(FILENAME,'r') as file:
for line in file:
node1,node2=line.strip().split('-')
n1=node(node1)
n2=node(node2)
if node1 in nodes.keys():
nodes[node1].add_ngbr(node2)
else:
nodes[node1]=n1
nodes[node1].add_ngbr(node2)
if node2 in nodes.keys():
nodes[node2].add_ngbr(node1)
else:
nodes[node2]=n2
nodes[node2].add_ngbr(node1)
search1('start')
print('[Part 1] Done!')
i=0
for elem in nodes.values():
if not(elem.name.isupper() or (elem.name in ['start','end'])):i+=1
j=0
for elem in nodes.values():
if elem.name.isupper() or (elem.name in ['start','end']):
continue
else:
search2('start',elem.name)
j+=1
print('[Part 2]',int(j*100/i),'%')
print(f"Part 1 result = {len(paths)}")
print(f"Part 2 result = {len(paths2)}")
print("Done in {:.6f} s".format(time.perf_counter()-starttime))
| none | 1 | 3.395936 | 3 | |
keyup/scrapers/novelkeys.py | anticlockwise/key_updates | 0 | 6621780 | from ..models import GroupBuyItem
from .base import PyQueryBasedScraper
from .util import normalize_date
from pyquery import PyQuery as pq
import json
URL = "https://novelkeys.xyz/pages/updates"
STORE_NAME = "Novelkeys"
class NovelkeysScraper(PyQueryBasedScraper):
    """Scrapes the Novelkeys updates page for group-buy ship-date estimates."""

    def _get_url(self):
        # Page to fetch; downloading/parsing is handled by the base class.
        return URL

    def _scrape(self, doc):
        # NOTE(review): the .sc-* selectors are styled-components class hashes
        # and will break on any site redeploy -- confirm they are current.
        gb_items = []
        update_cells = doc(".sc-pkHUE .sc-pkUyL")
        for update_cell in update_cells:
            update_cell_pq = pq(update_cell)
            gb_url = update_cell_pq.attr("data-href")
            if gb_url:  # cells without a link are skipped
                title_cell = update_cell_pq("h3.sc-oTLFK .sc-ptSuy")
                title = title_cell.text()
                expected_ship_date_cell = update_cell_pq(".sc-pbYdQ")
                if len(expected_ship_date_cell) == 2:
                    # the second matched cell carries the ship-date text
                    expected_ship_date = pq(expected_ship_date_cell[1]).text().strip()
                    if expected_ship_date:
                        gb_item = GroupBuyItem(
                            title, STORE_NAME, normalize_date(expected_ship_date)
                        )
                        gb_items.append(gb_item)
        return gb_items
if __name__ == "__main__":
    # Ad-hoc smoke test: scrape and print each group buy as a JSON line.
    scraper = NovelkeysScraper()
    gb_items = scraper.scrape()
    for gb_item in gb_items:
        print(json.dumps(gb_item.__dict__()))
| from ..models import GroupBuyItem
from .base import PyQueryBasedScraper
from .util import normalize_date
from pyquery import PyQuery as pq
import json
URL = "https://novelkeys.xyz/pages/updates"
STORE_NAME = "Novelkeys"
class NovelkeysScraper(PyQueryBasedScraper):
def _get_url(self):
return URL
def _scrape(self, doc):
gb_items = []
update_cells = doc(".sc-pkHUE .sc-pkUyL")
for update_cell in update_cells:
update_cell_pq = pq(update_cell)
gb_url = update_cell_pq.attr("data-href")
if gb_url:
title_cell = update_cell_pq("h3.sc-oTLFK .sc-ptSuy")
title = title_cell.text()
expected_ship_date_cell = update_cell_pq(".sc-pbYdQ")
if len(expected_ship_date_cell) == 2:
expected_ship_date = pq(expected_ship_date_cell[1]).text().strip()
if expected_ship_date:
gb_item = GroupBuyItem(
title, STORE_NAME, normalize_date(expected_ship_date)
)
gb_items.append(gb_item)
return gb_items
if __name__ == "__main__":
scraper = NovelkeysScraper()
gb_items = scraper.scrape()
for gb_item in gb_items:
print(json.dumps(gb_item.__dict__()))
| none | 1 | 2.578192 | 3 | |
public_experiments/pareto_utils.py | kirtanp/MAMO-fair | 1 | 6621781 | import numpy as np
import pandas as pd
from pygmo.core import hypervolume
import matplotlib.pyplot as pltorc
import torch
import torch.nn as nn
def reset_model(model):
    """Re-initialise every direct child layer of *model* that supports
    ``reset_parameters()`` (layers without it are left untouched)."""
    resettable = (child for child in model.children()
                  if hasattr(child, 'reset_parameters'))
    for layer in resettable:
        layer.reset_parameters()
def weights_init(m):
    """Layer-wise weight initialiser, intended for ``model.apply(weights_init)``.

    Conv1d/Conv3d layers get Xavier-uniform weights with ReLU gain,
    BatchNorm3d weights are drawn from N(1.0, 0.02), Linear weights from
    N(0.0, 0.02); all matching layers get zero bias.  Other layer types are
    left untouched.
    """
    if isinstance(m, (nn.Conv3d, nn.Conv1d)):
        # Bug fix: `isinstance(m, nn.Conv3d or nn.Conv1d)` evaluated to
        # `isinstance(m, nn.Conv3d)` (Conv1d never matched), and the bare
        # name `init` was undefined -- use a type tuple and the fully
        # qualified torch.nn.init.
        torch.nn.init.xavier_uniform_(
            m.weight.data, torch.nn.init.calculate_gain('relu'))
        m.bias.data.fill_(0)
    elif isinstance(m, nn.BatchNorm3d):
        m.weight.data.normal_(mean=1.0, std=0.02)
        m.bias.data.fill_(0)
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
def to_np(scores):
    """Convert the scores as output by the pareto manager to numpy.

    Each entry of *scores* is a tuple whose first element is the score
    vector; those vectors are stacked into a single array suitable for
    pareto-front computation and plotting.
    """
    return np.array([entry[0] for entry in scores])
def identify_pareto(scores):
    """Return the indices of the non-dominated rows of *scores*.

    *scores* is an (n, d) array of objective values where larger is better.
    A row is kept iff no other row is >= in every objective and strictly >
    in at least one (weak Pareto dominance).
    """
    n = scores.shape[0]
    on_front = np.ones(n, dtype=bool)
    for i in range(n):
        for j in range(n):
            # does row j dominate row i?
            if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
                on_front[i] = False
                break  # i is off the front; no need to keep comparing
    return np.arange(n)[on_front]
def get_pareto_points(scores):
    """Get pareto points in a form convenient for plotting.

    Returns a pair (first-objective values, second-objective values) of the
    pareto front of *scores*, sorted ascending by the first objective.
    """
    front = scores[identify_pareto(scores)]
    front = pd.DataFrame(front).sort_values(0).values
    return front[:, 0], front[:, 1]
my_colors = ['r', 'g', 'b', 'k', 'y', 'm', 'c']


def plot_paretos(score_list, names, axes_labels, colors=my_colors):
    """Plot the 2-D pareto front of each score array on a single figure.

    Args:
        score_list: iterable of (n, 2) score arrays.
        names: legend label for each score array.
        axes_labels: (xlabel, ylabel) pair.
        colors: matplotlib colour per front; only the first len(score_list)
            entries are used.
    """
    # Bug fix: this module imported pyplot under the typo alias `pltorc`, so
    # every `plt.*` call below raised NameError; the corrected alias is added
    # in the import block.  The unused per-front random colour was removed.
    for i, scores in enumerate(score_list):
        xs, ys = get_pareto_points(scores)
        plt.plot(xs, ys, c=colors[i], label=names[i])
        plt.scatter(xs, ys, c=colors[i])
    plt.xlabel(axes_labels[0])
    plt.ylabel(axes_labels[1])
    plt.legend()
    plt.show()
def plot_2d_pareto(scores):
    # Draw a single 2-D pareto front: connecting line plus scatter markers.
    # Requires `plt` (matplotlib.pyplot) to be importable at module level.
    pareto = get_pareto_points(scores)
    plt.plot(pareto[0], pareto[1])
    plt.scatter(pareto[0], pareto[1])
def get_hypervolume(scores):
    # pygmo's hypervolume assumes minimisation, so negate the (maximised)
    # scores and measure the dominated volume against the origin.
    scores = -scores
    hv = hypervolume(scores)
    d = scores.shape[1]
    return hv.compute([0.0]*d)
def is_dominated(p, S):
    """True iff some point of *S* is componentwise >= the point *p*."""
    return any(np.all(candidate >= p) for candidate in S)
def coverage(S1, S2):
    """Fraction of the points in *S2* that are dominated by some point of *S1*."""
    dominated = sum(1 for point in S2 if is_dominated(point, S1))
    return dominated / len(S2)
def distance_to_closest_neighbor(s, scores):
    """L1 distance from point *s* to its nearest row of *scores*, clamped
    from above by 2*d (the clamp is also returned when *scores* is empty)."""
    cap = 2 * scores.shape[1]
    nearest = min((np.sum(np.abs(row - s)) for row in scores), default=cap)
    return min(nearest, cap)
def spacing(scores):
    # Spacing metric: sample standard deviation of each point's L1 distance
    # to its closest neighbour.  Lower values mean a more evenly spread front.
    # NOTE(review): raises ZeroDivisionError for fewer than two points.
    distances = []
    for i, s in enumerate(scores):
        # distance from s to the rest of the set (s itself removed)
        d_i = distance_to_closest_neighbor(s, np.delete(scores, i, axis=0))
        distances.append(d_i)
    distances = np.array(distances)
    d_mean = np.mean(distances)
    total=0
    for d in distances:
        total+=((d-d_mean)*(d-d_mean))
    return np.sqrt(total/float(len(scores) - 1))
def get_solution(scores, norm='l2', ideal_point_id='zenith'):
    """Pick the row of *scores* closest to an ideal point.

    Args:
        scores: (n, d) array of objective values (assumed within [0, 1],
            larger is better -- TODO confirm against callers).
        norm: 'l2' (squared euclidean) or 'l1' (manhattan) distance.
        ideal_point_id: 'zenith' for the all-ones ideal point, or an int k
            for the unit vector e_k (favouring objective k alone).

    Returns:
        (s_min, i_min): the closest row and its index.

    Raises:
        ValueError: if *scores* is empty, *norm* is unknown, or
            *ideal_point_id* is neither 'zenith' nor an int.
    """
    # Robustness fix: the original left i_min / d_s / ip unbound (NameError /
    # UnboundLocalError) for empty input, an unknown norm, or a bad
    # ideal_point_id; fail fast with explicit errors instead.
    if len(scores) == 0:
        raise ValueError("scores must contain at least one point")
    if norm not in ('l1', 'l2'):
        raise ValueError(f"unsupported norm: {norm!r}")
    d = scores.shape[1]
    if ideal_point_id == 'zenith':
        ip = np.ones(d)
    elif isinstance(ideal_point_id, int):
        ip = np.zeros(d)
        ip[ideal_point_id] = 1
    else:
        raise ValueError(f"unsupported ideal_point_id: {ideal_point_id!r}")
    d_min = 10 * d  # exceeds any reachable distance for scores in [0, 1]
    s_min = np.zeros(d)
    i_min = 0
    for i, s in enumerate(scores):
        if norm == 'l2':
            d_s = np.sum((ip - s) ** 2)
        else:
            d_s = np.sum(np.abs(ip - s))
        if d_s < d_min:
            d_min = d_s
            s_min = s
            i_min = i
    return s_min, i_min
import pandas as pd
from pygmo.core import hypervolume
import matplotlib.pyplot as pltorc
import torch
import torch.nn as nn
def reset_model(model):
for layer in model.children():
if hasattr(layer, 'reset_parameters'):
layer.reset_parameters()
def weights_init(m):
    """Layer-wise weight initialiser, intended for ``model.apply(weights_init)``.

    Conv1d/Conv3d layers get Xavier-uniform weights with ReLU gain,
    BatchNorm3d weights are drawn from N(1.0, 0.02), Linear weights from
    N(0.0, 0.02); all matching layers get zero bias.  Other layer types are
    left untouched.
    """
    if isinstance(m, (nn.Conv3d, nn.Conv1d)):
        # Bug fix: `isinstance(m, nn.Conv3d or nn.Conv1d)` evaluated to
        # `isinstance(m, nn.Conv3d)` (Conv1d never matched), and the bare
        # name `init` was undefined -- use a type tuple and the fully
        # qualified torch.nn.init.
        torch.nn.init.xavier_uniform_(
            m.weight.data, torch.nn.init.calculate_gain('relu'))
        m.bias.data.fill_(0)
    elif isinstance(m, nn.BatchNorm3d):
        m.weight.data.normal_(mean=1.0, std=0.02)
        m.bias.data.fill_(0)
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
def to_np(scores):
"""Convert the scores as output by the pareto manager to numpy
to be able to use for finding the pareto front and plotting
"""
scores = [i[0] for i in scores]
scores = np.array(scores)
return(scores)
def identify_pareto(scores):
"""For n pareto points in d dimensions, 'scores' is a numpy array
with shape (n, d). Returns the indices of the points which are part of
the pareto front
"""
# Count number of items
population_size = scores.shape[0]
# Create a NumPy index for scores on the pareto front (zero indexed)
population_ids = np.arange(population_size)
# Create a starting list of items on the Pareto front
# All items start off as being labelled as on the Parteo front
pareto_front = np.ones(population_size, dtype=bool)
# Loop through each item. This will then be compared with all other items
for i in range(population_size):
# Loop through all other items
for j in range(population_size):
# Check if our 'i' pint is dominated by out 'j' point
if all(scores[j] >= scores[i]) and any(scores[j] > scores[i]):
# j dominates i. Label 'i' point as not on Pareto front
pareto_front[i] = 0
# Stop further comparisons with 'i' (no more comparisons needed)
break
# Return ids of scenarios on pareto front
return population_ids[pareto_front]
def get_pareto_points(scores):
"""Get pareto points in a form convenient for plotting
"""
x = [0 for i in range(scores.shape[1]) ]
for i in range(scores.shape[1]):
x[i] = scores[:,i]
pareto = identify_pareto(scores)
pareto_front = scores[pareto]
pareto_front_df = pd.DataFrame(pareto_front)
pareto_front_df.sort_values(0, inplace=True)
pareto_front = pareto_front_df.values
x_pareto = [0 for i in range(pareto_front.shape[1])]
for i in range(pareto_front.shape[1]):
x_pareto[i] = pareto_front[:,i]
return(x_pareto[0], x_pareto[1])
my_colors = ['r', 'g', 'b', 'k', 'y', 'm', 'c']
def plot_paretos(score_list, names, axes_labels, colors=my_colors):
pareto = []
for score in score_list:
pareto.append(get_pareto_points(score))
for i, p in enumerate(pareto):
rgb = np.random.rand(3,)
plt.plot(p[0], p[1], c=colors[i], label=names[i])
plt.scatter(p[0], p[1], c=colors[i])
plt.xlabel(axes_labels[0])
plt.ylabel(axes_labels[1])
plt.legend()
plt.show()
def plot_2d_pareto(scores):
pareto = get_pareto_points(scores)
plt.plot(pareto[0], pareto[1])
plt.scatter(pareto[0], pareto[1])
def get_hypervolume(scores):
scores = -scores
hv = hypervolume(scores)
d = scores.shape[1]
return hv.compute([0.0]*d)
def is_dominated(p, S):
for s in S:
if(np.all(s>=p)):
return True
return False
def coverage(S1, S2):
count = 0
for s in S2:
if(is_dominated(s, S1)):
count = count + 1
return(count/len(S2))
def distance_to_closest_neighbor(s, scores):
min_distance = 2*scores.shape[1]
for s_j in scores:
distance = np.sum(abs(s_j - s))
if(distance < min_distance):
min_distance = distance
return(min_distance)
def spacing(scores):
distances = []
for i, s in enumerate(scores):
d_i = distance_to_closest_neighbor(s, np.delete(scores, i, axis=0))
distances.append(d_i)
distances = np.array(distances)
d_mean = np.mean(distances)
total=0
for d in distances:
total+=((d-d_mean)*(d-d_mean))
return np.sqrt(total/float(len(scores) - 1))
def get_solution(scores, norm='l2', ideal_point_id='zenith'):
d = scores.shape[1]
if(ideal_point_id == 'zenith'):
ip = np.ones(d)
elif(isinstance(ideal_point_id, int)):
ip = np.zeros(d)
ip[ideal_point_id] = 1
d_min = 10*d
s_min = np.zeros(d)
for i, s in enumerate(scores):
if(norm=='l2'):
d_s = np.sum((ip - s)**2)
elif(norm=='l1'):
d_s = np.sum(np.abs(ip - s))
if(d_s<d_min):
d_min = d_s
s_min = s
i_min = i
return s_min, i_min | en | 0.848702 | #nn.Conv3d # torch.nn.init.xavier_uniform_(m.bias.data) Convert the scores as output by the pareto manager to numpy to be able to use for finding the pareto front and plotting For n pareto points in d dimensions, 'scores' is a numpy array with shape (n, d). Returns the indices of the points which are part of the pareto front # Count number of items # Create a NumPy index for scores on the pareto front (zero indexed) # Create a starting list of items on the Pareto front # All items start off as being labelled as on the Parteo front # Loop through each item. This will then be compared with all other items # Loop through all other items # Check if our 'i' pint is dominated by out 'j' point # j dominates i. Label 'i' point as not on Pareto front # Stop further comparisons with 'i' (no more comparisons needed) # Return ids of scenarios on pareto front Get pareto points in a form convenient for plotting | 2.919262 | 3 |
setup.py | xelaxela13/django_include_bootstrap | 0 | 6621782 | from setuptools import setup, find_packages
setup(
    name="django-include-bootstrap",
    version="1.0",
    # src-layout: importable packages live under src/
    packages=find_packages("src"),
    package_dir={"": "src"},
    install_requires=["requests", "subresource-integrity"],
    package_data={
        # If any package contains *.txt or *.rst files, include them:
        "": ["*.txt", "*.rst", "*.msg"],
    },
    # metadata to display on PyPI
    author="<NAME>",
    author_email="<EMAIL>",
    description="Include twitter bootstrap to Django templates",
    keywords="django, bootstrap",
    url="https://github.com/xelaxela13/django_include_bootstrap",
    classifiers=[
        # NOTE(review): both PSF and MIT license classifiers are listed --
        # confirm which license applies and drop the other.
        "License :: OSI Approved :: Python Software Foundation License",
        "Environment :: Web Environment",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Topic :: Utilities",
        "Topic :: Software Development :: Libraries"
    ],
    include_package_data=True
)
| from setuptools import setup, find_packages
setup(
name="django-include-bootstrap",
version="1.0",
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=["requests", "subresource-integrity"],
package_data={
# If any package contains *.txt or *.rst files, include them:
"": ["*.txt", "*.rst", "*.msg"],
},
# metadata to display on PyPI
author="<NAME>",
author_email="<EMAIL>",
description="Include twitter bootstrap to Django templates",
keywords="django, bootstrap",
url="https://github.com/xelaxela13/django_include_bootstrap",
classifiers=[
"License :: OSI Approved :: Python Software Foundation License",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Topic :: Utilities",
"Topic :: Software Development :: Libraries"
],
include_package_data=True
)
| en | 0.780884 | # If any package contains *.txt or *.rst files, include them: # metadata to display on PyPI | 1.52091 | 2 |
src/tentohako/game/node.py | Koukyosyumei/TenToHako | 0 | 6621783 | <reponame>Koukyosyumei/TenToHako
def get_valid_action_with_basic_rule(board):
    """Return a list of possible actions based on the given board.

    An action is a (row, col) position whose row and column indices have
    different parity (an edge cell) and which still holds a blank in the
    board matrix.

    Args:
        board: Board instance exposing dim_x, dim_y and board_matrix.

    Returns:
        valid_actions: list of (row, col) tuples for every playable edge.
    """
    return [
        (row, col)
        for row in range(board.dim_y)
        for col in range(board.dim_x)
        if row % 2 != col % 2 and board.board_matrix[row][col] == " "
    ]
class Node:
    """Game-tree node storing the trajectory of the game.

    Mainly used by tree-search algorithms: each node keeps the board it
    represents, the action that produced it, the player to move,
    win/visit statistics and the actions not yet expanded into children.
    """

    def __init__(self, parentNode, board, action, activePlayer, id_to_scores):
        """Create a node for *board*, reached from *parentNode* via *action*.

        Args:
            parentNode: node representing the previous state.
            board: current board state.
            action: action that produced this board from the parent.
            activePlayer: player to move at this node (negated per ply, so
                the two players are encoded as +1 / -1).
            id_to_scores: current score status per player id.
        """
        self.action = action
        self.parentNode = parentNode
        self.board = board
        self.children = []
        self.wins = 0
        self.visits = 0
        self.unexamined = get_valid_action_with_basic_rule(board)
        self.activePlayer = activePlayer
        self.id_to_scores = id_to_scores

    def addChild(self, board, index, id_to_scores):
        """Expand the unexamined action at *index* into a child node.

        Args:
            board: board state produced by applying the action.
            index: position of the action in self.unexamined.
            id_to_scores: score status of the child node.

        Returns:
            The newly created child node (with the active player flipped).
        """
        # Bug fix: the original referenced `self.action_player`, an attribute
        # that is never assigned (the constructor stores `activePlayer`), so
        # every call raised AttributeError.
        node = Node(
            self, board, self.unexamined[index], self.activePlayer * -1, id_to_scores
        )
        del self.unexamined[index]
        self.children.append(node)
        return node

    def selectChild(self):
        """Select the child node for searching (not implemented here)."""
        pass

    def update(self, result, player_id):
        """Record one playout outcome at this node.

        Args:
            result: mapping from player id to score for the finished game.
            player_id: id of the player this node's statistics track.
        """
        self.visits += 1
        self.wins += result[player_id]

    def mostVisitedChild(self):
        """Return the child node with the highest visit count."""
        mostVisited = self.children[0]
        for child in self.children:
            if child.visits > mostVisited.visits:
                mostVisited = child
        return mostVisited
def get_valid_action_with_basic_rule(board):
    """Return a list of possible actions based on the given board.

    An action is a (row, col) position whose row and column indices have
    different parity (an edge cell) and which still holds a blank in the
    board matrix.

    Args:
        board: Board instance exposing dim_x, dim_y and board_matrix.

    Returns:
        valid_actions: list of (row, col) tuples for every playable edge.
    """
    return [
        (row, col)
        for row in range(board.dim_y)
        for col in range(board.dim_x)
        if row % 2 != col % 2 and board.board_matrix[row][col] == " "
    ]
class Node:
    """Game-tree node storing the trajectory of the game.

    Mainly used by tree-search algorithms: each node keeps the board it
    represents, the action that produced it, the player to move,
    win/visit statistics and the actions not yet expanded into children.
    """

    def __init__(self, parentNode, board, action, activePlayer, id_to_scores):
        """Create a node for *board*, reached from *parentNode* via *action*.

        Args:
            parentNode: node representing the previous state.
            board: current board state.
            action: action that produced this board from the parent.
            activePlayer: player to move at this node (negated per ply, so
                the two players are encoded as +1 / -1).
            id_to_scores: current score status per player id.
        """
        self.action = action
        self.parentNode = parentNode
        self.board = board
        self.children = []
        self.wins = 0
        self.visits = 0
        self.unexamined = get_valid_action_with_basic_rule(board)
        self.activePlayer = activePlayer
        self.id_to_scores = id_to_scores

    def addChild(self, board, index, id_to_scores):
        """Expand the unexamined action at *index* into a child node.

        Args:
            board: board state produced by applying the action.
            index: position of the action in self.unexamined.
            id_to_scores: score status of the child node.

        Returns:
            The newly created child node (with the active player flipped).
        """
        # Bug fix: the original referenced `self.action_player`, an attribute
        # that is never assigned (the constructor stores `activePlayer`), so
        # every call raised AttributeError.
        node = Node(
            self, board, self.unexamined[index], self.activePlayer * -1, id_to_scores
        )
        del self.unexamined[index]
        self.children.append(node)
        return node

    def selectChild(self):
        """Select the child node for searching (not implemented here)."""
        pass

    def update(self, result, player_id):
        """Record one playout outcome at this node.

        Args:
            result: mapping from player id to score for the finished game.
            player_id: id of the player this node's statistics track.
        """
        self.visits += 1
        self.wins += result[player_id]

    def mostVisitedChild(self):
        """Return the child node with the highest visit count."""
        mostVisited = self.children[0]
        for child in self.children:
            if child.visits > mostVisited.visits:
                mostVisited = child
        return mostVisited
restaurants/restaurant_management/page/restaurant_manage/restaurant_manage.py | zeinabmohammed/Restaurants | 1 | 6621784 | from __future__ import unicode_literals
import frappe
from erpnext.accounts.doctype.pos_profile.pos_profile import get_item_groups
from erpnext.accounts.doctype.pos_invoice.pos_invoice import get_stock_availability
class RestaurantManage:
    """Server-side helpers for the Restaurant Manage page: room listing and
    creation, production-center synchronisation and the realtime listener
    payload consumed by connected clients."""

    @staticmethod
    def production_center_notify(status):
        # Re-synchronise every production center that manages any of the given
        # statuses so clients refresh their order queues.
        object_in_status = frappe.get_list("Status Managed Production Center", "parent", filters={
            "parentType": "Restaurant Object",
            "status_managed": ("in", status)
        })
        for item in object_in_status:
            obj = frappe.get_doc("Restaurant Object", item.parent)
            obj.synchronize()

    @staticmethod
    def get_rooms():
        # All Restaurant Objects of type Room, each annotated with its current
        # open-order count (one extra get_doc per room).
        rooms = frappe.get_list("Restaurant Object", "name, description", filters={
            "type": "Room"
        })
        for room in rooms:
            t = frappe.get_doc("Restaurant Object", room.name)
            room["orders_count"] = t.orders_count
        return rooms

    @staticmethod
    def add_room():
        # Create and persist a new Room, named sequentially from the current
        # room count.
        room = frappe.new_doc("Restaurant Object")
        room.type = "Room"
        room.description = f"Room {(RestaurantManage().count_roms() + 1)}"
        room.save()
        return room

    @staticmethod
    def count_roms():
        # NOTE(review): "roms" looks like a typo for "rooms"; kept as-is
        # because add_room depends on this name.
        return frappe.db.count("Restaurant Object", filters={"type": "Room"})

    @staticmethod
    def listener(data):
        # Build the realtime payload for the manage page: per-table and
        # per-room counts of orders in "Attending" status, production-center
        # queue sizes, and the in-process item list.
        for d in data:
            if len(data[d]["data"]) == 0:
                return data
            if d == "Table":
                # NOTE(review): the IN-list is built by interpolating
                # client-supplied names into the SQL string -- potential SQL
                # injection; consider parameterised queries
                # (frappe.db.sql(..., values)).
                cond = "and `table` in (%s)" % (', '.join([f"'{row}'" for row in data[d]["data"]]))
                oc = frappe.db.sql(f"""
                    SELECT `table` as name, count(`table`) as count
                    FROM `tabTable Order`
                    WHERE status = 'Attending' {cond}
                    GROUP by `table`
                """, as_dict=True)
                for o in oc:
                    data[d]["data"][o.name]["count"] = o.count
            if d == "Room":
                # Same shape as the Table branch, keyed by room instead.
                cond = "and `room` in (%s)" % (', '.join([f"'{row}'" for row in data[d]["data"]]))
                oc = frappe.db.sql(f"""
                    SELECT `room` as name, count(`room`) as count
                    FROM `tabTable Order`
                    WHERE status = 'Attending' {cond}
                    GROUP by `room`
                """, as_dict=True)
                for o in oc:
                    data[d]["data"][o.name]["count"] = o.count
            if d == "Production Center":
                for pc in data[d]["data"]:
                    production_center = frappe.get_doc("Restaurant Object", pc)
                    data[d]["data"][pc]["count"] = production_center.orders_count_in_production_center
            if d == "Process":
                # Items whose status is managed by this production center and
                # whose item group it serves.
                production_center = frappe.get_doc("Restaurant Object", data[d]["data"])
                status_managed = production_center.status_managed
                filters = {
                    "status": ("in", [item.status_managed for item in status_managed]),
                    "item_group": ("in", production_center._items_group),
                    "parent": ("!=", "")
                }
                # NOTE(review): this rebinds `data` to a Process-only payload
                # while still iterating the original dict -- confirm Process is
                # meant to be exclusive of the other keys.
                data = dict(Process=frappe.get_all("Order Entry Item", "identifier,status", filters=filters))
        return data
@frappe.whitelist()
def get_rooms():
    # Whitelisted endpoint: list all rooms with their open-order counts.
    return RestaurantManage().get_rooms()
@frappe.whitelist()
def add_room(client=None):
    # Create a new room and broadcast the refreshed room list to all clients;
    # `client` identifies the originating session so its UI can focus the
    # newly created room.
    frappe.publish_realtime("check_rooms", dict(
        client=client,
        current_room=RestaurantManage().add_room().name,
        rooms=RestaurantManage().get_rooms()
    ))
@frappe.whitelist(allow_guest=True)
def get_work_station():
    # Return the first Work Station together with its linked POS Profile.
    # NOTE(review): guest-accessible and assumes at least one Work Station
    # exists (IndexError otherwise) -- confirm both are intentional.
    work_stations = frappe.get_list("Work Station")
    work_station = frappe.get_doc("Work Station", work_stations[0].name)
    return {
        "work_station": work_station,
        "pos_profile": frappe.get_doc("POS Profile", work_station.pos_profile)
    }
@frappe.whitelist()
def listeners(args):
    # Decode the JSON payload sent by the page and delegate to
    # RestaurantManage.listener.
    import json
    return RestaurantManage().listener(json.loads(args))
@frappe.whitelist()
def get_settings_data():
    # Expose the Restaurant Settings singleton's settings payload.
    restaurant_settings = frappe.get_single("Restaurant Settings")
    return restaurant_settings.settings_data()
def pos_profile_data():
    # POS profile payload derived from the Restaurant Settings singleton.
    restaurant_settings = frappe.get_single("Restaurant Settings")
    return restaurant_settings.pos_profile_data()
def set_pos_profile(doc, method=None):
frappe.publish_realtime("pos_profile_update", pos_profile_data())
def notify_to_check_command(command_foods):
frappe.publish_realtime("notify_to_check_order_data", dict(
commands_foods=command_foods
))
def debug_data(data):
frappe.publish_realtime("debug_data", data)
@frappe.whitelist()
def get_items(start, page_length, price_list, item_group, pos_profile, search_value=""):
data = dict()
result = []
allow_negative_stock = frappe.db.get_single_value('Stock Settings', 'allow_negative_stock')
warehouse, hide_unavailable_items = frappe.db.get_value('POS Profile', pos_profile,
['warehouse', 'hide_unavailable_items'])
if not frappe.db.exists('Item Group', item_group):
item_group = get_root_of('Item Group')
if search_value:
data = search_serial_or_batch_or_barcode_number(search_value)
item_code = data.get("item_code") if data.get("item_code") else search_value
serial_no = data.get("serial_no") if data.get("serial_no") else ""
batch_no = data.get("batch_no") if data.get("batch_no") else ""
barcode = data.get("barcode") if data.get("barcode") else ""
if data:
item_info = frappe.db.get_value(
"Item", data.get("item_code"),
["name as item_code", "item_name", "description", "stock_uom", "image as item_image", "is_stock_item"]
, as_dict=1)
item_info.setdefault('serial_no', serial_no)
item_info.setdefault('batch_no', batch_no)
item_info.setdefault('barcode', barcode)
return {'items': [item_info]}
condition = get_conditions(item_code, serial_no, batch_no, barcode)
condition += get_item_group_condition(pos_profile)
lft, rgt = frappe.db.get_value('Item Group', item_group, ['lft', 'rgt'])
bin_join_selection, bin_join_condition = "", ""
if hide_unavailable_items:
bin_join_selection = ", `tabBin` bin"
bin_join_condition = "AND bin.warehouse = %(warehouse)s AND bin.item_code = item.name AND bin.actual_qty > 0"
items_data = frappe.db.sql("""
SELECT
item.name AS item_code,
item.item_name,
item.description,
item.stock_uom,
item.image AS item_image,
item.is_stock_item
FROM
`tabItem` item {bin_join_selection}
WHERE
item.disabled = 0
AND item.has_variants = 0
AND item.is_sales_item = 1
AND item.is_fixed_asset = 0
AND item.item_group in (SELECT name FROM `tabItem Group` WHERE lft >= {lft} AND rgt <= {rgt})
AND {condition}
{bin_join_condition}
ORDER BY
item.name asc
LIMIT
{start}, {page_length}"""
.format(
start=start,
page_length=page_length,
lft=lft,
rgt=rgt,
condition=condition,
bin_join_selection=bin_join_selection,
bin_join_condition=bin_join_condition
), {'warehouse': warehouse}, as_dict=1)
if items_data:
items = [d.item_code for d in items_data]
item_prices_data = frappe.get_all("Item Price",
fields=["item_code", "price_list_rate", "currency"],
filters={'price_list': price_list, 'item_code': ['in', items]})
item_prices = {}
for d in item_prices_data:
item_prices[d.item_code] = d
for item in items_data:
item_code = item.item_code
item_price = item_prices.get(item_code) or {}
if allow_negative_stock:
item_stock_qty = \
frappe.db.sql("""select ifnull(sum(actual_qty), 0) from `tabBin` where item_code = %s""", item_code)[0][
0]
else:
item_stock_qty = get_stock_availability(item_code, warehouse)
row = {}
row.update(item)
row.update({
'price_list_rate': item_price.get('price_list_rate'),
'currency': item_price.get('currency'),
'actual_qty': item_stock_qty,
})
result.append(row)
res = {
'items': result
}
return res
def get_conditions(item_code, serial_no, batch_no, barcode):
if serial_no or batch_no or barcode:
return "item.name = {0}".format(frappe.db.escape(item_code))
return """(item.name like {item_code}
or item.item_name like {item_code})""".format(item_code = frappe.db.escape('%' + item_code + '%'))
def get_item_group_condition(pos_profile):
cond = "and 1=1"
item_groups = get_item_groups(pos_profile)
if item_groups:
cond = "and item.item_group in (%s)"%(', '.join(['%s']*len(item_groups)))
return cond % tuple(item_groups) | from __future__ import unicode_literals
import frappe
# NOTE(review): get_root_of and search_serial_or_batch_or_barcode_number are
# referenced by get_items() below but were never imported (NameError at
# runtime). These are their ERPNext/Frappe v13 locations — confirm against
# the installed version.
from frappe.utils.nestedset import get_root_of

from erpnext.accounts.doctype.pos_invoice.pos_invoice import get_stock_availability
from erpnext.accounts.doctype.pos_profile.pos_profile import get_item_groups
from erpnext.selling.page.point_of_sale.point_of_sale import search_serial_or_batch_or_barcode_number
class RestaurantManage:
    """Server-side helpers for the restaurant POS: room management and live
    occupancy/order counters for tables, rooms and production centers."""
    @staticmethod
    def production_center_notify(status):
        # Re-synchronize every production center ("Restaurant Object" parent)
        # that manages at least one of the given statuses.
        object_in_status = frappe.get_list("Status Managed Production Center", "parent", filters={
            "parentType": "Restaurant Object",
            "status_managed": ("in", status)
        })
        for item in object_in_status:
            obj = frappe.get_doc("Restaurant Object", item.parent)
            obj.synchronize()
    @staticmethod
    def get_rooms():
        """Return all Room objects, each annotated with its open-order count."""
        rooms = frappe.get_list("Restaurant Object", "name, description", filters={
            "type": "Room"
        })
        for room in rooms:
            # N+1 lookup: one full get_doc per room just to read the
            # computed orders_count property.
            t = frappe.get_doc("Restaurant Object", room.name)
            room["orders_count"] = t.orders_count
        return rooms
    @staticmethod
    def add_room():
        """Create, save and return a new Room with an auto-numbered label."""
        room = frappe.new_doc("Restaurant Object")
        room.type = "Room"
        room.description = f"Room {(RestaurantManage().count_roms() + 1)}"
        room.save()
        return room
    @staticmethod
    def count_roms():
        # (sic: "roms") Number of existing Room objects.
        return frappe.db.count("Restaurant Object", filters={"type": "Room"})
    @staticmethod
    def listener(data):
        """Fill in live counters for the sections requested in *data*.

        *data* maps a section name ("Table", "Room", "Production Center" or
        "Process") to {"data": {...}}; counts are written back in place and
        *data* is returned.
        """
        for d in data:
            # NOTE(review): an empty section aborts the whole loop, skipping
            # any remaining sections — confirm this short-circuit is intended.
            if len(data[d]["data"]) == 0:
                return data
            if d == "Table":
                # Keys of data[d]["data"] are internal docnames, not raw user
                # input, so the interpolation below is bounded; parameterized
                # SQL would still be safer.
                cond = "and `table` in (%s)" % (', '.join([f"'{row}'" for row in data[d]["data"]]))
                oc = frappe.db.sql(f"""
                    SELECT `table` as name, count(`table`) as count
                    FROM `tabTable Order`
                    WHERE status = 'Attending' {cond}
                    GROUP by `table`
                """, as_dict=True)
                for o in oc:
                    data[d]["data"][o.name]["count"] = o.count
            if d == "Room":
                cond = "and `room` in (%s)" % (', '.join([f"'{row}'" for row in data[d]["data"]]))
                oc = frappe.db.sql(f"""
                    SELECT `room` as name, count(`room`) as count
                    FROM `tabTable Order`
                    WHERE status = 'Attending' {cond}
                    GROUP by `room`
                """, as_dict=True)
                for o in oc:
                    data[d]["data"][o.name]["count"] = o.count
            if d == "Production Center":
                for pc in data[d]["data"]:
                    production_center = frappe.get_doc("Restaurant Object", pc)
                    data[d]["data"][pc]["count"] = production_center.orders_count_in_production_center
            if d == "Process":
                # Here data[d]["data"] is a single production-center name.
                production_center = frappe.get_doc("Restaurant Object", data[d]["data"])
                status_managed = production_center.status_managed
                filters = {
                    "status": ("in", [item.status_managed for item in status_managed]),
                    "item_group": ("in", production_center._items_group),
                    "parent": ("!=", "")
                }
                # NOTE(review): rebinding *data* replaces the whole payload;
                # a later iteration would then KeyError — presumably "Process"
                # is only ever requested on its own; confirm.
                data = dict(Process=frappe.get_all("Order Entry Item", "identifier,status", filters=filters))
        return data
@frappe.whitelist()
def get_rooms():
    """Whitelisted endpoint: list all rooms with their open-order counts."""
    return RestaurantManage.get_rooms()
@frappe.whitelist()
def add_room(client=None):
    """Create a new room and broadcast the refreshed room list to clients."""
    # Create the room first so the subsequent room listing includes it.
    new_room = RestaurantManage().add_room()
    payload = dict(
        client=client,
        current_room=new_room.name,
        rooms=RestaurantManage().get_rooms()
    )
    frappe.publish_realtime("check_rooms", payload)
@frappe.whitelist(allow_guest=True)
def get_work_station():
    # Guest-accessible endpoint returning the first Work Station and its
    # linked POS Profile.
    # NOTE(review): raises IndexError when no Work Station exists — consider
    # a clearer error message.
    work_stations = frappe.get_list("Work Station")
    work_station = frappe.get_doc("Work Station", work_stations[0].name)
    return {
        "work_station": work_station,
        "pos_profile": frappe.get_doc("POS Profile", work_station.pos_profile)
    }
@frappe.whitelist()
def listeners(args):
    # *args* arrives as a JSON string from the client; decode and delegate to
    # RestaurantManage.listener, which fills in the live counters.
    import json
    return RestaurantManage().listener(json.loads(args))
@frappe.whitelist()
def get_settings_data():
    """Whitelisted endpoint: serialized data of the Restaurant Settings single doc."""
    return frappe.get_single("Restaurant Settings").settings_data()
def pos_profile_data():
    """POS-profile payload derived from the Restaurant Settings single doc."""
    return frappe.get_single("Restaurant Settings").pos_profile_data()
def set_pos_profile(doc, method=None):
    # Doc-event hook (hence the unused doc/method signature): push the
    # refreshed POS profile data to all connected clients.
    frappe.publish_realtime("pos_profile_update", pos_profile_data())
def notify_to_check_command(command_foods):
    """Broadcast the given command foods so clients revalidate their order data."""
    payload = {"commands_foods": command_foods}
    frappe.publish_realtime("notify_to_check_order_data", payload)
def debug_data(data):
    # Development helper: mirror an arbitrary payload to the "debug_data"
    # realtime channel.
    frappe.publish_realtime("debug_data", data)
@frappe.whitelist()
def get_items(start, page_length, price_list, item_group, pos_profile, search_value=""):
    """Paginated POS item search with prices and stock quantities.

    An exact serial-no / batch-no / barcode hit returns that single item
    immediately; otherwise items matching *search_value* within *item_group*
    (and the POS profile's item groups) are returned a page at a time.
    """
    data = dict()
    result = []
    allow_negative_stock = frappe.db.get_single_value('Stock Settings', 'allow_negative_stock')
    warehouse, hide_unavailable_items = frappe.db.get_value('POS Profile', pos_profile,
        ['warehouse', 'hide_unavailable_items'])
    if not frappe.db.exists('Item Group', item_group):
        # Unknown group: fall back to the root of the Item Group tree.
        item_group = get_root_of('Item Group')
    if search_value:
        # May resolve the term to an item via serial no / batch no / barcode.
        data = search_serial_or_batch_or_barcode_number(search_value)
    item_code = data.get("item_code") if data.get("item_code") else search_value
    serial_no = data.get("serial_no") if data.get("serial_no") else ""
    batch_no = data.get("batch_no") if data.get("batch_no") else ""
    barcode = data.get("barcode") if data.get("barcode") else ""
    if data:
        # Exact scan hit: short-circuit with the single resolved item.
        item_info = frappe.db.get_value(
            "Item", data.get("item_code"),
            ["name as item_code", "item_name", "description", "stock_uom", "image as item_image", "is_stock_item"]
            , as_dict=1)
        # NOTE(review): item_info is None if the item no longer exists, which
        # would raise AttributeError below — confirm acceptable.
        item_info.setdefault('serial_no', serial_no)
        item_info.setdefault('batch_no', batch_no)
        item_info.setdefault('barcode', barcode)
        return {'items': [item_info]}
    # condition is built from frappe.db.escape()d values, so it is safe to
    # interpolate into the SQL below.
    condition = get_conditions(item_code, serial_no, batch_no, barcode)
    condition += get_item_group_condition(pos_profile)
    lft, rgt = frappe.db.get_value('Item Group', item_group, ['lft', 'rgt'])
    bin_join_selection, bin_join_condition = "", ""
    if hide_unavailable_items:
        # Restrict to items with positive stock in the profile's warehouse.
        bin_join_selection = ", `tabBin` bin"
        bin_join_condition = "AND bin.warehouse = %(warehouse)s AND bin.item_code = item.name AND bin.actual_qty > 0"
    # NOTE(review): start/page_length come straight from the API call and are
    # str.format()ed into the LIMIT clause — cast them with cint() to rule
    # out SQL injection.
    items_data = frappe.db.sql("""
        SELECT
            item.name AS item_code,
            item.item_name,
            item.description,
            item.stock_uom,
            item.image AS item_image,
            item.is_stock_item
        FROM
            `tabItem` item {bin_join_selection}
        WHERE
            item.disabled = 0
            AND item.has_variants = 0
            AND item.is_sales_item = 1
            AND item.is_fixed_asset = 0
            AND item.item_group in (SELECT name FROM `tabItem Group` WHERE lft >= {lft} AND rgt <= {rgt})
            AND {condition}
            {bin_join_condition}
        ORDER BY
            item.name asc
        LIMIT
            {start}, {page_length}"""
        .format(
            start=start,
            page_length=page_length,
            lft=lft,
            rgt=rgt,
            condition=condition,
            bin_join_selection=bin_join_selection,
            bin_join_condition=bin_join_condition
        ), {'warehouse': warehouse}, as_dict=1)
    if items_data:
        items = [d.item_code for d in items_data]
        item_prices_data = frappe.get_all("Item Price",
            fields=["item_code", "price_list_rate", "currency"],
            filters={'price_list': price_list, 'item_code': ['in', items]})
        item_prices = {}
        for d in item_prices_data:
            item_prices[d.item_code] = d
        for item in items_data:
            item_code = item.item_code
            item_price = item_prices.get(item_code) or {}
            if allow_negative_stock:
                # Sum across all warehouses when negative stock is allowed.
                item_stock_qty = \
                    frappe.db.sql("""select ifnull(sum(actual_qty), 0) from `tabBin` where item_code = %s""", item_code)[0][
                        0]
            else:
                item_stock_qty = get_stock_availability(item_code, warehouse)
            row = {}
            row.update(item)
            row.update({
                'price_list_rate': item_price.get('price_list_rate'),
                'currency': item_price.get('currency'),
                'actual_qty': item_stock_qty,
            })
            result.append(row)
    res = {
        'items': result
    }
    return res
def get_conditions(item_code, serial_no, batch_no, barcode):
    """Build the item-matching SQL fragment for get_items.

    frappe.db.escape returns a quoted literal, so the interpolations below
    are injection-safe.
    """
    if serial_no or batch_no or barcode:
        # Exact scan hit: match the resolved item code only.
        return "item.name = {0}".format(frappe.db.escape(item_code))
    # Fuzzy search on either the code or the display name.
    return """(item.name like {item_code}
        or item.item_name like {item_code})""".format(item_code = frappe.db.escape('%' + item_code + '%'))
def get_item_group_condition(pos_profile):
cond = "and 1=1"
item_groups = get_item_groups(pos_profile)
if item_groups:
cond = "and item.item_group in (%s)"%(', '.join(['%s']*len(item_groups)))
return cond % tuple(item_groups) | en | 0.460527 | SELECT `table` as name, count(`table`) as count FROM `tabTable Order` WHERE status = 'Attending' {cond} GROUP by `table` SELECT `room` as name, count(`room`) as count FROM `tabTable Order` WHERE status = 'Attending' {cond} GROUP by `room` SELECT item.name AS item_code, item.item_name, item.description, item.stock_uom, item.image AS item_image, item.is_stock_item FROM `tabItem` item {bin_join_selection} WHERE item.disabled = 0 AND item.has_variants = 0 AND item.is_sales_item = 1 AND item.is_fixed_asset = 0 AND item.item_group in (SELECT name FROM `tabItem Group` WHERE lft >= {lft} AND rgt <= {rgt}) AND {condition} {bin_join_condition} ORDER BY item.name asc LIMIT {start}, {page_length} select ifnull(sum(actual_qty), 0) from `tabBin` where item_code = %s (item.name like {item_code} or item.item_name like {item_code}) | 1.824259 | 2 |
tests/templates/test_configure_app.py | neumann-nico/dash-labs | 1 | 6621785 | <gh_stars>1-10
from ..fixtures import app, test_template
# Helpers
def num_stylesheets(app):
return sum("test_stylesheet" in url for url in app.config.external_stylesheets)
def num_css_class_entries(app):
return app.index_string.count(".test-css-class")
def test_configure_app_resources(app, test_template):
# Check that custom stylesheet is not present initially
assert num_stylesheets(app) == 0
# Check stylesheet added upon template.layout
test_template._configure_app(app)
assert num_stylesheets(app) == 1
# Check that configuration is idempotent
test_template._configure_app(app)
assert num_stylesheets(app) == 1
def test_configure_app_inline_css(app, test_template):
# Check that special css class is not present initially
assert num_css_class_entries(app) == 0
# Check inline css added upon template.layout
test_template._configure_app(app)
assert num_css_class_entries(app) == 1
# Check that adding inline css is idempotent
test_template._configure_app(app)
assert num_css_class_entries(app) == 1
def test_configure_app_with_none_css(app, test_template):
# Check that special css class is not present initially
assert num_css_class_entries(app) == 0
# Blank out inline css
original_index_string = app.index_string
type(test_template)._inline_css = None
# Check that nothing crashes and css is not modified
test_template._configure_app(app)
assert num_css_class_entries(app) == 0
assert app.index_string == original_index_string
| from ..fixtures import app, test_template
# Helpers
def num_stylesheets(app):
    """Count the app's external stylesheets whose URL mentions ``test_stylesheet``."""
    matching = [url for url in app.config.external_stylesheets if "test_stylesheet" in url]
    return len(matching)
def num_css_class_entries(app):
    """Count occurrences of the ``.test-css-class`` selector in the app's index page."""
    selector = ".test-css-class"
    return app.index_string.count(selector)
def test_configure_app_resources(app, test_template):
    """_configure_app must register the template stylesheet exactly once."""
    # Check that custom stylesheet is not present initially
    assert num_stylesheets(app) == 0
    # Check stylesheet added upon template.layout
    test_template._configure_app(app)
    assert num_stylesheets(app) == 1
    # Check that configuration is idempotent
    test_template._configure_app(app)
    assert num_stylesheets(app) == 1
def test_configure_app_inline_css(app, test_template):
    """_configure_app must inject the template's inline CSS exactly once."""
    # Check that special css class is not present initially
    assert num_css_class_entries(app) == 0
    # Check inline css added upon template.layout
    test_template._configure_app(app)
    assert num_css_class_entries(app) == 1
    # Check that adding inline css is idempotent
    test_template._configure_app(app)
    assert num_css_class_entries(app) == 1
def test_configure_app_with_none_css(app, test_template):
    """A template whose _inline_css is None must leave the index untouched."""
    # Check that special css class is not present initially
    assert num_css_class_entries(app) == 0
    # Blank out inline css
    original_index_string = app.index_string
    type(test_template)._inline_css = None
    # Check that nothing crashes and css is not modified
    test_template._configure_app(app)
    assert num_css_class_entries(app) == 0
    assert app.index_string == original_index_string
src/python/lc_download.py | learnclang/2-terminal-downloader | 1 | 6621786 | """Download file from a terminal
Contents:
- Communicating via HTTP
- Opening a file
- Writing to a file
Usage:
$ lc_download.py http://google.com
"""
import os
import urllib2
def download(url):
"""Download into ram, then disk"""
response = urllib2.urlopen(url)
destination = os.path.basename(url)
with open(destination, "wb") as f:
f.write(response.read())
if __name__ == '__main__':
import sys
try:
url = sys.argv[1]
except:
print "Please enter a URL"
sys.exit()
download(url)
| """Download file from a terminal
Contents:
- Communicating via HTTP
- Opening a file
- Writing to a file
Usage:
$ lc_download.py http://google.com
"""
import os
import urllib2
def download(url):
    """Download *url* into memory, then write it to the current directory.

    The destination file is named after the last path segment of the URL.
    """
    response = urllib2.urlopen(url)
    try:
        destination = os.path.basename(url)
        with open(destination, "wb") as f:
            f.write(response.read())
    finally:
        # urllib2 responses are not context managers on Python 2; close
        # explicitly so the socket is not leaked if the write fails.
        response.close()
if __name__ == '__main__':
import sys
try:
url = sys.argv[1]
except:
print "Please enter a URL"
sys.exit()
download(url)
| en | 0.752284 | Download file from a terminal Contents: - Communicating via HTTP - Opening a file - Writing to a file Usage: $ lc_download.py http://google.com Download into ram, then disk | 3.9141 | 4 |
{{cookiecutter.app_slug}}/{{cookiecutter.app_slug}}/actors.py | epandurski/cookiecutter-flask-signalbus | 0 | 6621787 | <reponame>epandurski/cookiecutter-flask-signalbus<gh_stars>0
from .extensions import broker
# Define your asynchronous actors here. For example:
#
# @broker.actor
# def process_job(param):
# pass
| from .extensions import broker
# Define your asynchronous actors here. For example:
#
# @broker.actor
# def process_job(param):
# pass | en | 0.674129 | # Define your asynchronous actors here. For example: # # @broker.actor # def process_job(param): # pass | 1.287648 | 1 |
report_gpu_metrics.py | howardwang15/gcp-gpu-monitoring | 0 | 6621788 | <filename>report_gpu_metrics.py
# Copyright 2021 <NAME>
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTICE: This file has been modified by <NAME> to add support for versions of the Python google-cloud-monitoring
# library >= 2.0.0
import time
import socket
import subprocess
import requests
import csv
import datetime
from google.cloud import monitoring_v3
from google.protobuf.timestamp_pb2 import Timestamp
metadata_server = "http://metadata/computeMetadata/v1/instance/"
metadata_flavor = {'Metadata-Flavor' : 'Google'}
data = requests.get(metadata_server + 'zone', headers = metadata_flavor).text
zone = data.split("/")[3]
project_id = data.split("/")[1]
client = monitoring_v3.MetricServiceClient()
project_name = client.common_project_path(project_id)
instance_id = requests.get(metadata_server + 'id', headers = metadata_flavor).text
def report_metric(value, t, instance_id, zone, project_id):
series = monitoring_v3.types.TimeSeries()
series.metric.type = 'custom.googleapis.com/{type}'.format(type=t)
series.resource.type = 'gce_instance'
series.resource.labels['instance_id'] = instance_id
series.resource.labels['zone'] = zone
series.resource.labels['project_id'] = project_id
now = time.time()
seconds = int(now)
nanos = int((now - seconds) * 10 ** 9)
interval = monitoring_v3.TimeInterval({ 'end_time': { 'seconds': seconds, 'nanos': nanos }})
point = monitoring_v3.Point()
point.value.int64_value = value
point.interval = interval
series.points.append(point)
client.create_time_series(request={'name': project_name, 'time_series': [series]})
def get_nvidia_smi_utilization(gpu_query_name):
csv_file_path = '/tmp/gpu_utilization.csv'
usage = 0
length = 0
subprocess.check_call(['/bin/bash', '-c',
'nvidia-smi --query-gpu={} -u --format=csv'
' > {}'.format(gpu_query_name, csv_file_path)])
with open(csv_file_path) as csvfile:
utilizations = csv.reader(csvfile, delimiter=' ')
for row in utilizations:
length += 1
if length > 1:
usage += int(row[0])
return int(usage / (length - 1))
def get_gpu_utilization():
return get_nvidia_smi_utilization("utilization.gpu")
def get_gpu_memory_utilization():
return get_nvidia_smi_utilization("utilization.memory")
GPU_UTILIZATION_METRIC_NAME = "gpu_utilization"
GPU_MEMORY_UTILIZATION_METRIC_NAME = "gpu_memory_utilization"
while True:
report_metric(get_gpu_utilization(),
GPU_UTILIZATION_METRIC_NAME,
instance_id,
zone,
project_id)
report_metric(get_gpu_memory_utilization(),
GPU_MEMORY_UTILIZATION_METRIC_NAME,
instance_id,
zone,
project_id)
time.sleep(30)
| <filename>report_gpu_metrics.py
# Copyright 2021 <NAME>
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTICE: This file has been modified by <NAME> to add support for versions of the Python google-cloud-monitoring
# library >= 2.0.0
import time
import socket
import subprocess
import requests
import csv
import datetime
from google.cloud import monitoring_v3
from google.protobuf.timestamp_pb2 import Timestamp
# GCE metadata server; the Metadata-Flavor header is required by the server.
metadata_server = "http://metadata/computeMetadata/v1/instance/"
metadata_flavor = {'Metadata-Flavor' : 'Google'}
# The zone value has the form "projects/<id>/zones/<zone>", so index 3 is the
# zone name and index 1 the project id — TODO confirm on a target VM.
data = requests.get(metadata_server + 'zone', headers = metadata_flavor).text
zone = data.split("/")[3]
project_id = data.split("/")[1]
# Shared Cloud Monitoring client/resources used by report_metric below.
client = monitoring_v3.MetricServiceClient()
project_name = client.common_project_path(project_id)
instance_id = requests.get(metadata_server + 'id', headers = metadata_flavor).text
def report_metric(value, t, instance_id, zone, project_id):
    """Write one int64 point for custom metric *t* to Cloud Monitoring.

    The point is attached to the gce_instance monitored resource identified
    by *instance_id*/*zone*/*project_id*, stamped with the current time, and
    sent via the module-level `client`/`project_name`.
    """
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = 'custom.googleapis.com/{type}'.format(type=t)
    series.resource.type = 'gce_instance'
    series.resource.labels['instance_id'] = instance_id
    series.resource.labels['zone'] = zone
    series.resource.labels['project_id'] = project_id
    # Split the wall-clock time into the seconds/nanos pair the API expects.
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10 ** 9)
    interval = monitoring_v3.TimeInterval({ 'end_time': { 'seconds': seconds, 'nanos': nanos }})
    point = monitoring_v3.Point()
    point.value.int64_value = value
    point.interval = interval
    series.points.append(point)
    client.create_time_series(request={'name': project_name, 'time_series': [series]})
def get_nvidia_smi_utilization(gpu_query_name):
    """Average the given nvidia-smi metric (e.g. ``utilization.gpu``) over all GPUs.

    Runs ``nvidia-smi --query-gpu=<name>`` into a temporary CSV (first row is
    the header) and averages the per-GPU integer values. Returns 0 when no
    GPU rows are reported instead of dividing by zero.
    """
    csv_file_path = '/tmp/gpu_utilization.csv'
    usage = 0
    length = 0
    subprocess.check_call(['/bin/bash', '-c',
                           'nvidia-smi --query-gpu={} -u --format=csv'
                           ' > {}'.format(gpu_query_name, csv_file_path)])
    with open(csv_file_path) as csvfile:
        utilizations = csv.reader(csvfile, delimiter=' ')
        for row in utilizations:
            length += 1
            if length > 1:  # skip the CSV header row
                usage += int(row[0])
    if length <= 1:
        # nvidia-smi produced only the header (no GPUs visible); previously
        # this raised ZeroDivisionError.
        return 0
    return int(usage / (length - 1))
def get_gpu_utilization():
    # Average GPU core utilization (%) across all GPUs.
    return get_nvidia_smi_utilization("utilization.gpu")
def get_gpu_memory_utilization():
    # Average GPU memory utilization (%) across all GPUs.
    return get_nvidia_smi_utilization("utilization.memory")
# Custom metric names, published under custom.googleapis.com/ by report_metric.
GPU_UTILIZATION_METRIC_NAME = "gpu_utilization"
GPU_MEMORY_UTILIZATION_METRIC_NAME = "gpu_memory_utilization"
# Report both metrics forever, once every 30 seconds.
while True:
    report_metric(get_gpu_utilization(),
                  GPU_UTILIZATION_METRIC_NAME,
                  instance_id,
                  zone,
                  project_id)
    report_metric(get_gpu_memory_utilization(),
                  GPU_MEMORY_UTILIZATION_METRIC_NAME,
                  instance_id,
                  zone,
                  project_id)
    time.sleep(30)
| en | 0.859494 | # Copyright 2021 <NAME> # Copyright 2019 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTICE: This file has been modified by <NAME> to add support for versions of the Python google-cloud-monitoring # library >= 2.0.0 | 2.277846 | 2 |
Mentor_Meeting_Application-aditya/django_school/classroom/migrations/0003_quiz_report.py | AdityaMalani/Mentor-Meet | 2 | 6621789 | # Generated by Django 2.0.1 on 2018-11-01 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classroom', '0002_create_initial_subjects'),
]
operations = [
migrations.AddField(
model_name='quiz',
name='report',
field=models.FileField(default=None, upload_to=''),
preserve_default=False,
),
]
| # Generated by Django 2.0.1 on 2018-11-01 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a mandatory ``report`` file-upload field to the Quiz model."""
    dependencies = [
        ('classroom', '0002_create_initial_subjects'),
    ]
    operations = [
        migrations.AddField(
            model_name='quiz',
            name='report',
            # NOTE(review): default=None with preserve_default=False fills
            # existing rows with NULL on a non-nullable FileField — confirm
            # the quiz table was empty when this migration ran.
            field=models.FileField(default=None, upload_to=''),
            preserve_default=False,
        ),
    ]
| en | 0.860134 | # Generated by Django 2.0.1 on 2018-11-01 10:38 | 1.646481 | 2 |
code/ofa.py | iun1xmd5/ofa | 1 | 6621790 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 04:07:50 2020
@author: c1ph3r
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tqdm
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_log_error, mean_absolute_error
from keras.models import Model
from keras.layers import LSTM, TimeDistributed, Input, Dense
from keras.layers.core import Lambda
from keras import backend as K
from keras.utils import plot_model
from tweek import set_size
#for saving models
from keras.models import model_from_yaml
from keras.models import model_from_json
#plotting modules
#import matplotlib as mpl
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
#plt.style.use('mpsty/mypaperstyle.mplstyle')
#mpl.use('pdf')
#import matplotlib.pyplot as plt
#from matplotlib import style
#style.use('seaborn-paper')
'''plt.rc('font', family= 'serif', serif= 'Times, Palatino, New Century Schoolbook, Bookman, Computer Modern Roman')
#plt.rc('font', family='sans-serif', sans_serif= 'Helvetica, Avant Garde, Computer Modern Sans serif')
plt.rc('font', family='sans-serif')
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
plt.rc('font', family='monospace', monospace= 'Courier, Computer Modern Typewriter')'''
plt.rc('font',**{'family':'serif','serif':['Times']})
plt.rc('text', usetex = True) # Use latex for text
plt.rc('xtick', labelsize = 14)
plt.rc('ytick', labelsize = 14)
plt.rc('axes', labelsize = 14)
plt.rc('axes', linewidth=1)
plt.rcParams['text.latex.preamble'] =[r'\boldmath']
params = {'legend.framealpha': 0.1,
'legend.handlelength': 0.8,
'legend.labelspacing':0.1,
'legend.fontsize' : 10}
plt.rcParams.update(params)
class Ofa:
    def __init__(self,
                 X,y,F,ts, q=0.05, batch_size=250, epochs=30, ly_one=128, ly_two=32, ext=True):
        """Quantile LSTM forecaster with optional exogenous features.

        Parameters
        ----------
        X : array, assumed shape (samples, seq_len, n_features) — TODO confirm
        y : target array aligned with X along the sample axis
        F : exogenous features, assumed (samples, seq_len, n_exo) — TODO confirm
        ts : train fraction in (0, 1); the first int(ts * len(X)) samples train
        q : lower quantile; the model also fits 0.5 and 1 - q
        batch_size, epochs : Keras training hyper-parameters
        ly_one, ly_two : units in the first and second LSTM layers
        ext : whether the exogenous features F are used
        """
        self.ext = ext
        self.split_ratio = ts
        self.X, self.y,self.F = X, y,F
        self.sequence_length = self.X.shape[1]
        self.ts = int(self.split_ratio* self.X.shape[0]) #for hrss 0.7495
        self.nl_1=ly_one
        self.nl_2=ly_two
        self.batch_size = batch_size
        self.epochs = epochs
        self.q=q
        # Training targets: first self.ts samples of y.
        self.y_train = self.y[0:self.ts]
    def fit(self, dropout=0.3, embedd=True):
        """Build the LSTM encoder, assemble the chosen feature tensor, and fit.

        Depending on ``self.ext``/*embedd*, one of four variants is trained:
        embedded+exogenous (FXe), raw+exogenous (FX), embedded only (Xe), or
        raw only (X). The fitted model, scaler and feature widths are stored
        on the instance for use by ``predictor``.
        """
        # fix random seed for reproducibility
        np.random.seed(7)
        # LSTM auto-encoder that maps X to nl_1-dim embeddings per timestep;
        # training=True keeps dropout active even at predict time (MC dropout).
        self.input_ae = Input(shape=(self.sequence_length, self.X.shape[2]))
        self.lstm1 = LSTM(self.nl_1, return_sequences=True, dropout=dropout)
        encoded_ae = self.lstm1(self.input_ae, training=True)
        self.encoder = Model(self.input_ae, encoded_ae)
        self.encoder.compile(optimizer='adam', loss='mean_absolute_error', metrics=['mse'])
        self.Xe = self.encoder.predict(self.X) # generate X feature vectors
        #self.ext=ext
        self.embedd=embedd
        if self.ext: #If exegenous features are available
            if embedd:#FXe
                dim1=self.F.shape[2]
                dim2=self.nl_1
                self.FXe = np.concatenate([self.Xe, self.F], axis=2) #concatenate X, F
                self.XFe_train = self.FXe[0:self.ts,:]
                self.XFe_test = self.FXe[self.ts:,:]
                self.hxfe, model, scaler= self.train(self.XFe_train, self.XFe_test, dim1,dim2)
                self.x_test=self.X[self.ts:,:]
                print('..Fitting exogenous and embedding vectors (XFe) done..')
            else:#FX ex with no embedding
                dim1=self.F.shape[2]
                dim2=self.X.shape[2]
                # NOTE(review): here the order is (F, X) while FXe above uses
                # (Xe, F); predictor() mirrors each layout, but confirm.
                self.FX = np.concatenate((self.F, self.X), axis=2)
                self.FX_train = self.FX[0:self.ts,:]
                self.FX_test = self.FX[self.ts:,:]
                self.hxf, model, scaler=self.train(self.FX_train, self.FX_test, dim1, dim2)
                self.x_test=self.X[self.ts:,:]
                print('...Fitting features (FX) done...')
        else:#No exogenous features
            if embedd:#Xe with embedding
                dim1=0
                dim2=self.nl_1
                self.Xe_train = self.Xe[0:self.ts,:]
                self.Xe_test= self.Xe[self.ts:,:]
                self.hxe, model, scaler=self.train(self.Xe_train, self.Xe_test, dim1, dim2)
                self.x_test=self.X[self.ts:,:]
                print('.. Fitting embending vectors (Xe) done..')
            else:#No ex and no embedding
                dim1= 0
                dim2=self.X.shape[2]
                self.X_train = self.X[0:self.ts,:]
                self.X_test = self.X[self.ts:,:]
                self.hx, model, scaler = self.train(self.X_train, self.X_test, dim1, dim2)
                #self.x_test=self.X[self.ts:,:]
                print('..fitting features (X) done..')
        # Expose the fitted artifacts for predictor().
        self.model, self.scaler, self.dim1, self.dim2 = model, scaler, dim1, dim2
        #return self.model, self.scaler,self.dim1, self.dim2
    def train(self, Xtr,Xte,di1,dim2):
        """Standard-scale the feature tensors and fit a 3-head quantile LSTM.

        *di1*/*dim2* are the exogenous and base feature widths (each timestep
        has ``di1 + dim2`` columns). Returns ``(history, model, scaler)``.
        """
        scaler = StandardScaler()
        Xtr = scaler.fit_transform(Xtr.reshape(-1,dim2+di1)).reshape(-1,self.sequence_length,dim2+di1)
        # NOTE(review): the scaled Xte is never used below — predictor()
        # re-scales the test split itself, so this line looks redundant.
        Xte = scaler.transform(Xte.reshape(-1,dim2+di1)).reshape(-1,self.sequence_length,dim2+di1)
        #self.XF_val_e = self.scaler1.transform(self.XF_val_e.reshape(-1,self.nl_1+dim)).reshape(-1,self.sequence_length,self.nl_1+dim)
        inputs = Input(shape=(Xtr.shape[1], Xtr.shape[2]))
        # training=True keeps dropout active at inference (MC dropout).
        lstm1 = LSTM(self.nl_1, return_sequences=True, dropout=0.3)(inputs, training=True)
        lstm2 = LSTM(self.nl_2, return_sequences=False, dropout=0.3)(lstm1, training=True)
        dense1 = Dense(50)(lstm2)
        # Three output heads: lower quantile q, median, upper quantile 1-q.
        out10 = Dense(1)(dense1)
        out50 = Dense(1)(dense1)
        out90 = Dense(1)(dense1)
        # NOTE(review): self.loss is not defined in this chunk — presumably a
        # pinball/quantile loss defined elsewhere in the class; confirm.
        self.losses = [lambda y,f:self.loss(self.q, y, f), lambda y,f:self.loss(0.5, y, f), lambda y,f: self.loss(1-self.q, y, f)]
        #out = Dense(1)(dense1)
        model = Model(inputs, [out10,out50,out90])
        model.compile(loss=self.losses, optimizer='adam', metrics=['mae'], loss_weights = [0.2,0.2,0.2])
        #model = Model(inputs, out)
        #model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['mae'])
        # The same y_train supervises all three quantile heads.
        history = model.fit(Xtr, [self.y_train, self.y_train, self.y_train], \
                            epochs=self.epochs, batch_size=self.batch_size, verbose=0, shuffle=True)
        '''self.history1 = self.model1.fit(self.XF_train_e, self.y_train, \
        validation_data=(self.XF_val_e,self.y_val),epochs=150, batch_size=128, verbose=1, shuffle=True)'''
        return history, model, scaler
'''def evaluate(self, choice='True'):
if choice:
print('validating......', end='\n')
self.val_score = self.model.fit(self.XF_val_noe, self.y_val, epochs=30, batch_size=128, verbose=1, shuffle=True)
else:
print('Testing......', end='\n')
self.test_score = self.model.fit(self.XF_test_noe, self.y_test, epochs=30, batch_size=128, verbose=1, shuffle=True)'''
    def predictor(self, ext, emb):
        """Stochastic forward pass over the test split.

        *ext*/*emb* must match the variant used in ``fit``. The Keras backend
        functions are invoked with learning_phase=1, so dropout stays active
        and repeated calls yield MC-dropout samples. Returns the list of the
        three quantile head outputs.
        """
        #self.model, self.scaler,self.dim1, self.dim2 =self._fit()
        if emb:
            if ext:
                # Encode X, append exogenous F, scale, then run the model.
                enc = K.function([self.encoder.layers[0].input, K.learning_phase()], [self.encoder.layers[-1].output])
                NN = K.function([self.model.layers[0].input, K.learning_phase()],
                                [self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
                enc_pred = np.vstack(enc([self.x_test,1]))
                enc_pred = np.concatenate([enc_pred, self.F[self.ts:,:]], axis=2)
                trans_pred = self.scaler.transform(enc_pred.reshape(-1,self.dim1+self.dim2)).reshape(-1,self.sequence_length,self.dim1+self.dim2)
                NN_pred = NN([trans_pred,1])
                #print('...quantile applied on XFe done...')
            else:
                # Embedding only: encode X, scale, run the model.
                enc = K.function([self.encoder.layers[0].input, K.learning_phase()], [self.encoder.layers[-1].output])
                NN = K.function([self.model.layers[0].input, K.learning_phase()],
                                [self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
                enc_pred = np.vstack(enc([self.x_test,1]))
                #enc_pred = np.concatenate([enc_pred, self.F[self.ts:,:]], axis=2)
                trans_pred = self.scaler.transform(enc_pred.reshape(-1,self.dim2)).reshape(-1,self.sequence_length,self.dim2)
                NN_pred = NN([trans_pred,1])
                #print('...quantile applied on Xe done...')
        else:
            if ext:
                # Raw features + exogenous: reuse the FX tensor built in fit().
                NN = K.function([self.model.layers[0].input, K.learning_phase()],
                                [self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
                #self.XF_train_noe = np.concatenate((self.x_test, self.F_test), axis=2)
                trans_pred = self.scaler.transform(self.FX[self.ts:,:].reshape(-1,self.dim2+ \
                    self.dim1)).reshape(-1,self.sequence_length,self.dim2+self.dim1)
                NN_pred = NN([trans_pred,1])
                #print('..quantile applied on XF done..')
            else:
                # Raw features only.
                NN = K.function([self.model.layers[0].input, K.learning_phase()],
                                [self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
                #self.XF_train_noe = np.concatenate((self.x_test, self.F_test), axis=2)
                trans_pred = self.scaler.transform(self.X[self.ts:,:].reshape(-1,self.dim2)).reshape(-1,self.sequence_length,self.dim2)
                NN_pred = NN([trans_pred,1])
                #print('..quantile applied on X done..')
        return NN_pred
def plot(self,name='testing'):
#plt.figure(figsize=(16,8))
#plt.xlabel(r'$\bf {running time(s)} x10$', fontsize=45, fontweight ='bold')
#plt.ylabel(r'$\bf {window size~}x100$', fontsize=45, fontweight ='bold')
#plt.xlabel(r'\textbf{execution time(s)} $x10$', fontsize=10, fontweight ='bold')
#plt.ylabel(r'$\psi~x10^2$', fontsize=10, fontweight ='bold')
fraction = 0.5
width = 512
fig, ax = plt.subplots(3,figsize=set_size(width, fraction), sharex='all',sharey='all', gridspec_kw={'hspace': 0.5})
ax[0].scatter(range(0,len(self.y_test)),self.y_test, s=50, marker='*', c='blue', alpha=0.7, label='test data')
ax[1].plot(self.error, c='green', label='scores', alpha=0.9, lw=2)
for i in range(2):
#ax[i].xaxis.set_major_locator(plt.LinearLocator(6))
#ax[i].yaxis.set_major_locator(plt.LinearLocator(3))
#ax[i].xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
#ax[i].yaxis.set_major_formatter(plt.FormatStrFormatter('%.1f'))
ax[i].yaxis.set_minor_locator(AutoMinorLocator(2))
#ax[i].xaxis.set_minor_locator(AutoMinorLocator(2))
#plot anomalies
plt.scatter(np.where(np.logical_or(self.y_test>self.y90, self.y_test<self.y10))[0],
self.anom_scores, c='red', s=50, marker='*', alpha=0.7)
plt.plot(self.error, c='green', alpha=0.9, lw=2)
fig.text(-0.005, 0.5, 'OFA scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
plt.xlabel(r'time')
#plt.ylabel(r'OFA score')
plt.tight_layout()
# plt.savefig('/media/c1ph3r/colls/Dropbox/_China/_Xidian/_6th/vldb/vldb_style_sample/latex/figures/'+name+'.pdf',
# format='pdf', bbox_inches='tight')
fraction = 0.5
width =510
fig, ax = plt.subplots(3,figsize=set_size(width, fraction), sharex='all', gridspec_kw={'hspace': 0.5})
ax[0].plot(self.y90, c='orange', alpha=0.9, lw=2)
ax[1].plot(self.y50, c='cyan', alpha=0.9, lw=2)
ax[2].plot(self.y10, c='purple', alpha=0.9, lw=2)
for i in range(3):
#ax[i].xaxis.set_major_locator(plt.LinearLocator(6))
ax[i].yaxis.set_major_locator(plt.LinearLocator(3))
# ax[i].xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
ax[i].yaxis.set_major_formatter(plt.FormatStrFormatter('%.5f'))
#ax[i].yaxis.set_minor_locator(AutoMinorLocator(1))
#ax[i].xaxis.set_minor_locator(AutoMinorLocator(2))
plt.xlabel(r'time')
#plt.ylim([-0.001, 0.001])
plt.tight_layout()
#fig.text(0.1, 0.5, 'scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
# plt.savefig('/latex/figures/credit.pdf',
# format='pdf',bbox_inches='tight')
def predict(self, X,y,F):
self.x_test, self.y_test, self.f_exogenous = X,y,F
#self.x_test, self.y_test = X,y
#tracking from the encoded states
x_enc = self.encoder.predict(self.x_test)
xf=np.concatenate((x_enc, self.f_exogenous), axis=2)
self.yhat = np.array(self.model.predict(xf))[:,]
#using predictor
self.scores= np.array(self.predictor(1,self.ext, self.embedd)) #lower, median and upper quantile scores
self.y10, self.y50, self.y90 = self.scores[0][:,0], self.scores[1][:,0], self.scores[2][:,0]
self.mean_s, self.std_s= np.mean(self.y50.mean(axis=0), axis=0), np.std(self.y50.mean(axis=0), axis=0)
self.anomaly_scores()
self.error= self.y_test-self.y50
def _predict(self, name='stock'):
fraction = 0.5
width =510
fig, ax = plt.subplots(3, figsize=set_size(width, fraction), sharex='all', gridspec_kw={'hspace': 0.5})
ax[0].plot(self.y10[28:], color='green', alpha=0.9)
ax[0].plot(self.y50[28:], color='blue', alpha=0.6)
ax[0].plot(self.y90[28:], color='purple', alpha=0.8)
ax[0].yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
ax[0].yaxis.set_major_locator(plt.LinearLocator(3))
ax[0].set_title('fitted model (blue) and thresholds')
#ax[0].setylabel(r'thresholds')
ax[1].plot(self.y_test[28:], alpha=0.9, lw=2, c='orange')
ax[1].yaxis.set_major_locator(plt.LinearLocator(3))
ax[1].yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
ax[1].set_title('test data')
ax[2].plot(self.anom_scores[28:], c='navy', label='scores', alpha=0.9, lw=2)
ax[2].yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
ax[2].yaxis.set_major_locator(plt.LinearLocator(3))
#ax[2].plot(self.id_anomaly)
ax[2].scatter(np.where(np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:]))[0],
self.anom_scores[28:][np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:])], c='red', s=50, marker='*', alpha=0.7)
# ax[2].scatter(np.where(np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:]))[0],
# self.anom_scores[28:][self.id_anomaly], c='red', s=50, marker='*', alpha=0.7) #best simplified version for marking anomalies
plt.ylim([-0.27,0.1])
ax[2].xaxis.set_major_locator(plt.LinearLocator(6))
fig.text(-0.06, 0.5, 'OFA scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
plt.xlabel(r'time', fontsize=plt.rcParams['axes.labelsize'])
plt.tight_layout()
#plot anomalies
#ax.xaxis.set_major_locator(plt.LinearLocator(6))
#ax.yaxis.set_major_locator(plt.LinearLocator(4))
#ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
#ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
#ax.yaxis.set_major_formatter(ScalarFormatter())
#ax.yaxis.major.formatter._useMathText = True
#ax.yaxis.major.formatter._useMathText = True
#ax.yaxis.set_minor_locator(AutoMinorLocator(1))
#plt.ylim([self.y10.min()-1, self.y90.max()+1])
#plt.legend(loc='lower right')
#plt.tight_layout()
#fig, ax = plt.subplots(figsize=set_size(width, fraction))
#ax[1].plot(self.error, c='green', label='scores', alpha=0.9, lw=2)
plt.savefig('/latex/figures/'+
name+'.pdf',format='pdf',bbox_inches='tight')
def gen_sequence(self,df, seq_length, seq_cols):
data = df[seq_cols].values
num_elements = data.shape[0]
for start, stop in zip(range(0, num_elements-seq_length), range(seq_length, num_elements)):
yield data[start:stop, :]
def read_dataset(self, filename, targX, sequence_length, ext=True):
data = pd.read_csv(filename, sep=',', usecols=[3,4,5,6,7])
#filename = './datasets/physics.dat'
'''r = open(filename,'r').read().split(',')
X = np.array(list(map(float,r)))
data = pd.DataFrame({'LOW':X})'''
#filename='datasets/high-storage-system-data-for-energy-optimization/HRSS_anomalous_optimized.csv'
#data = pd.read_csv(filename, sep=',', usecols=[0,1,2,3,6],names=['temp','hum','pre','mea','LOW'])
df = data
X,F = [], []
x = data[targX].values
y = x[sequence_length:x.shape[0]]
for sequence in self.gen_sequence(df, sequence_length, [targX]):
X.append(sequence)
X = np.array(X)
if ext:
cols = [cols for cols in data.columns if cols !=targX]
for sequence in self.gen_sequence(df, sequence_length, cols):
F.append(sequence)
F = np.array(F)
return X, F, y
def read_data(self, data,sequence_length, ext=False):
#filename = 'datasets/edf_stocks.csv'
#data = pd.read_csv(filename, sep=',', usecols=[3,4,5,6,7])
#data = pd.read_csv(filename, sep=',', usecols=[0,1,2,3,6],names=['temp','hum','pre','mea','LOW'])
dfxy = pd.DataFrame(data)
del dfxy[0]
X, F = [], []
targX = [cols for cols in dfxy.columns]
xy = dfxy[targX].values
y = xy[sequence_length:xy.shape[0],-1]
for sequence in self.gen_sequence(dfxy, sequence_length, targX):
X.append(sequence)
X = np.array(X)
if ext:
cols = [cols for cols in dfxy.columns if cols !=targX]
for sequence in self.gen_sequence(dfxy, sequence_length, cols):
F.append(sequence)
F = np.array(F)
return X, F, y
def anomaly_scores(self):
#return index of anomalies in the test data
self.anom_scores = self.y_test -self.y50
self.anomaly = self.y_test[np.logical_or(self.y_test>self.y90, self.y_test<self.y10)]
### CROSSOVER CHECK ###
id_anomaly=[]
for i,v in enumerate(self.y_test):
if np.logical_or(self.y_test[i]>self.y90[i], self.y_test[i]<self.y10[i]):
id_anomaly.append(i)
self.id_anomaly = np.array(id_anomaly)
def loss(self, q,y,f):
e = y-f
return K.mean(K.maximum(q*e, (q-1)*e),axis=-1)
def modelsavn(self,savejs=False, saveym=False, loadym=False, loadjs=False, savemodel=True):
#needs further processessing
# serialize model to JSON
if savejs:
self.model_json = self.model.to_json()
with open("model.json", "w") as json_file:
json_file.write(self.model_json)
# serialize weights to HDF5
self.model.save_weights("model.h5")
print("Saved model to disk")
# later...
if loadjs:
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# serialize model to YAML
if saveym:
model_yaml = self.model.to_yaml()
with open("model.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
self.model.save_weights("model.h5")
print("Saved model to disk")
#Later
if loadym:
yaml_file = open('model.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
if savemodel:
# save model and architecture to single file
self.model.save("model.h5")
print("Saved model to disk")
return loaded_model
''' evaluate loaded model on test data
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
score = loaded_model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))'''
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 04:07:50 2020
@author: c1ph3r
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tqdm
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_log_error, mean_absolute_error
from keras.models import Model
from keras.layers import LSTM, TimeDistributed, Input, Dense
from keras.layers.core import Lambda
from keras import backend as K
from keras.utils import plot_model
from tweek import set_size
#for saving models
from keras.models import model_from_yaml
from keras.models import model_from_json
#plotting modules
#import matplotlib as mpl
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
#plt.style.use('mpsty/mypaperstyle.mplstyle')
#mpl.use('pdf')
#import matplotlib.pyplot as plt
#from matplotlib import style
#style.use('seaborn-paper')
'''plt.rc('font', family= 'serif', serif= 'Times, Palatino, New Century Schoolbook, Bookman, Computer Modern Roman')
#plt.rc('font', family='sans-serif', sans_serif= 'Helvetica, Avant Garde, Computer Modern Sans serif')
plt.rc('font', family='sans-serif')
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
plt.rc('font', family='monospace', monospace= 'Courier, Computer Modern Typewriter')'''
plt.rc('font',**{'family':'serif','serif':['Times']})
plt.rc('text', usetex = True) # Use latex for text
plt.rc('xtick', labelsize = 14)
plt.rc('ytick', labelsize = 14)
plt.rc('axes', labelsize = 14)
plt.rc('axes', linewidth=1)
plt.rcParams['text.latex.preamble'] =[r'\boldmath']
params = {'legend.framealpha': 0.1,
'legend.handlelength': 0.8,
'legend.labelspacing':0.1,
'legend.fontsize' : 10}
plt.rcParams.update(params)
class Ofa:
def __init__(self,
X,y,F,ts, q=0.05, batch_size=250, epochs=30, ly_one=128, ly_two=32, ext=True):
self.ext = ext
self.split_ratio = ts
self.X, self.y,self.F = X, y,F
self.sequence_length = self.X.shape[1]
self.ts = int(self.split_ratio* self.X.shape[0]) #for hrss 0.7495
self.nl_1=ly_one
self.nl_2=ly_two
self.batch_size = batch_size
self.epochs = epochs
self.q=q
self.y_train = self.y[0:self.ts]
def fit(self, dropout=0.3, embedd=True):
# fix random seed for reproducibility
np.random.seed(7)
self.input_ae = Input(shape=(self.sequence_length, self.X.shape[2]))
self.lstm1 = LSTM(self.nl_1, return_sequences=True, dropout=dropout)
encoded_ae = self.lstm1(self.input_ae, training=True)
self.encoder = Model(self.input_ae, encoded_ae)
self.encoder.compile(optimizer='adam', loss='mean_absolute_error', metrics=['mse'])
self.Xe = self.encoder.predict(self.X) # generate X feature vectors
#self.ext=ext
self.embedd=embedd
if self.ext: #If exegenous features are available
if embedd:#FXe
dim1=self.F.shape[2]
dim2=self.nl_1
self.FXe = np.concatenate([self.Xe, self.F], axis=2) #concatenate X, F
self.XFe_train = self.FXe[0:self.ts,:]
self.XFe_test = self.FXe[self.ts:,:]
self.hxfe, model, scaler= self.train(self.XFe_train, self.XFe_test, dim1,dim2)
self.x_test=self.X[self.ts:,:]
print('..Fitting exogenous and embedding vectors (XFe) done..')
else:#FX ex with no embedding
dim1=self.F.shape[2]
dim2=self.X.shape[2]
self.FX = np.concatenate((self.F, self.X), axis=2)
self.FX_train = self.FX[0:self.ts,:]
self.FX_test = self.FX[self.ts:,:]
self.hxf, model, scaler=self.train(self.FX_train, self.FX_test, dim1, dim2)
self.x_test=self.X[self.ts:,:]
print('...Fitting features (FX) done...')
else:#No exogenous features
if embedd:#Xe with embedding
dim1=0
dim2=self.nl_1
self.Xe_train = self.Xe[0:self.ts,:]
self.Xe_test= self.Xe[self.ts:,:]
self.hxe, model, scaler=self.train(self.Xe_train, self.Xe_test, dim1, dim2)
self.x_test=self.X[self.ts:,:]
print('.. Fitting embending vectors (Xe) done..')
else:#No ex and no embedding
dim1= 0
dim2=self.X.shape[2]
self.X_train = self.X[0:self.ts,:]
self.X_test = self.X[self.ts:,:]
self.hx, model, scaler = self.train(self.X_train, self.X_test, dim1, dim2)
#self.x_test=self.X[self.ts:,:]
print('..fitting features (X) done..')
self.model, self.scaler, self.dim1, self.dim2 = model, scaler, dim1, dim2
#return self.model, self.scaler,self.dim1, self.dim2
def train(self, Xtr,Xte,di1,dim2):
scaler = StandardScaler()
Xtr = scaler.fit_transform(Xtr.reshape(-1,dim2+di1)).reshape(-1,self.sequence_length,dim2+di1)
Xte = scaler.transform(Xte.reshape(-1,dim2+di1)).reshape(-1,self.sequence_length,dim2+di1)
#self.XF_val_e = self.scaler1.transform(self.XF_val_e.reshape(-1,self.nl_1+dim)).reshape(-1,self.sequence_length,self.nl_1+dim)
inputs = Input(shape=(Xtr.shape[1], Xtr.shape[2]))
lstm1 = LSTM(self.nl_1, return_sequences=True, dropout=0.3)(inputs, training=True)
lstm2 = LSTM(self.nl_2, return_sequences=False, dropout=0.3)(lstm1, training=True)
dense1 = Dense(50)(lstm2)
out10 = Dense(1)(dense1)
out50 = Dense(1)(dense1)
out90 = Dense(1)(dense1)
self.losses = [lambda y,f:self.loss(self.q, y, f), lambda y,f:self.loss(0.5, y, f), lambda y,f: self.loss(1-self.q, y, f)]
#out = Dense(1)(dense1)
model = Model(inputs, [out10,out50,out90])
model.compile(loss=self.losses, optimizer='adam', metrics=['mae'], loss_weights = [0.2,0.2,0.2])
#model = Model(inputs, out)
#model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['mae'])
history = model.fit(Xtr, [self.y_train, self.y_train, self.y_train], \
epochs=self.epochs, batch_size=self.batch_size, verbose=0, shuffle=True)
'''self.history1 = self.model1.fit(self.XF_train_e, self.y_train, \
validation_data=(self.XF_val_e,self.y_val),epochs=150, batch_size=128, verbose=1, shuffle=True)'''
return history, model, scaler
'''def evaluate(self, choice='True'):
if choice:
print('validating......', end='\n')
self.val_score = self.model.fit(self.XF_val_noe, self.y_val, epochs=30, batch_size=128, verbose=1, shuffle=True)
else:
print('Testing......', end='\n')
self.test_score = self.model.fit(self.XF_test_noe, self.y_test, epochs=30, batch_size=128, verbose=1, shuffle=True)'''
def predictor(self, ext, emb):
#self.model, self.scaler,self.dim1, self.dim2 =self._fit()
if emb:
if ext:
enc = K.function([self.encoder.layers[0].input, K.learning_phase()], [self.encoder.layers[-1].output])
NN = K.function([self.model.layers[0].input, K.learning_phase()],
[self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
enc_pred = np.vstack(enc([self.x_test,1]))
enc_pred = np.concatenate([enc_pred, self.F[self.ts:,:]], axis=2)
trans_pred = self.scaler.transform(enc_pred.reshape(-1,self.dim1+self.dim2)).reshape(-1,self.sequence_length,self.dim1+self.dim2)
NN_pred = NN([trans_pred,1])
#print('...quantile applied on XFe done...')
else:
enc = K.function([self.encoder.layers[0].input, K.learning_phase()], [self.encoder.layers[-1].output])
NN = K.function([self.model.layers[0].input, K.learning_phase()],
[self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
enc_pred = np.vstack(enc([self.x_test,1]))
#enc_pred = np.concatenate([enc_pred, self.F[self.ts:,:]], axis=2)
trans_pred = self.scaler.transform(enc_pred.reshape(-1,self.dim2)).reshape(-1,self.sequence_length,self.dim2)
NN_pred = NN([trans_pred,1])
#print('...quantile applied on Xe done...')
else:
if ext:
NN = K.function([self.model.layers[0].input, K.learning_phase()],
[self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
#self.XF_train_noe = np.concatenate((self.x_test, self.F_test), axis=2)
trans_pred = self.scaler.transform(self.FX[self.ts:,:].reshape(-1,self.dim2+ \
self.dim1)).reshape(-1,self.sequence_length,self.dim2+self.dim1)
NN_pred = NN([trans_pred,1])
#print('..quantile applied on XF done..')
else:
NN = K.function([self.model.layers[0].input, K.learning_phase()],
[self.model.layers[-3].output, self.model.layers[-2].output, self.model.layers[-1].output])
#self.XF_train_noe = np.concatenate((self.x_test, self.F_test), axis=2)
trans_pred = self.scaler.transform(self.X[self.ts:,:].reshape(-1,self.dim2)).reshape(-1,self.sequence_length,self.dim2)
NN_pred = NN([trans_pred,1])
#print('..quantile appliedt on X done..')
return NN_pred
def plot(self,name='testing'):
#plt.figure(figsize=(16,8))
#plt.xlabel(r'$\bf {running time(s)} x10$', fontsize=45, fontweight ='bold')
#plt.ylabel(r'$\bf {window size~}x100$', fontsize=45, fontweight ='bold')
#plt.xlabel(r'\textbf{execution time(s)} $x10$', fontsize=10, fontweight ='bold')
#plt.ylabel(r'$\psi~x10^2$', fontsize=10, fontweight ='bold')
fraction = 0.5
width = 512
fig, ax = plt.subplots(3,figsize=set_size(width, fraction), sharex='all',sharey='all', gridspec_kw={'hspace': 0.5})
ax[0].scatter(range(0,len(self.y_test)),self.y_test, s=50, marker='*', c='blue', alpha=0.7, label='test data')
ax[1].plot(self.error, c='green', label='scores', alpha=0.9, lw=2)
for i in range(2):
#ax[i].xaxis.set_major_locator(plt.LinearLocator(6))
#ax[i].yaxis.set_major_locator(plt.LinearLocator(3))
#ax[i].xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
#ax[i].yaxis.set_major_formatter(plt.FormatStrFormatter('%.1f'))
ax[i].yaxis.set_minor_locator(AutoMinorLocator(2))
#ax[i].xaxis.set_minor_locator(AutoMinorLocator(2))
#plot anomalies
plt.scatter(np.where(np.logical_or(self.y_test>self.y90, self.y_test<self.y10))[0],
self.anom_scores, c='red', s=50, marker='*', alpha=0.7)
plt.plot(self.error, c='green', alpha=0.9, lw=2)
fig.text(-0.005, 0.5, 'OFA scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
plt.xlabel(r'time')
#plt.ylabel(r'OFA score')
plt.tight_layout()
# plt.savefig('/media/c1ph3r/colls/Dropbox/_China/_Xidian/_6th/vldb/vldb_style_sample/latex/figures/'+name+'.pdf',
# format='pdf', bbox_inches='tight')
fraction = 0.5
width =510
fig, ax = plt.subplots(3,figsize=set_size(width, fraction), sharex='all', gridspec_kw={'hspace': 0.5})
ax[0].plot(self.y90, c='orange', alpha=0.9, lw=2)
ax[1].plot(self.y50, c='cyan', alpha=0.9, lw=2)
ax[2].plot(self.y10, c='purple', alpha=0.9, lw=2)
for i in range(3):
#ax[i].xaxis.set_major_locator(plt.LinearLocator(6))
ax[i].yaxis.set_major_locator(plt.LinearLocator(3))
# ax[i].xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
ax[i].yaxis.set_major_formatter(plt.FormatStrFormatter('%.5f'))
#ax[i].yaxis.set_minor_locator(AutoMinorLocator(1))
#ax[i].xaxis.set_minor_locator(AutoMinorLocator(2))
plt.xlabel(r'time')
#plt.ylim([-0.001, 0.001])
plt.tight_layout()
#fig.text(0.1, 0.5, 'scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
# plt.savefig('/latex/figures/credit.pdf',
# format='pdf',bbox_inches='tight')
def predict(self, X,y,F):
self.x_test, self.y_test, self.f_exogenous = X,y,F
#self.x_test, self.y_test = X,y
#tracking from the encoded states
x_enc = self.encoder.predict(self.x_test)
xf=np.concatenate((x_enc, self.f_exogenous), axis=2)
self.yhat = np.array(self.model.predict(xf))[:,]
#using predictor
self.scores= np.array(self.predictor(1,self.ext, self.embedd)) #lower, median and upper quantile scores
self.y10, self.y50, self.y90 = self.scores[0][:,0], self.scores[1][:,0], self.scores[2][:,0]
self.mean_s, self.std_s= np.mean(self.y50.mean(axis=0), axis=0), np.std(self.y50.mean(axis=0), axis=0)
self.anomaly_scores()
self.error= self.y_test-self.y50
def _predict(self, name='stock'):
fraction = 0.5
width =510
fig, ax = plt.subplots(3, figsize=set_size(width, fraction), sharex='all', gridspec_kw={'hspace': 0.5})
ax[0].plot(self.y10[28:], color='green', alpha=0.9)
ax[0].plot(self.y50[28:], color='blue', alpha=0.6)
ax[0].plot(self.y90[28:], color='purple', alpha=0.8)
ax[0].yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
ax[0].yaxis.set_major_locator(plt.LinearLocator(3))
ax[0].set_title('fitted model (blue) and thresholds')
#ax[0].setylabel(r'thresholds')
ax[1].plot(self.y_test[28:], alpha=0.9, lw=2, c='orange')
ax[1].yaxis.set_major_locator(plt.LinearLocator(3))
ax[1].yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
ax[1].set_title('test data')
ax[2].plot(self.anom_scores[28:], c='navy', label='scores', alpha=0.9, lw=2)
ax[2].yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
ax[2].yaxis.set_major_locator(plt.LinearLocator(3))
#ax[2].plot(self.id_anomaly)
ax[2].scatter(np.where(np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:]))[0],
self.anom_scores[28:][np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:])], c='red', s=50, marker='*', alpha=0.7)
# ax[2].scatter(np.where(np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:]))[0],
# self.anom_scores[28:][self.id_anomaly], c='red', s=50, marker='*', alpha=0.7) #best simplified version for marking anomalies
plt.ylim([-0.27,0.1])
ax[2].xaxis.set_major_locator(plt.LinearLocator(6))
fig.text(-0.06, 0.5, 'OFA scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize'])
plt.xlabel(r'time', fontsize=plt.rcParams['axes.labelsize'])
plt.tight_layout()
#plot anomalies
#ax.xaxis.set_major_locator(plt.LinearLocator(6))
#ax.yaxis.set_major_locator(plt.LinearLocator(4))
#ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
#ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
#ax.yaxis.set_major_formatter(ScalarFormatter())
#ax.yaxis.major.formatter._useMathText = True
#ax.yaxis.major.formatter._useMathText = True
#ax.yaxis.set_minor_locator(AutoMinorLocator(1))
#plt.ylim([self.y10.min()-1, self.y90.max()+1])
#plt.legend(loc='lower right')
#plt.tight_layout()
#fig, ax = plt.subplots(figsize=set_size(width, fraction))
#ax[1].plot(self.error, c='green', label='scores', alpha=0.9, lw=2)
plt.savefig('/latex/figures/'+
name+'.pdf',format='pdf',bbox_inches='tight')
def gen_sequence(self,df, seq_length, seq_cols):
data = df[seq_cols].values
num_elements = data.shape[0]
for start, stop in zip(range(0, num_elements-seq_length), range(seq_length, num_elements)):
yield data[start:stop, :]
def read_dataset(self, filename, targX, sequence_length, ext=True):
data = pd.read_csv(filename, sep=',', usecols=[3,4,5,6,7])
#filename = './datasets/physics.dat'
'''r = open(filename,'r').read().split(',')
X = np.array(list(map(float,r)))
data = pd.DataFrame({'LOW':X})'''
#filename='datasets/high-storage-system-data-for-energy-optimization/HRSS_anomalous_optimized.csv'
#data = pd.read_csv(filename, sep=',', usecols=[0,1,2,3,6],names=['temp','hum','pre','mea','LOW'])
df = data
X,F = [], []
x = data[targX].values
y = x[sequence_length:x.shape[0]]
for sequence in self.gen_sequence(df, sequence_length, [targX]):
X.append(sequence)
X = np.array(X)
if ext:
cols = [cols for cols in data.columns if cols !=targX]
for sequence in self.gen_sequence(df, sequence_length, cols):
F.append(sequence)
F = np.array(F)
return X, F, y
def read_data(self, data,sequence_length, ext=False):
#filename = 'datasets/edf_stocks.csv'
#data = pd.read_csv(filename, sep=',', usecols=[3,4,5,6,7])
#data = pd.read_csv(filename, sep=',', usecols=[0,1,2,3,6],names=['temp','hum','pre','mea','LOW'])
dfxy = pd.DataFrame(data)
del dfxy[0]
X, F = [], []
targX = [cols for cols in dfxy.columns]
xy = dfxy[targX].values
y = xy[sequence_length:xy.shape[0],-1]
for sequence in self.gen_sequence(dfxy, sequence_length, targX):
X.append(sequence)
X = np.array(X)
if ext:
cols = [cols for cols in dfxy.columns if cols !=targX]
for sequence in self.gen_sequence(dfxy, sequence_length, cols):
F.append(sequence)
F = np.array(F)
return X, F, y
def anomaly_scores(self):
#return index of anomalies in the test data
self.anom_scores = self.y_test -self.y50
self.anomaly = self.y_test[np.logical_or(self.y_test>self.y90, self.y_test<self.y10)]
### CROSSOVER CHECK ###
id_anomaly=[]
for i,v in enumerate(self.y_test):
if np.logical_or(self.y_test[i]>self.y90[i], self.y_test[i]<self.y10[i]):
id_anomaly.append(i)
self.id_anomaly = np.array(id_anomaly)
def loss(self, q,y,f):
e = y-f
return K.mean(K.maximum(q*e, (q-1)*e),axis=-1)
def modelsavn(self,savejs=False, saveym=False, loadym=False, loadjs=False, savemodel=True):
#needs further processessing
# serialize model to JSON
if savejs:
self.model_json = self.model.to_json()
with open("model.json", "w") as json_file:
json_file.write(self.model_json)
# serialize weights to HDF5
self.model.save_weights("model.h5")
print("Saved model to disk")
# later...
if loadjs:
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# serialize model to YAML
if saveym:
model_yaml = self.model.to_yaml()
with open("model.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
self.model.save_weights("model.h5")
print("Saved model to disk")
#Later
if loadym:
yaml_file = open('model.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
if savemodel:
# save model and architecture to single file
self.model.save("model.h5")
print("Saved model to disk")
return loaded_model
''' evaluate loaded model on test data
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
score = loaded_model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))'''
| en | 0.234884 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Tue Jan 21 04:07:50 2020 @author: c1ph3r #for saving models #plotting modules #import matplotlib as mpl #plt.style.use('mpsty/mypaperstyle.mplstyle') #mpl.use('pdf') #import matplotlib.pyplot as plt #from matplotlib import style #style.use('seaborn-paper') plt.rc('font', family= 'serif', serif= 'Times, Palatino, New Century Schoolbook, Bookman, Computer Modern Roman') #plt.rc('font', family='sans-serif', sans_serif= 'Helvetica, Avant Garde, Computer Modern Sans serif') plt.rc('font', family='sans-serif') plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) ## for Palatino and other serif fonts use: #rc('font',**{'family':'serif','serif':['Palatino']}) plt.rc('font', family='monospace', monospace= 'Courier, Computer Modern Typewriter') # Use latex for text #for hrss 0.7495 # fix random seed for reproducibility # generate X feature vectors #self.ext=ext #If exegenous features are available #FXe #concatenate X, F #FX ex with no embedding #No exogenous features #Xe with embedding #No ex and no embedding #self.x_test=self.X[self.ts:,:] #return self.model, self.scaler,self.dim1, self.dim2 #self.XF_val_e = self.scaler1.transform(self.XF_val_e.reshape(-1,self.nl_1+dim)).reshape(-1,self.sequence_length,self.nl_1+dim) #out = Dense(1)(dense1) #model = Model(inputs, out) #model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['mae']) self.history1 = self.model1.fit(self.XF_train_e, self.y_train, \ validation_data=(self.XF_val_e,self.y_val),epochs=150, batch_size=128, verbose=1, shuffle=True) def evaluate(self, choice='True'): if choice: print('validating......', end='\n') self.val_score = self.model.fit(self.XF_val_noe, self.y_val, epochs=30, batch_size=128, verbose=1, shuffle=True) else: print('Testing......', end='\n') self.test_score = self.model.fit(self.XF_test_noe, self.y_test, epochs=30, batch_size=128, verbose=1, shuffle=True) #self.model, self.scaler,self.dim1, 
self.dim2 =self._fit() #print('...quantile applied on XFe done...') #enc_pred = np.concatenate([enc_pred, self.F[self.ts:,:]], axis=2) #print('...quantile applied on Xe done...') #self.XF_train_noe = np.concatenate((self.x_test, self.F_test), axis=2) #print('..quantile applied on XF done..') #self.XF_train_noe = np.concatenate((self.x_test, self.F_test), axis=2) #print('..quantile appliedt on X done..') #plt.figure(figsize=(16,8)) #plt.xlabel(r'$\bf {running time(s)} x10$', fontsize=45, fontweight ='bold') #plt.ylabel(r'$\bf {window size~}x100$', fontsize=45, fontweight ='bold') #plt.xlabel(r'\textbf{execution time(s)} $x10$', fontsize=10, fontweight ='bold') #plt.ylabel(r'$\psi~x10^2$', fontsize=10, fontweight ='bold') #ax[i].xaxis.set_major_locator(plt.LinearLocator(6)) #ax[i].yaxis.set_major_locator(plt.LinearLocator(3)) #ax[i].xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) #ax[i].yaxis.set_major_formatter(plt.FormatStrFormatter('%.1f')) #ax[i].xaxis.set_minor_locator(AutoMinorLocator(2)) #plot anomalies #plt.ylabel(r'OFA score') # plt.savefig('/media/c1ph3r/colls/Dropbox/_China/_Xidian/_6th/vldb/vldb_style_sample/latex/figures/'+name+'.pdf', # format='pdf', bbox_inches='tight') #ax[i].xaxis.set_major_locator(plt.LinearLocator(6)) # ax[i].xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) #ax[i].yaxis.set_minor_locator(AutoMinorLocator(1)) #ax[i].xaxis.set_minor_locator(AutoMinorLocator(2)) #plt.ylim([-0.001, 0.001]) #fig.text(0.1, 0.5, 'scores', va='center', rotation='vertical', fontsize=plt.rcParams['axes.labelsize']) # plt.savefig('/latex/figures/credit.pdf', # format='pdf',bbox_inches='tight') #self.x_test, self.y_test = X,y #tracking from the encoded states #using predictor #lower, median and upper quantile scores #ax[0].setylabel(r'thresholds') #ax[2].plot(self.id_anomaly) # ax[2].scatter(np.where(np.logical_or(self.y_test[28:]>self.y90[28:], self.y_test[28:]<self.y10[28:]))[0], # self.anom_scores[28:][self.id_anomaly], c='red', s=50, 
marker='*', alpha=0.7) #best simplified version for marking anomalies #plot anomalies #ax.xaxis.set_major_locator(plt.LinearLocator(6)) #ax.yaxis.set_major_locator(plt.LinearLocator(4)) #ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) #ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f')) #ax.yaxis.set_major_formatter(ScalarFormatter()) #ax.yaxis.major.formatter._useMathText = True #ax.yaxis.major.formatter._useMathText = True #ax.yaxis.set_minor_locator(AutoMinorLocator(1)) #plt.ylim([self.y10.min()-1, self.y90.max()+1]) #plt.legend(loc='lower right') #plt.tight_layout() #fig, ax = plt.subplots(figsize=set_size(width, fraction)) #ax[1].plot(self.error, c='green', label='scores', alpha=0.9, lw=2) #filename = './datasets/physics.dat' r = open(filename,'r').read().split(',') X = np.array(list(map(float,r))) data = pd.DataFrame({'LOW':X}) #filename='datasets/high-storage-system-data-for-energy-optimization/HRSS_anomalous_optimized.csv' #data = pd.read_csv(filename, sep=',', usecols=[0,1,2,3,6],names=['temp','hum','pre','mea','LOW']) #filename = 'datasets/edf_stocks.csv' #data = pd.read_csv(filename, sep=',', usecols=[3,4,5,6,7]) #data = pd.read_csv(filename, sep=',', usecols=[0,1,2,3,6],names=['temp','hum','pre','mea','LOW']) #return index of anomalies in the test data ### CROSSOVER CHECK ### #needs further processessing # serialize model to JSON # serialize weights to HDF5 # later... # load json and create model # load weights into new model # serialize model to YAML # serialize weights to HDF5 #Later # load weights into new model # save model and architecture to single file evaluate loaded model on test data loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) score = loaded_model.evaluate(X, Y, verbose=0) print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100)) | 2.22431 | 2 |
esep/utils/jobqueue/pbs.py | lyingTree/ESEP | 0 | 6621791 | <filename>esep/utils/jobqueue/pbs.py
# -*- coding:utf-8 -*-
"""
--------------------------------------------------------------------------------
File Name : pbs.py
Start Date : 2021-09-09 00:00
Contributor : D.CW
Email : <EMAIL>
--------------------------------------------------------------------------------
Introduction:
Protable Batch System
--------------------------------------------------------------------------------
"""
import logging
import os
from .core import Job, JobQueueCluster, job_parameters
logger = logging.getLogger(__name__)
class PBSJob(Job):
    """Job implementation for the Portable Batch System (PBS).

    Supplies the PBS command-line tools used by the base ``Job`` machinery
    and assembles the ``#PBS`` directive header prepended to the job script.
    """
    # PBS command-line tools consumed by the base Job class.
    submit_command = "qsub"
    cancel_command = "qdel"
    check_command = "qstat"
    manage_command = "pbsnodes"
    def __init__(self, app_command, queue=None, project=None, walltime=None, **base_class_kwargs):
        """Create a PBS job for ``app_command``.

        Parameters
        ----------
        app_command :
            Command to run; forwarded to the base ``Job``.
        queue : str, optional
            Destination queue (``#PBS -q``).
        project : str, optional
            Accounting string (``#PBS -A``); falls back to the
            ``PBS_ACCOUNT`` environment variable when omitted.
        walltime : str, optional
            Walltime limit (``#PBS -l walltime=...``).
        """
        super().__init__(app_command, **base_class_kwargs)
        # Try to find a project name from environment variable
        project = project or os.environ.get("PBS_ACCOUNT")
        header_lines = []
        # PBS header build
        if self.job_name is not None:
            header_lines.append("#PBS -N %s" % self.job_name)
        if queue is not None:
            header_lines.append("#PBS -q %s" % queue)
        if project is not None:
            header_lines.append("#PBS -A %s" % project)
        # Default to a single node with a single core when unspecified.
        if self.nodes is None:
            self.nodes = 1
        if self.cores is None:
            self.cores = 1
        resource_spec = "nodes={0}:ppn={1}".format(self.nodes, self.cores)
        header_lines.append("#PBS -l %s" % resource_spec)
        if walltime is not None:
            header_lines.append("#PBS -l walltime=%s" % walltime)
        if self.log_directory is not None:
            # Route the stderr/stdout files into the log directory.
            header_lines.append("#PBS -e %s/" % self.log_directory)
            header_lines.append("#PBS -o %s/" % self.log_directory)
        # Declare class attribute that shall be overridden
        self.job_header = "\n".join(header_lines)
        logger.debug("Job script: \n %s" % self.job_script())
class PBSCluster(JobQueueCluster):
    __doc__ = """ Run the program on a PBS cluster
    Parameters
    ----------
    queue : str
        Destination queue for each worker job. Passed to `#PBS -q` option.
    project : str
        Accounting string associated with each worker job. Passed to `#PBS -A` option.
    {job}
    resource_spec : str
        Request resources and specify job placement. Passed to `#PBS -l` option.
    walltime : str
        Walltime for each worker job.
    """.format(job=job_parameters)
    job_cls = PBSJob
    @property
    def status(self):
        # Query qstat through the dummy job and, on success, parse the first
        # job row of its tabular output into a dict.
        out_dict = dict()
        flag, out = self._dummy_job.status()
        if flag:
            # Assumes qstat's default layout: two header lines, then one row
            # per job with space-separated columns — TODO confirm for the
            # target PBS version.
            tmp = list(filter(None, out.split('\n')[2].split(' ')))
            out_dict = dict(job_id=tmp[0].split('.')[0], job_name=tmp[1], user=tmp[2], time_use=tmp[3], status=tmp[4],
                            queue=tmp[5])
        return flag, out, out_dict
| <filename>esep/utils/jobqueue/pbs.py
# -*- coding:utf-8 -*-
"""
--------------------------------------------------------------------------------
File Name : pbs.py
Start Date : 2021-09-09 00:00
Contributor : D.CW
Email : <EMAIL>
--------------------------------------------------------------------------------
Introduction:
    Portable Batch System
--------------------------------------------------------------------------------
"""
import logging
import os
from .core import Job, JobQueueCluster, job_parameters
logger = logging.getLogger(__name__)
class PBSJob(Job):
    """PBS (Portable Batch System) flavour of ``Job``.

    Provides the PBS management executables and builds the ``#PBS``
    directive header for the generated job script.
    """
    submit_command = "qsub"
    cancel_command = "qdel"
    check_command = "qstat"
    manage_command = "pbsnodes"
    def __init__(self, app_command, queue=None, project=None, walltime=None, **base_class_kwargs):
        """Assemble the PBS directive header for this job.

        ``queue`` maps to ``#PBS -q``, ``project`` to ``#PBS -A`` (falling
        back to the ``PBS_ACCOUNT`` environment variable) and ``walltime``
        to ``#PBS -l walltime=...``.
        """
        super().__init__(app_command, **base_class_kwargs)
        # Accounting string falls back to the PBS_ACCOUNT environment variable.
        project = project or os.environ.get("PBS_ACCOUNT")
        # A single node with a single core is the default allocation.
        if self.nodes is None:
            self.nodes = 1
        if self.cores is None:
            self.cores = 1
        directives = []
        if self.job_name is not None:
            directives.append("#PBS -N %s" % self.job_name)
        if queue is not None:
            directives.append("#PBS -q %s" % queue)
        if project is not None:
            directives.append("#PBS -A %s" % project)
        directives.append("#PBS -l %s" % "nodes={0}:ppn={1}".format(self.nodes, self.cores))
        if walltime is not None:
            directives.append("#PBS -l walltime=%s" % walltime)
        if self.log_directory is not None:
            # Send the stderr and stdout files into the log directory.
            directives.append("#PBS -e %s/" % self.log_directory)
            directives.append("#PBS -o %s/" % self.log_directory)
        self.job_header = "\n".join(directives)
        logger.debug("Job script: \n %s" % self.job_script())
class PBSCluster(JobQueueCluster):
    __doc__ = """ Run the program on a PBS cluster
    Parameters
    ----------
    queue : str
        Destination queue for each worker job. Passed to `#PBS -q` option.
    project : str
        Accounting string associated with each worker job. Passed to `#PBS -A` option.
    {job}
    resource_spec : str
        Request resources and specify job placement. Passed to `#PBS -l` option.
    walltime : str
        Walltime for each worker job.
    """.format(job=job_parameters)
    job_cls = PBSJob
    @property
    def status(self):
        # Query qstat through the dummy job and, on success, parse the first
        # job row of its tabular output into a dict.
        out_dict = dict()
        flag, out = self._dummy_job.status()
        if flag:
            # Assumes qstat's default layout: two header lines, then one row
            # per job with space-separated columns — TODO confirm for the
            # target PBS version.
            tmp = list(filter(None, out.split('\n')[2].split(' ')))
            out_dict = dict(job_id=tmp[0].split('.')[0], job_name=tmp[1], user=tmp[2], time_use=tmp[3], status=tmp[4],
                            queue=tmp[5])
        return flag, out, out_dict
| en | 0.516741 | # -*- coding:utf-8 -*- -------------------------------------------------------------------------------- File Name : pbs.py Start Date : 2021-09-09 00:00 Contributor : D.CW Email : <EMAIL> -------------------------------------------------------------------------------- Introduction: Protable Batch System -------------------------------------------------------------------------------- # Try to find a project name from environment variable # PBS header build # Declare class attribute that shall be overridden Run the program on a PBS cluster Parameters ---------- queue : str Destination queue for each worker job. Passed to `#PBS -q` option. project : str Accounting string associated with each worker job. Passed to `#PBS -A` option. {job} resource_spec : str Request resources and specify job placement. Passed to `#PBS -l` option. walltime : str Walltime for each worker job. | 2.238426 | 2 |
src/dp/rest_api/v1/parsers/input/user/__init__.py | Michalesko/DP_API | 0 | 6621792 | <reponame>Michalesko/DP_API
# coding: utf-8
__author__ = 'Miso'
from postUsersParser import post_users_parser
from putUsersParser import put_users_parser
from postUsersGoalParser import post_users_goal_parser
from postUsersTimeParser import post_users_time_parser | # coding: utf-8
__author__ = 'Miso'
from postUsersParser import post_users_parser
from putUsersParser import put_users_parser
from postUsersGoalParser import post_users_goal_parser
from postUsersTimeParser import post_users_time_parser | en | 0.833554 | # coding: utf-8 | 1.108678 | 1 |
Dynamic_Traffic_Maker.py | marcostfermin/Dynamic-Traffic-Lights-Simulation | 0 | 6621793 | # -*- coding: utf-8 -*-.
"""
Created on Sun Oct 17 13:15:32 2021.
@author: <NAME>
"""
import pygame
def get_traffic_maker():
    """Display a clickable menu of traffic levels and return the chosen one.

    Shows a road image with three hoverable buttons (light / medium /
    heavy).  Returns that level's list of four vehicle counts when a
    button is clicked; returns None (implicitly) if the window is closed
    or ESC is pressed.
    """
    # sam = test()
    # print(sam)
    pygame.init()
    # window properties
    WIDTH = 1400
    HEIGHT = 922
    BACK_COLOR = (0, 0, 0)
    # One background per state: idle plus a highlighted image for each
    # hovered traffic level, all scaled to the window size.
    default_road = pygame.image.load("images/roads/road1.jpg")
    default_road = pygame.transform.smoothscale(default_road, (1400, 922))
    light_road = pygame.image.load("images/roads/road2.jpg")
    light_road = pygame.transform.smoothscale(light_road, (1400, 922))
    medium_road = pygame.image.load("images/roads/road3.jpg")
    medium_road = pygame.transform.smoothscale(medium_road, (1400, 922))
    heavy_road = pygame.image.load("images/roads/road4.jpg")
    heavy_road = pygame.transform.smoothscale(heavy_road, (1400, 922))
    # Button rectangles; red outline by default, green while hovered.
    ring3 = pygame.Rect(600, 290, 220, 70)
    clr3 = (255, 0, 0)
    ring4 = pygame.Rect(600, 420, 220, 70)
    clr4 = (255, 0, 0)
    ring5 = pygame.Rect(600, 550, 220, 70)
    clr5 = (255, 0, 0)
    # -------------------Texts---------------------------------
    table_font = pygame.font.SysFont("timesnewroman",
                                     25,
                                     bold=pygame.font.Font.bold)
    light = table_font.render("Light Traffic", True, (255, 255, 255))
    medium = table_font.render("Medium Traffic", True, (255, 255, 255))
    heavy = table_font.render("Heavy Traffic", True, (255, 255, 255))
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    # frame rate
    Clock = pygame.time.Clock()
    # convert table to desired size and remove bg
    # ------------------Traffic-------------------------------
    # Vehicle counts returned for each traffic level (four values each;
    # presumably one per approach direction — TODO confirm with caller).
    light_traffic = [100, 200, 225, 250]
    medium_traffic = [200, 400, 450, 500]
    heavy_traffic = [400, 800, 900, 1000]
    run = True
    # game starts
    while run:
        # Display screen
        screen.fill((BACK_COLOR))
        # Display table
        # Swap in the highlighted background for whichever button is hovered.
        if ring3.collidepoint(pygame.mouse.get_pos()):
            screen.blit(light_road, (0, 0))
            clr3 = (0, 255, 0)
        elif ring4.collidepoint(pygame.mouse.get_pos()):
            screen.blit(medium_road, (0, 0))
            clr4 = (0, 255, 0)
        elif ring5.collidepoint(pygame.mouse.get_pos()):
            screen.blit(heavy_road, (0, 0))
            clr5 = (0, 255, 0)
        else:
            screen.blit(default_road, (0, 0))
            clr3 = clr4 = clr5 = (255, 0, 0)
        # Window close or ESC leaves the loop (function then returns None).
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    run = False
        # Draw the button outlines (width 2, rounded corners) and labels.
        pygame.draw.rect(screen, clr3, ring3, 2, 10)
        pygame.draw.rect(screen, clr4, ring4, 2, 10)
        pygame.draw.rect(screen, clr5, ring5, 2, 10)
        screen.blit(light, (640, 315))
        screen.blit(medium, (620, 440))
        screen.blit(heavy, (630, 570))
        # A left-click inside a button returns that level's traffic profile.
        if pygame.mouse.get_pressed()[0] and ring3.collidepoint(
                pygame.mouse.get_pos()):
            print("light tapped")
            return light_traffic
        if pygame.mouse.get_pressed()[0] and ring4.collidepoint(
                pygame.mouse.get_pos()):
            print("medium tapped")
            return medium_traffic
        if pygame.mouse.get_pressed()[0] and ring5.collidepoint(
                pygame.mouse.get_pos()):
            print("heavy tapped")
            return heavy_traffic
        pygame.display.flip()
        Clock.tick(60)
if __name__ == "__main__":
# pass
get_traffic_maker()
| # -*- coding: utf-8 -*-.
"""
Created on Sun Oct 17 13:15:32 2021.
@author: <NAME>
"""
import pygame
def get_traffic_maker():
    """Display a clickable menu of traffic levels and return the chosen one.

    Shows a road image with three hoverable buttons (light / medium /
    heavy).  Returns that level's list of four vehicle counts when a
    button is clicked; returns None (implicitly) if the window is closed
    or ESC is pressed.
    """
    # sam = test()
    # print(sam)
    pygame.init()
    # window properties
    WIDTH = 1400
    HEIGHT = 922
    BACK_COLOR = (0, 0, 0)
    # One background per state: idle plus a highlighted image for each
    # hovered traffic level, all scaled to the window size.
    default_road = pygame.image.load("images/roads/road1.jpg")
    default_road = pygame.transform.smoothscale(default_road, (1400, 922))
    light_road = pygame.image.load("images/roads/road2.jpg")
    light_road = pygame.transform.smoothscale(light_road, (1400, 922))
    medium_road = pygame.image.load("images/roads/road3.jpg")
    medium_road = pygame.transform.smoothscale(medium_road, (1400, 922))
    heavy_road = pygame.image.load("images/roads/road4.jpg")
    heavy_road = pygame.transform.smoothscale(heavy_road, (1400, 922))
    # Button rectangles; red outline by default, green while hovered.
    ring3 = pygame.Rect(600, 290, 220, 70)
    clr3 = (255, 0, 0)
    ring4 = pygame.Rect(600, 420, 220, 70)
    clr4 = (255, 0, 0)
    ring5 = pygame.Rect(600, 550, 220, 70)
    clr5 = (255, 0, 0)
    # -------------------Texts---------------------------------
    table_font = pygame.font.SysFont("timesnewroman",
                                     25,
                                     bold=pygame.font.Font.bold)
    light = table_font.render("Light Traffic", True, (255, 255, 255))
    medium = table_font.render("Medium Traffic", True, (255, 255, 255))
    heavy = table_font.render("Heavy Traffic", True, (255, 255, 255))
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    # frame rate
    Clock = pygame.time.Clock()
    # convert table to desired size and remove bg
    # ------------------Traffic-------------------------------
    # Vehicle counts returned for each traffic level (four values each;
    # presumably one per approach direction — TODO confirm with caller).
    light_traffic = [100, 200, 225, 250]
    medium_traffic = [200, 400, 450, 500]
    heavy_traffic = [400, 800, 900, 1000]
    run = True
    # game starts
    while run:
        # Display screen
        screen.fill((BACK_COLOR))
        # Display table
        # Swap in the highlighted background for whichever button is hovered.
        if ring3.collidepoint(pygame.mouse.get_pos()):
            screen.blit(light_road, (0, 0))
            clr3 = (0, 255, 0)
        elif ring4.collidepoint(pygame.mouse.get_pos()):
            screen.blit(medium_road, (0, 0))
            clr4 = (0, 255, 0)
        elif ring5.collidepoint(pygame.mouse.get_pos()):
            screen.blit(heavy_road, (0, 0))
            clr5 = (0, 255, 0)
        else:
            screen.blit(default_road, (0, 0))
            clr3 = clr4 = clr5 = (255, 0, 0)
        # Window close or ESC leaves the loop (function then returns None).
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    run = False
        # Draw the button outlines (width 2, rounded corners) and labels.
        pygame.draw.rect(screen, clr3, ring3, 2, 10)
        pygame.draw.rect(screen, clr4, ring4, 2, 10)
        pygame.draw.rect(screen, clr5, ring5, 2, 10)
        screen.blit(light, (640, 315))
        screen.blit(medium, (620, 440))
        screen.blit(heavy, (630, 570))
        # A left-click inside a button returns that level's traffic profile.
        if pygame.mouse.get_pressed()[0] and ring3.collidepoint(
                pygame.mouse.get_pos()):
            print("light tapped")
            return light_traffic
        if pygame.mouse.get_pressed()[0] and ring4.collidepoint(
                pygame.mouse.get_pos()):
            print("medium tapped")
            return medium_traffic
        if pygame.mouse.get_pressed()[0] and ring5.collidepoint(
                pygame.mouse.get_pos()):
            print("heavy tapped")
            return heavy_traffic
        pygame.display.flip()
        Clock.tick(60)
if __name__ == "__main__":
# pass
get_traffic_maker()
| en | 0.36824 | # -*- coding: utf-8 -*-. Created on Sun Oct 17 13:15:32 2021.
@author: <NAME> # sam = test() # print(sam) # window properties # -------------------Texts--------------------------------- # frame rate # convert table to desired size and remove bg # ------------------Traffic------------------------------- # game starts # Display screen # Display table # pass | 2.668624 | 3 |
examples/example_twocol.py | pymoc/pymoc_uf | 6 | 6621794 | <gh_stars>1-10
'''
This script shows an example of a "two-column" model for the
diffusive overturning circulation in a basin, integrated via time-stepping.
The first column represents the basin, while the second column represents the
northern sinking region. In this example we are using an isopycnal mapping of
overturning circulation, i.e. the overturning is converted into isopycnal space
and then mapped onto the different water columns. In this configuration
the model is effectively isopycnal, with the vertical coordinate representing
the mean depth of the respective isopycnal in the column.
'''
from pymoc.modules import Psi_Thermwind, Column
import numpy as np
from matplotlib import pyplot as plt
# boundary conditions:
bs = 0.03
bs_north = 0.0
bbot = -0.003
A_basin = 8e13 #area of the basin
A_north = A_basin / 100. #area of northern sinking region
# time-stepping parameters:
dt = 86400 * 30 # time-step for vert. adv. diff. calc.
MOC_up_iters = int(
np.floor(2 * 360 * 86400 / dt)
) # multiplier for MOC time-step (MOC is updated every MOC_up_iters time steps)
plot_iters = int(
np.ceil(500 * 360 * 86400 / dt)
) # plotting frequency (in iterations)
total_iters = int(
np.ceil(4000 * 360 * 86400 / dt)
) # total number of timesteps
# The next few lines define a reasonable vertically varying kappa profile:
# (to use const. kappa, simply define kappa as scalar)
kappa_back = 1e-5
kappa_4k = 3e-4
def kappa(z, kappa_back=1e-5, kappa_4k=3e-4):
    """Vertically varying diffusivity profile at depth ``z`` (z <= 0, m).

    Exponentially enhanced towards the bottom: ``kappa_back`` is the
    background value and ``kappa_4k`` the extra diffusivity at 4 km depth
    (the exponent is 0 at z = -4000).  The defaults mirror the
    module-level constants, so existing single-argument calls behave
    exactly as before; the new keyword arguments generalize the profile
    for reuse with other parameter choices.
    """
    return kappa_back + kappa_4k * np.exp(-z / 1000 - 4)
# create grid:
z = np.asarray(np.linspace(-4000, 0, 80))
# Initial conditions for buoyancy profile in the basin:
def b_basin(z, bs=0.03):
    """Initial buoyancy profile in the basin.

    Exponential decay from the surface value ``bs`` with a 300 m
    e-folding scale.  ``bs`` defaults to the module-level surface
    boundary condition (0.03), keeping existing single-argument calls
    unchanged while allowing other surface values.
    """
    return bs * np.exp(z / 300.)
def b_north(z, bs=0.03):
    """Initial buoyancy profile in the northern sinking region.

    Same 300 m e-folding scale as the basin but 1000x weaker
    stratification.  ``bs`` defaults to the module-level basin surface
    buoyancy (0.03), so existing single-argument calls are unchanged.
    """
    return 1e-3 * bs * np.exp(z / 300.)
# create overturning model instance
AMOC = Psi_Thermwind(z=z, b1=b_basin, b2=b_north)
# and solve for initial overturning streamfunction:
AMOC.solve()
# evaluate overturning in isopycnal space:
[Psi_iso_b, Psi_iso_n] = AMOC.Psibz()
# Create figure:
fig = plt.figure(figsize=(6, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
plt.ylim((-4e3, 0))
ax1.set_xlim((-5, 20))
ax2.set_xlim((-0.01, 0.04))
ax1.set_xlabel('$\Psi$', fontsize=14)
ax2.set_xlabel('b', fontsize=14)
# create adv-diff column model instance for basin
basin = Column(
z=z, kappa=kappa, Area=A_basin, b=b_basin, bs=bs, bbot=bbot
)
# create adv-diff column model instance for basin
north = Column(
z=z, kappa=kappa, Area=A_north, b=b_north, bs=bs_north, bbot=bbot
)
# Main time stepping loop
for ii in range(0, total_iters):
    # update buoyancy profile
    # Factor 1e6 presumably converts the streamfunction from Sv to m^3/s
    # (confirm against Psi_Thermwind's units); the northern column gets the
    # opposite sign so downwelling there balances upwelling in the basin.
    wAb = Psi_iso_b * 1e6
    wAN = -Psi_iso_n * 1e6
    basin.timestep(wA=wAb, dt=dt)
    # do_conv=True enables convective adjustment in the sinking region.
    north.timestep(wA=wAN, dt=dt, do_conv=True)
    if ii % MOC_up_iters == 0:
        # update overturning streamfunction (can be done less frequently)
        AMOC.update(b1=basin.b, b2=north.b)
        AMOC.solve()
        # evaluate overturning in isopycnal space:
        [Psi_iso_b, Psi_iso_n] = AMOC.Psibz()
    if ii % plot_iters == 0:
        # Plot current state:
        ax1.plot(AMOC.Psi, AMOC.z, linewidth=0.5)
        ax2.plot(basin.b, basin.z, linewidth=0.5)
        ax2.plot(north.b, north.z, linewidth=0.5)
        plt.pause(0.01)
# Plot final results:
fig = plt.figure(figsize=(6, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax1.plot(AMOC.Psi, AMOC.z, linewidth=2, color='r')
ax2.plot(basin.b, basin.z, linewidth=2, color='b')
ax2.plot(north.b, basin.z, linewidth=2, color='c')
ax1.plot(0. * AMOC.z, AMOC.z, linewidth=0.5, color='k')
ax1.set_xlim((-5, 15))
ax2.set_xlim((-0.01, 0.03))
ax1.set_xlabel('$\Psi$', fontsize=14)
ax2.set_xlabel('b', fontsize=14)
plt.ylim((-4e3, 0))
| '''
This script shows an example of a "two-column" model for the
diffusive overturning circulation in a basin, integrated via time-stepping.
The first column represents the basin, while the second column represents the
northern sinking region. In this example we are using an isopycnal mapping of
overturning circulation, i.e. the overturning is converted into isopycnal space
and then mapped onto the different water columns. In this configuration
the model is effectively isopycnal, with the vertical coordinate representing
the mean depth of the respective isopycnal in the column.
'''
from pymoc.modules import Psi_Thermwind, Column
import numpy as np
from matplotlib import pyplot as plt
# boundary conditions:
bs = 0.03
bs_north = 0.0
bbot = -0.003
A_basin = 8e13 #area of the basin
A_north = A_basin / 100. #area of northern sinking region
# time-stepping parameters:
dt = 86400 * 30 # time-step for vert. adv. diff. calc.
MOC_up_iters = int(
np.floor(2 * 360 * 86400 / dt)
) # multiplier for MOC time-step (MOC is updated every MOC_up_iters time steps)
plot_iters = int(
np.ceil(500 * 360 * 86400 / dt)
) # plotting frequency (in iterations)
total_iters = int(
np.ceil(4000 * 360 * 86400 / dt)
) # total number of timesteps
# The next few lines define a reasonable vertically varying kappa profile:
# (to use const. kappa, simply define kappa as scalar)
kappa_back = 1e-5
kappa_4k = 3e-4
def kappa(z, kappa_back=1e-5, kappa_4k=3e-4):
    """Vertically varying diffusivity profile at depth ``z`` (z <= 0, m).

    Exponentially enhanced towards the bottom: ``kappa_back`` is the
    background value and ``kappa_4k`` the extra diffusivity at 4 km depth
    (the exponent is 0 at z = -4000).  The defaults mirror the
    module-level constants, so existing single-argument calls behave
    exactly as before; the new keyword arguments generalize the profile
    for reuse with other parameter choices.
    """
    return kappa_back + kappa_4k * np.exp(-z / 1000 - 4)
# create grid:
z = np.asarray(np.linspace(-4000, 0, 80))
# Initial conditions for buoyancy profile in the basin:
def b_basin(z, bs=0.03):
    """Initial buoyancy profile in the basin.

    Exponential decay from the surface value ``bs`` with a 300 m
    e-folding scale.  ``bs`` defaults to the module-level surface
    boundary condition (0.03), keeping existing single-argument calls
    unchanged while allowing other surface values.
    """
    return bs * np.exp(z / 300.)
def b_north(z, bs=0.03):
    """Initial buoyancy profile in the northern sinking region.

    Same 300 m e-folding scale as the basin but 1000x weaker
    stratification.  ``bs`` defaults to the module-level basin surface
    buoyancy (0.03), so existing single-argument calls are unchanged.
    """
    return 1e-3 * bs * np.exp(z / 300.)
# create overturning model instance
AMOC = Psi_Thermwind(z=z, b1=b_basin, b2=b_north)
# and solve for initial overturning streamfunction:
AMOC.solve()
# evaluate overturning in isopycnal space:
[Psi_iso_b, Psi_iso_n] = AMOC.Psibz()
# Create figure:
fig = plt.figure(figsize=(6, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
plt.ylim((-4e3, 0))
ax1.set_xlim((-5, 20))
ax2.set_xlim((-0.01, 0.04))
ax1.set_xlabel('$\Psi$', fontsize=14)
ax2.set_xlabel('b', fontsize=14)
# create adv-diff column model instance for basin
basin = Column(
z=z, kappa=kappa, Area=A_basin, b=b_basin, bs=bs, bbot=bbot
)
# create adv-diff column model instance for basin
north = Column(
z=z, kappa=kappa, Area=A_north, b=b_north, bs=bs_north, bbot=bbot
)
# Main time stepping loop
for ii in range(0, total_iters):
# update buoyancy profile
wAb = Psi_iso_b * 1e6
wAN = -Psi_iso_n * 1e6
basin.timestep(wA=wAb, dt=dt)
north.timestep(wA=wAN, dt=dt, do_conv=True)
if ii % MOC_up_iters == 0:
# update overturning streamfunction (can be done less frequently)
AMOC.update(b1=basin.b, b2=north.b)
AMOC.solve()
# evaluate overturning in isopycnal space:
[Psi_iso_b, Psi_iso_n] = AMOC.Psibz()
if ii % plot_iters == 0:
# Plot current state:
ax1.plot(AMOC.Psi, AMOC.z, linewidth=0.5)
ax2.plot(basin.b, basin.z, linewidth=0.5)
ax2.plot(north.b, north.z, linewidth=0.5)
plt.pause(0.01)
# Plot final results:
fig = plt.figure(figsize=(6, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax1.plot(AMOC.Psi, AMOC.z, linewidth=2, color='r')
ax2.plot(basin.b, basin.z, linewidth=2, color='b')
ax2.plot(north.b, basin.z, linewidth=2, color='c')
ax1.plot(0. * AMOC.z, AMOC.z, linewidth=0.5, color='k')
ax1.set_xlim((-5, 15))
ax2.set_xlim((-0.01, 0.03))
ax1.set_xlabel('$\Psi$', fontsize=14)
ax2.set_xlabel('b', fontsize=14)
plt.ylim((-4e3, 0)) | en | 0.814279 | This script shows an example of a "two-column" model for the diffusive overturning circulation in a basin, integrated via time-stepping. The first column represents the basin, while the second column represents the northern sinking region. In this example we are using an isopycnal mapping of overturning circulation, i.e. the overturning is converted into isopycnal space and then mapped onto the different water columns. In this configuration the model is effectively isopycnal, with the vertical coordinate representing the mean depth of the respective isopycnal in the column. # boundary conditions: #area of the basin #area of northern sinking region # time-stepping parameters: # time-step for vert. adv. diff. calc. # multiplier for MOC time-step (MOC is updated every MOC_up_iters time steps) # plotting frequency (in iterations) # total number of timesteps # The next few lines define a reasonable vertically varying kappa profile: # (to use const. kappa, simply define kappa as scalar) # create grid: # Initial conditions for buoyancy profile in the basin: # create overturning model instance # and solve for initial overturning streamfunction: # evaluate overturning in isopycnal space: # Create figure: # create adv-diff column model instance for basin # create adv-diff column model instance for basin # Main time stepping loop # update buoyancy profile # update overturning streamfunction (can be done less frequently) # evaluate overturning in isopycnal space: # Plot current state: # Plot final results: | 3.323168 | 3 |
main/migrations/0008_auto_20181204_1512.py | czbiohub/crispycrunch | 4 | 6621795 | # Generated by Django 2.1.1 on 2018-12-04 23:12
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Alters GuideDesign.genome (choice list now covers hg38 and mm10) and
    PrimerDesign.max_amplicon_length (default 400, validated to 100-800).
    """
    dependencies = [
        ('main', '0007_auto_20181127_1937'),
    ]
    operations = [
        # Genome choice field: human (hg38) and mouse (mm10) references.
        migrations.AlterField(
            model_name='guidedesign',
            name='genome',
            field=models.CharField(choices=[('hg38', 'Homo sapiens - Human - UCSC Dec. 2013 (GRCh38/hg38)'), ('mm10', 'Mus musculus - Mouse - UCSC Dec. 2011 (GRCm38/mm10)')], default='hg38', max_length=80),
        ),
        # Amplicon length is bounded to the 100-800 range by validators.
        migrations.AlterField(
            model_name='primerdesign',
            name='max_amplicon_length',
            field=models.IntegerField(default=400, help_text='Amplicon = primer product. Length after HDR insertion.', validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(800)], verbose_name='Maximum amplicon length'),
        ),
    ]
| # Generated by Django 2.1.1 on 2018-12-04 23:12
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Alters GuideDesign.genome (choice list now covers hg38 and mm10) and
    PrimerDesign.max_amplicon_length (default 400, validated to 100-800).
    """
    dependencies = [
        ('main', '0007_auto_20181127_1937'),
    ]
    operations = [
        # Genome choice field: human (hg38) and mouse (mm10) references.
        migrations.AlterField(
            model_name='guidedesign',
            name='genome',
            field=models.CharField(choices=[('hg38', 'Homo sapiens - Human - UCSC Dec. 2013 (GRCh38/hg38)'), ('mm10', 'Mus musculus - Mouse - UCSC Dec. 2011 (GRCm38/mm10)')], default='hg38', max_length=80),
        ),
        # Amplicon length is bounded to the 100-800 range by validators.
        migrations.AlterField(
            model_name='primerdesign',
            name='max_amplicon_length',
            field=models.IntegerField(default=400, help_text='Amplicon = primer product. Length after HDR insertion.', validators=[django.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(800)], verbose_name='Maximum amplicon length'),
        ),
    ]
| en | 0.748351 | # Generated by Django 2.1.1 on 2018-12-04 23:12 | 1.867495 | 2 |
backend/webapp/errors.py | tanshuai/reference-wallet | 14 | 6621796 | <gh_stars>10-100
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
import traceback
from http import HTTPStatus
from json import JSONDecodeError
from flask import Blueprint, jsonify, make_response, current_app
from werkzeug.exceptions import HTTPException
errors = Blueprint("errors", __name__)
@errors.app_errorhandler(HTTPException)
def handle_http_exception(error):
    """Log an HTTPException and return a JSON error body with its status code.

    Any unhandled error eventually reaches this handler.
    """
    # Werkzeug may attach the real exception as `original_exception`
    # (e.g. on a 500); prefer logging that one.
    real_error = getattr(error, "original_exception", error)
    current_app.logger.exception(real_error)
    response = {
        "code": error.code,
        "error": error.description,
    }
    return make_response(jsonify(response), error.code)
@errors.app_errorhandler(JSONDecodeError)
def handle_unexpected_error(error):
    """Return HTTP 400 with a JSON error body when the request payload
    cannot be parsed as JSON.
    """
    status_code = HTTPStatus.BAD_REQUEST
    response = {"code": HTTPStatus.BAD_REQUEST, "error": "Could not parse json data"}
    # Log the traceback so the failure site of the decode is visible.
    current_app.logger.error(f"error: {error}, exec: {traceback.format_exc()}")
    return make_response(jsonify(response), status_code)
| # Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
import traceback
from http import HTTPStatus
from json import JSONDecodeError
from flask import Blueprint, jsonify, make_response, current_app
from werkzeug.exceptions import HTTPException
errors = Blueprint("errors", __name__)
@errors.app_errorhandler(HTTPException)
def handle_http_exception(error):
    """Log an HTTPException and return a JSON error body with its status code.

    Any unhandled error eventually reaches this handler.
    """
    # Prefer the underlying exception when Werkzeug attached one.
    underlying = getattr(error, "original_exception", error)
    current_app.logger.exception(underlying)
    payload = {"code": error.code, "error": error.description}
    return make_response(jsonify(payload), error.code)
@errors.app_errorhandler(JSONDecodeError)
def handle_unexpected_error(error):
    """Return HTTP 400 with a JSON error body when the request payload
    cannot be parsed as JSON.
    """
    # Log first, with the traceback, so the decode failure site is visible.
    current_app.logger.error(f"error: {error}, exec: {traceback.format_exc()}")
    body = {"code": HTTPStatus.BAD_REQUEST, "error": "Could not parse json data"}
    return make_response(jsonify(body), HTTPStatus.BAD_REQUEST)
alternate_cycling_in_list.py | rutvij15/problem-solving | 0 | 6621797 | <reponame>rutvij15/problem-solving
# Python3 code to demonstrate working of
# Alternate Cycling in list
# using reversed() + islice() + iter() + cycle() + next() + list comprehension
from itertools import islice, cycle
# initialize list
test_list = [5, 6, 8, 9, 10, 21, 3]
# printing original list
print("The original list is : " + str(test_list))
# Alternate Cycling in list
# using reversed() + islice() + iter() + cycle() + next() + list comprehension
# cycle() keeps handing back the SAME two iterator objects (one forward,
# one reversed), so each next() draws alternately from the front and the
# back of the list: result is [5, 3, 6, 21, 8, 10, 9].
res = [next(i) for i in islice(cycle((iter(test_list),
                                      reversed(test_list))), len(test_list))]
# printing result
print("Alternate Cyclic iteration is : " + str(res))
# Python3 code to demonstrate working of
# Alternate Cycling in list
# using reversed() + islice() + iter() + cycle() + next() + list comprehension
from itertools import islice, cycle
# initialize list
test_list = [5, 6, 8, 9, 10, 21, 3]
# printing original list
print("The original list is : " + str(test_list))
# Alternate Cycling in list
# using reversed() + islice() + iter() + cycle() + next() + list comprehension
# cycle() keeps handing back the SAME two iterator objects (one forward,
# one reversed), so each next() draws alternately from the front and the
# back of the list: result is [5, 3, 6, 21, 8, 10, 9].
res = [next(i) for i in islice(cycle((iter(test_list),
                                      reversed(test_list))), len(test_list))]
# printing result
print("Alternate Cyclic iteration is : " + str(res))
containercafe/common/states.py | rcbops-qe/containercafe | 0 | 6621798 | """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class UnrecognizedState(Exception):
    """Raised by State.set_state() when given a value that is not a known state."""
    pass
class State(object):
    """Current state of a container.

    ``value`` holds one of the integer constants below
    (INITIAL .. DESTROYED).
    """
    INITIAL = 0
    CREATED = 1
    STARTED = 2
    RUNNING = 3
    STOPPED = 4
    DESTROYED = 5

    # All legal numeric states, derived once from the constants above.
    _VALID_STATES = (INITIAL, CREATED, STARTED, RUNNING, STOPPED, DESTROYED)

    def __init__(self, initial_state=INITIAL):
        self.value = initial_state

    def __str__(self):
        return 'State({})'.format(self.value)

    def set_state(self, state):
        """Set the current state.

        Accepts either one of the integer constants (e.g. ``State.RUNNING``)
        or its name as a string (e.g. ``"RUNNING"``); the numeric value is
        stored either way.

        Raises:
            UnrecognizedState: if ``state`` is neither a known constant nor
                the name of one.

        Bug fix: the previous ``hasattr(self, state)`` check raised
        TypeError for the integer constants this class defines, and for
        string input it stored the *name* instead of the numeric state.
        """
        if isinstance(state, str):
            # Map a constant name ("RUNNING") to its numeric value.
            state = getattr(type(self), state, None)
        if state in self._VALID_STATES:
            self.value = state
        else:
            raise UnrecognizedState
| """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class UnrecognizedState(Exception):
    """Raised by State.set_state() when given a value that is not a known state."""
    pass
class State(object):
    """Current state of a container.

    ``value`` holds one of the integer constants below
    (INITIAL .. DESTROYED).
    """
    INITIAL = 0
    CREATED = 1
    STARTED = 2
    RUNNING = 3
    STOPPED = 4
    DESTROYED = 5

    # All legal numeric states, derived once from the constants above.
    _VALID_STATES = (INITIAL, CREATED, STARTED, RUNNING, STOPPED, DESTROYED)

    def __init__(self, initial_state=INITIAL):
        self.value = initial_state

    def __str__(self):
        return 'State({})'.format(self.value)

    def set_state(self, state):
        """Set the current state.

        Accepts either one of the integer constants (e.g. ``State.RUNNING``)
        or its name as a string (e.g. ``"RUNNING"``); the numeric value is
        stored either way.

        Raises:
            UnrecognizedState: if ``state`` is neither a known constant nor
                the name of one.

        Bug fix: the previous ``hasattr(self, state)`` check raised
        TypeError for the integer constants this class defines, and for
        string input it stored the *name* instead of the numeric state.
        """
        if isinstance(state, str):
            # Map a constant name ("RUNNING") to its numeric value.
            state = getattr(type(self), state, None)
        if state in self._VALID_STATES:
            self.value = state
        else:
            raise UnrecognizedState
| en | 0.851137 | Copyright 2014 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Current state of a container | 2.418448 | 2 |
tellme/tests/testproject/settings.py | danihodovic/django-tellme | 147 | 6621799 | <gh_stars>100-1000
import os
# Minimal Django settings for tellme's test project (see the
# tellme.tests.testproject module paths below) — not for production use.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Hard-coded key is acceptable here because these settings only run tests.
SECRET_KEY = "Shuuut... It's a secret"
DEBUG = False
# Accept any host header; the test server is never publicly exposed.
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.admin',
    'tellme',
]
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'tellme.tests.testproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tellme.tests.testproject.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # In-memory SQLite by default; override via DATABASE_PATH.
        'NAME': os.environ.get('DATABASE_PATH', ':memory:'),
    }
}
# No password validation during tests.
AUTH_PASSWORD_VALIDATORS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Recipient address for tellme feedback (value redacted in this dump).
TELLME_FEEDBACK_EMAIL = '<EMAIL>'
| import os
# Minimal Django settings for tellme's test project (see the
# tellme.tests.testproject module paths below) — not for production use.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Hard-coded key is acceptable here because these settings only run tests.
SECRET_KEY = "Shuuut... It's a secret"
DEBUG = False
# Accept any host header; the test server is never publicly exposed.
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.admin',
    'tellme',
]
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'tellme.tests.testproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tellme.tests.testproject.wsgi.application'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # In-memory SQLite by default; override via DATABASE_PATH.
        'NAME': os.environ.get('DATABASE_PATH', ':memory:'),
    }
}
# No password validation during tests.
AUTH_PASSWORD_VALIDATORS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Recipient address for tellme feedback (value redacted in this dump).
TELLME_FEEDBACK_EMAIL = '<EMAIL>'
deepneuro/preprocessing/skullstrip.py | ysuter/DeepNeuro | 113 | 6621800 | <gh_stars>100-1000
import subprocess
import os
import numpy as np
from deepneuro.preprocessing.preprocessor import Preprocessor
from deepneuro.utilities.conversion import read_image_files, save_numpy_2_nifti
from deepneuro.utilities.util import add_parameter, quotes
from deepneuro.outputs.segmentation import PatchesInference
from deepneuro.postprocessing.label import BinarizeLabel, FillHoles, LargestComponents
from deepneuro.models.model import load_model_with_output
FNULL = open(os.devnull, 'w')
class SkullStrip(Preprocessor):
def load(self, kwargs):
""" Parameters
----------
depth : int, optional
Specified the layers deep the proposed U-Net should go.
Layer depth is symmetric on both upsampling and downsampling
arms.
max_filter: int, optional
Specifies the number of filters at the bottom level of the U-Net.
"""
add_parameter(self, kwargs, 'command', ['fsl4.1-bet2'])
# add_parameter(self, kwargs, 'command', ['bet2'])
add_parameter(self, kwargs, 'same_mask', True)
add_parameter(self, kwargs, 'reference_channel', None)
add_parameter(self, kwargs, 'bet2_f', .5)
add_parameter(self, kwargs, 'bet2_g', 0)
add_parameter(self, kwargs, 'name', 'SkullStrip')
add_parameter(self, kwargs, 'preprocessor_string', '_SkullStripped')
self.array_input = True
self.mask_string = '_Skullstrip_Mask'
self.mask_filename = None
def initialize(self, data_collection):
super(SkullStrip, self).initialize(data_collection)
for label, data_group in list(data_collection.data_groups.items()):
reference_filename = data_group.data[data_collection.current_case][self.reference_channel]
self.mask_filename = self.generate_output_filename(reference_filename, self.mask_string)
if type(data_group.preprocessed_case) is list:
input_file = data_group.preprocessed_case[self.reference_channel]
else:
# What to do about affines here... Also, reroute this file to a temporary directory.
input_file = save_numpy_2_nifti(data_group.preprocessed_case[..., self.reference_channel], data_group.preprocessed_affine, self.generate_output_filename(reference_filename))
specific_command = self.command + [quotes(input_file), quotes(self.mask_filename), '-f', str(self.bet2_f), '-g', str(self.bet2_g), '-m']
subprocess.call(' '.join(specific_command), shell=True)
os.rename(self.mask_filename + '_mask.nii.gz', self.mask_filename)
self.mask_numpy = read_image_files(self.mask_filename, return_affine=False)
def preprocess(self, data_group):
self.output_data = data_group.preprocessed_case
# Ineffective numpy broadcasting happening here..
self.output_data[self.mask_numpy[..., 0] == 0] = 0
data_group.preprocessed_data = self.output_data
class SkullStrip_Model(Preprocessor):
""" Performs skull-stripping using a model trained in DeepNeuro.
"""
def load(self, kwargs):
""" Parameters
----------
name : str, optional
Preprocessor name for internal use. Default is 'SkullStrip_Model'
preprocessor_string: str, optional
Appended suffix to filenames saved out from this preprocessor.
Default is '_SkullStripped'
reference_channel: int or list, optional
model: DeepNeuroModel, optional
DeepNeuroModel from which to run inference in this preprocessor.
"""
add_parameter(self, kwargs, 'reference_channel', [0, 1])
add_parameter(self, kwargs, 'model', None)
# Data Output Parameters
add_parameter(self, kwargs, 'output_filename', 'skullstrip_mask.nii.gz')
add_parameter(self, kwargs, 'name', 'SkullStrip_Model')
add_parameter(self, kwargs, 'preprocessor_string', '_SkullStripped')
self.array_input = True
self.mask_string = '_Skullstrip_Mask'
self.mask_filename = None
self.mask_numpy = None
if type(self.reference_channel) is not list:
self.reference_channel = [self.reference_channel]
def initialize(self, data_collection):
super(SkullStrip_Model, self).initialize(data_collection)
if self.model is None:
skullstripping_prediction_parameters = {'inputs': ['input_data'],
'output_filename': self.output_filename,
'batch_size': 50,
'patch_overlaps': 3,
'output_patch_shape': (56, 56, 6, 1),
'save_to_file': False,
'data_collection': data_collection,
'verbose': self.verbose}
self.model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])
def execute(self, data_collection, return_array=False):
if self.mask_numpy is None:
for label, data_group in list(data_collection.data_groups.items()):
input_data = {'input_data': np.take(data_group.preprocessed_case, self.reference_channel, axis=-1)[np.newaxis, ...]}
# Hacky -- TODO: Revise.
self.model.outputs[-1].model = self.model
self.model.outputs[-1].input_patch_shape = self.model.outputs[-1].model.model.layers[0].input_shape
self.model.outputs[-1].process_case(input_data)
self.model.outputs[-1].postprocess(input_data)
reference_filename = data_group.data[data_collection.current_case][self.reference_channel[0]]
self.mask_filename = self.generate_output_filename(reference_filename, self.mask_string)
save_numpy_2_nifti(np.squeeze(self.model.outputs[-1].return_objects[-1]), self.mask_filename, data_group.preprocessed_affine) # Hacky
self.mask_numpy = read_image_files(self.mask_filename, return_affine=False)
super(SkullStrip_Model, self).execute(data_collection, return_array)
def preprocess(self, data_group):
self.output_data = data_group.preprocessed_case
# Ineffective numpy broadcasting happening here..
self.output_data[self.mask_numpy[..., 0] == 0] = 0
data_group.preprocessed_data = self.output_data | import subprocess
import os
import numpy as np
from deepneuro.preprocessing.preprocessor import Preprocessor
from deepneuro.utilities.conversion import read_image_files, save_numpy_2_nifti
from deepneuro.utilities.util import add_parameter, quotes
from deepneuro.outputs.segmentation import PatchesInference
from deepneuro.postprocessing.label import BinarizeLabel, FillHoles, LargestComponents
from deepneuro.models.model import load_model_with_output
FNULL = open(os.devnull, 'w')
class SkullStrip(Preprocessor):
def load(self, kwargs):
""" Parameters
----------
depth : int, optional
Specified the layers deep the proposed U-Net should go.
Layer depth is symmetric on both upsampling and downsampling
arms.
max_filter: int, optional
Specifies the number of filters at the bottom level of the U-Net.
"""
add_parameter(self, kwargs, 'command', ['fsl4.1-bet2'])
# add_parameter(self, kwargs, 'command', ['bet2'])
add_parameter(self, kwargs, 'same_mask', True)
add_parameter(self, kwargs, 'reference_channel', None)
add_parameter(self, kwargs, 'bet2_f', .5)
add_parameter(self, kwargs, 'bet2_g', 0)
add_parameter(self, kwargs, 'name', 'SkullStrip')
add_parameter(self, kwargs, 'preprocessor_string', '_SkullStripped')
self.array_input = True
self.mask_string = '_Skullstrip_Mask'
self.mask_filename = None
def initialize(self, data_collection):
super(SkullStrip, self).initialize(data_collection)
for label, data_group in list(data_collection.data_groups.items()):
reference_filename = data_group.data[data_collection.current_case][self.reference_channel]
self.mask_filename = self.generate_output_filename(reference_filename, self.mask_string)
if type(data_group.preprocessed_case) is list:
input_file = data_group.preprocessed_case[self.reference_channel]
else:
# What to do about affines here... Also, reroute this file to a temporary directory.
input_file = save_numpy_2_nifti(data_group.preprocessed_case[..., self.reference_channel], data_group.preprocessed_affine, self.generate_output_filename(reference_filename))
specific_command = self.command + [quotes(input_file), quotes(self.mask_filename), '-f', str(self.bet2_f), '-g', str(self.bet2_g), '-m']
subprocess.call(' '.join(specific_command), shell=True)
os.rename(self.mask_filename + '_mask.nii.gz', self.mask_filename)
self.mask_numpy = read_image_files(self.mask_filename, return_affine=False)
def preprocess(self, data_group):
self.output_data = data_group.preprocessed_case
# Ineffective numpy broadcasting happening here..
self.output_data[self.mask_numpy[..., 0] == 0] = 0
data_group.preprocessed_data = self.output_data
class SkullStrip_Model(Preprocessor):
""" Performs skull-stripping using a model trained in DeepNeuro.
"""
def load(self, kwargs):
""" Parameters
----------
name : str, optional
Preprocessor name for internal use. Default is 'SkullStrip_Model'
preprocessor_string: str, optional
Appended suffix to filenames saved out from this preprocessor.
Default is '_SkullStripped'
reference_channel: int or list, optional
model: DeepNeuroModel, optional
DeepNeuroModel from which to run inference in this preprocessor.
"""
add_parameter(self, kwargs, 'reference_channel', [0, 1])
add_parameter(self, kwargs, 'model', None)
# Data Output Parameters
add_parameter(self, kwargs, 'output_filename', 'skullstrip_mask.nii.gz')
add_parameter(self, kwargs, 'name', 'SkullStrip_Model')
add_parameter(self, kwargs, 'preprocessor_string', '_SkullStripped')
self.array_input = True
self.mask_string = '_Skullstrip_Mask'
self.mask_filename = None
self.mask_numpy = None
if type(self.reference_channel) is not list:
self.reference_channel = [self.reference_channel]
def initialize(self, data_collection):
super(SkullStrip_Model, self).initialize(data_collection)
if self.model is None:
skullstripping_prediction_parameters = {'inputs': ['input_data'],
'output_filename': self.output_filename,
'batch_size': 50,
'patch_overlaps': 3,
'output_patch_shape': (56, 56, 6, 1),
'save_to_file': False,
'data_collection': data_collection,
'verbose': self.verbose}
self.model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])
def execute(self, data_collection, return_array=False):
if self.mask_numpy is None:
for label, data_group in list(data_collection.data_groups.items()):
input_data = {'input_data': np.take(data_group.preprocessed_case, self.reference_channel, axis=-1)[np.newaxis, ...]}
# Hacky -- TODO: Revise.
self.model.outputs[-1].model = self.model
self.model.outputs[-1].input_patch_shape = self.model.outputs[-1].model.model.layers[0].input_shape
self.model.outputs[-1].process_case(input_data)
self.model.outputs[-1].postprocess(input_data)
reference_filename = data_group.data[data_collection.current_case][self.reference_channel[0]]
self.mask_filename = self.generate_output_filename(reference_filename, self.mask_string)
save_numpy_2_nifti(np.squeeze(self.model.outputs[-1].return_objects[-1]), self.mask_filename, data_group.preprocessed_affine) # Hacky
self.mask_numpy = read_image_files(self.mask_filename, return_affine=False)
super(SkullStrip_Model, self).execute(data_collection, return_array)
def preprocess(self, data_group):
self.output_data = data_group.preprocessed_case
# Ineffective numpy broadcasting happening here..
self.output_data[self.mask_numpy[..., 0] == 0] = 0
data_group.preprocessed_data = self.output_data | en | 0.629229 | Parameters ---------- depth : int, optional Specified the layers deep the proposed U-Net should go. Layer depth is symmetric on both upsampling and downsampling arms. max_filter: int, optional Specifies the number of filters at the bottom level of the U-Net. # add_parameter(self, kwargs, 'command', ['bet2']) # What to do about affines here... Also, reroute this file to a temporary directory. # Ineffective numpy broadcasting happening here.. Performs skull-stripping using a model trained in DeepNeuro. Parameters ---------- name : str, optional Preprocessor name for internal use. Default is 'SkullStrip_Model' preprocessor_string: str, optional Appended suffix to filenames saved out from this preprocessor. Default is '_SkullStripped' reference_channel: int or list, optional model: DeepNeuroModel, optional DeepNeuroModel from which to run inference in this preprocessor. # Data Output Parameters # Hacky -- TODO: Revise. # Hacky # Ineffective numpy broadcasting happening here.. | 2.187788 | 2 |
Day-069/decorators.py | adrianurdar/100DaysOfCode-Bootcamp | 1 | 6621801 | from functools import wraps
from flask_login import current_user
from flask import render_template, abort
# ONLY ADMIN DECORATOR
def admin_only(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.id != 1:
return render_template(abort(403))
return f(*args, **kwargs)
return decorated_function
| from functools import wraps
from flask_login import current_user
from flask import render_template, abort
# ONLY ADMIN DECORATOR
def admin_only(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.id != 1:
return render_template(abort(403))
return f(*args, **kwargs)
return decorated_function
| ja | 0.250229 | # ONLY ADMIN DECORATOR | 2.280935 | 2 |
001113StepikPyGEK/StepikPyGEK001113сh03p03st03C02_20200408.py | SafonovMikhail/python_000577 | 0 | 6621802 | n = float(input()) # школьников
k = float(input()) # яблок
print(k // n)
print(k - n * (k // n))
| n = float(input()) # школьников
k = float(input()) # яблок
print(k // n)
print(k - n * (k // n))
| ru | 0.982495 | # школьников # яблок | 3.276004 | 3 |
aioairzone/const.py | Noltari/aioairzone | 0 | 6621803 | <filename>aioairzone/const.py
"""Airzone library constants."""
API_ACS_POINT = "acs_temp"
API_AIR_DEMAND = "air_demand"
API_COLD_STAGE = "coldStage"
API_COLD_STAGES = "coldStages"
API_COOL_MAX_TEMP = "coolmaxtemp"
API_COOL_MIN_TEMP = "coolmintemp"
API_COOL_SET_POINT = "coolsetpoint"
API_DATA = "data"
API_DOUBLE_SET_POINT = "double_sp"
API_ERROR = "error"
API_ERRORS = "errors"
API_FLOOR_DEMAND = "floor_demand"
API_HEAT_MAX_TEMP = "heatmaxtemp"
API_HEAT_MIN_TEMP = "heatmintemp"
API_HEAT_SET_POINT = "heatsetpoint"
API_HEAT_STAGE = "heatStage"
API_HEAT_STAGES = "heatStages"
API_HUMIDITY = "humidity"
API_HVAC = "hvac"
API_INTERFACE = "interface"
API_MAC = "mac"
API_MAX_TEMP = "maxTemp"
API_MIN_TEMP = "minTemp"
API_MODE = "mode"
API_MODES = "modes"
API_NAME = "name"
API_ON = "on"
API_POWER = "power"
API_ROOM_TEMP = "roomTemp"
API_SET_POINT = "setpoint"
API_SPEED = "speed"
API_SPEEDS = "speeds"
API_SYSTEM_FIRMWARE = "system_firmware"
API_SYSTEM_ID = "systemID"
API_SYSTEM_TYPE = "system_type"
API_SYSTEMS = "systems"
API_THERMOS_FIRMWARE = "thermos_firmware"
API_THERMOS_RADIO = "thermos_radio"
API_THERMOS_TYPE = "thermos_type"
API_UNITS = "units"
API_V1 = "api/v1"
API_WEBSERVER = "webserver"
API_WIFI = "wifi"
API_WIFI_CHANNEL = "wifi_channel"
API_WIFI_QUALITY = "wifi_quality"
API_WIFI_RSSI = "wifi_rssi"
API_WS_AIDOO = "ws_aidoo"
API_WS_AZ = "ws_az"
API_WS_FIRMWARE = "firmware"
API_WS_TYPE = "ws_type"
API_ZONE_ID = "zoneID"
API_ERROR_LOW_BATTERY = "Low battery"
API_ERROR_METHOD_NOT_SUPPORTED = "Method not provided or not supported"
API_ERROR_REQUEST_MALFORMED = "request malformed"
API_ERROR_SYSTEM_ID_NOT_AVAILABLE = "systemid not avaiable"
API_ERROR_SYSTEM_ID_OUT_RANGE = "systemid out of range"
API_ERROR_ZONE_ID_NOT_AVAILABLE = "zoneid not avaiable"
API_ERROR_ZONE_ID_NOT_PROVIDED = "zoneid not provided"
API_ERROR_ZONE_ID_OUT_RANGE = "zoneid out of range"
API_DOUBLE_SET_POINT_PARAMS = {
API_COOL_MAX_TEMP,
API_COOL_MIN_TEMP,
API_COOL_SET_POINT,
API_HEAT_MAX_TEMP,
API_HEAT_MIN_TEMP,
API_HEAT_SET_POINT,
}
API_SYSTEM_PARAMS = [
API_MODE,
API_SPEED,
]
API_ZONE_PARAMS = [
API_COOL_SET_POINT,
API_COLD_STAGE,
API_HEAT_SET_POINT,
API_HEAT_STAGE,
API_NAME,
API_ON,
API_SET_POINT,
]
AZD_AIR_DEMAND = "air-demand"
AZD_BATTERY_LOW = "battery-low"
AZD_COLD_STAGE = "cold-stage"
AZD_COLD_STAGES = "cold-stages"
AZD_COOL_TEMP_MAX = "cool-temp-max"
AZD_COOL_TEMP_MIN = "cool-temp-min"
AZD_COOL_TEMP_SET = "cool-temp-set"
AZD_DEMAND = "demand"
AZD_DOUBLE_SET_POINT = "double-set-point"
AZD_ENERGY = "energy"
AZD_ERRORS = "errors"
AZD_FIRMWARE = "firmware"
AZD_FULL_NAME = "full-name"
AZD_FLOOR_DEMAND = "floor-demand"
AZD_HEAT_TEMP_MAX = "heat-temp-max"
AZD_HEAT_TEMP_MIN = "heat-temp-min"
AZD_HEAT_TEMP_SET = "heat-temp-set"
AZD_HEAT_STAGE = "heat-stage"
AZD_HEAT_STAGES = "heat-stages"
AZD_HUMIDITY = "humidity"
AZD_ID = "id"
AZD_INTERFACE = "interface"
AZD_MAC = "mac"
AZD_MASTER = "master"
AZD_MODE = "mode"
AZD_MODEL = "model"
AZD_MODES = "modes"
AZD_NAME = "name"
AZD_ON = "on"
AZD_PROBLEMS = "problems"
AZD_SPEED = "speed"
AZD_SPEEDS = "speeds"
AZD_SYSTEM = "system"
AZD_SYSTEMS = "systems"
AZD_SYSTEMS_NUM = "num-systems"
AZD_TEMP = "temp"
AZD_TEMP_MAX = "temp-max"
AZD_TEMP_MIN = "temp-min"
AZD_TEMP_SET = "temp-set"
AZD_TEMP_UNIT = "temp-unit"
AZD_THERMOSTAT_FW = "thermostat-fw"
AZD_THERMOSTAT_MODEL = "thermostat-model"
AZD_THERMOSTAT_RADIO = "thermostat-radio"
AZD_WEBSERVER = "webserver"
AZD_WIFI_CHANNEL = "wifi-channel"
AZD_WIFI_QUALITY = "wifi-quality"
AZD_WIFI_RSSI = "wifi-rssi"
AZD_ZONES = "zones"
AZD_ZONES_NUM = "num-zones"
DEFAULT_PORT = 3000
DEFAULT_SYSTEM_ID = 0
ERROR_SYSTEM = "system"
ERROR_ZONE = "zone"
HTTP_CALL_TIMEOUT = 10
RAW_HVAC = "hvac"
RAW_SYSTEMS = "systems"
RAW_WEBSERVER = "webserver"
THERMOSTAT_RADIO = "Radio"
THERMOSTAT_WIRED = "Wired"
| <filename>aioairzone/const.py
"""Airzone library constants."""
API_ACS_POINT = "acs_temp"
API_AIR_DEMAND = "air_demand"
API_COLD_STAGE = "coldStage"
API_COLD_STAGES = "coldStages"
API_COOL_MAX_TEMP = "coolmaxtemp"
API_COOL_MIN_TEMP = "coolmintemp"
API_COOL_SET_POINT = "coolsetpoint"
API_DATA = "data"
API_DOUBLE_SET_POINT = "double_sp"
API_ERROR = "error"
API_ERRORS = "errors"
API_FLOOR_DEMAND = "floor_demand"
API_HEAT_MAX_TEMP = "heatmaxtemp"
API_HEAT_MIN_TEMP = "heatmintemp"
API_HEAT_SET_POINT = "heatsetpoint"
API_HEAT_STAGE = "heatStage"
API_HEAT_STAGES = "heatStages"
API_HUMIDITY = "humidity"
API_HVAC = "hvac"
API_INTERFACE = "interface"
API_MAC = "mac"
API_MAX_TEMP = "maxTemp"
API_MIN_TEMP = "minTemp"
API_MODE = "mode"
API_MODES = "modes"
API_NAME = "name"
API_ON = "on"
API_POWER = "power"
API_ROOM_TEMP = "roomTemp"
API_SET_POINT = "setpoint"
API_SPEED = "speed"
API_SPEEDS = "speeds"
API_SYSTEM_FIRMWARE = "system_firmware"
API_SYSTEM_ID = "systemID"
API_SYSTEM_TYPE = "system_type"
API_SYSTEMS = "systems"
API_THERMOS_FIRMWARE = "thermos_firmware"
API_THERMOS_RADIO = "thermos_radio"
API_THERMOS_TYPE = "thermos_type"
API_UNITS = "units"
API_V1 = "api/v1"
API_WEBSERVER = "webserver"
API_WIFI = "wifi"
API_WIFI_CHANNEL = "wifi_channel"
API_WIFI_QUALITY = "wifi_quality"
API_WIFI_RSSI = "wifi_rssi"
API_WS_AIDOO = "ws_aidoo"
API_WS_AZ = "ws_az"
API_WS_FIRMWARE = "firmware"
API_WS_TYPE = "ws_type"
API_ZONE_ID = "zoneID"
API_ERROR_LOW_BATTERY = "Low battery"
API_ERROR_METHOD_NOT_SUPPORTED = "Method not provided or not supported"
API_ERROR_REQUEST_MALFORMED = "request malformed"
API_ERROR_SYSTEM_ID_NOT_AVAILABLE = "systemid not avaiable"
API_ERROR_SYSTEM_ID_OUT_RANGE = "systemid out of range"
API_ERROR_ZONE_ID_NOT_AVAILABLE = "zoneid not avaiable"
API_ERROR_ZONE_ID_NOT_PROVIDED = "zoneid not provided"
API_ERROR_ZONE_ID_OUT_RANGE = "zoneid out of range"
API_DOUBLE_SET_POINT_PARAMS = {
API_COOL_MAX_TEMP,
API_COOL_MIN_TEMP,
API_COOL_SET_POINT,
API_HEAT_MAX_TEMP,
API_HEAT_MIN_TEMP,
API_HEAT_SET_POINT,
}
API_SYSTEM_PARAMS = [
API_MODE,
API_SPEED,
]
API_ZONE_PARAMS = [
API_COOL_SET_POINT,
API_COLD_STAGE,
API_HEAT_SET_POINT,
API_HEAT_STAGE,
API_NAME,
API_ON,
API_SET_POINT,
]
AZD_AIR_DEMAND = "air-demand"
AZD_BATTERY_LOW = "battery-low"
AZD_COLD_STAGE = "cold-stage"
AZD_COLD_STAGES = "cold-stages"
AZD_COOL_TEMP_MAX = "cool-temp-max"
AZD_COOL_TEMP_MIN = "cool-temp-min"
AZD_COOL_TEMP_SET = "cool-temp-set"
AZD_DEMAND = "demand"
AZD_DOUBLE_SET_POINT = "double-set-point"
AZD_ENERGY = "energy"
AZD_ERRORS = "errors"
AZD_FIRMWARE = "firmware"
AZD_FULL_NAME = "full-name"
AZD_FLOOR_DEMAND = "floor-demand"
AZD_HEAT_TEMP_MAX = "heat-temp-max"
AZD_HEAT_TEMP_MIN = "heat-temp-min"
AZD_HEAT_TEMP_SET = "heat-temp-set"
AZD_HEAT_STAGE = "heat-stage"
AZD_HEAT_STAGES = "heat-stages"
AZD_HUMIDITY = "humidity"
AZD_ID = "id"
AZD_INTERFACE = "interface"
AZD_MAC = "mac"
AZD_MASTER = "master"
AZD_MODE = "mode"
AZD_MODEL = "model"
AZD_MODES = "modes"
AZD_NAME = "name"
AZD_ON = "on"
AZD_PROBLEMS = "problems"
AZD_SPEED = "speed"
AZD_SPEEDS = "speeds"
AZD_SYSTEM = "system"
AZD_SYSTEMS = "systems"
AZD_SYSTEMS_NUM = "num-systems"
AZD_TEMP = "temp"
AZD_TEMP_MAX = "temp-max"
AZD_TEMP_MIN = "temp-min"
AZD_TEMP_SET = "temp-set"
AZD_TEMP_UNIT = "temp-unit"
AZD_THERMOSTAT_FW = "thermostat-fw"
AZD_THERMOSTAT_MODEL = "thermostat-model"
AZD_THERMOSTAT_RADIO = "thermostat-radio"
AZD_WEBSERVER = "webserver"
AZD_WIFI_CHANNEL = "wifi-channel"
AZD_WIFI_QUALITY = "wifi-quality"
AZD_WIFI_RSSI = "wifi-rssi"
AZD_ZONES = "zones"
AZD_ZONES_NUM = "num-zones"
DEFAULT_PORT = 3000
DEFAULT_SYSTEM_ID = 0
ERROR_SYSTEM = "system"
ERROR_ZONE = "zone"
HTTP_CALL_TIMEOUT = 10
RAW_HVAC = "hvac"
RAW_SYSTEMS = "systems"
RAW_WEBSERVER = "webserver"
THERMOSTAT_RADIO = "Radio"
THERMOSTAT_WIRED = "Wired"
| en | 0.69048 | Airzone library constants. | 1.60511 | 2 |
src/plat/base_shop_edit.py | jack139/fair | 1 | 6621804 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from bson.objectid import ObjectId
from config import setting
import helper, lbs
db = setting.db_web
url = ('/plat/base_shop_edit')
class handler: #class PlatBaseShopEdit:
def GET(self):
if helper.logged(helper.PRIV_USER,'PLAT_BASE_SHOP'):
render = helper.create_render(globals={'str':str})
user_data=web.input(base_shop='')
if user_data.base_shop=='':
return render.info('错误的参数!')
db_shop=db.base_shop.find_one({'_id':ObjectId(user_data.base_shop)})
if db_shop!=None:
return render.base_shop_edit(helper.get_session_uname(), helper.get_privilege_name(),
db_shop, helper.SHOP_TYPE)
else:
return render.info('错误的参数!')
else:
raise web.seeother('/')
def POST(self):
if helper.logged(helper.PRIV_USER,'PLAT_BASE_SHOP'):
render = helper.create_render()
user_data=web.input(base_shop='', shop_name='',shortname='',abstract='',available='1',
address='',people='1',type='',worker=0,note='',app_shop='0',radius='2')
#print user_data
if user_data.shop_name=='':
return render.info('店名不能为空!')
if user_data.type=='':
return render.info('门店类型不能为空!')
# 取得lbs坐标
ret, loc0 = lbs.addr_to_loc(user_data['address'].encode('utf-8'))
if ret<0:
loc0 = {'lat': 0, 'lng': 0}
# 取得多边形坐标
#poly=user_data['poly'].encode('utf-8').split(',')
#poly_xy=[]
#if len(poly)>1:
# for i in poly:
# ret, loc = lbs.addr_to_loc(i)
# if ret<0:
# loc = (0,0)
# poly_xy.append((loc['lat'],loc['lng']))
# poly_xy.append(poly_xy[0])
poly=user_data['poly'].encode('utf-8').split(';') # 已经是百度坐标
poly_xy=[]
if len(poly)>1:
for i in poly:
poly_xy.append(eval(i))
poly_xy.append(poly_xy[0])
db.base_shop.update_one({'_id':ObjectId(user_data['base_shop'])}, {'$set':{
'name' : user_data['shop_name'],
'shortname' : user_data['shortname'],
'abstract' : user_data['abstract'],
'address' : user_data['address'],
'loc' : loc0,
'people' : user_data['people'],
'type' : user_data['type'],
'available' : int(user_data['available']),
'worker' : int(user_data['worker']),
#'image' : user_data['image'].split(','),
'app_shop' : int(user_data['app_shop']),
'radius' : int(user_data['radius']),
'poly' : user_data['poly'],
'poly_xy' : poly_xy,
'note' : user_data['note'],
}})
db.base_shop.update_one({'_id':ObjectId(user_data['base_shop'])}, {'$push':{
'history' : (helper.time_str(), helper.get_session_uname(), '修改'), # 纪录操作历史
}})
return render.info('成功保存!','/plat/base_shop')
else:
raise web.seeother('/')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from bson.objectid import ObjectId
from config import setting
import helper, lbs
db = setting.db_web
url = ('/plat/base_shop_edit')
class handler: #class PlatBaseShopEdit:
def GET(self):
if helper.logged(helper.PRIV_USER,'PLAT_BASE_SHOP'):
render = helper.create_render(globals={'str':str})
user_data=web.input(base_shop='')
if user_data.base_shop=='':
return render.info('错误的参数!')
db_shop=db.base_shop.find_one({'_id':ObjectId(user_data.base_shop)})
if db_shop!=None:
return render.base_shop_edit(helper.get_session_uname(), helper.get_privilege_name(),
db_shop, helper.SHOP_TYPE)
else:
return render.info('错误的参数!')
else:
raise web.seeother('/')
def POST(self):
if helper.logged(helper.PRIV_USER,'PLAT_BASE_SHOP'):
render = helper.create_render()
user_data=web.input(base_shop='', shop_name='',shortname='',abstract='',available='1',
address='',people='1',type='',worker=0,note='',app_shop='0',radius='2')
#print user_data
if user_data.shop_name=='':
return render.info('店名不能为空!')
if user_data.type=='':
return render.info('门店类型不能为空!')
# 取得lbs坐标
ret, loc0 = lbs.addr_to_loc(user_data['address'].encode('utf-8'))
if ret<0:
loc0 = {'lat': 0, 'lng': 0}
# 取得多边形坐标
#poly=user_data['poly'].encode('utf-8').split(',')
#poly_xy=[]
#if len(poly)>1:
# for i in poly:
# ret, loc = lbs.addr_to_loc(i)
# if ret<0:
# loc = (0,0)
# poly_xy.append((loc['lat'],loc['lng']))
# poly_xy.append(poly_xy[0])
poly=user_data['poly'].encode('utf-8').split(';') # 已经是百度坐标
poly_xy=[]
if len(poly)>1:
for i in poly:
poly_xy.append(eval(i))
poly_xy.append(poly_xy[0])
db.base_shop.update_one({'_id':ObjectId(user_data['base_shop'])}, {'$set':{
'name' : user_data['shop_name'],
'shortname' : user_data['shortname'],
'abstract' : user_data['abstract'],
'address' : user_data['address'],
'loc' : loc0,
'people' : user_data['people'],
'type' : user_data['type'],
'available' : int(user_data['available']),
'worker' : int(user_data['worker']),
#'image' : user_data['image'].split(','),
'app_shop' : int(user_data['app_shop']),
'radius' : int(user_data['radius']),
'poly' : user_data['poly'],
'poly_xy' : poly_xy,
'note' : user_data['note'],
}})
db.base_shop.update_one({'_id':ObjectId(user_data['base_shop'])}, {'$push':{
'history' : (helper.time_str(), helper.get_session_uname(), '修改'), # 纪录操作历史
}})
return render.info('成功保存!','/plat/base_shop')
else:
raise web.seeother('/') | en | 0.199112 | #!/usr/bin/env python # -*- coding: utf-8 -*- #class PlatBaseShopEdit: #print user_data # 取得lbs坐标 # 取得多边形坐标 #poly=user_data['poly'].encode('utf-8').split(',') #poly_xy=[] #if len(poly)>1: # for i in poly: # ret, loc = lbs.addr_to_loc(i) # if ret<0: # loc = (0,0) # poly_xy.append((loc['lat'],loc['lng'])) # poly_xy.append(poly_xy[0]) # 已经是百度坐标 #'image' : user_data['image'].split(','), # 纪录操作历史 | 2.054214 | 2 |
meggie/utilities/dialogs/groupSelectionDialogMain.py | Teekuningas/meggie | 4 | 6621805 | """ Contains a class for logic of the group selection dialog.
"""
import logging
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from meggie.utilities.dialogs.groupSelectionDialogUi import Ui_groupSelectionDialog
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.validators import validate_name
class GroupSelectionDialog(QtWidgets.QDialog):
""" Contains the logic for group selection dialog.
"""
def __init__(self, experiment, parent, handler):
QtWidgets.QDialog.__init__(self, parent)
self.ui = Ui_groupSelectionDialog()
self.ui.setupUi(self)
self.handler = handler
subjects = experiment.subjects.keys()
subject_count = len(subjects)
# fill the dialog with subjects
for idx, subject_name in enumerate(subjects):
self._add_item(idx, subject_name)
self.subjects = subjects
def _add_item(self, idx, name):
setattr(self.ui, 'horizontalLayoutGroup_' +
str(idx), QtWidgets.QHBoxLayout())
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).setObjectName(
'horizontalLayoutGroup_' + str(idx))
setattr(self.ui, 'checkBoxGroup_' + str(idx),
QtWidgets.QCheckBox(self.ui.groupBoxGroups))
getattr(self.ui, 'checkBoxGroup_' + str(idx)
).setObjectName('checkBoxGroup_' + str(idx))
getattr(self.ui, 'checkBoxGroup_' + str(idx)).setText('')
getattr(self.ui, 'checkBoxGroup_' + str(idx)).setMaximumSize(20, 20)
getattr(self.ui, 'checkBoxGroup_' + str(idx)
).setCheckState(QtCore.Qt.Checked)
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).addWidget(
getattr(self.ui, 'checkBoxGroup_' + str(idx)))
setattr(self.ui, 'labelGroup_' + str(idx),
QtWidgets.QLabel(self.ui.groupBoxGroups))
getattr(self.ui, 'labelGroup_' + str(idx)
).setObjectName('labelGroup_' + str(idx))
getattr(self.ui, 'labelGroup_' + str(idx)).setText(name)
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).addWidget(
getattr(self.ui, 'labelGroup_' + str(idx)))
setattr(self.ui, 'spinBoxGroup_' + str(idx),
QtWidgets.QSpinBox(self.ui.groupBoxGroups))
getattr(self.ui, 'spinBoxGroup_' + str(idx)).setMinimum(1)
getattr(self.ui, 'spinBoxGroup_' + str(idx)).setMaximumSize(40, 1000)
getattr(self.ui, 'spinBoxGroup_' + str(idx)
).setObjectName('spinBoxGroup_' + str(idx))
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).addWidget(
getattr(self.ui, 'spinBoxGroup_' + str(idx)))
self.ui.gridLayout.addLayout(getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)),
idx, 2, 1, 1)
def accept(self):
groups = {}
for idx, subject in enumerate(self.subjects):
selected = getattr(self.ui, 'checkBoxGroup_' +
str(idx)).checkState()
if selected != QtCore.Qt.Checked:
continue
group_id = getattr(self.ui, 'spinBoxGroup_' + str(idx)).value()
if group_id in groups:
groups[group_id].append(subject)
else:
groups[group_id] = [subject]
self.handler(groups)
self.close()
| """ Contains a class for logic of the group selection dialog.
"""
import logging
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from meggie.utilities.dialogs.groupSelectionDialogUi import Ui_groupSelectionDialog
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.validators import validate_name
class GroupSelectionDialog(QtWidgets.QDialog):
""" Contains the logic for group selection dialog.
"""
def __init__(self, experiment, parent, handler):
QtWidgets.QDialog.__init__(self, parent)
self.ui = Ui_groupSelectionDialog()
self.ui.setupUi(self)
self.handler = handler
subjects = experiment.subjects.keys()
subject_count = len(subjects)
# fill the dialog with subjects
for idx, subject_name in enumerate(subjects):
self._add_item(idx, subject_name)
self.subjects = subjects
def _add_item(self, idx, name):
setattr(self.ui, 'horizontalLayoutGroup_' +
str(idx), QtWidgets.QHBoxLayout())
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).setObjectName(
'horizontalLayoutGroup_' + str(idx))
setattr(self.ui, 'checkBoxGroup_' + str(idx),
QtWidgets.QCheckBox(self.ui.groupBoxGroups))
getattr(self.ui, 'checkBoxGroup_' + str(idx)
).setObjectName('checkBoxGroup_' + str(idx))
getattr(self.ui, 'checkBoxGroup_' + str(idx)).setText('')
getattr(self.ui, 'checkBoxGroup_' + str(idx)).setMaximumSize(20, 20)
getattr(self.ui, 'checkBoxGroup_' + str(idx)
).setCheckState(QtCore.Qt.Checked)
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).addWidget(
getattr(self.ui, 'checkBoxGroup_' + str(idx)))
setattr(self.ui, 'labelGroup_' + str(idx),
QtWidgets.QLabel(self.ui.groupBoxGroups))
getattr(self.ui, 'labelGroup_' + str(idx)
).setObjectName('labelGroup_' + str(idx))
getattr(self.ui, 'labelGroup_' + str(idx)).setText(name)
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).addWidget(
getattr(self.ui, 'labelGroup_' + str(idx)))
setattr(self.ui, 'spinBoxGroup_' + str(idx),
QtWidgets.QSpinBox(self.ui.groupBoxGroups))
getattr(self.ui, 'spinBoxGroup_' + str(idx)).setMinimum(1)
getattr(self.ui, 'spinBoxGroup_' + str(idx)).setMaximumSize(40, 1000)
getattr(self.ui, 'spinBoxGroup_' + str(idx)
).setObjectName('spinBoxGroup_' + str(idx))
getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)).addWidget(
getattr(self.ui, 'spinBoxGroup_' + str(idx)))
self.ui.gridLayout.addLayout(getattr(self.ui, 'horizontalLayoutGroup_' + str(idx)),
idx, 2, 1, 1)
def accept(self):
groups = {}
for idx, subject in enumerate(self.subjects):
selected = getattr(self.ui, 'checkBoxGroup_' +
str(idx)).checkState()
if selected != QtCore.Qt.Checked:
continue
group_id = getattr(self.ui, 'spinBoxGroup_' + str(idx)).value()
if group_id in groups:
groups[group_id].append(subject)
else:
groups[group_id] = [subject]
self.handler(groups)
self.close()
| en | 0.777723 | Contains a class for logic of the group selection dialog. Contains the logic for group selection dialog. # fill the dialog with subjects | 2.492825 | 2 |
week7_EDA_pbi_map_rss/day1_rss_scraping_zip_xml/skyscanner_complete_problem/variables.py | PabloEduardoMartinezPicazo/Bootcamp-DataScience-2021 | 0 | 6621806 | <filename>week7_EDA_pbi_map_rss/day1_rss_scraping_zip_xml/skyscanner_complete_problem/variables.py<gh_stars>0
import json
with open('./credentials.json') as f:
credentials_gmail = json.load(f)
from_city = "MADR-sky"
to_city = "TYOA-sky"
salida = "2021-07"
llegada = "2021-08"
URL = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/"
headers = {
'x-rapidapi-key': "ed20a5eec4msh0f52d1c7ecac524p142a60jsnd882a51f9f9d",
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com"
}
| <filename>week7_EDA_pbi_map_rss/day1_rss_scraping_zip_xml/skyscanner_complete_problem/variables.py<gh_stars>0
import json
with open('./credentials.json') as f:
credentials_gmail = json.load(f)
from_city = "MADR-sky"
to_city = "TYOA-sky"
salida = "2021-07"
llegada = "2021-08"
URL = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/"
headers = {
'x-rapidapi-key': "ed20a5eec4msh0f52d1c7ecac524p142a60jsnd882a51f9f9d",
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com"
}
| none | 1 | 2.238692 | 2 | |
src/dataops/forms.py | uts-cic/ontask_b | 0 | 6621807 | <filename>src/dataops/forms.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
from django import forms
import ontask.ontask_prefs
from ontask.forms import RestrictedFileField, column_to_field
# Field prefix to use in forms to avoid using column names (they are given by
# the user and may pose a problem (injection bugs)
field_prefix = '___ontask___upload_'
# Step 1 of the CSV upload
class UploadCSVFileForm(forms.Form):
"""
Form to read a csv file. It also allows to specify the number of lines to
skip at the top and the bottom of the file. This functionality is offered
by the underlyng function read_csv in Pandas
"""
file = RestrictedFileField(
max_upload_size=str(ontask.ontask_prefs.MAX_UPLOAD_SIZE),
content_types=json.loads(str(ontask.ontask_prefs.CONTENT_TYPES)),
allow_empty_file=False,
label="",
help_text='File in CSV format (typically produced by a statistics'
' package or Excel)')
skip_lines_at_top = forms.IntegerField(
label='Lines to skip at the top',
help_text="Number of lines to skip at the top when reading the file",
initial=0,
required=False
)
skip_lines_at_bottom = forms.IntegerField(
label='Lines to skip at the bottom',
help_text="Number of lines to skip at the bottom when reading the "
"file",
initial=0,
required=False
)
def clean(self, *args, **kwargs):
"""
Function to check that the integers are positive.
:return: The cleaned data
"""
data = super(UploadCSVFileForm, self).clean(*args, **kwargs)
if data['skip_lines_at_top'] < 0:
self.add_error(
'skip_lines_at_top',
'This number has to be zero or positive'
)
if data['skip_lines_at_bottom'] < 0:
self.add_error(
'skip_lines_at_bottom',
'This number has to be zero or positive'
)
return data
# Step 1 of the CSV upload
class UploadExcelFileForm(forms.Form):
"""
Form to read an Excel file.
"""
file = RestrictedFileField(
max_upload_size=str(ontask.ontask_prefs.MAX_UPLOAD_SIZE),
content_types=[
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
allow_empty_file=False,
label="",
help_text='File in Excel format (.xls or .xlsx)')
sheet = forms.CharField(max_length=512,
required=True,
initial='Sheet 1')
# Step 1 of the CSV upload
class UploadSQLForm(forms.Form):
"""
Form to read data from SQL. We collect information to create a Database URI
to be used by SQLAlchemy:
dialect[+driver]://user:password@host/dbname[?key=value..]
"""
dialect = forms.CharField(
label='Dialect',
max_length=512,
required=True,
initial='',
help_text='Database type (mysql, oracle, postgresql, etc.'
)
driver = forms.CharField(
label='Driver',
max_length=512,
required=False,
initial='',
help_text='Name of the driver implementing the DBAPI'
)
dbusername = forms.CharField(
max_length=512,
label="Database user name",
required=False,
initial='',
help_text='User name to connect'
)
dbpassword = forms.CharField(
label='Database password',
required=False,
widget=forms.PasswordInput
)
host = forms.CharField(
label='Host',
max_length=512,
required=True,
help_text='Host to connect (include port if needed)'
)
dbname = forms.CharField(
label='Database name',
max_length=512,
required=True,
help_text='Name of the database'
)
query = forms.CharField(
label='Query',
required=True,
widget=forms.Textarea,
help_text='SQL query or table name to read'
)
def clean(self):
data = super(UploadSQLForm, self).clean()
if 'localhost' in data['host'] or '127.0.0' in data['host']:
self.add_error(
'host',
'Given host value is not accepted for security reasons.'
)
return data
# Form to select columns to upload and rename
class SelectColumnUploadForm(forms.Form):
def __init__(self, *args, **kargs):
"""
Kargs contain:
column_names: list with names of the columns to upload,
is_key: list stating if the corresponding column is key
:param args:
:param kargs:
"""
# Names of the columns to process and Boolean stating if they are key
self.column_names = kargs.pop('column_names')
self.is_key = kargs.pop('is_key')
super(SelectColumnUploadForm, self).__init__(*args, **kargs)
# Create as many fields as the given columns
for idx, c in enumerate(self.column_names):
self.fields['upload_%s' % idx] = forms.BooleanField(
label='',
required=False,
)
self.fields['new_name_%s' % idx] = forms.CharField(
initial=c,
label='',
strip=True,
required=False
)
def clean(self):
cleaned_data = super(SelectColumnUploadForm, self).clean()
upload_list = [cleaned_data.get('upload_%s' % i, False)
for i in range(len(self.column_names))]
# Check if at least a unique column has been selected
both_lists = zip(upload_list, self.is_key)
if not any([a and b for a, b in both_lists]):
raise forms.ValidationError('No unique column specified',
code='invalid')
# Get list of new names
new_names = [cleaned_data.get('new_name_%s' % i)
for i in range(len(self.column_names))]
# Step 3 of the CSV upload: select unique keys to merge
class SelectKeysForm(forms.Form):
how_merge_choices = [('left', 'only the keys in the table'),
('right', 'only the new keys'),
('outer', 'the union of the table and new keys '
'(outer)'),
('inner', 'the intersection of the table and new'
' keys (inner)')]
how_dup_columns_choices = [('override', 'override columns with new data'),
('rename', 'be renamed and become new columns.')]
dst_help = """Key column in the existing table to match with the new
data."""
src_help = """Key column in the new table to match with the existing data."""
merge_help = """How the keys in the table and the file are used for the
merge: 1) If only the keys from the table are used, any row in the file
with a key value not in the table is removed (default). 2) If only the
keys from the file are used, any row in the table with a key value not
in the file is removed. 3) If the union of keys is used, no row is
removed, but some rows will have empty values. 4) If the intersection of
the keys is used, only those rows with keys in both the table and the
file will be updated, the rest will be deleted."""
how_dup_columns_help = """The new data has columns with names identical
to those that are already part of the table. You may choose to override
them with the new data, or rename the new data and add them as new
columns."""
def __init__(self, *args, **kargs):
# Get the dst choices
dst_choices = [(x, x) for x in kargs.pop('dst_keys')]
dst_selected_key = kargs.pop('dst_selected_key')
dst_choice_initial = \
next((v for x, v in enumerate(dst_choices)
if v[0] == dst_selected_key),
('', '---'))
# Get the src choices
src_choices = [(x, x) for x in kargs.pop('src_keys')]
src_selected_key = kargs.pop('src_selected_key')
src_choice_initial = \
next((v for x, v in enumerate(src_choices)
if v[0] == src_selected_key),
('', '---'))
how_merge = kargs.pop('how_merge', None)
how_merge_initial = \
next((v for x, v in enumerate(self.how_merge_choices)
if v[0] == how_merge),
None)
# Boolean telling us if we have to add field to handle overlapping
# column names
are_overlap_cols = kargs.pop('are_overlap_cols')
how_dup_columns = kargs.pop('how_dup_columns')
super(SelectKeysForm, self).__init__(*args, **kargs)
self.fields['dst_key'] = \
forms.ChoiceField(initial=dst_choice_initial,
choices=dst_choices,
required=True,
label='Key Column in Table',
help_text=self.dst_help)
self.fields['src_key'] = \
forms.ChoiceField(initial=src_choice_initial,
choices=src_choices,
required=True,
label='Key Column in CSV',
help_text=self.src_help)
self.fields['how_merge'] = \
forms.ChoiceField(initial=how_merge_initial,
choices=self.how_merge_choices,
required=True,
label='Merge rows using',
help_text=self.merge_help)
if are_overlap_cols:
how_dup_columns_initial = \
next((v for x, v in enumerate(self.how_dup_columns_choices)
if v[0] == how_dup_columns), None)
self.fields['how_dup_columns'] = \
forms.ChoiceField(initial=how_dup_columns_initial,
choices=self.how_dup_columns_choices,
required=True,
label='Columns with already existing names'
' will',
help_text=self.merge_help)
# Form to allow value selection through unique keys in a workflow
class RowFilterForm(forms.Form):
def __init__(self, *args, **kargs):
# Store the instance
self.workflow = kargs.pop('workflow')
# Get the unique keys names and types
columns = self.workflow.columns.all()
self.key_names = [x.name for x in columns if x.is_key]
self.key_types = [x.data_type for x in columns if x.is_key]
super(RowFilterForm, self).__init__(*args, **kargs)
for name, field_type in zip(self.key_names, self.key_types):
if field_type == 'string':
self.fields[name] = forms.CharField(initial='',
label=name,
required=False)
elif field_type == 'integer':
self.fields[name] = forms.IntegerField(label=name,
required=False)
elif field_type == 'double':
self.fields[name] = forms.FloatField(label=name,
required=False)
elif field_type == 'boolean':
self.fields[name] = forms.BooleanField(required=False,
label=name)
elif field_type == 'datetime':
self.fields[name] = forms.DateTimeField(required=False,
label=name)
else:
raise Exception('Unable to process datatype', field_type)
# Form to enter values in a row
class RowForm(forms.Form):
def __init__(self, *args, **kargs):
# Store the instance
self.workflow = kargs.pop('workflow', None)
self.initial_values = kargs.pop('initial_values', None)
super(RowForm, self).__init__(*args, **kargs)
if not self.workflow:
return
# Get the columns
self.columns = self.workflow.get_columns()
# If no initial values have been given, replicate a list of Nones
if not self.initial_values:
self.initial_values = [None] * len(self.columns)
for idx, column in enumerate(self.columns):
field_name = field_prefix + '%s' % idx
self.fields[field_name] = \
column_to_field(column, self.initial_values[idx])
if column.is_key and self.initial_values[idx]:
self.fields[field_name].widget.attrs['readonly'] = 'readonly'
| <filename>src/dataops/forms.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
from django import forms
import ontask.ontask_prefs
from ontask.forms import RestrictedFileField, column_to_field
# Field prefix to use in forms to avoid using column names (they are given by
# the user and may pose a problem (injection bugs)
field_prefix = '___ontask___upload_'
# Step 1 of the CSV upload
class UploadCSVFileForm(forms.Form):
"""
Form to read a csv file. It also allows to specify the number of lines to
skip at the top and the bottom of the file. This functionality is offered
by the underlyng function read_csv in Pandas
"""
file = RestrictedFileField(
max_upload_size=str(ontask.ontask_prefs.MAX_UPLOAD_SIZE),
content_types=json.loads(str(ontask.ontask_prefs.CONTENT_TYPES)),
allow_empty_file=False,
label="",
help_text='File in CSV format (typically produced by a statistics'
' package or Excel)')
skip_lines_at_top = forms.IntegerField(
label='Lines to skip at the top',
help_text="Number of lines to skip at the top when reading the file",
initial=0,
required=False
)
skip_lines_at_bottom = forms.IntegerField(
label='Lines to skip at the bottom',
help_text="Number of lines to skip at the bottom when reading the "
"file",
initial=0,
required=False
)
def clean(self, *args, **kwargs):
"""
Function to check that the integers are positive.
:return: The cleaned data
"""
data = super(UploadCSVFileForm, self).clean(*args, **kwargs)
if data['skip_lines_at_top'] < 0:
self.add_error(
'skip_lines_at_top',
'This number has to be zero or positive'
)
if data['skip_lines_at_bottom'] < 0:
self.add_error(
'skip_lines_at_bottom',
'This number has to be zero or positive'
)
return data
# Step 1 of the CSV upload
class UploadExcelFileForm(forms.Form):
"""
Form to read an Excel file.
"""
file = RestrictedFileField(
max_upload_size=str(ontask.ontask_prefs.MAX_UPLOAD_SIZE),
content_types=[
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
allow_empty_file=False,
label="",
help_text='File in Excel format (.xls or .xlsx)')
sheet = forms.CharField(max_length=512,
required=True,
initial='Sheet 1')
# Step 1 of the CSV upload
class UploadSQLForm(forms.Form):
"""
Form to read data from SQL. We collect information to create a Database URI
to be used by SQLAlchemy:
dialect[+driver]://user:password@host/dbname[?key=value..]
"""
dialect = forms.CharField(
label='Dialect',
max_length=512,
required=True,
initial='',
help_text='Database type (mysql, oracle, postgresql, etc.'
)
driver = forms.CharField(
label='Driver',
max_length=512,
required=False,
initial='',
help_text='Name of the driver implementing the DBAPI'
)
dbusername = forms.CharField(
max_length=512,
label="Database user name",
required=False,
initial='',
help_text='User name to connect'
)
dbpassword = forms.CharField(
label='Database password',
required=False,
widget=forms.PasswordInput
)
host = forms.CharField(
label='Host',
max_length=512,
required=True,
help_text='Host to connect (include port if needed)'
)
dbname = forms.CharField(
label='Database name',
max_length=512,
required=True,
help_text='Name of the database'
)
query = forms.CharField(
label='Query',
required=True,
widget=forms.Textarea,
help_text='SQL query or table name to read'
)
def clean(self):
data = super(UploadSQLForm, self).clean()
if 'localhost' in data['host'] or '127.0.0' in data['host']:
self.add_error(
'host',
'Given host value is not accepted for security reasons.'
)
return data
# Form to select columns to upload and rename
class SelectColumnUploadForm(forms.Form):
def __init__(self, *args, **kargs):
"""
Kargs contain:
column_names: list with names of the columns to upload,
is_key: list stating if the corresponding column is key
:param args:
:param kargs:
"""
# Names of the columns to process and Boolean stating if they are key
self.column_names = kargs.pop('column_names')
self.is_key = kargs.pop('is_key')
super(SelectColumnUploadForm, self).__init__(*args, **kargs)
# Create as many fields as the given columns
for idx, c in enumerate(self.column_names):
self.fields['upload_%s' % idx] = forms.BooleanField(
label='',
required=False,
)
self.fields['new_name_%s' % idx] = forms.CharField(
initial=c,
label='',
strip=True,
required=False
)
def clean(self):
cleaned_data = super(SelectColumnUploadForm, self).clean()
upload_list = [cleaned_data.get('upload_%s' % i, False)
for i in range(len(self.column_names))]
# Check if at least a unique column has been selected
both_lists = zip(upload_list, self.is_key)
if not any([a and b for a, b in both_lists]):
raise forms.ValidationError('No unique column specified',
code='invalid')
# Get list of new names
new_names = [cleaned_data.get('new_name_%s' % i)
for i in range(len(self.column_names))]
# Step 3 of the CSV upload: select unique keys to merge
class SelectKeysForm(forms.Form):
how_merge_choices = [('left', 'only the keys in the table'),
('right', 'only the new keys'),
('outer', 'the union of the table and new keys '
'(outer)'),
('inner', 'the intersection of the table and new'
' keys (inner)')]
how_dup_columns_choices = [('override', 'override columns with new data'),
('rename', 'be renamed and become new columns.')]
dst_help = """Key column in the existing table to match with the new
data."""
src_help = """Key column in the new table to match with the existing data."""
merge_help = """How the keys in the table and the file are used for the
merge: 1) If only the keys from the table are used, any row in the file
with a key value not in the table is removed (default). 2) If only the
keys from the file are used, any row in the table with a key value not
in the file is removed. 3) If the union of keys is used, no row is
removed, but some rows will have empty values. 4) If the intersection of
the keys is used, only those rows with keys in both the table and the
file will be updated, the rest will be deleted."""
how_dup_columns_help = """The new data has columns with names identical
to those that are already part of the table. You may choose to override
them with the new data, or rename the new data and add them as new
columns."""
def __init__(self, *args, **kargs):
# Get the dst choices
dst_choices = [(x, x) for x in kargs.pop('dst_keys')]
dst_selected_key = kargs.pop('dst_selected_key')
dst_choice_initial = \
next((v for x, v in enumerate(dst_choices)
if v[0] == dst_selected_key),
('', '---'))
# Get the src choices
src_choices = [(x, x) for x in kargs.pop('src_keys')]
src_selected_key = kargs.pop('src_selected_key')
src_choice_initial = \
next((v for x, v in enumerate(src_choices)
if v[0] == src_selected_key),
('', '---'))
how_merge = kargs.pop('how_merge', None)
how_merge_initial = \
next((v for x, v in enumerate(self.how_merge_choices)
if v[0] == how_merge),
None)
# Boolean telling us if we have to add field to handle overlapping
# column names
are_overlap_cols = kargs.pop('are_overlap_cols')
how_dup_columns = kargs.pop('how_dup_columns')
super(SelectKeysForm, self).__init__(*args, **kargs)
self.fields['dst_key'] = \
forms.ChoiceField(initial=dst_choice_initial,
choices=dst_choices,
required=True,
label='Key Column in Table',
help_text=self.dst_help)
self.fields['src_key'] = \
forms.ChoiceField(initial=src_choice_initial,
choices=src_choices,
required=True,
label='Key Column in CSV',
help_text=self.src_help)
self.fields['how_merge'] = \
forms.ChoiceField(initial=how_merge_initial,
choices=self.how_merge_choices,
required=True,
label='Merge rows using',
help_text=self.merge_help)
if are_overlap_cols:
how_dup_columns_initial = \
next((v for x, v in enumerate(self.how_dup_columns_choices)
if v[0] == how_dup_columns), None)
self.fields['how_dup_columns'] = \
forms.ChoiceField(initial=how_dup_columns_initial,
choices=self.how_dup_columns_choices,
required=True,
label='Columns with already existing names'
' will',
help_text=self.merge_help)
# Form to allow value selection through unique keys in a workflow
class RowFilterForm(forms.Form):
def __init__(self, *args, **kargs):
# Store the instance
self.workflow = kargs.pop('workflow')
# Get the unique keys names and types
columns = self.workflow.columns.all()
self.key_names = [x.name for x in columns if x.is_key]
self.key_types = [x.data_type for x in columns if x.is_key]
super(RowFilterForm, self).__init__(*args, **kargs)
for name, field_type in zip(self.key_names, self.key_types):
if field_type == 'string':
self.fields[name] = forms.CharField(initial='',
label=name,
required=False)
elif field_type == 'integer':
self.fields[name] = forms.IntegerField(label=name,
required=False)
elif field_type == 'double':
self.fields[name] = forms.FloatField(label=name,
required=False)
elif field_type == 'boolean':
self.fields[name] = forms.BooleanField(required=False,
label=name)
elif field_type == 'datetime':
self.fields[name] = forms.DateTimeField(required=False,
label=name)
else:
raise Exception('Unable to process datatype', field_type)
# Form to enter values in a row
class RowForm(forms.Form):
def __init__(self, *args, **kargs):
# Store the instance
self.workflow = kargs.pop('workflow', None)
self.initial_values = kargs.pop('initial_values', None)
super(RowForm, self).__init__(*args, **kargs)
if not self.workflow:
return
# Get the columns
self.columns = self.workflow.get_columns()
# If no initial values have been given, replicate a list of Nones
if not self.initial_values:
self.initial_values = [None] * len(self.columns)
for idx, column in enumerate(self.columns):
field_name = field_prefix + '%s' % idx
self.fields[field_name] = \
column_to_field(column, self.initial_values[idx])
if column.is_key and self.initial_values[idx]:
self.fields[field_name].widget.attrs['readonly'] = 'readonly'
| en | 0.858885 | # -*- coding: utf-8 -*- # Field prefix to use in forms to avoid using column names (they are given by # the user and may pose a problem (injection bugs) # Step 1 of the CSV upload Form to read a csv file. It also allows to specify the number of lines to skip at the top and the bottom of the file. This functionality is offered by the underlyng function read_csv in Pandas Function to check that the integers are positive. :return: The cleaned data # Step 1 of the CSV upload Form to read an Excel file. # Step 1 of the CSV upload Form to read data from SQL. We collect information to create a Database URI to be used by SQLAlchemy: dialect[+driver]://user:password@host/dbname[?key=value..] # Form to select columns to upload and rename Kargs contain: column_names: list with names of the columns to upload, is_key: list stating if the corresponding column is key :param args: :param kargs: # Names of the columns to process and Boolean stating if they are key # Create as many fields as the given columns # Check if at least a unique column has been selected # Get list of new names # Step 3 of the CSV upload: select unique keys to merge Key column in the existing table to match with the new data. Key column in the new table to match with the existing data. How the keys in the table and the file are used for the merge: 1) If only the keys from the table are used, any row in the file with a key value not in the table is removed (default). 2) If only the keys from the file are used, any row in the table with a key value not in the file is removed. 3) If the union of keys is used, no row is removed, but some rows will have empty values. 4) If the intersection of the keys is used, only those rows with keys in both the table and the file will be updated, the rest will be deleted. The new data has columns with names identical to those that are already part of the table. 
You may choose to override them with the new data, or rename the new data and add them as new columns. # Get the dst choices # Get the src choices # Boolean telling us if we have to add field to handle overlapping # column names # Form to allow value selection through unique keys in a workflow # Store the instance # Get the unique keys names and types # Form to enter values in a row # Store the instance # Get the columns # If no initial values have been given, replicate a list of Nones | 2.882541 | 3 |
amf/amfinder_plot.py | EEvangelisti/amfinder | 8 | 6621808 | # AMFinder - amfinder_plot.py
#
# MIT License
# Copyright (c) 2021 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Plots accuracy and loss after training.
Functions
-----------
:function initialize: Defines plot style.
:function draw: draws a loss/accuracy plot.
"""
import io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
from matplotlib.ticker import MaxNLocator
def initialize():
""" Defines graph style. """
pyplot.style.use('classic')
def draw(history, epochs, title, x_range, t_name, v_name):
""" """
pyplot.clf()
pyplot.grid(True)
t_values = history[t_name]
v_values = history[v_name]
pyplot.plot(x_range, t_values, 'b-o', label='Test set')
pyplot.plot(x_range, v_values, 'g-s', label='Validation set')
pyplot.xlabel('Epoch')
pyplot.ylabel('Value')
pyplot.title(title)
padding = 0.1
legend_pos = 'upper right'
if title[0:4] == 'Loss':
pyplot.xlim(-padding, epochs + padding)
else:
legend_pos = 'lower right'
pyplot.axis([-padding, epochs + padding, 0, 1])
axes = pyplot.gca()
axes.autoscale(enable=True, axis='x', tight=False)
axes.xaxis.set_major_locator(MaxNLocator(integer=True))
pyplot.legend(loc=legend_pos)
pyplot.draw()
plot_data = io.BytesIO()
pyplot.savefig(plot_data, format='png')
return plot_data
| # AMFinder - amfinder_plot.py
#
# MIT License
# Copyright (c) 2021 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Plots accuracy and loss after training.
Functions
-----------
:function initialize: Defines plot style.
:function draw: draws a loss/accuracy plot.
"""
import io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
from matplotlib.ticker import MaxNLocator
def initialize():
""" Defines graph style. """
pyplot.style.use('classic')
def draw(history, epochs, title, x_range, t_name, v_name):
""" """
pyplot.clf()
pyplot.grid(True)
t_values = history[t_name]
v_values = history[v_name]
pyplot.plot(x_range, t_values, 'b-o', label='Test set')
pyplot.plot(x_range, v_values, 'g-s', label='Validation set')
pyplot.xlabel('Epoch')
pyplot.ylabel('Value')
pyplot.title(title)
padding = 0.1
legend_pos = 'upper right'
if title[0:4] == 'Loss':
pyplot.xlim(-padding, epochs + padding)
else:
legend_pos = 'lower right'
pyplot.axis([-padding, epochs + padding, 0, 1])
axes = pyplot.gca()
axes.autoscale(enable=True, axis='x', tight=False)
axes.xaxis.set_major_locator(MaxNLocator(integer=True))
pyplot.legend(loc=legend_pos)
pyplot.draw()
plot_data = io.BytesIO()
pyplot.savefig(plot_data, format='png')
return plot_data
| en | 0.759662 | # AMFinder - amfinder_plot.py # # MIT License # Copyright (c) 2021 <NAME>, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. Plots accuracy and loss after training. Functions ----------- :function initialize: Defines plot style. :function draw: draws a loss/accuracy plot. Defines graph style. | 2.184295 | 2 |
commands/prismatic_star.py | SeymourGx/RunePy | 0 | 6621809 | from discord.ext import commands
from util.arguments import Arguments
from util.choices import between
from shlex import split
class PrismaticStar:
    """Cog that computes the XP awarded by RuneScape prismatic stars.

    XP values come from fixed per-size lookup tables indexed by (level - 1).
    """

    def __init__(self, bot):
        self.bot = bot

    @staticmethod
    def get_star_dict():
        """Return a mapping of star size -> XP table indexed by (level - 1).

        NOTE(review): the tables have different lengths ('small' has far
        fewer entries than the others), so callers must clamp the level
        index to the length of the selected table.
        """
        return {
            'small': [74.7, 81.9, 91.8, 100.8, 111.6, 124.2, 135.9, 151.2, 166.5, 183.6, 203.4, 224.1, 246.6, 273.6, 301.5, 313.9, 328, 341.9, 356.8, 371.8, 388.6, 404.1, 421.9, 440.3, 459.1, 478.5, 499.6, 520.7, 543.1, 566.7, 591.1, 616.3, 643, 671, 699.6, 729.7, 761.7, 794.2, 829.3, 864.4, 902.2, 940.9, 981.3, 1023.7, 1067.8, 1114.5, 1164.2, 1214.7, 1266, 1321.2],
            'medium': [149.4, 163.8, 183.6, 201.6, 223.2, 248.4, 271.8, 302.4, 333, 367.2, 406.8, 448.2, 493.2, 547.2, 603, 627.7, 655.9, 683.7, 713.6, 743.5, 777.1, 808.1, 843.8, 880.6, 918.2, 957, 999.1, 1041.4, 1086.2, 1133.4, 1182.2, 1232.6, 1285.9, 1342, 1399.2, 1459.3, 1523.3, 1588.3, 1658.6, 1728.8, 1804.3, 1881.7, 1962.6, 2047.4, 2135.5, 2229, 2328.3, 2429.3, 2531.9, 2642.4, 2755.4, 2879.2, 2998.4, 3130.8, 3268.5, 3412.5, 3563.7, 3709.8, 3878.4, 4042.2, 4216.9, 4405.1, 4586.9, 4809.7, 5001.1, 5211.3, 5445.4, 5709.6, 5927.9, 6222.3, 6462.7, 6742, 7071.4, 7396.5, 7712.6, 8014.4, 8387.6, 8751.8, 9100.9, 9551.9, 9998.2, 10434, 10852.2, 11428.7, 11804.1, 12358.6, 12900.7, 13421.7, 14213.9, 14691.6, 15483.5, 15874, 16627.5, 17365.9, 18077.8, 19354.6, 20033.6, 20644.3],
            'large': [300, 331.2, 369.6, 408, 447.6, 499.2, 590.4, 609.6, 932.4, 736.8, 816, 902.4, 986.4, 1099.2, 1209.6, 1255.2, 1315.2, 1368, 1430.4, 1488, 1557.6, 1617.6, 1689.6, 1764, 1843.2, 1915.2, 1945.2, 1987.2, 2174.4, 2270.4, 2367.6, 2467.2, 2572.8, 2684.4, 2798.4, 2920.8, 3048, 3177.6, 3319.2, 3458.4, 3609.6, 3765.6, 3926.4, 4096.8, 4269.6, 4459.2, 4658.4, 4860, 5064, 5284.8, 5511.6, 5760, 5997.6, 6261.6, 6537.6, 6825.6, 7128, 7420.8, 7759.2, 8084.4, 8436, 8810.4, 9174, 9621.6, 10118.4, 10423.2, 10891.2, 11419.2, 11856, 12445.2, 12926.4, 13484.4, 14143.2, 14793.6, 15426, 16029.6, 16776, 17504.4, 18202.8, 19104, 19996.8, 20868, 21704.4, 22857.6, 23608.8, 24158.4, 25802.4, 26844, 28428, 29383.2, 30967.2, 31749.6, 33256.8, 34732.8, 36156, 38709.6, 40068, 41289.6],
            'huge': [598.8, 734.4, 739.2, 816, 895.2, 998.4, 1180.8, 1219.2, 1370.4, 1473.6, 1632, 1804.8, 1974, 2198.4, 2419.2, 2511.6, 2630.4, 2736, 2860.8, 2976, 3115.2, 3235.2, 3379.2, 3528, 3685.2, 3830.4, 3997.2, 3974.4, 4348.8, 4540.8, 4735.2, 4934.4, 5145.6, 4954.8, 5596.8, 5846.4, 6096, 6355.2, 6638.4, 6916.8, 7219.2, 7531.2, 7852.8, 8193.6, 8539.2, 8918.4, 9316.8, 9720, 10128, 10569.6, 11022, 11520, 11995.2, 12523.2, 13075.2, 13651.2, 14255.5, 14841.6, 15518.4, 16168.8, 16872, 17620.8, 18348, 19243.2, 20236.8, 20846.4, 21782.4, 22838.4, 23712, 24889.2, 25851.6, 26968.8, 28286.4, 29588.4, 30850.8, 32059.2, 33552, 35007.6, 36405.6, 38208, 39993.6, 41736, 43408.8, 45716.4, 47216.4, 49435.2, 51603.6, 53686.8, 56856, 58766.4, 61934.4, 63499.2, 66513.6, 69464.4, 72312, 77419.2, 80136, 82578]
        }

    @commands.command(aliases=['primstar', 'prim', 'prismatic', 'prismaticstar'],
                      description='Shows the amount of exp gained from using a prismatic star.')
    async def star(self, *, msg):
        """Reply with the XP a prismatic star of the given size yields at the given level.

        Expects ``msg`` to contain two shell-style arguments: a size
        (small/medium/large/huge) and a level in the range 1-120.
        """
        parser = Arguments(allow_abbrev=False, prog='star')
        parser.add_argument('size', choices=['small', 'medium', 'large', 'huge'],
                            help='The size of the prismatic star.')
        parser.add_argument('level', type=between(1, 120),
                            help='The level the prismatic star will be used on.')

        try:
            args = parser.parse_args(split(msg))
        except SystemExit:
            # argparse calls sys.exit() on error/--help; reply with the help text instead.
            await self.bot.say('```%s```' % parser.format_help())
            return
        except Exception as e:
            await self.bot.say('```%s```' % str(e))
            return

        xp_table = PrismaticStar.get_star_dict()[args.size]
        # Clamp to the last entry of the selected table. The tables are
        # shorter than the maximum level of 120 (and 'small' has only 50
        # entries), so indexing with level - 1 directly would raise
        # IndexError; the old fixed cap of 97 only worked for the
        # 98-entry tables.
        xp = xp_table[min(len(xp_table) - 1, args.level - 1)]

        await self.bot.say('A **{}** prismatic star at level **{:,}** would yield **{:,}** XP.'
                           .format(args.size, args.level, xp))
def setup(bot):
bot.add_cog(PrismaticStar(bot))
| from discord.ext import commands
from util.arguments import Arguments
from util.choices import between
from shlex import split
class PrismaticStar:
    """Discord cog that reports the XP gained from consuming prismatic stars."""
    def __init__(self, bot):
        # Keep a handle on the bot so command handlers can reply via self.bot.say().
        self.bot = bot
@staticmethod
def get_star_dict():
return {
'small': [74.7, 81.9, 91.8, 100.8, 111.6, 124.2, 135.9, 151.2, 166.5, 183.6, 203.4, 224.1, 246.6, 273.6, 301.5, 313.9, 328, 341.9, 356.8, 371.8, 388.6, 404.1, 421.9, 440.3, 459.1, 478.5, 499.6, 520.7, 543.1, 566.7, 591.1, 616.3, 643, 671, 699.6, 729.7, 761.7, 794.2, 829.3, 864.4, 902.2, 940.9, 981.3, 1023.7, 1067.8, 1114.5, 1164.2, 1214.7, 1266, 1321.2],
'medium': [149.4, 163.8, 183.6, 201.6, 223.2, 248.4, 271.8, 302.4, 333, 367.2, 406.8, 448.2, 493.2, 547.2, 603, 627.7, 655.9, 683.7, 713.6, 743.5, 777.1, 808.1, 843.8, 880.6, 918.2, 957, 999.1, 1041.4, 1086.2, 1133.4, 1182.2, 1232.6, 1285.9, 1342, 1399.2, 1459.3, 1523.3, 1588.3, 1658.6, 1728.8, 1804.3, 1881.7, 1962.6, 2047.4, 2135.5, 2229, 2328.3, 2429.3, 2531.9, 2642.4, 2755.4, 2879.2, 2998.4, 3130.8, 3268.5, 3412.5, 3563.7, 3709.8, 3878.4, 4042.2, 4216.9, 4405.1, 4586.9, 4809.7, 5001.1, 5211.3, 5445.4, 5709.6, 5927.9, 6222.3, 6462.7, 6742, 7071.4, 7396.5, 7712.6, 8014.4, 8387.6, 8751.8, 9100.9, 9551.9, 9998.2, 10434, 10852.2, 11428.7, 11804.1, 12358.6, 12900.7, 13421.7, 14213.9, 14691.6, 15483.5, 15874, 16627.5, 17365.9, 18077.8, 19354.6, 20033.6, 20644.3],
'large': [300, 331.2, 369.6, 408, 447.6, 499.2, 590.4, 609.6, 932.4, 736.8, 816, 902.4, 986.4, 1099.2, 1209.6, 1255.2, 1315.2, 1368, 1430.4, 1488, 1557.6, 1617.6, 1689.6, 1764, 1843.2, 1915.2, 1945.2, 1987.2, 2174.4, 2270.4, 2367.6, 2467.2, 2572.8, 2684.4, 2798.4, 2920.8, 3048, 3177.6, 3319.2, 3458.4, 3609.6, 3765.6, 3926.4, 4096.8, 4269.6, 4459.2, 4658.4, 4860, 5064, 5284.8, 5511.6, 5760, 5997.6, 6261.6, 6537.6, 6825.6, 7128, 7420.8, 7759.2, 8084.4, 8436, 8810.4, 9174, 9621.6, 10118.4, 10423.2, 10891.2, 11419.2, 11856, 12445.2, 12926.4, 13484.4, 14143.2, 14793.6, 15426, 16029.6, 16776, 17504.4, 18202.8, 19104, 19996.8, 20868, 21704.4, 22857.6, 23608.8, 24158.4, 25802.4, 26844, 28428, 29383.2, 30967.2, 31749.6, 33256.8, 34732.8, 36156, 38709.6, 40068, 41289.6],
'huge': [598.8, 734.4, 739.2, 816, 895.2, 998.4, 1180.8, 1219.2, 1370.4, 1473.6, 1632, 1804.8, 1974, 2198.4, 2419.2, 2511.6, 2630.4, 2736, 2860.8, 2976, 3115.2, 3235.2, 3379.2, 3528, 3685.2, 3830.4, 3997.2, 3974.4, 4348.8, 4540.8, 4735.2, 4934.4, 5145.6, 4954.8, 5596.8, 5846.4, 6096, 6355.2, 6638.4, 6916.8, 7219.2, 7531.2, 7852.8, 8193.6, 8539.2, 8918.4, 9316.8, 9720, 10128, 10569.6, 11022, 11520, 11995.2, 12523.2, 13075.2, 13651.2, 14255.5, 14841.6, 15518.4, 16168.8, 16872, 17620.8, 18348, 19243.2, 20236.8, 20846.4, 21782.4, 22838.4, 23712, 24889.2, 25851.6, 26968.8, 28286.4, 29588.4, 30850.8, 32059.2, 33552, 35007.6, 36405.6, 38208, 39993.6, 41736, 43408.8, 45716.4, 47216.4, 49435.2, 51603.6, 53686.8, 56856, 58766.4, 61934.4, 63499.2, 66513.6, 69464.4, 72312, 77419.2, 80136, 82578]
}
@commands.command(aliases=['primstar', 'prim', 'prismatic', 'prismaticstar'],
description='Shows the amount of exp gained from using a prismatic star.')
async def star(self, *, msg):
parser = Arguments(allow_abbrev=False, prog='star')
parser.add_argument('size', choices=['small', 'medium', 'large', 'huge'], help='The size of the prismatic star.')
parser.add_argument('level', type=between(1, 120), help='The level the prismatic star will be used on.')
try:
args = parser.parse_args(split(msg))
except SystemExit:
await self.bot.say('```%s```' % parser.format_help())
return
except Exception as e:
await self.bot.say('```%s```' % str(e))
return
xp = PrismaticStar.get_star_dict()[args.size][min(97, args.level - 1)]
await self.bot.say('A **{}** prismatic start at level **{:,}** would yield **{:,}** XP.'
.format(args.size, args.level, xp))
def setup(bot):
    # discord.py extension entry point: invoked by bot.load_extension() to register the cog.
    bot.add_cog(PrismaticStar(bot))
| none | 1 | 2.791066 | 3 | |
code/tools/dist_aux.py | mselli/rss-extended | 5 | 6621810 | import carla
from shapely.geometry import Polygon
import numpy as np
import math
def get_distance(v1, v2):
    """Return the Euclidean distance between two 2-D points given as (x, y) sequences."""
    # math.hypot computes sqrt(dx**2 + dy**2) in C and avoids overflow for large deltas.
    return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
'''
def is_between(v1, v2, v3):
r = get_distance(v2,v1) + get_distance(v3,v1) == get_distance(v2,v3)
return r
'''
def check_collision(rect0, rect1):
    """Return True when the two rectangles (4x2 vertex arrays) overlap with non-zero area."""
    poly_a = Polygon(tuple(map(tuple, rect0)))
    poly_b = Polygon(tuple(map(tuple, rect1)))
    # Touching edges/corners have zero intersection area and do not count as a collision.
    return poly_a.intersection(poly_b).area > 0.0
def get_dist_vert_vert(rect0, rect1):
    """Return the smallest vertex-to-vertex distance between two rectangles.

    Examines all 4 * 4 = 16 vertex pairs (one vertex taken from each
    rectangle) and returns the minimum Euclidean distance.
    """
    # A min-over-generator replaces the old index-permutation list plus
    # append loop; math.dist is the stdlib Euclidean distance (Python 3.8+).
    return min(math.dist(p, q) for p in rect0 for q in rect1)
def get_dist_vert_segm(rect_vert, rect_segm):
    """Minimum perpendicular distance from any vertex of rect_vert to any edge of rect_segm.

    A perpendicular is only counted when its foot lies strictly between the
    edge's endpoints (both interior angles <= 90 degrees); pairs covered by the
    vertex-vertex check are skipped. Returns float('Inf') when no perpendicular
    foot falls inside any edge.
    """
    # All (vertex index, edge start index) combinations: 4 vertices x 4 edges.
    perm = [[x,y] for x in range(0, np.shape(rect_vert)[0]) for y in range(np.shape(rect_segm)[0])]
    ##############################
    D = []
    for pair in perm:
        # take one point from rect_vert
        ind_vert = pair[0]
        # take two consequtive points from rect_segm
        ind_s0 = pair[1]
        ind_s1 = (pair[1]+1) % np.shape(rect_segm)[0] # if 4 --> 0
        point_vert = rect_vert[ind_vert]
        point_s0 = rect_segm[ind_s0]
        point_s1 = rect_segm[ind_s1]
        # Edge vector and the two vectors from the edge endpoints to the vertex.
        vect_segm = point_s0-point_s1
        vect_0 = point_s0-point_vert
        vect_1 = point_vert-point_s1
        # Interior angles at each edge endpoint (via the dot-product identity).
        angle0 = math.acos(np.dot(vect_segm, vect_0) / (np.linalg.norm(vect_segm) * np.linalg.norm(vect_0)))
        angle1 = math.acos(np.dot(vect_segm, vect_1) / (np.linalg.norm(vect_segm) * np.linalg.norm(vect_1)))
        # check if perpendicular belongs to the line p1--p2
        notBetween = False
        if (abs(math.degrees(angle0)) >90 or abs(math.degrees(angle1)) > 90):
            notBetween = True
        if not notBetween:
            #print('point_s0 = (%.2f, %.2f)' % (point_s0[0], point_s0[1]))
            #print('point_s1 = (%.2f, %.2f)' % (point_s1[0], point_s1[1]))
            #print('point_vert = (%.2f, %.2f)' % (point_vert[0], point_vert[1]))
            #print(math.degrees(angle0))
            #print(math.degrees(angle1))
            # perp length from vert to segment
            # Cross-product magnitude / edge length = point-to-line distance.
            perp = np.linalg.norm(np.cross(point_s1-point_s0, point_s0-point_vert))/np.linalg.norm(point_s0-point_vert)
            D.append(perp)
    try:
        d = min(D)
    except ValueError:
        # D is empty: no perpendicular fell inside any edge.
        d = float('Inf')
    return d
def evaluate_dist(vehicles):
    """Return the minimum 2-D gap between the footprints of the first two vehicles.

    Each vehicle's bounding-box corners are transformed into world coordinates,
    then the smallest of vertex-vertex and vertex-edge distances is taken.
    Returns 0.0 when the rectangles overlap, and also when fewer than two
    vehicles are given (only the first two vehicles are ever compared).
    """
    # What should this return if there is only one vehicle?
    V = [] # array of vertices
    for vehicle in vehicles:
        print('vehicle')
        print(vehicle)
        transform = vehicle.get_transform()
        bounding_box = vehicle.bounding_box
        #print(bounding_box)
        #print(transform)
        # 4 bounding-box corner vertices (x, y) relative to the vehicle origin
        # (only the 2-D footprint is used; the original comment said 8).
        ext = bounding_box.extent
        points = np.array([
            [ ext.x, ext.y],
            [- ext.x, ext.y],
            [- ext.x, - ext.y],
            [ ext.x, - ext.y]
        ])
        # Map each local corner into world coordinates; z=1 is an arbitrary
        # height since only x/y are kept afterwards.
        for point in points:
            ll = carla.Location(x=point[0],y=point[1],z=1)
            ll = transform.transform(ll)
            point[0] = ll.x
            point[1] = ll.y
            #world.world.debug.draw_point(ll, color=carla.Color(r=255, g=0, b=0))
        V.append(points)
        #world.world.debug.draw_box(bounding_box, transform.rotation)
    d = 0.0
    if (len(V) >= 2):
        rect0 = V[0]
        rect1 = V[1]
        bool_collision = check_collision(rect0, rect1)
        #print(bool_collision)
        if not bool_collision:
            # The true rectangle-rectangle distance is either vertex-vertex or
            # vertex-edge; check both directions of the vertex-edge case.
            min1 = get_dist_vert_vert(rect0, rect1)
            min2 = get_dist_vert_segm(rect0, rect1)
            min3 = get_dist_vert_segm(rect1, rect0)
            #print('min1 = %.2f' % min1)
            #print('min2 = %.2f' % min2)
            #print('min3 = %.2f' % min3)
            d = min(min1, min2, min3)
    return d
| import carla
from shapely.geometry import Polygon
import numpy as np
import math
def get_distance(v1, v2):
dist = math.sqrt( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )
return dist
'''
def is_between(v1, v2, v3):
r = get_distance(v2,v1) + get_distance(v3,v1) == get_distance(v2,v3)
return r
'''
def check_collision(rect0, rect1):
p1 = Polygon(tuple(map(tuple, rect0)))
p2 = Polygon(tuple(map(tuple, rect1)))
collision = (p1.intersection(p2).area > 0.0)
return collision
def get_dist_vert_vert(rect0, rect1):
perm = [[x,y] for x in range(0, np.shape(rect0)[0]) for y in range(np.shape(rect1)[0])]
# Take among all 4 * 4 = 16 pairs of points (points from different rectangles)
D = []
for pair in perm:
v0 = rect0[pair[0]]
v1 = rect1[pair[1]]
D.append(get_distance(v0, v1))
d = min(D)
return d
def get_dist_vert_segm(rect_vert, rect_segm):
perm = [[x,y] for x in range(0, np.shape(rect_vert)[0]) for y in range(np.shape(rect_segm)[0])]
##############################
D = []
for pair in perm:
# take one point from rect_vert
ind_vert = pair[0]
# take two consequtive points from rect_segm
ind_s0 = pair[1]
ind_s1 = (pair[1]+1) % np.shape(rect_segm)[0] # if 4 --> 0
point_vert = rect_vert[ind_vert]
point_s0 = rect_segm[ind_s0]
point_s1 = rect_segm[ind_s1]
vect_segm = point_s0-point_s1
vect_0 = point_s0-point_vert
vect_1 = point_vert-point_s1
angle0 = math.acos(np.dot(vect_segm, vect_0) / (np.linalg.norm(vect_segm) * np.linalg.norm(vect_0)))
angle1 = math.acos(np.dot(vect_segm, vect_1) / (np.linalg.norm(vect_segm) * np.linalg.norm(vect_1)))
# check if perpendicular belongs to the line p1--p2
notBetween = False
if (abs(math.degrees(angle0)) >90 or abs(math.degrees(angle1)) > 90):
notBetween = True
if not notBetween:
#print('point_s0 = (%.2f, %.2f)' % (point_s0[0], point_s0[1]))
#print('point_s1 = (%.2f, %.2f)' % (point_s1[0], point_s1[1]))
#print('point_vert = (%.2f, %.2f)' % (point_vert[0], point_vert[1]))
#print(math.degrees(angle0))
#print(math.degrees(angle1))
# perp length from vert to segment
perp = np.linalg.norm(np.cross(point_s1-point_s0, point_s0-point_vert))/np.linalg.norm(point_s0-point_s1)
D.append(perp)
try:
d = min(D)
except ValueError:
d = float('Inf')
return d
def evaluate_dist(vehicles):
# What should this return if there is only one vehicle?
V = [] # array of vertices
for vehicle in vehicles:
print('vehicle')
print(vehicle)
transform = vehicle.get_transform()
bounding_box = vehicle.bounding_box
#print(bounding_box)
#print(transform)
# 8 bounding box vertices relative to (0,0,0)
ext = bounding_box.extent
points = np.array([
[ ext.x, ext.y],
[- ext.x, ext.y],
[- ext.x, - ext.y],
[ ext.x, - ext.y]
])
for point in points:
ll = carla.Location(x=point[0],y=point[1],z=1)
ll = transform.transform(ll)
point[0] = ll.x
point[1] = ll.y
#world.world.debug.draw_point(ll, color=carla.Color(r=255, g=0, b=0))
V.append(points)
#world.world.debug.draw_box(bounding_box, transform.rotation)
d = 0.0
if (len(V) >= 2):
rect0 = V[0]
rect1 = V[1]
bool_collision = check_collision(rect0, rect1)
#print(bool_collision)
if not bool_collision:
min1 = get_dist_vert_vert(rect0, rect1)
min2 = get_dist_vert_segm(rect0, rect1)
min3 = get_dist_vert_segm(rect1, rect0)
#print('min1 = %.2f' % min1)
#print('min2 = %.2f' % min2)
#print('min3 = %.2f' % min3)
d = min(min1, min2, min3)
return d
| en | 0.43222 | def is_between(v1, v2, v3): r = get_distance(v2,v1) + get_distance(v3,v1) == get_distance(v2,v3) return r # Take among all 4 * 4 = 16 pairs of points (points from different rectangles) ############################## # take one point from rect_vert # take two consequtive points from rect_segm # if 4 --> 0 # check if perpendicular belongs to the line p1--p2 #print('point_s0 = (%.2f, %.2f)' % (point_s0[0], point_s0[1])) #print('point_s1 = (%.2f, %.2f)' % (point_s1[0], point_s1[1])) #print('point_vert = (%.2f, %.2f)' % (point_vert[0], point_vert[1])) #print(math.degrees(angle0)) #print(math.degrees(angle1)) # perp length from vert to segment # What should this return if there is only one vehicle? # array of vertices #print(bounding_box) #print(transform) # 8 bounding box vertices relative to (0,0,0) #world.world.debug.draw_point(ll, color=carla.Color(r=255, g=0, b=0)) #world.world.debug.draw_box(bounding_box, transform.rotation) #print(bool_collision) #print('min1 = %.2f' % min1) #print('min2 = %.2f' % min2) #print('min3 = %.2f' % min3) | 2.811695 | 3 |
src/socketclient/utils/http/cookie_util.py | TKaxv-7S/jd-assistant | 16 | 6621811 | <filename>src/socketclient/utils/http/cookie_util.py
from http.cookiejar import CookieJar
from requests import cookies, models
from socketclient.log import logger
def merge_cookies_from_response(cookie_jar, http_response, url):
    """Extract Set-Cookie headers from *http_response* into *cookie_jar*.

    A throwaway PreparedRequest carries the request URL so the jar's cookie
    policy can apply its domain/path checks. The jar is mutated in place and
    also returned for call-chaining convenience.
    """
    p = models.PreparedRequest()
    p.prepare(url=url)
    cookies.extract_cookies_to_jar(cookie_jar, p, http_response)
    return cookie_jar
def get_cookies_str(cookie):
    """Serialize cookies into an HTTP-style ``name=value;`` string.

    Accepts a CookieJar, a plain dict, or an already-formatted string; any
    other type (or a conversion failure) is logged and yields ''.
    """
    try:
        if isinstance(cookie, CookieJar):
            return ''.join(f'{_cookie.name}={_cookie.value};' for _cookie in cookie)
        elif isinstance(cookie, dict):
            # Produce the same 'name=value;' format as the CookieJar branch.
            # The previous cookiejar_from_dict(cookie).__str__() returned a
            # repr like '<RequestsCookieJar[...]>', not a usable cookie string.
            return ''.join(f'{name}={value};' for name, value in cookie.items())
        elif isinstance(cookie, str):
            return cookie
        else:
            logger.warning('cookie类型不匹配,使用空值')
            return ''
    except Exception as e:
        logger.error('cookie转换异常,信息:%s', e)
        return ''
# def mark_cookie(tup):
# return None
| <filename>src/socketclient/utils/http/cookie_util.py
from http.cookiejar import CookieJar
from requests import cookies, models
from socketclient.log import logger
def merge_cookies_from_response(cookie_jar, http_response, url):
p = models.PreparedRequest()
p.prepare(url=url)
cookies.extract_cookies_to_jar(cookie_jar, p, http_response)
# cookie_list_str = http_response.info().getlist('set-cookie')
# cookie_jar._now = int(time.time())
# cookie_set = cookiejar.parse_ns_headers(cookie_list_str)
# cookie_tuples = cookie_jar._normalized_cookie_tuples(cookie_set)
#
# for tup in cookie_tuples:
# cookie = mark_cookie(tup)
# if cookie:
# cookie_jar.set_cookie(cookie)
return cookie_jar
def get_cookies_str(cookie):
try:
if isinstance(cookie, CookieJar):
cookie_array = []
for _cookie in iter(cookie):
cookie_array.append(f'{_cookie.name}={_cookie.value};')
return ''.join(cookie_array)
elif isinstance(cookie, dict):
return cookies.cookiejar_from_dict(cookie).__str__()
elif isinstance(cookie, str):
return cookie
else:
logger.warning('cookie类型不匹配,使用空值')
return ''
except Exception as e:
logger.error('cookie转换异常,信息:%s', e)
return ''
# def mark_cookie(tup):
# return None
| en | 0.206757 | # cookie_list_str = http_response.info().getlist('set-cookie') # cookie_jar._now = int(time.time()) # cookie_set = cookiejar.parse_ns_headers(cookie_list_str) # cookie_tuples = cookie_jar._normalized_cookie_tuples(cookie_set) # # for tup in cookie_tuples: # cookie = mark_cookie(tup) # if cookie: # cookie_jar.set_cookie(cookie) # def mark_cookie(tup): # return None | 2.653843 | 3 |
WirelessMonitoringModule/gr-radar/examples/usrp/usrp_echotimer_sync_pulse.py | Aekai/Wi-Mind | 1 | 6621812 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Usrp Echotimer Sync Pulse
# Generated: Sun Feb 11 12:05:57 2018
##################################################
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            # XInitThreads must run before any other Xlib call: GNU Radio and Qt
            # touch X11 from multiple threads, which crashes without it.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import radar
import sip
import sys
from gnuradio import qtgui
class usrp_echotimer_sync_pulse(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Usrp Echotimer Sync Pulse")
Qt.QWidget.__init__(self)
self.setWindowTitle("Usrp Echotimer Sync Pulse")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "usrp_echotimer_sync_pulse")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.wait_samp = wait_samp = 100,100,100,100
self.send_samp = send_samp = 100,400,300
self.packet_len = packet_len = sum(wait_samp)+sum(send_samp)
self.wait_to_start = wait_to_start = 0.03
self.tx_gain = tx_gain = 20
self.samp_rate = samp_rate = 15000000
self.rx_gain = rx_gain = 40
self.num_delay_samp = num_delay_samp = 0
self.num_corr = num_corr = packet_len
self.min_output_buffer = min_output_buffer = packet_len*2
self.center_freq = center_freq = 2400000000
##################################################
# Blocks
##################################################
self._tx_gain_range = Range(0, 100, 1, 20, 200)
self._tx_gain_win = RangeWidget(self._tx_gain_range, self.set_tx_gain, 'TX Gain', "counter_slider", float)
self.top_layout.addWidget(self._tx_gain_win)
self._rx_gain_range = Range(0, 100, 1, 40, 200)
self._rx_gain_win = RangeWidget(self._rx_gain_range, self.set_rx_gain, 'RX Gain', "counter_slider", float)
self.top_layout.addWidget(self._rx_gain_win)
self._num_delay_samp_range = Range(0, packet_len, 1, 0, 200)
self._num_delay_samp_win = RangeWidget(self._num_delay_samp_range, self.set_num_delay_samp, 'Number of delayed samples', "counter_slider", float)
self.top_layout.addWidget(self._num_delay_samp_win)
self._num_corr_range = Range(0, packet_len, 1, packet_len, 200)
self._num_corr_win = RangeWidget(self._num_corr_range, self.set_num_corr, 'Number of cross correlations', "counter_slider", float)
self.top_layout.addWidget(self._num_corr_win)
self.radar_usrp_echotimer_cc_0 = radar.usrp_echotimer_cc(samp_rate, center_freq, int(num_delay_samp), '', '', 'internal', 'none', 'TX/RX', tx_gain, 0.2, wait_to_start, 0, '', '', 'internal', 'none', 'RX2', rx_gain, 0.2, wait_to_start, 0, "packet_len")
(self.radar_usrp_echotimer_cc_0).set_min_output_buffer(2400)
self.radar_signal_generator_sync_pulse_c_0 = radar.signal_generator_sync_pulse_c(packet_len, (send_samp), (wait_samp), 0.5, "packet_len")
(self.radar_signal_generator_sync_pulse_c_0).set_min_output_buffer(2400)
self.radar_print_results_0 = radar.print_results(False, "")
self.radar_estimator_sync_pulse_c_0 = radar.estimator_sync_pulse_c(int(num_corr), "packet_len")
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
packet_len, #size
samp_rate, #samp_rate
'QT GUI Plot', #name
2 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_0_win)
self.blocks_complex_to_mag_0_0 = blocks.complex_to_mag(1)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
##################################################
# Connections
##################################################
self.msg_connect((self.radar_estimator_sync_pulse_c_0, 'Msg out'), (self.radar_print_results_0, 'Msg in'))
self.connect((self.blocks_complex_to_mag_0, 0), (self.qtgui_time_sink_x_0, 1))
self.connect((self.blocks_complex_to_mag_0_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.radar_signal_generator_sync_pulse_c_0, 0), (self.blocks_complex_to_mag_0_0, 0))
self.connect((self.radar_signal_generator_sync_pulse_c_0, 0), (self.radar_estimator_sync_pulse_c_0, 0))
self.connect((self.radar_signal_generator_sync_pulse_c_0, 0), (self.radar_usrp_echotimer_cc_0, 0))
self.connect((self.radar_usrp_echotimer_cc_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.radar_usrp_echotimer_cc_0, 0), (self.radar_estimator_sync_pulse_c_0, 1))
    def closeEvent(self, event):
        # Persist the window geometry so the GUI reopens at the same position/size.
        self.settings = Qt.QSettings("GNU Radio", "usrp_echotimer_sync_pulse")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()
def get_wait_samp(self):
return self.wait_samp
def set_wait_samp(self, wait_samp):
self.wait_samp = wait_samp
self.set_packet_len(sum(self.wait_samp)+sum(self.send_samp))
def get_send_samp(self):
return self.send_samp
def set_send_samp(self, send_samp):
self.send_samp = send_samp
self.set_packet_len(sum(self.wait_samp)+sum(self.send_samp))
def get_packet_len(self):
return self.packet_len
def set_packet_len(self, packet_len):
self.packet_len = packet_len
self.set_num_corr(self.packet_len)
self.set_min_output_buffer(self.packet_len*2)
def get_wait_to_start(self):
return self.wait_to_start
def set_wait_to_start(self, wait_to_start):
self.wait_to_start = wait_to_start
def get_tx_gain(self):
return self.tx_gain
def set_tx_gain(self, tx_gain):
self.tx_gain = tx_gain
self.radar_usrp_echotimer_cc_0.set_tx_gain(self.tx_gain)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
self.radar_usrp_echotimer_cc_0.set_rx_gain(self.rx_gain)
def get_num_delay_samp(self):
return self.num_delay_samp
def set_num_delay_samp(self, num_delay_samp):
self.num_delay_samp = num_delay_samp
self.radar_usrp_echotimer_cc_0.set_num_delay_samps(int(self.num_delay_samp))
def get_num_corr(self):
return self.num_corr
def set_num_corr(self, num_corr):
self.num_corr = num_corr
self.radar_estimator_sync_pulse_c_0.set_num_xcorr(int(self.num_corr))
def get_min_output_buffer(self):
return self.min_output_buffer
def set_min_output_buffer(self, min_output_buffer):
self.min_output_buffer = min_output_buffer
def get_center_freq(self):
return self.center_freq
def set_center_freq(self, center_freq):
self.center_freq = center_freq
def main(top_block_cls=usrp_echotimer_sync_pulse, options=None):
    """Instantiate the flowgraph, start it, and run the Qt event loop."""
    from distutils.version import StrictVersion
    # setGraphicsSystem only exists on Qt >= 4.5; apply the GR-configured style there.
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)
    tb = top_block_cls()
    tb.start()
    tb.show()
    def quitting():
        # Stop and join the flowgraph threads before the Qt event loop tears down.
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()
if __name__ == '__main__':
main()
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Usrp Echotimer Sync Pulse
# Generated: Sun Feb 11 12:05:57 2018
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import radar
import sip
import sys
from gnuradio import qtgui
class usrp_echotimer_sync_pulse(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Usrp Echotimer Sync Pulse")
Qt.QWidget.__init__(self)
self.setWindowTitle("Usrp Echotimer Sync Pulse")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "usrp_echotimer_sync_pulse")
self.restoreGeometry(self.settings.value("geometry").toByteArray())
##################################################
# Variables
##################################################
self.wait_samp = wait_samp = 100,100,100,100
self.send_samp = send_samp = 100,400,300
self.packet_len = packet_len = sum(wait_samp)+sum(send_samp)
self.wait_to_start = wait_to_start = 0.03
self.tx_gain = tx_gain = 20
self.samp_rate = samp_rate = 15000000
self.rx_gain = rx_gain = 40
self.num_delay_samp = num_delay_samp = 0
self.num_corr = num_corr = packet_len
self.min_output_buffer = min_output_buffer = packet_len*2
self.center_freq = center_freq = 2400000000
##################################################
# Blocks
##################################################
self._tx_gain_range = Range(0, 100, 1, 20, 200)
self._tx_gain_win = RangeWidget(self._tx_gain_range, self.set_tx_gain, 'TX Gain', "counter_slider", float)
self.top_layout.addWidget(self._tx_gain_win)
self._rx_gain_range = Range(0, 100, 1, 40, 200)
self._rx_gain_win = RangeWidget(self._rx_gain_range, self.set_rx_gain, 'RX Gain', "counter_slider", float)
self.top_layout.addWidget(self._rx_gain_win)
self._num_delay_samp_range = Range(0, packet_len, 1, 0, 200)
self._num_delay_samp_win = RangeWidget(self._num_delay_samp_range, self.set_num_delay_samp, 'Number of delayed samples', "counter_slider", float)
self.top_layout.addWidget(self._num_delay_samp_win)
self._num_corr_range = Range(0, packet_len, 1, packet_len, 200)
self._num_corr_win = RangeWidget(self._num_corr_range, self.set_num_corr, 'Number of cross correlations', "counter_slider", float)
self.top_layout.addWidget(self._num_corr_win)
self.radar_usrp_echotimer_cc_0 = radar.usrp_echotimer_cc(samp_rate, center_freq, int(num_delay_samp), '', '', 'internal', 'none', 'TX/RX', tx_gain, 0.2, wait_to_start, 0, '', '', 'internal', 'none', 'RX2', rx_gain, 0.2, wait_to_start, 0, "packet_len")
(self.radar_usrp_echotimer_cc_0).set_min_output_buffer(2400)
self.radar_signal_generator_sync_pulse_c_0 = radar.signal_generator_sync_pulse_c(packet_len, (send_samp), (wait_samp), 0.5, "packet_len")
(self.radar_signal_generator_sync_pulse_c_0).set_min_output_buffer(2400)
self.radar_print_results_0 = radar.print_results(False, "")
self.radar_estimator_sync_pulse_c_0 = radar.estimator_sync_pulse_c(int(num_corr), "packet_len")
self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
packet_len, #size
samp_rate, #samp_rate
'QT GUI Plot', #name
2 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(False)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_time_sink_x_0_win)
self.blocks_complex_to_mag_0_0 = blocks.complex_to_mag(1)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
##################################################
# Connections
##################################################
self.msg_connect((self.radar_estimator_sync_pulse_c_0, 'Msg out'), (self.radar_print_results_0, 'Msg in'))
self.connect((self.blocks_complex_to_mag_0, 0), (self.qtgui_time_sink_x_0, 1))
self.connect((self.blocks_complex_to_mag_0_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.radar_signal_generator_sync_pulse_c_0, 0), (self.blocks_complex_to_mag_0_0, 0))
self.connect((self.radar_signal_generator_sync_pulse_c_0, 0), (self.radar_estimator_sync_pulse_c_0, 0))
self.connect((self.radar_signal_generator_sync_pulse_c_0, 0), (self.radar_usrp_echotimer_cc_0, 0))
self.connect((self.radar_usrp_echotimer_cc_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.radar_usrp_echotimer_cc_0, 0), (self.radar_estimator_sync_pulse_c_0, 1))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "usrp_echotimer_sync_pulse")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_wait_samp(self):
return self.wait_samp
def set_wait_samp(self, wait_samp):
self.wait_samp = wait_samp
self.set_packet_len(sum(self.wait_samp)+sum(self.send_samp))
def get_send_samp(self):
return self.send_samp
def set_send_samp(self, send_samp):
self.send_samp = send_samp
self.set_packet_len(sum(self.wait_samp)+sum(self.send_samp))
def get_packet_len(self):
return self.packet_len
def set_packet_len(self, packet_len):
self.packet_len = packet_len
self.set_num_corr(self.packet_len)
self.set_min_output_buffer(self.packet_len*2)
def get_wait_to_start(self):
return self.wait_to_start
def set_wait_to_start(self, wait_to_start):
self.wait_to_start = wait_to_start
def get_tx_gain(self):
return self.tx_gain
def set_tx_gain(self, tx_gain):
self.tx_gain = tx_gain
self.radar_usrp_echotimer_cc_0.set_tx_gain(self.tx_gain)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
def get_rx_gain(self):
return self.rx_gain
def set_rx_gain(self, rx_gain):
self.rx_gain = rx_gain
self.radar_usrp_echotimer_cc_0.set_rx_gain(self.rx_gain)
def get_num_delay_samp(self):
return self.num_delay_samp
def set_num_delay_samp(self, num_delay_samp):
self.num_delay_samp = num_delay_samp
self.radar_usrp_echotimer_cc_0.set_num_delay_samps(int(self.num_delay_samp))
def get_num_corr(self):
return self.num_corr
def set_num_corr(self, num_corr):
self.num_corr = num_corr
self.radar_estimator_sync_pulse_c_0.set_num_xcorr(int(self.num_corr))
def get_min_output_buffer(self):
return self.min_output_buffer
def set_min_output_buffer(self, min_output_buffer):
self.min_output_buffer = min_output_buffer
def get_center_freq(self):
return self.center_freq
def set_center_freq(self, center_freq):
self.center_freq = center_freq
def main(top_block_cls=usrp_echotimer_sync_pulse, options=None):
from distutils.version import StrictVersion
if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
style = gr.prefs().get_string('qtgui', 'style', 'raster')
Qt.QApplication.setGraphicsSystem(style)
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| de | 0.726184 | #!/usr/bin/env python2 # -*- coding: utf-8 -*- ################################################## # GNU Radio Python Flow Graph # Title: Usrp Echotimer Sync Pulse # Generated: Sun Feb 11 12:05:57 2018 ################################################## ################################################## # Variables ################################################## ################################################## # Blocks ################################################## #size #samp_rate #name #number of inputs ################################################## # Connections ################################################## | 2.06429 | 2 |
application/original_data_entry.py | DonBlaine/OpenDoorData | 0 | 6621813 | <reponame>DonBlaine/OpenDoorData<gh_stars>0
import csv
import time as tm
import datetime
from dateutil.parser import parse
import models
from linear_model import get_linear_coef
import os
from app import app
import logging
#useful for viewing the specific sql queries to debug
if app.config['DEBUG']:
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
def main():
os.chdir("..")
def epochtime(x):
string = parse(x)
epoch = int(tm.mktime(string.timetuple()))
return epoch
def parseName(x):
inst1 = x.find(">")+2
inst2 = x.find(">", inst1)
building = x[inst1:inst2-1].lower()
room = x[inst2+6:]
return (building,room)
models.db.create_tables([models.room,
models.User,
models.module,
models.wifi_log,
models.timetable,
models.survey,
models.regressionModel,
models.building
], safe=True)
models.regressionModel.create(weight = get_linear_coef("original_cleaned_data", "full.csv", "survey_data.csv"))
models.building.create(name = "School of Computer Science",
code = "scs",
phone = "+353 1 716 2483",
email = "<EMAIL>",
opening_hour_weekday = "09:00",
closing_hour_weekday = "19:00",
lat = 53.3092327,
lon = -6.2239067,
image_dir = "images/scs.jpg"
)
models.room.create(room_num = 2,
building = "school of computer science",
room_cap = 90,
building_code = "scs",
code = "B002"
)
models.room.create(room_num = 3,
building = "school of computer science",
room_cap = 90,
building_code = "scs",
code = "B003"
)
models.room.create(room_num = 4,
building = "school of computer science",
room_cap = 220,
building_code = "scs",
code = "B004"
)
models.User.create(username = "admin",
password = "password",
email = "<EMAIL>",
first_name = 'Don',
last_name = "Blaine",
admin = True
)
user = models.User.get(models.User.username == "admin")
user.set_password ("password")
user.save()
file = r"Data/original_cleaned_data/timetable.csv"
with open(file, 'r') as f:
mycsv= csv.reader(f)
mylist = list(mycsv)
modlist = []
for i in range (1, len(mylist)):
if len(mylist[i][3])>1:
modulecode = mylist[i][3]
if modulecode in modlist:
continue
else:
models.module.create(module_code = modulecode,
instructor = user.username
)
modlist.append(modulecode)
for i in range (1, len(mylist)):
roomid = mylist[i][1]
build = mylist[i][5]
time1 = int(mylist[i][2])
reg_stu = mylist[i][4] if mylist[i][4]!= "" else 0
modulecode = mylist[i][3] if len(mylist[i][3])>1 else None
models.timetable.create(room_id = roomid,
building = build,
mod_code = modulecode,
event_time = time1,
reg_stu = int(float(reg_stu)),
time = datetime.datetime.fromtimestamp(time1)
)
f.close()
file = r"Data/original_cleaned_data/full.csv"
with open(file, 'r') as f:
mycsv= csv.reader(f)
mylist = list(mycsv)
for i in range(len(mylist)):
roomid = int(parseName(mylist[i][0])[1])
build = "school of " + parseName(mylist[i][0])[0]
etime = float(epochtime(mylist[i][1]))
models.wifi_log.create(room_id = roomid,
building = build,
event_time = etime,
assoc_devices = mylist[i][2],
auth_devices = mylist[i][3],
time = datetime.datetime.fromtimestamp(etime)
)
f.close()
file = r"Data/original_cleaned_data/survey_data.csv"
with open(file, 'r') as f:
mycsv= csv.reader(f)
mylist = list(mycsv)
for i in range(1, len(mylist)):
roomid = mylist[i][1]
build = mylist[i][4]
etime = int(mylist[i][2])
models.survey.create(room_id = roomid,
building = build,
event_time = etime,
occupancy = mylist[i][3],
reporter = user.username,
time = datetime.datetime.fromtimestamp(etime)
)
f.close()
models.db.close()
print ("The database should now be available")
if __name__ == '__main__':
main()
| import csv
import time as tm
import datetime
from dateutil.parser import parse
import models
from linear_model import get_linear_coef
import os
from app import app
import logging
#useful for viewing the specific sql queries to debug
if app.config['DEBUG']:
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
def main():
os.chdir("..")
def epochtime(x):
string = parse(x)
epoch = int(tm.mktime(string.timetuple()))
return epoch
def parseName(x):
inst1 = x.find(">")+2
inst2 = x.find(">", inst1)
building = x[inst1:inst2-1].lower()
room = x[inst2+6:]
return (building,room)
models.db.create_tables([models.room,
models.User,
models.module,
models.wifi_log,
models.timetable,
models.survey,
models.regressionModel,
models.building
], safe=True)
models.regressionModel.create(weight = get_linear_coef("original_cleaned_data", "full.csv", "survey_data.csv"))
models.building.create(name = "School of Computer Science",
code = "scs",
phone = "+353 1 716 2483",
email = "<EMAIL>",
opening_hour_weekday = "09:00",
closing_hour_weekday = "19:00",
lat = 53.3092327,
lon = -6.2239067,
image_dir = "images/scs.jpg"
)
models.room.create(room_num = 2,
building = "school of computer science",
room_cap = 90,
building_code = "scs",
code = "B002"
)
models.room.create(room_num = 3,
building = "school of computer science",
room_cap = 90,
building_code = "scs",
code = "B003"
)
models.room.create(room_num = 4,
building = "school of computer science",
room_cap = 220,
building_code = "scs",
code = "B004"
)
models.User.create(username = "admin",
password = "password",
email = "<EMAIL>",
first_name = 'Don',
last_name = "Blaine",
admin = True
)
user = models.User.get(models.User.username == "admin")
user.set_password ("password")
user.save()
file = r"Data/original_cleaned_data/timetable.csv"
with open(file, 'r') as f:
mycsv= csv.reader(f)
mylist = list(mycsv)
modlist = []
for i in range (1, len(mylist)):
if len(mylist[i][3])>1:
modulecode = mylist[i][3]
if modulecode in modlist:
continue
else:
models.module.create(module_code = modulecode,
instructor = user.username
)
modlist.append(modulecode)
for i in range (1, len(mylist)):
roomid = mylist[i][1]
build = mylist[i][5]
time1 = int(mylist[i][2])
reg_stu = mylist[i][4] if mylist[i][4]!= "" else 0
modulecode = mylist[i][3] if len(mylist[i][3])>1 else None
models.timetable.create(room_id = roomid,
building = build,
mod_code = modulecode,
event_time = time1,
reg_stu = int(float(reg_stu)),
time = datetime.datetime.fromtimestamp(time1)
)
f.close()
file = r"Data/original_cleaned_data/full.csv"
with open(file, 'r') as f:
mycsv= csv.reader(f)
mylist = list(mycsv)
for i in range(len(mylist)):
roomid = int(parseName(mylist[i][0])[1])
build = "school of " + parseName(mylist[i][0])[0]
etime = float(epochtime(mylist[i][1]))
models.wifi_log.create(room_id = roomid,
building = build,
event_time = etime,
assoc_devices = mylist[i][2],
auth_devices = mylist[i][3],
time = datetime.datetime.fromtimestamp(etime)
)
f.close()
file = r"Data/original_cleaned_data/survey_data.csv"
with open(file, 'r') as f:
mycsv= csv.reader(f)
mylist = list(mycsv)
for i in range(1, len(mylist)):
roomid = mylist[i][1]
build = mylist[i][4]
etime = int(mylist[i][2])
models.survey.create(room_id = roomid,
building = build,
event_time = etime,
occupancy = mylist[i][3],
reporter = user.username,
time = datetime.datetime.fromtimestamp(etime)
)
f.close()
models.db.close()
print ("The database should now be available")
if __name__ == '__main__':
main() | en | 0.66955 | #useful for viewing the specific sql queries to debug | 2.584738 | 3 |
quad_ws/src/quad_simulation/quad_simulation/src/env_tester.py | reubenstr/zuko | 1 | 6621814 | #!/usr/bin/env python
'''
Test the simulation environment.
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import sys
import time
import os
import argparse
sys.path.append('../')
from gym_env import GymEnv
from gui_param_control import GuiParamControl
from kinematics import Kinematics
from bezier_gait import BezierGait
from env_randomizer import EnvRandomizer
# ARGUMENTS
parser = argparse.ArgumentParser(description="Environment Tester (No Joystick).")
parser.add_argument("-hf",
"--HeightField",
help="Use HeightField",
action='store_true')
parser.add_argument("-r",
"--DebugRack",
help="Put Spot on an Elevated Rack",
action='store_true')
parser.add_argument("-p",
"--DebugPath",
help="Draw Spot's Foot Path",
action='store_true')
parser.add_argument("-ay",
"--AutoYaw",
help="Automatically Adjust Spot's Yaw",
action='store_true')
parser.add_argument("-ar",
"--AutoReset",
help="Automatically Reset Environment When Spot Falls",
action='store_true')
parser.add_argument("-dr",
"--DontRandomize",
help="Do NOT Randomize State and Environment.",
action='store_true')
ARGS = parser.parse_args()
def main():
""" The main() function. """
print("STARTING ENVIRONMENT TEST")
seed = 0
max_timesteps = 4e6
if ARGS.DebugRack:
on_rack = True
else:
on_rack = False
if ARGS.DebugPath:
draw_foot_path = True
else:
draw_foot_path = False
if ARGS.HeightField:
height_field = True
else:
height_field = False
if ARGS.DontRandomize:
env_randomizer = None
else:
env_randomizer = EnvRandomizer()
env = GymEnv(render=True,
on_rack=on_rack,
height_field=height_field,
draw_foot_path=draw_foot_path,
env_randomizer=env_randomizer)
# Set seeds
env.seed(seed)
np.random.seed(seed)
state_dim = env.observation_space.shape[0]
print("STATE DIM: {}".format(state_dim))
action_dim = env.action_space.shape[0]
print("ACTION DIM: {}".format(action_dim))
max_action = float(env.action_space.high[0])
state = env.reset()
gui_param_controller = GuiParamControl(env.spot.quadruped)
spot_model = Kinematics()
T_bf0 = spot_model.WorldToFoot
T_bf = copy.deepcopy(T_bf0)
bezier_gait = BezierGait(dt=env._time_step)
action = env.action_space.sample()
yaw = 0.0
t = 0
while t < (int(max_timesteps)):
pos, orn, StepLength, LateralFraction, YawRate, StepVelocity, ClearanceHeight, PenetrationDepth, SwingPeriod = gui_param_controller.UserInput()
# experiemental
# bezier_gait.Tswing = SwingPeriod
# fix yaw for sim
yaw = env.return_yaw()
P_yaw = 5.0
if ARGS.AutoYaw:
YawRate += -yaw * P_yaw
# print("YAW RATE: {}".format(YawRate))
contacts = state[-4:]
# Get Desired Foot Poses
T_bf = bezier_gait.GenerateTrajectory(StepLength, LateralFraction, YawRate,
StepVelocity, T_bf0, T_bf, ClearanceHeight, PenetrationDepth, contacts)
joint_angles = spot_model.InverseKinimatics(orn, pos, T_bf)
#FL_Elbow.append(np.degrees(joint_angles[0][-1]))
# for i, (key, Tbf_in) in enumerate(T_bf.items()):
# print("{}: \t Angle: {}".format(key, np.degrees(joint_angles[i])))
# print("-------------------------")
env.pass_joint_angles(joint_angles.reshape(-1))
# Get External Observations (data into the simulation for later machine learning)
# env.spot.GetExternalObservations(bezier_gait, bezier_stepper)
# Step
# Returns simulation data.
state, reward, done, _ = env.step(action)
# print("IMU Roll: {}".format(state[0]))
# print("IMU Pitch: {}".format(state[1]))
# print("IMU GX: {}".format(state[2]))
# print("IMU GY: {}".format(state[3]))
# print("IMU GZ: {}".format(state[4]))
# print("IMU AX: {}".format(state[5]))
# print("IMU AY: {}".format(state[6]))
# print("IMU AZ: {}".format(state[7]))
# print("-------------------------")
if done:
print("DONE")
if ARGS.AutoReset:
env.reset()
# plt.plot()
# # plt.plot(FL_phases, label="FL")
# # plt.plot(FR_phases, label="FR")
# # plt.plot(BL_phases, label="BL")
# # plt.plot(BR_phases, label="BR")
# plt.plot(FL_Elbow, label="FL ELbow (Deg)")
# plt.xlabel("dt")
# plt.ylabel("value")
# plt.title("Leg Phases")
# plt.legend()
# plt.show()
# time.sleep(1.0)
t += 1
env.close()
print(joint_angles)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
'''
Test the simulation environment.
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import sys
import time
import os
import argparse
sys.path.append('../')
from gym_env import GymEnv
from gui_param_control import GuiParamControl
from kinematics import Kinematics
from bezier_gait import BezierGait
from env_randomizer import EnvRandomizer
# ARGUMENTS
parser = argparse.ArgumentParser(description="Environment Tester (No Joystick).")
parser.add_argument("-hf",
"--HeightField",
help="Use HeightField",
action='store_true')
parser.add_argument("-r",
"--DebugRack",
help="Put Spot on an Elevated Rack",
action='store_true')
parser.add_argument("-p",
"--DebugPath",
help="Draw Spot's Foot Path",
action='store_true')
parser.add_argument("-ay",
"--AutoYaw",
help="Automatically Adjust Spot's Yaw",
action='store_true')
parser.add_argument("-ar",
"--AutoReset",
help="Automatically Reset Environment When Spot Falls",
action='store_true')
parser.add_argument("-dr",
"--DontRandomize",
help="Do NOT Randomize State and Environment.",
action='store_true')
ARGS = parser.parse_args()
def main():
""" The main() function. """
print("STARTING ENVIRONMENT TEST")
seed = 0
max_timesteps = 4e6
if ARGS.DebugRack:
on_rack = True
else:
on_rack = False
if ARGS.DebugPath:
draw_foot_path = True
else:
draw_foot_path = False
if ARGS.HeightField:
height_field = True
else:
height_field = False
if ARGS.DontRandomize:
env_randomizer = None
else:
env_randomizer = EnvRandomizer()
env = GymEnv(render=True,
on_rack=on_rack,
height_field=height_field,
draw_foot_path=draw_foot_path,
env_randomizer=env_randomizer)
# Set seeds
env.seed(seed)
np.random.seed(seed)
state_dim = env.observation_space.shape[0]
print("STATE DIM: {}".format(state_dim))
action_dim = env.action_space.shape[0]
print("ACTION DIM: {}".format(action_dim))
max_action = float(env.action_space.high[0])
state = env.reset()
gui_param_controller = GuiParamControl(env.spot.quadruped)
spot_model = Kinematics()
T_bf0 = spot_model.WorldToFoot
T_bf = copy.deepcopy(T_bf0)
bezier_gait = BezierGait(dt=env._time_step)
action = env.action_space.sample()
yaw = 0.0
t = 0
while t < (int(max_timesteps)):
pos, orn, StepLength, LateralFraction, YawRate, StepVelocity, ClearanceHeight, PenetrationDepth, SwingPeriod = gui_param_controller.UserInput()
# experiemental
# bezier_gait.Tswing = SwingPeriod
# fix yaw for sim
yaw = env.return_yaw()
P_yaw = 5.0
if ARGS.AutoYaw:
YawRate += -yaw * P_yaw
# print("YAW RATE: {}".format(YawRate))
contacts = state[-4:]
# Get Desired Foot Poses
T_bf = bezier_gait.GenerateTrajectory(StepLength, LateralFraction, YawRate,
StepVelocity, T_bf0, T_bf, ClearanceHeight, PenetrationDepth, contacts)
joint_angles = spot_model.InverseKinimatics(orn, pos, T_bf)
#FL_Elbow.append(np.degrees(joint_angles[0][-1]))
# for i, (key, Tbf_in) in enumerate(T_bf.items()):
# print("{}: \t Angle: {}".format(key, np.degrees(joint_angles[i])))
# print("-------------------------")
env.pass_joint_angles(joint_angles.reshape(-1))
# Get External Observations (data into the simulation for later machine learning)
# env.spot.GetExternalObservations(bezier_gait, bezier_stepper)
# Step
# Returns simulation data.
state, reward, done, _ = env.step(action)
# print("IMU Roll: {}".format(state[0]))
# print("IMU Pitch: {}".format(state[1]))
# print("IMU GX: {}".format(state[2]))
# print("IMU GY: {}".format(state[3]))
# print("IMU GZ: {}".format(state[4]))
# print("IMU AX: {}".format(state[5]))
# print("IMU AY: {}".format(state[6]))
# print("IMU AZ: {}".format(state[7]))
# print("-------------------------")
if done:
print("DONE")
if ARGS.AutoReset:
env.reset()
# plt.plot()
# # plt.plot(FL_phases, label="FL")
# # plt.plot(FR_phases, label="FR")
# # plt.plot(BL_phases, label="BL")
# # plt.plot(BR_phases, label="BR")
# plt.plot(FL_Elbow, label="FL ELbow (Deg)")
# plt.xlabel("dt")
# plt.ylabel("value")
# plt.title("Leg Phases")
# plt.legend()
# plt.show()
# time.sleep(1.0)
t += 1
env.close()
print(joint_angles)
if __name__ == '__main__':
main()
| en | 0.295078 | #!/usr/bin/env python Test the simulation environment. # ARGUMENTS The main() function. # Set seeds # experiemental # bezier_gait.Tswing = SwingPeriod # fix yaw for sim # print("YAW RATE: {}".format(YawRate)) # Get Desired Foot Poses #FL_Elbow.append(np.degrees(joint_angles[0][-1])) # for i, (key, Tbf_in) in enumerate(T_bf.items()): # print("{}: \t Angle: {}".format(key, np.degrees(joint_angles[i]))) # print("-------------------------") # Get External Observations (data into the simulation for later machine learning) # env.spot.GetExternalObservations(bezier_gait, bezier_stepper) # Step # Returns simulation data. # print("IMU Roll: {}".format(state[0])) # print("IMU Pitch: {}".format(state[1])) # print("IMU GX: {}".format(state[2])) # print("IMU GY: {}".format(state[3])) # print("IMU GZ: {}".format(state[4])) # print("IMU AX: {}".format(state[5])) # print("IMU AY: {}".format(state[6])) # print("IMU AZ: {}".format(state[7])) # print("-------------------------") # plt.plot() # # plt.plot(FL_phases, label="FL") # # plt.plot(FR_phases, label="FR") # # plt.plot(BL_phases, label="BL") # # plt.plot(BR_phases, label="BR") # plt.plot(FL_Elbow, label="FL ELbow (Deg)") # plt.xlabel("dt") # plt.ylabel("value") # plt.title("Leg Phases") # plt.legend() # plt.show() # time.sleep(1.0) | 2.619312 | 3 |
h2o-py/tests/testdir_misc/pyunit_http_import.py | kernelrich/h2o-3 | 6,098 | 6621815 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def http_import():
url = "http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"
aa = h2o.import_file(path=url)
assert aa.nrow == 194560, "Unexpected number of lines. %s" % aa.nrow
url = "https://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"
aa = h2o.import_file(path=url)
assert aa.nrow == 194560, "Unexpected number of lines. %s" % aa.nrow
if __name__ == "__main__":
pyunit_utils.standalone_test(http_import)
else:
http_import()
| import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def http_import():
url = "http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"
aa = h2o.import_file(path=url)
assert aa.nrow == 194560, "Unexpected number of lines. %s" % aa.nrow
url = "https://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip"
aa = h2o.import_file(path=url)
assert aa.nrow == 194560, "Unexpected number of lines. %s" % aa.nrow
if __name__ == "__main__":
pyunit_utils.standalone_test(http_import)
else:
http_import()
| none | 1 | 2.426754 | 2 | |
Algorithme de vol/main.py | BasileAmeeuw/DroneDelivreur | 0 | 6621816 | from dronekit import connect, VehicleMode, LocationGlobalRelative
import time
import os, re
import pyrebase
import firebase_admin
from firebase_admin import credentials
from google.cloud import firestore
import board
import adafruit_hcsr04
def arm_and_takeoff(aTargetAltitude):
    """Arm the vehicle in GUIDED mode and climb to aTargetAltitude metres.

    Blocks until the autopilot reports it is armable, then until the
    measured altitude reaches 95% of the requested one.
    """
    print("Basic pre-arm checks")
    # Poll until the autopilot has finished its own initialisation.
    while not vehicle.is_armable:
        print(" Waiting for vehicle to initialise...")
        time.sleep(1)

    print("Arming motors")
    # Takeoff commands are only honoured in GUIDED mode.
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True

    # Wait for the arming request to actually take effect.
    while not vehicle.armed:
        print(" Waiting for arming...")
        time.sleep(1)

    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)

    # Poll altitude until close enough to the target; issuing the next
    # command earlier would interrupt the climb.
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        # Accept anything at or above 95% of the requested altitude.
        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:
            print("Reached target altitude")
            break
        time.sleep(3)
def change_alt (altitude):
    """Climb (positive) or descend (negative) by *altitude* metres relative
    to the current position, blocking until the new altitude is reached
    within a 2 cm tolerance. Does nothing (but prints) when disarmed."""
    print ("Changing Altitude")
    if not vehicle.armed:
        print ("Vehicule is not Armed")
        return
    # Reuse the current frame, shifting only its altitude component.
    location = vehicle.location.global_relative_frame
    final_alt = location.alt + altitude
    location.alt = final_alt
    vehicle.simple_goto(location)
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        # 2 cm tolerance band around the requested altitude.
        if final_alt - 0.02 <= vehicle.location.global_relative_frame.alt <= final_alt + 0.02:
            print("Reached target altitude")
            break
        time.sleep(1)
def go_to (relative_coord):
    """Fly to *relative_coord* (a LocationGlobalRelative) at 10 m/s ground
    speed, blocking until latitude and longitude are both within ~1e-5
    degree of the target. Does nothing (but prints) when disarmed."""
    if not vehicle.armed:
        print ("Vehicule is not Armed")
        return
    vehicle.simple_goto (relative_coord, groundspeed = 10)
    while True:
        here = vehicle.location.global_relative_frame
        print(" Vehicule is at: ", here)
        # ~1e-5 degree tolerance (roughly one metre) on both axes.
        lon_ok = relative_coord.lon - 0.00001 <= here.lon <= relative_coord.lon + 0.00001
        lat_ok = relative_coord.lat - 0.00001 <= here.lat <= relative_coord.lat + 0.00001
        if lon_ok and lat_ok:
            print("Target Reached")
            break
        time.sleep(3)
def rtl (alt=None):
    """Return to launch: fly back to a stored location.

    Parameters
    ----------
    alt : LocationGlobalRelative, optional
        Target location (the name is kept for backward compatibility even
        though it is a location, not an altitude). When omitted, the
        module-level ``init`` location recorded right after connecting is
        used — this fixes the bare ``rtl()`` call in the main loop, which
        previously raised TypeError for a missing argument.
    """
    if not vehicle.armed:
        print ("Vehicule is not Armed")
        return
    if alt is None:
        # Fall back to the home position captured at start-up.
        alt = init
    go_to(alt)
def land (alt):
    """Descend *alt* metres in 0.5 m steps, checking both sonar sensors
    before each step.

    If either sensor detects an obstacle (closer than 20 cm), delegate to
    the move_up/move_down/move_left/move_right search pattern instead of
    descending; otherwise step down by 0.5 m.

    Parameters
    ----------
    alt : float
        Height in metres to descend from the current position.
    """
    altitude = alt  # remaining height, handed to the avoidance manoeuvres
    # 0.5 m per step; range(1, int((alt + 0.5) * 2)) reproduces the
    # original step count (2*alt descents for a half-metre-aligned alt).
    for _ in range (1, int((alt + 0.5) * 2)):
        # Read both sensors unconditionally (each read pings its sonar).
        detect1 = distance (GPIO_TRIGGER1, GPIO_ECHO1)
        detect2 = distance (GPIO_TRIGGER2, GPIO_ECHO2)
        if detect1 or detect2:
            # Obstacle below: sidestep and retry landing from there.
            move_up(altitude)
        else:
            print ("Landing")
            change_alt (-0.5)
            altitude -= 0.5
def move_up (alt):
    """First step of the obstacle-avoidance search: shift ~1 m in the
    +longitude direction, then resume landing or escalate to move_down.

    1 degree of longitude is roughly 111.111 km, so 0.000009 degree ~ 1 m.
    """
    print ("Move Up")
    target = vehicle.location.global_relative_frame
    target.lon += 0.000009
    go_to(target)
    # Read both sensors unconditionally (each read pings its sonar).
    blocked_1 = distance (GPIO_TRIGGER1, GPIO_ECHO1)
    blocked_2 = distance (GPIO_TRIGGER2, GPIO_ECHO2)
    if blocked_1 or blocked_2:
        move_down (alt)
    else:
        land (alt)
def move_down (alt) :
    """Second step of the search pattern: shift ~2 m in the -longitude
    direction (net ~1 m past the original spot, undoing move_up's +1 m),
    then resume landing or escalate to move_left."""
    print ("Move Down")
    target = vehicle.location.global_relative_frame
    target.lon -= 0.000009 *2
    go_to(target)
    # Read both sensors unconditionally (each read pings its sonar).
    blocked_1 = distance (GPIO_TRIGGER1, GPIO_ECHO1)
    blocked_2 = distance (GPIO_TRIGGER2, GPIO_ECHO2)
    if blocked_1 or blocked_2:
        move_left (alt)
    else:
        land (alt)
def move_left (alt) :
    """Third step of the search pattern: return ~1 m toward the centre
    line (+longitude) and shift ~1 m in the -latitude direction, then
    resume landing or escalate to move_right."""
    print ("Move Left")
    target = vehicle.location.global_relative_frame
    target.lon += 0.000009
    target.lat -= 0.000009
    go_to(target)
    # Read both sensors unconditionally (each read pings its sonar).
    blocked_1 = distance (GPIO_TRIGGER1, GPIO_ECHO1)
    blocked_2 = distance (GPIO_TRIGGER2, GPIO_ECHO2)
    if blocked_1 or blocked_2:
        move_right (alt)
    else:
        land (alt)
def move_right (alt) :
    """Last step of the search pattern: shift ~2 m in the +latitude
    direction; if the sonars still detect an obstacle, give up and report
    "No possibility", otherwise resume landing."""
    print ("Move Right")
    target = vehicle.location.global_relative_frame
    target.lat += 0.000009 *2
    go_to(target)
    # Read both sensors unconditionally (each read pings its sonar).
    blocked_1 = distance (GPIO_TRIGGER1, GPIO_ECHO1)
    blocked_2 = distance (GPIO_TRIGGER2, GPIO_ECHO2)
    if blocked_1 or blocked_2:
        print ("No possibility")
    else:
        land (alt)
def distance(TRIGGER, ECHO):
    """Read one HC-SR04 sonar and report whether an obstacle is near.

    Parameters
    ----------
    TRIGGER, ECHO : board pin objects wired to the sensor.

    Returns
    -------
    bool
        True when the measured distance is <= 20 cm.

    Fixes over the previous version: a sensor timeout raises RuntimeError
    from the ``.distance`` property, which previously happened in the
    ``while`` condition *outside* the try/except and crashed; each
    ``.distance`` access also fires a new ping, so the printed value and
    the thresholded value could disagree. The sensor is now released
    (``deinit``) before returning so the GPIO pins can be re-used by the
    next call.
    """
    sonar = adafruit_hcsr04.HCSR04(trigger_pin=TRIGGER, echo_pin=ECHO)
    try:
        while True:
            try:
                # Read once per attempt and reuse the value below.
                reading = sonar.distance
            except RuntimeError:
                # Sensor timed out (no echo); wait and retry.
                print("Retrying!")
                time.sleep (0.5)
                continue
            if reading == 0:
                # Spurious zero reading; wait and retry as before.
                print(reading)
                time.sleep (0.5)
                continue
            break
        print (str(reading) + " cm")
        return reading <= 20
    finally:
        # Free the trigger/echo pins so the next call can re-create the
        # sensor without a pin-in-use error.
        sonar.deinit()
def armed_off():
    """Disarm the vehicle if it is currently armed; otherwise do nothing."""
    if not vehicle.armed:
        return
    vehicle.armed = False
def db_connection (config):
    """Initialise Firebase (pyrebase + admin SDK) and return a Firestore client.

    Parameters
    ----------
    config : dict
        Pyrebase web-app configuration (apiKey, projectId, storageBucket, ...).

    Returns
    -------
    google.cloud.firestore.Client
        Client authenticated via the local serviceAccountKey.json file.
    """
    # Use the supplied config instead of the module-level ``firebaseConfig``
    # global: the parameter was previously ignored.
    firebase = pyrebase.initialize_app(config)
    storage = firebase.storage()
    # NOTE(review): ``storage`` is local and discarded on return, while the
    # main loop reads a global ``storage`` -- confirm how that global is
    # meant to be populated.
    try:
        # Re-use the admin app if one already exists in this process.
        firebase_admin.get_app()
        print('firebase intialized.')
    except ValueError:
        print('firebase not initialized. initialize.')
        cred = credentials.Certificate("serviceAccountKey.json")
        firebase_admin.initialize_app(cred)
    # google-cloud-firestore picks up its credentials from this variable.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "serviceAccountKey.json"
    return firestore.Client()
# Pyrebase web-app configuration for the "delivreapp" Firebase project
# (public client-side credentials, not the service-account secret).
firebaseConfig = {
    "apiKey": "<KEY>",
    "authDomain": "delivreapp-5221e.firebaseapp.com",
    "projectId": "delivreapp-5221e",
    "databaseURL": "https://del-ivre-default-rtdb.europe-west1.firebasedatabase.app",
    "storageBucket": "delivreapp-5221e.appspot.com",
    "messagingSenderId": "661920641786",
    "appId": "1:661920641786:web:dca2c085b5ff60f1b18f43",
    "measurementId": "G-CLR5PFH3G4"
}
# Raspberry Pi pins for the two HC-SR04 sonar sensors (trigger/echo pairs).
#set GPIO Pins
GPIO_TRIGGER1 = board.D14
GPIO_ECHO1 = board.D15
GPIO_TRIGGER2 = board.D17
GPIO_ECHO2 = board.D27
# --- Main mission loop -------------------------------------------------------
# NOTE(review): this script references several names that are not defined in
# the file as shown (db, nDrone, storage, face_recognition, cv2, Image) --
# confirm the missing imports and the initialisation (e.g. db = db_connection(...)).
vehicle = connect('udp:127.0.0.1:14550', wait_ready=True)
print('Connecting to vehicle : %s' % vehicle)
print ("Simulation Location : \n%s" % vehicle.location.global_relative_frame)
# Remember the start-up position; presumably used as the "home" location.
init = vehicle.location.global_relative_frame
while True:
    # NOTE(review): nbImg starts as a list but is later rebound to an int
    # (nbImg=i below) and then used with range(len(nbImg)) at the bottom of
    # the loop -- verify the intended type.
    nbImg=[];
    # NOTE(review): ``go`` is assigned but never used.
    go=True
    # Poll Firestore until an order assigned to this drone appears.
    while True:
        try:
            doc=db.collection('Users').where("Drone","==",nDrone).get()[0]
            break
        except:
            continue
    # Customer identity and delivery coordinates from the order document.
    nom=doc.get("Nom")
    prenom=doc.get("Prenom")
    coord=doc.get("GPS")
    image=nom+"#"+prenom+".jpg"
    print(nom,end=' ')
    print(prenom,end="\n")
    print(doc.get("Commande"),end="\n")
    print(coord)
    # Download the customer's reference photo from Firebase Storage.
    storage.child(image).download(image)
    ### Face-recognition state
    known_face_encodings = []
    known_face_names = []
    face_locations = []
    face_encodings = []
    process_this_frame = True
    # Try to encode the photo as stored in the database.
    try:
        new_image=face_recognition.load_image_file(image)
        new_face_encoding = face_recognition.face_encodings(new_image)[0]
        known_face_encodings.append(new_face_encoding)
        known_face_names.append(prenom + " " + nom)
        print("photo", " dans reconaissance faciale")
    except:
        # No face found as-is: save the photo in four 90-degree rotations
        # and retry the encoding on each of them.
        img1 = Image.open(image)
        img1.save("img1.jpg","JPEG")
        try:
            time.sleep(0.001)
            img1.save("img1.jpg","JPEG")
            time.sleep(0.001)
            img2=img1.rotate(90)
            img2.show()
            time.sleep(0.001)
            img2.save("img2.jpg","JPEG")
            img3=img2.rotate(90)
            time.sleep(0.001)
            img3.save("img3.jpg","JPEG")
            img4=img3.rotate(90)
            time.sleep(0.001)
            img4.save("img4.jpg","JPEG")
            #os.remove(image)
            print("image enregistrée")
        except:
            print("probleme dans le téléchargement de l'image")
        for i in range(1,5):
            try:
                # NOTE(review): this uses str(nbImg) (the list/last index),
                # not str(i) -- on the first pass it builds "img[].jpg";
                # confirm whether str(i) was intended.
                new_image=face_recognition.load_image_file("img"+ str(nbImg) + ".jpg")
                new_face_encoding = face_recognition.face_encodings(new_image)[0]
                known_face_encodings.append(new_face_encoding)
                known_face_names.append(prenom + " " + nom)
                nbImg=i
                print("photo" , str(i) , " dans reconaissance faciale")
            except:
                os.remove("img"+ str(i) + ".jpg")
                print("photo ", str(i) , "non prise en compte")
    ### Take off and fly toward the customer, then descend for recognition.
    print("décolage jusque coordonnée gps et arrivée à 1m50")
    arm_and_takeoff (10)
    print("go_to position: ", coord)
    time.sleep(2)
    # go_to (LocationGlobalRelative(float(-35.3633), float(149.1652294), float(10)))
    #go_to (LocationGlobalRelative(float(coord["Latitude"]), float(coord["Longitude"]), float(50)))
    land (8.5)
    #Face recognition
    #change_alt (-48.5)
    print("descendre")
    time.sleep(2)
    Reco=True
    # Face-recognition loop: keep grabbing webcam frames until the customer
    # from the order is recognised.
    print("lancement algorithme de reconnaissance faciale")
    while Reco:
        # Grab a single frame of video
        # NOTE(review): a new VideoCapture(0) is opened on every iteration
        # and never released -- consider opening it once before the loop.
        ret, frame = cv2.VideoCapture(0).read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                    if name==prenom+" "+nom:
                        print(name, end=' ')
                        print("a bien été reconnu, on procède donc a l'attérissage.")
                        Reco=False
        process_this_frame = not process_this_frame
    # Land and let the customer take the package.
    print("attérissage sur .... pour que la personne récupère sa commande")
    '''land (48.5)'''
    time.sleep(2)
    print("land successfull: alt: 0")
    # Wait before restarting.
    # NOTE(review): the message announces one minute but the sleep is 5 s.
    print("attente de 1 minute avant de redémarrer")
    time.sleep(5)
    # Remove the downloaded image(s) from local storage.
    # NOTE(review): len(nbImg) fails once nbImg has been rebound to an int
    # in the rotation fallback above -- verify.
    print("image supprimé de la mémoire du rpi")
    for i in range(len(nbImg)):
        os.remove("img"+ str(nbImg[i]) + ".jpg")
    # Return to base for the next order.
    print("retour à la base pour une nouvelle commande")
    # NOTE(review): rtl() is called without the location argument that its
    # definition requires -- confirm the intended home location is supplied.
    rtl()
    armed_off()
    # Delete the fulfilled order (document + stored photo) if still present.
    print("suppression de la commande si pas encore fait")
    try:
        id=str(int(doc.get("Id")))
        db.collection('Users').document(id).delete()
        print("la commande a été supprimée")
        storage.delete_blob(image)
        storage.delete()
    except:
        print("la commande était déja supprimée")
# NOTE(review): the loop above never breaks, so this close() is unreachable.
vehicle.close()
import time
import os, re
import pyrebase
import firebase_admin
from firebase_admin import credentials
from google.cloud import firestore
import board
import adafruit_hcsr04
def arm_and_takeoff(aTargetAltitude):
    """
    Arm the vehicle in GUIDED mode and climb to aTargetAltitude metres.

    NOTE(review): duplicate definition -- the same function appears earlier
    in this file; this later one shadows it at import time.
    """
    print("Basic pre-arm checks")
    # Don't try to arm until autopilot is ready
    while not vehicle.is_armable:
        print(" Waiting for vehicle to initialise...")
        time.sleep(1)
    print("Arming motors")
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    # Confirm vehicle armed before attempting to take off
    while not vehicle.armed:
        print(" Waiting for arming...")
        time.sleep(1)
    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)  # Take off to target altitude
    # Wait until the vehicle reaches a safe height before processing the goto
    # (otherwise the command after Vehicle.simple_takeoff will execute
    # immediately).
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        # Break and return from function just below target altitude.
        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:
            print("Reached target altitude")
            break
        time.sleep(3)
def change_alt (altitude):
    """Climb (positive) or descend (negative) by *altitude* metres and block
    until the new altitude is reached within 2 cm.

    NOTE(review): duplicate definition -- shadows the earlier copy in this file.
    """
    print ("Changing Altitude")
    if vehicle.armed :
        location = vehicle.location.global_relative_frame
        final_alt = location.alt + altitude
        location.alt = final_alt
        vehicle.simple_goto(location)
        while True:
            print(" Altitude: ", vehicle.location.global_relative_frame.alt)
            # Break once within the 2 cm tolerance band around the target.
            if vehicle.location.global_relative_frame.alt >= final_alt - 0.02 and vehicle.location.global_relative_frame.alt <= final_alt + 0.02 :
                print("Reached target altitude")
                break
            time.sleep(1)
    else:
        print ("Vehicule is not Armed")
def go_to (relative_coord):
    """Fly to *relative_coord* at 10 m/s and block until lat/lon are within
    ~1e-5 degree of the target.

    NOTE(review): duplicate definition -- shadows the earlier copy in this file.
    """
    if vehicle.armed :
        vehicle.simple_goto (relative_coord, groundspeed = 10)
        while True :
            print(" Vehicule is at: ", vehicle.location.global_relative_frame)
            # ~1e-5 degree (~1 m) tolerance on both axes.
            if (vehicle.location.global_relative_frame.lon >= relative_coord.lon - 0.00001 and vehicle.location.global_relative_frame.lon <= relative_coord.lon + 0.00001) and (vehicle.location.global_relative_frame.lat >= relative_coord.lat - 0.00001 and vehicle.location.global_relative_frame.lat <= relative_coord.lat + 0.00001) :
                print("Target Reached")
                break
            time.sleep(3)
    else:
        print ("Vehicule is not Armed")
def rtl (alt=None):
    """Return the vehicle to its launch location.

    alt: accepted for backward compatibility but no longer used. The previous
        implementation passed this number straight to go_to(), which expects
        a location object with .lat/.lon and therefore crashed — and the main
        loop even calls rtl() with no argument at all.

    Switching the autopilot into RTL mode makes it fly home and land on its
    own; no blocking wait is performed here.
    """
    if vehicle.armed :
        # Let the autopilot handle the return-to-launch flight.
        vehicle.mode = VehicleMode("RTL")
    else:
        print ("Vehicule is not Armed")
def land (alt):
    """Descend from *alt* metres in 0.5 m steps, dodging obstacles.

    Before each step both ultrasonic sensors are polled; if either reports an
    obstacle (within 20 cm, see distance()) the lateral avoidance chain is
    entered via move_up(), which recursively resumes landing. Otherwise the
    vehicle descends 0.5 m and the remaining altitude is decremented.
    """
    altitude = alt
    # NOTE(review): alt_initial is assigned but never used — confirm removable.
    alt_initial = vehicle.location.global_relative_frame.alt
    # Roughly one iteration per 0.5 m of altitude to shed.
    for i in range (1,int((alt+0.5)*2)):
        detect1 = distance (GPIO_TRIGGER1,GPIO_ECHO1)
        detect2 = distance (GPIO_TRIGGER2,GPIO_ECHO2)
        if detect1 == True or detect2 == True :
            move_up(altitude)
        else :
            print ("Landing")
            change_alt (-0.5)
            altitude -= 0.5
def move_up (alt):
    """First obstacle-avoidance step: shift ~1 m in longitude, then re-check.

    If the sonars still detect an obstacle at the new position, try the next
    direction (move_down); otherwise resume landing from altitude *alt*.
    """
    # 1 degree of longitude ~ 111.111 km, so 1 m ~ 0.000009 degree.
    print ("Move Up")
    gps = vehicle.location.global_relative_frame
    gps.lon += 0.000009
    go_to(gps)
    detect1 = distance (GPIO_TRIGGER1,GPIO_ECHO1)
    detect2 = distance (GPIO_TRIGGER2,GPIO_ECHO2)
    if detect1 == True or detect2 == True :
        move_down (alt)
    else :
        land (alt)
def move_down (alt) :
    """Second avoidance step: shift ~2 m in longitude the opposite way
    (net ~1 m past the original position), then re-check the sonars.

    Escalates to move_left() if still blocked; otherwise resumes landing.
    """
    print ("Move Down")
    gps = vehicle.location.global_relative_frame
    # Undo move_up's +1 m and go 1 m further the other way (0.000009 deg ~ 1 m).
    gps.lon -= 0.000009 *2
    go_to(gps)
    detect1 = distance (GPIO_TRIGGER1,GPIO_ECHO1)
    detect2 = distance (GPIO_TRIGGER2,GPIO_ECHO2)
    if detect1 == True or detect2 == True :
        move_left (alt)
    else :
        land (alt)
def move_left (alt) :
    """Third avoidance step: return to the original longitude and shift
    ~1 m in latitude, then re-check the sonars.

    Escalates to move_right() if still blocked; otherwise resumes landing.
    """
    print ("Move Left")
    gps = vehicle.location.global_relative_frame
    # +0.000009 lon cancels move_down's net offset; -0.000009 lat ~ 1 m.
    gps.lon += 0.000009
    gps.lat -= 0.000009
    go_to(gps)
    detect1 = distance (GPIO_TRIGGER1,GPIO_ECHO1)
    detect2 = distance (GPIO_TRIGGER2,GPIO_ECHO2)
    if detect1 == True or detect2 == True :
        move_right (alt)
    else :
        land (alt)
def move_right (alt) :
    """Last avoidance step: shift ~2 m in latitude the opposite way, then
    re-check the sonars.

    If the path is still blocked all four directions are exhausted and the
    routine gives up ("No possibility"); otherwise landing resumes.
    """
    print ("Move Right")
    gps = vehicle.location.global_relative_frame
    # Undo move_left's -1 m latitude and go 1 m further (0.000009 deg ~ 1 m).
    gps.lat += 0.000009 *2
    go_to(gps)
    detect1 = distance (GPIO_TRIGGER1,GPIO_ECHO1)
    detect2 = distance (GPIO_TRIGGER2,GPIO_ECHO2)
    if detect1 == True or detect2 == True :
        print ("No possibility")
    else :
        land (alt)
def distance(TRIGGER, ECHO):
    """Return True when the HC-SR04 on (TRIGGER, ECHO) sees an obstacle
    within 20 cm, False otherwise.

    Retries every 0.5 s while the reading is 0 or the driver raises
    RuntimeError (no echo pulse received in time).

    Fixes over the previous version:
    - the sensor is now deinitialised in all cases, releasing the GPIO pins
      (this function is called repeatedly during every landing);
    - a driver RuntimeError raised while reading the loop condition no
      longer escapes the retry loop;
    - a single cached reading is printed and compared, instead of triggering
      two separate measurements that could disagree.
    """
    sonar = adafruit_hcsr04.HCSR04(trigger_pin=TRIGGER, echo_pin=ECHO)
    try:
        while True:
            try:
                reading = sonar.distance
                if reading != 0 :
                    break
                print(reading)
            except RuntimeError:
                # Raised by the driver when no echo is received in time.
                print("Retrying!")
            time.sleep (0.5)
        print (str(reading) + " cm")
        return reading <= 20
    finally:
        # Always release the trigger/echo GPIO pins.
        sonar.deinit()
def armed_off():
    """Disarm the vehicle if it is currently armed (fire-and-forget: does
    not wait for the autopilot to confirm the disarm)."""
    if vehicle.armed :
        vehicle.armed = False
def db_connection (config):
    """Initialise Firebase (pyrebase + firebase_admin) and return a
    Firestore client.

    config: pyrebase web-app configuration dict (apiKey, projectId, ...).
    firebase_admin is initialised only once; later calls reuse the app.
    """
    # Bug fix: the parameter was ignored and the module-level firebaseConfig
    # was used instead, making the argument meaningless.
    firebase=pyrebase.initialize_app(config)
    # NOTE(review): this `storage` is a local that is neither used nor
    # returned; the module-level `storage` used elsewhere is not set here —
    # confirm the intended scope.
    storage=firebase.storage()
    try:
        firebase_admin.get_app()
        print('firebase intialized.')
    except ValueError:
        print('firebase not initialized. initialize.')
        cred = credentials.Certificate("serviceAccountKey.json")
        firebase_admin.initialize_app(cred)
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="serviceAccountKey.json"
    return firestore.Client()
# Pyrebase web-app configuration for the delivery project (key redacted).
firebaseConfig = {
    "apiKey": "<KEY>",
    "authDomain": "delivreapp-5221e.firebaseapp.com",
    "projectId": "delivreapp-5221e",
    "databaseURL": "https://del-ivre-default-rtdb.europe-west1.firebasedatabase.app",
    "storageBucket": "delivreapp-5221e.appspot.com",
    "messagingSenderId": "661920641786",
    "appId": "1:661920641786:web:dca2c085b5ff60f1b18f43",
    "measurementId": "G-CLR5PFH3G4"
}
#set GPIO Pins
# Two HC-SR04 ultrasonic sensors: (trigger, echo) pin pairs.
GPIO_TRIGGER1 = board.D14
GPIO_ECHO1 = board.D15
GPIO_TRIGGER2 = board.D17
GPIO_ECHO2 = board.D27
# Connect to the (SITL) autopilot over local UDP and remember the start
# location so the drone can later return to it.
vehicle = connect('udp:127.0.0.1:14550', wait_ready=True)
print('Connecting to vehicle : %s' % vehicle)
print ("Simulation Location : \n%s" % vehicle.location.global_relative_frame)
init = vehicle.location.global_relative_frame
# Main service loop: wait for an order assigned to this drone, fetch the
# customer's reference photo, fly out, recognise the customer, deliver the
# package, then return to base and clear the order.
while True:
    nbImg = []  # indices of the rotated fallback images actually saved
    # Poll Firestore until an order for this drone appears.
    while True:
        try:
            doc=db.collection('Users').where("Drone","==",nDrone).get()[0]
            break
        except Exception:
            continue
    nom=doc.get("Nom")
    prenom=doc.get("Prenom")
    coord=doc.get("GPS")
    image=nom+"#"+prenom+".jpg"
    print(nom,end=' ')
    print(prenom,end="\n")
    print(doc.get("Commande"),end="\n")
    print(coord)
    # Download the customer's reference photo from Firebase storage.
    storage.child(image).download(image)
    # Face-recognition state.
    known_face_encodings = []
    known_face_names = []
    face_locations = []
    face_encodings = []
    process_this_frame = True
    # Try the stored photo as-is first.
    try:
        new_image=face_recognition.load_image_file(image)
        new_face_encoding = face_recognition.face_encodings(new_image)[0]
        known_face_encodings.append(new_face_encoding)
        known_face_names.append(prenom + " " + nom)
        print("photo", " dans reconaissance faciale")
    except Exception:
        # No face found: retry with the photo rotated 0/90/180/270 degrees.
        img1 = Image.open(image)
        img1.save("img1.jpg","JPEG")
        try:
            time.sleep(0.001)
            img1.save("img1.jpg","JPEG")
            time.sleep(0.001)
            img2=img1.rotate(90)
            img2.show()
            time.sleep(0.001)
            img2.save("img2.jpg","JPEG")
            img3=img2.rotate(90)
            time.sleep(0.001)
            img3.save("img3.jpg","JPEG")
            img4=img3.rotate(90)
            time.sleep(0.001)
            img4.save("img4.jpg","JPEG")
            print("image enregistrée")
        except Exception:
            print("probleme dans le téléchargement de l'image")
        for i in range(1,5):
            try:
                # Bug fix: previously read "img" + str(nbImg) (the list, then
                # the last stored index) instead of the current index i, so no
                # rotation was ever loaded and valid images were deleted.
                new_image=face_recognition.load_image_file("img"+ str(i) + ".jpg")
                new_face_encoding = face_recognition.face_encodings(new_image)[0]
                known_face_encodings.append(new_face_encoding)
                known_face_names.append(prenom + " " + nom)
                # Bug fix: was `nbImg=i`, rebinding the list to an int and
                # breaking the len(nbImg) cleanup loop below.
                nbImg.append(i)
                print("photo" , str(i) , " dans reconaissance faciale")
            except Exception:
                os.remove("img"+ str(i) + ".jpg")
                print("photo ", str(i) , "non prise en compte")
    # Take off and fly towards the customer's GPS position.
    print("décolage jusque coordonnée gps et arrivée à 1m50")
    arm_and_takeoff (10)
    print("go_to position: ", coord)
    time.sleep(2)
    # go_to (LocationGlobalRelative(float(-35.3633), float(149.1652294), float(10)))
    #go_to (LocationGlobalRelative(float(coord["Latitude"]), float(coord["Longitude"]), float(50)))
    land (8.5)
    #Face recognition
    #change_alt (-48.5)
    print("descendre")
    time.sleep(2)
    Reco=True
    print("lancement algorithme de reconnaissance faciale")
    # Bug fix: open the camera once instead of constructing a new
    # cv2.VideoCapture(0) for every frame (which leaked device handles).
    video = cv2.VideoCapture(0)
    while Reco:
        # Grab a single frame of video
        ret, frame = video.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                if name==prenom+" "+nom:
                    print(name, end=' ')
                    print("a bien été reconnu, on procède donc a l'attérissage.")
                    Reco=False
        process_this_frame = not process_this_frame
    video.release()
    # Land so the customer can take their order.
    print("attérissage sur .... pour que la personne récupère sa commande")
    '''land (48.5)'''
    time.sleep(2)
    print("land successfull: alt: 0")
    print("attente de 1 minute avant de redémarrer")
    time.sleep(5)
    # Delete the fallback images from local storage.
    print("image supprimé de la mémoire du rpi")
    for i in range(len(nbImg)):
        os.remove("img"+ str(nbImg[i]) + ".jpg")
    # Fly back to base for the next order.
    print("retour à la base pour une nouvelle commande")
    rtl()
    armed_off()
    print("suppression de la commande si pas encore fait")
    try:
        # Renamed from `id` to avoid shadowing the builtin.
        order_id=str(int(doc.get("Id")))
        db.collection('Users').document(order_id).delete()
        print("la commande a été supprimée")
        storage.delete_blob(image)
        storage.delete()
    except Exception:
        print("la commande était déja supprimée")
vehicle.close()
src/examples/FogCentrality/CentricityPopulation.py | MarkoRimac/YAFS | 58 | 6621817 | from yafs.population import Population
class Statical(Population):
    """Population algorithm that statically assigns source generation to
    topology nodes. It is only invoked during initialisation.

    Extends: :mod: Population
    """
    def initial_allocation(self, sim, app_name):
        """Deploy every statically configured sink and source for *app_name*."""
        # Sinks: one deployment of the configured module per listed node id.
        for sink_cfg in self.sink_control:
            if "id" in sink_cfg.keys():
                sink_module = sink_cfg["module"]
                for node_id in sink_cfg["id"]:
                    sim.deploy_sink(app_name, node=node_id, module=sink_module)
        # Sources: additionally carry the message and generation distribution.
        for src_cfg in self.src_control:
            if "id" in src_cfg.keys():
                message = src_cfg["message"]
                dist = src_cfg["distribution"]
                for node_id in src_cfg["id"]:
                    idsrc = sim.deploy_source(app_name, id_node=node_id, msg=message, distribution=dist)
class Statical(Population):
"""
This implementation of a population algorithm statically assigns the generation of a source in a node of the topology. It is only invoked in the initialization.
Extends: :mod: Population
"""
def initial_allocation(self,sim,app_name):
#Assignment of SINK and SOURCE pure modules
for ctrl in self.sink_control:
if "id" in ctrl.keys():
module = ctrl["module"]
for idx in ctrl["id"]:
sim.deploy_sink(app_name, node=idx, module=module)
for ctrl in self.src_control:
if "id" in ctrl.keys():
msg = ctrl["message"]
dst = ctrl["distribution"]
for idx in ctrl["id"]:
idsrc = sim.deploy_source(app_name, id_node=idx, msg=msg, distribution=dst)
#end assignments | en | 0.818579 | This implementation of a population algorithm statically assigns the generation of a source in a node of the topology. It is only invoked in the initialization. Extends: :mod: Population #Assignment of SINK and SOURCE pure modules #end assignments | 2.780849 | 3 |
commodities/models/orm.py | uktrade/tamato | 14 | 6621818 | <reponame>uktrade/tamato<gh_stars>10-100
from __future__ import annotations
from dataclasses import dataclass
from typing import Set
from django.db import models
from django.db import transaction
from django.db.models import Q
from polymorphic.managers import PolymorphicManager
from treebeard.mp_tree import MP_Node
from commodities import business_rules
from commodities import validators
from commodities.querysets import GoodsNomenclatureIndentQuerySet
from common.business_rules import UpdateValidity
from common.fields import LongDescription
from common.models import NumericSID
from common.models import TrackedModel
from common.models.mixins.description import DescriptionMixin
from common.models.mixins.description import DescriptionQueryset
from common.models.mixins.validity import ValidityMixin
from common.models.mixins.validity import ValidityStartMixin
from common.util import TaricDateRange
from footnotes.validators import ApplicationCode
from measures import business_rules as measures_business_rules
@dataclass
class CommodityCode:
    """A dataclass for commodity codes with a range of convenience
    properties.

    A code is a 10-digit string made of 2-digit pairs: chapter (2 digits),
    heading (4), HS subheading (6), CN subheading (8) and the full 10-digit
    TARIC code.
    """

    code: str

    @property
    def chapter(self) -> str:
        """Returns the HS chapter for the commodity code."""
        return self.code[:2]

    @property
    def heading(self) -> str:
        """Returns the HS heading for the commodity code."""
        return self.code[:4]

    @property
    def subheading(self) -> str:
        """Returns the HS subheading for the commodity code."""
        return self.code[:6]

    @property
    def cn_subheading(self) -> str:
        """Returns the CN subheading for the commodity code."""
        return self.code[:8]

    @property
    def dot_code(self) -> str:
        """Returns the commodity code in dot format."""
        code = self.code
        return f"{code[:4]}.{code[4:6]}.{code[6:8]}.{code[8:]}"

    @property
    def trimmed_dot_code(self) -> str:
        """Returns the commodity code in dot format, without trailing zero
        pairs."""
        parts = self.dot_code.split(".")
        for i, part in enumerate(parts[::-1]):
            if part != "00":
                return ".".join(parts[: len(parts) - i])
        # Defensive fallback (the 4-char leading segment never equals "00"
        # for well-formed 10-digit codes, but avoid returning None).
        return parts[0]

    @property
    def trimmed_code(self) -> str:
        """Returns the commodity code without trailing zero pairs."""
        return self.trimmed_dot_code.replace(".", "")

    @property
    def is_chapter(self) -> bool:
        """Returns true if the commodity code represents a HS chapter."""
        # A chapter has every digit after the 2-digit chapter set to zero.
        # (The previous implementation compared trimmed_code.rstrip("0")
        # against the chapter, which wrongly returned False for chapters
        # themselves ending in zero — 10, 20, ..., 90 — so those codes
        # misclassified as headings.)
        return self.code[2:].rstrip("0") == ""

    @property
    def is_heading(self) -> bool:
        """Returns true if the commodity code represents a HS heading."""
        return self.trimmed_code == self.heading and not self.is_chapter

    @property
    def is_subheading(self) -> bool:
        """Returns true if the commodity code represents a HS subheading."""
        return self.trimmed_code == self.subheading

    @property
    def is_cn_subheading(self) -> bool:
        """Returns true if the commodity code represents a CN subheading."""
        return self.trimmed_code == self.cn_subheading

    @property
    def is_taric_subheading(self) -> bool:
        """Returns true if the commodity code represents a Taric subheading."""
        return self.trimmed_code == self.code

    @property
    def is_taric_code(self) -> bool:
        """Returns true if the last digit pair is non-zero, i.e. the code is
        specific to TARIC rather than the CN."""
        return self.code[8:] != "00"

    def __str__(self):
        """Returns a string representation of the dataclass instance."""
        return self.code
class GoodsNomenclature(TrackedModel, ValidityMixin):
    """A goods nomenclature (commodity) code with its validity period,
    serialised as TARIC record 400 00."""
    record_code = "400"
    subrecord_code = "00"
    sid = NumericSID()
    # These are character fields as they often has leading 0s
    item_id = models.CharField(
        max_length=10,
        validators=[validators.item_id_validator],
        db_index=True,
    )
    # Distinguishes multiple rows that share the same item_id.
    suffix = models.CharField(
        max_length=2,
        validators=[validators.suffix_validator],
        db_index=True,
    )
    statistical = models.BooleanField()
    # Codes this one was derived from on creation (via GoodsNomenclatureOrigin).
    origins = models.ManyToManyField(
        "self",
        through="GoodsNomenclatureOrigin",
        through_fields=("new_goods_nomenclature", "derived_from_goods_nomenclature"),
    )
    # Codes that absorbed this one when it was end-dated (via GoodsNomenclatureSuccessor).
    successors = models.ManyToManyField(
        "self",
        through="GoodsNomenclatureSuccessor",
        through_fields=(
            "replaced_goods_nomenclature",
            "absorbed_into_goods_nomenclature",
        ),
    )
    @property
    def code(self) -> CommodityCode:
        """Returns a CommodityCode instance for the good."""
        return CommodityCode(code=self.item_id)
    @property
    def footnote_application_codes(self) -> Set[ApplicationCode]:
        """Footnote application codes usable on this good: TARIC and dynamic
        footnotes always, CN footnotes only for non-TARIC-specific codes."""
        codes = {ApplicationCode.TARIC_NOMENCLATURE, ApplicationCode.DYNAMIC_FOOTNOTE}
        if not self.is_taric_code:
            codes.add(ApplicationCode.CN_NOMENCLATURE)
        return codes
    # Business rules enforced on other records that reference this one.
    indirect_business_rules = (
        business_rules.NIG10,
        business_rules.NIG18,
        business_rules.NIG2,
        business_rules.NIG22,
        business_rules.NIG7,
        measures_business_rules.ME1,
        measures_business_rules.ME7,
        measures_business_rules.ME71,
        measures_business_rules.ME88,
    )
    # Business rules enforced on this record itself.
    business_rules = (
        business_rules.NIG1,
        business_rules.NIG5,
        business_rules.NIG30,
        business_rules.NIG31,
        business_rules.NIG34,
        business_rules.NIG35,
        UpdateValidity,
    )
    class Meta:
        verbose_name = "commodity code"
    def __str__(self):
        return self.item_id
    @property
    def autocomplete_label(self):
        """Label shown in autocomplete widgets: item id plus description.
        (get_description is provided by a mixin outside this file.)"""
        return f"{self} - {self.get_description().description}"
    @property
    def dependent_measures(self):
        """Measures attached to any version of this good (matched by SID),
        approved up to this version's transaction."""
        return self.measures.model.objects.filter(
            goods_nomenclature__sid=self.sid,
        ).approved_up_to_transaction(self.transaction)
    @property
    def is_taric_code(self) -> bool:
        """True when the code is specific to TARIC (last digit pair non-zero)."""
        return self.code.is_taric_code
    def in_use(self):
        """True when at least one measure depends on this good."""
        return self.dependent_measures.exists()
class GoodsNomenclatureIndent(TrackedModel, ValidityStartMixin):
    """The hierarchy depth of a goods nomenclature code from a given start
    date, serialised as TARIC record 400 05."""
    record_code = "400"
    subrecord_code = "05"
    objects: GoodsNomenclatureIndentQuerySet = PolymorphicManager.from_queryset(
        GoodsNomenclatureIndentQuerySet,
    )()
    sid = NumericSID()
    # Depth of the indented good in the nomenclature tree.
    indent = models.PositiveIntegerField(db_index=True)
    indented_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        on_delete=models.PROTECT,
        related_name="indents",
    )
    indirect_business_rules = (business_rules.NIG11,)
    business_rules = (business_rules.NIG2, UpdateValidity)
    # The validity start date must fall within the indented good's validity.
    validity_over = "indented_goods_nomenclature"
    def get_parent_indents(self):
        """Return the indents attached to the tree parents of this indent's
        nodes.

        A parent's materialized path is a node's path minus its final step,
        so the filter ORs one path lookup per attached node.
        NOTE(review): with no attached nodes the empty Q() matches every
        indent — confirm callers guarantee nodes exist.
        """
        parent_path_query = Q()
        for path in self.nodes.values_list("path", flat=True):
            parent_path_query = parent_path_query | Q(
                nodes__path=path[: -GoodsNomenclatureIndentNode.steplen],
            )
        return GoodsNomenclatureIndent.objects.filter(parent_path_query)
    def save(self, *args, **kwargs):
        """Save the indent, then ensure a version group is assigned."""
        return_value = super().save(*args, **kwargs)
        if not hasattr(self, "version_group"):
            self.version_group = self._get_version_group()
        return return_value
    def __str__(self):
        return f"Goods Nomenclature Indent: {self.indent} - {self.indented_goods_nomenclature}"
class GoodsNomenclatureIndentNode(MP_Node, ValidityMixin):
    """
    Goods Nomenclature naturally falls into the structure of a hierarchical
    tree. As there is a root good e.g. "Live Animals; Animal Products" which
    then has branch nodes such as "Live animals" and "Meat and edible meat
    offal". And so on and so forth until leaf nodes are found.
    To represent this efficiently in a database a Materialized Path is used. There is some
    documentation on this here: https://django-treebeard.readthedocs.io/en/latest/mp_tree.html
    The Crux of the system is every node is given a "path" attribute. A path is constructed of
    "steps". Steps by default are 4 character blocks. The number of steps given to a node
    determine its depth. Root nodes are given a single step as a path. A child of a root node
    will have a path starting with the parents path, then with an extra step added on.
    This way queries for all child nodes are as simple as:
    .. code:: SQL
        SELECT *
        FROM table
        WHERE path LIKE "{parent_path}%";
    and a parent node query would be:
    .. code:: SQL
        SELECT *
        FROM table
        WHERE path = parent_path[:-4]
    Sadly for legacy reasons the visible codes given to goods do not well conform to this
    structure. These given ids are generally limited to 10 characters with numeric only steps
    of 2 characters each. This would naturally mean a tree can only be 5 layers deep, however
    there are instances where the tariff goes to 14 levels. As a result the step based system
    has been ignored historically. There are also cases where the same ID is given to multiple
    objects with other identifying values included (e.g. suffixes) and an entire indent table
    to represent the tree depth. This, combined with suffixes and some ordering within the item
    ID gave the actual location.
    The indent table initially looks like a good candidate. However, due to how the legacy
    system was implemented (i.e., without a tree), the legacy indents would move fluidly
    between parents without the need for an update - a feature that would be incompatible with
    an implemented tree system at this table.
    This implementation keeps a separate untracked table for tree nodes, keeping the tree entirely
    separate from the main implementation of the data system. The node holds a Foreign Key to the
    indent table, allowing the tree to be accessed through the indent. The indent then has a Foreign Key
    to the relevant Goods Nomenclature so they can be edited separately. This does away with
    the need to analyse the item id and suffix as well as the indent - as the node gives us
    an entire description of the tree and its related commodities on its own, over time.
    """
    # Coming from the legacy tracked model this model needs a new primary key.
    # Given paths are always unique in MP trees this is the best candidate for the PK.
    path = models.CharField(max_length=255, unique=True, primary_key=True)
    indent = models.ForeignKey(
        GoodsNomenclatureIndent,
        on_delete=models.PROTECT,
        related_name="nodes",
    )
    # The transaction during which this node was created (nodes themselves
    # are not tracked models).
    creating_transaction = models.ForeignKey(
        "common.Transaction",
        on_delete=models.PROTECT,
        related_name="goods_nomenclature_indent_nodes",
    )
    def get_measures(self, **kwargs):
        """Return the measures for this node's indent, or inherit them from
        the nearest ancestor whose indented good has measures.

        Returns False when neither this node nor any ancestor has measures.
        NOTE(review): the mixed return type (queryset or False) forces callers
        to truth-test the result; also ``query.first()`` is a node, which does
        not visibly define a ``measures`` attribute in this file — presumably
        ``query.first().indent.measures`` was intended. Confirm.
        """
        if self.indent.measures.exists():
            return self.indent.measures.all()
        # Walk up the tree for the closest ancestor carrying measures.
        query = self.get_ancestors().filter(
            indent__indented_goods_nomenclature__measures__isnull=False, **kwargs
        )
        if query.exists():
            return query.first().measures.all()
        return False
    def has_measure_in_tree(self):
        """True when this node, any ancestor or any descendant carries an
        indented good that has measures attached."""
        ascendant_measures = self.get_ancestors().filter(
            indent__indented_goods_nomenclature__measures__isnull=False,
        )
        descendant_measures = self.get_descendants().filter(
            indent__indented_goods_nomenclature__measures__isnull=False,
        )
        return (
            self.indent.measures.exists()
            or ascendant_measures.exists()
            or descendant_measures.exists()
        )
    def _get_restricted_valid_between(
        self,
        valid_between: TaricDateRange,
    ) -> TaricDateRange:
        """Return this node's validity range clamped so it fits inside
        *valid_between* (effectively the intersection of the two ranges)."""
        new_valid_between = self.valid_between
        # Raise the lower bound when unset or earlier than allowed.
        if not new_valid_between.lower or (
            valid_between.lower and new_valid_between.lower < valid_between.lower
        ):
            new_valid_between = TaricDateRange(
                valid_between.lower,
                new_valid_between.upper,
            )
        # Lower the upper bound when unset or later than allowed.
        if not new_valid_between.upper or (
            valid_between.upper and new_valid_between.upper > valid_between.upper
        ):
            new_valid_between = TaricDateRange(
                new_valid_between.lower,
                valid_between.upper,
            )
        return new_valid_between
    @transaction.atomic
    def copy_tree(
        self,
        parent: GoodsNomenclatureIndentNode,
        valid_between: TaricDateRange,
        transaction,
    ):
        """Recursively copy this node and its subtree beneath *parent*,
        clamping every copied node's validity to *valid_between*.

        NOTE(review): the ``transaction`` parameter shadows the imported
        ``django.db.transaction`` module inside the body (the decorator is
        bound at class-definition time, so it is unaffected) — confirm
        intentional.
        """
        new_valid_between = self._get_restricted_valid_between(valid_between)
        new_node = parent.add_child(
            indent=self.indent,
            valid_between=new_valid_between,
            creating_transaction=transaction,
        )
        for child in self.get_children():
            child.copy_tree(new_node, valid_between, transaction)
        return new_node
    @transaction.atomic
    def restrict_valid_between(self, valid_between: TaricDateRange):
        """Clamp the validity of this node and all of its descendants to
        *valid_between*, saving every affected node."""
        self.valid_between = self._get_restricted_valid_between(valid_between)
        # Children are clamped to this node's (already clamped) range.
        for child in self.get_children():
            child.restrict_valid_between(self.valid_between)
        self.save()
    def __str__(self):
        return f"path={self.path}, indent=({self.indent})"
class GoodsNomenclatureDescription(DescriptionMixin, TrackedModel):
    """A description of a goods nomenclature code, serialised as TARIC
    record 400 15 (its description period as record 400 10)."""
    record_code = "400"
    subrecord_code = "15"
    # The description *period* is serialised as a separate TARIC record.
    period_record_code = "400"
    period_subrecord_code = "10"
    objects = PolymorphicManager.from_queryset(DescriptionQueryset)()
    sid = NumericSID()
    described_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        on_delete=models.PROTECT,
        related_name="descriptions",
    )
    description = LongDescription()
    indirect_business_rules = (business_rules.NIG12,)
    class Meta:
        # Order descriptions by the date they take effect.
        ordering = ("validity_start",)
class GoodsNomenclatureOrigin(TrackedModel):
    """
    Represents a link between a newly-created GoodsNomenclature and the codes
    that previously represented it.
    This will often be the parent nomenclature code. A GoodsNomenclature can
    have multiple origins when the hierarchy has been reorganised and the new
    classification was previously covered by multiple codes.
    """
    record_code = "400"
    subrecord_code = "35"
    new_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        related_name="origin_links",
        on_delete=models.PROTECT,
    )
    derived_from_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        on_delete=models.PROTECT,
    )
    # This link has no SID of its own: it is identified by the SID pair.
    identifying_fields = (
        "new_goods_nomenclature__sid",
        "derived_from_goods_nomenclature__sid",
    )
    indirect_business_rules = (business_rules.NIG5,)
    business_rules = (business_rules.NIG7, UpdateValidity)
    def __str__(self):
        return (
            f"derived_from=({self.derived_from_goods_nomenclature}), "
            f"new=({self.new_goods_nomenclature})"
        )
class GoodsNomenclatureSuccessor(TrackedModel):
    """
    Represents a link between a end-dated GoodsNomenclature and the codes that
    have replaced it (or in TARIC parlance have "absorbed" it).
    The replacing codes cover the goods that this classification code previously
    covered.
    """
    record_code = "400"
    subrecord_code = "40"
    replaced_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        related_name="successor_links",
        on_delete=models.PROTECT,
    )
    absorbed_into_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        on_delete=models.PROTECT,
    )
    # This link has no SID of its own: it is identified by the SID pair.
    identifying_fields = (
        "replaced_goods_nomenclature__sid",
        "absorbed_into_goods_nomenclature__sid",
    )
    business_rules = (business_rules.NIG10, UpdateValidity)
    def __str__(self):
        return (
            f"replaced=({self.replaced_goods_nomenclature}), "
            f"absorbed_into=({self.absorbed_into_goods_nomenclature})"
        )
class FootnoteAssociationGoodsNomenclature(TrackedModel, ValidityMixin):
    """Associates a footnote with a goods nomenclature code for a validity
    period, serialised as TARIC record 400 20."""
    record_code = "400"
    subrecord_code = "20"
    goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        on_delete=models.PROTECT,
        related_name="footnote_associations",
    )
    associated_footnote = models.ForeignKey(
        "footnotes.Footnote",
        on_delete=models.PROTECT,
    )
    # Identified by the good's SID plus the footnote's composite id.
    identifying_fields = (
        "goods_nomenclature__sid",
        "associated_footnote__footnote_id",
        "associated_footnote__footnote_type__footnote_type_id",
    )
    business_rules = (
        business_rules.NIG18,
        business_rules.NIG22,
        business_rules.NIG23,
        business_rules.NIG24,
        UpdateValidity,
    )
| from __future__ import annotations
from dataclasses import dataclass
from typing import Set
from django.db import models
from django.db import transaction
from django.db.models import Q
from polymorphic.managers import PolymorphicManager
from treebeard.mp_tree import MP_Node
from commodities import business_rules
from commodities import validators
from commodities.querysets import GoodsNomenclatureIndentQuerySet
from common.business_rules import UpdateValidity
from common.fields import LongDescription
from common.models import NumericSID
from common.models import TrackedModel
from common.models.mixins.description import DescriptionMixin
from common.models.mixins.description import DescriptionQueryset
from common.models.mixins.validity import ValidityMixin
from common.models.mixins.validity import ValidityStartMixin
from common.util import TaricDateRange
from footnotes.validators import ApplicationCode
from measures import business_rules as measures_business_rules
@dataclass
class CommodityCode:
    """A dataclass for commodity codes with a range of convenience
    properties.

    A code is a 10-digit string made of 2-digit pairs: chapter (2 digits),
    heading (4), HS subheading (6), CN subheading (8) and the full 10-digit
    TARIC code.
    """

    code: str

    @property
    def chapter(self) -> str:
        """Returns the HS chapter for the commodity code."""
        return self.code[:2]

    @property
    def heading(self) -> str:
        """Returns the HS heading for the commodity code."""
        return self.code[:4]

    @property
    def subheading(self) -> str:
        """Returns the HS subheading for the commodity code."""
        return self.code[:6]

    @property
    def cn_subheading(self) -> str:
        """Returns the CN subheading for the commodity code."""
        return self.code[:8]

    @property
    def dot_code(self) -> str:
        """Returns the commodity code in dot format."""
        code = self.code
        return f"{code[:4]}.{code[4:6]}.{code[6:8]}.{code[8:]}"

    @property
    def trimmed_dot_code(self) -> str:
        """Returns the commodity code in dot format, without trailing zero
        pairs."""
        parts = self.dot_code.split(".")
        for i, part in enumerate(parts[::-1]):
            if part != "00":
                return ".".join(parts[: len(parts) - i])
        # Defensive fallback (the 4-char leading segment never equals "00"
        # for well-formed 10-digit codes, but avoid returning None).
        return parts[0]

    @property
    def trimmed_code(self) -> str:
        """Returns the commodity code without trailing zero pairs."""
        return self.trimmed_dot_code.replace(".", "")

    @property
    def is_chapter(self) -> bool:
        """Returns true if the commodity code represents a HS chapter."""
        # A chapter has every digit after the 2-digit chapter set to zero.
        # (The previous implementation compared trimmed_code.rstrip("0")
        # against the chapter, which wrongly returned False for chapters
        # themselves ending in zero — 10, 20, ..., 90 — so those codes
        # misclassified as headings.)
        return self.code[2:].rstrip("0") == ""

    @property
    def is_heading(self) -> bool:
        """Returns true if the commodity code represents a HS heading."""
        return self.trimmed_code == self.heading and not self.is_chapter

    @property
    def is_subheading(self) -> bool:
        """Returns true if the commodity code represents a HS subheading."""
        return self.trimmed_code == self.subheading

    @property
    def is_cn_subheading(self) -> bool:
        """Returns true if the commodity code represents a CN subheading."""
        return self.trimmed_code == self.cn_subheading

    @property
    def is_taric_subheading(self) -> bool:
        """Returns true if the commodity code represents a Taric subheading."""
        return self.trimmed_code == self.code

    @property
    def is_taric_code(self) -> bool:
        """Returns true if the last digit pair is non-zero, i.e. the code is
        specific to TARIC rather than the CN."""
        return self.code[8:] != "00"

    def __str__(self):
        """Returns a string representation of the dataclass instance."""
        return self.code
class GoodsNomenclature(TrackedModel, ValidityMixin):
    """A goods nomenclature (commodity) code with its validity period,
    serialised as TARIC record 400 00."""
    record_code = "400"
    subrecord_code = "00"
    sid = NumericSID()
    # These are character fields as they often has leading 0s
    item_id = models.CharField(
        max_length=10,
        validators=[validators.item_id_validator],
        db_index=True,
    )
    # Distinguishes multiple rows that share the same item_id.
    suffix = models.CharField(
        max_length=2,
        validators=[validators.suffix_validator],
        db_index=True,
    )
    statistical = models.BooleanField()
    # Codes this one was derived from on creation (via GoodsNomenclatureOrigin).
    origins = models.ManyToManyField(
        "self",
        through="GoodsNomenclatureOrigin",
        through_fields=("new_goods_nomenclature", "derived_from_goods_nomenclature"),
    )
    # Codes that absorbed this one when it was end-dated (via GoodsNomenclatureSuccessor).
    successors = models.ManyToManyField(
        "self",
        through="GoodsNomenclatureSuccessor",
        through_fields=(
            "replaced_goods_nomenclature",
            "absorbed_into_goods_nomenclature",
        ),
    )
    @property
    def code(self) -> CommodityCode:
        """Returns a CommodityCode instance for the good."""
        return CommodityCode(code=self.item_id)
    @property
    def footnote_application_codes(self) -> Set[ApplicationCode]:
        """Footnote application codes usable on this good: TARIC and dynamic
        footnotes always, CN footnotes only for non-TARIC-specific codes."""
        codes = {ApplicationCode.TARIC_NOMENCLATURE, ApplicationCode.DYNAMIC_FOOTNOTE}
        if not self.is_taric_code:
            codes.add(ApplicationCode.CN_NOMENCLATURE)
        return codes
    # Business rules enforced on other records that reference this one.
    indirect_business_rules = (
        business_rules.NIG10,
        business_rules.NIG18,
        business_rules.NIG2,
        business_rules.NIG22,
        business_rules.NIG7,
        measures_business_rules.ME1,
        measures_business_rules.ME7,
        measures_business_rules.ME71,
        measures_business_rules.ME88,
    )
    # Business rules enforced on this record itself.
    business_rules = (
        business_rules.NIG1,
        business_rules.NIG5,
        business_rules.NIG30,
        business_rules.NIG31,
        business_rules.NIG34,
        business_rules.NIG35,
        UpdateValidity,
    )
    class Meta:
        verbose_name = "commodity code"
    def __str__(self):
        return self.item_id
    @property
    def autocomplete_label(self):
        """Label shown in autocomplete widgets: item id plus description.
        (get_description is provided by a mixin outside this file.)"""
        return f"{self} - {self.get_description().description}"
    @property
    def dependent_measures(self):
        """Measures attached to any version of this good (matched by SID),
        approved up to this version's transaction."""
        return self.measures.model.objects.filter(
            goods_nomenclature__sid=self.sid,
        ).approved_up_to_transaction(self.transaction)
    @property
    def is_taric_code(self) -> bool:
        """True when the code is specific to TARIC (last digit pair non-zero)."""
        return self.code.is_taric_code
    def in_use(self):
        """True when at least one measure depends on this good."""
        return self.dependent_measures.exists()
class GoodsNomenclatureIndent(TrackedModel, ValidityStartMixin):
    """The hierarchy depth of a goods nomenclature code from a given start
    date, serialised as TARIC record 400 05."""
    record_code = "400"
    subrecord_code = "05"
    objects: GoodsNomenclatureIndentQuerySet = PolymorphicManager.from_queryset(
        GoodsNomenclatureIndentQuerySet,
    )()
    sid = NumericSID()
    # Depth of the indented good in the nomenclature tree.
    indent = models.PositiveIntegerField(db_index=True)
    indented_goods_nomenclature = models.ForeignKey(
        GoodsNomenclature,
        on_delete=models.PROTECT,
        related_name="indents",
    )
    indirect_business_rules = (business_rules.NIG11,)
    business_rules = (business_rules.NIG2, UpdateValidity)
    # The validity start date must fall within the indented good's validity.
    validity_over = "indented_goods_nomenclature"
    def get_parent_indents(self):
        """Return the indents attached to the tree parents of this indent's
        nodes.

        A parent's materialized path is a node's path minus its final step,
        so the filter ORs one path lookup per attached node.
        NOTE(review): with no attached nodes the empty Q() matches every
        indent — confirm callers guarantee nodes exist.
        """
        parent_path_query = Q()
        for path in self.nodes.values_list("path", flat=True):
            parent_path_query = parent_path_query | Q(
                nodes__path=path[: -GoodsNomenclatureIndentNode.steplen],
            )
        return GoodsNomenclatureIndent.objects.filter(parent_path_query)
    def save(self, *args, **kwargs):
        """Save the indent, then ensure a version group is assigned."""
        return_value = super().save(*args, **kwargs)
        if not hasattr(self, "version_group"):
            self.version_group = self._get_version_group()
        return return_value
    def __str__(self):
        return f"Goods Nomenclature Indent: {self.indent} - {self.indented_goods_nomenclature}"
class GoodsNomenclatureIndentNode(MP_Node, ValidityMixin):
"""
Goods Nomenclature naturally falls into the structure of a hierarchical
tree. As there is a root good e.g. "Live Animals; Animal Products" which
then has branch nodes such as "Live animals" and "Meat and edible meat
offal". And so on and so forth until leaf nodes are found.
To represent this efficiently in a database a Materialized Path is used. There is some
documentation on this here: https://django-treebeard.readthedocs.io/en/latest/mp_tree.html
The Crux of the system is every node is given a "path" attribute. A path is constructed of
"steps". Steps by default are 4 character blocks. The number of steps given to a node
determine its depth. Root nodes are given a single step as a path. A child of a root node
will have a path starting with the parents path, then with an extra step added on.
This way queries for all child nodes are as simple as:
.. code:: SQL
SELECT *
FROM table
WHERE path LIKE "{parent_path}%";
and a parent node query would be:
.. code:: SQL
SELECT *
FROM table
WHERE path = parent_path[:-4]
Sadly for legacy reasons the visible codes given to goods do not well conform to this
structure. These given ids are generally limited to 10 characters with numeric only steps
of 2 characters each. This would naturally mean a tree can only be 5 layers deep, however
there are instances where the tariff goes to 14 levels. As a result the step based system
has been ignored historically. There are also cases where the same ID is given to multiple
objects with other identifying values included (e.g. suffixes) and an entire indent table
to represent the tree depth. This, combined with suffixes and some ordering within the item
ID gave the actual location.
The indent table initially looks like a good candidate. However, due to how the legacy
system was implemented (i.e., without a tree), the legacy indents would move fluidly
between parents without the need for an update - a feature that would be incompatible with
an implemented tree system at this table.
This implementation keeps a separate untracked table for tree nodes, keeping the tree entirely
separate from the main implementation of the data system. The node holds a Foreign Key to the
indent table, allowing the tree to be accessed through the indent. The indent then has a Foreign Key
to the relevant Goods Nomenclature so they can be edited separately. This does away with
the need to analyse the item id and suffix as well as the indent - as the node gives us
an entire description of the tree and its related commodities on its own, over time.
"""
# Coming from the legacy tracked model this model needs a new primary key.
# Given paths are always unique in MP trees this is the best candidate for the PK.
path = models.CharField(max_length=255, unique=True, primary_key=True)
indent = models.ForeignKey(
GoodsNomenclatureIndent,
on_delete=models.PROTECT,
related_name="nodes",
)
creating_transaction = models.ForeignKey(
"common.Transaction",
on_delete=models.PROTECT,
related_name="goods_nomenclature_indent_nodes",
)
def get_measures(self, **kwargs):
if self.indent.measures.exists():
return self.indent.measures.all()
query = self.get_ancestors().filter(
indent__indented_goods_nomenclature__measures__isnull=False, **kwargs
)
if query.exists():
return query.first().measures.all()
return False
def has_measure_in_tree(self):
ascendant_measures = self.get_ancestors().filter(
indent__indented_goods_nomenclature__measures__isnull=False,
)
descendant_measures = self.get_descendants().filter(
indent__indented_goods_nomenclature__measures__isnull=False,
)
return (
self.indent.measures.exists()
or ascendant_measures.exists()
or descendant_measures.exists()
)
def _get_restricted_valid_between(
self,
valid_between: TaricDateRange,
) -> TaricDateRange:
new_valid_between = self.valid_between
if not new_valid_between.lower or (
valid_between.lower and new_valid_between.lower < valid_between.lower
):
new_valid_between = TaricDateRange(
valid_between.lower,
new_valid_between.upper,
)
if not new_valid_between.upper or (
valid_between.upper and new_valid_between.upper > valid_between.upper
):
new_valid_between = TaricDateRange(
new_valid_between.lower,
valid_between.upper,
)
return new_valid_between
@transaction.atomic
def copy_tree(
self,
parent: GoodsNomenclatureIndentNode,
valid_between: TaricDateRange,
transaction,
):
new_valid_between = self._get_restricted_valid_between(valid_between)
new_node = parent.add_child(
indent=self.indent,
valid_between=new_valid_between,
creating_transaction=transaction,
)
for child in self.get_children():
child.copy_tree(new_node, valid_between, transaction)
return new_node
@transaction.atomic
def restrict_valid_between(self, valid_between: TaricDateRange):
self.valid_between = self._get_restricted_valid_between(valid_between)
for child in self.get_children():
child.restrict_valid_between(self.valid_between)
self.save()
def __str__(self):
return f"path={self.path}, indent=({self.indent})"
class GoodsNomenclatureDescription(DescriptionMixin, TrackedModel):
record_code = "400"
subrecord_code = "15"
period_record_code = "400"
period_subrecord_code = "10"
objects = PolymorphicManager.from_queryset(DescriptionQueryset)()
sid = NumericSID()
described_goods_nomenclature = models.ForeignKey(
GoodsNomenclature,
on_delete=models.PROTECT,
related_name="descriptions",
)
description = LongDescription()
indirect_business_rules = (business_rules.NIG12,)
class Meta:
ordering = ("validity_start",)
class GoodsNomenclatureOrigin(TrackedModel):
"""
Represents a link between a newly-created GoodsNomenclature and the codes
that previously represented it.
This will often be the parent nomenclature code. A GoodsNomenclature can
have multiple origins when the hierarchy has been reorganised and the new
classification was previously covered by multiple codes.
"""
record_code = "400"
subrecord_code = "35"
new_goods_nomenclature = models.ForeignKey(
GoodsNomenclature,
related_name="origin_links",
on_delete=models.PROTECT,
)
derived_from_goods_nomenclature = models.ForeignKey(
GoodsNomenclature,
on_delete=models.PROTECT,
)
identifying_fields = (
"new_goods_nomenclature__sid",
"derived_from_goods_nomenclature__sid",
)
indirect_business_rules = (business_rules.NIG5,)
business_rules = (business_rules.NIG7, UpdateValidity)
def __str__(self):
return (
f"derived_from=({self.derived_from_goods_nomenclature}), "
f"new=({self.new_goods_nomenclature})"
)
class GoodsNomenclatureSuccessor(TrackedModel):
"""
Represents a link between a end-dated GoodsNomenclature and the codes that
have replaced it (or in TARIC parlance have "absorbed" it).
The replacing codes cover the goods that this classification code previously
covered.
"""
record_code = "400"
subrecord_code = "40"
replaced_goods_nomenclature = models.ForeignKey(
GoodsNomenclature,
related_name="successor_links",
on_delete=models.PROTECT,
)
absorbed_into_goods_nomenclature = models.ForeignKey(
GoodsNomenclature,
on_delete=models.PROTECT,
)
identifying_fields = (
"replaced_goods_nomenclature__sid",
"absorbed_into_goods_nomenclature__sid",
)
business_rules = (business_rules.NIG10, UpdateValidity)
def __str__(self):
return (
f"replaced=({self.replaced_goods_nomenclature}), "
f"absorbed_into=({self.absorbed_into_goods_nomenclature})"
)
class FootnoteAssociationGoodsNomenclature(TrackedModel, ValidityMixin):
record_code = "400"
subrecord_code = "20"
goods_nomenclature = models.ForeignKey(
GoodsNomenclature,
on_delete=models.PROTECT,
related_name="footnote_associations",
)
associated_footnote = models.ForeignKey(
"footnotes.Footnote",
on_delete=models.PROTECT,
)
identifying_fields = (
"goods_nomenclature__sid",
"associated_footnote__footnote_id",
"associated_footnote__footnote_type__footnote_type_id",
)
business_rules = (
business_rules.NIG18,
business_rules.NIG22,
business_rules.NIG23,
business_rules.NIG24,
UpdateValidity,
) | en | 0.907509 | A dataclass for commodity codes with a range of convenience properties. Returns the HS chapter for the commodity code. Returns the HS heading for the commodity code. Returns the HS subheading for the commodity code. Returns the CN subheading for the commodity code. Returns the commodity code in dot format. Returns the commodity code in dot format, without trailing zero pairs. Returns the commodity code without trailing zero pairs. Returns true if the commodity code represents a HS chapter. Returns true if the commodity code represents a HS heading. Returns true if the commodity code represents a HS subheading. Returns true if the commodity code represents a CN subheading. Returns true if the commodity code represents a Taric subheading. Returns a string representation of the dataclass instance. # These are character fields as they often has leading 0s Returns a CommodityCode instance for the good. Goods Nomenclature naturally falls into the structure of a hierarchical tree. As there is a root good e.g. "Live Animals; Animal Products" which then has branch nodes such as "Live animals" and "Meat and edible meat offal". And so on and so forth until leaf nodes are found. To represent this efficiently in a database a Materialized Path is used. There is some documentation on this here: https://django-treebeard.readthedocs.io/en/latest/mp_tree.html The Crux of the system is every node is given a "path" attribute. A path is constructed of "steps". Steps by default are 4 character blocks. The number of steps given to a node determine its depth. Root nodes are given a single step as a path. A child of a root node will have a path starting with the parents path, then with an extra step added on. This way queries for all child nodes are as simple as: .. code:: SQL SELECT * FROM table WHERE path LIKE "{parent_path}%"; and a parent node query would be: .. 
code:: SQL SELECT * FROM table WHERE path = parent_path[:-4] Sadly for legacy reasons the visible codes given to goods do not well conform to this structure. These given ids are generally limited to 10 characters with numeric only steps of 2 characters each. This would naturally mean a tree can only be 5 layers deep, however there are instances where the tariff goes to 14 levels. As a result the step based system has been ignored historically. There are also cases where the same ID is given to multiple objects with other identifying values included (e.g. suffixes) and an entire indent table to represent the tree depth. This, combined with suffixes and some ordering within the item ID gave the actual location. The indent table initially looks like a good candidate. However, due to how the legacy system was implemented (i.e., without a tree), the legacy indents would move fluidly between parents without the need for an update - a feature that would be incompatible with an implemented tree system at this table. This implementation keeps a separate untracked table for tree nodes, keeping the tree entirely separate from the main implementation of the data system. The node holds a Foreign Key to the indent table, allowing the tree to be accessed through the indent. The indent then has a Foreign Key to the relevant Goods Nomenclature so they can be edited separately. This does away with the need to analyse the item id and suffix as well as the indent - as the node gives us an entire description of the tree and its related commodities on its own, over time. # Coming from the legacy tracked model this model needs a new primary key. # Given paths are always unique in MP trees this is the best candidate for the PK. Represents a link between a newly-created GoodsNomenclature and the codes that previously represented it. This will often be the parent nomenclature code. 
A GoodsNomenclature can have multiple origins when the hierarchy has been reorganised and the new classification was previously covered by multiple codes. Represents a link between a end-dated GoodsNomenclature and the codes that have replaced it (or in TARIC parlance have "absorbed" it). The replacing codes cover the goods that this classification code previously covered. | 2.101476 | 2 |
i2c_rip.py | mikeroslikov/BLIMP | 0 | 6621819 | <filename>i2c_rip.py
import subprocess
import re
import time
data1=""
data2=""
for i in range(0,256):
data1+=str(subprocess.check_output("i2cget -y 1 0x1e "+hex(i)+" w",stderr=subprocess.STDOUT, shell = True))
time.sleep(1)
for i in range(0,256):
data2+=str(subprocess.check_output("i2cget -y 1 0x1e "+hex(i)+" w",stderr=subprocess.STDOUT, shell = True))
time.sleep(1)
a1=re.findall(r"0x....", data1)
a2=re.findall(r"0x....", data2)
for j in range(0,len(a1)):
if a1[j] != a2[j]:
print(str(j)) | <filename>i2c_rip.py
import subprocess
import re
import time
data1=""
data2=""
for i in range(0,256):
data1+=str(subprocess.check_output("i2cget -y 1 0x1e "+hex(i)+" w",stderr=subprocess.STDOUT, shell = True))
time.sleep(1)
for i in range(0,256):
data2+=str(subprocess.check_output("i2cget -y 1 0x1e "+hex(i)+" w",stderr=subprocess.STDOUT, shell = True))
time.sleep(1)
a1=re.findall(r"0x....", data1)
a2=re.findall(r"0x....", data2)
for j in range(0,len(a1)):
if a1[j] != a2[j]:
print(str(j)) | none | 1 | 2.347718 | 2 | |
config.py | samuel930930/Dual_Manifold_Adversarial_Training | 4 | 6621820 | <gh_stars>1-10
class DotDict(dict):
def __getattr__(self, name):
value = self[name]
if isinstance(value, dict):
value = DotDict(value)
return value
if __name__ == '__main__':
import yaml
config_file = './invGAN.yml'
with open(config_file) as f:
config = DotDict(yaml.load(f))
print(config)
print(config.generator)
print(config.generator.type)
print(config.generator.norm.bn)
print(config.optimizer.type)
#print(config.generator.a) | class DotDict(dict):
def __getattr__(self, name):
value = self[name]
if isinstance(value, dict):
value = DotDict(value)
return value
if __name__ == '__main__':
import yaml
config_file = './invGAN.yml'
with open(config_file) as f:
config = DotDict(yaml.load(f))
print(config)
print(config.generator)
print(config.generator.type)
print(config.generator.norm.bn)
print(config.optimizer.type)
#print(config.generator.a) | es | 0.172809 | #print(config.generator.a) | 2.60649 | 3 |
dive/kgp.py | abos5/pythontutor | 0 | 6621821 | <gh_stars>0
"""Kant Generator for Python
Generates mock philosophy based on a context-free grammar
Usage: python kgp.py [options] [source]
Options:
-g ..., --grammar=... use specified grammar file or URL
-h, --help show this help
-d show debugging information while parsing
Examples:
kgp.py generates several paragraphs of Kantian philosophy
kgp.py -g husserl.xml generates several paragraphs of Husserl
kpg.py "<xref id='paragraph'/>" generates a paragraph of Kant
kgp.py template.xml reads from template.xml to decide what to generate
"""
from xml.dom import minidom
import random
import toolbox
import sys
import getopt
_debug = 0
class NoSourceError(Exception):
pass
class KantGenerator(object):
""" generates mock philosophy based on a context-free grammar"""
def __init__(self, grammar, source=None):
self.loadGrammar(grammar)
self.loadSource(source and source or self.getDefaultSource())
self.refresh()
def _load(self, source):
"""load XML input source, return parsed XML document
- a URL of a remote XML file("http://diveintopython.org/kant.xml")
- a filename of a local XML file ("path/to/kant.xml")
- standard input("-")
- the actual XML document, as a string
"""
sock = toolbox.openAnything(source)
xmldoc = minidom.parse(sock).documentElement
sock.close()
return xmldoc
def loadGrammar(self, grammar):
"""load context-free grammar"""
self.grammar = self._load(grammar)
self.refs = {}
for ref in self.grammar.getElementsByTagName('ref'):
self.refs[ref.attributes["id"].value] = ref
def loadSource(self, source):
self.source = self._load(source)
def getDefaultSource(self):
"""guess defualt source of the current grammar"""
xrefs = {}
for xref in self.grammar.getElementsByTagName("xrefs"):
xrefs[xref.attributes["id"].value] = 1
xrefs = xrefs.keys()
standaloneXrefs = [e for e in self.refs.keys() if e not in xrefs]
print(standaloneXrefs)
if not standaloneXrefs:
raise(NoSourceError, "can't guess source, and no source specified")
return '<xref id="%s"/>' % random.choice(standaloneXrefs)
def reset(self):
"""reset parser"""
self.pieces = []
self.capitalizeNextWord = 0
def refresh(self):
"""reset output buffer, re-parse entire source file, and return output
"""
self.reset()
self.parse(self.source)
return self.output()
def output(self):
"""output generated text"""
return "".join(self.pieces)
def randomChildElement(self, node):
choices = [e for e in node.childNodes if e.nodeType == e.ELEMENT_NODE]
chosen = random.choice(choices)
self.debug_log('%s available choices: %s\n' % (
len(choices), [e.toxml() for e in choices]))
self.debug_log('Chosen: %s\n' % chosen.toxml())
return chosen
def debug_log(self, msg):
if _debug:
sys.stderr.write(msg)
def parse(self, node):
parseMethod = getattr(self, "parse_%s" % node.__class__.__name__)
parseMethod(node)
def parse_Document(self, node):
self.parse(node.documentElement)
def parse_Text(self, node):
text = node.data
if self.capitalizeNextWord:
self.pieces.append(text[0].upper())
self.pieces.append(text[1:])
self.capitalizeNextWord = 0
else:
self.pieces.append(text)
def parse_Element(self, node):
handlerMethod = getattr(self, 'do_%s' % node.tagName)
handlerMethod(node)
def parse_Comment(self, node):
pass
def parse_NoneType(self, node):
pass
def do_xref(self, node):
"""handle <xref id='...'> tag
An <xref id='...'> tag is a cross-reference to a <ref id='...'>
tag. <xref id='sentence'/> evaluates to a randomly chosen child of
<ref id='sentence'>.
"""
id = node.attributes['id'].value
self.parse(self.randomChildElement(self.refs[id]))
def do_p(self, node):
"""handle <p> tag
The <p> tag is the core of the grammar. It can contain almost
anything: freeform text, <choice> tags, <xref> tags, even other
<p> tags. If a "class='sentence'" attribute is found, a flag
is set and the next word will be capitalized. If a "chance='X'"
attribute is found, there is an X% chance that the tag will be
evaluated (and therefore a (100-X)% chance that it will be
completely ignored)
"""
keys = node.attributes.keys()
if "class" in keys:
if node.attributes["class"].value == "sentence":
self.capitalizeNextWord = 1
if "chance" in keys:
chance = int(node.attributes['chance'].value)
doit = (chance > random.randrange(100))
else:
doit = 1
if doit:
for child in node.childNodes:
self.parse(child)
def do_choice(self, node):
self.parse(self.randomChildElement)
def usage():
print(__doc__)
def main(argv):
grammar = "kant.xml"
try:
opts, args = getopt.getopt(argv, "hg:d", ["help", "grammar="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "-help"):
usage()
sys.exit()
elif opt == '-d':
global _debug
_debug = 1
elif opt in ("-g", "-grammar"):
grammar = arg
source = "".join(args)
k = KantGenerator(grammar, source)
print(k.output())
if __name__ == "__main__":
main(["-gkant.xml", 'kant_source.xml'])
# main(sys.argv[1:])
# print(sys.argv[1:])
# end of file
| """Kant Generator for Python
Generates mock philosophy based on a context-free grammar
Usage: python kgp.py [options] [source]
Options:
-g ..., --grammar=... use specified grammar file or URL
-h, --help show this help
-d show debugging information while parsing
Examples:
kgp.py generates several paragraphs of Kantian philosophy
kgp.py -g husserl.xml generates several paragraphs of Husserl
kpg.py "<xref id='paragraph'/>" generates a paragraph of Kant
kgp.py template.xml reads from template.xml to decide what to generate
"""
from xml.dom import minidom
import random
import toolbox
import sys
import getopt
_debug = 0
class NoSourceError(Exception):
pass
class KantGenerator(object):
""" generates mock philosophy based on a context-free grammar"""
def __init__(self, grammar, source=None):
self.loadGrammar(grammar)
self.loadSource(source and source or self.getDefaultSource())
self.refresh()
def _load(self, source):
"""load XML input source, return parsed XML document
- a URL of a remote XML file("http://diveintopython.org/kant.xml")
- a filename of a local XML file ("path/to/kant.xml")
- standard input("-")
- the actual XML document, as a string
"""
sock = toolbox.openAnything(source)
xmldoc = minidom.parse(sock).documentElement
sock.close()
return xmldoc
def loadGrammar(self, grammar):
"""load context-free grammar"""
self.grammar = self._load(grammar)
self.refs = {}
for ref in self.grammar.getElementsByTagName('ref'):
self.refs[ref.attributes["id"].value] = ref
def loadSource(self, source):
self.source = self._load(source)
def getDefaultSource(self):
"""guess defualt source of the current grammar"""
xrefs = {}
for xref in self.grammar.getElementsByTagName("xrefs"):
xrefs[xref.attributes["id"].value] = 1
xrefs = xrefs.keys()
standaloneXrefs = [e for e in self.refs.keys() if e not in xrefs]
print(standaloneXrefs)
if not standaloneXrefs:
raise(NoSourceError, "can't guess source, and no source specified")
return '<xref id="%s"/>' % random.choice(standaloneXrefs)
def reset(self):
"""reset parser"""
self.pieces = []
self.capitalizeNextWord = 0
def refresh(self):
"""reset output buffer, re-parse entire source file, and return output
"""
self.reset()
self.parse(self.source)
return self.output()
def output(self):
"""output generated text"""
return "".join(self.pieces)
def randomChildElement(self, node):
choices = [e for e in node.childNodes if e.nodeType == e.ELEMENT_NODE]
chosen = random.choice(choices)
self.debug_log('%s available choices: %s\n' % (
len(choices), [e.toxml() for e in choices]))
self.debug_log('Chosen: %s\n' % chosen.toxml())
return chosen
def debug_log(self, msg):
if _debug:
sys.stderr.write(msg)
def parse(self, node):
parseMethod = getattr(self, "parse_%s" % node.__class__.__name__)
parseMethod(node)
def parse_Document(self, node):
self.parse(node.documentElement)
def parse_Text(self, node):
text = node.data
if self.capitalizeNextWord:
self.pieces.append(text[0].upper())
self.pieces.append(text[1:])
self.capitalizeNextWord = 0
else:
self.pieces.append(text)
def parse_Element(self, node):
handlerMethod = getattr(self, 'do_%s' % node.tagName)
handlerMethod(node)
def parse_Comment(self, node):
pass
def parse_NoneType(self, node):
pass
def do_xref(self, node):
"""handle <xref id='...'> tag
An <xref id='...'> tag is a cross-reference to a <ref id='...'>
tag. <xref id='sentence'/> evaluates to a randomly chosen child of
<ref id='sentence'>.
"""
id = node.attributes['id'].value
self.parse(self.randomChildElement(self.refs[id]))
def do_p(self, node):
"""handle <p> tag
The <p> tag is the core of the grammar. It can contain almost
anything: freeform text, <choice> tags, <xref> tags, even other
<p> tags. If a "class='sentence'" attribute is found, a flag
is set and the next word will be capitalized. If a "chance='X'"
attribute is found, there is an X% chance that the tag will be
evaluated (and therefore a (100-X)% chance that it will be
completely ignored)
"""
keys = node.attributes.keys()
if "class" in keys:
if node.attributes["class"].value == "sentence":
self.capitalizeNextWord = 1
if "chance" in keys:
chance = int(node.attributes['chance'].value)
doit = (chance > random.randrange(100))
else:
doit = 1
if doit:
for child in node.childNodes:
self.parse(child)
def do_choice(self, node):
self.parse(self.randomChildElement)
def usage():
print(__doc__)
def main(argv):
grammar = "kant.xml"
try:
opts, args = getopt.getopt(argv, "hg:d", ["help", "grammar="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "-help"):
usage()
sys.exit()
elif opt == '-d':
global _debug
_debug = 1
elif opt in ("-g", "-grammar"):
grammar = arg
source = "".join(args)
k = KantGenerator(grammar, source)
print(k.output())
if __name__ == "__main__":
main(["-gkant.xml", 'kant_source.xml'])
# main(sys.argv[1:])
# print(sys.argv[1:])
# end of file | en | 0.581286 | Kant Generator for Python Generates mock philosophy based on a context-free grammar Usage: python kgp.py [options] [source] Options: -g ..., --grammar=... use specified grammar file or URL -h, --help show this help -d show debugging information while parsing Examples: kgp.py generates several paragraphs of Kantian philosophy kgp.py -g husserl.xml generates several paragraphs of Husserl kpg.py "<xref id='paragraph'/>" generates a paragraph of Kant kgp.py template.xml reads from template.xml to decide what to generate generates mock philosophy based on a context-free grammar load XML input source, return parsed XML document - a URL of a remote XML file("http://diveintopython.org/kant.xml") - a filename of a local XML file ("path/to/kant.xml") - standard input("-") - the actual XML document, as a string load context-free grammar guess defualt source of the current grammar reset parser reset output buffer, re-parse entire source file, and return output output generated text handle <xref id='...'> tag An <xref id='...'> tag is a cross-reference to a <ref id='...'> tag. <xref id='sentence'/> evaluates to a randomly chosen child of <ref id='sentence'>. handle <p> tag The <p> tag is the core of the grammar. It can contain almost anything: freeform text, <choice> tags, <xref> tags, even other <p> tags. If a "class='sentence'" attribute is found, a flag is set and the next word will be capitalized. If a "chance='X'" attribute is found, there is an X% chance that the tag will be evaluated (and therefore a (100-X)% chance that it will be completely ignored) # main(sys.argv[1:]) # print(sys.argv[1:]) # end of file | 3.001174 | 3 |
setup.py | Vivikar/jooble_se | 1 | 6621822 | from google_drive_downloader import GoogleDriveDownloader as gdd
import os
import config_global as config
if not exit(os.path.join(config.index_dir)):
os.makedirs(os.path.join(config.index_dir))
gdd.download_file_from_google_drive(file_id='1fExvkfef61ADTZ8TVkdNIBlhAmznX_YK',
dest_path=os.path.join(config.index_dir, "documents_id.json"),
unzip=False)
gdd.download_file_from_google_drive(file_id='1Dws329i0tkGDj5FJq7xBrVDb8K0G_j-h',
dest_path=os.path.join(config.index_dir, "inverted_index.json"),
unzip=False)
gdd.download_file_from_google_drive(file_id='1Kwq_L4UnHs-hMQQU-I-KmF1ngSwZ43tM',
dest_path=os.path.join(config.index_dir, "vectorizer_tfidf.dat"),
unzip=False)
gdd.download_file_from_google_drive(file_id='1mzb24qwiKOOrs_H6gYX9QqTX5ZCzSKby',
dest_path=os.path.join(config.index_dir, "forward_index.json"),
unzip=False)
| from google_drive_downloader import GoogleDriveDownloader as gdd
import os
import config_global as config
if not exit(os.path.join(config.index_dir)):
os.makedirs(os.path.join(config.index_dir))
gdd.download_file_from_google_drive(file_id='1fExvkfef61ADTZ8TVkdNIBlhAmznX_YK',
dest_path=os.path.join(config.index_dir, "documents_id.json"),
unzip=False)
gdd.download_file_from_google_drive(file_id='1Dws329i0tkGDj5FJq7xBrVDb8K0G_j-h',
dest_path=os.path.join(config.index_dir, "inverted_index.json"),
unzip=False)
gdd.download_file_from_google_drive(file_id='1Kwq_L4UnHs-hMQQU-I-KmF1ngSwZ43tM',
dest_path=os.path.join(config.index_dir, "vectorizer_tfidf.dat"),
unzip=False)
gdd.download_file_from_google_drive(file_id='1mzb24qwiKOOrs_H6gYX9QqTX5ZCzSKby',
dest_path=os.path.join(config.index_dir, "forward_index.json"),
unzip=False)
| none | 1 | 2.700909 | 3 | |
app.py | ShubheshDixit/atnogames | 0 | 6621823 | from flask import Flask, render_template, jsonify, request
import json, os
from get_data import get_game_details
app = Flask(__name__, template_folder='./templates/', static_folder='./static/')
@app.route('/')
def hello_world():
return render_template("index.html")
@app.route('/games')
def get_games():
with open('game_info.json', 'r') as f:
data = json.load(f)
return render_template('games_view.html', pages = data['game_pages'])
@app.route('/details')
def get_data():
link = request.args.get('url')
with open('game_info.json', 'r') as f:
data = json.load(f)
pages = data['game_pages']
for page in pages:
g_list = page['games_list']
for game in g_list:
g_link = game['link']
meta = game['meta_details']
if g_link == link:
return jsonify(meta)
if __name__ == '__main__':
app.run(host='192.168.1.16', port=5000) | from flask import Flask, render_template, jsonify, request
import json, os
from get_data import get_game_details
app = Flask(__name__, template_folder='./templates/', static_folder='./static/')
@app.route('/')
def hello_world():
return render_template("index.html")
@app.route('/games')
def get_games():
with open('game_info.json', 'r') as f:
data = json.load(f)
return render_template('games_view.html', pages = data['game_pages'])
@app.route('/details')
def get_data():
link = request.args.get('url')
with open('game_info.json', 'r') as f:
data = json.load(f)
pages = data['game_pages']
for page in pages:
g_list = page['games_list']
for game in g_list:
g_link = game['link']
meta = game['meta_details']
if g_link == link:
return jsonify(meta)
if __name__ == '__main__':
app.run(host='192.168.1.16', port=5000) | none | 1 | 2.742627 | 3 | |
ontask/workflow/ops.py | LucasFranciscoCorreia/ontask_b | 0 | 6621824 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Functions to perform various operations in a workflow."""
import copy
from typing import List, Optional
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext, ugettext_lazy as _
from ontask import create_new_name
from ontask.dataops.pandas import load_table
from ontask.dataops.sql import (
add_column_to_db, copy_column_in_db, df_drop_column, get_rows,
is_column_unique,
)
from ontask.models import Column, Condition, Log, Workflow
RANDOM_PWD_LENGTH = 50
def workflow_delete_column(
workflow: Workflow,
column: Column,
cond_to_delete: Optional[List[Condition]] = None,
):
"""Remove column from ontask.workflow.
Given a workflow and a column, removes it from the workflow (and the
corresponding data frame
:param workflow: Workflow object
:param column: Column object to delete
:param cond_to_delete: List of conditions to delete after removing the
column
:return: Nothing. Effect reflected in the database
"""
# Drop the column from the DB table storing the data frame
df_drop_column(workflow.get_data_frame_table_name(), column.name)
# Reposition the columns above the one being deleted
workflow.reposition_columns(column.position, workflow.ncols + 1)
# Delete the column
column.delete()
# Update the information in the workflow
workflow.ncols = workflow.ncols - 1
workflow.save()
if not cond_to_delete:
# The conditions to delete are not given, so calculate them
# Get the conditions/actions attached to this workflow
cond_to_delete = [
cond for cond in Condition.objects.filter(
action__workflow=workflow,
)
if column in cond.columns.all()]
# If a column disappears, the conditions that contain that variable
# are removed
actions_without_filters = []
for condition in cond_to_delete:
if condition.is_filter:
actions_without_filters.append(condition.action)
# Formula has the name of the deleted column. Delete it
condition.delete()
# Traverse the actions for which the filter has been deleted and reassess
# all their conditions
# TODO: Explore how to do this asynchronously (or lazy)
map(lambda act: act.update_n_rows_selected(), actions_without_filters)
# If a column disappears, the views that contain only that column need to
# disappear as well as they are no longer relevant.
for view in workflow.views.all():
if view.columns.count() == 0:
view.delete()
def workflow_restrict_column(column: Column) -> Optional[str]:
"""Set category of the column to the existing set of values.
Given a workflow and a column, modifies the column so that only the
values already present are allowed for future updates.
:param column: Column object to restrict
:return: String with error or None if correct
"""
# Load the data frame
data_frame = load_table(
column.workflow.get_data_frame_table_name())
cat_values = set(data_frame[column.name].dropna())
if not cat_values:
# Column has no meaningful values. Nothing to do.
return _('Column has no meaningful values')
# Set categories
column.set_categories(list(cat_values))
column.save()
# Re-evaluate the operands in the workflow
column.workflow.set_query_builder_ops()
column.workflow.save()
# Correct execution
return None
def do_workflow_update_lusers(workflow: Workflow, log_item: Log):
    """Recalculate the ``lusers`` relation of the workflow.

    Rebuild the elements in the field lusers of the workflow based on the
    email addresses stored in the column pointed to by
    ``luser_email_column``. Users that do not exist yet are created.

    :param workflow: Workflow to update
    :param log_item: Log where to leave the status of the operation
    :return: Changes in the lusers ManyToMany relationships
    """
    # Get the column content (one row per learner email).
    emails = get_rows(
        workflow.get_data_frame_table_name(),
        column_names=[workflow.luser_email_column.name])
    luser_list = []
    created = 0
    for row in emails:
        uemail = row[workflow.luser_email_column.name]
        # Look for an existing account with this email.
        luser = get_user_model().objects.filter(email=uemail).first()
        if not luser:
            # Create user on the fly.
            if settings.DEBUG:
                # Define users with the same password in development
                password = '<PASSWORD>'  # NOQA
            else:
                # Production: each new account gets a random password.
                password = get_random_string(length=RANDOM_PWD_LENGTH)
            luser = get_user_model().objects.create_user(
                email=uemail,
                password=password,
            )
            created += 1
        luser_list.append(luser)
    # Replace the relation content with the freshly computed list.
    workflow.lusers.set(luser_list)
    # Report status in the log payload.
    log_item.payload['total_users'] = emails.rowcount
    log_item.payload['new_users'] = created
    log_item.payload['status'] = ugettext(
        'Learner emails successfully updated.',
    )
    log_item.save()
def do_clone_column_only(
    column: Column,
    new_workflow: Optional[Workflow] = None,
    new_name: Optional[str] = None,
) -> Column:
    """Create and persist a copy of the given column.

    :param column: Column to duplicate.
    :param new_workflow: Workflow to attach the copy to (defaults to the
        original column's workflow).
    :param new_name: Name for the copy (defaults to the original name).
    :result: The newly created Column object.
    """
    # Fall back to the source column's own name/workflow when not given.
    target_name = column.name if new_name is None else new_name
    target_workflow = column.workflow if new_workflow is None else new_workflow

    cloned = Column(
        name=target_name,
        description_text=column.description_text,
        workflow=target_workflow,
        data_type=column.data_type,
        is_key=column.is_key,
        position=column.position,
        in_viz=column.in_viz,
        # Deep copy so later edits to the clone's categories do not leak
        # back into the original column.
        categories=copy.deepcopy(column.categories),
        active_from=column.active_from,
        active_to=column.active_to,
    )
    cloned.save()
    return cloned
def clone_wf_column(column: Column) -> Column:
    """Create a clone of a column.

    :param column: Object to clone
    :return: Cloned object (the data frame gains an additional column too)
    """
    workflow = column.workflow
    # Clone the model object under a fresh, non-clashing name.
    new_column = do_clone_column_only(
        column,
        new_name=create_new_name(column.name, workflow.columns))
    # Update the number of columns in the workflow
    workflow.ncols += 1
    workflow.save()
    workflow.refresh_from_db()
    # Reposition the new column at the end
    new_column.position = workflow.ncols
    new_column.save()
    # Create the column in the database table storing the data frame
    add_column_to_db(
        workflow.get_data_frame_table_name(),
        new_column.name,
        new_column.data_type)
    # Copy the data of the original column into the new one.
    copy_column_in_db(
        workflow.get_data_frame_table_name(),
        column.name,
        new_column.name)
    return new_column
def check_key_columns(workflow: Workflow):
    """Check that key columns maintain their uniqueness property.

    Used to verify that after changes in the DB the columns flagged as
    keys still hold unique values.

    :param workflow: Object to use for the verification.
    :return: Nothing. Raise exception if a key column lost the property.
    """
    # First key column (if any) whose values are no longer unique.
    col_name = next(
        (col.name for col in workflow.columns.filter(is_key=True)
         if not is_column_unique(
             workflow.get_data_frame_table_name(), col.name)),
        None)
    if col_name:
        # BUG FIX: previously .format() was applied to the second literal
        # before concatenation, so the column name was embedded in the
        # msgid passed to _() and the string could never match the
        # translation catalogue. Translate the template first, then format.
        raise Exception(_(
            'The new data does not preserve the key '
            'property of column "{0}"').format(col_name))
| # -*- coding: utf-8 -*-
"""Functions to perform various operations in a workflow."""
import copy
from typing import List, Optional
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext, ugettext_lazy as _
from ontask import create_new_name
from ontask.dataops.pandas import load_table
from ontask.dataops.sql import (
add_column_to_db, copy_column_in_db, df_drop_column, get_rows,
is_column_unique,
)
from ontask.models import Column, Condition, Log, Workflow
RANDOM_PWD_LENGTH = 50
def workflow_delete_column(
    workflow: Workflow,
    column: Column,
    cond_to_delete: Optional[List[Condition]] = None,
):
    """Remove column from ontask.workflow.

    Given a workflow and a column, removes it from the workflow (and the
    corresponding data frame).

    :param workflow: Workflow object
    :param column: Column object to delete
    :param cond_to_delete: List of conditions to delete after removing the
        column. Computed here when not given.
    :return: Nothing. Effect reflected in the database
    """
    # Drop the column from the DB table storing the data frame
    df_drop_column(workflow.get_data_frame_table_name(), column.name)
    # Reposition the columns above the one being deleted
    workflow.reposition_columns(column.position, workflow.ncols + 1)
    # Delete the column
    column.delete()
    # Update the information in the workflow
    workflow.ncols = workflow.ncols - 1
    workflow.save()
    if not cond_to_delete:
        # The conditions to delete are not given, so calculate them:
        # conditions attached to any action of this workflow that use
        # the deleted column.
        cond_to_delete = [
            cond for cond in Condition.objects.filter(
                action__workflow=workflow,
            )
            if column in cond.columns.all()]
    # If a column disappears, the conditions that contain that variable
    # are removed
    actions_without_filters = []
    for condition in cond_to_delete:
        if condition.is_filter:
            actions_without_filters.append(condition.action)
        # Formula has the name of the deleted column. Delete it
        condition.delete()
    # Traverse the actions for which the filter has been deleted and reassess
    # all their conditions.
    # BUG FIX: this was previously map(lambda act: ..., ...), which is lazy
    # in Python 3 and was never consumed, so the updates never executed.
    # TODO: Explore how to do this asynchronously (or lazy)
    for act in actions_without_filters:
        act.update_n_rows_selected()
    # If a column disappears, the views that contain only that column need to
    # disappear as well as they are no longer relevant.
    for view in workflow.views.all():
        if view.columns.count() == 0:
            view.delete()
def workflow_restrict_column(column: Column) -> Optional[str]:
    """Set category of the column to the existing set of values.

    Given a workflow and a column, modifies the column so that only the
    values already present are allowed for future updates.

    :param column: Column object to restrict
    :return: String with error or None if correct
    """
    # Load the data frame
    data_frame = load_table(
        column.workflow.get_data_frame_table_name())
    # Distinct non-null values currently stored in the column.
    cat_values = set(data_frame[column.name].dropna())
    if not cat_values:
        # Column has no meaningful values. Nothing to do.
        return _('Column has no meaningful values')
    # Set categories
    column.set_categories(list(cat_values))
    column.save()
    # Re-evaluate the operands in the workflow (they depend on categories)
    column.workflow.set_query_builder_ops()
    column.workflow.save()
    # Correct execution
    return None
def do_workflow_update_lusers(workflow: Workflow, log_item: Log):
    """Recompute the lusers relation of a workflow.

    Rebuild the many-to-many ``lusers`` field from the email values stored
    in the column pointed to by ``luser_email_column``. Accounts that do
    not exist yet are created on the fly.

    :param workflow: Workflow to update.
    :param log_item: Log object where the operation status is recorded.
    :return: Changes stored in the lusers relation and the log payload.
    """
    email_column = workflow.luser_email_column.name
    rows = get_rows(
        workflow.get_data_frame_table_name(),
        column_names=[email_column])

    user_model = get_user_model()
    new_user_count = 0
    users = []
    for record in rows:
        address = record[email_column]
        user = user_model.objects.filter(email=address).first()
        if not user:
            # Account does not exist yet: create it with a random password
            # (or a fixed one when running in development mode).
            if settings.DEBUG:
                pwd = '<PASSWORD>'  # NOQA
            else:
                pwd = get_random_string(length=RANDOM_PWD_LENGTH)
            user = user_model.objects.create_user(
                email=address,
                password=pwd,
            )
            new_user_count += 1
        users.append(user)

    # Replace the relation content with the freshly computed list.
    workflow.lusers.set(users)

    # Leave a summary of the operation in the log payload.
    log_item.payload['total_users'] = rows.rowcount
    log_item.payload['new_users'] = new_user_count
    log_item.payload['status'] = ugettext(
        'Learner emails successfully updated.',
    )
    log_item.save()
def do_clone_column_only(
    column: Column,
    new_workflow: Optional[Workflow] = None,
    new_name: Optional[str] = None,
) -> Column:
    """Clone a column.

    :param column: Object to clone.
    :param new_workflow: Optional new workflow object to link to.
    :param new_name: Optional new name to use.
    :result: New object.
    """
    # Default to the original name/workflow when not supplied.
    if new_name is None:
        new_name = column.name
    if new_workflow is None:
        new_workflow = column.workflow
    new_column = Column(
        name=new_name,
        description_text=column.description_text,
        workflow=new_workflow,
        data_type=column.data_type,
        is_key=column.is_key,
        position=column.position,
        in_viz=column.in_viz,
        # Deep copy: categories must not be shared between original/clone.
        categories=copy.deepcopy(column.categories),
        active_from=column.active_from,
        active_to=column.active_to,
    )
    new_column.save()
    return new_column
def clone_wf_column(column: Column) -> Column:
    """Duplicate a column, including its data in the data frame table.

    :param column: Column to clone.
    :return: The cloned Column (the data frame gains a column as well).
    """
    wflow = column.workflow

    # Clone the model object under a fresh, non-clashing name.
    clone = do_clone_column_only(
        column,
        new_name=create_new_name(column.name, wflow.columns))

    # Account for the extra column and place the clone at the end.
    wflow.ncols += 1
    wflow.save()
    wflow.refresh_from_db()
    clone.position = wflow.ncols
    clone.save()

    # Mirror the change in the DB table: add the new column, then copy the
    # data over from the original one.
    add_column_to_db(
        wflow.get_data_frame_table_name(),
        clone.name,
        clone.data_type)
    copy_column_in_db(
        wflow.get_data_frame_table_name(),
        column.name,
        clone.name)

    return clone
def check_key_columns(workflow: Workflow):
    """Check that key columns maintain their uniqueness property.

    Used to verify that after changes in the DB the columns flagged as
    keys still hold unique values.

    :param workflow: Object to use for the verification.
    :return: Nothing. Raise exception if a key column lost the property.
    """
    # First key column (if any) whose values are no longer unique.
    col_name = next(
        (col.name for col in workflow.columns.filter(is_key=True)
         if not is_column_unique(
             workflow.get_data_frame_table_name(), col.name)),
        None)
    if col_name:
        # BUG FIX: previously .format() was applied to the second literal
        # before concatenation, so the column name was embedded in the
        # msgid passed to _() and the string could never match the
        # translation catalogue. Translate the template first, then format.
        raise Exception(_(
            'The new data does not preserve the key '
            'property of column "{0}"').format(col_name))
Recalculate the elements in the field lusers of the workflow based on the fields luser_email_column and luser_email_column_MD5 :param workflow: Workflow to update :param log_item: Log where to leave the status of the operation :return: Changes in the lusers ManyToMany relationships # Get the column content # Create user # Define users with the same password in development # NOQA # Assign result # Report status Clone a column. :param column: Object to clone. :param new_workflow: Optional new worklow object to link to. :param new_name: Optional new name to use. :result: New object. Create a clone of a column. :param column: Object to clone :return: Cloned object (DF has an additional column as well # Update the number of columns in the workflow # Reposition the new column at the end # Create the column in the database Check that key columns maintain their property. Function used to verify that after changes in the DB the key columns maintain their property. :param workflow: Object to use for the verification. :return: Nothing. Raise exception if key column lost the property. | 2.708185 | 3 |
pyphabricatordb/phragment.py | veblush/PyPhabricatorDb | 0 | 6621825 | # coding: utf-8
from sqlalchemy import Column, Index, Integer, String, VARBINARY
from sqlalchemy import String, Unicode, ForeignKey
from sqlalchemy.orm import relationship, backref
from dbdatetime import dbdatetime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Edge(Base):
    """Generic edge of the Phabricator object graph ('edge' table)."""

    __tablename__ = 'edge'
    __table_args__ = (
        Index('key_dst', 'dst', 'type', 'src', unique=True),
        Index('src', 'src', 'type', 'dateCreated', 'seq')
    )

    # Composite primary key: source PHID + edge type + destination PHID.
    src = Column(String, primary_key=True, nullable=False)
    type = Column(Integer, primary_key=True, nullable=False)
    dst = Column(String, primary_key=True, nullable=False)
    dateCreated = Column(dbdatetime, nullable=False)
    seq = Column(Integer, nullable=False)
    # Presumably references edgedata.id; no FK is declared -- TODO confirm.
    dataID = Column(Integer)
class EdgeData(Base):
    """Auxiliary payload attached to edges ('edgedata' table)."""

    __tablename__ = 'edgedata'

    id = Column(Integer, primary_key=True)
    data = Column(Unicode, nullable=False)
class PhragmentFragment(Base):
    """A file fragment tracked by Phragment ('phragment_fragment')."""

    __tablename__ = 'phragment_fragment'

    id = Column(Integer, primary_key=True)
    phid = Column(String, nullable=False, unique=True)
    # Unique path identifying the fragment; depth of that path is stored too.
    path = Column(Unicode(128), nullable=False, unique=True)
    depth = Column(Integer, nullable=False)
    # Nullable -- presumably NULL until a first version exists; TODO confirm.
    latestVersionPHID = Column(String)
    viewPolicy = Column(String, nullable=False)
    editPolicy = Column(String, nullable=False)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
class PhragmentFragmentVersion(Base):
    """One version of a fragment; (fragmentPHID, sequence) is unique."""

    __tablename__ = 'phragment_fragmentversion'
    __table_args__ = (
        Index('key_version', 'fragmentPHID', 'sequence', unique=True),
    )

    id = Column(Integer, primary_key=True)
    phid = Column(String, nullable=False, unique=True)
    # Monotonic version counter within a fragment (enforced by key_version).
    sequence = Column(Integer, nullable=False)
    fragmentPHID = Column(String, nullable=False)
    filePHID = Column(String)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
class PhragmentSnapshot(Base):
    """Named snapshot of a fragment tree ('phragment_snapshot')."""

    __tablename__ = 'phragment_snapshot'
    __table_args__ = (
        # Snapshot names are unique per primary fragment.
        Index('key_name', 'primaryFragmentPHID', 'name', unique=True),
    )

    id = Column(Integer, primary_key=True)
    phid = Column(String, nullable=False, unique=True)
    primaryFragmentPHID = Column(String, nullable=False)
    name = Column(Unicode(128), nullable=False)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
class PhragmentSnapshotChild(Base):
    """Membership of a fragment version in a snapshot."""

    __tablename__ = 'phragment_snapshotchild'
    __table_args__ = (
        Index('key_child', 'snapshotPHID', 'fragmentPHID', 'fragmentVersionPHID', unique=True),
    )

    id = Column(Integer, primary_key=True)
    snapshotPHID = Column(String, nullable=False)
    fragmentPHID = Column(String, nullable=False)
    # Nullable -- a child may have no pinned version; TODO confirm semantics.
    fragmentVersionPHID = Column(String)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
from sqlalchemy import Column, Index, Integer, String, VARBINARY
from sqlalchemy import String, Unicode, ForeignKey
from sqlalchemy.orm import relationship, backref
from dbdatetime import dbdatetime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Edge(Base):
    """Generic edge of the Phabricator object graph ('edge' table)."""

    __tablename__ = 'edge'
    __table_args__ = (
        Index('key_dst', 'dst', 'type', 'src', unique=True),
        Index('src', 'src', 'type', 'dateCreated', 'seq')
    )

    # Composite primary key: source PHID + edge type + destination PHID.
    src = Column(String, primary_key=True, nullable=False)
    type = Column(Integer, primary_key=True, nullable=False)
    dst = Column(String, primary_key=True, nullable=False)
    dateCreated = Column(dbdatetime, nullable=False)
    seq = Column(Integer, nullable=False)
    # Presumably references edgedata.id; no FK is declared -- TODO confirm.
    dataID = Column(Integer)
class EdgeData(Base):
    """Auxiliary payload attached to edges ('edgedata' table)."""

    __tablename__ = 'edgedata'

    id = Column(Integer, primary_key=True)
    data = Column(Unicode, nullable=False)
class PhragmentFragment(Base):
    """A file fragment tracked by Phragment ('phragment_fragment')."""

    __tablename__ = 'phragment_fragment'

    id = Column(Integer, primary_key=True)
    phid = Column(String, nullable=False, unique=True)
    # Unique path identifying the fragment; depth of that path is stored too.
    path = Column(Unicode(128), nullable=False, unique=True)
    depth = Column(Integer, nullable=False)
    # Nullable -- presumably NULL until a first version exists; TODO confirm.
    latestVersionPHID = Column(String)
    viewPolicy = Column(String, nullable=False)
    editPolicy = Column(String, nullable=False)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
class PhragmentFragmentVersion(Base):
    """One version of a fragment; (fragmentPHID, sequence) is unique."""

    __tablename__ = 'phragment_fragmentversion'
    __table_args__ = (
        Index('key_version', 'fragmentPHID', 'sequence', unique=True),
    )

    id = Column(Integer, primary_key=True)
    phid = Column(String, nullable=False, unique=True)
    # Monotonic version counter within a fragment (enforced by key_version).
    sequence = Column(Integer, nullable=False)
    fragmentPHID = Column(String, nullable=False)
    filePHID = Column(String)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
class PhragmentSnapshot(Base):
    """Named snapshot of a fragment tree ('phragment_snapshot')."""

    __tablename__ = 'phragment_snapshot'
    __table_args__ = (
        # Snapshot names are unique per primary fragment.
        Index('key_name', 'primaryFragmentPHID', 'name', unique=True),
    )

    id = Column(Integer, primary_key=True)
    phid = Column(String, nullable=False, unique=True)
    primaryFragmentPHID = Column(String, nullable=False)
    name = Column(Unicode(128), nullable=False)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
class PhragmentSnapshotChild(Base):
    """Membership of a fragment version in a snapshot."""

    __tablename__ = 'phragment_snapshotchild'
    __table_args__ = (
        Index('key_child', 'snapshotPHID', 'fragmentPHID', 'fragmentVersionPHID', unique=True),
    )

    id = Column(Integer, primary_key=True)
    snapshotPHID = Column(String, nullable=False)
    fragmentPHID = Column(String, nullable=False)
    # Nullable -- a child may have no pinned version; TODO confirm semantics.
    fragmentVersionPHID = Column(String)
    dateCreated = Column(dbdatetime, nullable=False)
    dateModified = Column(dbdatetime, nullable=False)
DeviceIDManager.py | SixSecondMonks/light-modules | 6 | 6621826 | <reponame>SixSecondMonks/light-modules
# Interactive (Python 2) utility to read and set the device ID of an
# AllPixel serial controller via bibliopixel's DriverSerial.
import sys
from bibliopixel.drivers.serial_driver import DriverSerial

run = True
print "Press Ctrl+C any time to exit."
try:
    # Main interactive loop: one read/update cycle per iteration.
    while run:
        ignored = raw_input(
            "\nConnect just one Serial device (AllPixel) and press enter...")
        ports = DriverSerial.findSerialDevices()
        if len(ports):
            try:
                # Report the current ID of the first detected device.
                id = DriverSerial.getDeviceID(ports[0])
                print "Device ID of {}: {}".format(ports[0], id)
                newID = raw_input("Input new ID (enter to skip): ")
                if newID != '':
                    try:
                        # Device IDs are a single byte (0-255).
                        newID = int(newID)
                        if newID < 0 or newID > 255:
                            raise ValueError()
                        try:
                            DriverSerial.setDeviceID(ports[0], newID)
                            print "Device ID set to: {}".format(DriverSerial.getDeviceID(ports[0]))
                        except:
                            # NOTE(review): failures while writing the ID are
                            # silently ignored here.
                            pass
                    except ValueError:
                        print "Please enter a valid number between 0 and 255."
            except Exception, e:
                print e
        else:
            print "No serial devices found. Please connect one."
except KeyboardInterrupt, err:
    # Ctrl+C exits the loop quietly.
    pass
else:
    pass
| import sys
from bibliopixel.drivers.serial_driver import DriverSerial
run = True
print "Press Ctrl+C any time to exit."
try:
while run:
ignored = raw_input(
"\nConnect just one Serial device (AllPixel) and press enter...")
ports = DriverSerial.findSerialDevices()
if len(ports):
try:
id = DriverSerial.getDeviceID(ports[0])
print "Device ID of {}: {}".format(ports[0], id)
newID = raw_input("Input new ID (enter to skip): ")
if newID != '':
try:
newID = int(newID)
if newID < 0 or newID > 255:
raise ValueError()
try:
DriverSerial.setDeviceID(ports[0], newID)
print "Device ID set to: {}".format(DriverSerial.getDeviceID(ports[0]))
except:
pass
except ValueError:
print "Please enter a valid number between 0 and 255."
except Exception, e:
print e
else:
print "No serial devices found. Please connect one."
except KeyboardInterrupt, err:
pass
else:
pass | none | 1 | 3.261799 | 3 | |
km_api/know_me/journal/__init__.py | knowmetools/km-api | 4 | 6621827 | default_app_config = "know_me.journal.apps.JournalAppConfig"
| default_app_config = "know_me.journal.apps.JournalAppConfig"
| none | 1 | 1.151121 | 1 | |
pyvision/detection/efficientdet/train.py | indiradutta/PyVision | 31 | 6621828 | <filename>pyvision/detection/efficientdet/train.py
import os
import argparse
import time
from tqdm.auto import tqdm
import shutil
import numpy as np
import sys
import torch.nn as nn
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
sys.path.append(os.path.basename(__file__)+"/lib")
from lib.model import EfficientDet
from lib.dataset import CustomDataset, Resizer, Normalizer, Augmenter, collater
def parse_args():
    """Build and parse the command-line arguments for EfficientDet training.

    :return: argparse.Namespace with all training options.
    """
    def _str2bool(value):
        # BUG FIX: argparse's type=bool is broken -- bool("False") is True
        # because any non-empty string is truthy. Parse the usual textual
        # spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("yes", "true", "t", "y", "1"):
            return True
        if lowered in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)

    parser = argparse.ArgumentParser(description="EfficientDet: Scalable and Efficient Object Detection training module")
    # General Parameters
    parser.add_argument("--name", type=str, default="exp_0", help="Name of experiment")
    # Model parameters
    parser.add_argument("--model_coeff", type=int, default=0, required=True, help="Efficientdet model coeff (b0, b1, ....)")
    parser.add_argument("--image_size", type=int, default=512, help="The common height and width for all images")
    parser.add_argument("--ckpt", type=str, help="path to checkpoint from where to resume training ")
    # Training parameters
    parser.add_argument("--batch_size", type=int, default=8, help="Batch size for training")
    parser.add_argument("--lr", type=float, default=1e-4, help="Initial Learning rate for training")
    parser.add_argument("--gpu", type=_str2bool, default=True, required=True, help="True if training is to use GPU. False if not.")
    parser.add_argument("--alpha", type=float, default=0.25, help="Alpha parameter for focal loss")
    parser.add_argument("--gamma", type=float, default=1.5, help="Gamma parameter for focal loss")
    parser.add_argument("--epochs", type=int, default=100, help="Number of epochs to run training for")
    parser.add_argument("--es_min_delta", type=float, default=0.0, help="Early Stopping's Parameter: minimum change in loss to qualify as improvement")
    parser.add_argument("--es_patience", type=int, default=0, help="Early stopping's parameter: Number of epochs with no improvement in loss to stop training. 0 to disable")
    # Logging parameters
    parser.add_argument("--log_path", type=str, default="tensorboard/", help="Path to store tensorboard logs")
    parser.add_argument("--save_path", type=str, default="trained/", help="path to folder where to save trained model")
    parser.add_argument("--best_epoch", type=int, default=0)
    parser.add_argument("--best_loss", type=float, default=1e5)
    # Train Dataset parameters
    # Format of Dataset:
    # - Root Directory
    #       - Annotations (COCO Format)
    #           - train_instance.json
    #           - test_instance.json
    #           - val_instance.json
    #       - train
    #           - img1 ... imgn
    #       - test
    #           - img1 ... imgn
    #       - val
    #           - img1 ... imgn
    parser.add_argument("--root_dir", type=str, required=True, help="Path to root dataset directory")
    parser.add_argument("--coco_dir", type=str, default="./", required=True)
    parser.add_argument("--img_dir", type=str, required=True, help="Name of the folder containing the imgs in the root dir")
    parser.add_argument("--set_dir", type=str, required=True, help="name of set (train/test/val) being used for this")
    parser.add_argument("--num_threads", type=int, default=2, help="Number of threads to utilize for loading data")
    # Validation parameters
    parser.add_argument("--val", type=_str2bool, default=False, help="Perform validation boolean")
    parser.add_argument("--val_interval", type=int, default=5, help="Epochs interval after which to run validation")
    parser.add_argument("--val_dir", type=str, help="Path to Validation set root directory")
    parser.add_argument("--val_imgs", type=str, help="Path to Val set imgs")
    parser.add_argument("--val_coco", type=str)
    parser.add_argument("--val_set", type=str, help="Path to set dir")
    args = parser.parse_args()
    return args
def Train(args):
    """Run the EfficientDet training loop.

    Builds the datasets/loaders from *args*, instantiates the model,
    trains for ``args.epochs`` epochs (optionally validating every
    ``args.val_interval`` epochs with early stopping) and writes
    checkpoints (.pth and ONNX) plus tensorboard logs.

    :param args: argparse.Namespace produced by parse_args().
    :return: Nothing. Side effects: files under args.save_path and
        args.log_path.
    """
    if args.gpu and not torch.cuda.is_available():
        raise ValueError(f"--gpu is {args.gpu} but cuda not found")
    if args.gpu:
        device = "cuda"
    else:
        device = "cpu"
    # setting the trainloader
    trainset = CustomDataset(
        root_dir = args.root_dir + "/" + args.coco_dir,
        img_dir = args.img_dir,
        set_name = args.set_dir,
        transform = transforms.Compose([Normalizer(), Augmenter(), Resizer()])
    )
    trainloader = DataLoader(
        trainset,
        batch_size = args.batch_size,
        shuffle = False,
        drop_last = False,
        collate_fn = collater,
        num_workers = args.num_threads
    )
    # If validation is enabled, set the val loader (no augmentation)
    if args.val:
        valset = CustomDataset(
            root_dir = args.val_dir + "/" + args.val_coco,
            img_dir = args.val_imgs,
            set_name = args.val_set,
            transform = transforms.Compose([Normalizer(), Resizer()])
        )
        valloader = DataLoader(
            valset,
            batch_size=args.batch_size,
            shuffle=False,
            drop_last=False,
            collate_fn=collater,
            num_workers=args.num_threads
        )
    # setting the device and other model params
    num_classes = trainset.num_classes()
    efficientdet = EfficientDet(
        model_coeff = args.model_coeff,
        num_classes=num_classes,
        focal_alpha = args.alpha,
        focal_gamma = args.gamma,
        device = device
    )
    # loading pretrained models (if passed); failure is non-fatal
    try:
        efficientdet.load_state_dict(torch.load(args.ckpt))
        print("checkpoint loaded successfully!")
    except Exception as e:
        print("ERROR: Model Loading failed: ", e)
    efficientdet = efficientdet.to(device)
    efficientdet.train()
    # Setting the optimizer and scheduler
    optimizer = torch.optim.Adam(efficientdet.parameters(), args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    # set up logging and model save directories (wiped if they exist)
    args.log_path = args.log_path + "/" + "EfficientDet" + "/" + args.name
    if os.path.isdir(args.log_path):
        shutil.rmtree(args.log_path)
    os.makedirs(args.log_path)
    if os.path.isdir(args.save_path):
        shutil.rmtree(args.save_path)
    os.makedirs(args.save_path)
    # setting up the tensorboard writer
    writer = SummaryWriter(args.log_path)
    len_trainloader = len(trainloader)
    if args.val:
        # Training loop with periodic validation and early stopping.
        for epoch in range(args.epochs):
            efficientdet.train()
            epoch_loss = []
            epoch_progress = tqdm(trainloader)
            for idx, data in enumerate(epoch_progress):
                try:
                    # zero grading the optimizer
                    optimizer.zero_grad()
                    # forward pass
                    img_batch = data['img'].to(device).float()
                    annot_batch = data['annot'].to(device)
                    cls_loss, reg_loss = efficientdet([img_batch, annot_batch])
                    # Optimization block
                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()
                    total_loss = cls_loss + reg_loss
                    if total_loss == 0:
                        continue
                    total_loss.backward()
                    # Gradient clipping to stabilize training.
                    torch.nn.utils.clip_grad_norm_(efficientdet.parameters(), 0.1)
                    optimizer.step()
                    epoch_loss.append(float(total_loss))
                    total_mean_loss = np.mean(epoch_loss)
                    epoch_progress.set_description(
                        "Epoch: {}/{}, Batch id: {}/{}, Classification Loss: {:.5f}, Regression Loss: {:.5f}, Batch Loss: {:.5f}, Total Loss: {:.5f}".format(
                            epoch+1, args.epochs, idx, len_trainloader, cls_loss, reg_loss, total_loss, total_mean_loss
                        )
                    )
                    writer.add_scalar('Train/Total_Loss', total_mean_loss, epoch * len_trainloader + idx)
                    writer.add_scalar('Train/Regression_Loss', reg_loss, epoch * len_trainloader + idx)
                    writer.add_scalar('Train/Classification_loss (Focal Loss)', cls_loss, epoch * len_trainloader + idx)
                except Exception as e:
                    # NOTE(review): batch-level failures are logged and skipped.
                    print(e)
                    continue
            scheduler.step(np.mean(epoch_loss))
            if epoch % args.val_interval == 0:
                # Validation pass (no gradients).
                efficientdet.eval()
                loss_reg_ls = []
                loss_cls_ls = []
                for idx, data in enumerate(valloader):
                    img_batch = data['img'].to(device).float()
                    annot_batch = data['annot'].to(device)
                    with torch.no_grad():
                        cls_loss, reg_loss = efficientdet([img_batch, annot_batch])
                        cls_loss = cls_loss.mean()
                        reg_loss = reg_loss.mean()
                        loss_cls_ls.append(float(cls_loss))
                        loss_reg_ls.append(float(reg_loss))
                cls_loss = np.mean(loss_cls_ls)
                reg_loss = np.mean(loss_reg_ls)
                loss = cls_loss + reg_loss
                print(
                    'Epoch: {}/{}, Classification Loss: {:1.5f}, Regression Loss: {:1.5f}, Total Loss: {:1.5f}'.format(
                        epoch+1, args.epochs, cls_loss, reg_loss, np.mean(loss)
                    )
                )
                writer.add_scalar('Val/Total_Loss', loss, epoch)
                writer.add_scalar('Val/Regression_Loss', reg_loss, epoch)
                writer.add_scalar('Val/Classification_Loss', cls_loss, epoch)
                if loss + args.es_min_delta < args.best_loss:
                    # New best model: save .pth and export ONNX.
                    args.best_loss = loss
                    args.best_epoch = epoch
                    torch.save(efficientdet, os.path.join(args.save_path, "efficientdet_best.pth"))
                    dummy = torch.rand(1, 3, 512, 512)
                    dummy = dummy.to(device)
                    if isinstance(efficientdet, nn.DataParallel):
                        # ONNX export requires the non-memory-efficient swish.
                        efficientdet.backbone_net.model.set_swish(memory_efficient=False)
                        try:
                            torch.onnx.export(
                                efficientdet.module, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
                                verbose=False, opset_version=11
                            )
                        except:
                            print("Failed ONNX export")
                    else:
                        efficientdet.backbone_net.model.set_swish(memory_efficient=False)
                        torch.onnx.export(
                            efficientdet, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
                            verbose=False, opset_version=11
                        )
                    efficientdet.backbone_net.model.set_swish(memory_efficient=True)
                if epoch - args.best_epoch > args.es_patience > 0:
                    # Early stopping: no improvement for es_patience epochs.
                    print(f"Stopped training at epoch: {epoch}, Lowerst loss: {loss}")
                    break
    else:
        # Training loop without validation: checkpoint after every epoch.
        for epoch in range(args.epochs):
            efficientdet.train()
            epoch_loss = []
            epoch_progress = tqdm(trainloader)
            for idx, data in enumerate(epoch_progress):
                try:
                    # zero grading the optimizer
                    optimizer.zero_grad()
                    # forward pass
                    img_batch = data['img'].to(device).float()
                    annot_batch = data['annot'].to(device)
                    cls_loss, reg_loss = efficientdet([img_batch, annot_batch])
                    # Optimization block
                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()
                    total_loss = cls_loss + reg_loss
                    if total_loss == 0:
                        continue
                    total_loss.backward()
                    torch.nn.utils.clip_grad_norm_(efficientdet.parameters(), 0.1)
                    optimizer.step()
                    epoch_loss.append(float(total_loss))
                    total_mean_loss = np.mean(epoch_loss)
                    epoch_progress.set_description(
                        "Epoch: {}/{}, Batch id: {}/{}, Classification Loss: {:.5f}, Regression Loss: {:.5f}, Batch Loss: {:.5f}, Total Loss: {:.5f}".format(
                            epoch+1, args.epochs, idx, len_trainloader, cls_loss, reg_loss, total_loss, total_mean_loss
                        )
                    )
                    writer.add_scalar('Train/Total_Loss', total_mean_loss, epoch * len_trainloader + idx)
                    writer.add_scalar('Train/Regression_Loss', reg_loss, epoch * len_trainloader + idx)
                    writer.add_scalar('Train/Classification_loss (Focal Loss)', cls_loss, epoch * len_trainloader + idx)
                except Exception as e:
                    print(e)
                    continue
            scheduler.step(np.mean(epoch_loss))
            torch.save(efficientdet, os.path.join(args.save_path, "efficientdet_best.pth"))
            dummy = torch.rand(1, 3, 512, 512)
            dummy = dummy.to(device)
            if isinstance(efficientdet, nn.DataParallel):
                efficientdet.backbone_net.model.set_swish(memory_efficient=False)
                try:
                    torch.onnx.export(
                        efficientdet.module, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
                        verbose=False, opset_version=11
                    )
                except:
                    print("Failed ONNX export")
            else:
                efficientdet.backbone_net.model.set_swish(memory_efficient=False)
                torch.onnx.export(
                    efficientdet, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
                    verbose=False, opset_version=11
                )
            efficientdet.backbone_net.model.set_swish(memory_efficient=True)
    writer.close()
if __name__ == "__main__":
    # Entry point: parse CLI options and launch training.
    opts = parse_args()
    Train(opts)
import os
import argparse
import time
from tqdm.auto import tqdm
import shutil
import numpy as np
import sys
import torch.nn as nn
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
sys.path.append(os.path.basename(__file__)+"/lib")
from lib.model import EfficientDet
from lib.dataset import CustomDataset, Resizer, Normalizer, Augmenter, collater
def parse_args():
parser = argparse.ArgumentParser(description="EfficientDet: Scalable and Efficient Object Detection training module")
# General Parameters
parser.add_argument("--name", type=str, default="exp_0", help="Name of experiment")
# Model parameters
parser.add_argument("--model_coeff", type=int, default=0, required=True, help="Efficientdet model coeff (b0, b1, ....)")
parser.add_argument("--image_size", type=int, default=512, help="The common height and width for all images")
parser.add_argument("--ckpt", type=str, help="path to checkpoint from where to resume training ")
# Training parameters
parser.add_argument("--batch_size", type=int, default=8, help="Batch size for training")
parser.add_argument("--lr", type=float, default=1e-4, help="Initial Learning rate for training")
parser.add_argument("--gpu", type=bool, default=True, required=True, help="True if training is to use GPU. False if not.")
parser.add_argument("--alpha", type=float, default=0.25, help="Alpha parameter for focal loss")
parser.add_argument("--gamma", type=float, default=1.5, help="Gamma parameter for focal loss")
parser.add_argument("--epochs", type=int, default=100, help="Number of epochs to run training for")
parser.add_argument("--es_min_delta", type=float, default=0.0, help="Early Stopping's Parameter: minimum change in loss to qualify as improvement")
parser.add_argument("--es_patience", type=int, default=0, help="Early stopping's parameter: Number of epochs with no improvement in loss to stop training. 0 to disable")
# Logging parameters
parser.add_argument("--log_path", type=str, default="tensorboard/", help="Path to store tensorboard logs")
parser.add_argument("--save_path", type=str, default="trained/", help="path to folder where to save trained model")
parser.add_argument("--best_epoch", type=int, default=0)
parser.add_argument("--best_loss", type=float, default=1e5)
# Train Dataset parameters
# Format of Dataset:
# - Root Directory
# - Annotations (COCO Format)
# - train_instance.json
# - test_instance.json
# - val_instance.json
# - train
# - img1
# - img2
# .
# .
# - imgn
# - test
# - img1
# - img2
# .
# .
# - imgn
# - val
# - img1
# - img2
# .
# .
# - imgn
parser.add_argument("--root_dir", type=str, required=True, help="Path to root dataset directory")
parser.add_argument("--coco_dir", type=str, default="./", required=True)
parser.add_argument("--img_dir", type=str, required=True, help="Name of the folder containing the imgs in the root dir")
parser.add_argument("--set_dir", type=str, required=True, help="name of set (train/test/val) being used for this")
parser.add_argument("--num_threads", type=int, default=2, help="Number of threads to utilize for loading data")
# Validation parameters
parser.add_argument("--val", type=bool, default=False, help="Perform validation boolean")
parser.add_argument("--val_interval", type=int, default=5, help="Epochs interval after which to run validation")
parser.add_argument("--val_dir", type=str, help="Path to Validation set root directory")
parser.add_argument("--val_imgs", type=str, help="Path to Val set imgs")
parser.add_argument("--val_coco", type=str)
parser.add_argument("--val_set", type=str, help="Path to set dir")
args = parser.parse_args()
return args
def Train(args):
if args.gpu and not torch.cuda.is_available():
raise ValueError(f"--gpu is {args.gpu} but cuda not found")
if args.gpu:
device = "cuda"
else:
device = "cpu"
# setting the trainloader
trainset = CustomDataset(
root_dir = args.root_dir + "/" + args.coco_dir,
img_dir = args.img_dir,
set_name = args.set_dir,
transform = transforms.Compose([Normalizer(), Augmenter(), Resizer()])
)
trainloader = DataLoader(
trainset,
batch_size = args.batch_size,
shuffle = False,
drop_last = False,
collate_fn = collater,
num_workers = args.num_threads
)
# If validation is enabled, set the val loader
if args.val:
valset = CustomDataset(
root_dir = args.val_dir + "/" + args.val_coco,
img_dir = args.val_imgs,
set_name = args.val_set,
transform = transforms.Compose([Normalizer(), Resizer()])
)
valloader = DataLoader(
valset,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
collate_fn=collater,
num_workers=args.num_threads
)
# setting the device and other model params
num_classes = trainset.num_classes()
efficientdet = EfficientDet(
model_coeff = args.model_coeff,
num_classes=num_classes,
focal_alpha = args.alpha,
focal_gamma = args.gamma,
device = device
)
# loading pretrained models (if passed)
try:
efficientdet.load_state_dict(torch.load(args.ckpt))
print("checkpoint loaded successfully!")
except Exception as e:
print("ERROR: Model Loading failed: ", e)
efficientdet = efficientdet.to(device)
efficientdet.train()
# Setting the optimizer and scheduler
optimizer = torch.optim.Adam(efficientdet.parameters(), args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
# set up logging and model save directories
args.log_path = args.log_path + "/" + "EfficientDet" + "/" + args.name
if os.path.isdir(args.log_path):
shutil.rmtree(args.log_path)
os.makedirs(args.log_path)
if os.path.isdir(args.save_path):
shutil.rmtree(args.save_path)
os.makedirs(args.save_path)
# setting up the tensorboard writer
writer = SummaryWriter(args.log_path)
len_trainloader = len(trainloader)
if args.val:
for epoch in range(args.epochs):
efficientdet.train()
epoch_loss = []
epoch_progress = tqdm(trainloader)
for idx, data in enumerate(epoch_progress):
try:
# zero grading the optimizer
optimizer.zero_grad()
# forward pass
img_batch = data['img'].to(device).float()
annot_batch = data['annot'].to(device)
cls_loss, reg_loss = efficientdet([img_batch, annot_batch])
# Optimization block
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
total_loss = cls_loss + reg_loss
if total_loss == 0:
continue
total_loss.backward()
torch.nn.utils.clip_grad_norm_(efficientdet.parameters(), 0.1)
optimizer.step()
epoch_loss.append(float(total_loss))
total_mean_loss = np.mean(epoch_loss)
epoch_progress.set_description(
"Epoch: {}/{}, Batch id: {}/{}, Classification Loss: {:.5f}, Regression Loss: {:.5f}, Batch Loss: {:.5f}, Total Loss: {:.5f}".format(
epoch+1, args.epochs, idx, len_trainloader, cls_loss, reg_loss, total_loss, total_mean_loss
)
)
writer.add_scalar('Train/Total_Loss', total_mean_loss, epoch * len_trainloader + idx)
writer.add_scalar('Train/Regression_Loss', reg_loss, epoch * len_trainloader + idx)
writer.add_scalar('Train/Classification_loss (Focal Loss)', cls_loss, epoch * len_trainloader + idx)
except Exception as e:
print(e)
continue
scheduler.step(np.mean(epoch_loss))
if epoch % args.val_interval == 0:
efficientdet.eval()
loss_reg_ls = []
loss_cls_ls = []
for idx, data in enumerate(valloader):
img_batch = data['img'].to(device).float()
annot_batch = data['annot'].to(device)
with torch.no_grad():
cls_loss, reg_loss = efficientdet([img_batch, annot_batch])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
loss_cls_ls.append(float(cls_loss))
loss_reg_ls.append(float(reg_loss))
cls_loss = np.mean(loss_cls_ls)
reg_loss = np.mean(loss_reg_ls)
loss = cls_loss + reg_loss
print(
'Epoch: {}/{}, Classification Loss: {:1.5f}, Regression Loss: {:1.5f}, Total Loss: {:1.5f}'.format(
epoch+1, args.epochs, cls_loss, reg_loss, np.mean(loss)
)
)
writer.add_scalar('Val/Total_Loss', loss, epoch)
writer.add_scalar('Val/Regression_Loss', reg_loss, epoch)
writer.add_scalar('Val/Classification_Loss', cls_loss, epoch)
if loss + args.es_min_delta < args.best_loss:
args.best_loss = loss
args.best_epoch = epoch
torch.save(efficientdet, os.path.join(args.save_path, "efficientdet_best.pth"))
dummy = torch.rand(1, 3, 512, 512)
dummy = dummy.to(device)
if isinstance(efficientdet, nn.DataParallel):
efficientdet.backbone_net.model.set_swish(memory_efficient=False)
try:
torch.onnx.export(
efficientdet.module, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
verbose=False, opset_version=11
)
except:
print("Failed ONNX export")
else:
efficientdet.backbone_net.model.set_swish(memory_efficient=False)
torch.onnx.export(
efficientdet, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
verbose=False, opset_version=11
)
efficientdet.backbone_net.model.set_swish(memory_efficient=True)
if epoch - args.best_epoch > args.es_patience > 0:
print(f"Stopped training at epoch: {epoch}, Lowerst loss: {loss}")
break
else:
for epoch in range(args.epochs):
efficientdet.train()
epoch_loss = []
epoch_progress = tqdm(trainloader)
for idx, data in enumerate(epoch_progress):
try:
# zero grading the optimizer
optimizer.zero_grad()
# forward pass
img_batch = data['img'].to(device).float()
annot_batch = data['annot'].to(device)
cls_loss, reg_loss = efficientdet([img_batch, annot_batch])
# Optimization block
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
total_loss = cls_loss + reg_loss
if total_loss == 0:
continue
total_loss.backward()
torch.nn.utils.clip_grad_norm_(efficientdet.parameters(), 0.1)
optimizer.step()
epoch_loss.append(float(total_loss))
total_mean_loss = np.mean(epoch_loss)
epoch_progress.set_description(
"Epoch: {}/{}, Batch id: {}/{}, Classification Loss: {:.5f}, Regression Loss: {:.5f}, Batch Loss: {:.5f}, Total Loss: {:.5f}".format(
epoch+1, args.epochs, idx, len_trainloader, cls_loss, reg_loss, total_loss, total_mean_loss
)
)
writer.add_scalar('Train/Total_Loss', total_mean_loss, epoch * len_trainloader + idx)
writer.add_scalar('Train/Regression_Loss', reg_loss, epoch * len_trainloader + idx)
writer.add_scalar('Train/Classification_loss (Focal Loss)', cls_loss, epoch * len_trainloader + idx)
except Exception as e:
print(e)
continue
scheduler.step(np.mean(epoch_loss))
torch.save(efficientdet, os.path.join(args.save_path, "efficientdet_best.pth"))
dummy = torch.rand(1, 3, 512, 512)
dummy = dummy.to(device)
if isinstance(efficientdet, nn.DataParallel):
efficientdet.backbone_net.model.set_swish(memory_efficient=False)
try:
torch.onnx.export(
efficientdet.module, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
verbose=False, opset_version=11
)
except:
print("Failed ONNX export")
else:
efficientdet.backbone_net.model.set_swish(memory_efficient=False)
torch.onnx.export(
efficientdet, dummy, os.path.join(args.save_path, "efficientdet_best.onnx"),
verbose=False, opset_version=11
)
efficientdet.backbone_net.model.set_swish(memory_efficient=True)
writer.close()
if __name__ == "__main__":
opts = parse_args()
Train(opts) | en | 0.473766 | # General Parameters # Model parameters # Training parameters # Logging parameters # Train Dataset parameters # Format of Dataset: # - Root Directory # - Annotations (COCO Format) # - train_instance.json # - test_instance.json # - val_instance.json # - train # - img1 # - img2 # . # . # - imgn # - test # - img1 # - img2 # . # . # - imgn # - val # - img1 # - img2 # . # . # - imgn # Validation parameters # setting the trainloader # If validation is enabled, set the val loader # setting the device and other model params # loading pretrained models (if passed) # Setting the optimizer and scheduler # set up logging and model save directories # setting up the tensorboard writer # zero grading the optimizer # forward pass # Optimization block # zero grading the optimizer # forward pass # Optimization block | 2.371068 | 2 |
tests/hello_lib_test.py | asottile/setuptools-golang-examples | 11 | 6621829 | <gh_stars>10-100
import hello_lib
def test_hello_lib():
assert hello_lib.ohai('anthony') == 'ohai, anthony'
| import hello_lib
def test_hello_lib():
assert hello_lib.ohai('anthony') == 'ohai, anthony' | none | 1 | 2.02996 | 2 | |
src/cortex/cortex_internal/lib/api/utils.py | cortexlabs/nucleus | 18 | 6621830 | <reponame>cortexlabs/nucleus<gh_stars>10-100
# Copyright 2022 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import glob
import itertools
import json
import os
import shutil
import threading as td
import time
import traceback
from collections import defaultdict
from http import HTTPStatus
from typing import Any, Callable, Dict, List, Optional
from starlette.responses import Response
from cortex_internal.lib import util
from cortex_internal.lib.exceptions import CortexException, UserRuntimeException
from cortex_internal.lib.log import configure_logger
from cortex_internal.lib.model import validate_model_paths
from cortex_internal.lib.storage import S3
from cortex_internal.lib.type import HandlerType
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])
def model_downloader(
handler_type: HandlerType,
bucket_name: str,
model_name: str,
model_version: str,
model_path: str,
temp_dir: str,
model_dir: str,
) -> Optional[datetime.datetime]:
"""
Downloads model to disk. Validates the s3 model path and the downloaded model.
Args:
handler_type: The handler type as implemented by the API.
bucket_name: Name of the bucket where the model is stored.
model_name: Name of the model. Is part of the model's local path.
model_version: Version of the model. Is part of the model's local path.
model_path: Model prefix of the versioned model.
temp_dir: Where to temporarily store the model for validation.
model_dir: The top directory of where all models are stored locally.
Returns:
The model's timestamp. None if the model didn't pass the validation, if it doesn't exist or if there are not enough permissions.
"""
logger.info(
f"downloading from bucket {bucket_name}/{model_path}, model {model_name} of version {model_version}, temporarily to {temp_dir} and then finally to {model_dir}"
)
client = S3(bucket_name)
# validate upstream S3 model
sub_paths, ts = client.search(model_path)
try:
validate_model_paths(sub_paths, handler_type, model_path)
except CortexException:
logger.info(f"failed validating model {model_name} of version {model_version}")
return None
# download model to temp dir
temp_dest = os.path.join(temp_dir, model_name, model_version)
try:
client.download_dir_contents(model_path, temp_dest)
except CortexException:
logger.info(
f"failed downloading model {model_name} of version {model_version} to temp dir {temp_dest}"
)
shutil.rmtree(temp_dest)
return None
# validate model
model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
model_contents = util.remove_non_empty_directory_paths(model_contents)
try:
validate_model_paths(model_contents, handler_type, temp_dest)
except CortexException:
logger.info(
f"failed validating model {model_name} of version {model_version} from temp dir"
)
shutil.rmtree(temp_dest)
return None
# move model to dest dir
model_top_dir = os.path.join(model_dir, model_name)
ondisk_model_version = os.path.join(model_top_dir, model_version)
logger.info(
f"moving model {model_name} of version {model_version} to final dir {ondisk_model_version}"
)
if os.path.isdir(ondisk_model_version):
shutil.rmtree(ondisk_model_version)
shutil.move(temp_dest, ondisk_model_version)
return max(ts)
class DynamicBatcher:
def __init__(
self,
handler_impl: Callable,
method_name: str,
max_batch_size: int,
batch_interval_seconds: int,
test_mode: bool = False,
):
self.method_name = method_name
self.handler_impl = handler_impl
self.batch_max_size = max_batch_size
self.batch_interval_seconds = batch_interval_seconds # measured in seconds
self.test_mode = test_mode # only for unit testing
self._test_batch_lengths = [] # only when unit testing
self.barrier = td.Barrier(self.batch_max_size + 1)
self.samples = {}
self.results = {}
td.Thread(target=self._batch_engine, daemon=True).start()
self.sample_id_generator = itertools.count()
def _batch_engine(self):
while True:
if len(self.results) > 0:
time.sleep(0.001)
continue
try:
self.barrier.wait(self.batch_interval_seconds)
except td.BrokenBarrierError:
pass
self.results = {}
sample_ids = self._get_sample_ids(self.batch_max_size)
try:
if self.samples:
batch = self._make_batch(sample_ids)
results = getattr(self.handler_impl, self.method_name)(**batch)
if not isinstance(results, list):
raise UserRuntimeException(
f"please return a list when using server side batching, got {type(results)}"
)
if self.test_mode:
self._test_batch_lengths.append(len(results))
self.results = dict(zip(sample_ids, results))
except Exception as e:
self.results = {sample_id: e for sample_id in sample_ids}
logger.error(traceback.format_exc())
finally:
for sample_id in sample_ids:
del self.samples[sample_id]
self.barrier.reset()
def _get_sample_ids(self, max_number: int) -> List[int]:
if len(self.samples) <= max_number:
return list(self.samples.keys())
return sorted(self.samples)[:max_number]
def _make_batch(self, sample_ids: List[int]) -> Dict[str, List[Any]]:
batched_samples = defaultdict(list)
for sample_id in sample_ids:
for key, sample in self.samples[sample_id].items():
batched_samples[key].append(sample)
return dict(batched_samples)
def _enqueue_request(self, sample_id: int, **kwargs):
"""
Enqueue sample for batch processing. This is a blocking method.
"""
self.samples[sample_id] = kwargs
try:
self.barrier.wait()
except td.BrokenBarrierError:
pass
def process(self, **kwargs):
"""
Queues a request to be batched with other incoming request, waits for the response
and returns the processed result. This is a blocking method.
"""
sample_id = next(self.sample_id_generator)
self._enqueue_request(sample_id, **kwargs)
result = self._get_result(sample_id)
return result
def _get_result(self, sample_id: int) -> Any:
"""
Return the processed result. This is a blocking method.
"""
while sample_id not in self.results:
time.sleep(0.001)
result = self.results[sample_id]
del self.results[sample_id]
if isinstance(result, Exception):
return Response(
content=str(result),
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
media_type="text/plain",
)
return result
| # Copyright 2022 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import glob
import itertools
import json
import os
import shutil
import threading as td
import time
import traceback
from collections import defaultdict
from http import HTTPStatus
from typing import Any, Callable, Dict, List, Optional
from starlette.responses import Response
from cortex_internal.lib import util
from cortex_internal.lib.exceptions import CortexException, UserRuntimeException
from cortex_internal.lib.log import configure_logger
from cortex_internal.lib.model import validate_model_paths
from cortex_internal.lib.storage import S3
from cortex_internal.lib.type import HandlerType
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])
def model_downloader(
handler_type: HandlerType,
bucket_name: str,
model_name: str,
model_version: str,
model_path: str,
temp_dir: str,
model_dir: str,
) -> Optional[datetime.datetime]:
"""
Downloads model to disk. Validates the s3 model path and the downloaded model.
Args:
handler_type: The handler type as implemented by the API.
bucket_name: Name of the bucket where the model is stored.
model_name: Name of the model. Is part of the model's local path.
model_version: Version of the model. Is part of the model's local path.
model_path: Model prefix of the versioned model.
temp_dir: Where to temporarily store the model for validation.
model_dir: The top directory of where all models are stored locally.
Returns:
The model's timestamp. None if the model didn't pass the validation, if it doesn't exist or if there are not enough permissions.
"""
logger.info(
f"downloading from bucket {bucket_name}/{model_path}, model {model_name} of version {model_version}, temporarily to {temp_dir} and then finally to {model_dir}"
)
client = S3(bucket_name)
# validate upstream S3 model
sub_paths, ts = client.search(model_path)
try:
validate_model_paths(sub_paths, handler_type, model_path)
except CortexException:
logger.info(f"failed validating model {model_name} of version {model_version}")
return None
# download model to temp dir
temp_dest = os.path.join(temp_dir, model_name, model_version)
try:
client.download_dir_contents(model_path, temp_dest)
except CortexException:
logger.info(
f"failed downloading model {model_name} of version {model_version} to temp dir {temp_dest}"
)
shutil.rmtree(temp_dest)
return None
# validate model
model_contents = glob.glob(os.path.join(temp_dest, "**"), recursive=True)
model_contents = util.remove_non_empty_directory_paths(model_contents)
try:
validate_model_paths(model_contents, handler_type, temp_dest)
except CortexException:
logger.info(
f"failed validating model {model_name} of version {model_version} from temp dir"
)
shutil.rmtree(temp_dest)
return None
# move model to dest dir
model_top_dir = os.path.join(model_dir, model_name)
ondisk_model_version = os.path.join(model_top_dir, model_version)
logger.info(
f"moving model {model_name} of version {model_version} to final dir {ondisk_model_version}"
)
if os.path.isdir(ondisk_model_version):
shutil.rmtree(ondisk_model_version)
shutil.move(temp_dest, ondisk_model_version)
return max(ts)
class DynamicBatcher:
def __init__(
self,
handler_impl: Callable,
method_name: str,
max_batch_size: int,
batch_interval_seconds: int,
test_mode: bool = False,
):
self.method_name = method_name
self.handler_impl = handler_impl
self.batch_max_size = max_batch_size
self.batch_interval_seconds = batch_interval_seconds # measured in seconds
self.test_mode = test_mode # only for unit testing
self._test_batch_lengths = [] # only when unit testing
self.barrier = td.Barrier(self.batch_max_size + 1)
self.samples = {}
self.results = {}
td.Thread(target=self._batch_engine, daemon=True).start()
self.sample_id_generator = itertools.count()
def _batch_engine(self):
while True:
if len(self.results) > 0:
time.sleep(0.001)
continue
try:
self.barrier.wait(self.batch_interval_seconds)
except td.BrokenBarrierError:
pass
self.results = {}
sample_ids = self._get_sample_ids(self.batch_max_size)
try:
if self.samples:
batch = self._make_batch(sample_ids)
results = getattr(self.handler_impl, self.method_name)(**batch)
if not isinstance(results, list):
raise UserRuntimeException(
f"please return a list when using server side batching, got {type(results)}"
)
if self.test_mode:
self._test_batch_lengths.append(len(results))
self.results = dict(zip(sample_ids, results))
except Exception as e:
self.results = {sample_id: e for sample_id in sample_ids}
logger.error(traceback.format_exc())
finally:
for sample_id in sample_ids:
del self.samples[sample_id]
self.barrier.reset()
def _get_sample_ids(self, max_number: int) -> List[int]:
if len(self.samples) <= max_number:
return list(self.samples.keys())
return sorted(self.samples)[:max_number]
def _make_batch(self, sample_ids: List[int]) -> Dict[str, List[Any]]:
batched_samples = defaultdict(list)
for sample_id in sample_ids:
for key, sample in self.samples[sample_id].items():
batched_samples[key].append(sample)
return dict(batched_samples)
def _enqueue_request(self, sample_id: int, **kwargs):
"""
Enqueue sample for batch processing. This is a blocking method.
"""
self.samples[sample_id] = kwargs
try:
self.barrier.wait()
except td.BrokenBarrierError:
pass
def process(self, **kwargs):
"""
Queues a request to be batched with other incoming request, waits for the response
and returns the processed result. This is a blocking method.
"""
sample_id = next(self.sample_id_generator)
self._enqueue_request(sample_id, **kwargs)
result = self._get_result(sample_id)
return result
def _get_result(self, sample_id: int) -> Any:
"""
Return the processed result. This is a blocking method.
"""
while sample_id not in self.results:
time.sleep(0.001)
result = self.results[sample_id]
del self.results[sample_id]
if isinstance(result, Exception):
return Response(
content=str(result),
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
media_type="text/plain",
)
return result | en | 0.861558 | # Copyright 2022 Cortex Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Downloads model to disk. Validates the s3 model path and the downloaded model. Args: handler_type: The handler type as implemented by the API. bucket_name: Name of the bucket where the model is stored. model_name: Name of the model. Is part of the model's local path. model_version: Version of the model. Is part of the model's local path. model_path: Model prefix of the versioned model. temp_dir: Where to temporarily store the model for validation. model_dir: The top directory of where all models are stored locally. Returns: The model's timestamp. None if the model didn't pass the validation, if it doesn't exist or if there are not enough permissions. # validate upstream S3 model # download model to temp dir # validate model # move model to dest dir # measured in seconds # only for unit testing # only when unit testing Enqueue sample for batch processing. This is a blocking method. Queues a request to be batched with other incoming request, waits for the response and returns the processed result. This is a blocking method. Return the processed result. This is a blocking method. | 1.793933 | 2 |
legacy/migrations/0001_initial.py | naderm/farnsworth | 0 | 6621831 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='TeacherEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField(help_text=b'Date of this event.', null=True, blank=True)),
('title', models.CharField(help_text=b'The title of this event.', max_length=56, null=True, blank=True)),
('description', models.TextField(help_text=b'The description of this event.', null=True, blank=True)),
],
options={
'ordering': ['-date'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherNote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(help_text=b'Date and time when this note was posted.', null=True, blank=True)),
('name', models.CharField(help_text=b'The name given by the user who posted this request.', max_length=56, null=True, blank=True)),
('body', models.TextField(help_text=b'The body of this note.', null=True, blank=True)),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('request_type', models.CharField(help_text=b'The request type for this request.', max_length=15, null=True, blank=True)),
('teacher_key', models.CharField(help_text=b'Legacy primary key based on datetime.', max_length=24, null=True, blank=True)),
('timestamp', models.DateTimeField(help_text=b'Date and time when this request was posted.', null=True, blank=True)),
('name', models.CharField(help_text=b'The name given by the user who posted this request.', max_length=56, null=True, blank=True)),
('body', models.TextField(help_text=b'The body of this request.', null=True, blank=True)),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherResponse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(help_text=b'Date and time when this response was posted.', null=True, blank=True)),
('name', models.CharField(help_text=b'The name given by the user who posted this request.', max_length=56, null=True, blank=True)),
('body', models.TextField(help_text=b'The body of this response.', null=True, blank=True)),
('request', models.ForeignKey(help_text=b'The request to which this is a response.', to='legacy.TeacherRequest')),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='TeacherEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField(help_text=b'Date of this event.', null=True, blank=True)),
('title', models.CharField(help_text=b'The title of this event.', max_length=56, null=True, blank=True)),
('description', models.TextField(help_text=b'The description of this event.', null=True, blank=True)),
],
options={
'ordering': ['-date'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherNote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(help_text=b'Date and time when this note was posted.', null=True, blank=True)),
('name', models.CharField(help_text=b'The name given by the user who posted this request.', max_length=56, null=True, blank=True)),
('body', models.TextField(help_text=b'The body of this note.', null=True, blank=True)),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('request_type', models.CharField(help_text=b'The request type for this request.', max_length=15, null=True, blank=True)),
('teacher_key', models.CharField(help_text=b'Legacy primary key based on datetime.', max_length=24, null=True, blank=True)),
('timestamp', models.DateTimeField(help_text=b'Date and time when this request was posted.', null=True, blank=True)),
('name', models.CharField(help_text=b'The name given by the user who posted this request.', max_length=56, null=True, blank=True)),
('body', models.TextField(help_text=b'The body of this request.', null=True, blank=True)),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherResponse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(help_text=b'Date and time when this response was posted.', null=True, blank=True)),
('name', models.CharField(help_text=b'The name given by the user who posted this request.', max_length=56, null=True, blank=True)),
('body', models.TextField(help_text=b'The body of this response.', null=True, blank=True)),
('request', models.ForeignKey(help_text=b'The request to which this is a response.', to='legacy.TeacherRequest')),
],
options={
'ordering': ['-timestamp'],
},
bases=(models.Model,),
),
] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.99028 | 2 |
learningml/GoF/analysis/legend/plot_legend.py | weissercn/learningml | 1 | 6621832 | from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
label_size = 28
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf", bbox='tight', pad_inches= 0.1)
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
dimensions= [1.,2.]
ml_classifiers_dict={'nn':[2.,3.], 'svm':[3.,4.], 'BDT_best':[4.,5.]}
xwidth=[0.1,0.1]
chi2_best=[6.,7.]
fig = plt.figure()
figlegend = plt.figure(figsize=(8,6))
ax = fig.add_axes([0.2,0.15,0.75,0.8])
l1=ax.errorbar(dimensions,ml_classifiers_dict['nn'], xerr=xwidth, yerr=ml_classifiers_dict['nn'], linestyle='', marker='s', markersize=15, color='green', label=r'$ANN$')
l2=ax.errorbar(dimensions,ml_classifiers_dict['BDT_best'], xerr=xwidth, yerr=ml_classifiers_dict['BDT_best'], linestyle='', marker='o', markersize=15, color='green', label=r'$BDT$')
l3=ax.errorbar(dimensions,ml_classifiers_dict['svm'], xerr=xwidth, yerr=ml_classifiers_dict['svm'], linestyle='', marker='^', markersize=15, color='green', label=r'$SVM$')
l4=ax.errorbar(dimensions,chi2_best, xerr=xwidth, yerr=chi2_best, linestyle='', marker='x', markersize=15, color='magenta', label=r'$\chi^2$')
l = [l1,l2,l3,l4]
figlegend.legend(l,('ANN','BDT','SVM', r'$\chi^2$'),frameon=False, numpoints=1)
figlegend.savefig('legend.pdf')
| from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
label_size = 28
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf", bbox='tight', pad_inches= 0.1)
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
dimensions= [1.,2.]
ml_classifiers_dict={'nn':[2.,3.], 'svm':[3.,4.], 'BDT_best':[4.,5.]}
xwidth=[0.1,0.1]
chi2_best=[6.,7.]
fig = plt.figure()
figlegend = plt.figure(figsize=(8,6))
ax = fig.add_axes([0.2,0.15,0.75,0.8])
l1=ax.errorbar(dimensions,ml_classifiers_dict['nn'], xerr=xwidth, yerr=ml_classifiers_dict['nn'], linestyle='', marker='s', markersize=15, color='green', label=r'$ANN$')
l2=ax.errorbar(dimensions,ml_classifiers_dict['BDT_best'], xerr=xwidth, yerr=ml_classifiers_dict['BDT_best'], linestyle='', marker='o', markersize=15, color='green', label=r'$BDT$')
l3=ax.errorbar(dimensions,ml_classifiers_dict['svm'], xerr=xwidth, yerr=ml_classifiers_dict['svm'], linestyle='', marker='^', markersize=15, color='green', label=r'$SVM$')
l4=ax.errorbar(dimensions,chi2_best, xerr=xwidth, yerr=chi2_best, linestyle='', marker='x', markersize=15, color='magenta', label=r'$\chi^2$')
l = [l1,l2,l3,l4]
figlegend.legend(l,('ANN','BDT','SVM', r'$\chi^2$'),frameon=False, numpoints=1)
figlegend.savefig('legend.pdf')
| de | 0.844461 | ################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################ #mpl.rcParams['text.usetex'] = True #mpl.rcParams['text.latex.preamble'] = [r'\boldmath'] ################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################ | 1.614174 | 2 |
kslurm/models/__init__.py | pvandyken/cluster_utils | 0 | 6621833 | <filename>kslurm/models/__init__.py
__submodules__ = [
"formatters",
"kslurm",
"slurm",
"update",
"validators",
"job_templates",
]
# <AUTOGEN_INIT>
from kslurm.models.formatters import (
mem,
time,
)
from kslurm.models.kslurm import (
KslurmModel,
)
from kslurm.models.slurm import (
SlurmModel,
)
from kslurm.models.update import (
UpdateModel,
VERSION_REGEX,
)
from kslurm.models.validators import (
job_template,
)
from kslurm.models.job_templates import (
TemplateArgs,
Templates,
list_templates,
set_template,
templates,
)
__all__ = [
"KslurmModel",
"SlurmModel",
"TemplateArgs",
"Templates",
"UpdateModel",
"VERSION_REGEX",
"job_template",
"list_templates",
"mem",
"set_template",
"templates",
"time",
]
# </AUTOGEN_INIT>
| <filename>kslurm/models/__init__.py
__submodules__ = [
"formatters",
"kslurm",
"slurm",
"update",
"validators",
"job_templates",
]
# <AUTOGEN_INIT>
from kslurm.models.formatters import (
mem,
time,
)
from kslurm.models.kslurm import (
KslurmModel,
)
from kslurm.models.slurm import (
SlurmModel,
)
from kslurm.models.update import (
UpdateModel,
VERSION_REGEX,
)
from kslurm.models.validators import (
job_template,
)
from kslurm.models.job_templates import (
TemplateArgs,
Templates,
list_templates,
set_template,
templates,
)
__all__ = [
"KslurmModel",
"SlurmModel",
"TemplateArgs",
"Templates",
"UpdateModel",
"VERSION_REGEX",
"job_template",
"list_templates",
"mem",
"set_template",
"templates",
"time",
]
# </AUTOGEN_INIT>
| es | 0.70873 | # <AUTOGEN_INIT> # </AUTOGEN_INIT> | 1.590245 | 2 |
Lab1/A2.py | stevemman/FC308-Labs | 0 | 6621834 | <reponame>stevemman/FC308-Labs
# Store how many miles each person has travelled.
Shila = 20
Cornelius = 100
Ariel = 52
# Cost per mile is 20p
cost = 20
# Calculate and print the cost of travel for all candidates
print("Shila has driven", Shila, "miles. At 20p per mile the expenses paid are", Shila*cost, "p")
print("Cornelius has driven", Cornelius, "miles. At 20p per mile the expenses paid are", Cornelius*cost, "p")
print("Ariel has driven", Ariel, "miles. At 20p per mile the expenses paid are", Ariel*cost, "p")
| # Store how many miles each person has travelled.
Shila = 20
Cornelius = 100
Ariel = 52
# Cost per mile is 20p
cost = 20
# Calculate and print the cost of travel for all candidates
print("Shila has driven", Shila, "miles. At 20p per mile the expenses paid are", Shila*cost, "p")
print("Cornelius has driven", Cornelius, "miles. At 20p per mile the expenses paid are", Cornelius*cost, "p")
print("Ariel has driven", Ariel, "miles. At 20p per mile the expenses paid are", Ariel*cost, "p") | en | 0.948848 | # Store how many miles each person has travelled. # Cost per mile is 20p # Calculate and print the cost of travel for all candidates | 3.679262 | 4 |
IGA_PD_example.py | johntfoster/IGA | 4 | 6621835 |
# coding: utf-8
# This notebook provides an example code for using the IGA2D class
# In[1]:
import IGA
import numpy as np
import matplotlib.pyplot as plt
#get_ipython().magic(u'matplotlib inline')
# In[2]:
def run_case_1(num_knots, order, delta, norm, quad_degree=10):
h = 1.0 / num_knots
if delta > h:
num_boundary_elements = np.ceil(delta / h)
else:
num_boundary_elements = 1
omega_p1 = np.linspace(-delta, 0, num=(num_boundary_elements + 1))
omega = np.linspace(0, 1, num=(num_knots+1))
omega_p2 = np.linspace(1, 1 + delta, num=(num_boundary_elements + 1))
knot_vector = np.r_[-delta * np.ones(order), omega_p1[:-1], omega[:-1], omega_p2, np.ones(order) * (1 + delta)]
iga = IGA.PD1D(knot_vector, order, delta)
iga.degree = quad_degree
u = lambda x: x * (1 - x)
b = lambda x: np.ones(x.shape[0])
iga.compute_solutions(u, b, num_boundary_elements)
return iga.compute_error(norm=norm)
# In[ ]:
dofs = np.array([100,700])
errs = [ run_case_1(num_knots, order=1, delta=0.25, norm=2, quad_degree=4) for num_knots in dofs ]
# In[ ]:
# In[ ]:
#Fit a straight line
coefs = np.polyfit(np.log10(1.0 / dofs), np.log10(errs), 1)
y = 10 ** (coefs[0] * np.log10(1.0 / dofs) + coefs[1])
#Plot
plt.loglog(1.0 / dofs, y, 'b-')
plt.loglog(1.0 / dofs, errs, 'b^')
plt.xlabel("$\log_{10} h$")
plt.ylabel("$\log_{10} \Vert Error \Vert_{L_2}$");
|
# coding: utf-8
# This notebook provides an example code for using the IGA2D class
# In[1]:
import IGA
import numpy as np
import matplotlib.pyplot as plt
#get_ipython().magic(u'matplotlib inline')
# In[2]:
def run_case_1(num_knots, order, delta, norm, quad_degree=10):
h = 1.0 / num_knots
if delta > h:
num_boundary_elements = np.ceil(delta / h)
else:
num_boundary_elements = 1
omega_p1 = np.linspace(-delta, 0, num=(num_boundary_elements + 1))
omega = np.linspace(0, 1, num=(num_knots+1))
omega_p2 = np.linspace(1, 1 + delta, num=(num_boundary_elements + 1))
knot_vector = np.r_[-delta * np.ones(order), omega_p1[:-1], omega[:-1], omega_p2, np.ones(order) * (1 + delta)]
iga = IGA.PD1D(knot_vector, order, delta)
iga.degree = quad_degree
u = lambda x: x * (1 - x)
b = lambda x: np.ones(x.shape[0])
iga.compute_solutions(u, b, num_boundary_elements)
return iga.compute_error(norm=norm)
# In[ ]:
dofs = np.array([100,700])
errs = [ run_case_1(num_knots, order=1, delta=0.25, norm=2, quad_degree=4) for num_knots in dofs ]
# In[ ]:
# In[ ]:
#Fit a straight line
coefs = np.polyfit(np.log10(1.0 / dofs), np.log10(errs), 1)
y = 10 ** (coefs[0] * np.log10(1.0 / dofs) + coefs[1])
#Plot
plt.loglog(1.0 / dofs, y, 'b-')
plt.loglog(1.0 / dofs, errs, 'b^')
plt.xlabel("$\log_{10} h$")
plt.ylabel("$\log_{10} \Vert Error \Vert_{L_2}$");
| en | 0.35861 | # coding: utf-8 # This notebook provides an example code for using the IGA2D class # In[1]: #get_ipython().magic(u'matplotlib inline') # In[2]: # In[ ]: # In[ ]: # In[ ]: #Fit a straight line #Plot | 3.043122 | 3 |
rhetoric/view.py | avanov/Rhetoric | 9 | 6621836 | <gh_stars>1-10
from django.core.urlresolvers import RegexURLPattern as DjangoRegexURLPattern
from django.http import HttpResponse
from django.http import Http404
import venusian
class view_config(object):
venusian = venusian
def __init__(self, **settings):
self.__dict__.update(settings)
def __call__(self, wrapped):
settings = self.__dict__.copy()
depth = settings.pop('_depth', 0)
def callback(scanner, name, obj):
scanner.config.add_view(view=obj, **settings)
info = self.venusian.attach(wrapped, callback, category='rhetoric', depth=depth + 1)
if info.scope == 'class':
# if the decorator was attached to a method in a class, or
# otherwise executed at class scope, we need to set an
# 'attr' into the settings if one isn't already in there
if settings.get('attr') is None:
settings['attr'] = wrapped.__name__
return wrapped
class view_defaults(view_config):
""" This object is a copy of ``pyramid.view.view_defaults``.
A class :term:`decorator` which, when applied to a class, will
provide defaults for all view configurations that use the class. This
decorator accepts all the arguments accepted by
:meth:`pyramid.view.view_config`, and each has the same meaning.
See :ref:`view_defaults` for more information.
"""
def __call__(self, wrapped):
wrapped.__view_defaults__ = self.__dict__.copy()
return wrapped
class ViewCallback(object):
""" Wrapper object around actual view callables that checks predicates during the request
and processes results returned from view callables during the response.
"""
def __init__(self, viewlist):
self.viewlist = viewlist
def __call__(self, request, *args, **kwargs):
view_settings = self.find_view_settings(request, args, kwargs)
response = view_settings['view'](request, *args, **kwargs)
return self.process_callback_response(request, response, view_settings)
def find_view_settings(self, request, args, kwargs):
if hasattr(request, 'rhetoric_view_settings'):
return getattr(request, 'rhetoric_view_settings')
# cache
for view_settings in self.viewlist:
passed, request = self.check_predicates(view_settings, request, args, kwargs)
if passed:
setattr(request, 'rhetoric_view_settings', view_settings)
return view_settings
raise Http404
def check_predicates(self, view_settings, request, req_args, req_kw):
predicates = view_settings['predicates']
# here predicate is an instance object
for predicate in predicates:
is_passed = predicate(None, request)
if not is_passed:
return is_passed, request
return True, request
def process_callback_response(self, request, response, view_settings):
if isinstance(response, HttpResponse):
# Do not process standard django responses
return response
renderer = view_settings['renderer']
return renderer(request, response)
class RegexURLPattern(DjangoRegexURLPattern):
def __init__(self, regex, default_args=None, name=None, viewlist=None):
super(RegexURLPattern, self).__init__(regex, ViewCallback(viewlist), default_args, name)
self.viewlist = viewlist
| from django.core.urlresolvers import RegexURLPattern as DjangoRegexURLPattern
from django.http import HttpResponse
from django.http import Http404
import venusian
class view_config(object):
venusian = venusian
def __init__(self, **settings):
self.__dict__.update(settings)
def __call__(self, wrapped):
settings = self.__dict__.copy()
depth = settings.pop('_depth', 0)
def callback(scanner, name, obj):
scanner.config.add_view(view=obj, **settings)
info = self.venusian.attach(wrapped, callback, category='rhetoric', depth=depth + 1)
if info.scope == 'class':
# if the decorator was attached to a method in a class, or
# otherwise executed at class scope, we need to set an
# 'attr' into the settings if one isn't already in there
if settings.get('attr') is None:
settings['attr'] = wrapped.__name__
return wrapped
class view_defaults(view_config):
""" This object is a copy of ``pyramid.view.view_defaults``.
A class :term:`decorator` which, when applied to a class, will
provide defaults for all view configurations that use the class. This
decorator accepts all the arguments accepted by
:meth:`pyramid.view.view_config`, and each has the same meaning.
See :ref:`view_defaults` for more information.
"""
def __call__(self, wrapped):
wrapped.__view_defaults__ = self.__dict__.copy()
return wrapped
class ViewCallback(object):
""" Wrapper object around actual view callables that checks predicates during the request
and processes results returned from view callables during the response.
"""
def __init__(self, viewlist):
self.viewlist = viewlist
def __call__(self, request, *args, **kwargs):
view_settings = self.find_view_settings(request, args, kwargs)
response = view_settings['view'](request, *args, **kwargs)
return self.process_callback_response(request, response, view_settings)
def find_view_settings(self, request, args, kwargs):
if hasattr(request, 'rhetoric_view_settings'):
return getattr(request, 'rhetoric_view_settings')
# cache
for view_settings in self.viewlist:
passed, request = self.check_predicates(view_settings, request, args, kwargs)
if passed:
setattr(request, 'rhetoric_view_settings', view_settings)
return view_settings
raise Http404
def check_predicates(self, view_settings, request, req_args, req_kw):
predicates = view_settings['predicates']
# here predicate is an instance object
for predicate in predicates:
is_passed = predicate(None, request)
if not is_passed:
return is_passed, request
return True, request
def process_callback_response(self, request, response, view_settings):
if isinstance(response, HttpResponse):
# Do not process standard django responses
return response
renderer = view_settings['renderer']
return renderer(request, response)
class RegexURLPattern(DjangoRegexURLPattern):
def __init__(self, regex, default_args=None, name=None, viewlist=None):
super(RegexURLPattern, self).__init__(regex, ViewCallback(viewlist), default_args, name)
self.viewlist = viewlist | en | 0.884414 | # if the decorator was attached to a method in a class, or # otherwise executed at class scope, we need to set an # 'attr' into the settings if one isn't already in there This object is a copy of ``pyramid.view.view_defaults``. A class :term:`decorator` which, when applied to a class, will provide defaults for all view configurations that use the class. This decorator accepts all the arguments accepted by :meth:`pyramid.view.view_config`, and each has the same meaning. See :ref:`view_defaults` for more information. Wrapper object around actual view callables that checks predicates during the request and processes results returned from view callables during the response. # cache # here predicate is an instance object # Do not process standard django responses | 2.325196 | 2 |
common/NormalNN24.py | akweury/improved_normal_inference | 0 | 6621837 | <filename>common/NormalNN24.py<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
class NormalNN24(nn.Module):
def __init__(self, in_ch, out_ch):
super().__init__()
self.__name__ = 'NormalNN24'
kernel_down = (3, 3)
kernel_down_2 = (5, 5)
kernel_up = (3, 3)
kernel_up_2 = (5, 5)
padding_down = (1, 1)
padding_down_2 = (2, 2)
padding_up = (1, 1)
padding_up_2 = (2, 2)
self.active = nn.LeakyReLU(0.01)
self.active_last = nn.Tanh()
# self.active = nn.ReLU()
channel_size_1 = 32
channel_size_2 = 64
self.dconv1 = nn.Conv2d(in_ch, channel_size_1, kernel_down, (1, 1), padding_down)
self.dconv2 = nn.Conv2d(channel_size_1, channel_size_1, kernel_down, (1, 1), padding_down)
self.dconv3 = nn.Conv2d(channel_size_1, channel_size_1, kernel_down, (1, 1), padding_down)
self.uconv1 = nn.Conv2d(channel_size_2, channel_size_1, kernel_up, (1, 1), padding_up)
self.uconv2 = nn.Conv2d(channel_size_2, channel_size_1, kernel_up, (1, 1), padding_up)
self.uconv3 = nn.Conv2d(channel_size_2, channel_size_1, kernel_up, (1, 1), padding_up)
self.conv1 = nn.Conv2d(channel_size_1, out_ch, (1, 1), (1, 1), (0, 0))
self.conv2 = nn.Conv2d(out_ch, out_ch, (1, 1), (1, 1), (0, 0))
def forward(self, x0):
x1 = self.active(self.dconv1(x0)) # 512,512
x1 = self.active(self.dconv2(x1)) # 512,512
x1 = self.active(self.dconv3(x1)) # 512,512
# Downsample 1
ds = 2
x1_ds, idx = F.max_pool2d(x1, ds, ds, return_indices=True) # 256,256
x1_ds /= 4
x2_ds = self.active(self.dconv2(x1_ds)) # 256,256
x2_ds = self.active(self.dconv3(x2_ds)) # 256,256
# Downsample 2
ds = 2
x2_ds, idx = F.max_pool2d(x2_ds, ds, ds, return_indices=True) # 128,128
x2_ds /= 4
x3_ds = self.active(self.dconv2(x2_ds)) # 128,128
x3_ds = self.active(self.dconv3(x3_ds)) # 128,128
# Downsample 3
ds = 2
x3_ds, idx = F.max_pool2d(x3_ds, ds, ds, return_indices=True) # 64,64
x3_ds /= 4
x4_ds = self.active(self.dconv2(x3_ds)) # 64,64
x4_ds = self.active(self.dconv3(x4_ds)) # 64,64
# Upsample 1
x4 = F.interpolate(x4_ds, x3_ds.size()[2:], mode='nearest') # 128,128
x34_ds = self.active(self.uconv1(torch.cat((x3_ds, x4), 1))) # 128, 128
# Upsample 2
x34 = F.interpolate(x34_ds, x2_ds.size()[2:], mode='nearest')
x23_ds = self.active(self.uconv2(torch.cat((x2_ds, x34), 1))) # 256, 256
# # Upsample 3
x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest') # 512, 512
xout = self.active(self.uconv3(torch.cat((x23, x1), 1))) # 512, 512
xout = self.conv1(xout) # 512, 512
xout = self.conv2(xout)
return xout
| <filename>common/NormalNN24.py<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
class NormalNN24(nn.Module):
def __init__(self, in_ch, out_ch):
super().__init__()
self.__name__ = 'NormalNN24'
kernel_down = (3, 3)
kernel_down_2 = (5, 5)
kernel_up = (3, 3)
kernel_up_2 = (5, 5)
padding_down = (1, 1)
padding_down_2 = (2, 2)
padding_up = (1, 1)
padding_up_2 = (2, 2)
self.active = nn.LeakyReLU(0.01)
self.active_last = nn.Tanh()
# self.active = nn.ReLU()
channel_size_1 = 32
channel_size_2 = 64
self.dconv1 = nn.Conv2d(in_ch, channel_size_1, kernel_down, (1, 1), padding_down)
self.dconv2 = nn.Conv2d(channel_size_1, channel_size_1, kernel_down, (1, 1), padding_down)
self.dconv3 = nn.Conv2d(channel_size_1, channel_size_1, kernel_down, (1, 1), padding_down)
self.uconv1 = nn.Conv2d(channel_size_2, channel_size_1, kernel_up, (1, 1), padding_up)
self.uconv2 = nn.Conv2d(channel_size_2, channel_size_1, kernel_up, (1, 1), padding_up)
self.uconv3 = nn.Conv2d(channel_size_2, channel_size_1, kernel_up, (1, 1), padding_up)
self.conv1 = nn.Conv2d(channel_size_1, out_ch, (1, 1), (1, 1), (0, 0))
self.conv2 = nn.Conv2d(out_ch, out_ch, (1, 1), (1, 1), (0, 0))
def forward(self, x0):
x1 = self.active(self.dconv1(x0)) # 512,512
x1 = self.active(self.dconv2(x1)) # 512,512
x1 = self.active(self.dconv3(x1)) # 512,512
# Downsample 1
ds = 2
x1_ds, idx = F.max_pool2d(x1, ds, ds, return_indices=True) # 256,256
x1_ds /= 4
x2_ds = self.active(self.dconv2(x1_ds)) # 256,256
x2_ds = self.active(self.dconv3(x2_ds)) # 256,256
# Downsample 2
ds = 2
x2_ds, idx = F.max_pool2d(x2_ds, ds, ds, return_indices=True) # 128,128
x2_ds /= 4
x3_ds = self.active(self.dconv2(x2_ds)) # 128,128
x3_ds = self.active(self.dconv3(x3_ds)) # 128,128
# Downsample 3
ds = 2
x3_ds, idx = F.max_pool2d(x3_ds, ds, ds, return_indices=True) # 64,64
x3_ds /= 4
x4_ds = self.active(self.dconv2(x3_ds)) # 64,64
x4_ds = self.active(self.dconv3(x4_ds)) # 64,64
# Upsample 1
x4 = F.interpolate(x4_ds, x3_ds.size()[2:], mode='nearest') # 128,128
x34_ds = self.active(self.uconv1(torch.cat((x3_ds, x4), 1))) # 128, 128
# Upsample 2
x34 = F.interpolate(x34_ds, x2_ds.size()[2:], mode='nearest')
x23_ds = self.active(self.uconv2(torch.cat((x2_ds, x34), 1))) # 256, 256
# # Upsample 3
x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest') # 512, 512
xout = self.active(self.uconv3(torch.cat((x23, x1), 1))) # 512, 512
xout = self.conv1(xout) # 512, 512
xout = self.conv2(xout)
return xout
| en | 0.362683 | # self.active = nn.ReLU() # 512,512 # 512,512 # 512,512 # Downsample 1 # 256,256 # 256,256 # 256,256 # Downsample 2 # 128,128 # 128,128 # 128,128 # Downsample 3 # 64,64 # 64,64 # 64,64 # Upsample 1 # 128,128 # 128, 128 # Upsample 2 # 256, 256 # # Upsample 3 # 512, 512 # 512, 512 # 512, 512 | 2.132518 | 2 |
testing/test_lexer.py | jweinraub/hippyvm | 289 | 6621838 | <gh_stars>100-1000
import pytest
from hippy.lexer import Lexer, LexerError
class TestLexer(object):
    def setup_class(cls):
        # One shared Lexer instance is enough: every test re-feeds its own
        # buffer via self.lexer.input(buf, 0, 0) before tokenizing.
        cls.lexer = Lexer()
def lex(self, buf):
self.lexer.input(buf, 0, 0)
return [i.name for i in self.lexer.token() if i]
def lex_full(self, buf):
self.lexer.input(buf, 0, 0)
return [(i.name, i.source, i.source_pos.lineno)
for i in self.lexer.token() if i]
def lex_content(self, buf):
self.lexer.input(buf, 0, 0)
return [i.source for i in self.lexer.token() if i]
def test_basic(self):
assert self.lex("12 + 12") == ["T_LNUMBER", "+", "T_LNUMBER"]
def test_variable(self):
assert self.lex("$x 12") == ["T_VARIABLE", "T_LNUMBER"]
    # NOTE(review): "indetifier" is a typo for "identifier" in the method name;
    # kept as-is so the collected test name does not change.
    def test_keyword_indetifier(self):
        # 'return' lexes as the keyword token; '$xyz' as an ordinary variable.
        assert self.lex("return $xyz") == ['T_RETURN', 'T_VARIABLE']
def test_ctx_obj(self):
assert self.lex("interface $x->interface") == ["T_INTERFACE",
"T_VARIABLE",
"T_OBJECT_OPERATOR",
"T_STRING"]
def test_case_insensitive_keywords(self):
assert self.lex("Interface") == self.lex("interface") == ["T_INTERFACE"]
assert self.lex("InstanceOf") == self.lex("instanceof") == ["T_INSTANCEOF"]
assert self.lex("Class") == self.lex("class") == ["T_CLASS"]
def test_left_bracket(self):
assert self.lex('"x $var y"') == ['"', "T_ENCAPSED_AND_WHITESPACE",
"T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", '"']
assert self.lex('"x{$var}y"') == ['"', "T_ENCAPSED_AND_WHITESPACE",
"T_DOLLAR_OPEN_CURLY_BRACES",
"T_VARIABLE", "}",
"T_ENCAPSED_AND_WHITESPACE", '"']
    def test_brackets_expr(self):
        # Inside {$x[...]} a full expression context opens: the index
        # '1 + 2' lexes as ordinary number/operator tokens.
        assert self.lex('"a{$x[1 + 2]}b"') == [
            '"',
            "T_ENCAPSED_AND_WHITESPACE",
            "T_DOLLAR_OPEN_CURLY_BRACES",
            "T_VARIABLE",
            "[",
            "T_LNUMBER", "+", "T_LNUMBER",
            "]",
            "}",
            "T_ENCAPSED_AND_WHITESPACE",
            '"'
        ]
def test_simple_brackets(self):
assert self.lex('"$a[13]"') == [
'"', "T_VARIABLE", "[", "T_NUM_STRING", "]", '"'
]
def test_dollar_brackets(self):
assert self.lex('"${a}"') == [
'"', "T_DOLLAR_OPEN_CURLY_BRACES", "T_VARIABLE", "}", '"'
]
    def test_escaped_quotes(self):
        # Backslash-escaped quotes stay inside the string instead of
        # closing it; $a still interpolates between them.
        assert self.lex('"x \\\"$a\\\""') == [
            '"', "T_ENCAPSED_AND_WHITESPACE",
            "T_VARIABLE",
            "T_ENCAPSED_AND_WHITESPACE",
            '"'
        ]
    def test_complex_case(self):
        # The leading '\$' is an escaped dollar (plain text), while the
        # later $x[$y] and $x interpolate normally.
        exp = ['"', "T_ENCAPSED_AND_WHITESPACE", "T_VARIABLE",
               "[", "T_VARIABLE", "]", "T_VARIABLE",
               "T_ENCAPSED_AND_WHITESPACE", '"']
        assert self.lex('"\\${x$x[$y]$x}"') == exp
def test_dollar_no_var(self):
exp = ['"', "T_VARIABLE", "T_ENCAPSED_AND_WHITESPACE", '"']
assert self.lex('"$a/$1"') == exp
    def test_heredoc_1(self):
        # Terminator line may be followed by ';' ...
        r = self.lex("<<< HERE\n sadsadasdas \nHERE;\n $var")
        assert r == [
            'T_START_HEREDOC',
            'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', ';', 'T_VARIABLE'
        ]
        # ... or by a bare newline ...
        r = self.lex("<<< HERE\n sadsadasdas \nHERE\n $var")
        assert r == [
            'T_START_HEREDOC',
            'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', 'T_VARIABLE'
        ]
        # ... but any other trailing text on the terminator line is an error.
        with pytest.raises(LexerError):
            self.lex("<<< HERE\n sadsadasdas \nHERE; $var")
def test_heredoc_2(self):
r = self.lex("<<< HERE\nHERE;")
assert r == ['T_START_HEREDOC', 'T_END_HEREDOC', ';']
with pytest.raises(LexerError):
self.lex("<<< HERE\nHERE; $var")
    def test_heredoc_3(self):
        # Braced {$foo} interpolation works inside a heredoc body.
        r = self.lex("""<<< HERE\n asd1 {$foo} asd2 \nHERE;\n""")
        assert r == [
            'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
            'T_DOLLAR_OPEN_CURLY_BRACES', 'T_VARIABLE', '}',
            'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', ';'
        ]
    def test_heredoc_4(self):
        # Bare $foo interpolation splits the heredoc body around it.
        r = self.lex("""<<< HERE\n sads $foo adasdas \nHERE;\n""")
        assert r == [
            'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
            'T_VARIABLE',
            'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', ';'
        ]
    def test_heredoc_5(self):
        # Double quotes inside a heredoc are plain body text;
        # "$foo" still interpolates.
        r = self.lex("""<<< HERE\n sads\n "$foo" adasdas \nHERE;\n""")
        assert r == [
            'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
            'T_VARIABLE',
            'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', ';'
        ]
    def test_heredoc_6(self):
        # Two consecutive heredocs lex independently, producing the same
        # token sequence twice.
        r = self.lex("""<<< HERE\n sads\n "$foo" adasdas \nHERE;\n <<< HERE\n sads\n "$foo" adasdas \nHERE;\n""")
        assert r == [
            'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
            'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', ';',
            'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
            'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
            'T_END_HEREDOC', ';'
        ]
    def test_heredoc_7(self):
        # The terminator word appearing mid-body does not end the heredoc.
        # Also checks line numbers: opener on 0, body on 1, terminator on 2.
        r = self.lex_full("""<<< HERE\n sads HERE adasdas \nHERE;\n""")
        assert r == [
            ('T_START_HEREDOC', '<<< HERE\n', 0),
            ('T_ENCAPSED_AND_WHITESPACE', ' sads HERE adasdas ', 1),
            ('T_END_HEREDOC', 'HERE', 2),
            (';', ';', 2)
        ]
    def test_heredoc_8(self):
        # A candidate terminator with leading whitespace (' X') is body
        # text; only the unindented 'X' on line 3 terminates.
        r = self.lex_full("<<<X\nXX\n X\nX;\n")
        assert r == [
            ('T_START_HEREDOC', '<<<X\n', 0),
            ('T_ENCAPSED_AND_WHITESPACE', 'XX\n X', 1),
            ('T_END_HEREDOC', 'X', 3),
            (';', ';', 3)]
    def test_heredoc_with_quoted_dollar(self):
        # A '$' not followed by a variable name stays literal in the body.
        r = self.lex_full("<<<X\n\"$\"\nX;\n")
        assert r == [
            ('T_START_HEREDOC', '<<<X\n', 0),
            ('T_ENCAPSED_AND_WHITESPACE', '"$"', 1),
            ('T_END_HEREDOC', 'X', 2),
            (';', ';', 2)]
def test_heredoc_error(self):
with pytest.raises(LexerError) as excinfo:
self.lex("<<< HERE\n sadsadasdas\n")
assert excinfo.value.message == 'unfinished heredoc'
    def test_nowdoc_1(self):
        # A quoted delimiter makes a nowdoc: one opaque T_NOWDOC token,
        # with the same ';' / bare-newline terminator rules as heredocs.
        r = self.lex("<<< 'HERE'\n sadsadasdas \nHERE;\n $var")
        assert r == ['T_NOWDOC', ';', 'T_VARIABLE']
        r = self.lex("<<< 'HERE'\n sadsadasdas \nHERE\n $var")
        assert r == ['T_NOWDOC', 'T_VARIABLE']
        # The token's source text is the body only, without the delimiters.
        source = "<<< 'HERE'\n\n sadsa $x;\nHERE"
        self.lexer.input(source, 0, 0)
        tokens = list(self.lexer.token())
        assert tokens[0].source == "\n sadsa $x;"
def test_nowdoc_2(self):
r = self.lex("<<< 'HERE'\n$a {$b} sadsadasdas \nHERE;\n $var")
assert r == ['T_NOWDOC', ';', 'T_VARIABLE']
    def test_string_backslash(self):
        # PHP source is: $rp .= "+(\\$i)";  — an escaped backslash before
        # $i, which (per the expected tokens) does not stop interpolation.
        r = self.lex('$rp .= "+(\\\\$i)";')
        assert r == ['T_VARIABLE', 'T_CONCAT_EQUAL', '"',
                     'T_ENCAPSED_AND_WHITESPACE', 'T_VARIABLE',
                     'T_ENCAPSED_AND_WHITESPACE', '"', ";"]
def test_b_quote(self):
r = self.lex('b"xy$a z"')
assert r == ['"', "T_ENCAPSED_AND_WHITESPACE", "T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", '"']
def test_var(self):
r = self.lex('"sadsada {$class} sadads\n"')
assert r == ['"', 'T_ENCAPSED_AND_WHITESPACE',
"T_DOLLAR_OPEN_CURLY_BRACES", "T_VARIABLE", "}",
"T_ENCAPSED_AND_WHITESPACE", '"']
def test_backtick(self):
r = self.lex('`ls "-1"`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_2(self):
r = self.lex('`ls "-1" "-2"`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_3(self):
r = self.lex('`ls "-1" -2 `')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_4(self):
r = self.lex('`ls "-1" \'-2\' `')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_5(self):
r = self.lex('`ls $php ls`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_6(self):
r = self.lex('`ls "$php" ls`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_7(self):
src = '`ls "$php" ls $sdf->fsdf`'
r = self.lex(src)
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', '`']
assert self.lex_content(src) == ['`', 'ls "', '$php', '" ls ',
'$sdf->fsdf', '`']
def test_backtick_8(self):
r = self.lex('`ls "$php" ls \'asdasd\' \'asdasd\'`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_9(self):
r = self.lex('`$php $php $hph`')
assert r == ['`', 'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', '`']
def test_backtick_10(self):
r = self.lex('`echo "`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_dollar_at_the_end_1(self):
r = self.lex('"xyz $a $" + 3')
assert r == ['"', "T_ENCAPSED_AND_WHITESPACE", "T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", "T_DOLLAR", '"', "+", "T_LNUMBER"]
def test_dollar_at_the_end_2(self):
r = self.lex('"%{$errors[1]}$"')
assert r == ['"', 'T_ENCAPSED_AND_WHITESPACE',
'T_DOLLAR_OPEN_CURLY_BRACES', 'T_VARIABLE',
'[', 'T_LNUMBER', ']', '}', 'T_DOLLAR', '"'
]
def test_namespace_statement(self):
r = self.lex("namespace Foo\\Bar;")
assert r == ["T_NAMESPACE",
"T_STRING", "T_NS_SEPARATOR", "T_STRING", ";"]
| import pytest
from hippy.lexer import Lexer, LexerError
class TestLexer(object):
def setup_class(cls):
cls.lexer = Lexer()
def lex(self, buf):
self.lexer.input(buf, 0, 0)
return [i.name for i in self.lexer.token() if i]
def lex_full(self, buf):
self.lexer.input(buf, 0, 0)
return [(i.name, i.source, i.source_pos.lineno)
for i in self.lexer.token() if i]
def lex_content(self, buf):
self.lexer.input(buf, 0, 0)
return [i.source for i in self.lexer.token() if i]
def test_basic(self):
assert self.lex("12 + 12") == ["T_LNUMBER", "+", "T_LNUMBER"]
def test_variable(self):
assert self.lex("$x 12") == ["T_VARIABLE", "T_LNUMBER"]
def test_keyword_indetifier(self):
assert self.lex("return $xyz") == ['T_RETURN', 'T_VARIABLE']
def test_ctx_obj(self):
assert self.lex("interface $x->interface") == ["T_INTERFACE",
"T_VARIABLE",
"T_OBJECT_OPERATOR",
"T_STRING"]
def test_case_insensitive_keywords(self):
assert self.lex("Interface") == self.lex("interface") == ["T_INTERFACE"]
assert self.lex("InstanceOf") == self.lex("instanceof") == ["T_INSTANCEOF"]
assert self.lex("Class") == self.lex("class") == ["T_CLASS"]
def test_left_bracket(self):
assert self.lex('"x $var y"') == ['"', "T_ENCAPSED_AND_WHITESPACE",
"T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", '"']
assert self.lex('"x{$var}y"') == ['"', "T_ENCAPSED_AND_WHITESPACE",
"T_DOLLAR_OPEN_CURLY_BRACES",
"T_VARIABLE", "}",
"T_ENCAPSED_AND_WHITESPACE", '"']
def test_brackets_expr(self):
assert self.lex('"a{$x[1 + 2]}b"') == [
'"',
"T_ENCAPSED_AND_WHITESPACE",
"T_DOLLAR_OPEN_CURLY_BRACES",
"T_VARIABLE",
"[",
"T_LNUMBER", "+", "T_LNUMBER",
"]",
"}",
"T_ENCAPSED_AND_WHITESPACE",
'"'
]
def test_simple_brackets(self):
assert self.lex('"$a[13]"') == [
'"', "T_VARIABLE", "[", "T_NUM_STRING", "]", '"'
]
def test_dollar_brackets(self):
assert self.lex('"${a}"') == [
'"', "T_DOLLAR_OPEN_CURLY_BRACES", "T_VARIABLE", "}", '"'
]
def test_escaped_quotes(self):
assert self.lex('"x \\\"$a\\\""') == [
'"', "T_ENCAPSED_AND_WHITESPACE",
"T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE",
'"'
]
def test_complex_case(self):
exp = ['"', "T_ENCAPSED_AND_WHITESPACE", "T_VARIABLE",
"[", "T_VARIABLE", "]", "T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", '"']
assert self.lex('"\\${x$x[$y]$x}"') == exp
def test_dollar_no_var(self):
exp = ['"', "T_VARIABLE", "T_ENCAPSED_AND_WHITESPACE", '"']
assert self.lex('"$a/$1"') == exp
def test_heredoc_1(self):
r = self.lex("<<< HERE\n sadsadasdas \nHERE;\n $var")
assert r == [
'T_START_HEREDOC',
'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', ';', 'T_VARIABLE'
]
r = self.lex("<<< HERE\n sadsadasdas \nHERE\n $var")
assert r == [
'T_START_HEREDOC',
'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', 'T_VARIABLE'
]
with pytest.raises(LexerError):
self.lex("<<< HERE\n sadsadasdas \nHERE; $var")
def test_heredoc_2(self):
r = self.lex("<<< HERE\nHERE;")
assert r == ['T_START_HEREDOC', 'T_END_HEREDOC', ';']
with pytest.raises(LexerError):
self.lex("<<< HERE\nHERE; $var")
def test_heredoc_3(self):
r = self.lex("""<<< HERE\n asd1 {$foo} asd2 \nHERE;\n""")
assert r == [
'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
'T_DOLLAR_OPEN_CURLY_BRACES', 'T_VARIABLE', '}',
'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', ';'
]
def test_heredoc_4(self):
r = self.lex("""<<< HERE\n sads $foo adasdas \nHERE;\n""")
assert r == [
'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE',
'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', ';'
]
def test_heredoc_5(self):
r = self.lex("""<<< HERE\n sads\n "$foo" adasdas \nHERE;\n""")
assert r == [
'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE',
'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', ';'
]
def test_heredoc_6(self):
r = self.lex("""<<< HERE\n sads\n "$foo" adasdas \nHERE;\n <<< HERE\n sads\n "$foo" adasdas \nHERE;\n""")
assert r == [
'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', ';',
'T_START_HEREDOC', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_END_HEREDOC', ';'
]
def test_heredoc_7(self):
r = self.lex_full("""<<< HERE\n sads HERE adasdas \nHERE;\n""")
assert r == [
('T_START_HEREDOC', '<<< HERE\n', 0),
('T_ENCAPSED_AND_WHITESPACE', ' sads HERE adasdas ', 1),
('T_END_HEREDOC', 'HERE', 2),
(';', ';', 2)
]
def test_heredoc_8(self):
r = self.lex_full("<<<X\nXX\n X\nX;\n")
assert r == [
('T_START_HEREDOC', '<<<X\n', 0),
('T_ENCAPSED_AND_WHITESPACE', 'XX\n X', 1),
('T_END_HEREDOC', 'X', 3),
(';', ';', 3)]
def test_heredoc_with_quoted_dollar(self):
r = self.lex_full("<<<X\n\"$\"\nX;\n")
assert r == [
('T_START_HEREDOC', '<<<X\n', 0),
('T_ENCAPSED_AND_WHITESPACE', '"$"', 1),
('T_END_HEREDOC', 'X', 2),
(';', ';', 2)]
def test_heredoc_error(self):
with pytest.raises(LexerError) as excinfo:
self.lex("<<< HERE\n sadsadasdas\n")
assert excinfo.value.message == 'unfinished heredoc'
def test_nowdoc_1(self):
r = self.lex("<<< 'HERE'\n sadsadasdas \nHERE;\n $var")
assert r == ['T_NOWDOC', ';', 'T_VARIABLE']
r = self.lex("<<< 'HERE'\n sadsadasdas \nHERE\n $var")
assert r == ['T_NOWDOC', 'T_VARIABLE']
source = "<<< 'HERE'\n\n sadsa $x;\nHERE"
self.lexer.input(source, 0, 0)
tokens = list(self.lexer.token())
assert tokens[0].source == "\n sadsa $x;"
def test_nowdoc_2(self):
r = self.lex("<<< 'HERE'\n$a {$b} sadsadasdas \nHERE;\n $var")
assert r == ['T_NOWDOC', ';', 'T_VARIABLE']
def test_string_backslash(self):
r = self.lex('$rp .= "+(\\\\$i)";')
assert r == ['T_VARIABLE', 'T_CONCAT_EQUAL', '"',
'T_ENCAPSED_AND_WHITESPACE', 'T_VARIABLE',
'T_ENCAPSED_AND_WHITESPACE', '"', ";"]
def test_b_quote(self):
r = self.lex('b"xy$a z"')
assert r == ['"', "T_ENCAPSED_AND_WHITESPACE", "T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", '"']
def test_var(self):
r = self.lex('"sadsada {$class} sadads\n"')
assert r == ['"', 'T_ENCAPSED_AND_WHITESPACE',
"T_DOLLAR_OPEN_CURLY_BRACES", "T_VARIABLE", "}",
"T_ENCAPSED_AND_WHITESPACE", '"']
def test_backtick(self):
r = self.lex('`ls "-1"`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_2(self):
r = self.lex('`ls "-1" "-2"`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_3(self):
r = self.lex('`ls "-1" -2 `')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_4(self):
r = self.lex('`ls "-1" \'-2\' `')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_5(self):
r = self.lex('`ls $php ls`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_6(self):
r = self.lex('`ls "$php" ls`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_7(self):
src = '`ls "$php" ls $sdf->fsdf`'
r = self.lex(src)
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', '`']
assert self.lex_content(src) == ['`', 'ls "', '$php', '" ls ',
'$sdf->fsdf', '`']
def test_backtick_8(self):
r = self.lex('`ls "$php" ls \'asdasd\' \'asdasd\'`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_backtick_9(self):
r = self.lex('`$php $php $hph`')
assert r == ['`', 'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', 'T_ENCAPSED_AND_WHITESPACE',
'T_VARIABLE', '`']
def test_backtick_10(self):
r = self.lex('`echo "`')
assert r == ['`', 'T_ENCAPSED_AND_WHITESPACE', '`']
def test_dollar_at_the_end_1(self):
r = self.lex('"xyz $a $" + 3')
assert r == ['"', "T_ENCAPSED_AND_WHITESPACE", "T_VARIABLE",
"T_ENCAPSED_AND_WHITESPACE", "T_DOLLAR", '"', "+", "T_LNUMBER"]
def test_dollar_at_the_end_2(self):
r = self.lex('"%{$errors[1]}$"')
assert r == ['"', 'T_ENCAPSED_AND_WHITESPACE',
'T_DOLLAR_OPEN_CURLY_BRACES', 'T_VARIABLE',
'[', 'T_LNUMBER', ']', '}', 'T_DOLLAR', '"'
]
def test_namespace_statement(self):
r = self.lex("namespace Foo\\Bar;")
assert r == ["T_NAMESPACE",
"T_STRING", "T_NS_SEPARATOR", "T_STRING", ";"] | en | 0.27245 | <<< HERE\n asd1 {$foo} asd2 \nHERE;\n <<< HERE\n sads $foo adasdas \nHERE;\n <<< HERE\n sads\n "$foo" adasdas \nHERE;\n <<< HERE\n sads\n "$foo" adasdas \nHERE;\n <<< HERE\n sads\n "$foo" adasdas \nHERE;\n <<< HERE\n sads HERE adasdas \nHERE;\n | 2.502337 | 3 |
plugins/atomio/find_replace.py | nielsvm/toggle-desktop | 1 | 6621839 | <gh_stars>1-10
from core.action import Action
class FindReplace(Action):
    """Rewrite a value in one of Atom's cson configuration files."""
    def binary_dependencies(self):
        # This action is only meaningful when the Atom editor is installed.
        return ['atom']
    def arguments(self):
        return [
            ('path', 'The configuration file to rewrite a value in.'),
            ('find', 'The string to be replaced.'),
            ('replace', 'The replacement value.')
        ]
    def execute(self, path, find, replace):
        """Replace every occurrence of `find` with `replace` in `path`.

        The file is read in one pass and rewritten in place; `True` is
        returned so the Action framework treats the run as a success.
        (The original implementation accumulated lines and also called
        close() redundantly inside the `with` block.)
        """
        with open(path, 'r') as source:
            contents = source.read()
        with open(path, 'w') as target:
            target.write(contents.replace(find, replace))
        return True
| from core.action import Action
class FindReplace(Action):
"""Rewrite a value in one of Atom's cson configuration files."""
def binary_dependencies(self):
return ['atom']
def arguments(self):
return [
('path', 'The configuration file to rewrite a value in.'),
('find', 'The string to be replaced.'),
('replace', 'The replacement value.')
]
def execute(self, path, find, replace):
new = []
with open(path, 'r') as old:
for line in old:
new.append(line.replace(find, replace))
with open(path, 'w') as pathnew:
pathnew.write(''.join(new))
pathnew.close()
return True | en | 0.717028 | Rewrite a value in one of Atom's cson configuration files. | 2.902268 | 3 |
move_plan.py | alecgtx/test_tello | 0 | 6621840 | from djitellopy import Tello
tello=Tello()
tello.connect()
tello.takeoff()
#move
tello.move_up(100)
#tello.move_forward(50)
tello.rotate_clockwise(90)
#tello.move_back(50)
#tello.move_up(50)
tello.land()
pass | from djitellopy import Tello
tello=Tello()
tello.connect()
tello.takeoff()
#move
tello.move_up(100)
#tello.move_forward(50)
tello.rotate_clockwise(90)
#tello.move_back(50)
#tello.move_up(50)
tello.land()
pass | en | 0.215208 | #move #tello.move_forward(50) #tello.move_back(50) #tello.move_up(50) | 2.071773 | 2 |
approval_notification/main.py | leontopliss/okta-workflows-slack-approval | 1 | 6621841 | import json
import slack
import os
import uuid
from google.cloud import firestore
from secrets import get_secret
from logger import setup_logger
gcp_project_id = os.getenv('GCP_PROJECT')
slack_token = None
notification_key = None
log = setup_logger()
def approval_notify(request):
    """HTTP entry point: validate the API key, persist an approval request
    to Firestore, and post it to Slack with Approve/Reject buttons.

    Expects a JSON body with `title`, `type`, `data` (a JSON-encoded
    string), `msg_fields`, and `slack_channel`. Returns a (body, status)
    tuple suitable for a Cloud Function HTTP response.
    """
    # loading secrets as lazy globals
    # can't be global as this creates issues with automated deployment
    # as cold start on initial deployment can't access the variables
    global slack_channel, slack_token, notification_key
    if not notification_key:
        notification_key = get_secret(gcp_project_id, 'notification-key')
    if ('X-Api-Key' not in request.headers or not api_key_valid(request.headers['X-Api-Key'])):
        log.fatal('API key is invalid')
        return 'unauthorized', 403
    if not api_key_long_enough(request.headers['X-Api-Key']):
        # Weak key: warn the operator but keep serving the request.
        log.warning('API key is too short please make it at least 10 characters')
    try:
        request_json = request.get_json(silent=True)
        title = request_json["title"]
        approval_type = request_json["type"]
        data = json.loads(request_json["data"])
        msg_fields = request_json["msg_fields"]
        slack_channel = request_json["slack_channel"]
    except KeyError as err:
        log.error('payload malformed or mandatory data missing: {}'.format(err))
        return 'payload malformed or mandatory data missing', 500
    log.debug(json.dumps(request_json))
    # A fresh id ties the Slack message buttons back to the Firestore doc.
    request_id = uuid.uuid4().hex
    if not slack_token:
        slack_token = get_secret(gcp_project_id, 'slack-token')
    client = slack.WebClient(token=slack_token)
    # Message posted to Slack as blocks
    msg_blocks = [
        construct_title_msg_blocks(title),
        construct_field_msg_blocks(msg_fields,data),
        construct_actions_msg_block(request_id)
    ]
    # Persist first so the interaction handler can always find the request.
    write_to_datastore(request_id, approval_type, data)
    # Send message to Slack
    try:
        client.chat_postMessage(
            channel=slack_channel,
            blocks=msg_blocks
        )
    except slack.errors.SlackApiError as err:
        log.error('could not post to slack: {}'.format(err))
        return 'error posting to slack', 500
    return 'ok', 200
def api_key_valid(key_provided):
    """Return True when `key_provided` matches the configured notification key.

    Uses a constant-time comparison (hmac.compare_digest) because the key
    arrives in an untrusted request header; a plain `==` can leak key
    contents through timing differences. Fails closed if no key is loaded.
    """
    if notification_key is None:
        return False
    return hmac.compare_digest(key_provided, notification_key)
def api_key_long_enough(key_provided):
    """Return True when the API key meets the minimum length of 10 characters."""
    return len(key_provided) >= 10
def construct_title_msg_blocks(title):
    """Build the Slack Block Kit section that displays the approval title."""
    text_payload = {
        "type": "mrkdwn",
        "text": "{}\n".format(title),
    }
    return {"type": "section", "text": text_payload}
def construct_field_msg_blocks(msg_fields, data):
    """Build a Slack section block listing the requested fields from `data`.

    Fields absent from `data` are skipped (and logged at debug level).
    """
    rendered_fields = []
    for name in msg_fields:
        if name not in data:
            log.debug('could not find field {} in the data'.format(name))
            continue
        #TODO temporarily displaying fields as lower case
        # add in display name
        rendered_fields.append({
            "type": "mrkdwn",
            "text": "*{}*\n{}".format(name.lower(), data[name]),
        })
    return {"type": "section", "fields": rendered_fields}
def construct_actions_msg_block(request_id):
    """Build the Slack actions block carrying Approve / Reject buttons.

    Both buttons store `request_id` as their value so the interaction
    handler can look the pending approval up again.
    """
    def button(action_id, label, style):
        # Single Block Kit button element bound to this request.
        return {
            "type": "button",
            "action_id": action_id,
            "text": {"type": "plain_text", "text": label},
            "style": style,
            "value": request_id,
        }
    return {
        "type": "actions",
        "elements": [
            button("approve", "Approve", "primary"),
            button("reject", "Reject", "danger"),
        ],
    }
def write_to_datastore(request_id, approval_type, data):
    """Persist the pending approval in Firestore.

    The document id is the generated request id; the interaction handler
    later updates `status` from 'approval_required' to the decision.
    """
    db = firestore.Client()
    doc_ref = db.collection('approvals').document(request_id)
    doc_ref.set({
        'type': approval_type,
        'status': 'approval_required',
        # Server-side timestamp avoids clock skew between function instances.
        'requested_at': firestore.SERVER_TIMESTAMP,
        'data': data
    })
| import json
import slack
import os
import uuid
from google.cloud import firestore
from secrets import get_secret
from logger import setup_logger
gcp_project_id = os.getenv('GCP_PROJECT')
slack_token = None
notification_key = None
log = setup_logger()
def approval_notify(request):
# loading secrets as lazy globals
# can't be global as this creates issues with automated deployment
# as cold start on initial deployment can't access the variables
global slack_channel, slack_token, notification_key
if not notification_key:
notification_key = get_secret(gcp_project_id, 'notification-key')
if ('X-Api-Key' not in request.headers or not api_key_valid(request.headers['X-Api-Key'])):
log.fatal('API key is invalid')
return 'unauthorized', 403
if not api_key_long_enough(request.headers['X-Api-Key']):
log.warning('API key is too short please make it at least 10 characters')
try:
request_json = request.get_json(silent=True)
title = request_json["title"]
approval_type = request_json["type"]
data = json.loads(request_json["data"])
msg_fields = request_json["msg_fields"]
slack_channel = request_json["slack_channel"]
except KeyError as err:
log.error('payload malformed or mandatory data missing: {}'.format(err))
return 'payload malformed or mandatory data missing', 500
log.debug(json.dumps(request_json))
request_id = uuid.uuid4().hex
if not slack_token:
slack_token = get_secret(gcp_project_id, 'slack-token')
client = slack.WebClient(token=slack_token)
# Message posted to Slack as blocks
msg_blocks = [
construct_title_msg_blocks(title),
construct_field_msg_blocks(msg_fields,data),
construct_actions_msg_block(request_id)
]
write_to_datastore(request_id, approval_type, data)
# Send message to Slack
try:
client.chat_postMessage(
channel=slack_channel,
blocks=msg_blocks
)
except slack.errors.SlackApiError as err:
log.error('could not post to slack: {}'.format(err))
return 'error posting to slack', 500
return 'ok', 200
def api_key_valid(key_provided):
if (key_provided == notification_key):
return True
else:
return False
def api_key_long_enough(key_provided):
if len(key_provided) < 10:
return False
else:
return True
def construct_title_msg_blocks(title):
title_block = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "{}\n".format(title)
}
}
return title_block
def construct_field_msg_blocks(msg_fields, data):
msg_fields_list = []
for field in msg_fields:
try:
field_data = data[field]
except KeyError:
log.debug('could not find field {} in the data'.format(field))
continue
#TODO temporarily displaying fields as lower case
# add in display name
json_field = {
"type": "mrkdwn",
"text": "*{}*\n{}".format(field.lower(),field_data)
}
msg_fields_list.append(json_field)
field_block = {
"type": "section",
"fields": msg_fields_list
}
return field_block
def construct_actions_msg_block(request_id):
actions_block = {
"type": "actions",
"elements": [
{
"type": "button",
"action_id": "approve",
"text": {
"type": "plain_text",
"text": "Approve"
},
"style": "primary",
"value": request_id
},
{
"type": "button",
"action_id": "reject",
"text": {
"type": "plain_text",
"text": "Reject"
},
"style": "danger",
"value": request_id
}
]
}
return actions_block
def write_to_datastore(request_id, approval_type, data):
db = firestore.Client()
doc_ref = db.collection('approvals').document(request_id)
doc_ref.set({
'type': approval_type,
'status': 'approval_required',
'requested_at': firestore.SERVER_TIMESTAMP,
'data': data
})
| en | 0.920037 | # loading secrets as lazy globals # can't be global as this creates issues with automated deployment # as cold start on initial deployment can't access the variables # Message posted to Slack as blocks # Send message to Slack #TODO temporarily displaying fields as lower case # add in display name | 2.386642 | 2 |
trajopt/spacetime/builtinConstraints.py | uwgraphics/trajectoryoptimizer-public | 0 | 6621842 | from trajopt.spacetime.constraint import Constraint
import trajopt.utilities.adInterface as AD
import numpy as N
class Nail(Constraint):
    """
    simplest possible constraint - puts a point at a specific position
    """
    def __init__(self, _pointID, _position, _noZ):
        Constraint.__init__(self, 1, 0, _noZ)
        self.pointID = _pointID
        self.position = _position
    def constraint(self, points, **kwargs):
        point = points[self.pointID]
        # Equality residuals: point minus target, one entry per coordinate.
        deltas = [point[0] - self.position[0], point[1] - self.position[1]]
        if not self.noZ:
            deltas.append(point[2] - self.position[2])
        return deltas, []
    def __repr__(self):
        return "<Nail %d @ (%f,%f,%f)>" % (self.pointID, self.position[0], self.position[1], self.position[2])
class alignAxis(Constraint):
    """Equality constraint: frame `_pt`'s axis `_ax` must point along `_vec`."""
    def __init__(self, _pt, _ax, _vec):
        Constraint.__init__(self, 1, 0, False)
        self.pt = _pt
        self.ax = _ax
        self.vec = _vec
    def constraint(self, frames, **kwargs):
        axis_column = frames[self.pt][:, self.ax]
        # Residual is zero exactly when the dot product equals 1
        # (i.e. the axis coincides with the target direction).
        return [1 - N.dot(axis_column, self.vec)], []
class alignAxisGT(Constraint):
    """Inequality constraint: the axis/target dot product must exceed `_gtv`."""
    def __init__(self, _pt, _ax, _vec, _gtv=.99):
        Constraint.__init__(self, 0, 1, False)
        self.pt = _pt
        self.ax = _ax
        self.vec = _vec
        self.gtv = _gtv
    def constraint(self, frames, **kwargs):
        alignment = N.dot(frames[self.pt][:, self.ax], self.vec)
        # Inequality residual: nonnegative when alignment >= gtv.
        return [], [alignment - self.gtv]
class Marker(Constraint):
    """
    simplest possible constraint - a nail that doesn't actually connect to anything!
    """
    def __init__(self, _position, _noZ=False):
        Constraint.__init__(self, 1, 0, _noZ)
        self.position = _position
    def constraint(self, **kwargs):
        # A marker imposes nothing: both branches of the original returned
        # empty equality and inequality residual lists.
        return [], []
    def __repr__(self):
        # Bug fix: the old __repr__ referenced self.pointID, which Marker
        # never sets, so repr() raised AttributeError.
        return "<Marker @ (%f,%f,%f)>" % (self.position[0], self.position[1], self.position[2])
class AboveFloor(Constraint):
    """
    simplest inequality - only works in Y
    """
    def __init__(self, _pointID, _noZ, _floorHeight=0):
        Constraint.__init__(self, False, True, _noZ)
        self.pointID = _pointID
        self.floorHeight = _floorHeight
    def constraint(self, points, **kwargs):
        # Nonnegative exactly when the point's Y is at or above the floor.
        height_above_floor = points[self.pointID][1] - self.floorHeight
        return [], [height_above_floor]
class VariableBetween(Constraint):
    """
    kindof like a joint limit - but implemented as a constraint
    """
    def __init__(self, _varID, _minV, _maxV, _noZ):
        Constraint.__init__(self, False, True, _noZ)
        self.varID = _varID
        self.maxV = _maxV
        self.minV = _minV
    def constraint(self, state, **kwargs):
        value = state[self.varID]
        # Both residuals are >= 0 exactly when minV <= value <= maxV.
        return [], [value - self.minV, self.maxV - value]
class StateVelocity(Constraint):
    """
    makes sure a state variable is less than or equal to a max velocity
    _vMax is max velocity per frame (time parameterize frames outside the constraint)
    """
    def __init__(self, _varID, _vMax, noZ=False):
        Constraint.__init__(self, False, True, noZ)
        self.varID = _varID
        self.vMax = _vMax
        # Signals to the solver that this constraint consumes first
        # derivatives of the state.
        self.usesStateDerivatives = 1
    def constraint(self, stvel, **kwargs):
        speed = abs(stvel[self.varID])
        # Nonnegative exactly when |velocity| <= vMax.
        return [], [self.vMax - speed]
class pointDistance(Constraint):
    """
    makes sure a point is at least some distance from a fixed location
    """
    def __init__(self, _pointID, _r, _x, _y, _z, noZ):
        Constraint.__init__(self, False, True, noZ)
        self.pointID = _pointID
        self.r = _r
        self.x = _x
        self.y = _y
        self.z = _z
    def constraint(self, points, **kwargs):
        point = points[self.pointID]
        dx = self.x - point[0]
        dy = self.y - point[1]
        squared = dx * dx + dy * dy
        if not self.noZ:
            dz = self.z - point[2]
            squared = squared + dz * dz
        # AD.MATH.sqrt keeps the expression differentiable for the optimizer.
        distance = AD.MATH.sqrt(squared)
        return [], [distance - self.r]
class allPointsDistance(Constraint):
    """
    makes sure all points of a robot are at least some distance from a fixed location
    ignores the first points if you like
    """
    def __init__(self, _r, _x, _y, _z, _noZ, _firstPoint=1, numPoints=None):
        # NOTE(review): `numPoints` is accepted but never used — presumably
        # it was meant to cap the range in constraint(); confirm intent.
        Constraint.__init__(self, False, True, _noZ)
        self.first = _firstPoint
        self.r = _r
        self.x = _x
        self.y = _y
        self.z = _z
    def constraint(self, points, **kwargs):
        residuals = []
        for idx in range(self.first, len(points)):
            dx = self.x - points[idx][0]
            dy = self.y - points[idx][1]
            squared = dx * dx + dy * dy
            if not self.noZ:
                dz = self.z - points[idx][2]
                squared = squared + dz * dz
            # One inequality residual per point: >= 0 when far enough away.
            residuals.append(AD.MATH.sqrt(squared) - self.r)
        return [], residuals
| from trajopt.spacetime.constraint import Constraint
import trajopt.utilities.adInterface as AD
import numpy as N
class Nail(Constraint):
"""
simplest possible constraint - puts a point at a specific position
"""
def __init__(self, _pointID, _position, _noZ):
Constraint.__init__(self, 1, 0, _noZ)
self.pointID = _pointID
self.position = _position
def constraint(self, points, **kwargs):
if self.noZ:
return [points[self.pointID][0] - self.position[0], points[self.pointID][1] - self.position[1]], []
else:
return [points[self.pointID][0] - self.position[0], points[self.pointID][1] - self.position[1],
points[self.pointID][2] - self.position[2]], []
def __repr__(self):
return "<Nail %d @ (%f,%f,%f)>" % (self.pointID, self.position[0], self.position[1], self.position[2])
class alignAxis(Constraint):
def __init__(self, _pt, _ax, _vec):
Constraint.__init__(self, 1, 0, False)
self.pt = _pt
self.ax = _ax
self.vec = _vec
def constraint(self, frames, **kwargs):
return [1 - N.dot(frames[self.pt][:, self.ax], self.vec)], []
class alignAxisGT(Constraint):
def __init__(self, _pt, _ax, _vec, _gtv=.99):
Constraint.__init__(self, 0, 1, False)
self.pt = _pt
self.ax = _ax
self.vec = _vec
self.gtv = _gtv
def constraint(self, frames, **kwargs):
return [], [N.dot(frames[self.pt][:, self.ax], self.vec) - self.gtv]
class Marker(Constraint):
"""
simplest possible constraint - a nail that doesn't actually connect to anything!
"""
def __init__(self, _position, _noZ=False):
Constraint.__init__(self, 1, 0, _noZ)
self.position = _position
def constraint(self, **kwargs):
if self.noZ:
return [], []
else:
return [], []
def __repr__(self):
return "<Marker %d @ (%f,%f,%f)>" % (self.pointID, self.position[0], self.position[1], self.position[2])
class AboveFloor(Constraint):
"""
simplest inequality - only works in Y
"""
def __init__(self, _pointID, _noZ, _floorHeight=0):
Constraint.__init__(self, False, True, _noZ)
self.pointID = _pointID
self.floorHeight = _floorHeight
def constraint(self, points, **kwargs):
return [], [points[self.pointID][1] - self.floorHeight]
class VariableBetween(Constraint):
"""
kindof like a joint limit - but implemented as a constraint
"""
def __init__(self, _varID, _minV, _maxV, _noZ):
Constraint.__init__(self, False, True, _noZ)
self.varID = _varID
self.maxV = _maxV
self.minV = _minV
def constraint(self, state, **kwargs):
return [], [state[self.varID] - self.minV, self.maxV - state[self.varID]]
class StateVelocity(Constraint):
"""
makes sure a state variable is less than or equal to a max velocity
_vMax is max velocity per frame (time parameterize frames outside the constraint)
"""
def __init__(self, _varID, _vMax, noZ=False):
Constraint.__init__(self, False, True, noZ)
self.varID = _varID
self.vMax = _vMax
self.usesStateDerivatives = 1
def constraint(self, stvel, **kwargs):
return [], [self.vMax - abs(stvel[self.varID])]
class pointDistance(Constraint):
"""
makes sure a point is at least some distance from a fixed location
"""
def __init__(self, _pointID, _r, _x, _y, _z, noZ):
Constraint.__init__(self, False, True, noZ)
self.pointID = _pointID
self.r = _r
self.x = _x
self.y = _y
self.z = _z
def constraint(self, points, **kwargs):
dx = self.x - points[self.pointID][0]
dy = self.y - points[self.pointID][1]
if self.noZ:
dst = AD.MATH.sqrt(dx * dx + dy * dy)
else:
dz = self.z - points[self.pointID][2]
dst = AD.MATH.sqrt(dx * dx + dy * dy + dz * dz)
return [], [dst - self.r]
class allPointsDistance(Constraint):
"""
makes sure all points of a robot are at least some distance from a fixed location
ignores the first points if you like
"""
def __init__(self, _r, _x, _y, _z, _noZ, _firstPoint=1, numPoints=None):
Constraint.__init__(self, False, True, _noZ)
self.first = _firstPoint
self.r = _r
self.x = _x
self.y = _y
self.z = _z
def constraint(self, points, **kwargs):
lst = []
for i in range(self.first, len(points)):
dx = self.x - points[i][0]
dy = self.y - points[i][1]
if self.noZ:
dst = AD.MATH.sqrt(dx * dx + dy * dy)
else:
dz = self.z - points[i][2]
dst = AD.MATH.sqrt(dx * dx + dy * dy + dz * dz)
lst.append(dst - self.r)
return [], lst
| en | 0.927161 | simplest possible constraint - puts a point at a specific position simplest possible constraint - a nail that doesn't actually connect to anything! simplest inequality - only works in Y kindof like a joint limit - but implemented as a constraint makes sure a state variable is less than or equal to a max velocity _vMax is max velocity per frame (time parameterize frames outside the constraint) makes sure a point is at least some distance from a fixed location makes sure all points of a robot are at least some distance from a fixed location ignores the first points if you like | 3.021144 | 3 |
skills/dummy_skill/connector.py | deepmipt/assistant-base | 7 | 6621843 | #!/usr/bin/env python
import asyncio
import csv
import json
import logging
import random
import re
import time
from collections import defaultdict
from copy import deepcopy
from os import getenv
from random import choice
from typing import Callable, Dict
import sentry_sdk
from common.ignore_lists import FALSE_POS_NPS_LIST, BAD_NPS_LIST
from common.link import (
LIST_OF_SCRIPTED_TOPICS,
SKILLS_TO_BE_LINKED_EXCEPT_LOW_RATED,
DFF_WIKI_LINKTO,
skills_phrases_map,
compose_linkto_with_connection_phrase,
)
from common.sensitive import is_sensitive_situation
from common.universal_templates import opinion_request_question, is_switch_topic, if_choose_topic
from common.utils import get_topics, get_entities, is_no, get_intents, is_yes
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
sentry_sdk.init(getenv("SENTRY_DSN"))
ASK_QUESTION_PROB = 0.7
LINK_TO_PROB = 0.5
LINK_TO_PHRASES = sum([list(list_el) for list_el in skills_phrases_map.values()], [])
with open("skills/dummy_skill/google-english-no-swears.txt", "r") as f:
TOP_FREQUENT_UNIGRAMS = f.read().splitlines()[:1000]
np_ignore_expr = re.compile(
"(" + "|".join([r"\b%s\b" % word for word in BAD_NPS_LIST + TOP_FREQUENT_UNIGRAMS]) + ")", re.IGNORECASE
)
np_remove_expr = re.compile("(" + "|".join([r"\b%s\b" % word for word in FALSE_POS_NPS_LIST]) + ")", re.IGNORECASE)
rm_spaces_expr = re.compile(r"\s\s+")
ASK_ME_QUESTION_PATTERN = re.compile(
r"^(do you have (a )?question|(can you|could you)?ask me (something|anything|[a-z ]+question))", re.IGNORECASE
)
donotknow_answers = [
"What do you want to talk about?",
"I am a bit confused. What would you like to chat about?",
"Sorry, probably, I didn't get what you meant. What do you want to talk about?",
"Sorry, I didn't catch that. What would you like to chat about?",
]
with open("skills/dummy_skill/questions_map.json", "r") as f:
QUESTIONS_MAP = json.load(f)
with open("skills/dummy_skill/nounphrases_questions_map.json", "r") as f:
NP_QUESTIONS = json.load(f)
with open("skills/dummy_skill/facts_map.json", "r") as f:
FACTS_MAP = json.load(f)
with open("skills/dummy_skill/nounphrases_facts_map.json", "r") as f:
NP_FACTS = json.load(f)
class RandomTopicResponder:
    """Serves topic-tagged phrases loaded from a CSV file.

    The topic is picked at random from those requested; within a topic,
    phrases are returned in order, wrapping around when exhausted.
    """
    def __init__(self, filename, topic, text):
        self.topic_phrases = defaultdict(list)
        with open(filename, "r") as f:
            for row in csv.DictReader(f):
                self.topic_phrases[row[topic]].append(row[text])
        # Per-topic cursor so repeated calls rotate through the phrases.
        self.current_index = {name: 0 for name in self.topic_phrases}
        self.topics = set(self.topic_phrases)
    def get_random_text(self, topics):
        candidates = self.topics.intersection(topics)
        if not candidates:
            return ""
        picked = choice(list(candidates))
        cursor = self.current_index[picked]
        phrase = self.topic_phrases[picked][cursor]
        # Advance (and wrap) the cursor for the picked topic.
        self.current_index[picked] = (cursor + 1) % len(self.topic_phrases[picked])
        return phrase
questions_generator = RandomTopicResponder("skills/dummy_skill/questions_with_topics.csv", "topic", "question")
facts_generator = RandomTopicResponder("skills/dummy_skill/facts_with_topics.csv", "topic", "fact")
def get_link_to_question(dialog, all_prev_active_skills):
    """Generate `link_to` question updating bot attributes to one of the skills
    which were not active for the last [5] turns.
    Args:
        dialog: dp-agent dialog instance
    Returns:
        tuple of linked question and updated bot attributes with saved link to `used_links`
    """
    # get previous active skills
    human_attr = {}
    human_attr["used_links"] = dialog["human"]["attributes"].get("used_links", {})
    human_attr["used_wiki_topics"] = dialog["human"]["attributes"].get("used_wiki_topics", [])
    human_attr["disliked_skills"] = dialog["human"]["attributes"].get("disliked_skills", [])
    human_attr["prelinkto_connections"] = dialog["human"]["attributes"].get("prelinkto_connections", [])
    # most recent scripted skill among the last five active skills (if any)
    from_skill = None
    for from_skill in all_prev_active_skills[::-1][:5]:
        if from_skill in LIST_OF_SCRIPTED_TOPICS.keys():
            break
    # remove prev active skills from those we can link to
    available_links = list(set(SKILLS_TO_BE_LINKED_EXCEPT_LOW_RATED).difference(all_prev_active_skills))
    # use recommended skills
    # recommended_skills = dialog["human_utterances"][-1].get("annotations", []).get("topic_recommendation", [])
    # if len(set(available_links).intersection(recommended_skills)) > 0:
    #     available_links = list(set(recommended_skills).intersection(available_links))
    all_wiki_topics = set(DFF_WIKI_LINKTO.keys())
    # wiki topics (and "best" wiki topics) not yet offered to this user
    available_wiki_topics = list(all_wiki_topics.difference(set(human_attr["used_wiki_topics"])))
    available_best_wiki_topics = list(set(["art", "love", "anime"]).difference(set(human_attr["used_wiki_topics"])))
    if len(available_links) > 0:
        # if we still have skill to link to, try to generate linking question
        # {'phrase': result, 'skill': linkto_dict["skill"], "connection_phrase": connection}
        if len(available_best_wiki_topics) > 0 and random.uniform(0, 1) < 0.2:
            # 20% of the time prefer one of the hand-picked wiki topics
            chosen_topic = random.choice(available_best_wiki_topics)
            linked_question = DFF_WIKI_LINKTO[chosen_topic]
        else:
            link = compose_linkto_with_connection_phrase(
                available_links,
                human_attributes=human_attr,
                recent_active_skills=all_prev_active_skills,
                from_skill=from_skill,
            )
            # remember the offered phrase so it is not repeated later
            human_attr["used_links"][link["skill"]] = human_attr["used_links"].get(link["skill"], []) + [link["phrase"]]
            human_attr["prelinkto_connections"] += [link.get("connection_phrase", "")]
            linked_question = link["phrase"]
    elif len(available_wiki_topics) > 0:
        chosen_topic = random.choice(available_wiki_topics)
        linked_question = DFF_WIKI_LINKTO[chosen_topic]
    else:
        linked_question = ""
    return linked_question, human_attr
def generate_question_not_from_last_responses(dialog, all_prev_active_skills):
    """Fetch a linking question via `get_link_to_question`.

    Returns a tuple of (question text or "", updated human attributes).
    """
    linked_question, human_attr = get_link_to_question(dialog, all_prev_active_skills)
    result = linked_question if len(linked_question) > 0 else ""
    return result, human_attr
class DummySkillConnector:
    """Fallback skill connector: always proposes a generic answer and, when
    possible, a noun-phrase question, a linking question, and a noun-phrase fact."""

    async def send(self, payload: Dict, callback: Callable):
        """Compose candidate responses for the latest human utterance and call
        `callback` with [candidates, confidences, human_attrs, bot_attrs, attrs]."""
        try:
            st_time = time.time()
            dialog = deepcopy(payload["payload"]["dialogs"][0])
            is_sensitive_case = is_sensitive_situation(dialog["human_utterances"][-1])
            all_prev_active_skills = payload["payload"]["all_prev_active_skills"][0]
            curr_topics = get_topics(dialog["human_utterances"][-1], which="cobot_topics")
            curr_nounphrases = get_entities(dialog["human_utterances"][-1], only_named=False, with_labels=False)
            if len(curr_topics) == 0:
                curr_topics = ["Phatic"]
            logger.info(f"Found topics: {curr_topics}")
            # clean noun phrases: remove false positives, collapse spaces, and
            # blank out phrases matching the ignore list / frequent unigrams
            for i in range(len(curr_nounphrases)):
                np = re.sub(np_remove_expr, "", curr_nounphrases[i])
                np = re.sub(rm_spaces_expr, " ", np)
                if re.search(np_ignore_expr, np):
                    curr_nounphrases[i] = ""
                else:
                    curr_nounphrases[i] = np.strip()
            curr_nounphrases = [np for np in curr_nounphrases if len(np) > 0]
            logger.info(f"Found nounphrases: {curr_nounphrases}")
            # parallel lists; one entry per proposed candidate response
            cands = []
            confs = []
            human_attrs = []
            bot_attrs = []
            attrs = []
            # 1) always-available generic "don't know" answer
            cands += [choice(donotknow_answers)]
            confs += [0.5]
            attrs += [{"type": "dummy"}]
            human_attrs += [{}]
            bot_attrs += [{}]
            # 2) canned question sharing a noun phrase with the user's turn
            #    (only in long, non-sensitive dialogs)
            if len(dialog["utterances"]) > 14 and not is_sensitive_case:
                questions_same_nps = []
                for i, nphrase in enumerate(curr_nounphrases):
                    for q_id in NP_QUESTIONS.get(nphrase, []):
                        questions_same_nps += [QUESTIONS_MAP[str(q_id)]]
                if len(questions_same_nps) > 0:
                    logger.info("Found special nounphrases for questions. Return question with the same nounphrase.")
                    cands += [choice(questions_same_nps)]
                    confs += [0.5]
                    attrs += [{"type": "nounphrase_question"}]
                    human_attrs += [{}]
                    bot_attrs += [{}]
            # 3) linking question toward a not-recently-active skill
            link_to_question, human_attr = get_link_to_question(dialog, all_prev_active_skills)
            if link_to_question:
                _prev_bot_uttr = dialog["bot_utterances"][-2]["text"] if len(dialog["bot_utterances"]) > 1 else ""
                _bot_uttr = dialog["bot_utterances"][-1]["text"] if len(dialog["bot_utterances"]) > 0 else ""
                _prev_active_skill = (
                    dialog["bot_utterances"][-1]["active_skill"] if len(dialog["bot_utterances"]) > 0 else ""
                )
                # true when the user said "no" to the first link-to phrase in a row
                _no_to_first_linkto = any([phrase in _bot_uttr for phrase in LINK_TO_PHRASES])
                _no_to_first_linkto = _no_to_first_linkto and all(
                    [phrase not in _prev_bot_uttr for phrase in LINK_TO_PHRASES]
                )
                _no_to_first_linkto = _no_to_first_linkto and is_no(dialog["human_utterances"][-1])
                _no_to_first_linkto = _no_to_first_linkto and _prev_active_skill != "dff_friendship_skill"
                _if_switch_topic = is_switch_topic(dialog["human_utterances"][-1])
                bot_uttr_dict = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) > 0 else {}
                _if_choose_topic = if_choose_topic(dialog["human_utterances"][-1], bot_uttr_dict)
                _is_ask_me_something = ASK_ME_QUESTION_PATTERN.search(dialog["human_utterances"][-1]["text"])
                if len(dialog["human_utterances"]) > 1:
                    _was_cant_do = "cant_do" in get_intents(dialog["human_utterances"][-2]) and (
                        len(curr_nounphrases) == 0 or is_yes(dialog["human_utterances"][-1])
                    )
                    _was_cant_do_stop_it = "cant_do" in get_intents(dialog["human_utterances"][-2]) and is_no(
                        dialog["human_utterances"][-1]
                    )
                else:
                    _was_cant_do = False
                    _was_cant_do_stop_it = False
                if _was_cant_do_stop_it:
                    link_to_question = "Sorry, bye! #+#exit"
                    confs += [1.0]  # finish dialog request
                elif _no_to_first_linkto:
                    confs += [0.99]
                elif _is_ask_me_something or _if_switch_topic or _was_cant_do or _if_choose_topic:
                    confs += [1.0]  # Use it only as response selector retrieve skill output modifier
                else:
                    confs += [0.05]  # Use it only as response selector retrieve skill output modifier
                cands += [link_to_question]
                attrs += [{"type": "link_to_for_response_selector"}]
                human_attrs += [human_attr]
                bot_attrs += [{}]
            # 4) canned fact sharing a noun phrase with the user's turn
            facts_same_nps = []
            for i, nphrase in enumerate(curr_nounphrases):
                for fact_id in NP_FACTS.get(nphrase, []):
                    facts_same_nps += [
                        f"Well, now that you've mentioned {nphrase}, I've remembered this. {FACTS_MAP[str(fact_id)]}. "
                        f"{(opinion_request_question() if random.random() < ASK_QUESTION_PROB else '')}"
                    ]
            if len(facts_same_nps) > 0 and not is_sensitive_case:
                logger.info("Found special nounphrases for facts. Return fact with the same nounphrase.")
                cands += [choice(facts_same_nps)]
                confs += [0.5]
                attrs += [{"type": "nounphrase_fact"}]
                human_attrs += [{}]
                bot_attrs += [{}]
            total_time = time.time() - st_time
            logger.info(f"dummy_skill exec time: {total_time:.3f}s")
            asyncio.create_task(
                callback(task_id=payload["task_id"], response=[cands, confs, human_attrs, bot_attrs, attrs])
            )
        except Exception as e:
            # report the failure to the agent instead of raising
            logger.exception(e)
            sentry_sdk.capture_exception(e)
            asyncio.create_task(callback(task_id=payload["task_id"], response=e))
| #!/usr/bin/env python
import asyncio
import csv
import json
import logging
import random
import re
import time
from collections import defaultdict
from copy import deepcopy
from os import getenv
from random import choice
from typing import Callable, Dict
import sentry_sdk
from common.ignore_lists import FALSE_POS_NPS_LIST, BAD_NPS_LIST
from common.link import (
LIST_OF_SCRIPTED_TOPICS,
SKILLS_TO_BE_LINKED_EXCEPT_LOW_RATED,
DFF_WIKI_LINKTO,
skills_phrases_map,
compose_linkto_with_connection_phrase,
)
from common.sensitive import is_sensitive_situation
from common.universal_templates import opinion_request_question, is_switch_topic, if_choose_topic
from common.utils import get_topics, get_entities, is_no, get_intents, is_yes
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
sentry_sdk.init(getenv("SENTRY_DSN"))
# probability of appending an opinion-request question to a fact response
ASK_QUESTION_PROB = 0.7
LINK_TO_PROB = 0.5
# every known link-to phrase, flattened across all skills
LINK_TO_PHRASES = sum([list(list_el) for list_el in skills_phrases_map.values()], [])
# top-1000 frequent English unigrams, used to filter trivial noun phrases
with open("skills/dummy_skill/google-english-no-swears.txt", "r") as f:
    TOP_FREQUENT_UNIGRAMS = f.read().splitlines()[:1000]
# phrases matching this pattern are dropped entirely
np_ignore_expr = re.compile(
    "(" + "|".join([r"\b%s\b" % word for word in BAD_NPS_LIST + TOP_FREQUENT_UNIGRAMS]) + ")", re.IGNORECASE
)
# known false-positive noun phrases are stripped out of candidates
np_remove_expr = re.compile("(" + "|".join([r"\b%s\b" % word for word in FALSE_POS_NPS_LIST]) + ")", re.IGNORECASE)
rm_spaces_expr = re.compile(r"\s\s+")
# user explicitly asks the bot to ask them a question
ASK_ME_QUESTION_PATTERN = re.compile(
    r"^(do you have (a )?question|(can you|could you)?ask me (something|anything|[a-z ]+question))", re.IGNORECASE
)
# generic fallback answers returned when nothing better is available
donotknow_answers = [
    "What do you want to talk about?",
    "I am a bit confused. What would you like to chat about?",
    "Sorry, probably, I didn't get what you meant. What do you want to talk about?",
    "Sorry, I didn't catch that. What would you like to chat about?",
]
# canned questions/facts keyed by string id, plus noun-phrase -> id indexes
with open("skills/dummy_skill/questions_map.json", "r") as f:
    QUESTIONS_MAP = json.load(f)
with open("skills/dummy_skill/nounphrases_questions_map.json", "r") as f:
    NP_QUESTIONS = json.load(f)
with open("skills/dummy_skill/facts_map.json", "r") as f:
    FACTS_MAP = json.load(f)
with open("skills/dummy_skill/nounphrases_facts_map.json", "r") as f:
    NP_FACTS = json.load(f)
class RandomTopicResponder:
    """Serves phrases grouped by topic, cycling through each topic's list."""

    def __init__(self, filename, topic, text):
        """Load a CSV and index its *text* column by its *topic* column."""
        self.topic_phrases = defaultdict(list)
        with open(filename, "r") as csv_file:
            for record in csv.DictReader(csv_file):
                self.topic_phrases[record[topic]].append(record[text])
        # round-robin cursor: next phrase index per topic
        self.current_index = {topic_name: 0 for topic_name in self.topic_phrases}
        self.topics = set(self.topic_phrases)

    def get_random_text(self, topics):
        """Pick a random known topic among *topics*; return its next phrase or ""."""
        matching = self.topics & set(topics)
        if not matching:
            return ""
        picked = choice(list(matching))
        position = self.current_index[picked]
        phrase = self.topic_phrases[picked][position]
        # advance, wrapping to the start of the topic's phrase list
        self.current_index[picked] = (position + 1) % len(self.topic_phrases[picked])
        return phrase
# pre-built responders serving topic-tagged questions and facts from CSVs
questions_generator = RandomTopicResponder("skills/dummy_skill/questions_with_topics.csv", "topic", "question")
facts_generator = RandomTopicResponder("skills/dummy_skill/facts_with_topics.csv", "topic", "fact")
def get_link_to_question(dialog, all_prev_active_skills):
    """Generate `link_to` question updating bot attributes to one of the skills
    which were not active for the last [5] turns.
    Args:
        dialog: dp-agent dialog instance
    Returns:
        tuple of linked question and updated bot attributes with saved link to `used_links`
    """
    # get previous active skills
    human_attr = {}
    human_attr["used_links"] = dialog["human"]["attributes"].get("used_links", {})
    human_attr["used_wiki_topics"] = dialog["human"]["attributes"].get("used_wiki_topics", [])
    human_attr["disliked_skills"] = dialog["human"]["attributes"].get("disliked_skills", [])
    human_attr["prelinkto_connections"] = dialog["human"]["attributes"].get("prelinkto_connections", [])
    # most recent scripted skill among the last five active skills (if any)
    from_skill = None
    for from_skill in all_prev_active_skills[::-1][:5]:
        if from_skill in LIST_OF_SCRIPTED_TOPICS.keys():
            break
    # remove prev active skills from those we can link to
    available_links = list(set(SKILLS_TO_BE_LINKED_EXCEPT_LOW_RATED).difference(all_prev_active_skills))
    # use recommended skills
    # recommended_skills = dialog["human_utterances"][-1].get("annotations", []).get("topic_recommendation", [])
    # if len(set(available_links).intersection(recommended_skills)) > 0:
    #     available_links = list(set(recommended_skills).intersection(available_links))
    all_wiki_topics = set(DFF_WIKI_LINKTO.keys())
    # wiki topics (and "best" wiki topics) not yet offered to this user
    available_wiki_topics = list(all_wiki_topics.difference(set(human_attr["used_wiki_topics"])))
    available_best_wiki_topics = list(set(["art", "love", "anime"]).difference(set(human_attr["used_wiki_topics"])))
    if len(available_links) > 0:
        # if we still have skill to link to, try to generate linking question
        # {'phrase': result, 'skill': linkto_dict["skill"], "connection_phrase": connection}
        if len(available_best_wiki_topics) > 0 and random.uniform(0, 1) < 0.2:
            # 20% of the time prefer one of the hand-picked wiki topics
            chosen_topic = random.choice(available_best_wiki_topics)
            linked_question = DFF_WIKI_LINKTO[chosen_topic]
        else:
            link = compose_linkto_with_connection_phrase(
                available_links,
                human_attributes=human_attr,
                recent_active_skills=all_prev_active_skills,
                from_skill=from_skill,
            )
            # remember the offered phrase so it is not repeated later
            human_attr["used_links"][link["skill"]] = human_attr["used_links"].get(link["skill"], []) + [link["phrase"]]
            human_attr["prelinkto_connections"] += [link.get("connection_phrase", "")]
            linked_question = link["phrase"]
    elif len(available_wiki_topics) > 0:
        chosen_topic = random.choice(available_wiki_topics)
        linked_question = DFF_WIKI_LINKTO[chosen_topic]
    else:
        linked_question = ""
    return linked_question, human_attr
def generate_question_not_from_last_responses(dialog, all_prev_active_skills):
    """Wrapper over `get_link_to_question`; returns (question or "", human attrs)."""
    linked_question, human_attr = get_link_to_question(dialog, all_prev_active_skills)
    # `linked_question` is already "" when nothing is available, so this branch
    # merely restates that default.
    if len(linked_question) > 0:
        result = linked_question
    else:
        result = ""
    return result, human_attr
class DummySkillConnector:
    """Fallback skill connector: always proposes a generic answer and, when
    possible, a noun-phrase question, a linking question, and a noun-phrase fact."""

    async def send(self, payload: Dict, callback: Callable):
        """Compose candidate responses for the latest human utterance and call
        `callback` with [candidates, confidences, human_attrs, bot_attrs, attrs]."""
        try:
            st_time = time.time()
            dialog = deepcopy(payload["payload"]["dialogs"][0])
            is_sensitive_case = is_sensitive_situation(dialog["human_utterances"][-1])
            all_prev_active_skills = payload["payload"]["all_prev_active_skills"][0]
            curr_topics = get_topics(dialog["human_utterances"][-1], which="cobot_topics")
            curr_nounphrases = get_entities(dialog["human_utterances"][-1], only_named=False, with_labels=False)
            if len(curr_topics) == 0:
                curr_topics = ["Phatic"]
            logger.info(f"Found topics: {curr_topics}")
            # clean noun phrases: remove false positives, collapse spaces, and
            # blank out phrases matching the ignore list / frequent unigrams
            for i in range(len(curr_nounphrases)):
                np = re.sub(np_remove_expr, "", curr_nounphrases[i])
                np = re.sub(rm_spaces_expr, " ", np)
                if re.search(np_ignore_expr, np):
                    curr_nounphrases[i] = ""
                else:
                    curr_nounphrases[i] = np.strip()
            curr_nounphrases = [np for np in curr_nounphrases if len(np) > 0]
            logger.info(f"Found nounphrases: {curr_nounphrases}")
            # parallel lists; one entry per proposed candidate response
            cands = []
            confs = []
            human_attrs = []
            bot_attrs = []
            attrs = []
            # 1) always-available generic "don't know" answer
            cands += [choice(donotknow_answers)]
            confs += [0.5]
            attrs += [{"type": "dummy"}]
            human_attrs += [{}]
            bot_attrs += [{}]
            # 2) canned question sharing a noun phrase with the user's turn
            #    (only in long, non-sensitive dialogs)
            if len(dialog["utterances"]) > 14 and not is_sensitive_case:
                questions_same_nps = []
                for i, nphrase in enumerate(curr_nounphrases):
                    for q_id in NP_QUESTIONS.get(nphrase, []):
                        questions_same_nps += [QUESTIONS_MAP[str(q_id)]]
                if len(questions_same_nps) > 0:
                    logger.info("Found special nounphrases for questions. Return question with the same nounphrase.")
                    cands += [choice(questions_same_nps)]
                    confs += [0.5]
                    attrs += [{"type": "nounphrase_question"}]
                    human_attrs += [{}]
                    bot_attrs += [{}]
            # 3) linking question toward a not-recently-active skill
            link_to_question, human_attr = get_link_to_question(dialog, all_prev_active_skills)
            if link_to_question:
                _prev_bot_uttr = dialog["bot_utterances"][-2]["text"] if len(dialog["bot_utterances"]) > 1 else ""
                _bot_uttr = dialog["bot_utterances"][-1]["text"] if len(dialog["bot_utterances"]) > 0 else ""
                _prev_active_skill = (
                    dialog["bot_utterances"][-1]["active_skill"] if len(dialog["bot_utterances"]) > 0 else ""
                )
                # true when the user said "no" to the first link-to phrase in a row
                _no_to_first_linkto = any([phrase in _bot_uttr for phrase in LINK_TO_PHRASES])
                _no_to_first_linkto = _no_to_first_linkto and all(
                    [phrase not in _prev_bot_uttr for phrase in LINK_TO_PHRASES]
                )
                _no_to_first_linkto = _no_to_first_linkto and is_no(dialog["human_utterances"][-1])
                _no_to_first_linkto = _no_to_first_linkto and _prev_active_skill != "dff_friendship_skill"
                _if_switch_topic = is_switch_topic(dialog["human_utterances"][-1])
                bot_uttr_dict = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) > 0 else {}
                _if_choose_topic = if_choose_topic(dialog["human_utterances"][-1], bot_uttr_dict)
                _is_ask_me_something = ASK_ME_QUESTION_PATTERN.search(dialog["human_utterances"][-1]["text"])
                if len(dialog["human_utterances"]) > 1:
                    _was_cant_do = "cant_do" in get_intents(dialog["human_utterances"][-2]) and (
                        len(curr_nounphrases) == 0 or is_yes(dialog["human_utterances"][-1])
                    )
                    _was_cant_do_stop_it = "cant_do" in get_intents(dialog["human_utterances"][-2]) and is_no(
                        dialog["human_utterances"][-1]
                    )
                else:
                    _was_cant_do = False
                    _was_cant_do_stop_it = False
                if _was_cant_do_stop_it:
                    link_to_question = "Sorry, bye! #+#exit"
                    confs += [1.0]  # finish dialog request
                elif _no_to_first_linkto:
                    confs += [0.99]
                elif _is_ask_me_something or _if_switch_topic or _was_cant_do or _if_choose_topic:
                    confs += [1.0]  # Use it only as response selector retrieve skill output modifier
                else:
                    confs += [0.05]  # Use it only as response selector retrieve skill output modifier
                cands += [link_to_question]
                attrs += [{"type": "link_to_for_response_selector"}]
                human_attrs += [human_attr]
                bot_attrs += [{}]
            # 4) canned fact sharing a noun phrase with the user's turn
            facts_same_nps = []
            for i, nphrase in enumerate(curr_nounphrases):
                for fact_id in NP_FACTS.get(nphrase, []):
                    facts_same_nps += [
                        f"Well, now that you've mentioned {nphrase}, I've remembered this. {FACTS_MAP[str(fact_id)]}. "
                        f"{(opinion_request_question() if random.random() < ASK_QUESTION_PROB else '')}"
                    ]
            if len(facts_same_nps) > 0 and not is_sensitive_case:
                logger.info("Found special nounphrases for facts. Return fact with the same nounphrase.")
                cands += [choice(facts_same_nps)]
                confs += [0.5]
                attrs += [{"type": "nounphrase_fact"}]
                human_attrs += [{}]
                bot_attrs += [{}]
            total_time = time.time() - st_time
            logger.info(f"dummy_skill exec time: {total_time:.3f}s")
            asyncio.create_task(
                callback(task_id=payload["task_id"], response=[cands, confs, human_attrs, bot_attrs, attrs])
            )
        except Exception as e:
            # report the failure to the agent instead of raising
            logger.exception(e)
            sentry_sdk.capture_exception(e)
            asyncio.create_task(callback(task_id=payload["task_id"], response=e))
| en | 0.674026 | #!/usr/bin/env python Generate `link_to` question updating bot attributes to one of the skills which were not active for the last [5] turns. Args: dialog: dp-agent dialog instance Returns: tuple of linked question and updated bot attributes with saved link to `used_links` # get previous active skills # remove prev active skills from those we can link to # use recommended skills # recommended_skills = dialog["human_utterances"][-1].get("annotations", []).get("topic_recommendation", []) # if len(set(available_links).intersection(recommended_skills)) > 0: # available_links = list(set(recommended_skills).intersection(available_links)) # if we still have skill to link to, try to generate linking question # {'phrase': result, 'skill': linkto_dict["skill"], "connection_phrase": connection} #+#exit" # finish dialog request # Use it only as response selector retrieve skill output modifier # Use it only as response selector retrieve skill output modifier | 1.978563 | 2 |
NewWindow/main.py | tatonkoduje/pythonforbegginers | 0 | 6621844 | from tkinter import *
def create_window():
    """Destroy the main window and open a fresh top-level window."""
    window.destroy()
    # new_window = Toplevel()
    # NOTE(review): this creates a *second* Tk root rather than a Toplevel
    # child; multiple Tk() instances are a known tkinter antipattern — the
    # commented line above suggests Toplevel was considered. Confirm intent.
    new_window = Tk()
window = Tk()
window.geometry("800x600")
# button label is Polish for "Open a new window"
button = Button(window, text="Otwórz nowe okno", command=create_window)
button.place(x=200, y=100)
window.mainloop()
| from tkinter import *
def create_window():
    """Destroy the main window and open a fresh top-level window."""
    window.destroy()
    # new_window = Toplevel()
    # NOTE(review): this creates a *second* Tk root rather than a Toplevel
    # child; multiple Tk() instances are a known tkinter antipattern — the
    # commented line above suggests Toplevel was considered. Confirm intent.
    new_window = Tk()
window = Tk()
window.geometry("800x600")
# button label is Polish for "Open a new window"
button = Button(window, text="Otwórz nowe okno", command=create_window)
button.place(x=200, y=100)
window.mainloop()
| en | 0.46549 | # new_window = Toplevel() | 3.628526 | 4 |
asyncspinner/styles.py | Hegdahl/asyncspinner | 0 | 6621845 | '''Different ways spinner can look.'''
# Spinner styles — presumably one character per animation frame, played in
# order; confirm against the spinner consumer.
STRETCHING_SQUARE = '▀▜▟▄▖▖▌▛▜▐▗▗▄▙▛▀▝▝▐▟▙▌▘▘'
ROTATING_BAR = '|/╱-╲\\'
| '''Different ways spinner can look.'''
# Spinner styles — presumably one character per animation frame, played in
# order; confirm against the spinner consumer.
STRETCHING_SQUARE = '▀▜▟▄▖▖▌▛▜▐▗▗▄▙▛▀▝▝▐▟▙▌▘▘'
ROTATING_BAR = '|/╱-╲\\'
| en | 0.781671 | Different ways spinner can look. | 1.692294 | 2 |
python/diff-viewer/diffViewerSaved.py | heliosbryan/pet-projects | 0 | 6621846 | import sys
import re
import Tkinter as tk
from ScrolledText import ScrolledText
def convertToUserFriendly(line):
    """Rewrite raw git-diff marker lines into friendlier labels."""
    line = line.replace("--- /dev/null", "[New file]")
    line = line.replace("diff --git a/", "File: ")
    return line


def parseDiff(diffFile):
    """Parse `git diff` output into {filename: user-friendly diff text}.

    Sections are delimited by ``diff --git a/<a> b/<b>`` header lines; the
    "a/" path names each section. Any text before the first header is
    discarded (matching the original behaviour).
    """
    filenameParseRegex = "diff --git (.*)"
    print("Using file " + diffFile)  # parenthesized: valid on Python 2 and 3
    with open(diffFile, "r") as myfile:
        lineArray = myfile.readlines()
    diffFileData = {}
    currentLinesString = ""
    currentFilename = ""
    for line in lineArray:
        m = re.search(filenameParseRegex, line)
        # Only treat a line as a section header when it actually matches the
        # header pattern; the old `"diff" in line` test raised AttributeError
        # on content lines that merely contained the word "diff".
        if m:
            if currentFilename:
                diffFileData[currentFilename] = currentLinesString
            # take the "a/<path>" side and strip only its leading "a/" prefix
            # (the old replace-all corrupted paths containing "a/" inside)
            filenameA = m.group(1).split(" ")[0].replace("a/", "", 1)
            currentFilename = filenameA
            currentLinesString = ""
        currentLinesString += convertToUserFriendly(line)
    # Bug fix: the final file's text was previously dropped because it was
    # only flushed when the *next* header appeared.
    if currentFilename:
        diffFileData[currentFilename] = currentLinesString
    return diffFileData
# usage: python diffViewerSaved.py <diff-file>; the diff is parsed at import time
diffFile = sys.argv[1]
parsedDiff = parseDiff(diffFile)
class ThreesDiff(tk.Tk):
    """Tkinter UI: a listbox of changed files above a colorized diff pane.

    NOTE(review): targets Python 2 (imports Tkinter; `print` statement below).
    """

    # class-level defaults; rebound per-instance in __init__
    listOptions = None
    diffData = {}
    root = None
    textbox = None

    def __init__(self, diffData):
        """Build the window: file list on top, scrolled diff text below."""
        self.diffData = diffData
        self.listOptions = diffData.keys()
        # tk.Tk.__init__(self, *args, **kwargs)
        self.root = tk.Tk()
        lb = tk.Listbox(self.root)
        for option in self.listOptions:
            lb.insert("end", option)
        # both double-click and selection show the chosen file's diff
        lb.bind("<Double-Button-1>", self.OnDouble)
        lb.bind("<<ListboxSelect>>", self.OnDouble)
        lb.pack(side="top", fill="both", expand=True)
        text = ScrolledText(self.root, height=500, width=150, bg="black")
        # color tags: red for deletions, green for additions
        text.tag_configure("minus", foreground="red")
        text.tag_configure("plus", foreground="green")
        text.tag_configure("normal", foreground="grey")
        text.pack()
        self.textbox = text

    def OnDouble(self, event):
        """Display the diff of the selected file, coloring +/- lines."""
        widget = event.widget
        selection=widget.curselection()
        value = widget.get(selection[0])
        print "selection:", selection, ": '%s'" % value
        textContent = self.diffData[value]
        self.textbox.delete('1.0', tk.END)  # clear it
        splitContent = textContent.split("\n")
        for line in splitContent:
            line = line + "\n"
            if line.startswith("+"):
                self.textbox.insert(tk.END, line, ("plus"))
            elif line.startswith("-"):
                self.textbox.insert(tk.END, line, ("minus"))
            else:
                self.textbox.insert(tk.END, line, ("normal"))
if __name__ == "__main__":
    # build the viewer over the parsed diff and start the Tk event loop
    app = ThreesDiff(parsedDiff)
    app.root.mainloop()
| import sys
import re
import Tkinter as tk
from ScrolledText import ScrolledText
def convertToUserFriendly(line):
    """Translate raw git-diff marker text in *line* into readable labels."""
    friendly = line.replace("--- /dev/null", "[New file]")
    friendly = friendly.replace("diff --git a/", "File: ")
    return friendly
def parseDiff(diffFile):
    """Parse `git diff` output into {filename: user-friendly diff text}.

    Sections are delimited by ``diff --git a/<a> b/<b>`` header lines; the
    "a/" path names each section.
    """
    filenameParseRegex = "diff --git (.*)"
    print "Using file " + diffFile  # NOTE(review): Python 2 print statement
    lineArray = [];
    with open (diffFile, "r") as myfile:
        for line in myfile:
            lineArray.append(line)
    diffFileData = {}
    currentLinesString = ""
    currentFilename = ""
    lineCount = 0
    for line in lineArray:
        lineCount+=1
        # NOTE(review): any line *containing* "diff" is treated as a header;
        # a content line with the word "diff" but no header pattern makes
        # `m.group(1)` raise AttributeError.
        if("diff" in line):
            m = re.search(filenameParseRegex, line)
            filenames = m.group(1)
            # this is the "next" filename
            if(currentFilename == ""):
                # `header` collects the pre-header preamble but is never read
                header = currentLinesString
            else:
                diffFileData[currentFilename] = currentLinesString
            filenameA = filenames.split(" ")[0]
            filenameA = filenameA.replace("a/", "")
            currentFilename = filenameA
            currentLinesString = ""
        currentLinesString += convertToUserFriendly(line)
    # NOTE(review): the final section is never flushed into diffFileData —
    # accumulated text is only stored when the *next* header appears.
    return diffFileData
# usage: python diffViewerSaved.py <diff-file>; the diff is parsed at import time
diffFile = sys.argv[1]
parsedDiff = parseDiff(diffFile)
class ThreesDiff(tk.Tk):
    """Tkinter UI: a listbox of changed files above a colorized diff pane.

    NOTE(review): targets Python 2 (imports Tkinter; `print` statement below).
    """

    # class-level defaults; rebound per-instance in __init__
    listOptions = None
    diffData = {}
    root = None
    textbox = None

    def __init__(self, diffData):
        """Build the window: file list on top, scrolled diff text below."""
        self.diffData = diffData
        self.listOptions = diffData.keys()
        # tk.Tk.__init__(self, *args, **kwargs)
        self.root = tk.Tk()
        lb = tk.Listbox(self.root)
        for option in self.listOptions:
            lb.insert("end", option)
        # both double-click and selection show the chosen file's diff
        lb.bind("<Double-Button-1>", self.OnDouble)
        lb.bind("<<ListboxSelect>>", self.OnDouble)
        lb.pack(side="top", fill="both", expand=True)
        text = ScrolledText(self.root, height=500, width=150, bg="black")
        # color tags: red for deletions, green for additions
        text.tag_configure("minus", foreground="red")
        text.tag_configure("plus", foreground="green")
        text.tag_configure("normal", foreground="grey")
        text.pack()
        self.textbox = text

    def OnDouble(self, event):
        """Display the diff of the selected file, coloring +/- lines."""
        widget = event.widget
        selection=widget.curselection()
        value = widget.get(selection[0])
        print "selection:", selection, ": '%s'" % value
        textContent = self.diffData[value]
        self.textbox.delete('1.0', tk.END)  # clear it
        splitContent = textContent.split("\n")
        for line in splitContent:
            line = line + "\n"
            if line.startswith("+"):
                self.textbox.insert(tk.END, line, ("plus"))
            elif line.startswith("-"):
                self.textbox.insert(tk.END, line, ("minus"))
            else:
                self.textbox.insert(tk.END, line, ("normal"))
if __name__ == "__main__":
    # build the viewer over the parsed diff and start the Tk event loop
    app = ThreesDiff(parsedDiff)
    app.root.mainloop()
| en | 0.558252 | # this is the "next" filename #tk.Tk.__init__(self, *args, **kwargs) # clear it | 3.037459 | 3 |
Sem5SLLPYTHON/demo/file.py | tanisha03/5th-Sem-ISE | 1 | 6621847 | dict = {}
# NOTE(review): the script uses `sys`, `os`, and `reduce`, but no imports are
# visible here — presumably `import sys, os` (and `from functools import
# reduce` on Python 3) appear earlier; confirm. `dict` (assigned just above)
# shadows the builtin of the same name.
wordLen = []  # lengths of the 10 most frequent words
# usage: script expects exactly one argument — a path to a .txt file
if(len(sys.argv) != 2):
    print ("Invalid Arguments")
    sys.exit()
if(not(os.path.exists(sys.argv[0]))):
    # NOTE(review): checks argv[0] (the script itself), not argv[1] — likely a bug
    print ("Invalid File Path")
    sys.exit()
if(sys.argv[1].split('.')[-1] != "txt"):
    print ("Invalid File Format. Only TXT files allowed")
    sys.exit()
# count word frequencies (whitespace-split tokens)
with open(sys.argv[1]) as file:
    for line in file:
        for word in line.split():
            dict[word] = dict.get(word,0) + 1
print dict
# sort (word, count) pairs by count, descending
sortedDict = sorted(dict.items(), key=lambda dictItem: dictItem[1], reverse=True)
# report the top 10 words with their frequencies and lengths
for i in range(10):
    try:
        wordTuple = sortedDict[i]
        wordLen.append(len(wordTuple[0]))
        print (wordTuple[0], ", Frequency: " , wordTuple[1] , ", Length " , len(wordTuple[0]))
    except IndexError:
        print ("File has less than 10 words")
        break
print ("Lengths of 10 most frequently occuring words:")
print (wordLen)
# NOTE(review): `sum` shadows the builtin
sum = reduce(lambda x,y: x+y, wordLen)
print ("Average length of words: " , sum*1.0/len(wordLen)*1.0)
squares = [x**2 for x in wordLen if x%2 != 0]
print ("Squres of odd word lengths: ")
print (squares)
| dict = {}
# NOTE(review): the script uses `sys`, `os`, and `reduce`, but no imports are
# visible here — presumably `import sys, os` (and `from functools import
# reduce` on Python 3) appear earlier; confirm. `dict` (assigned just above)
# shadows the builtin of the same name.
wordLen = []  # lengths of the 10 most frequent words
# usage: script expects exactly one argument — a path to a .txt file
if(len(sys.argv) != 2):
    print ("Invalid Arguments")
    sys.exit()
if(not(os.path.exists(sys.argv[0]))):
    # NOTE(review): checks argv[0] (the script itself), not argv[1] — likely a bug
    print ("Invalid File Path")
    sys.exit()
if(sys.argv[1].split('.')[-1] != "txt"):
    print ("Invalid File Format. Only TXT files allowed")
    sys.exit()
# count word frequencies (whitespace-split tokens)
with open(sys.argv[1]) as file:
    for line in file:
        for word in line.split():
            dict[word] = dict.get(word,0) + 1
print dict
# sort (word, count) pairs by count, descending
sortedDict = sorted(dict.items(), key=lambda dictItem: dictItem[1], reverse=True)
# report the top 10 words with their frequencies and lengths
for i in range(10):
    try:
        wordTuple = sortedDict[i]
        wordLen.append(len(wordTuple[0]))
        print (wordTuple[0], ", Frequency: " , wordTuple[1] , ", Length " , len(wordTuple[0]))
    except IndexError:
        print ("File has less than 10 words")
        break
print ("Lengths of 10 most frequently occuring words:")
print (wordLen)
# NOTE(review): `sum` shadows the builtin
sum = reduce(lambda x,y: x+y, wordLen)
print ("Average length of words: " , sum*1.0/len(wordLen)*1.0)
squares = [x**2 for x in wordLen if x%2 != 0]
print ("Squres of odd word lengths: ")
print (squares)
| none | 1 | 3.841463 | 4 | |
setup.py | jgrss/geowombat | 38 | 6621848 | import setuptools
from pathlib import Path
from distutils.core import setup
from distutils.extension import Extension
import re
from collections import defaultdict
import subprocess
# build-time hard requirements
try:
    from Cython.Build import cythonize
except:
    # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
    # `except ImportError` would be safer.
    raise ImportError('Cython must be installed to build GeoWombat.')
try:
    import numpy as np
except:
    raise ImportError('NumPy must be installed to build GeoWombat.')
# Parse the version from the module.
# Source: https://github.com/mapbox/rasterio/blob/master/setup.py
with open('geowombat/version.py') as f:
    for line in f:
        if line.find("__version__") >= 0:
            version = line.split("=")[1].strip()
            version = version.strip('"')
            version = version.strip("'")
            continue
pkg_name = 'geowombat'
maintainer = ''
maintainer_email = ''
description = 'GeoWombat: Utilities for geospatial data'
git_url = 'https://github.com/jgrss/geowombat'
download_url = '{GIT}/archive/{PKG}-{VERSION}'.format(GIT=git_url, PKG=pkg_name, VERSION=version)
keywords = ['raster', 'satellite']
extras = 'extra-requirements.txt'
with open('README.md') as f:
    long_description = f.read()
with open('LICENSE.txt') as f:
    license_file = f.read()
with open('requirements.txt') as f:
    required_packages = f.readlines()
# Attempt to get the GDAL binary version
try:
    process = subprocess.Popen(['gdalinfo', '--version'], stdout=subprocess.PIPE, stderr=None)
    gdal_version = str(process.communicate()[0]).split(',')[0].split(' ')[1].strip()
except:
    gdal_version = None
# pin the GDAL Python binding to the installed binary version, if detected
if gdal_version:
    required_packages.append('GDAL=={GDAL_VERSION}\n'.format(GDAL_VERSION=gdal_version))
def get_extra_requires(path, add_all=True):
    """Build ``{extra_name: set_of_requirement_lines}`` from a tagged requirements file.

    Each non-comment line may end with ``: tag1, tag2``; the requirement is
    registered under each tag and under its own distribution name (the text
    before any ``<``/``=``/``>``). With ``add_all`` a synthetic ``all`` extra
    covering every requirement is appended.
    """
    extra_deps = defaultdict(set)
    with open(path) as fp:
        for requirement in fp:
            if not requirement.strip() or requirement.startswith('#'):
                continue
            tags = set()
            if ':' in requirement:
                requirement, tag_text = requirement.split(':')
                tags.update(tag.strip() for tag in tag_text.split(','))
            # always index the requirement under its own distribution name
            tags.add(re.split('[<=>]', requirement)[0])
            for tag in tags:
                extra_deps[tag].add(requirement)
    if add_all:
        extra_deps['all'] = set(vv for v in extra_deps.values() for vv in v)
    return extra_deps
def get_packages():
    """Discover every package shipped with the distribution."""
    discovered = setuptools.find_packages()
    return discovered
def get_package_data():
    """Non-Python data files to bundle with each package."""
    data_files = {
        '': ['*.md', '*.txt'],
        'data': ['*.png'],
        'geowombat': [
            'config.ini',
            'data/*.tif',
            'data/*.TIF',
            'data/*.gpkg',
            'data/*.tar.gz',
            'moving/*.so',
            'bin/*.tar.gz',
        ],
    }
    return data_files
def get_extensions():
    """Cython extension modules to compile (all built with OpenMP flags)."""
    extensions = [Extension('*',
                            sources=['geowombat/moving/_moving.pyx'],
                            extra_compile_args=['-fopenmp'],
                            extra_link_args=['-fopenmp'])]
    # optional modules: compiled only when their sources are present
    if Path('geowombat/moving/_test.pyx').is_file():
        extensions += [Extension('*',
                                 sources=['geowombat/moving/_test.pyx'],
                                 extra_compile_args=['-fopenmp'],
                                 extra_link_args=['-fopenmp'])]
    if Path('geowombat/radiometry/_fusion.pyx').is_file():
        extensions += [Extension('*',
                                 sources=['geowombat/radiometry/_fusion.pyx'],
                                 extra_compile_args=['-fopenmp'],
                                 extra_link_args=['-fopenmp'])]
    return extensions
def setup_package():
    """Assemble the distribution metadata and invoke distutils' `setup`."""
    # NumPy headers are needed to compile the Cython extensions
    include_dirs = [np.get_include()]
    metadata = dict(name=pkg_name,
                    maintainer=maintainer,
                    maintainer_email=maintainer_email,
                    description=description,
                    license=license_file,
                    version=version,
                    long_description=long_description,
                    packages=get_packages(),
                    package_data=get_package_data(),
                    ext_modules=cythonize(get_extensions()),
                    zip_safe=False,
                    keywords=' '.join(keywords),
                    url=git_url,
                    download_url=download_url,
                    install_requires=required_packages,
                    extras_require=get_extra_requires(extras),
                    include_dirs=include_dirs,
                    classifiers=['Intended Audience :: Science/Research',
                                 'License :: MIT',
                                 'Topic :: Scientific :: Remote Sensing',
                                 'Programming Language :: Cython',
                                 'Programming Language :: Python :: 3.6',
                                 'Programming Language :: Python :: 3.7',
                                 'Programming Language :: Python :: 3.8'])
    setup(**metadata)
if __name__ == '__main__':
    # script entry point: build extensions and run setup
    setup_package()
| import setuptools
from pathlib import Path
from distutils.core import setup
from distutils.extension import Extension
import re
from collections import defaultdict
import subprocess
# build-time hard requirements
try:
    from Cython.Build import cythonize
except:
    # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
    # `except ImportError` would be safer.
    raise ImportError('Cython must be installed to build GeoWombat.')
try:
    import numpy as np
except:
    raise ImportError('NumPy must be installed to build GeoWombat.')
# Parse the version from the module.
# Source: https://github.com/mapbox/rasterio/blob/master/setup.py
with open('geowombat/version.py') as f:
    for line in f:
        if line.find("__version__") >= 0:
            version = line.split("=")[1].strip()
            version = version.strip('"')
            version = version.strip("'")
            continue
pkg_name = 'geowombat'
maintainer = ''
maintainer_email = ''
description = 'GeoWombat: Utilities for geospatial data'
git_url = 'https://github.com/jgrss/geowombat'
download_url = '{GIT}/archive/{PKG}-{VERSION}'.format(GIT=git_url, PKG=pkg_name, VERSION=version)
keywords = ['raster', 'satellite']
extras = 'extra-requirements.txt'
with open('README.md') as f:
    long_description = f.read()
with open('LICENSE.txt') as f:
    license_file = f.read()
with open('requirements.txt') as f:
    required_packages = f.readlines()
# Attempt to get the GDAL binary version
try:
    process = subprocess.Popen(['gdalinfo', '--version'], stdout=subprocess.PIPE, stderr=None)
    gdal_version = str(process.communicate()[0]).split(',')[0].split(' ')[1].strip()
except:
    gdal_version = None
# pin the GDAL Python binding to the installed binary version, if detected
if gdal_version:
    required_packages.append('GDAL=={GDAL_VERSION}\n'.format(GDAL_VERSION=gdal_version))
def get_extra_requires(path, add_all=True):
    """Build a mapping of extras tag -> set of requirement lines.

    Each non-comment line in *path* is a requirement, optionally followed by
    ``: tag1, tag2`` naming explicit extras tags. Every requirement is also
    tagged with its own distribution name (the text before any ``<``, ``=``
    or ``>`` version specifier). When *add_all* is true, an ``all`` tag is
    added that unions every optional dependency.
    """
    extra_deps = defaultdict(set)
    with open(path) as fp:
        for line in fp:
            if not line.strip() or line.startswith('#'):
                continue  # skip blank lines and comments
            tags = set()
            if ':' in line:
                # "requirement: tag1, tag2" -> collect the explicit tags
                line, tag_csv = line.split(':')
                tags.update(tag.strip() for tag in tag_csv.split(','))
            # Also tag the requirement under its own (unversioned) name.
            tags.add(re.split('[<=>]', line)[0])
            for tag in tags:
                extra_deps[tag].add(line)
    if add_all:
        # `pip install pkg[all]` then pulls in every optional dependency.
        extra_deps['all'] = {dep for deps in extra_deps.values() for dep in deps}
    return extra_deps
def get_packages():
    """Return the list of Python packages discovered under the project root."""
    discovered = setuptools.find_packages()
    return list(discovered)
def get_package_data():
    """Map package names to the non-Python data files bundled with them."""
    geowombat_files = [
        'config.ini',
        'data/*.tif',
        'data/*.TIF',
        'data/*.gpkg',
        'data/*.tar.gz',
        'moving/*.so',
        'bin/*.tar.gz',
    ]
    return {
        '': ['*.md', '*.txt'],        # docs/licenses shipped at the root
        'data': ['*.png'],
        'geowombat': geowombat_files,  # config, sample rasters, prebuilt binaries
    }
def get_extensions():
    """Collect the Cython extension modules to compile.

    The moving-window extension is always built; the test and radiometric
    fusion extensions are included only when their ``.pyx`` sources exist
    on disk.
    """
    def _openmp_extension(pyx_source):
        # Every extension is compiled and linked with OpenMP enabled.
        return Extension('*',
                         sources=[pyx_source],
                         extra_compile_args=['-fopenmp'],
                         extra_link_args=['-fopenmp'])

    extensions = [_openmp_extension('geowombat/moving/_moving.pyx')]
    for optional_source in ('geowombat/moving/_test.pyx',
                            'geowombat/radiometry/_fusion.pyx'):
        if Path(optional_source).is_file():
            extensions.append(_openmp_extension(optional_source))
    return extensions
def setup_package():
    """Assemble the package metadata and invoke setup().

    Pulls together the module-level metadata (version, descriptions,
    requirement lists), the discovered packages and data files, and the
    cythonized extension modules, then hands everything to setup().
    """
    # NumPy headers are required to compile the Cython extensions.
    include_dirs = [np.get_include()]

    metadata = dict(name=pkg_name,
                    maintainer=maintainer,
                    maintainer_email=maintainer_email,
                    description=description,
                    license=license_file,
                    version=version,
                    long_description=long_description,
                    packages=get_packages(),
                    package_data=get_package_data(),
                    # Translate the .pyx sources into C extension modules.
                    ext_modules=cythonize(get_extensions()),
                    zip_safe=False,
                    keywords=' '.join(keywords),
                    url=git_url,
                    download_url=download_url,
                    install_requires=required_packages,
                    extras_require=get_extra_requires(extras),
                    include_dirs=include_dirs,
                    classifiers=['Intended Audience :: Science/Research',
                                 'License :: MIT',
                                 'Topic :: Scientific :: Remote Sensing',
                                 'Programming Language :: Cython',
                                 'Programming Language :: Python :: 3.6',
                                 'Programming Language :: Python :: 3.7',
                                 'Programming Language :: Python :: 3.8'])

    setup(**metadata)
# Build/install the package when this script is executed directly.
if __name__ == '__main__':
    setup_package()
| en | 0.715131 | # Parse the version from the module. # Source: https://github.com/mapbox/rasterio/blob/master/setup.py # Attempt to get the GDAL binary version # add tag `all` at the end | 1.959302 | 2 |
recirq/optimize/minimize_test.py | Coiner1909/ReCirq | 195 | 6621849 | <gh_stars>100-1000
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import recirq
def sum_of_squares(x):
    """Return the scalar sum of the squared entries of *x*."""
    squared = x ** 2
    return squared.sum().item()
def test_minimize():
    """Smoke-test recirq.optimize.minimize on a convex quadratic.

    Runs the model-gradient-descent ('mgd') optimizer and the
    Nelder-Mead method from a random 5-d start and checks that each
    reports the expected success message.
    """
    # Random starting point; sum_of_squares has its minimum at the origin.
    x0 = np.random.randn(5)

    # Model gradient descent with explicit hyperparameters.
    result = recirq.optimize.minimize(sum_of_squares,
                                      x0,
                                      method='mgd',
                                      sample_radius=1e-1,
                                      n_sample_points=21,
                                      rate=1e-1,
                                      tol=1e-7,
                                      known_values=None)
    assert result.message == 'Optimization converged successfully.'

    # Nelder-Mead dispatched through the same entry point.
    result = recirq.optimize.minimize(sum_of_squares, x0, method='Nelder-Mead')
    assert result.message == 'Optimization terminated successfully.'
| # Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import recirq
def sum_of_squares(x):
    """Scalar objective: the sum of the squared components of *x*."""
    return np.sum(np.multiply(x, x)).item()
def test_minimize():
    """Smoke-test recirq.optimize.minimize on a convex quadratic.

    Runs the model-gradient-descent ('mgd') optimizer and the
    Nelder-Mead method from a random 5-d start and checks that each
    reports the expected success message.
    """
    # Random starting point; sum_of_squares has its minimum at the origin.
    x0 = np.random.randn(5)

    # Model gradient descent with explicit hyperparameters.
    result = recirq.optimize.minimize(sum_of_squares,
                                      x0,
                                      method='mgd',
                                      sample_radius=1e-1,
                                      n_sample_points=21,
                                      rate=1e-1,
                                      tol=1e-7,
                                      known_values=None)
    assert result.message == 'Optimization converged successfully.'

    # Nelder-Mead dispatched through the same entry point.
    result = recirq.optimize.minimize(sum_of_squares, x0, method='Nelder-Mead')
    assert result.message == 'Optimization terminated successfully.'
assert result.message == 'Optimization terminated successfully.' | en | 0.856768 | # Copyright 2020 Google # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.368588 | 2 |
runtests.py | bgc-autumn/HydroSensorReader | 6 | 6621850 | """
File for running tests programmatically.
"""
import pytest
def main():
    """
    Run pytest tests.

    Executes the 'hydsensread' test suite with coverage enabled and
    propagates a non-zero pytest exit code via SystemExit so CI fails.
    """
    # -x: stop at first failure; -rw: include warnings in the summary;
    # --durations=10: report the ten slowest tests.
    errno = pytest.main(['-x', 'hydsensread', '-v', '-rw', '--durations=10',
                         '--cov=hydsensread'])
    if errno != 0:
        # Mirror pytest's exit status in this process's exit code.
        raise SystemExit(errno)


if __name__ == '__main__':
    main()
| """
File for running tests programmatically.
"""
import pytest
def main():
    """
    Run pytest tests.

    Executes the 'hydsensread' test suite with coverage enabled and
    propagates a non-zero pytest exit code via SystemExit so CI fails.
    """
    # -x: stop at first failure; -rw: include warnings in the summary;
    # --durations=10: report the ten slowest tests.
    errno = pytest.main(['-x', 'hydsensread', '-v', '-rw', '--durations=10',
                         '--cov=hydsensread'])
    if errno != 0:
        # Mirror pytest's exit status in this process's exit code.
        raise SystemExit(errno)


if __name__ == '__main__':
    main()
| en | 0.872097 | File for running tests programmatically. Run pytest tests. | 2.575586 | 3 |