seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14438218202 | import psycopg2
# Update one row of the "test" table and report how many rows were modified.
update_sql = "UPDATE test SET data = %s WHERE num = %s"
conn = None
try:
    # connect to the PostgreSQL database
    conn = psycopg2.connect(
        dbname='spacedys',
        host='localhost',
        user='spacedys',
        password='password')
    # create a new cursor
    cur = conn.cursor()
    # execute the UPDATE statement (values are passed as parameters, not interpolated)
    cur.execute(update_sql, ("ghjklm", 100))
    print("Rows modified:")
    print(cur.rowcount)
    # commit the changes to the database
    conn.commit()
    # close communication with the database
    cur.close()
except (Exception, psycopg2.DatabaseError) as error:
    print(error)
finally:
    # always release the connection, even if the update failed
    if conn is not None:
        conn.close()
| nicolacammillini/spacedys | docs/demos/db/update-pg.py | update-pg.py | py | 694 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "psycopg2.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "psycopg2.DatabaseError",
"line_number": 24,
"usage_type": "attribute"
}
] |
15872415911 | import torch
import numpy as np
from falkon import Falkon
from falkon.kernels import GaussianKernel
from falkon.options import FalkonOptions
from falkonhep.models import HEPModel
class FalkonHEPModel(HEPModel):
def create_labels(self, ref_size, data_size):
    """Build the training label vector: -1 for the reference sample,
    +1 for the data sample (both float64)."""
    reference_part = np.full(ref_size, -1.0, dtype=np.float64)
    data_part = np.full(data_size, 1.0, dtype=np.float64)
    return np.concatenate((reference_part, data_part))
def __loglikelihood(self, f):
    """Log-likelihood ratio log(p / n) for scores f in (-1, 1),
    where p = (f + 1) / 2 and n = (1 - f) / 2.

    (Removed the unused constant c = 1e-5, an abandoned smoothing term;
    note f == +/-1 still yields +/-inf or NaN, as in the original.)
    """
    p = (f + 1)/2
    n = (1 - f)/2
    return torch.log(p / n)
def predict(self, data):
    """Score *data* (a numpy array) with the fitted model and return
    per-sample log-likelihood ratios."""
    tensor_input = torch.from_numpy(data).contiguous()
    raw_scores = self.model.predict(tensor_input)
    return self.__loglikelihood(raw_scores)
def build_model(self, model_parameters, weight):
    """Prepare the Falkon configuration dict from *model_parameters*.

    Optional keys fall back to defaults (cg_tol, keops_active, maxiter,
    use_cpu, seed); 'sigma', 'penalty' and 'M' are required.
    Reference samples (label -1) are re-weighted by *weight*.
    """
    def weight_fun(Y, X, idx):
        # per-sample weights: 1 for data points, *weight* where Y == -1 (reference)
        wvec = torch.ones(Y.shape,dtype=Y.dtype)
        wvec[Y==-1] = weight
        return wvec
    cg_tol = model_parameters['cg_tol'] if 'cg_tol' in model_parameters else 1e-7
    keops_active = model_parameters['keops_active'] if 'keops_active' in model_parameters else "no"
    maxiter = model_parameters['maxiter'] if 'maxiter' in model_parameters else 10000000
    use_cpu = model_parameters['use_cpu'] if 'use_cpu' in model_parameters else False
    seed = model_parameters['seed'] if 'seed' in model_parameters else None
    kernel = GaussianKernel(torch.Tensor([model_parameters['sigma']]))
    configuration = {
        'kernel' : kernel,
        'penalty' : model_parameters['penalty'],
        'maxiter' : maxiter,
        'M' : model_parameters['M'],
        'options' : FalkonOptions(cg_tolerance=cg_tol, keops_active=keops_active, use_cpu=use_cpu),
        'weight_fn' : weight_fun,
        'seed' : seed
    }
self.model= Falkon(**configuration) | FalkonHEP/falkonhep | falkonhep/models/flkhep_model.py | flkhep_model.py | py | 1,858 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "falkonhep.models.HEPModel",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",... |
24969027415 | from typing import List
class Solution:
    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
        """For each day, return how many days until a strictly warmer
        temperature (0 if it never gets warmer).

        Monotonic decreasing stack of (index, value) pairs; each index is
        pushed and popped at most once, so the scan is O(n).
        """
        ans = [0] * len(temperatures)
        stack = []  # days still waiting for a warmer temperature
        for i, v in enumerate(temperatures):
            # pop every colder waiting day; the gap i - index is its answer
            while stack and stack[-1][1] < v:
                index, _ = stack.pop()
                ans[index] = i - index
            stack.append([i, v])
        # removed the leftover debug print of the stack on every iteration
        return ans
# Demo run; expected output: [1, 1, 4, 2, 1, 1, 0, 0]
sol = Solution()
print(sol.dailyTemperatures(temperatures=[73, 74, 75, 71, 69, 72, 76, 73]))
| inverseTrig/leet_code | 739_daily_temperatures.py | 739_daily_temperatures.py | py | 530 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
34212066405 | # https://www.acmicpc.net/problem/14502
# solution
# 1) ์์๋ก 3๊ฐ์ ๋ฒฝ์ ์ธ์ด๋ค(dfs)
# 2) ๋ฐ์ด๋ฌ์ค๋ฅผ ํผ๋จ๋ฆฐ๋ค(bfs)
# 3) ์์ ๊ตฌ์ญ์ ํฌ๊ธฐ๋ฅผ ๊ตฌํด์ ์ต๋๊ฐ์ ๊ฐฑ์ ํ๋ค
# 4) 1)๋ก ๋์๊ฐ ๋ชจ๋ ๊ฒฝ์ฐ์ ๋ํด ๋ฐ๋ณตํ๋ค
# 5) ์์ ๊ตฌ์ญ ํฌ๊ธฐ์ ์ต๋๊ฐ์ ์ถ๋ ฅํ๋ค
# TIL
# 2์ฐจ์ ๋ฐฐ์ด์์ ๊ฐ๋ฅํ ๋ชจ๋ ์กฐํฉ์ ์ฌ๊ท์ ์ผ๋ก ํ์ํ๋ ์ฝ๋( line:60~66 )
## -> ๋ชซ(i//m)๊ณผ ๋๋จธ์ง(i%m)๋ฅผ ์ด์ฉํ ํ์๊ณผ ์ฌ๊ทํจ์ ๋ด์ ๋ฐ๋ณต๋ฌธ ์๋ ํํ
import copy
from collections import deque
def calc_safe_area():
    """Count the cells of the global walled_map that are still 0 (safe)."""
    global walled_map
    return sum(
        1
        for r in range(N)
        for c in range(M)
        if walled_map[r][c] == 0
    )
def virus_spread(virus_lst_copy):
    """Breadth-first spread from every seed cell in *virus_lst_copy*,
    marking each reachable 0-cell of the global walled_map as 2."""
    global walled_map
    offsets = ((1, 0), (0, 1), (-1, 0), (0, -1))
    q = deque(virus_lst_copy)
    while q:
        x, y = q.popleft()
        for dx, dy in offsets:
            nx, ny = x + dx, y + dy
            # stay on the board and only infect empty (0) cells
            if 0 <= nx < N and 0 <= ny < M and walled_map[nx][ny] == 0:
                walled_map[nx][ny] = 2
                q.append((nx, ny))
def calculate(start, wall_cnt):  # place walls, spread the virus, then update the best safe area
    """Recursively try every combination of 3 extra walls on empty cells,
    starting from flat index *start*; updates the global max_area."""
    global lab_map, max_area, N, M
    if wall_cnt == 3:  # all three walls placed: simulate and score this layout
        global walled_map
        walled_map = copy.deepcopy(lab_map)
        virus_lst_copy = copy.deepcopy(virus_lst)
        virus_spread(virus_lst_copy)
        max_area = max(max_area, calc_safe_area())
        return
    # enumerate remaining cells via flat index i -> (i // M, i % M)
    for i in range(start, N*M):
        x = i//M
        y = i%M
        if lab_map[x][y] == 0:
            lab_map[x][y] = 1
            calculate(i+1,wall_cnt+1)
            lab_map[x][y] = 0  # backtrack
if __name__ == "__main__":
    # read the N x M lab grid (0 = empty, 1 = wall, 2 = virus)
    N, M = tuple(map(int, input().split()))
    lab_map = []
    for _ in range(N):
        lab_map.append(list(map(int, input().split())))
    # collect the initial virus positions once, up front
    virus_lst = []
    for i in range(N):
        for j in range(M):
            if lab_map[i][j] == 2:
                virus_lst.append((i,j))
    max_area = 0  # maximum size of the safe area
    calculate(0,0)
    print(max_area)
| chankoo/problem-solving | graph/14502-์ฐ๊ตฌ์.py | 14502-์ฐ๊ตฌ์.py | py | 2,358 | python | ko | code | 1 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 53,
"usage_type": "call"
}
] |
22377349542 | import pandas as pd
import matplotlib.pyplot as plt
import os # file muveletek
import datetime
from datetime import date
import time
import copy
import numpy as np
class nHop:
    """Aggregated statistics for a single next-hop address."""

    # class-level defaults (each instance shadows them in __init__)
    time_stamp = ''
    next_hop = ''
    count = 0
    per8 = 0
    adv_range = 0

    def __init__(self):
        self.time_stamp = ''
        self.next_hop = ''
        self.count = 0
        self.per8 = 0
        self.adv_range = 0

    def blank_sheet(self):
        """Reset every field except the accumulated advertised range."""
        self.time_stamp = ''
        self.next_hop = ''
        self.count = 0
        self.per8 = 0

    def tostring(self):
        """Render the record as a comma-separated line."""
        fields = (self.time_stamp, self.next_hop, self.count, self.per8, self.adv_range)
        return ",".join(str(field) for field in fields)
class Ip:
    """One FIB entry: an address/prefix pair and its next hop."""

    # class-level defaults (shadowed per instance where assigned)
    time_stamp = ''
    address = 0
    prefix = 0
    nh = 0
    msp = 0
    bin = ''

    def __init__(self, address, prefix, nh):
        self.address = address
        self.prefix = prefix
        self.nh = nh

    def tostring(self):
        """Render the entry as a tab-separated line."""
        parts = (self.bin, self.address, self.nh, self.prefix, self.msp)
        return "\t".join(str(part) for part in parts)

    def write_pre_bin(self):
        """Return only the first *prefix* bits of the binary representation."""
        return str(self.bin[0:int(self.prefix)])
def store_to_list(filepath):  # parse one FIB dump into Ip objects
    """Read a FIB dump ("addr/prefix<TAB>nexthop" per line) and return a list of Ip.

    The timestamp is derived from the file name suffix
    (..._YYYYMMDD.txt -> "YYYY-MM-DD").
    """
    lst = []
    with open(filepath) as fp:
        line = fp.readline()
        # skip the default-gateway entry
        # NOTE(review): line[8] == '0' assumes a fixed-width address field — confirm dump format
        if line[8] == '0':
            line = fp.readline()
        cnt = 1
        while line:
            # print("Line {}: {}".format(cnt, line.strip()))
            tmp = line.split("\t")
            tmp2 = tmp[0].split("/")
            p = Ip(tmp2[0], tmp2[1], tmp[1].strip())
            mydate= filepath.split("_")[-1].split('.')[0]
            p.time_stamp = mydate[0]+mydate[1]+mydate[2]+mydate[3]+'-'+mydate[4]+mydate[5]+'-'+mydate[6]+mydate[7]
            lst.append(p)
            cnt += 1
            line = fp.readline()
        fp.close()  # redundant: the with-block already closes fp
    return lst
def calc_per8(per8):
    """Express the address range of a /per8 prefix as a fraction of a /8 block."""
    covered_addresses = 2 ** (32 - int(per8))
    return covered_addresses / (2 ** 24)
# directory scanned for *.txt FIB dumps
location = "C:/o"
workFiles = []  # discovered input files
workList = []   # parsed Ip entries
hops = {}       # next-hop address -> aggregated nHop statistics
if __name__ == "__main__":
    start = datetime.datetime.now()
    print('start ' + str(start))
    # collect candidate *.txt files under `location`
    # NOTE(review): workFiles is filled but never consumed below — the input
    # path is hard-coded; confirm whether the loop over workFiles is intended.
    for root, dirs, files in os.walk(location):
        for file in files:
            if file.split('.')[-1] == 'txt':
                workFiles.append(root + '/' + file)
                print(root + '/' + file)
    # for filepath in workFiles:
    #     store_to_list(filepath, workList)
    workList = store_to_list("F:/cha6/in/bme_fib_20191101.txt")
    # aggregate per next hop: entry count, /8-fraction sum, advertised range
    for item in workList:
        if item.nh not in hops:
            # first occurrence of this next hop
            hop = nHop()
            hop.time_stamp = item.time_stamp
            hop.next_hop = item.nh
            hop.count = 1
            hop.per8 = calc_per8(item.prefix)
            hop.adv_range = 2 ** (32 - int(item.prefix))
            hops[item.nh] = hop
        else:
            # accumulate on top of the stored totals (a fresh object replaces the old one)
            hop = nHop()
            hop.time_stamp = item.time_stamp
            hop.next_hop = item.nh
            hop.count = hops[item.nh].count + 1
            hop.per8 = hops[item.nh].per8 + calc_per8(item.prefix)
            hop.adv_range = hops[item.nh].adv_range + 2 ** (32 - int(item.prefix))
            hops[item.nh] = hop
    for hop in hops.values():
        print(hop.tostring())
    print('finished ' + str(datetime.datetime.now() - start))
| Tomikaze/IP-stats-trends | venv/nexthop.py | nexthop.py | py | 2,796 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "datetime.datetim... |
264212609 | """
https://portswigger.net/web-security/cross-site-scripting/contexts/lab-onclick-event-angle-brackets-double-quotes-html-encoded-single-quotes-backslash-escaped
"""
import sys
import requests
from bs4 import BeautifulSoup
# Normalize the target host: drop a trailing slash, then remove the scheme.
# BUG FIX: the original used site.lstrip('https://'), but str.lstrip strips
# *characters* from the set {h,t,p,s,:,/} — not the prefix — so any host
# starting with one of those letters (e.g. "shop...") was mangled.
site = sys.argv[1]
if 'https://' in site:
    site = site.rstrip('/')
    if site.startswith('https://'):
        site = site[len('https://'):]
s = requests.Session()
def try_post(name, website_link):
    """Submit a blog comment with the given name/website payload.

    Fetches post 1 to harvest its CSRF token, then POSTs the comment.
    Used to probe how the 'website' field is escaped in the onclick handler.
    """
    blog_post_url = f'https://{site}/post?postId=1'
    resp = s.get(blog_post_url)
    soup = BeautifulSoup(resp.text,'html.parser')
    csrf = soup.find('input', {'name':'csrf'}).get('value')
    comment_url = f'https://{site}/post/comment'
    comment_data = {
        'csrf' : csrf,
        'postId' : '1',
        'comment' : 'Hello world!',
        'name' : name,
        'email' : 'baa@pdx.edu',
        'website': website_link
    }
    resp = s.post(comment_url, data=comment_data)
#try_post("single quote","https://pdx.edu/'")
#try_post("double quote",'https://pdx.edu/"')
#try_post("double quote HTML encoded",'https://pdx.edu/"')
#try_post("single quote HTML encoded",'https://pdx.edu/'')
try_post("exploit",'https://pdx.edu/' -alert(1)-'') | brandonaltermatt/penetration-testing-scripts | cross-site-scripting/contexts/onclick-event-angle-brackets-double-quotes-html-encoded-single-quotes-backslash-escaped.py | onclick-event-angle-brackets-double-quotes-html-encoded-single-quotes-backslash-escaped.py | py | 1,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
}
] |
71205780264 | from setuptools import setup, find_packages
from apache_spark import __version__
# extra dependencies used only by the test / tooling environment
tests_require = [
    'mock',
    'nose',
    'coverage',
    'yanc',
    'preggy',
    'tox',
    'ipdb',
    'coveralls',
    'sphinx',
]
# package metadata; the version string is imported from apache_spark.__version__
setup(
    name='apache_spark',
    version=__version__,
    description='Computational tools for Big Data (02807) - Apache Spark',
    long_description='''
Computational tools for Big Data (02807) - Apache Spark
''',
    keywords='spark pyspark euler graphs',
    author='Jose L. Bellod Cisneros & Kosai Al-Nakeeb',
    author_email='bellod.cisneros@gmail.com & kosai@cbs.dtu.dk',
    url='https://github.com/josl/ApacheSpark_02817',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: OS Independent',
    ],
    packages=find_packages(),
    include_package_data=False,
    install_requires=[
        # add your dependencies here
        # remember to use 'package-name>=x.y.z,<x.y+1.0' notation (this way you
        # get bugfixes)
    ],
    extras_require={
        'tests': tests_require,
    },
    entry_points={
        'console_scripts': [
            # add cli scripts here in this form:
            # 'apache_spark=apache_spark.cli:main',
        ],
    },
)
| yosoyubik/ApacheSpark_02817 | setup.py | setup.py | py | 1,598 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "apache_spark.__version__",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "setuptools.find_packages",
"line_number": 39,
"usage_type": "call"
}
] |
74134918182 | from cmath import nan
import re
from types import NoneType
from django.shortcuts import render, redirect
from django.http import JsonResponse
from mensajeria.models import (
Archivos,
Destinatarios,
Personas,
Maestras,
Peticion,
Paises,
Areas,
Secciones,
Grupos
)
from mensajeria.forms import ArchivosForm
import os
from datetime import datetime
from django.conf import settings
import pandas as pd
import json
from django.http import HttpResponse
from openpyxl import Workbook
from rest_framework.generics import CreateAPIView, GenericAPIView
from ...mixins.base import ResponseMixin
from ...serializers.auth.signup_serializers import SignupSerializers
from ...serializers.auth.signin_serializers import SigninSerializers
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import render
from django.http import HttpResponse
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
class Preparation(CreateAPIView, ResponseMixin):
    """Validate an uploaded Excel sheet of contacts before persisting.

    POST parses the 'archivo_excel_area' file, normalizes each row and
    classifies it as valido / errado / duplicado. Nothing is written to
    the database in this view.
    """
    serializer_class = SignupSerializers

    def get_validar_numero(self, numero):
        """Return True if *numero* is exactly ten digits."""
        regex = r"\d{10}$"
        return re.match(regex, numero) is not None

    def get_binary_search(self, arr, target):
        """Binary search: index of *target* in sorted *arr*, else -1."""
        left, right = 0, len(arr) - 1
        while left <= right:
            mid = (left + right) // 2
            if arr[mid] == target:
                return mid
            elif arr[mid] < target:
                left = mid + 1
            else:
                right = mid - 1
        return -1

    def get_data_null(self, txt):
        """Map NaN to an empty string, pass everything else through.

        NOTE(review): 'txt is nan' is an identity check against cmath.nan;
        NaN floats produced by pandas are different objects and will NOT
        match — confirm whether pd.isna was intended.
        """
        if txt is nan:
            return ""
        else:
            return txt

    def get_validar_campos(self, matriz):
        """Stringify every cell; None/NaN become empty strings."""
        return [
            [str(x) if x is not None and not pd.isna(x) else "" for x in row]
            for row in matriz
        ]

    def get_pais(self, paises_validos, paises_codigo, pais):
        """Resolve a country name to (country id, dial code).

        Unknown names fall back to index 38 / id 39 (Colombia).
        NOTE(review): pais_id = index + 1 assumes Paises primary keys are
        1-based and ordered by nombre — confirm against the table.
        """
        try:
            if pais == "Colombia":
                pais_id = 39
                codigo = paises_codigo[38]
            else:
                index = self.get_binary_search(paises_validos, pais)
                if index != -1:
                    pais_id = index + 1
                    codigo = paises_codigo[index]
                else:
                    pais_id = 39
                    codigo = paises_codigo[38]
        except ValueError:
            pais_id = 39
            codigo = paises_codigo[38]
        return pais_id, codigo

    def post(self, request, *args, **kwargs):
        """Parse and classify the uploaded spreadsheet; respond with
        validos / errados / duplicados buckets and their counts."""
        try:
            if "archivo_excel_area" in request.FILES:
                archivo = request.FILES["archivo_excel_area"]
                if archivo.name.endswith((".xls", ".xlsx")):
                    df = pd.read_excel(archivo, engine="openpyxl")
                    matriz = df.values.tolist()
                    errados = []
                    validos = []
                    validos_celular = []
                    duplicados = []
                    duplicados_add = []   # (unused below)
                    duplicados_docu = []  # (unused below)
                    num_validos = 0
                    num_duplicados = 0
                    num_errados = 0
                    # existing people/countries, pre-sorted for binary search
                    personas_actuales = Personas.objects.all().order_by("telefonowhatsapp")
                    paises_actuales = Paises.objects.all().order_by("nombre")
                    paises_validos = []
                    paises_codigo = []
                    validos_actuales = []
                    documentos_actuales = []
                    for pais_list in paises_actuales:
                        paises_validos.append(pais_list.nombre)
                        paises_codigo.append(pais_list.codigo)
                    for persona in personas_actuales:
                        validos_actuales.append(persona.telefonowhatsapp)
                        documentos_actuales.append(persona.identificacion)
                    matriz_data = self.get_validar_campos(matriz)
                    for row in matriz_data:
                        # fixed column layout of the uploaded sheet
                        nombre = row[0].strip()
                        segundo_nombre = row[1].strip()
                        apellido = row[2].strip()
                        segundo_apellido = row[3].strip()
                        celular = row[4].strip()
                        pais = row[5].strip()
                        documento = row[6].strip()
                        tipoidentificacion = row[7].strip()
                        fechanacimiento = row[8].strip()
                        ocupacion = row[9].strip()
                        area = row[10].strip()
                        seccion = row[11].strip()
                        grupo = row[12].strip()
                        pais_id, codigo = self.get_pais(paises_validos, paises_codigo, pais)
                        # birth dates must parse and not lie in the future
                        try:
                            fechanacimiento = datetime.strptime(
                                fechanacimiento, "%Y-%m-%d %H:%M:%S"
                            ).date()
                            if fechanacimiento > datetime.now().date():
                                fechanacimiento = ""
                        except ValueError:
                            fechanacimiento = ""
                        if not isinstance(celular, str):
                            celular = str(celular)
                        # NOTE(review): rstrip(".0") strips the characters '.' and '0',
                        # so "1000" becomes "1" — likely should only drop a trailing ".0"
                        documento = str(documento).rstrip(".0")
                        persona_new = {
                            "nombre": nombre,
                            "segundo_nombre": segundo_nombre,
                            "apellido": apellido,
                            "segundo_apellido": segundo_apellido,
                            "celular_whatsapp": celular,
                            "whatsapp_prefijo": codigo + celular,
                            "pais": pais_id,
                            "documento": documento,
                            "tipoidentificacion": tipoidentificacion,
                            "fechanacimiento": fechanacimiento,
                            "ocupacion": ocupacion,
                            "area": area,
                            "seccion": seccion,
                            "grupo": grupo,
                            "message": "",
                        }
                        if not self.get_validar_numero(persona_new["celular_whatsapp"]):
                            persona_new["message"] = "Numero de whatsapp invalido."
                            errados.append(persona_new)
                            num_errados = num_errados + 1
                        elif not self.get_validar_numero(persona_new["documento"]):
                            persona_new["message"] = "Numero de documento invalido."
                            errados.append(persona_new)
                            num_errados = num_errados + 1
                        else:
                            # re-sorted each row so the binary search stays valid
                            # (O(n log n) per row; a set would be O(1) membership)
                            validos_celular.sort()
                            index_validos = self.get_binary_search(validos_celular, celular)
                            if index_validos != -1:
                                persona_new["message"] = "Numero de whatsapp duplicado en el excel."
                                duplicados.append(persona_new)
                                num_duplicados = num_duplicados + 1
                            else:
                                index2 = self.get_binary_search(validos_actuales, celular)
                                if index2 != -1:
                                    persona_new["message"] = "Numero de whatsapp duplicado en la base de datos."
                                    duplicados.append(persona_new)
                                    num_duplicados = num_duplicados + 1
                                else:
                                    persona_new["message"] = "Datos correctos."
                                    validos_celular.append(celular)
                                    validos.append(persona_new)
                                    num_validos = num_validos + 1
                    data = {
                        "validos": {"count": num_validos, "data": validos},
                        "errados": {"count": num_errados, "data": errados},
                        "duplicados": {"count": num_duplicados, "data": duplicados},
                    }
                    self.data = data
                else:
                    self.error = "Archivo no encontrado"
                    self.status = status.HTTP_400_BAD_REQUEST
        except Exception as e:
            self.error = str(e)
            self.status = status.HTTP_400_BAD_REQUEST
        return Response(self.response_obj)
class Save(CreateAPIView, ResponseMixin):
serializer_class = SignupSerializers
def post_add_person(self, data, user):
    """Create a Personas row from *data* and link it to *data*'s grupo
    via a Destinatarios record. Returns True on success.

    NOTE(review): estado_id=596 appears throughout this module as the
    "active" state id — confirm against the Maestras table.
    """
    nueva_persona = Personas(
        nombre=data["nombre"],
        segundonombre=data["segundo_nombre"],
        apellido=data["apellido"],
        segundoapellido=data["segundo_apellido"],
        telefonomovil=data["celular_llamada"],
        telefonowhatsapp=data["celular_whatsapp"],
        identificacion=data["documento"],
        fechanacimiento=data["fechanacimiento"],
        pais_id=data["pais_id"],
        created_by=user,
    )
    nueva_persona.save()
    persona_id = nueva_persona.id
    nuevo_registro = Destinatarios(
        persona_id=persona_id, created_by=user, estado_id=596, grupo_id = data["grupo_id"]
    )
    nuevo_registro.save()
    return True
def get_binary_search(self, arr, target):
    """Classic binary search: index of *target* in sorted *arr*, else -1."""
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = arr[mid]
        if candidate == target:
            return mid
        if candidate < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
def get_search(self, lista, valor):
    """Linear search: first index of *valor* in *lista*, or -1 if absent."""
    if valor in lista:
        return lista.index(valor)
    return -1
def post(self, request, *args, **kwargs):
    """Persist previously validated contact rows.

    For each row, look up (or create, with estado 596) its Area, Seccion
    and Grupo, then create the person via post_add_person. Responds with
    validos / invalidos buckets and counts.
    """
    try:
        personas = request.data["destinatarios"]
        user = request.user
        validos = []
        num_validos = 0
        invalidos = []
        num_invalidos = 0
        # active catalog rows, used as lookup tables below
        areas_actuales = Areas.objects.filter(estado_id=596).order_by("nombre")
        secciones_actuales = Secciones.objects.filter(estado_id=596).order_by("nombre")
        grupos_actuales = Grupos.objects.filter(estado_id=596).order_by("nombre")
        areas_listado = []
        areas_listado_id = []
        secciones_listado = []
        secciones_listado_id = []
        grupos_listado = []
        grupos_listado_id = []
        grupos_listado_info = []  # (unused below)
        for area_list in areas_actuales:
            areas_listado_id.append(area_list.id)
            areas_listado.append(area_list.nombre)
        for seccion_list in secciones_actuales:
            secciones_listado_id.append(seccion_list.id)
            secciones_listado.append(seccion_list.nombre)
        for grupos_list in grupos_actuales:
            grupos_listado_id.append(grupos_list.id)
            grupos_listado.append(grupos_list.nombre)
        areas_array = []      # (unused below)
        secciones_array = []  # (unused below)
        grupos_array = []     # (unused below)
        for row in personas:
            # Area: reuse by name or create it
            index_validos = self.get_search(areas_listado, row['area'])
            if index_validos != -1:
                area_id = areas_listado_id[index_validos]
            else:
                areas_listado.append(row["area"])
                new_area = Areas()
                new_area.nombre = row["area"]
                new_area.descripcion = ""
                new_area.estado_id = 596
                new_area.created_by = user
                new_area.save()
                area_id = new_area.id
                areas_listado_id.append(area_id)
            # Seccion: reuse by name or create it under the resolved area
            index_seccion = self.get_search(secciones_listado, row['seccion'])
            if index_seccion != -1:
                seccion_id = secciones_listado_id[index_seccion]
            else:
                secciones_listado.append(row['seccion'])
                new_seccion = Secciones()
                new_seccion.nombre = row['seccion']
                new_seccion.area_id = area_id
                new_seccion.descripcion = ""
                new_seccion.estado_id = 596
                new_seccion.created_by = user
                new_seccion.save()
                seccion_id = new_seccion.id
                secciones_listado_id.append(seccion_id)
            # Grupo: reuse by name or create it under the resolved seccion
            index_grupo = self.get_search(grupos_listado, row['grupo'])
            if index_grupo != -1:
                grupo_id = grupos_listado_id[index_grupo]
            else:
                grupos_listado.append(row['grupo'])
                new_grupo = Grupos()
                new_grupo.nombre = row['grupo']
                new_grupo.seccion_id = seccion_id
                new_grupo.descripcion = ""
                new_grupo.estado_id = 596
                new_grupo.created_by = user
                new_grupo.save()
                grupo_id = new_grupo.id
                grupos_listado_id.append(grupo_id)
            # NOTE(review): this second lookup re-resolves the grupo that was
            # just found or created above — it appears redundant
            index_grupo = self.get_search(grupos_listado, row['grupo'])
            if index_grupo != -1:
                grupo_id = grupos_listado_id[index_grupo]
            persona_new = {
                "nombre": row["nombre"],
                "segundo_nombre": row["segundo_nombre"],
                "apellido": row["apellido"],
                "segundo_apellido": row["segundo_apellido"],
                "celular_whatsapp": row["whatsapp_prefijo"],
                "celular_llamada": row["celular_whatsapp"],
                "documento": row["documento"],
                "fechanacimiento": row["fechanacimiento"],
                "pais_id": row["pais"],
                "grupo_id": grupo_id,
                "seccion": row["seccion"],
                "grupo": row["grupo"]
            }
            # a failing insert only moves the row to the invalidos bucket
            try:
                self.post_add_person(persona_new, user)
                validos.append(persona_new)
                num_validos = num_validos + 1
            except Exception as e:
                invalidos.append(persona_new)
                num_invalidos = num_invalidos + 1
        data = {
            "validos": {"count": num_validos, "data": validos},
            "invalidos": {"count": num_invalidos, "data": invalidos},
            "error": num_invalidos,
        }
        self.data = data
        return Response(self.response_obj)
    except Exception as e:
        self.error = str(e)
        self.status = status.HTTP_400_BAD_REQUEST
return Response(self.response_obj) | YilberthAndres/masivo | mensajeria/views/carga/carga_distribucion.py | carga_distribucion.py | py | 16,140 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.generics.CreateAPIView",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "mixins.base.ResponseMixin",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "serializers.auth.signup_serializers.SignupSerializers",
"line_number": 41,
... |
20078510579 | from flask import Blueprint, render_template, url_for, request, send_from_directory
from flask_login import login_required, current_user
from werkzeug.utils import redirect
from crm import db
from ..utils.images_handler import save_image_uploads, get_image_list, load_image_uploads, delete_images
from .forms import NewNoteForm, DeleteNoteForm
from .models import Note
bp_note = Blueprint('note', __name__, template_folder='templates')
@bp_note.route('uploads/<filename>')
def uploads(filename):
    """Serve an uploaded note image by file name."""
    return load_image_uploads(filename)
@bp_note.route('/', methods=['GET'])
@login_required
def notes():
    """List all notes together with their authors."""
    notes = Note.get_all_with_users()  # NOTE(review): shadows this view function's name
    return render_template('notes.html', notes=notes, title='Notatki')
@bp_note.route('/<idx>', methods=['GET'])
@login_required
def note(idx):
    """Show one note's details, its delete form and attached images (if any)."""
    note = Note.get_by_id_with_user(idx)
    form = DeleteNoteForm()
    image_list = ''  # template receives '' when the note has no image
    if note.image:
        image_list = get_image_list(note)
    return render_template('note.html', note=note, form=form, image_list=image_list, title='Szczegรณลy notatki')
@bp_note.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Create a new note, optionally linked to a client/offer and with image uploads."""
    form = NewNoteForm()
    if form.validate_on_submit():
        note = Note(title=form.title.data,
                    description=form.description.data,
                    user_id=current_user.id,
                    expire_date=form.expire_date.data,
                    on_todo_list=form.on_todo_list.data)
        # optional links arrive via query string (?client_id=..., ?offer_id=...)
        if request.args.get('client_id', default=False, type=int):
            note.client_id = request.args.get('client_id')
        if request.args.get('offer_id', default=False, type=int):
            note.offer_id = request.args.get('offer_id')
        db.session.add(note)
        # flush before saving images — presumably so note.id exists for the
        # upload handler; confirm against save_image_uploads
        db.session.flush()
        filename = save_image_uploads(form.images, note, current_user.initials)
        if filename:
            note.image = filename
        db.session.commit()
        return redirect(url_for('note.notes'))
    return render_template('add_note.html', form=form, title='Dodawanie notatki')
@bp_note.route("/delete/<int:idx><delete_img>", methods=['GET'])
@login_required
def delete(idx, delete_img):
    """Delete note *idx*; when delete_img == 'True' also remove its images from disk.

    NOTE(review): delete_img is compared as the literal string 'True', and the
    deletion happens over GET — both are fragile; confirm intended contract.
    """
    note = Note.query.get(idx)
    db.session.delete(note)
    db.session.commit()
    if delete_img == 'True':
        delete_images(note)
    return redirect(url_for('note.notes'))
| tomasz-rzesikowski/crm | crm/note/views.py | views.py | py | 2,354 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.images_handler.load_image_uploads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Note.get_all_with_users",
"line_number": 22,
"usage_type": "call"
},
{... |
1938554685 | import sys
from typing import List
from kclvm.tools.lint.reporters.base_reporter import BaseReporter
from kclvm.tools.lint.message.message import Message
class FileReporter(BaseReporter):
    """Reporter that writes lint messages and a summary to the configured output file."""

    def __init__(self, linter, output=None, encoding=None):
        self.name = "file_reporter"
        # destination path comes from the linter configuration
        self.output_file = linter.config.output_path
        super().__init__(linter, output, encoding)

    def print_msg(self, msgs: List[Message] = None):
        """Write each message, per-code counts and a totals line to output_file.

        FIX: sys.stdout is now restored in a finally block — previously an
        exception raised while printing left stdout redirected to the
        (closed) report file for the rest of the process.
        """
        assert self.output_file
        with open(self.output_file, "w") as f:
            current = sys.stdout
            sys.stdout = f
            try:
                for msg in msgs:
                    print(msg)
                print()
                print("Check total {} files:".format(len(self.linter.file_list)))
                for k, v in self.linter.msgs_map.items():
                    print("{:<8}{}: {}".format(v, k, self.linter.MSGS[k][1]))
                print(f"KCL Lint: {len(self.linter.msgs)} problems")
            finally:
                sys.stdout = current
| kcl-lang/kcl-py | kclvm/tools/lint/reporters/file_reporter.py | file_reporter.py | py | 968 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "kclvm.tools.lint.reporters.base_reporter.BaseReporter",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "kclvm.tools.lint.message.message.Message",
"line_number": 14,
"usage_typ... |
73421284263 |
import tensorflow as tf
import keras as K
from keras import callbacks, optimizers
from keras import backend as KB
from keras.engine import Layer
from keras.layers import Activation
from keras.layers import LeakyReLU, Dense, Input, Embedding, Dropout, Reshape, Concatenate, MaxPooling1D, Flatten
from keras.layers import Flatten, SpatialDropout1D, Conv1D
from keras.models import Model
from keras.utils import plot_model
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.layers import Add
# capsule layers from Xifeng Guo
# https://github.com/XifengGuo/CapsNet-Keras
from capsulelayers import CapsuleLayer, PrimaryCap, PrimaryCap1D, Length, Mask
# Build the CapsNet model
def draw_capsnet_model(hyper_param, embedding_matrix=None, verbose=True):
    """
    Input: hyper parameters dictionary
    Construct:
        input layers : x , x_pos(o), x_capitalization(o)
        embedding matrix : use_glove or randomly initialize
        conv1 : first convolution layer
        primarycaps : conv2 and squash function applied
        ner_caps : make 8 ner capsules of specified dim
        out_pred : calc length of 8 ner capsules as 8 prob. predictions over 8 ner classes
    Returns:
        if decoding/reconstruction disabled --> a single keras.models.Model object
        if decoding/reconstruction enabled --> two keras.models.Model objects
        (train_model, eval_model)
    """
    # input layer(s); POS-tag and capitalization inputs are optional
    x = Input(shape=(hyper_param['maxlen'],), name='x')
    if hyper_param['use_pos_tags'] :
        x_pos = Input(shape=(hyper_param['maxlen'],hyper_param['poslen']), name='x_pos')
    if hyper_param['use_capitalization_info'] :
        x_capital = Input(shape=(hyper_param['maxlen'], hyper_param['capitallen']), name='x_capital')
    # embedding matrix: pre-trained GloVe weights or random initialization
    if hyper_param['use_glove']:
        embed = Embedding(hyper_param['max_features'], hyper_param['embed_dim'], weights=[embedding_matrix],\
            input_length=hyper_param['maxlen'], trainable=hyper_param['allow_glove_retrain'])(x)
    else:
        embed = Embedding(hyper_param['max_features'], hyper_param['embed_dim'], input_length=hyper_param['maxlen'],\
            embeddings_initializer="random_uniform" )(x)
    # concat embeddings with additional features
    if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info'] :
        embed = Concatenate(axis=-1)([embed, x_pos, x_capital])
    elif hyper_param['use_pos_tags'] and (not hyper_param['use_capitalization_info']) :
        embed = Concatenate(axis=-1)([embed, x_pos])
    elif (not hyper_param['use_pos_tags']) and hyper_param['use_capitalization_info'] :
        embed = Concatenate(axis=-1)([embed, x_capital])
    else :
        embed = embed
    # add dropout here
    if hyper_param['embed_dropout'] > 0.0:
        embed = SpatialDropout1D( hyper_param['embed_dropout'])(embed)
    # feed embeddings into conv1
    conv1 = Conv1D( filters=hyper_param['conv1_filters'], \
        kernel_size=hyper_param['conv1_kernel_size'],\
        strides=hyper_param['conv1_strides'], \
        padding=hyper_param['conv1_padding'],\
        activation='relu', name='conv1')(embed)
    # choose 2D or 1D primary-capsule layer; 2D needs a trailing channel axis
    if hyper_param['use_2D_primarycaps']:
        convShape = conv1.get_shape().as_list()
        conv1 = Reshape(( convShape[1], convShape[2], 1))(conv1)
        primaryCapLayer = PrimaryCap
    else:
        primaryCapLayer = PrimaryCap1D
    # make primary capsules
    primarycaps = primaryCapLayer(conv1, \
        dim_capsule=hyper_param['primarycaps_dim_capsule'],\
        n_channels=hyper_param['primarycaps_n_channels'],\
        kernel_size=hyper_param['primarycaps_kernel_size'], \
        strides=hyper_param['primarycaps_strides'], \
        padding=hyper_param['primarycaps_padding'])
    # make ner capsules (dynamic routing between primary and ner capsules)
    ner_caps = CapsuleLayer(num_capsule=hyper_param['ner_classes'], \
        dim_capsule=hyper_param['ner_capsule_dim'], \
        routings=hyper_param['num_dynamic_routing_passes'], \
        name='nercaps')(primarycaps)
    # replace each ner capsule with its length (class probability)
    out_pred = Length(name='out_pred')(ner_caps)
    if verbose:
        print ("x", x.get_shape())
        if hyper_param['use_pos_tags'] : print ("x_pos", x_pos.get_shape())
        if hyper_param['use_capitalization_info'] : print ("x_capital", x_capital.get_shape())
        print ("embed", embed.get_shape())
        print ("conv1", conv1.get_shape())
        print ("primarycaps", primarycaps.get_shape())
        print ("ner_caps", ner_caps.get_shape())
        print ("out_pred", out_pred.get_shape())
    if hyper_param['use_decoder']:
        decoder_y_cat = Input(shape=(hyper_param['ner_classes'],), name='decoder_y_cat')
        masked_by_y = Mask(name='masked_by_y')([ner_caps, decoder_y_cat]) # true label is used to mask during training
        masked = Mask()(ner_caps) # mask using capsule with maximal length for prediction
        # decoder for training
        train_decoder_dense1 = Dense(hyper_param['decoder_feed_forward_1'], activation='relu',\
            input_dim=hyper_param['ner_capsule_dim']*hyper_param['ner_classes'],\
            name='train_decoder_dense1')(masked_by_y)
        train_decoder_dense1_dropout = Dropout(hyper_param['decoder_dropout'])(train_decoder_dense1)
        train_decoder_dense2 = Dense(hyper_param['decoder_feed_forward_2'], activation='relu',\
            name='train_decoder_dense2')(train_decoder_dense1_dropout)
        train_decoder_dense2_dropout = Dropout(hyper_param['decoder_dropout'])(train_decoder_dense2)
        train_decoder_output = Dense(hyper_param['embed_dim'], activation=None,\
            name='train_decoder_output')(train_decoder_dense2_dropout)
        # decoder for evaluation (prediction); shares no weights with the training decoder
        eval_decoder_dense1 = Dense(hyper_param['decoder_feed_forward_1'], activation='relu',\
            input_dim=hyper_param['ner_capsule_dim']*hyper_param['ner_classes'],\
            name='eval_decoder_dense1')(masked)
        eval_decoder_dense2 = Dense(hyper_param['decoder_feed_forward_2'], activation='relu',\
            name='eval_decoder_dense2')(eval_decoder_dense1)
        eval_decoder_output = Dense(hyper_param['embed_dim'], activation=None,\
            name='eval_decoder_output')(eval_decoder_dense2)
        if verbose:
            print ("Decoder model enabled for GloVe vector deconstruction...")
            print ("decoder_y_cat", decoder_y_cat.get_shape())
            print ("masked_by_y", masked_by_y.get_shape())
            print ("train_decoder_dense1", train_decoder_dense1.get_shape())
            print ("train_decoder_dense1_dropout", train_decoder_dense1_dropout.get_shape())
            print ("train_decoder_dense2", train_decoder_dense2.get_shape())
            print ("train_decoder_dense2_dropout", train_decoder_dense2_dropout.get_shape())
            print ("train_decoder_output", train_decoder_output.get_shape())
            print ("masked", masked.get_shape())
            print ("eval_decoder_dense1", eval_decoder_dense1.get_shape())
            print ("eval_decoder_dense2", eval_decoder_dense2.get_shape())
            print ("eval_decoder_output", eval_decoder_output.get_shape())
    # construct input list
    if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info'] :
        input_list = [x, x_pos, x_capital]
    elif hyper_param['use_pos_tags'] and (not hyper_param['use_capitalization_info']) :
        input_list = [x, x_pos]
    elif (not hyper_param['use_pos_tags']) and hyper_param['use_capitalization_info'] :
        input_list = [x, x_capital]
    else:
        input_list = [x]
    if hyper_param['use_decoder']==False:
        print ("decoder/reconstruction DISabled")
        print ("returning 1 model")
        return Model(inputs=input_list, outputs=[out_pred])
    else :
        train_model = Model(inputs=input_list+[decoder_y_cat], outputs=[out_pred, train_decoder_output])
        eval_model = Model(inputs=input_list, outputs=[out_pred, eval_decoder_output])
        print ("decoder/reconstruction enabled")
        print ("returning a list of 2 models: train_model, eval_model")
        return train_model, eval_model
# marginal loss for the capsule classification head: present classes are pushed
# above length 0.9, absent classes below 0.1 (absent term down-weighted by 0.5).
def margin_loss(y_true, y_pred):
    """Return the batch-mean margin loss; y_true is one-hot, y_pred are capsule lengths."""
    L = y_true * KB.square(KB.maximum(0., 0.9 - y_pred)) + 0.5 * (1 - y_true) * KB.square(KB.maximum(0., y_pred - 0.1))
    return KB.mean(KB.sum(L, 1))
# decoder loss: negative cosine similarity between the true and reconstructed
# embedding vectors (summed over the batch, not averaged).
def custom_cosine_proximity(y_true, y_pred):
    """Return -sum(cos_sim(y_true, y_pred)) after L2-normalizing both along the last axis."""
    # NOTE(review): tf.nn.l2_normalize's `dim` argument was renamed `axis` in
    # newer TensorFlow releases -- confirm the pinned TF version still accepts it.
    y_true = tf.nn.l2_normalize(y_true, dim=-1)
    y_pred = tf.nn.l2_normalize(y_pred, dim=-1)
    return -KB.sum(y_true * y_pred)
# compile the model
def compile_caps_model(hyper_param, model):
    """
    Compile a drawn capsule-network model with its optimizer and loss(es).

    Input: keras.models.Model object, see draw_capsnet_model() output. This is
    a graph with all layers drawn and connected.
    do:
        compile with loss function and optimizer
    Returns: compiled model
    Raises: ValueError if hyper_param['optimizer'] is missing or unsupported.
    """
    optimizer_name = hyper_param.get('optimizer')
    if optimizer_name == "Adam":
        opt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    elif optimizer_name == "SGD":
        opt = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)
    elif optimizer_name is None:
        raise ValueError("No optimizer specified")
    else:
        # BUGFIX: an unrecognized optimizer name previously fell through all
        # branches and crashed later with a NameError on `opt`; fail fast with
        # a clear message instead.
        raise ValueError("Unsupported optimizer: {0}".format(optimizer_name))

    if hyper_param.get('use_decoder') == True:
        if hyper_param['loss_function'] == 'custom_cosine':
            decodeLoss = custom_cosine_proximity
        else:
            decodeLoss = hyper_param['loss_function']
        # Joint training: classification (margin) loss plus reconstruction
        # loss, the latter down-weighted by lam_recon.
        model_loss = [margin_loss, decodeLoss]
        loss_wts = [1, hyper_param['lam_recon']]
    else:
        model_loss = margin_loss
        loss_wts = None

    model.compile(optimizer=opt,
                  loss=model_loss,
                  loss_weights=loss_wts,
                  metrics={'out_pred': 'accuracy'})
    return model
def fit_model( hyper_param, model, modelName, trainX_dict, devX_list_arrayS, trainY_dict, devY_list_arrayS):
    """
    Train the compiled capsule model with CSV/TensorBoard logging,
    checkpointing and early stopping.

    trainX_dict / trainY_dict map input/output layer names to arrays
    (optionally including the decoder tensors); devX_list_arrayS /
    devY_list_arrayS are the positional validation arrays.
    Returns: the keras History object from model.fit.
    """
    #Saving weights and logging
    log = callbacks.CSVLogger(hyper_param['save_dir'] + '/{0}_historylog.csv'.format(modelName))
    tb = callbacks.TensorBoard(log_dir=hyper_param['save_dir'] + '/tensorboard-logs', \
                               batch_size=hyper_param['batch_size'], histogram_freq=hyper_param['debug'])
    checkpoint = callbacks.ModelCheckpoint(hyper_param['save_dir'] + '/weights-{epoch:02d}.h5', \
                                           save_best_only=True, save_weights_only=True, verbose=1)
    es = callbacks.EarlyStopping(patience=hyper_param['stopping_patience'], verbose=2)
    #lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.))

    model.summary()

    # Save a png of the model shapes and flow: requires pydot and graphviz
    # (conda install pydot graphviz, or pip install graphviz).
    # plot_model( model, to_file=hyper_param['save_dir'] + '/{0}.png'.format(modelName), show_shapes=True)

    data = model.fit( x=trainX_dict, # {'x':trainX, 'x_pos':trainX_pos_cat, 'x_capital':trainX_capitals_cat, (o)'decoder_y_cat':trainY_cat}
                      y=trainY_dict, #!{'out_pred':trainY_cat, (o)'decoder_output':train_decoderY}
                      batch_size=hyper_param['batch_size'],
                      epochs=hyper_param['epochs'],
                      validation_data=[devX_list_arrayS, devY_list_arrayS], #! [devX, devX_pos_cat, devX_capitals_cat, (o)devY_cat], [devY_cat, (o)dev_decoderY]
                      callbacks=[log, tb, checkpoint, es],
                      verbose=1)
    # BUGFIX: the History was previously assigned and silently discarded;
    # return it so callers can inspect/plot the training curves.
    return data
| Chucooleg/CapsNet_for_NER | code/buildCapsModel.py | buildCapsModel.py | py | 12,144 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "keras.layers.Input",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "keras.layers.Input",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "keras.layers... |
30753498437 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
CHECKPOINT_PATH = "model/lab2.h5"
def main():
    """Train a small CNN on CIFAR-10, save the model, and plot accuracy curves."""
    (train_images, train_labels), (test_images,
                                   test_labels) = datasets.cifar10.load_data()

    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0

    # Human-readable class labels in CIFAR-10 order; currently unused
    # (kept for reference / future plotting of predictions).
    class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']

    # Three conv blocks followed by a small dense classifier head.
    model = models.Sequential()
    model.add(layers.Conv2D(
        32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))

    # Integer class labels -> sparse categorical cross-entropy.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(train_images, train_labels, epochs=10,
                        validation_data=(test_images, test_labels))

    model.save(CHECKPOINT_PATH)
    model.evaluate(test_images, test_labels, verbose=2)

    # Plot train vs. validation accuracy per epoch.
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0.5, 1])
    plt.legend(loc='lower right')
    plt.show()
if __name__ == "__main__":
main()
| sontuphan/deep-learning-labs | labs/lab2.py | lab2.py | py | 1,755 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.datasets.cifar10.load_data",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.datasets.cifar10",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.datasets",
"line_number": 14,
"usage_... |
18262811618 | import requests
import json
import os
import pyperclip
from wox import Wox,WoxAPI
class Main(Wox):
    """Wox launcher plugin: search FFXIV items (cafemaker) and query
    market-board listings (universalis) for the Chinese data centers."""

    def query(self, key):
        """Handle a Wox query.

        `key` is split on spaces: "s <name>" searches items by name,
        "q <itemID> <serverNo>" lists market prices for that item.
        Returns a list of Wox result dicts.
        """
        results=[]
        key=key.split(' ')
        # Data-center shortcuts keyed by the digit the user types.
        servers={
            '1':"LuXingNiao",'2':"MoGuLi",'3':"MaoXiaoPang",
        }
        # World pinyin id -> Chinese display name.
        # NOTE(review): the 'LvRenZhanQiao' value below contains a hard line
        # break -- this looks like corruption of the original CJK literal;
        # confirm against the upstream repository.
        worlds={
            'HongYuHai':"็บข็ๆตท",
            'LaNuoXiYa':"ๆ่ฏบ่ฅฟไบ",
            'ChenXiWangZuo':"ๆจๆฆ็ๅบง",
            'YuZhouHeYin':"ๅฎๅฎๅ้ณ",
            'WoXianXiRan':"ๆฒไปๆฆๆ",
            'ShenYiZhiDi':"็ฅๆไนๅฐ",
            'HuanYingQunDao':"ๅนปๅฝฑ็พคๅฒ",
            'MengYaChi':"่่ฝๆฑ ",
            'BaiYinXiang':"็ฝ้ถไนก",
            'BaiJinHuanXiang':"็ฝ้ๅนป่ฑก",
            'ShenQuanHen':"็ฅๆณ็",
            'ChaoFengTing':"ๆฝฎ้ฃไบญ",
            'LvRenZhanQiao':"ๆ
ไบบๆ ๆกฅ",
            'FuXiaoZhiJian':"ๆๆไน้ด",
            'Longchaoshendian':"้พๅทข็ฅๆฎฟ",
            'MengYuBaoJing':"ๆขฆ็พฝๅฎๅข",
            'ZiShuiZhanQiao':"็ดซๆฐดๆ ๆกฅ",
            'YanXia':"ๅปถๅค",
            'JingYuZhuangYuan':"้่ฏญๅบๅญ",
            'MoDuNa':"ๆฉๆ็บณ",
            'HaiMaoChaWu':"ๆตท็ซ่ถๅฑ",
            'RouFengHaiWan':"ๆ้ฃๆตทๆนพ",
            'HuPoYuan':"็ฅ็ๅ"
        }
        if key[0]=='s':
            # Item-name search: cache each item icon locally, and wire the
            # result so clicking it re-queries with "item q <id> 1 (<name>)".
            recvListings=self.cafemaker(key[1])
            for item in recvListings:
                itemID,itemType,itemIconPath,itemKindName,itemName=self.itemSolve(item)
                if itemType=="Item":
                    itemIconUrl='https://cafemaker.wakingsands.com/{}'.format(itemIconPath)
                    if not os.path.exists('ItemIcon/{}.png'.format(itemID)):
                        with open('ItemIcon/{}.png'.format(itemID),'wb') as f:
                            f.write(requests.get(itemIconUrl).content)
                    results.append({
                        "Title":"{}".format(itemName),
                        "SubTitle":"{}".format(itemKindName),
                        "IcoPath":"ItemIcon/{}.png".format(itemID),
                        "JsonRPCAction":{
                            "method":"Wox.ChangeQuery",
                            "parameters":["item q {} 1 ({})".format(itemID,itemName),False],
                            "dontHideAfterAction":True
                        }
                    })
            return results
        if key[0]=='q':
            # Price query: one result row per market listing.
            data=self.universalis(servers[key[2]],key[1])
            for item in data:
                results.append({
                    "Title": "{} x {} = {}".format(item["pricePerUnit"],item["quantity"],item["total"]),
                    "SubTitle": "{}({})".format(item["retainerName"],worlds[item["worldName"]]),
                    "IcoPath":"Images/hq.png"if item["hq"] else "Images/nq.png"
                })
            return results

    def universalis(self, server, itemID):
        """Fetch market listings for itemID on a data center; False when not found."""
        api='https://universalis.app/api/{}/{}'.format(server,itemID)
        recv=requests.get(api)
        if recv.text=='Not Found':
            return False
        return json.loads(recv.text)["listings"]

    def cafemaker(self, queryName):
        """Search the cafemaker item database by name; returns the raw Results list."""
        u='https://cafemaker.wakingsands.com/search?columns=ID%2CUrlType%2CIcon%2CName%2CItemKind.Name&string={}'.format(queryName)
        return json.loads(requests.get(u).text)["Results"]

    def itemSolve(self, item):
        """Unpack a cafemaker result into (id, url_type, icon_path, kind_name, name)."""
        return item["ID"],item["UrlType"],item["Icon"],item["ItemKind"]["Name"],item["Name"]
if __name__ == "__main__":
Main()
| ShiomiyaRinne/FFXIV-Market-Query | main.py | main.py | py | 3,542 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "wox.Wox",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": ... |
9262633989 | from tts_websocketserver.tts_pipeline import get_pipeline
from rgws.interface import WebsocketServer
import json, asyncio
class TTSPipelineManager:
    """Builds and owns the TTS pipeline.

    `state` is a coarse lifecycle string read by the websocket server:
    Setup -> Ready -> Processing/Processed -> Disposing -> Disposed.
    """

    def __init__(self):
        self.state = "Setup"
        self.pipeline = get_pipeline()
        self.pipeline.build()
        self.state = "Ready"

    async def process_text(self, text):
        """Async generator: run the pipeline on `text`, yielding one response dict."""
        self.state = "Processing"
        yield {"resp": self.pipeline.predict(text)}
        self.state = "Processed"

    async def status(self):
        """Return the current lifecycle state string."""
        return self.state

    def __del__(self):
        # Best-effort cleanup; __del__ timing is interpreter-dependent.
        self.state = "Disposing"
        self.pipeline.dispose()
        self.state = "Disposed"
# building in global so it can also be imported from outside
# eg. from tts_websocketserver.tts_server import tts_pipeline
tts_pipeline = TTSPipelineManager()
class TTSServerInterface(WebsocketServer):
    """Websocket RPC front-end exposing the module-level tts_pipeline."""

    def __init__(self, **kwargs):
        super(TTSServerInterface, self).__init__(**kwargs)
        # Registered RPC endpoints; note self.status wraps the pipeline's
        # state in a generator so it streams like the other handlers.
        self._register(tts_pipeline.process_text)
        self._register(self.status)
        self._register(self.setup_model)

    async def _consumer(self, ws, message):
        """Dispatch one incoming message and stream every yielded chunk back as JSON."""
        ret = await self.dispatch(message)
        async for gen in ret:
            await ws.send_json(gen)

    async def status(self):
        """Yield the pipeline's current state string."""
        yield {"resp": tts_pipeline.state}

    async def setup_model(self):
        """Yield True once the pipeline has left its initial 'Setup' state."""
        yield {"resp": True if tts_pipeline.state != "Setup" else False}
def run():
    """Start the TTS websocket server on localhost:8787 and block forever."""
    s = TTSServerInterface(host="localhost", port=8787)
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated on newer Python versions -- confirm the target interpreter.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(s.run())
    loop.run_forever()
if __name__ == "__main__":
run() | TheSoundOfAIOSR/rg_text_to_sound | tts_websocketserver/src/tts_websocketserver/tts_server.py | tts_server.py | py | 1,659 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "tts_websocketserver.tts_pipeline.get_pipeline",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rgws.interface.WebsocketServer",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 54,
"usage_type": "ca... |
34316494896 | '''
Created on Mar 6, 2014
@author: kerem
'''
from random import randint
from random import random as rnd
class InteractionBox(object):
    """Picklable snapshot of a Leap Motion interaction box."""

    # All fields stay None until set() copies them from a native box.
    center = None
    width = None
    height = None
    depth = None
    is_valid = None

    def __init__(self):
        pass

    def set(self, box):
        """Copy the fields of a native interaction box; returns self (fluent)."""
        self.center = box.center.to_tuple()
        for attr in ("width", "height", "depth", "is_valid"):
            setattr(self, attr, getattr(box, attr))
        return self
class LeapHand(object):
    """Picklable snapshot of a Leap Motion hand.

    Built either from a native `hand` object or, with random=True, filled
    with random data (useful for tests); `position` pins the stabilized
    palm position in the random case.
    """

    id = None
    frame = None
    palm_position = None
    stabilized_palm_position = None
    palm_velocity = None
    palm_normal = None
    direction = None
    sphere_center = None
    sphere_radius = None
    time_visible = None
    is_valid = None
    _translation = None
    _translation_prob = None

    def __init__(self, hand, frame, random=False, position=None):
        if random:
            self._fill_random(position)
        else:
            self._copy_from(hand, frame)

    def _fill_random(self, position):
        """Populate the hand with random vectors; id is fixed to 1."""
        self.id = 1
        self.frame = None
        if position:
            assert len(position) == 3
            self.stabilized_palm_position = position
        else:
            self.stabilized_palm_position = (rnd() * 100, rnd() * 100, rnd() * 100)
        # Keep this draw order: seeded reproducibility depends on it.
        self.palm_normal = (rnd() * 100, rnd() * 100, rnd() * 100)
        self.palm_position = (rnd() * 100, rnd() * 100, rnd() * 100)
        self.palm_velocity = (rnd() * 100, rnd() * 100, rnd() * 100)
        self.direction = (rnd() * 100, rnd() * 100, rnd() * 100)
        self.sphere_center = (rnd() * 100, rnd() * 100, rnd() * 100)
        self.time_visible = randint(1, 100)
        self.is_valid = True

    def _copy_from(self, hand, frame):
        """Copy fields from a native Leap hand object into plain tuples."""
        self.id = hand.id
        self.frame = frame
        for attr in ("stabilized_palm_position", "palm_normal", "palm_position",
                     "palm_velocity", "direction", "sphere_center"):
            setattr(self, attr, getattr(hand, attr).to_tuple())
        self.time_visible = hand.time_visible
        self.is_valid = hand.is_valid
class LeapFrame(object):
    '''
    This is a pure python clone of the Leap Motion Controller
    frame objects. It is written to be picklable, unlike the
    original, SWIG-generated frame objects. It does not include
    anything finer-grained than hand movements i.e. no pointables,
    fingers or tools.
    '''
    id = None
    timestamp = None
    hands = None
    interaction_box = None
    current_frames_per_second = None
    is_valid = None

    def __init__(self, frame, random=False, position=None):
        '''
        Construct from a native `frame`, or (random=True) generate a frame
        with random data whose single hand is pinned to `position`.
        '''
        if random:
            self.id = randint(0,100)
            self.timestamp = randint(0,10000)
            self.hands = [LeapHand(None, None, random=random, position=position)]
            self.interaction_box = InteractionBox()
            self.interaction_box.center = (randint(0,100),randint(0,100),randint(0,100))
            self.interaction_box.width = randint(0,100)
            self.interaction_box.height = randint(0,100)
            self.interaction_box.depth = randint(0,100)
            self.current_frames_per_second = randint(0,100)
            self.is_valid = True
        else:
            self.id = frame.id
            self.timestamp = frame.timestamp
            self.hands = [LeapHand(hand, self) for hand in frame.hands]
            self.interaction_box = InteractionBox().set(frame.interaction_box)
            self.current_frames_per_second = frame.current_frames_per_second
            self.is_valid = frame.is_valid

    def get_stabilized_position(self):
        """
        Shortcut to getting the stabilized position of
        the first available hand (IndexError if there are no hands).
        """
        return self.hands[0].stabilized_palm_position

    def hand(self, id):
        """
        The Hand object with the specified ID in this frame.
        Stub: not implemented, always returns None.
        """
        pass

    def gestures(self, sinceFrame):
        """
        Returns a GestureList containing all gestures that have
        occured since the specified frame.
        Stub: not implemented, always returns None.
        """
        pass

    def toJSON(self):
        """Serialize this frame with jsonpickle (imported lazily)."""
        import jsonpickle
        return jsonpickle.encode(self)

    def __str__(self):
        return "LeapFrame({:.2f},{:.2f},{:.2f})".format(*self.get_stabilized_position())

    def __repr__(self):
        return str(self)
def generateRandomSignal(duration):
    """
    Return a list of `duration` jsonpickle-encoded strings, each encoding a
    random LeapFrame whose hand drifts one unit in x per frame (for tests).
    Note: the elements are JSON strings, not LeapFrame objects.
    """
    from jsonpickle import encode
    lst = []
    for i in range(duration):
        frame = LeapFrame(None, random=True, position=(40 + i, 30, 30))
        lst.append(encode(frame))
    return lst
| keryil/leaparticulatorqt | leaparticulator/data/frame.py | frame.py | py | 4,847 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.random",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_n... |
12575982154 | from distutils.log import INFO
from multiprocessing.sharedctypes import Value
from dash import Dash, html, dcc, Input, Output, State, dash_table
import dash
import dash_bootstrap_components as dbc
import LoraLogger
import pandas as pd
import json
import plotly.io as pio
pio.templates.default = "plotly_dark"
logger = LoraLogger.logger(__name__, INFO)
# app = Dash(__name__, use_pages=True, external_stylesheets=[dbc.themes.VAPOR])
app = Dash(__name__, use_pages=True)
# read data
data_df = pd.read_excel('utils/FAI_results.xlsx')
# Demographic radio button options mappings
with open('utils/demo_mapping.json', 'r') as f:
demo_mapping = json.load(f)
with open('utils/demo_groups.json', 'r') as f:
demo_groups = json.load(f)
with open('utils/fin_mapping.json', 'r') as f:
fin_mapping = json.load(f)
with open('utils/fin_groups.json', 'r') as f:
fin_groups = json.load(f)
l_break = html.P(
id='separate',
children='|',
style={
'display': 'inline',
'margin': '0 5px 0 -5px'
}
)
app.layout = html.Div([
html.H1(
children='PolyU & AskLora FAI Result',
style={
'textAlign': 'center',
'margin-top': '15px'
}
),
html.H5(
children='Grouping Method',
style={
'textAlign': 'center',
'margin-top': '15px'
}
),
dbc.RadioItems(
id='gp_method',
persistence=True,
persistence_type='memory',
options=[
{
"label": "Demographics",
"value": "demo"
},
{
"label": "Fintech Usage",
"value": "fin"
}
],
value="demo",
style={
'textAlign': 'center',
},
labelStyle={
'display': 'block',
},
inline=True
),
html.Div(
id='gp_details',
children=[
html.H5(
id='gp_title_big',
style={
'textAlign': 'center',
'margin-top': '15px'
}
),
dbc.RadioItems(
id='demograph',
persistence=True,
persistence_type='memory',
style={
'textAlign': 'center',
},
labelStyle={
'display': 'block',
},
inline=True,
switch=True
),
html.H5(
id='gp_title',
children=[
'Groups to show'
],
style={
'textAlign': 'center',
'margin-top': '15px'
}
),
html.Div(
children=[
html.Div(
id='group_options_bar',
children=[
html.Div(
id='group_options',
children=[
dbc.Checklist(
id='groups',
),
],
style={
'display': 'inline'
}
),
html.Div(
id='group_options_traces',
children=[
dbc.Checklist(
id='bin_fin',
),
],
style={
'display': 'inline'
}
),
],
style={
'display': 'inline'
}
),
dbc.Checklist(
id='percent',
persistence=True,
persistence_type='memory',
options=[
{'label': 'show percentage w.r.t. group', 'value': 'True'}],
value=[],
inline=True,
switch=True,
style={
'display': 'inline'
}
)
],
style={
'textAlign': 'center',
'margin-top': '15px'
}
)
]
),
html.Div(
children=[
html.Div(
dcc.Link(
dbc.Button(page['name'].replace('_', ' ')),
href=page["relative_path"],
style={
'margin': '10px',
}
),
style={
'display': 'inline-block'
}
)
for page in dash.page_registry.values() if not page['name'].startswith('Segment')
] + [
dbc.Button(
"Download Raw Data Excel Sheet", id="download_xlsx_btn", style={'margin': '10px'}, color='info'
),
dcc.Download(id="download_xlsx")
],
style={
'text-align': 'center',
'padding-top': '20px',
'padding-bottom': '20px',
}
),
html.Div(
[
html.Div(
dcc.Link(
dbc.Button(page['name'].replace(
'_', ' '), color='danger'),
href=page["relative_path"],
style={
'margin': '10px',
}
),
style={
'display': 'inline-block'
}
)
for page in dash.page_registry.values() if page['name'].startswith('Segment')
],
style={
'text-align': 'center',
'padding-top': '0px',
'padding-bottom': '20px',
}
),
dash.page_container
],
# style={'height': '100vh'}
)
@ app.callback(
    Output("download_xlsx", "data"),
    Input("download_xlsx_btn", "n_clicks"),
    prevent_initial_call=True
)
def down(n_clicks):
    """Serve the raw results workbook when the download button is clicked."""
    return dcc.send_file("./utils/FAI_results.xlsx")
@ app.callback(
    Output(component_id='demograph', component_property='options'),
    Output(component_id='demograph', component_property='value'),
    Input(component_id='gp_method', component_property='value')
)
def update_group_options(method):
    """
    Populate the category radio items for the chosen grouping method.

    Returns (options, value): the radio options and the default selection,
    which is the shared 'all respondents' entry (demo_mapping[-1]) for
    both methods.
    """
    if method == "demo":
        options = [demo_mapping[-1]] + demo_mapping[0:5]
    elif method == "fin":
        options = [demo_mapping[-1]] + fin_mapping[4:9]
    else:
        # BUGFIX: previously an unknown value fell through and implicitly
        # returned None for both outputs; fail loudly instead.
        raise ValueError("Unknown grouping method: {}".format(method))
    return options, demo_mapping[-1]['value']
@ app.callback(
    Output(component_id='group_options', component_property='children'),
    Output(component_id='group_options_traces', component_property='children'),
    Output(component_id='gp_title', component_property='style'),
    Output(component_id='gp_title_big', component_property='children'),
    [
        Input(component_id='demograph', component_property='value'),
        Input(component_id='gp_method', component_property='value')
    ]
)
def update_groups(demo, gp_method):
    """
    Rebuild the group checklist, the binary-users toggle and the section
    titles whenever the category (`demo`) or grouping method changes.

    Returns (group checkboxes, traces children, gp_title style, big title).
    """

    def _binary_user_toggle(display):
        # The 'show binary users' switch is only meaningful (visible) for
        # fintech-usage grouping; otherwise it is rendered hidden.
        return dbc.Checklist(
            id='bin_fin',
            persistence=True,
            persistence_type='memory',
            options=[
                {'label': 'show binary users', 'value': 'False'}],
            value=[],
            inline=True,
            switch=True,
            style={
                'display': display
            }
        )

    if gp_method == 'demo':
        groups = demo_groups
        gp_title = 'Demographic category to analyze'
    elif gp_method == 'fin':
        groups = fin_groups
        gp_title = 'Fintech usage to analyze'
    else:
        raise ValueError("Unknown grouping method: {}".format(gp_method))

    if demo == demo_mapping[-1]['value']:
        # "All respondents": render a single hidden checkbox so the component
        # tree stays stable while nothing is selectable.
        # BUGFIX: the original had a trailing comma here, accidentally
        # wrapping the Checklist in a 1-tuple.
        checkboxes = dbc.Checklist(
            id='groups',
            persistence=True,
            persistence_type='memory',
            options=demo_groups[demo][0:1],
            style={
                'textAlign': 'center',
                'display': 'none'
            },
            value=[bool(demo_groups[demo][0]['value'])],
            inline=True
        )
        return checkboxes, _binary_user_toggle('none'), {'display': 'none'}, gp_title

    # Regular category: all of its groups, pre-selected.
    checkboxes = dbc.Checklist(
        id='groups',
        persistence=True,
        persistence_type='memory',
        options=groups[demo],
        style={
            'textAlign': 'center',
            'display': 'inline'
        },
        value=[l['value'] for l in groups[demo]],
        inline=True
    )
    display = 'inline' if gp_method == 'fin' else 'none'
    return checkboxes, [l_break, _binary_user_toggle(display)], {'textAlign': 'center', 'margin-top': '15px'}, gp_title
if __name__ == '__main__':
app.run_server(host="0.0.0.0", port=8051)
| adrwong/FAI | app.py | app.py | py | 10,096 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "plotly.io.templates",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "plotly.io",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "LoraLogger.logger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "distutils.log.IN... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/3/27 16:43
# @Author  : TanLHHH
# @File    : tieba.py
# @Software: PyCharm
# Scrape the first page of the Baidu Tieba forum for "novel coronavirus"
# (URL-encoded kw) and extract thread ids/titles and author profile paths
# with regexes over the raw HTML.

import requests,re

# pn=0 selects the first page of the thread list.
url = "https://tieba.baidu.com/f?kw=%E6%96%B0%E5%9E%8B%E5%86%A0%E7%8A%B6%E7%97%85%E6%AF%92&ie=utf-8&pn=0"
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
    "Host": "tieba.baidu.com"
}
res = requests.get(url=url,headers=headers)
#print(res.content.decode('utf-8'))

# (thread id, title) pairs from the thread-list anchors.
pat = '<a rel="noreferrer" href="/p/(.*?)" title="(.*?)" target="_blank" class="j_th_tit ">'
title_list = re.compile(pat, re.S).findall(res.content.decode('utf-8'))

#url_pat = 'class="frs-author-name j_user_card " href="(.*?)" target="_blank">'
# Author profile paths from the "thread author" tooltips.
# NOTE(review): the pattern literal below contains a hard line break --
# this looks like corruption of the original CJK text; confirm upstream.
pat = 'title="ไธป้ขไฝ่
:.*?".*? href="/home/main/(.*?)" target="'
url_list = re.compile(pat,re.S).findall(res.content.decode('utf-8'))
print(res.content.decode('utf-8'))
# print(res.status_code)
# print(title_list)
# print(title_list[0][0],title_list[0][1])
print(url_list)
print(len(url_list))
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 30,... |
37071867440 | from flask import Flask,render_template,redirect,url_for
from flask_bootstrap import Bootstrap
from detail_collector import DetailCollector
app = Flask(__name__)
# BUGFIX: Flask reads the all-caps 'SECRET_KEY' config entry; the previous
# 'Secret_KEY' spelling was silently ignored, so sessions/flash had no key.
# Consider loading the key from an environment variable instead of source.
app.config['SECRET_KEY'] = '83hs293C0926jcw2FJ893Bd3E'
Bootstrap(app)
# Shared portfolio data passed into every rendered template.
detail = DetailCollector()
@app.route("/")
@app.route("/home")
def home():
    """Render the landing page with the collected portfolio details."""
    return render_template('index.html',details = detail)
| Amari-17/portfolio | app.py | app.py | py | 370 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_bootstrap.Bootstrap",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "detail_collector.DetailCollector",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "... |
14086957906 | import json
import networkx as nx
import matplotlib.pyplot as plt
def get_topology(edges):
    """Build an undirected graph from a layer-3 edge DataFrame.

    Each row must expose 'Interface' and 'Remote_Interface' mappings with a
    'hostname' key; one graph edge is added per row.
    """
    graph = nx.Graph()
    adjacency = json.loads(edges.to_json(orient="index"))
    for record in adjacency.values():
        graph.add_edge(record["Interface"]["hostname"],
                       record["Remote_Interface"]["hostname"])
    return graph
def get_figure(pframe):
    """
    Render the layer-3 topology of a Pandas edge frame as a matplotlib Figure
    (spring layout, white labels), suitable for display in Streamlit.
    """
    g = get_topology(pframe)

    # Calculate spring layout
    pos = nx.spring_layout(g)

    # Draw the graph using matplotlib within Streamlit
    fig, ax = plt.subplots()
    nx.draw(g, pos, with_labels=True, ax=ax, node_size=1000, font_color="white")

    return fig
| martimy/Bat-Q | pages/common/plotting.py | plotting.py | py | 756 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "networkx.Graph",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.sub... |
34353676149 | import requests
from bs4 import BeautifulSoup
from time import sleep
import json
from sqlalchemy import create_engine,Column,Integer,String,ForeignKey,table, column, select, update, insert
from sqlalchemy.ext.declarative import declarative_base
from urlparse import urlparse
from sqlalchemy.orm import sessionmaker
from sqlalchemy import *
# Crawl links from the register.start.bg portal, record each reachable
# site's "Server" response header in a local SQLite table (up to 10 rows),
# then print per-server counts. Python 2 script (urlparse import, u"" use).
start = "http://register.start.bg/"
f = requests.get(start)
soup = BeautifulSoup(f.text, 'html.parser')
websites = {}
Base = declarative_base()
engine = create_engine('sqlite:///servers.db', echo = True)
Session = sessionmaker(bind=engine)
session = Session()
metadata = MetaData()
connection = engine.connect()
servers = Table('servers', metadata,
    Column('user_id', Integer, primary_key=True),
    Column('server', String, nullable=False),
    Column('website', String)
)
count = 0
metadata.create_all(engine)
for link in soup.find_all('a'):
    l = link.get('href')
    #if(not type(l) is None or not type(l)is unicode):
    #print(l)
    ext = ".bg"
    ht = "http"
    print(count)
    if l is None:
        continue
    elif u"link.php" in l :
        # Portal-internal redirect links: resolve relative to the portal.
        try:
            lin = start + str(l)
            obj1 = requests.get(lin)
            parsed = urlparse(lin)
            exists = False
            for key in obj1.headers:
                if(key == "Server"):
                    exists = True
                    break
            if(exists):
                #is_chunked = obj1.headers.get('transfer-encoding', '') == 'chunked'
                #if(is_chunked):
                    #continue
                #elif(obj1.status_code != 200):
                    #print("different status code from 200")
                    #continue
                #else:
                # NOTE(review): `websites` is never populated in this branch,
                # so this membership test is always True -- confirm intent.
                if(parsed.netloc not in websites):
                    engine.execute(servers.insert(),server = obj1.headers["Server"],website = obj1.url)
                    count +=1
                else:
                    continue
        except requests.exceptions.ConnectionError as e:
            pass
    elif ext and ht in l:
        # NOTE(review): `ext and ht in l` reduces to `ht in l` because ext is
        # always truthy; presumably `ext in l and ht in l` was intended.
        try:
            obj = requests.get(l)
            #is_chunked = obj.headers.get('transfer-encoding', '') == 'chunked'
            #if(is_chunked):
                #continue
            #elif(obj.status_code != 200):
                #print("differen t status code from 200")
                #continue
            #else:
            parsed = urlparse(obj.url)
            if(parsed.netloc not in websites):
                engine.execute(servers.insert(),server = obj.headers["Server"], website = obj.url)
                count +=1
            else:
                websites[parsed.netloc] +=1
        except requests.exceptions.ConnectionError as e:
            pass
    else:
        continue
    if(count >= 10):
        break
# Tally server frequencies from the stored rows.
s = select([distinct(servers.c.server)])
result = connection.execute(s)
for row in result:
    websites[row[0]] = 0
s = select([servers.c.server])
result = connection.execute(s)
for row in result:
    websites[row[0]] +=1
print(websites)
| VladislavSpassov/HackBulgariaTasks | Week13/CrawnBGWebsites.py | CrawnBGWebsites.py | py | 3,072 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 17,
"usage_type": "call"
},
{
"api_na... |
14263045240 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Account, UserProfile, RefundRequests, FavoriteItem, Preferences
# Register your models here.
class AccountAdmin(UserAdmin):
    """Admin list view for the custom Account user model."""
    list_display = ('email', 'first_name', 'last_name', 'username', 'phone_number', 'last_login', 'date_joined', 'is_active')
    list_per_page = 60
    # Clear UserAdmin's stock group/permission widgets, filters and fieldsets
    # -- presumably because Account does not use them; confirm before reuse.
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
    ordering = ('date_joined',)
class UserProfileAdmin(admin.ModelAdmin):
    """Admin list view for user address/profile records."""
    list_display = ('user', 'city', 'district', 'province', 'country')
    list_per_page = 30
class PreferencesAdmin(admin.ModelAdmin):
    """Admin list view for per-user product rating preferences."""
    list_display = ('user', 'product', 'rating')
    list_per_page = 30
class RefundRequestsAdmin(admin.ModelAdmin):
    """Admin list view for refund requests; `processed` is editable inline."""
    list_display = ('user', 'order_number', 'amount_paid','processed','created_at')
    # Allow staff to toggle the processed flag directly from the list page.
    list_editable = ('processed',)
    list_per_page = 30
# Wire each model to its customized admin; FavoriteItem uses the default ModelAdmin.
admin.site.register(Account,AccountAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(RefundRequests, RefundRequestsAdmin)
admin.site.register(FavoriteItem)
admin.site.register(Preferences,PreferencesAdmin)
{
"api_name": "django.contrib.auth.admin.UserAdmin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 16,
"usage_type": "name"
... |
74978102183 | import os
import random
from captcha import image
import numpy as np
import PIL.Image as PILImage
import cv2
import shutil
def create_train_dataset(output_dir: str, width: float, height: float, captcha_count=4, count=2000):
    """
    Generate `count` captcha PNGs of `captcha_count` random digits each.

    Files are written into output_dir as "<text>.png", so the label can be
    recovered from the filename (duplicate texts overwrite earlier files).
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # Character pool: digits only; letter generators kept for reference.
    number_sets = lambda offset: str(offset)
    lowcase_char_sets = lambda offset: chr(97 + offset)
    upcase_char_sets = lambda offset: chr(65 + offset)

    avaliable_sets = [number_sets(i) for i in range(0, 10)]
    # for i in range(0, 26):
    #     avaliable_sets.append(lowcase_char_sets(i))
    # for i in range(0, 26):
    #     avaliable_sets.append(upcase_char_sets(i))

    def random_str(length):
        # BUGFIX: the original used randrange(0, len(avaliable_sets) - 1),
        # which (since randrange excludes the stop value) could never select
        # the last pool entry ('9'). random.choice samples uniformly.
        return "".join(random.choice(avaliable_sets) for _ in range(length))

    image_captcha = image.ImageCaptcha(width=width, height=height)
    for i in range(count):
        captcha_str = random_str(captcha_count)
        image_captcha.write(captcha_str, output_dir + "/" + captcha_str + ".png", "png")
        print("Gen captcha: {0}".format(captcha_str))
def remove_dataset(dir):
    """Delete the dataset directory tree at `dir`; no-op when it is absent."""
    if not os.path.exists(dir):
        return
    shutil.rmtree(dir)
def read_dataset(dir):
    """
    Load every "<label>.png" captcha under `dir`.

    Returns (images, labels): grayscale float arrays scaled to [0, 1] and
    the filename stems as label strings, in os.listdir order.
    """
    images = []
    labels = []
    for subpath in os.listdir(dir):
        if subpath.endswith(".png"):
            # NOTE: local `image` shadows the captcha `image` module imported
            # at file top (harmless inside this function).
            image = np.array(PILImage.open(os.path.join(dir, subpath)))
            # Strip the ".png" suffix to recover the captcha text.
            label = subpath[:-4]
            gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            images.append(gray_img / 255.0)
            labels.append(label)
    return images, labels
| SquarePants1991/LearnTensorFlow | ้ช่ฏ็ ่ฏๅซ/dataset_util.py | dataset_util.py | py | 1,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_num... |
28225180956 | from typing import List
import unittest
class Solution:
    """LeetCode 853 -- Car Fleet."""

    def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:
        """Return the number of fleets that reach `target`.

        A car merges into the fleet ahead of it when its arrival time is
        greater than or equal to that fleet's arrival time.
        """
        # Arrival times ordered by starting position (closest to start first);
        # stable key-sort on position matches the original tie behavior.
        ordered = sorted(zip(position, speed), key=lambda car: car[0])
        arrival_times = [(target - pos) / spd for pos, spd in ordered]

        fleets = []
        for eta in arrival_times:
            # This car catches every slower-arriving fleet in front of it.
            while fleets and eta >= fleets[-1]:
                fleets.pop()
            fleets.append(eta)
        return len(fleets)
class TestCases(unittest.TestCase):
    """Regression cases for Solution.carFleet (LeetCode 853)."""

    def setUp(self):
        # Bind the method under test once per test method.
        self.sol = Solution().carFleet

    def test_solution_1(self):
        """Three known inputs with their expected fleet counts."""
        target = 12
        position = [10,8,0,5,3]
        speed = [2,4,1,1,3]
        ans = self.sol(target, position, speed)
        expected = 3
        self.assertEqual(ans, expected)

        target = 100
        position = [0,2,4]
        speed = [4,2,1]
        ans = self.sol(target, position, speed)
        expected = 1
        self.assertEqual(ans, expected)

        target = 10
        position = [3]
        speed = [3]
        ans = self.sol(target, position, speed)
        expected = 1
        self.assertEqual(ans, expected)
if __name__ == '__main__':
unittest.main() | HomayoonAlimohammadi/Training | Leetcode/853_CarFleet.py | 853_CarFleet.py | py | 1,349 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 56,
"usage_type": "call"
}
] |
5032205330 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import getdist
#from getdist import plots, MCSamples
sys.path.append(
"{}/utils".format(os.environ["GitHub"])
)
import list_utils as list_utils
import directory_utils as directory_utils
# NOTE: A LOT NEED TO CHANGE HERE ...
# TODO: The samples in the 2dmarginal plot need not be in a grid.
def get_list_of_directory_trees_in_directory(directory):
    """Return every directory path under ``directory`` (the root itself
    included), in ``os.walk`` order."""
    return [dirpath for dirpath, _dirnames, _filenames in os.walk(directory)]
def get_subphase_directory(x_i, x_j, y_i, y_j):
    """Build the grid-cell directory name for one subhalo-search cell.

    The name encodes the x-range ``[x_i, x_j]`` (centre 0) and the
    y-range ``[y_i, y_j]`` (centre 1), each bound formatted to 2 d.p.
    """
    return (
        "galaxies_subhalo_mass_centre_0_{:.2f}_{:.2f}".format(x_i, x_j)
        + "_"
        + "galaxies_subhalo_mass_centre_1_{:.2f}_{:.2f}".format(y_i, y_j)
    )
def get_subphase_directories_for_gridsearch(phase_directory, xmin, xmax, ymin, ymax, number_of_steps):
    """Locate the "optimizer_backup" directory of every grid-search cell.

    The (xmin..xmax) x (ymin..ymax) box is split into
    ``number_of_steps`` x ``number_of_steps`` cells; each cell maps to a
    sub-directory of ``phase_directory`` named by get_subphase_directory.
    Returns a 2-D (row-major over x then y) list of optimizer_backup
    paths, with None where a cell's backup is missing or empty.

    Raises IOError if a cell directory does not exist, and ValueError if
    a cell unexpectedly contains more than one optimizer_backup.
    """
    # Cell edges along each axis (number_of_steps cells -> steps+1 edges).
    x = np.linspace(xmin, xmax, number_of_steps + 1)
    y = np.linspace(ymin, ymax, number_of_steps + 1)
    directories = []
    for i in range(number_of_steps):
        directories_temp = []
        for j in range(number_of_steps):
            subphase_directory = get_subphase_directory(
                x_i=x[i], x_j=x[i+1], y_i=y[j], y_j=y[j+1]
            )
            # TODO: sanitize the phase directory
            # NOTE(review): re-sanitizing inside the loop is redundant but
            # harmless if sanitize_directory is idempotent -- confirm.
            phase_directory = directory_utils.sanitize_directory(
                directory=phase_directory
            )
            subphase_directory = phase_directory + "/" + subphase_directory
            if not os.path.isdir(subphase_directory):
                raise IOError(subphase_directory + " does not exist")
            # Keep only directory paths ending in "optimizer_backup".
            list_of_directory_trees_filtered = list_utils.filter_input_list_of_strings_after_split_with_ending_string(
                input_list_of_strings=get_list_of_directory_trees_in_directory(
                    directory=subphase_directory
                ),
                split_character="/",
                ending_string="optimizer_backup"
            )
            if len(list_of_directory_trees_filtered) == 1:
                # An empty backup directory counts as "no result".
                if not os.listdir(list_of_directory_trees_filtered[0]):
                    directories_temp.append(None)
                else:
                    directories_temp.append(list_of_directory_trees_filtered[0])
            if len(list_of_directory_trees_filtered) < 1:
                directories_temp.append(None)
                #raise ValueError("optimizer_backup does not exist")
            if len(list_of_directory_trees_filtered) > 1:
                raise ValueError("THIS IS WEIRD...")
        directories.append(directories_temp)
    return directories
def get_samples_from_subphase_directories(directories):
    """Load the MultiNest chains for every grid cell.

    ``directories`` is the 2-D grid returned by
    get_subphase_directories_for_gridsearch; each entry is either a cell's
    optimizer_backup path or None.  Returns a 2-D list of the same shape
    holding getdist sample objects, with None where a cell has no
    directory or its chains fail to load.
    """
    samples = []
    for i in range(np.shape(directories)[0]):
        samples_temp = []
        for j in range(np.shape(directories)[1]):
            if directories[i][j] is not None:
                # MultiNest output files share the "multinest" root name.
                directory = directories[i][j] + "/multinest"
                try:
                    sample = getdist.mcsamples.loadMCSamples(directory)
                    #print(sample.__dict__)
                except:
                    # NOTE(review): bare except hides the real load error;
                    # the cell is simply treated as empty.
                    sample = None
            else:
                sample = None
            samples_temp.append(sample)
        samples.append(samples_temp)
    return samples
def subhalo_grid_plot_from_samples(samples, levels=None):
    """Overlay the 2-D posterior of the subhalo (y, x) centre for every
    grid cell on a single figure, with the grid boundaries and the true
    position marked, then show it.

    samples : 2-D list of getdist sample objects (None entries skipped).
    levels : contour levels for the 2-D density (default 0.1 .. 1.0).
    """
    plt.figure(
        figsize=(15, 15)
    )
    # ...
    if levels is None:
        levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    # ...
    # NOTE(review): both loops use shape[0] -- assumes a square grid.
    for i in range(np.shape(samples)[0]):
        for j in range(np.shape(samples)[0]):
            sample_temp = samples[i][j]
            if sample_temp is not None:
                density2D = sample_temp.get2DDensity(
                    'galaxies_subhalo_mass_centre_1',
                    'galaxies_subhalo_mass_centre_0'
                )
                if density2D is not None:
                    plt.contour(
                        density2D.x,
                        density2D.y,
                        density2D.P,
                        levels=levels,
                        colors="black"
                    )
                    #print("OK")
    # Grid-cell boundaries for the hard-coded -2..2 arcsec search box.
    for i in np.linspace(-2.0, 2.0, 5):
        plt.axvline(i, linestyle="--", linewidth=2, color="r")
        plt.axhline(i, linestyle="--", linewidth=2, color="r")
    # True subhalo position (presumably; matches the simulated input --
    # TODO confirm).
    plt.plot([-1.0], [0.0], marker="*", markersize=20, color="b")
    plt.xlabel("x (arcsec)", fontsize=20)
    plt.ylabel("y (arcsec)", fontsize=20)
    plt.xlim((-2.1, 2.1))
    plt.ylim((-2.1, 2.1))
    plt.show()
if __name__ == "__main__":
    # Output directory of one subhalo-search phase run
    # (machine-specific absolute path; adjust for other hosts/runs).
    phase_directory = "/Users/ccbh87/Desktop/COSMA/cosma7/data/dp004/dc-amvr1/workspace/output/interferometer/lens_powerlaw_and_shear_and_subhalo__source_ellipticalcoresersic/model_1/total_flux_1.0_Jy/5.6/230GHz/t_tot__60s/t_int__10s/n_channels_128/0.5mm/width_128/pipeline__lens_fixed_with_subhalo__source_inversion/general/source__pix_voro_mag__reg_const__with_shear/phase_2__subhalo_search__source/phase_tag__rs_shape_125x125__rs_pix_0.04x0.04__sub_2__pos_0.20/"
    # Grid-search extent (arcsec) and number of cells per axis.
    xmin = -2.0
    xmax = 2.0
    ymin = -2.0
    ymax = 2.0
    number_of_steps = 4
    subphase_directories = get_subphase_directories_for_gridsearch(
        phase_directory=phase_directory,
        xmin=xmin,
        xmax=xmax,
        ymin=ymin,
        ymax=ymax,
        number_of_steps=number_of_steps
    )
    samples = get_samples_from_subphase_directories(directories=subphase_directories)
    subhalo_grid_plot_from_samples(samples=samples)
| Sketos/utils | autolens_utils/autolens_directory_utils.py | autolens_directory_utils.py | py | 5,540 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number... |
32901525311 | import csv
import itertools
import os.path
import sys
if __name__ == "__main__":
    # Count co-occurring read alignments ("overlaps") in a colocalization
    # results TSV (path given as argv[1]) and write the pair counts to
    # <basename>_overlaps.tsv.
    #Create ontology dictionary from MEGARes ontology file
    #megares_ontology = {}
    #ontology_filename = "/home/noyes046/jsettle/argmobrich/MEGARESONTOLOGY.tsv"
    #with open(ontology_filename, 'r') as ontology_tsv:
        #ontology_reader = csv.reader(ontology_tsv, delimiter='\t')
        #for row in ontology_reader:
            ##Skip column names
            #if row[0] == "header":
                #continue
            #
            ##FIll in our dict
            #megares_ontology[row[0]] = { "class" : row[1],
                                         #"mechanism" : row[2],
                                         #"group" : row[3]
                                         #}
    #Go through colocalization results, looking for overlaps
    results_filename = sys.argv[1]
    overlap_dict = {}
    tmp_overlaps = []
    with open(results_filename, 'r') as results_csv:
        results_reader = csv.reader(results_csv, delimiter='\t')
        for row in results_reader:
            #Skip column names
            if row[0] == "SAMPLE_TYPE":
                continue
            # Column 18 == "No" ends the current overlap group.
            # NOTE(review): the else-comment below says row[16]; the code
            # tests row[18] -- confirm which column is authoritative.
            if row[18] == "No":
                #If there were overlaps, record them
                if len(tmp_overlaps) > 1:
                    overlaps_processed = []
                    # Count each unordered pair of co-located hits once.
                    for overlap in itertools.product(tmp_overlaps, repeat=2):
                        #Not interested in alignments to same read
                        if overlap[0] == overlap[1]:
                            continue
                        #(A,B) = (B,A) for this purpose
                        if (tuple(sorted(overlap)) in overlaps_processed):
                            continue
                        if overlap in overlap_dict:
                            overlap_dict[overlap] += 1
                        else:
                            overlap_dict[overlap] = 1
                        overlaps_processed.append(tuple(sorted(overlap)))
                # Start a new group seeded with this row's hit (column 11).
                tmp_overlaps = [row[11]]
            else: #(row[16] == "Yes")
                tmp_overlaps.append(row[11])
    # Most frequent pairs first.
    sorted_overlaps = sorted(overlap_dict, key = lambda overlap: overlap_dict[overlap], reverse=True)
    #Write tsv for overlap counts
    with open(os.path.splitext(os.path.basename(sys.argv[1]))[0] + "_overlaps.tsv", 'w') as coloc_tsv:
        coloc_writer = csv.writer(coloc_tsv, delimiter='\t')
        # Header uses the first 6 chars of the input basename as a label.
        coloc_writer.writerow([os.path.splitext(os.path.basename(sys.argv[1]))[0][:6] + " overlaps"])
        coloc_writer.writerow([])
        coloc_writer.writerow(["Overlap Pair", "Occurrences"])
        for overlap in sorted_overlaps:
            coloc_writer.writerow([overlap, overlap_dict[overlap]])
| settj/argmobrich_analysis | colocalization/gen_overlap.py | gen_overlap.py | py | 2,748 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.path.splitext",
... |
24390093684 | import argparse
import os
import re
import subprocess
from . import tool
from .common import check_which, Command, guess_command, make_command_converter
from .. import log, options as opts, shell
from ..exceptions import PackageResolutionError, PackageVersionError
from ..file_types import Directory, HeaderDirectory
from ..iterutils import iterate, listify
from ..objutils import memoize_method
from ..packages import Package, PackageKind
from ..path import Path, Root
from ..shell import posix as pshell, which
from ..versioning import check_version, SpecifierSet, Version
_lib_dirs_parser = argparse.ArgumentParser()
_lib_dirs_parser.add_argument('-L', action='append', dest='lib_dirs')
_include_dirs_parser = argparse.ArgumentParser()
_include_dirs_parser.add_argument('-I', action='append', dest='include_dirs')
_c_to_pkgconf = make_command_converter([
(re.compile(r'gcc(?:-[\d.]+)?(?:-(?:posix|win32))?'), 'pkg-config'),
])
def _shell_split(output):
    # Split pkg-config's shell-quoted output into an option_list, keeping
    # escape sequences intact (delegates to the project's POSIX shell
    # splitter).
    return pshell.split(output, type=opts.option_list, escapes=True)
def _requires_split(output):
return [i.split(' ')[0] for i in output.split('\n') if i]
@tool('pkg_config')
class PkgConfig(Command):
    """Wrapper around the ``pkg-config`` executable.

    Each logical query type maps to the pkg-config flags that implement
    it plus an optional post-processor for the raw output.
    """

    # Map command names to pkg-config flags and whether they should be treated
    # as shell arguments.
    _options = {
        'version': (['--modversion'], None),
        'requires': (['--print-requires'], _requires_split),
        'path': (['--variable=pcfiledir'], None),
        'install_names': (['--variable=install_names'], _shell_split),
        'include_dirs': (['--cflags-only-I'], _shell_split),
        'other_cflags': (['--cflags-only-other'], _shell_split),
        'lib_dirs': (['--libs-only-L'], _shell_split),
        'other_ldflags': (['--libs-only-other'], _shell_split),
        'ldlibs': (['--libs-only-l'], _shell_split),
    }

    @staticmethod
    def _get_command(env):
        """Pick the pkg-config executable: $PKG_CONFIG if set, else a name
        guessed from the C compiler, else the default candidates."""
        cmd = env.getvar('PKG_CONFIG')
        if cmd:
            return check_which(cmd, env.variables)
        # We don't have an explicitly-set command from the environment, so try
        # to guess what the right command would be based on the C compiler
        # command.
        default = ['pkg-config', 'pkgconf']
        sibling = env.builder('c').compiler
        guessed_cmd = guess_command(sibling, _c_to_pkgconf)
        # If the guessed command is the same as the default command candidate,
        # skip it. This will keep us from logging a useless info message that
        # we guessed the default value for the command.
        if guessed_cmd is not None and guessed_cmd != default:
            try:
                cmd = which(guessed_cmd, env.variables)
                log.info('guessed pkg-config {!r} from c compiler {!r}'
                         .format(guessed_cmd, shell.join(sibling.command)))
                return cmd, True
            except FileNotFoundError:
                pass
        # Try the default command candidate.
        return check_which(default, env.variables)

    def __init__(self, env):
        super().__init__(env, command=('pkg_config',) + self._get_command(env))

    def _call(self, cmd, names, type, static=False, msvc_syntax=False,
              options=[]):
        # Assemble the argv for one query.  NOTE: the mutable defaults are
        # safe here because neither `options` nor `result` aliasing mutates
        # them -- concatenation builds a new list.
        result = cmd + listify(names) + self._options[type][0] + options
        if static:
            result.append('--static')
        if msvc_syntax:
            result.append('--msvc-syntax')
        return result

    def run(self, names, type, *args, extra_env=None, installed=None,
            **kwargs):
        """Run one query and return its (optionally post-processed) output.

        installed=True forces the installed variant of a package;
        installed=False queries the "-uninstalled" variant.
        """
        if installed is True:
            extra_env = dict(PKG_CONFIG_DISABLE_UNINSTALLED='1',
                             **(extra_env or {}))
        elif installed is False:
            names = [i + '-uninstalled' for i in iterate(names)]
        result = super().run(names, type, *args, extra_env=extra_env,
                             **kwargs).strip()
        # Apply the per-type post-processor, if any.
        if self._options[type][1]:
            return self._options[type][1](result)
        return result

    def search_path(self, extra=[]):
        # Build a PKG_CONFIG_PATH value from `extra` plus the environment's
        # existing setting (mutable default is read-only here).
        path = self.env.variables.get('PKG_CONFIG_PATH')
        if path:
            return shell.join_paths(extra + [path])
        return shell.join_paths(extra)
class PkgConfigPackage(Package):
    """A package resolved through pkg-config.

    Wraps one or more .pc names (``pcnames``) and exposes their compile
    and link options, translating to MSVC syntax when needed and adding
    rpath / install_name fixups for ELF and Mach-O targets.
    """

    def __init__(self, pkg_config, name, submodules=None,
                 specifier=SpecifierSet(), pcnames=None, *, format,
                 kind=PackageKind.any, system=True, deps=None,
                 search_path=None):
        super().__init__(name, submodules, format=format, deps=deps)
        self._pkg_config = pkg_config
        # Extra PKG_CONFIG_PATH entries applied to every query.
        self._env = ({'PKG_CONFIG_PATH': pkg_config.search_path(search_path)}
                     if search_path else {})
        self.pcnames = pcnames if pcnames is not None else [name]
        # Probe the first .pc file; failure means the package is missing.
        try:
            version = self._call(self.pcnames[0], 'version')
            version = Version(version) if version else None
        except subprocess.CalledProcessError:
            raise PackageResolutionError("unable to find package '{}'"
                                         .format(name))
        if version:
            check_version(version, specifier, name, PackageVersionError)
        self.version = version
        self.specifier = specifier
        self.static = kind == PackageKind.static
        self.system = system

    @memoize_method
    def _call(self, *args, extra_env=None, **kwargs):
        # Memoized pkg-config invocation with this package's env overlay.
        final_env = dict(**self._env, **extra_env) if extra_env else self._env
        return self._pkg_config.run(*args, extra_env=final_env, **kwargs)

    def include_dirs(self, **kwargs):
        """Return the package's -I directories as absolute Paths."""
        args = self._call(self.pcnames, 'include_dirs', self.static, **kwargs)
        inc_dirs = _include_dirs_parser.parse_known_args(args)[0].include_dirs
        return [Path(i, Root.absolute) for i in inc_dirs or []]

    def lib_dirs(self, **kwargs):
        """Return the package's -L directories as absolute Paths."""
        args = self._call(self.pcnames, 'lib_dirs', self.static, **kwargs)
        lib_dirs = _lib_dirs_parser.parse_known_args(args)[0].lib_dirs
        return [Path(i, Root.absolute) for i in lib_dirs or []]

    def _get_rpaths(self):
        # Compute rpath options, distinguishing the uninstalled and
        # installed layouts when they differ.
        extra_env = {'PKG_CONFIG_ALLOW_SYSTEM_LIBS': '1'}

        def rpaths_for(installed):
            try:
                return self.lib_dirs(extra_env=extra_env, installed=installed)
            except shell.CalledProcessError:
                return None

        uninstalled = rpaths_for(installed=False)
        installed = rpaths_for(installed=True)
        if uninstalled is None or uninstalled == installed:
            return opts.option_list(opts.rpath_dir(i) for i in installed)
        else:
            return opts.option_list(
                (opts.rpath_dir(i, 'uninstalled') for i in uninstalled),
                (opts.rpath_dir(i, 'installed') for i in installed or []),
            )

    def _get_install_name_changes(self, pcnames=None):
        # Mach-O: collect install_name rewrites needed when the package is
        # installed, recursing into its public pkg-config requirements.
        if pcnames is None:
            pcnames = self.pcnames

        def install_names_for(installed):
            try:
                return self._call(pcnames, 'install_names', self.static,
                                  installed=installed)
            except shell.CalledProcessError:
                return None

        uninstalled = install_names_for(installed=False)
        installed = install_names_for(installed=True)
        if ( uninstalled is None or installed is None or
             uninstalled == installed ):
            result = opts.option_list()
        else:
            result = opts.option_list(opts.install_name_change(i, j)
                                      for i, j in zip(uninstalled, installed))

        # Recursively get install_name changes for public requirements.
        requires = self._call(pcnames, 'requires')
        for i in requires:
            result.extend(self._get_install_name_changes(i))
        return result

    def compile_options(self, compiler, *, raw=False):
        """Return this package's compiler options for ``compiler``."""
        flags = self._call(self.pcnames, 'other_cflags', self.static,
                           not raw and compiler.flavor == 'msvc')

        # Get include paths separately so we can selectively use them as
        # "system" includes; this helps ensure that warnings in external
        # headers don't break the build when using `-Werror`.
        incdirs = opts.option_list(
            opts.include_dir(HeaderDirectory(i, system=self.system))
            for i in self.include_dirs()
        )
        return flags + incdirs

    def link_options(self, linker, *, raw=False):
        """Return this package's linker options for ``linker``."""
        flags = self._call(self.pcnames, 'other_ldflags', self.static,
                           not raw and linker.flavor == 'msvc')
        libdirs = opts.option_list(opts.lib_dir(Directory(i))
                                   for i in self.lib_dirs())

        # XXX: How should we ensure that these libs are linked statically when
        # necessary?
        libs = self._call(self.pcnames, 'ldlibs', self.static,
                          not raw and linker.flavor == 'msvc')
        libs = opts.option_list(opts.lib_literal(i) for i in libs)

        # Add extra link options as needed for platform-specific oddities.
        extra_opts = opts.option_list()
        if not raw and not self.static:
            if linker.builder.object_format == 'elf':
                # pkg-config packages don't generally include rpath
                # information, so we need to generate it ourselves.
                extra_opts = self._get_rpaths()
            elif linker.builder.object_format == 'mach-o':
                # When using uninstalled variants of pkg-config packages, we
                # should check if there are any install_names set that we need
                # to update when installing. For more information, see the
                # pkg-config builtin.
                extra_opts = self._get_install_name_changes()
        return flags + libdirs + libs + extra_opts

    def path(self):
        # Directory containing the primary .pc file.
        return self._call(self.pcnames[0], 'path')

    def __repr__(self):
        return '<{}({!r}, {!r})>'.format(
            type(self).__name__, self.name, str(self.version)
        )
# A package automatically generated for us by mopack. This is useful when
# generating our own pkg-config file, so that we don't add this one as a
# requirement (it's only temporary, after all).
class GeneratedPkgConfigPackage(PkgConfigPackage):
    # Behaviour is identical to PkgConfigPackage; the subclass exists only
    # as a marker type that callers can test with isinstance().
    pass
def resolve(env, name, *args, generated=False, **kwargs):
    """Resolve ``name`` via pkg-config and return the package object.

    When ``generated`` is true the result is a GeneratedPkgConfigPackage,
    marking a .pc file produced by mopack that should not be listed as a
    requirement in pkg-config files we generate ourselves.
    """
    # NOTE(review): `type` shadows the builtin within this function.
    type = GeneratedPkgConfigPackage if generated else PkgConfigPackage
    pkg = type(env.tool('pkg_config'), name, *args, **kwargs)
    log.info('found package {!r} version {} via pkg-config in {}'
             .format(pkg.name, pkg.version, os.path.normpath(pkg.path())))
    return pkg
| jimporter/bfg9000 | bfg9000/tools/pkg_config.py | pkg_config.py | py | 10,628 | python | en | code | 73 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "common.make_command_converter",
"line_number": 24,
"usage_type": "call"
},
{
"api... |
39804054613 |
import os
from django.urls import resolve, reverse
from django.shortcuts import redirect
from django.http import HttpResponseForbidden
from internal_users.models import InternalUser
from firebase_auth_app.models import FirebaseUser
from customer_users.models import CustomerUser
class InternalUserMiddleware:
    """Django middleware that restricts the PROTECTED_APPS to
    authenticated InternalUser accounts, redirecting everyone else to the
    employee login page.  Login URLs themselves are always passed
    through."""

    # Define apps that need to pass through the middleware check
    PROTECTED_APPS = ['talents', 'apis',
                      'dashboard', 'admin', 'talent_management',]  # 'appointments'
    # login_url = reverse('internal_users:internal_user_login')

    def __init__(self, get_response):
        self.get_response = get_response
        # Resolved once at startup; assumes URLconf is loaded by now.
        self.employee_login_url = reverse('internal_users:internal_user_login')
        self.customer_login_url = reverse('customer_users:customer_user_login')

    def __call__(self, request):
        # Resolve the current app name
        current_app = resolve(request.path_info).app_name
        # NOTE(review): debug print (typo "requeted" is inside the runtime
        # string); consider switching to logging.
        print(
            F'The current_app name is {current_app}. url requeted is {request.path}. is_authenticated?:{request.user.is_authenticated}.')
        # print(request.path, request.user.is_authenticated)
        # If the user is trying to access the login page itself, bypass further checks
        if request.path == self.employee_login_url or request.path == self.customer_login_url:
            return self.get_response(request)
        # Check if the request path resolves to any of the protected apps
        if current_app in self.PROTECTED_APPS:
            print(
                f'url visit to protected app(s). Implementing custom rules in InternalUserMiddleware...')
            # Anonymous users and authenticated non-InternalUser accounts
            # are both bounced to the employee login.
            if not request.user.is_authenticated or (request.user.is_authenticated and not isinstance(request.user, InternalUser)):
                print('incorrect redirecting to the employee login url...')
                return redirect(self.employee_login_url)
        # # Additional check for CustomerUser when visiting the customer_users app
        # if current_app in ['customer_users'] and (not request.user.is_authenticated or (request.user.is_authenticated and not isinstance(request.user, CustomerUser))):
        #     # Redirect to the customer user login
        #     print('redirecting to the customer login url...')
        #     return redirect(self.customer_login_url)
        return self.get_response(request)
# Multiple-user-model authentication middleware (intended for future use
# with Firebase auth): maps a session's Firebase UID to whichever concrete
# user model (CustomerUser or InternalUser) it belongs to.
class MultipleUserModelMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if 'uid' in request.session:
            uid = request.session['uid']
            firebase_user = None
            specific_user = None
            # Resolve the base Firebase account; a missing record is not an
            # error -- the request just proceeds unauthenticated.
            try:
                firebase_user = FirebaseUser.objects.get(uid=uid)
            except FirebaseUser.DoesNotExist:
                pass
            if firebase_user:
                # Set base user
                request.firebase_user = firebase_user
                # Check each model to find the specific user
                # (first match wins, in this order).
                for user_model in [CustomerUser, InternalUser]:
                    try:
                        specific_user = user_model.objects.get(
                            firebase_user=firebase_user)
                        break
                    except user_model.DoesNotExist:
                        continue
                if specific_user:
                    request.user = specific_user
        response = self.get_response(request)
        return response
| zjgcainiao/new_place_at_76 | internal_users/middlewares.py | middlewares.py | py | 3,585 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.reverse",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.urls.resolve",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "internal_... |
6674627865 | import torch
from dace.transformation import transformation
from dace.properties import make_properties
from dace.sdfg.utils import node_path_graph
from dace import nodes, SDFG, SDFGState, registry, Memlet
from typing import Dict, Union
from daceml import onnx as donnx
from daceml.transformation.constant_folding import remove_node_and_computation
from daceml.util import utils
@make_properties
class PadConvFusion(transformation.SingleStateTransformation):
    """ Fuse a constant pad into a convolution.
    """
    # Pattern: ONNXPad -> AccessNode (padded tensor) -> ONNXConv.
    pad = transformation.PatternNode(donnx.ONNXPad)
    data = transformation.PatternNode(nodes.AccessNode)
    conv = transformation.PatternNode(donnx.ONNXConv)

    @classmethod
    def expressions(cls):
        # Match the linear pad -> data -> conv path.
        return [node_path_graph(cls.pad, cls.data, cls.conv)]

    def can_be_applied(self,
                       graph: SDFGState,
                       expr_index: int,
                       sdfg: SDFG,
                       permissive: bool = False) -> bool:
        """Accept only zero-valued constant padding of a 4-D tensor that is
        consumed solely by this convolution and pads spatial axes only."""
        pad: donnx.ONNXPad = self.pad
        data_node: nodes.AccessNode = self.data
        conv: donnx.ONNXConv = self.conv

        if pad.mode != 'constant':
            return False

        # Check if data in access node is used anywhere else
        other_nodes = [
            node for state in sdfg.nodes() for node in state.nodes() if
            isinstance(node, nodes.AccessNode) and node.data == data_node.data
        ]
        if len(other_nodes) != 1:
            return False

        # conservative: padded value should be 4 dimensional
        if len(data_node.desc(sdfg).shape) != 4:
            return False

        # no other out edges
        if graph.in_degree(data_node) != 1 or graph.out_degree(data_node) != 1:
            return False

        # check that the two pad inputs can be folded
        constant_value = list(
            graph.in_edges_by_connector(pad, "constant_value"))[0].data.data
        pads = list(graph.in_edges_by_connector(pad, "pads"))[0].data.data

        if constant_value not in sdfg._parent_onnx_model.clean_weights:
            return False

        if pads not in sdfg._parent_onnx_model.clean_weights:
            return False

        pads_value: torch.Tensor = sdfg._parent_onnx_model.clean_weights[pads]
        constant_value_value: torch.Tensor = sdfg._parent_onnx_model.clean_weights[
            constant_value]

        # Only a zero pad can be absorbed into Conv's own `pads` attribute.
        if constant_value_value != 0:
            return False

        # ONNX Pad layout: [b0, b1, b2, b3, e0, e1, e2, e3] -> 8 entries.
        if len(pads_value.shape) != 1 or pads_value.shape[0] != 8:
            return False

        # can only eliminate the pad if it is along the spatial axes
        # pads_value[i::4] gets the padding at the start and end of the i-th axis
        # NOTE(review): this condition looks mis-grouped -- as written it
        # rejects only when axis 0 IS padded AND axis 1 is NOT; verify
        # against the intended "axes 0 and 1 both unpadded" check.
        if (not utils.iterables_equal(pads_value[0::4], [0, 0])
                and utils.iterables_equal(pads_value[1::4], [0, 0])):
            return False

        return True

    def apply(self, state: SDFGState, sdfg: SDFG):
        pad: donnx.ONNXPad = self.pad
        data_node: nodes.AccessNode = self.data
        conv: donnx.ONNXConv = self.conv

        pads = list(state.in_edges_by_connector(pad, "pads"))[0].data.data
        pads_value: torch.Tensor = sdfg._parent_onnx_model.clean_weights[pads]

        # Fold H/W padding (pads_value[2::4] and [3::4]) into the conv's
        # own begin/end pad attributes.
        conv.pads[0] += int(pads_value[2::4][0])
        conv.pads[2] += int(pads_value[2::4][1])
        conv.pads[1] += int(pads_value[3::4][0])
        conv.pads[3] += int(pads_value[3::4][1])

        # Rewire the pad's input straight into the conv, then drop the pad
        # and its intermediate access node.
        in_edge = next(state.in_edges_by_connector(pad, "data"))
        state.add_edge(in_edge.src, in_edge.src_conn, conv, "X", in_edge.data)
        state.remove_edge(in_edge)
        remove_node_and_computation(sdfg, state, data_node)
| spcl/daceml | daceml/transformation/pad_conv_fusion.py | pad_conv_fusion.py | py | 3,630 | python | en | code | 69 | github-code | 36 | [
{
"api_name": "dace.transformation.transformation.SingleStateTransformation",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "dace.transformation.transformation",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dace.transformation.transformation.PatternNo... |
26648319096 | import os
import sys
from glob import glob
from json2mongo import WriteToDataBase
import argparse
import configparser
filename = "last_run_context_numbers.txt"
# argument to be provided to the process manager
parser = argparse.ArgumentParser( description="process manager" )
# need this to read the ini file
config = configparser.ConfigParser()
def read_run_context():
    """Return ``(last_run, last_context_version)`` from the tracking file.

    The module-level ``filename`` points at a two-line file: the first
    line is the last processed run number (int), the second the last
    processed context version string.

    Raises ValueError if the first line is not an integer, and OSError if
    the file cannot be opened.
    """
    # Use a context manager so the handle is always closed -- the previous
    # version leaked the open file object.
    with open(filename, "r") as file_infos:
        last_run = int(file_infos.readline().strip())
        last_context_version = file_infos.readline().strip()
    return last_run, last_context_version
def loop_over_main_dir(main_dir=None, database=None):
    """
    Loop over the directory $HOME/data/xom and get the main directories
    The idea is to compare the context versions then the run numbers

    Contexts newer than the last recorded one (string comparison) have all
    their runs written to `database`; if only the recorded context exists,
    any run newer than the last recorded run is written.  Uses os.chdir as
    a side effect while scanning.
    """
    last_run, last_context = read_run_context()
    # Top-level context directories (skip names starting with "_").
    list_contexts = []
    for name in os.listdir( main_dir ):
        if os.path.isdir( main_dir + name ) and not name.startswith("_"):
            list_contexts.append( name )
    print('number of contexts we have: ', list_contexts)
    if len(list_contexts) > 1:
        for newcontext in list_contexts:
            # we are going to loop over the 'new context' and write what is inside it to the DB
            # this comparison ensures that the old context was already written in DB
            if newcontext > last_context :
                # we go to the new directory that is made from the new context
                # we get the list of directories(runs) inside this new context
                current_dir = main_dir + newcontext
                os.chdir( current_dir )
                # we get all the runs in the new context as a list from glob
                list_runs = glob('[0-9]*')
                print('list of runs:', list_runs)
                if len(list_runs):
                    for runs in list_runs:
                        current_dir = main_dir + newcontext + "/" + runs
                        #lets go inside
                        os.chdir(current_dir)
                        # we need the run number inside this new context, of course there is only one
                        jsonfilename = glob("*.json")[0]
                        run_number = int(runs)
                        dbwriter = WriteToDataBase(datapath=current_dir, database=database,
                                                   collection=newcontext, runnumber=run_number,
                                                   jsonfile=jsonfilename)
                        try:
                            print( 'Writing the modified json file to the DB' )
                            # now lets write the json file inside the data base
                            dbwriter.write_to_db()
                        except Exception as err:
                            print("we can't write the json file to the data base")
                            print("the error: ", err)
    elif len(list_contexts) == 1:
        if list_contexts[0] == last_context:
            # we are still under the same versions of the context
            # we get into the directory of that context version and loop over directories
            old_context_directory = main_dir + last_context
            # each directory has a name which is the run number
            list_run_directories = []
            for name in os.listdir( old_context_directory ):
                if os.path.isdir( old_context_directory + "/" + name ):
                    list_run_directories.append( int( name ) )
            print('the list of runs: ', list_run_directories)
            # now get the run_numbers and compare them to the old one
            for newrun in list_run_directories:
                if newrun - last_run == 0:
                    # there are no changes in the run numbers just quit here.
                    break
                # compare each run_directory with the old one:
                elif newrun - last_run >=1 :
                    current_dir = old_context_directory + "/" + str(newrun)
                    # lets go inside
                    os.chdir( current_dir )
                    # we need the run number inside this new context, of course there is only one
                    jsonfilename = glob( "*.json" )[0]
                    # NOTE(review): rstrip(".json") strips a character SET,
                    # not a suffix -- safe for digit-only names, fragile
                    # otherwise; os.path.splitext would be robust.
                    run_number = int( jsonfilename.rstrip( ".json" ) )
                    dbwriter = WriteToDataBase( datapath=current_dir, database=database,
                                                collection=last_context, runnumber=run_number,
                                                jsonfile=jsonfilename )
                    try:
                        print( 'Writing the modified json file to the DB' )
                        # now lets write the json file inside the data base
                        dbwriter.write_to_db()
                    except Exception as err:
                        print( "Can not dump the json file to DB" )
                        print( "the error: ", err )
        else:
            sys.exit( "old_context and new context are different BUT they should not be!!!" )
    else:
        sys.exit("something is wrong with the module:%s" %"write_json_to_db.py")
def main():
    """
    here we set all the arguments and make call for the function:loop_over_main_dir
    :return: nothing

    Reads defaults from the [LNGS] section of pm.ini, parses command-line
    overrides, then runs the loop.
    """
    config.read( "pm.ini" )
    parser.add_argument( '--mainDir',
                         default=config["LNGS"]["maindirectory"],
                         type=str,
                         help='Name of the main directory at LNGS for data' )
    parser.add_argument( '--DBLngs',
                         default=config["LNGS"]["db_lngs"],
                         type=str,
                         help='Name of the data base at LNGS' )
    # Get the object of arguments, args
    args = parser.parse_args()
    print("module: write_json_to_db.py")
    print("given arguments to loop_over_main_dir")
    print("main dir: ", args.mainDir)
    print("data base name: ", args.DBLngs)
    # call loop_over_main_dir with two args
    loop_over_main_dir(main_dir=args.mainDir, database=args.DBLngs)
if __name__ == "__main__" :
# now copy the json file into the database
main() | ShanJ35/XOM_master | backend/write_json_to_db.py | write_json_to_db.py | py | 6,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.... |
4828550972 | from concurrent.futures import process
from matplotlib.pyplot import cla
import numpy as np, pandas as pd
import re
from scipy import rand
dataset = pd.read_csv("../../resources/Part 7 - Natural Language Processing/Section 36 - Natural Language Processing/Python/Restaurant_Reviews.tsv",delimiter="\t", quoting=3)
## cleaning text
def cleanseText(sentence, stopwordSet, porterStemmerFunction):
    """Normalise one review: strip non-letters, lowercase, drop stop
    words, stem the survivors, and rejoin them into a single string."""
    letters_only = re.sub('[^a-zA-Z]', ' ', sentence)
    words = letters_only.lower().split()
    stemmed = [
        porterStemmerFunction.stem(word)
        for word in words
        if word not in stopwordSet
    ]
    return ' '.join(stemmed)
def bayesClassifier(x_train, y_train):
    """Fit and return a Gaussian Naive Bayes classifier."""
    from sklearn.naive_bayes import GaussianNB
    model = GaussianNB()
    model.fit(x_train, y_train)
    return model
def logisticRegression(x_train, y_train):
    """Fit and return a logistic-regression classifier (fixed seed)."""
    from sklearn.linear_model import LogisticRegression
    model = LogisticRegression(random_state=0)
    model.fit(x_train, y_train)
    return model
def dt(x_train, y_train):
    """Fit and return a decision-tree classifier (fixed seed)."""
    from sklearn.tree import DecisionTreeClassifier
    model = DecisionTreeClassifier(random_state=0)
    model.fit(x_train, y_train)
    return model
def svm(x_train, y_train):
    """Fit and return a linear-kernel SVM classifier (fixed seed)."""
    from sklearn.svm import SVC
    model = SVC(kernel='linear', random_state=0)
    model.fit(x_train, y_train)
    return model
def nonlinearsvm(x_train, y_train):
    """Fit and return an RBF-kernel SVM classifier (fixed seed)."""
    from sklearn.svm import SVC
    model = SVC(kernel='rbf', random_state=0)
    model.fit(x_train, y_train)
    return model
def knn(x_train, y_train):
    """Fit and return a 5-nearest-neighbours classifier using Minkowski
    distance with p=2 (i.e. Euclidean)."""
    from sklearn.neighbors import KNeighborsClassifier
    model = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
    model.fit(x_train, y_train)
    return model
# Build the cleaned corpus, vectorise it, and compare six classifiers on
# the same 80/20 split, collecting confusion matrices and metrics.
import nltk
nltk.download('stopwords') ## stopwords are not useful for computation (like 'the', 'a', 'an')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
num_reviews = 1000
stopwords_list = set(stopwords.words('english'))
# Keep "not" -- negation carries sentiment.
stopwords_list.remove('not')
stopwords_set = set(stopwords_list)
ps = PorterStemmer() ## import word stemmer -> this bunches up tenses of the same word (for eg. like and liked are the same word)
for uncleanedReview in range(0,num_reviews):
    review = cleanseText(dataset['Review'][uncleanedReview],stopwords_set,ps)
    corpus.append(review) ## append the cleaned sentence to the corpus

## Bag of words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
x = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:,-1].values

## split into train and test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=0)

# Per-model results, in the same order as the function list below.
cmlist = []
accuracylist = []
precisionlist = []
recalllist = []
f1scorelist = []
from sklearn.metrics import confusion_matrix, accuracy_score
for classifyingFunction in [bayesClassifier, logisticRegression, dt, svm, nonlinearsvm, knn]:
    classifier = classifyingFunction(x_train, y_train)
    y_pred = classifier.predict(x_test)
    #y_list = np.concatenate((y_pred.reshape(len(y_pred),1),y_test.reshape(len(y_test),1)),axis=1)
    #print(y_list)
    cm = confusion_matrix(y_test, y_pred)
    cmlist.append(cm)
    accuracy = accuracy_score(y_test, y_pred)
    accuracylist.append(accuracy)
    # NOTE(review): these formulas compute precision/recall for class 0
    # (cm[0][0] is the class-0/class-0 cell) -- confirm that is intended
    # rather than the positive class.
    precision = cm[0][0] / (cm[0][0] + cm[1][0])
    precisionlist.append(precision)
    recall = cm[0][0] / (cm[0][0] + cm[0][1])
    recalllist.append(recall)
    f1score = 2*precision*recall / (precision + recall)
    f1scorelist.append(f1score)
for i in range(0,len(cmlist)):
print(cmlist[i])
print(accuracylist[i],precisionlist[i],recalllist[i],f1scorelist[i]) | ManishLapasi/MLstuff | models/NLP/nlpselect.py | nlpselect.py | py | 4,084 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_m... |
14029268104 | import typing
import discord
from discord.ext import commands
from .general import is_staff, free_category, free_category_create
class FreeCategory(commands.Cog):
__slots__ = ('client', 'name', 'staff',)
permissions_jp = {
# 'create_instant_invite': 'ๆๅพ
ใไฝๆ',
'manage_channels': 'ใใฃใณใใซใฎ็ฎก็',
'manage_roles': 'ๆจฉ้ใฎ็ฎก็',
}
permissions_jp_text = {
'read_messages': 'ใกใใปใผใธใ่ชญใ',
'send_messages': 'ใกใใปใผใธใ้ไฟก',
'manage_messages': 'ใกใใปใผใธใฎ็ฎก็',
'embed_links': 'ๅใ่พผใฟใชใณใฏ',
'attach_files': 'ใใกใคใซใๆทปไป',
'read_message_history': 'ใกใใปใผใธๅฑฅๆญดใ่ชญใ',
'external_emojis': 'ๅค้จใฎ็ตตๆๅญใฎไฝฟ็จ',
'add_reactions': 'ใชใขใฏใทใงใณใฎ่ฟฝๅ ',
}
permissions_jp_voice = {
'read_messages': 'ใใฃใณใใซใ่ฆใ',
'connect': 'ๆฅ็ถ',
'speak': '็บ่จ',
'mute_members': 'ใกใณใใผใใใฅใผใ',
'deafen_members': 'ใกใณใใผใฎในใใผใซใผใใใฅใผใ',
'move_members': 'ใกใณใใผใ็งปๅ',
'use_voice_activation': '้ณๅฃฐๆคๅบใไฝฟ็จ',
'priority_speaker': 'ใใฉใคใชใชใใฃในใใผใซใผ'
}
def __init__(self, client, name=None,):
self.client: commands.Bot = client
self.name = name if name is not None else type(self).__name__
@property
def category(self):
return self.client.get_channel(free_category)
@property
def create_channel(self):
return self.client.get_channel(free_category_create)
@commands.command(name='ftcc')
async def free_text_channel_create(self, ctx, *, name):
channel = await self._free_channel_create(ctx, name)
if channel is not None:
await ctx.send(
'ไฝๆใใพใใใ\n{0}\nใใจ{1}ใใฃใณใใซไฝๆๅฏ่ฝใ'
.format(channel.mention, 50 - len(channel.category.channels))
)
async def _free_channel_create(self, ctx, name):
category = self.category
if len(category.channels) >= 50:
await ctx.send(
"ใใฃใณใใซใไธๆฏใงไฝๆใงใใพใใใ\n"
"้ๅถใซ้ฃ็ตกใใฆใใ ใใใ"
)
return
guild = category.guild
overwrites = {
self.client.user:
discord.PermissionOverwrite.from_pair(discord.Permissions.all(), discord.Permissions.none()),
ctx.author:
discord.PermissionOverwrite.from_pair(discord.Permissions(66448721), discord.Permissions.none()),
guild.default_role:
discord.PermissionOverwrite.from_pair(discord.Permissions.none(), discord.Permissions.all()),
guild.get_role(515467411898761216):
discord.PermissionOverwrite.from_pair(discord.Permissions.none(), discord.Permissions.all()),
guild.get_role(515467425429585941):
discord.PermissionOverwrite.from_pair(
discord.Permissions(37080128), discord.Permissions(2 ** 53 - 37080129)),
}
return await guild.create_text_channel(name, overwrites=overwrites, category=category)
@commands.command()
async def cedit(self, ctx,
channel: typing.Union[discord.TextChannel, discord.VoiceChannel] = None):
EMOJI = 0x1f1e6 # ็ตตๆๅญๅฎๆฐ(ใใใ่ถณใใใๅผใใใใใใจใชใขใฏใทใงใณ็ใซใใพใใใ)
EMBED_TITLE = 'ใใฃใณใใซๆจฉ้็ทจ้'
if channel is None:
channel = ctx.channel
if (
(
ctx.author in channel.overwrites
and channel.overwrites_for(ctx.author).manage_roles is not False
) # ใกใณใใผใฎ่ฟฝๅ ่จญๅฎใใใใใใคใๆจฉ้ใฎ็ฎก็ใใNone
or await self.client.is_owner(ctx.author) # ใชใผใใผ
or await is_staff(ctx.author) # ในใฟใใใใผใ
):
all_commands = (
'ๆฐ่ฆใซๅฝน่ทใ่ฟฝๅ ่จญๅฎ',
'ๆฐ่ฆใซใฆใผใถใผใ่ฟฝๅ ่จญๅฎ',
'็พๅจ่จญๅฎใใใฆใใ่ฟฝๅ ่จญๅฎใฎๅคๆด',
'็พๅจ่จญๅฎใใใฆใใ่ฟฝๅ ่จญๅฎใฎๅ้ค'
)
emojis = [chr(i + EMOJI) for i in range(len(all_commands))]
embed = discord.Embed(
title=EMBED_TITLE,
description='\n'.join(
'{0}:{1}'.format(i, e)
for i, e in zip(emojis, all_commands)
)
)
embed.set_footer(text='ๅฏพ่ฑกใใฃใณใใซ:{0.name}\nใใฃใณใใซID:{0.id}'.format(channel))
message = await ctx.send(embed=embed)
[await message.add_reaction(e)
for e in emojis]
def check(r, u):
return (
r.me and ctx.author == u
and r.message.id == message.id
and r.message.channel == message.channel
)
reaction, _ = \
await self.client.wait_for('reaction_add', check=check)
await message.delete()
num_command = ord(reaction.emoji) - EMOJI
if 0 <= num_command <= 1:
# ใฆใผใถใผใพใใฏๅฝน่ทใฎ่ฟฝๅ
if num_command == 0:
target_type = 'ๅฝน่ท'
else:
target_type = 'ใฆใผใถใผ'
description1 = ('ใใฃใณใใซใฎ่ฟฝๅ ่จญๅฎใซ{0}ใ่ฟฝๅ ใใพใใ\n'
'่ฟฝๅ ใใใ{0}ใๅ
ฅๅใใฆใใ ใใ').format(target_type)
message = await ctx.send(description1)
def check1(m):
return (
m.channel == ctx.channel
and m.author == ctx.author
)
message2 = await self.client.wait_for('message', check=check1)
await message.delete()
if num_command == 0:
converter = commands.RoleConverter()
else:
converter = commands.MemberConverter()
try:
target = await converter.convert(ctx, message2.content)
except commands.BadArgument:
await ctx.send(
'ๆๅฎใใ{0}ใ่ฆใคใใใพใใใงใใ'.format(target_type)
+ 'ใใไธๅบฆใใ็ดใใฆไธใใใ'
)
return
elif 2 <= num_command <= 3:
action = 'ๅคๆด' if num_command == 2 else 'ๅ้ค'
description1 = (
'่ฟฝๅ ่จญๅฎใ{0}ใใพใ\n'
'{0}ใใใๅฝน่ทใใพใใฏใฆใผใถใผใ้ธใใงใใ ใใ'
).format(action)
embed = discord.Embed(title=EMBED_TITLE, description=description1)
overwrites = channel.overwrites
def func2(_page=0):
end = (_page + 1) * 17
if len(overwrites) < end:
end = len(overwrites)
start = _page * 17
tg = [i for i in overwrites.keys()][start:end]
try:
tg.remove(self.client.user)
except ValueError:
pass
desc = '\n'.join(
'{0}:{1}'.format(chr(i + EMOJI), t.mention)
for i, t in enumerate(tg)
)
return tg, desc
page = 0
targets, description1 = func2(page)
embed.add_field(name='ๅฝน่ทใปใฆใผใถใผไธ่ฆง', value=description1)
message = await ctx.send(embed=embed)
[await message.add_reaction(chr(i + EMOJI))
for i in range(len(targets))]
await message.add_reaction('\U0001f519')
await message.add_reaction('\U0001f51c')
await message.add_reaction('\u274c')
def check3(r, u):
return (
u == ctx.author
and r.me
and r.message.channel == message.channel
and r.message.id == message.id
)
while True:
new_page = page
reaction, user = \
await self.client.wait_for('reaction_add', check=check3)
await message.remove_reaction(reaction, user)
if reaction.emoji == '\U0001f519':
new_page = page - 1
elif reaction.emoji == '\U0001f51c':
new_page = page + 1
elif reaction.emoji == '\u274c':
await message.delete()
await ctx.send('ไธญๆญขใใพใใใ')
return
else:
break
if new_page != page:
new_targets, description1 = func2(_page=new_page)
if description1 != '':
embed.set_field_at(
0, name='ๅฝน่ทใปใฆใผใถใผไธ่ฆง', value=description1
)
await message.edit(embed=embed)
page = new_page
targets = new_targets
await message.delete()
target = targets[ord(reaction.emoji) - EMOJI]
if num_command <= 2:
perms_jp = self.permissions_jp.copy()
perms_jp.update(
self.permissions_jp_text
if isinstance(channel, discord.TextChannel)
else self.permissions_jp_voice
)
perms = tuple(perms_jp.keys())
def func1(overwrite):
description = ''
n = 0
for en, jp in perms_jp.items():
try:
value = getattr(overwrite, en)
except AttributeError:
continue
else:
description += '{0}'.format(chr(n + EMOJI))
description += jp
if value:
description += ':\u2705\n'
elif value is None:
description += ':\u2b1c\n'
else:
description += ':\u274c\n'
n += 1
return description
overwrite1: discord.PermissionOverwrite = channel.overwrites_for(target)
embed = discord.Embed(
title=EMBED_TITLE,
description='{0}ใฎๆจฉ้่จญๅฎใๅคๆดใใพใ'.format(target.mention)
)
embed.add_field(name='ๆจฉ้ไธ่ฆง', value=func1(overwrite1))
message3 = await ctx.send(embed=embed)
[await message3.add_reaction(chr(i + EMOJI))
for i in range(len(perms))]
await message3.add_reaction('\u2705')
await message3.add_reaction('\u274c')
def check2(reaction, user):
return (
user == ctx.author
and reaction.me
and reaction.message.channel == message3.channel
and reaction.message.id == message3.id
)
loop = True
while loop:
reaction, user = await self.client.wait_for('reaction_add', check=check2)
if reaction.emoji == '\u2705':
loop = False
continue
elif reaction.emoji == '\u274c':
await message3.delete()
await ctx.send('ไธญๆญขใใพใใใ')
break
await message3.remove_reaction(reaction, user)
perm = perms[ord(reaction.emoji) - EMOJI]
value = getattr(overwrite1, perm)
if value:
value = False
elif value is None:
value = True
else:
value = None
if perm == 'manage_roles' and value:
value = False
overwrite1.update(**{perm: value})
embed.set_field_at(0, name='ๆจฉ้ไธ่ฆง', value=func1(overwrite1))
await message3.edit(embed=embed)
else:
await message3.delete()
await channel.set_permissions(target, overwrite=overwrite1)
await ctx.send('ๆจฉ้ใๅคๆดใใพใใใ')
elif num_command == 3:
await channel.set_permissions(target, overwrite=None)
await ctx.send('ๆจฉ้ใๅ้คใใพใใใ')
else:
await ctx.send('ใใชใใฏใใใใใๆจฉ้ใใใใพใใใ')
| Kesigomon/Skyline_py | cogs/freecategory.py | freecategory.py | py | 13,622 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 38,
"usage_type": "attribute"
},
{
"ap... |
37716628062 | # Module Imports
import mariadb
import sys
import socket
import threading
import time
# Connect to MariaDB Platform
try:
db = mariadb.connect(
user="user",
password="password",
host="mariadb",
port=3306,
database="info801"
)
except mariadb.Error as e:
print(f"Error connecting to MariaDB Platform: {e}")
sys.exit(1)
# Get Cursor
cur = db.cursor()
HEADER = 64
SERVER = socket.gethostbyname(socket.gethostname())
PORT = 5050
ADDR = (SERVER,PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(ADDR)
def getBatimentByName(bat):
cur.execute(
"SELECT id FROM batiment WHERE nom=? LIMIT 1",
(bat,))
def getPersonneByBadge(badge):
cur.execute(
"SELECT id FROM personne WHERE numero_badge=? LIMIT 1",
(badge,))
def verify(idBat,idPer):
cur.execute(
"SELECT COUNT(*) FROM personne_batiment WHERE batiment_id=? AND personne_id=?",
(idBat,idPer))
def insertHistory(personne_id,batiment_id):
cur.execute(
"INSERT INTO history (personne_id,batiment_id) VALUES (?, ?)",
(personne_id, batiment_id))
db.commit()
print(f"Last Inserted ID: {cur.lastrowid}")
def handle_client(conn, addr):
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
msg = msg.split(',')
print(msg)
if msg == DISCONNECT_MESSAGE:
connected = False
if(msg[0] == "batiment"):
msg.pop(0)
getBatimentByName(msg[0])
msg.pop(0)
(idBat,) = cur.fetchone()
print(f"id bat: {idBat}")
#remove scanCarte
msg.pop(0)
getPersonneByBadge(msg[0])
(idPer,) = cur.fetchone()
print(f"id bat: {idPer}")
msg.pop(0)
cur.fetchall()
verify(idBat,idPer)
(ver,) = cur.fetchone()
if ver==0:
msg = "status,lumiereRouge"
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
conn.send(send_length)
conn.send(message)
insertHistory(idPer,idBat)
else:
msg = "status,lumiereVerte"
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
conn.send(send_length)
conn.send(message)
insertHistory(idPer,idBat)
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start() | visarsylejmani/architecture-logicielle-info801 | server/server.py | server.py | py | 3,523 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mariadb.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mariadb.Error",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "socket.gethostbyname",
... |
5668667396 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from PIL import Image as pil_image
from mosaic import data_utils
from mosaic import contexts
from mosaic import image_io
from mosaic import features
from mosaic import plots
__all__ = ['image_histogram']
def images_to_histogram(images, x, n_bins=None, sort_by=None):
"""Create an image histogram.
Parameters
----------
images : listof PIL Images.
Images to display in the image histogram. All images must be
the same shape.
x : np.array of shape [n_samples,]
The variable whose histogram is displayed.
n_bins : int or None, optional
Specification of the number of bins. If None, then the
Freedman-Diaconis estimator is used to determine the number of bins.
sort_by : np.array of shape [n_samples,], optional
Data or name of the variable to sort images by on the y-axis.
Returns
-------
A properly shaped width x height x 3 PIL Image.
"""
n_bins = n_bins if n_bins is not None else 'fd'
hist, bin_edges = np.histogram(x, bins=n_bins)
n_bins = hist.shape[0]
bin_max = hist.max()
width, height = images[0].size
px_w = width * n_bins
px_h = height * bin_max
#background_color = (50, 50, 50)
background_color = (255, 255, 255)
canvas = pil_image.new('RGB', (px_w, px_h), background_color)
thumbnail_px = (width, height)
for bin_idx, edge in enumerate(zip(bin_edges, bin_edges[1:])):
edge_mask = (x >= edge[0]) & (x < edge[1])
tmp_sort = sort_by[edge_mask]
tmp = [images[index] for index in np.where(edge_mask)[0]]
# sort y values if present
if sort_by is not None:
tmp = [tmp[index] for index in np.argsort(tmp_sort)[::-1]]
y_coord = px_h
x_coord = width * bin_idx
for thumbnail in tmp:
canvas.paste(thumbnail, (x_coord, y_coord))
y_coord -= height
return canvas
def histogram_matplotlib(images, x, n_bins=None, sort_by=None, **kwargs):
fig, ax = plt.subplots(**kwargs)
n_bins = n_bins if n_bins is not None else 'fd'
hist, bin_edges = np.histogram(x, bins=n_bins)
n_bins = hist.shape[0]
bin_max = hist.max()
y_max = 0
for bin_idx, edge in enumerate(zip(bin_edges, bin_edges[1:])):
img_height = abs(edge[1] - edge[0])
edge_mask = (x >= edge[0]) & (x < edge[1])
bin_images = images[edge_mask]
# sort y values if present
if sort_by is not None:
bin_sort = sort_by[edge_mask]
bin_images = bin_images[np.argsort(bin_sort)]
left, right = edge
for i, img in enumerate(bin_images):
bottom = img_height * i
top = bottom + img_height
plots.imshow(img, extent=[left, right, bottom, top], interpolation='lanczos')
if top > y_max:
y_max = top
ax.set_xlim(bin_edges[0], bin_edges[-1])
ax.set_ylim(0, y_max)
ax.yaxis.set_visible(False)
return sns.despine(ax=ax, left=True)
def image_histogram(x,
images=None,
data=None,
n_bins=None,
sort_by=features.HSVFeatures.SATURATION,
image_dir='',
image_size=None,
n_jobs=1,
**kwargs):
"""Create an univariate image histogram binned by the `x`
variable.
Parameters
----------
x : str or array-like of shape [n_samples,]
Data or names of variables in `data`.
images : str or array-like of shape [n_samples, width, height, channels], optional
Image array or name of the variable containing the image file
paths within `data`.
data : pandas.DataFrame, optional
Tidy ("long-form") dataframe where each column is a variable
and each row is an observation. If `images`, `x`, or `sort_by`
is a variable name, then it should be contained in `data`.
n_bins : int or None
Specification of the number of bins. If None, then the
Freedman-Diaconis estimator is used to determine the number of bins.
sort_by : str, HSVFeatures enum or array-like of shape [n_samples,], optional
Data or name of the variable to sort images by on the y-axis.
image_dir : str (default='')
The location of the image files on disk.
image_size : int
The size of each image in the scatter plot.
n_jobs : int (default=1)
The number of parallel workers to use for loading
the image files.
Returns
-------
ax : matplotlib Axes
Returns the Axes object with the plot for further tweaking.
Examples
--------
Create an image histogram.
.. plot:: ../examples/image_histogram.py
"""
images = data_utils.get_images(
data, images,
image_dir=image_dir,
image_size=image_size,
index=None,#x.index,
as_image=False,
n_jobs=n_jobs)
x = data_utils.get_variable(data, x)
if sort_by is not None:
if sort_by in features.HSVFeatures.all_features():
hsv = features.extract_hsv_stats(images, n_jobs=n_jobs)
sort_by = hsv[:, features.HSVFeatures.feature_index(sort_by)]
else:
sort_by = data_utils.get_variable(data, sort_by)
#histo = images_to_histogram(images, x, n_bins=n_bins, sort_by=sort_by)
#return plots.pillow_to_matplotlib(histo, **kwargs)
return histogram_matplotlib(images, x, n_bins=n_bins, sort_by=sort_by, **kwargs)
| joshloyal/Mosaic | mosaic/histogram.py | histogram.py | py | 5,809 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.histogram",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_numbe... |
71798506343 | #!/usr/bin/python
# python images_for_deep_learning_sv02_create_non_white_space_mapping_file.py -i input_bed -o output_mapping_file
# python images_for_deep_learning_sv02_create_non_white_space_mapping_file.py -i non_white_out_all_mgrb_and_isks.txt -o non_white_out_all_mgrb_and_isks_map_350x350.txt
# Sort input bed file by chromosome. It is assumed that positions within chromosome are already sorted.
# Count number of nucleotides in it.
# Map nucleotide positions to a 350 x 350 array.
# Output the mapping.
# This mapping will be used to convert a VCF file to a 350 x 350 image.
# The entire 3 billion nucleotide genome could have been mapped. However, most of it doesn't contain any variants.
# This program reads and maps only those genome positions that have variants.
# Even then, there will be more genomic nucleotides to map to pixels than there are available pixels in a 350 x 350 image.
# This program calculates the nucleotide_to_pixel_ratio and maps or compresses multiple bp into one pixel.
# Mapping file is tab-delimited and has no header. First line is nucleotide_to_pixel_ratio, num_rows, and num_cols for pixel image.
# Columns are chrom, start_pos, end_pos, map_row_start, map_col_start, map_row_end, map_col_end
# 14214 350 350
# 1 843216 843248 1 1 1 1
# 1 869460 870342 1 1 1 1
# 1 884041 884110 1 1 1 1
# ...
# Y 22661495 22661520 350 52 350 52
# Y 24417006 24417026 350 52 350 52
# Y 28787561 28802853 350 52 350 53
# MT 1 16569 350 53 350 54
__author__ = 'Emma M. Rath'
__copyright__ = 'Copyright 2019, Garvan Institute of Medical Research and Kinghorn Cancer Centre'
import sys
import os
import argparse
import math
######################################################
def is_integer(s):
try:
int(s)
return True
except ValueError:
return False
######################################################
def read_input( in_bed ):
in_chrom = []
in_start = []
in_end = []
infile = open( in_bed, 'r')
for inline in infile:
inline = inline.strip()
if (inline != ''):
infields = inline.split('\t')
in_chrom.append( str(infields[0]) )
in_start.append( int(infields[1]) )
in_end.append( int(infields[2]) )
return in_chrom, in_start, in_end
######################################################
def sort_input( in1_chrom, in1_start, in1_end ):
order_of_chromosomes = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','X','Y','MT']
in2_chrom = []
in2_start = []
in2_end = []
for this_chrom in order_of_chromosomes:
for i in range( 0, len(in1_chrom) ):
if (in1_chrom[i] == this_chrom):
in2_chrom.append( in1_chrom[i] )
in2_start.append( in1_start[i] )
in2_end.append( in1_end[i] )
return in2_chrom, in2_start, in2_end
######################################################
def count_nucleotides( in_start, in_end ):
num_nucleotides = 0
for i in range( 0, len(in_start) ):
num_nucleotides = num_nucleotides + in_end[i] - in_start[i] + 1
return num_nucleotides
######################################################
def map_positions_to_pixels( in_chrom, in_start, in_end, num_rows, num_cols, nucleotide_to_pixel_ratio ):
map_row_start = [0] * len(in_chrom)
map_col_start = [0] * len(in_chrom)
map_row_end = [0] * len(in_chrom)
map_col_end = [0] * len(in_chrom)
row_upto = 1
col_upto = 1
for i in range( 0, len(in_chrom)):
chrom = str(in_chrom[i])
start_pos = int(in_start[i])
end_pos = int(in_end[i])
map_row_start[i] = row_upto
map_col_start[i] = col_upto
remaining_unmapped_bp = end_pos - start_pos + 1
remaining_unmapped_pixels = int(remaining_unmapped_bp / nucleotide_to_pixel_ratio)
if (int(remaining_unmapped_bp % nucleotide_to_pixel_ratio) > 0):
remaining_unmapped_pixels = remaining_unmapped_pixels + 1
remaining_cols_in_row = num_cols - col_upto - 1
if (remaining_unmapped_pixels <= remaining_cols_in_row):
map_row_end[i] = row_upto
map_col_end[i] = col_upto + remaining_unmapped_pixels - 1
col_upto = col_upto + remaining_unmapped_pixels - 1
else:
remaining_unmapped_pixels = remaining_unmapped_pixels - remaining_cols_in_row
additional_rows = int(math.ceil( float(remaining_unmapped_pixels) / float(num_cols) ))
map_row_end[i] = row_upto + additional_rows
row_upto = row_upto + additional_rows
additional_cols = int(remaining_unmapped_pixels % num_cols)
if (additional_cols == 0):
col_upto = num_cols
map_col_end[i] = num_cols
else:
col_upto = additional_cols
map_col_end[i] = additional_cols
return map_row_start, map_col_start, map_row_end, map_col_end
######################################################
def write_output_map( out_map, nucleotide_to_pixel_ratio, num_rows, num_cols, in_chrom, in_start, in_end, map_row_start, map_col_start, map_row_end, map_col_end ):
out_map_file = open(out_map, 'w')
outline = str(nucleotide_to_pixel_ratio) + "\t" + str(num_rows) + "\t" + str(num_cols) + "\n"
out_map_file.write( outline )
for i in range( 0, len(in_chrom) ):
outline = str(in_chrom[i]) + "\t" + str(in_start[i]) + "\t" + str(in_end[i]) + "\t" + str(map_row_start[i]) + "\t" + str(map_col_start[i]) + "\t" + str(map_row_end[i]) + "\t" + str(map_col_end[i]) + "\n"
out_map_file.write( outline )
out_map_file.close()
return
######################################################
def main():
parser = argparse.ArgumentParser(description='Read in BED file and sort. Map BED file locations to a 350 x 350 array.')
parser.add_argument('-i', action="store", dest="in_bed", required=True, help='Input BED file')
parser.add_argument('-o', action="store", dest="out_map", required=True, help='Output mapping file')
args = parser.parse_args()
num_rows = 350
num_cols = 350
in1_chrom, in1_start, in1_end = read_input( args.in_bed )
in2_chrom, in2_start, in2_end = sort_input( in1_chrom, in1_start, in1_end )
num_nucleotides = count_nucleotides( in2_start, in2_end )
num_pixels = num_rows * num_cols
nucleotide_to_pixel_ratio = int(num_nucleotides / num_pixels) + 1
map_row_start, map_col_start, map_row_end, map_col_end = map_positions_to_pixels( in2_chrom, in2_start, in2_end, num_rows, num_cols, nucleotide_to_pixel_ratio )
write_output_map( args.out_map, nucleotide_to_pixel_ratio, num_rows, num_cols, in2_chrom, in2_start, in2_end, map_row_start, map_col_start, map_row_end, map_col_end )
if __name__=='__main__':
main()
| emmamrath/gene_annotation_of_structural_variants | create_images_for_deep_learning/images_for_deep_learning_sv02_create_non_white_space_mapping_file.py | images_for_deep_learning_sv02_create_non_white_space_mapping_file.py | py | 6,428 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "math.ceil",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 148,
"usage_type": "call"
}
] |
10179826987 | #!/usr/bin/python3
"""
Number of subscribers(not active users,total subscribers) for a given subreddit
"""
import requests
def number_of_subscribers(subreddit):
"""
Number of subscribers for a given subreddit from the Reddit API
"""
if subreddit is None or not isinstance(subreddit, str):
return 0
user_agent = {'User-agent': 'Google Chrome Version 81.0.4044.129'}
url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
response = requests.get(url, headers=user_agent)
results = response.json()
try:
return results['data']['subscribers']
except Exception:
return 0
| jamesAlhassan/alx-system_engineering-devops | 0x16-api_advanced/0-subs.py | 0-subs.py | py | 646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
}
] |
36839090820 | import moviepy.editor as me
import numpy as np
from bm_analyze import *
import time
t=open('!r_data.txt')
M=t.read().split('\n')
t.close()
del t
M[0]='[empty]'
start=36000
end=37000
speed=16
time.sleep(120)
render=me.VideoClip(lambda t:np.zeros([1080,1920,3]),duration=(end-start)/speed)#+3*(q+1==k))
clips=[render]
print('Finished making a void')
def parseMatrix(M):
if M=='[empty]':return []
if M=='Limit':return [[1]]
return eval("[" + M.replace(")(", "],[").replace("(", "[").replace(")", "]").replace("?","") + "]")
for i in range(start,end):
a=''
k=0
for j in M[i]:
a+=j
k+=1
if k>=100 and j==')':a+='\n';k=0
obj=me.TextClip(a.replace('(0)(2)','Limit of BMS'),size=(1920,500),color='white',fontsize=25,method='label',align='northwest')
clips.append(obj.set_start((i-start)/speed).set_duration(1/speed).set_pos((10,100)))
if parseMatrix(M[i])<[[0],[1,1,1],[2,1,1],[3,1,1],[2,1,1],[3,1],[2]]:
obj=me.TextClip(prettyprint(toexp(matfromstr(M[i])[0])),size=(1920,600),color='white',fontsize=30,method='label',align='west',font='Courier-New')
clips.append(obj.set_start((i-start)/speed).set_duration(1/speed).set_pos((10,500)))
obj=me.TextClip('Frame {:,}\nโ {}h {:02}m spent calculating'.format(i,i//6660,i//111%60),size=(1920,500),color='white',fontsize=20,method='label',align='northwest')
clips.append(obj.set_start((i-start)/speed).set_duration(1/speed).set_pos((10,20)))
if i%100==0:
print(i,M[i])
# render=me.CompositeVideoClip(clips)
# clips=[render]
if i%10==0 and i%100!=0:
print(i)
print('Finished creating text')
render=me.CompositeVideoClip(clips)
del clips
print('Finished adding text to the void')
render_=me.VideoFileClip('BMSlngi.mp4')
me.concatenate([render_,render]).write_videofile(f'BMSlngi_.mp4',fps=24)
del render
del render_
| dr2xmillion371/stuff | matrix.py | matrix.py | py | 1,876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "moviepy.editor.VideoClip",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "moviepy.editor",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
... |
23536887489 | import numpy as np
from collections import deque
import gymnasium as gym
from stable_baselines3.common.atari_wrappers import (
ClipRewardEnv,
EpisodicLifeEnv,
FireResetEnv,
MaxAndSkipEnv,
NoopResetEnv,
)
class Agent:
def __init__(self, eval_env):
self.eval_env = eval_env
def eval(self):
obs, _ = self.eval_env.reset()
while True:
action = np.array([self.eval_env.single_action_space.sample() for _ in range(1)])
obs, _, _, _, infos = self.eval_env.step(action)
if "final_info" in infos:
for info in infos["final_info"]:
# Skip the envs that are not done
if "episode" not in info:
continue
return info['episode']['r'], info['episode']['l']
def run(self):
eval_rewards = deque(maxlen=10)
eval_steps = deque(maxlen=10)
for episode in range(10):
eval_reward, eval_step = self.eval()
eval_rewards.append(eval_reward)
eval_steps.append(eval_step)
return np.mean(eval_rewards), np.mean(eval_steps)
if __name__ == "__main__":
def make_env(env_name, seed, resize=84):
def thunk():
env = gym.make(env_name)
env = gym.wrappers.RecordEpisodeStatistics(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ClipRewardEnv(env)
if len(env.observation_space.shape): # pixel obs
env = gym.wrappers.ResizeObservation(env, (resize, resize))
env = gym.wrappers.GrayScaleObservation(env)
env = gym.wrappers.FrameStack(env, 4)
env.action_space.seed(seed)
return env
return thunk
env_names = [
"Alien-v5", "Amidar-v5", "Assault-v5", "Asterix-v5", "BankHeist-v5",
"BattleZone-v5", "Boxing-v5", "Breakout-v5", "ChopperCommand-v5", "CrazyClimber-v5",
"DemonAttack-v5", "Freeway-v5", "Frostbite-v5", "Gopher-v5", "Hero-v5",
"IceHockey-v5", "Jamesbond-v5", "Kangaroo-v5", "Krull-v5", "KungFuMaster-v5",
"MsPacman-v5", "Pong-v5", "PrivateEye-v5", "Seaquest-v5", "Skiing-v5",
"Surround-v5", "Tennis-v5", "UpNDown-v5"
]
for env in env_names:
eval_env = gym.vector.SyncVectorEnv([make_env("ALE/" + env, 1)])
random_agent = Agent(eval_env)
rewards = deque(maxlen=10)
steps = deque(maxlen=10)
for i in range(10):
reward, step = random_agent.run()
rewards.append(reward)
steps.append(step)
print(env, "\t", np.mean(rewards), np.std(rewards), "\t", np.mean(rewards), np.std(steps))
| ZangZehua/rlatari | utils/random_atari.py | random_atari.py | py | 2,894 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"lin... |
5198901937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import unittest
import yalix.repl as repl
import yalix.utils as utils
def send_inputs(*args):
# count always starts from 1
def invoke(count):
try:
cmd = args[count - 1]
if isinstance(cmd, str):
yield cmd
else:
raise cmd
except IndexError:
raise EOFError()
return invoke
def capture_outputs(collector):
""" Collector should be dict-like """
def invoke(result, count):
collector[count] = result
return invoke
class ReplTests(unittest.TestCase):
def test_license(self):
self.assertTrue(len(repl.license()) > 0)
self.assertTrue(str(datetime.now().year) in repl.license())
def test_copyright(self):
self.assertTrue(len(repl.copyright()) > 0)
self.assertTrue(str(datetime.now().year) in repl.copyright())
def test_help(self):
self.assertTrue(len(repl.help()) > 0)
self.assertTrue('github.com/rm-hull/yalix' in repl.help())
def test_init_readline(self):
with utils.capture() as out:
repl.init_readline({})
self.assertTrue('Reading history' in out[0])
self.assertTrue('DONE' in out[0] or 'FAILED' in out[0])
def test_repl_starts_OK(self):
commands = send_inputs("(+ 1 2 3 4)", "(iterate inc 0)",
KeyboardInterrupt())
results = {}
collector = capture_outputs(results)
with utils.capture() as out:
repl.repl(inprompt=commands, outprompt=collector)
self.assertEqual('10', results[1])
self.assertEqual(
'(0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...)', results[2])
self.assertTrue('KeyboardInterrupt' in out[0])
self.assertTrue('Bye!' in out[0])
if __name__ == '__main__':
unittest.main()
| rm-hull/yalix | python/tests/repl_test.py | repl_test.py | py | 1,934 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "yalix.repl.license",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "yalix.repl",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "datetime.datetim... |
495434407 | from __future__ import print_function
import os
import string
import mock
import pytest
from click import UsageError
from click.testing import CliRunner
from dagster import (
DagsterInvariantViolationError,
PartitionSetDefinition,
RepositoryDefinition,
ScheduleDefinition,
lambda_solid,
pipeline,
repository_partitions,
schedules,
seven,
solid,
)
from dagster.check import CheckError
from dagster.cli.pipeline import (
execute_backfill_command,
execute_execute_command,
execute_list_command,
execute_print_command,
execute_scaffold_command,
pipeline_backfill_command,
pipeline_execute_command,
pipeline_list_command,
pipeline_print_command,
pipeline_scaffold_command,
)
from dagster.cli.run import run_list_command, run_wipe_command
from dagster.cli.schedule import (
schedule_list_command,
schedule_restart_command,
schedule_start_command,
schedule_stop_command,
schedule_up_command,
schedule_wipe_command,
)
from dagster.config.field_utils import Shape
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher import RunLauncher
from dagster.core.serdes import ConfigurableClass
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.local_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
from dagster.utils import script_relative_path
from dagster.utils.test import FilesytemTestScheduler
def no_print(_ignored):
    """A print_fn stand-in that discards its argument (keeps test output quiet)."""
    return None
# --- Fixture solids, pipelines, and repository used by the CLI tests. ---


@lambda_solid
def do_something():
    """Trivial solid: always produces 1."""
    return 1


@lambda_solid
def do_input(x):
    """Identity solid: passes its single input straight through."""
    return x


@pipeline(name='foo')
def foo_pipeline():
    do_something()


def define_foo_pipeline():
    """Accessor returning the 'foo' pipeline (repository-dict style)."""
    return foo_pipeline


@pipeline(name='baz', description='Not much tbh')
def baz_pipeline():
    do_input()


def define_bar_repo():
    """Build the 'bar' repository containing the foo and baz pipelines."""
    return RepositoryDefinition('bar', {'foo': define_foo_pipeline, 'baz': lambda: baz_pipeline},)
# --- Fixtures for exercising stdout/stderr capture. ---


@solid
def spew(context):
    """Solid that writes a marker line via the context logger."""
    context.log.info('HELLO WORLD')


@solid
def fail(context):
    """Solid that always raises, to populate stderr."""
    raise Exception('I AM SUPPOSED TO FAIL')


@pipeline
def stdout_pipeline():
    spew()


@pipeline
def stderr_pipeline():
    fail()
def test_list_command():
runner = CliRunner()
execute_list_command(
{
'repository_yaml': None,
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'define_bar_repo',
},
no_print,
)
result = runner.invoke(
pipeline_list_command,
['-f', script_relative_path('test_cli_commands.py'), '-n', 'define_bar_repo'],
)
assert result.exit_code == 0
assert result.output == (
'Repository bar\n'
'**************\n'
'Pipeline: baz\n'
'Description:\n'
'Not much tbh\n'
'Solids: (Execution Order)\n'
' do_input\n'
'*************\n'
'Pipeline: foo\n'
'Solids: (Execution Order)\n'
' do_something\n'
)
execute_list_command(
{
'repository_yaml': None,
'python_file': None,
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': 'define_repo',
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-m', 'dagster_examples.intro_tutorial.repos', '-n', 'define_repo']
)
assert result.exit_code == 0
assert result.output == (
'Repository hello_cereal_repository\n'
'**********************************\n'
'Pipeline: complex_pipeline\n'
'Solids: (Execution Order)\n'
' load_cereals\n'
' sort_by_calories\n'
' sort_by_protein\n'
' display_results\n'
'*******************************\n'
'Pipeline: hello_cereal_pipeline\n'
'Solids: (Execution Order)\n'
' hello_cereal\n'
)
execute_list_command(
{
'repository_yaml': script_relative_path('repository_module.yaml'),
'python_file': None,
'module_name': None,
'fn_name': None,
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-y', script_relative_path('repository_module.yaml')]
)
assert result.exit_code == 0
assert result.output == (
'Repository hello_cereal_repository\n'
'**********************************\n'
'Pipeline: complex_pipeline\n'
'Solids: (Execution Order)\n'
' load_cereals\n'
' sort_by_calories\n'
' sort_by_protein\n'
' display_results\n'
'*******************************\n'
'Pipeline: hello_cereal_pipeline\n'
'Solids: (Execution Order)\n'
' hello_cereal\n'
)
with pytest.raises(UsageError):
execute_list_command(
{
'repository_yaml': None,
'python_file': 'foo.py',
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': 'define_repo',
},
no_print,
)
result = runner.invoke(
pipeline_list_command,
['-f', 'foo.py', '-m', 'dagster_examples.intro_tutorial.repos', '-n', 'define_repo'],
)
assert result.exit_code == 2
with pytest.raises(UsageError):
execute_list_command(
{
'repository_yaml': None,
'python_file': None,
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': None,
},
no_print,
)
result = runner.invoke(pipeline_list_command, ['-m', 'dagster_examples.intro_tutorial.repos'])
assert result.exit_code == 2
with pytest.raises(UsageError):
execute_list_command(
{
'repository_yaml': None,
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': None,
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-f', script_relative_path('test_cli_commands.py')]
)
assert result.exit_code == 2
def valid_execute_args():
    """Enumerate every supported way of addressing a pipeline for execute_* commands."""
    yaml_file = script_relative_path('repository_file.yaml')
    yaml_module = script_relative_path('repository_module.yaml')
    this_file = script_relative_path('test_cli_commands.py')

    def args(repository_yaml=None, pipeline_name=(), python_file=None,
             module_name=None, fn_name=None):
        # Mirror the option namespace the click commands hand to execute_*.
        return {
            'repository_yaml': repository_yaml,
            'pipeline_name': pipeline_name,
            'python_file': python_file,
            'module_name': module_name,
            'fn_name': fn_name,
        }

    return [
        # Repository yaml pointing at a file-based repo.
        args(repository_yaml=yaml_file, pipeline_name=('foo',)),
        # Repository yaml pointing at a module-based repo.
        args(repository_yaml=yaml_module, pipeline_name=('hello_cereal_pipeline',)),
        # Python file + repository function.
        args(pipeline_name=('foo',), python_file=this_file, fn_name='define_bar_repo'),
        # Module + repository function.
        args(
            pipeline_name=('hello_cereal_pipeline',),
            module_name='dagster_examples.intro_tutorial.repos',
            fn_name='define_repo',
        ),
        # Module + pipeline function directly (no repo, no pipeline arg).
        args(module_name='dagster_examples.intro_tutorial.repos', fn_name='hello_cereal_pipeline'),
        # Python file + pipeline-returning function.
        args(python_file=this_file, fn_name='define_foo_pipeline'),
        # Python file + @pipeline-decorated function.
        args(python_file=this_file, fn_name='foo_pipeline'),
    ]
def valid_cli_args():
    """CLI argv vectors paralleling valid_execute_args()."""
    yaml_file = script_relative_path('repository_file.yaml')
    yaml_module = script_relative_path('repository_module.yaml')
    this_file = script_relative_path('test_cli_commands.py')
    return [
        ['-y', yaml_file, 'foo'],
        ['-y', yaml_module, 'hello_cereal_pipeline'],
        ['-f', this_file, '-n', 'define_bar_repo', 'foo'],
        [
            '-m',
            'dagster_examples.intro_tutorial.repos',
            '-n',
            'define_repo',
            'hello_cereal_pipeline',
        ],
        ['-m', 'dagster_examples.intro_tutorial.repos', '-n', 'hello_cereal_pipeline'],
        ['-f', this_file, '-n', 'define_foo_pipeline'],
    ]
def test_print_command():
for cli_args in valid_execute_args():
execute_print_command(verbose=True, cli_args=cli_args, print_fn=no_print)
for cli_args in valid_execute_args():
execute_print_command(verbose=False, cli_args=cli_args, print_fn=no_print)
runner = CliRunner()
for cli_args in valid_cli_args():
result = runner.invoke(pipeline_print_command, cli_args)
assert result.exit_code == 0
result = runner.invoke(pipeline_print_command, ['--verbose'] + cli_args)
assert result.exit_code == 0
res = runner.invoke(
pipeline_print_command,
[
'--verbose',
'-f',
script_relative_path('test_cli_commands.py'),
'-n',
'define_bar_repo',
'baz',
],
)
assert res.exit_code == 0
def test_execute_mode_command():
runner = CliRunner()
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'--env',
script_relative_path('../environments/multi_mode_with_resources/add_mode.yaml'),
'-d',
'add_mode',
'multi_mode_with_resources', # pipeline name
],
)
assert add_result
mult_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'--env',
script_relative_path('../environments/multi_mode_with_resources/mult_mode.yaml'),
'-d',
'mult_mode',
'multi_mode_with_resources', # pipeline name
],
)
assert mult_result
double_adder_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'--env',
script_relative_path(
'../environments/multi_mode_with_resources/double_adder_mode.yaml'
),
'-d',
'double_adder_mode',
'multi_mode_with_resources', # pipeline name
],
)
assert double_adder_result
def test_execute_preset_command():
runner = CliRunner()
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'add',
'multi_mode_with_resources', # pipeline name
],
)
assert 'PIPELINE_SUCCESS' in add_result.output
# Can't use -p with --env
bad_res = runner.invoke(
pipeline_execute_command,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'add',
'--env',
script_relative_path(
'../environments/multi_mode_with_resources/double_adder_mode.yaml'
),
'multi_mode_with_resources', # pipeline name
],
)
assert bad_res.exit_code == 2
def test_execute_command():
for cli_args in valid_execute_args():
execute_execute_command(env=None, cli_args=cli_args)
for cli_args in valid_execute_args():
execute_execute_command(
env=[script_relative_path('default_log_error_env.yaml')], cli_args=cli_args
)
runner = CliRunner()
for cli_args in valid_cli_args():
runner_pipeline_execute(runner, cli_args)
runner_pipeline_execute(
runner, ['--env', script_relative_path('default_log_error_env.yaml')] + cli_args
)
res = runner.invoke(
pipeline_execute_command,
['-y', script_relative_path('repository_module.yaml'), 'hello_cereal_pipeline', 'foo'],
)
assert res.exit_code == 1
assert isinstance(res.exception, CheckError)
assert 'Can only handle zero or one pipeline args.' in str(res.exception)
def test_stdout_execute_command():
runner = CliRunner()
result = runner_pipeline_execute(
runner, ['-f', script_relative_path('test_cli_commands.py'), '-n', 'stdout_pipeline']
)
assert 'HELLO WORLD' in result.output
def test_stderr_execute_command():
runner = CliRunner()
result = runner_pipeline_execute(
runner, ['-f', script_relative_path('test_cli_commands.py'), '-n', 'stderr_pipeline']
)
assert 'I AM SUPPOSED TO FAIL' in result.output
def test_fn_not_found_execute():
    """A fn_name missing from the target file should raise a violation error."""
    missing_fn_args = {
        'repository_yaml': None,
        'pipeline_name': (),
        'python_file': script_relative_path('test_cli_commands.py'),
        'module_name': None,
        'fn_name': 'nope',
    }
    with pytest.raises(DagsterInvariantViolationError) as exc_info:
        execute_execute_command(env=None, cli_args=missing_fn_args)
    assert 'nope not found in module' in str(exc_info.value)
def not_a_repo_or_pipeline_fn():
    """Deliberately return a plain string: NOT a pipeline or repository."""
    return 'kdjfkjdf'


# A non-callable module attribute, also deliberately not a pipeline/repository.
not_a_repo_or_pipeline = 123
def test_fn_is_wrong_thing():
with pytest.raises(DagsterInvariantViolationError) as exc_info:
execute_execute_command(
env={},
cli_args={
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'not_a_repo_or_pipeline',
},
)
assert str(exc_info.value) == (
'not_a_repo_or_pipeline must be a function that returns a '
'PipelineDefinition or a RepositoryDefinition, or a function '
'decorated with @pipeline.'
)
def test_fn_returns_wrong_thing():
with pytest.raises(DagsterInvariantViolationError) as exc_info:
execute_execute_command(
env={},
cli_args={
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'not_a_repo_or_pipeline_fn',
},
)
assert str(exc_info.value) == (
'not_a_repo_or_pipeline_fn is a function but must return a '
'PipelineDefinition or a RepositoryDefinition, or be decorated '
'with @pipeline.'
)
def runner_pipeline_execute(runner, cli_args):
    """Invoke `dagster pipeline execute`; fail loudly (with captured stdout)
    on a non-zero exit code, otherwise return the click result."""
    result = runner.invoke(pipeline_execute_command, cli_args)
    if result.exit_code == 0:
        return result
    # CliRunner captures stdout, so surface it in the exception message.
    raise Exception(
        (
            'dagster pipeline execute commands with cli_args {cli_args} '
            'returned exit_code {exit_code} with stdout:\n"{stdout}" and '
            '\nresult as string: "{result}"'
        ).format(
            cli_args=cli_args, exit_code=result.exit_code, stdout=result.stdout, result=result
        )
    )
def test_scaffold_command():
for cli_args in valid_execute_args():
cli_args['print_only_required'] = True
execute_scaffold_command(cli_args=cli_args, print_fn=no_print)
cli_args['print_only_required'] = False
execute_scaffold_command(cli_args=cli_args, print_fn=no_print)
runner = CliRunner()
for cli_args in valid_cli_args():
result = runner.invoke(pipeline_scaffold_command, cli_args)
assert result.exit_code == 0
result = runner.invoke(pipeline_scaffold_command, ['-p'] + cli_args)
assert result.exit_code == 0
def test_default_memory_run_storage():
cli_args = {
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
}
result = execute_execute_command(env=None, cli_args=cli_args)
assert result.success
def test_override_with_in_memory_storage():
cli_args = {
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
}
result = execute_execute_command(
env=[script_relative_path('in_memory_env.yaml')], cli_args=cli_args
)
assert result.success
def test_override_with_filesystem_storage():
cli_args = {
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
}
result = execute_execute_command(
env=[script_relative_path('filesystem_env.yaml')], cli_args=cli_args
)
assert result.success
def test_run_list():
runner = CliRunner()
result = runner.invoke(run_list_command)
assert result.exit_code == 0
def test_run_wipe_correct_delete_message():
runner = CliRunner()
result = runner.invoke(run_wipe_command, input="DELETE\n")
assert 'Deleted all run history and event logs' in result.output
assert result.exit_code == 0
def test_run_wipe_incorrect_delete_message():
runner = CliRunner()
result = runner.invoke(run_wipe_command, input="WRONG\n")
assert 'Exiting without deleting all run history and event logs' in result.output
assert result.exit_code == 0
@schedules(scheduler=FilesytemTestScheduler)
def define_bar_scheduler():
    """Declare a single every-minute schedule targeting test_pipeline."""
    foo_schedule = ScheduleDefinition(
        "foo_schedule",
        cron_schedule="* * * * *",
        pipeline_name="test_pipeline",
        environment_dict={},
    )
    return [foo_schedule]
def test_schedules_list_without_dagster_home():
runner = CliRunner()
result = runner.invoke(
schedule_list_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 2
assert 'Error: $DAGSTER_HOME is not set' in result.output
def test_schedules_list():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_list_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 0
assert result.output == ('Repository bar\n' '**************\n')
def test_schedules_up():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 0
assert result.output == 'Changes:\n + foo_schedule (add)\n'
def test_schedules_up_and_list():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_list_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 0
assert (
result.output == 'Repository bar\n'
'**************\n'
'Schedule: foo_schedule [STOPPED]\n'
'Cron Schedule: * * * * *\n'
)
def test_schedules_start_and_stop():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
assert result.exit_code == 0
assert 'Started schedule foo_schedule with ' in result.output
result = runner.invoke(
schedule_stop_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
assert result.exit_code == 0
assert 'Stopped schedule foo_schedule with ' in result.output
def test_schedules_start_all():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), '--start-all'],
)
assert result.exit_code == 0
assert result.output == 'Started all schedules for repository bar\n'
def test_schedules_wipe_correct_delete_message():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_wipe_command,
['-y', script_relative_path('repository_file.yaml')],
input="DELETE\n",
)
assert result.exit_code == 0
assert 'Wiped all schedules and schedule cron jobs' in result.output
result = runner.invoke(
schedule_up_command,
['-y', script_relative_path('repository_file.yaml'), '--preview'],
)
# Verify schedules were wiped
assert result.exit_code == 0
assert result.output == 'Planned Changes:\n + foo_schedule (add)\n'
def test_schedules_wipe_incorrect_delete_message():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_wipe_command,
['-y', script_relative_path('repository_file.yaml')],
input="WRONG\n",
)
assert result.exit_code == 0
assert 'Exiting without deleting all schedules and schedule cron jobs' in result.output
result = runner.invoke(
schedule_up_command,
['-y', script_relative_path('repository_file.yaml'), '--preview'],
)
# Verify schedules were not wiped
assert result.exit_code == 0
assert (
result.output
== 'No planned changes to schedules.\n1 schedules will remain unchanged\n'
)
def test_schedules_restart():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
result = runner.invoke(
schedule_restart_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
assert result.exit_code == 0
assert 'Restarted schedule foo_schedule' in result.output
def test_schedules_restart_all():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
result = runner.invoke(
schedule_restart_command,
[
'-y',
script_relative_path('repository_file.yaml'),
'foo_schedule',
'--restart-all-running',
],
)
assert result.exit_code == 0
assert result.output == 'Restarted all running schedules for repository bar\n'
def test_multiproc():
with seven.TemporaryDirectory() as temp:
runner = CliRunner(env={'DAGSTER_HOME': temp})
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'multiproc',
'multi_mode_with_resources', # pipeline name
],
)
assert 'PIPELINE_SUCCESS' in add_result.output
def test_multiproc_invalid():
# force ephemeral instance by removing out DAGSTER_HOME
runner = CliRunner(env={'DAGSTER_HOME': None})
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'multiproc',
'multi_mode_with_resources', # pipeline name
],
)
# which is invalid for multiproc
assert 'DagsterUnmetExecutorRequirementsError' in add_result.output
class InMemoryRunLauncher(RunLauncher, ConfigurableClass):
    """Run launcher that records launched runs in a list instead of executing them."""

    def __init__(self, inst_data=None):
        self._inst_data = inst_data
        self._queue = []

    def launch_run(self, _instance, run):
        # Record the run so tests can assert on what was "launched".
        self._queue.append(run)
        return run

    def queue(self):
        return self._queue

    @classmethod
    def config_type(cls):
        # No configurable fields.
        return Shape({})

    @classmethod
    def from_config_value(cls, inst_data, config_value):
        return cls(inst_data=inst_data,)

    @property
    def inst_data(self):
        return self._inst_data
@repository_partitions
def define_baz_partitions():
    """One partition set for 'baz': one partition per lowercase ASCII letter."""
    baz_partitions = PartitionSetDefinition(
        name='baz_partitions', pipeline_name='baz', partition_fn=lambda: string.ascii_lowercase,
    )
    return [baz_partitions]
def backfill_execute_args(execution_args):
    """Translate test execution args into the dict execute_backfill_command expects."""
    backfill_args = {
        'repository_yaml': script_relative_path('repository_file.yaml'),
        'noprompt': True,
    }
    for name, value in execution_args.items():
        if name == 'pipeline_name':
            # The command expects the pipeline name as a tuple; omit if falsy.
            if value:
                backfill_args['pipeline_name'] = (value,)
        else:
            backfill_args[name] = value
    return backfill_args
def backfill_cli_runner_args(execution_args):
    """Translate test execution args into argv for the `pipeline backfill` command."""
    cli_args = ['-y', script_relative_path('repository_file.yaml'), '--noprompt']
    pipeline_name = execution_args.get('pipeline_name')
    if pipeline_name:
        cli_args.append(pipeline_name)
    for name, value in execution_args.items():
        if name == 'pipeline_name':
            continue
        # e.g. partition_set -> --partition-set VALUE
        cli_args += ['--{}'.format(name.replace('_', '-')), value]
    return cli_args
def run_test_backfill(
execution_args, expected_count=None, error_message=None, use_run_launcher=True
):
# Shared driver: runs the backfill command both directly (when an error is
# expected) and via the click CliRunner, against an ephemeral in-memory
# DagsterInstance patched in as the global instance.
runner = CliRunner()
# use_run_launcher=False exercises the "no launcher configured" error path.
run_launcher = InMemoryRunLauncher() if use_run_launcher else None
with seven.TemporaryDirectory() as temp_dir:
instance = DagsterInstance(
instance_type=InstanceType.EPHEMERAL,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=InMemoryRunStorage(),
event_storage=InMemoryEventLogStorage(),
compute_log_manager=NoOpComputeLogManager(temp_dir),
run_launcher=run_launcher,
)
# Make DagsterInstance.get() return our ephemeral instance for the duration.
with mock.patch('dagster.core.instance.DagsterInstance.get') as _instance:
_instance.return_value = instance
if error_message:
# Direct invocation should surface the expected UsageError message.
with pytest.raises(UsageError) as error_info:
execute_backfill_command(backfill_execute_args(execution_args), no_print)
assert error_info and error_message in error_info.value.message
result = runner.invoke(
pipeline_backfill_command, backfill_cli_runner_args(execution_args)
)
# Click maps UsageError to exit code 2; success is 0.
if error_message:
assert result.exit_code == 2
else:
assert result.exit_code == 0
if expected_count:
# Each launched partition produces one queued run in the in-memory launcher.
assert len(run_launcher.queue()) == expected_count
def test_backfill_no_run_launcher():
    # Legit partition args, but no launcher configured on the instance.
    args = {'pipeline_name': 'baz'}
    run_test_backfill(
        args, use_run_launcher=False, error_message='A run launcher must be configured'
    )


def test_backfill_no_pipeline():
    run_test_backfill({'pipeline_name': 'nonexistent'}, error_message='No pipeline found')


def test_backfill_no_partition_sets():
    # 'foo' exists but has no partition sets defined for it.
    run_test_backfill({'pipeline_name': 'foo'}, error_message='No partition sets found')


def test_backfill_no_named_partition_set():
    run_test_backfill(
        {'pipeline_name': 'baz', 'partition_set': 'nonexistent'},
        error_message='No partition set found',
    )


def test_backfill_launch():
    # All 26 letter-partitions should be launched.
    run_test_backfill(
        {'pipeline_name': 'baz', 'partition_set': 'baz_partitions'},
        expected_count=len(string.ascii_lowercase),
    )


def test_backfill_partition_range():
    base = {'pipeline_name': 'baz', 'partition_set': 'baz_partitions'}
    # Inclusive ranges over the alphabet partitions: x..z, a..c, c..f.
    run_test_backfill(dict(base, **{'from': 'x'}), expected_count=3)
    run_test_backfill(dict(base, to='c'), expected_count=3)
    run_test_backfill(dict(base, **{'from': 'c'}, to='f'), expected_count=4)


def test_backfill_partition_enum():
    run_test_backfill(
        {'pipeline_name': 'baz', 'partition_set': 'baz_partitions', 'partitions': 'c,x,z'},
        expected_count=3,
    )
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster_tests/cli_tests/test_cli_commands.py | test_cli_commands.py | py | 31,036 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster.lambda_solid",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "dagster.lambda_solid",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "dagster.pipeline",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "dagster.pi... |
13419956267 | import pygame,sys
from Room import Room,Overworld
class Game:
    """Top-level state machine switching between the overworld and a room."""

    def __init__(self) -> None:
        self.overworld = Overworld(screen, self.start_game)
        self.status = 'overworld'
        self.current_room = 0

    def start_game(self):
        # Enter the currently selected room.
        self.room = Room(self.current_room, self.create_overworld)
        self.status = "game_running"

    def create_overworld(self):
        # Return to the overworld map.
        self.overworld = Overworld(screen, self.start_game)
        self.status = 'overworld'

    def run(self):
        # Tick whichever scene is currently active.
        if self.status == 'overworld':
            self.overworld.run()
        else:
            self.room.run()
pygame.init()
screen = pygame.display.set_mode((900, 506))
pygame.display.set_caption('Text Based Game')

game = Game()

# Main loop: pump events (quit on window close), advance the active scene,
# then flip the display.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    game.run()
    pygame.display.update()
| NishantK30/projects | text based game/main.py | main.py | py | 960 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Room.Overworld",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Room.Room",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "Room.Overworld",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number... |
41585308053 | from flask.ext.mongokit import MongoKit, Document
from datetime import datetime
from sensorapp import db, app
@db.register
class Device(Document):
    """MongoKit model for a hardware device hosting sensors/actuators."""

    __database__ = app.config["DB_NAME"]
    __collection__ = "device"
    structure = {
        'name': unicode,
        'digit_pin_num': int,
        'analog_pin_num': int,
        'type': unicode,
        'location': unicode,
        'sensor_list': list,
        'actuator_list': list,
        'created_user': unicode,
        'created_time': datetime
    }
    required_fields = ['name', 'created_time']
    # Pass the callable itself (MongoKit evaluates callables per document);
    # calling utcnow() here would freeze the default at module-import time.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class Sensor(Document):
    """MongoKit model for a sensor attached to a device pin."""

    __database__ = app.config["DB_NAME"]
    __collection__ = "sensor"
    structure = {
        'name': unicode,
        'type': unicode,
        'digit_or_analog': unicode,
        'location': unicode,
        'ability': unicode,
        'at_device_id': unicode,
        'at_device_name': unicode,
        'at_pin': int,
        'created_user': unicode,
        'created_time': datetime
    }
    required_fields = ['name', 'created_time']
    # Callable default: evaluated when each document is created, not at import.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class Actuator(Document):
    """MongoKit model for an actuator attached to a device pin."""

    __database__ = app.config["DB_NAME"]
    __collection__ = "actuator"
    structure = {
        'name': unicode,
        'type': unicode,
        'digit_or_analog': unicode,
        'location': unicode,
        'ability': unicode,
        'at_device_id': unicode,
        'at_device_name': unicode,
        'at_pin': int,
        'created_user': unicode,
        'created_time': datetime
    }
    required_fields = ['name', 'created_time']
    # Callable default: evaluated when each document is created, not at import.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class SensorData(Document):
    """A single timestamped sensor reading."""

    __database__ = app.config["DB_NAME"]
    __collection__ = "sensordata"
    structure = {
        'value': float,
        'from_sensor_id': unicode,
        'from_sensor_name': unicode,
        'sensing_time': datetime,
        'created_time': datetime
    }
    required_fields = ['value', 'from_sensor_name', 'sensing_time', 'created_time']
    # Callable default: evaluated when each document is created, not at import.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class ActuatorData(Document):
    """A single timestamped actuator command/state record."""

    __database__ = app.config["DB_NAME"]
    __collection__ = "actuatordata"
    structure = {
        'value': float,
        'from_actuator_id': unicode,
        'from_actuator_name': unicode,
        'created_by': unicode,
        'acting_time': datetime,
        'created_time': datetime
    }
    required_fields = ['value', 'from_actuator_name', 'acting_time', 'created_time']
    # Callable default: evaluated when each document is created, not at import.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
# NOTE(review): each model class above is already registered via the
# @db.register decorator; these explicit calls look redundant -- confirm
# against the MongoKit connection API before removing.
db.register([Device])
db.register([Sensor])
db.register([Actuator])
db.register([SensorData])
db.register([ActuatorData])
| janetyc/SensorIoT | sensorapp/models.py | models.py | py | 2,885 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.ext.mongokit.Document",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sensorapp.app.config",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sensorapp.app",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "date... |
15150261198 | #Healthy programmer
# 9am - 5pm
# Water = water.mp3 (3.5 liters)every 40min - Drank - log
# Eyes = eyes.mp3 (Every 30 min) - EyesDone - log
# Pysical Activity = pysical.mp3 (every 45 min)- ExDone - log
#
# Rules - pygame module to play audio
from pygame import mixer
from datetime import datetime
from time import time
def musiconloop(file, stopper):
    """Play an audio file and block until the user types the stop word."""
    mixer.init()
    mixer.music.load(file)
    mixer.music.play()
    while True:
        # Block on console input; only the exact stop word silences the alarm.
        typed = input()
        if typed == stopper:
            mixer.music.stop()
            break
def log_now(msg):
    """Append `msg` followed by the current timestamp to the activity log."""
    with open("mylogs.txt", "a") as log_file:
        log_file.write(f"{msg} {datetime.now()}\n")
if __name__ == '__main__':
    # Alarm intervals in SECONDS.  NOTE: these look like shortened test values;
    # the intended schedule (per the header) is water every 40 min, eyes every
    # 30 min, exercise every 45 min.
    watersecs = 10
    eyessecs = 30
    exsecs = 50

    init_waters = time()
    init_eyes = time()
    init_exercise = time()

    # Poll forever; each alarm blocks on musiconloop until acknowledged,
    # then its timer restarts and the acknowledgement is logged.
    while True:
        if time() - init_waters > watersecs:
            print("Drink Water!! ..Write 'drank' to stop the alarm")
            musiconloop('waters.mp3', 'drank')
            init_waters = time()
            log_now("Drank water at")
        if time() - init_eyes > eyessecs:
            print("Eyes Exercise time!! ..Write 'doneeyes' to stop the alarm")
            musiconloop('eyes.mp3', 'doneeyes')
            init_eyes = time()
            log_now("Eyes relaxed done at")
        if time() - init_exercise > exsecs:
            print("Exercise Time!! ..Write 'doneex' to stop the alarm")
            musiconloop('excercise.mp3', 'doneex')
            init_exercise = time()
            log_now("Exercise done at")
| entbappy/My-python-projects | Ex7 Healty programmer50.py | Ex7 Healty programmer50.py | py | 1,582 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pygame.mixer.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.m... |
5850629924 | #coding: utf-8
import pygame
from block import Block
import constants
#insert this class in ship method gen_shoot()
class Bullet(Block):
    """Projectile sprite; created by the ship's gen_shoot().

    Moves vertically each update and despawns off-screen.  Collisions with
    `targets_nopoints` destroy both sprites; collisions with `targets_points`
    additionally credit each destroyed target's `value` to `point_receptor`.
    """

    def __init__(self, x, y, sign, speed, targets_nopoints=None, targets_points=None, point_receptor=None):
        super(Bullet, self).__init__(x, y, 10, 10, constants.YELLOW)
        self.dir_x = 0
        # sign selects up/down travel; speed scales pixels per frame.
        self.dir_y = -1 * sign * speed
        # Only accept real sprite Groups; anything else disables that check.
        if isinstance(targets_nopoints, pygame.sprite.Group):
            self.t_n_points = targets_nopoints
        else:
            self.t_n_points = None
        if isinstance(targets_points, pygame.sprite.Group):
            self.t_w_points = targets_points  # each sprite must have .value
            self.point_receptor = point_receptor  # must have change_points_by(x)
        else:
            self.t_w_points = None

    def update(self):
        # Advance along the fixed vertical direction.
        self.rect.y += self.dir_y
        # Despawn once outside the vertical bounds of the screen.
        if self.rect.y < 0 or self.rect.y >= constants.SCREEN_HEIGHT:
            self.kill()  # removes the sprite from ALL pygame Groups
            return
        # Idiom fixes vs. original: `is not None` instead of `!= None`,
        # truthiness instead of len(...) > 0, and the no-op `del self` removed.
        if self.t_n_points is not None:
            # dokill=True: colliding obstacles are destroyed with the bullet.
            collision_list = pygame.sprite.spritecollide(self, self.t_n_points, True)
            if collision_list:
                self.kill()
        if self.t_w_points is not None:
            collision_list = pygame.sprite.spritecollide(self, self.t_w_points, True)
            if collision_list:
                self.kill()
                for target in collision_list:
                    self.point_receptor.change_points_by(target.value)
| RafaelPAndrade/Pixel_Martians | bullet.py | bullet.py | py | 1,487 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "block.Block",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "constants.YELLOW",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite",
... |
35076398379 | """This file contains the signature validator abstraction"""
import base64
import json
from cose.headers import KID
from cose.keys.keyops import VerifyOp
from cose.messages import Sign1Message
from cose.keys import CoseKey
from cose.algorithms import Es256, Ps256
from cose.keys.keytype import KtyEC2, KtyRSA
from cose.keys.keyparam import KpKty, KpKeyOps
from cose.keys.keyparam import KpAlg, EC2KpX, EC2KpY, EC2KpCurve, RSAKpE, RSAKpN
from cose.keys.curves import P256
from cose.exceptions import CoseException
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from cryptography.utils import int_to_bytes
from classes.TrustList import TrustList
class SignatureValidator:
"""Validate COSE signatures"""
def __init__(self, trust_list: TrustList):
self._trust_list = trust_list
def validate(self, payload: bytes):
"""Validates the signature, or returns the errors"""
try:
message = Sign1Message.decode(payload)
kid = self._get_kid(message)
print(f"KID = {kid}")
dsc = self._trust_list.find(kid)
cert: x509.base = dsc.certificate()
if cert is None:
return {
"valid": False,
"error": {
"type": "TRUST-LIST",
"message": f"KID {kid} not found in the trust-list"
}
}
message.key = self._get_key(cert)
if message.verify_signature():
return {
"valid": True,
"error": None
}
return {
"valid": False,
"error": "Invalid signature! Reason: unknown."
}
except UnicodeDecodeError as err:
return {
"valid": False,
"error": {
"type": "UNICODE",
"message": err
}
}
except json.decoder.JSONDecodeError as err:
return {
"valid": False,
"error": {
"type": "JSON",
"message": err
}
}
except (CoseException, AttributeError, TypeError) as err:
return {
"valid": False,
"error": {
"type": "COSE",
"message": err
}
}
@staticmethod
def _get_kid(message) -> str:
"""Returns the KID from the message"""
if KID in message.phdr.keys():
return base64.b64encode(message.phdr[KID]).decode("UTF-8")
return base64.b64encode(message.uhdr[KID]).decode("UTF-8")
@staticmethod
def _get_key(cert: x509.base) -> CoseKey:
"""Returns the CoseKey"""
if isinstance(cert.public_key(), rsa.RSAPublicKey):
return CoseKey.from_dict(
{
KpKeyOps: [VerifyOp],
KpKty: KtyRSA,
KpAlg: Ps256, # RSSASSA-PSS-with-SHA-256-and-MFG1
RSAKpE: int_to_bytes(cert.public_key().public_numbers().e),
RSAKpN: int_to_bytes(cert.public_key().public_numbers().n)
}
)
elif isinstance(cert.public_key(), ec.EllipticCurvePublicKey):
return CoseKey.from_dict(
{
KpKeyOps: [VerifyOp],
KpKty: KtyEC2,
EC2KpCurve: P256, # Ought o be pk.curve - but the two libs clash
KpAlg: Es256, # ecdsa-with-SHA256
EC2KpX: int_to_bytes(cert.public_key().public_numbers().x),
EC2KpY: int_to_bytes(cert.public_key().public_numbers().y)
}
)
else:
raise Exception(f"Algorithm unsupported: { cert.signature_algorithm_oid }")
| ryanbnl/eu-dcc-diagnostics | classes/SignatureValidator.py | SignatureValidator.py | py | 3,969 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "classes.TrustList.TrustList",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "cose.messages.Sign1Message.decode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cose.messages.Sign1Message",
"line_number": 31,
"usage_type": "name"
},
... |
38488870579 | #!/usr/bin/env python3
#
# Bonus. GPE auto-training + GSA using external (from publication) dataset loaded from json file
#
import os
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import LinearMean
from gpytorch.kernels import MaternKernel, ScaleKernel
from GPErks.gp.data.dataset import Dataset
from GPErks.gp.experiment import GPExperiment
from GPErks.gp.mean import LinearMean
from GPErks.log.logger import get_logger
from GPErks.serialization.path import posix_path
from GPErks.train.emulator import GPEmulator
from GPErks.utils.random import set_seed
from GPErks.perks.gsa import SobolGSA
def main():
get_logger()
seed = 8
set_seed(seed)
device = "cpu"
# This new method loads your dataset into a dictionary where keys = features, values = Dataset objects
# (each Dataset is built to create the experiment that will emulate the corresponding scalar feature (key))
datasets = Dataset.build_from_file(posix_path(os.getcwd(), "data", "datasets", "Stefano_8p_sham.json"))
features = list(datasets.keys())
print(features) # available features to be emulated
# # Note: if you want to create a .json file containing your dataset, you can do so like this:
# X = np.loadtxt(data_dir / "X.txt", dtype=float)
# Y = np.loadtxt(data_dir / "Y.txt", dtype=float)
# xlabels = read_labels_from_file(data_dir / "xlabels.txt")
# ylabels = read_labels_from_file(data_dir / "ylabels.txt")
# data_dct = {
# "X_train": X.tolist(),
# "Y_train": Y.tolist(),
# # "X_val": X_val.tolist(), # (if available, otherwise can omit this dct key)
# # "Y_val": Y_val.tolist(), # (if available, otherwise can omit this dct key)
# # "X_test": X_test.tolist(), # (if available, otherwise can omit this dct key)
# # "Y_test": Y_test.tolist(), # (if available, otherwise can omit this dct key)
# "x_labels": xlabels, # (if available, otherwise can omit this dct key)
# "y_labels": ylabels, # (if available, otherwise can omit this dct key)
# # "l_bounds": a list here (if available, otherwise can omit this dct key)
# # "u_bounds": a list here (if available, otherwise can omit this dct key)
# "info": "A short description about the dataset"
# }
# with open(Path(os.getcwd())/"datasetname.json", "w") as f:
# json.dump(data_dct, f, indent=4)
#
# # Also note that there is already a utility function that does this for you:
# # from GPErks.utils.jsonfiles import create_json_dataset_from_arrays
feature = "EDV" # we will emulate just one feature as an example
# GPE auto-training
print(f"\nEmulating target feature: {feature}")
dataset = datasets[feature]
likelihood = GaussianLikelihood()
mean = LinearMean(degree=1, input_size=dataset.input_size, bias=True)
covariance = ScaleKernel(MaternKernel(ard_num_dims=dataset.input_size))
metrics = []
experiment = GPExperiment(
dataset,
likelihood,
mean,
covariance,
metrics=metrics,
seed=seed
)
emulator = GPEmulator(experiment, device)
emulator.train_auto() # you could use a more manual approach here with early stopping etc.
msg = experiment.print_stats()
print(f"\nFitted emulator hyperparameters:{msg}")
# GSA
gsa = SobolGSA(dataset, n=1024, seed=seed)
# the following method is used to perform GSA whenever a (trained) emulator object is available, also covering
# the case where it was trained using an externally imported dataset as in this example
gsa.estimate_Sobol_indices_with_emulator(emulator, n_draws=1000)
gsa.summary()
gsa.correct_Sobol_indices(threshold=0.01)
gsa.plot()
gsa.plot_donut()
gsa.plot_fancy_donut()
gsa.plot_heatmap()
gsa.plot_network()
if __name__ == "__main__":
main()
| stelong/GPErks | examples/example_bonus.py | example_bonus.py | py | 3,888 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "GPErks.log.logger.get_logger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "GPErks.utils.random.set_seed",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "GPErks.gp.data.dataset.Dataset.build_from_file",
"line_number": 30,
"usage_type... |
3419259967 | from spectractor import parameters
from spectractor.simulation.simulator import AtmosphereGrid, SpectrumSimulatorSimGrid
from spectractor.config import load_config
from spectractor.simulation.image_simulation import ImageSim
from spectractor.logbook import LogBook
from spectractor.extractor.extractor import Spectractor
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(dest="input", metavar='path', default=["tests/data/reduc_20170530_134.fits"],
help="Input fits file name. It can be a list separated by spaces, or it can use * as wildcard.",
nargs='*')
parser.add_argument("-d", "--debug", dest="debug", action="store_true",
help="Enter debug mode (more verbose and plots).", default=False)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="Enter verbose (print more stuff).", default=False)
parser.add_argument("-o", "--output_directory", dest="output_directory", default="outputs/",
help="Write results in given output directory (default: ./outputs/).")
parser.add_argument("-l", "--logbook", dest="logbook", default="./tests/data/ctiofulllogbook_jun2017_v5.csv",
help="CSV logbook file. (default: ./tests/data/ctiofulllogbook_jun2017_v5.csv).")
parser.add_argument("-c", "--config", dest="config", default="config/ctio.ini",
help="INI config file. (default: config.ctio.ini).")
args = parser.parse_args()
parameters.VERBOSE = args.verbose
if args.debug:
parameters.DEBUG = True
parameters.VERBOSE = True
file_names = args.input
load_config(args.config)
logbook = LogBook(logbook=args.logbook)
for file_name in file_names:
tag = file_name.split('/')[-1]
disperser_label, target, xpos, ypos = logbook.search_for_image(tag)
if target is None or xpos is None or ypos is None:
continue
spectrum_file_name = args.output_directory + '/' + tag.replace('.fits', '_spectrum.fits')
atmgrid = AtmosphereGrid(file_name)
image = ImageSim(file_name, spectrum_file_name, args.output_directory, A1=1, A2=1,
pwv=5, ozone=300, aerosols=0.03,
psf_poly_params=None, with_stars=True)
sim_file_name = args.output_directory + tag.replace('reduc_', 'sim_')
Spectractor(sim_file_name, args.output_directory, target, [xpos, ypos], disperser_label, args.config)
| LSSTDESC/Spectractor | runSimulator.py | runSimulator.py | py | 2,608 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "spectractor.parameters.VERBOSE",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "spectractor.parameters",
"line_number": 27,
"usage_type": "name"
},
{
... |
26736973424 | import json
import re
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QFileDialog, QListWidgetItem, QColorDialog
from models.const import *
from models import util
from uis.main_window import Ui_MainWindow
class SettingsController(object):
def __init__(self, app=None, ui: Ui_MainWindow = None, main_controller=None):
self.app = app
self.ui = ui
self.main_controller = main_controller
self.setup_control()
pass
def setup_control(self):
self.ui.cbx_settings_reader_auto_play_pgb.addItems([TRSM("No"),TRSM("Yes")])
self.ui.cbx_settings_proxy_mode.addItems([TRSM("Disable proxy"),TRSM("Use proxy and not proxy same time"),TRSM("Only use proxy")])
self.ui.cbx_settings_proxy_type.addItems([TRSM("https"),TRSM("http")])
self.ui.cbx_settings_cugan_denoise.addItems([TRSM("No effect"),TRSM("Level 0"),TRSM("Level 1"),TRSM("Level 2"),TRSM("Level 3")])
self.ui.cbx_settings_cugan_resize.addItems([TRSM("No"),TRSM("Yes")])
self.ui.cbx_settings_when_close_window.addItems([TRSM("Minimize to system tray"),TRSM("Close window")])
# UI Display
self.retranslateUi()
self.load_config()
# action
self.ui.btn_settings_save.clicked.connect(self.btn_settings_save_clicked)
self.ui.btn_settings_reset.clicked.connect(self.btn_settings_reset_clicked)
self.ui.btn_settings_general_folder.clicked.connect(self.btn_settings_general_folder_clicked)
self.ui.btn_settings_proxy_add.clicked.connect(self.btn_settings_proxy_add_clicked)
self.ui.btn_settings_proxy_delete.clicked.connect(self.btn_settings_proxy_delete_clicked)
self.ui.btn_settings_cugan_browser.clicked.connect(self.btn_settings_cugan_browser_clicked)
self.ui.btn_settings_reader_background.clicked.connect(self.btn_settings_reader_background_clicked)
self.ui.txt_settings_reader_background.textChanged.connect(self.txt_settings_reader_background_text_changed)
def retranslateUi(self):
self.ui.cbx_settings_reader_auto_play_pgb.setItemText(0,TRSM("No"))
self.ui.cbx_settings_reader_auto_play_pgb.setItemText(1,TRSM("Yes"))
self.ui.cbx_settings_proxy_mode.setItemText(0,TRSM("Disable proxy"))
self.ui.cbx_settings_proxy_mode.setItemText(1,TRSM("Use proxy and not proxy same time"))
self.ui.cbx_settings_proxy_mode.setItemText(2,TRSM("Only use proxy"))
self.ui.cbx_settings_cugan_denoise.setItemText(0,TRSM("No effect"))
self.ui.cbx_settings_cugan_denoise.setItemText(1,TRSM("Level 0"))
self.ui.cbx_settings_cugan_denoise.setItemText(2,TRSM("Level 1"))
self.ui.cbx_settings_cugan_denoise.setItemText(3,TRSM("Level 2"))
self.ui.cbx_settings_cugan_denoise.setItemText(4,TRSM("Level 3"))
self.ui.cbx_settings_cugan_resize.setItemText(0,TRSM("No"))
self.ui.cbx_settings_cugan_resize.setItemText(1,TRSM("Yes"))
self.ui.cbx_settings_when_close_window.setItemText(0,TRSM("Minimize to system tray"))
self.ui.cbx_settings_when_close_window.setItemText(1,TRSM("Close window"))
pass
#action
def btn_settings_general_folder_clicked(self):
old_folder_path = self.ui.txt_settings_general_folder.text()
if old_folder_path == "":
old_folder_path = "./"
folder_path = QFileDialog.getExistingDirectory(self.main_controller,TRSM("Open folder"), old_folder_path)
if folder_path != "":
self.ui.txt_settings_general_folder.setText(folder_path)
pass
def btn_settings_reader_background_clicked(self):
old_color = QColor(self.ui.txt_settings_reader_background.text())
color = QColorDialog.getColor(old_color,self.main_controller,TRSM("Pick a color"))
if color.isValid():
color_name = color.name()
self.ui.txt_settings_reader_background.setText(color_name)
self.ui.lbl_settings_reader_background_preview.setStyleSheet("background-color: "+color_name+";")
def btn_settings_cugan_browser_clicked(self):
old_file_path = self.ui.txt_settings_cugan_location.text()
if old_file_path == "":
old_file_path = "./"
file_path = QFileDialog.getOpenFileName(self.main_controller,TRSM("EXE location"), old_file_path)
if len(file_path) >= 2 and file_path[0] != "":
self.ui.txt_settings_cugan_location.setText(file_path[0])
def btn_settings_proxy_add_clicked(self):
if self.ui.txt_settings_proxy_ip.text() != "":
proxy = self.ui.cbx_settings_proxy_type.currentText() + "://" + self.ui.txt_settings_proxy_ip.text()
if not self.check_proxy_format(proxy):
util.msg_box(TRSM("Please enter a proxy with ip:port format"), self.main_controller)
elif self.check_proxy_exist_in_list(proxy):
util.msg_box(TRSM("Proxy already exist in list"), self.main_controller)
else:
self.try_add_proxy(proxy)
self.ui.txt_settings_proxy_ip.setText("")
else:
util.msg_box(TRSM("Please enter a proxy with ip:port format"),self.main_controller)
pass
def btn_settings_proxy_delete_clicked(self):
if len(self.ui.list_settings_proxy.selectedItems()) > 0:
if util.confirm_box(TRSM("Confirm delete these proxy?"),self.main_controller):
for item in self.ui.list_settings_proxy.selectedItems():
self.ui.list_settings_proxy.takeItem(self.ui.list_settings_proxy.row(item))
else:
util.msg_box(TRSM("Please select at least one proxy"),self.main_controller)
pass
def btn_settings_save_clicked(self):
self.save_config()
def btn_settings_reset_clicked(self):
self.ui.spin_settings_max_retry.setValue(5)
self.ui.spin_settings_timeout.setValue(30)
self.ui.spin_settings_book_padding.setValue(2)
self.ui.spin_settings_chapter_padding.setValue(3)
self.ui.spin_settings_image_padding.setValue(3)
self.ui.spin_settings_jpg_quality.setValue(90)
self.ui.spin_settings_check_is_2_page.setValue(1.0)
self.ui.txt_settings_reader_background.setText("#000000")
self.ui.spin_settings_reader_auto_play_interval.setValue(5.0)
self.ui.cbx_settings_reader_auto_play_pgb.setCurrentIndex(1)
self.ui.spin_settings_reader_page_gap.setValue(0)
self.ui.spin_settings_page_sleep.setValue(10)
self.ui.spin_settings_image_sleep.setValue(1)
self.ui.spin_settings_download_worker.setValue(2)
self.ui.cbx_settings_proxy_mode.setCurrentIndex(0)
self.ui.spin_settings_cugan_scale.setValue(2)
self.ui.cbx_settings_cugan_denoise.setCurrentIndex(4)
self.ui.cbx_settings_cugan_resize.setCurrentIndex(0)
self.ui.cbx_settings_when_close_window.setCurrentIndex(0)
pass
def txt_settings_reader_background_text_changed(self):
color_str = self.ui.txt_settings_reader_background.text()
q_color = QColor(color_str)
if q_color.isValid():
self.ui.lbl_settings_reader_background_preview.setStyleSheet("background-color: " + color_str)
# internal
def load_config(self):
#general
download_folder = MY_CONFIG.get("general", "download_folder")
self.ui.txt_settings_general_folder.setText(download_folder)
max_retry = MY_CONFIG.get("general", "max_retry")
self.ui.spin_settings_max_retry.setValue(int(max_retry))
timeout = MY_CONFIG.get("general", "timeout")
self.ui.spin_settings_timeout.setValue(float(timeout))
self.ui.cbx_settings_user_agent.addItems([
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.60 Safari/537.36",
])
# "Mozilla/5.0 (iPad; CPU OS 8_0_2 like Mac OS X) AppleWebKit/60.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A405 Safari/600.1.4",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.2 Mobile/15E148 Safari/604.1",
# "Mozilla/5.0 (Linux; Android 12; Pixel 6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.58 Mobile Safari/537.36"
agent = MY_CONFIG.get("general", "agent")
self.ui.cbx_settings_user_agent.setCurrentText(agent)
book_padding = MY_CONFIG.get("general", "book_padding")
self.ui.spin_settings_book_padding.setValue(int(book_padding))
chapter_padding = MY_CONFIG.get("general", "chapter_padding")
self.ui.spin_settings_chapter_padding.setValue(int(chapter_padding))
image_padding = MY_CONFIG.get("general", "image_padding")
self.ui.spin_settings_image_padding.setValue(int(image_padding))
jpg_quality = MY_CONFIG.get("general", "jpg_quality")
self.ui.spin_settings_jpg_quality.setValue(int(jpg_quality))
check_is_2_page = MY_CONFIG.get("general", "check_is_2_page")
self.ui.spin_settings_check_is_2_page.setValue(float(check_is_2_page))
reader_background = MY_CONFIG.get("reader", "background")
self.ui.txt_settings_reader_background.setText(reader_background)
self.ui.lbl_settings_reader_background_preview.setStyleSheet("background-color:"+reader_background+";")
reader_auto_play_interval = MY_CONFIG.get("reader", "auto_play_interval")
self.ui.spin_settings_reader_auto_play_interval.setValue(float(reader_auto_play_interval))
reader_auto_play_pgb = MY_CONFIG.get("reader", "auto_play_pgb")
self.ui.cbx_settings_reader_auto_play_pgb.setCurrentIndex(int(reader_auto_play_pgb))
reader_page_gap = MY_CONFIG.get("reader", "page_gap")
self.ui.spin_settings_reader_page_gap.setValue(int(reader_page_gap))
#anti ban
page_sleep = MY_CONFIG.get("anti-ban", "page_sleep")
self.ui.spin_settings_page_sleep.setValue(float(page_sleep))
image_sleep = MY_CONFIG.get("anti-ban", "image_sleep")
self.ui.spin_settings_image_sleep.setValue(float(image_sleep))
download_worker = MY_CONFIG.get("anti-ban", "download_worker")
self.ui.spin_settings_download_worker.setValue(int(download_worker))
proxy_mode = MY_CONFIG.get("anti-ban", "proxy_mode")
self.ui.cbx_settings_proxy_mode.setCurrentIndex(int(proxy_mode))
proxy_list = MY_CONFIG.get("anti-ban", "proxy_list")
if proxy_list != "":
proxy_list = json.loads(proxy_list)
for proxy in proxy_list:
item = QListWidgetItem()
item.setText(proxy["url"])
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
if proxy["enable"]:
item.setCheckState(QtCore.Qt.Checked)
else:
item.setCheckState(QtCore.Qt.Unchecked)
self.ui.list_settings_proxy.addItem(item)
#real-cugan
exe_location = MY_CONFIG.get("real-cugan", "exe_location")
self.ui.txt_settings_cugan_location.setText(exe_location)
scale = int(MY_CONFIG.get("real-cugan", "scale"))
self.ui.spin_settings_cugan_scale.setValue(scale)
denoise_level = int(MY_CONFIG.get("real-cugan", "denoise_level"))
self.ui.cbx_settings_cugan_denoise.setCurrentIndex(denoise_level+1)
resize = int(MY_CONFIG.get("real-cugan", "resize"))
self.ui.cbx_settings_cugan_resize.setCurrentIndex(resize)
#misc
display_message = MY_CONFIG.get("misc", "display_message")
if display_message == "False":
self.ui.radio_settings_message_no.setChecked(True)
else:
self.ui.radio_settings_message_yes.setChecked(True)
play_sound = MY_CONFIG.get("misc", "play_sound")
if play_sound == "False":
self.ui.radio_settings_sound_no.setChecked(True)
else:
self.ui.radio_settings_sound_yes.setChecked(True)
when_close_window = MY_CONFIG.get("misc", "when_close_window")
self.ui.cbx_settings_when_close_window.setCurrentIndex(int(when_close_window))
pass
def save_config(self):
global WEB_BOT, EXECUTOR
#print("try save")
#general
MY_CONFIG.set("general","download_folder",self.ui.txt_settings_general_folder.text())
MY_CONFIG.set("general","max_retry",str(self.ui.spin_settings_max_retry.value()))
MY_CONFIG.set("general","timeout",str(self.ui.spin_settings_timeout.value()))
MY_CONFIG.set("general","agent",self.ui.cbx_settings_user_agent.currentText())
MY_CONFIG.set("general","book_padding",str(self.ui.spin_settings_book_padding.value()))
MY_CONFIG.set("general","chapter_padding",str(self.ui.spin_settings_chapter_padding.value()))
MY_CONFIG.set("general","image_padding",str(self.ui.spin_settings_image_padding.value()))
MY_CONFIG.set("general","jpg_quality",str(self.ui.spin_settings_jpg_quality.value()))
MY_CONFIG.set("general","check_is_2_page",str(self.ui.spin_settings_check_is_2_page.value()))
MY_CONFIG.set("reader","background",self.ui.txt_settings_reader_background.text())
MY_CONFIG.set("reader","auto_play_interval",str(self.ui.spin_settings_reader_auto_play_interval.value()))
MY_CONFIG.set("reader","auto_play_pgb",str(self.ui.cbx_settings_reader_auto_play_pgb.currentIndex()))
MY_CONFIG.set("reader","page_gap",str(self.ui.spin_settings_reader_page_gap.value()))
#anti ban
MY_CONFIG.set("anti-ban","page_sleep",str(self.ui.spin_settings_page_sleep.value()))
MY_CONFIG.set("anti-ban","image_sleep",str(self.ui.spin_settings_image_sleep.value()))
MY_CONFIG.set("anti-ban","download_worker",str(self.ui.spin_settings_download_worker.value()))
MY_CONFIG.set("anti-ban","proxy_mode",str(self.ui.cbx_settings_proxy_mode.currentIndex()))
MY_CONFIG.set("anti-ban","proxy_list",json.dumps(self.proxy_list_to_json()))
#real-cugan
MY_CONFIG.set("real-cugan","exe_location",self.ui.txt_settings_cugan_location.text())
MY_CONFIG.set("real-cugan","scale",str(self.ui.spin_settings_cugan_scale.value()))
MY_CONFIG.set("real-cugan","denoise_level",str(self.ui.cbx_settings_cugan_denoise.currentIndex()-1))
MY_CONFIG.set("real-cugan","resize",str(self.ui.cbx_settings_cugan_resize.currentIndex()))
#misc
MY_CONFIG.set("misc","display_message",str(self.ui.radio_settings_message_yes.isChecked()))
MY_CONFIG.set("misc","play_sound",str(self.ui.radio_settings_sound_yes.isChecked()))
MY_CONFIG.set("misc","when_close_window",str(self.ui.cbx_settings_when_close_window.currentIndex()))
MY_CONFIG.save()
WEB_BOT.set_agent(MY_CONFIG.get("general", "agent"))
WEB_BOT.set_time_out(float(MY_CONFIG.get("general", "timeout")))
WEB_BOT.set_max_retry(int(MY_CONFIG.get("general", "max_retry")))
WEB_BOT.set_proxy_mode(int(MY_CONFIG.get("anti-ban", "proxy_mode")))
WEB_BOT.set_proxy_list(MY_CONFIG.get("anti-ban", "proxy_list"))
EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=int(MY_CONFIG.get("anti-ban", "download_worker")))
def try_add_proxy(self,proxy):
if self.check_proxy_exist_in_list(proxy):
return False
item = QListWidgetItem()
item.setText(proxy)
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Checked)
self.ui.list_settings_proxy.addItem(item)
return True
def check_proxy_format(self,proxy):
pattern_proxy = re.compile(r'http([s]?)://(.*?):([0-9]*)')
proxy_info = re.findall(pattern_proxy, proxy)
if len(proxy_info) == 1 and len(proxy_info[0]) == 3:
return True
return False
def check_proxy_exist_in_list(self,proxy):
for i in range(self.ui.list_settings_proxy.count()):
item = self.ui.list_settings_proxy.item(i)
if item.text() == proxy:
return True
return False
def proxy_list_to_json(self):
result = []
for i in range(self.ui.list_settings_proxy.count()):
item = self.ui.list_settings_proxy.item(i)
result.append({
"enable": item.checkState() == QtCore.Qt.Checked,
"url": item.text(),
})
return result
| freedy82/Comic-Toolbox | models/controllers/settings_controller.py | settings_controller.py | py | 14,876 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "uis.main_window.Ui_MainWindow",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 69,
"usage_ty... |
18174515063 | import os
import sys
import pathlib
import pandas as pd
import pymrio as pym
import pickle as pkl
import logging
import argparse
import json
import re
from pymrio.core.mriosystem import IOSystem
SEC_AGG_ODS_FILENAME = "exiobase3_aggregate_to_7_sectors.ods"
PARAMS_ODS_FILENAME ="exiobase3_7_sectors_params.ods"
EXIO3_MONETARY = 1000000
MAIN_INVENTORY_DURATION = 90
PARAMS = {
# The directory to use to store results (relative to output_dir)
"results_storage": "results",
# This tells the model to register the evolution of the stocks
# of every industry (the file can be quite large (2Gbytes+ for
# a 365 days simulation with exiobase))
"register_stocks": True,
# Parameters of the model (we detail these in the documentation)
"psi_param": 0.85,
"order_type": "alt",
# Time division of a year in the model (365 == day, 52 == week, ...)
"year_to_temporal_unit_factor": 365,
# Number of day|week|... of one step of the model (ie time sampling)
"n_temporal_units_by_step": 1,
# Charateristic time of inventory restoration
"inventory_restoration_tau": 60,
# Base overproduction factor
"alpha_base": 1.0,
# Maximum overproduction factor
"alpha_max": 1.25,
# Charateristic time of overproduction
"alpha_tau": 365,
# Charateristic time of rebuilding
"rebuild_tau": 60,
# Number of day|week|... to simulate
"n_temporal_units_to_sim": 700,
# Unused
"min_duration": 700
}
def lexico_reindex(mrio: pym.IOSystem) -> pym.IOSystem:
"""Reindex IOSystem lexicographicaly
Sort indexes and columns of the dataframe of a :ref:`pymrio.IOSystem` by
lexical order.
Parameters
----------
mrio : pym.IOSystem
The IOSystem to sort
Returns
-------
pym.IOSystem
The sorted IOSystem
"""
mrio.Z = mrio.Z.reindex(sorted(mrio.Z.index), axis=0)
mrio.Z = mrio.Z.reindex(sorted(mrio.Z.columns), axis=1)
mrio.Y = mrio.Y.reindex(sorted(mrio.Y.index), axis=0)
mrio.Y = mrio.Y.reindex(sorted(mrio.Y.columns), axis=1)
mrio.x = mrio.x.reindex(sorted(mrio.x.index), axis=0) #type: ignore
mrio.A = mrio.A.reindex(sorted(mrio.A.index), axis=0)
mrio.A = mrio.A.reindex(sorted(mrio.A.columns), axis=1)
return mrio
def full_mrio_pickle(exio3, save_path=None):
scriptLogger.info("Removing IOSystem attributes deemed unnecessary")
attr = ['Z', 'Y', 'x', 'A', 'L', 'unit', 'population', 'meta', '__non_agg_attributes__', '__coefficients__', '__basic__']
tmp = list(exio3.__dict__.keys())
for at in tmp:
if at not in attr:
delattr(exio3,at)
assert isinstance(exio3, IOSystem)
scriptLogger.info("Done")
scriptLogger.info("Computing the missing IO components")
exio3.calc_all()
scriptLogger.info("Done")
scriptLogger.info("Reindexing the dataframes lexicographicaly")
exio3 = lexico_reindex(exio3)
scriptLogger.info("Done")
scriptLogger.info("Saving Full mrio pickle file to {}".format(pathlib.Path(save_path).absolute()))
exio3 = lexico_reindex(exio3)
with open(save_path, 'wb') as f:
pkl.dump(exio3, f)
def aggreg_mrio_pickle(full_exio_path, sector_aggregator_path, save_path=None):
exio_path = pathlib.Path(full_exio_path)
if not exio_path.exists():
raise FileNotFoundError("Exiobase file not found - {}".format(exio_path))
with exio_path.open('rb') as f:
scriptLogger.info("Loading EXIOBASE3 from {}".format(exio_path.resolve()))
exio3 = pkl.load(f)
assert isinstance(exio3, IOSystem)
sec_agg_vec = pd.read_excel(sector_aggregator_path, sheet_name="aggreg_input", engine="odf")
sec_agg_newnames = pd.read_excel(sector_aggregator_path, sheet_name="name_input", engine="odf", index_col=0, squeeze=True)
sec_agg_vec = sec_agg_vec.sort_values(by="sector")
scriptLogger.info("Reading aggregation matrix from sheet 'input' in file {}".format(pathlib.Path(sector_aggregator_path).absolute()))
scriptLogger.info("Aggregating from {} to {} sectors".format(len(exio3.get_sectors()), len(sec_agg_vec.group.unique()))) #type:ignore
sec_agg_vec['new_sectors'] = sec_agg_vec.group.map(sec_agg_newnames.to_dict())
exio3.aggregate(sector_agg=sec_agg_vec.new_sectors.values)
exio3.calc_all()
scriptLogger.info("Done")
scriptLogger.info("Saving to {}".format(pathlib.Path(save_path).absolute()))
exio3 = lexico_reindex(exio3)
with open(save_path, 'wb') as f:
pkl.dump(exio3, f)
def params_from_ods(ods_file,monetary,main_inv_dur):
mrio_params = {}
mrio_params["monetary_factor"] = monetary
mrio_params["main_inv_dur"] = main_inv_dur
df = pd.read_excel(ods_file) #type: ignore
mrio_params["capital_ratio_dict"] = df[["Aggregated version sector", "Capital to VA ratio"]].set_index("Aggregated version sector").to_dict()['Capital to VA ratio']
mrio_params["inventories_dict"] = df[["Aggregated version sector", "Inventory size (days)"]].set_index("Aggregated version sector").to_dict()['Inventory size (days)']
return mrio_params
def event_tmpl_from_ods(ods_file):
event_params = {}
event_params["aff_regions"] = ["FR"]
event_params["dmg_distrib_regions"] = [1]
event_params["dmg_distrib_sectors_type"] = "gdp"
event_params["dmg_distrib_sectors"] = []
event_params["duration"] = 5
event_params["name"] = "Test-event"
event_params["occur"] = 7
event_params["q_dmg"] = 1000000
df = pd.read_excel(ods_file) #type: ignore
event_params["aff_sectors"] = df.loc[(df.Affected=="yes"),"Aggregated version sector"].to_list()
event_params["rebuilding_sectors"] = df.loc[(df["Rebuilding factor"] > 0),["Aggregated version sector", "Rebuilding factor"]].set_index("Aggregated version sector").to_dict()['Rebuilding factor']
return event_params
parser = argparse.ArgumentParser(description="Build a minimal example for BoARIO, from EXIOBASE3 MRIO table zip file")
parser.add_argument('source_path', type=str, help='The str path to the directory with source materials')
parser.add_argument('-o', "--output", type=str, help='The path to the example directory to create', nargs='?', default='./testing-directory/')
args = parser.parse_args()
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(name)s %(message)s", datefmt="%H:%M:%S")
scriptLogger = logging.getLogger("EXIOBASE3_Minimal_example_generator")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
scriptLogger.addHandler(consoleHandler)
scriptLogger.setLevel(logging.INFO)
scriptLogger.propagate = False
if __name__ == '__main__':
args = parser.parse_args()
scriptLogger.info("Make sure you use the same python environment when you use the minimal example as now.")
scriptLogger.info("Your current environment is: {}".format(sys.executable))
sec_agg_ods = pathlib.Path(args.source_path)/SEC_AGG_ODS_FILENAME
params_ods = pathlib.Path(args.source_path)/PARAMS_ODS_FILENAME
output_dir = pathlib.Path(args.output)
# Create full mrio pickle file
if not output_dir.exists():
output_dir.mkdir(parents=True, exist_ok=True)
full_exio_pickle_name = "exiobase3_full.pkl"
minimal_exio_name = "exiobase3_minimal.pkl"
params_file_name = "params.json"
mrio_params_file_name = "mrio_params.json"
event_file_name = "event.json"
scriptLogger.info("This will create the following directory, with all required files for the minimal example : {}".format(output_dir.resolve()))
if not sec_agg_ods.exists():
raise FileNotFoundError("Sector aggregator ods file not found - {}".format(sec_agg_ods))
if not params_ods.exists():
raise FileNotFoundError("Params ods file not found - {}".format(params_ods))
if not (output_dir/full_exio_pickle_name).exists():
regex = re.compile(r"(IOT_\d\d\d\d_ixi.zip)")
exio_path = None
for root, dirs, files in os.walk(args.source_path):
scriptLogger.info("Looking for Exiobase3 file here {}".format(args.source_path))
for f in files:
if regex.match(f):
exio_path = (pathlib.Path(args.source_path)/f).resolve()
scriptLogger.info("Found Exiobase3 file here {}".format(exio_path))
break
if exio_path is None:
raise FileNotFoundError("Exiobase file not found in given source directory - {}".format(args.source_path))
scriptLogger.info("Parsing EXIOBASE3 from {} - Note that this takes a few minutes on a powerful laptop. ".format(exio_path.resolve()))
exio3 = pym.parse_exiobase3(path=exio_path)
full_mrio_pickle(exio3, save_path=output_dir/full_exio_pickle_name)
# create minimal mrio pickle file
if not (output_dir/minimal_exio_name).exists():
aggreg_mrio_pickle(output_dir/full_exio_pickle_name, sector_aggregator_path=sec_agg_ods, save_path=output_dir/minimal_exio_name)
# create params file
if not (output_dir/params_file_name).exists():
scriptLogger.info("Generating simulation parameters file : {}".format((output_dir/params_file_name).resolve()))
params = PARAMS
params["output_dir"] = str(output_dir.resolve())
with (output_dir/params_file_name).open("w") as f:
json.dump(params, f, indent=4)
# create mrio_params_file
if not (output_dir/mrio_params_file_name).exists():
scriptLogger.info("Generating mrio parameters file : {}".format((output_dir/mrio_params_file_name).resolve()))
mrio_params = params_from_ods(params_ods, EXIO3_MONETARY, MAIN_INVENTORY_DURATION)
with (output_dir/mrio_params_file_name).open("w") as f:
json.dump(mrio_params, f, indent=4)
# create mrio_params_file
if not (output_dir/event_file_name).exists():
scriptLogger.info("Generating event file : {}".format((output_dir/event_file_name).resolve()))
event_params = event_tmpl_from_ods(params_ods)
with (output_dir/event_file_name).open("w") as f:
json.dump(event_params, f, indent=4)
scriptLogger.info("Done !")
| spjuhel/BoARIO | scripts/generate-example-files.py | generate-example-files.py | py | 10,120 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "pymrio.IOSystem",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pymrio.core.mriosystem.IOSystem",
"line_number": 82,
"usage_type": "argument"
},
{
"api_name": "pathlib.Path",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": ... |
9959099201 | import random
import itertools
import math
import json
from functions.counting import counting
from functions.multi_arithematic import multiple_operations_two_ops
from functions.single_arithematic import single_arithematic
from functions.avg_val import average_point_value
from functions.permutation_combination import permutations
from functions.permutation_combination import combinations
from functions.probability import probability_questions
from functions.comparisions import comparison_questions
from functions.logical import logical_questions
from functions.number_theory import number_theory_questions
from functions.graph_theory import generate_graph_theory_questions
from functions.pattern_recognition import pattern_recognition_questions
from functions.geometry import generate_shape_questions
from functions.clock import generate_clock_time_questions
from functions.algorithmic_reasoning import knapsack_questions
from functions.algorithmic_reasoning import greedy_questions
from functions.algorithmic_reasoning import sort_and_median_questions
from functions.algorithmic_reasoning import recursion_questions
from functions.temporal import temporal_questions
from functions.incomplete_question import incomplete_questions
from functions.spatial_reasoning import spatial_reasoning_questions
# English singular -> plural forms for every item and category name; used when
# generated question text needs the plural of an object word.
plural_dictionary = {
    "fruit": "fruits",
    "apple": "apples",
    "orange": "oranges",
    "banana": "bananas",
    "strawberry": "strawberries",
    "grape": "grapes",
    "vegetable": "vegetables",
    "carrot": "carrots",
    "broccoli": "broccoli",
    "tomato": "tomatoes",
    "potato": "potatoes",
    "cabbage": "cabbages",
    "animal": "animals",
    "dog": "dogs",
    "cat": "cats",
    "elephant": "elephants",
    "giraffe": "giraffes",
    "dolphin": "dolphins",
    "geometry": "geometries",
    "triangle": "triangles",
    "square": "squares",
    "pentagon": "pentagons",
    "hexagon": "hexagons",
    "octagon": "octagons",
    "clock" : "clocks",
}

# Catalogue of scene objects grouped by category.  Each category carries the
# currently enabled "items" plus a per-item count "range" marker; several items
# are commented out, presumably to keep the combinatorial space of create_QA()
# manageable.
object_dictionary = {
    "fruit": {
        "items": [
            "apple",
            "orange",
            # "banana",
            # "strawberry",
            # "grape",
        ],
        "range": [1]
    },
    "vegetable": {
        "items": [
            "carrot",
            "broccoli",
            # "tomato",
            # "potato",
            # "cabbage"
        ],
        "range": [1]
    },
    "animal":{
        "items": [
            "dog",
            "cat",
            # "elephant",
            # "giraffe",
            # "dolphin"
        ],
        "range": [1]
    },
    "geometry": {
        "items": [
            "triangle",
            "square",
            "pentagon",
            "hexagon",
            "octagon"
        ],
        "range": [1]
    },
    "clock": {
        "items": [
            "clock 1",
            "clock 2",
            "clock 3",
            "clock 4",
            "clock 5",
            "clock 6",
            "clock 7",
            "clock 8",
            "clock 9",
            "clock 10",
            "clock 11",
            "clock 12"
        ],
        "range": [1]
    }
}
def determine_object_type(sampled_items):
    """Return the mixing level of a selection of items.

    "single_object" when exactly one item was sampled, "intra_category" when
    every sampled item belongs to the same ``object_dictionary`` category,
    and "inter_category" otherwise.  Items not listed in any category simply
    do not contribute a category.
    """
    def _category_of(item):
        # Find the first category whose item list contains this item.
        for name, details in object_dictionary.items():
            if item in details["items"]:
                return name
        return None

    found = {_category_of(item) for item in sampled_items}
    found.discard(None)

    if len(sampled_items) == 1:
        return "single_object"
    return "intra_category" if len(found) == 1 else "inter_category"
def test_create_QA():
    """Build QA pairs for a single hand-picked scene.

    Only the clock-time question generator is exercised here; the other
    generators (counting, arithmetic, permutations, probability, logic,
    graph/number theory, algorithmic reasoning, temporal, spatial, shapes,
    ...) are imported but deliberately not invoked in this focused run.
    """
    question_item = {'clock 1': 1, 'square': 2, 'orange': 4}
    object_key = determine_object_type(list(question_item.keys()))

    qa_pairs = []
    qa_pairs += generate_clock_time_questions(question_item, object_key)

    return [{
        "obj_json": question_item,
        "qa_pairs": qa_pairs,
    }]
def create_QA():
    """Enumerate item combinations (sizes 1..9) and emit counting QA pairs.

    Every subset of the full item pool up to nine items is paired with every
    assignment of per-item counts in [1, 5]; each combination yields one
    scene dict plus its counting questions.
    """
    obj_to_return = []
    total_questions = 0

    # Flatten the item pool across all categories.
    all_items = []
    for details in object_dictionary.values():
        all_items.extend(details["items"])

    count_values = list(range(1, 6))  # generic per-item count range [1, 5]

    for size in range(1, len(all_items) + 1):
        if size > 9:
            continue
        for sampled_items in itertools.combinations(all_items, size):
            object_type = determine_object_type(sampled_items)
            for counts in itertools.product(count_values, repeat=size):
                question_item = dict(zip(sampled_items, counts))
                qa_pairs = []
                qa_pairs += counting(question_item, object_type)
                obj_to_return.append({
                    "obj_json": question_item,
                    "qa_pairs": qa_pairs,
                })
                total_questions += len(qa_pairs)

    print(f"Total questions: {total_questions}")
    return obj_to_return
if __name__ == "__main__":
file_path = 'D:\MLMM_ASU\MLLM_Evaluation_Scale\display3.json'
with open(file_path, 'w') as f:
json.dump(test_create_QA(), f) | GVS-007/MLLM_Reasoning | final_data_creation.py | final_data_creation.py | py | 7,225 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functions.clock.generate_clock_time_questions",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 203,
"usage_type": "call"
},
... |
16620383859 | # @Author: Billy Li <billyli>
# @Date: 06-05-2022
# @Email: li000400@umn.edu
# @Last modified by: billyli
# @Last modified time: 06-06-2022
import sys
from pathlib import Path
import shutil
import numpy as np
from scipy.stats import uniform
data_dir = Path.cwd().parent.joinpath("data")
sys.path.insert(1, str(data_dir))
from hit_generator import stochastic
from util import plot_in_RAM, small_helix_check, hits2arc
from information import *
def discretize(x, min, max, res):
    """Map a value to a bin index over ``res`` uniform bins covering [min, max).

    The parameter names ``min``/``max`` shadow the builtins; they are kept
    unchanged for backward compatibility with existing callers.

    :param x: value to discretize
    :param min: lower bound of the range
    :param max: upper bound of the range
    :param res: number of bins
    :return: integer bin index in [0, res - 1]; out-of-range values are
        clamped to the nearest edge bin
    """
    step = (max - min) / res
    result = (x - min) // step
    if result >= res:
        result = res - 1
    elif result < 0:
        # Fix: a value below ``min`` used to produce a negative index, which
        # silently wraps around when used to index a numpy array.
        result = 0
    return int(result)
def xy2map(xs, ys, res, xmin=-810, xmax=810, ymin=-810, ymax=810):
    """Rasterise point coordinates into a binary ``res`` x ``res`` map.

    The previous hard-coded +-810 bounds (the tracker's transverse extent)
    are now keyword parameters with the same defaults, so the function stays
    backward compatible while supporting other geometries.

    :param xs: iterable of x coordinates
    :param ys: iterable of y coordinates (iterated in lockstep with xs)
    :param res: output resolution (pixels per side)
    :return: float numpy array of shape (res, res); occupied pixels are 1.0.
        Row 0 corresponds to the largest y (image convention).
    """
    grid = np.zeros(shape=(res, res), dtype=float)
    for x, y in zip(xs, ys):
        xIdx = discretize(x, xmin, xmax, res)
        yIdx = discretize(y, ymin, ymax, res)
        # Flip y so increasing y points "up" in the rendered image.
        grid[res - 1 - yIdx, xIdx] = 1.0
    return grid
# feature_dir_default =
def make_data_single_track(feature_dir):
    """Generate a single-track dataset and save it into ``feature_dir``.

    Draws tracks from the raw .db files until ``N_data`` pass the
    small-helix quality cut, rasterises each track's hits into a
    ``res`` x ``res`` image (X) and encodes its arc parameters (a, b) as a
    lower-resolution map (Y), then saves X.npy and Y.npy.

    NOTE(review): ``stochastic``, ``small_helix_check``, ``plot_in_RAM``,
    ``hits2arc`` and ``t_info`` come from project modules (some via
    ``from information import *``); their exact semantics are assumed from
    usage here — confirm against hit_generator.py / util.py.
    """
    feature_dir.mkdir(parents=True, exist_ok=True)

    ### set dataset property
    # Number of samples
    N_data = 100
    N_generated = 0

    # quality cut
    dx_min = 100
    dy_min = 100

    # resolution of the rasterised hit image
    res = 256

    # set track source (db files)
    track_dir = Path.cwd().parent.parent.joinpath('data').joinpath('raw')
    db_list = ["train_CeEndpoint-mix-fromCSV_1.db",
            "train_CeEndpoint-mix-fromCSV_2.db",\
            "train_CeEndpoint-mix-fromCSV_3.db"]
    file_list = [track_dir.joinpath(db) for db in db_list]

    # set track distribution (constant 1 track per event: uniform(loc=1, scale=0))
    dist = uniform(loc=1, scale=0)

    # set track generator
    gen = stochastic(dist=dist, db_files=file_list, hitNumCut=20)

    X = []
    Y = []
    while N_generated < N_data:
        hit_dict = gen.generate(mode='production')
        # NOTE(review): a True result appears to mean "reject this track"
        # (too small a helix); confirm in util.small_helix_check.
        if small_helix_check(hit_dict,dx_min=dx_min,dy_min=dy_min):
            continue
        else:
            # Progress line reuses '\r' to overwrite itself in the terminal.
            sys.stdout.write(t_info(f'Finding qualified track: {N_generated+1}/{N_data}', special='\r'))
            if N_generated+1 == N_data:
                sys.stdout.write('\n')
            sys.stdout.flush()
            x = plot_in_RAM(hit_dict, res)
            x = x.reshape(res,res)
            X.append(x)
            # hits2arc returns (a, b, R); a and b are presumably the fitted
            # circle centre — TODO confirm against util.hits2arc.
            a, b, R = hits2arc(hit_dict)
            # Target map is 8x coarser than the input image.
            y = xy2map([a], [b], int(res/8))
            N_generated += 1
            Y.append(y)

    X = np.array(X)
    Y = np.array(Y)

    X_file = feature_dir.joinpath("X.npy")
    Y_file = feature_dir.joinpath("Y.npy")

    np.save(X_file, X)
    np.save(Y_file, Y)

    return
if __name__ == "__main__":
    # Rebinds the module-level data_dir on purpose (matches original script).
    data_dir = Path.cwd().parent.parent.joinpath("data")
    feature_dir = data_dir / "interm" / "single_track"
    make_data_single_track(feature_dir)
| billy000400/CircNN | src/features/make_data_single_track.py | make_data_single_track.py | py | 2,828 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path.cwd",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sys.path.insert",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_nu... |
13990400448 | from collections import defaultdict
from sys import maxint
class Solution(object):
    """Greedy settlement of debts (LeetCode 465, "Optimal Account Balancing").

    The heuristic pairs the largest remaining debtor with the creditor whose
    balance is closest to the debt; it is not guaranteed optimal for every
    input, but matches the original behaviour.

    Note: the file-level ``from sys import maxint`` is Python-2-only and no
    longer used by this class.
    """

    def getClosest(self, S, t):
        """Return the (key, amount) pair in ``S`` whose amount is closest to ``t``.

        Returns None when ``S`` is empty.
        """
        closest = None
        # float("inf") replaces Python 2's sys.maxint (removed in Python 3).
        minDiff = float("inf")
        for k, x in S:
            diff = abs(t - x)
            if diff < minDiff:
                minDiff = diff
                closest = k, x
        return closest

    def minTransfers(self, transactions):
        """
        Return the number of transfers the greedy pairing needs to settle
        all debts.

        :type transactions: List[List[int]]  (each entry is [payer, payee, amount])
        :rtype: int
        """
        snd = lambda t: t[1]
        # Net balance per person: positive = is owed money, negative = owes.
        balance = defaultdict(lambda: 0)
        for x, y, z in transactions:
            balance[x] += z
            balance[y] -= z
        pos, neg = [], []
        # dict.items() works on both Python 2 and 3 (iteritems() was py2-only).
        for k, b in balance.items():
            if b > 0:
                pos.append((k, b))
            elif b < 0:
                neg.append((k, -b))
        opers = 0
        while neg:
            # Settle the largest debt against the closest-matching credit.
            kn, negBal = max(neg, key=snd)
            neg.remove((kn, negBal))
            kp, posBal = self.getClosest(pos, negBal)
            pos.remove((kp, posBal))
            rem = negBal - posBal
            if rem > 0:
                neg.append((kn, rem))
            elif rem < 0:
                pos.append((kp, -rem))
            opers += 1
        return opers
# Smaller sample inputs kept for manual experimentation:
# trans = [[0,1,10],[2,0,5]]
# trans = [[0,1,10], [1,0,1], [1,2,5], [2,0,5]]
# Demo run: eight participants with mixed debts; prints the transfer count.
trans = [[1, 8, 1], [1, 13, 21], [2, 8, 10], [3, 9, 20], [4, 10, 61],
         [5, 11, 61], [6, 12, 59], [7, 13, 60]]
print(Solution().minTransfers(trans))
| dariomx/topcoder-srm | leetcode/zero-pass/google/optimal-account-balancing/Solution3.py | Solution3.py | py | 1,460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.maxint",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 22,
"usage_type": "call"
}
] |
23256745509 | from flask import Flask, render_template, request, session, redirect, flash, url_for
from flask_hashing import Hashing
import random
import secrets
from .utils import *
app = Flask(__name__)
# Load settings (secret key, DB URI, ...) from the top-level config module.
app.config.from_object('config')
# flask-hashing extension used to salt + hash user passwords.
hashing = Hashing(app)
def before_request():
    """Initialise every per-language Flask session key with its default.

    Idempotent: each key is only created when missing, so every view can
    call this first.  Also trims the per-language mistake queues
    ("erreur_*") down to their most recent entry once they reach five
    items.  The Spanish and Italian blocks were previously two almost
    identical copies; they are now driven by one loop (Spanish has 7 tense
    checkboxes, Italian 5 — key creation order is unchanged).
    """
    # models.addReset(0)  # dev-only helper, kept disabled (remove after use)
    # models.reset_xp()
    if not ("username" in session):
        session["username"] = "Connexion"
    for lang, tense_count in (("Spanish", 7), ("Italian", 5)):
        if not ("time" + lang in session):
            session["time" + lang] = "temps"
            session["pronouns" + lang] = "pronoms"
            session["verb" + lang] = 'verbe'
        if not ("banane" + lang in session):
            # Checkbox state for each selectable tense (None == unchecked).
            session["banane" + lang] = None
            for i in range(2, tense_count + 1):
                session["banane{}{}".format(i, lang)] = None
            # Verb-type radio buttons (regular / all / irregular).
            session["kiwi" + lang] = None
            session["kiwi2" + lang] = None
            session["kiwi3" + lang] = None
        if not ("erreur_time" + lang in session):
            for cat in ("time", "pronouns", "verb", "type"):
                session["erreur_" + cat + lang] = []
        # Keep the mistake-replay queues short: once 5 entries pile up,
        # keep only the most recent one (original behaviour preserved).
        if "erreur_verb" + lang in session and len(session["erreur_verb" + lang]) >= 5:
            for cat in ("time", "pronouns", "verb", "type"):
                key = "erreur_" + cat + lang
                session[key] = [session[key][-1]]
        if not ("reponseUser" + lang in session):
            session["reponseUser" + lang] = ""
        if not ("reponseVerb" + lang in session):
            session["reponseVerb" + lang] = ""
# Home page
@app.route("/", methods=['GET', 'POST'])
def home():
    """Render the site's home page."""
    before_request()
    context = {"username": session["username"]}
    return render_template("home.html", **context)
# German page
@app.route("/de", methods=['GET', 'POST'])
def de():
    """Render the German-language page."""
    before_request()
    return render_template("language/german.html", username=session["username"])
# Italian page
@app.route("/it", methods=['GET', 'POST'])
def it():
    """Italian conjugation page.

    Handles three interactions coming from the template form:
    - tense checkboxes ("temps[]"): store the active tenses and redraw;
    - a submitted conjugation ("reponse"): grade it, award points and queue
      mistakes for later replay;
    - "continue" / verb-type radio ("drone"): pick the next verb, replaying
      a queued mistake every third question.
    """
    before_request()
    rappel = ""
    verif = request.form.get("temps[]")
    if verif == "futur" or verif == "conditionnel" or verif == "prรฉsent" or verif == "imparfait" or verif == "passรฉ simple":
        session["listActiveTimesItalian"] = request.form.getlist("temps[]")
        session["timeItalian"] = random.choice(session["listActiveTimesItalian"])
        session["pronounsItalian"] = random.choice(listPronounsItalian)
        # Map each tense to its checkbox session key ("banane*").
        bananes = {"futur": "bananeItalian", "conditionnel": "banane2Italian",
                   "prรฉsent": "banane3Italian", "imparfait": "banane4Italian",
                   "passรฉ simple": "banane5Italian"}
        for time in bananes:
            if time in session["listActiveTimesItalian"]:
                session[bananes[time]] = "checked"
            else:
                session[bananes[time]] = None

    if request.form.get("reponse") is not None and session["verbItalian"] != "verbe":
        if request.form.get("reponse") is not None:
            reponse = request.form.getlist("reponse")
            reponse = reponse[0].lower()
        else:
            reponse = ""
        # Strip spaces from the submitted answer ("ch" avoids shadowing chr()).
        for ch in reponse:
            if ch != " ":
                session["reponseVerbItalian"] += ch

        if "irregularItalian" in session and session["irregularItalian"] is True:
            # Irregular verbs: look the full conjugated form up directly.
            correction = correspondanceTimeIrregularItalian[session["timeItalian"]]()[
                listPronounsItalian.index(session['pronounsItalian'])][
                correspondanceVerbItalian.index(session["verbItalian"])]
            if session["reponseVerbItalian"] == correction:
                session["reponseUserItalian"] = True
                models.addPoint(session["username"], 2)
            else:
                session["reponseUserItalian"] = str(correction)
                session["erreur_timeItalian"] += [session["timeItalian"]]
                session["erreur_verbItalian"] += [session["verbItalian"]]
                session["erreur_pronounsItalian"] += [session["pronounsItalian"]]
                session["erreur_typeItalian"] += [True]
                if not ("compteurItalian" in session):
                    session["compteurItalian"] = 0
        else:
            # Regular verbs: stem + ending derived from the infinitive's
            # last three letters.
            termination = str(session["verbItalian"][-3:])
            if termination == "rre":
                correction = \
                    "c" + correspondanceTimeItalian[session["timeItalian"]]()[
                        listPronounsItalian.index(session['pronounsItalian'])][1]
            else:
                correction = \
                    correspondanceTimeItalian[session["timeItalian"]]()[listPronounsItalian.index(session['pronounsItalian'])][
                        correspondanceTerminationItalian.index(termination)]
            if session["reponseVerbItalian"] == session["verbItalian"][:-3] + correction and session["verbItalian"][-1] != "c":
                session["reponseUserItalian"] = True
                models.addPoint(session["username"], 1)
            elif session["verbItalian"][-1] == "c" and session["reponseVerbItalian"] == session["verbItalian"][:-3] + "h" + correction:
                # NOTE(review): the answer is accepted (a point is awarded) yet
                # reponseUserItalian is set to the corrected string rather than
                # True — looks intentional for "-c" stems needing an inserted
                # "h", but worth confirming against the template.
                session["reponseUserItalian"] = str(session["verbItalian"][:-3] + "h" + correction)
                models.addPoint(session["username"], 1)
            elif session["verbItalian"][-1] == "c":
                session["reponseUserItalian"] = str(session["verbItalian"][:-3] + "h" + correction)
            else:
                session["reponseUserItalian"] = str(session["verbItalian"][:-3] + correction)
            # BUG FIX: this previously tested session["reponseUserSpanish"]
            # (copy-paste from the es() view), so Italian mistakes were queued
            # — or not — based on the *Spanish* session state.
            if session["reponseUserItalian"] is not True:
                session["erreur_timeItalian"] += [session["timeItalian"]]
                session["erreur_verbItalian"] += [session["verbItalian"]]
                session["erreur_pronounsItalian"] += [session["pronounsItalian"]]
                session["erreur_typeItalian"] += [False]
                if not ("compteurItalian" in session):
                    session["compteurItalian"] = 0

    verb_type = request.form.get("drone")
    if request.form.get("continue") is not None or verb_type is not None:
        session["reponseUserItalian"] = ""
        session["reponseVerbItalian"] = ""
        if verb_type == "tous" or (
                verb_type != "reguliers" and verb_type != "irreguliers" and "tousItalian" in session and session[
                    "tousItalian"] is True):
            # "all verbs": flip a coin between regular and irregular pools.
            aleatoire = random.randint(0, 1)
            session["kiwi2Italian"] = "checked"
            session["kiwi3Italian"] = None
            session["kiwiItalian"] = None
            if aleatoire == 0:
                session["verbItalian"] = csvReaderIrregularItalian.verbChoice()
                session["irregularItalian"] = True
                session["tousItalian"] = True
            else:
                session["verbItalian"] = csvReaderItalian.verbChoice()
                session["tousItalian"] = True
                session["irregularItalian"] = False
        elif verb_type == "irreguliers" or (
                verb_type != "reguliers" and "irregularItalian" in session and session["irregularItalian"] is True):
            session["kiwi3Italian"] = "checked"
            session["kiwiItalian"] = None
            session["kiwi2Italian"] = None
            session["verbItalian"] = csvReaderIrregularItalian.verbChoice()
            session["irregularItalian"] = True
            session["tousItalian"] = False
        else:
            session["kiwiItalian"] = "checked"
            session["kiwi2Italian"] = None
            session["kiwi3Italian"] = None
            session["verbItalian"] = csvReaderItalian.verbChoice()
            session["tousItalian"] = False
            session["irregularItalian"] = False
        # Every third question, replay the oldest queued mistake instead of
        # drawing a fresh verb.
        if "compteurItalian" in session and session["compteurItalian"] == 2:
            session["timeItalian"] = session["erreur_timeItalian"][0]
            session["pronounsItalian"] = session["erreur_pronounsItalian"][0]
            session["verbItalian"] = session["erreur_verbItalian"][0]
            session["irregularItalian"] = session["erreur_typeItalian"][0]
            session["erreur_timeItalian"].pop(0)
            session["erreur_pronounsItalian"].pop(0)
            session["erreur_verbItalian"].pop(0)
            session["erreur_typeItalian"].pop(0)
            session.pop("compteurItalian")
            rappel = "Tu as fait une erreur rรฉcemment sur ce verbe, conjugue le ร nouveau !"
        else:
            session["timeItalian"] = random.choice(session["listActiveTimesItalian"])
            session["pronounsItalian"] = random.choice(listPronounsItalian)
            if "compteurItalian" in session:
                session["compteurItalian"] += 1

    return render_template("language/italian.html",
                           time=session["timeItalian"],
                           pronouns=session["pronounsItalian"],
                           verb=session["verbItalian"],
                           reponseUser=session["reponseUserItalian"],
                           reponseVerb=session["reponseVerbItalian"],
                           banane=session["bananeItalian"],
                           banane2=session["banane2Italian"],
                           banane3=session["banane3Italian"],
                           banane4=session["banane4Italian"],
                           banane5=session["banane5Italian"],
                           kiwi=session["kiwiItalian"],
                           kiwi2=session["kiwi2Italian"],
                           kiwi3=session["kiwi3Italian"],
                           username=session["username"],
                           rappel=rappel)
# Spanish page
@app.route("/es", methods=['GET', 'POST'])
def es():
    """Spanish conjugation page.

    Handles three form interactions:
    - tense checkboxes ("temps[]"): store the active tenses, pick a new
      tense/pronoun and update the checkbox state;
    - a submitted conjugation ("reponse"): grade it against regular or
      irregular conjugation tables, award points on success, otherwise show
      the correction and queue the mistake;
    - "continue" / verb-type radio ("drone"): pick the next verb of the
      requested type (regular / irregular / all), replaying a queued
      mistake every third question.
    """
    before_request()
    rappel = ""
    # Tense checkboxes submitted?
    verif = request.form.get("temps[]")
    if verif == "Futuro" or verif == "Conditional" or verif == "Presente de indicativo" or verif == "Presente de subjonctivo" or verif == "Pretรฉrito imperfecto de indicativo" or verif == "Pretรฉrito indefinido" or verif == "Prรฉtero imperfecto de subjonctivo":
        session["listActiveTimesSpanish"] = request.form.getlist("temps[]")
        session["timeSpanish"] = random.choice(session["listActiveTimesSpanish"])
        session["pronounsSpanish"] = random.choice(listPronounsSpanish)
        # Map each tense to its checkbox session key ("banane*").
        bananes = {"Futuro": "bananeSpanish", "Conditional": "banane2Spanish", "Presente de indicativo": "banane3Spanish",
                   "Presente de subjonctivo": "banane4Spanish", "Pretรฉrito imperfecto de indicativo": "banane5Spanish",
                   "Pretรฉrito indefinido": "banane6Spanish", "Prรฉtero imperfecto de subjonctivo": "banane7Spanish"}
        for time in bananes:
            if time in session["listActiveTimesSpanish"]:
                session[bananes[time]] = "checked"
            else:
                session[bananes[time]] = None
    # Answer submitted?  (verbSpanish still being "verbe" means no verb yet)
    if request.form.get("reponse") is not None and session["verbSpanish"] != "verbe":
        if request.form.get("reponse") is not None:
            reponse = request.form.getlist("reponse")
            reponse = reponse[0].lower()
        else:
            reponse = ""
        # Strip spaces from the submitted answer.
        for chr in reponse:
            if chr != " ":
                session["reponseVerbSpanish"] += chr
        if "irregularSpanish" in session and session["irregularSpanish"] is True:
            # Irregular verbs: look the full conjugated form up directly.
            correction = correspondanceTimeIrregularSpanish[session["timeSpanish"]]()[listPronounsSpanish.index(session['pronounsSpanish'])][
                correspondanceVerbSpanish.index(session["verbSpanish"])]
            if session["reponseVerbSpanish"] == correction:
                session["reponseUserSpanish"] = True
                models.addPoint(session["username"], 2)
            else:
                # Wrong: show the correction and queue the mistake for replay.
                session["reponseUserSpanish"] = str(correction)
                session["erreur_timeSpanish"] += [session["timeSpanish"]]
                session["erreur_verbSpanish"] += [session["verbSpanish"]]
                session["erreur_pronounsSpanish"] += [session["pronounsSpanish"]]
                session["erreur_typeSpanish"] += [True]
                if not ("compteurSpanish" in session):
                    session["compteurSpanish"] = 0
        else:
            # Regular verbs: ending from the infinitive's last two letters.
            # Futuro/Conditional append to the full infinitive; other tenses
            # replace the "-ar/-er/-ir" ending.
            termination = str(session["verbSpanish"][-2:])
            correction = correspondanceTimeSpanish[session["timeSpanish"]]()[listPronounsSpanish.index(session['pronounsSpanish'])][
                correspondanceTerminationSpanish.index(termination)]
            if (session["reponseVerbSpanish"] == session["verbSpanish"][:-2] + correction and session["timeSpanish"] != "Futuro" and session[
                "timeSpanish"] != "Conditional") or (
                    (session["timeSpanish"] == "Futuro" or session["timeSpanish"] == "Conditional") and session["reponseVerbSpanish"] ==
                    session[
                        "verbSpanish"] + correction):
                session["reponseUserSpanish"] = True
                models.addPoint(session["username"], 1)
            elif (session["timeSpanish"] == "Futuro" or session["timeSpanish"] == "Conditional") and session["reponseVerbSpanish"] != \
                    session[
                        "verbSpanish"] + correction:
                session["reponseUserSpanish"] = str(session["verbSpanish"] + correction)
            else:
                session["reponseUserSpanish"] = str(session["verbSpanish"][:-2] + correction)
            if session["reponseUserSpanish"] is not True:
                session["erreur_timeSpanish"] += [session["timeSpanish"]]
                session["erreur_verbSpanish"] += [session["verbSpanish"]]
                session["erreur_pronounsSpanish"] += [session["pronounsSpanish"]]
                session["erreur_typeSpanish"] += [False]
                if not ("compteurSpanish" in session):
                    session["compteurSpanish"] = 0
    # "Continue" button or a verb-type radio ("drone") submitted: pick the
    # next verb and update the radio state ("kiwi*").
    verb_type = request.form.get("drone")
    if request.form.get("continue") is not None or verb_type is not None:
        session["reponseUserSpanish"] = ""
        session["reponseVerbSpanish"] = ""
        if verb_type == "tous" or (
                verb_type != "reguliers" and verb_type != "irreguliers" and "tousSpanish" in session and session[
            "tousSpanish"] is True):
            # "all verbs": flip a coin between regular and irregular pools.
            aleatoire = random.randint(0, 1)
            session["kiwi2Spanish"] = "checked"
            session["kiwi3Spanish"] = None
            session["kiwiSpanish"] = None
            if aleatoire == 0:
                session["verbSpanish"] = csvReaderIrregularSpanish.verbChoice()
                session["irregularSpanish"] = True
                session["tousSpanish"] = True
            else:
                session["verbSpanish"] = csvReaderSpanish.verbChoice()
                session["tousSpanish"] = True
                session["irregularSpanish"] = False
        elif verb_type == "irreguliers" or (
                verb_type != "reguliers" and "irregularSpanish" in session and session["irregularSpanish"] is True):
            session["kiwi3Spanish"] = "checked"
            session["kiwiSpanish"] = None
            session["kiwi2Spanish"] = None
            session["verbSpanish"] = csvReaderIrregularSpanish.verbChoice()
            session["irregularSpanish"] = True
            session["tousSpanish"] = False
        else:
            session["kiwiSpanish"] = "checked"
            session["kiwi2Spanish"] = None
            session["kiwi3Spanish"] = None
            session["verbSpanish"] = csvReaderSpanish.verbChoice()
            session["tousSpanish"] = False
            session["irregularSpanish"] = False
        # Every third question, replay the oldest queued mistake.
        if "compteurSpanish" in session and session["compteurSpanish"] == 2:
            session["timeSpanish"] = session["erreur_timeSpanish"][0]
            session["pronounsSpanish"] = session["erreur_pronounsSpanish"][0]
            session["verbSpanish"] = session["erreur_verbSpanish"][0]
            session["irregularSpanish"] = session["erreur_typeSpanish"][0]
            session["erreur_timeSpanish"].pop(0)
            session["erreur_pronounsSpanish"].pop(0)
            session["erreur_verbSpanish"].pop(0)
            session["erreur_typeSpanish"].pop(0)
            session.pop("compteurSpanish")
            rappel = "Tu as fait une erreur rรฉcemment sur ce verbe, conjugue le ร nouveau !"
        else:
            session["timeSpanish"] = random.choice(session["listActiveTimesSpanish"])
            session["pronounsSpanish"] = random.choice(listPronounsSpanish)
            if "compteurSpanish" in session:
                session["compteurSpanish"] += 1
    return render_template("language/spanish.html",
                           time=session["timeSpanish"],
                           pronouns=session["pronounsSpanish"],
                           verb=session["verbSpanish"],
                           reponseUser=session["reponseUserSpanish"],
                           reponseVerb=session["reponseVerbSpanish"],
                           banane=session["bananeSpanish"],
                           banane2=session["banane2Spanish"],
                           banane3=session["banane3Spanish"],
                           banane4=session["banane4Spanish"],
                           banane5=session["banane5Spanish"],
                           banane6=session["banane6Spanish"],
                           banane7=session["banane7Spanish"],
                           kiwi=session["kiwiSpanish"],
                           kiwi2=session["kiwi2Spanish"],
                           kiwi3=session["kiwi3Spanish"],
                           username=session["username"],
                           rappel=rappel)
@app.route("/connexion", methods=['GET', 'POST'])
def connexion():
    """Render the login / account-creation page."""
    before_request()
    current_user = session["username"]
    return render_template("login.html", username=current_user)
@app.route("/signup", methods=['GET', 'POST'])
def signup():
    """Create a new account.

    Sanitises the requested username (lowercase letters, digits, '-', '.'
    and '_' only), rejects duplicate e-mails or usernames, sends the
    verification e-mail, stores the new user and logs them in.
    """
    before_request()
    user = models.User.query.all()
    email = request.form.get("email")
    usernameBase = request.form.get("username").lower()
    # Keep only the allowed characters (same whitelist as before:
    # '-', '.', '_', ASCII digits and lowercase ASCII letters).
    allowed_chars = set("-._0123456789abcdefghijklmnopqrstuvwxyz")
    username = "".join(ch for ch in usernameBase if ch in allowed_chars)
    for val in user:
        if email == val.email:
            flash("Adresse email dรฉjร utilisรฉ")
            return redirect(url_for("connexion"))
        if username == val.username:
            flash("Nom d'utilisateur dรฉjร utilisรฉ")
            return redirect(url_for("connexion"))
    firstname = request.form.get("firstname")
    lastname = request.form.get("lastname")
    mailtoken = secrets.token_hex(12)
    mail(email,"mailverif.html", firstname, lastname, username, mailtoken)
    # SECURITY NOTE: a fixed salt ('abcd') defeats the purpose of salting —
    # identical passwords hash identically.  Consider per-user random salts.
    password = hashing.hash_value(request.form.get("password"), salt='abcd')
    etablissement = request.form.get("etablissement")
    date_creation = models.datetime.now().strftime('%d/%m/%Y')
    logo = "https://cdn.discordapp.com/attachments/1098726716798673016/1099109424590757929/mexicain.png"
    models.addUser(email, False, mailtoken, firstname, lastname, username, password, etablissement, 0, "0", date_creation, logo, 1, 0, 0, 0)
    session["username"] = username
    flash("Bienvenue et bonne conjugaison")
    if "qcm" in session:
        return redirect("qcm")
    return redirect(url_for("home"))
@app.route("/signin", methods=['GET', 'POST'])
def signin():
    """Authenticate an existing account.

    On success stores the username in the session and redirects home (or to
    the QCM page); otherwise flashes the failure reason and returns to the
    login page.
    """
    before_request()
    for val in models.User.query.all():
        if request.form.get("email") != val.email:
            continue
        if hashing.check_value(val.password, request.form.get("password"), salt='abcd'):
            flash("Connexion rรฉussi")
            session["username"] = val.username
            if "qcm" in session:
                return redirect("qcm")
            return redirect(url_for("home"))
        flash("Mot de passe incorrect")
        return redirect(url_for("connexion"))
    flash("Pas de compte utilisateur pour cette adresse email")
    return redirect(url_for("connexion"))
@app.route("/logout", methods=['GET', 'POST'])
def logout():
    """Log the current user out and return to the home page."""
    before_request()
    session["username"] = "Connexion"
    flash("Dรฉconnection rรฉussi")
    return redirect(url_for("home"))
@app.route("/profile/<username>", methods=['GET', 'POST'])
def username_route(username):
    """Render the profile page of the requested user, or "User Not Found"."""
    before_request()
    # Refresh the stored rankings before showing any profile.
    models.modifyClassement(classements())
    for val in models.User.query.all():
        if val.username != username:
            continue
        return render_template("heritage_template/profile.html",
                               date_creation=val.date_creation,
                               xp=val.xp,
                               etablissement=val.etablissement,
                               day_streak=val.day_streak,
                               logo=val.logo,
                               username2=username,
                               level=val.level,
                               classement=val.classement,
                               classementJoueurs=classements(),
                               username=session["username"])
    return "User Not Found"
@app.route("/share", methods=['GET', 'POST'])
def partager():
    """Confirm the profile link was copied, then reload the user's profile."""
    flash("Le lien du profil a bien รฉtรฉ copiรฉ")
    current_user = session["username"]
    return redirect(url_for("username_route", username=current_user))
@app.route("/search", methods=['GET', 'POST'])
def search():
    """Render the user-search page."""
    before_request()
    context = {"username": session["username"], "utilisateurs": utilisateurs()}
    return render_template("search.html", **context)
@app.route("/leaderboard", methods=['GET', 'POST'])
def leaderboard():
    """Render the leaderboard page (overall, weekly and monthly rankings)."""
    before_request()
    context = {
        "username": session["username"],
        "utilisateurs": utilisateurs(),
        "classementPlayers": classements(),
        "classementWeek": classement_week(),
        "classementMonth": classement_month(),
    }
    return render_template("leaderboard.html", **context)
@app.route("/verif/<username>/<mailtoken>", methods=['GET', 'POST'])
def verif(mailtoken, username):
    """Validate an e-mail verification token, then go back home."""
    if models.verif(mailtoken, username) is True:
        flash("Compte vรฉrifier")
    else:
        flash("une erreur est survenu")
    return redirect(url_for("home"))
@app.route("/forgetpassword/<username>/<mailtoken>", methods=['GET', 'POST'])
def passwordForget(username, mailtoken):
    """Handle the password-reset form reached from the reset e-mail."""
    password = request.form.get("password")
    if password is None:
        # No password submitted yet: show the reset form.
        return render_template("forgetPassword.html")
    hashed = hashing.hash_value(password, salt='abcd')
    if models.changePassword(mailtoken, username, hashed) is True:
        flash("Changement de mot de passe effectuรฉ")
    else:
        flash("une erreur est survenu")
    return redirect(url_for("home"))
@app.route("/forgetpassword", methods=['GET', 'POST'])
def sendMailPassword():
    """Send the password-reset e-mail to the currently logged-in user."""
    username = session["username"]
    mailtoken = secrets.token_hex(12)
    # addtoken stores the token and returns (firstname, lastname, email).
    first_last = addtoken(mailtoken, username)
    firstname = first_last[0]
    lastname = first_last[1]
    # Local renamed from `mail` so it no longer shadows the imported helper.
    recipient = first_last[2]
    sendmail(recipient, "mailforgetpassword.html", firstname, lastname, username, mailtoken)
    # BUG FIX: a Flask view must return a response; the original fell off the
    # end and returned None, which makes Flask raise a 500.
    return redirect(url_for("home"))
#@app.route("/qcm", methods=['GET', 'POST'])
#def qcm():
# """fonction permettant d'accรฉder ร la page QCM """
# before_request()
# session["qcm"] = "ok"
# return render_template("qcm.html",
# username=session["username"])
#@app.route("/esQcmChap4", methods=['GET', 'POST'])
#def esQcmChap4():
# before_request()
# if session["username"] == "Connexion":
# return redirect(url_for("qcm"))
# else:
# return render_template("esQcmChap4.html",
# username=session["username"])
| Tezay/conjug | conjugFR/views.py | views.py | py | 27,625 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_hashing.Hashing",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.session",
"l... |
18306688211 | from PIL import Image
# Open the source photo and report basic properties.
img = Image.open(r"C:\Users\rohan\Downloads\Pictures\IMG_5093.jpg")
height = img.height
width = img.width
print(f"Height:{height} Width:{width}")
r, g, b = img.getpixel((100, 100))
print(f"R:{r} G:{g} B:{b}")
# Grayscale copy ("L" mode). BUG FIX: the original rebound img2/img3 to the
# result of save(), which returns None, clobbering the image objects.
img2 = img.convert("L")
img2.show()
img2.save(r"C:\Users\rohan\Downloads\Pictures\test2.jpeg")
# Rotated copy (180 degrees).
img3 = img.rotate(180)
img3.show()
img3.save(r"C:\Users\rohan\Downloads\Pictures\test3.jpeg")
| rohanxd1/Codes | Python/TEST.py | TEST.py | py | 416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 2,
"usage_type": "name"
}
] |
1165604872 | # https://leetcode.com/problems/subsets/
# Total subsets for an arr of len n is 2 power n
# Each subset is represented by binary representation of this total
# for ex. if n = 3
# total subsets is 8 viz. 2 power 3
# subset0 would be 000, subset1 = 001, subset2 = 010, subset3 = 011 and so on.
# if one is found in binary number then include num[index] where index is position of 1 in binary number
# for ex in 000 donot include any item in subset
# for 001 include only 3rd item (nums[2]) in subset
# for 010 include 2nd item (nums[1]) in subset
# 011 include 2nd and 3rd item (nums[1] and nums[2]) in subset
from typing import List
class Solution:
    def _getbinary(self, num):
        """Return the binary digits of *num*, least-significant digit first."""
        digits: List[int] = []
        while num > 0:
            digits.append(num % 2)
            num //= 2
        return digits

    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Enumerate all 2**n subsets; subset *mask* contains nums[j] iff bit j of mask is set."""
        all_subsets: List[List[int]] = []
        for mask in range(pow(2, len(nums))):
            bits = self._getbinary(mask)
            all_subsets.append([nums[j] for j, bit in enumerate(bits) if bit == 1])
        return all_subsets
# Quick manual check: prints all 8 subsets of [1, 2, 3].
if __name__ == '__main__':
    print(Solution().subsets([1, 2, 3]))
| acharyarajiv/leetcode | medium/subsets.py | subsets.py | py | 1,423 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": ... |
32487421332 | #-*-coding:utf-8 -*-
import sys
import hmac
import hashlib
import time
import requests
import json
import urllib
import top
class ewsServiceApi:
    '''
    Aliyun EWS service API client (TOP-style HMAC-MD5 signed requests).
    '''

    def __init__(self, accesskey, secretkey):
        self.accesskey = accesskey
        self.secrekey = secretkey
        # Millisecond timestamp captured once at construction time.
        self.timestamp = int(1000 * time.time())

    def sign(self, secret, parameters):
        """Return the uppercase HMAC-MD5 signature of the request parameters.

        When *parameters* is a dict, the signed string is
        secret + key1value1key2value2... + secret with keys in SORTED order.
        BUG FIX: the original called sorted(keys) and discarded the result,
        so pairs were concatenated in arbitrary dict order.
        """
        if hasattr(parameters, "items"):
            keys = sorted(parameters)
            parameters = "%s%s%s" % (secret, str().join('%s%s' % (key, parameters[key]) for key in keys), secret)
        SecretKey = self.secrekey
        sign = hmac.new(
            SecretKey.encode('utf-8'),
            parameters.encode('utf-8'),
            hashlib.md5
        ).hexdigest().upper()
        return sign

    def get(self, geturl):
        """Issue a signed GET request and return the decoded JSON body."""
        parames = {
            'accesskey': self.accesskey,
            'timestamp': self.timestamp,
        }
        sign = self.sign(self.secrekey, parames)
        headers = {'Authorization': sign}
        req = requests.get(geturl, params=parames, headers=headers)
        context = json.loads(req.text)
        return context

    def post(self, deployurl, node_id, comment):
        """Trigger a SEQUENTIAL deploy of *deployurl* on the given EWS node."""
        parameters = {
            'accesskey': self.accesskey,
            'comment': comment,
            'method': 'SEQUENTIAL',
            'node_id': node_id,
            'timestamp': self.timestamp,
            'update': "false",
            'url': deployurl,
        }
        sign = self.sign(self.secrekey, parameters)
        headers = {'Authorization': sign}
        serivceUrl = 'http://open-ews.cloud.tmall.com/api/v1/node/{0}/uploadStart/'.format(node_id)
        res = requests.post(url=serivceUrl, params=parameters, headers=headers)
        aa = json.loads(res.text)
        print(aa)
if __name__ == '__main__':
    # NOTE(review): real access/secret keys are hard-coded here; they should be
    # rotated and moved into configuration or environment variables.
    a = ewsServiceApi(
        accesskey='kqlnim0khfpou45p',
        secretkey='7226d410ef16427e821e61ebe30e8939'
    )
    a.get(geturl='http://open-ews.cloud.tmall.com/api/v1/service/')
    a.post(deployurl='http://10.26.235.132/job/meetyou-youzijie-center-new/ws/youzijie-center.war', node_id=622290, comment='ไธดๆถๅค็ๆจ่ๅๅไธๅบ่ฟๆปค111')
| opnms/opnms | base/ewsService.py | ewsService.py | py | 2,267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "hmac.new",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number":... |
25163396057 |
import os
from unittest import mock
from easul import util
from easul.driver import MemoryDriver
from easul.visual import Visual
from easul.tests.example import diabetes_progression_algorithm, prog_input_data, no_prog_input_data
import logging
from easul.visual.element import Prediction
from easul.visual.element.prediction import ProbabilityPlot, LimeTablePlot
logging.basicConfig(level = logging.INFO)
LOG = logging.getLogger(__name__)
from easul.tests.example import EXAMPLE_PATH
import anys
import pytest
# Canned model outputs used by the parametrized tests below.
prog_result = {"value":1,"label":"Progression", "probabilities":[{"value":0,"label":"No progression","probability":0.26},{"value":1,"label":"Progression","probability":0.74}]}
no_prog_result = {"value":0,"label":"No progression", "probabilities":[{"value":0,"label":"No progression","probability":0.67},{"value":1,"label":"Progression","probability":0.33}]}
# (element class, matcher for the rendered HTML, precomputed result) triples.
row_simple_elements = [
    (ProbabilityPlot, anys.AnyContains("data:image/png;base64"), prog_result),
    (Prediction, anys.AnyContains("<h5>Progression</h5>"), prog_result),
    (Prediction, anys.AnyContains("<h5>No progression</h5>"), no_prog_result)
]
# (element class, matcher, raw input data) triples for explainability elements.
row_explain_elements = [
    (LimeTablePlot, anys.AnyContains("ldl, low-density lipoproteins</b> is less than 95.85"), prog_input_data),
    (LimeTablePlot, anys.AnyContains("ldl, low-density lipoproteins</b> is less than 95.85"), no_prog_input_data),
]
@pytest.mark.parametrize("element_cls,expected_html,result", row_simple_elements)
def test_create_simple_elements_for_ml(element_cls, expected_html, result):
    """Each simple element renders the expected HTML for a precomputed result."""
    drv = MemoryDriver.from_reference("A1", autocreate=True)
    vis = Visual(
        elements=[element_cls()],
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        algorithm=diabetes_progression_algorithm(),
    )
    rendered = vis.render(driver=drv, result=result)
    assert str(rendered) == expected_html
@pytest.mark.parametrize("element_cls,expected_html,input_data", row_explain_elements)
def test_create_explainable_elements_for_ml(element_cls, expected_html, input_data):
    """Explainable elements build a context from raw input data before rendering."""
    drv = MemoryDriver.from_reference("A1", autocreate=True)
    vis = Visual(
        elements=[element_cls()],
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        algorithm=diabetes_progression_algorithm(),
    )
    ctx = vis.generate_context(input_data=input_data)
    rendered = vis.render(driver=drv, context=ctx)
    assert str(rendered) == expected_html
def test_show_prediction_handles_expressions():
    """The Prediction element evaluates its `expression` against the result value."""
    drv = MemoryDriver.from_reference("A1", autocreate=True)
    algo = diabetes_progression_algorithm()
    vis = Visual(
        elements=[Prediction(title="Predicted amount", expression="value * 100", suffix="%", as_value=True)],
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        algorithm=algo,
    )
    result = algo.single_result(prog_input_data)
    rendered = vis.render(driver=drv, result=result)
    assert str(rendered) == anys.AnyContains("100.00%")
| rcfgroup/easul | easul/tests/visual/test_prediction.py | test_prediction.py | py | 2,967 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "easul.visual.... |
70231595624 | '''
Name: Main file for HW2 of FE 595
Intro: This file should load the cleaned data from theyfightcrime.org, sort the data, and return the required info.
Author: William Long
Date : 09/22/2019
'''
import pandas as pd
import numpy as np
from textblob import TextBlob
import nltk
#First, Let's load the data
# Tab-separated single-column files; each row is one character description.
m_raw = pd.read_csv('Male_full.txt', names=["male"], sep='\t')
f_raw = pd.read_csv('Female_full.txt', names=["female"], sep='\t')
# We need to get the sentiment.
def char_sent(text):
    """Return the TextBlob sentiment polarity of *text* as a float.

    :param text: String
    :return: sent, float
    """
    return TextBlob(text).sentiment.polarity
# Let's add the sentiment in
# Score every description, then sort by sentiment polarity.
m_raw["sentiment"] = m_raw.apply(lambda row: char_sent(row["male"]), axis=1)
f_raw["sentiment"] = f_raw.apply(lambda row: char_sent(row["female"]), axis=1)
m_sort = m_raw.sort_values(by=["sentiment"])
f_sort = f_raw.sort_values(by=["sentiment"])
# Top/bottom 10 by sentiment for each gender.
m_best = m_sort.tail(10)
f_best = f_sort.tail(10)
m_worst = m_sort.head(10)
f_worst = f_sort.head(10)
# Collect every adjective ('JJ' POS tag) across all descriptions.
des = []
for i in range(len(m_raw["male"])):
    tokens = nltk.word_tokenize(m_raw["male"][i])
    tags = nltk.pos_tag(tokens)
    a = [wt[0] for wt in tags if wt[1] == 'JJ']
    des.extend(a)
for i in range(len(f_raw["female"])):
    tokens = nltk.word_tokenize(f_raw["female"][i])
    tags = nltk.pos_tag(tokens)
    a = [wt[0] for wt in tags if wt[1] == 'JJ']
    des.extend(a)
# Ten most frequent adjectives overall.
word_dist = nltk.FreqDist(des)
top_words = word_dist.most_common(10)
top_words_df = pd.DataFrame(top_words, columns=["Word", "Count"])
# Persist results as semicolon-separated CSVs.
m_best.to_csv(r'Male_Best.csv', index=None, header=True, sep=';')
f_best.to_csv(r'Female_Best.csv', index=None, header=True, sep=';')
m_worst.to_csv(r'Male_Worst.csv', index=None, header=True, sep=';')
f_worst.to_csv(r'Female_Worst.csv', index=None, header=True, sep=';')
top_words_df.to_csv(r'Top_Words.csv', sep=';', index=None, header=True)
| bluefinch83/FE_595_HW2 | Main.py | Main.py | py | 2,099 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nltk.word_tokenize"... |
19702023558 | from flask import Flask, request, send_from_directory, send_file
from contracts import DCCInterface
from web3 import Web3, HTTPProvider
import json
import _thread
import time
import traceback
app = Flask(__name__)
# Active contract ids and their cached details, maintained by the pruning thread.
jobs = []
jobs_details = []
# BUG FIX: Web3 expects a single provider instance, not a list of providers.
web3 = Web3(HTTPProvider("http://10.8.3.1:8545"))
def thread_prune_entries():
    """Background loop: every 5 s, re-query each tracked contract and drop finished jobs."""
    global jobs
    global jobs_details
    while True:
        print ("Pruning entires!")
        new_jobs = []
        new_jobs_details = []
        for j in jobs:
            try:
                iface = DCCInterface(web3, j)
                new_jobs_details.append({
                    'id': j,
                    'owner': iface.get_owner(),
                    'in_progress': 'Ongoing' if iface.get_in_progress() else 'Completed/Failed',
                    'price': iface.get_price()
                })
                # Only contracts still in progress stay in the active list.
                if iface.get_in_progress():
                    new_jobs.append(j)
            except Exception:
                # A failing contract is dropped from the list; keep the trace for debugging.
                traceback.print_exc()
        jobs = new_jobs
        jobs_details = new_jobs_details
        time.sleep(5)
@app.route('/')
def index():
    """Serve the single-page app entry point."""
    return send_file('../public/index.html')
@app.route('/<path:filename>')
def default(filename):
    """Serve static assets from the public directory."""
    return send_from_directory('../public', filename)
@app.route('/api/jobs')
def get_jobs():
    """Return the list of active contract ids as JSON."""
    return json.dumps(jobs)
@app.route('/api/addjob', methods=['POST'])
def add_job():
    """Register a contract id for monitoring; respond 'ok' or 'fail'.

    Rewritten to avoid a bare `except:` and a raised Exception used purely
    as control flow for the duplicate check.
    """
    try:
        contract_id = request.data.decode('utf-8')
    except UnicodeDecodeError:
        traceback.print_exc()
        return 'fail'
    if contract_id in jobs:
        # Duplicate registrations are rejected, same as before.
        return 'fail'
    jobs.append(contract_id)
    return 'ok'
if __name__ == '__main__':
    # Start the background pruning loop, then serve on all interfaces.
    _thread.start_new_thread(thread_prune_entries, ())
    app.run(host="0.0.0.0")
| jimgao1/dcc | src/server.py | server.py | py | 1,747 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "web3.HTTPProvider",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "contracts.DCCInterface",
... |
72054157543 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from datetime import datetime
from urllib.parse import urljoin
import pymongo
import scrapy
import scrapy.exceptions as scrapy_exceptions
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline
class GetchuPipeline:
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        return item
class MongoUpsertPipeline:
    """Upsert scraped items into MongoDB, keyed on 'getchu_id'."""

    # Default collection name; overridden by the value from crawler settings.
    mongo_collection_name = "items"

    def __init__(self, mongo_uri, mongo_db, mongo_collection_name):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.mongo_collection_name = mongo_collection_name

    @classmethod
    def from_crawler(cls, crawler):
        # Pull connection details from the Scrapy project settings.
        return cls(
            mongo_uri=crawler.settings.get("MONGO_URI"),
            mongo_db=crawler.settings.get("MONGO_DATABASE"),
            mongo_collection_name=crawler.settings.get("MONGO_COLLECTION_NAME"),
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        itemAd = ItemAdapter(item)
        itemAd['updated_at'] = datetime.now()
        # Items without a getchu_id cannot be keyed for the upsert; abort the crawl.
        if not itemAd.get('getchu_id'):
            raise scrapy_exceptions.CloseSpider('insert db error,not getchu_id provide')
        self.db[self.mongo_collection_name].update_one(
            {'getchu_id': itemAd.get('getchu_id')},
            {'$set': itemAd.asdict()},
            upsert=True,
        )
        return item
class MyImagesPipeline(ImagesPipeline):
    """Download cover, sample and character images for items of type 'game'."""

    def get_media_requests(self, item, info):
        adapter = ItemAdapter(item)
        # Only download images for the 'game' tab type.
        if adapter.get('tab_type') == 'game':
            url_list = []
            cover_url = adapter.get('cover_url')
            if cover_url:
                url_list.append(cover_url)
            cover_url_hd = adapter.get('cover_url_hd')
            if cover_url_hd:
                url_list.append(cover_url_hd)
            # Sample screenshots.
            sample_img_list = adapter.get('sample_img_list')
            if sample_img_list:
                url_list += sample_img_list
            # Character art: portrait ('img') and full-body ('img_whole').
            chara_list = adapter.get('chara_list')
            if chara_list:
                for chara in chara_list:
                    img = chara['img']
                    img_whole = chara['img_whole']
                    if img:
                        url_list += [img]
                    if img_whole:
                        url_list += [img_whole]
            for url in url_list:
                # The site requires a matching referer header for image downloads.
                yield scrapy.Request(
                    urljoin('https://www.getchu.com', url),
                    headers={
                        'referer': f'https://www.getchu.com/soft.phtml?id={adapter.get("getchu_id")}'
                    },
                    meta={'dont_cache': True},
                )

    def file_path(self, request, response=None, info=None, *, item=None):
        getchu_id = item['getchu_id']
        origin_filename = request.url.split('/')[-1]
        # Archive under year/month/getchu_id folders derived from the release date.
        on_sale = item['on_sale']
        datepath = datetime.strptime(on_sale, r'%Y/%m/%d').strftime(r'%Y/%m')
        return f'{datepath}/{getchu_id}/{origin_filename}'

    def item_completed(self, results, item, info):
        # Downloads are best-effort; the item is passed on regardless of failures.
        return item
| mudssky/myScrapySpiders | getchu/getchu/pipelines.py | pipelines.py | py | 3,959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "itemadapter.ItemAdapter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "dat... |
14873664547 | import cv2
import yaml
from application.main.onnx_model.base_model import BaseModel
from typing import Tuple
from application.main.onnx_model.util import *
class YoloOnnxModel(BaseModel):
    """ONNX-runtime YOLO detector: preprocess -> run -> NMS postprocess."""

    def __init__(self, cfg_file):
        super(YoloOnnxModel, self).__init__(cfg_file)
        self.input_nodes = ["images"]
        self.input_width, self.input_height = self.input_size

    def preprocess(self, input_img: np.ndarray):
        """Resize, convert BGR->RGB, scale to [0, 1] and reshape to 1xCxHxW."""
        img = cv2.resize(input_img, dsize=self.input_size)
        input_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_image = input_image.astype(np.float32)
        input_image /= 255.0
        image1 = np.transpose(input_image, (2, 0, 1))  # HWC -> CHW
        data = image1[np.newaxis, ...].astype(np.float32)  # 1 x CHW
        inputs = {self.input_nodes[0]: data}
        return inputs

    def run(self, input_img: np.ndarray):
        """Detect objects in a BGR image; boxes are returned in original-image coordinates."""
        origin_h, origin_w, _ = input_img.shape
        inputs = self.preprocess(input_img)
        outputs = self.model.run(self.output_nodes, inputs)
        outputs = self.postprocess(outputs[0][0], origin_shape=(origin_w, origin_h))
        return outputs

    def postprocess(self, model_outputs, origin_shape: Tuple[int, int]):
        """Apply NMS, then rescale boxes from network input size back to the original image."""
        origin_w, origin_h = origin_shape
        nms_output = nms(model_outputs, self.param["conf_threshold"], self.param["iou_threshold"])
        nms_output[:, 0] *= origin_w / self.input_width
        nms_output[:, 1] *= origin_h / self.input_height
        nms_output[:, 2] *= origin_w / self.input_width
        nms_output[:, 3] *= origin_h / self.input_height
        return nms_output

    def __call__(self, img: np.ndarray, *args, **kwargs):
        return self.run(img)

    def get_iou_threshold(self):
        return self.param["iou_threshold"]

    def set_iou_threshold(self, value):
        """Set the NMS IoU threshold; returns False for out-of-range values."""
        if 0 <= value <= 1:
            self.param["iou_threshold"] = value
            return True
        return False

    def get_conf_threshold(self):
        # BUG FIX: previously read self.param["get_conf_threshold"] (nonexistent key).
        return self.param["conf_threshold"]

    def set_conf_threshold(self, value):
        """Set the confidence threshold; returns False for out-of-range values.

        BUG FIX: previously wrote self.param["set_conf_threshold"] instead of
        "conf_threshold", so the value used by postprocess() never changed.
        """
        if 0 <= value <= 1:
            self.param["conf_threshold"] = value
            return True
        return False
| YoungHyuenKim/onnx_fastAPI_example | application/main/onnx_model/yolo_model.py | yolo_model.py | py | 2,184 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "application.main.onnx_model.base_model.BaseModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "cv2.resize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name... |
74797071465 | import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.http import Request
import re
class MySpider(CrawlSpider):
    """Scrape every book by a configured author from ablibrary.net and dump each to a text file."""

    name = 'author_scrape'
    author_name = 'ุงูููุฏู ูุนููุจ'
    author_eng = 'al-Kindi'
    start_urls = ['https://ablibrary.net/books/?offset=0&limit=50&author={}&sort=name,asc'.format(author_name)]#start url from here, need to parse the json dict.

    def parse(self, response):
        """Extract (bib data, book id, title, volume) tuples and request each book's content."""
        author_data = response.body.decode()
        books_ptrn = re.compile(r'"(author":.+?id":(\d+?),.+?"name":" (.+?)".+?"volume":"(.+?)"})',
                                re.DOTALL | re.MULTILINE | re.UNICODE)
        books = re.findall(books_ptrn, author_data)
        for book in books:
            bib = book[0]
            book_id = book[1]
            title = book[2]
            all_text = ''
            bib_data = {'author': self.author_eng, 'title': title, 'bib data': bib, 'all_text': all_text}
            volume = book[3]
            # Multi-volume works get the volume number appended to the title.
            if volume != ' ':
                bib_data['title'] = bib_data['title']+'v.' + volume
            book_url = 'https://ablibrary.net/books/{}/content?fields=content,footnote,description,page_name,' \
                       'page_number'.format(book_id)
            request = Request(book_url, callback=self.parse_book)
            # Bibliographic context travels with the request via meta.
            request.meta['meta'] = bib_data
            yield request

    def parse_book(self, response):
        """Append the book body to its bib data and write it to a per-book text file."""
        bib_data = response.meta['meta']
        book_text = response.body.decode()
        bib_data['all_text'] = bib_data['all_text'] + '\n' + book_text
        self.log('extracted book {}'.format(bib_data['title']))
        # NOTE(review): prefer a `with open(...)` context manager; titles containing
        # path separators would also break the filename -- verify source data.
        text_file = open("{}, {}.txt".format(bib_data['title'], self.author_eng), "w", encoding='utf-8')
        #bib_data['all_text'] = self.clean_text(bib_data['all_text'])
        text_file.write(bib_data['all_text'])
        text_file.close()

    def clean_text(self, text):
        # Placeholder: will strip unwanted patterns from *text*.
        return text
| maeirnoam/scrapers | ABLibrary/ablib_scraper/ablib_scraper/spiders/ABLibCrawler.py | ABLibCrawler.py | py | 2,051 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.DOTALL",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "re.MULTILINE",
... |
34996914416 | import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from numpy import pi
inputName = "21cylindrov"
fileName = inputName + "-Tphi-all"
plotName = fileName + "-smery"
# Precomputed results and the original simulation inputs (NumPy archives).
data = np.load("./Results/"+fileName+".npz")
inputData = np.load("./Inputs/" + inputName + ".npz")
n = inputData['n']
R = inputData['R']
x0 = inputData['x']
y0 = inputData['y']
phi = data['phi']
Tphi = data['Tphi']
frequencies = data['frequencies']
countf = frequencies.size
Rk = data['Rk']
# STIX fonts so math text matches the surrounding labels.
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['axes.labelsize'] = 'large'
# One curve per direction (90 and 270 degrees); the (1 + uhol/1000) factor
# presumably offsets overlapping curves slightly -- TODO confirm intent.
for uhol in range(90, 360, 180):
    plt.plot(frequencies, 2*pi/countf * Tphi[:, uhol]*(1+uhol/1000.), label="%d" % (uhol))
#plt.ylim(bottom=1e-6)
plt.legend(loc=4)
#plt.yscale("log")
plt.savefig("./"+plotName+".pdf")
#plt.show ()
| KlaraFickova/Diplomovka | Draw/smery-f.py | smery-f.py | py | 919 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_n... |
21645118091 | from datetime import date, datetime, timezone, timedelta
import threading
import git
import os
from repoorgui.commands import commandfn
# see https://stackoverflow.com/a/39956572
# made changes to return repo object if exits
def is_git_repo(path):
    """Probe *path* with GitPython; return (True, Repo) or (False, None)."""
    try:
        candidate = git.Repo(path)
        _ = candidate.git_dir
        return True, candidate
    except git.exc.InvalidGitRepositoryError:
        return False, None
def getRemoteIfExits(repo):
    """Return repo.remote(), or None when the repository has no remote configured."""
    try:
        return repo.remote()
    except ValueError:
        return None
def getRemoteUrl(remote):
    """Return the first URL of *remote*, or None when *remote* is falsy."""
    if not remote:
        return None
    return next(remote.urls)
# 0 for all
# TODO: there is a division by 0 below for _limit=0
# needs to be fixed.
# for now setting this to a large number
# Maximum number of workspace directories scanned per refresh.
_limit = 1000
def commit_days_text(numdays):
    """Human-readable "days ago" label; non-numeric input yields "invalid"."""
    try:
        if numdays == 0:
            return "today"
        if numdays == 1:
            return "yesterday"
        if numdays > 1:
            return "{} days ago".format(numdays)
    except TypeError:
        # e.g. the '-' placeholder used when no commit date is known.
        return "invalid"
def getReposList(updateFunc=None, appstate=None):
    """Scan appstate.workspace_folder for git repositories and build the display table.

    Updates appstate.workspace_repos with (repo, remote_url, last_commit_datetime,
    days-ago text) per directory and reports percent progress via updateFunc.
    Returns a list of [dir, working_dir, remote_url, days_since_last_commit] rows.
    """
    table_rows = []
    _now = datetime.now()
    _td_one_day = timedelta(days=1)
    _completion = 0
    if updateFunc:
        updateFunc(_completion)
    # Get all the subdirectories of the repo parent path (might call this workspace folder).
    _, all_subdirs, other_files = next(os.walk(appstate.workspace_folder))
    # getting the dirs is 10% progress
    if updateFunc:
        _completion = 10
        updateFunc(_completion)
    # checking if the repos are git repos and populating repo object
    # is 80% of the progress
    _loading_total_progress = 90.0
    _count = 0
    _total = len(all_subdirs)
    # NOTE(review): divides by zero when the workspace has no subdirectories
    # (and for _limit=0, as the TODO above the constant notes) -- needs a guard.
    _item_progress = _loading_total_progress / \
        (_limit if _total > _limit else _total)
    for dir in all_subdirs:
        if _limit > 0:
            _count += 1
        # Stop early once the scan limit is reached.
        if _limit > 0 and _count >= _limit:
            if updateFunc:
                _completion += _item_progress
                updateFunc(_completion)
            break
        dir_abspath = os.path.abspath(
            os.path.join(appstate.workspace_folder, dir))
        flag, repo = is_git_repo(dir_abspath)
        if flag:
            remote_url = str(getRemoteUrl(getRemoteIfExits(repo)))
            # '-' placeholders survive when the repo has no commits (ValueError below).
            last_commit_datetime = '-'
            days_since_last_commit = '-'
            try:
                last_commit_datetime = str(repo.head.commit.committed_datetime)
                td_since_last_commit = _now - \
                    repo.head.commit.committed_datetime.replace(tzinfo=None)
                days_since_last_commit, _ = divmod(
                    td_since_last_commit, _td_one_day)
            except ValueError as ve:
                pass
            appstate.workspace_repos[dir] = (
                repo, remote_url, last_commit_datetime, commit_days_text(days_since_last_commit))
        if updateFunc:
            _completion += _item_progress
            updateFunc(_completion)
    # Create repository table
    for repo_dir, (repo, remote_url, last_commit_datetime, days_since_last_commit) in appstate.workspace_repos.items():
        table_rows.append([
            str(repo_dir), str(repo.working_dir), remote_url, days_since_last_commit
        ])
    # creating the repo table with details is 10% progress
    if updateFunc:
        _completion = 100
        updateFunc(_completion)
    return table_rows
def updateReposListWindow(window, appstate):
    """Refresh the repositories table in *window*, reporting progress via window events."""
    window.write_event_value('-START-LOADING-PROGRESS-', None)
    rows = getReposList(
        lambda progress: window.write_event_value('-UPDATE-LOADING-PROGRESS-', progress),
        appstate)
    window.write_event_value('-UPDATE-REPOS-LIST-', rows)
    window.write_event_value('-DONE-LOADING-PROGRESS-', None)
@commandfn
def cmd_long_update_repos(window, event, values, appstate=None):
    """Command handler: reset cached repos if the workspace folder changed, then refresh the list in the background."""
    if appstate and values and values['workspace_folder'] != appstate.workspace_folder:
        appstate.workspace_folder = values['workspace_folder']
        appstate.workspace_repos = {}
    # Daemon thread so a long scan never blocks the GUI event loop.
    threading.Thread(target=updateReposListWindow,
                     args=(window, appstate, ), daemon=True).start()
| abhishekmishra/repoorgui | src/repoorgui/gitworkspace.py | gitworkspace.py | py | 4,397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "git.Repo",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "git.exc",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"... |
38799414939 | import discord
import bdg
import gamelist
import random
class SurpriseGameCommand(bdg.BdgCommand):
    """Slash command that draws a random game matching the given filter."""

    header = {
        'name': "sortear_jogo",
        'description': "Lista de Jogos - Sorteie um jogo aleatรณrio baseado no filtro especificado",
    }

    async def on_command(self, i: discord.Interaction, filtro: gamelist.GameFilter):
        # Renamed local (was 'gamelist') so it no longer shadows the imported module.
        games = self.bdg.get_gamelist(self.bdg.guild_collection(i.guild))
        available_games = [g for g in games.filter(filtro)]
        if len(available_games) <= 0:
            await i.response.send_message(":warning: | Nรฃo hรก nenhum jogo disponรญvel com esse filtro", ephemeral=True)
            return
        # random.choice is the idiomatic uniform pick (was randint over indices).
        game_index = random.choice(available_games)
        game = games[game_index]
        await i.response.send_message(f":tada: | O jogo sorteado รฉ... ||**{game.name.upper()}!**||")
| DanielKMach/BotDusGuri | src/commands/gamelist/surprise.py | surprise.py | py | 814 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "bdg.BdgCommand",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.Interaction",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "gamelist.GameFilter",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "... |
11685619050 | from django.shortcuts import render
from django.shortcuts import render,redirect
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
# from .models import Device
from django.contrib.auth import authenticate
from django.contrib.auth import login
#from .forms import SignUpForm
from django.core.mail import send_mail
from django.conf import settings
from rest_framework.decorators import api_view
import csv,io
from .forms import *
from .models import *
from django.views.generic import TemplateView
from django.shortcuts import get_object_or_404
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
def signup_view(request):
    """Handle user registration; on success, e-mail a confirmation to the new user."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            print("Saved user")
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            to_email= form.cleaned_data.get('email')
            # NOTE(review): printing the raw password leaks credentials into logs.
            print(username,raw_password)
            # NOTE(review): the authenticated user is never logged in or otherwise used.
            user = authenticate(username=username, password=raw_password)
            send_mail(
                'Congratulations',
                'Congratulations you are now registered',
                settings.EMAIL_HOST_USER,
                [to_email],
                fail_silently=False,
            )
    else:
        form = SignUpForm()
    return render(request,'newops/signup.html',{'form':form})
def login_view(request):
    """Authenticate via Django's AuthenticationForm and log the user in."""
    if request.method=="POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            user=form.get_user()
            login(request,user)
            # Successful login lands on the hello page.
            return render(request,'newops/hello.html')
    else:
        form = AuthenticationForm()
    # Invalid POSTs fall through here with the bound form (errors included).
    return render(request,'newops/login.html',{'form':form})
def hello(request):
    """Simple landing page shown after login."""
    return render(request,'newops/hello.html')
#API
class ApplicationDetail(APIView):
    """POST endpoint that creates an Application from form fields."""

    def post(self, request):
        print(request.data)
        # NOTE(review): no input validation; missing fields are stored as None.
        Application.objects.create(app_name = request.data.get('app_name'),app_function = request.data.get('app_function'),app_type = request.data.get('app_type'))
        return redirect('../hello')
def details(request):
    """Render the Application creation form."""
    return render(request,'newops/applicationapi.html')
class AssestDetail(APIView):
    """POST endpoint that creates an AssetGroup and links pages, devices and apps."""

    def post(self, request):
        customer_id_list = request.data.getlist('Customer_ID_id')
        app_name_list = request.data.getlist('app_name')
        page_list = request.data.getlist('page_name')
        device_list = request.data.getlist('device_registration_name')
        # Resolve posted company names to Customer rows; only the first is used as owner.
        for i in range(len(customer_id_list)):
            customer_id_list[i] = Customer.objects.get(company_name=customer_id_list[i])
        obj = AssetGroup.objects.create(assestname = request.data.get('assestname'),Customer_ID = customer_id_list[0])
        # NOTE(review): filter(...).first() may return None if a name doesn't match;
        # adding None to an m2m relation would fail -- verify the form inputs.
        for i in range(len(page_list)):
            obj.pagename.add(Pages.objects.filter(page_name=page_list[i]).first())
        for i in range(len(device_list)):
            obj.devicename.add(Device.objects.filter(device_registration_name=device_list[i]).first())
        for i in range(len(app_name_list)):
            obj.appname.add(Application.objects.filter(app_name=app_name_list[i]).first())
        obj.save()
        return redirect('../hello')
def assestdetails(request):
    """Render the AssetGroup creation form, pre-populated with selectable objects."""
    return render(request,'newops/assestgroupapi.html',{'customerquery':Customer.objects.all(),'appquery':Application.objects.all(),'pagequery':Pages.objects.all(),'devicequery':Device.objects.all()})
class DeviceSpecDetail(APIView):
    """POST endpoint that creates a DeviceSpecification from form fields."""

    def post(self, request):
        DeviceSpecification.objects.create(
            techSpecificationID=request.data.get('techSpecificationID'),
            techSpecificationName=request.data.get('techSpecificationName'),
            device_type=request.data.get('device_type'),
            gps=request.data.get('gps'),
            gsm=request.data.get('gsm'),
            wifi=request.data.get('wifi'),
            ble=request.data.get('ble'),
            zigbee=request.data.get('zigbee'),
            # BUG FIX: 'zigwave' previously copied the 'zigbee' field value.
            zigwave=request.data.get('zigwave'),
            rs_232=request.data.get('rs_232'),
            rs_485=request.data.get('rs_485'),
            rs_422=request.data.get('rs_422'),
            tcp=request.data.get('tcp'),
            mqtt=request.data.get('mqtt'),
            http=request.data.get('http'),
            symetric_key=request.data.get('symetric_key'),
            x509_Certificate=request.data.get('x509_Certificate'),
            ota=request.data.get('ota'),
            inputs=request.data.get('inputs'),
            outputs=request.data.get('outputs'),
            ethernet=request.data.get('ethernet'),
            analog_input=request.data.get('analog_input'),
            power_supply=request.data.get('power_supply'),
            other1=request.data.get('other1'),
            other2=request.data.get('other2'),
            other3=request.data.get('other3'),
            security_key=request.data.get('security_key'))
        return redirect('../hello')
def devicespec_details(request):
    """Render the DeviceSpecification creation form."""
    return render(request,'newops/devicespecapi.html')
class VendorDetail(APIView):
    """POST endpoint that creates a Vendor from form fields."""

    def post(self, request):
        Vendor.objects.create(
            vendor_name=request.data.get('vendor_name'),
            vendor_address=request.data.get('vendor_address'),
            vendor_city=request.data.get('vendor_city'),
            vendor_country=request.data.get('vendor_country'),
            zip_code=request.data.get('zip_code'),
            vendor_contact=request.data.get('vendor_contact'),
            vendor_email=request.data.get('vendor_email'),
            web=request.data.get('web'),
            vendor_VAT=request.data.get('vendor_VAT'),
            vendor_other1=request.data.get('vendor_other1'),
            vendor_other2=request.data.get('vendor_other2'),
            # BUG FIX: 'vendor_other3' previously copied the 'vendor_other2' field.
            vendor_other3=request.data.get('vendor_other3'))
        return redirect('../hello')
def vendor_details(request):
    """Render the Vendor creation form."""
    return render(request,'newops/vendorapi.html')
class IAMDetail(APIView):
    """POST endpoint that creates a Device_IAM_Mechanism entry."""

    def post(self, request):
        Device_IAM_Mechanism.objects.create(IAM=request.data.get('IAM'))
        return redirect('../hello')
def IAM_details(request):
    """Render the IAM mechanism creation form."""
    return render(request,'newops/iamapi.html')
class DPSDetail(APIView):
    """API endpoint that creates a DPS_Property record from POSTed form data."""

    def post(self, request):
        data = request.data
        DPS_Property.objects.create(
            dps_name=data.get('dps_name'),
            resourse_type=data.get('resourse_type'),
            location=data.get('location'),
            location_ID=data.get('location_ID'),
            resourse_ID=data.get('resourse_ID'),
            resourse_group=data.get('resourse_group'),
            resourse_group_id=data.get('resourse_group_id'),
            subscription=data.get('subscription'),
            subscription_id=data.get('subscription_id'))
        return redirect('../hello')
def DPS_details(request):
    """Render the DPS-property entry form."""
    template = 'newops/dpsapi.html'
    return render(request, template)
class usergroupDetail(APIView):
    """API endpoint that creates a UserGroup record from POSTed form data."""

    def post(self, request):
        data = request.data
        UserGroup.objects.create(
            usergroup=data.get('usergroup'),
            superadmin=data.get('superadmin'),
            admin=data.get('admin'),
            localadmin=data.get('localadmin'),
            manager=data.get('manager'),
            supervisor=data.get('supervisor'),
            operator=data.get('operator'),
            support=data.get('support'),
            staff=data.get('staff'),
            other1=data.get('other1'),
            other2=data.get('other2'))
        return redirect('../hello')
def Usergroup_details(request):
    """Render the user-group entry form."""
    template = 'newops/usergroupapi.html'
    return render(request, template)
class IotDetail(APIView):
    """API endpoint that creates an IOT_Hub record from POSTed form data."""
    def post(self,request):
        """Create an IOT_Hub and redirect to '../hello'.

        NOTE(review): every submitted resource-group name is resolved to a
        DPS_Property object, but only the first one is stored on the hub;
        filter().first() yields None for unknown names and an empty list
        raises IndexError -- confirm whether that is intended.
        """
        resourse_group_list = request.data.getlist('resourse_group')
        print(resourse_group_list)  # debug trace of submitted group names
        for i in range(len(resourse_group_list)):
            resourse_group_list[i] = DPS_Property.objects.filter(resourse_group=resourse_group_list[i]).first()
        # Missing form fields default to None via .get().
        IOT_Hub.objects.create(hub_name=request.data.get('hub_name'),hostname=request.data.get('hostname'),status=request.data.get('status'),
        current_location=request.data.get('current_location'),subscription=request.data.get('subscription'),resourse_group=resourse_group_list[0])
        return redirect('../hello')
def IOT_details(request):
    """Render the IoT-hub form with all DPS properties for the select box."""
    context = {'dpsquery': DPS_Property.objects.all()}
    return render(request, 'newops/iotapi.html', context)
class CADetail(APIView):
    """API endpoint that registers a certificate authority type."""

    def post(self, request):
        ca_type = request.data.get('CAtype')
        CA.objects.create(CAtype=ca_type)
        return redirect('../hello')
def CA_details(request):
    """Render the CA entry form."""
    template = 'newops/caapi.html'
    return render(request, template)
class UserTypeDetail(APIView):
    """API endpoint that registers a user type."""

    def post(self, request):
        user_type_value = request.data.get('user_type')
        Usertype.objects.create(user_type=user_type_value)
        return redirect('../hello')
def Usertype_details(request):
    """Render the user-type entry form."""
    template = 'newops/usertypeapi.html'
    return render(request, template)
class PermissionDetail(APIView):
    """API endpoint that creates a Permissions record from POSTed form data."""

    def post(self, request):
        data = request.data
        Permissions.objects.create(
            permission_name=data.get('permission_name'),
            add_permission=data.get('add_permission'),
            edit_permission=data.get('edit_permission'),
            modify_permission=data.get('modify_permission'),
            view_permission=data.get('view_permission'),
            log_permission=data.get('log_permission'),
            delete_permission=data.get('delete_permission'))
        return redirect('../hello')
def Permission_details(request):
    """Render the permissions entry form."""
    template = 'newops/permissionapi.html'
    return render(request, template)
class CustomerDetail(APIView):
    """API endpoint that creates a Customer record from POSTed form data."""
    def post(self,request):
        """Create a Customer and redirect to '../hello'.

        NOTE(review): every submitted 'application' name is resolved to an
        Application object, but only the first one is stored on the customer;
        filter().first() yields None for unknown names and an empty list
        raises IndexError -- confirm whether that is intended.
        """
        app_list = request.data.getlist('application')
        print(app_list)  # debug trace of submitted application names
        for i in range(len(app_list)):
            app_list[i] = Application.objects.filter(app_name=app_list[i]).first()
        # One create call mapping each form field onto the model field of the
        # same name; missing form fields default to None via .get().
        Customer.objects.create(company_name=request.data.get('company_name'),address=request.data.get('address'),city=request.data.get('city'),
        country=request.data.get('country'),zip_code=request.data.get('zip_code'),primary_contact_person=request.data.get('primary_contact_person'),
        designation=request.data.get('designation'),primary_email=request.data.get('primary_email'),secondary_contact_person=request.data.get('secondary_contact_person'),
        s_designation=request.data.get('s_designation'),secondary_email=request.data.get('secondary_email'),website=request.data.get('website'),
        gst=request.data.get('gst'),vat=request.data.get('vat'),installation_mode=request.data.get('installation_mode'),no_of_site=request.data.get('no_of_site'),
        site1=request.data.get('site1'),site2=request.data.get('site2'),site3=request.data.get('site3'),address_site1=request.data.get('address_site1'),
        address_site2=request.data.get('address_site2'),address_site3=request.data.get('address_site3'),city_site1=request.data.get('city_site1'),city_site2=request.data.get('city_site2'),
        city_site3=request.data.get('city_site3'),country_site1=request.data.get('country_site1'),country_site2=request.data.get('country_site2'),country_site3=request.data.get('country_site3'),
        application=app_list[0])
        return redirect('../hello')
def Customer_details(request):
    """Render the customer form with all applications for the select box."""
    context = {'appquery': Application.objects.all()}
    return render(request, 'newops/customerapi.html', context)
class CertificateDetail(APIView):
    """API endpoint that creates a Certificate record from POSTed form data."""
    def post(self,request):
        """Create a Certificate and redirect to '../hello'.

        NOTE(review): only the first submitted CA and the first device are
        stored; devices are matched by Firmware_version, and filter().first()
        yields None for unknown values -- confirm both against the form.
        """
        ca_list = request.data.getlist('ca_name')
        device_list = request.data.getlist('assignedTo')
        # Resolve submitted names to model instances in place.
        for i in range(len(ca_list)):
            ca_list[i] = CA.objects.filter(CAtype=ca_list[i]).first()
        for i in range(len(device_list)):
            device_list[i] = Device.objects.filter(Firmware_version=device_list[i]).first()
        Certificate.objects.create(certificate_name=request.data.get('certificate_name'),certFile_type=request.data.get('certFile_type'),
        generatedOn=request.data.get('generatedOn'),validity=request.data.get('validity'),uploadedOn=request.data.get('uploadedOn'),assigned=request.data.get('assigned'),
        assignedDate=request.data.get('assignedDate'),assignedTo=device_list[0],ca_name=ca_list[0])
        return redirect('../hello')
def Certificate_details(request):
    """Render the certificate form with CAs and devices for the select boxes."""
    context = {'caquery': CA.objects.all(), 'devicequery': Device.objects.all()}
    return render(request, 'newops/certificateapi.html', context)
class DeviceDetail(APIView):
    """API endpoint that creates a Device record, linking it to its hub, DPS
    property, vendor, customer, application, specification and IAM mechanisms."""
    def post(self,request):
        """Create a Device from the request payload and redirect to '../hello'.

        NOTE(review): each related list is fully resolved but only element [0]
        is stored (IAM mechanisms excepted); `.get()` raises DoesNotExist for
        unknown names and an empty list raises IndexError -- confirm intended.
        """
        # Collect the submitted names for every related object.
        iothublist = request.data.getlist('iot_hub_name')
        dpslist = request.data.getlist('dps_property_ID')
        vendorlist = request.data.getlist('vendor')
        customerlist = request.data.getlist('sold_to_customer')
        applist = request.data.getlist('route_to_application')
        devicespeclist = request.data.getlist('device_Specification_ID')
        IAMlist = request.data.getlist('device_IAM_mechanism')
        # Resolve names to model instances in place.
        for i in range(len(iothublist)):
            iothublist[i] = IOT_Hub.objects.get(hub_name=iothublist[i])
        for i in range(len(dpslist)):
            dpslist[i] = DPS_Property.objects.get(dps_name=dpslist[i])
        for i in range(len(vendorlist)):
            vendorlist[i] = Vendor.objects.get(vendor_name=vendorlist[i])
        for i in range(len(customerlist)):
            customerlist[i] = Customer.objects.get(company_name=customerlist[i])
        for i in range(len(applist)):
            applist[i] = Application.objects.get(app_name=applist[i])
        for i in range(len(devicespeclist)):
            devicespeclist[i] = DeviceSpecification.objects.get(device_type=devicespeclist[i])
        # Create the device; missing scalar fields default to None via .get().
        obj = Device.objects.create(device_type = request.data.get('device_type'),enrollment_type=request.data.get('enrollment_type'),
        device_registration_name=request.data.get('device_registration_name'),iot_hub_name=iothublist[0],dps_property_ID=dpslist[0],
        allocation_policy=request.data.get('allocation_policy'),secret_storage=request.data.get('secret_storage'),
        operation=request.data.get('operation'),vendor=vendorlist[0],make=request.data.get('make'),model=request.data.get('model'),
        serial_number=request.data.get('serial_number'),date_of_purchase=request.data.get('date_of_purchase'),
        warrenty_period=request.data.get('warrenty_period'),warrenty_expiry=request.data.get('warrenty_expiry'),
        Firmware_version=request.data.get('Firmware_version'),sold_to_customer=customerlist[0],route_to_application=applist[0],configured=request.data.get('configured'),
        device_Specification_ID=devicespeclist[0])
        # Many-to-many: attach every selected IAM mechanism to the new device.
        for i in range(len(IAMlist)):
            obj.device_IAM_mechanism.add(Device_IAM_Mechanism.objects.filter(IAM=IAMlist[i]).first())
        obj.save()
        return redirect('../hello')
def devicedetails(request):
    """Render the device form, pre-loading every related queryset its select
    boxes need."""
    context = {
        'customerquery': Customer.objects.all(),
        'appquery': Application.objects.all(),
        'iotquery': IOT_Hub.objects.all(),
        'devicequery': Device.objects.all(),
        'vendorquery': Vendor.objects.all(),
        'IAMquery': Device_IAM_Mechanism.objects.all(),
        'dpsquery': DPS_Property.objects.all(),
    }
    return render(request, 'newops/deviceapi.html', context)
| sanjolisogani/new_ops | newops/views.py | views.py | py | 14,804 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.EMAIL_HOST_USER",
"line_number": 35,
"usage_type": "attr... |
41166932292 | # pip install requests bs4 lxml
# pip install jieba
import requests
import bs4
import jieba
import csv
stocks = set()
def prepare_stocks(path='week3/Stock.csv'):
    """Load stock names from a CSV file into the module-level `stocks` set.

    The first two rows are treated as headers and skipped; column 1 of each
    remaining row is the stock name used for keyword matching. The `path`
    parameter generalizes the previously hard-coded file location (the
    default preserves the old behaviour).
    """
    with open(path, encoding='utf-8') as csv_file:
        csv_reader = csv.reader(csv_file)
        for row_number, row in enumerate(csv_reader):
            # Skip the two header rows and any malformed short row, instead
            # of materializing the whole file as a list first.
            if row_number < 2 or len(row) < 2:
                continue
            stocks.add(row[1])
def get_text(o):
    """Return o.text, or the empty string when o is None."""
    if o is None:
        return ''
    return o.text
def read_article(url):
    """Fetch one PTT article and print, per push comment, a dict counting how
    often each known stock name appears in the comment text."""
    response = requests.get('https://www.ptt.cc' + url)
    soup = bs4.BeautifulSoup(response.text, 'lxml')
    for push in soup.select('div.push'):
        content = get_text(push.select_one('.push-content'))
        issuer = get_text(push.select_one('.push-userid'))
        created = get_text(push.select_one('.push-ipdatetime'))
        keywords = {}
        for token in jieba.lcut(content):
            if token in stocks:
                keywords[token] = keywords.get(token, 0) + 1
        print(keywords)
def read_topic():
    """Fetch the PTT Stock board index and, for each listed topic, print the
    stock-name counts found in its title, then scan the article's pushes."""
    response = requests.get('https://www.ptt.cc/bbs/Stock/index.html')
    soup = bs4.BeautifulSoup(response.text, 'lxml')
    for row in soup.select('div.r-ent'):
        anchor = row.select_one('.title a')
        if anchor is None:
            # Deleted posts have no title link; skip them.
            continue
        url = anchor['href']
        title = anchor.text
        count = get_text(row.select_one('.nrec'))
        issuer = get_text(row.select_one('.author'))
        created = get_text(row.select_one('.date'))
        keywords = {}
        for token in jieba.lcut(title):
            if token in stocks:
                keywords[token] = keywords.get(token, 0) + 1
        print(keywords)
        read_article(url)
# Script entry: build the stock-name set, then crawl the board index page.
prepare_stocks()
read_topic()
{
"api_name": "csv.reader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jieba.lcut",
"line_numb... |
74120599144 | from django.db.models import Q
from django.shortcuts import render
from apps.news.models import News, HeadlineNews, BottomInfo
# Views function for home page of site
def index(request):
    """Render the home page with the three latest news items, the three
    published headlines and the three published bottom-info blocks.

    Note: .get() raises DoesNotExist/MultipleObjectsReturned unless exactly
    one published row exists per headline/info slot.
    """
    latest_news = News.objects.order_by('-published_date')[:3]
    headlines = HeadlineNews.objects.filter(is_publish=True)
    bottominfos = BottomInfo.objects.filter(is_publish=True)
    context = {'latest_news': latest_news}
    for slot in (1, 2, 3):
        context['headline%d' % slot] = headlines.get(headline='headline%d' % slot)
    for slot in (1, 2, 3):
        context['info%d' % slot] = bottominfos.get(bottom_info='info%d' % slot)
    return render(request, 'home/index.html', context)
# View function for admin page that nested in home page
def about_us(request):
    """Render the static 'about us' page."""
    template = 'home/about_us.html'
    return render(request, template)
# View function for admin page that nested in home page
def administraion(request):
    """Render the admin page nested in the home page.

    Note: the misspelled name is kept because URL configuration elsewhere
    references it.
    """
    template = 'home/include_admin.html'
    return render(request, template)
{
"api_name": "apps.news.models.News.objects.order_by",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "apps.news.models.News.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "apps.news.models.News",
"line_number": 9,
"usage_type": "name"
... |
16667044604 | import os
import pydicom
import numpy as np
import dicom_numpy
from utils import hidden_errors
from tf_utils import *
from pathlib import Path
def read_dicom_folder(dicom_folder, rescale=None):
    '''Read all .dcm files in `dicom_folder` and merge them into one volume.

    Returns:
        (volume, first_dataset): the merged voxel array and the first slice's
        pydicom dataset (callers read PixelSpacing/SliceThickness from it).
        The affine matrix produced by `combine_slices` is discarded -- the
        old docstring claimed it was returned, but it never was.
    '''
    dss = [pydicom.dcmread(str(dicom_folder/dcm))
           for dcm in os.listdir(dicom_folder) if dcm.endswith('.dcm')]
    # The affine from combine_slices is intentionally unused.
    vol, _ = dicom_numpy.combine_slices(dss, rescale)
    return vol, dss[0]
def get_largest_dir(dirs, minsize=100):
    '''Return the directory from `dirs` containing the most entries, or None.

    Non-directory entries are ignored. Returns None when `dirs` contains no
    directories (the old version raised ValueError on empty input and could
    call os.listdir on a plain file) or when the winner has fewer than
    `minsize` entries.
    '''
    candidates = [d for d in dirs if os.path.isdir(d)]
    if not candidates:
        return None
    m = max(candidates, key=lambda d: len(os.listdir(d)))
    return m if len(os.listdir(m)) >= minsize else None
def get_volume_dirs(path):
    """Return, for each case directory under `path`, the sub-directory with
    the most files inside its 'Unknown Study' folder (the highest-resolution
    volume), skipping cases where no suitable directory is found."""
    path = Path(path)
    vol_dirs = []
    for case_dir in path.iterdir():
        if not case_dir.is_dir():
            continue
        # cd into CQ500-CT-XX/<first child>/Unknown Study/
        study_dir = next(case_dir.iterdir()) / 'Unknown Study'
        largest = get_largest_dir(list(study_dir.iterdir()))
        if largest is not None:
            vol_dirs.append(largest)
    return vol_dirs
def get_volume_gen(volume_dirs, rescale=None, tf_pts=None):
    ''' Make a generator that loads volumes from a list of volume directories, `volume_dirs`.

    Yields:
        (vol, tf_pts, vox_scl, vol_name): the voxel volume, the transfer
        function points (the given `tf_pts`, or ones derived per volume from
        its histogram), the voxel scale normalized so its smallest component
        is 1, and the case name taken from the directory path.
    '''
    def vol_gen():
        for vol_dir in volume_dirs:
            with hidden_errors():
                try:
                    vol, dcm = read_dicom_folder(vol_dir, rescale)
                    # Bug fix: this previously read `dicom.PixelSpacing`, but
                    # the dataset variable is `dcm` (NameError at runtime).
                    vox_scl = np.array([dcm.PixelSpacing[0], dcm.PixelSpacing[1], dcm.SliceThickness]).astype(np.float32)
                    vox_scl /= vox_scl.min()
                    vol_name = str(vol_dir.parent.parent.parent.name)
                    # Use a per-volume local instead of rebinding `tf_pts`:
                    # the old assignment made `tf_pts` local to the closure,
                    # so the `is None` test raised UnboundLocalError.
                    cur_tf_pts = tf_pts
                    if cur_tf_pts is None:
                        # Bug fix: `normalized_vol` was never defined; derive
                        # the peaks from the loaded volume itself.
                        # TODO(review): confirm whether get_histogram_peaks
                        # expects a normalized copy of the volume.
                        peaks = get_histogram_peaks(vol)
                        cur_tf_pts = get_trapezoid_tf_points_from_peaks(peaks)
                except dicom_numpy.DicomImportException:
                    print(f'Could not load {vol_dir}')
                    continue
            yield vol, cur_tf_pts, vox_scl, vol_name
    return vol_gen()
# Names exported by `from volume_loader import *`.
__all__ = ['read_dicom_folder', 'get_largest_dir', 'get_volume_gen', 'get_volume_dirs']
| xeTaiz/dvao | volume_loader.py | volume_loader.py | py | 2,556 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "pydicom.dcmread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dicom_numpy.combine_slices",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",... |
2051327559 | ##https://towardsdatascience.com/develop-a-nlp-model-in-python-deploy-it-with-flask-step-by-step-744f3bdd7776
from flask import Flask, request, jsonify,render_template,redirect,flash
import pandas as pd
import matplotlib.pyplot as plt
#from flask_cors import CORS
from data_Preprocessing import DataPreprocessing
from vectorization import Embedding
from models import model
import os
from predict_model import predict
from dataVisualization import DataVisualization
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
app = Flask(__name__, static_url_path='')
#app = Flask(__name__, static_url_path = "/static", static_folder = "static")
@app.route('/')
def home():
    """Serve the landing page."""
    landing_page = 'index.html'
    return render_template(landing_page)
@app.route("/upload", methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
#print(request.files['file-7[]'])
#import pdb;pdb.set_trace();
f = request.files['file-7[]']
#data_xls = pd.read_excel(f)
resp = DataPreprocessing()
data_df = resp.preprocessing(f)
#print(data_df)
##Object for Vectorization
target_names = ['Cancellation_Rescheduling','EDI_CustomerProgram','Escalation_Linedown',
'Logistic_changes','MDL_Exclusion','NewPO_Forecast',
'OrderEnquiry','Other','POChanges','RMA' ]
class_vector = Embedding()
X_train, X_test, Y_train, Y_test=class_vector.input_data(data_df)
count_train,count_test = class_vector.Countvectorization(X_train, X_test)
tfidf_train,tfidf_test = class_vector.TfIdfVectorization(X_train, X_test)
##Created Objects for models
models=model()
vis = DataVisualization()
##multinomialNB
nb_pred_test,nb_pred_test_tfidf=models.multinomialNB(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("----NaiveBayes model Using Count Vectors----")
print(classification_report(Y_test, nb_pred_test))
nbcm1 = confusion_matrix(Y_test, nb_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm1, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/NB_CountVector.png')
##confusion matrix and classification report using Tfidf
print("------NaiveBayes model Using Tfidf -----")
nbcm2 = confusion_matrix(Y_test,nb_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/NB_TfIdf.png')
##supportVectorMachine
svmc_pred_test,svmc_pred_test_tfidf = models.supportVectorMachine(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("----SVM Using Count Vectors----")
print(classification_report(Y_test, svmc_pred_test))
svmcm1 = confusion_matrix(Y_test,svmc_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/svmCount.png')
##confusion matrix and classification report using Tfidf
print("--------SVM Tfidf------")
print(classification_report(Y_test,svmc_pred_test_tfidf))
svmcm1 = confusion_matrix(Y_test,svmc_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/svmTfidf.png')
##decisionTreeClassifier
dtc_pred_test,dtc_pred_test_tfidf=models.decisionTreeClassifier(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("--------Decision CountVector------")
print(classification_report(Y_test,dtc_pred_test))
dtc1 = confusion_matrix(Y_test,dtc_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/dtc_Count.png')
##confusion matrix and classification report using Tfidf
print("--------Decision tfidf------")
print(classification_report(Y_test,dtc_pred_test_tfidf))
dtc2 = confusion_matrix(Y_test,dtc_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/dtc_Tfidf.png')
##randomClassifier
random_pred_test,random_pred_test_tfidf=models.randomClassifier(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("--------RandomForest CountVector------")
print(classification_report(Y_test,random_pred_test))
randomclassifier1 = confusion_matrix(Y_test,random_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/RF_Count.png')
##confusion matrix and classification report using Tfidf
print("--------RandomForest tfidf------")
print(classification_report(Y_test,dtc_pred_test_tfidf))
randomclassifier2 = confusion_matrix(Y_test,random_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/RF_Tfidf.png')
##LogisticRegression
logeg_test,logreg_tfidf_test= models.LogisticRegression(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("--------LogisticRegression CountVector------")
print(classification_report(Y_test,logeg_test))
randomclassifier1 = confusion_matrix(Y_test,logeg_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/logreg_Count.png')
##confusion matrix and classification report using Tfidf
print("--------LogisticRegression tfidf------")
print(classification_report(Y_test,logreg_tfidf_test))
randomclassifier2 = confusion_matrix(Y_test,logreg_tfidf_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/logreg_Tfidf.png')
##XGBootClassification
xgb_pred_test,xgb_pred_test_tfidf=models.XGBootClassification(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("-------- XGBootClassification CountVector------")
print(classification_report(Y_test,xgb_pred_test))
randomclassifier1 = confusion_matrix(Y_test,xgb_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/xgb_Count.png')
##confusion matrix and classification report using Tfidf
print("--------XGBootClassification tfidf------")
print(classification_report(Y_test,xgb_pred_test_tfidf))
randomclassifier2 = confusion_matrix(Y_test,xgb_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/xgb_Tfidf.png')
##KNNCLassification
modelknn_test, modelknn_tfidf_test = models.KNNCLassification(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("-------- KNN Classification CountVector------")
print(classification_report(Y_test,modelknn_test))
randomclassifier1 = confusion_matrix(Y_test,modelknn_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/knn_Count.png')
##confusion matrix and classification report using Tfidf
print("--------KNN Classification tfidf------")
print(classification_report(Y_test,modelknn_tfidf_test))
randomclassifier2 = confusion_matrix(Y_test,modelknn_tfidf_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/knn_Tfidf.png')
return render_template('home.html')
#return 'File Uploaded successfully'
#print(data_xls)
#return data_xls.to_html()
return render_template('file.html')
#return "File uploaded successfully"
@app.route("/predict", methods=['GET', 'POST'])
def predictor():
p = predict()
if request.method == 'POST':
message = request.form['mail']
data = [message]
result = p.prediction(data)
#result = str(result)
#print(result)
#print(type(result))
return render_template('sample.html', tables=[result.to_html(classes='data')], titles=result.columns.values)
#return result
return render_template('predict.html')
@app.route("/evalute")
def evalute():
return render_template('dash.html')
@app.route("/export", methods=['GET'])
def export_records():
return
if __name__ == "__main__":
app.run()
| Pooja-AI/Email-Classification | file.py | file.py | py | 10,841 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.req... |
8591398196 | import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import numpy as np
from itertools import chain
class SurgicalFineTuningBert(nn.Module):
    """Wraps a HuggingFace BERT classifier together with a frozen deep copy
    of itself and learns per-stage interpolation weights (`alphas`): each
    stage's output is blended as
    ``sigmoid(alpha) * trainable + (1 - sigmoid(alpha)) * frozen``.
    """

    def __init__(
        self,
        bert_model,
    ) -> None:
        super().__init__()
        self.get_extended_attention_mask = bert_model.get_extended_attention_mask
        # copy the model: 'opti_*' modules remain trainable, 'frozen_*' are
        # deep copies whose gradients are disabled below.
        self.opti_embedding_block = bert_model.bert.embeddings
        self.frozen_embedding_block = copy.deepcopy(self.opti_embedding_block)
        self.opti_bert_layers = bert_model.bert.encoder.layer
        self.frozen_bert_layers = copy.deepcopy(self.opti_bert_layers)
        self.opti_bert_pooler = bert_model.bert.pooler
        self.frozen_bert_pooler = copy.deepcopy(self.opti_bert_pooler)
        self.opti_bert_classifier = bert_model.classifier
        self.frozen_bert_classifier = copy.deepcopy(self.opti_bert_classifier)
        frozen_params = chain(
            self.frozen_embedding_block.parameters(),
            self.frozen_bert_layers.parameters(),
            self.frozen_bert_pooler.parameters(),
            self.frozen_bert_classifier.parameters(),
        )
        for param in frozen_params:
            param.requires_grad = False
        self.dropout = nn.Sequential(bert_model.dropout)
        # One alpha per encoder layer plus extras; smaller checkpoints get
        # three extra slots (embeddings, pooler, classifier), others one.
        # NOTE(review): forward() reads alphas[0], alphas[:-2], alphas[-2]
        # and alphas[-1]; with len == n_layers + 3 the slice alphas[:-2]
        # overlaps index 0 and has one unused trailing element -- confirm
        # the intended indexing.
        if (
            "bert-small" in bert_model.name_or_path
            or "bert-med" in bert_model.name_or_path
        ):
            self.alphas = nn.Parameter(
                torch.zeros(len(bert_model.bert.encoder.layer) + 3)
            )
        else:
            self.alphas = nn.Parameter(
                torch.zeros(len(bert_model.bert.encoder.layer) + 1)
            )

    def forward(self, x):
        """Blend trainable and frozen submodules per stage.

        `x` is a dict with "input_ids" and "attention_mask" tensors.
        """
        input_ids, attention_mask = x["input_ids"], x["attention_mask"]
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_ids.size()
        )
        # Split the alpha vector into per-stage blend weights.
        alpha_embeddings, alphas_layers, alpha_pooler, alpha_classifier = (
            self.alphas[0],
            self.alphas[:-2],
            self.alphas[-2],
            self.alphas[-1],
        )
        # Separate trainable/frozen embedding outputs feed layer 0 directly.
        x_opti, x_frozen = self.opti_embedding_block(
            input_ids
        ), self.frozen_embedding_block(input_ids)
        a = alpha_embeddings.sigmoid()
        # NOTE(review): this blended embedding is overwritten on the first
        # loop iteration (layer 0 consumes x_opti/x_frozen, not x), so the
        # embedding alpha has no effect here -- confirm intended behaviour.
        x = a * self.opti_embedding_block(input_ids) + (
            1 - a
        ) * self.frozen_embedding_block(input_ids)
        for i in range(len(self.opti_bert_layers)):
            a = alphas_layers[i].sigmoid()
            if i > 0:
                # From layer 1 on, both branches start from the blended x.
                x_opti, x_frozen = x, x
            x = (
                a
                * self.opti_bert_layers[i](
                    x_opti, attention_mask=extended_attention_mask
                )[0]
                + (1 - a)
                * self.frozen_bert_layers[i](
                    x_frozen, attention_mask=extended_attention_mask
                )[0]
            )
        a = alpha_pooler.sigmoid()
        x = a * self.opti_bert_pooler(x) + (1 - a) * self.frozen_bert_pooler(x)
        x = self.dropout(x)
        a = alpha_classifier.sigmoid()
        x = a * self.opti_bert_classifier(x) + (1 - a) * self.frozen_bert_classifier(x)
        return x

    def forward_alphas(self, x, alphas):
        """Same as forward(), but blends with an externally supplied `alphas`
        vector instead of the module's own parameter."""
        alpha_embeddings, alphas_layers, alpha_pooler, alpha_classifier = (
            alphas[0],
            alphas[:-2],
            alphas[-2],
            alphas[-1],
        )
        input_ids, attention_mask = x["input_ids"], x["attention_mask"]
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_ids.size()
        )
        a = alpha_embeddings.sigmoid()
        # NOTE(review): as in forward(), this blend is unused at layer 0.
        x = a * self.opti_embedding_block(input_ids) + (
            1 - a
        ) * self.frozen_embedding_block(input_ids)
        x_opti, x_frozen = self.opti_embedding_block(
            input_ids
        ), self.frozen_embedding_block(input_ids)
        for i in range(len(self.opti_bert_layers)):
            a = alphas_layers[i].sigmoid()
            if i > 0:
                x_opti, x_frozen = x, x
            x = (
                a
                * self.opti_bert_layers[i](
                    x_opti, attention_mask=extended_attention_mask
                )[0]
                + (1 - a)
                * self.frozen_bert_layers[i](
                    x_frozen, attention_mask=extended_attention_mask
                )[0]
            )
        a = alpha_pooler.sigmoid()
        x = a * self.opti_bert_pooler(x) + (1 - a) * self.frozen_bert_pooler(x)
        x = self.dropout(x)
        a = alpha_classifier.sigmoid()
        x = a * self.opti_bert_classifier(x) + (1 - a) * self.frozen_bert_classifier(x)
        return x

    def get_alphas(self):
        """Return the blend weights as plain floats after the sigmoid."""
        return [float(a.sigmoid()) for a in self.alphas]
| AntoineBigeard/NLPSurgicalFineTuning | src/pimped_bert.py | pimped_bert.py | py | 4,780 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_n... |
11831438569 | import discord
from discord.ext import commands
from discord.commands import Option
from commands.funcs.yatta_gif import yatta_gif
# List of commands here:
# /yattagif
class Gif(commands.Cog, description='Gif maker'):
    """Cog bundling the /yattagif slash command."""

    def __init__(self, bot):
        self.bot = bot
        self.footer = "Developed by jej#6495 for Snowcodes 2022 โ๏ธ"

    @commands.slash_command()
    async def yattagif(
            self,
            ctx: discord.ApplicationContext,
            member: Option(discord.Member, "member", required=False)):
        """Get Yatta'd"""
        # Default to the command invoker when no member was supplied.
        target = ctx.author if member is None else member
        avatar_url = str(target.display_avatar)
        yatta_gif(avatar_url)
        # Acknowledge first; the gif was already written above.
        await ctx.respond("Loading....")
        embed = discord.Embed(
            title='I will reverse all creations!',
            description="",
            colour=discord.Colour.from_rgb(247, 168, 178)
        )
        embed.set_image(url="attachment://result.gif")
        embed.set_footer(text=self.footer)
        gif_file = discord.File("assets/images/yatta/result.gif", filename="result.gif")
        await ctx.edit(content=None, file=gif_file, embed=embed)
def setup(bot):
    """Entry point used by bot.load_extension to register this cog."""
    gif_cog = Gif(bot)
    bot.add_cog(gif_cog)
| jej-v/snowcodes2022 | commands/yatta.py | yatta.py | py | 1,201 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "discord.ApplicationContext",
"line_number": 17,
"usage_type": "attribute"
},
{
"... |
16010531173 | '''
api_test.py
Jeff Ondich, 11 April 2016
Ethan Somes, 13 April, 2017
Revised from Jeff's example for CS 257 Software Design. How to retrieve results
from an HTTP-based API, parse the results (JSON in this case),
and manage the potential errors.
'''
import sys
import argparse
import json
import urllib.request
import re
def get_headlines(source, sorting):
    '''
    Return a list of {'title': ..., 'author': ...} dicts for articles from
    `source`, sorted as requested.

    The source parameter must be one of the 70 sources formatted as seen on
    https://newsapi.org/sources. The sorting can be top, latest, or popular.

    Raises exceptions on network connection errors and when an article's
    author or title is not a string (e.g. the API returned null).
    '''
    base_url = 'https://newsapi.org/v1/articles?source='
    apiKey = '&apiKey=82cf4993bd7b404abae9673a74e61d01'
    url = base_url + source + '&sortby=' + sorting + apiKey
    data_from_server = urllib.request.urlopen(url).read()
    string_from_server = data_from_server.decode('utf-8')
    loaded = json.loads(string_from_server)
    result_list = []
    for article_dictionary in loaded['articles']:
        title = article_dictionary['title']
        author = article_dictionary['author']
        # isinstance is the idiomatic type check (was: type(x) != type('')).
        if not isinstance(author, str):
            raise Exception('author has wrong type: "{0}"'.format(author))
        if not isinstance(title, str):
            raise Exception('title has wrong type: "{0}"'.format(title))
        result_list.append({'title': title, 'author': author})
    return result_list
def get_Description(source, title):
    '''
    Return {'title': ..., 'description': ...} for the article whose title
    matches `title` among the current articles of `source`. Titles are
    compared with all non-alphanumeric characters stripped, ignoring case;
    'description' is the empty string when no article matches.

    The source parameter must be one of the sources formatted as seen on
    https://newsapi.org/sources.

    Raises exceptions on network connection errors and on data format errors.
    '''
    base_url = 'https://newsapi.org/v1/articles?source='
    apiKey = '&apiKey=82cf4993bd7b404abae9673a74e61d01'
    url = base_url + source + apiKey
    data_from_server = urllib.request.urlopen(url).read()
    string_from_server = data_from_server.decode('utf-8')
    loaded = json.loads(string_from_server)
    description = ""
    # Normalize the wanted title once (it is loop-invariant).
    wanted = re.sub("[^a-z0-9]+", "", title, flags=re.IGNORECASE)
    for article_dictionary in loaded['articles']:
        candidate = re.sub("[^a-z0-9]+", "", article_dictionary['title'], flags=re.IGNORECASE)
        if candidate == wanted:
            description = article_dictionary['description']
    if not isinstance(description, str):
        # Bug fix: the old message formatted an undefined name `text`,
        # raising NameError instead of the intended exception.
        raise Exception('description has wrong type: "{0}"'.format(description))
    return {'title': title, 'description': description}
def main():
    """Interactive entry point for the newsapi.org client.

    Prompts for a mode ("description" or "list") and a news-source code,
    then either prints one article's description (via get_Description) or
    the source's headlines with authors (via get_headlines). Any other
    mode input falls through silently.
    """
    print("This program allows you to look at news headlines from many sources!")
    print("The 70 news sources available and thier codes are listed here: https://newsapi.org/sources")
    print("You can either search for the description of a particular article from a source,")
    print("or you can search for a list of headlines from a source.")
    userInput = input("Would you like a description or a list of headlines? Enter description or list. ")
    source = input("Enter a news source code: ")
    if userInput == "description":
        title = input("What is the title of the article you want to look at?")
        description_Dict = get_Description(source, title)
        print("Title: " + title)
        print(description_Dict['description'])
    elif userInput == "list":
        sorting = input("How would you like the list to be sorted? Enter top, latest, or popular. ")
        headlines_list = get_headlines(source, sorting)
        for dictionary in headlines_list:
            print(dictionary['title'] + ": " + dictionary['author'])


if __name__ == '__main__':
    main()
# When I use argparse to parse my command line, I usually
# put the argparse setup here in the global code, and then
# call a function called main to do the actual work of
# the program.
''' parser = argparse.ArgumentParser(description='Get word info from the Ultralingua API')
parser.add_argument('action',
metavar='action',
help='action to perform ("description" or "list")',
choices=['description', 'list'])
parser.add_argument('source',
metavar='source',
help='The source parameter must one of the 70 souces formatted as seen on https://newsapi.org/sources',
parser.add_argument('sorting', help='the word you want to act on')
parser.add_argument('word', help='the word you want to act on')
args = parser.parse_args()
main(args)''' | NylaWorker/TrebuchetPhysicsSimulation | CS257/API.py | API.py | py | 5,087 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_nam... |
11519320722 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
from types import FunctionType
from fdm import central_fdm
from lab import B
from plum import Dispatcher, Self, Referentiable, type_parameter, Union
from .util import uprank
from .input import Input, At, MultiInput
from .kernel import ZeroKernel, PosteriorKernel, TensorProductKernel, \
CorrectiveKernel, OneKernel
from .lazy import LazyVector, LazyMatrix
from .matrix import matrix, Diagonal, dense
from .mean import PosteriorMean, ZeroMean, OneMean
from .mokernel import MultiOutputKernel as MOK
from .momean import MultiOutputMean as MOM
from .random import Random, PromisedGP, RandomProcess, Normal
__all__ = ['GP',
'model',
'Graph',
'AbstractObservations',
'Observations', 'Obs',
'SparseObservations', 'SparseObs']
log = logging.getLogger(__name__)
def ensure_at(x, ref=None):
    """Ensure that an input location is typed with `At` so that it is tied
    to a specific process.

    Args:
        x (input): Input location.
        ref (:class:`.graph.GP`, optional): Reference process. If provided
            and `x` is not already an `At` instance, `x` is assumed to
            belong to `ref`.

    Returns:
        :class:`.input.At`: Input, instance of `At`.
    """
    # Already tagged with a process: nothing to do.
    if isinstance(x, At):
        return x
    # No reference to fall back on: the caller must disambiguate.
    if ref is None:
        raise ValueError('Must explicitly specify the processes which to '
                         'condition on.')
    # Attach the location to the reference process.
    return ref(x)
class AbstractObservations(Referentiable):
    """Abstract base class for observations.

    Concrete subclasses (e.g. `Observations`, `SparseObservations`) must
    implement `posterior_kernel` and `posterior_mean`. After construction,
    `self.x` is an `At`-typed input, `self.y` the observed values, and
    `self.graph` the graph of the observed process.
    """
    _dispatch = Dispatcher(in_class=Self)

    @_dispatch({B.Numeric, Input}, B.Numeric, [PromisedGP])
    def __init__(self, x, y, ref=None):
        # Single-process case: one set of locations and observations.
        self._ref = ref
        self.x = ensure_at(x, self._ref)
        self.y = y
        self.graph = type_parameter(self.x).graph

    @_dispatch([Union(tuple, list, PromisedGP)])
    def __init__(self, *pairs, **kw_args):
        # Multi-process case: a sequence of (locations, observations) pairs.
        # Check whether there's a reference.
        self._ref = kw_args['ref'] if 'ref' in kw_args else None

        # Ensure `At` for all pairs.
        pairs = [(ensure_at(x, self._ref), y) for x, y in pairs]

        # Get the graph from the first pair.
        self.graph = type_parameter(pairs[0][0]).graph

        # Extend the graph by the Cartesian product `p` of all processes.
        p = self.graph.cross(*self.graph.ps)

        # Condition on the newly created vector-valued GP.
        xs, ys = zip(*pairs)
        self.x = p(MultiInput(*xs))
        self.y = B.concat(*[uprank(y) for y in ys], axis=0)

    @_dispatch({tuple, list})
    def __ror__(self, ps):
        # Support the `(p1, p2) | obs` shorthand for conditioning.
        return self.graph.condition(ps, self)

    def posterior_kernel(self, p_i, p_j):  # pragma: no cover
        """Get the posterior kernel between two processes.

        Args:
            p_i (:class:`.graph.GP`): First process.
            p_j (:class:`.graph.GP`): Second process.

        Returns:
            :class:`.kernel.Kernel`: Posterior kernel between the first and
                second process.
        """
        raise NotImplementedError('Posterior kernel construction not '
                                  'implemented.')

    def posterior_mean(self, p):  # pragma: no cover
        """Get the posterior kernel of a process.

        Args:
            p (:class:`.graph.GP`): Process.

        Returns:
            :class:`.mean.Mean`: Posterior mean of `p`.
        """
        raise NotImplementedError('Posterior mean construction not '
                                  'implemented.')
class Observations(AbstractObservations, Referentiable):
    """Observations.

    Can alternatively construct an instance of `Observations` with tuples or
    lists of valid constructors.

    Args:
        x (input): Locations of points to condition on.
        y (tensor): Observations to condition on.
        ref (:class:`.class.GP`, optional): Reference process. See
            :func:`.graph.ensure_at`.
    """
    _dispatch = Dispatcher(in_class=Self)

    def __init__(self, *args, **kw_args):
        AbstractObservations.__init__(self, *args, **kw_args)
        # Cache for the kernel matrix of the data; built lazily by `K_x`.
        self._K_x = None

    @property
    def K_x(self):
        """Kernel matrix of the data."""
        if self._K_x is None:  # Cache computation.
            p_x, x = type_parameter(self.x), self.x.get()
            self._K_x = matrix(self.graph.kernels[p_x](x))
        return self._K_x

    def posterior_kernel(self, p_i, p_j):
        # Exact GP posterior covariance between `p_i` and `p_j` given the
        # observed process/locations.
        p_x, x = type_parameter(self.x), self.x.get()
        return PosteriorKernel(self.graph.kernels[p_i, p_j],
                               self.graph.kernels[p_x, p_i],
                               self.graph.kernels[p_x, p_j],
                               x, self.K_x)

    def posterior_mean(self, p):
        # Exact GP posterior mean of `p` given the observations `self.y`.
        p_x, x = type_parameter(self.x), self.x.get()
        return PosteriorMean(self.graph.means[p],
                             self.graph.means[p_x],
                             self.graph.kernels[p_x, p],
                             x, self.K_x, self.y)
class SparseObservations(AbstractObservations, Referentiable):
    """Observations through inducing points. Takes further arguments
    according to the constructor of :class:`.graph.Observations`.

    Attributes:
        elbo (scalar): ELBO.

    Args:
        z (input): Locations of the inducing points.
        e (:class:`.graph.GP`): Additive, independent noise process.
    """
    _dispatch = Dispatcher(in_class=Self)

    @_dispatch({B.Numeric, Input, tuple, list},
               [Union(tuple, list, PromisedGP)])
    def __init__(self, z, *pairs, **kw_args):
        # Multi-process observations: each pair is (noise process, x, y).
        es, xs, ys = zip(*pairs)
        AbstractObservations.__init__(self, *zip(xs, ys), **kw_args)
        SparseObservations.__init__(self,
                                    z,
                                    self.graph.cross(*es),
                                    self.x,
                                    self.y,
                                    **kw_args)

    @_dispatch({list, tuple},
               PromisedGP,
               {B.Numeric, Input},
               B.Numeric,
               [PromisedGP])
    def __init__(self, zs, e, x, y, ref=None):
        # Multiple inducing-point sets: merge them into one multi-output
        # input before delegating to the base constructor below.
        # Ensure `At` everywhere.
        zs = [ensure_at(z, ref=ref) for z in zs]

        # Extract graph.
        graph = type_parameter(zs[0]).graph

        # Create a representative multi-output process.
        p_z = graph.cross(*(type_parameter(z) for z in zs))

        SparseObservations.__init__(self,
                                    p_z(MultiInput(*zs)),
                                    e, x, y, ref=ref)

    @_dispatch({B.Numeric, Input},
               PromisedGP,
               {B.Numeric, Input},
               B.Numeric,
               [PromisedGP])
    def __init__(self, z, e, x, y, ref=None):
        AbstractObservations.__init__(self, x, y, ref=ref)
        self.z = ensure_at(z, self._ref)
        self.e = e
        # Lazily computed variational quantities; see `_compute`.
        self._K_z = None
        self._elbo = None
        self._mu = None
        self._A = None

    @property
    def K_z(self):
        """Kernel matrix of the data."""
        if self._K_z is None:  # Cache computation.
            self._compute()
        return self._K_z

    @property
    def elbo(self):
        """ELBO."""
        if self._elbo is None:  # Cache computation.
            self._compute()
        return self._elbo

    @property
    def mu(self):
        """Mean of optimal approximating distribution."""
        if self._mu is None:  # Cache computation.
            self._compute()
        return self._mu

    @property
    def A(self):
        """Parameter of the corrective variance of the kernel of the optimal
        approximating distribution."""
        if self._A is None:  # Cache computation.
            self._compute()
        return self._A

    def _compute(self):
        # Compute and cache `K_z`, `A`, `mu`, and the ELBO in one pass,
        # since they share the same intermediate factorizations.
        # Extract processes.
        p_x, x = type_parameter(self.x), self.x.get()
        p_z, z = type_parameter(self.z), self.z.get()

        # Construct the necessary kernel matrices.
        K_zx = self.graph.kernels[p_z, p_x](z, x)
        self._K_z = matrix(self.graph.kernels[p_z](z))

        # Evaluating `e.kernel(x)` will yield incorrect results if `x` is a
        # `MultiInput`, because `x` then still designates the particular
        # components of `f`. Fix that by instead designating the elements of
        # `e`.
        if isinstance(x, MultiInput):
            x_n = MultiInput(*(p(xi.get())
                               for p, xi in zip(self.e.kernel.ps, x.get())))
        else:
            x_n = x

        # Construct the noise kernel matrix.
        K_n = self.e.kernel(x_n)

        # The approximation can only handle diagonal noise matrices.
        if not isinstance(K_n, Diagonal):
            raise RuntimeError('Kernel matrix of noise must be diagonal.')

        # And construct the components for the inducing point approximation.
        L_z = B.cholesky(self._K_z)
        self._A = B.eye(self._K_z) + \
                  B.qf(K_n, B.transpose(B.trisolve(L_z, K_zx)))
        y_bar = uprank(self.y) - self.e.mean(x_n) - self.graph.means[p_x](x)
        prod_y_bar = B.trisolve(L_z, B.qf(K_n, B.transpose(K_zx), y_bar))

        # Compute the optimal mean.
        self._mu = self.graph.means[p_z](z) + \
                   B.qf(self._A, B.trisolve(L_z, self._K_z), prod_y_bar)

        # Compute the ELBO.
        # NOTE: The calculation of `trace_part` asserts that `K_n` is diagonal.
        # The rest, however, is completely generic.
        trace_part = B.ratio(Diagonal(self.graph.kernels[p_x].elwise(x)[:, 0]) -
                             Diagonal(B.qf_diag(self._K_z, K_zx)), K_n)
        det_part = B.logdet(2 * B.pi * K_n) + B.logdet(self._A)
        qf_part = B.qf(K_n, y_bar)[0, 0] - B.qf(self._A, prod_y_bar)[0, 0]
        self._elbo = -0.5 * (trace_part + det_part + qf_part)

    def posterior_kernel(self, p_i, p_j):
        # Posterior through the inducing points, plus a corrective term for
        # the variance of the approximating distribution.
        p_z, z = type_parameter(self.z), self.z.get()
        return PosteriorKernel(self.graph.kernels[p_i, p_j],
                               self.graph.kernels[p_z, p_i],
                               self.graph.kernels[p_z, p_j],
                               z, self.K_z) + \
               CorrectiveKernel(self.graph.kernels[p_z, p_i],
                                self.graph.kernels[p_z, p_j],
                                z, self.A, self.K_z)

    def posterior_mean(self, p):
        # Posterior mean through the optimal approximating mean `mu`.
        p_z, z = type_parameter(self.z), self.z.get()
        return PosteriorMean(self.graph.means[p],
                             self.graph.means[p_z],
                             self.graph.kernels[p_z, p],
                             z, self.K_z, self.mu)
Obs = Observations #: Shorthand for `Observations`.
SparseObs = SparseObservations #: Shorthand for `SparseObservations`.
class Graph(Referentiable):
    """A GP model.

    Tracks a collection of processes (`ps`) together with lazily constructed
    mean functions (`means`) and pairwise kernels (`kernels`). New processes
    derived by arithmetic or transformations are registered via `_update`,
    which installs rules for their cross-kernels with every existing process.
    """
    _dispatch = Dispatcher(in_class=Self)

    def __init__(self):
        self.ps = []            # processes attached to this graph
        self.pids = set()       # `id`s of attached processes
        self.kernels = LazyMatrix()   # pairwise (cross-)kernels
        self.means = LazyVector()     # per-process means
        # Store named GPs in both ways.
        self.gps_by_name = {}
        self.names_by_gp = {}

    @_dispatch(str)
    def __getitem__(self, name):
        # Look up a GP by its name.
        return self.gps_by_name[name]

    @_dispatch(PromisedGP)
    def __getitem__(self, p):
        # Look up the name of a GP.
        return self.names_by_gp[id(p)]

    @_dispatch(PromisedGP, str)
    def name(self, p, name):
        """Name a GP.

        Args:
            p (:class:`.graph.GP`): GP to name.
            name (str): Name. Must be unique.

        Raises:
            RuntimeError: If the name is already taken by another GP.
        """
        # Delete any existing names and back-references for the GP.
        if id(p) in self.names_by_gp:
            del self.gps_by_name[self.names_by_gp[id(p)]]
            del self.names_by_gp[id(p)]

        # Check that name is not in use.
        if name in self.gps_by_name:
            raise RuntimeError('Name "{}" for "{}" already taken by "{}".'
                               ''.format(name, p, self[name]))

        # Set the name and the back-reference.
        self.gps_by_name[name] = p
        self.names_by_gp[id(p)] = name

    def _add_p(self, p):
        # Register a process with the graph.
        self.ps.append(p)
        self.pids.add(id(p))

    def _update(self, mean, k_ii_generator, k_ij_generator):
        # Create a new GP with the given mean and install lazy rules for its
        # own kernel and its cross-kernels with every existing process.
        p = GP(self)
        self.means[p] = mean
        self.kernels.add_rule((p, p), self.pids, k_ii_generator)
        self.kernels.add_rule((p, None), self.pids, k_ij_generator)
        # The cross-kernel in the opposite order is obtained by reversal.
        self.kernels.add_rule((None, p), self.pids,
                              lambda pi: reversed(self.kernels[p, pi]))
        self._add_p(p)
        return p

    def add_independent_gp(self, p, kernel, mean):
        """Add an independent GP to the model.

        Args:
            p (:class:`.graph.GP`): GP object to add.
            kernel (:class:`.kernel.Kernel`): Kernel function of GP.
            mean (:class:`.mean.Mean`): Mean function of GP.

        Returns:
            :class:`.graph.GP`: The newly added independent GP.
        """
        # Update means.
        self.means[p] = mean
        # Add rule to kernels: independence means zero cross-kernels.
        self.kernels[p] = kernel
        self.kernels.add_rule((p, None), self.pids, lambda pi: ZeroKernel())
        self.kernels.add_rule((None, p), self.pids, lambda pi: ZeroKernel())
        self._add_p(p)
        return p

    @_dispatch(object, PromisedGP)
    def sum(self, other, p):
        """Sum a GP from the graph with another object.

        Args:
            obj1 (other type or :class:`.graph.GP`): First term in the sum.
            obj2 (other type or :class:`.graph.GP`): Second term in the sum.

        Returns:
            :class:`.graph.GP`: The GP corresponding to the sum.
        """
        # Addition is commutative; delegate to the (GP, object) overload.
        return self.sum(p, other)

    @_dispatch(PromisedGP, object)
    def sum(self, p, other):
        # Adding a constant/function shifts the mean but not the kernel.
        return self._update(self.means[p] + other,
                            lambda: self.kernels[p],
                            lambda pi: self.kernels[p, pi])

    @_dispatch(PromisedGP, PromisedGP)
    def sum(self, p1, p2):
        # Check that the GPs are on the same graph.
        if p1.graph != p2.graph:
            raise RuntimeError('Can only add GPs from the same graph.')
        # Sum of (possibly correlated) GPs: kernel includes cross terms.
        return self._update(self.means[p1] + self.means[p2],
                            (lambda: self.kernels[p1] +
                                     self.kernels[p2] +
                                     self.kernels[p1, p2] +
                                     self.kernels[p2, p1]),
                            lambda pi: self.kernels[p1, pi] +
                                       self.kernels[p2, pi])

    @_dispatch(PromisedGP, B.Numeric)
    def mul(self, p, other):
        """Multiply a GP from the graph with another object.

        Args:
            p (:class:`.graph.GP`): GP in the product.
            other (object): Other object in the product.

        Returns:
            :class:`.graph.GP`: The GP corresponding to the product.
        """
        # Scaling by a constant scales the kernel by the constant squared.
        return self._update(self.means[p] * other,
                            lambda: self.kernels[p] * other ** 2,
                            lambda pi: self.kernels[p, pi] * other)

    @_dispatch(PromisedGP, FunctionType)
    def mul(self, p, f):
        # Multiplication by a deterministic function `f` of the input.
        def ones(x):
            return B.ones(B.dtype(x), B.shape(x)[0], 1)

        return self._update(f * self.means[p],
                            lambda: f * self.kernels[p],
                            (lambda pi: TensorProductKernel(f, ones) *
                                        self.kernels[p, pi]))

    def shift(self, p, shift):
        """Shift a GP.

        Args:
            p (:class:`.graph.GP`): GP to shift.
            shift (object): Amount to shift by.

        Returns:
            :class:`.graph.GP`: The shifted GP.
        """
        return self._update(self.means[p].shift(shift),
                            lambda: self.kernels[p].shift(shift),
                            lambda pi: self.kernels[p, pi].shift(shift, 0))

    def stretch(self, p, stretch):
        """Stretch a GP.

        Args:
            p (:class:`.graph.GP`): GP to stretch.
            stretch (object): Extent of stretch.

        Returns:
            :class:`.graph.GP`: The stretched GP.
        """
        return self._update(self.means[p].stretch(stretch),
                            lambda: self.kernels[p].stretch(stretch),
                            lambda pi: self.kernels[p, pi].stretch(stretch, 1))

    def select(self, p, *dims):
        """Select input dimensions.

        Args:
            p (:class:`.graph.GP`): GP to select input
                dimensions from.
            *dims (object): Dimensions to select.

        Returns:
            :class:`.graph.GP`: GP with the specific input dimensions.
        """
        return self._update(self.means[p].select(dims),
                            lambda: self.kernels[p].select(dims),
                            lambda pi: self.kernels[p, pi].select(dims, None))

    def transform(self, p, f):
        """Transform the inputs of a GP.

        Args:
            p (:class:`.graph.GP`): GP to input transform.
            f (function): Input transform.

        Returns:
            :class:`.graph.GP`: Input-transformed GP.
        """
        return self._update(self.means[p].transform(f),
                            lambda: self.kernels[p].transform(f),
                            lambda pi: self.kernels[p, pi].transform(f, None))

    def diff(self, p, dim=0):
        """Differentiate a GP.

        Args:
            p (:class:`.graph.GP`): GP to differentiate.
            dim (int, optional): Dimension of feature which to take the
                derivative with respect to. Defaults to `0`.

        Returns:
            :class:`.graph.GP`: Derivative of GP.
        """
        return self._update(self.means[p].diff(dim),
                            lambda: self.kernels[p].diff(dim),
                            lambda pi: self.kernels[p, pi].diff(dim, None))

    @_dispatch({list, tuple}, AbstractObservations)
    def condition(self, ps, obs):
        """Condition the graph on observations.

        Args:
            ps (list[:class:`.graph.GP`]): Processes to condition.
            obs (:class:`.graph.AbstractObservations`): Observations to
                condition on.

        Returns:
            list[:class:`.graph.GP`]: Posterior processes.
        """

        # A construction like this is necessary to properly close over `p`.
        def build_gens(p):
            def k_ij_generator(pi):
                return obs.posterior_kernel(p, pi)

            def k_ii_generator():
                return obs.posterior_kernel(p, p)

            return k_ii_generator, k_ij_generator

        return [self._update(obs.posterior_mean(p), *build_gens(p)) for p in ps]

    def cross(self, *ps):
        """Construct the Cartesian product of a collection of processes.

        Args:
            *ps (:class:`.graph.GP`): Processes to construct the
                Cartesian product of.

        Returns:
            :class:`.graph.GP`: The Cartesian product of `ps`.
        """
        mok = MOK(*ps)
        return self._update(MOM(*ps),
                            lambda: mok,
                            lambda pi: mok.transform(None, lambda y: At(pi)(y)))

    @_dispatch(int, [At])
    def sample(self, n, *xs):
        """Sample multiple processes simultaneously.

        Args:
            n (int, optional): Number of samples. Defaults to `1`.
            *xs (:class:`.graph.At`): Locations to sample at.

        Returns:
            tuple: Tuple of samples.
        """
        # Sample from a temporary multi-output GP on a fresh graph so the
        # current graph is left untouched.
        sample = GP(MOK(*self.ps),
                    MOM(*self.ps),
                    graph=Graph())(MultiInput(*xs)).sample(n)

        # To unpack `x`, just keep `.get()`ing.
        def unpack(x):
            while isinstance(x, Input):
                x = x.get()
            return x

        # Unpack sample: split the stacked draw back into one block per `x`.
        lengths = [B.shape(uprank(unpack(x)))[0] for x in xs]
        i, samples = 0, []
        for length in lengths:
            samples.append(sample[i:i + length, :])
            i += length
        return samples[0] if len(samples) == 1 else samples

    @_dispatch([At])
    def sample(self, *xs):
        # Default to a single sample.
        return self.sample(1, *xs)

    @_dispatch([{list, tuple}])
    def logpdf(self, *pairs):
        # Joint log-pdf of several (x, y) pairs.
        xs, ys = zip(*pairs)

        # Check that all processes are specified.
        if not all([isinstance(x, At) for x in xs]):
            raise ValueError('Must explicitly specify the processes which to '
                             'compute the log-pdf for.')

        # Uprank all outputs and concatenate.
        y = B.concat(*[uprank(y) for y in ys], axis=0)

        # Return composite log-pdf.
        return GP(MOK(*self.ps),
                  MOM(*self.ps),
                  graph=Graph())(MultiInput(*xs)).logpdf(y)

    @_dispatch(At, B.Numeric)
    def logpdf(self, x, y):
        return x.logpdf(y)

    @_dispatch(Observations)
    def logpdf(self, obs):
        return obs.x.logpdf(obs.y)

    @_dispatch(SparseObservations)
    def logpdf(self, obs):
        # For sparse observations the exact log-pdf is intractable; return
        # the ELBO lower bound instead.
        return obs.elbo
model = Graph() #: A default graph provided for convenience
class GP(RandomProcess, Referentiable):
    """Gaussian process.

    Args:
        kernel (:class:`.kernel.Kernel`): Kernel of the
            process.
        mean (:class:`.mean.Mean`, optional): Mean function of the
            process. Defaults to zero.
        graph (:class:`.graph.Graph`, optional): Graph to attach to.
    """
    _dispatch = Dispatcher(in_class=Self)

    @_dispatch([object])
    def __init__(self, kernel, mean=None, graph=model, name=None):
        # Resolve kernel: numbers and functions are lifted via `OneKernel`.
        if isinstance(kernel, (B.Numeric, FunctionType)):
            kernel = kernel * OneKernel()

        # Resolve mean.
        if mean is None:
            mean = ZeroMean()
        elif isinstance(mean, (B.Numeric, FunctionType)):
            mean = mean * OneMean()

        # Then add a new `GP` to the graph with the resolved kernel and mean.
        self.graph = graph
        self.graph.add_independent_gp(self, kernel, mean)

        # If a name is given, set the name.
        if name:
            self.graph.name(self, name)

    @_dispatch(Graph)
    def __init__(self, graph):
        # Bare constructor used by `Graph._update`: the graph supplies the
        # kernel and mean through its lazy tables.
        self.graph = graph

    @property
    def kernel(self):
        """Kernel of the GP."""
        return self.graph.kernels[self]

    @property
    def mean(self):
        """Mean function of the GP."""
        return self.graph.means[self]

    @property
    def name(self):
        """Name of the GP."""
        return self.graph[self]

    @name.setter
    @_dispatch(str)
    def name(self, name):
        self.graph.name(self, name)

    def __call__(self, x):
        """Construct a finite-dimensional distribution at specified locations.

        Args:
            x (input): Points to construct the distribution at.

        Returns:
            :class:`.random.Normal`: Finite-dimensional distribution.
        """
        return Normal(self, x)

    @_dispatch([object])
    def condition(self, *args):
        """Condition the GP. See :meth:`.graph.Graph.condition`."""
        return self.graph.condition((self,), Observations(*args, ref=self))[0]

    @_dispatch(AbstractObservations)
    def condition(self, obs):
        return self.graph.condition((self,), obs)[0]

    @_dispatch(object)
    def __add__(self, other):
        return self.graph.sum(self, other)

    @_dispatch(Random)
    def __add__(self, other):
        # Addition with non-GP random objects is undefined.
        raise NotImplementedError('Cannot add a GP and a {}.'
                                  ''.format(type(other).__name__))

    @_dispatch(Self)
    def __add__(self, other):
        return self.graph.sum(self, other)

    @_dispatch(object)
    def __mul__(self, other):
        return self.graph.mul(self, other)

    @_dispatch(Random)
    def __mul__(self, other):
        # Multiplication with non-GP random objects is undefined.
        raise NotImplementedError('Cannot multiply a GP and a {}.'
                                  ''.format(type(other).__name__))

    @_dispatch(Self)
    def __mul__(self, other):
        # Product of two GPs, expressed via their means and a residual GP
        # whose kernel collects the (cross-)covariance terms.
        # NOTE(review): this looks like a moment-matching construction —
        # confirm against the package's derivation before relying on it.
        return (lambda x: self.graph.means[self](x)) * other + \
               self * (lambda x: self.graph.means[other](x)) + \
               GP(kernel=self.graph.kernels[self] *
                         self.graph.kernels[other] +
                         self.graph.kernels[self, other] *
                         self.graph.kernels[other, self],
                  mean=-self.graph.means[self] *
                        self.graph.means[other],
                  graph=self.graph)

    @_dispatch([object])
    def __or__(self, args):
        """Shorthand for conditioning."""
        return self.condition(Observations(*args, ref=self))

    @_dispatch(AbstractObservations)
    def __or__(self, obs):
        return self.condition(obs)

    def shift(self, shift):
        """Shift the GP. See :meth:`.graph.Graph.shift`."""
        return self.graph.shift(self, shift)

    def stretch(self, stretch):
        """Stretch the GP. See :meth:`.graph.Graph.stretch`."""
        return self.graph.stretch(self, stretch)

    def __gt__(self, stretch):
        """Shorthand for :meth:`.graph.GP.stretch`."""
        return self.stretch(stretch)

    def transform(self, f):
        """Input transform the GP. See :meth:`.graph.Graph.transform`."""
        return self.graph.transform(self, f)

    def select(self, *dims):
        """Select dimensions from the input. See :meth:`.graph.Graph.select`."""
        return self.graph.select(self, *dims)

    def __getitem__(self, *dims):
        """Shorthand for :meth:`.graph.GP.select`."""
        return self.select(*dims)

    def diff(self, dim=0):
        """Differentiate the GP. See :meth:`.graph.Graph.diff`."""
        return self.graph.diff(self, dim)

    def diff_approx(self, deriv=1, order=6):
        """Approximate the derivative of the GP by constructing a finite
        difference approximation.

        Args:
            deriv (int): Order of the derivative.
            order (int): Order of the estimate.

        Returns:
            Approximation of the derivative of the GP.
        """
        # Use the FDM library to figure out the coefficients.
        fdm = central_fdm(order, deriv, adapt=0, factor=1e8)
        fdm.estimate()  # Estimate step size.

        # Construct finite difference as a weighted sum of shifted GPs.
        df = 0
        for g, c in zip(fdm.grid, fdm.coefs):
            df += c * self.shift(-g * fdm.step)
        return df / fdm.step ** deriv

    @property
    def stationary(self):
        """Stationarity of the GP."""
        return self.kernel.stationary

    @property
    def var(self):
        """Variance of the GP."""
        return self.kernel.var

    @property
    def length_scale(self):
        """Length scale of the GP."""
        return self.kernel.length_scale

    @property
    def period(self):
        """Period of the GP."""
        return self.kernel.period

    def __str__(self):
        return self.display()

    def __repr__(self):
        return self.display()

    def display(self, formatter=lambda x: x):
        """Display the GP.

        Args:
            formatter (function, optional): Function to format values.

        Returns:
            str: GP as a string.
        """
        return 'GP({}, {})'.format(self.kernel.display(formatter),
                                   self.mean.display(formatter))
| pb593/stheno | stheno/graph.py | graph.py | py | 27,441 | python | en | code | null | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "input.At",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "plum.Referentiable",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "plum.Dispatcher",
... |
import matplotlib.pyplot as plt
from utility_functions import *

# Tri-trophic food-web experiment: set up a water column, solve for the
# optimal vertical distributions with two optimizers, plot both solutions,
# and run a time simulation.
# NOTE(review): `np`, `stats`, `spectral_method`, `water_column`, etc. come
# from the star import of `utility_functions` — confirm.

# --- Configuration ---
depth = 120      # water-column depth (presumably meters — TODO confirm)
layers = 100     # number of discretization layers
segments = 1
size_classes = 2     # NOTE(review): unused below; mass_vector has 3 entries
lam = 300
simulate = False     # NOTE(review): unused; `simulator` is called regardless
verbose = True       # NOTE(review): unused
l2 = False
min_attack_rate = 10**(-3)
mass_vector = np.array([0.05, 20, 6000]) # np.array([1, 30, 300, 400, 800, 16000])

# --- Discretization and resource profile ---
obj = spectral_method(depth, layers, segments=segments)
logn = stats.lognorm.pdf(obj.x, 1, 0)  # NOTE(review): unused
norm_dist = stats.norm.pdf(obj.x, loc=0, scale=3)
res_start = 8*norm_dist # 0.1*(1-obj.x/depth)
res_max = 10 * norm_dist
water_start = water_column(obj, res_start, layers=layers * segments, resource_max=res_max, replacement=lam, advection=0,
                           diffusion=0, logistic = True)

# --- Ecosystem setup: three size classes, shared heat kernel ---
params = ecosystem_parameters(mass_vector, obj, lam=0.3, min_attack_rate = min_attack_rate, forage_mass = 0.05/408)
params.handling_times = np.zeros(3)
eco = ecosystem_optimization(mass_vector, layers * segments, params, obj, water_start, l2=l2, movement_cost=0)
eco.population_setter(np.array([1, 0.1, 0.01]))
eco.heat_kernel_creator(10**(-1))
eco.heat_kernels[1] = eco.heat_kernels[0]
eco.heat_kernels[2] = eco.heat_kernels[0]
eco.parameters.who_eats_who[1,0] = 1  # class 1 also preys on class 0

# --- Solve with both optimizers ---
opt_sol_quad_opt = quadratic_optimizer(eco)
opt_sol = lemke_optimizer(eco)

# Plot the smoothed Lemke solution, one curve per size class.
plt.plot(obj.x, opt_sol[0:layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol[layers:2*layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol[2*layers:3*layers]@eco.heat_kernels[0])
plt.show()

# Plot the smoothed quadratic-optimizer solution for comparison.
plt.plot(obj.x, opt_sol_quad_opt[0:layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol_quad_opt[layers:2*layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol_quad_opt[2*layers:3*layers]@eco.heat_kernels[0])
plt.show()

# Run the time simulation and store results under "proper_tritrophic".
simulator(eco, params, "proper_tritrophic", total_days=180, lemke = True)
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matpl... |
19786182992 | from keras import Model
import numpy as np
from scam.exceptions import InvalidState
from scam.utils import resize_activations, normalize_activations
class ScoreCAM:
    """Score-CAM style class activation mapping for a Keras CNN.

    The workflow is two-step: `prepare_cam` masks the input with each
    normalized activation map of the last convolutional layer and records
    the resulting class scores; `get_class_heatmap` then combines the maps,
    weighted by those scores, into a heatmap for one class.
    """

    def __init__(self, model_input, last_conv_output, softmax_output, input_shape, cam_batch_size=None):
        """
        Prepares class activation mappings
        :param model_input: input layer of CNN, normally takes batch of images as an input. Currently batch must be limited to a single image
        :param last_conv_output: last convolutional layer. The last conv layer contains the most complete information about image.
        :param softmax_output: flat softmax (or similar) layer describing the class certainty
        :param input_shape: Expecting a batch of a single input sample 1 x M X N X ...; it is assumed that 2D image of M x N dimensions is served as an input, which can be multiplied with a 2D-mask.
        :param cam_batch_size: Optional, defaults to None, which will result in inference of batches of size 32.
        """
        self.model_input = model_input
        self.last_conv_output = last_conv_output
        self.softmax_output = softmax_output
        self.input_shape = input_shape
        self.cam_batch_size = cam_batch_size
        # Two sub-models sharing the same input: one exposing the last
        # convolutional feature maps, the other the class scores.
        self.last_conv_model = Model(inputs=model_input, outputs=last_conv_output)
        self.softmax_model = Model(inputs=model_input, outputs=softmax_output)
        # Populated by prepare_cam().
        self.normalized_maps = None
        self.classes_activation_scale = None

    def prepare_cam(self, input):
        """Run the masking pass for a single input sample and cache the
        normalized activation maps plus the per-mask class scores."""
        # Feature maps of the (single) input image.
        conv_maps = self.last_conv_model.predict(input)
        # filter_size x input_shape[0] x input_shape[1]: upscale to the
        # input resolution, then normalize each map.
        maps = normalize_activations(resize_activations(conv_maps[0], self.input_shape))
        # One masked copy of the input per activation map.
        masked_batch = np.multiply(np.tile(input, (maps.shape[0], 1, 1, 1)),
                                   np.expand_dims(maps, axis=3))
        # filter_size x classes: score of every class for every mask.
        self.classes_activation_scale = self.softmax_model.predict(masked_batch,
                                                                   batch_size=self.cam_batch_size)
        self.normalized_maps = maps

    def get_class_heatmap(self, class_id):
        """Return the activation heatmap for `class_id`; requires that
        `prepare_cam` was called first."""
        if self.normalized_maps is None or self.classes_activation_scale is None:
            raise InvalidState('Call prepare_cam before accessing get_class_heatmap, '
                               'activations must be prepared via prepare_cam')
        # Weight every map by its score for the requested class.
        class_weights = self.classes_activation_scale[:, class_id].reshape((-1, 1, 1))
        weighted_maps = np.multiply(self.normalized_maps, class_weights)
        collapsed = np.max(weighted_maps, axis=0)
        # ReLU: keep only positive evidence for the class.
        return np.where(collapsed > 0, collapsed, 0)
{
"api_name": "keras.Model",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.Model",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scam.utils.resize_activations",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scam.utils.norm... |
70345439783 | import os
import sys
import pdb
import torch
import numpy as np
import pickle as pkl
from PIL import Image
from random import shuffle
from torchvision import datasets, transforms
""" Template Dataset with Labels """
class XYDataset(torch.utils.data.Dataset):
    """Dataset over paired inputs `x` and labels `y`.

    `x` may hold tensors (images in [0, 255]) or file paths; in the latter
    case a `transform` keyword argument must be supplied. A `source` keyword
    argument is required by `__getitem__` to pick the output scaling.
    """

    def __init__(self, x, y, **kwargs):
        self.x, self.y = x, y

        # this was to store the inverse permutation in permuted_mnist
        # so that we could 'unscramble' samples and plot them
        # (any keyword argument becomes an attribute, e.g. `source`,
        # `transform`).
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        x, y = self.x[idx], self.y[idx]

        if type(x) != torch.Tensor:
            # mini_imagenet
            # we assume it's a path --> load from file
            x = self.transform(Image.open(x).convert('RGB'))
            y = torch.Tensor(1).fill_(y).long().squeeze()
        else:
            # Tensor input: scale raw pixel values from [0, 255] to [0, 1].
            x = x.float() / 255.
            y = y.long()

        # for some reason mnist does better \in [0,1] than [-1, 1]
        if self.source == 'mnist':
            return x, y
        else:
            return (x - 0.5) * 2, y
""" Template Dataset for Continual Learning """
class CLDataLoader(object):
    """Wrap one dataset per task into a list of `DataLoader`s.

    Training loaders use `args.batch_size`, shuffle, and drop the final
    incomplete batch; evaluation loaders use a fixed batch size of 64 and
    keep every sample. Index the object by task id to get that task's
    loader.
    """

    def __init__(self, datasets_per_task, args, train=True):
        batch = args.batch_size if train else 64
        self.datasets = datasets_per_task
        self.loaders = []
        for task_dataset in self.datasets:
            self.loaders.append(
                torch.utils.data.DataLoader(task_dataset,
                                            batch_size=batch,
                                            shuffle=True,
                                            drop_last=train,
                                            num_workers=0))

    def __getitem__(self, idx):
        return self.loaders[idx]

    def __len__(self):
        return len(self.loaders)
""" Split CIFAR10 into 5 tasks {{0,1}, ... {8,9}} """
def get_split_cifar10(args):
    """Split CIFAR-10 into 5 tasks of 2 classes each: {0,1}, ..., {8,9}.

    Mutates `args` in place (n_tasks, n_classes, buffer_size, input sizes,
    output loss, ...), downloads CIFAR-10 into 'Data/' if needed, sorts the
    samples by label, and slices them into per-task datasets.

    Args:
        args: Namespace with at least `mem_size` and `output_loss`.

    Returns:
        (train_ds, val_ds, test_ds): three iterables of XYDataset, one
        entry per task.
    """
    # assert args.n_tasks in [5, 10], 'SplitCifar only works with 5 or 10 tasks'
    assert '1.' in str(torch.__version__)[:2], 'Use Pytorch 1.x!'
    args.n_tasks = 5
    args.n_classes = 10
    args.buffer_size = args.n_tasks * args.mem_size * 2
    args.use_conv = True
    args.n_classes_per_task = 2
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'

    # because data is between [-1,1]:
    # Bug fix: the original used `is not 'bernouilli'`, an identity check
    # against a string literal (implementation-dependent, SyntaxWarning);
    # equality is what was meant.
    assert args.output_loss != 'bernouilli'

    if args.output_loss is None:
        # TODO(multinomial is broken)
        # args.output_loss = 'multinomial'
        args.output_loss = 'mse'
        print('\nsetting output loss to MSE')

    # fetch CIFAR-10 (downloads into Data/ on first use)
    train = datasets.CIFAR10('Data/', train=True, download=True)
    test = datasets.CIFAR10('Data/', train=False, download=True)

    try:
        train_x, train_y = train.data, train.targets
    except AttributeError:
        # Older torchvision versions expose train_data / train_labels.
        train_x, train_y = train.train_data, train.train_labels
    try:
        test_x, test_y = test.data, test.targets
    except AttributeError:
        test_x, test_y = test.test_data, test.test_labels

    # sort according to the label (sorted already returns (x, y) tuples;
    # the original re-built them with a redundant comprehension)
    out_train = sorted(zip(train_x, train_y), key=lambda v: v[1])
    out_test = sorted(zip(test_x, test_y), key=lambda v: v[1])

    train_x, train_y = [np.stack([elem[i] for elem in out_train]) for i in [0, 1]]
    test_x, test_y = [np.stack([elem[i] for elem in out_test]) for i in [0, 1]]

    # NHWC uint8 -> NCHW float tensors
    train_x = torch.Tensor(train_x).permute(0, 3, 1, 2).contiguous()
    test_x = torch.Tensor(test_x).permute(0, 3, 1, 2).contiguous()
    train_y = torch.Tensor(train_y)
    test_y = torch.Tensor(test_y)

    # get indices of the first sample of each class (label array is sorted)
    train_idx = [((train_y + i) % 10).argmax() for i in range(10)]
    train_idx = [0] + [x + 1 for x in sorted(train_idx)]
    test_idx = [((test_y + i) % 10).argmax() for i in range(10)]
    test_idx = [0] + [x + 1 for x in sorted(test_idx)]

    train_ds, test_ds = [], []
    skip = 10 // 5  # classes per task (10 classes / args.n_tasks)
    for i in range(0, 10, skip):
        tr_s, tr_e = train_idx[i], train_idx[i + skip]
        te_s, te_e = test_idx[i], test_idx[i + skip]
        train_ds += [(train_x[tr_s:tr_e], train_y[tr_s:tr_e])]
        test_ds += [(test_x[te_s:te_e], test_y[te_s:te_e])]

    # Carve a validation split out of the training data, then wrap all
    # splits as XYDataset with the 'cifar10' source tag.
    train_ds, val_ds = make_valid_from_train(train_ds)

    train_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), train_ds)
    val_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), val_ds)
    test_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), test_ds)

    return train_ds, val_ds, test_ds
def get_split_cifar100(args):
# assert args.n_tasks in [5, 10], 'SplitCifar only works with 5 or 10 tasks'
assert '1.' in str(torch.__version__)[:2], 'Use Pytorch 1.x!'
args.n_tasks = 20
args.n_classes = 100
args.buffer_size = args.n_tasks * args.mem_size * 5
args.use_conv = True
args.n_classes_per_task = 5
args.input_size = [3, 32, 32]
args.input_type = 'continuous'
# because data is between [-1,1]:
assert args.output_loss is not 'bernouilli'
if args.output_loss == None:
#TODO(multinomial is broken)
#args.output_loss = 'multinomial'
args.output_loss = 'mse'
print('\nsetting output loss to MSE')
# fetch MNIST
train = datasets.CIFAR100('Data/', train=True, download=True)
test = datasets.CIFAR100('Data/', train=False, download=True)
try:
train_x, train_y = train.data, train.targets
test_x, test_y = test.data, test.targets
except:
train_x, train_y = train.train_data, train.train_labels
test_x, test_y = test.test_data, test.test_labels
# sort according to the label
out_train = [
(x,y) for (x,y) in sorted(zip(train_x, train_y), key=lambda v : v[1]) ]
out_test = [
(x,y) for (x,y) in sorted(zip(test_x, test_y), key=lambda v : v[1]) ]
train_x, train_y = [
np.stack([elem[i] for elem in out_train]) for i in [0,1] ]
test_x, test_y = [
np.stack([elem[i] for elem in out_test]) for i in [0,1] ]
train_x = torch.Tensor(train_x).permute(0, 3, 1, 2).contiguous()
test_x = torch.Tensor(test_x).permute(0, 3, 1, 2).contiguous()
train_y = torch.Tensor(train_y)
test_y = torch.Tensor(test_y)
# get indices of class split
train_idx = [((train_y + i) % 100).argmax() for i in range(100)]
train_idx = [0] + [x + 1 for x in sorted(train_idx)]
test_idx = [((test_y + i) % 100).argmax() for i in range(100)]
test_idx = [0] + [x + 1 for x in sorted(test_idx)]
train_ds, test_ds = [], []
skip = 100 // 20 #args.n_tasks
for i in range(0, 100, skip):
tr_s, tr_e = train_idx[i], train_idx[i + skip]
te_s, te_e = test_idx[i], test_idx[i + skip]
train_ds += [(train_x[tr_s:tr_e], train_y[tr_s:tr_e])]
test_ds += [(test_x[te_s:te_e], test_y[te_s:te_e])]
train_ds, val_ds = make_valid_from_train(train_ds)
train_ds = map(lambda x : XYDataset(x[0], x[1], **{'source':'cifar10'}), train_ds)
val_ds = map(lambda x : XYDataset(x[0], x[1], **{'source':'cifar10'}), val_ds)
test_ds = map(lambda x : XYDataset(x[0], x[1], **{'source':'cifar10'}), test_ds)
return train_ds, val_ds, test_ds
def get_miniimagenet(args):
print('loading miniimagenet dataset')
ROOT_PATH = '/Data/Miniimagenet/'
args.use_conv = True
args.n_tasks = 20
args.n_classes = 100
args.n_classes_per_task = 5
args.input_size = (3, 84, 84)
label2id = {}
def get_data(setname):
ds_dir = os.path.join(ROOT_PATH, setname)
label_dirs = os.listdir(ds_dir)
data, labels = [], []
for label in label_dirs:
label_dir = os.path.join(ds_dir, label)
for image_file in os.listdir(label_dir):
data.append(os.path.join(label_dir, image_file))
if label not in label2id:
label_id = len(label2id)
label2id[label] = label_id
label_id = label2id[label]
labels.append(label_id)
return data, labels
transform = transforms.Compose([
transforms.Resize(84),
transforms.CenterCrop(84),
transforms.ToTensor(),
])
train_data, train_label = get_data('meta_train')
valid_data, valid_label = get_data('meta_val')
test_data, test_label = get_data('meta_test')
# total of 60k examples for training, the rest for testing
all_data = np.array(train_data + valid_data + test_data)
all_label = np.array(train_label + valid_label + test_label)
train_ds, test_ds = [], []
current_train, current_test = None, None
cat = lambda x, y: np.concatenate((x, y), axis=0)
for i in range(args.n_classes):
class_indices = np.argwhere(all_label == i).reshape(-1)
class_data = all_data[class_indices]
class_label = all_label[class_indices]
split = int(0.8 * class_data.shape[0])
data_train, data_test = class_data[:split], class_data[split:]
label_train, label_test = class_label[:split], class_label[split:]
if current_train is None:
current_train, current_test = (data_train, label_train), (data_test, label_test)
else:
current_train = cat(current_train[0], data_train), cat(current_train[1], label_train)
current_test = cat(current_test[0], data_test), cat(current_test[1], label_test)
if i % args.n_classes_per_task == (args.n_classes_per_task - 1):
train_ds += [current_train]
test_ds += [current_test]
current_train, current_test = None, None
# build masks
masks = []
task_ids = [None for _ in range(20)]
for task, task_data in enumerate(train_ds):
labels = np.unique(task_data[1]) #task_data[1].unique().long()
assert labels.shape[0] == args.n_classes_per_task
mask = torch.zeros(args.n_classes).cuda()
mask[labels] = 1
masks += [mask]
task_ids[task] = labels
task_ids = torch.from_numpy(np.stack(task_ids)).cuda().long()
print('task_ids', task_ids)
train_ds, val_ds = make_valid_from_train(train_ds)
train_ds = map(lambda x, y : XYDataset(x[0], x[1], **{'source':'cifar100', 'mask':y, 'task_ids':task_ids, 'transform':transform}), train_ds, masks)
val_ds = map(lambda x, y: XYDataset(x[0], x[1], **{'source': 'cifar100', 'mask': y, 'task_ids': task_ids, 'transform': transform}), val_ds, masks)
test_ds = map(lambda x, y : XYDataset(x[0], x[1], **{'source':'cifar100', 'mask':y, 'task_ids':task_ids, 'transform':transform}), test_ds, masks)
return train_ds, val_ds, test_ds
def make_valid_from_train(dataset, cut=0.95):
tr_ds, val_ds = [], []
for task_ds in dataset:
x_t, y_t = task_ds
# shuffle before splitting
perm = torch.randperm(len(x_t))
x_t, y_t = x_t[perm], y_t[perm]
split = int(len(x_t) * cut)
x_tr, y_tr = x_t[:split], y_t[:split]
x_val, y_val = x_t[split:], y_t[split:]
tr_ds += [(x_tr, y_tr)]
val_ds += [(x_val, y_val)]
return tr_ds, val_ds
class IIDDataset(torch.utils.data.Dataset):
def __init__(self, data_loaders, seed=0):
self.data_loader = data_loaders
self.idx = []
for task_id in range(len(data_loaders)):
for i in range(len(data_loaders[task_id].dataset)):
self.idx.append((task_id, i))
random.Random(seed).shuffle(self.idx)
def __getitem__(self, idx):
task_id, instance_id = self.idx[idx]
return self.data_loader[task_id].dataset.__getitem__(instance_id)
def __len__(self):
return len(self.idx) | joey-wang123/DRO-Task-free | data.py | data.py | py | 11,866 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.utils",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"li... |
9502049271 | from django.db import models
from django.contrib.auth.models import AbstractUser
from voomsdb.utils.models import PersonalModel, NameTimeBasedModel
from voomsdb.utils.choices import AdmissionTypeChoice
from voomsdb.utils.media import MediaHelper
from voomsdb.utils.strings import generate_ref_no
from django.conf import settings
# Create your models here.
class User(AbstractUser):
pass
class Student(PersonalModel):
matric_number = models.CharField(max_length=40)
admission_type = models.CharField(max_length=40,
choices=AdmissionTypeChoice.choices,
default=AdmissionTypeChoice.UTME)
programme = models.ForeignKey('programme.Programme',
on_delete=models.SET_NULL, null=True, blank=True)
year_of_admission = models.IntegerField()
slug = models.SlugField(default=generate_ref_no, null=True, blank=True)
published = models.BooleanField(default=False)
def __str__(self):
return f'{self.last_name} {self.first_name}'
def image_url(self):
if self.image:
return self.image.url
return f"{settings.STATIC_URL}images/avatar/06.jpg"
class Document(models.Model):
name = models.CharField(max_length=255)
profile = models.ForeignKey('home.Student', on_delete=models.CASCADE)
document = models.FileField(upload_to=MediaHelper.get_document_upload_path)
slug = models.SlugField(default=generate_ref_no, null=True, blank=True)
def __str__(self):
return f'{self.profile} {self.name}'
| dauntless001/vooms | home/models.py | models.py | py | 1,596 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "voomsdb.utils.models.PersonalModel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 18,
"usage_type": ... |
75121574505 | import unittest
import requests
URL = 'http://127.0.0.1:8000/segment'
IMAGE_PATH = './data/test_image/'
IMAGE_NAME = '0bf631128.jpg'
IMAGE_FORMAT = 'image/jpeg'
class ImageSegmentationTest(unittest.TestCase):
def test_image_segmentation(self):
with open(IMAGE_PATH+IMAGE_NAME, 'rb') as image_file:
image_data = image_file.read()
headers = {'accept': 'application/json'}
files = {
'file': (IMAGE_NAME, image_data, IMAGE_FORMAT)
}
response = requests.post(URL, headers=headers, files=files)
self.assertEqual(response.status_code, 200)
response_json = response.json()
self.assertIn("segmented_image", response_json)
def test_missing_image_file(self):
headers = {'accept': 'application/json'}
files = {}
response = requests.post(URL, headers=headers, files=files)
self.assertEqual(response.status_code, 422)
if __name__ == '__main__':
unittest.main()
| MykytaKyt/airbus-ship-detection | tests/test_app.py | test_app.py | py | 994 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "unittest.main",
... |
990448118 | import requests
import random
import time
from threading import Thread
# Import modules for HTTP flood
import tools.randomData as randomData
import tools.ipTools as ipTools
def HTTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
if ipTools.isCloudFlare(target):
if not input("[?] Current site is under CloudFlare protection. Do you want to continue? (y/n)\n >>> ") in ("y", "Y", "1"):
exit()
print("[#] Attack started for " + str(attack_time) + " secounds..")
threads_list = []
# Load 25 random user agents
user_agents = []
for _ in range(threads):
user_agents.append( randomData.random_useragent() )
# HTTP flood
def http_flood():
global FINISH
while True:
if FINISH:
break
payload = str(random._urandom(random.randint(1, 30)))
headers = {
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Accept-Encoding": "gzip, deflate, br",
"User-agent": random.choice(user_agents)
}
try:
r = requests.get(target, params = payload)
except Exception as e:
print(e)
time.sleep(2)
else:
print("[" + str(r.status_code) + "] Request sent! Payload size: " + str(len(payload)))
# Start threads
for thread in range(0, threads):
print("[#] Staring thread " + str(thread))
t = Thread(target = http_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("[!] HTTP attack stopped!") | Marshmello1912/Git | Impulse/tools/L7/http.py | http.py | py | 1,653 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tools.ipTools.isCloudFlare",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tools.ipTools",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "tools.randomData.random_useragent",
"line_number": 24,
"usage_type": "call"
},
{
"api_na... |
25634514682 | import claripy
import code
from hashlib import sha512
import json
import sys
b = [claripy.BVS('b_%d' % i, 1) for i in range(33896)]
s = claripy.Solver()
with open("map3.txt", 'r') as f:
cipher, chalbox = json.loads(f.read())
length, gates, check = chalbox
for i in range(33767,33896):
name, args = gates[i-128]
if name == 'false':
s.add(b[i]==claripy.BVV(0,1))
else:
if args[0][1] == True:
arg1 = 1
else:
arg1 = 0
if args[1][1] == True:
arg2 = 1
else:
arg2 = 0
u1 = b[args[0][0]] ^ arg1
u2 = b[args[1][0]] ^ arg2
if name == 'or':
s.add(b[i] == u1)
elif name == 'xor':
s.add(b[i] == u2)
s.add(b[33895] == 0)
for i in range(0, 33896):
if len(s.eval(b[i],2)) == 1:
b[i] = s.eval(b[i],1)[0]
code.interact(local=locals())
| posgnu/ctfs | pctf2018/3iscABC/sol.py | sol.py | py | 907 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "claripy.BVS",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "claripy.Solver",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "claripy.BVV",
"line_number": ... |
18852099221 | from os import environ
from time import time, sleep
import requests
import requests.auth
from requests_oauthlib import OAuth1
from .exceptions import *
class API:
def __init__(self, session=None):
self.log_function = print
self.retry_rate = 5
self.num_retries = 5
self.failed_last = False
self.force_stop = False
self.ignore_errors = False
self.common_errors = (requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.HTTPError)
self.session = session
def __str__(self):
return pformat(vars(self))
def log_error(self, e):
"""
Print errors. Stop travis-ci from leaking api keys
:param e: The error
:return: None
"""
if not environ.get('CI'):
self.log_function(e)
if hasattr(e, 'response') and hasattr(e.response, 'text'):
self.log_function(e.response.text)
def _sleep(self, seconds):
"""
Sleep between requests, but don't force asynchronous code to wait
:param seconds: The number of seconds to sleep
:return: None
"""
for _ in range(int(seconds)):
if not self.force_stop:
sleep(1)
@staticmethod
def merge_params(parameters, new):
if new:
parameters = {**parameters, **new}
return parameters
def get(self, *args, **kwargs):
"""
An interface for get requests that handles errors more gracefully to
prevent data loss
"""
try:
req_func = self.session.get if self.session else requests.get
req = req_func(*args, **kwargs)
req.raise_for_status()
self.failed_last = False
return req
except requests.exceptions.RequestException as e:
self.log_error(e)
for i in range(1, self.num_retries):
sleep_time = self.retry_rate * i
self.log_function("Retrying in %s seconds" % sleep_time)
self._sleep(sleep_time)
try:
req = requests.get(*args, **kwargs)
req.raise_for_status()
self.log_function("New request successful")
return req
except requests.exceptions.RequestException:
self.log_function("New request failed")
# Allows for the api to ignore one potentially bad request
if not self.failed_last:
self.failed_last = True
raise ApiError(e)
else:
raise FatalApiError(e)
class Reddit(API):
def __init__(self, application_id, application_secret):
super().__init__()
self.retry_rate /= 2 # Because it will try reauthorise if failure
self.application_id = application_id
self.application_secret = application_secret
self.url = "https://oauth.reddit.com"
self.request_rate = 5
self.user_agent = "SocialReaper"
self.headers = {}
self.token_expiry = 0
self.requires_reauth = True
self.auth()
self.last_request = time()
def auth(self):
client_auth = requests.auth.HTTPBasicAuth('%s' % self.application_id,
'%s' % self.application_secret)
post_data = {"grant_type": "client_credentials"}
headers = {"User-Agent": self.user_agent}
try:
response = requests.post(
"https://www.reddit.com/api/v1/access_token",
auth=client_auth, data=post_data,
headers=headers)
except requests.exceptions.RequestException as e:
raise ApiError(e)
rj = response.json()
self.headers = {"Authorization": "bearer %s" % rj.get('access_token'),
"User-Agent": self.user_agent}
self.token_expiry = time() + rj.get('expires_in', 0)
def api_call(self, edge, parameters, return_results=True):
if time() > self.token_expiry + 30:
self.auth()
time_diff = time() - self.last_request
if time_diff < self.request_rate:
sleep(self.request_rate - time_diff)
self.last_request = time()
try:
req = self.get("%s/%s" % (self.url, edge), params=parameters,
headers=self.headers)
except (ApiError, FatalApiError):
try:
self.auth()
except ApiError:
pass
req = self.get("%s/%s" % (self.url, edge), params=parameters,
headers=self.headers)
if return_results:
return req.json()
def search(self, query, count=100, order="new", page='',
result_type="link", time_period="all", **params):
parameters = {"show": "all",
"q": query,
"limit": count,
"sort": order,
"type": result_type,
"t": time_period,
"after": page}
parameters = self.merge_params(parameters, params)
return self.api_call('search.json', parameters)
def subreddit(self, subreddit, count=100, category="new", page='',
time_period='all', **params):
parameters = {"limit": count,
"t": time_period,
"after": page}
parameters = self.merge_params(parameters, params)
return self.api_call('r/%s/%s.json' % (subreddit, category), parameters)
def user(self, user, count=100, order="new", page='',
result_type="overview", time_period='all', **params):
parameters = {"show": "all",
"limit": count,
"sort": order,
"type": result_type,
"t": time_period,
"after": page}
parameters = self.merge_params(parameters, params)
return self.api_call('user/%s/%s.json' % (user, result_type),
parameters)
def thread_comments(self, thread, subreddit, order="top", sub_thread=None,
**params):
parameters = {"depth": 50,
"showmore": True,
"sort": order}
parameters = self.merge_params(parameters, params)
path = None
if sub_thread:
path = 'r/%s/comments/%s/_/%s.json' % (
subreddit, thread, sub_thread)
else:
path = 'r/%s/comments/%s.json' % (subreddit, thread)
return self.api_call(path, parameters)
def more_children(self, children, link_id, sort="new",
**params):
parameters = {"api_type": "json",
"children": ",".join(children),
"link_id": link_id,
"sort": sort,
"limit_children": False
}
parameters = self.merge_params(parameters, params)
return self.api_call('api/morechildren', parameters)
class Facebook(API):
def __init__(self, api_key):
super().__init__()
self.key = api_key
self.url = "https://graph.facebook.com/v"
self.version = "2.9"
self.request_rate = 1
self.last_request = time()
def api_call(self, edge, parameters, return_results=True):
req = self.get("%s%s/%s" % (self.url, self.version, edge),
params=parameters)
time_diff = time() - self.last_request
if time_diff < self.request_rate:
sleep(self.request_rate - time_diff)
self.last_request = time()
if return_results:
return req.json()
def node_edge(self, node, edge, fields=None, params=None):
"""
:param node:
:param edge:
:param fields:
:param params:
:return:
"""
if fields:
fields = ",".join(fields)
parameters = {"fields": fields,
"access_token": self.key}
parameters = self.merge_params(parameters, params)
return self.api_call('%s/%s' % (node, edge), parameters)
def post(self, post_id, fields=None, **params):
"""
:param post_id:
:param fields:
:param params:
:return:
"""
if fields:
fields = ",".join(fields)
parameters = {"fields": fields,
"access_token": self.key}
parameters = self.merge_params(parameters, params)
return self.api_call('%s' % post_id, parameters)
def page_posts(self, page_id, after='', post_type="posts",
include_hidden=False, fields=None, **params):
"""
:param page_id:
:param after:
:param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts'
:param include_hidden:
:param fields:
:param params:
:return:
"""
if fields:
fields = ",".join(fields)
parameters = {"access_token": self.key,
"after": after,
"fields": fields,
"include_hidden": include_hidden}
parameters = self.merge_params(parameters, params)
return self.api_call('%s/%s' % (page_id, post_type), parameters)
def post_comments(self, post_id, after='', order="chronological",
filter="stream", fields=None, **params):
"""
:param post_id:
:param after:
:param order: Can be 'ranked', 'chronological', 'reverse_chronological'
:param filter: Can be 'stream', 'toplevel'
:param fields: Can be 'id', 'application', 'attachment', 'can_comment',
'can_remove', 'can_hide', 'can_like', 'can_reply_privately', 'comments',
'comment_count', 'created_time', 'from', 'likes', 'like_count',
'live_broadcast_timestamp', 'message', 'message_tags', 'object',
'parent', 'private_reply_conversation', 'user_likes'
:param params:
:return:
"""
if fields:
fields = ",".join(fields)
parameters = {"access_token": self.key,
"after": after,
"order": order,
"fields": fields,
"filter": filter}
parameters = self.merge_params(parameters, params)
return self.api_call('%s/comments' % post_id, parameters)
class Twitter(API):
def __init__(self, api_key, api_secret, access_token, access_token_secret):
super().__init__()
self.app_key = api_key
self.app_secret = api_secret
self.oauth_token = access_token
self.oauth_token_secret = access_token_secret
self.url = "https://api.twitter.com/1.1"
self.request_rate = 5
self.auth = OAuth1(self.app_key, self.app_secret, self.oauth_token,
self.oauth_token_secret)
self.last_request = time()
def api_call(self, edge, parameters, return_results=True):
req = self.get("%s/%s" % (self.url, edge), params=parameters,
auth=self.auth)
time_diff = time() - self.last_request
if time_diff < self.request_rate:
sleep(self.request_rate - time_diff)
self.last_request = time()
if return_results:
return req.json()
def search(self, query, count=100, max_id='',
result_type="mixed", include_entities=True,
tweet_mode='extended', **params):
count = 100 if count < 100 else count
parameters = {"q": query,
"count": count,
"max_id": max_id,
"result_type": result_type,
"include_entities": include_entities,
"tweet_mode": tweet_mode}
parameters = self.merge_params(parameters, params)
return self.api_call("search/tweets.json", parameters)
def user(self, username, count=200, max_id=None, exclude_replies=False,
include_retweets=False, tweet_mode='extended', **params):
parameters = {"screen_name": username,
"count": count,
"max_id": max_id,
"exclude_replies": exclude_replies,
"include_rts": include_retweets,
"tweet_mode": tweet_mode}
parameters = self.merge_params(parameters, params)
return self.api_call("statuses/user_timeline.json", parameters)
| kavyamandaliya/SentimentAnalysis | scraper/scrap/apis.py | apis.py | py | 13,220 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.exceptions",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "requests.exceptions",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_na... |
43696108113 | import os
from math import ceil
from keras import backend
from keras import optimizers
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, Flatten, BatchNormalization, Dropout
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# from keras.utils import plot_model
from tensor_board import MyTensorBoard
from parallel_model import ParallelModel
class ImageClassifier:
root_path = ""
input_shape = ()
output_size = 0
model = None
def __init__(self, root_path, input_shape, output_size):
self.root_path = root_path
self.input_shape = input_shape
self.output_size = output_size
self.base_model = VGG19(weights='imagenet', include_top=False, input_shape=self.input_shape)
self.model = Sequential()
self.model.add(self.base_model)
self.model.add(Flatten())
self.model.add(Dense(self.output_size, activation='sigmoid'))
# self.base_model = ResNet50(include_top=False, input_shape=self.input_shape, pooling='avg', weights='imagenet')
# x = self.base_model.output
# x = Dense(2048, activation='relu')(x)
# x = Dropout(0.25)(x)
# output = Dense(self.output_size, activation='sigmoid')(x)
# self.model = Model(inputs=self.base_model.inputs, outputs=output)
# self.model = ParallelModel(self.model, gpus=4)
# plot_model(self.model, to_file=os.path.join(root_path, 'model.png'))
@staticmethod
def f2(y_true, y_pred):
def recall(y_true, y_pred):
true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))
possible_positives = backend.sum(backend.round(backend.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + backend.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))
predicted_positives = backend.sum(backend.round(backend.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + backend.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
beta_squared = 4
return (beta_squared + 1) * ((precision * recall) / (beta_squared * precision + recall + backend.epsilon()))
def train(self, x, y, batch_size, validation_data, lr, epochs, idx_split=0):
self.model.compile(loss='binary_crossentropy',
optimizer=optimizers.Adam(lr=lr),
metrics=['accuracy', self.f2])
early_stop = EarlyStopping(patience=2)
model_checkpoint = ModelCheckpoint(self.__get_weights_path(idx_split), save_best_only=True)
reduce_lr = ReduceLROnPlateau(patience=2, cooldown=2)
tensor_board = MyTensorBoard(log_dir=self.__get_logs_path(idx_split, lr, epochs), write_images=True)
self.model.fit(x=x,
y=y,
validation_data=validation_data,
batch_size=batch_size,
epochs=epochs,
callbacks=[early_stop, reduce_lr, model_checkpoint, tensor_board])
def train_generator(self, train_gen, train_size, valid_gen, valid_size, batch_size, lr, decay, epochs, idx_split=0):
self.model.compile(loss='binary_crossentropy',
optimizer=optimizers.Adam(lr=lr, decay=decay),
metrics=['accuracy', self.f2])
early_stop = EarlyStopping(patience=4, min_delta=1e-4)
model_checkpoint = ModelCheckpoint(self.__get_weights_path(idx_split), save_best_only=True)
tensor_board = MyTensorBoard(log_dir=self.__get_logs_path(idx_split, lr, epochs), write_images=True)
self.model.fit_generator(generator=train_gen,
steps_per_epoch=(train_size // batch_size + 1),
epochs=epochs,
shuffle=False,
validation_data=valid_gen,
validation_steps=(valid_size // batch_size + 1),
callbacks=[early_stop, model_checkpoint, tensor_board])
def predict(self, x, batch_size):
return self.model.predict(x=x, batch_size=batch_size)
def predict_generator(self, test_gen, test_size, batch_size):
return self.model.predict_generator(generator=test_gen, steps=(test_size // batch_size + 1))
def save(self, idx_split=0):
self.model.save_weights(self.__get_weights_path(idx_split))
def load(self, idx_split=0):
self.model.load_weights(self.__get_weights_path(idx_split))
def load_if_exist(self, idx_split):
weights_path = self.__get_weights_path(idx_split)
if os.path.isfile(weights_path):
self.model.load_weights(weights_path)
def set_trainable(self, trainable):
for layer in self.base_model.layers:
layer.trainable = trainable
def __get_weights_path(self, idx_split):
return os.path.join(self.root_path, 'models', 'split{}.h5'.format(idx_split))
def __get_logs_path(self, idx_split, lr, epochs):
return os.path.join(self.root_path, 'logs', 'split{}-lr{}-epochs{}'.format(idx_split, lr, epochs))
| anson627/kaggle | planet/lib/classifier.py | classifier.py | py | 5,485 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.applications.vgg19.VGG19",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "keras.models.Sequential",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.layers.Flatten",
"line_number": 32,
"usage_type": "call"
},
{
"api_n... |
37980420317 | import random
from enum import Enum
fifth_ed_bad_reactions = ["cringe.jpg", "mike.jpg", "nat1.gif", "nat1.jpg", "jazz.jpg"]
fifth_ed_good_reactions = ["heisenberg.gif", "joji.jpg", "mcmahon.gif", "nat20.jpg"]
sw_bad_reactions = ["bad1.gif", "bad2.gif", "bad3.gif", "bad4.gif",
"bad5.gif", "bad6.jpg", "bad7.jpg", "bad8.jpg", "bad9.gif", "bad10.gif"]
sw_good_reactions = ["good1.gif", "good2.gif", "good3.gif",
"good4.gif", "good5.gif", "good6.gif", "good7.gif", "good8.gif"]
class GameMode(Enum):
WIZARDS_FIFTH_ED = 1
STAR_WARS_FIFTH_ED = 2
def get_good_reaction (current_gamemode):
path = "resources/reactions/5e/good/" + random.choice(fifth_ed_good_reactions)
if current_gamemode == GameMode.STAR_WARS_FIFTH_ED:
path = "resources/reactions/sw5e/good/" + random.choice(sw_good_reactions)
return path
def get_bad_reaction(current_gamemode):
path = "resources/reactions/5e/bad/" + random.choice(fifth_ed_bad_reactions)
if current_gamemode == GameMode.STAR_WARS_FIFTH_ED:
path = "resources/reactions/sw5e/bad/" + random.choice(sw_bad_reactions)
return path
| SPIGS/DiceBot | gamemode.py | gamemode.py | py | 1,148 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_numbe... |
23155147064 | import logging
import os
from datetime import datetime
file_name=f"{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log"
logs_path=os.path.join(os.getcwd(),"logs",file_name)
os.makedirs(logs_path,exist_ok=True)
logs_file_path=os.path.join(logs_path,file_name)
logging.basicConfig(filename=logs_file_path,
format="[%(asctime)s] %(lineno)d %(name)s-%(levelname)s-%(message)s",
level=logging.INFO)
#if __name__=="__main__":
# logging.info("logging started") | Hema9121/second-hema-ml-repo | src/logger.py | logger.py | py | 500 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line... |
22541773169 | # RA, 2020-10-13
import contextlib
import io
@contextlib.contextmanager
def open_maybe_gz(file, *, mode='r'):
"""
Open `file` for reading that could be a
- file descriptor
- path to file
- path to gzipped file
`mode` is either 'r' or 'rb', and has to be specified.
Usage:
with open_maybe_gz(path_to_file, mode='r') as fd:
print(fd.read())
"""
assert mode in ['r', 'rb']
if isinstance(file, io.IOBase):
yield file
return
from pathlib import Path
assert Path(file).is_file()
file = str(file)
if file.endswith(".gz"):
import gzip
with gzip.open(file, mode='rb') as fd:
if (mode == 'r'):
yield io.TextIOWrapper(fd)
elif (mode == 'rb'):
yield fd
else:
with open(file, mode=mode) as fd:
yield fd
| Luca-Blum/Computational_Biomedicine | project1/solution/humdum/io/gz.py | gz.py | py | 891 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "io.IOBase",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "io.TextIOWrapper",
"line_nu... |
31571764321 | # _ __ _ __
# | | /| / /| | /| / / GREEN ANALYTICAL INDEX GENERATOR
# | |/ |/ / | |/ |/ / W.Wojnowski 2020
# |__/|__/ |__/|__/ v.0.3 (alpha)
#
#
from tkinter import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.colors import LinearSegmentedColormap
from tkinter import ttk
from tkinter import filedialog
import tkinter.messagebox
import webbrowser
from math import log
from fpdf import FPDF
import os
from datetime import datetime
#***************** SPLASH SCREEN *******************************
root = Tk()
# show no frame
root.overrideredirect(True)
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
root.geometry('%dx%d+%d+%d' % (width*0.5, height*0.5, width*0.1, height*0.1))
image_file = "zaprawa_klejowa.gif"
#assert os.path.exists(image_file)
# use Tkinter's PhotoImage for .gif files
image = PhotoImage(file=image_file)
canvas = Canvas(root, height=height*0.5, width=width*0.5, bg="black")
canvas.create_image(width*0.5/2, height*0.5/2, image=image)
canvas.pack()
# show the splash screen for 5000 milliseconds then destroy
root.after(5000, root.destroy)
root.mainloop()
# **************** MAIN PROGRAM *******************************
root = Tk()
# app title:
root.title('Analytical Method Green Index Calculator')
# default app size:
root.geometry('800x500')
root.minsize(800, 500)
# root.configure(bg='white')
# create the small icon in the task bar:
root.iconbitmap('PG_favicon.ico')
# *********************** Functions ****************************
def clearFrame(frame):
    """Destroy *frame* and rebuild the module-level right-hand frame.

    The embedded chart canvas cannot be redrawn in place, so the whole
    frame is thrown away and recreated empty; pieChart() then packs a
    fresh figure into the new rightFrame.
    """
    frame.destroy()
    global rightFrame
    rightFrame = Frame(root, width=300, height=450, padx=20)
    rightFrame.pack(side=RIGHT)
# Image save dialog:
def saveImage():
    """Ask the user for a target path and save the current figure there as PNG.

    NOTE(review): cancelling the dialog returns an empty filename, which
    would make plt.savefig raise -- confirm whether that needs guarding.
    """
    ftypes = [('PNG file', '.png'), ('All files', '*')] #, ('JPG file', '.jpg') - seems that .jpg is not supported by some module
    filename = filedialog.asksaveasfilename(filetypes=ftypes, defaultextension='.png')
    # save the plot in the specified path; the 'tight' option removes the whitespace from around the figure:
    plt.savefig(filename, bbox_inches='tight')
# temporary placeholder function:
def doNothing():
    """Placeholder callback for menu entries that are not wired up yet."""
    print("ok ok I won't...")
# create the popup window with some additional information:
def popup_bonus():
    """Open the 'About' window: version/copyright info and a clickable
    home-page link that opens in the default web browser."""
    win = Toplevel()
    win.wm_title("About Green Index")
    win.iconbitmap('PG_favicon.ico')
    # open the clicked label's own text as a URL
    def callback(event):
        webbrowser.open_new(event.widget.cget("text"))
    # NOTE(review): 'Gdaลsk' looks mis-encoded (should be 'Gdansk'/'Gdańsk') --
    # fix the file encoding at source rather than here.
    popup_label1 = Label(win, text='v. 0.1 2020 \n(c) Gdaลsk University of Technology', justify=LEFT)
    popup_label1.grid(row=0, column=0, padx=8, pady=8)
    popup_label2 = Label(win, text=r'http://www.chem.pg.edu.pl/kcha', fg='blue', cursor='hand2', justify=LEFT)
    popup_label2.grid(row=1, column=0, padx=8, pady=8)
    popup_label2.bind('<Button-1>', callback)
    # NOTE(review): placeholder Lorem-ipsum text below -- replace with a real description.
    popup_label3 = Label(win, text='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus convallis non sem ut aliquet. Praesent tempus fringilla suscipit. Phasellus tellus massa, semper et bibendum quis, rhoncus id neque. Sed euismod consectetur elit id tristique. Sed eu nibh id ante malesuada condimentum. Phasellus luctus finibus luctus. Pellentesque mi tellus, condimentum sit amet porta sit amet, ullamcorper quis elit. Pellentesque eu mollis nulla. Quisque vulputate, sem at iaculis vehicula, dui orci aliquet lectus, in facilisis odio dolor ut leo. Vivamus convallis hendrerit est luctus ornare. Nullam augue nisi, aliquam sit amet scelerisque hendrerit, pretium vel dui. Pellentesque sed tortor mollis, imperdiet quam quis, scelerisque erat. Vestibulum quis mollis dolor.',
                         wraplength=300, justify=LEFT, bg='white')
    popup_label3.grid(row=2, column=0, padx=8, pady=8)
    popup_button = Button(win, text="Close", command=win.destroy)
    popup_button.grid(row=3, column=0, padx=8, pady=8)
# connect a float in range 0.0 : 1.0 to a colour in a spectrum from red to yellow to green (256 discrete colour values):
def colorMapper(value):
    """Map a score in [0.0, 1.0] to an RGBA colour on a red->yellow->green scale.

    The scale is discretised into 256 colours: 0.0 maps to red (bad score),
    1.0 to green (best score).
    """
    palette = LinearSegmentedColormap.from_list('rg', ["red", "yellow", "green"], N=256)
    return palette(int(value * 255))
# function for refreshing the canvas:
def destroyCanvas(canvas):
    """Remove the given canvas widget from the UI.

    NOTE(review): not called anywhere in this part of the file -- the
    refresh path uses clearFrame() instead; confirm before removing.
    """
    canvas.destroy()
# the final score variable:
entry_text = StringVar()
def printScore():
    """Recompute the weighted-average green index and publish it.

    Reads the twelve criterion scores (var_1..var_12) and their IntVar
    weights from module scope, stores the weighted mean in the global
    `score`, and writes the 2-decimal rounding into entry_text for the
    chart's central decal.  A NameError (criterion not yet set) is
    reported with a message box instead of crashing.
    """
    try:
        global score
        values = (var_1, var_2, var_3, var_4, var_5, var_6,
                  var_7, var_8, var_9, var_10, var_11, var_12)
        weights = [w.get() for w in (weight_1, weight_2, weight_3, weight_4,
                                     weight_5, weight_6, weight_7, weight_8,
                                     weight_9, weight_10, weight_11, weight_12)]
        score = sum(v * w for v, w in zip(values, weights)) / sum(weights)
        # publish the rounded value for display:
        entry_text.set(str(round(score, 2)))
        print(' \n Total score: %s, rounded: %s' % (str(score), str(round(score, 2))))
        print('Criteria scores:')
    except NameError:
        tkinter.messagebox.showerror(title='Name Error', message='Please set all 12 variables.')
# a function to refresh the chart:
def chartPlotter(event=None):
    """Full display refresh: recompute the score, rebuild the right-hand
    frame, redraw the pie chart and update the status bar.

    event=None allows use both as a plain command and as an event binding.
    """
    printScore()
    clearFrame(rightFrame)
    pieChart()
    print_variables()
# interface for assigning custom weights to the 12 variables:
def weightChoice(row, column, tab, weightVar):
    """Build the per-tab weight selector widget group.

    A 'Modify default weights' checkbox unlocks four radio buttons
    (weights 1-4) and a 'Set weight' button that writes the chosen value
    into *weightVar* and refreshes the chart.  row/column give the grid
    position inside *tab*; the default weight is 1.
    """
    # checkbox state: 'disabled' keeps the default weight of 1 locked in
    chckbxVar = StringVar()
    chckbxVar.set('disabled')
    radioVar = IntVar()
    radioVar.set(1)
    radio_1 = ttk.Radiobutton(tab, text='1', variable=radioVar, value=1)
    radio_2 = ttk.Radiobutton(tab, text='2', variable=radioVar, value=2)
    radio_3 = ttk.Radiobutton(tab, text='3', variable=radioVar, value=3)
    radio_4 = ttk.Radiobutton(tab, text='4', variable=radioVar, value=4)
    radio_1.grid(row=row + 1, column=column, sticky='sw', padx=(70, 0))
    radio_2.grid(row=row + 1, column=column, sticky='sw', padx=(100, 0))
    radio_3.grid(row=row + 1, column=column, sticky='sw', padx=(130, 0))
    radio_4.grid(row=row + 1, column=column, sticky='sw', padx=(160, 0))
    # everything starts disabled until the checkbox is ticked
    radio_1.config(state = DISABLED)
    radio_2.config(state = DISABLED)
    radio_3.config(state = DISABLED)
    radio_4.config(state = DISABLED)
    # commit the radio selection into the caller's IntVar and redraw
    def printRadioVar():
        weightVar.set(radioVar.get())
        chartPlotter()
    weight_button = ttk.Button(tab, text='Set weight', command=printRadioVar)
    weight_button.grid(row=row + 1, column=column, sticky='sw', padx=(190, 0))
    weight_button.config(state = DISABLED)
    # toggle the radios/button; unticking also resets the weight back to 1
    def printCheckbox():
        radios = (radio_1, radio_2, radio_3, radio_4)
        if chckbxVar.get() == 'disabled':
            radioVar.set(1)
            weightVar.set(1)
        for radio in radios:
            radio.config(state = DISABLED if chckbxVar.get() == 'disabled' else NORMAL)
        weight_button.config(state = DISABLED if chckbxVar.get() == 'disabled' else NORMAL)
    ttk.Checkbutton(tab, text='Modify default weights', command=lambda: [printCheckbox()], variable=chckbxVar, onvalue='enabled', offvalue='disabled').grid(row=row, column=column,
                                                                                                                                                           columnspan=4, sticky='w', padx=8,
                                                                                                                                                           pady=(60, 0))
    Label(tab, text='Weight: ').grid(row=row + 1, column = column, sticky='sw', padx=8)
# ********** Main menu ***********************************************************************************
menu = Menu(root)
# configure the menu:
root.config(menu=menu)
FileMenu = Menu(menu)
editMenu = Menu(menu)
# add drop-down functionality:
menu.add_cascade(label='File', menu=FileMenu)
FileMenu.add_command(label='Info', command=popup_bonus)
FileMenu.add_separator()
FileMenu.add_command(label='Save image', command=saveImage)
# FileMenu.add_command(label='Exit', command=doNothing)
# menu.add_cascade(label='Edit', menu=editMenu)
# editMenu.add_command(label='Redo', command=doNothing)
# ******** Statusbar *************
def createStatusBar():
    """(Re)build the bottom status bar bound to the module-level status_string.

    NOTE(review): a status bar is also created once at module level just
    below; this helper is only referenced from a commented-out trace --
    confirm whether it is still needed.
    """
    status = ttk.Label(root, textvariable=status_string, bd=1, relief=SUNKEN, anchor=W)
    status.pack(side=BOTTOM, fill=X)
status_string = StringVar()
# status_string.trace('w', createStatusBar)
status_string.set('test test')
status = ttk.Label(root, textvariable=status_string, borderwidth=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
# status = Label(root, text=status_string.get(), bd=1, relief=SUNKEN, anchor=W)
# status.pack(side=BOTTOM, fill=X)
# ******** Two separate frames ******
leftFrame = Frame(root, bd=1, width=300, height=450)
rightFrame = Frame(root, width=300, height=450, padx=20)
bottomFrame = Frame(root, bd=1)
leftFrame.pack(side=LEFT, anchor=N)
rightFrame.pack(side=RIGHT)
bottomFrame.pack(side=BOTTOM, anchor=W)
# ************************* Tabs ***************************
# create tabs:
tab_parent = ttk.Notebook(leftFrame, height=400)
tab1 = ttk.Frame(tab_parent)
tab2 = ttk.Frame(tab_parent)
tab3 = ttk.Frame(tab_parent)
tab4 = ttk.Frame(tab_parent)
tab5 = ttk.Frame(tab_parent)
tab6 = ttk.Frame(tab_parent)
tab7 = ttk.Frame(tab_parent)
tab8 = ttk.Frame(tab_parent)
tab9 = ttk.Frame(tab_parent)
tab10 = ttk.Frame(tab_parent)
tab11 = ttk.Frame(tab_parent)
tab12 = ttk.Frame(tab_parent)
# add tabs to the tab parent:
tab_parent.add(tab1, text="1")
tab_parent.add(tab2, text="2")
tab_parent.add(tab3, text="3")
tab_parent.add(tab4, text="4")
tab_parent.add(tab5, text="5")
tab_parent.add(tab6, text="6")
tab_parent.add(tab7, text="7")
tab_parent.add(tab8, text="8")
tab_parent.add(tab9, text="9")
tab_parent.add(tab10, text="10")
tab_parent.add(tab11, text="11")
tab_parent.add(tab12, text="12")
# ****** matplotlib figure ********
weight_1 = IntVar()
weight_2 = IntVar()
weight_3 = IntVar()
weight_4 = IntVar()
weight_5 = IntVar()
weight_6 = IntVar()
weight_7 = IntVar()
weight_8 = IntVar()
weight_9 = IntVar()
weight_10 = IntVar()
weight_11 = IntVar()
weight_12 = IntVar()
weight_1.set(1)
weight_2.set(1)
weight_3.set(1)
weight_4.set(1)
weight_5.set(1)
weight_6.set(1)
weight_7.set(1)
weight_8.set(1)
weight_9.set(1)
weight_10.set(1)
weight_11.set(1)
weight_12.set(1)
# Begin with default values of variables to be able to generate the chart right away:
var_1 = 1.0
var_2 = 1.0
var_3 = 1.0
var_4 = 1.0
var_5 = 1.0
var_6 = 1.0
var_7 = 1.0
var_8 = 1.0
var_9 = 1.0
var_10 = 1.0
var_11 = 1.0
var_12 = 1.0
# function for updating the status bar:
def updateStatusBar():
    """Rebuild the status-bar text from the twelve criterion scores,
    each rounded to two decimals: ' scores: | (1) x | ... | (12) y |'."""
    global status_string
    scores = (var_1, var_2, var_3, var_4, var_5, var_6,
              var_7, var_8, var_9, var_10, var_11, var_12)
    cells = ['(%d) %s' % (i, str(round(value, 2)))
             for i, value in enumerate(scores, 1)]
    status_string.set(' scores: | ' + ' | '.join(cells) + ' |')
# generate the pie chart (plot) with a circle decal in the middle:
def pieChart(): #weights, labels, colors
    """Draw the green-index pie chart into the right-hand frame.

    Each of the 12 wedges is sized by its criterion weight and coloured by
    its score (red = 0 .. green = 1).  The central circle shows the overall
    index value read back from entry_text, coloured on the same scale.
    """
    # wedge colours from the individual criterion scores:
    colors = [colorMapper(var_1), colorMapper(var_2), colorMapper(var_3), colorMapper(var_4), colorMapper(var_5), colorMapper(var_6), colorMapper(var_7), colorMapper(var_8), colorMapper(var_9),
              colorMapper(var_10), colorMapper(var_11), colorMapper(var_12)]
    # wedge sizes from the user-chosen weights:
    weights = [weight_1.get(), weight_2.get(), weight_3.get(), weight_4.get(), weight_5.get(), weight_6.get(), weight_7.get(), weight_8.get(), weight_9.get(), weight_10.get(), weight_11.get(),
               weight_12.get()]
    labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    # overall score as published by printScore():
    index_value = float(entry_text.get())
    fig, ax = plt.subplots(figsize=(3, 3), dpi=150)
    ax.clear()
    ax.axis('equal')
    radius = 1.0
    # wedges start at 12 o'clock and run clockwise; labels sit inside the ring
    pie2 = ax.pie(weights, radius=radius, colors=colors, labeldistance=(radius * 0.875), labels=labels,
                  rotatelabels=False, startangle=90, counterclock=False,
                  wedgeprops={"edgecolor": "black", 'linewidth': 1}, textprops={'fontsize': (radius * 10)})
    plt.setp(pie2[1], rotation_mode="anchor", ha="center", va="center")
    for tx in pie2[1]:
        rot = tx.get_rotation()
        tx.set_rotation(rot)
        # if you want rotated labels:
        # tx.set_rotation(rot+ 90 + (1 - rot // 180) * 180)
    # central decal coloured by the overall index value:
    circle = plt.Circle(xy=(0, 0), radius=(radius * 0.75), facecolor=colorMapper(index_value), edgecolor='black',
                        linewidth=1)
    plt.gca().add_artist(circle)
    ax.text(0.5, 0.5, str(index_value),
            verticalalignment='center', horizontalalignment='center',
            transform=ax.transAxes,
            color='black', fontsize=(radius * 40))
    fig.tight_layout() # for exporting a compact figure
    # Pack the figure into a canvas:
    canvas = FigureCanvasTkAgg(fig, master=rightFrame) # A tk.DrawingArea.
    plot_widget = canvas.get_tk_widget()
    plot_widget.pack(side=TOP)
    # print(weight_12.get())
# **************************************
# define a temporary function to test the printing of global variables:
def print_variables():
    """Dump all twelve criterion scores to stdout and refresh the status bar.

    Pops a message box instead of crashing if any score is still unset
    (NameError on lookup).
    """
    try:
        # print every criterion score; var_4 was previously skipped by mistake
        print('var_1: ' + str(var_1))
        print('var_2: ' + str(var_2))
        print('var_3: ' + str(var_3))
        print('var_4: ' + str(var_4))
        print('var_5: ' + str(var_5))
        print('var_6: ' + str(var_6))
        print('var_7: ' + str(var_7))
        print('var_8: ' + str(var_8))
        print('var_9: ' + str(var_9))
        print('var_10: ' + str(var_10))
        print('var_11: ' + str(var_11))
        print('var_12: ' + str(var_12))
        updateStatusBar()
    except NameError:
        tkinter.messagebox.showerror(title='Name error',
                                     message='Please fill all the variables')
# generate tab captions and prompt captions:
def tab(tab_no, text1, text2):
    """Place the principle description (row 0) and the input prompt (row 1)
    as word-wrapped labels at the top of the given notebook tab."""
    for grid_row, caption in enumerate((text1, text2)):
        Label(tab_no, text=caption, wraplength=300, justify=LEFT).grid(
            sticky='w', row=grid_row, column=0, padx=8, pady=8)
# *****************************************************************************************************************
# TAB 1
# *****************************************************************************************************************
content_1 = tab(tab1, text1='Direct analytical techniques should be applied to avoid sample treatment.',
text2='Select the sampling procedure:')
# Create a Tkinter variable
var_1_text = StringVar(tab1)
# Dictionary with options
var_1_text_choices = {'Remote sensing without sample damage': 1.0,
'Remote sensing with little physical damage': 0.95,
'Non-invasive analysis': 0.9,
'In-field sampling and direct analysis': 0.85,
'In-field sampling and on-line analysis': 0.78,
'On-line analysis': 0.70,
'At-line analysis': 0.60,
'Off-line analysis': 0.48,
'External sample pre- and treatment and batch analysis (reduced number of steps)': 0.30,
'External sample pre- and treatment and batch analysis (large number of steps)': 0.0}
var_1_text.set('SAMPLING PROCEDURE')
dropDown_1 = OptionMenu(tab1, var_1_text, *var_1_text_choices.keys())
dropDown_1.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_1.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# on change dropdown value, get the dictionary value and modify the global variable
def change_dropdown_1(*args):
    """Trace callback for the Principle 1 (sampling procedure) drop-down:
    look up the score for the selected option and redraw the chart."""
    global var_1
    var_1 = None  # cleared first so a failed lookup never leaves a stale score
    chosen = var_1_text.get()
    var_1 = var_1_text_choices[chosen]
    print('var_1:' + str(var_1))
    chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_1_text.trace('w', change_dropdown_1)
W_1 = weightChoice(10, 0, tab1, weight_1)
# *****************************************************************************************************************
# TAB 2
# *****************************************************************************************************************
content_2 = tab(tab2, text1='Minimal sample size and minimal number of samples are goals.',
text2='Enter the amount of sample in either g or mL:')
amount_var = StringVar()
amount_var.set('input')
sample_amount_entry = ttk.Entry(tab2, textvariable=amount_var, width=15)
sample_amount_entry.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# the event=None is passed so that the entry.bind does not return a positional argument
def change_entry_2(event=None):
    """Set the sample-amount score (var_2) from the tab-2 entry field.

    Amounts above 100 g/mL score 0.0, below 0.1 score 1.0; anything in
    between follows an empirical logarithmic decay.  Non-numeric input
    pops an error box.  The chart is redrawn afterwards.

    `event=None` lets this serve both as a button command (no argument)
    and as a <Return> key binding (receives an Event).
    """
    global var_2
    var_2 = None
    try:
        # parse once instead of re-parsing the entry in every branch
        amount = float(amount_var.get())
        if amount > 100:
            var_2 = 0.0
        elif amount < 0.1:
            var_2 = 1.0
        else:
            # empirical fit; abs() guards against tiny negative values near 100
            var_2 = abs(-0.142 * log(amount) + 0.65)
        print('var_2:' + str(var_2))
    except ValueError:
        # fixed typo in the user-facing message: 'intiger' -> 'integer'
        tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an integer, e.g. 0.14 or 21.')
    chartPlotter()
# bind the <Return> key to the entry window, so that the function gets called as an alternative to the 'set' button:
sample_amount_entry.bind('<Return>', change_entry_2)
# insert a button that does the same:
ttk.Button(tab2, text='Set', command=change_entry_2).grid(row=2, column=0, padx=8, pady=8)
W_2 = weightChoice(10, 0, tab2, weight_2)
# *****************************************************************************************************************
# TAB 3
# *****************************************************************************************************************
content_3 = tab(tab3, 'If possible, measurements should be performed in situ.',
'What is the positioning of the analytical device?')
# Create a Tkinter variable
var_3_text = StringVar(tab3)
# Dictionary with options
var_3_text_choices = {'off-line': 0.0,
'at-line': 0.33,
'on-line': 0.66,
'in-line': 1.0}
var_3_text.set('select')
dropDown_3 = OptionMenu(tab3, var_3_text, *var_3_text_choices.keys())
dropDown_3.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_3.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# on change dropdown value, get the dictionary value and modify the global variable
def change_dropdown_3(*args):
    """Trace callback for the Principle 3 (device positioning) drop-down:
    look up the score for the selected option and redraw the chart."""
    global var_3
    var_3 = None  # cleared first so a failed lookup never leaves a stale score
    chosen = var_3_text.get()
    var_3 = var_3_text_choices[chosen]
    print('var_3:' + str(var_3))
    chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_3_text.trace('w', change_dropdown_3)
W_3 = weightChoice(10, 0, tab3, weight_3)
# *************************** TAB 4 ************************************************************************************
content_4 = tab(tab4, text1='Integration of analytical processes and operations saves energy and reduces the use of reagents.',
text2='How many major, distinct steps are there in the sample preparation procedure? These include e.g. sonication,'
' mineralization, centrifugation, derivatization, extraction, etc.')
var_4_text = StringVar(tab4)
# Dictionary with options
var_4_text_choices = {'3 or fewer': 1.0,
'4': 0.8,
'5': 0.6,
'6': 0.4,
'7': 0.2,
'8 or more': 0.0}
var_4_text.set('select')
dropDown_4 = OptionMenu(tab4, var_4_text, *var_4_text_choices.keys())
dropDown_4.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_4.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# on change dropdown value, get the dictionary value and modify the global variable
def change_dropdown_4(*args):
    """Trace callback for the Principle 4 drop-down: map the selected number
    of sample-preparation steps to its score and redraw the chart."""
    global var_4
    # cleared first so a failed lookup never leaves a stale score
    var_4 = None
    var_4 = var_4_text_choices[var_4_text.get()]
    print('var_4:' + str(var_4))
    chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_4_text.trace('w', change_dropdown_4)
W_4 = weightChoice(10, 0, tab4, weight_4)
# *****************************************************************************************************************
# TAB 5
# *****************************************************************************************************************
content_5 = tab(tab5, text1='Automated and miniaturized methods should be selected.', text2='Degree of automation:')
# Create a Tkinter variable
var_5a_text = StringVar(tab5)
# Dictionary with options
var_5a_text_choices = {'automatic': 1.0,
'semi-automatic': 0.5,
'manual': 0.0}
var_5a_text.set('select')
dropDown_5a = OptionMenu(tab5, var_5a_text, *var_5a_text_choices.keys())
dropDown_5a.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_5a.grid(sticky='w', row=2, column=0, padx=8, pady=8)
var_5a = DoubleVar(tab5)
var_5a.set(1.0)
var_5b = DoubleVar(tab5)
var_5b.set(1.0)
def variableFive():
    """Combine the automation sub-score (var_5a: 1.0/0.5/0.0) and the
    miniaturisation sub-score (var_5b: 1.0/0.0) into the Principle 5
    score, var_5.

    The previous six-branch lookup table was exactly the arithmetic mean
    of the two sub-scores, so compute it directly.  This reproduces every
    tabulated combination and also covers any future sub-score value
    instead of silently leaving var_5 unchanged for unlisted pairs.
    """
    global var_5
    var_5 = (var_5a.get() + var_5b.get()) / 2
# on change dropdown value, get the dictionary value and modify the global variable
def change_dropdown_5a(*args):
    """Trace callback for the automation drop-down: store the numeric
    sub-score, recombine var_5 and redraw the chart."""
    chosen = var_5a_text_choices[var_5a_text.get()]
    var_5a.set(chosen)
    print('var_5a:' + str(var_5a))
    variableFive()
    chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_5a_text.trace('w', change_dropdown_5a)
Label(tab5, text='Sample preparation:', wraplength=300, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
var_5b_text = StringVar(tab5)
var_5b_text_choices = {'miniaturized': 1.0,
'not miniaturized': 0.0}
var_5b_text.set('select')
dropDown_5b = OptionMenu(tab5, var_5b_text, *var_5b_text_choices.keys())
dropDown_5b.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_5b.grid(sticky='w', row=4, column=0, padx=8, pady=8)
def change_dropdown_5b(*args):
    """Trace callback for the miniaturisation drop-down: store the numeric
    sub-score, recombine var_5 and redraw the chart."""
    chosen = var_5b_text_choices[var_5b_text.get()]
    var_5b.set(chosen)
    variableFive()
    print('var_5:' + str(var_5))
    chartPlotter()
var_5b_text.trace('w', change_dropdown_5b)
W_5 = weightChoice(10, 0, tab5, weight_5)
# *****************************************************************************************************************
# TAB 6
# *****************************************************************************************************************
content_6 = tab(tab6, text1='Derivatization should be avoided.', text2='Select derivatization agents (if used):')
# combine the selected options into a single string to produce a label caption
def concatenate_text(list_):
    """Join the given strings into a single '; '-terminated caption.

    Each entry is followed by '; ', so ['a', 'b'] -> 'a; b; ' and an
    empty list yields ''.  Used to display the selected CAS numbers.
    """
    # str.join is linear; the previous repeated '+' concatenation was quadratic
    return ''.join(item + '; ' for item in list_)
def Select():
    """Append the CAS numbers highlighted in the list box to the running
    selection (global_list), update the caption label, and recompute the
    Principle 6 score (var_6) from the per-agent factors in lbox_list."""
    reslist = list()
    selecion = lbox.curselection()
    for i in selecion:
        entered = lbox.get(i)
        reslist.append(entered)
        global_list.append(entered)
    # update the label box with selected deriv. agents:
    print(reslist)
    print(global_list)
    v.set(concatenate_text(global_list))
    global var_6
    var_6 = 1.0
    # multiply the per-agent factors, then add a -0.2 penalty for using derivatization agents:
    # NOTE(review): the penalty is applied even when global_list is empty
    # (pressing Select with nothing chosen yields var_6 = 0.8) -- confirm intent.
    for CAS in global_list:
        var_6 = var_6 * lbox_list[CAS]
    if var_6 > 0.2:
        var_6 = var_6 - 0.2
    else:
        # factors so small the penalty would go negative: floor at zero
        var_6 = 0.0
    print(var_6)
    chartPlotter()
# update the list box
def update_list(*args):
    """Repopulate the CAS list box with entries matching the search field
    (case-insensitive substring match); an empty search shows everything."""
    needle = search_var.get().lower()
    lbox.delete(0, END)
    for cas in lbox_list:
        if needle in cas.lower():
            lbox.insert(END, cas)
# clear the selection and the displayed label caption
def clear_list():
    """Reset the derivatization-agent selection: empty the caption label,
    restore the full Principle 6 score and redraw the chart."""
    global v, global_list, var_6
    v.set('')
    global_list = []
    var_6 = 1.0
    chartPlotter()
# create global variables
global_list = []
# if derivatization should be avoided, then shouldn't the highest value in the case in which derivatization agents are used be lower than 1.0?
lbox_list = {
# "None": 1.1,
"5950-69-6": 1,
"30084-90-3": 1,
"12093-10-6": 1,
"6283-74-5": 1,
"119-53-9": 1,
"126-81-8": 1,
"24257-93-0": 1,
"58068-85-2": 0.949158975023003,
"1273-85-4": 0.949158975023003,
"4233-33-4": 0.949158975023003,
"100-10-7": 0.949158975023003,
"38183-12-9": 0.945068038553621,
"41468-25-1": 0.937140592655187,
"1395920-13-4": 0.937140592655187,
"521-24-4": 0.937140592655187,
"56606-21-4": 0.935584744799731,
"65-22-5": 0.925393321432741,
"68123-33-1": 0.925393321432741,
"913253-56-2": 0.913914155272091,
"124522-09-4": 0.913914155272091,
"223463-14-7": 0.902699986612441,
"1118-68-9": 0.901394170230429,
"952102-12-4": 0.901394170230429,
"536-69-6": 0.901394170230429,
"203256-20-6": 0.901394170230429,
"86516-36-1": 0.899210326049394,
"861881-76-7": 0.886368566581839,
"56139-74-3": 0.869932201280637,
"84806-27-9": 0.865490140567591,
"91366-65-3": 0.865490140567591,
"67229-93-0": 0.855427480281241,
"1273-82-1": 0.855042238169516,
"50632-57-0": 0.846792397075292,
"10199-89-0": 0.839008465483774,
"152111-91-6": 0.836037308222637,
"7149-49-7": 0.830362674910287,
"3029-19-4": 0.830362674910287,
"68572-87-2": 0.829473879117877,
"12152-94-2": 0.829473879117877,
"29270-56-2": 0.829154698457689,
"24463-19-2": 0.827803622060042,
"100-39-0": 0.825375773537705,
"550-44-7": 0.822230968349539,
"ย 49759-20-8": 0.822230968349539,
"38609-97-1": 0.822230968349539,
"35661-51-9": 0.822230968349539,
"10401-59-9": 0.822230968349539,
"70402-14-1": 0.822230968349539,
"131076-14-7": 0.822230968349539,
"214147-22-5": 0.822230968349539,
"4930-98-7": 0.822230968349539,
"569355-30-2": 0.822230968349539,
"53348-04-2": 0.820406195102248,
"67580-39-6": 0.818423316626862,
"68133-98-2": 0.814016502590708,
"81864-15-5": 0.814016502590708,
"113722-81-9": 0.814016502590708,
"15537-71-0": 0.809079828950995,
"33008-06-9": 0.809079828950995,
"139332-64-2": 0.809079828950995,
"62642-61-9": 0.806764775754175,
"100139-54-6": 0.806764775754175,
"62796-29-6": 0.797901423240715,
"87-13-8": 0.783298381421747,
"35231-44-8": 0.778837259389339,
"88404-25-5": 0.778837259389339,
"485-47-2": 0.77674392680131,
"58520-45-9": 0.776282830117383,
"107-91-5": 0.776282830117383,
"139332-66-4": 0.776282830117383,
"89-25-8": 0.776282830117383,
"18428-76-7": 0.776282830117383,
"20624-25-3": 0.763216179776723,
"27072-45-3": 0.762516465156704,
"1459205-36-7": 0.755628677634781,
"96483-68-0": 0.747181595887401,
"132098-76-1": 0.747181595887401,
"98-59-9": 0.746227267824334,
"7612-98-8": 0.744246233476037,
"5415-58-7": 0.742560985030801,
"76-83-5": 0.740506239181083,
"1293-79-4": 0.740506239181083,
"28920-43-6": 0.740506239181083,
"100-07-2": 0.740506239181083,
"99-73-0": 0.738962425018157,
"22265-37-8": 0.737084384687495,
"3731-51-9": 0.737084384687495,
"141903-34-6": 0.737084384687495,
"122-04-3": 0.732376041854033,
"4755-50-4": 0.732376041854033,
"99-33-2": 0.732376041854033,
"605-65-2": 0.723192330411814,
"56512-49-3": 0.723192330411814,
"126565-42-2": 0.723192330411814,
"7693-46-1": 0.721322673837572,
"1711-06-4": 0.717883414280986,
"93128-04-2": 0.717798274857161,
"613-54-7": 0.716357636495872,
"74367-78-5": 0.710065827927279,
"119-26-6": 0.692633685424727,
"2508-19-2": 0.692425832968952,
"21614-17-5": 0.682522312223409,
"80-11-5": 0.681782236352849,
"100-46-9": 0.679263084173718,
"55486-13-0": 0.666338980106273,
"16315-59-6": 0.665281844920184,
"5102-79-4": 0.664748970983542,
"70-34-8": 0.664086673111964,
"132-32-1": 0.659883743356088,
"36410-81-8": 0.659179085176979,
"100-16-3": 0.659159320154698,
"104077-15-8": 0.659091847163412,
"4083-64-1": 0.649947842697737,
"21324-39-0": 0.634865149902982,
"2978-11-2_": 0.629540812510628,
"456-27-9": 0.628988106517093,
"98-09-9": 0.628032387327697,
"103-72-0": 0.606674230911606,
"504-29-0": 0.587444277328904,
"86-84-0": 0.566544585073271,
"36877-69-7": 0.556132009449506,
"103-71-9": 0.525453097624119,
"551-06-4": 0.510591749035237,
"643-79-8": 0.486298449205041,
"98-88-4": 0.475562851988167,
"5470-11-1": 0.466906948575218,
"99-65-0": 0.414382740812551,
"95-54-5": 0.409876625997181,
"60-24-2": 0.380580959884422,
"1118-71-4": 1,
"4426-47-5": 0.98287765750619,
"35342-88-2": 0.934408589712128,
"13435-12-6": 0.934408589712128,
"122-51-0": 0.90808769546171,
"17455-13-9": 0.898290310316299,
"7449-74-3": 0.896162794934563,
"1188-33-6": 0.873968193624155,
"1133-63-7": 0.845047181007906,
"57981-02-9": 0.843544327015115,
"3449-26-1": 0.831289869514086,
"54925-64-3": 0.831289869514086,
"7453-26-1": 0.831289869514086,
"23231-91-6": 0.82477424558194,
"423-39-2": 0.821174784952006,
"3332-29-4": 0.817379220173597,
"18297-63-7": 0.804205531712304,
"13257-81-3": 0.796997494513717,
"73980-71-9": 0.796226219175859,
"828-73-9": 0.796226219175859,
"36805-97-7": 0.785921382127458,
"6006-65-1": 0.785921382127458,
"4909-78-8": 0.785921382127458,
"920-68-3": 0.785921382127458,
"653-37-2": 0.78349900067157,
"422-05-9": 0.78349900067157,
"2182-66-3": 0.766534069464941,
"354-64-3": 0.763789874990475,
"58479-61-1": 0.763598909336104,
"13154-24-0": 0.763598909336104,
"70-11-1": 0.761090045768687,
"723336-86-5": 0.761090045768687,
"850418-19-8": 0.761090045768687,
"850418-20-1": 0.761090045768687,
"1546-79-8": 0.758242430472499,
"24589-78-4": 0.758242430472499,
"53296-64-3": 0.758242430472499,
"77377-52-7": 0.758242430472499,
"82112-21-8": 0.757927402509425,
"375-22-4": 0.756114760094685,
"336-59-4": 0.756114760094685,
"356-42-3": 0.756114760094685,
"420-37-1": 0.75205982910284,
"77-76-9": 0.750711985826051,
"20082-71-7": 0.749832128609721,
"2251-50-5": 0.747481224283863,
"100-11-8": 0.745015119615777,
"18162-48-6": 0.743146183479067,
"425-75-2": 0.742441949845756,
"1765-40-8": 0.742441949845756,
"76437-40-6": 0.742441949845756,
"80522-42-5": 0.742441949845756,
"1538-75-6": 0.74152936540163,
"98-03-3": 0.739537905180287,
"87020-42-6": 0.737007165264001,
"589-15-1": 0.736264650708209,
"2857-97-8": 0.736016815715654,
"17950-40-2": 0.732111366794642,
"407-25-0": 0.731258587142799,
"115-20-8": 0.730613289210088,
"823-96-1": 0.721670319376414,
"71735-32-5": 0.7183910746808,
"333-27-7": 0.7183910746808,
"996-50-9": 0.714539433160182,
"3768-58-9": 0.714539433160182,
"685-27-8": 0.713300737795531,
"25561-30-2": 0.713300737795531,
"124-41-4": 0.70689269806413,
"15933-59-2": 0.705803556150421,
"18156-74-6": 0.705803556150421,
"123-62-6": 0.703483768736821,
"2083-91-2": 0.703043095426246,
"10416-59-8": 0.700353286433786,
"69739-34-0": 0.696757084764058,
"107-46-0": 0.696026303459663,
"541-88-8": 0.680085578563036,
"994-30-9": 0.659639561940176,
"75-26-3": 0.65077439166517,
"543-27-1": 0.643008761928377,
"6092-54-2": 0.619827404668639,
"76-02-8": 0.618803077595292,
"75-77-4": 0.606190113014358,
"7719-09-7": 0.598432942089881,
"1066-35-9": 0.590259358282054,
"4637-24-5": 0.587695662266982,
"920-66-1": 0.5835440122017,
"8077-35-8": 0.580905093441462,
"108-24-7": 0.56539851162607,
"10294-34-5": 0.546920496297807,
"999-97-3": 0.539120875551113,
"7637-07-2": 0.536295783559384,
"75-89-8": 0.517064147633066,
"1899-02-1": 0.453968334570473,
"17287-03-5": 0.450591161239778,
"7664-93-9": 0.430740368201206,
"132228-87-6": 0.389860157052623,
"75-59-2": 0.35207841911058,
"77-78-1": 0.185707987424391,
"19132-06-0": 1,
"1052236-86-8": 1,
"135806-59-6": 1,
"139658-04-1": 1,
"108031-79-4": 1,
"124529-02-8": 0.789788397239459,
"124529-07-3": 0.789788397239459,
"24277-43-8": 0.789788397239459,
"958300-06-6": 0.789788397239459,
"5978-70-1": 0.661143997568766,
"3886-70-2": 0.62276366189702,
"20445-31-2": 0.616318224518582,
"17257-71-5": 0.616318224518582,
"81655-41-6": 0.616318224518582,
"21451-74-1": 0.616318224518582,
"14645-24-0": 0.616318224518582,
"147948-52-5": 0.581990910059596,
"104371-20-2": 0.581990910059596,
"132679-61-9": 0.56145194750795,
"210529-62-7": 0.56145194750795,
"3347-90-8": 0.550846501071722,
"104530-16-7": 0.547959104197752,
"39637-74-6": 0.547959104197752,
"39262-22-1": 0.52022184149657,
"1517-69-7": 0.474716248097616,
"1445-91-6": 0.474716248097616,
"107474-79-3": 0.437963083473382,
"14602-86-9": 0.412055011328408,
"3886-69-9": 0.358144912356212,
"2627-86-3": 0.326740839342668,
"24277-44-9": 0.288185973785988,
"62414-75-9": 0.288185973785988,
"14152-97-7": 0.288185973785988,
"42340-98-7": 0.176714727821325,
"14649-03-7": 0.132441393121765,
"33375-06-3": 0.116078677380125,
}
v = StringVar()
chckbxVar_tab6 = StringVar()
chckbxVar_tab6.set('disabled')
# set initial blank value of the StringVar
v.set('')
search_var = StringVar()
search_var.trace("w", update_list)
entry = ttk.Entry(tab6, textvariable=search_var, width=13)
# disable the lookup box initially:
# entry.config(state=DISABLED)
scrollbar = ttk.Scrollbar(tab6, orient='vertical')
scrollbar.grid(row=3, column=0, sticky='w', ipady=30, padx=(220, 0))
lbox = Listbox(tab6, width=34, height=6, yscrollcommand=scrollbar.set) # selectmode=MULTIPLE
# disable the lbox initially:
# lbox.config(state=DISABLED)
# def lboxActivator():
#
# # chckbxVar_tab6.set('disabled')
# if chckbxVar_tab6.get() == 'enabled':
# entry.config(state=ACTIVE)
# lbox.config(state=NORMAL)
# elif chckbxVar_tab6.get() == 'disabled':
# entry.config(state=DISABLED)
# lbox.config(state=DISABLED)
#
# ttk.Checkbutton(tab6, text='Derivatisation agent is used', command=lboxActivator, variable=chckbxVar_tab6, onvalue='enabled', offvalue='disabled').grid(row=30, column=0)
# lboxActivator()
Label(tab6, text='CAS lookup: ').grid(row=2, column=0, padx=8, pady=3, sticky='w')
entry.grid(row=2, column=0, padx=(100, 0), pady=3, sticky='w')
lbox.grid(row=3, column=0, padx=8, pady=3, sticky='w')
# link the scrollbar to the list box
scrollbar.config(command=lbox.yview)
ttk.Button(tab6, text="Select", command=Select, width=8).grid(column=1, row=3, padx=4)
# clear the selection and the caption
ttk.Button(tab6, text='Clear', command=lambda:[clear_list(), update_list()], width=8).grid(column=1, row=4, padx=4)
Label(tab6, text='Selected CAS: ').grid(column=0, row=4, sticky='w', padx=8, pady=0)
ttk.Label(tab6, textvariable=v, wraplength=180, width=34, relief='groove').grid(column=0, row=5, sticky='w', padx=8, pady=4)
# call the function to populate the list at the beginning
update_list()
W_6 = weightChoice(10, 0, tab6, weight_6)
# *****************************************************************************************************************
# TAB 7
# *****************************************************************************************************************
# --- Tab 7: amount of analytical waste (criterion 7) ---
# NOTE: adjacent string literals are concatenated, so the first one must end
# with a space (previously rendered as "managementof").
content_7 = tab(tab7, text1='Generation of a large volume of analytical waste should be avoided, and proper management '
                            'of analytical waste should be provided.', text2='Enter the amount of waste in g or mL:')
amount_var7 = StringVar()
amount_var7.set('input')
# the event=None is passed so that the entry.bind does not return a positional argument
def change_entry_7(event=None):
    """Map the waste amount entered on tab 7 onto the 0..1 score ``var_7`` and refresh the chart.

    Shows an error dialog when the entry cannot be parsed as a number.
    """
    global var_7
    var_7 = None
    try:
        amount = float(amount_var7.get())  # parse once instead of on every comparison
        if amount > 150:
            var_7 = 0
        elif amount < 0.1:
            var_7 = 1.0
        else:
            # empirical logarithmic fit between the two cut-offs
            var_7 = abs(-0.134 * log(amount) + 0.6946) # absolute value to avoid negative values
        print('var_7:' + str(var_7))
    except ValueError:
        tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an integer, e.g. 0.14 or 21.')
    chartPlotter()
sample_amount_entry7 = ttk.Entry(tab7, textvariable=amount_var7, width=15)
sample_amount_entry7.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# bind the <Return> key to the entry window, so that the function gets called as an alternative to the 'set' button:
sample_amount_entry7.bind('<Return>', change_entry_7)
# insert a button that does the same:
ttk.Button(tab7, text='Set', command=change_entry_7).grid(row=2, column=0, padx=8, pady=8)
# weight selector for criterion 7
W_7 = weightChoice(10, 0, tab7, weight_7)
# *****************************************************************************************************************
# TAB 8
# *****************************************************************************************************************
# --- Tab 8: number of analytes per run and sample throughput (criterion 8) ---
content_8 = tab(tab8, text1='Multi-analyte or multi-parameter methods are preferred '
                            'versus methods using one analyte at a time.',
                text2='Number of analytes determined in a single run:')
amount_var8a = StringVar()
amount_var8a.set('input')
sample_amount_entry8a = ttk.Entry(tab8, textvariable=amount_var8a, width=15)
sample_amount_entry8a.grid(sticky='w', row=2, column=0, padx=8, pady=8)
Label(tab8, text='Sample throughput (samples analysed per hour):', wraplength=300, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
amount_var8b = StringVar()
amount_var8b.set('input')
sample_amount_entry8b = ttk.Entry(tab8, textvariable=amount_var8b, width=15)
sample_amount_entry8b.grid(sticky='w', row=4, column=0, padx=8, pady=8)
def change_entry_8(event=None):
    """Score criterion 8 from analytes-per-run x samples-per-hour and refresh the chart."""
    global var_8
    var_8 = None
    try:
        # determinations per hour; computed once instead of in every branch
        throughput = float(amount_var8a.get()) * float(amount_var8b.get())
        if throughput < 1.0:
            var_8 = 0.0
        elif throughput > 70.0:
            var_8 = 1.0
        else:
            var_8 = abs(0.2429 * log(throughput) - 0.0517) # absolute value to avoid negative values
        print('var_8:' + str(var_8))
    except ValueError:
        tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an integer, e.g. 0.14 or 21.')
    # refresh the plot:
    chartPlotter()
sample_amount_entry8b.bind('<Return>', change_entry_8)
ttk.Button(tab8, text='Set', command=change_entry_8).grid(row=5, column=0, padx=8, pady=8)
# weight selector for criterion 8
W_8 = weightChoice(10, 0, tab8, weight_8)
# *****************************************************************************************************************
# TAB 9
# *****************************************************************************************************************
# --- Tab 9: energy use (criterion 9): pick a technique OR enter kWh directly ---
content_9 = tab(tab9, text1='The use of energy should be minimized.',
                text2='Select the most energy-intensive technique used in the method:')
var_9_text = StringVar(tab9)
amount_var9 = StringVar(tab9)
amount_var9.set('input')
# Dictionary with options: technique -> energy score (1.0 low consumption ... 0.0 high)
var_9_text_choices = { 'None': 1.0,
                       'FTIR': 1.0, # what about vortexing, incubation, etc.? Mineralization?
                       'Immunoassay': 1.0,
                       'Spectrofluorometry': 1.0,
                       'Titration': 1.0,
                       'UPLC': 1.0,
                       'UV-Vis Spectrometry': 1.0,
                       'AAS': 0.5,
                       'GC': 0.5,
                       'ICP-MS': 0.5,
                       'LC': 0.5,
                       'NMR': 0.0,
                       'GC-MS': 0.0,
                       'LC-MS': 0.0,
                       'X-ray diffractometry': 0.0}
var_9_text.set('select')
dropDown_9 = OptionMenu(tab9, var_9_text, *var_9_text_choices.keys())
dropDown_9.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_9.grid(sticky='w', row=2, column=0, padx=8, pady=8)
def change_dropdown_9(*args):
    """Set the criterion-9 score from the selected technique and refresh the chart."""
    global var_9
    # (removed dead `var_9 = 1.0` store -- the lookup below always overwrites it)
    var_9 = var_9_text_choices[var_9_text.get()]
    print('var_9:' + str(var_9))
    chartPlotter()
var_9_text.trace('w', change_dropdown_9)
ttk.Label(tab9, text='Alternatively, estimate the total power consumption of a single analysis in kWh:', wraplength=250, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
sample_amount_entry9 = ttk.Entry(tab9, textvariable=amount_var9, width=15)
sample_amount_entry9.grid(sticky='w', row=4, column=0, padx=8, pady=8)
def change_entry_9(event=None):
    """Score criterion 9 from an explicit kWh estimate and refresh the chart."""
    global var_9
    var_9 = 1.0  # fallback kept when the entry cannot be parsed
    try:
        kwh = float(amount_var9.get())  # parse once
        if kwh > 1.5:
            var_9 = 0.0
        elif kwh < 0.1:
            var_9 = 1.0
        else:
            var_9 = abs(-0.7143 * kwh + 1.0714) # absolute value to avoid negative values
        print('var_9:' + str(var_9))
    except ValueError:
        tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an integer, e.g. 0.14 or 21.')
    chartPlotter()
sample_amount_entry9.bind('<Return>', change_entry_9)
ttk.Button(tab9, text='Set', command=change_entry_9).grid(row=4, column=0, padx=8, pady=8)
# weight selector for criterion 9
W_9 = weightChoice(10, 0, tab9, weight_9)
# *****************************************************************************************************************
# TAB 10
# *****************************************************************************************************************
# --- Tab 10: renewable (bio-based) reagents (criterion 10) ---
content_10 = tab(tab10, text1='Reagents obtained from renewable sources should be preferred.',
                 text2='Select the type of reagents:')
var_10_text = StringVar(tab10)
# Dictionary with options: answer -> score
var_10_text_choices = {'No reagents': 1.0,
                       'All reagents are bio-based': 1.0,
                       'Some reagents are bio-based': 0.5,
                       'None of the reagents are from bio-based sources': 0.0
                       }
var_10_text.set('select')
dropDown_10 = OptionMenu(tab10, var_10_text, *var_10_text_choices.keys())
dropDown_10.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_10.grid(sticky='w', row=2, column=0, padx=8, pady=8)
def change_dropdown_10(*args):
    """Set the criterion-10 score from the selected reagent type and refresh the chart."""
    global var_10
    # (removed dead `var_10 = None` store -- the lookup below always overwrites it)
    var_10 = var_10_text_choices[var_10_text.get()]
    print('var_10:' + str(var_10))
    chartPlotter()
var_10_text.trace('w', change_dropdown_10)
# weight selector for criterion 10
W_10 = weightChoice(10, 0, tab10, weight_10)
# *****************************************************************************************************************
# TAB 11
# *****************************************************************************************************************
# --- Tab 11: toxic reagents (criterion 11) ---
content_11 = tab(tab11, text1='Toxic reagents should be eliminated or replaced.',
                 text2='Does the method involve the use of toxic reagents?')
var_11a_text = StringVar(tab11)
# Dictionary with options: 'No' -> full score, 'Yes' -> refined by amount below
var_11a_text_choices = {'No': 1.0,
                        'Yes': 0.0}
var_11a_text.set('Select')
dropDown_11a = OptionMenu(tab11, var_11a_text, *var_11a_text_choices.keys())
dropDown_11a.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_11a.grid(sticky='w', row=2, column=0, padx=8, pady=8)
def enabler_11b():
    """Return the Tk state for the amount entry: editable only after answering 'Yes'."""
    if float(var_11a_text_choices[var_11a_text.get()]) == 0.0:
        return 'enabled'
    else:
        return 'disabled'
amount_var11b = StringVar(tab11)
amount_var11b.set(0.0)
def change_dropdown_11a(*args):
    """Score criterion 11 from the yes/no answer and, when toxic reagents are
    used, from their amount; then refresh the chart.

    NOTE(review): this callback re-creates the label/entry/button widgets on
    every invocation, stacking them on the same grid cells; consider building
    them once outside the callback.
    """
    global var_11
    var_11 = var_11a_text_choices[var_11a_text.get()]
    Label(tab11, text='Amount of toxic reagents in g or mL:', wraplength=300, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
    reagent_entry_11 = ttk.Entry(tab11, textvariable=amount_var11b, width=15, state=enabler_11b())
    reagent_entry_11.grid(sticky='w', row=4, column=0, padx=8, pady=8)
    reagent_entry_11.bind('<Return>', change_dropdown_11a)
    ttk.Button(tab11, text='Set', command=change_dropdown_11a).grid(row=5, column=0, padx=8, pady=8)
    if float(var_11a_text_choices[var_11a_text.get()]) != 1.0:
        try:
            amount = float(amount_var11b.get())  # parse once
            if amount < 0.1:
                var_11 = 0.8
            elif amount > 50.0:
                var_11 = 0.0
            else:
                var_11 = abs(-0.129 * log(amount) + 0.5012) # absolute value to avoid negative
        except ValueError:
            tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an integer, e.g. 0.14 or 21.')
    chartPlotter()
    print(var_11)
var_11a_text.trace('w', change_dropdown_11a)
# weight selector for criterion 11
W_11 = weightChoice(10, 0, tab11, weight_11)
# *****************************************************************************************************************
# TAB 12
# *****************************************************************************************************************
# --- Tab 12: operator safety (criterion 12) ---
content_12 = tab(tab12, text1='Operator\'s safety should be increased.',
                 text2='Select the threats which are not avoided:')
varA = IntVar()
varB = IntVar()
varC = IntVar()
varD = IntVar()
varE = IntVar()
varF = IntVar()
varG = IntVar()
# one checkbox per unavoided hazard (label typos fixed: 'bioaccumulative', 'explosive')
ttk.Checkbutton(tab12, text='toxic to aquatic life', variable=varA).grid(row=2, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='bioaccumulative', variable=varB).grid(row=3, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='persistent', variable=varC).grid(row=4, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='highly flammable', variable=varD).grid(row=5, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='highly oxidizable', variable=varE).grid(row=6, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='explosive', variable=varF).grid(row=7, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='corrosive', variable=varG).grid(row=8, sticky='w', padx=8)
def testPrint():
    """Count the ticked hazards, map the count to the criterion-12 score and refresh the chart."""
    global var_12a, var_12
    var_12a = sum(v.get() for v in (varA, varB, varC, varD, varE, varF, varG))
    # 0 hazards -> 1.0; each hazard costs 0.2; 5 or more -> 0.0
    var_12 = {0: 1.0, 1: 0.8, 2: 0.6, 3: 0.4, 4: 0.2}.get(var_12a, 0.0)
    print ('var_12: %f' % var_12)
    chartPlotter()
ttk.Button(tab12, text='Set', command=testPrint).grid(row=9, column=0, padx=8, pady=8)
# weight selector for criterion 12
W_12 = weightChoice(10, 0, tab12, weight_12)
##################################################################################################
# pack the tab parent and its tabs:
tab_parent.pack(expand=1, fill='both')
# ttk.Button(leftFrame, text='Print score', command=printScore).pack(side=BOTTOM)
# generate the default chart at the beginning:
chartPlotter()
############################################################################################################
# generate the report in .pdf:
def generateReport():
    """Assemble the Green-Index report: current chart image, a timestamped
    header and one table row per criterion (score cell coloured on a
    red-yellow-green scale), then prompt for a location to save the PDF."""
    def colorMapper(value):
        """Map a score in 0.0..1.0 to an (r, g, b, a) tuple of 0-255 ints on a red-yellow-green scale."""
        cmap = LinearSegmentedColormap.from_list('rg', ["red", "yellow", "green"], N=256)
        mapped_color = int(value * 255)
        color = cmap(mapped_color)
        return tuple(int(band * 255) for band in color)
    pdf = FPDF('P', 'mm', 'A4')
    pdf.set_font('Arial', '', 10)
    pdf.add_page()
    pdf.set_margins(left=30, top=30)
    # save a temp image of the current chart, embed it, then delete the temp file:
    plt.savefig('temp_figure.png', bbox_inches='tight')
    # insert image (image, x, y, width):
    pdf.image('temp_figure.png', 107, 10, 80)
    os.remove('temp_figure.png')
    # insert title (Arial, 'B'old, 14 pt):
    pdf.set_font('Arial', 'B', 14.0)
    pdf.ln(10)
    pdf.cell(100, 12, 'Green index report sheet')
    pdf.set_font('Arial', '', 12)
    pdf.ln(15)
    now = datetime.now()
    pdf.cell(100, 12, now.strftime("%d/%m/%Y %H:%M:%S"))
    # Text height is the same as current font size
    th = pdf.font_size + 2
    # a function to change the colour of a field based on the value:
    def fieldColor(score):
        """Set the PDF fill colour from the score's red-yellow-green mapping (alpha dropped)."""
        x, y, z = colorMapper(score)[:3]
        pdf.set_fill_color(x, y, z)
    pdf.ln(70)
    # populate the table
    # Table head:
    pdf.set_font('Arial', 'B', 10)
    pdf.cell(120, th, 'Criteria', border=0)
    pdf.cell(15, th, 'Score', border=0)
    pdf.cell(15, th, 'Weight', border=0)
    pdf.set_font('Arial', '', 10)
    pdf.ln(th)
    pdf.set_fill_color(240, 240, 240)
    # One entry per criterion: (text, score, weight variable, needs multi_cell).
    # Long texts use multi_cell (line height th*0.8) so they wrap inside the
    # 120 mm column; the rest use a single cell of height th*1.6.
    rules = [
        ('1. Direct analytical techniques should be applied to avoid sample treatment.', var_1, weight_1, True),
        ('2. Minimal sample size and minimal number of samples are goals.', var_2, weight_2, False),
        ('3. If possible, measurements should be performed in situ.', var_3, weight_3, False),
        ('4. Integration of analytical processes and operations saves energy and reduces the use of reagents.', var_4, weight_4, True),
        ('5. Automated and miniaturized methods should be selected.', var_5, weight_5, False),
        ('6. Derivatization should be avoided.', var_6, weight_6, False),
        ('7. Generation of a large volume of analytical waste should be avoided, and proper management of analytical waste should be provided.', var_7, weight_7, True),
        ('8. Multi-analyte or multi-parameter methods are preferred versus methods using one analyte at a time.', var_8, weight_8, True),
        ('9. The use of energy should be minimized.', var_9, weight_9, False),
        ('10. Reagents obtained from renewable sources should be preferred.', var_10, weight_10, False),
        ('11. Toxic reagents should be eliminated or replaced.', var_11, weight_11, False),
        ("12. Operator's safety should be increased.", var_12, weight_12, False),
    ]
    for text, score, weight, multi in rules:
        # Save top coordinate and compute x position of the score cell
        top = pdf.y
        offset = pdf.x + 120
        if multi:
            pdf.multi_cell(120, th * 0.8, text, border=1, fill=True)
        else:
            pdf.cell(120, th * 1.6, text, border=1, fill=True)
        # Reset y coordinate and move to the computed offset
        pdf.y = top
        pdf.x = offset
        fieldColor(score)
        pdf.cell(15, th * 1.6, str(round(score, 2)), border=1, fill=True, align='C')
        pdf.set_fill_color(240, 240, 240)
        pdf.cell(15, th * 1.6, str(weight.get()), border=1, fill=True, align='C')
        pdf.ln(th * 2)
    # output the pdf:
    def savePDF():
        """Ask the user for a target path and write the PDF there."""
        ftypes = [('PDF file', '.pdf'), ('All files', '*')]
        filename = filedialog.asksaveasfilename(filetypes=ftypes, defaultextension='.pdf')
        # save the pdf
        pdf.output(filename, 'F')
    savePDF()
# add the report functionality to the file menu:
FileMenu.add_command(label='Generate report', command=generateReport)
FileMenu.add_separator()
# add a button to refresh the chart:
# the lambda runs the score printout, clears the chart frame, redraws the pie
# chart and dumps the current variables in one click
refreshButton = ttk.Button(leftFrame, text='RE-GENERATE PLOT', width=20,
                           command=lambda: [printScore(),
                                            clearFrame(rightFrame),
                                            pieChart(),
                                            print_variables()
                                            ])
refreshButton.pack(side=BOTTOM, anchor=SE)
##########################################################################################################
##################################################################################################
root.mainloop() # to keep the window continuously on, otherwise it shall disappear
| Casivelaunus/Green_index | Main_window.py | Main_window.py | py | 62,363 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.filedialog.asksaveasfilename",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 70,
"usage_type": "call"
},
{
... |
30810878899 |
import serial
import KeyConfig as kc
import struct
import socket
IP = '192.168.1.200'
PORT = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.setblocking(False)
def read_data():
    """Read one line from the serial port; return it with the CRLF terminator
    removed, or None when the read yielded no bytes."""
    raw: bytes = ser.readline()
    if not raw:
        return None
    return raw.decode().replace('\r\n', '')
def send_color_data(color: tuple):
    """Pack *color* (three floats) plus a trailing 1.0 into four little-endian
    native floats and send them to the UDP endpoint."""
    payload = struct.pack("4f", *color, 1)
    s.sendto(payload, (IP, PORT))
    print("send: %s" % payload)
def send_command_data(command: float):
    """Pack a single command float and send it to the UDP endpoint."""
    payload = struct.pack("f", command)
    s.sendto(payload, (IP, PORT))
    print("send: %s" % payload)
if __name__ == '__main__':
    # open the Arduino's serial port -- port name is Windows-specific (COM5)
    ser = serial.Serial('COM5', 9600)
    while True:
        # read one CRLF-terminated frame and strip the terminator
        data: bytes = ser.read_until(expected=b"\r\n")
        data = data.replace(b"\r\n",b'')
        # interpret the remaining bytes as a big-endian key code
        int_data = int.from_bytes(data,byteorder="big")
        print(int_data)
        if len(data) > 0:
            # the key code maps either to a colour tuple or to a command float (see KeyConfig)
            if kc.colors.get(int_data):
                print("ok")
                color_to_send = kc.colors[int_data]
                send_color_data(color_to_send)
            elif kc.commands.get(int_data):
                print("command")
                command_to_send = kc.commands[int_data]
                send_command_data(command_to_send)
| AssoAndrea/UE-ArduinoLightController | PythonMiddleware/main.py | main.py | py | 1,249 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "socket.IPPRO... |
2112950145 | import json
class SettingFile(object):
    """A JSON-backed settings store.

    Keys are restricted to those present in *defaults*; every write is
    persisted to disk immediately and fires any callbacks registered for
    that key.
    """
    def __init__(self, path, defaults):
        self._defaults = defaults
        self._path = path
        self._data = dict()
        self._callbacks = dict()
        for setting in defaults:
            self._callbacks[setting] = callback_assist()
        self.load()
    def load(self):
        '''Attempts to load the file, recreating it from the defaults when it
        is missing or unparseable.'''
        try:
            # context manager closes the handle even if parsing fails
            with open(self._path) as infile:
                new_data = json.load(infile)
        except (OSError, ValueError):
            # file absent/unreadable (OSError) or corrupt JSON (ValueError):
            # write the defaults back and start from a copy of them
            with open(self._path, 'w') as outfile:
                json.dump(self._defaults, outfile, indent=2, sort_keys=True)
            new_data = self._defaults.copy()
        # keep only known settings, filling gaps from the defaults; assigning
        # through __setitem__ fires callbacks and persists each value
        for setting in self._defaults:
            if setting in new_data:
                self[setting] = new_data[setting]
            else:
                self[setting] = self._defaults[setting]
    def save(self):
        '''Writes the current settings to disk as pretty-printed JSON.'''
        with open(self._path, 'w') as outfile:
            json.dump(outfile is None and None or self._data, outfile, indent=2, sort_keys=True)
    def register_callback(self, setting_name, funct, args=None):
        '''Registers *funct* (with optional *args*) to fire when *setting_name* changes.'''
        self._callbacks[setting_name].add_callback(funct, args)
    def __getitem__(self, *args):
        # delegates to dict.get, so a missing key yields None (or the supplied default)
        return self._data.get(*args)
    def __setitem__(self, key, val):
        if key not in self._defaults:
            raise ValueError('Unknown Setting')
        else:
            self._data[key] = val
            self._callbacks[key].fire()
            self.save()
    def __str__(self):
        return "Settings in {}: {}".format(self._path, self._data)
    def __repr__(self):
        return "setting_file({}, {})".format(self._path, self._defaults)
class callback_assist(object):
    '''This class represents a list of functions with bound arguments that can
    all be fired at once.'''
    def __init__(self):
        self.callbacks = list()
    def add_callback(self, funct, args=None):
        '''Adds a new callback; *args* is an optional list of positional
        arguments passed to *funct* when fired.'''
        if args is None:  # identity check, not equality, for the None sentinel
            args = list()
        self.callbacks.append((funct, args))
    def fire(self):
        '''Fires all the callbacks in registration order.'''
        for funct, args in self.callbacks:
            funct(*args)
| sdfgeoff/newsscroller | setting_file.py | setting_file.py | py | 2,100 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 34,
"usage_type": "call"
}
] |
41341957139 | import numpy as np
from scipy import signal
from scipy.signal import iirfilter
from scipy.signal import lfilter
def Implement_Notch_Filter(fs: float, band: float, freq: float, ripple: float, order: int, filter_type: str, data: np.ndarray) -> np.ndarray:
    r"""Apply an IIR band-stop (notch) filter centred on *freq* to *data*.

    Args:
        fs: sampling frequency in Hz
        band: width of the stop band around the centre-line frequency, in Hz
            (a scalar -- the previous ``list`` annotation was wrong, the value
            is divided by 2 below)
        freq: the centre-line frequency to be filtered, in Hz
        ripple: the maximum pass-band ripple that is allowed in dB (only used
            by the Chebyshev/elliptic designs)
        order: the filter order. For FIR notch filters this is best set to 2 or 3,
            IIR filters are best suited for high values of order.
        filter_type: the type of the filter; 'butter', 'bessel', 'cheby1', 'cheby2', 'ellip'
        data: the signal to be filtered

    Returns:
        np.ndarray: the filtered signal
    """
    nyq = fs / 2.0
    # normalise the stop-band edges to the Nyquist frequency as scipy expects
    low = (freq - band / 2.0) / nyq
    high = (freq + band / 2.0) / nyq
    b, a = iirfilter(order, [low, high], rp=ripple, btype='bandstop', analog=False, ftype=filter_type)
    filtered_data = lfilter(b, a, data)
    return filtered_data
def iirnotch_filter(fs, notch_frequency, quality_factor, data):
    r"""Remove a single frequency component from *data* with a zero-phase notch.

    Args:
        fs: frequency sampling, in Hz
        notch_frequency: the center-line frequency to be filtered
        quality_factor: the quality factor. see details in scipy.signal.iirnotch()
        data: signal the filter will be applied to

    Returns:
        np.array: signal after forward-backward (zero-phase) filtering
    """
    numerator, denominator = signal.iirnotch(notch_frequency, quality_factor, fs)
    # filtfilt runs the filter forwards and backwards -> no phase distortion
    return signal.filtfilt(numerator, denominator, data)
# freq, h = signal.freqz(b_notch, a_notch, fs = srate)
# plt.figure('filter')
# plt.plot( freq, 20*np.log10(abs(h)))
### Example
# import matplotlib.pyplot as plt
# import numpy as np
# from scipy import signal
# # Create/view notch filter
# samp_freq = 1000 # Sample frequency (Hz)
# notch_freq = 60.0 # Frequency to be removed from signal (Hz)
# quality_factor = 30.0 # Quality factor
# b_notch, a_notch = signal.iirnotch(notch_freq, quality_factor, samp_freq)
# freq, h = signal.freqz(b_notch, a_notch, fs = samp_freq)
# plt.figure('filter')
# plt.plot( freq, 20*np.log10(abs(h)))
#
# # Create/view signal that is a mixture of two frequencies
# f1 = 17
# f2 = 60
# t = np.linspace(0.0, 1, 1_000)
# y_pure = np.sin(f1 * 2.0*np.pi*t) + np.sin(f2 * 2.0*np.pi*t)
# plt.figure('result')
# plt.subplot(211)
# plt.plot(t, y_pure, color = 'r')
#
# # apply notch filter to signal
# y_notched = signal.filtfilt(b_notch, a_notch, y_pure)
#
# # plot notch-filtered version of signal
# plt.subplot(212)
# plt.plot(t, y_notched, color = 'r') | Mariellapanag/pyiEEGfeatures | src/pyiEEGfeatures/IIR_notch_filter.py | IIR_notch_filter.py | py | 2,731 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal.iirfilter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lfilter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scipy.sig... |
8785489698 | import gymnasium as gym
from IPython import display
import matplotlib.pyplot as plt
from utils.visualize import visualize_policy, visualize_q, visualize_model, visualize_v
class JupyterRender(gym.Wrapper):
    """Gym wrapper that renders the environment frame inline in a Jupyter
    notebook (via matplotlib + IPython display), optionally alongside
    value / q-value / policy / learned-model visualisations."""
    def __init__(self, env):
        super().__init__(env)
        self.env = env
    def render(self, title='Environment', v=None, q=None, policy=None, model_r=None, model_ns=None):
        """Draw the current frame plus any supplied artefacts in a 2x2 grid.

        Only the keyword arguments that are not None are drawn; each gets its
        own subplot next to the environment frame.
        """
        # collect only the artefacts that were actually supplied
        viz_list = {}
        if v is not None:
            viz_list['v'] = v
        if q is not None:
            viz_list['q'] = q
        if policy is not None:
            viz_list['policy'] = policy
        if model_r is not None:
            viz_list['model_r'] = model_r
        if model_ns is not None:
            viz_list['model_ns'] = model_ns
        fig = plt.figure(figsize=(8, 8))
        ax_list = [fig.add_subplot(2, 2, 1)]
        img = ax_list[0].imshow(self.env.render()) # prepare to render the environment by using matplotlib and ipython display
        ax_list[0].set_title(title)
        # add one extra subplot per supplied artefact (positions 2..4 of the 2x2 grid)
        pos = 2
        for i in range(pos, 2 + len(viz_list)):
            ax_list.append(fig.add_subplot(2, 2, i))
        ax_index = 1
        for key, value in viz_list.items():
            if key == 'policy':
                visualize_policy(value, ax_list[ax_index], self.env.nrow, self.env.ncol)
            elif key == 'v':
                visualize_v(value, ax_list[ax_index], self.env.nrow, self.env.ncol)
            elif key == 'q':
                visualize_q(value, ax_list[ax_index], self.env.nrow, self.env.ncol)
            else:
                # model tables share one visualizer; pick the subplot title accordingly
                if key == 'model_r':
                    title = 'Reward Model'
                elif key == 'model_ns':
                    title = 'Next State Model'
                visualize_model(value, ax_list[ax_index], self.env.nrow, self.env.ncol, title)
            ax_index += 1
        # hide tick marks and labels on every subplot
        for ax in ax_list:
            ax.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
        # redraw in place inside the notebook cell
        display.display(plt.gcf())
        display.clear_output(wait=True)
        plt.close()
if __name__ == '__main__':
    # smoke test: wrap a deterministic FrozenLake environment
    env = gym.make("FrozenLake-v1", render_mode='rgb_array', is_slippery=False) # define the environment.
    env = JupyterRender(env)
| moripiri/Reinforcement-Learning-on-FrozenLake | utils/wrapper.py | wrapper.py | py | 2,250 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "gymnasium.Wrapper",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "util... |
27052908549 | from typing import List
class TreeNode:
    """A binary-tree node: a payload value plus optional left/right children."""
    def __init__(self, x):
        # children start empty; callers attach TreeNode instances directly
        self.val, self.left, self.right = x, None, None
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Return the node values of *root* grouped by depth, top to bottom.

        An empty tree yields an empty list.
        """
        if root is None:
            return []
        levels = []
        frontier = [root]
        while frontier:
            # record this depth's values, then descend to all children at once
            levels.append([node.val for node in frontier])
            frontier = [child
                        for node in frontier
                        for child in (node.left, node.right)
                        if child]
        return levels
if __name__ == "__main__":
    from utils.binary_tree import from_nums
    # LeetCode 102 example; expected output: [[3], [9, 20], [15, 7]]
    nums = [3, 9, 20, None, None, 15, 7]
    root = from_nums(nums)
    vals = Solution().levelOrder(root)
    print(vals)
| ikedaosushi/leetcode | problems/python/levelOrder.py | levelOrder.py | py | 1,094 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "utils.binary_tree.from_nums",
"line_number": 41,
"usage_type": "call"
}
] |
72167483303 | """
This file holds the interaction sites class used in simulation.py.
"""
import warnings
from random import random
from copy import deepcopy
from itertools import combinations
from math import comb
import numpy as np
class InteractionSites:
"""A class designed to host interactions between persons within specific locations.
There are currently 7 different locations that can host interactions between
person objects.
All attributes are passed through the sim_obj, which accesses the simulation
configuration file. Outlined below are the main object attributes that provide the
interaction functionality.
Attributes
----------
grade_A_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to represent resturants, gas stations, retail stores, etc. Any location where you
do not visit often, but attend a wide variety of them.
grade_B_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to represent a gym, grocery store, etc. Any location where
you visit semi-often, and are likly to visit the same one, but this may varry.
grade_C_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to represent offices or schools. Any location where they are
visited almost every workday, and you almost always visit the same one.
house_sites : :obj:`np.array` of :obj:`list` of :obj:`int`
Visited by every person each day, and hosts interactions between members
of the same household. Infection spread at home is not defined by explicit contacts,
but by a known spread factor.
lect_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate university lecture hall interactions. They are only visited by students.
study_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate study environments at university, on-campus (library, bookable rooms, ...).
They are only visited by students.
food_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate cafeteria and restaurant interactions on-campus. Only visited by students.
res_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate the student residences on campus. They are only visited by first year students.
stud_house_sites : :obj:`np.array` of :obj:`list` of :obj:`int`
Visited by every student each day, and hosts interactions between members
of the same household. Infection spread at home is not defined by explicit contacts,
but by a known spread factor.
"""
    def __init__(self, sim_obj):
        """ __init__ method docstring.

        Parameters
        ----------
        sim_obj : :obj:`cv19.simulation.simulation`
            The encompassing simulation object hosting the simulation.
        """
        # Set attributes from config file
        self.load_attributes_from_sim_obj(sim_obj)
        # per-day counters of household interactions, split between the general
        # population and student households
        self.daily_interactions = {"HOUSE_GENERAL": np.zeros(self.nDays),
                                   "HOUSE_STUDENT": np.zeros(self.nDays)}
        # Generates a list of people that go to different grade X sites
        # len(grade_X_sites) is how many sites there are; len(grade_X_sites[i]) is how many people go to that site
        self.grade_A_sites = self.init_grade(grade_code="A")
        self.grade_B_sites = self.init_grade(grade_code="B")
        self.grade_C_sites = self.init_grade(grade_code="C")
        # deep copies so site membership can be mutated without touching the population object
        self.house_sites = deepcopy(self.pop.household)
        self.house_indices = deepcopy(self.pop.house_ppl_i)
        # Students Stuff #
        self.stud_house_sites = deepcopy(self.pop.stud_houses)
        self.stud_house_indices = deepcopy(self.pop.house_stud_i)
        self.lect_sites = self.init_uni(grade_code="LECT")
        self.study_sites = self.init_uni(grade_code="STUDY")
        self.food_sites = self.init_uni(grade_code="FOOD")
        self.res_sites = self.init_res(grade_code="RES")
        # running count of infections generated on the current day
        self.daily_new_infections = 0
def load_attributes_from_sim_obj(self, sim_obj):
    """Copy configuration values from the simulation object onto this instance.

    Every key under ``interaction_sites_data`` in the parameters and under
    ``spread_data`` in the disease parameters becomes an attribute of this
    object.  Also stores the virus-type code maps and references to the
    population, policy, and simulation length.

    Parameters
    ----------
    sim_obj : :obj:`cv19.simulation.simulation`
        The encompassing simulation object hosting the simulation.
    """
    # Interaction-site configuration values become attributes directly.
    site_data = sim_obj.parameters["interaction_sites_data"]
    for name, value in site_data.items():
        setattr(self, name, value)
    # Disease spread parameters become attributes as well.
    spread_data = sim_obj.disease_parameters["spread_data"]
    for name, value in spread_data.items():
        setattr(self, name, value)
    # Virus type names plus the reverse (id -> name) lookup.
    self.variant_codes = sim_obj.variant_codes
    self.variant_code_map = dict((v_id, v_name) for v_name, v_id in self.variant_codes.items())
    # References to the actual simulation objects.
    self.pop = sim_obj.pop
    self.policy = sim_obj.policy
    self.nDays = sim_obj.parameters["simulation_data"]["nDays"]
def init_grade(self, grade_code):
    """Associate members of the population with one grade of interaction site.

    Initializes a non-student interaction-site type by creating, for each
    individual site, the list of person indices associated with it.  Each
    eligible person is associated with a number of distinct sites drawn from
    a normal "loyalty" distribution (clipped to [0, num_sites]).

    Parameters
    ----------
    grade_code : str
        Code used to index the values to create this type of site from the config file.

    Returns
    -------
    grade_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
        One array per interaction site of this type, holding the indices of
        the people associated with (able to visit) that site.
    """
    loyalty_mean = self.grade_loyalty_means[grade_code]
    loyalty_std = self.grade_loyalty_stds[grade_code]
    students_interact = self.students_participate[grade_code]
    # Calculate number of sites
    num_sites = self.calculate_num_sites(grade_code=grade_code)
    grade_sites = [[] for _ in range(num_sites)]
    for person in self.pop.get_population():
        # Skip students when the student module is on and this site type is
        # not one students participate in.
        if students_interact or not (self.students_on and person.job == 'Student'):
            # Draw how many distinct sites this person is loyal to
            # (abs+round of a normal draw, capped at the number of sites).
            num_diff_sites = abs(round(np.random.normal(loyalty_mean, loyalty_std)))
            num_diff_sites = num_diff_sites if num_diff_sites <= num_sites else num_sites
            # Pick that many distinct sites for this person
            person_sites = np.random.choice(num_sites, num_diff_sites, replace=False)
            for site in person_sites:
                # Assign this person to that site
                grade_sites[site].append(person.get_index())
    # Convert everything to numpy arrays
    grade_sites = [np.asarray(site) for site in grade_sites]
    # Initialize the daily interaction counter for this site type
    self.daily_interactions[grade_code] = np.zeros(self.nDays)
    return grade_sites
def init_uni(self, grade_code):
    """Associate members of the student population with one university site type.

    Initializes a student-only interaction-site type (lectures, study spots,
    food) by creating, for each individual site, the list of student indices
    associated with it, using the same normal "loyalty" draw as init_grade.

    Parameters
    ----------
    grade_code : str
        Code used to index the values to create this type of site from the config file.

    Returns
    -------
    grade_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
        One array per interaction site of this type, holding the indices of
        the students associated with (able to visit) that site.
    """
    loyalty_mean = self.grade_loyalty_means[grade_code]
    loyalty_std = self.grade_loyalty_stds[grade_code]
    # Calculate number of sites
    num_sites = self.calculate_num_sites(grade_code=grade_code)
    grade_sites = [[] for _ in range(num_sites)]
    for student in self.pop.get_population():
        # Only students visit university sites.
        if student.job == 'Student':
            # Draw how many distinct sites this student is loyal to
            # (abs+round of a normal draw, capped at the number of sites).
            num_diff_sites = abs(round(np.random.normal(loyalty_mean, loyalty_std)))
            num_diff_sites = num_diff_sites if num_diff_sites <= num_sites else num_sites
            # Pick that many distinct sites for this student
            student_sites = np.random.choice(num_sites, num_diff_sites, replace=False)
            for site in student_sites:
                # Assign this person to that site
                grade_sites[site].append(student.get_index())
    # Convert everything to numpy arrays
    grade_sites = [np.asarray(site) for site in grade_sites]
    # Initialize the daily interaction counter for this site type
    self.daily_interactions[grade_code] = np.zeros(self.nDays)
    return grade_sites
def init_res(self, grade_code):
    """Associate resident students with the on-campus residence sites.

    Initializes the residence interaction sites by creating, for each
    individual site, the list of student indices associated with it.  Only
    students living in a residence room (per the population's residence and
    student-house bookkeeping) are assigned.

    Parameters
    ----------
    grade_code : str
        Code used to index the values to create this type of site from the config file.

    Returns
    -------
    grade_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
        One array per residence site, holding the indices of the students
        associated with (able to visit) that site.
    """
    loyalty_mean = self.grade_loyalty_means[grade_code]
    loyalty_std = self.grade_loyalty_stds[grade_code]
    # Calculate number of sites
    num_sites = self.calculate_num_sites(grade_code=grade_code)
    grade_sites = [[] for _ in range(num_sites)]
    for room in self.pop.get_residences():
        for student_i in self.stud_house_indices[room]:
            # Draw how many distinct sites this student is loyal to
            # (abs+round of a normal draw, capped at the number of sites).
            num_diff_sites = abs(round(np.random.normal(loyalty_mean, loyalty_std)))
            num_diff_sites = num_diff_sites if num_diff_sites <= num_sites else num_sites
            # Pick that many distinct sites for this student
            student_sites = np.random.choice(num_sites, num_diff_sites, replace=False)
            for site in student_sites:
                # Assign this person to that site
                grade_sites[site].append(student_i)
    # Convert everything to numpy arrays
    grade_sites = [np.asarray(site) for site in grade_sites]
    # Initialize the daily interaction counter for this site type
    self.daily_interactions[grade_code] = np.zeros(self.nDays)
    return grade_sites
def daily_reset(self):
    """Reset per-day interaction-site state at the end of each day.

    Prunes dead agents out of every site membership array and zeroes the
    daily new-infection counter.

    Returns
    -------
    None
    """
    self.remove_dead()
    self.daily_new_infections = 0
def calculate_num_sites(self, grade_code):
    """Determine how many individual sites exist for one site grade.

    An explicit count in the ``site_num`` config wins; otherwise the count
    is derived from the population size divided by the configured site size
    (always at least one site).

    Parameters
    ----------
    grade_code : str
        Code used to index the values to create this type of site from the config file.

    Returns
    -------
    num_sites : int
        The number of sites to be used for that interaction site grade.
    """
    if grade_code in self.site_num:
        configured = self.site_num[grade_code]
        if configured == 0:
            # A zero entry is allowed but almost certainly unintended, so warn.
            warnings.warn(f"Site type '{grade_code}' size set to 0. No interaction sites of this type created.")
        return configured
    derived = round(self.pop.get_population_size() / self.site_size[grade_code])
    return max(derived, 1)
def remove_dead(self):
    """Remove dead agents from every interaction-site membership array.

    Fetches the list of dead agents once from the population and filters
    each site's person-index array down to the agents that are still alive.
    Household structures are not modified here — only the visitable sites.

    Returns
    -------
    None
    """
    # Create list of all dead agents (fetched once, shared by all site types)
    dead_agents = self.pop.get_dead()

    def _prune(site_arrays):
        # Filter each site's member array in place, keeping only agents that
        # are NOT in the dead list (note the invert argument).
        for i, site_array in enumerate(site_arrays):
            mask_alive = np.isin(site_array, dead_agents, invert=True)
            site_arrays[i] = site_array[mask_alive]

    # Apply identical pruning to every site type instead of repeating the loop
    # seven times (previous copy-pasted version drifted easily).
    for site_arrays in (self.grade_A_sites, self.grade_B_sites, self.grade_C_sites,
                        self.lect_sites, self.study_sites, self.food_sites,
                        self.res_sites):
        _prune(site_arrays)
def will_visit_site(self, site_array, will_go_prob):
    """Determine who will visit each site of one site type on a given day.

    Each associated person first decides whether to go out at all (quarantined
    people use the reduced probability self.quarantine_isolation_factor), and
    is then assigned to exactly one of the sites they are associated with,
    chosen uniformly at random.

    Parameters
    ----------
    site_array : :obj:`np.array` of :obj:`np.array` of :obj:`int`
        An array holding lists (one for each interaction site) of the index of each person
        associated with each of the individual sites.
    will_go_prob : float
        The probability that any given person in site_array will visit this type of site.

    Returns
    -------
    will_visit_grade : :obj:`list` of :obj:`np.array` of :obj:`int`
        One array per site, holding the indexes of the people visiting that
        site on this day.
    """
    # Unique ids of everyone associated with at least one site of this type.
    person_ids = np.unique(np.concatenate(site_array))
    # Create array of attendance probabilities (reduced while quarantined).
    prob_attendence = [self.quarantine_isolation_factor
                       if self.pop.get_person(person).is_quarantined() else will_go_prob
                       for person in person_ids]
    # Select the subset of people who actually choose to go out today.
    person_will_go_mask = np.random.binomial(1, p=prob_attendence).astype(bool)
    person_ids = person_ids[person_will_go_mask]
    # Boolean matrix of people (rows) x sites (columns): entry [i, s] is True
    # when person i is associated with site s.
    person_site_array = np.zeros(shape=(person_ids.shape[0], len(site_array)), dtype=bool)
    for s, site in enumerate(site_array):
        mask = np.isin(person_ids, site)
        person_site_array[mask, s] = True
    # For each person draw a random index below their number of available
    # sites (every person_id came from site_array, so high >= 1 per row).
    high = person_site_array.sum(axis=-1)
    random_site_index = np.random.randint(low=0, high=high)
    # argsort each boolean row descending so a person's available sites come
    # first, then use the random draw to pick one of those available sites.
    site_indexes_argsorted = np.argsort(person_site_array, axis=-1)[..., ::-1]
    person_site_index = site_indexes_argsorted[np.arange(site_indexes_argsorted.shape[0]),
                                               random_site_index]
    # Regroup the chosen assignments by site.
    will_visit_grade = [person_ids[np.where(person_site_index == s)[0]]
                       for s in range(len(site_array))]
    return will_visit_grade
def site_interaction(self, will_go_array, day, personal, grade_code):
    """Host the day's interactions for one interaction-site type.

    All people visiting the same site on a day have a chance to interact with
    each other; each successful cross-infection is recorded and applied at the
    end of the day.  Does not return a value — infections are managed within
    the function.

    Parameters
    ----------
    will_go_array : :obj:`np.array` of :obj:`np.array` of :obj:`int`
        An array holding an array for each site of this interaction site type. Each individual list
        holds the indexes of people that will visit that site for this day.
    day : int
        The day value that this function is being called on in the encompassing simulation class.
        Used as input to the infect function after infections have been determined.
    personal : bool
        Used to indicate if the type of interaction at this site is personal, which relates to
        contact tracing abilities.
    grade_code : str
        Code used to index the values to create this type of site from the config file.
    """
    # Per-person flags and virus ids for infections caused today; applied at the end.
    new_infections = np.zeros(self.pop.get_population_size(), dtype=bool)
    new_infection_type = np.zeros(self.pop.get_population_size(), dtype=int)
    total_interactions_count = 0
    for ppl_going in will_go_array:
        infected_persons = [index for index in ppl_going if self.pop.get_person(index).is_infected()]
        recovered_persons = [index for index in ppl_going if self.pop.get_person(index).is_recovered()]
        # Generate a list of how many interactions ppl have at the site
        num_interactions = self.calc_interactions(site_day_pop=len(ppl_going))
        # Each pairing consumes two interaction slots, hence the half-sum.
        total_interactions_count += np.sum(num_interactions) // 2
        if len(infected_persons) == 0 or (len(infected_persons) + len(recovered_persons) == len(ppl_going)):
            continue  # No ppl to infect here or no one already infected
        # Pair people until at most one person has interactions left.
        while np.sum(num_interactions > 0) > 1:
            # grab the highest interactor
            person_1 = np.argmax(num_interactions)
            # Rejection-sample a partner that is not person_1 and still has
            # interactions left; terminates because the loop condition
            # guarantees at least two people with positive counts.
            person_2 = np.random.randint(num_interactions.shape[0])
            while person_2 == person_1 or num_interactions[person_2] <= 0:
                person_2 = np.random.randint(num_interactions.shape[0])
            # Get the actual people at these indexes
            person_1_index = ppl_going[person_1]
            person_2_index = ppl_going[person_2]
            # Getting the Person objects and logging the contacts
            p1_obj = self.pop.get_person(person_1_index)
            p2_obj = self.pop.get_person(person_2_index)
            p1_obj.log_contact(p2_obj, day=day, personal=personal)
            p2_obj.log_contact(p1_obj, day=day, personal=personal)
            # Spread is only possible when exactly one of the pair is infected
            person_1_infected = p1_obj.is_infected()
            person_2_infected = p2_obj.is_infected()
            if person_1_infected != person_2_infected:
                # Have an interaction between those people
                did_infect = self.interact(p1_obj, p2_obj)
                if did_infect:
                    if person_1_infected:
                        new_infections[person_2_index] = True
                        new_infection_type[person_2_index] = self.pop.get_person(person_1_index).get_virus_type()
                    else:
                        new_infections[person_1_index] = True
                        new_infection_type[person_1_index] = self.pop.get_person(person_2_index).get_virus_type()
            # Lower the interaction count for those people
            num_interactions[person_1] -= 1
            num_interactions[person_2] -= 1
    # Update people who get infected only at the end. Assuming if I get CV19
    # at work, I probably won't spread at the store that night.
    new_infection_indexes = np.where(new_infections)[0]
    self.daily_new_infections += len(new_infection_indexes)
    for new_infection in new_infection_indexes:
        self.pop.infect(index=new_infection, virus_type=new_infection_type[new_infection], day=day)
    # Update total daily interactions count
    self.daily_interactions[grade_code][day] = total_interactions_count
def calc_interactions(self, site_day_pop):
    """Draw the number of interactions for each visitor of a site today.

    The draw is a one-sided triangular (i.e. linear) distribution over
    ``[0, site_day_pop / 12]``: zero interactions is the most likely outcome,
    and the maximum is scaled down by the twelve-hour day because visitors
    are dispersed over the day rather than all present at once.  The scaler
    is deliberately not a config parameter — the hours in a day should not
    vary between simulations.

    Parameters
    ----------
    site_day_pop : int
        The total number of people at that specific interaction site this day.

    Returns
    -------
    number_of_interactions : :obj:`np.array` of :obj:`int`
        Per-person interaction counts; empty when nobody visits.
    """
    day_hours_scaler = 12
    if not site_day_pop:
        return np.array([])
    draws = np.random.triangular(left=0, mode=0,
                                 right=site_day_pop / day_hours_scaler,
                                 size=site_day_pop)
    return np.round(draws).astype(int)
def interact(self, person_1, person_2):
    """Resolve a single two-way contact and decide if infection spreads.

    Starts from the base spread probability of the variant carried by the
    infected party, scales it down for masks (only while a mandate is active)
    and for vaccination of either party, then samples the outcome.

    Parameters
    ----------
    person_1 : :obj:`cv19.person.Person`
        First person in the two-way interaction.
    person_2 : :obj:`cv19.person.Person`
        Second person in the two-way interaction.

    Returns
    -------
    bool
        Whether or not the interaction caused the spread of the infection.
    """
    infected_1 = person_1.is_infected()
    infected_2 = person_2.is_infected()
    # Base transmission probability of whichever variant is present.
    virus_type = person_1.get_virus_type() if infected_1 else person_2.get_virus_type()
    spread_prob = self.base_infection_spread_prob[self.variant_code_map[virus_type]]
    # Masks reduce outward spread on the infected side and inward intake on
    # the susceptible side, but only while the mandate is in force.
    if self.policy.get_mask_mandate():
        masked_1 = person_1.wear_mask()
        masked_2 = person_2.wear_mask()
        in_eff_1, out_eff_1 = person_1.mask_type_efficiency()
        in_eff_2, out_eff_2 = person_2.mask_type_efficiency()
        if infected_1:
            if masked_1:
                spread_prob *= (1 - out_eff_1)
            if masked_2:
                spread_prob *= (1 - in_eff_2)
        elif infected_2:
            if masked_1:
                spread_prob *= (1 - in_eff_1)
            if masked_2:
                spread_prob *= (1 - out_eff_2)
    # Vaccination of either party further scales down the probability.
    vax_eff_1 = person_1.vaccine_type_efficiency() if person_1.is_vaccinated() else 0
    vax_eff_2 = person_2.vaccine_type_efficiency() if person_2.is_vaccinated() else 0
    spread_prob *= (1 - vax_eff_1) * (1 - vax_eff_2)
    return random() < spread_prob
def house_interact(self, day):
    """Manage interactions between members of the same household.

    Different from interaction sites in that contacts are not calculated
    individually but assumed to happen between all house members; spread is
    governed by the known house_infection_spread_factor.  Does not have a
    return value — infections are managed internally.

    Parameters
    ----------
    day : int
        The day value that this function is being called on in the encompassing simulation class.
        Used as input to the infect function after infections have been determined.
    """
    total_house_interactions = 0
    for house_indices in self.house_indices:
        # Get people in house
        house_size = len(house_indices)
        housemembers = [self.pop.get_population()[ind] for ind in house_indices]
        virus_types = [person.get_virus_type() for person in housemembers]
        # Every pair of housemates is assumed to interact once: C(n, 2) pairs.
        total_house_interactions += comb(len(housemembers), 2)
        # Log mutual (personal) contacts between all pairs of housemates
        for member1, member2 in combinations(housemembers, 2):
            member1.log_contact(member2, day=day, personal=True)
            member2.log_contact(member1, day=day, personal=True)
        # Check if anyone in the house is infected
        if any(housemembers[i].is_infected() for i in range(house_size)):
            infected_housemembers = [i for i in range(house_size) if housemembers[i].is_infected()]
            # Narrow virus_types down to the variants actually present in the house
            virus_types = [virus_types[i] for i in infected_housemembers]
            healthy_housemembers = [i for i in range(house_size) if not housemembers[i].is_infected()]
            for person in healthy_housemembers:
                # Each healthy member is exposed to one variant drawn uniformly
                # from those present in the house.
                virus_id = np.random.choice(a=virus_types)
                virus_name = self.variant_code_map[virus_id]
                infection_chance = self.base_infection_spread_prob[virus_name] * self.house_infection_spread_factor
                # Vaccination scales the chance down.
                person_vaccinated = housemembers[person].is_vaccinated()
                person_vaccine_eff = housemembers[person].vaccine_type_efficiency() if person_vaccinated else 0
                infection_chance *= (1 - person_vaccine_eff)
                caught_infection = random() < infection_chance
                if caught_infection:
                    self.daily_new_infections += 1
                    # NOTE(review): virus_id was already used as a dict key
                    # above, so a None id would have raised KeyError before
                    # reaching this guard — confirm intended placement.
                    if virus_id is None:
                        raise ValueError("House infection has incorrect virus type.")
                    self.pop.infect(index=housemembers[person].get_index(), day=day, virus_type=virus_id)
    self.daily_interactions["HOUSE_GENERAL"][day] = total_house_interactions
def student_house_interact(self, day):
    """Manage interactions between members of the same student household.

    Mirrors house_interact but runs over the student-house index lists and
    records its totals under the "HOUSE_STUDENT" key.  Contacts are assumed
    between all house members rather than drawn.  Does not have a return
    value — infections are managed internally.

    Parameters
    ----------
    day : int
        The day value that this function is being called on in the encompassing simulation class.
        Used as input to the infect function after infections have been determined.
    """
    total_house_interactions = 0
    for house_indices in self.stud_house_indices:
        # Get people in house
        house_size = len(house_indices)
        housemembers = [self.pop.get_population()[ind] for ind in house_indices]
        virus_types = [person.get_virus_type() for person in housemembers]
        # Every pair of housemates is assumed to interact once: C(n, 2) pairs.
        total_house_interactions += comb(len(housemembers), 2)
        # Log mutual (personal) contacts between all pairs of housemates
        for member1, member2 in combinations(housemembers, 2):
            member1.log_contact(member2, day=day, personal=True)
            member2.log_contact(member1, day=day, personal=True)
        # Check if anyone in the house is infected
        if any(housemembers[i].is_infected() for i in range(house_size)):
            infected_housemembers = [i for i in range(house_size) if housemembers[i].is_infected()]
            # Narrow virus_types down to the variants actually present in the house
            virus_types = [virus_types[i] for i in infected_housemembers]
            healthy_housemembers = [i for i in range(house_size) if not housemembers[i].is_infected()]
            for person in healthy_housemembers:
                # Each healthy member is exposed to one variant drawn uniformly
                # from those present in the house.
                virus_id = np.random.choice(a=virus_types)
                virus_name = self.variant_code_map[virus_id]
                infection_chance = self.base_infection_spread_prob[virus_name] * self.house_infection_spread_factor
                # Vaccination scales the chance down.
                person_vaccinated = housemembers[person].is_vaccinated()
                person_vaccine_eff = housemembers[person].vaccine_type_efficiency() if person_vaccinated else 0
                infection_chance *= (1 - person_vaccine_eff)
                caught_infection = random() < infection_chance
                if caught_infection:
                    self.daily_new_infections += 1
                    # NOTE(review): virus_id was already used as a dict key
                    # above, so a None id would have raised KeyError before
                    # reaching this guard — confirm intended placement.
                    if virus_id is None:
                        raise ValueError("House infection has incorrect virus type.")
                    self.pop.infect(index=housemembers[person].get_index(), day=day, virus_type=virus_id)
    self.daily_interactions["HOUSE_STUDENT"][day] = total_house_interactions
def testing_site(self, tests_per_day, day):
"""Method to update status of symptoms and run the testing sites code.
Parameters
----------
tests_per_day : int
The max number of available tests for this given day.
day : int
The day value that this function is being called on in the encompassing simulation class.
"""
self.pop.update_uninfected_symptomatics()
self.pop.update_infected_symptomatics(day)
self.pop.get_tested(tests_per_day, day)
def get_grade_A_sites(self):
    """Return a deep copy of the grade A site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.grade_A_sites)
    return sites_copy
def get_grade_B_sites(self):
    """Return a deep copy of the grade B site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.grade_B_sites)
    return sites_copy
def get_grade_C_sites(self):
    """Return a deep copy of the grade C site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.grade_C_sites)
    return sites_copy
def get_lect_sites(self):
    """Return a deep copy of the lecture site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.lect_sites)
    return sites_copy
def get_study_sites(self):
    """Return a deep copy of the study site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.study_sites)
    return sites_copy
def get_food_sites(self):
    """Return a deep copy of the food site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.food_sites)
    return sites_copy
def get_res_sites(self):
    """Return a deep copy of the residence site membership arrays.

    Returns
    -------
    :obj:`list` of :obj:`np.array` of :obj:`int`
        Independent copy; mutating it does not affect this object.
    """
    sites_copy = deepcopy(self.res_sites)
    return sites_copy
| Queens-Physics/quaboom | cv19/interaction_sites.py | interaction_sites.py | py | 33,973 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_numbe... |
37877826411 | import os
import re
import shutil
from glob import glob
from osrf_pycommon.process_utils import AsyncSubprocessProtocol
from catkin_tools.common import mkdir_p
from catkin_tools.terminal_color import fmt
from .events import ExecutionEvent
MAX_LOGFILE_HISTORY = 10
class IOBufferContainer(object):
    """A simple buffer container for use in logging.

    This class will open a logfile for a given job stage and write to it
    continuously while receiving stdout and stderr, also keeping in-memory
    copies of the stdout, stderr, and interleaved streams as raw bytes.
    """
    def __init__(self, label, job_id, stage_label, event_queue, log_path):
        """Open the logfile for this (job, stage) pair.

        :param label: name of this stream (used in the logfile name)
        :param job_id: identifier of the owning job (one logfile dir per job)
        :param stage_label: identifier of the stage within the job
        :param event_queue: queue used by subclasses to emit execution events
        :param log_path: root directory under which logfiles are written
        """
        self.label = label
        self.job_id = job_id
        self.stage_label = stage_label
        self.event_queue = event_queue
        self.log_path = log_path
        self.is_open = False
        self.stdout_buffer = b""
        self.stderr_buffer = b""
        self.interleaved_buffer = b""
        # Construct the logfile path for this job and stage
        logfile_dir_path = os.path.join(log_path, self.job_id)
        self.logfile_basename = os.path.join(logfile_dir_path, '.'.join([self.label, self.stage_label]))
        self.logfile_name = '{}.log'.format(self.logfile_basename)
        # Create the logfile dir if it doesn't exist
        if not os.path.exists(logfile_dir_path):
            mkdir_p(logfile_dir_path)
        # Get the existing number of logfiles
        # TODO: Make this number global across all build stages
        # NOTE(review): assumes every '<basename>.*.log' file has a numeric
        # second-to-last dot component; a stray file would raise ValueError.
        existing_logfile_indices = sorted([int(lf.split('.')[-2])
                                           for lf in glob('{}.*.log'.format(self.logfile_basename))])
        if len(existing_logfile_indices) == 0:
            self.logfile_index = 0
        else:
            self.logfile_index = 1 + existing_logfile_indices[-1]
        # Generate the archived logfile name (index zero-padded to 3 digits)
        self.unique_logfile_name = '{}.{:0>{}}.log'.format(self.logfile_basename, self.logfile_index, 3)
        # Remove colliding file if necessary
        if os.path.exists(self.logfile_name):
            os.unlink(self.logfile_name)
        # Open logfile
        self.log_file = open(self.logfile_name, 'wb')
        self.is_open = True
    def close(self):
        """Close the logfile, archive a uniquely-numbered copy, and prune old history."""
        # Close logfile
        self.log_file.close()
        self.is_open = False
        # Copy logfile to unique name
        shutil.copy(self.logfile_name, self.unique_logfile_name)
        # Remove logfiles older than MAX_LOGFILE_HISTORY generations
        for logfile_name in glob('{}.*.log'.format(self.logfile_basename)):
            if (self.logfile_index - int(logfile_name.split('.')[-2])) >= MAX_LOGFILE_HISTORY:
                os.unlink(logfile_name)
        # Save output from stderr (these don't get deleted until cleaning the logfile directory)
        if len(self.stderr_buffer) > 0:
            with open(self.unique_logfile_name + '.stderr', 'wb') as logfile:
                logfile.write(self.stderr_buffer)
    def get_interleaved_log(self):
        """Return the decoded interleaved (stdout+stderr) log as str."""
        try:
            return self._decode(self.interleaved_buffer)
        except UnicodeDecodeError:
            # _decode() uses errors='replace', so this fallback should be
            # unreachable in practice; kept as a defensive guard.
            return "interleaved_log: some output cannot be displayed.\n"
    def get_stdout_log(self):
        """Return the decoded stdout log as str."""
        try:
            return self._decode(self.stdout_buffer)
        except UnicodeDecodeError:
            return "stdout_log: some output cannot be displayed.\n"
    def get_stderr_log(self):
        """Return the decoded stderr log as str."""
        try:
            return self._decode(self.stderr_buffer)
        except UnicodeDecodeError:
            return "stderr_log: some output cannot be displayed.\n"
    @staticmethod
    def _encode(data):
        """Encode a Python str into bytes.

        :type data: str
        """
        return data.encode('utf8')
    @staticmethod
    def _decode(data):
        """Decode bytes into Python str, replacing undecodable sequences.

        :type data: bytes
        """
        return data.decode('utf-8', 'replace')
    def __del__(self):
        # Ensure the logfile is flushed and archived even when close() was
        # never called explicitly.
        if self.is_open:
            self.close()
    @classmethod
    def factory(cls, label, job_id, stage_label, event_queue, log_path):
        """Factory method for constructing with job metadata pre-bound."""
        def init_proxy(*args, **kwargs):
            return cls(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
        return init_proxy
class IOBufferLogger(IOBufferContainer):
    """Logger used instead of sys.stdout and sys.stderr in FunctionStage operations.

    In addition to buffering output and writing the logfile, it emits
    `STDOUT` and `STDERR` execution events on the shared event queue.
    """
    def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
        IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path)
    def _emit(self, data, end, event_type):
        """Shared implementation of out()/err(): buffer, log, and emit an event.

        :type data: str
        :type end: str
        :param event_type: 'STDOUT' or 'STDERR' (selects the stream buffer)
        """
        # Buffer the encoded data in the stream-specific and interleaved buffers
        data += end
        encoded_data = self._encode(data)
        if event_type == 'STDOUT':
            self.stdout_buffer += encoded_data
        else:
            self.stderr_buffer += encoded_data
        self.interleaved_buffer += encoded_data
        # Save the encoded data
        self.log_file.write(encoded_data)
        # Emit event with decoded Python str
        self.event_queue.put(ExecutionEvent(
            event_type,
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=data))
    def out(self, data, end='\n'):
        """Log one stdout line and emit a STDOUT event.

        :type data: str
        :type end: str
        """
        self._emit(data, end, 'STDOUT')
    def err(self, data, end='\n'):
        """Log one stderr line and emit a STDERR event.

        :type data: str
        :type end: str
        """
        self._emit(data, end, 'STDERR')
class IOBufferProtocol(IOBufferContainer, AsyncSubprocessProtocol):
    """An asyncio protocol that collects stdout and stderr.

    This class also generates `stdout` and `stderr` events.
    Since the underlying asyncio API constructs the actual protocols, this
    class provides a factory method to inject the job and stage information
    into the created protocol.
    """
    def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
        IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path)
        AsyncSubprocessProtocol.__init__(self, *args, **kwargs)
        # Hold partial (newline-less) output until the rest of the line arrives.
        self.intermediate_stdout_buffer = b''
        self.intermediate_stderr_buffer = b''
    @staticmethod
    def _split(data):
        """Split data into (complete lines, trailing partial line).

        The first element ends just after the last newline in data; the
        second is whatever follows it.  When there is no newline at all,
        the whole input is returned as the partial part.
        """
        try:
            last_break = data.rindex(b'\n') + 1
            return data[0:last_break], data[last_break:]
        except ValueError:
            return b'', data
    def on_stdout_received(self, data):
        """Buffer complete stdout lines, log them, and emit a STDOUT event.

        :type data: encoded bytes
        """
        data, self.intermediate_stdout_buffer = self._split(self.intermediate_stdout_buffer + data)
        self.stdout_buffer += data
        self.interleaved_buffer += data
        self.log_file.write(data)
        # Get the decoded Python str
        decoded_data = self._decode(data)
        # Emit event with decoded Python str
        self.event_queue.put(ExecutionEvent(
            'STDOUT',
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=decoded_data))
    def on_stderr_received(self, data):
        """Buffer complete stderr lines, log them, and emit a STDERR event.

        :type data: encoded bytes
        """
        data, self.intermediate_stderr_buffer = self._split(self.intermediate_stderr_buffer + data)
        self.stderr_buffer += data
        self.interleaved_buffer += data
        self.log_file.write(data)
        # Get the decoded Python str
        decoded_data = self._decode(data)
        # Emit event with decoded Python str
        self.event_queue.put(ExecutionEvent(
            'STDERR',
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=decoded_data))
    def on_process_exited2(self, returncode):
        """
        Dump anything remaining in the intermediate buffers.

        NOTE(review): the name 'on_process_exited2' does not match the usual
        'on_process_exited' hook naming of AsyncSubprocessProtocol — confirm
        this method is actually invoked by the base class.
        """
        if len(self.intermediate_stdout_buffer) > 0:
            self.on_stdout_received(self.intermediate_stdout_buffer + b'\n')
        if len(self.intermediate_stderr_buffer) > 0:
            self.on_stderr_received(self.intermediate_stderr_buffer + b'\n')
class CatkinTestResultsIOBufferProtocol(IOBufferProtocol):
    """An IOBufferProtocol which parses the output of catkin_test_results.

    Summary lines of the form ``<pkg>: N tests, N errors, N failures, N
    skipped`` are rewritten with terminal color markup before being
    forwarded to the base protocol.
    """
    def on_stdout_received(self, data):
        summary_re = re.compile(r'(.*): (\d+) tests, (\d+) errors, (\d+) failures, (\d+) skipped')
        colored = []
        for line in data.decode().splitlines():
            match = summary_re.match(line)
            if match:
                template = fmt('@!{}@|: {} tests, @{rf}{} errors@|, @{rf}{} failures@|, @{kf}{} skipped@|')
                line = template.format(*match.groups())
            colored.append(line)
        cdata = '\n'.join(colored) + '\n'
        super(CatkinTestResultsIOBufferProtocol, self).on_stdout_received(cdata.encode())
| catkin/catkin_tools | catkin_tools/execution/io.py | io.py | py | 9,125 | python | en | code | 153 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3... |
73971404584 | from scripts.util.joystick import Joystick
from carla_env import CarlaEnv
from wrapped_carla_env import BiasedAction
import time
import carla
import pygame
import numpy as np
class ManualInterface:
    """Pygame-based manual-driving UI for a CarlaEnv.

    Polls joystick input, steps the environment each frame, and renders
    the per-car camera observations with a reward overlay.
    """

    def __init__(self, env: CarlaEnv):
        # create env
        self.env = env
        self.obs = None
        self.rew = None
        self.done = False
        self.reset_env()
        self.total_reward = None
        # init pygame
        pygame.init()
        self.running = True
        self.surface = None  # created lazily on first render
        # font
        self.font = pygame.font.Font(pygame.font.get_default_font(), 30)
        # control
        self.joysticks = None  # opened lazily on first get_action()
        self.reset_flag = False  # debounce: True while a reset request is held
        # tps counter
        self.tps_total_time = 0
        self.tps_total_frame = 0

    def __del__(self):
        # NOTE(review): relying on __del__ for pygame shutdown is fragile;
        # interpreter teardown order is not guaranteed — consider an
        # explicit close() instead.
        pygame.quit()

    def reset_env(self):
        """Reset the env and clear per-episode bookkeeping."""
        self.obs = self.env.reset()
        self.rew = None
        self.done = False
        self.total_reward = None

    def get_action(self):
        """Read joystick state; returns one [accel, steer] array per car."""
        # init joysticks
        if self.joysticks is None:
            self.joysticks = [Joystick(fn) for fn in Joystick.list_devices()]
        # pump events before get
        pygame.event.pump()
        # get joysticks
        act = []
        for js in self.joysticks:
            accel = -js.axes["ry"]  # Left lever, L <--> R
            steer = js.axes["x"]  # Right lever, U <--> D
            reverse = js.buttons["tl"]  # LB
            # act.append(carla.VehicleControl(
            #     throttle=max(0.0, accel),
            #     brake=-min(0.0, accel),
            #
            #     steer=steer,
            #     reverse=reverse
            # ))
            act.append(np.array([accel, steer], dtype=np.float32))
        # check if reset: Y button on any stick, or episode finished.
        is_reset = sum([js.buttons["y"] for js in self.joysticks])
        is_reset |= self.done
        if is_reset:
            # reset_flag debounces a held button so one press = one reset
            if not self.reset_flag:
                self.reset_env()
                self.reset_flag = True
        else:
            self.reset_flag = False
        return act

    def on_event(self, event):
        """Handle a single pygame event (window close stops the loop)."""
        if event.type == pygame.QUIT:
            self.running = False
            return

    def on_update(self):
        """Step the environment once and update reward/TPS counters."""
        # update env
        act = self.get_action()
        start_time = time.time()
        self.obs, self.rew, self.done, _, = self.env.step(act)
        elapsed = time.time() - start_time
        # update total reward
        if self.total_reward is None:
            self.total_reward = np.array(self.rew)
        else:
            self.total_reward += np.array(self.rew)
        # tps counter: report env-step throughput every 100 frames
        self.tps_total_time += elapsed
        self.tps_total_frame += 1
        if self.tps_total_frame >= 100:
            print("TPS: {}".format(self.tps_total_frame / self.tps_total_time))
            self.tps_total_frame = 0
            self.tps_total_time = 0

    def on_render(self):
        """Blit every car's camera images in a grid plus a reward caption."""
        # assumes obs is [car][camera] of channel-first images — TODO confirm
        _, h, w = self.obs[0][0].shape
        # init surface
        if self.surface is None:
            n_cars = len(self.obs)
            n_cameras = len(self.obs[0])
            self.surface = pygame.display.set_mode((w * n_cameras, h * n_cars), pygame.HWSURFACE | pygame.DOUBLEBUF)
        # show images
        y = 0
        for cam, rew in zip(self.obs, self.total_reward):
            # draw car cam images
            x = 0
            for cam_img in cam:
                if cam_img.shape[0] < 3:
                    # pad channel so make_surface always gets 3 channels
                    padded_cam_img = np.concatenate([
                        cam_img,
                        np.zeros((1, *cam_img.shape[1:]), dtype=cam_img.dtype)], axis=0)
                else:
                    padded_cam_img = cam_img
                cam_surf = pygame.surfarray.make_surface(padded_cam_img.transpose(2, 1, 0))
                self.surface.blit(cam_surf, (x, y))
                x += w
            # draw reward
            rew_surf = self.font.render("Reward: {:.2f}".format(rew), True, (0, 0, 255))
            self.surface.blit(rew_surf, (10, y + 10))
            y += h
        # update display
        pygame.display.update()

    def run(self, fps: int = None):
        """Main loop: events -> step -> render, optionally capped at fps."""
        clock = pygame.time.Clock()
        while self.running:
            for event in pygame.event.get():
                self.on_event(event)
            self.on_update()
            self.on_render()
            if fps:
                clock.tick(fps)
def main():
    """Entry point: build a CarlaEnv at dt=0.1s and run the manual UI at 1/dt FPS."""
    dt = 0.1
    world_options = {
        "world": {
            "dt": dt
        }
    }
    interface = ManualInterface(CarlaEnv(world_options, 0))
    interface.run(int(1.0 / dt))
# Run the manual-control UI only when executed as a script.
if __name__ == '__main__':
    main()
| imoneoi/carla_env | scripts/manual_control.py | manual_control.py | py | 4,642 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "carla_env.CarlaEnv",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pygame.init",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"li... |
import os
import cv2
import torch
import numpy as np
import matplotlib.pyplot as plt

# Batch-convert a folder of images to colorized MiDaS depth maps.
# MiDaS variant; DPT_Large is the highest-accuracy (and slowest) model.
model_type = "DPT_Large"
midas = torch.hub.load("intel-isl/MiDaS", model_type)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
midas.to(device)
midas.eval()
# The matching preprocessing transform for the chosen model family.
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
if model_type == "DPT_Large" or model_type == "DPT_Hybrid":
    transform = midas_transforms.dpt_transform
else:
    transform = midas_transforms.small_transform
input_dir = 'dataset/dop/speaker/output/'  # directory with your input images
output_dir = 'dataset/validation/depth/speaker/'  # directory where you want to save depth maps
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
for filename in os.listdir(input_dir):
    if filename.endswith(".jpg") or filename.endswith(".png"):
        img_path = os.path.join(input_dir, filename)
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_batch = transform(img).to(device)
        with torch.no_grad():
            prediction = midas(input_batch)
            # Upsample the low-res prediction back to the input resolution.
            prediction = torch.nn.functional.interpolate(
                prediction.unsqueeze(1),
                size=img.shape[:2],
                mode="bicubic",
                align_corners=False,
            ).squeeze()
        output = prediction.cpu().numpy()
        # NOTE(review): np.ptp(output) is zero for a constant map — would
        # divide by zero; confirm inputs always produce varying depth.
        output = np.sqrt((output - np.min(output)) / np.ptp(output))  # apply square root to decrease contrast
        # Apply color map
        output_colored = cv2.applyColorMap((output * 255).astype(np.uint8), cv2.COLORMAP_JET)
        # Save depth map
        # NOTE(review): applyColorMap already returns BGR; this RGB2BGR swap
        # flips the palette — confirm it is intentional.
        output_colored_bgr = cv2.cvtColor(output_colored, cv2.COLOR_RGB2BGR)
        output_filename = os.path.join(output_dir, filename.split('.')[0] + '_depth.jpg')
        cv2.imwrite(output_filename, output_colored_bgr)
        # Show depth map
        # plt.imshow(output_colored)
        # plt.show()
print('Depth maps created and saved.')
| Roman212Koval/Dual-channel_CNN | monocular.py | monocular.py | py | 1,994 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.hub.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.hub",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
70553071464 | import time, datetime
import numpy as np
import os
import os.path as osp
import torch
import torchvision
import matplotlib.pyplot as plt
import torchvision.utils as vutils
import torch.nn.functional as F
import cv2
import glob
import random
from lib.utils.eval_utils import (
batch_compute_similarity_transform_torch,
)
from lib.utils.geometry import batch_rodrigues
def inverse_normalize(tensor):
    """Undo ImageNet normalization in place on a channel-first tensor; returns it."""
    imagenet_mean = (0.485, 0.456, 0.406)
    imagenet_std = (0.229, 0.224, 0.225)
    for channel, (mu, sigma) in zip(tensor, zip(imagenet_mean, imagenet_std)):
        channel.mul_(sigma).add_(mu)
    return tensor
def normalize(tensor):
    """Apply ImageNet normalization in place on a channel-first tensor; returns it."""
    imagenet_mean = (0.485, 0.456, 0.406)
    imagenet_std = (0.229, 0.224, 0.225)
    for channel, mu, sigma in zip(tensor, imagenet_mean, imagenet_std):
        channel.sub_(mu).div_(sigma)
    return tensor
class AverageMeter(object):
    """Tracks the most recent value and a sample-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        if self.count:
            self.avg = self.sum / self.count
def get_HHMMSS_from_second(seconds):
    """Format a duration given in seconds as an HH:MM:SS string."""
    return time.strftime("%H:%M:%S", time.gmtime(seconds))
def save_checkpoint(state, output_dir, filename):
    """Save a torch-serializable state under results/<output_dir>/save_pth/.

    Args:
        state: any torch-serializable object (typically a state dict).
        output_dir: experiment name used to build the results path.
        filename: checkpoint file name, e.g. "model_best.pth".
    """
    save_path = "results/{}/save_pth".format(output_dir)
    # exist_ok avoids the check-then-create race of the previous
    # osp.exists()/os.makedirs() pair.
    os.makedirs(save_path, exist_ok=True)
    torch.save(state, osp.join(save_path, filename))
def save_img(img, output_dir, filename, epoch=None, img_path=None, test_dataset=None, test_idx=None, vflip=False, hflip=False):
    """Save a training/eval image tensor under the experiment's results tree.

    The destination directory depends on which of epoch/img_path/test_dataset
    are given; the save strategy depends on the tensor's rank:
    4D (tile, c, w, h): one de-normalized jpg per jigsaw tile;
    3D (c, w, h): either raw cv2 write (for known rendering filenames) or
    de-normalized matplotlib write;
    2D (w, h): normalized grid via vutils.

    NOTE(review): if none of the three path branches matches, ``save_path``
    is unbound and osp.exists() raises NameError — confirm callers always
    hit one branch.
    """
    if isinstance(img, torch.Tensor):
        img = img.cpu()
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    elif epoch and img_path:
        # assumes img_path like <a>/<b>/<dataset>/.../<name>.jpg — TODO confirm
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    elif test_dataset:
        save_path = "results/{}/eval_data/{}_{}/{}".format(output_dir, test_dataset, epoch, test_idx)
    if not osp.exists(save_path):
        os.makedirs(save_path)
    # For jigsaw image with tile
    if len(img.shape) == 4: # tile, channel, width, height
        for tile_idx, _img_tile in enumerate(img):
            _filename = "_".join([filename.replace(".jpg", ""), str(tile_idx)]) + ".jpg"
            # _img = np.transpose(vutils.make_grid(_img_tile, normalize=True), (1, 2, 0))
            # _img = _img.numpy()
            _img = inverse_normalize(_img_tile).detach().numpy()
            _img = np.transpose(_img, (1, 2, 0))
            if vflip:
                _img = _img[::-1, :, :]
            if hflip:
                _img = _img[:, ::-1, :]
            plt.imsave(osp.join(save_path, _filename), _img)
    # For a image
    elif len(img.shape) == 3: # channel, width, height
        if isinstance(img, torch.Tensor):
            # if filename == "reconst_img.jpg":
            #     img = np.transpose(vutils.make_grid(img, normalize=True), (1, 2, 0))
            #     img = img.numpy()
            #     plt.imsave(osp.join(save_path, filename), img)
            # Rendered images are already in [0,1] RGB; write via cv2 (BGR).
            if filename in ["detach.jpg", "rendering.jpg", "rendering_ren.jpg", "rendering_bg.jpg", "rendering1.jpg", "rendering_ren1.jpg",
                            "rendering2.jpg", "rendering_ren2.jpg", "rendering3.jpg", "rendering_ren3.jpg",
                            "rendering4.jpg", "rendering_ren4.jpg"]:
                img = np.transpose(img, (1, 2, 0))
                img = img.numpy()
                cv2.imwrite(osp.join(save_path, filename), 255*img[:, :, ::-1])
            else:
                img = inverse_normalize(img).detach().numpy()
                img = np.transpose(img, (1, 2, 0))
                plt.imsave(osp.join(save_path, filename), img)
            # if vflip:
            #     img = img[::-1, :, :]
            # if hflip:
            #     img = img[:, ::-1, :]
    elif len(img.shape) == 2: # width, height
        if isinstance(img, torch.Tensor):
            img = img[None, :, :]
        img = np.transpose(vutils.make_grid(img, normalize=True), (1, 2, 0))
        img = img.numpy()
        # img = inverse_normalize(img).detach().numpy()[:, :, None]
        # img = np.transpose(img, (1, 2, 0))
        if vflip:
            img = img[::-1, :, :]
        if hflip:
            img = img[:, ::-1, :]
        plt.imsave(osp.join(save_path, filename), img)
def save_all_img(img_dict, output_dir, epoch=None, img_path=None, test_dataset=None, test_idx=None, vflip=False, hflip=False):
    """
    img_dict keys: filename, value: image
    """
    # Delegate each entry to save_img with identical destination arguments.
    for name, image in img_dict.items():
        save_img(image, output_dir, name, epoch, img_path, test_dataset, test_idx, vflip, hflip)
def save_mesh(verts, faces, output_dir, epoch, img_path=None, test_dataset=None, test_idx=None):
    """Write a (V, 3) vertex tensor and (F, 3) face tensor as a Wavefront .obj.

    The destination directory is chosen the same way as in save_img.
    Faces are written 1-indexed per the .obj convention.
    """
    filename = "mesh.obj"
    # NOTE(review): `img` is assigned but never used; `verts` (still a
    # tensor) is iterated below instead — confirm which was intended.
    img = verts.cpu().numpy()
    faces = faces.cpu().numpy()
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    elif test_dataset is not None and test_idx is not None:
        save_path = "results/{}/eval_data/{}_{}/{}".format(output_dir, test_dataset, epoch, test_idx)
    else:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    if not osp.exists(save_path):
        os.makedirs(save_path)
    # Only single meshes (V, 3) are written; batched input is ignored.
    if len(verts.shape) == 2:
        with open(osp.join(save_path, filename), "w") as f:
            for verts_xyz in verts:
                f.write("v {} {} {}\n".format(verts_xyz[0], verts_xyz[1], verts_xyz[2]))
            for face in faces:
                # .obj indices are 1-based
                f.write("f {} {} {}\n".format(face[0]+1, face[1]+1, face[2]+1))
def save_templates_info(test_templates, output_dir, filename):
    """Save templates, one per line, to results/<output_dir>/save_txt/<filename>.

    Used to record results or train information as plain text.
    """
    save_path = "results/{}/save_txt".format(output_dir)
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(save_path, exist_ok=True)
    _filename = osp.join(save_path, filename)
    with open(_filename, "w") as f:
        # write(), not writelines(): the argument is a single joined str,
        # and writelines() on a str would iterate it character by character.
        f.write("\n".join(test_templates))
def save_joints2d_img(gt_keypoints_2d, pred_keypoints_2d, output_dir, epoch, img_path):
    """
    save image of joints2d on 2d coordinate while training

    Plots GT joints in red and predictions in blue, with skeleton bones
    from ``line_list`` (pairs of joint indices), and writes joints2d.jpg.
    """
    # Bone connectivity as (start_joint, end_joint) index pairs.
    line_list = [
        [ 0, 1 ],
        [ 1, 2 ],
        [ 3, 4 ],
        [ 4, 5 ],
        [ 6, 7 ],
        [ 7, 8 ],
        [ 8, 2 ],
        [ 8, 9 ],
        [ 9, 3 ],
        [ 2, 3 ],
        [ 8, 12],
        [ 9, 10],
        [12, 9 ],
        [10, 11],
        [12, 13],
    ]
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    else:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    if not osp.exists(save_path):
        os.makedirs(save_path)
    plt.figure()
    ax = plt.subplot()
    # Flip y so image coordinates (origin top-left) display upright.
    ax.invert_yaxis()
    gt_2ds = gt_keypoints_2d.clone()
    gt_2ds = gt_2ds.cpu()
    pred_2ds = pred_keypoints_2d.clone()
    pred_2ds = pred_2ds.cpu().detach()
    for joint_idx, (gt_2d, pred_2d) in enumerate(zip(gt_2ds, pred_2ds)):
        ax.scatter(gt_2d[0], gt_2d[1], marker='o', s=2, c="r")
        ax.text(gt_2d[0], gt_2d[1], joint_idx+1)
        ax.scatter(pred_2d[0], pred_2d[1], marker='o', s=2, c="b")
        ax.text(pred_2d[0], pred_2d[1], joint_idx+1)
    for start_point, end_point in line_list:
        start_point = start_point
        end_point = end_point
        ax.plot([gt_2ds[start_point][0], gt_2ds[end_point][0]], [gt_2ds[start_point][1], gt_2ds[end_point][1]], "r", linewidth=2)
        ax.plot([pred_2ds[start_point][0], pred_2ds[end_point][0]], [pred_2ds[start_point][1], pred_2ds[end_point][1]], "b", linewidth=2)
    plt.savefig(osp.join(save_path, "joints2d.jpg"))
    plt.close()
def save_joints3d_img(gt_keypoints_3d, pred_keypoints_3d, output_dir, epoch=None, img_path=None, test_dataset=None, test_idx=None):
    """
    save image of joints3d on 3d coordinate while training

    GT joints are drawn in red and predictions in blue, with skeleton
    bones from ``line_list``; the figure is written as joints3d.jpg.
    """
    # Bone connectivity as (start_joint, end_joint) index pairs.
    line_list = [
        [ 0, 1 ],
        [ 1, 2 ],
        [ 3, 4 ],
        [ 4, 5 ],
        [ 6, 7 ],
        [ 7, 8 ],
        [ 8, 2 ],
        [ 8, 9 ],
        [ 9, 3 ],
        [ 2, 3 ],
        [ 8, 12],
        [ 9, 10],
        [12, 9 ],
        [10, 11],
        [12, 13],
    ]
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    elif epoch and img_path:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    elif test_dataset is not None and test_idx is not None:
        save_path = "results/{}/eval_data/{}_{}/{}".format(output_dir, test_dataset, epoch, test_idx)
    if not osp.exists(save_path):
        os.makedirs(save_path)
    plt.figure()
    ax = plt.subplot(1, 1, 1, projection='3d')
    gt_3ds = gt_keypoints_3d.clone()
    gt_3ds = gt_3ds.cpu()
    pred_3ds = pred_keypoints_3d.clone()
    pred_3ds = pred_3ds.cpu().detach()
    for joint_idx, (gt_3d, pred_3d) in enumerate(zip(gt_3ds, pred_3ds)):
        ax.scatter(gt_3d[0], gt_3d[1], gt_3d[2], marker='o', s=2, c="r")
        ax.text(gt_3d[0], gt_3d[1], gt_3d[2], joint_idx+1)
        ax.scatter(pred_3d[0], pred_3d[1], pred_3d[2], marker='o', s=2, c="b")
        ax.text(pred_3d[0], pred_3d[1], pred_3d[2], joint_idx+1)
    for start_point, end_point in line_list:
        start_point = start_point
        end_point = end_point
        ax.plot(
            [gt_3ds[start_point][0], gt_3ds[end_point][0]],
            [gt_3ds[start_point][1], gt_3ds[end_point][1]],
            [gt_3ds[start_point][2], gt_3ds[end_point][2]],
            color="r",
            linewidth=2)
        ax.plot(
            [pred_3ds[start_point][0], pred_3ds[end_point][0]],
            [pred_3ds[start_point][1], pred_3ds[end_point][1]],
            [pred_3ds[start_point][2], pred_3ds[end_point][2]],
            color="b",
            linewidth=2)
    plt.savefig(osp.join(save_path, "joints3d.jpg"))
    plt.close()
def get_acc(output, label):
    """Top-1 accuracy (in percent) of class logits `output` against `label`."""
    predictions = torch.argmax(output, dim=1)
    num_correct = (predictions == label).sum().item()
    return num_correct / output.shape[0] * 100
def spin2h36m_joint(spins, device):
    """
    Get h36m 14 joints from spin 49 joints.

    Args:
        spins: (49, ...) tensor of SPIN joints.
        device: kept for backward compatibility; the result stays on
            ``spins``' device (the old matmul required them to match anyway).

    Returns:
        (14, ...) tensor of the H3.6M joints (SPIN indices 25..38).
    """
    h36m_index_list = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38]
    # Direct indexing replaces the previous one-hot (14, 49) matmul:
    # identical values, but also works for non-float dtypes and avoids
    # allocating the selection matrix on every call.
    return spins[h36m_index_list]
def get_spherical_coords(X):
    """Map N x 3 points to normalized (azimuth, inclination) UVs in [-1, 1].

    Returns an N x 2 array of [uu, vv].
    """
    radius = np.linalg.norm(X, axis=1)
    # Inclination from the +z axis, azimuth in the xy-plane.
    inclination = np.arccos(X[:, 2] / radius)
    azimuth = np.arctan2(X[:, 1], X[:, 0])
    # Rescale both angles into [-1, 1].
    vv = 2 * (inclination / np.pi) - 1
    uu = 2 * ((azimuth + np.pi) / (2 * np.pi)) - 1
    return np.stack([uu, vv], 1)
def compute_uvsampler(verts, faces, tex_size=2):
    """
    For this mesh, pre-computes the UV coordinates for
    F x T x T points.
    Returns F x T x T x 2
    """
    import itertools
    # `np.float` was removed in NumPy >= 1.20; the builtin `float` is the
    # documented replacement and yields the same float64 arange.
    alpha = np.arange(tex_size, dtype=float) / (tex_size - 1)
    beta = np.arange(tex_size, dtype=float) / (tex_size - 1)
    # Barycentric coordinate values
    coords = np.stack([p for p in itertools.product(*[alpha, beta])])
    vs = verts[faces]
    # Compute alpha, beta (this is the same order as NMR)
    v2 = vs[:, 2]
    v0v2 = vs[:, 0] - vs[:, 2]
    v1v2 = vs[:, 1] - vs[:, 2]
    # F x 3 x T*2
    samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1)
    # F x T*2 x 3 points on the sphere
    samples = np.transpose(samples, (0, 2, 1))
    # Now convert these to uv.
    uv = get_spherical_coords(samples.reshape(-1, 3))
    # uv = uv.reshape(-1, len(coords), 2)
    uv = uv.reshape(-1, tex_size, tex_size, 2)
    return uv
def train_only_3task_network(
    HMR,
    context_encoder_net,
    discriminator,
    jigsaw_puzzle_net,
    rotation_net,
    loss_fn_BCE,
    loss_fn_MSE,
    loss_fn_CE,
    losses_CE,
    losses_DC,
    acces_JP,
    losses_JP,
    acces_ROT,
    losses_ROT,
    discriminator_optimizer,
    context_encoder_optimizer,
    jigsaw_puzzle_optimizer,
    rotation_optimizer,
    img,
    context_encoder_input,
    center_crop_img,
    jigsaw_input,
    jigsaw_order,
    rotation_img,
    rotation_idx,
    num_patch,
    ones,
    zeros,
    batch_size,
):
    """One optimization step for the three self-supervised task heads only
    (context-encoder inpainting, jigsaw-order classification, rotation
    classification); the HMR backbone is used for features but not stepped
    by this function's optimizers.

    Returns the inpainted 224px image (input with the predicted center patch).
    """
    ### Context Encoder ###
    # Update Discriminator: real vs inpainted-center images.
    feature_ce = HMR(context_encoder_input, return_only_features=True)
    feature_ce = feature_ce.reshape(-1, 2048, 1, 1)
    output_ce = context_encoder_net(feature_ce)
    output_ce_224 = context_encoder_input.clone()
    # Paste the predicted 64x64 patch back into the image center.
    output_ce_224[:, :, 80:144, 80:144] = output_ce.clone()
    output_fake = discriminator(output_ce_224)
    output_real = discriminator(img)
    loss_BCE_fake = loss_fn_BCE(output_fake, zeros)
    loss_BCE_real = loss_fn_BCE(output_real, ones)
    loss_BCE = loss_BCE_fake + loss_BCE_real
    losses_DC.update(loss_BCE.item(), batch_size)
    discriminator_optimizer.zero_grad()
    loss_BCE.backward()
    discriminator_optimizer.step()
    # Update Decoder: recompute the forward pass after the D step.
    feature_ce = HMR(context_encoder_input, return_only_features=True)
    feature_ce = feature_ce.reshape(-1, 2048, 1, 1)
    output_ce = context_encoder_net(feature_ce)
    output_ce_224 = context_encoder_input.clone()
    output_ce_224[:, :, 80:144, 80:144] = output_ce.clone()
    output_fake = discriminator(output_ce_224)
    loss_BCE = loss_fn_BCE(output_fake, ones)
    loss_MSE = loss_fn_MSE(output_ce, center_crop_img)
    # Heavily reconstruction-weighted adversarial inpainting loss.
    loss_ce = 0.001 * loss_BCE + 0.999 * loss_MSE
    losses_CE.update(loss_ce.item(), batch_size)
    context_encoder_optimizer.zero_grad()
    loss_ce.backward()
    context_encoder_optimizer.step()
    ### Jigsaw Puzzle ###
    # Update classifier: concat per-tile features plus the full image's.
    _jigsaw_input = jigsaw_input.permute(1, 0, 2, 3, 4) # tile, batch, c, w, h
    feature_jp = list()
    for i in range(num_patch):
        feature_jp.append(HMR(_jigsaw_input[i], return_only_features=True))
    feature_jp.append(HMR(img, return_only_features=True))
    feature_jp = torch.cat(feature_jp, 1)
    output_jp = jigsaw_puzzle_net(feature_jp)
    acc_jp = get_acc(output_jp, jigsaw_order)
    acces_JP.update(acc_jp, batch_size)
    loss_jp = loss_fn_CE(output_jp, jigsaw_order)
    losses_JP.update(loss_jp.item(), batch_size)
    jigsaw_puzzle_optimizer.zero_grad()
    loss_jp.backward()
    jigsaw_puzzle_optimizer.step()
    ### Rotation ###
    # Update rotation net
    feature_rot = HMR(rotation_img, return_only_features=True)
    output_rot = rotation_net(feature_rot)
    acc_rot = get_acc(output_rot, rotation_idx)
    acces_ROT.update(acc_rot, batch_size)
    loss_rot = loss_fn_CE(output_rot, rotation_idx)
    # NOTE(review): unlike the other meters this records the tensor, not
    # loss_rot.item() — confirm whether .item() was intended.
    losses_ROT.update(loss_rot, batch_size)
    rotation_optimizer.zero_grad()
    loss_rot.backward()
    rotation_optimizer.step()
    return output_ce_224
def train_hmr_using_3task(
    HMR,
    context_encoder_net,
    discriminator,
    jigsaw_puzzle_net,
    rotation_net,
    loss_fn_BCE,
    loss_fn_MSE,
    loss_fn_CE,
    losses_CE,
    acces_JP,
    losses_JP,
    acces_ROT,
    losses_ROT,
    losses_HMR_3task,
    img,
    context_encoder_input,
    center_crop_img,
    jigsaw_input,
    jigsaw_order,
    rotation_img,
    rotation_idx,
    num_patch,
    ones,
    zeros,
    batch_size,
    args,
):
    """Compute the combined three-task self-supervised loss for training the
    HMR backbone (no optimizer steps happen here; the caller backpropagates).

    Returns (weighted total loss, inpainted 224px image).
    """
    # loss for HMR - ce: adversarial + MSE inpainting of the center patch.
    feature_ce = HMR(context_encoder_input, return_only_features=True)
    feature_ce = feature_ce.reshape(-1, 2048, 1, 1)
    output_ce = context_encoder_net(feature_ce)
    output_ce_224 = context_encoder_input.clone()
    output_ce_224[:, :, 80:144, 80:144] = output_ce.clone()
    output_fake = discriminator(output_ce_224)
    loss_BCE = loss_fn_BCE(output_fake, ones)
    loss_MSE = loss_fn_MSE(output_ce, center_crop_img)
    loss_ce = 0.001 * loss_BCE + 0.999 * loss_MSE
    losses_CE.update(loss_ce.item(), batch_size)
    # loss for HMR - jp: classify tile permutation from concatenated features.
    _jigsaw_input = jigsaw_input.permute(1, 0, 2, 3, 4) # tile, batch, c, w, h
    feature_jp = list()
    for i in range(num_patch):
        feature_jp.append(HMR(_jigsaw_input[i], return_only_features=True))
    feature_jp.append(HMR(img, return_only_features=True))
    feature_jp = torch.cat(feature_jp, 1)
    output_jp = jigsaw_puzzle_net(feature_jp)
    acc_jp = get_acc(output_jp, jigsaw_order)
    acces_JP.update(acc_jp, batch_size)
    loss_jp = loss_fn_CE(output_jp, jigsaw_order)
    losses_JP.update(loss_jp.item(), batch_size)
    # loss for HMR - rot: classify the applied rotation.
    feature_rot = HMR(rotation_img, return_only_features=True)
    output_rot = rotation_net(feature_rot)
    acc_rot = get_acc(output_rot, rotation_idx)
    acces_ROT.update(acc_rot, batch_size)
    loss_rot = loss_fn_CE(output_rot, rotation_idx)
    # NOTE(review): records the tensor, not loss_rot.item(), unlike the
    # other meters — confirm whether .item() was intended.
    losses_ROT.update(loss_rot, batch_size)
    # Weighted combination controlled by the CLI args.
    loss_HMR = args.ce_weight * loss_ce + args.jp_weight * loss_jp + args.rot_weight * loss_rot
    loss_HMR = args.total_weight * loss_HMR
    losses_HMR_3task.update(loss_HMR.item(), batch_size)
    return loss_HMR, output_ce_224
def train_hmr_using_joints(
    HMR,
    loss_fn_keypoints,
    losses_HMR_joints3d,
    img,
    gt_keypoints_2d,
    gt_keypoints_3d,
    has_joints3d,
    joint_mapper_gt,
    batch_size,
    device,
    args,
):
    """Supervised keypoint loss for the HMR backbone plus MPJPE metrics.

    Returns (total loss, mpjpe [mm], pa_mpjpe [mm], num samples with 3D GT).
    The last column of each GT keypoint tensor is a per-joint confidence
    used to weight the loss.
    """
    ### training HMR resnet update using joints info
    output = HMR(img)
    output = output[-1]
    ### calcuate loss of 2d joints ###
    pred_keypoints_2d = output["kp_2d"]
    conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()
    joints2d_loss = (conf * loss_fn_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])).mean()
    joints2d_loss = 5*joints2d_loss
    ### calcuate loss of 3d joints ###
    # Skip the first 25 predicted joints; keep only samples with 3D GT.
    pred_keypoints_3d = output["kp_3d"][:, 25:, :]
    conf = gt_keypoints_3d[:, :, -1].unsqueeze(-1).clone()
    gt_keypoints_3d = gt_keypoints_3d[:, :, :-1].clone()
    gt_keypoints_3d = gt_keypoints_3d[has_joints3d==1]
    conf = conf[has_joints3d==1]
    pred_keypoints_3d = pred_keypoints_3d[has_joints3d==1]
    if len(gt_keypoints_3d) > 0:
        # Center both skeletons on the pelvis (midpoint of joints 2 and 3)
        # so the loss is translation-invariant.
        gt_pelvis = (gt_keypoints_3d[:, 2, :] + gt_keypoints_3d[:, 3, :]) / 2
        gt_keypoints_3d = gt_keypoints_3d - gt_pelvis[:, None, :]
        pred_pelvis = (pred_keypoints_3d[:, 2, :] + pred_keypoints_3d[:, 3, :]) / 2
        pred_keypoints_3d = pred_keypoints_3d - pred_pelvis[:, None, :]
        joints3d_loss = (conf*loss_fn_keypoints(pred_keypoints_3d, gt_keypoints_3d)).mean()
        # Metrics (detached): MPJPE and Procrustes-aligned MPJPE.
        pred_j3ds = pred_keypoints_3d[:, joint_mapper_gt, :].clone().detach()
        target_j3ds = gt_keypoints_3d[:, joint_mapper_gt, :].clone().detach()
        errors = torch.sqrt(((pred_j3ds - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
        S1_hat = batch_compute_similarity_transform_torch(pred_j3ds, target_j3ds)
        errors_pa = torch.sqrt(((S1_hat - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
        m2mm = 1000
        mpjpe = np.mean(errors) * m2mm
        pa_mpjpe = np.mean(errors_pa) * m2mm
        num_data = len(gt_keypoints_3d)
    else:
        # No 3D supervision in this batch: zero loss, zero metrics.
        joints3d_loss = torch.FloatTensor(1).fill_(0.).to(device).mean()
        mpjpe = np.array(0)
        pa_mpjpe = np.array(0)
        num_data = len(gt_keypoints_3d)
    total_loss = joints2d_loss
    joints3d_loss = 5*joints3d_loss
    total_loss += joints3d_loss
    losses_HMR_joints3d.update(joints3d_loss.item(), batch_size)
    # Global scale on the combined keypoint loss.
    total_loss *= 60
    return total_loss, mpjpe, pa_mpjpe, num_data
def train_texture_net(
    HMR,
    texture_net,
    img_renderer,
    loss_fn_MSE,
    loss_fn_mask,
    losses_texture_ori_img,
    losses_seg,
    losses_texture_total,
    texture_net_optimizer,
    img,
    black_img,
    batch_size,
    args,
    gt_mask,
    has_mask,
    train_first_stage
):
    """Texture-network training step.

    First stage: steps texture_net_optimizer on the rendering-MSE loss and
    returns (mask_est, detach_images, rendering, vertices). Otherwise:
    returns (loss_all, mask_est, detach_images, rendering, vertices) and
    leaves the optimizer step to the caller.

    NOTE(review): the two branches return tuples of different arity —
    callers must dispatch on train_first_stage.
    """
    output = HMR(img)
    output = output[-1]
    vertices = output['verts']
    # First three theta values are the weak-perspective camera parameters.
    cam = output['theta'][:, :3]
    textures = texture_net(img)
    textures = textures.expand(-1, -1, 2, 2, 2, -1)
    mask = gt_mask.clone().detach()
    mask_est, rendering = img_renderer(vertices, cam, textures)
    # Mask out the background: keep the person pixels, black elsewhere.
    valid_mask = mask > 0
    valid_mask = valid_mask[:, None, :, :].type(torch.int)
    detach_images = img * valid_mask + black_img * (1-valid_mask)
    for i in range(batch_size):
        detach_images[i] = inverse_normalize(detach_images[i])
    #======================================================================================#
    # loss_texture => MSE loss (texture images with bg, original img)
    # loss_seg => MSE loss (segmentation images, target seg)
    # loss_texture_BCE => BCE loss (texture images with bg, Real(1))
    # loss_texture_total => SUM(loss_texture, loss_seg, loss_texture_BCE)
    #======================================================================================#
    loss_texture_ori_img = loss_fn_MSE(detach_images[has_mask==1], rendering[has_mask==1])
    losses_texture_ori_img.update(loss_texture_ori_img.item(), batch_size)
    loss_all = args.rendering_weight*loss_texture_ori_img
    if train_first_stage:
        texture_net_optimizer.zero_grad()
        loss_all.backward()
        texture_net_optimizer.step()
        return mask_est, detach_images, rendering, vertices
    else:
        # Second stage adds a segmentation loss on samples that have masks.
        _mask = mask_est[has_mask == 1]
        _gt_mask = gt_mask[has_mask == 1]
        if len(_mask) != 0 and len(_gt_mask) != 0:
            loss_seg = loss_fn_mask(_mask, _gt_mask)
            losses_seg.update(loss_seg.item(), batch_size)
            loss_all += args.seg_weight * loss_seg
        loss_all = args.texture_total_weight * loss_all
        losses_texture_total.update(loss_all.item(), batch_size)
        return loss_all, mask_est, detach_images, rendering, vertices
def train_hmr_using_adv_loss(
    HMR,
    texture_discriminator,
    texture_net,
    img_renderer,
    losses_disc_e,
    losses_disc,
    losses_disc_real,
    losses_disc_fake,
    img,
    batch_size,
):
    """Adversarial loss on textured renderings composited over random
    indoor backgrounds.

    Returns (encoder/generator loss, discriminator loss, un-normalized
    composited rendering for visualization). Optimizer steps are left to
    the caller.
    """
    output = HMR(img)[-1]
    vertices = output['verts']
    cam = output['theta'][:, :3]
    textures = texture_net(img)
    textures = textures.expand(-1, -1, 2, 2, 2, -1)
    mask, rendering = img_renderer(vertices, cam, textures)
    rendering_img = rendering.clone()
    # Sample one background scene, then a random crop per batch element.
    bg_idx = random.randint(1, 18)
    bg_list = glob.glob("/data/indoor_bg/train/LR/{}/color/*.png".format(bg_idx))
    cropped_bg_list = torch.zeros(batch_size, 3, 224, 224)
    for i in range(batch_size):
        random_idx = random.randint(0, len(bg_list)-1)
        bg_path = bg_list[random_idx]
        bg = cv2.imread(bg_path)
        bg_w, bg_h, _ = bg.shape
        h = w = 224
        rand_idx_w = int(np.random.randint(bg_w-w))
        rand_idx_h = int(np.random.randint(bg_h-h))
        cropped_bg = bg[rand_idx_w:rand_idx_w+w, rand_idx_h:rand_idx_h+h, :]/255.0
        cropped_bg = torch.from_numpy(cropped_bg).permute(2, 0, 1)
        cropped_bg_list[i] = cropped_bg
    cropped_bg_list = cropped_bg_list.to(rendering_img.device)
    # Composite: rendered person where the mask is set, background elsewhere.
    valid_mask = mask > 0
    valid_mask = valid_mask[:, None, :, :].type(torch.int)
    rendering_img_input = valid_mask * rendering_img + (1-valid_mask) * cropped_bg_list
    # Keep an un-normalized detached copy for visualization/saving.
    rendering_bg = rendering_img_input.clone().detach()
    for i in range(batch_size):
        rendering_img_input[i] = normalize(rendering_img_input[i])
    # Generator-side loss: fool the discriminator with the composite.
    e_disc_loss = batch_encoder_disc_l2_loss(texture_discriminator(rendering_img_input))
    losses_disc_e.update(e_disc_loss.item(), batch_size)
    # Discriminator-side loss on real images vs detached fakes.
    fake_rendering_img_input = rendering_img_input.clone().detach()
    real = texture_discriminator(img)
    fake = texture_discriminator(fake_rendering_img_input)
    d_disc_real, d_disc_fake, d_disc_loss = batch_adv_disc_l2_loss(real, fake)
    losses_disc.update(d_disc_loss.item(), batch_size)
    losses_disc_real.update(d_disc_real.item(), batch_size)
    losses_disc_fake.update(d_disc_fake.item(), batch_size)
    return e_disc_loss, d_disc_loss, rendering_bg
def batch_encoder_disc_l2_loss(disc_value):
    '''
    Inputs:
        disc_value: N x 25

    LSGAN generator-side loss: mean over the batch of sum((D(x) - 1)^2).
    '''
    batch = disc_value.shape[0]
    return torch.sum((disc_value - 1.0) ** 2) / batch
def batch_adv_disc_l2_loss(real_disc_value, fake_disc_value):
    '''
    Inputs:
        disc_value: N x 25

    LSGAN discriminator loss: (real -> 1)^2 and (fake -> 0)^2 terms,
    each averaged over its own batch. Returns (real term, fake term, sum).
    '''
    n_real = real_disc_value.shape[0]
    n_fake = fake_disc_value.shape[0]
    la = torch.sum((real_disc_value - 1) ** 2) / n_real
    lb = torch.sum(fake_disc_value ** 2) / n_fake
    return la, lb, la + lb
{
"api_name": "time.strftime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
31524108278 | """
Test read submittal chain
NOTE: this just makes sure the chain executes properly but DOES NOT assess the quality of the agent's analysis. That is done in the ipython notebooks in the evals/ folder
"""
import pytest
from meche_copilot.schemas import Session
from meche_copilot.chains.read_submittal_chain import ReadSubmittalChain
# Used for verbose output of langchain prompts and responses
# from langchain.callbacks import StdOutCallbackHandler
@pytest.fixture(scope="session")
def read_submittal_chain():
    # One chain instance shared across the whole test session.
    return ReadSubmittalChain()
@pytest.mark.skip(reason="TODO - write when done")
def test_read_submittal_data(session: Session, read_submittal_chain: ReadSubmittalChain, visualize: bool):
    # Smoke test: the chain should execute over all session equipment
    # without raising (output quality is assessed in the evals notebooks).
    chain = read_submittal_chain
    chain.read_submittal_data(scoped_eq=session.equipments, show_your_work=visualize)
@pytest.mark.skip(reason="TODO - write when done")
def test_read_submittal_data_with_scoped_eq(session: Session, read_submittal_chain: ReadSubmittalChain, visualize: bool):
    """
    Test can read submittal data for a specific equipment (e.g. pump)
    """
    chain = read_submittal_chain
    pump_eq = session.equipments[0]
    pump_eq.instances[0].design_uid = 'P-1A'
    chain.read_submittal_data(scoped_eq=[pump_eq], show_your_work=visualize)
    # The previous stray module-level statement
    #   res = chain(scoped_eq=session.equipments)
    # referenced fixture-only names (`chain`, `session`) at import scope and
    # raised NameError when the module was collected; it has been removed.
| fuzzy-tribble/meche-copilot | tests/unit_tests/chains/read_submittal_chain_test.py | read_submittal_chain_test.py | py | 1,324 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "meche_copilot.chains.read_submittal_chain.ReadSubmittalChain",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "meche_copilot.schemas.Session",
"line_number": 18,
"usage_typ... |
23882256658 | #!/usr/bin/env python3
"""
Autonomy node for the TurtleBot3.
This script relies on a YAML file of potential navigation locations,
which is listed as a `location_file` ROS parameter.
Example usage:
ros2 run tb3_autonomy autonomy_node.py
ros2 run tb3_autonomy autonomy_node.py --ros-args -p location_file:=/path/to/my/file.yaml
ros2 run tb3_autonomy autonomy_node.py --ros-args -p tree_type:=queue -p target_color:=green
"""
import os
import yaml
import random
import rclpy
from rclpy.node import Node
import time
import py_trees
import py_trees_ros
from py_trees.common import OneShotPolicy
from ament_index_python.packages import get_package_share_directory
from tb3_behaviors.navigation import GoToPose, GetLocationFromQueue
from tb3_behaviors.vision import LookForObject
default_location_file = os.path.join(
get_package_share_directory("tb3_worlds"), "maps", "sim_house_locations.yaml"
)
class AutonomyBehavior(Node):
def __init__(self):
    """Declare parameters, load/shuffle the location list, build the tree."""
    super().__init__("autonomy_node")
    self.declare_parameter("location_file", value=default_location_file)
    self.declare_parameter("tree_type", value="queue")
    self.declare_parameter("enable_vision", value=True)
    self.declare_parameter("target_color", value="blue")
    # Parse locations YAML file and shuffle the location list.
    location_file = self.get_parameter("location_file").value
    with open(location_file, "r") as f:
        self.locations = yaml.load(f, Loader=yaml.FullLoader)
    self.loc_list = list(self.locations.keys())
    # Shuffle so each run visits locations in a different order.
    random.shuffle(self.loc_list)
    # Create and setup the behavior tree
    self.tree_type = self.get_parameter("tree_type").value
    self.enable_vision = self.get_parameter("enable_vision").value
    self.target_color = self.get_parameter("target_color").value
    self.create_behavior_tree(self.tree_type)
    self.tree.node.get_logger().info(f"Using location file: {location_file}")
    self.tree.node.get_logger().info(f"Looking for color {self.target_color}...")
def create_behavior_tree(self, tree_type):
    """Build self.tree for the given tree type ("naive" or "queue").

    Raises:
        ValueError: for an unknown tree_type. Previously an invalid type
        only logged at info level and left self.tree unset, which later
        failed with a less helpful AttributeError in __init__.
    """
    if tree_type == "naive":
        self.tree = self.create_naive_tree()
    elif tree_type == "queue":
        self.tree = self.create_queue_tree()
    else:
        self.get_logger().error(f"Invalid behavior tree type {tree_type}.")
        raise ValueError(f"Invalid behavior tree type {tree_type}.")
def create_naive_tree(self):
"""Create behavior tree with explicit nodes for each location."""
if self.enable_vision:
selector = py_trees.composites.Selector(name="navigation", memory=True)
root = py_trees.decorators.OneShot(
name="root",
child=selector,
policy=OneShotPolicy.ON_SUCCESSFUL_COMPLETION,
)
tree = py_trees_ros.trees.BehaviourTree(root, unicode_tree_debug=False)
tree.setup(timeout=15.0, node=self)
for loc in self.loc_list:
pose = self.locations[loc]
selector.add_child(
py_trees.decorators.OneShot(
name=f"try_{loc}",
child=py_trees.composites.Sequence(
name=f"search_{loc}",
children=[
GoToPose(f"go_to_{loc}", pose, tree.node),
LookForObject(
f"find_{self.target_color}_{loc}",
self.target_color,
tree.node,
),
],
memory=True,
),
policy=OneShotPolicy.ON_COMPLETION,
)
)
else:
seq = py_trees.composites.Sequence(name="navigation", memory=True)
root = py_trees.decorators.OneShot(
name="root", child=seq, policy=OneShotPolicy.ON_SUCCESSFUL_COMPLETION
)
tree = py_trees_ros.trees.BehaviourTree(root, unicode_tree_debug=False)
tree.setup(timeout=15.0, node=self)
for loc in self.loc_list:
pose = self.locations[loc]
seq.add_child(GoToPose(f"go_to_{loc}", pose, self))
return tree
def create_queue_tree(self):
"""Create behavior tree by picking a next location from a queue"""
bb = py_trees.blackboard.Blackboard()
bb.set("loc_list", self.loc_list)
seq = py_trees.composites.Sequence(name="search", memory=True)
root = py_trees.decorators.OneShot(
name="root", child=seq, policy=OneShotPolicy.ON_SUCCESSFUL_COMPLETION
)
tree = py_trees_ros.trees.BehaviourTree(root, unicode_tree_debug=False)
tree.setup(timeout=15.0, node=self)
seq.add_children(
[
GetLocationFromQueue("get_next_location", self.locations),
GoToPose("go_to_location", None, tree.node),
]
)
if self.enable_vision:
seq.add_child(
LookForObject(f"find_{self.target_color}", self.target_color, tree.node)
)
return tree
def execute(self, period=0.5):
"""Executes the behavior tree at the specified period."""
self.tree.tick_tock(period_ms=period * 1000.0)
rclpy.spin(self.tree.node)
rclpy.shutdown()
if __name__ == "__main__":
    # Entry point: bring up ROS, then run the behavior tree until shutdown.
    rclpy.init()
    autonomy_node = AutonomyBehavior()
    autonomy_node.execute()
| sea-bass/turtlebot3_behavior_demos | tb3_autonomy/scripts/autonomy_node.py | autonomy_node.py | py | 5,578 | python | en | code | 207 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "ament_index_python.packages.get_package_share_directory",
"line_number": 31,
"usage_type": "call"
},
{
... |
2655039038 | import torch
import numpy as np
import torch.utils.data
import matplotlib.pyplot as plt
# Number of samples drawn for the toy sine-curve training set.
n_train = 1000
class DS(torch.utils.data.Dataset):
    """Toy 1-D inverse-regression dataset.

    Targets ``y`` are uniform on [-10.5, 10.5]; inputs are
    ``x = 7*sin(0.75*y) + 0.5*y + eps`` with standard-normal noise ``eps``.
    """

    def __init__(self, n):
        self.n = n
        # Draw y first, then the noise, so the RNG call order matches a
        # seeded reproduction of this dataset.
        targets = torch.rand(n) * 21 - 10.5
        noise = torch.randn(n)
        self.y = targets
        self.x = 7.0 * torch.sin(0.75 * targets) + 0.5 * targets + noise

    def __len__(self):
        return self.n

    def __getitem__(self, i):
        return self.x[i], self.y[i]
# Build the training set and visualize the (x, y) pairs; plt.show() blocks
# until the plot window is closed.
train_ds = DS(n_train)
plt.scatter(train_ds.x.numpy(),train_ds.y.numpy(), s=2)
plt.show()
# train_dl = torch.utils.data.DataLoader(train_ds, batch_size=batch_size)
class HandwritingGenerator(Module):
    # NOTE(review): `Module`, `LSTM`, and `mdn` are not imported in the
    # visible code -- presumably torch.nn.Module / torch.nn.LSTM and a
    # project MDN layer; confirm against the original module's imports.
    def __init__(self, alphabet_size, hidden_size, num_window_components, num_mixture_components):
        """Stack of three LSTMs plus a mixture-density output head.

        num_window_components is accepted but not used in the visible code.
        """
        super(HandwritingGenerator, self).__init__()
        # +1 presumably reserves a slot for an end-of-sequence token --
        # TODO confirm.
        self.alphabet_size = alphabet_size + 1
        # First LSTM layer, takes as input a tuple (x, y, eol)
        self.lstm1_layer = LSTM(input_size=3,
                                hidden_size=hidden_size,
                                batch_first=True)
        self.lstm2_layer = LSTM(input_size=3 + hidden_size + alphabet_size + 1,
                                hidden_size=hidden_size,
                                batch_first=True)
        # Third LSTM layer, takes as input the concatenation of the output of the first LSTM layer,
        # the output of the second LSTM layer
        # and the output of the Window layer
        self.lstm3_layer = LSTM(input_size=hidden_size,
                                hidden_size=hidden_size,
                                batch_first=True)
        # Mixture-density output head producing stroke-distribution params.
        self.output_layer = mdn(input_size=hidden_size,
                                num_mixtures=num_mixture_components)
        # Recurrent states, carried across forward() calls.
        self.hidden1 = None
        self.hidden2 = None
        self.hidden3 = None
        # Initialize parameters
        # NOTE(review): reset_parameters() is not defined in the visible
        # code -- verify it exists on the base class or elsewhere.
        self.reset_parameters()
    def forward(self, strokes, onehot):
        # NOTE(review): only lstm1 and the output layer are used here;
        # lstm2/lstm3 and `onehot` are unused in the visible code, and
        # `bias` below is undefined in this scope -- likely an incomplete
        # port; confirm against the original implementation.
        # First LSTM Layer
        input_ = strokes
        output1, self.hidden1 = self.lstm1_layer(input_, self.hidden1)
        # Gaussian Window Layer
        eos, pi, mu1, mu2, sigma1, sigma2, rho = self.output_layer(output1, bias)
return (eos, pi, mu1, mu2, sigma1, sigma2, rho) | nguyenvantui/deepwriting-master-1 | github_syn/my_mdn2.py | my_mdn2.py | py | 2,177 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.utils",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.rand",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number":... |
7755009879 | import numpy as np
import json
import copy
import functools
from tensorpack.utils import logger
from petridish.info.layer_info import LayerInfo, LayerInfoList, LayerTypes
class CellNetworkInfo(dict):
    """Dict-backed description of a (possibly cell-based) network.

    Holds up to three LayerInfoList values under the keys 'master',
    'normal' and 'reduction'. Cell-based networks define 'normal' (and
    optionally 'reduction') cells that the 'master' list instantiates;
    macro networks define only 'master'.
    """
    def __init__(self, master=None, normal=None, reduction=None):
        # NOTE(review): locals() here also contains 'self', so the dict is
        # seeded with a circular 'self' entry; to_str() only serializes
        # _cell_names keys, which masks this -- consider passing the three
        # keys explicitly.
        super(CellNetworkInfo, self).__init__(locals())
        self._cell_names = []
        if master is not None:
            self._cell_names.append('master')
        if normal is not None:
            self._cell_names.append('normal')
        if reduction is not None:
            self._cell_names.append('reduction')
    @property
    def master(self):
        # Macro-level LayerInfoList (or None if absent).
        return self.get('master', None)
    @master.setter
    def master(self, val):
        self['master'] = val
    @property
    def normal(self):
        # Normal-cell LayerInfoList (or None if absent).
        return self.get('normal', None)
    @normal.setter
    def normal(self, val):
        self['normal'] = val
    @property
    def reduction(self):
        # Reduction-cell LayerInfoList (or None if absent).
        return self.get('reduction', None)
    @reduction.setter
    def reduction(self, val):
        self['reduction'] = val
    @property
    def cell_names(self):
        # Keys populated at construction/deserialization time.
        return self._cell_names
    @property
    def operable_cell_names(self):
        # Cells that search operations (hallucinations) may modify.
        if self.normal is None:
            return ['master']
        elif self.reduction is None:
            return ['normal']
        return ['normal', 'reduction']
    def is_cell_based(self):
        # True when searchable structure lives in cells rather than master.
        return 'normal' in self.operable_cell_names
    def to_str(self):
        """Serialize the populated cell lists to a JSON string."""
        return json.dumps({key : self[key] for key in self._cell_names})
    def sample_hallucinations(
            self, layer_ops, merge_ops, prob_at_layer=None,
            min_num_hallus=1, hallu_input_choice=None):
        """Sample candidate layers ("hallucinations") for each operable cell.

        Returns a dict mapping cell name to the sampled hallucinations;
        delegates to sum- or cat-style sampling on each LayerInfoList.
        """
        hallus = dict()
        num_hallu_by_name = dict()
        if len(self.operable_cell_names) > 1:
            assert len(self.operable_cell_names) == 2, \
                self.operable_cell_names
            num_hallu_by_name['normal'] = min_num_hallus
            num_hallu_by_name['reduction'] = min_num_hallus
        else:
            num_hallu_by_name[self.operable_cell_names[0]] = min_num_hallus
        for cname in self.operable_cell_names:
            n_hallus = num_hallu_by_name[cname]
            if n_hallus == 0:
                continue
            if cname == 'master' or self[cname].is_end_merge_sum():
                cell_based = (cname != 'master')
                hallus[cname] = self[cname].sample_sum_hallucinations(layer_ops,
                    merge_ops, prob_at_layer, n_hallus, hallu_input_choice, cell_based)
            else:
                hallus[cname] = self[cname].sample_cat_hallucinations(layer_ops,
                    merge_ops, prob_at_layer, n_hallus, hallu_input_choice)
        return hallus
    def add_hallucinations(self, hallus,
            final_merge_op=LayerTypes.MERGE_WITH_SUM,
            stop_gradient_val=1,
            hallu_gate_layer=LayerTypes.NO_FORWARD_LAYER):
        """Insert previously sampled hallucinations into their cells in place."""
        for cname in hallus:
            args = [hallus[cname], final_merge_op, stop_gradient_val, hallu_gate_layer]
            if cname == 'master' or self[cname].is_end_merge_sum():
                self[cname].add_sum_hallucinations(*args)
            else:
                self[cname].add_cat_hallucinations(*args)
        return self
    def contained_hallucination(self):
        """Map (cell_index, hallu_id) -> (start, end) range within its cell."""
        hallu_locs = dict()
        for ci, cname in enumerate(self.operable_cell_names):
            # candidate id to (start, end)
            hid_to_range = self[cname].contained_hallucination()
            for hid in hid_to_range:
                hallu_locs[(ci, hid)] = hid_to_range[hid]
        return hallu_locs
    def sorted_hallu_indices(self, hallu_locs):
        # sort by ci, then location in list
        return sorted(hallu_locs, key=lambda ci_hid : (ci_hid[0], hallu_locs[ci_hid][0]))
    def separate_hallu_info_by_cname(self, contained, hallu_indices, l_fs_ops, l_fs_omega):
        """
        Args:
            contained : a dict from (ci, hid) to (start, end) in self[operable_cnames[ci]]
            hallu_indices : list of (ci, hid), in order by sorted_hallu_indices
            l_fs_ops : list of list of int indices that represent the order of importance of
                input op of the hallu feature selection. The first level list is in the
                same order as hallu_indices (sorted by (ci,hid) ). These indices are the
                ones that are chosen by each hallu.
            l_fs_omega : list of list of float value that represent the importance value
                whose abosolute value is in decreasing value. The first level is the in
                the same order as l_op_indices and hallu_indices
                These value are associated with the chosen operations.
        Returns:
            A 4-tuple of per-cell-name dicts mirroring the four inputs, so
            that cell/LayerInfoList-level APIs can be invoked per cell.
        """
        cell_names = self.operable_cell_names
        # first break the info by cname so that we can call cell/layerInfoList level api.
        # dictionary from hid to location (start, end)
        lil_contained = { cname : dict() for cname in cell_names }
        for ci_hid in contained:
            ci, hid = ci_hid
            cname = cell_names[ci]
            lil_contained[cname][hid] = contained[ci_hid]
        # hid in sorted order for each cname
        lil_h_indices = { cname : [] for cname in cell_names }
        for ci_hid in hallu_indices:
            ci, hid = ci_hid
            cname = cell_names[ci]
            lil_h_indices[cname].append(hid)
        # Feature selection info
        if l_fs_ops is None or len(l_fs_ops) == 0:
            lil_fs_ops = { cname : None for cname in cell_names }
            lil_fs_omega = { cname : None for cname in cell_names }
        else:
            lil_fs_ops = { cname : [] for cname in cell_names }
            lil_fs_omega = { cname : [] for cname in cell_names }
            for ci_hid, fs_ops, fs_omega in zip(hallu_indices, l_fs_ops, l_fs_omega):
                ci, hid = ci_hid
                cname = cell_names[ci]
                lil_fs_ops[cname].append(fs_ops)
                lil_fs_omega[cname].append(fs_omega)
        return (lil_contained, lil_h_indices, lil_fs_ops, lil_fs_omega)
    def select_hallucination(self, selected, separated_hallu_info):
        """
        selected : list of (ci, hid)

        Keeps only the selected hallucinations in each cell, delegating to
        the sum-/cat-specific selection on each LayerInfoList.
        """
        cell_names = self.operable_cell_names
        # selected hid for each cname
        lil_selected = { cname : [] for cname in cell_names }
        for ci_hid in selected:
            ci, hid = ci_hid
            cname = cell_names[ci]
            lil_selected[cname].append(hid)
        (lil_contained, lil_h_indices, lil_fs_ops, lil_fs_omega) = separated_hallu_info
        # Invoke LayerInfoList select
        for cname in cell_names:
            lil_args = [lil_selected[cname], lil_contained[cname],
                lil_h_indices[cname], lil_fs_ops[cname], lil_fs_omega[cname]
            ]
            if cname == 'master' or self[cname].is_end_merge_sum():
                self[cname] = self[cname].select_sum_hallucination(*lil_args)
            else:
                self[cname] = self[cname].select_cat_hallucination(*lil_args)
        return self
    @staticmethod
    def calc_reduction_layers(num_cells, num_reduction_layers, num_init_reductions):
        """
        Compute true_cell_idx of reduction layers

        The first num_init_reductions cells are reductions; the remaining
        num_reduction_layers reductions are spread evenly over num_cells.
        """
        reduction_layers = list(range(num_init_reductions))
        for pool_num in range(1, num_reduction_layers + 1):
            layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
            layer_num = int(layer_num) + pool_num - 1 + num_init_reductions
            reduction_layers.append(layer_num)
        return reduction_layers
    @staticmethod
    def default_master(n_normal_inputs=2, n_reduction_inputs=2,
            num_cells=18, num_reduction_layers=2, num_init_reductions=0,
            skip_reduction_layer_input=0, use_aux_head=1):
        """Build the default macro LayerInfoList of normal/reduction cells.

        Emits input layers, then alternating 'normal'/'reduction' cell
        placeholders with identity input ops; optionally marks aux heads.
        """
        reduction_layers = CellNetworkInfo.calc_reduction_layers(
            num_cells, num_reduction_layers, num_init_reductions)
        master = LayerInfoList()
        layer_id = 0
        n_inputs = n_normal_inputs if num_init_reductions == 0 else n_reduction_inputs
        for _ in range(n_inputs):
            master.append(LayerInfo(layer_id=layer_id))
            layer_id += 1
        # true_num_cells counts cells from the first non-input with 0-based index
        true_num_cells = num_cells + num_init_reductions + num_reduction_layers
        for ci in range(true_num_cells):
            info = LayerInfo(layer_id)
            if ci in reduction_layers:
                info.inputs = list(range(layer_id - n_reduction_inputs, layer_id))
                n_in = len(info.inputs)
                info.operations = [LayerTypes.IDENTITY] * n_in + ['reduction']
                info.down_sampling = 1
            else:
                if (skip_reduction_layer_input and ci-1 in reduction_layers and
                        ci > num_init_reductions):
                    # imagenet : do not take the input of regular reduction as skip connection.
                    info.inputs = (list(range(layer_id - n_normal_inputs - 1, layer_id - 2)) +
                        [layer_id - 1])
                else:
                    info.inputs = list(range(layer_id - n_normal_inputs, layer_id))
                n_in = len(info.inputs)
                info.operations = [LayerTypes.IDENTITY] * n_in + ['normal']
            master.append(info)
            layer_id += 1
        # aux_weight at the last cell before the last reduction
        if use_aux_head and len(reduction_layers) > 0:
            master[reduction_layers[-1] - 1 + n_inputs].aux_weight = 0.4
        master[-1].aux_weight = 1.0
        return master
    @staticmethod
    def from_str(ss):
        """Inverse of to_str(): parse a JSON string into a CellNetworkInfo."""
        json_data = json.loads(ss)
        return CellNetworkInfo.from_json_loads(json_data)
    @staticmethod
    def from_json_loads(json_data):
        """Build a CellNetworkInfo from already-parsed JSON.

        A bare list is treated as the master cell (legacy format);
        otherwise the known cell keys are pulled from the dict.
        """
        net_info = CellNetworkInfo()
        if isinstance(json_data, list):
            net_info['master'] = LayerInfoList.from_json_loads(json_data)
            net_info.cell_names.append('master')
        else:
            for key in ['master', 'normal', 'reduction']:
                jd = json_data.get(key, None)
                if jd:
                    net_info[key] = LayerInfoList.from_json_loads(jd)
                    net_info.cell_names.append(key)
        return net_info
    # The four methods below are unimplemented stubs kept for API parity.
    @staticmethod
    def to_seq(rmi):
        return None
    @staticmethod
    def seq_to_img_flag(seq, max_depth=128, make_batcch=False):
        return None
    @staticmethod
    def seq_to_hstr(rmi, not_exist_str='--'):
        return None
    @staticmethod
    def str_to_seq(ss):
        return CellNetworkInfo.to_seq(CellNetworkInfo.from_str(ss))
def net_info_from_str(ss):
    """Deserialize a CellNetworkInfo from its string encoding.

    Legacy encodings join brace-delimited dicts with LayerInfoList.DELIM;
    those are rewritten into a JSON array before parsing.
    """
    is_legacy = (
        ss[0] == '{' and ss[-1] == '}' and LayerInfoList.DELIM in ss)
    if is_legacy:
        # this is for backward compatibility
        ss = '[ {} ]'.format(ss.replace(LayerInfoList.DELIM, ' , '))
    return CellNetworkInfo.from_json_loads(json.loads(ss))
"""
Examples for resnet, nasnet-a
"""
def separable_resnet_cell_info(next_id=0, input_ids=[0, 1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Residual cell with a single 3x3 separable-conv branch.

    Layer ``next_id+2`` sums a separable conv of the newest input with an
    identity skip of the same input; ``ensure_end_merge`` appends the
    requested end-merge layer if the cell does not already end with it.
    """
    LT = LayerTypes
    residual = LayerInfo(
        next_id + 2,
        inputs=[input_ids[1], input_ids[1]],
        operations=[LT.SEPARABLE_CONV_3_2, LT.IDENTITY, LT.MERGE_WITH_SUM])
    cell = LayerInfoList(
        [LayerInfo(input_ids[0]), LayerInfo(input_ids[1]), residual])
    return ensure_end_merge(cell, end_merge)
separable_resnet_cell_info.n_inputs = 2
def basic_resnet_cell_info(next_id=0, input_ids=[0, 1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Basic two-conv residual cell: conv3 -> conv3, summed with an
    identity skip of the newest input, then merged via end_merge."""
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]),
            LayerInfo(input_ids[1]),
            # First conv of the residual branch (no merge yet).
            LayerInfo(
                next_id+2,
                inputs=[input_ids[1]],
                operations=[
                    LT.CONV_3,
                    LT.MERGE_WITH_NOTHING
                ]
            ),
            # Second conv, summed with the identity skip connection.
            LayerInfo(
                next_id+3,
                inputs=[next_id+2, input_ids[1]],
                operations=[
                    LT.CONV_3,
                    LT.IDENTITY,
                    LT.MERGE_WITH_SUM
                ]
            )
        ])
    return ensure_end_merge(l_info, end_merge)
basic_resnet_cell_info.n_inputs = 2
def fully_connected_resnet_cell_info(next_id=0, input_ids=[0, 1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Residual cell whose only new layer is a sigmoid-gated FC of the
    newest input, merged by summation; end merge appended as needed."""
    LT = LayerTypes
    gated = LayerInfo(
        next_id + 2, inputs=[input_ids[1]],
        operations=[LT.FC_SGMD_MUL_GATE, LT.MERGE_WITH_SUM])
    cell = LayerInfoList(
        [LayerInfo(input_ids[0]), LayerInfo(input_ids[1]), gated])
    return ensure_end_merge(cell, end_merge)
fully_connected_resnet_cell_info.n_inputs = 2
def fully_connected_rnn_base_cell_info(next_id=0, input_ids=[0,1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """
    See implementation of PetridishRNNCell to see an example of
    how this list of info is used.
    The first two info are x_and_h and init_layer, which is a
    projected x_and_h multiplied with gate.
    The rest of layers use specified operation to morph the layers.
    This is DARTS v2.

    Note: end_merge is unused here; the cell always ends with an explicit
    MERGE_WITH_AVG layer. The parameter is kept for signature parity with
    the other cell builders.
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]), # next_id + 0
            LayerInfo(input_ids[1]), # next_id + 1
            LayerInfo(
                next_id+2, inputs=[input_ids[1]],
                operations=[LT.FC_SGMD_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+3, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+4, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+5, inputs=[next_id+2],
                operations=[LT.FC_IDEN_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+6, inputs=[next_id+3],
                operations=[LT.FC_TANH_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+7, inputs=[next_id+6],
                operations=[LT.FC_SGMD_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+8, inputs=[next_id+4],
                operations=[LT.FC_TANH_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+9, inputs=[next_id+6],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            # Final layer averages all intermediate node outputs.
            LayerInfo(
                next_id+10,
                inputs=[
                    next_id+1, next_id+2, next_id+3,
                    next_id+4, next_id+5, next_id+6,
                    next_id+7, next_id+8, next_id+9,],
                operations=[
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.MERGE_WITH_AVG]
            )
        ])
    return l_info
fully_connected_rnn_base_cell_info.n_inputs = 2
def darts_rnn_base_cell_info(
        next_id=0, input_ids=[0,1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """
    See implementation of PetridishRNNCell to see an example of
    how this list of info is used.
    The first two info are x_and_h and init_layer, which is a
    projected x_and_h multiplied with gate.
    The rest of layers use specified operation to morph the layers.
    This is DARTS from the paper writing

    Note: end_merge is unused here; the cell always ends with an explicit
    MERGE_WITH_AVG layer. The parameter is kept for signature parity with
    the other cell builders.
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]), # next_id + 0
            LayerInfo(input_ids[1]), # next_id + 1
            LayerInfo(
                next_id+2, inputs=[input_ids[1]],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+3, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+4, inputs=[next_id+3],
                operations=[LT.FC_TANH_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+5, inputs=[next_id+4],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+6, inputs=[next_id+5],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+7, inputs=[next_id+2],
                operations=[LT.FC_IDEN_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+8, inputs=[next_id+6],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+9, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            # Final layer averages all intermediate node outputs.
            LayerInfo(
                next_id+10,
                inputs=[
                    next_id+1, next_id+2, next_id+3,
                    next_id+4, next_id+5, next_id+6,
                    next_id+7, next_id+8, next_id+9,],
                operations=[
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.MERGE_WITH_AVG]
            )
        ])
    return l_info
darts_rnn_base_cell_info.n_inputs = 2
def resnet_bottleneck_cell_info(down_sampling=0):
    # Placeholder: bottleneck cells change channel counts mid-cell, which
    # this LayerInfo representation does not express yet.
    raise NotImplementedError("not implemented due to changing filter sizes")
def nasneta_cell_info(
        next_id=0, input_ids=[0, 1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """NASNet-A normal cell: five pairwise-summed branches over the two
    cell inputs, with all unused outputs gathered by cat_unused."""
    LT = LayerTypes
    l_info = LayerInfoList()
    l_info.extend([
        LayerInfo(input_ids[0]),
        LayerInfo(input_ids[1]), # most recent layer
        LayerInfo(next_id+2, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.SEPARABLE_CONV_5_2, LT.SEPARABLE_CONV_3_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+3, inputs=[input_ids[0], input_ids[0]],
            operations=[LT.SEPARABLE_CONV_5_2, LT.SEPARABLE_CONV_3_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+4, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.AVGPOOL_3x3, LT.IDENTITY, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+5, inputs=[input_ids[0], input_ids[0]],
            operations=[LT.AVGPOOL_3x3, LT.AVGPOOL_3x3, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+6, inputs=[input_ids[1], input_ids[1]],
            operations=[LT.SEPARABLE_CONV_3_2, LT.IDENTITY, LT.MERGE_WITH_SUM]),
    ])
    # Merge every branch whose output is not consumed by a later branch.
    l_info.append(cat_unused(l_info, next_id+7, end_merge))
    return l_info
nasneta_cell_info.n_inputs = 2
def nasnata_reduction_cell_info(
        next_id=0, input_ids=[0, 1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """NASNet-A reduction cell (name typo "nasnata" kept: it is public API).

    Five pairwise-summed branches over the two cell inputs; unused branch
    outputs are gathered by cat_unused.
    """
    LT = LayerTypes
    l_info = LayerInfoList()
    l_info.extend([
        LayerInfo(input_ids[0]),
        LayerInfo(input_ids[1]), # most recent layer
        LayerInfo(next_id+2, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.SEPARABLE_CONV_5_2, LT.SEPARABLE_CONV_7_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+3, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.MAXPOOL_3x3, LT.SEPARABLE_CONV_7_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+4, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.AVGPOOL_3x3, LT.SEPARABLE_CONV_5_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+5, inputs=[next_id+3, next_id+2],
            operations=[LT.IDENTITY, LT.AVGPOOL_3x3, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+6, inputs=[next_id+2, input_ids[1]],
            operations=[LT.SEPARABLE_CONV_3_2, LT.MAXPOOL_3x3, LT.MERGE_WITH_SUM]),
    ])
    # Merge every branch whose output is not consumed by a later branch.
    l_info.append(cat_unused(l_info, next_id+7, end_merge))
    return l_info
nasnata_reduction_cell_info.n_inputs = 2
def cat_unused(layer_info_list, layer_id, end_merge):
    """Create a final layer that merges all loose ends of a cell.

    Collects every layer id that no later layer consumes as an input and
    builds a new LayerInfo (id ``layer_id``) taking all of them through
    IDENTITY ops, combined with ``end_merge``.

    NOTE(review): input layers are marked as used via their list index
    ``li`` rather than ``info.id``; the two only coincide when ids start
    at 0 in list order (true for the default callers in this file) --
    confirm before reusing with non-default ids.
    """
    is_used = set()
    for li, info in enumerate(layer_info_list):
        if LayerInfo.is_input(info):
            # Cell inputs are never concatenated into the output.
            is_used.add(li)
            continue
        for in_id in info.inputs:
            is_used.add(in_id)
    # Loose ends: layers whose output nothing downstream consumes.
    inputs = [info.id for info in layer_info_list if info.id not in is_used]
    ops = [LayerTypes.IDENTITY] * (len(inputs) + 1)
    ops[-1] = end_merge
    return LayerInfo(layer_id, inputs=inputs, operations=ops)
def ensure_end_merge(l_info, end_merge):
    """Guarantee that the cell ends with the requested merge operation.

    If the last layer already merges with ``end_merge`` the list is
    returned unchanged; otherwise a pass-through (IDENTITY) layer ending
    in ``end_merge`` is appended in place.
    """
    assert len(l_info) > 0, l_info
    tail = l_info[-1]
    if tail.merge_op != end_merge:
        l_info.append(
            LayerInfo(tail.id + 1, inputs=[tail.id],
                      operations=[LayerTypes.IDENTITY, end_merge]))
    return l_info
def replace_wsum_with_catproj(net_info):
    """Rewrite every weighted-sum merge into a concat-then-project merge.

    Operates on the normal and reduction cells when the network is
    cell-based, otherwise on the master list. Mutates ``net_info`` in
    place and returns it.
    """
    if net_info.is_cell_based():
        cells = [c for c in (net_info.normal, net_info.reduction) if c]
    else:
        cells = [net_info.master]
    for cell in cells:
        for info in cell:
            if info.merge_op == LayerTypes.MERGE_WITH_WEIGHTED_SUM:
                info.merge_op = LayerTypes.MERGE_WITH_CAT_PROJ
    return net_info
def add_aux_weight(net_info, aux_weight=0.4):
    """Attach an auxiliary-prediction weight to one master layer.

    Scanning from the end of the master list, the first non-downsampling
    layer found after (i.e., preceding) a reduction layer -- excluding the
    final layer itself -- receives ``aux_weight``. Mutates and returns
    ``net_info``.
    """
    final_id = net_info.master[-1].id
    seen_reduction = False
    for info in reversed(net_info.master):
        if info.down_sampling:
            seen_reduction = True
            continue
        if seen_reduction and info.id < final_id:
            info.aux_weight = aux_weight
            break
    return net_info
def net_info_cifar_to_ilsvrc(net_info, s_type, use_latest_input=False):
    """Adapt a CIFAR-sized macro network for ILSVRC input resolution.

    Inserts extra stride-2 (reduction) layers right after the input layers
    so that the total number of reductions (stem + inserted + already in
    the model) reaches 5. Cell-based models insert reduction cells;
    non-cell models insert separable 7x7 stride-2 convolutions.

    Args:
        net_info : CellNetworkInfo describing the macro network (mutated).
        s_type : stem type, one of 'imagenet', 'basic', 'conv3', 'conv7';
            determines how many reductions the stem already performs.
        use_latest_input : if True, all references to the original input
            layers are rewired to the newest inserted layer instead of
            being shifted by the number of inserted layers.

    Returns:
        The modified ``net_info``.

    Raises:
        ValueError: if ``s_type`` is not a recognized stem type.
        (Previously an unknown s_type fell through to a NameError on
        ``n_stem_reductions``.)
    """
    assert isinstance(net_info, CellNetworkInfo), \
        "{} is not CellNetworkInfo.".format(net_info)
    # Number of reductions already performed by each stem type.
    stem_reductions = {'imagenet': 2, 'basic': 0, 'conv3': 1, 'conv7': 1}
    if s_type not in stem_reductions:
        raise ValueError("Unknown s_type: {}".format(s_type))
    n_stem_reductions = stem_reductions[s_type]
    n_model_reductions = sum([info.down_sampling for info in net_info.master])
    # Number of extra reductions required at the start to total 5.
    n_extra_reductions = 5 - n_model_reductions - n_stem_reductions
    if n_extra_reductions <= 0:
        return net_info
    next_lid = max([info.id for info in net_info.master])
    n_inputs = 2
    # Seed the chain with the id of the last input layer so the first
    # inserted reduction consumes the original inputs.
    layer_ids = [net_info.master[n_inputs - 1].id] * n_inputs
    is_cell_based = bool(net_info.get('reduction', None))
    l_to_insert = []
    for _ in range(n_extra_reductions):
        next_lid += 1
        if is_cell_based:
            operations = [LayerTypes.IDENTITY] * n_inputs + ['reduction']
        else:
            operations = [
                LayerTypes.SEPARABLE_CONV_7_2,
                LayerTypes.IDENTITY,
                LayerTypes.MERGE_WITH_SUM
            ]
        info = LayerInfo(
            next_lid,
            inputs=layer_ids[-n_inputs:],
            operations=operations,
            down_sampling=1)
        layer_ids.append(next_lid)
        l_to_insert.append(info)

    # Rewire later layers that referenced the original inputs directly.
    def mapped_input(old_idx):
        if use_latest_input:
            return layer_ids[n_extra_reductions + n_inputs - 1]
        return layer_ids[n_extra_reductions + old_idx]

    remap_dict = {
        info.id: mapped_input(idx)
        for idx, info in enumerate(net_info.master[:n_inputs])
    }
    for info in net_info.master:
        for idx, inid in enumerate(info.inputs):
            newid = remap_dict.get(inid, None)
            if newid is not None:
                info.inputs[idx] = newid
    # Splice the new reductions in right after the input layers.
    net_info.master[n_inputs:n_inputs] = l_to_insert
    return net_info
def increase_net_info_size(net_info, multiplier=2):
    """
    Increase the size of a macro net_info to be multiplier
    times of the original size. This is used after macro
    searching on small models to enable deeper models.

    Algorithm:
    1. We first find where the cells start and end, using
       _is_end_of_cell(). Check the assumptions there.
    2. For each cell, the inner cell connections are kept
       the same.
    3. The connections to previous end-of-cells are considered
       as relative (n-th previous cell output), and are remapped
       accordingly in each copy.
    4. Each normal cell is repeated multiplier number of times;
       the repeats are inserted before the real one. Reduction
       cells are never repeated.

    Args:
        net_info : a CellNetworkInfo for macro search.
        multiplier (int or list of int) :
            If it is int, the number of times each normal cell is repeated.
            If it is a list, it is the periodic multiplier applied to each
            normal cell.
    Returns:
        A modified original net_info. Note that the original is changed.
    """
    l_info = net_info.master
    n_inputs = l_info.num_inputs()
    # Indices (into l_info) of cell-output layers seen so far; the input
    # layers count as the initial "cell outputs".
    end_cell_indices = list(range(n_inputs))
    # Layer ids of cell outputs in the ORIGINAL list; used to resolve
    # cross-cell references as "n-th previous cell output".
    orig_end_cell_ids = [
        l_info[idx].id for idx in end_cell_indices
    ]
    # Next fresh layer id = max existing id (pre-increment before use).
    next_id = 0
    for info in l_info:
        next_id = max(next_id, info.id)
    idx = start = n_inputs
    id_to_idx = dict()
    normal_cnt = 0
    if isinstance(multiplier, int):
        multiplier = [multiplier]
    while idx < len(l_info):
        # using while loop as l_info is getting longer
        id_to_idx[l_info[idx].id] = idx
        if not l_info._is_end_of_cell(idx):
            idx += 1
            continue
        n_copies = 0
        if not l_info[idx].down_sampling:
            # Only normal cells are repeated; the multiplier list cycles.
            n_copies = multiplier[normal_cnt % len(multiplier)] - 1
            normal_cnt += 1
        cell_size = idx - start + 1
        # make copies.
        for cp_idx in range(n_copies):
            # Insert a deep copy of the current cell BEFORE it, then
            # re-id and re-wire the shifted original span [start, idx].
            l_info[start:start] = copy.deepcopy(l_info[start:idx+1])
            for info in l_info[start:idx+1]:
                next_id += 1
                info.id = next_id
                inputs = info.inputs
                for in_idx, in_id in enumerate(inputs):
                    if in_id in id_to_idx.keys():
                        # Intra-cell reference: shift by the copies inserted.
                        idx_in_l_info = id_to_idx[in_id] + cp_idx * cell_size
                    else:
                        # Cross-cell reference: resolve as the n-th previous
                        # cell output relative to the current position.
                        n_prev = None
                        for _i, _id in enumerate(reversed(orig_end_cell_ids)):
                            if _id == in_id:
                                n_prev = _i + 1
                                break
                        idx_in_l_info = end_cell_indices[-n_prev]
                    inputs[in_idx] = l_info[idx_in_l_info].id
                info.inputs = inputs
                # copied cells never produces aux predictions.
                info.aux_weight = 0
            end_cell_indices.append(idx)
            start = idx + 1
            idx += cell_size
        # modify the original for the cell connections
        for info in l_info[start:idx+1]:
            inputs = info.inputs
            for in_idx, in_id in enumerate(inputs):
                if in_id not in id_to_idx.keys():
                    n_prev = None
                    for _i, _id in enumerate(reversed(orig_end_cell_ids)):
                        if _id == in_id:
                            n_prev = _i + 1
                            break
                    inputs[in_idx] = l_info[end_cell_indices[-n_prev]].id
            info.inputs = inputs
        end_cell_indices.append(idx)
        orig_end_cell_ids.append(l_info[idx].id)
        id_to_idx = dict()
        idx = start = end_cell_indices[-1] + 1
        #end while
    net_info.master = l_info
    return net_info
| microsoft/petridishnn | petridish/info/net_info.py | net_info.py | py | 27,723 | python | en | code | 111 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "petridish.info.layer_info.LayerTypes.MERGE_WITH_SUM",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "petridish.info.layer_info.LayerTypes",
"line_number": 91,
"usage_type... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.