id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12834676 | <gh_stars>0
from model.project import Project
class ProjectHelper:
    """Selenium page-object helper for Mantis project administration."""
    def __init__(self, app):
        self.app = app
    def open_project_page(self):
        # Navigate to the project-management page unless we are already on
        # the project-creation page.
        wd = self.app.wd
        if not (wd.current_url.endswith("/manage_proj_create_page.php")):
            wd.find_element_by_xpath("//div[@id='main-container']/div[@id='sidebar']/ul[@class='nav nav-list']/li[7]/a").click()
            wd.find_element_by_xpath("//div[@class='row']/ul/li[3]/a").click()
    def create_project(self, project):
        wd = self.app.wd
        # Open the project-management page
        self.open_project_page()
        # Go to the project-creation form
        wd.find_element_by_xpath("//button[@class='btn btn-primary btn-white btn-round']").click()
        # Fill in the form fields
        self.fill_project_form(project)
        # Click the add-project button
        wd.find_element_by_xpath("//input[@class='btn btn-primary btn-white btn-round']").click()
    def fill_project_form(self, project):
        # Only name and description are filled; other form fields keep their defaults.
        self.change_field_value("name", project.name)
        self.change_field_value("description", project.description)
    def change_field_value(self, field_name, text):
        # A None value means "leave this field untouched".
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)
    def project_count(self):
        # Number of rows in the projects table.
        wd = self.app.wd
        return len(wd.find_elements_by_xpath("//div[@class='widget-box widget-color-blue2']/div[2]/div[1]/div[2]/table/tbody/tr"))
    def get_project_list(self):
        # Scrape the project table into Project model objects; caches the
        # result on self.project_list and returns a copy.
        wd = self.app.wd
        self.open_project_page()
        self.project_list = []
        for element in wd.find_elements_by_xpath("//div[@class='table-responsive']/table/tr"):
            cells = element.find_elements_by_tag_name("td")
            # NOTE(review): cells[1]/cells[5] are WebElement objects, not
            # strings — if Project expects text this should probably be
            # cells[1].text / cells[5].text; confirm against the model.
            name = cells[1]
            description = cells[5]
            self.project_list.append(Project(name=name, description=description))
        return list(self.project_list)
    def open_project_by_index(self, index):
        wd = self.app.wd
        self.open_project_page()
        row = wd.find_elements_by_xpath("//div[@class='table-responsive']/table/tbody/tr")[index]
        cell = row.find_elements_by_tag_name("td")[0]
        cell.find_element_by_tag_name("a").click()
    def del_project_by_index(self, index):
        wd = self.app.wd
        self.open_project_by_index(index)
        # Two identical clicks: the delete button, then the confirmation
        # button (both rendered with the same value attribute).
        # NOTE(review): trailing "| StarcoderdataPython" below is a dataset
        # artifact fused onto the original line, kept verbatim.
        wd.find_element_by_xpath("//input[@value='Удалить проект']").click()
        wd.find_element_by_xpath("//input[@value='Удалить проект']").click() | StarcoderdataPython
1919637 | <filename>main.py
import argparse
from scraper import Scraper
from common import init_config, TRY_AGAIN_STR
from extractor import Extractor
from formatter import Formatter
from saver import ContentSaver

"""
Short algo:
0th - initialize config
1st - get html with scraper
2nd - clear tags and so on with extractor
3rd - format content with formatter
4th - save content with saver
"""

# CLI: a single optional --url argument selects the page to make readable.
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", type=str, help="url to get readable context")
args = parser.parse_args()

if args.url:
    # Pipeline: scrape raw HTML -> strip tags -> format -> persist to disk.
    config = init_config()
    html = Scraper.get_content_in_html(args.url)
    clear_content = Extractor(config).get_clear_content_from_html(html)
    formatted_content = Formatter(config).get_formatted_content(clear_content)
    ContentSaver.save_content(formatted_content, args.url)
else:
    # No URL supplied: print the usage hint defined in common.
    print(TRY_AGAIN_STR)
| StarcoderdataPython |
6614268 | import requests
from bs4 import BeautifulSoup
def make_synonym_dict(word):
    """Look up *word* on weblio.jp's thesaurus and return {word: [synonyms]}.

    Returns {} when the page cannot be parsed as expected.
    NOTE(review): performs a live HTTP request with no timeout set.
    """
    #word = input()
    synonym_dict={word:[]}
    url = "https://thesaurus.weblio.jp/content/" + word
    #headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'}
    r = requests.get(url)
    html = r.text
    bs = BeautifulSoup(html, 'html.parser')
    try:
        # Each synonym group lives in a table cell with CSS class "nwntsR".
        synonyms_table = bs.find_all("td" ,class_="nwntsR")
        #synonyms_table = bs.find_all("div" ,class_="Nwnts")
        for synonyms in synonyms_table:
            synonyms = synonyms.find_all("li")#class_='crosslink')
            #meanings = bs.select_one("#main > div:nth-of-type(13) > div > div.Nwnts > table > tbody > tr:nth-of-type(2) > td.nwntsR > ul > li:nth-of-type(1) > a").text
            for synonym in synonyms:
                # Prefer the nested "crosslink" anchor when the item has one.
                if synonym.find(class_='crosslink')!=None:
                    synonym = synonym.find(class_='crosslink')
                synonym_dict[word] += synonym.contents
        #print(synonym_dict)
        return synonym_dict
    except AttributeError:
        # Fallback message (Japanese: "couldn't find such a word... sorry").
        meanings = "そのような言葉は見つからなかったよ...。ごめんね。"
        print(meanings)
        return {}
# Ad-hoc manual check: look up synonyms for "ぬこ" on import/run.
synonym_dict={}
synonym_dict = make_synonym_dict("ぬこ")
# Bare expression — only displays something in a notebook/REPL; a no-op in a script.
synonym_dict
| StarcoderdataPython |
import numpy as np

# Four 2-D sample points and their (unused below) class labels.
x = np.array([
    [19, 9],
    [15, 7],
    [7, 2],
    [17, 6]
])
y = np.array([1, 1, 2, 2])
# x1/x2 pair each point with its same-class partner (0<->1, 2<->3).
x1 = np.array([
    x[0],
    x[1],
    x[2],
    x[3],
])
x2 = np.array([
    x[1],
    x[0],
    x[3],
    x[2],
])
x1x2 = x1 - x2
# Row-wise Euclidean distances between the paired points.
normx1x2 = np.linalg.norm(x1x2, axis=1)
print('x1x2\n%s\nnormx1x2\n%s\n' % (x1x2, normx1x2))
# All cross-class difference vectors (each point minus each other-class point).
x1x3 = np.array([
    x1[0] - x1[2],
    x1[0] - x1[3],
    x1[1] - x1[2],
    x1[1] - x1[3],
    x1[2] - x1[0],
    x1[2] - x1[1],
    x1[3] - x1[0],
    x1[3] - x1[1],
])
normx1x3 = np.linalg.norm(x1x3, axis=1)
expx1x3 = np.exp(1 - normx1x3)
# Per-sample loss terms: pair distance plus log-sum-exp over cross-class terms.
li = {}
Li = {}  # NOTE(review): defined but never assigned/used below.
li[0] = normx1x2[0] + np.log(expx1x3[0] + expx1x3[1] + expx1x3[2] + expx1x3[3])
li[1] = normx1x2[1] + np.log(expx1x3[0] + expx1x3[1] + expx1x3[2] + expx1x3[3])
li[2] = normx1x2[2] + np.log(expx1x3[4] + expx1x3[5] + expx1x3[6] + expx1x3[7])
li[3] = normx1x2[3] + np.log(expx1x3[4] + expx1x3[5] + expx1x3[6] + expx1x3[7])
print('Li\n%s\n' % li)
# NOTE(review): derivative computation appears unfinished — only one constant entry.
diffe = {}
diffe[1] = (1 / 4)
| StarcoderdataPython |
12852260 | <filename>dictionary_service.py
#!/usr/bin/python3
import argparse
import logging as log
from aiohttp import web
from api.databasemanager import DictionaryDatabaseManager
from api.dictionary import \
entry, \
definition, \
translation, \
configuration
from api.dictionary import \
get_dictionary, \
get_dictionary_xml, \
get_language_list, \
download_dictionary, \
get_inferred_multilingual_dictionary
from api.dictionary.middlewares import \
json_error_handler, \
auto_committer
# Command-line interface for the dictionary service.
parser = argparse.ArgumentParser(description='Dictionary service')
parser.add_argument(
    '-d',
    '--db-file',
    dest='STORAGE',
    required=False,
    default='default')
parser.add_argument('-p', '--port', dest='PORT', type=int, default=8001)
parser.add_argument(
    '-l',
    '--log-file',
    dest='LOG_FILE',
    type=str,
    default='/opt/botjagwar/user_data/dictionary_service.log')
parser.add_argument('--host', dest='HOST', type=str, default='0.0.0.0')
parser.add_argument('--log-level', dest='LOG_LEVEL', type=str, default='debug')
# NOTE(review): argparse's type=bool is a known trap — any non-empty string
# (including "False") parses as True. Consider choices or a store_true flag.
parser.add_argument('--autocommit', dest='autocommit', type=bool, default=True)
parser.add_argument(
    '--commit-every',
    dest='commit_every',
    type=int,
    default=100)
# Resolve CLI arguments into module-level configuration constants.
args = parser.parse_args()
WORD_STORAGE = args.STORAGE
HOST = args.HOST
PORT = args.PORT
LOG = args.LOG_FILE
try:
    # Map the textual --log-level (e.g. 'debug') to its numeric logging level.
    LOG_LEVEL = log._nameToLevel[args.LOG_LEVEL.upper()]
except KeyError:
    # Unknown level name: fall back to DEBUG (numeric value 10).
    LOG_LEVEL = 10
# Fix: honour the requested --log-level instead of hard-coding log.DEBUG,
# which made the LOG_LEVEL computation above dead code.
log.basicConfig(filename=LOG, level=LOG_LEVEL)
# Database manager plus the aiohttp application and its shared state.
dictionary_db_manager = DictionaryDatabaseManager(
    database_file=WORD_STORAGE, db_header='')
routes = web.RouteTableDef()
app = web.Application(middlewares=[
    json_error_handler,
    auto_committer,
])
# Shared state read by the handlers and middlewares.
app['database'] = dictionary_db_manager
app['session_instance'] = dictionary_db_manager.session
app['autocommit'] = args.autocommit
app['commit_every'] = args.commit_every
app['commit_count'] = 0

# --- Language listing / download ---
app.router.add_route('GET', '/languages/list', get_language_list)
app.router.add_route('GET', '/languages/list/download', download_dictionary)
# --- Definitions CRUD + search ---
app.router.add_route(
    'GET',
    '/definition/{definition_id}',
    definition.get_definition)
app.router.add_route(
    'GET',
    '/definition_words/{definition_id}',
    definition.get_definition_with_words)
app.router.add_route(
    'PUT',
    '/definition/{definition_id}/edit',
    definition.edit_definition)
#app.router.add_route('POST', '/definition/{language}/create', definition.create_definition)
app.router.add_route(
    'DELETE',
    '/definition/{definition_id}/delete',
    definition.delete_definition)
app.router.add_route(
    'POST',
    '/definition/search',
    definition.search_definition)
# --- Dictionaries ---
# NOTE(review): '/dictionary/...' maps to get_dictionary_xml while
# '/xml_dictionary/...' maps to get_dictionary — the handler names suggest
# these two are swapped; confirm intent before changing.
app.router.add_route('GET', '/dictionary/{language}', get_dictionary_xml)
app.router.add_route('GET', '/xml_dictionary/{language}', get_dictionary)
app.router.add_route(
    'GET',
    '/dictionary/{source}/{bridge}/{target}',
    get_inferred_multilingual_dictionary)
# --- Entries CRUD ---
app.router.add_route('GET', '/entry/{language}/{word}', entry.get_entry)
app.router.add_route('POST', '/entry/{language}/create', entry.add_entry)
app.router.add_route('POST', '/entry/batch', entry.add_batch)
app.router.add_route('PUT', '/entry/{word_id}/edit', entry.edit_entry)
app.router.add_route('DELETE', '/entry/{word_id}/delete', entry.delete_entry)
# --- Translations ---
app.router.add_route(
    'GET',
    '/translations/{origin}/{target}/{word}',
    translation.get_translation)
app.router.add_route(
    'GET',
    '/translations/{origin}/{word}',
    translation.get_all_translations)
# --- Misc / service control ---
app.router.add_route('GET', '/word/{word_id}', entry.get_word_by_id)
app.router.add_route('GET', '/ping', configuration.pong)
app.router.add_route('POST', '/commit', configuration.do_commit)
app.router.add_route('POST', '/rollback', configuration.do_rollback)
app.router.add_route('PUT', '/configure', configuration.configure_service)
if __name__ == '__main__':
    try:
        # Register any table-def routes and start serving until interrupted.
        app.router.add_routes(routes)
        web.run_app(app, host=HOST, port=PORT, access_log=log)
    except Exception as exc:
        log.exception(exc)
        log.critical("Error occurred while setting up the server")
    finally:
        # Always flush and close the DB session, even on startup failure.
        app['session_instance'].flush()
        app['session_instance'].close()
| StarcoderdataPython |
9723997 | <reponame>aelamspychron/pychron<gh_stars>1-10
# ===============================================================================
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from traits.api import Property, DelegatesTo, Instance, provides, CStr
# =============standard library imports ========================
# =============local library imports ==========================
from pychron.config_loadable import ConfigLoadable
from pychron.config_mixin import ConfigMixin
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.has_communicator import HasCommunicator
from pychron.hardware.core.core_device import CoreDevice
from pychron.hardware.core.scanable_device import ScanableDevice
# Maps a device class name (the 'type' option in a device's [General] config
# section) to the module path that provides it; consumed by
# AbstractDevice.load_additional_args / get_factory below.
PACKAGES = dict(ProXRADC='pychron.hardware.ncd.adc',
                Eurotherm='pychron.hardware.eurotherm',
                NMGRLFurnaceFeeder='pychron.hardware.furnace.nmgrl.feeder',
                NMGRLFurnaceFunnel='pychron.hardware.furnace.nmgrl.funnel',
                NMGRLFurnaceEurotherm='pychron.hardware.furnace.nmgrl.eurotherm',
                MDriveMotor='pychron.hardware.mdrive',
                RPiGPIO='pychron.hardware.rpi_gpio')
@provides(ICoreDevice)
class AbstractDevice(ScanableDevice, ConfigLoadable, HasCommunicator):
    """Wraps a concrete CoreDevice chosen at load time from configuration.

    Most attributes delegate (or fall through via __getattr__) to the
    wrapped ``_cdevice`` instance.
    """
    _cdevice = Instance(CoreDevice)
    communicator = DelegatesTo('_cdevice')
    dev_klass = Property(depends_on='_cdevice')
    graph = DelegatesTo('_cdevice')
    last_command = DelegatesTo('_cdevice')
    last_response = DelegatesTo('_cdevice')
    timestamp = DelegatesTo('_cdevice')
    current_scan_value = DelegatesTo('_cdevice')

    def load_additional_args(self, config):
        """Instantiate the wrapped device named by [General] type in *config*."""
        cklass = self.config_get(config, 'General', 'type')
        # NOTE(review): an unknown type raises KeyError here (no fallback).
        factory = self.get_factory(PACKAGES[cklass], cklass)
        # self.debug('constructing cdevice: name={}, klass={}'.format(name, klass))
        self._cdevice = factory(name=cklass, application=self.application,
                                configuration_dir_name=self.configuration_dir_name)
        return True

    @property
    def com_device_name(self):
        # Class name of the wrapped concrete device.
        return self._cdevice.__class__.__name__

    def get_factory(self, package, klass):
        """Import *package* and return its *klass* attribute, or None on failure."""
        try:
            module = __import__(package, fromlist=[klass])
            if hasattr(module, klass):
                factory = getattr(module, klass)
                return factory
        except ImportError as e:
            self.warning(e)

    def close(self):
        if self._cdevice:
            self._cdevice.close()

    def set(self, v):
        if self._cdevice:
            self._cdevice.set(v)

    def get(self, *args, **kw):
        if self._cdevice:
            return self._cdevice.get(*args, **kw)

    def post_initialize(self, *args, **kw):
        self.graph.set_y_title(self.graph_ytitle)
        # use our scan configuration not the cdevice's
        self.setup_scan()
        self.setup_alarms()
        self.setup_scheduler()
        if self.auto_start:
            self.start_scan()

    def initialize(self, *args, **kw):
        if self._cdevice:
            return self._cdevice.initialize(*args, **kw)

    def load(self, *args, **kw):
        """Validate the wrapped device, then load configuration for both.

        NOTE(review): indentation reconstructed from a whitespace-mangled
        source — confirm the guard/return nesting against upstream pychron.
        """
        if self._cdevice:
            if not self._check_cdevice():
                self.warning('Invalid device '
                             '"{}" for abstract device "{}"'.format(self._cdevice.name,
                                                                    self.name))
                return
        config = self.get_configuration()
        if config:
            if self.load_additional_args(config):
                self._loaded = True
                self._cdevice.load()
                return True

    def open(self, *args, **kw):
        self.debug('open device')
        return HasCommunicator.open(self, **kw)

    def __getattr__(self, attr):
        # Fall through to the wrapped device for anything not defined here.
        # NOTE(review): silently returns None for attributes missing on both,
        # instead of raising AttributeError.
        if hasattr(self._cdevice, attr):
            return getattr(self._cdevice, attr)

    def _get_dev_klass(self):
        return self._cdevice.__class__.__name__

    def _check_cdevice(self):
        # Base implementation accepts any wrapped device.
        return True
class AddressableAbstractDevice(AbstractDevice):
    """AbstractDevice whose wrapped device is read on a configured channel address."""
    address = CStr

    def load_additional_args(self, config):
        # Read the channel address from [General] before constructing the device.
        self.set_attribute(config, 'address', 'General', 'address')
        return super(AddressableAbstractDevice, self).load_additional_args(config)

    def get(self, force=False, *args, **kw):
        # NOTE(review): the `force` parameter is accepted but ignored here.
        if self._cdevice:
            return self._cdevice.read_channel(self.address, *args, **kw)

    def _check_cdevice(self):
        # Valid when no device is set yet, or the device supports read_channel.
        if self._cdevice:
            if hasattr(self._cdevice, 'read_channel'):
                return True
        else:
            return True
# ============= EOF =====================================
| StarcoderdataPython |
6646739 | <reponame>lordvinick/Python
print('='*12, 'Custo da Viagem', '='*12)
distancia = float(input('Qual é a distância da viagem? '))
print(f'Você está prestes a começar uma viagem de {distancia}Km')
if distancia <= 200:
print('E o preço da sua passagem será de R${:.2f}'.format(distancia * 0.50))
else:
print(f'E o preço da sua viagem será de {distancia*0.45:.2f}')
| StarcoderdataPython |
8191664 | <gh_stars>0
from tkinter import * #importando tkinter
import tkinter as TK
import gramatica as g
import gramaticaF2 as g2
import Utils.TablaSimbolos as table
import Utils.Lista as l
import Librerias.storageManager.jsonMode as storage
from tkinter.filedialog import askopenfilename as files
import os
import webbrowser
from Utils.fila import fila
from Error import *
import Instrucciones.DML.select as select
import json
import reporte as reporte
import optimizar as opt
#from select import *
##########################################################################
# Fresh start: wipe any previously persisted databases, then create the
# in-memory symbol table shared by the whole GUI.
storage.dropAll()
datos = l.Lista({}, '')
################################## FUNCTIONS #################################
def openFile():
    """Ask the user for a .txt file and load its contents into the editor pane."""
    route = files(filetypes=[("TXT Files", "*.txt")])
    if not route:
        salida.insert("end", "\nERROR AL ABRIR AL ARCHIVO")
        return
    editor.delete("1.0", TK.END)
    with open(route, "r") as source_file:
        editor.insert(TK.END, source_file.read())
    root.title(f"TYTUSDB_Parser - {route}")
def analisis():
    """Parse the editor's SQL text and emit three-address Python code to c3d.py.

    NOTE(review): indentation (including inside the generated-code strings)
    was reconstructed from a whitespace-mangled source; confirm against the
    original repository before relying on exact layout.
    """
    global datos
    salida.delete("1.0", "end")
    texto = editor.get("1.0", "end")
    #g2.tempos.restartTemp()  # restarts the temporaries counter.
    prueba = g2.parse(texto)
    try:
        escribirEnSalidaFinal(prueba['printList'])
    except:
        ''
    #print(prueba['text'])
    try:
        # Build the generated program: imports + shared state.
        exepy = '''
#imports
import sys
sys.path.append('../G26/Librerias/goto')
from goto import *
import gramatica as g
import Utils.Lista as l
import Librerias.storageManager.jsonMode as storage
import Instrucciones.DML.select as select
from Error import *
import reporte as reporte
#storage.dropAll()
heap = []
semerrors = []
erroresS = list()
datos = l.Lista({}, '')
l.readData(datos)
'''
        # Bridge function executed by the generated code: parses and runs one
        # queued statement, collecting syntax/semantic errors.
        exepy += '''
#funcion intermedia
def mediador(value):
    global heap
    global semerrors
    global reporte
    # Analisis sintactico
    instrucciones = g.parse(heap.pop())
    erroresS = (g.getMistakes())
    for instr in instrucciones['ast'] :
        if instr == None:
            erroresS = g.getMistakes()
            return 0
        try:
            val = instr.execute(datos)
        except:
            val = (instr.execute(datos, {}))
        if isinstance(val, Error):
            'error semantico'
            print(val)
            semerrors.append(val)
        elif isinstance(instr, select.Select) :
            if value == 0:
                try:
                    print(val)
                    if len(val.keys()) > 1 :
                        print('El numero de columnas retornadas es mayor a 1')
                        return 0
                    for key in val:
                        if len(val[key]['columnas']) > 1 :
                            print('El numero de filas retornadas es mayor a 1')
                        else :
                            return val[key]['columnas'][0][0]
                        break
                except:
                    return 0
            else:
                print(instr.ImprimirTabla(val))
        else :
            try:
                return val.val
            except:
                print(val)
    l.writeData(datos)
'''
        exepy += '''
#funciones de plg-sql
'''
        # Append the optimized source of every stored PL/SQL function.
        l.readData(datos)
        optt = ""
        for val in datos.tablaSimbolos.keys():
            if val == 'funciones_':
                for func in datos.tablaSimbolos[val]:
                    try:
                        f = open("./Funciones/" + func['name'] + ".py", "r")
                        pruebaaa = f.read()
                        optt = opt.optimizar(pruebaaa)
                        exepy += optt
                        f.close()
                    except:
                        exepy += '#Se cambio el nombre del archivo que guarda la funcion. Funcion no encontrada'
        # Generated main(): the translated three-address code goes in its body.
        exepy += '''
#main
@with_goto
def main():
    global heap
'''
        exepy += str(prueba['text'])
        exepy += '''
reporte.Rerrores(erroresS, semerrors, "Reporte_Errores_Semanticos.html")
#Ejecucion del main
if __name__ == "__main__":
    main()
'''
        # Write the generated program to disk.
        f = open("./c3d.py", "w")
        f.write(exepy)
        f.close()
        # Remove the files of functions that were dropped during this run.
        l.readData(datos)
        if 'funciones_' in datos.tablaSimbolos:
            for funciones in datos.tablaSimbolos['funciones_']:
                #print(funciones)
                if funciones['drop'] == 0:
                    try:
                        os.remove('../G26/Funciones/' + funciones['name'] +'.py')
                    except:
                        ''
        # Produce the grammar, error and symbol-table reports.
        try:
            reporte.hacerReporteGramatica(prueba['reporte'])
            errores = g2.getMistakes()
            recorrerErrores(errores)
            reporte.Rerrores(errores, [], "Reporte_Errores_Sintactico_Lexicos.html")
            reporte.reporteTabla(datos)
        except:
            ''
        escribirEnSalidaFinal('Se ha generado el codigo en 3 direcciones.')
        # Here additional output-console printing can be hooked in.
        reptOpti = prueba['opt']
        fro = open("./Reportes/ReporteOptimizacion.txt", "w")
        fro.write(reptOpti)
        fro.close()
    except:
        # Parsing failed: report and still try to emit the symbol table.
        print("No se ha podido generar el codigo ya que existen errores sintacticos")
        escribirEnSalidaFinal("No se ha podido generar el codigo ya que existen errores sintacticos")
        try:
            l.readData(datos)
            reporte.reporteTabla(datos)
        except:
            ''
def tabla():
    """Open the symbol-table HTML report in the default web browser."""
    webbrowser.open(".\\Reportes\\Reporte_TablaSimbolos.html")
def ast():
    # Render the abstract syntax tree built during the last parse.
    g2.grafo.showtree()
def gramatica():
    # Open the auto-generated grammar report in Notepad (Windows-only).
    os.system("notepad ./Reportes/GramaticaAutomatica.md")
def guardar():
    # TODO: "Save" is not implemented yet; placeholder output only.
    print("hola")
def ayuda():
    # TODO: "Help" is not implemented yet; placeholder output only.
    print("hola")
def mistakes():
    """Open the error-report HTML page in the default web browser."""
    webbrowser.open(".\\Reportes\\Reporte_Errores.html")
def recorrerErrores(errores):
    """Write every error's string form, one per line, at the top of the output pane."""
    salidaE = "".join(error.toString() + "\n" for error in errores)
    salida.insert("1.0", salidaE)
def escribirEnSalidaInicio(texto):  # writes at the start of the output pane (existing text is kept)
    salida.insert("1.0", texto)
def escribirEnSalidaFinal(texto):  # appends after the existing output, adding a trailing newline
    text = texto + "\n"
    salida.insert("end", text)
# Root window
################################ Configuration #################################
root = Tk()
root.title("TytusDB_Manager")  # window title
root.resizable(0,0)
root.geometry("1300x700")  # fixed window size
root.config(bg="black", cursor="pirate")
############################### Menu bar #####################################
barra = Menu(root)
root.config(menu=barra, width=300, height=300)
archivoMenu = Menu(barra, tearoff=0)
archivoMenu.add_command(label="Abrir", command=openFile)
archivoMenu.add_command(label="Guardar", command=guardar)
barra.add_cascade(label="Archivo", menu=archivoMenu)
herramientaMenu=Menu(barra, tearoff=0)
herramientaMenu.add_command(label="Ejecutar Analisis", command=analisis)
barra.add_cascade(label="Analisis", menu=herramientaMenu)
reporteMenu = Menu(barra, tearoff=0)
reporteMenu.add_command(label="Reporte errores", command=mistakes)
reporteMenu.add_command(label="Tabla de simbolos", command=tabla)
reporteMenu.add_command(label="Reporte AST", command=ast)
reporteMenu.add_command(label="Reporte Gramatical", command=gramatica)
barra.add_cascade(label="Reportes", menu=reporteMenu)
ayudaMenu=Menu(barra, tearoff=0)
ayudaMenu.add_command(label="Ayuda", command=ayuda)
barra.add_cascade(label="Ayuda", menu=ayudaMenu)
################################## Code editor #############################
nombreL=Label( root, text="EDITOR", fg="BLUE", font=("Arial", 12))
nombreL.place(x=300, y=10)
editor = Text(root, width=122, height=18, bg="white")
editor.place(x=300, y=45)
nombreL=Label( root, text="SALIDA", fg="BLUE", font=("Arial", 12))
nombreL.place(x=300, y=350)
salida = Text(root, width=122, height=18, bg="skyblue")
salida.place(x=300, y=380)
root.mainloop()  # show the UI / enter the Tk event loop
| StarcoderdataPython |
11237449 | import utils
TEST_INPUT = (0, 3, 0, 1, -3)
def increment_jumps(jump):
    """Part-one rule: an offset always grows by one after it is used."""
    jump += 1
    return jump
def decrement_large_jumps(jump):
    """Part-two rule: offsets of three or more shrink by one; others grow by one."""
    return jump - 1 if jump >= 3 else jump + 1
def steps_til_exit(data, jump_modifier=increment_jumps):
    """Walk the jump maze from offset 0 and count jumps until we land outside.

    Each visited offset is rewritten via *jump_modifier* before jumping.
    The input sequence is copied, so *data* is never mutated.
    """
    offsets = list(data)
    size = len(offsets)
    position = 0
    jumps_taken = 0
    while 0 <= position < size:
        offset = offsets[position]
        offsets[position] = jump_modifier(offset)
        position += offset
        jumps_taken += 1
    return jumps_taken
def main():
    """Run the self-checks, then solve both puzzle parts for the real input."""
    test()
    raw = utils.get_input_data(5)
    jumps = [int(line) for line in raw.split('\n')]
    print('part a: {}'.format(steps_til_exit(jumps)))
    print('part b: {}'.format(steps_til_exit(jumps, decrement_large_jumps)))
def test():
    # Known answers for the worked example: 5 jumps normally, 10 with decrements.
    assert steps_til_exit(TEST_INPUT) == 5
    assert steps_til_exit(TEST_INPUT, decrement_large_jumps) == 10
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1915612 | <filename>scripts/traffic_sign_classifier.py<gh_stars>0
import pickle
import numpy as np
import matplotlib.pyplot as plt
import classifier_util as util
from sklearn.utils import shuffle
import tensorflow as tf
import lenet as lenet
# --- Load the pickled German traffic-sign dataset (train/valid/test splits). ---
dataset_dir = '../dataset'
training_file = dataset_dir + '/train.p'
validation_file = dataset_dir + '/valid.p'
testing_file = dataset_dir + '/test.p'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# X_train, y_train = train['features'], train['labels']
# with open('augmented_train.p', mode='rb') as f:
#     augmented_train = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# util.print_dataset_summary(X_train, X_valid, X_test, y_train)
n_train = X_train.shape[0]
n_validation = X_valid.shape[0]
n_test = X_test.shape[0]
image_shape = X_train[0].shape
n_classes = len(np.unique(y_train))
# Stores list of traffic-sign-names, indexed by their ID as given in the dataset.
sign_names = util.load_sign_names()
# util.visualize_dataset(X_train, y_train, sign_names)
# util.describe_labels(y_train, sign_names, 'Training set labels')
# util.describe_labels(y_valid, sign_names, 'Validation set labels')
# util.describe_labels(y_test, sign_names, 'Test set labels')
### Preprocess data.
# X_train, y_train = util.augment_jitter_data(X_train, y_train, 850)
# The pre-augmented training set replaces the raw one loaded above.
with open('augmented_train.p', mode='rb') as f:
    augmented_train = pickle.load(f)
X_train, y_train = augmented_train['features'], augmented_train['labels']
# util.describe_labels(y_train, sign_names,'Augmented training samples')
X_train = util.preprocess_images(X_train)
X_valid = util.preprocess_images(X_valid)
X_test = util.preprocess_images(X_test)
### Hyper parameters.
learning_rate = 0.001
EPOCHS = 25
BATCH_SIZE = 128
### Placeholders for input features and labels.
# NOTE(review): the placeholder's last dimension is 1, so preprocess_images
# presumably converts to single-channel (grayscale) — confirm in classifier_util.
x = tf.placeholder(tf.float32, (None, image_shape[0], image_shape[1], 1))
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32)
one_hot_y = tf.one_hot(y, n_classes)
### Training operations.
logits = lenet.build_lenet(x, keep_prob)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
training_operation = optimizer.minimize(loss_operation)
### Model evaluation.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Return classification accuracy over X_data/y_data, computed in batches.

    NOTE(review): relies on a TF session bound to the global name `sess`
    at call time (the training/testing routines below are commented out).
    """
    num_examples = len(X_data)
    total_accuracy = 0
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        # keep_prob 1.0 disables dropout during evaluation.
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob:1.0})
        # Weight each batch's accuracy by its (possibly smaller, final) size.
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
def predict(image, model):
    """Restore checkpoint `model` and return the predicted class id for one image."""
    with tf.Session() as sess:
        saver.restore(sess, './' + model)
        prediction = tf.argmax(logits, 1)
        # np.newaxis adds a batch dimension of 1 for the single image.
        return prediction.eval(feed_dict={x: image[np.newaxis, :, :, :], keep_prob: 1.0})
def print_top_k_softmax(test_image, model, k=5):
    """Return (values, indices) of the top-k softmax probabilities for one image."""
    with tf.Session() as sess:
        saver.restore(sess, './' + model)
        # Softmax predictions on this test-image.
        softmax = sess.run(tf.nn.softmax(logits), feed_dict={x: test_image[np.newaxis, :, :, :], keep_prob: 1.0})
        # Return top-k softmax probabilities predicted on this test-image.
        return sess.run(tf.nn.top_k(softmax, k))
def print_softmax_predictions(pred):
    '''
    Prints softmax-predictions in ascending order, with the class-label having highest probability on top.
    '''
    # Index 0 has tuple of probabilities.
    # Index 1 has tuple of class-label indices.
    # Looks up human-readable labels in the module-level sign_names list.
    for i in range(len(pred[0][0])):
        print('{} : {:.3f}%'.format(sign_names[pred[1][0][i]], pred[0][0][i] * 100))
### Training routine.
# (Disabled: uncomment to retrain and save the './lenet' checkpoint.)
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# num_examples = len(X_train)
# print("Training...")
# print()
# for i in range(EPOCHS):
# X_train, y_train = shuffle(X_train, y_train)
# for offset in range(0, num_examples, BATCH_SIZE):
# end = offset + BATCH_SIZE
# batch_x, batch_y = X_train[offset:end], y_train[offset:end]
# sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
#
# validation_accuracy = evaluate(X_valid, y_valid)
# print("EPOCH {} ...".format(i + 1))
# print("Validation Accuracy = {:.3f}".format(validation_accuracy))
# print()
#
# saver.save(sess, './lenet')
# print("Model saved")
### Testing routine.
# (Disabled: uncomment to evaluate the saved checkpoint on the test split.)
# with tf.Session() as sess:
# saver.restore(sess, './lenet')
# test_accuracy = sess.run(accuracy_operation, feed_dict={x: X_test, y: y_test, keep_prob: 1.0})
# print('Test accuracy {}'.format(test_accuracy))
### Test images.
# Load and preprocess the extra web images used for spot-check predictions.
test_images, test_images_scaled = util.load_test_images()
print(len(test_images), len(test_images_scaled))
test_images = util.preprocess_images(test_images)
| StarcoderdataPython |
136458 | <filename>vCenterShell/commands/connect_dvswitch.py
from models.ConnectionResult import ConnectionResult
from common.utilites.common_utils import get_object_as_string
class VirtualSwitchConnectCommand:
    """Connects a vCenter VM's vNICs to distributed virtual switch port groups."""
    def __init__(self,
                 pv_service,
                 virtual_switch_to_machine_connector,
                 dv_port_group_name_generator,
                 vlan_spec_factory,
                 vlan_id_range_parser,
                 logger):
        """
        :param pv_service: vCenter API wrapper
        :param virtual_switch_to_machine_connector:
        :param dv_port_group_name_generator: DvPortGroupNameGenerator
        :param vlan_spec_factory: VlanSpecFactory
        :param vlan_id_range_parser: VLanIdRangeParser
        :param logger: Logger
        """
        self.pv_service = pv_service
        self.virtual_switch_to_machine_connector = virtual_switch_to_machine_connector
        self.dv_port_group_name_generator = dv_port_group_name_generator
        self.vlan_spec_factory = vlan_spec_factory
        self.vlan_id_range_parser = vlan_id_range_parser
        self.logger = logger

    def connect_to_networks(self, si, vm_uuid, vm_network_mappings, default_network_name):
        """
        Connect VM to Network
        :param si: VmWare Service Instance - defined connection to vCenter
        :param vm_uuid: <str> UUID for VM
        :param vm_network_mappings: <collection of 'VmNetworkMapping'>
        :param default_network_name: <str> Full Network name - likes 'DataCenterName/NetworkName'
        :return: list of ConnectionResult, one per connected vNIC
        """
        vm = self.pv_service.find_by_uuid(si, vm_uuid)
        if not vm:
            raise ValueError('VM having UUID {0} not found'.format(vm_uuid))
        default_network_instance = self.pv_service.get_network_by_full_name(si, default_network_name)
        if not default_network_instance:
            raise ValueError('Default Network {0} not found'.format(default_network_name))
        # Normalize the requested mappings, then let the connector apply them.
        mappings = self._prepare_mappings(vm_network_mappings)
        updated_mappings = self.virtual_switch_to_machine_connector.connect_by_mapping(
            si, vm, mappings, default_network_instance)
        # Summarize each applied mapping for the caller.
        connection_results = []
        for updated_mapping in updated_mappings:
            connection_result = ConnectionResult(mac_address=updated_mapping.vnic.macAddress,
                                                 vm_uuid=vm_uuid,
                                                 network_name=updated_mapping.network.name,
                                                 network_key=updated_mapping.network.key)
            connection_results.append(connection_result)
        return connection_results

    def _prepare_mappings(self, vm_network_mappings):
        # Fill in derived fields on each mapping: the generated dvPort group
        # name, the parsed VLAN id (range-aware) and the concrete VLAN spec.
        # NOTE(review): dv_port_name is generated from the *raw* vlan_id/spec
        # before they are rewritten below — presumably intentional; confirm.
        mappings = []
        # create mapping
        for vm_network_mapping in vm_network_mappings:
            vm_network_mapping.dv_port_name = \
                self.dv_port_group_name_generator.generate_port_group_name(vm_network_mapping.vlan_id,
                                                                           vm_network_mapping.vlan_spec)
            vm_network_mapping.vlan_id = \
                self.vlan_id_range_parser.parse_vlan_id(vm_network_mapping.vlan_spec, vm_network_mapping.vlan_id)
            vm_network_mapping.vlan_spec = \
                self.vlan_spec_factory.get_vlan_spec(vm_network_mapping.vlan_spec)
            mappings.append(vm_network_mapping)
        return mappings
| StarcoderdataPython |
class DiseaseError(Exception):
    """Base class for disease module exceptions."""


class ParserError(DiseaseError):
    """Disease-module exception for parsing problems."""
| StarcoderdataPython |
8122209 | <gh_stars>0
#coding=utf-8
'''
path = ./mypackage/subB/brust.py
filename = brust.py
'''
# Module-level constant; the print below runs on import as an import-order trace.
rust = 'RUST'
print('in mypackage/subB/brust.py :',rust)
| StarcoderdataPython |
1981208 | <reponame>this-josh/felling<gh_stars>1-10
from setuptools import setup, find_packages

# Extra project links shown on the PyPI sidebar.
project_urls = {
    "Source": "https://github.com/this-josh/felling",
    "Tracker": "https://github.com/this-josh/felling/issues",
}

# The PyPI long description comes straight from the README.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

# Trove classifiers describing maturity, supported Python and license.
classifiers = [
    "Development Status :: 4 - Beta",
    "Programming Language :: Python :: 3",
    "Operating System :: OS Independent",
    "License :: OSI Approved :: MIT License",
]

setup(
    name="felling",
    description="A simple package to easily create consistent logs",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=find_packages(),
    include_package_data=True,
    package_data={"": ["*.json"]},
    url="https://felling.readthedocs.io/en/latest/",
    project_urls=project_urls,
    long_description=long_description,
    long_description_content_type="text/markdown",
    keywords="logging logs log",
    license="MIT",
    python_requires=">=3.6",
    # Fix: the classifiers list above was defined but never passed to
    # setup(), so it never reached the published package metadata.
    classifiers=classifiers,
)
| StarcoderdataPython |
6515836 | <filename>src/hardware_indep/dataplane_smem.c.py<gh_stars>10-100
# SPDX-License-Identifier: Apache-2.0
# Copyright 2016 Eotvos Lorand University, Budapest, Hungary
from utils.codegen import format_declaration, format_statement, format_expr, format_type, gen_format_type, get_method_call_env
from compiler_log_warnings_errors import addError, addWarning
from compiler_common import types, generate_var_name, get_hdrfld_name, unique_everseen
#[ #include "gen_include.h"
#[ #include "dataplane_impl.h"

# NOTE(review): this is a t4p4s-style code template — lines beginning with
# "#[", "#{" and "#}" are C-output directives for the code generator, not
# ordinary comments, and must not be reworded.

# Pair each P4 table with a display name; suffixes flag keyless/hidden tables.
table_infos = [(table, table.short_name + ("/keyless" if table.key_bit_size == 0 else "") + ("/hidden" if table.is_hidden else "")) for table in hlir.tables]

for table, table_info in table_infos:
    # Only tables that own direct meters/counters get an apply function.
    if len(table.direct_meters + table.direct_counters) == 0:
        continue
    #{ void ${table.name}_apply_smems(STDPARAMS) {
    #[ // applying direct counters and meters
    for smem in table.direct_meters + table.direct_counters:
        for comp in smem.components:
            # Byte-based components accumulate the packet size; packet-based add 1.
            value = "pd->parsed_size" if comp['for'] == 'bytes' else "1"
            type = comp['type']  # NOTE(review): shadows builtin `type`; kept for template interpolation
            name = comp['name']
            #[ apply_${smem.smem_type}(&(global_smem.${name}_${table.name}), $value, "${table.name}", "${smem.smem_type}", "$name");
    #} }

#[
| StarcoderdataPython |
9720956 | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects related to translations."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core import python_utils
from core import utils
class MachineTranslation(python_utils.OBJECT):
    """Domain object for machine translation of exploration content."""

    def __init__(
            self, source_language_code, target_language_code, source_text,
            translated_text):
        """Initializes a MachineTranslation domain object.

        Args:
            source_language_code: str. Language code of the source text; must
                differ from target_language_code.
            target_language_code: str. Language code of the translation; must
                differ from source_language_code.
            source_text: str. The untranslated source text.
            translated_text: str. The machine generated translation of the
                source text into the target language.
        """
        self.source_language_code = source_language_code
        self.target_language_code = target_language_code
        self.source_text = source_text
        self.translated_text = translated_text

    def _require_string(self, value, error_message):
        """Raises a ValidationError built from error_message unless value is
        a string.
        """
        if not isinstance(value, python_utils.BASESTRING):
            raise utils.ValidationError(error_message % value)

    def _require_known_language_code(self, code, error_message):
        """Raises a ValidationError unless code is a recognized language
        code.
        """
        # TODO(#12341): Tidy up this logic once we have a canonical list of
        # language codes.
        is_known = (
            utils.is_supported_audio_language_code(code) or
            utils.is_valid_language_code(code))
        if not is_known:
            raise utils.ValidationError(error_message % code)

    def validate(self):
        """Validates properties of the MachineTranslation.

        Raises:
            ValidationError. One or more attributes of the MachineTranslation
            are invalid.
        """
        self._require_string(
            self.source_language_code,
            'Expected source_language_code to be a string, received %s')
        self._require_known_language_code(
            self.source_language_code,
            'Invalid source language code: %s')
        self._require_string(
            self.target_language_code,
            'Expected target_language_code to be a string, received %s')
        self._require_known_language_code(
            self.target_language_code,
            'Invalid target language code: %s')
        if self.source_language_code == self.target_language_code:
            raise utils.ValidationError(
                (
                    'Expected source_language_code to be different from '
                    'target_language_code: "%s" = "%s"') % (
                        self.source_language_code, self.target_language_code))
        self._require_string(
            self.source_text,
            'Expected source_text to be a string, received %s')
        self._require_string(
            self.translated_text,
            'Expected translated_text to be a string, received %s')

    def to_dict(self):
        """Converts the MachineTranslation domain instance into a dictionary
        form with its keys as the attributes of this class.

        Returns:
            dict. A dictionary containing the MachineTranslation class
            information in a dictionary form.
        """
        return {
            'source_language_code': self.source_language_code,
            'target_language_code': self.target_language_code,
            'source_text': self.source_text,
            'translated_text': self.translated_text
        }
| StarcoderdataPython |
9711989 | <filename>examples/meowbit/test_bmp.py
import pyb
import framebuf
import image
# Backing buffer for a 160x128 display at 2 bytes per pixel (RGB565).
fbuf = bytearray(160*128*2)
tft = pyb.SCREEN()
# Wrap the raw buffer in a frame buffer: 160x128, RGB565, stride 160 pixels.
fb = framebuf.FrameBuffer(fbuf, 160, 128, framebuf.RGB565, 160)
img = image.Image(fb)
# Decode the BMP into the frame buffer, then push the buffer to the screen.
img.loadbmp("images/test24.bmp")
tft.show(fb)
| StarcoderdataPython |
9604664 | <reponame>laurenmm/simmate-1
# -*- coding: utf-8 -*-
from simmate.calculators.vasp.tasks.relaxation.quality_04 import Quality04Relaxation
class Quality04Energy(Quality04Relaxation):
    """
    Runs a rough VASP static energy calculation.

    `Quality 04` relates to our ranking of relaxation qualities, where this
    calculation uses the same settings as the Quality04Relaxation.

    Note, even though this is currently our highest quality preset, these
    settings are still only suitable for high-throughput calculations or massive
    supercells. Settings are still below MIT and Materials Project quality.

    Most commonly, this is used in evolutionary searches (for structure
    prediction). We recommend instead using the relaxation/staged workflow,
    which uses this calculation as the sixth and final step -- after a series
    of rough relaxations are done.
    """

    # The INCAR settings start from the Quality04Relaxation preset and are
    # overridden below to turn the relaxation into a single-point static
    # energy run. (An earlier comment here referenced MITRelaxation and
    # hardcoded temperatures/time steps; both were copy-paste leftovers that
    # do not apply to this class.)
    incar = Quality04Relaxation.incar.copy()
    incar.update(
        dict(
            ALGO="Normal",
            IBRION=-1,  # (optional) locks everything between ionic steps
            NSW=0,  # this is the main static energy setting
            ISMEAR=-5,  # was 0 for non-metals and 1 for metals
            SIGMA=0.05,  # was 0.05 for non-metals and 0.06 for metals
        )
    )

    # ISMEAR and SIGMA are pinned explicitly above, so the inherited
    # "smart" smearing-selection keyword is removed.
    incar.pop("multiple_keywords__smart_ismear")
| StarcoderdataPython |
4837919 | <gh_stars>0
from random import choice
# Variáveis com códigos de cores!
# ANSI escape codes for coloured terminal output.
vermelho, amarelo, azul1, lilas, azul, fim = '\033[1:31m', '\033[1:33m', '\033[1:34m', '\033[1:35m', \
                                             '\033[1:36m', '\033[m'

# Header banner.
print(vermelho, '-=-' * 16, fim)
# Bug fix: this line previously ended with `azul` instead of `fim`, so the
# terminal colour was never reset after printing the title.
print(azul, 'ADVINHE O NÚMERO QUE ESTOU PENSANDO ENTRE 0 E 10', fim)
print(vermelho, '-=-' * 16, fim)

# Game state. `numeros` replaces a variable that shadowed the builtin `list`;
# randint(0, 10) would be the simpler alternative to choice() over this list.
jogador = int(input("Faça sua jogada: "))
numeros = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
computador = choice(numeros)
tentativas = 0

# Keep drawing a new number until the player's guess matches it.
while jogador != computador:
    print('Joguei {}'.format(computador))
    jogador = int(input("Você errou!\nFaça sua jogada: "))
    computador = choice(numeros)
    tentativas = tentativas + 1

print('Você acertou! Foram necesárias {} tentativas para o acerto.'.format(tentativas + 1))
| StarcoderdataPython |
5117400 | <filename>L1Trigger/L1THGCalUtilities/python/clustering3d.py
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1THGCal.customClustering import binSums, dr_layerbylayer
def create_distance(process, inputs,
                    distance=0.01
                    ):
    """Clone the layer-2 backend producer configured for fixed-dR ('dRC3d')
    3D clustering; DBSCAN parameters are zeroed out."""
    producer = process.hgcalBackEndLayer2Producer.clone()
    c3d = producer.ProcessorParameters.C3d_parameters
    c3d.dR_multicluster = cms.double(distance)
    c3d.dist_dbscan_multicluster = cms.double(0.)
    c3d.minN_dbscan_multicluster = cms.uint32(0)
    c3d.type_multicluster = cms.string('dRC3d')
    producer.InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
    return producer
def create_dbscan(process, inputs,
                  distance=0.005,
                  min_points=3
                  ):
    """Clone the layer-2 backend producer configured for DBSCAN 3D
    clustering; the fixed-dR distance is zeroed out."""
    producer = process.hgcalBackEndLayer2Producer.clone()
    c3d = producer.ProcessorParameters.C3d_parameters
    c3d.dR_multicluster = cms.double(0.)
    c3d.dist_dbscan_multicluster = cms.double(distance)
    c3d.minN_dbscan_multicluster = cms.uint32(min_points)
    c3d.type_multicluster = cms.string('DBSCANC3d')
    producer.InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
    return producer
def create_histoMax(process, inputs,
                    distance = 0.03,
                    nBins_R = 36,
                    nBins_Phi = 216,
                    binSumsHisto = binSums,
                    seed_threshold = 0,
                    ):
    """Clone the layer-2 backend producer configured for histogram-maximum
    ('HistoMaxC3d') seeded 3D clustering.

    Args mirror the C3d histogram parameters: clustering distance, R/phi
    histogram binning, the bin-sum weights, and the seeding threshold.
    """
    producer = process.hgcalBackEndLayer2Producer.clone()
    producer.ProcessorParameters.C3d_parameters.dR_multicluster = cms.double(distance)
    producer.ProcessorParameters.C3d_parameters.nBins_R_histo_multicluster = cms.uint32(nBins_R)
    producer.ProcessorParameters.C3d_parameters.nBins_Phi_histo_multicluster = cms.uint32(nBins_Phi)
    producer.ProcessorParameters.C3d_parameters.binSumsHisto = binSumsHisto
    # Fix: wrap the threshold in cms.double(...) like every other scalar
    # parameter in this file (create_histoThreshold does the same for its
    # threshold); previously a bare Python number was assigned.
    producer.ProcessorParameters.C3d_parameters.threshold_histo_multicluster = cms.double(seed_threshold)
    producer.ProcessorParameters.C3d_parameters.type_multicluster = cms.string('HistoMaxC3d')
    producer.InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
    return producer
def create_histoMax_variableDr(process, inputs,
                               distances = dr_layerbylayer,
                               nBins_R = 36,
                               nBins_Phi = 216,
                               binSumsHisto = binSums,
                               seed_threshold = 0,
                               ):
    """Histogram-maximum clustering with a per-layer clustering distance:
    builds on create_histoMax (scalar dR set to 0) and installs the
    layer-by-layer distance list instead."""
    producer = create_histoMax(process, inputs, 0, nBins_R, nBins_Phi, binSumsHisto, seed_threshold)
    c3d = producer.ProcessorParameters.C3d_parameters
    c3d.dR_multicluster_byLayer = cms.vdouble(distances)
    return producer
def create_histoInterpolatedMax(process, inputs,
                                distance = 0.03,
                                nBins_R = 36,
                                nBins_Phi = 216,
                                binSumsHisto = binSums,
                                ):
    """Same configuration as create_histoMax, but with the interpolated
    histogram-maximum multicluster type."""
    producer = create_histoMax(process, inputs, distance, nBins_R, nBins_Phi, binSumsHisto)
    c3d = producer.ProcessorParameters.C3d_parameters
    c3d.type_multicluster = cms.string('HistoInterpolatedMaxC3d')
    return producer
def create_histoInterpolatedMax1stOrder(process, inputs):
    """Interpolated histogram-max clustering with a first-order (cross)
    neighbour kernel: equal weight on the four direct neighbours, none on
    the centre or corners."""
    producer = create_histoInterpolatedMax(process, inputs)
    producer.ProcessorParameters.C3d_parameters.neighbour_weights = cms.vdouble(
        0, 0.25, 0,
        0.25, 0, 0.25,
        0, 0.25, 0
    )
    return producer
def create_histoInterpolatedMax2ndOrder(process, inputs):
    """Interpolated histogram-max clustering with a second-order neighbour
    kernel: positive weights on edges, negative on corners."""
    producer = create_histoInterpolatedMax(process, inputs)
    producer.ProcessorParameters.C3d_parameters.neighbour_weights = cms.vdouble(
        -0.25, 0.5, -0.25,
        0.5, 0, 0.5,
        -0.25, 0.5, -0.25
    )
    return producer
def create_histoThreshold(process, inputs,
                          threshold = 20.,
                          distance = 0.03,
                          nBins_R = 36,
                          nBins_Phi = 216,
                          binSumsHisto = binSums,
                          ):
    """Clone the layer-2 backend producer configured for threshold-seeded
    ('HistoThresholdC3d') histogram 3D clustering."""
    producer = process.hgcalBackEndLayer2Producer.clone()
    c3d = producer.ProcessorParameters.C3d_parameters
    c3d.threshold_histo_multicluster = cms.double(threshold)
    c3d.dR_multicluster = cms.double(distance)
    c3d.nBins_R_histo_multicluster = cms.uint32(nBins_R)
    c3d.nBins_Phi_histo_multicluster = cms.uint32(nBins_Phi)
    c3d.binSumsHisto = binSumsHisto
    c3d.type_multicluster = cms.string('HistoThresholdC3d')
    producer.InputCluster = cms.InputTag('{}:HGCalBackendLayer1Processor2DClustering'.format(inputs))
    return producer
| StarcoderdataPython |
3205071 | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions, status
from .models import FibResItem
from .serializers import FibReqItemSerializer, FibResItemSerializer
# MQTT
import paho.mqtt.client as mqtt
# rgpc
import os
import os.path as osp
import sys
BUILD_DIR = osp.join(osp.dirname(osp.abspath(__file__)), "../build/service/")
sys.path.insert(0, BUILD_DIR)
import argparse
import grpc
import fib_pb2
import fib_pb2_grpc
import log_pb2
import log_pb2_grpc
# Create your views here.
class EchoView(APIView):
    """Unauthenticated health-check endpoint replying with a fixed greeting."""
    permission_classes = (permissions.AllowAny,)

    def get(self, request):
        payload = {'echo': 'hello world'}
        return Response(data=payload, status=200)
class FiboView(APIView):
    """Fibonacci endpoint backed by gRPC services.

    POST computes fib(order) through the FibCalculator gRPC service on
    127.0.0.1:8080 and publishes the requested order to the 'log' MQTT topic.
    GET returns the history of requested orders from the LogHistory gRPC
    service on 127.0.0.1:8888.
    """
    permission_classes = (permissions.AllowAny,)

    def __init__(self):
        # NOTE(review): no super().__init__() call here, matching the original
        # behaviour -- confirm DRF view kwargs are not needed.
        mqttIP = "127.0.0.1"
        mqttPORT = 1883
        self.client = mqtt.Client()
        self.client.connect(host=mqttIP, port=mqttPORT)

    def post(self, request):
        """Validate the order, compute fib(order) via gRPC, log via MQTT."""
        serializer = FibReqItemSerializer(data=request.data)
        if not serializer.is_valid():
            return Response({"status": "error", "data": serializer.errors},
                            status=status.HTTP_400_BAD_REQUEST)
        fibIP = "127.0.0.1"
        fibPORT = "8080"
        host = f"{fibIP}:{fibPORT}"
        fiborder = serializer.data['order']
        with grpc.insecure_channel(host) as channel:
            stub = fib_pb2_grpc.FibCalculatorStub(channel)
            # Bug fix: use a dedicated name for the gRPC message so the
            # incoming Django `request` parameter is not shadowed.
            fib_request = fib_pb2.FibRequest()
            fib_request.order = fiborder
            response = stub.Compute(fib_request)
            # Publish the requested order so the log service records it.
            self.client.publish(topic='log', payload=fiborder)
            return Response({"status": "success", "data": response.value},
                            status=status.HTTP_200_OK)

    def get(self, _, id=None):
        """Return the full request history from the LogHistory gRPC service.

        `id` is accepted for URL-conf compatibility but is not used: the
        history is not filterable per item.
        """
        logIP = "127.0.0.1"
        logPORT = "8888"
        host = f"{logIP}:{logPORT}"
        with grpc.insecure_channel(host) as channel:
            stub = log_pb2_grpc.LogHistoryStub(channel)
            log_request = log_pb2.LogRequest()
            try:
                response = stub.getHistory(log_request)
                return Response({"status": "success", "data": response.value[:]},
                                status=status.HTTP_200_OK)
            except Exception as e:
                print(e)
                return Response({"status": "error", "data": "error"},
                                status=status.HTTP_400_BAD_REQUEST)
| StarcoderdataPython |
6529093 |
from __future__ import print_function
from .sqlschema import SQLSchema, SQLResultSet
import sqlite3
import os
from datetime import datetime
import traceback, sys
show_track = 0
class DebugConnection(sqlite3.Connection):
    # Debug wrapper around sqlite3.Connection that can trace commit/rollback
    # calls to stderr. Tracing is currently disabled twice over: by the
    # module-level `show_track` flag and by the hard-coded `and 0` below.

    def commit(self):
        if show_track and 0:
            print('============== commit', file=sys.stderr)
            print(dir(self), file=sys.stderr)
            #traceback.print_stack()
            #print('--------------', file=sys.stderr)
        # Delegate to the real commit; sqlite3's commit() returns None, so the
        # captured value is unused and this method implicitly returns None.
        ret = super(DebugConnection, self).commit()

    def rollback(self):
        if show_track and 0:
            print('============== rollback', file=sys.stderr)
            traceback.print_stack()
            print('--------------', file=sys.stderr)
        ret = super(DebugConnection, self).rollback()
class SQLITEResultSet(SQLResultSet):
    # Result-set specialisation that retrieves the primary key of a freshly
    # inserted row via SQLite's last_insert_rowid().

    def perform_insert(self, script, param, pk_fields, table, new_key):
        # Execute the INSERT and return the new primary key. If the caller
        # already knows the key (`new_key`), skip the rowid lookup.
        global show_track
        # Side effect: toggles the module-level debug flag read by
        # DebugConnection; presumably dict_record doubles as a debug switch
        # here -- TODO confirm.
        show_track = self.dict_record
        self.schema.db_execute(script, param)
        if new_key:
            return new_key
        # NOTE(review): there is no space between the rendered column list and
        # 'from'; this presumably parses only because render_name emits
        # delimited identifiers -- verify.
        script = u'select %sfrom %s\nwhere rowid=last_insert_rowid()' % (
            u','. join ([
                self.schema.render_name(field) for field in pk_fields
            ]),
            self.schema.render_name(table)
        )
        res = self.schema.db_execute(script)
        return res.fetchone()
class SQLITE(SQLSchema):
    """SQLSchema implementation backed by SQLite.

    Databases live as ``<name>.sqlite`` files under ``path``, or entirely in
    memory when the database name is ``':memory:'``.
    """

    rs_class = SQLITEResultSet

    # Generic schema type -> SQLite column type.
    _type_conv = dict(
        enum='varchar',
        boolean='integer',
        datetime='timestamp',
        tinyint='integer',
        mediumtext='text',
    )

    prelude = """
    PRAGMA recursive_triggers=1;
    """
    postfix = """
    PRAGMA foreign_keys = ON;
    """
    query_prefix = """
    --PRAGMA recursive_triggers=1;
    """

    # SQLite expressions producing "now" for each temporal column type.
    getdate = dict(
        timestamp="strftime('%Y-%m-%d %H:%M:%f', 'now')",
        date="strftime('%Y-%m-%d', 'now')",
        time="strftime('%H:%M:%S.%f', 'now')",
    )

    deferred_fk = "DEFERRABLE INITIALLY DEFERRED"

    # Trigger template refreshing a timestamp column whenever any of the
    # other fields of the row is updated (note the escaped %%(c)d counter).
    on_update_trigger = """
    create trigger [tr_%(table)s%%(c)d]
    after update
    of %(other_fields)s
    on [%(table)s] for each row
    -- when ([new].[%(field)s]=[old].[%(field)s])
    begin
        update [%(table)s]
        set [%(field)s]=%(getdate_tr)s
        where %(where_pk)s;
    end;
    """

    dbsuffix = '.sqlite'
    path = None

    def __init__(self, **kw):
        """Accepts an optional `path` keyword: the directory holding the
        .sqlite database files."""
        super(SQLITE, self).__init__()
        self.type_render['integer primary key autoincrement'] = \
            self.type_render['integer']
        self.dbsuffixlen = len(self.dbsuffix)
        path = kw.get('path')
        # Bug fix: guard against a missing 'path' keyword --
        # os.path.exists(None) raises TypeError.
        if path and os.path.exists(path):
            self.path = os.path.abspath(path)

    def render_autoincrement(self, attrs, entity, name):
        """Render auto-increment columns with SQLite's rowid alias type and
        suppress the separate primary-key clause."""
        attrs, _ = super(SQLITE, self).render_autoincrement(attrs, entity, name)
        if attrs.get('is_auto_increment'):
            attrs['data_type'] = 'integer primary key autoincrement'
            self.this_render_pk = False
        return attrs, ''

    def fk_disable(self):
        """Turn off foreign-key enforcement for the current connection."""
        self.db_execute("PRAGMA foreign_keys = OFF")

    def fk_enable(self):
        """Turn on foreign-key enforcement for the current connection."""
        self.db_execute("PRAGMA foreign_keys = ON")

    def db_filename(self, dbname):
        """Map a logical database name to its on-disk file path."""
        if dbname == ':memory:':
            return dbname
        return os.path.join(self.path, dbname + self.dbsuffix)

    def isdba(self, **kw):
        """Return whether databases can be created/dropped (path writable)."""
        if self.dbname == ':memory:':
            return True
        return self.path and os.access(self.path, os.W_OK)

    def db_create(self, dbname):
        """Create an empty database file; False if it already exists."""
        if dbname == ':memory:':
            return True
        path = self.db_filename(dbname)
        if os.path.exists(path):
            return False
        open(path, 'w').write('')
        return os.path.exists(path)

    def db_drop(self, dbname):
        """Remove a database file, disconnecting first if it is current."""
        if not self.isdba():
            return
        if dbname == self.dbname:
            self.db_disconnect()
        if dbname == ':memory:':
            return True
        path = self.db_filename(dbname)
        if os.path.exists(path):
            os.remove(path)
        return not os.path.exists(path)

    def db_connect(self, dbname):
        """Open a connection to `dbname`; True on success."""
        try:
            path = self.db_filename(dbname)
            self.connection = sqlite3.connect(
                path,
                detect_types=sqlite3.PARSE_DECLTYPES,
                # factory=DebugConnection,
            )
            self.dbname = dbname
            return True
        # Bug fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            self.db_reset()
            return False

    def db_disconnect(self):
        if not self.connection:
            return
        self.connection.close()
        self.db_reset()

    def db_commit(self):
        if not self.connection:
            return
        self.connection.commit()

    def db_rollback(self):
        if not self.connection:
            return
        self.connection.rollback()

    def db_name(self):
        return self.dbname

    def db_list(self):
        """List logical database names available under `path`."""
        if self.dbname == ':memory:':
            return [self.dbname]
        return [
            db[:-self.dbsuffixlen] for db in os.listdir(self.path)
            if db.endswith(self.dbsuffix)
        ]

    def db_execute(self, script, param=()):
        # `param` default changed from `list()` (a single shared mutable
        # default evaluated once at def time) to an immutable empty tuple;
        # behaviour is identical since the sequence is only read.
        self.pre_execute(script, param)
        cur = self.db_cursor()
        cur.execute(self.query_prefix)
        cur.execute(script, param)
        return cur

    def db_executemany(self, script, param=()):
        cur = self.db_cursor()
        cur.execute(self.query_prefix)
        cur.executemany(script, param)
        return cur

    def db_executescript(self, script):
        cur = self.db_cursor()
        cur.executescript(self.query_prefix + ';' + script)
        return cur

    def db_now(self):
        """Return the database's current time as a datetime object."""
        snow = super(SQLITE, self).db_now()
        return datetime.strptime(snow, '%Y-%m-%d %H:%M:%S.%f')
| StarcoderdataPython |
108465 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright © 2012 CEA
# <NAME>
# Licensed under the terms of the CECILL License
# (see guiqwt/__init__.py for details)
"""Flip/rotate test"""
SHOW = True # Show test in GUI-based test launcher
from guiqwt.widgets.fliprotate import FlipRotateDialog, FlipRotateWidget
from guiqwt.tests.rotatecrop import imshow, create_test_data
def widget_test(fname, qapp):
    """Test the flip/rotate widget non-modally on the image in `fname`."""
    # Only the plot item is needed here; the raw array returned alongside it
    # was previously bound to an unused variable.
    _, item = create_test_data(fname)
    widget = FlipRotateWidget(None)
    widget.set_item(item)
    widget.set_parameters(-90, True, False)
    widget.show()
    qapp.exec_()
def dialog_test(fname, interactive=True):
    """Test the flip/rotate dialog on the image in `fname` and show the
    original and transformed arrays side by side when accepted.

    NOTE(review): `interactive` is currently unused; kept for API
    compatibility.
    """
    array0, item = create_test_data(fname)
    dlg = FlipRotateDialog(None)
    dlg.set_item(item)
    if not dlg.exec_():
        return
    array1 = dlg.output_array
    imshow(array0, title="array0", hold=True)
    imshow(array1, title="array1")
if __name__ == '__main__':
    from guidata import qapplication
    qapp = qapplication()  # analysis:ignore
    # Run the non-blocking widget demo first, then the modal dialog demo.
    widget_test("brain.png", qapp)
    dialog_test(fname="brain.png", interactive=True)
| StarcoderdataPython |
1607675 | import os
import re
import argparse
import csv
import subprocess
from datetime import date
import math
# ###############################################################
# Class for parsing VTR results for various experiments
# ###############################################################
class GenResults():
#--------------------------
#constructor
#--------------------------
def __init__(self):
    """Parse CLI arguments, extract metrics from every VTR run directory
    listed in the input file, and write them to a CSV -- the whole pipeline
    runs from the constructor."""
    #members
    self.infile = ""        # file listing VTR task run directories
    self.outfile = ""       # CSV report to write
    self.result_list = []   # one dict per run, keyed by self.metrics
    # Column order of the output CSV; extract_info() fills one dict per run
    # using these keys.
    self.metrics = ["design",\
                    "dirname", \
                    "design_filename", \
                    "run_num", \
                    "exp", \
                    "type", \
                    "arch", \
                    "vpr_results_found", \
                    "pre_vpr_blif_found", \
                    "parse_results_found", \
                    "power_results_found", \
                    "critical_path", \
                    "frequency", \
                    "logic_area", \
                    "routing_area", \
                    "total_area", \
                    "channel_width", \
                    "average_net_length", \
                    "max_net_length", \
                    "max_fanout", \
                    "max_non_global_fanout", \
                    "average_wire_segments_per_net", \
                    "max_segments_used_by_a_net", \
                    "total_routed_wire_length", \
                    "resource_usage_io", \
                    "resource_usage_clb", \
                    "resource_usage_dsp", \
                    "resource_usage_memory", \
                    "resource_usage_tensor_slice",\
                    "utilization_io", \
                    "utilization_clb", \
                    "utilization_dsp", \
                    "utilization_memory", \
                    "utilization_tensor_slice", \
                    "utilization_device", \
                    "device_io", \
                    "device_clb", \
                    "device_dsp", \
                    "device_memory", \
                    "device_tensor_slice", \
                    "max_routing_channel_util", \
                    "min_util_for_largest_pct_of_total_channels", \
                    "max_util_for_largest_pct_of_total_channels", \
                    "largest_pct_of_total_channels", \
                    "routing_histogram_1_inf_val", \
                    "routing_histogram_09_1_val", \
                    "routing_histogram_08_09_val", \
                    "routing_histogram_07_08_val", \
                    "routing_histogram_06_07_val", \
                    "routing_histogram_05_06_val", \
                    "routing_histogram_04_05_val", \
                    "routing_histogram_03_04_val", \
                    "routing_histogram_02_03_val", \
                    "routing_histogram_01_02_val", \
                    "routing_histogram_00_01_val", \
                    "routing_histogram_1_inf_pct", \
                    "routing_histogram_09_1_pct", \
                    "routing_histogram_08_09_pct", \
                    "routing_histogram_07_08_pct", \
                    "routing_histogram_06_07_pct", \
                    "routing_histogram_05_06_pct", \
                    "routing_histogram_04_05_pct", \
                    "routing_histogram_03_04_pct", \
                    "routing_histogram_02_03_pct", \
                    "routing_histogram_01_02_pct", \
                    "routing_histogram_00_01_pct", \
                    "single_bit_adders", \
                    "luts", \
                    "ffs", \
                    "ff_to_lut_ratio", \
                    "dsp_to_clb_ratio", \
                    "memory_to_clb_ratio", \
                    "adder_to_lut_ratio", \
                    "netlist_primitives", \
                    "netlist_primitives>10k", \
                    "vtr_flow_elapsed_time", \
                    "odin_time", \
                    "abc_time", \
                    "pack_time", \
                    "place_time", \
                    "route_time", \
                    "vtr_flow_peak_memory_usage", \
                    "near_crit_connections", \
                    "logic_depth", \
                    "device_height", \
                    "device_width" ,
                    "device_grid_area" ,
                    "device_grid_side" ,
                    "grid_size_limiter", \
                    "date", \
                    "tag" ,
                    "order"]
    # Disabled power-metric columns, kept for possible re-enabling:
    #self.components_of_interest = ["routing", "clock", "clb", "dsp", "memory"]
    #self.power_types = ["abs_total_power", \
    #                    "abs_dynamic_power", \
    #                    "abs_static_power", \
    #                    "pct_total_power", \
    #                    "pct_dynamic_power", \
    #                    "pct_static_power"]
    #for power in self.power_types:
    #    for component in self.components_of_interest:
    #        self.metrics.append(component+"_"+power)
    #    for component in ["compute_ram", "storage_ram"]:
    #        self.metrics.append(component+"_"+power)
    #
    #self.metrics += [\
    #    "absolute_dynamic_power_of_circuit", \
    #    "absolute_static_power_of_circuit", \
    #    "absolute_total_power_of_circuit"
    #]

    #method calls in order
    self.parse_args()
    self.extract_info()
    self.print_csv()
#--------------------------
#parse command line args
#--------------------------
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument("-i",
"--infile",
action='store',
default="aug_2021.log",
help="File containing the STDOUT of VTR runs")
parser.add_argument("-o",
"--outfile",
action='store',
default="out.aug_2021.csv",
help="Name of output file")
parser.add_argument("-t",
"--tag",
action='store',
default="",
help="Tag for these results")
args = parser.parse_args()
print("infile = "+ args.infile)
print("outfile = "+args.outfile)
self.infile = args.infile
self.outfile = args.outfile
self.tag = args.tag
#--------------------------
#print the csv file
#--------------------------
def print_csv(self):
print("Printing csv: " + self.outfile)
outfile = open(self.outfile, 'w+')
writer = csv.DictWriter(outfile, fieldnames=self.metrics)
writer.writeheader()
for data in self.result_list:
writer.writerow(data)
outfile.close()
#--------------------------
#find a file
#--------------------------
def find_file(self, dirname, run_num, file_to_find):
found = False
for root, dirs, files in os.walk(os.path.realpath(dirname + "/" + run_num), topdown=True):
#print(root, dirs, files)
for filename in files:
#print(filename)
match = re.match(file_to_find, filename)
if match is not None:
found = True
found_filename = os.path.join(root,filename)
#print("Found {} for {}: {}".format(file_to_find, dirname, found_filename))
return found_filename
if not found:
print("Could not find {} for {}".format(file_to_find, dirname))
return None
#--------------------------
#get routing area of various blocks
#--------------------------
def get_routing_area(self, arch, block):
#if arch == "stratix":
# routing_area_clb = 30481 #in MWTAs
# routing_area_dsp = 4 * routing_area_clb #DSP is 4 rows high
# routing_area_memory = 6 * routing_area_clb #Memory is 6 rows high
#elif arch == "agilex":
#area values from coffe, changed from um2 to mwtas
#area is SB + CB in the tile
routing_area_clb = (684+305) / 0.033864 #converting um2 into MWTAs
routing_area_dsp = 4 * routing_area_clb #DSP is 4 rows high
routing_area_memory = 2 * routing_area_clb #Memory is 2 rows high
routing_area_tensor_slice = 8 * routing_area_clb #Matmul is 8 rows high
#else:
# print("Unsupported architecture: {}".format(arch))
# raise SystemExit(0)
if block == "clb":
return routing_area_clb
elif block == "dsp":
return routing_area_dsp
elif block == "memory":
return routing_area_memory
elif block == "tensor_slice":
return routing_area_tensor_slice
else:
print("Unsupported block: {}".format(block))
raise SystemExit(0)
#--------------------------
#extract information for each entry in infile
#--------------------------
def extract_info(self):
infile = open(self.infile, 'r')
#the infile contains dir names. each dir_name/latest contains a vpr.out file
for line in infile:
#if the line is commented out, ignore it
check_for_comment = re.search(r'^#', line)
if check_for_comment is not None:
continue
check_for_task_dir = re.search(r'^task_run_dir=', line)
if check_for_task_dir is None:
continue
m = re.search('task_run_dir=(.*)/(run.*)', line.rstrip())
if m is not None:
dirname = m.group(1)
run_num = m.group(2)
else:
print("Unable to parse line: " + line)
continue
result_dict = {}
result_dict['dirname'] = dirname
result_dict['run_num'] = run_num
#extract experiment info from dirname
info = re.search(r'(exp\d)/(.*)\.(.*?)$', dirname)
if info is not None:
result_dict['exp'] = info.group(1)
result_dict['design'] = info.group(2)
result_dict['arch'] = info.group(3)
else:
print("Unable to extract experiment info from " + dirname)
if result_dict["exp"] == "exp3":
architectures = ['no_tensor_slice', \
'tensor_slice_15pct']
elif result_dict["exp"] == "exp4":
architectures = ['no_tensor_slice', \
'tensor_slice_25pct', \
'tensor_slice_30pct']
else:
architectures = ['no_tensor_slice', \
'tensor_slice_5pct', \
'tensor_slice_10pct', \
'tensor_slice_15pct', \
'tensor_slice_20pct', \
'tensor_slice_25pct', \
'tensor_slice_30pct']
assert(result_dict['arch'] in architectures), "Unknown architecture found: {i}".format(i=result_dict["arch"])
if result_dict["exp"] == "exp3" and result_dict["arch"] == "tensor_slice_15pct":
#overwrite the arch name; tensor_slice_15pct was just a placeholder
result_dict['arch'] = "tensor_slice_auto_layout"
print("Extracting info for " + dirname + "/" + run_num)
result_dict['vpr_results_found'] = "No"
result_dict['pre_vpr_blif_found'] = "No"
result_dict['parse_results_found'] = "No"
result_dict['power_results_found'] = "No"
#--------------------------
#extract information from vpr.out
#--------------------------
#try to find vpr.out
vpr_out_filename = self.find_file(dirname, run_num, "vpr.out")
if vpr_out_filename is None:
result_dict['vpr_results_found'] = "No"
else:
result_dict['vpr_results_found'] = "Yes"
#Start parsing vtr.out
vpr_out = open(vpr_out_filename, 'r')
resource_usage_ff = 0
resource_usage_adder = 0
resource_usage_lut = 0
pb_types_usage = False
routing_channel_hist_was_found = False
routing_channel_util_section = False
largest_pct_of_total_channels = 0.0
result_dict['single_bit_adders'] = 0
result_dict['logic_area'] = 0
result_dict['resource_usage_tensor_slice'] = 0
for line in vpr_out:
#pb types usage section starts with this text
pb_types_usage_match = re.search('Pb types usage', line)
if pb_types_usage_match is not None:
pb_types_usage = True
#pb types usage section ends with this text
create_device_usage_match = re.search('# Create Device', line)
if create_device_usage_match is not None:
pb_types_usage = False
#routing channel utilization section starts with this text
routing_channel_util_match = re.search(r'Routing channel utilization histogram:', line)
if routing_channel_util_match is not None:
routing_channel_util_section = True
#routing channel utilization section ends with this text
max_routing_channel_util_match = re.search(r'Maximum routing channel utilization:', line)
if max_routing_channel_util_match is not None:
routing_channel_util_section = False
#print(line)
logic_area_match = re.search(r'Total used logic block area: (.*)', line)
if logic_area_match is not None:
logic_area = logic_area_match.group(1)
result_dict['logic_area'] = logic_area or "Not found"
#routing_area_match = re.search(r'Total routing area: (.*), per logic tile', line)
#if routing_area_match is not None:
# routing_area = routing_area_match.group(1)
# result_dict['routing_area'] = routing_area or "Not found"
crit_path_match3 = re.search(r'Final critical path: (.*) ns', line)
crit_path_match4 = re.search(r'Final critical path delay \(least slack\): (.*) ns', line)
if crit_path_match3 is not None or crit_path_match4 is not None:
if crit_path_match3 is not None:
crit_path_match = crit_path_match3
if crit_path_match4 is not None:
crit_path_match = crit_path_match4
critical_path = crit_path_match.group(1)
result_dict['critical_path'] = float(critical_path) or 0
result_dict['frequency'] = 1/result_dict['critical_path']*1000
channel_width_match = re.search(r'Circuit successfully routed with a channel width factor of (.*)\.', line)
if channel_width_match is not None:
channel_width = channel_width_match.group(1)
result_dict['channel_width'] = channel_width or "Not found"
average_net_length_match = re.search(r'average net length: (.*)', line)
if average_net_length_match is not None:
average_net_length = average_net_length_match.group(1)
result_dict['average_net_length'] = average_net_length or "Not found"
max_net_length_match = re.search(r'Maximum net length: (.*)', line)
if max_net_length_match is not None:
max_net_length = max_net_length_match.group(1)
result_dict['max_net_length'] = max_net_length or "Not found"
average_wire_segments_per_net_match = re.search(r'average wire segments per net: (.*)', line)
if average_wire_segments_per_net_match is not None:
average_wire_segments_per_net = average_wire_segments_per_net_match.group(1)
result_dict['average_wire_segments_per_net'] = average_wire_segments_per_net or "Not found"
max_segments_used_by_a_net_match = re.search(r'Maximum segments used by a net: (.*)', line)
if max_segments_used_by_a_net_match is not None:
max_segments_used_by_a_net = max_segments_used_by_a_net_match.group(1)
result_dict['max_segments_used_by_a_net'] = max_segments_used_by_a_net or "Not found"
total_routed_wire_length_match = re.search(r'Total wirelength: (.*), average net length:', line)
if total_routed_wire_length_match is not None:
total_routed_wire_length = total_routed_wire_length_match.group(1)
result_dict['total_routed_wire_length'] = total_routed_wire_length or "Not found"
utilization_io_match = re.search(r'Block Utilization: (\d+\.\d+) Type: io', line)
if utilization_io_match is not None:
utilization_io = utilization_io_match.group(1)
result_dict['utilization_io'] = float(utilization_io) or 0
utilization_clb_match = re.search(r'Block Utilization: (\d+\.\d+) Type: clb', line)
if utilization_clb_match is not None:
utilization_clb = utilization_clb_match.group(1)
result_dict['utilization_clb'] = float(utilization_clb) or 0
utilization_dsp_match = re.search(r'Block Utilization: (\d+\.\d+) Type: dsp_top', line)
if utilization_dsp_match is not None:
utilization_dsp = utilization_dsp_match.group(1)
result_dict['utilization_dsp'] = float(utilization_dsp) or 0
utilization_memory_match = re.search(r'Block Utilization: (\d+\.\d+) Type: memory', line)
if utilization_memory_match is not None:
utilization_memory = utilization_memory_match.group(1)
result_dict['utilization_memory'] = float(utilization_memory) or 0
utilization_tensor_slice_match = re.search(r'Block Utilization: (\d+\.\d+) Type: tensor_slice_top', line)
if utilization_tensor_slice_match is not None:
utilization_tensor_slice = utilization_tensor_slice_match.group(1)
result_dict['utilization_tensor_slice'] = float(utilization_tensor_slice) or 0
utilization_device_match = re.search(r'Device Utilization: (\d+\.\d+)', line)
if utilization_device_match is not None:
utilization_device = utilization_device_match.group(1)
result_dict['utilization_device'] = float(utilization_device) or 0
resource_usage_io_match = re.search(r'(\d+)\s+blocks of type: io', line)
if resource_usage_io_match is not None and ("Netlist" in prev_line):
resource_usage_io = resource_usage_io_match.group(1)
result_dict['resource_usage_io'] = int(resource_usage_io) or 0
resource_usage_clb_match = re.search(r'(\d+)\s+blocks of type: clb', line)
if resource_usage_clb_match is not None and ("Netlist" in prev_line):
resource_usage_clb = resource_usage_clb_match.group(1)
result_dict['resource_usage_clb'] = int(resource_usage_clb) or 0
#if result_dict['arch'] == "stratix":
# resource_usage_dsp_match = re.search(r'(\d+)\s+blocks of type: mult_36', line)
#elif result_dict['arch'] == "agilex":
resource_usage_dsp_match = re.search(r'(\d+)\s+blocks of type: dsp_top', line)
#else:
# print("Unsupported architecture")
# raise SystemExit(0)
if resource_usage_dsp_match is not None and ("Netlist" in prev_line):
resource_usage_dsp = resource_usage_dsp_match.group(1)
result_dict['resource_usage_dsp'] = int(resource_usage_dsp) or 0
resource_usage_memory_match = re.search(r'(\d+)\s+blocks of type: memory', line)
if resource_usage_memory_match is not None and ("Netlist" in prev_line):
resource_usage_memory = resource_usage_memory_match.group(1)
result_dict['resource_usage_memory'] = int(resource_usage_memory) or 0
resource_usage_tensor_slice_match = re.search(r'(\d+)\s+blocks of type: tensor_slice_top', line)
if resource_usage_tensor_slice_match is not None and ("Netlist" in prev_line):
resource_usage_tensor_slice = resource_usage_tensor_slice_match.group(1)
result_dict['resource_usage_tensor_slice'] = int(resource_usage_tensor_slice) or 0
device_io_match = re.search(r'(\d+)\s+blocks of type: io', line)
if device_io_match is not None and ("Architecture" in prev_line):
device_io = device_io_match.group(1)
result_dict['device_io'] = int(device_io) or 0
device_clb_match = re.search(r'(\d+)\s+blocks of type: clb', line)
if device_clb_match is not None and ("Architecture" in prev_line):
device_clb = device_clb_match.group(1)
result_dict['device_clb'] = int(device_clb) or 0
device_dsp_match = re.search(r'(\d+)\s+blocks of type: dsp_top', line)
if device_dsp_match is not None and ("Architecture" in prev_line):
device_dsp = device_dsp_match.group(1)
result_dict['device_dsp'] = int(device_dsp) or 0
device_memory_match = re.search(r'(\d+)\s+blocks of type: memory', line)
if device_memory_match is not None and ("Architecture" in prev_line):
device_memory = device_memory_match.group(1)
result_dict['device_memory'] = int(device_memory) or 0
device_tensor_slice_match = re.search(r'(\d+)\s+blocks of type: tensor_slice_top', line)
if device_tensor_slice_match is not None and ("Architecture" in prev_line):
device_tensor_slice = device_tensor_slice_match.group(1)
result_dict['device_tensor_slice'] = int(device_tensor_slice) or 0
resource_usage_adder_match = re.search(r'adder\s*:\s*(\d*)', line)
if resource_usage_adder_match is not None and pb_types_usage is True:
resource_usage_adder += int(resource_usage_adder_match.group(1))
result_dict['single_bit_adders'] = int(resource_usage_adder) or "Not found"
resource_usage_lut_match = re.search(r'lut\s*:\s*(\d*)', line)
if resource_usage_lut_match is not None and pb_types_usage is True:
resource_usage_lut += int(resource_usage_lut_match.group(1))
result_dict['luts'] = int(resource_usage_lut) or 0
resource_usage_ff_match = re.search(r'ff\s*:\s*(\d*)', line)
if resource_usage_ff_match is not None and pb_types_usage is True:
resource_usage_ff += int(resource_usage_ff_match.group(1))
result_dict['ffs'] = resource_usage_ff or 0
max_fanout_match = re.search(r'Max Fanout\s*:\s*(.*)', line)
if max_fanout_match is not None and ("Avg Fanout" in prev_line):
max_fanout = max_fanout_match.group(1)
result_dict['max_fanout'] = round(float(max_fanout)) or 0
max_non_global_fanout_match = re.search(r'Max Non Global Net Fanout\s*:\s*(.*)', line)
if max_non_global_fanout_match is not None:
max_non_global_fanout = max_non_global_fanout_match.group(1)
result_dict['max_non_global_fanout'] = round(float(max_non_global_fanout)) or 0
near_crit_connections_match = re.search(r'\[ 0: 0.1\)\s*\d+\s*\(\s*([\d\.]*)%\)', line)
if near_crit_connections_match is not None and ("Final Net Connection Criticality Histogram" in prev_line):
near_crit_connections = near_crit_connections_match.group(1)
result_dict['near_crit_connections'] = float(near_crit_connections) or 0
max_routing_channel_util_match = re.search(r'Maximum routing channel utilization:\s+(.*) at \(.*\)', line)
if max_routing_channel_util_match is not None:
result_dict['max_routing_channel_util'] = max_routing_channel_util_match.group(1)
if routing_channel_util_section is True:
routing_channel_hist_was_found = True
routing_histogram_match = re.search(r'\[\s+(.*):\s+(.*)\)\s*.*\s*\(\s*(.*)%\)', line)
if routing_histogram_match is not None:
utilization_min = float(routing_histogram_match.group(1))
utilization_max = float(routing_histogram_match.group(2))
pct_of_total_channels = float(routing_histogram_match.group(3))
if pct_of_total_channels > largest_pct_of_total_channels:
largest_pct_of_total_channels = pct_of_total_channels
min_util_for_largest_pct_of_total_channels = utilization_min
max_util_for_largest_pct_of_total_channels = utilization_max
routing_histogram_1_inf_match = re.search(r'\[\s+1:\s+inf\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_1_inf_match is not None:
result_dict["routing_histogram_1_inf_val"] = int(routing_histogram_1_inf_match.group(1))
result_dict["routing_histogram_1_inf_pct"] = float(routing_histogram_1_inf_match.group(2))
routing_histogram_09_1_match = re.search(r'\[\s+0.9:\s+1\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_09_1_match is not None:
result_dict["routing_histogram_09_1_val"] = int(routing_histogram_09_1_match.group(1))
result_dict["routing_histogram_09_1_pct"] = float(routing_histogram_09_1_match.group(2))
routing_histogram_08_09_match = re.search(r'\[\s+0.8:\s+0.9\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_08_09_match is not None:
result_dict["routing_histogram_08_09_val"] = int(routing_histogram_08_09_match.group(1))
result_dict["routing_histogram_08_09_pct"] = float(routing_histogram_08_09_match.group(2))
routing_histogram_07_08_match = re.search(r'\[\s+0.7:\s+0.8\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_07_08_match is not None:
result_dict["routing_histogram_07_08_val"] = int(routing_histogram_07_08_match.group(1))
result_dict["routing_histogram_07_08_pct"] = float(routing_histogram_07_08_match.group(2))
routing_histogram_06_07_match = re.search(r'\[\s+0.6:\s+0.7\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_06_07_match is not None:
result_dict["routing_histogram_06_07_val"] = int(routing_histogram_06_07_match.group(1))
result_dict["routing_histogram_06_07_pct"] = float(routing_histogram_06_07_match.group(2))
routing_histogram_05_06_match = re.search(r'\[\s+0.5:\s+0.6\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_05_06_match is not None:
result_dict["routing_histogram_05_06_val"] = int(routing_histogram_05_06_match.group(1))
result_dict["routing_histogram_05_06_pct"] = float(routing_histogram_05_06_match.group(2))
routing_histogram_04_05_match = re.search(r'\[\s+0.4:\s+0.5\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_04_05_match is not None:
result_dict["routing_histogram_04_05_val"] = int(routing_histogram_04_05_match.group(1))
result_dict["routing_histogram_04_05_pct"] = float(routing_histogram_04_05_match.group(2))
routing_histogram_03_04_match = re.search(r'\[\s+0.3:\s+0.4\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_03_04_match is not None:
result_dict["routing_histogram_03_04_val"] = int(routing_histogram_03_04_match.group(1))
result_dict["routing_histogram_03_04_pct"] = float(routing_histogram_03_04_match.group(2))
routing_histogram_02_03_match = re.search(r'\[\s+0.2:\s+0.3\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_02_03_match is not None:
result_dict["routing_histogram_02_03_val"] = int(routing_histogram_02_03_match.group(1))
result_dict["routing_histogram_02_03_pct"] = float(routing_histogram_02_03_match.group(2))
routing_histogram_01_02_match = re.search(r'\[\s+0.1:\s+0.2\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_01_02_match is not None:
result_dict["routing_histogram_01_02_val"] = int(routing_histogram_01_02_match.group(1))
result_dict["routing_histogram_01_02_pct"] = float(routing_histogram_01_02_match.group(2))
routing_histogram_00_01_match = re.search(r'\[\s+0:\s+0.1\)\s*(.*)\s*\(\s*(.*)%\)', line)
if routing_histogram_00_01_match is not None:
result_dict["routing_histogram_00_01_val"] = int(routing_histogram_00_01_match.group(1))
result_dict["routing_histogram_00_01_pct"] = float(routing_histogram_00_01_match.group(2))
prev_line = line
#calculated metrics
if 'logic_area' in result_dict and 'resource_usage_clb' in result_dict \
and 'resource_usage_dsp' in result_dict and 'resource_usage_memory' in result_dict\
and 'resource_usage_tensor_slice' in result_dict:
routing_area_clb = self.get_routing_area(result_dict["arch"], "clb")
routing_area_dsp = self.get_routing_area(result_dict["arch"], "dsp")
routing_area_memory = self.get_routing_area(result_dict["arch"], "memory")
routing_area_tensor_slice = self.get_routing_area(result_dict["arch"], "tensor_slice")
result_dict['routing_area'] = (routing_area_clb * result_dict['resource_usage_clb']) +\
(routing_area_dsp * result_dict['resource_usage_dsp']) +\
(routing_area_tensor_slice * result_dict['resource_usage_tensor_slice']) +\
(routing_area_memory * result_dict['resource_usage_memory'])
result_dict['total_area'] = float(result_dict['logic_area']) + float(result_dict['routing_area'])
if 'ffs' in result_dict and 'luts' in result_dict and 'resource_usage_clb' in result_dict \
and 'resource_usage_dsp' in result_dict and 'resource_usage_memory' in result_dict \
and 'single_bit_adders' in result_dict:
result_dict['ff_to_lut_ratio'] = result_dict['ffs'] / result_dict['luts']
result_dict['dsp_to_clb_ratio'] = result_dict['resource_usage_dsp'] / result_dict['resource_usage_clb']
result_dict['memory_to_clb_ratio'] = result_dict['resource_usage_memory'] / result_dict['resource_usage_clb']
result_dict['adder_to_lut_ratio'] = result_dict['single_bit_adders'] / result_dict['luts']
result_dict['largest_pct_of_total_channels'] = largest_pct_of_total_channels
result_dict['min_util_for_largest_pct_of_total_channels'] = min_util_for_largest_pct_of_total_channels
result_dict['max_util_for_largest_pct_of_total_channels'] = max_util_for_largest_pct_of_total_channels
##--------------------------
##extract information from odin.blif
##--------------------------
##try to find <design>.odin.blif
#odin_blif_filename = self.find_file(dirname, run_num, result_dict['design']+'.odin.blif')
#if odin_blif_filename is None:
# result_dict['odin_blif_found'] = "No"
#else:
# result_dict['odin_blif_found'] = "Yes"
# netlist_primitives = 0
# odin_blif = open(odin_blif_filename, "r")
# for line in odin_blif:
# if ".latch" in line or ".subckt" in line or ".names" in line:
# netlist_primitives = netlist_primitives + 1
# result_dict['netlist_primitives'] = netlist_primitives
# result_dict['netlist_primitives>100k'] = (netlist_primitives > 100000)
# odin_blif.close()
#--------------------------
#identify whether this is ml or non-ml design
#--------------------------
config_file = dirname + "/config/config.txt"
config_fh = open(config_file, "r")
result_dict["type"] = "non_ml"
for line in config_fh:
#if the name of the design file contains either .tensor_slice.v
#or .dsp.v, that means it is an ml benchmarks. Non-ml benchmarks
#don't have these two variations
m = re.search("circuit_list_add=.*(tensor_slice|dsp).v", line)
if m is not None:
result_dict["type"] = "ml"
break
config_fh.close()
#--------------------------
#extract information from pre-vpr.blif
#--------------------------
#try to find <design>.pre-vpr.blif
benchname = result_dict['design']
design_type = result_dict['type']
fpga_arch = result_dict['arch']
if design_type == "ml":
if fpga_arch == "no_tensor_slice":
design_file=benchname+".dsp"
else:
design_file=benchname+".tensor_slice"
else:
design_file = benchname
result_dict['design_filename'] = design_file + ".v"
if result_dict["exp"] == "exp5":
if fpga_arch == "tensor_slice_5pct":
design_file = re.sub(r'lstm', 'lstm_35_new', design_file)
elif fpga_arch == "tensor_slice_10pct":
design_file = re.sub(r'lstm', 'lstm_28_new', design_file)
elif fpga_arch == "tensor_slice_15pct":
design_file = re.sub(r'lstm', 'lstm_20_new', design_file)
elif fpga_arch == "tensor_slice_20pct":
design_file = re.sub(r'lstm', 'lstm_15_new', design_file)
elif fpga_arch == "tensor_slice_25pct":
design_file = re.sub(r'lstm', 'lstm_8_new', design_file)
pre_vpr_blif_filename = self.find_file(dirname, run_num, design_file+'.pre-vpr.blif')
if pre_vpr_blif_filename is None:
result_dict['pre_vpr_blif_found'] = "No"
else:
result_dict['pre_vpr_blif_found'] = "Yes"
netlist_primitives = 0
pre_vpr_blif = open(pre_vpr_blif_filename, "r")
for line in pre_vpr_blif:
if ".latch" in line or ".subckt" in line or ".names" in line:
netlist_primitives = netlist_primitives + 1
result_dict['netlist_primitives'] = netlist_primitives
result_dict['netlist_primitives>10k'] = (netlist_primitives > 10000)
pre_vpr_blif.close()
#--------------------------
#extract information from parse_results.txt
#--------------------------
#try to find parse_results.txt
parse_results_filename = self.find_file(dirname, run_num, 'parse_results.txt')
if parse_results_filename is None:
result_dict['parse_results_found'] = "No"
else:
result_dict['parse_results_found'] = "Yes"
parse_results_filehandle = open(parse_results_filename, "r")
parse_results_dict_reader = csv.DictReader(parse_results_filehandle, delimiter='\t')
for row in parse_results_dict_reader:
#print(row.keys())
#print(row.values())
result_dict['vtr_flow_elapsed_time'] = row['vtr_flow_elapsed_time']
result_dict['odin_time'] = row['odin_synth_time']
result_dict['abc_time'] = row['abc_synth_time']
result_dict['pack_time'] = row['pack_time']
result_dict['place_time'] = row['place_time']
result_dict['route_time'] = row['min_chan_width_route_time']
result_dict['vtr_flow_peak_memory_usage'] = max(float(row['max_odin_mem']), \
float(row['max_abc_mem']), \
float(row['max_vpr_mem']))
result_dict['logic_depth'] = row['abc_depth']
result_dict['device_height'] = row['device_height']
result_dict['device_width'] = row['device_width']
result_dict['device_grid_area'] = int(result_dict['device_width']) * int(result_dict['device_height'])
result_dict['device_grid_side'] = math.sqrt(result_dict['device_grid_area'])
result_dict['grid_size_limiter'] = row['device_limiting_resources']
#result_dict['min_channel_width'] = row['min_chan_width']
#result_dict['critical_path'] = row['critical_path_delay']
parse_results_filehandle.close()
result_dict["tag"] = self.tag
result_dict["date"] = date.today().strftime("%B %d, %Y")
#--------------------------
# additional logic for 06-07 bucket in the routing util histogram
# because VPR doesn't print this bucket for some reason
#--------------------------
if result_dict['vpr_results_found'] == "Yes" and routing_channel_hist_was_found:
total_channels = 2 * (int(result_dict["device_width"])-1) * (int(result_dict["device_height"])-1)
result_dict["routing_histogram_06_07_val"] = \
total_channels - (\
result_dict["routing_histogram_1_inf_val"] + \
result_dict["routing_histogram_09_1_val"] + \
result_dict["routing_histogram_08_09_val"] + \
result_dict["routing_histogram_07_08_val"] + \
result_dict["routing_histogram_05_06_val"] + \
result_dict["routing_histogram_04_05_val"] + \
result_dict["routing_histogram_03_04_val"] + \
result_dict["routing_histogram_02_03_val"] + \
result_dict["routing_histogram_01_02_val"] + \
result_dict["routing_histogram_00_01_val"] )
result_dict["routing_histogram_06_07_pct"] = \
round(100 - (\
result_dict["routing_histogram_1_inf_pct"] + \
result_dict["routing_histogram_09_1_pct"] + \
result_dict["routing_histogram_08_09_pct"] + \
result_dict["routing_histogram_07_08_pct"] + \
result_dict["routing_histogram_05_06_pct"] + \
result_dict["routing_histogram_04_05_pct"] + \
result_dict["routing_histogram_03_04_pct"] + \
result_dict["routing_histogram_02_03_pct"] + \
result_dict["routing_histogram_01_02_pct"] + \
result_dict["routing_histogram_00_01_pct"] ))
##--------------------------
##extract information from <circuit>.power file
##--------------------------
##try to find the file
#power_results_filename = self.find_file(dirname, run_num, result_dict['design']+'.power')
#absolute_dynamic_power_of_circuit = 0
#absolute_static_power_of_circuit = 0
#absolute_total_power_of_circuit = 0
#if power_results_filename is None:
# result_dict['power_results_found'] = "No"
#else:
# result_dict['power_results_found'] = "Yes"
# power_results_filehandle = open(power_results_filename, "r")
# for line in power_results_filehandle:
# m = re.search(r'(.*?)\s+([0-9]*\.?[0-9]+|-nan)\s+([0-9]*\.?[0-9]+|-nan)\s+([0-9]*\.?[0-9]+|-nan)', line)
# if m is not None:
# component = m.group(1).strip().lower()
# print("Obtained power data for: {}".format(component))
# if component in self.components_of_interest:
#
# #We want to ignore rows that contain "clock" but are not in the main table
# if (component == "clock") and ("Other Estimation Methods" not in prev_line):
# continue
# absolute_total_power_of_component = float(m.group(2)) if m.group(2) != "-nan" else 0
# print("Absolute power of component is {}".format(absolute_total_power_of_component))
# how_much_pct_of_circuit_power_is_this_component = float(m.group(3)) if m.group(3) != "-nan" else 0
# how_much_pct_of_component_power_is_dynamic = float(m.group(4)) if m.group(4) != "-nan" else 0
# #Calculated metrics
# absolute_dynamic_power_of_component = absolute_total_power_of_component * how_much_pct_of_component_power_is_dynamic
# absolute_static_power_of_component = absolute_total_power_of_component - absolute_dynamic_power_of_component
# result_dict[component+"_abs_total_power"] = absolute_total_power_of_component
# result_dict[component+"_abs_dynamic_power"] = absolute_dynamic_power_of_component
# result_dict[component+"_abs_static_power"] = absolute_static_power_of_component
# result_dict[component+"_pct_total_power"] = how_much_pct_of_circuit_power_is_this_component
# absolute_dynamic_power_of_circuit += absolute_dynamic_power_of_component
# absolute_static_power_of_circuit += absolute_static_power_of_component
# absolute_total_power_of_circuit += absolute_total_power_of_component
# prev_line = line
# for component in self.components_of_interest:
# result_dict[component+"_pct_dynamic_power"] = result_dict[component+"_abs_dynamic_power"] / absolute_dynamic_power_of_circuit
# result_dict[component+"_pct_static_power"] = result_dict[component+"_abs_static_power"] / absolute_static_power_of_circuit
#
# result_dict["absolute_dynamic_power_of_circuit"] = absolute_dynamic_power_of_circuit
# result_dict["absolute_static_power_of_circuit"] = absolute_static_power_of_circuit
# result_dict["absolute_total_power_of_circuit"] = absolute_total_power_of_circuit
# power_results_filehandle.close()
# #Now extract compute ram power
# power_results_filehandle = open(power_results_filename, "r")
# for line in power_results_filehandle:
# m = re.search(r'(.*?)\s+([0-9]*\.?[0-9]+|-nan)\s+([0-9]*\.?[0-9]+|-nan)\s+([0-9]*\.?[0-9]+|-nan)', line)
# if m is not None:
# component = m.group(1).strip().lower()
# if component == "mem_128x128_compute":
#
# absolute_total_power_of_component = float(m.group(2)) if m.group(2) != "-nan" else 0
# how_much_pct_of_circuit_power_is_this_component = float(m.group(3)) if m.group(3) != "-nan" else 0
# how_much_pct_of_component_power_is_dynamic = float(m.group(4)) if m.group(4) != "-nan" else 0
# #Calculated metrics
# absolute_dynamic_power_of_component = absolute_total_power_of_component * how_much_pct_of_component_power_is_dynamic
# absolute_static_power_of_component = absolute_total_power_of_component - absolute_dynamic_power_of_component
# result_dict["compute_ram"+"_abs_total_power"] = absolute_total_power_of_component
# result_dict["compute_ram"+"_abs_dynamic_power"] = absolute_dynamic_power_of_component
# result_dict["compute_ram"+"_abs_static_power"] = absolute_static_power_of_component
# result_dict["compute_ram"+"_pct_total_power"] = how_much_pct_of_circuit_power_is_this_component
# result_dict["compute_ram"+"_pct_dynamic_power"] = result_dict["compute_ram"+"_abs_dynamic_power"] / absolute_dynamic_power_of_circuit
# result_dict["compute_ram"+"_pct_static_power"] = result_dict["compute_ram"+"_abs_static_power"] / absolute_static_power_of_circuit
# result_dict["storage_ram"+"_abs_total_power"] = result_dict["memory"+"_abs_total_power"] - result_dict["compute_ram"+"_abs_total_power"]
# result_dict["storage_ram"+"_abs_dynamic_power"] = result_dict["memory"+"_abs_dynamic_power"] - result_dict["compute_ram"+"_abs_dynamic_power"]
# result_dict["storage_ram"+"_abs_static_power"] = result_dict["memory"+"_abs_static_power"] - result_dict["compute_ram"+"_abs_static_power"]
# result_dict["storage_ram"+"_pct_total_power"] = result_dict["storage_ram"+"_abs_total_power"] / absolute_total_power_of_circuit
# result_dict["storage_ram"+"_pct_dynamic_power"] = result_dict["storage_ram"+"_abs_dynamic_power"] / absolute_dynamic_power_of_circuit
# result_dict["storage_ram"+"_pct_static_power"] = result_dict["storage_ram"+"_abs_static_power"] / absolute_static_power_of_circuit
# power_results_filehandle.close()
#----------------------------
#clean up the directory
#----------------------------
if result_dict['pre_vpr_blif_found'] == "No" \
or result_dict['vpr_results_found'] == "No" \
or result_dict['parse_results_found'] == "No":
print("One of the log files required was not found")
else:
print("Parsing complete. Deleting logs/temp files")
#Delete temp files except the 3 we need
os.system("rm -rf " + dirname +"/" + run_num + "/*/*/*/*odin.blif")
os.system("rm -rf " + dirname +"/" + run_num + "/*/*/*/*abc.blif")
os.system("rm -rf " + dirname +"/" + run_num + "/*/*/*/*.net")
os.system("rm -rf " + dirname +"/" + run_num + "/*/*/*/*.place")
os.system("rm -rf " + dirname +"/" + run_num + "/*/*/*/*.route")
os.system("rm -rf " + dirname +"/" + run_num + "/*/*/*/*.post_routing")
#append the current results to the main result list
self.result_list.append(result_dict)
# ###############################################################
# main()
# ###############################################################
if __name__ == "__main__":
    # Script entry point: constructing GenResults (defined elsewhere in this
    # module) runs the whole parse-and-report flow as a side effect.
    GenResults()
| StarcoderdataPython |
11312849 | <filename>tests/test_robot.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: kakakaya, Date: Thu Dec 15 13:14:32 2016
# from pprint import pprint as p
from unittest import TestCase
from nose.tools import ok_, eq_, raises
from boppy import robot
from boppy.adapter.stdio import StdinInput, StdoutOutput
class TestRobot(TestCase):
    """Smoke tests for boppy's Robot wired to the stdio adapters."""

    def setUp(self):
        """Create a Robot reading from stdin and writing to stdout."""
        outputs = [StdoutOutput()]
        self.robot = robot.Robot(src=StdinInput(), dst=outputs)

    def test_init(self):
        """Construction succeeded: the robot instance is truthy."""
        ok_(self.robot)
3213755 | #!/usr/bin/env python3
# coding = utf-8
import os
import unittest as ut
import numpy as np
from mykit.core.utils import get_matched_files
from mykit.vasp.xml import Vasprunxml, VasprunxmlError
class test_vasprunxml_read(ut.TestCase):
def test_scf_xml(self):
'''Test reading XMLs for SCF calculations (LORBIT not set)
'''
dataDir = 'vasprun_scf'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
typeMapping = vxml.typeMapping
# get all index
self.assertListEqual(list(range(vxml.natoms)),
vxml.get_atom_index())
self.assertFalse(vxml.edos is None)
self.assertFalse(vxml.totalDos is None)
self.assertFalse(vxml.dos is None)
# static calculation, there is only 1 ion step.
self.assertEqual(1, vxml.nIonSteps)
self.assertTupleEqual(np.shape(vxml.forces),
(vxml.nIonSteps, vxml.natoms, 3))
self.assertTupleEqual(np.shape(vxml.stress),
(vxml.nIonSteps, 3, 3))
self.assertEqual(1, len(vxml.interPoscars))
vxml.ntypes
vxml.natomsPerType
vxml.get_atom_index(0)
vxml.get_atom_index(-1)
vxml.get_atom_index(typeMapping[0])
self.assertRaisesRegex(VasprunxmlError,
r"Atom type not found: *",
vxml.get_atom_index, "UNKNOWNSYMBOL")
# empty properties
self.assertTrue(vxml.projs is None)
self.assertEqual(0, vxml.nprojs)
self.assertTrue(vxml.pDos is None)
self.assertTrue(vxml.pWave is None)
def test_band_xml(self):
'''Test reading XMLs for band calculations (LORBIT set or not)
'''
dataDir = 'vasprun_band'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
self.assertEqual(vxml.kmode, "L")
self.assertTupleEqual(np.shape(vxml.weight), (vxml.nibzkpt,))
self.assertTupleEqual(np.shape(vxml.kpoints), (vxml.nibzkpt, 3))
self.assertTupleEqual(np.shape(vxml.kptsWeight), (vxml.nibzkpt, 4))
bs = vxml.load_band()
self.assertAlmostEqual(bs.nelect, vxml.nelect, places=4)
self.assertTrue(bs.hasKvec)
self.assertTrue(bs.isKpath)
bs.kvec
bsTrimed = vxml.load_band(1)
self.assertEqual(1, bs.nkpts - bsTrimed.nkpts)
def test_mixed_k_band_xml(self):
'''Test reading XMLs for band calculations with manual input kpoints
in case of SCAN and HF band calculations
'''
dataDir = 'vasprun_mixed_k_band'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
bsMix = vxml.load_band()
bsBand = vxml.load_band(kTrimBefore=20)
self.assertEqual(bsMix.nkpts - bsBand.nkpts, 20)
self.assertTrue(np.allclose(bsBand.weight, np.ones(bsBand.nkpts)))
self.assertTrue(bsBand.isKpath)
def test_opt_xml(self):
'''Test reading XMLs for geometry optimization
'''
dataDir = 'vasprun_opt'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
vxml = Vasprunxml(fn)
self.assertTupleEqual(np.shape(vxml.forces), \
(vxml.nIonSteps, vxml.natoms, 3))
def test_pdos_xml(self):
'''Test reading XMLs with LORBIT set
'''
dataDir = 'vasprun_partial'
dataDirPath = os.path.join(os.path.dirname(__file__), '..',
'testdata', 'vasp', dataDir)
for fn in get_matched_files(dataDirPath, r"vasprun*"):
msg = "Wrong when processing {}".format(fn)
vxml = Vasprunxml(fn)
self.assertFalse(vxml.pDos is None, msg=msg)
bs = vxml.load_band()
self.assertAlmostEqual(bs.nelect, vxml.nelect, places=4, msg=msg)
self.assertTrue(bs.hasProjection, msg=msg)
# Dos related
dos = vxml.load_dos()
self.assertEqual(dos.nspins, bs.nspins, msg=msg)
self.assertTrue(dos.hasProjection, msg=msg)
if __name__ == '__main__':
    # Discover and run the test cases above when executed directly.
    ut.main()
| StarcoderdataPython |
5077211 | <reponame>DmPo/Schemaorg_CivicOS
import os
import unittest
from support import html5lib_test_files, TestData, test_dir
from html5lib import HTMLParser, inputstream
import re, unittest
class Html5EncodingTestCase(unittest.TestCase):
    """Tests for html5lib's encoding-label normalisation.

    Additional per-file test methods are attached dynamically by
    buildTestSuite() below.
    """

    def test_codec_name(self):
        """codecName canonicalises aliases and tolerates stray punctuation."""
        # assertEqual: the "assertEquals" spelling is a deprecated alias.
        self.assertEqual(inputstream.codecName("utf-8"), "utf-8")
        self.assertEqual(inputstream.codecName("utf8"), "utf-8")
        self.assertEqual(inputstream.codecName(" utf8 "), "utf-8")
        # Latin-1-style labels are treated as windows-1252, matching HTML's
        # encoding-sniffing rules.
        self.assertEqual(inputstream.codecName("ISO_8859--1"), "windows-1252")
def buildTestSuite():
    """Attach one generated test method per encoding test case to
    Html5EncodingTestCase and return the resulting suite.

    Also adds a chardet-based sniffing test when the optional ``chardet``
    package is importable.
    """
    for filename in html5lib_test_files("encoding"):
        # Derive a legal method-name suffix from the .dat file name.
        test_name = os.path.basename(filename).replace('.dat', ''). \
            replace('-', '')
        tests = TestData(filename, "data")
        for idx, test in enumerate(tests):
            # Bind the per-case values as default arguments so each generated
            # method captures its own data/encoding instead of the loop
            # variables (late-binding closure pitfall).
            def encodingTest(self, data=test['data'],
                             encoding=test['encoding']):
                p = HTMLParser()
                p.parse(data, useChardet=False)
                errorMessage = ("Input:\n%s\nExpected:\n%s\nReceived\n%s\n" %
                                (data, repr(encoding.lower()),
                                 repr(p.tokenizer.stream.charEncoding)))
                self.assertEqual(encoding.lower(),
                                 p.tokenizer.stream.charEncoding[0],
                                 errorMessage)
            setattr(Html5EncodingTestCase, 'test_%s_%d' % (test_name, idx + 1),
                    encodingTest)
    try:
        import chardet  # only probing for availability
        def test_chardet(self):
            data = open(os.path.join(test_dir, "encoding", "chardet", "test_big5.txt")).read()
            encoding = inputstream.HTMLInputStream(data).charEncoding
            assert encoding[0].lower() == "big5"
        setattr(Html5EncodingTestCase, 'test_chardet', test_chardet)
    except ImportError:
        # print("...") parses identically under Python 2 and Python 3 for a
        # single argument, unlike the bare print statement used before.
        print("chardet not found, skipping chardet tests")
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
    # buildTestSuite() attaches the generated tests to Html5EncodingTestCase
    # via setattr (its return value is unused here); unittest.main() then
    # collects and runs everything defined in this module.
    buildTestSuite()
    unittest.main()
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    main()
| StarcoderdataPython |
5112561 | <reponame>dfki-asr/MMIPython-Core<gh_stars>0
## SPDX-License-Identifier: MIT
## The content of this file has been developed in the context of the MOSIM research project.
## Original author(s): <NAME>, <NAME>
from MOSIM.core.utils.thrift_client import ThriftClient
from MOSIM.mmi.services import MSkeletonAccess
def initialize(register_ip_address) -> MSkeletonAccess:
    """Connect to the skeleton-access service at ``register_ip_address``
    (an object exposing ``Address`` and ``Port``) and return the raw
    thrift service client.

    NOTE(review): the context manager is entered manually and never
    exited, so the underlying transport stays open for the lifetime of
    the process — presumably intentional; confirm with the callers.
    """
    skeleton_client = ThriftClient(register_ip_address.Address,
                                   register_ip_address.Port,
                                   MSkeletonAccess.Client)
    # Manually open the transport; admittedly unclean, kept as-is for
    # behavioural parity (original code carried the same caveat).
    skeleton_client.__enter__()
    return skeleton_client._access
| StarcoderdataPython |
6410222 | <reponame>tschalch/pyTray
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/enums.py
__version__=''' $Id: enums.py,v 1.1 2006/05/26 19:19:44 thomas Exp $ '''
__doc__="""
holder for all reportlab's enumerated types
"""
# Paragraph horizontal-alignment constants (part of ReportLab's public API,
# so the values must not change).
TA_LEFT = 0
TA_CENTER = 1
TA_RIGHT = 2
# NOTE(review): 3 is skipped; TA_JUSTIFY has the value 4 in this API.
TA_JUSTIFY = 4
4986660 | import re
from typing import Dict, List, Optional, cast
import requests
from faaspact_verifier import abc
from faaspact_verifier.definitions import Pact, VerificationResult
from faaspact_verifier.exceptions import PactBrokerError
class PactBrokerGateway(abc.PactBrokerGateway):
"""Gateway to a pact broker."""
def __init__(self, host: str, username: str, password: str) -> None:
self.host = host
self.username = username
self.password = password
def fetch_provider_pacts(self, provider: str) -> List[Pact]:
all_pacts = self._fetch_latest_provider_pacts(provider)
master_pacts = self._fetch_latest_provider_pacts(provider, tag='master')
return all_pacts + master_pacts
def provide_verification_results(self,
provider_version: str,
pact: Pact,
verification_results: List[VerificationResult]) -> None:
success = all(verification_result.verified for verification_result in verification_results)
url = (f'{self.host}/pacts/provider/{pact.provider_name}/consumer/{pact.consumer_name}'
f'/pact-version/{pact.pact_version}/verification-results')
data = {
'success': success,
'providerApplicationVersion': provider_version
}
r = requests.post(url, json=data, auth=(self.username, self.password))
if not r.status_code == requests.codes.created:
raise PactBrokerError(f'{r.status_code}: {r.text}')
def _fetch_latest_provider_pacts(self, provider: str, tag: Optional[str] = None) -> List[Pact]:
url = f'{self.host}/pacts/provider/{provider}/latest' + (f'/{tag}' if tag else '')
r = requests.get(url, auth=(self.username, self.password))
pact_hrefs = [pact['href'] for pact in r.json()['_links']['pb:pacts']]
return [self._fetch_pact_by_href(pact_href) for pact_href in pact_hrefs]
def _fetch_pact_by_href(self, href: str) -> Pact:
r = requests.get(href, auth=(self.username, self.password))
raw_pact = r.json()
consumer_version_href = raw_pact['_links']['pb:consumer-version']['href']
r = requests.get(consumer_version_href, auth=(self.username, self.password))
raw_consumer_version = r.json()
return _pluck_pact(raw_pact, raw_consumer_version)
def _pluck_pact(raw_pact: Dict, raw_consumer_version: Dict) -> Pact:
tags = [tag['name'] for tag in raw_consumer_version['_embedded']['tags']]
consumer_version = raw_consumer_version['number']
pact_json = {field: value for field, value in raw_pact.items()
if field not in ('createdAt', '_links')}
return Pact(
consumer_version=consumer_version,
pact_version=_pluck_pact_version(raw_pact),
tags=frozenset(tags),
pact_json=pact_json
)
def _pluck_pact_version(raw_pact: Dict) -> str:
publish_href = raw_pact['_links']['pb:publish-verification-results']['href']
match = re.search(r'/pact-version/(?P<provider_version>\w+)/verification-results', publish_href)
if not match:
raise RuntimeError(f'Failed to pluck pact version from pact {raw_pact}')
return cast(str, match.group('provider_version'))
# i didn't realize that 'pact versions' existed while making this. maybe pacts can be merged
# on pact version, where multiple tags _and_ multiple consumer versions can be part of one pact.
#
# def _merge_tagged_pacts(pacts: List[Pact]) -> List[Pact]:
# """
# >>> a = Pact(consumer_version='x', pact_json={}, tags=frozenset(['feature-a']))
# >>> b = Pact(consumer_version='y', pact_json={}, tags=frozenset(['feature-b']))
# >>> c = Pact(consumer_version='x', pact_json={}, tags=frozenset(['master']))
# >>> _merge_tagged_pacts([a, b, c]) == (
# ... [Pact(consumer_version='y', pact_json={}, tags=frozenset({'feature-b'})),
# ... Pact(consumer_version='x', pact_json={}, tags=frozenset({'feature-a', 'master'}))])
# True
# """
# pact_by_version: Dict[str, Pact] = {}
# for pact in pacts:
# if pact.consumer_version not in pact_by_version:
# pact_by_version[pact.consumer_version] = pact
# else:
# existing_pact = pact_by_version.pop(pact.consumer_version)
# merged_pact = Pact(
# consumer_version=pact.consumer_version,
# tags=pact.tags | existing_pact.tags,
# pact_json=pact.pact_json
# )
# pact_by_version[pact.consumer_version] = merged_pact
# return list(pact_by_version.values())
| StarcoderdataPython |
120939 | <reponame>zqngetsu96/PyForex
import pandas as pd
import numpy as np
from scipy.signal import argrelextrema
def peak_detect(price):
    """Locate local extrema of *price* and return the ten most recent ones.

    Returns (indices, price values at those indices, min index, max index).
    *price* must support numpy fancy indexing (ndarray / pandas values).
    """
    # Relative maxima/minima, comparing each sample to 3 neighbours per side.
    maxima = list(argrelextrema(price, np.greater, order=3)[0])
    minima = list(argrelextrema(price, np.less, order=3)[0])
    # Always include the final sample so the pattern extends to "now".
    extrema = sorted(maxima + minima + [len(price) - 1])
    current_idx = extrema[-10:]
    current_pat = price[current_idx]
    return current_idx, current_pat, min(current_idx), max(current_idx)
def is_Gartley(moves, err_allowed):
    """Classify *moves* = [XA, AB, BC, CD] legs as a Gartley pattern.

    Returns 1 for a bullish Gartley, -1 for a bearish one and np.nan when
    the legs do not alternate direction or miss the Fibonacci windows.
    Fix: uses np.nan (the np.NaN alias was removed in NumPy 2.0).
    """
    XA, AB, BC, CD = moves[0], moves[1], moves[2], moves[3]
    # Acceptable retracement windows, widened by err_allowed on both sides.
    AB_range = np.array([0.618 - err_allowed, 0.618 + err_allowed]) * abs(XA)
    BC_range = np.array([0.382 - err_allowed, 0.886 + err_allowed]) * abs(AB)
    CD_range = np.array([1.27 - err_allowed, 1.618 + err_allowed]) * abs(BC)
    bullish = XA > 0 and AB < 0 and BC > 0 and CD < 0
    bearish = XA < 0 and AB > 0 and BC < 0 and CD > 0
    if not (bullish or bearish):
        return np.nan
    ratios_ok = (AB_range[0] < abs(AB) < AB_range[1]
                 and BC_range[0] < abs(BC) < BC_range[1]
                 and CD_range[0] < abs(CD) < CD_range[1])
    if not ratios_ok:
        return np.nan
    return 1 if bullish else -1
def is_Butterfly(moves, err_allowed):
    """Classify *moves* = [XA, AB, BC, CD] legs as a Butterfly pattern.

    Returns 1 (bullish), -1 (bearish) or np.nan (no match).
    Fix: uses np.nan (the np.NaN alias was removed in NumPy 2.0).
    """
    XA, AB, BC, CD = moves[0], moves[1], moves[2], moves[3]
    AB_range = np.array([0.786 - err_allowed, 0.786 + err_allowed]) * abs(XA)
    BC_range = np.array([0.382 - err_allowed, 0.886 + err_allowed]) * abs(AB)
    CD_range = np.array([1.618 - err_allowed, 2.618 + err_allowed]) * abs(BC)
    bullish = XA > 0 and AB < 0 and BC > 0 and CD < 0
    bearish = XA < 0 and AB > 0 and BC < 0 and CD > 0
    if not (bullish or bearish):
        return np.nan
    ratios_ok = (AB_range[0] < abs(AB) < AB_range[1]
                 and BC_range[0] < abs(BC) < BC_range[1]
                 and CD_range[0] < abs(CD) < CD_range[1])
    if not ratios_ok:
        return np.nan
    return 1 if bullish else -1
def is_Bat(moves, err_allowed):
    """Classify *moves* = [XA, AB, BC, CD] legs as a Bat pattern.

    Returns 1 (bullish), -1 (bearish) or np.nan (no match).
    Fix: uses np.nan (the np.NaN alias was removed in NumPy 2.0).
    """
    XA, AB, BC, CD = moves[0], moves[1], moves[2], moves[3]
    AB_range = np.array([0.382 - err_allowed, 0.5 + err_allowed]) * abs(XA)
    BC_range = np.array([0.382 - err_allowed, 0.886 + err_allowed]) * abs(AB)
    CD_range = np.array([1.618 - err_allowed, 2.618 + err_allowed]) * abs(BC)
    bullish = XA > 0 and AB < 0 and BC > 0 and CD < 0
    bearish = XA < 0 and AB > 0 and BC < 0 and CD > 0
    if not (bullish or bearish):
        return np.nan
    ratios_ok = (AB_range[0] < abs(AB) < AB_range[1]
                 and BC_range[0] < abs(BC) < BC_range[1]
                 and CD_range[0] < abs(CD) < CD_range[1])
    if not ratios_ok:
        return np.nan
    return 1 if bullish else -1
def is_Crab(moves, err_allowed):
    """Classify *moves* = [XA, AB, BC, CD] legs as a Crab pattern.

    Returns 1 (bullish), -1 (bearish) or np.nan (no match).
    Fix: uses np.nan (the np.NaN alias was removed in NumPy 2.0).
    """
    XA, AB, BC, CD = moves[0], moves[1], moves[2], moves[3]
    AB_range = np.array([0.382 - err_allowed, 0.618 + err_allowed]) * abs(XA)
    BC_range = np.array([0.382 - err_allowed, 0.886 + err_allowed]) * abs(AB)
    CD_range = np.array([2.24 - err_allowed, 3.618 + err_allowed]) * abs(BC)
    bullish = XA > 0 and AB < 0 and BC > 0 and CD < 0
    bearish = XA < 0 and AB > 0 and BC < 0 and CD > 0
    if not (bullish or bearish):
        return np.nan
    ratios_ok = (AB_range[0] < abs(AB) < AB_range[1]
                 and BC_range[0] < abs(BC) < BC_range[1]
                 and CD_range[0] < abs(CD) < CD_range[1])
    if not ratios_ok:
        return np.nan
    return 1 if bullish else -1
4923069 | """bookr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from bookr.views import profile, download_user_book_data
# Project-level URL configuration: each app keeps its own urls module and
# is mounted here at a prefix via include(); a few views are wired directly.
urlpatterns = [
    # `include()` is a shortcut that allows you to combine URL configurations.
    # It is common to keep one URL configuration per application in your Django project.
    # Here, we've created a separate URL configuration for the `reviews` app and have
    # added it to our project-level URL configuration.
    # `admin/` does not need include(): `admin.site.urls` is already a
    # (urlconf, app_namespace) tuple, which path() accepts directly.
    path("", include('reviews.urls')),
    path("books/download", download_user_book_data, name="download_user_book_data"),
    path("admin/", admin.site.urls),
    path(
        "accounts/", include(('django.contrib.auth.urls', 'auth'), namespace='accounts')
    ),
    path("accounts/profile/", profile, name="profile"),
    path("filter_demo/", include('filter_demo.urls')),
    path("book_management/", include('book_management.urls')),
]

if settings.DEBUG:
    # Debug-toolbar routes are prepended rather than appended -- presumably so
    # '__debug__/' is matched before the catch-all '' include above; confirm
    # against the reviews app's URL patterns.
    import debug_toolbar

    urlpatterns = [path('__debug__/', include(debug_toolbar.urls))] + urlpatterns
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
5196259 | from typing import List
from chaoslib.discovery.discover import (discover_probes, discover_actions,
initialize_discovery_result)
from chaoslib.types import DiscoveredActivities, Discovery
from logzero import logger
__version__ = '1.0.6-dev0'
def discover(discover_system: bool = True) -> Discovery:
    """
    Discover Proofdock capabilities from this extension.
    """
    logger.info("Discovering capabilities from pdchaoskit")
    result = initialize_discovery_result(
        "proofdock-chaos-kit", __version__, "pdchaoskit")
    result["activities"].extend(load_exported_activities())
    return result
###############################################################################
# Private functions
###############################################################################
def load_exported_activities() -> List[DiscoveredActivities]:
    """
    Extract metadata from actions and probes exposed by this extension.
    """
    discovered = list(discover_probes("pdchaoskit.alert.probes"))
    discovered.extend(discover_actions("pdchaoskit.app.actions"))
    return discovered
| StarcoderdataPython |
1749828 | <gh_stars>1-10
from dagster import job
# This job will run with multiprocessing execution (dagster's default
# executor when no execution config is supplied).
@job
def do_it_all():
    # Placeholder body: ops would be composed here.
    ...
# This job will run with in-process execution: the run config pins the
# executor to `in_process`, so all ops share a single process.
@job(config={"execution": {"config": {"in_process": {}}}})
def do_it_all_in_proc():
    ...
| StarcoderdataPython |
7963 | <reponame>grace1307/lan_mapper<filename>app/services/base.py
from app.db import db
# Ignore it if db can't find the row when updating/deleting
# Todo: not ignore it, raise some error, remove checkers in view
class BaseService:
    """Generic CRUD base for services backed by a single SQLAlchemy model.

    Subclasses set ``model`` to the mapped class they manage.  Missing rows
    are silently ignored on update/delete (see the module TODO above).
    """
    __abstract__ = True
    model = None  # overridden by subclasses with the mapped model class

    # Create
    def add_one(self, **kwargs):
        # Build a new instance from the given column values and persist it.
        new_row = self.model(**kwargs)
        db.session.add(new_row)
        db.session.commit()  # sqlalchemy auto flushes so maybe this just need commit ?
        return new_row

    # Read
    def select_one(self, id):
        # Row with this primary key, or None when absent.
        return self.model.query.filter(self.model.id == id).one_or_none()

    def select_all(self, conditions: list = None, sort_by=None, is_asc=None):
        # conditions: iterable of SQLAlchemy filter expressions, ANDed in order.
        # sort_by / is_asc: column name plus the literal string 'true'/'false'
        # (they arrive straight from a query string); both must be given.
        query = db.session.query(self.model)

        if conditions is not None:
            for condition in conditions:
                query = query.filter(condition)

        if sort_by is not None and is_asc is not None:
            # NOTE(review): `_columns` is a private SQLAlchemy attribute and
            # indexing raises KeyError for an unknown name rather than giving
            # None -- the None check below likely never triggers; confirm.
            sort_column = self.model.__table__._columns[sort_by]
            is_asc = is_asc == 'true'  # coerce the query-string flag to bool

            if sort_column is not None:
                query = query.order_by(sort_column.asc() if is_asc else sort_column.desc())

        return query.all()

    # Update
    def update_one(self, id, updated):
        # `updated` maps columns to new values; returns the refreshed row,
        # or None when the id does not exist (silently ignored).
        row = self.model.query.filter(self.model.id == id)
        row_result = row.one_or_none()

        if row_result is not None:
            row.update(updated)
            db.session.commit()

        return row.one_or_none()

    # Delete
    def delete_one(self, id):
        # Deletes the row when present; missing ids are silently ignored.
        row = self.select_one(id)

        if row is not None:
            db.session.delete(row)
            db.session.commit()
| StarcoderdataPython |
243306 | <reponame>tris10au/sshclip
from sshclip import devices
import pyperclip
import click
import time
def get_last_modified_time(device, clip_path):
    """Return the mtime of *clip_path* on *device*, or None when it is absent."""
    try:
        stat_result = device.stat(clip_path)
    except FileNotFoundError:
        return None
    return stat_result.st_mtime
def run_sshclip(device, clip_path=None, verbose=False, delay=1):
    """Synchronise the local clipboard with *device*'s clip file, forever.

    Polls every *delay* seconds.  A newer remote file wins and overwrites
    the local clipboard; otherwise a changed, non-empty local clipboard is
    pushed to the device.  This function never returns.
    """
    if verbose:
        echo = click.echo
    else:
        echo = lambda s: None

    if clip_path is None:
        clip_path = device.find_clip_path()
    echo("[{name}] Starting SSHCLIP (write path: {path})".format(path=clip_path, name=device.NAME))
    previous_clipboard = pyperclip.paste() or ""
    previous_update = get_last_modified_time(device, clip_path)
    while True:
        latest_update = get_last_modified_time(device, clip_path)
        latest_clipboard = pyperclip.paste() or ""

        if latest_update is not None and (previous_update is None or previous_update < latest_update):
            # Remote side changed first: pull it into the local clipboard.
            previous_update = latest_update
            previous_clipboard = device.read(clip_path)
            pyperclip.copy(previous_clipboard)
            echo("Updating from other device: {0}".format(previous_clipboard))
        elif latest_clipboard != previous_clipboard and len(latest_clipboard) > 0:
            # Local clipboard changed: push it to the device.
            device.write(clip_path, latest_clipboard)
            previous_clipboard = latest_clipboard
            # A bug on macOS causes the clipboard to clear after 1 sec. Use the server to update it instead
            #previous_update = get_last_modified_time(device, clip_path)
            echo("Updating to other device: {0}, {1}, {2}".format(previous_clipboard, latest_clipboard, previous_update))

        time.sleep(delay)
# Top-level click group; the `client` and `server` sub-commands attach here.
@click.group()
def cli():
    pass
# Client mode: connect to HOST over SSH and sync against its clip file.
# CLIP_PATH is optional; when omitted the device discovers it itself.
@cli.command()
@click.argument("host")
@click.argument("clip_path", default=None, required=False)
@click.option("--verbose/--no-verbose", default=False)
@click.option("--delay", default=1.11)
def client(clip_path=None, host=None, verbose=False, delay=1.11):
    with devices.ClientDevice(host) as device:
        run_sshclip(device, clip_path, verbose, delay)
# Server mode: sync against the local clip file with a faster poll interval.
@cli.command()
@click.argument("clip_path", default=None, required=False)
@click.option("--verbose/--no-verbose", default=False)
@click.option("--delay", default=0.2)
def server(clip_path=None, verbose=False, delay=0.2):
    with devices.ServerDevice() as device:
        run_sshclip(device, clip_path, verbose, delay)
| StarcoderdataPython |
11224036 | <reponame>jialin-wu-02/skyportal<filename>skyportal/handlers/api/filter.py
from marshmallow.exceptions import ValidationError
from baselayer.app.access import auth_or_token, permissions
from ..base import BaseHandler
from ...models import (
DBSession,
Filter,
)
class FilterHandler(BaseHandler):
    # REST handler for Filter rows.  The YAML block in each docstring is the
    # OpenAPI spec rendered by the API documentation tooling -- keep it valid.
    @auth_or_token
    def get(self, filter_id=None):
        """
        ---
        single:
          description: Retrieve a filter
          parameters:
            - in: path
              name: filter_id
              required: true
              schema:
                type: integer
          responses:
            200:
              content:
                application/json:
                  schema: SingleFilter
            400:
              content:
                application/json:
                  schema: Error
        multiple:
          description: Retrieve all filters
          responses:
            200:
              content:
                application/json:
                  schema: ArrayOfFilters
            400:
              content:
                application/json:
                  schema: Error
        """
        if filter_id is not None:
            # Single fetch; get_if_owned_by enforces group ownership.
            f = Filter.get_if_owned_by(filter_id, self.current_user)
            if f is None:
                return self.error("Invalid filter ID.")
            return self.success(data=f)

        # No id given: list every filter belonging to the user's groups.
        filters = (
            DBSession.query(Filter)
            .filter(Filter.group_id.in_([g.id for g in self.current_user.groups]))
            .all()
        )
        return self.success(data=filters)

    @permissions(["Manage groups"])
    def post(self):
        """
        ---
        description: POST a new filter.
        requestBody:
          content:
            application/json:
              schema: FilterNoID
        responses:
          200:
            content:
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            id:
                              type: integer
                              description: New filter ID
        """
        data = self.get_json()
        schema = Filter.__schema__()
        try:
            # Marshmallow validates and deserializes into a Filter instance.
            fil = schema.load(data)
        except ValidationError as e:
            return self.error(
                "Invalid/missing parameters: " f"{e.normalized_messages()}"
            )
        DBSession.add(fil)
        DBSession().commit()
        return self.success(data={"id": fil.id})

    @permissions(["Manage groups"])
    def patch(self, filter_id):
        """
        ---
        description: Update a filter
        parameters:
          - in: path
            name: filter_id
            required: True
            schema:
              type: integer
        requestBody:
          content:
            application/json:
              schema: FilterNoID
        responses:
          200:
            content:
              application/json:
                schema: Success
          400:
            content:
              application/json:
                schema: Error
        """
        data = self.get_json()
        data["id"] = filter_id
        schema = Filter.__schema__()
        try:
            # NOTE(review): load() result is discarded here -- the update
            # presumably happens via the schema's load into the session;
            # confirm this actually persists the changes before commit.
            schema.load(data)
        except ValidationError as e:
            return self.error('Invalid/missing parameters: '
                              f'{e.normalized_messages()}')
        DBSession().commit()
        return self.success()

    @permissions(["Manage groups"])
    def delete(self, filter_id):
        """
        ---
        description: Delete a filter
        parameters:
          - in: path
            name: filter_id
            required: true
            schema:
              type: integer
        responses:
          200:
            content:
              application/json:
                schema: Success
        """
        # NOTE(review): Filter.query.get() yields None for an unknown id and
        # DBSession.delete(None) raises -- consider validating like get().
        DBSession.delete(Filter.query.get(filter_id))
        DBSession().commit()
        return self.success()
| StarcoderdataPython |
3532562 | <reponame>Pixep/qml-files-combiner
import os
# Smoke-test combine-qml.py by shelling out to it: first print its usage,
# then combine tests/main-base.qml into tests/main.qml registering the
# Button component.  (Python 2 print statements; commands are fixed
# strings, so os.system is acceptable here.)
print "-----"
os.system("python combine-qml.py --help")
print "-----"
os.system("python combine-qml.py -v tests/main-base.qml tests/main.qml -c tests/Button.qml Button")
| StarcoderdataPython |
3358063 | # -*- coding: utf-8 -*-
"""
Created in Aug 2016
@author: <NAME> (ComplexCity, EIVP, KTH)
"""
#
# This script investigates the surrounding built environment of flickr photo locations thanks to 13 precise queries.
# It was implemented in order to build a statistic model to describe stress feeling and link it with the built environment.
# Thus, it provides an export solution with a JSON file output.
#
import json
import ijson
import shapely
from shapely.geometry import Point, LineString, Polygon, MultiLineString, MultiPolygon
from shapely.wkt import dumps, loads
from osgeo import ogr, osr
import time
import decimal
start_time = decimal.Decimal(time.time())  # wall-clock start of the run

# Prepare your photos for the queries of the environment surrounding them
#
#
from more_itertools import unique_everseen

# load the flickr data to store the locations of the photos
list_point_coordinates1 = []
with open('not_stress_photos3_stress.json', 'r') as fp:
    data = json.load(fp)
    for feature in data:
        list_point_coordinates1.append((float(feature['longitude']), float(feature['latitude'])))
# remove the duplicates (order-preserving)
list_point_coordinates2 = list(unique_everseen(list_point_coordinates1))
# store the photo location as a georeferenced point
# NOTE(review): the loop variable `coordinates` is later shadowed by the
# coordinates() function defined below; harmless since the def runs after
# this loop, but worth renaming.
list_point = []
for coordinates in list_point_coordinates2:
    list_point.append(shapely.geometry.Point(coordinates))
# transformation of a point to fit the selected projection system and give coordinates as an output
def coordinates(point):
    """Project a shapely *point* (EPSG:4326 lon/lat) to EPSG:26918 (UTM 18N).

    Returns the projected (x, y) coordinate pair.
    """
    geom = ogr.CreateGeometryFromWkt(dumps(point))
    # Source / target spatial reference systems for the transformation.
    wgs84 = osr.SpatialReference()
    wgs84.ImportFromEPSG(4326)
    utm18n = osr.SpatialReference()
    utm18n.ImportFromEPSG(26918)
    geom.Transform(osr.CoordinateTransformation(wgs84, utm18n))
    return geom.GetX(), geom.GetY()
# Create the queries
# Query 1 : count the sidewalkcafes within 25 meters of the photo location
# Keep only permitted LineString cafe segments (already in EPSG:26918).
lines = []
with open('sidewalkcafe_26918.json') as fp:
    data = json.load(fp)
    for item in data['features']:
        if item['properties']['CafeType'] is not None:
            if item['geometry']['type'] == 'LineString':
                if item['properties']['CafeType'] != 'Not Permitted':
                    lines.append(LineString(item['geometry']['coordinates']))

# Spatial index over the cafe segments for fast bounding-box candidates.
from rtree import index
idx1 = index.Index()
for pos, line in enumerate(lines):
    idx1.insert(pos, line.bounds)
def count_cafes(point):
    """Number of permitted sidewalk-cafe segments within 25 m of *point*."""
    zone = Point(coordinates(point)).buffer(25)
    return sum(1 for j in idx1.intersection(zone.bounds) if lines[j].intersects(zone))
# Query 2 : count the benches within 20 meters of the photo location
# Bench points are already projected to EPSG:26918.
benches = []
with open('bench_26918.json') as fp:
    data = json.load(fp)
    for item in data['features']:
        benches.append(Point(item['geometry']['coordinates']))

# Spatial index over bench locations.
from rtree import index
idx2 = index.Index()
for pos, bench in enumerate(benches):
    idx2.insert(pos, bench.bounds)
def count_benches(point):
    """Number of benches within 20 m of *point*."""
    zone = Point(coordinates(point)).buffer(20)
    return sum(1 for j in idx2.intersection(zone.bounds) if benches[j].intersects(zone))
# Query 3 : inquire wether the photo location is situated within 50 meters of a waterfront
# Flatten each (multi)polygon ring into a shapely Polygon (EPSG:26918).
hydro_polygons = []
with open('hydrography_26918.geojson') as fp:
    data = json.load(fp)
    for feature in data['features']:
        for polygon in feature['geometry']['coordinates']:
            poly = []
            for coordinates1 in polygon:
                for coordinate in coordinates1:
                    poly.append((coordinate[0], coordinate[1]))
            hydro_polygons.append(Polygon(poly))

# Spatial index over the water polygons.
from rtree import index
idx3 = index.Index()
for pos, poly in enumerate(hydro_polygons):
    idx3.insert(pos, poly.bounds)
def hydro_criteria(photo):
    """Return 1 when *photo* lies within 50 m of any hydrography polygon, else 0.

    Bug fix: the original returned on the FIRST candidate produced by the
    r-tree (its else branch returned 0 immediately), so a non-intersecting
    first candidate hid later hits.  All candidates are now checked before
    concluding 0.
    """
    zone = Point(coordinates(photo)).buffer(50)
    for j in idx3.intersection(zone.bounds):
        if zone.intersects(hydro_polygons[j]):
            return 1
    return 0
# Query 4 : inquire wether the photo location is situated in a green space
# Note: unlike the hydrography file, rings here are one level shallower.
green_polygons = []
with open('green_spaces_fabien.geojson') as fp:
    data = json.load(fp)
    for feature in data['features']:
        for polygon in feature['geometry']['coordinates']:
            poly = []
            for coordinate in polygon:
                poly.append((coordinate[0], coordinate[1]))
            green_polygons.append(Polygon(poly))

# Spatial index over the green-space polygons.
from rtree import index
idx4 = index.Index()
for pos, poly in enumerate(green_polygons):
    idx4.insert(pos, poly.bounds)
def green_criteria(point):
    """Return 1 when *point* falls inside any green-space polygon, else 0.

    Bug fix: all r-tree candidates are now checked -- the original returned
    0 as soon as the first candidate failed to intersect.
    NOTE(review): unlike the other criteria, *point* is used unprojected
    here -- presumably the green-space file shares the point's CRS; confirm.
    """
    for j in idx4.intersection(point.coords[0]):
        if point.intersects(green_polygons[j]):
            return 1
    return 0
# Query 5 : count the number of trees within 20 meters of the photo location
# Stream the (large) trees file with ijson and project each tree from
# lon/lat (EPSG:4326) to EPSG:26918 before storing it.
trees = []
f = open('trees.json')
objects = ijson.items(f, 'features.item')
for obj in objects:
    Point2 = ogr.CreateGeometryFromWkt(dumps(shapely.geometry.Point(float(obj['properties']['longitude']), float(obj['properties']['latitude']))))
    source = osr.SpatialReference()
    source.ImportFromEPSG(4326)
    target = osr.SpatialReference()
    target.ImportFromEPSG(26918)
    transform = osr.CoordinateTransformation(source, target)
    Point2.Transform(transform)
    x2 = Point2.GetX()
    y2 = Point2.GetY()
    trees.append(shapely.geometry.Point(x2, y2))

# Spatial index over the projected tree points.
from rtree import index
idx5 = index.Index()
for pos, tree in enumerate(trees):
    idx5.insert(pos, tree.bounds)
def count_trees(point):
    """Number of street trees within 20 m of *point*."""
    zone = Point(coordinates(point)).buffer(20)
    return sum(1 for j in idx5.intersection(zone.bounds) if trees[j].intersects(zone))
# Query 6 : get the total built, floor area within 100 meters of the flickr photo location
# Query 7 : get the average age of the buildings located within 100 meters of the flickr photo location
# Query 8 : get the average roofheights of the buildings located within 100 meters of the flickr photo location
# Four parallel lists indexed by the same position: footprint geometry,
# roof height, construction year and footprint area.
batiments = []
heightroof = []
year = []
build_area = []
f = open('building_footprints.geojson')
objects = ijson.items(f, 'features.item')
for obj in objects:
    for polygon in obj['geometry']['coordinates']:
        poly = []
        for coordinates1 in polygon:
            for coordinate in coordinates1:
                poly.append((coordinate[0], coordinate[1]))
        # Project the footprint from EPSG:4326 to EPSG:26918 via WKT round-trip.
        wktPolygon = dumps(Polygon(poly))
        polygon1 = ogr.CreateGeometryFromWkt(wktPolygon)
        source = osr.SpatialReference()
        source.ImportFromEPSG(4326)
        target = osr.SpatialReference()
        target.ImportFromEPSG(26918)
        transform = osr.CoordinateTransformation(source, target)
        polygon1.Transform(transform)
        wktPolygon1 = polygon1.ExportToWkt()
        batiments.append(loads(wktPolygon1))
        heightroof.append(float(obj['properties']['heightroof']))
        year.append(float(obj['properties']['cnstrct_yr']))
        build_area.append(float(obj['properties']['shape_area']))

# Spatial index over the projected building footprints.
from rtree import index
idx6 = index.Index()
for pos, batiment in enumerate(batiments):
    idx6.insert(pos, batiment.bounds)
def area_criteria(point):
    """Total building footprint area (shape_area units) within 100 m of *point*."""
    zone = Point(coordinates(point)).buffer(100)
    areas = [build_area[j] for j in idx6.intersection(zone.bounds)
             if batiments[j].intersects(zone)]
    # No buildings nearby -> 0, matching the original fallback.
    return float(sum(areas)) if areas else 0
def age_criteria(point):
    """Average building age (years relative to 2016) within 100 m of *point*.

    Falls back to 63 when no building is nearby -- presumably a citywide
    average; confirm the source of that constant.
    """
    zone = Point(coordinates(point)).buffer(100)
    years = [year[j] for j in idx6.intersection(zone.bounds)
             if batiments[j].intersects(zone)]
    if not years:
        return 63
    return 2016 - (float(sum(years)) / float(len(years)))
def height_criteria(point):
    """Average roof height of buildings within 100 m of *point*.

    Falls back to 50 when no building is nearby -- presumably an average
    height; confirm the source of that constant.
    """
    zone = Point(coordinates(point)).buffer(100)
    heights = [heightroof[j] for j in idx6.intersection(zone.bounds)
               if batiments[j].intersects(zone)]
    if not heights:
        return 50
    return float(sum(heights)) / float(len(heights))
# Load the AADT (annual average daily traffic) route network and keep NYC
# LineString features, plus (apparently) stray MultiLineString features.
routes = []
with open('aadt_26918.json') as fp:
    data = json.load(fp)
    for feature in data['features']:
        if feature is not None:
            if feature['geometry'] is not None:
                if feature['properties']['MUNI'] in ['Manhattan', 'Bronx', 'Brooklyn', 'Queens', 'Staten Island', 'New York', 'CITY OF NEW YORK']:
                    if feature['geometry']['type'] is not None:
                        if feature['geometry']['type'] == 'LineString':
                            if feature['geometry']['coordinates'] is not None:
                                routes.append(feature)
                else:
                    # NOTE(review): comparing properties['MUNI'] (a borough
                    # name) to 'MultiLineString' looks like it was meant to
                    # test geometry['type'] -- as written this branch can
                    # only match if MUNI literally equals 'MultiLineString'.
                    if feature['properties']['MUNI'] == 'MultiLineString':
                        if feature['geometry']['coordinates'] is not None:
                            #if Point(coordinates(point)).buffer(100).intersects(MultiLineString(feature['geometry']['coordinates'])):
                            routes.append(feature)

# Spatial index over all retained routes, keyed by geometry type.
from rtree import index
idx = index.Index()
for pos, route in enumerate(routes):
    if route['geometry'] is not None:
        if route['geometry']['type'] == 'LineString':
            idx.insert(pos, LineString(route['geometry']['coordinates']).bounds)
        else:
            idx.insert(pos, MultiLineString(route['geometry']['coordinates']).bounds)
def dist(x1, y1, x2, y2, x3, y3):
    """Distance from point (x3, y3) to the segment (x1, y1)-(x2, y2).

    The projection parameter u is clamped to [0, 1] so the distance is
    measured to the segment itself, not to the infinite line.

    Bug fixes vs the original:
    * ``math`` was never imported in this module, so ``math.sqrt`` raised
      NameError on first call -- the hypotenuse is computed directly.
    * a zero-length segment no longer raises ZeroDivisionError; the
      distance to its single point is returned instead.
    """
    px = x2 - x1
    py = y2 - y1
    norm_sq = px * px + py * py
    if norm_sq == 0:
        # Degenerate segment: distance to its single endpoint.
        dx = x1 - x3
        dy = y1 - y3
        return (dx * dx + dy * dy) ** 0.5
    u = ((x3 - x1) * px + (y3 - y1) * py) / float(norm_sq)
    u = min(1, max(0, u))  # clamp the projection onto the segment
    dx = x1 + u * px - x3
    dy = y1 + u * py - y3
    return (dx * dx + dy * dy) ** 0.5
def dist_point_line(point, line):
    """Distance (m) from shapely *point* (EPSG:4326) to shapely *line*.

    The point is reprojected to EPSG:26918; the line is assumed to already
    be in that CRS.  NOTE(review): returns None implicitly when *line* is
    None or fails WKT parsing, and min() raises on a line with fewer than
    two vertices -- callers should be aware of both.
    """
    if line is not None:
        wktLine = dumps(line)
        # create geometries from wkt
        Line = ogr.CreateGeometryFromWkt(wktLine)
        if Line is not None:
            # transform both geometries to the fittest projection
            # (shadows the shapely Point name locally -- intentional here)
            Point = ogr.CreateGeometryFromWkt(dumps(point))
            source = osr.SpatialReference()
            source.ImportFromEPSG(4326)
            target = osr.SpatialReference()
            target.ImportFromEPSG(26918)
            transform = osr.CoordinateTransformation(source, target)
            Point.Transform(transform)
            # create a line for each point in the first geometry of the polygon
            # initialize
            x0 = Point.GetX()
            y0 = Point.GetY()
            distance1 = []
            # Minimum over the point-to-segment distance of every edge.
            for i in range(0, Line.GetPointCount() - 1):
                xi, yi, zi = Line.GetPoint(i)
                ai, bi, ci = Line.GetPoint(i + 1)
                # create line and check length
                distance1.append(dist(xi, yi, ai, bi, x0, y0))
            return min(distance1)
def dist_point_multi(point, multi):
    """Distance (m) from shapely *point* (EPSG:4326) to a MultiLineString.

    Same contract as dist_point_line, iterating every component line.
    NOTE(review): returns None implicitly when *multi* is None or fails WKT
    parsing; min() raises when the geometry has no segments.
    """
    if multi is not None:
        wktMulti = dumps(multi)
        # create geometries from wkt
        Multi = ogr.CreateGeometryFromWkt(wktMulti)
        # transform both geometries to the fittest projection
        if Multi is not None:
            Point = ogr.CreateGeometryFromWkt(dumps(point))
            # transform both geometries to the fittest projection
            source = osr.SpatialReference()
            source.ImportFromEPSG(4326)
            target = osr.SpatialReference()
            target.ImportFromEPSG(26918)
            transform = osr.CoordinateTransformation(source, target)
            Point.Transform(transform)
            # create a line for each point in the first geometry of the polygon
            # initialize
            x0 = Point.GetX()
            y0 = Point.GetY()
            distance1 = []
            # Minimum over every segment of every component line.
            for line in Multi:
                for i in range(0, line.GetPointCount() - 1):
                    xi, yi, zi = line.GetPoint(i)
                    ai, bi, ci = line.GetPoint(i + 1)
                    # create line and check length
                    distance1.append(dist(xi, yi, ai, bi, x0, y0))
            return min(distance1)
def intersecting_nearest_route(point):
    """Distance to the second-nearest route that intersects the nearest one.

    Returns 150 (sentinel) when nothing qualifies and 0 when the two
    distances coincide.  Several spots below look buggy -- flagged inline;
    the logic is left untouched pending confirmation of intent.
    """
    routes_list = []
    distances = []
    intersecting_routes = []
    a0 = 110   # sentinel: nearest-route distance when none found
    a1 = 150   # sentinel: returned distance when no intersecting route found
    # Collect all routes whose geometry comes within 100 m of the point.
    for j in idx.intersection(Point(coordinates(point)).buffer(100).bounds):
        if routes[j]['geometry']['type'] == 'LineString':
            if LineString(routes[j]['geometry']['coordinates']).intersects(Point(coordinates(point)).buffer(100)):
                routes_list.append(routes[j])
        else:
            if MultiLineString(routes[j]['geometry']['coordinates']).intersects(Point(coordinates(point)).buffer(100)):
                routes_list.append(routes[j])
    if len(routes_list) != 0:
        # NOTE(review): dispatch on properties['MUNI'] == 'MultiLineString'
        # looks wrong -- MUNI holds a borough name; geometry['type'] was
        # presumably intended (same issue repeated twice more below).
        for route in routes_list:
            if route['properties']['MUNI'] == 'MultiLineString':
                distances.append(dist_point_multi(point, MultiLineString(route['geometry']['coordinates'])))
            else:
                distances.append(dist_point_line(point, LineString(route['geometry']['coordinates'])))
        a0 = min(distances)
        if len(distances) > 1:
            # NOTE(review): new_routes aliases routes_list (no copy), and the
            # loops below remove() items while iterating -- elements can be
            # skipped; iterate over a copy if this is fixed.
            new_routes = routes_list
            distances2 = []
            for route in new_routes:
                if route['properties']['MUNI'] == 'MultiLineString':
                    if dist_point_multi(point, MultiLineString(route['geometry']['coordinates'])) == a0:
                        new_routes.remove(route)
                        intersecting_routes.append(route)
                else:
                    if dist_point_line(point, LineString(route['geometry']['coordinates'])) == a0:
                        new_routes.remove(route)
                        intersecting_routes.append(route)
            # NOTE(review): routes[0] is the FIRST route in the module-wide
            # list, not the nearest route found above -- this probably meant
            # intersecting_routes[0]; confirm before relying on the result.
            for route in new_routes:
                if route['geometry']['type'] == 'MultiLineString':
                    if routes[0]['geometry']['type'] == 'MultiLineString':
                        if MultiLineString(route['geometry']['coordinates']).intersects(MultiLineString(routes[0]['geometry']['coordinates'])) == False:
                            new_routes.remove(route)
                    else:
                        if MultiLineString(route['geometry']['coordinates']).intersects(LineString(routes[0]['geometry']['coordinates'])) == False:
                            new_routes.remove(route)
                else:
                    if routes[0]['geometry']['type'] == 'MultiLineString':
                        if LineString(route['geometry']['coordinates']).intersects(MultiLineString(routes[0]['geometry']['coordinates'])) == False:
                            new_routes.remove(route)
                    else:
                        if LineString(route['geometry']['coordinates']).intersects(LineString(routes[0]['geometry']['coordinates'])) == False:
                            new_routes.remove(route)
            for route in new_routes:
                if route['properties']['MUNI'] == 'MultiLineString':
                    distances2.append(dist_point_multi(point, MultiLineString(route['geometry']['coordinates'])))
                else:
                    distances2.append(dist_point_line(point, LineString(route['geometry']['coordinates'])))
            if len(distances2) > 0:
                a1 = min(distances2)
            else:
                a1 = 150
            if a1 == a0:
                a1 = 0
    return a1
def nearest_route(point):
    """Distance (m) from *point* to the closest route within 100 m.

    Returns the sentinel 110 when no route comes within 100 m.

    Bug fix: the per-route distance dispatch previously tested
    ``route['properties']['MUNI'] == 'MultiLineString'`` -- MUNI holds a
    borough name, so MultiLineString geometries were fed to the LineString
    helper.  Dispatch now keys on the geometry type, matching the candidate
    filter.
    """
    zone = Point(coordinates(point)).buffer(100)
    distances = []
    for j in idx.intersection(zone.bounds):
        geometry = routes[j]['geometry']
        if geometry['type'] == 'LineString':
            shape = LineString(geometry['coordinates'])
            measure = dist_point_line
        else:
            shape = MultiLineString(geometry['coordinates'])
            measure = dist_point_multi
        if shape.intersects(zone):
            distances.append(measure(point, shape))
    return min(distances) if distances else 110
# Query 9 : get the inverse square distance between the photo location and the nearest route
def inverse_square_nearest_route(point):
    """Proximity weight: 1 / d**2 for the distance d to the nearest route."""
    d = nearest_route(point)
    return 1 / (d ** 2)
# Query 10 : get the inverse square distance between the photo location and the nearest route among the routes which intersect with the nearest route
def inverse_square_intersecting_nearest_route(point):
    """Proximity weight: 1 / d**2 for the intersecting-nearest-route distance."""
    d = intersecting_nearest_route(point)
    return 1 / (d ** 2)
# Query 11 : count the number of different routes within 20 meters of the photo location
def count_route_under_20(point):
    """Number of routes whose geometry comes within 20 m of *point*."""
    zone = shapely.geometry.Point(coordinates(point)).buffer(20)
    total = 0
    for j in idx.intersection(zone.bounds):
        geometry = routes[j]['geometry']
        shape = (LineString(geometry['coordinates']) if geometry['type'] == 'LineString'
                 else MultiLineString(geometry['coordinates']))
        if shape.intersects(zone):
            total += 1
    return total
# Query 12 : count the number of different routes within 50 meters of the photo location
def count_route_under_50(point):
    """Number of routes whose geometry comes within 50 m of *point*."""
    zone = shapely.geometry.Point(coordinates(point)).buffer(50)
    total = 0
    for j in idx.intersection(zone.bounds):
        geometry = routes[j]['geometry']
        shape = (LineString(geometry['coordinates']) if geometry['type'] == 'LineString'
                 else MultiLineString(geometry['coordinates']))
        if shape.intersects(zone):
            total += 1
    return total
# Query 13 : get the annual average daily trafic of the nearest route to the photo location
def aadt13_nearest_route(point):
    """AADT13 (2013 annual average daily traffic) of the route nearest *point*.

    Considers routes within 100 m; returns None when there is no such route
    (the original also fell off the end implicitly in that case).

    Bug fix: distance dispatch now keys on the geometry type instead of
    ``properties['MUNI']`` (a borough name), so MultiLineString routes are
    measured with the MultiLineString helper.
    """
    zone = Point(coordinates(point)).buffer(100)
    best = None
    best_distance = None
    for j in idx.intersection(zone.bounds):
        geometry = routes[j]['geometry']
        if geometry['type'] == 'LineString':
            shape = LineString(geometry['coordinates'])
            measure = dist_point_line
        else:
            shape = MultiLineString(geometry['coordinates'])
            measure = dist_point_multi
        if not shape.intersects(zone):
            continue
        d = measure(point, shape)
        if best_distance is None or d < best_distance:
            best_distance = d
            best = routes[j]
    return best['properties']['AADT13'] if best is not None else None
def lenght_nearest_route(point):
    """Return the SHAPE_Leng (length) of the route nearest to the photo
    location, searching within a 100 m buffer.

    Name kept as "lenght" (sic) for compatibility with existing callers.

    :param point: photo record accepted by coordinates()
    :return: the route's 'SHAPE_Leng' property, or None when no route is within 100 m
    """
    neighborhood = Point(coordinates(point)).buffer(100)
    candidates = []
    for j in idx.intersection(neighborhood.bounds):
        geometry = routes[j]['geometry']
        if geometry['type'] == 'LineString':
            shape = LineString(geometry['coordinates'])
        else:
            shape = MultiLineString(geometry['coordinates'])
        if shape.intersects(neighborhood):
            candidates.append(routes[j])
    if not candidates:
        # mirrors the original implicit None when no route intersects the buffer
        return None

    def _distance_to(route):
        # BUG FIX: the original compared route['properties']['MUNI'] (a municipality
        # attribute) against 'MultiLineString'; the geometry *type* decides which
        # distance helper applies, exactly as in the intersection test above.
        if route['geometry']['type'] == 'MultiLineString':
            return dist_point_multi(point, MultiLineString(route['geometry']['coordinates']))
        return dist_point_line(point, LineString(route['geometry']['coordinates']))

    return min(candidates, key=_distance_to)['properties']['SHAPE_Leng']
def normalized_aadt(point):
    """AADT13 of the nearest route divided by that route's length.

    Returns the raw value when the AADT field is the string 'null', and None
    when there is no route within 100 m of the photo location.
    """
    # Hoisted: the original evaluated aadt13_nearest_route(point) up to four times.
    aadt = aadt13_nearest_route(point)
    if aadt is None:
        return None
    if aadt == 'null':
        return aadt
    return float(aadt) / float(lenght_nearest_route(point))
# Finalize by exporting all the results of the 13 queries in a JSON file
import collections
objects_list = []
for point in list_point:
d = collections.OrderedDict()
d['count_cafes'] = count_cafes(point)
d['count_benches'] = count_benches(point)
d['count_trees'] = count_trees(point)
d['green_criteria'] = green_criteria(point)
d['hydro_criteria'] = hydro_criteria(point)
d['area_criteria'] = area_criteria(point)
d['age_criteria'] = age_criteria(point)
d['height_criteria'] = height_criteria(point)
d['inverse_square_nearest_route'] = inverse_square_nearest_route(point)
d['inverse_square_intersecting_nearest_route'] = inverse_square_intersecting_nearest_route(point)
d['count_route_under_20'] = count_route_under_20(point)
d['count_route_under_50'] = count_route_under_50(point)
d['normalized_aadt'] = normalized_aadt(point)
d['variable_y'] = 0
objects_list.append(d)
o1 = open('final_results.json', 'w')
o1.write(json.dumps(objects_list))
o1.close()
o2 = open('final_results.json', 'w')
json.dump(json.dumps(objects_list), o2)
o2.close()
o3 = open('final_results.json', 'w')
json.dump(objects_list, o3)
o3.close()
end_time = decimal.Decimal(time.time())
timelapse = end_time - start_time
print "temps de calcul : %f" % timelapse | StarcoderdataPython |
4916757 | # 547 朋友圈
class Solution:
    """LeetCode 547 (friend circles): count connected components with union-find."""

    def findCircleNum(self, M: List[List[int]]) -> int:
        """Return the number of friend circles in adjacency matrix M."""
        if not M:
            return 0
        size = len(M)
        parent = list(range(size))
        for a in range(size):
            for b in range(size):
                if M[a][b] == 1:
                    self._union(parent, a, b)
        # one circle per distinct root
        return len({self._parent(parent, a) for a in range(size)})

    def _union(self, p, i, j):
        """Merge the components containing i and j."""
        root_i = self._parent(p, i)
        root_j = self._parent(p, j)
        p[root_j] = root_i

    def _parent(self, p, i):
        """Find i's root, compressing the path along the way."""
        root = i
        while p[root] != root:
            root = p[root]
        # second pass: point every node on the path directly at the root
        while p[i] != i:
            p[i], i = root, p[i]
        return root
3495051 | <filename>adminmgr/media/code/python/red1/reducer.py<gh_stars>1-10
#!/usr/bin/python3
# Hadoop-streaming style reducer: aggregates mapper records from stdin and
# prints "(key0,key1,count,total)" rows sorted by count descending.
from operator import itemgetter
import sys
import csv
import ast
# Accumulator keyed by the first two fields of each record:
# (field0, field1) -> [sum of field2, sum of field3]
Dict={}
for line in sys.stdin:
    # each stdin line is a Python literal list, e.g. "[a, b, n, m]"
    lst = ast.literal_eval(line)
    tupple = (lst[0],lst[1])
    if(tupple not in Dict):
        Dict[tupple] = [lst[2],lst[3]]
    else:
        Dict[tupple][0]+=lst[2]
        Dict[tupple][1]+=lst[3]
'''
for i in Dict:
    if(Dict[i][0]>5):
        print('%s,%s,%s,%s'%(i[0],i[1],Dict[i][0],Dict[i][1]))
'''
# Negate the count (index 2) so a plain ascending sort yields descending
# counts, with the remaining fields as ascending tie-breakers.
lst_sort = []
for i in Dict:
    lst_sort.append([i[0], i[1],-1*Dict[i][0],Dict[i][1]])
lst = sorted(lst_sort, key=itemgetter(2,3,0,1))
# NOTE(review): j is assigned but never used.
j=0
# Emit only rows whose fourth field exceeds 5; un-negate the count on output.
for i in lst:
    if(i[3]>5):
        print('%s,%s,%s,%s'%(i[0],i[1],-1*i[2],i[3]))
| StarcoderdataPython |
5007097 | from uuid import uuid4
from blockchain import Blockchain
from utility.verification import Verification
from wallet import Wallet
class Node:
    """Console front end for a single blockchain node.

    Owns a Wallet and a Blockchain instance and runs an interactive menu
    loop (listen_for_input) that drives transactions, mining and wallet
    management.
    """
    def __init__(self):
        # Create a wallet with a fresh key pair; the blockchain is bound to
        # the wallet's public key (used as the mining-reward recipient).
        # self.wallet.public_key = str(uuid4())
        self.wallet = Wallet()
        self.wallet.create_keys()
        self.blockchain = Blockchain(self.wallet.public_key)

    def get_transaction_value(self):
        """
        Returns the input of the user as a float. (new transaction amount)
        """
        # returns a (recipient, amount) tuple; raises ValueError on a
        # non-numeric amount (unhandled, as in the original design)
        tx_recipient = input('Enter the recipient of the transaction: ')
        tx_amount = float(input('Your transaction amount please: '))
        return tx_recipient, tx_amount

    def get_user_choice(self):
        """
        Let user choose what they want to do:
        1: add transaction
        2: Mine a new block.
        3: Output the blockchain blocks.
        4: Check transaction validity.
        h: Manipulate blockchain data
        q: Quit
        """
        user_input = input('Your Choice: ')
        return user_input

    def print_blockchain_elements(self):
        """Print all blocks of the blockchain."""
        # Output the blockchain list to the console
        print('\n')
        print('Outputting Blocks')
        print('\n')
        print('-' * 20)
        print('\n')
        for block in self.blockchain.chain:
            print(block)
        else:
            # for-else: runs after the loop finishes, printing a closing rule
            print('-' * 20)

    def listen_for_input(self):
        """Main interactive loop: show the menu, dispatch on the user's choice,
        and re-validate the whole chain after every action."""
        waiting_for_input = True
        while waiting_for_input:
            print('Please choose: ')
            print('1: Add a new transaction value.')
            print('2: Mine a new block.')
            print('3: Output the blockchain blocks.')
            print('4: Check transaction validity.')
            print('5: Create wallet.')
            print('6: Load wallet.')
            print('7: Save keys')
            print('q: Quit~!')
            user_choice = self.get_user_choice()
            if user_choice == '1':
                tx_data = self.get_transaction_value()
                recipient, amount = tx_data
                # NOTE(review): 'sigh_transaction' looks like a typo for
                # 'sign_transaction', but the name must match Wallet's method —
                # confirm against wallet.py before renaming.
                signature = self.wallet.sigh_transaction(self.wallet.public_key, recipient, amount)
                if self.blockchain.add_transaction(recipient, self.wallet.public_key, signature, amount=amount):
                    print('Transaction added ~!')
                else:
                    print('Transaction failed ~!')
                print(self.blockchain.get_open_transactions())
            elif user_choice == '2':
                if not self.blockchain.mine_block():
                    print('Mining failed, Got no wallet? ')
            elif user_choice == '3':
                self.print_blockchain_elements()
            elif user_choice == '4':
                # get_balance is passed uncalled — presumably invoked inside
                # verify_transactions per transaction; confirm in Verification.
                if Verification.verify_transactions(self.blockchain.get_open_transactions(), self.blockchain.get_balance):
                    print('All transactions are valid ~!')
                else:
                    print('Invalid transaction found')
            elif user_choice == '5':
                # fresh keys invalidate the old chain, so a new Blockchain is built
                print('-' * 20)
                self.wallet.create_keys()
                self.blockchain = Blockchain(self.wallet.public_key)
                print('-' * 20)
                print('Wallet Created, Enjoy~!')
                print('-' * 20)
            elif user_choice == '6':
                self.wallet.load_keys()
                self.blockchain = Blockchain(self.wallet.public_key)
            elif user_choice == '7':
                self.wallet.save_keys()
            elif user_choice == 'q':
                # finish this pass through the loop, then exit via the while test
                waiting_for_input = False
            else:
                print('Invalid input, please choose again. ')
            # after every action, bail out if the chain has been tampered with
            if not Verification.validate_chain(self.blockchain.chain):
                self.print_blockchain_elements()
                print('Invalid Blockchain~!')
                break
            # print('Total amount sent.')
            # NOTE(review): "Banlance" is a typo for "Balance" in user-facing output.
            print('Banlance of {}: {:6.2f}'.format(self.wallet.public_key, self.blockchain.get_balance()))
        else:
            # while-else: runs only when the loop exits normally (user chose 'q'),
            # not when it breaks on an invalid chain
            print('User left~!')
        print('Done!')
# Run the interactive node only when executed as a script (not on import).
if __name__ == '__main__':
    node = Node()
    node.listen_for_input()
| StarcoderdataPython |
def func(**args):
    """
    Keyword args:
      foo: bar
          baz
    Keyword arguments:
      quux
    """
    # NOTE(review): the original definition line was garbled
    # ("def fu<the_ref>nc"); restored to a valid identifier.
4913156 | """
Color Encoder
Module Description
==================
The file color_map.png contains color-coded state regions. This module
encodes those colors in RGB form into a corresponding state abbreviation.
Copyright Information
===============================
This file is Copyright (c) 2021 <NAME>, <NAME>, <NAME>, <NAME>.
"""
# The 50 states in color order: the red channel steps by 4 per state
# (AL = (4, 0, 0) ... WY = (200, 0, 0)); green and blue are always 0.
_STATE_ABBREVIATIONS = (
    'AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
    'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
    'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
    'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
    'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY',
)

# RGB tuple -> two-letter state abbreviation.
STATE_COLOR_MAP = {
    (4 * (position + 1), 0, 0): abbreviation
    for position, abbreviation in enumerate(_STATE_ABBREVIATIONS)
}
| StarcoderdataPython |
1728333 | # Generated by Django 3.0.6 on 2020-05-19 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional from_email field to the Waitlist model."""

    # must be applied after the confirmed-field migration
    dependencies = [
        ('main', '0003_waitlist_confirmed'),
    ]

    operations = [
        migrations.AddField(
            model_name='waitlist',
            name='from_email',
            # optional sender address; nullable so existing rows need no backfill
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
    ]
| StarcoderdataPython |
6519921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# make it possible to run as standalone program
from __future__ import print_function
import sys
import string
import re
sys.path.append('/srv/chemminetools')
from django.core.management import setup_environ
import chemminetools.settings
setup_environ(chemminetools.settings)
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib import messages
# load functions used
from pubchem_rest_interface.Pubchem_pug import DownloadCIDs
def main():
    """Read whitespace-separated PubChem CIDs from stdin, fetch their SDF
    records via the PUG REST interface, and print the result to stdout.

    Errors are reported on stderr; on failure the script prints "None".
    """
    cids = sys.stdin.read().split()
    filtered_cids = []
    for cid in cids:
        # keep only the numeric part of each token
        match = re.search(r"(\d{1,200})", cid)
        if match:
            filtered_cids.append(int(match.group(1)))
    sdf = None
    if filtered_cids:
        try:
            sdf = DownloadCIDs(filtered_cids)
        except Exception:
            # BUG FIX: the original called messages.error(request, ...) but no
            # `request` exists in this CLI script, so the error path raised a
            # NameError. Report on stderr instead.
            print('Invalid CIDs or no response from PubChem!', file=sys.stderr)
    else:
        print('Error: No valid CIDs entered!', file=sys.stderr)
    print(sdf)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
12864422 | from __future__ import annotations
import operator
from enum import Enum
from itertools import product
from typing import Dict, Union
import numpy as np
# Tags for the operation nodes of the expression tree; each member's value
# equals its name. Built with the functional Enum API.
Operation = Enum('Operation', [(_name, _name) for _name in
                               ('PLUS', 'MINUS', 'TIMES', 'EXP',
                                'MAX', 'MIN', 'CONT', 'NOT')])
NOT = 'NOT'
####################################################################################################
def h(x, fx):
    """helper function as in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24

    Move x one step (mod 3) toward fx: returns x+1, x-1, or x unchanged.
    """
    x, fx = x % 3, fx % 3
    if fx == x:
        return x
    return x + 1 if fx > x else x - 1
####################################################################################################
# monomial and sparse polynomial classes. These should be faster than the sympy versions due to
# their reduced scope.
####################################################################################################
class Expression(object):
    """Abstract base class for nodes of a mod-3 expression tree.

    Subclasses (Function, BinaryOperation, UnaryRelation, Monomial, ...)
    implement evaluation and conversion to polynomial / C / numpy forms, plus
    the "continuous" transforms from doi:10.1371/journal.pcbi.1005352.
    Python arithmetic operators build wrapper nodes rather than computing.
    """
    def __add__(self, other):
        return BinaryOperation('PLUS', self, other)

    __radd__ = __add__

    def __sub__(self, other):
        return BinaryOperation('MINUS', self, other)

    def __mul__(self, other):
        return BinaryOperation('TIMES', self, other)

    __rmul__ = __mul__

    def __neg__(self):
        return UnaryRelation('MINUS', self)

    def __pow__(self, power, modulo=None):
        # modulo is accepted for protocol compatibility but ignored
        return BinaryOperation('EXP', self, power)

    # def __divmod__(self, other):
    #     raise NotImplementedError("division, modulus not implemented")

    # def __truediv__(self, other):
    #     raise NotImplementedError("truediv not implemented")

    # def __floordiv__(self, other):
    #     raise NotImplementedError("floordiv not implemented")

    def eval(self, variable_dict):
        """
        evaluates the expression. variable_dict is expected to be a dict containing str:Expression or
        Monomial:Expression pairs. The latter are constrained to be of single-variable type.

        :param variable_dict: a dictionary of taking either single-term monomials or string (variable names) to ints
        :return: evaluated expression
        """
        raise NotImplementedError("eval() unimplemented in " + str(type(self)))

    def is_constant(self):
        """True when the expression contains no variables."""
        raise NotImplementedError("is_constant() unimplemented in " + str(type(self)))

    def as_c_expression(self):
        """Render as a C expression string (see the mod3* helpers in the C runtime)."""
        raise NotImplementedError("as_c_expression() unimplemented in " + str(type(self)))

    def as_polynomial(self) -> Union[int, Expression]:
        """Convert to polynomial form (int or polynomial expression)."""
        raise NotImplementedError("as_polynomial() unimplemented in " + str(type(self)))

    # def as_sympy(self):
    #     """
    #     converts to sympy expression
    #
    #     Returns
    #     -------
    #     sympy expression
    #     """
    #     raise NotImplementedError("as_sympy() unimplemented in " + str(type(self)))

    def as_numpy_str(self, variables) -> str:
        """
        returns numpy-based function of variables, with order corresponding to that
        given in the variables parameter

        Parameters
        ----------
        variables

        Returns
        -------
        lambda with len(variables) parameters
        """
        raise NotImplementedError("as_numpy_str() unimplemented in " + str(type(self)))

    def get_variable_set(self):
        """ returns a set containing all variable which occur in this expression """
        raise NotImplementedError("get_var_set() unimplemented in " + str(type(self)))

    def num_variables(self):
        """ returns the number of variables which occur in this expression """
        return len(self.get_variable_set())

    def rename_variables(self, name_dict: Dict[str, str]):
        """ rename variables """
        raise NotImplementedError("rename_variables() unimplemented in " + str(type(self)))

    def continuous_function_version(self, control_variable):
        """
        Wrap this equation with the 'continuity controller' i.e. return CONT(control_variable,self)
        :param control_variable: variable or string
        :return: functional continuous version
        """
        if self.is_constant():
            return self
        if isinstance(control_variable, str):
            control_variable = Monomial.as_var(control_variable)
        return Function('CONT', [control_variable, self])

    ####################################################################################################
    #
    # the following method converts a system of equations into one which is "continuous" in the sense
    # that application of the system does not change the per-coordinate values by more than 1. This is
    # accomplished by a type of curve fitting. Fortunately, the formula for this
    #
    # g(x) = sum_{c\in \F_3^n} h(c) prod_{j=0}^n (1-(x_j-c_j)**2)
    #
    # (as seen in the PLoS article, doi:10.1371/journal.pcbi.1005352.t003 pg 16/24) admits a recursive
    # formulation. That is, for a polynomial x_k = f_k(x_0,x_1,...,x_l) we can select one of the
    # variables, say x_0 and reduce the polynomial each of 3-ways x_0=0, x_0=1, and x_0=2. This
    # correspondingly divides the sum into those which have each of the 3 types of terms
    # (1-(x_0-c_0)**2) for c_0=0, c_0=1, and c_0=2
    #
    # fortunately, (1-(x_j-0)**2)+(1-(x_j-1)**2)+(1-(x_j-2)**2) = 1 so if the evaluations of f become
    # constant or even simply eliminate a variable, we need no longer consider that variable.
    #
    # recursion proceeds by eliminating variables in this manner, multiplying by the appropriate fitting
    # term (1-(x_j-c_j)**2) (c_j being the evaluated value of x_j) on the way up.
    #
    # this comment is not really the place for a full proof of this method, but the proof is easily
    # obtained from the above.
    #
    ####################################################################################################

    def continuous_polynomial_version(self, control_variable):
        """Curve-fit a continuous polynomial version of this expression, with
        step size limited via h() applied to control_variable (see the comment
        block above for the method)."""
        if self.is_constant():
            return self
        if isinstance(control_variable, str):
            control_variable = Monomial.as_var(control_variable)
        # as the control variable is special (due to use in the 'h' function),
        # we will need to go through the procedure for it separately, first
        accumulator = Mod3Poly.zero()
        for control_variable_value in range(3):
            evaluated_poly = self.eval({control_variable: control_variable_value})
            if is_integer(evaluated_poly) or evaluated_poly.is_constant():
                computed_value = int(evaluated_poly)
                continuous_value = h(control_variable_value, computed_value)
                accumulator += continuous_value * (1 - (control_variable - control_variable_value) ** 2)
            else:
                # still has free variables: recurse, fitting them one at a time
                accumulator += evaluated_poly.continuous_version_helper(control_variable_value) * \
                               (1 - (control_variable - control_variable_value) ** 2)
        return accumulator

    def continuous_version_helper(self, control_variable_value):
        """Recursive step of continuous_polynomial_version: eliminate one free
        variable at a time, with the control variable already fixed to
        control_variable_value."""
        # find some free variable
        free_variable = tuple(self.get_variable_set())[0]
        if isinstance(free_variable, str):
            free_variable = Monomial.as_var(free_variable)
        # iterate over the ways of setting that variable: 0, 1, 2
        accumulator = Mod3Poly.zero()
        for free_variable_value in range(3):
            evaluated_poly = self.eval({free_variable: free_variable_value})
            if is_integer(evaluated_poly) or evaluated_poly.is_constant():
                computed_value = int(evaluated_poly)
                continuous_value = h(control_variable_value, computed_value)
                accumulator += \
                    continuous_value * (1 - (free_variable - free_variable_value) ** 2)
            else:
                accumulator += evaluated_poly.continuous_version_helper(control_variable_value) * \
                               (1 - (free_variable - free_variable_value) ** 2)
        return accumulator
####################################################################################################
def rename_helper(expression: Union[Expression, int], name_dict: Dict[str, str]):
    """Rename the variables of *expression* per name_dict; plain integers pass through untouched."""
    return (expression if is_integer(expression)
            else expression.rename_variables(name_dict=name_dict))
####################################################################################################
# actions on expressions, suitable for conversion to polynomial form. Not best for simulator.
def mod_3(n):
    """Reduce n modulo 3 (result is in {0, 1, 2} for any integer n)."""
    return divmod(n, 3)[1]
def not3(n):
    """Polynomial NOT over F_3 (2 + 2n), collapsed to an int when constant."""
    value = 2 + 2 * n
    collapsible = is_integer(value) or value.is_constant()
    return mod_3(int(value)) if collapsible else value
def max3(a, b):
    """Polynomial MAX over F_3 (truth-table interpolation), collapsed to an int when constant."""
    value = a + b + 2 * a * b + (a ** 2) * b + a * (b ** 2) + (a ** 2) * (b ** 2)
    collapsible = is_integer(value) or value.is_constant()
    return mod_3(int(value)) if collapsible else value
def min3(a, b):
    """Polynomial MIN over F_3 (truth-table interpolation), collapsed to an int when constant."""
    value = a * b + 2 * (a ** 2) * b + 2 * a * (b ** 2) + 2 * (a ** 2) * (b ** 2)
    collapsible = is_integer(value) or value.is_constant()
    return mod_3(int(value)) if collapsible else value
def is_integer(x):
    """True for Python ints and numpy integer scalars (not floats or strings)."""
    return isinstance(x, (int, np.integer))
####################################################################################################
class Function(Expression):
    """A named function node (MAX / MIN / CONT / NOT) applied to a list of
    parameters, each an Expression or a plain int."""

    def __init__(self, function_name, expression_list):
        # function_name: one of 'MAX', 'MIN', 'CONT', 'NOT'
        self._function_name = function_name
        self._expression_list = expression_list

    def rename_variables(self, name_dict: Dict[str, str]):
        """Return a copy of this node with variables renamed per name_dict."""
        renamed_parameters = [rename_helper(expr, name_dict) for expr in self._expression_list]
        return Function(self._function_name, renamed_parameters)

    def eval(self, variable_dict):
        """Partially evaluate; returns a plain int when every parameter
        collapses to a constant, otherwise a simplified Function node."""
        # evaluate function parameters
        evaluated_expressions = [expr if is_integer(expr)
                                 else expr.eval(variable_dict)
                                 for expr in self._expression_list]
        # simplify constants to ints, if possible
        evaluated_expressions = [int(expr) if is_integer(expr) or expr.is_constant()
                                 else expr
                                 for expr in evaluated_expressions]
        if self._function_name == 'MAX':
            assert len(evaluated_expressions) == 2, "wrong number of arguments for MAX"
            expr_one, expr_two = evaluated_expressions
            # if it can be computed directly, do it. otherwise, return in function form
            if is_integer(expr_one) and is_integer(expr_two):
                expr_one = mod_3(expr_one)
                expr_two = mod_3(expr_two)
                return max(expr_one, expr_two)
            elif is_integer(expr_one) and expr_one == 2:
                # 2 is the top element mod 3, so MAX(2, x) == 2
                return 2
            elif is_integer(expr_one) and expr_one == 0:
                # 0 is the bottom element, so MAX(0, x) == x
                return expr_two
            elif is_integer(expr_two) and expr_two == 2:
                return 2
            elif is_integer(expr_two) and expr_two == 0:
                return expr_one
            else:
                return Function('MAX', [expr_one, expr_two])
        elif self._function_name == 'MIN':
            assert len(evaluated_expressions) == 2, "wrong number of arguments for MIN"
            expr_one, expr_two = evaluated_expressions
            # if it can be computed directly, do it. otherwise, return in function form
            if is_integer(expr_one) and is_integer(expr_two):
                expr_one = mod_3(expr_one)
                expr_two = mod_3(expr_two)
                return min(expr_one, expr_two)
            elif is_integer(expr_one) and expr_one == 2:
                # MIN(2, x) == x since 2 is the top element
                return expr_two
            elif is_integer(expr_one) and expr_one == 0:
                # MIN(0, x) == 0 since 0 is the bottom element
                return 0
            elif is_integer(expr_two) and expr_two == 2:
                return expr_one
            elif is_integer(expr_two) and expr_two == 0:
                return 0
            else:
                return Function('MIN', [expr_one, expr_two])
        elif self._function_name == 'CONT':
            assert len(evaluated_expressions) == 2, "wrong number of arguments for CONT"
            ctrl_var, expr = evaluated_expressions
            if is_integer(ctrl_var):
                # a constant control variable makes CONT meaningless
                raise Exception("Unsupported; nonsense")
            return Function('CONT', [ctrl_var, expr])
        elif self._function_name == 'NOT':
            assert len(evaluated_expressions) == 1, "wrong number of arguments for NOT"
            expr = evaluated_expressions[0]
            # if it can be computed directly, do it. otherwise, return in function form
            if is_integer(expr):
                return not3(int(expr))
            else:
                return Function('NOT', [expr])
        else:
            raise Exception("cannot evaluate unknown function " + self._function_name)

    def is_constant(self):
        """True when every parameter is an int or a constant expression."""
        return all(is_integer(expr) or expr.is_constant()
                   for expr in self._expression_list)

    def __str__(self):
        return self._function_name + "(" + ",".join([str(exp) for exp in self._expression_list]) + ")"

    __repr__ = __str__

    def as_c_expression(self):
        """Render as a call to the corresponding mod3* helper in the C runtime."""
        c_exprs = [str(expr) if is_integer(expr) else expr.as_c_expression() for expr in self._expression_list]
        if self._function_name == 'MAX':
            func_name = 'mod3max'
        elif self._function_name == 'MIN':
            func_name = 'mod3min'
        elif self._function_name == 'CONT':
            func_name = 'mod3continuity'
        elif self._function_name == 'NOT':
            func_name = 'mod3not'
        else:
            raise Exception("Unknown binary relation: " + self._function_name)
        return func_name + '(' + ",".join(c_exprs) + ')'

    def as_polynomial(self):
        """Expand this function node into polynomial form via the truth-table
        polynomials (max3/min3/not3) or the continuous transform (CONT)."""
        expressions_as_polynomials = [mod_3(expr) if is_integer(expr)
                                      else expr.as_polynomial()
                                      for expr in self._expression_list]
        if self._function_name == 'MAX':
            assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MAX"
            return max3(expressions_as_polynomials[0], expressions_as_polynomials[1])
        elif self._function_name == 'MIN':
            assert len(expressions_as_polynomials) == 2, "wrong number of arguments for MIN"
            return min3(expressions_as_polynomials[0], expressions_as_polynomials[1])
        elif self._function_name == 'CONT':
            assert len(expressions_as_polynomials) == 2, "wrong number of arguments for CONT"
            return expressions_as_polynomials[1].continuous_polynomial_version(expressions_as_polynomials[0])
        elif self._function_name == 'NOT':
            assert len(expressions_as_polynomials) == 1, "wrong number of arguments for NOT"
            return not3(expressions_as_polynomials[0])
        else:
            raise Exception("cannot evaluate unknown function " + self._function_name + " as a polynomial")

    # def as_sympy(self):
    #
    #     def cont_sympy(control, expr):
    #         return expr if is_integer(expr) \
    #             else expr.continuous_polynomial_version(control)
    #
    #     def not_sympy(expr):
    #         return 1 - expr
    #
    #     # tuples are param-count, function
    #     functions = {'MAX': (2, sympy.Max),
    #                  'MIN': (2, sympy.Min),
    #                  'CONT': (2, cont_sympy),
    #                  'NOT': (1, not_sympy)}
    #
    #     if self._function_name not in functions:
    #         raise Exception("cannot evaluate unknown function " + self._function_name + " as a sympy expression")
    #
    #     if len(self._expression_list) != functions[self._function_name][0]:
    #         raise Exception(f"Wrong number of arguments for {self._function_name}")
    #
    #     function = functions[self._function_name][1]
    #
    #     sympy_expressions = [sympy.Mod(expr, 3) if is_integer(expr)
    #                          else sympy.Mod(expr.as_sympy(), 3)
    #                          for expr in self._expression_list]
    #     return function(*sympy_expressions)

    def as_numpy_str(self, variables) -> str:
        """Render as a numpy expression string, substituting each parameter's
        rendering into the function's format template."""
        np_parameter_strings = [str(expr) if is_integer(expr)
                                else expr.as_numpy_str(variables)
                                for expr in self._expression_list]
        # this one is slow
        # continuous_str = "( (({1})>({0})) * (({0})+1) + (({1})<({0})) * (({0})-1) + (({1})==({0}))*({0}) )"
        continuous_str = "( {0}+np.sign(np.mod({1},3)-np.mod({0},3)) )"
        max_str = "np.maximum(np.mod({0},3),np.mod({1},3))"
        min_str = "np.minimum(np.mod({0},3),np.mod({1},3))"
        not_str = "(2-({0}))"
        # tuples are param-count, function
        function_strings = {'MAX': (2, max_str),
                            'MIN': (2, min_str),
                            'CONT': (2, continuous_str),
                            'NOT': (1, not_str)}
        if self._function_name not in function_strings:
            raise Exception("cannot evaluate unknown function " + self._function_name + " as a numpy function")
        if len(self._expression_list) != function_strings[self._function_name][0]:
            raise Exception(f"Wrong number of arguments for {self._function_name}")
        function = function_strings[self._function_name][1]
        return function.format(*np_parameter_strings)

    def get_variable_set(self):
        """Union of the variable sets of all non-integer parameters."""
        var_set = set()
        for expr in self._expression_list:
            if not is_integer(expr):
                var_set = var_set.union(expr.get_variable_set())
        return var_set
class BinaryOperation(Expression):
    """A binary operator node (PLUS / MINUS / TIMES / EXP) over two operands,
    each an Expression or a plain int."""

    def __init__(self, relation_name, left_expression: Union[Expression, int],
                 right_expression: Union[Expression, int]):
        self.relation_name = relation_name
        self._left_expression: Union[Expression, int] = left_expression
        self._right_expression: Union[Expression, int] = right_expression

    def rename_variables(self, name_dict: Dict[str, str]):
        """Return a copy of this node with variables renamed per name_dict."""
        renamed_left_expression = rename_helper(self._left_expression, name_dict)
        renamed_right_expression = rename_helper(self._right_expression, name_dict)
        return BinaryOperation(self.relation_name,
                               left_expression=renamed_left_expression,
                               right_expression=renamed_right_expression)

    def is_constant(self):
        """True when both operands are ints or constant expressions."""
        return (is_integer(self._left_expression) or self._left_expression.is_constant()) and \
               (is_integer(self._right_expression) or self._right_expression.is_constant())

    def eval(self, variable_dict):
        """
        evaluate parameters, making them ints if possible
        :param variable_dict: a dictionary of taking either single-term monomials or string (variable names) to ints
        :return: evaluated expression
        """
        evaled_left_expr = self._left_expression if is_integer(self._left_expression) \
            else self._left_expression.eval(variable_dict)
        # collapse to int when the evaluated operand is constant
        evaled_left_expr = int(evaled_left_expr) \
            if is_integer(evaled_left_expr) or evaled_left_expr.is_constant() \
            else evaled_left_expr
        evaled_right_expr = self._right_expression if is_integer(self._right_expression) \
            else self._right_expression.eval(variable_dict)
        evaled_right_expr = int(evaled_right_expr) \
            if is_integer(evaled_right_expr) or evaled_right_expr.is_constant() \
            else evaled_right_expr
        # the Python operators below rebuild expression nodes when either
        # operand is still symbolic (see Expression's operator overloads)
        if self.relation_name == 'PLUS':
            return evaled_left_expr + evaled_right_expr
        elif self.relation_name == 'MINUS':
            return evaled_left_expr - evaled_right_expr
        elif self.relation_name == 'TIMES':
            return evaled_left_expr * evaled_right_expr
        elif self.relation_name == 'EXP':
            return evaled_left_expr ** evaled_right_expr
        else:
            raise Exception("cannot evaluate unknown binary op: " + self.relation_name)

    def __str__(self):
        short_relation_name = "?"
        if self.relation_name == 'PLUS':
            short_relation_name = '+'
        elif self.relation_name == 'MINUS':
            short_relation_name = '-'
        elif self.relation_name == 'TIMES':
            short_relation_name = '*'
        elif self.relation_name == 'EXP':
            short_relation_name = '^'
        # parenthesize nested binary operations to preserve precedence
        left_side = str(self._left_expression)
        if isinstance(self._left_expression, BinaryOperation):
            left_side = "(" + left_side + ")"
        right_side = str(self._right_expression)
        if isinstance(self._right_expression, BinaryOperation):
            right_side = "(" + right_side + ")"
        return left_side + short_relation_name + right_side

    __repr__ = __str__

    def as_c_expression(self):
        """Render as a C expression string; EXP maps to the mod3pow helper."""
        if is_integer(self._left_expression):
            left_c_expr = str(self._left_expression)
        else:
            left_c_expr = self._left_expression.as_c_expression()
        if is_integer(self._right_expression):
            right_c_expr = str(self._right_expression)
        else:
            right_c_expr = self._right_expression.as_c_expression()
        if self.relation_name == 'PLUS':
            return '(' + left_c_expr + ')+(' + right_c_expr + ')'
        elif self.relation_name == 'MINUS':
            return '(' + left_c_expr + ')-(' + right_c_expr + ')'
        elif self.relation_name == 'TIMES':
            return '(' + left_c_expr + ')*(' + right_c_expr + ')'
        elif self.relation_name == 'EXP':
            return 'mod3pow(' + left_c_expr + ',' + right_c_expr + ')'
        else:
            raise Exception("Unknown binary relation: " + self.relation_name)

    def as_polynomial(self):
        """Convert both operands to polynomial form and combine them."""
        if is_integer(self._left_expression):
            left_poly = self._left_expression
        else:
            left_poly = self._left_expression.as_polynomial()
        if is_integer(self._right_expression):
            right_poly = self._right_expression
        else:
            right_poly = self._right_expression.as_polynomial()
        if self.relation_name == 'PLUS':
            return left_poly + right_poly
        elif self.relation_name == 'MINUS':
            return left_poly - right_poly
        elif self.relation_name == 'TIMES':
            return left_poly * right_poly
        elif self.relation_name == 'EXP':
            # simplify the exponent = 0, 1 cases
            if is_integer(right_poly):
                if right_poly == 0:
                    return 1
                elif right_poly == 1:
                    return left_poly
                else:
                    return left_poly ** right_poly
            else:
                return left_poly ** right_poly
        else:
            raise Exception("Unknown binary relation: " + self.relation_name)

    # def as_sympy(self):
    #     """
    #     Convert to sympy expression
    #     Returns
    #     -------
    #     sympy expression
    #     """
    #
    #     def simple_pow(left_exp, right_exp):
    #         # simplify the exponent = 0, 1 cases
    #         if is_integer(right_exp):
    #             if right_exp == 0:
    #                 return 1
    #             elif right_exp == 1:
    #                 return left_exp
    #             else:
    #                 return left_exp ** right_exp
    #         else:
    #             return left_exp ** right_exp
    #
    #     relations = {'PLUS': operator.add,
    #                  'MINUS': operator.sub,
    #                  'TIMES': operator.mul,
    #                  'EXP': simple_pow}
    #
    #     if self.relation_name not in relations:
    #         raise Exception("Unknown binary relation: " + self.relation_name)
    #
    #     lhs = self._left_expression if is_integer(self._left_expression) else self._left_expression.as_sympy()
    #     rhs = self._right_expression if is_integer(self._right_expression) else self._right_expression.as_sympy()
    #
    #     return relations[self.relation_name](lhs, rhs)

    def as_numpy_str(self, variables) -> str:
        """
        Convert to numpy function

        Parameters
        ----------
        variables

        Returns
        -------
        str version of numpy function
        """
        relations = {'PLUS': "(({0})+({1}))",
                     'MINUS': "(({0})-({1}))",
                     'TIMES': "(({0})*({1}))",
                     'EXP': "(({0})**({1}))"}
        if self.relation_name not in relations:
            raise Exception("Unknown binary relation: " + self.relation_name)
        lhs = str(self._left_expression) if is_integer(self._left_expression) \
            else self._left_expression.as_numpy_str(variables)
        rhs = str(self._right_expression) if is_integer(self._right_expression) \
            else self._right_expression.as_numpy_str(variables)
        return relations[self.relation_name].format(lhs, rhs)

    def get_variable_set(self):
        """Union of the variable sets of both non-integer operands."""
        var_set = set()
        if not is_integer(self._left_expression):
            var_set = var_set.union(self._left_expression.get_variable_set())
        if not is_integer(self._right_expression):
            var_set = var_set.union(self._right_expression.get_variable_set())
        return var_set
class UnaryRelation(Expression):
    """A unary operator node; currently only 'MINUS' (negation) is supported."""

    def __init__(self, relation_name, expr):
        self._relation_name = relation_name  # only 'MINUS' is supported
        self._expr = expr  # operand: Expression or plain int

    def rename_variables(self, name_dict: Dict[str, str]):
        """Return a copy of this node with variables renamed per name_dict."""
        return UnaryRelation(relation_name=self._relation_name,
                             expr=rename_helper(self._expr, name_dict))

    def is_constant(self):
        """True when the operand is an int or a constant expression."""
        # BUG FIX: integer operands have no is_constant() method; guard with
        # is_integer first, exactly as every other method of this class does.
        return is_integer(self._expr) or self._expr.is_constant()

    def eval(self, variable_dict):
        """Evaluate the negation; collapses to an int when the operand does."""
        if self._relation_name != 'MINUS':
            raise Exception("UnaryRelation in bad state with unknown unary relation name")
        if is_integer(self._expr):
            return (-1) * self._expr
        # BUG FIX: this branch previously required `type(self._expr) == Expression`,
        # which is never true for subclasses of Expression, so evaluating a negated
        # subexpression fell through and silently returned None.
        evaluated_subexpression = self._expr.eval(variable_dict)
        if is_integer(evaluated_subexpression) or evaluated_subexpression.is_constant():
            return (-1) * int(evaluated_subexpression)
        return (-1) * evaluated_subexpression

    def __str__(self) -> str:
        short_rel_name = str(self._relation_name)
        if self._relation_name == 'MINUS':
            short_rel_name = '-'
        # parenthesize binary sub-expressions to preserve precedence
        return short_rel_name + (
            "(" + str(self._expr) + ")" if type(self._expr) == BinaryOperation else str(self._expr))

    __repr__ = __str__

    def as_c_expression(self):
        """Render as a C expression string."""
        if is_integer(self._expr):
            c_exp = str(mod_3(self._expr))
        else:
            c_exp = self._expr.as_c_expression()
        if self._relation_name == 'MINUS':
            return '-(' + c_exp + ')'
        else:
            raise Exception("Unknown binary relation: " + self._relation_name)

    def as_polynomial(self):
        """Convert the operand to polynomial form and negate it."""
        if is_integer(self._expr) or self._expr.is_constant():
            poly = mod_3(int(self._expr))
        else:
            poly = self._expr.as_polynomial()
        if self._relation_name == 'MINUS':
            return (-1) * poly
        else:
            raise Exception("Unknown unary relation: " + self._relation_name)

    def as_sympy(self):
        """
        Convert to sympy expression

        Returns
        -------
        sympy expression
        """
        relations = {'MINUS': operator.neg}
        if self._relation_name not in relations:
            raise Exception("Unknown unary relation: " + self._relation_name)
        expr = self._expr if is_integer(self._expr) else self._expr.as_sympy()
        return relations[self._relation_name](expr)

    def as_numpy_str(self, variables):
        """
        Convert to numpy function

        Parameters
        ----------
        variables

        Returns
        -------
        str numpy-representation
        """
        relations = {'MINUS': "(-({0}))"}
        if self._relation_name not in relations:
            raise Exception("Unknown unary relation: " + self._relation_name)
        expr_str = str(self._expr) if is_integer(self._expr) \
            else self._expr.as_numpy_str(variables)
        return relations[self._relation_name].format(expr_str)

    def get_variable_set(self):
        """Variable set of the operand (empty for integer operands)."""
        if is_integer(self._expr):
            return set()
        else:
            return self._expr.get_variable_set()
####################################################################################################
class Monomial(Expression):
    """A class to encapsulate monomials reduced by x^3-x==0 for all variables x.

    Because x^3 == x over GF(3), every exponent can be reduced to 1 or 2;
    the reduction p -> 1 + ((p - 1) % 2) is applied on construction.
    """

    def __init__(self, power_dict: dict):
        """power_dict maps variable (anything str()-able) -> positive exponent."""
        # copy over only those terms which actually appear
        self._power_dict = {str(var): power_dict[var] for var in power_dict if power_dict[var] != 0}
        for var in self._power_dict.keys():
            assert self._power_dict[var] > 0  # b/c x^-1 isn't exactly x (i.e. when x=0)
            # reduce the exponent to 1 or 2 using x^3 == x
            self._power_dict[var] = 1 + ((-1 + self._power_dict[var]) % 2)

    def rename_variables(self, name_dict: Dict[str, str]):
        """Return a copy with variables renamed via name_dict.

        Two distinct variables may be renamed to the same new name; in
        that case their exponents are combined and re-reduced.
        """
        renamed_dict = dict()
        for variable, exponent in self._power_dict.items():
            name = name_dict.get(variable, variable)
            if name in renamed_dict:
                renamed_dict[name] += exponent
                renamed_dict[name] = 1 + ((-1 + renamed_dict[name]) % 2)
            else:
                renamed_dict[name] = exponent
        return Monomial(power_dict=renamed_dict)

    def as_polynomial(self):
        return self

    def is_constant(self):
        """A monomial with no variables is the constant 1."""
        return len(self._power_dict) == 0

    def num_variables(self):
        return len(self._power_dict)

    def variable_list(self):
        return self._power_dict.keys()

    def eval(self, variable_dict: Dict):
        """Evaluate the monomial.

        variable_dict is expected to be a dict containing str:Expression or
        Monomial:Expression pairs.  Monomial keys must be single-variable.
        Variables without a binding remain symbolic in the result.
        """
        if type(variable_dict) != dict:
            raise Exception("eval is not defined on this input")
        # sanitize inputs: convert Monomial keys to their variable name
        sanitized_variable_dict = dict()
        for variable, quantity in variable_dict.items():
            if type(variable) == str:
                sanitized_variable_dict.update({variable: variable_dict[variable]})
            elif type(variable) == Monomial:
                if variable.num_variables() != 1:
                    raise Exception(
                        "We do not know how to evaluate monomials of zero or several variables to a single number")
                else:
                    variable_as_str = list(variable.variable_list())[0]
                    sanitized_variable_dict.update({variable_as_str: variable_dict[variable]})
        variable_dict = sanitized_variable_dict
        accumulator = Mod3Poly.one()
        for variable, quantity in self._power_dict.items():
            if variable in variable_dict.keys():
                accumulator *= variable_dict[variable] ** self._power_dict[variable]
            else:
                # unbound variable: keep it symbolic
                accumulator *= Monomial.as_var(variable) ** self._power_dict[variable]
        return accumulator

    def get_variable_set(self):
        """ returns a set containing all variable which occur in this monomial """
        return {var for var in self._power_dict if self._power_dict[var] != 0}

    @staticmethod
    def unit():
        """produces the unit, 1, as a monomial"""
        return Monomial(dict())

    @staticmethod
    def as_var(var_name: str):
        """Produce the single-variable monomial var_name^1."""
        return Monomial({var_name: 1})

    def __mul__(self, other) -> Expression:
        if isinstance(other, Monomial):
            result_power_dict = self._power_dict.copy()
            for key in other._power_dict.keys():
                if key in result_power_dict.keys():
                    result_power_dict[key] += other._power_dict[key]
                    # re-reduce the combined exponent using x^3 == x
                    while result_power_dict[key] >= 3:
                        result_power_dict[key] -= 2
                else:
                    result_power_dict[key] = other._power_dict[key]
            return Monomial(result_power_dict)
        elif isinstance(other, Mod3Poly) or is_integer(other):
            return self.as_poly() * other
        else:
            # symbolic fallback for general expressions
            return BinaryOperation('TIMES', self, other)

    __rmul__ = __mul__

    def __neg__(self):
        return (-1) * self

    def __pow__(self, power, **kwargs):
        if type(power) == Mod3Poly and power.is_constant():
            power = power[Monomial.unit()]
        assert is_integer(power)
        # BUGFIX: negative powers previously recursed forever via power // 2.
        assert power >= 0
        if power == 0:
            return Monomial.unit()
        elif power == 1:
            return self
        elif power == 2:
            return self * self
        # Exponentiation by squaring for higher powers; probably not going
        # to happen too much for this application.
        int_root = self ** (power // 2)
        if power % 2 == 0:
            return int_root * int_root
        else:
            return int_root * int_root * self

    def as_poly(self):
        """converts this monomial to a polynomial with only one term"""
        return Mod3Poly({self: 1})

    def __add__(self, other):
        if isinstance(other, Mod3Poly):
            return other + self.as_poly()
        elif isinstance(other, Monomial):
            return self.as_poly() + other.as_poly()
        elif is_integer(other):
            return self.as_poly() + other
        elif isinstance(other, Expression):
            return BinaryOperation("PLUS", self, other)
        else:
            raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))

    def __radd__(self, other):
        return self + other

    def __sub__(self, other):
        return self + ((-1) * other)

    def __rsub__(self, other):
        return ((-1) * self) + other

    def __eq__(self, other):
        if type(other) == str:
            other = Monomial.as_var(other)
        if type(other) == Monomial:
            return self._power_dict == other._power_dict
        elif type(other) == Mod3Poly:
            if len(other.coeff_dict) == 1:
                # BUGFIX: iterating a dict yields keys only; the original
                # unpacked a bare Monomial into (monomial, coeff), which
                # raised TypeError.  Use items() to get the pair.
                monomial, coeff = list(other.coeff_dict.items())[0]
                return coeff == 1 and monomial == self
            else:
                return False
        elif is_integer(other) and self == Monomial.unit():
            return other == 1
        else:
            return False

    def __ne__(self, other):
        if type(other) == str:
            other = Monomial.as_var(other)
        return not (self == other)

    def __lt__(self, other):
        """Strict divisibility-style partial order on exponent vectors."""
        self_vars = set(self._power_dict.keys())
        if type(other) == str:
            other = Monomial.as_var(other)
        other_vars = set(other._power_dict.keys())
        # if we have a var that they don't we cannot be "smaller"
        if len(self_vars - other_vars) > 0:
            return False
        # check that we do not exceed and are smaller at least once
        at_least_once_less = False
        for var in self_vars:
            if self._power_dict[var] > other._power_dict[var]:
                return False
            elif self._power_dict[var] < other._power_dict[var]:
                at_least_once_less = True
        return at_least_once_less or len(other_vars - self_vars) > 0

    def __le__(self, other):
        self_vars = set(self._power_dict.keys())
        if type(other) == str:
            other = Monomial.as_var(other)
        other_vars = set(other._power_dict.keys())
        # if we have a var that they don't we cannot be "smaller"
        if len(self_vars - other_vars) > 0:
            return False
        # check that we do not exceed
        for var in self_vars:
            if self._power_dict[var] > other._power_dict[var]:
                return False
        return True

    def __gt__(self, other):
        self_vars = set(self._power_dict.keys())
        if type(other) == str:
            other = Monomial.as_var(other)
        other_vars = set(other._power_dict.keys())
        # if they have a var that we don't we cannot be "greater"
        if len(other_vars - self_vars) > 0:
            return False
        # check that we are not smaller and are greater at least once
        at_least_once_greater = False
        for var in other_vars:
            if self._power_dict[var] < other._power_dict[var]:
                return False
            elif self._power_dict[var] > other._power_dict[var]:
                at_least_once_greater = True
        return at_least_once_greater or len(self_vars - other_vars) > 0

    def __ge__(self, other):
        self_vars = set(self._power_dict.keys())
        if type(other) == str:
            other = Monomial.as_var(other)
        other_vars = set(other._power_dict.keys())
        # if they have a var that we don't we cannot be "greater"
        if len(other_vars - self_vars) > 0:
            return False
        # check that we are not smaller
        for var in other_vars:
            if self._power_dict[var] < other._power_dict[var]:
                return False
        return True

    def __hash__(self):
        # Order-insensitive combination, consistent with __eq__ (equal
        # power dicts hash equal).
        return sum(hash(k) for k in self._power_dict.keys()) + \
               sum(hash(v) for v in self._power_dict.values())

    def __str__(self):
        if self._power_dict == {}:
            return "1"
        else:
            variables = sorted(self._power_dict.keys())
            return "*".join([str(var) + "^" + str(self._power_dict[var])
                             if self._power_dict[var] > 1 else str(var) for var in variables])

    __repr__ = __str__

    def as_c_expression(self):
        """Render as a C expression using the mod3pow helper."""
        if self._power_dict == {}:
            return "1"
        else:
            variables = sorted(self._power_dict.keys())
            return "*".join(["mod3pow(" + str(var) + "," + str(self._power_dict[var]) + ")"
                             if self._power_dict[var] > 1 else str(var) for var in variables
                             if self._power_dict[var] != 0])

    def as_numpy_str(self, variables) -> str:
        """Render as a numpy expression over ``state[...]`` entries."""
        if len(self._power_dict) == 0:
            return "1"
        # Note: exponent 0 cannot occur (filtered in __init__); the "1"
        # branch is kept only for defensive symmetry.
        return '(' + \
               '*'.join(["1"
                         if self._power_dict[var] == 0 else
                         "state[{0}]".format(variables.index(var))
                         if self._power_dict[var] == 1 else
                         "(state[{0}]**{1})".format(variables.index(var), self._power_dict[var])
                         for var in self._power_dict]) + \
               ')'
####################################################################################################
class Mod3Poly(Expression):
    """a sparse polynomial class over GF(3), with monomials reduced by x^3 == x"""

    def __init__(self, coeffs: Union[Dict, int]):
        """coeffs: either a {Monomial: int} dict or a constant integer."""
        if type(coeffs) == dict:
            # keep only nonzero-coefficient terms
            self.coeff_dict = {monomial: coeffs[monomial] for monomial in coeffs if coeffs[monomial] != 0}
        elif is_integer(coeffs):
            self.coeff_dict = {Monomial.unit(): (coeffs % 3)}
        else:
            raise TypeError("unsupported initialization type for '{}': '{}'".format(self.__class__, type(coeffs)))

    def rename_variables(self, name_dict: Dict[str, str]):
        """Return a copy with variables renamed in every monomial."""
        return Mod3Poly(coeffs={monomial.rename_variables(name_dict): coeff
                                for monomial, coeff in self.coeff_dict.items()})

    @staticmethod
    def zero():
        """The zero polynomial."""
        return Mod3Poly({Monomial.unit(): 0})

    @staticmethod
    def one():
        """The constant-one polynomial."""
        return Mod3Poly({Monomial.unit(): 1})

    def as_polynomial(self):
        return self

    def __int__(self):
        """Cast a constant polynomial to its integer value (0..2)."""
        self.__clear_zero_monomials()
        if len(self.coeff_dict) > 1 or (len(self.coeff_dict) == 1 and Monomial.unit() not in self.coeff_dict):
            raise Exception("cannot cast non-constant polynomial to int")
        if Monomial.unit() in self.coeff_dict:
            return self.coeff_dict[Monomial.unit()]
        else:
            return 0

    def eval(self, variable_dict):
        """evaluates the polynomial. variable_dict is expected to be a dict containing str:Expression or
        Monomial:Expression pairs. The latter are constrained to be of single-variable type. """
        if type(variable_dict) != dict:
            raise Exception("Mod3Poly.eval is not defined on this input")
        accumulator = Mod3Poly.zero()
        for monomial, coeff in self.coeff_dict.items():
            accumulator += coeff * monomial.eval(variable_dict)
        return accumulator

    def get_variable_set(self):
        """return a set containing all variables which occur in this polynomial"""
        var_set = set()
        for monomial in self.coeff_dict:
            var_set = var_set.union(monomial.get_variable_set())
        return var_set

    def __clear_zero_monomials(self):
        """purge unneeded data (zero-coefficient terms)"""
        self.coeff_dict = {monomial: self.coeff_dict[monomial]
                           for monomial in self.coeff_dict
                           if self.coeff_dict[monomial] != 0}
        # assure at least one entry
        if len(self.coeff_dict) == 0:
            self.coeff_dict = {Monomial.unit(): 0}

    def is_constant(self):
        """True iff the polynomial has no non-constant terms."""
        self.__clear_zero_monomials()
        num_nonzero_monomial = len(self.coeff_dict)
        if num_nonzero_monomial > 1:
            return False
        elif num_nonzero_monomial == 0:
            return True
        else:
            # only one entry
            return Monomial.unit() in self.coeff_dict

    def __getitem__(self, index):
        # missing monomials have coefficient zero
        if index in self.coeff_dict:
            return self.coeff_dict[index]
        else:
            return 0

    def __setitem__(self, index, value):
        self.coeff_dict[index] = value

    def __add__(self, other):
        if is_integer(other):
            self_copy = Mod3Poly(self.coeff_dict)
            self_copy[Monomial.unit()] = (self_copy[Monomial.unit()] + other) % 3
            return self_copy
        elif isinstance(other, Monomial):
            self_copy = Mod3Poly(self.coeff_dict)
            # BUGFIX: reduce mod 3; a coefficient of 2 must wrap to 0
            # rather than growing to 3.
            self_copy[other] = (self_copy[other] + 1) % 3
            return self_copy
        elif isinstance(other, Mod3Poly):
            self_copy = Mod3Poly(self.coeff_dict)
            for key in other.coeff_dict.keys():
                if key in self_copy.coeff_dict.keys():
                    self_copy[key] = (self_copy[key] + other[key]) % 3
                else:
                    self_copy[key] = other[key]
            return self_copy
        elif isinstance(other, Expression):
            return BinaryOperation('PLUS', self, other)
        else:
            raise TypeError("unsupported operand type(s) for +: '{}' and '{}'".format(self.__class__, type(other)))

    __radd__ = __add__

    def __sub__(self, other):
        if is_integer(other):
            self_copy = Mod3Poly(self.coeff_dict)
            self_copy[Monomial.unit()] = (self_copy[Monomial.unit()] - other) % 3
            return self_copy
        elif isinstance(other, Mod3Poly) or isinstance(other, Monomial):
            self_copy = Mod3Poly(self.coeff_dict)
            if isinstance(other, Monomial):
                other = other.as_poly()
            for key in other.coeff_dict.keys():
                if key in self_copy.coeff_dict.keys():
                    self_copy[key] = (self_copy[key] - other[key]) % 3
                else:
                    # BUGFIX: a term absent from self contributes its
                    # negation, not its plain value.
                    self_copy[key] = (-other[key]) % 3
            return self_copy
        elif isinstance(other, Expression):
            # symbolic fallback, mirroring __add__
            return BinaryOperation('MINUS', self, other)
        else:
            raise TypeError("unsupported operand type(s) for -: '{}' and '{}'".format(self.__class__, type(other)))

    def __rsub__(self, other):
        return other + ((-1) * self)

    def __mul__(self, other):
        if is_integer(other):
            return Mod3Poly({key: (self.coeff_dict[key] * other) % 3 for key in self.coeff_dict})
        elif isinstance(other, Monomial):
            # BUGFIX: two distinct monomials can reduce to the same product
            # (e.g. 1*x and x^2*x both give x), so accumulate instead of
            # building a dict comprehension that clobbers duplicate keys.
            accumulator = Mod3Poly.zero()
            for monomial, coeff in self.coeff_dict.items():
                prod = other * monomial
                accumulator[prod] = (accumulator[prod] + coeff) % 3
            return accumulator
        elif isinstance(other, Mod3Poly):
            accumulator = Mod3Poly.zero()
            for self_mono, other_mono in product(self.coeff_dict.keys(), other.coeff_dict.keys()):
                monomial_prod = self_mono * other_mono
                accumulator[monomial_prod] = (accumulator[monomial_prod] + self[self_mono] * other[other_mono]) % 3
            return accumulator
        else:
            return BinaryOperation('TIMES', self, other)

    __rmul__ = __mul__

    def __pow__(self, power, **kwargs):
        if type(power) == Mod3Poly and power.is_constant():
            power = power[Monomial.unit()]
        assert is_integer(power)
        # BUGFIX: negative powers previously recursed forever via power // 2.
        assert power >= 0
        if power == 0:
            return Monomial.unit().as_poly()
        elif power == 1:
            return self
        elif power == 2:
            return self * self
        # Exponentiation by squaring for higher powers; probably not going
        # to happen too much for this application.
        int_root = self ** (power // 2)
        if power % 2 == 0:
            return int_root * int_root
        else:
            return int_root * int_root * self

    def __str__(self):
        accumulator = ""
        for monomial in sorted(self.coeff_dict.keys()):
            if monomial == Monomial.unit():
                if self[monomial] != 0:
                    accumulator += str(self[monomial])
            else:
                if len(accumulator) > 0 and self[monomial] != 0:
                    accumulator += "+"
                if self[monomial] == 1:
                    accumulator += str(monomial)
                elif self[monomial] == 2:
                    accumulator += "2*"
                    accumulator += str(monomial)
        if len(accumulator) > 0:
            return accumulator
        else:
            return "0"

    __repr__ = __str__

    def as_c_expression(self):
        """Render as a C expression string (sum of monomial terms)."""
        accumulator = ""
        for monomial in sorted(self.coeff_dict.keys()):
            if monomial == Monomial.unit():
                if self[monomial] != 0:
                    accumulator += str(self[monomial])
            else:
                if len(accumulator) > 0 and self[monomial] != 0:
                    accumulator += "+"
                if self[monomial] == 1:
                    accumulator += monomial.as_c_expression()
                elif self[monomial] == 2:
                    accumulator += "2*"
                    accumulator += monomial.as_c_expression()
        if len(accumulator) > 0:
            return accumulator
        else:
            return "0"

    def as_numpy_str(self, variables) -> str:
        """Render as a numpy expression string (sum of scaled monomials)."""
        return '(' + \
               "+".join(["({0}*({1}))".format(coeff, expr.as_numpy_str(variables))
                         for expr, coeff in self.coeff_dict.items()]) + \
               ')'
| StarcoderdataPython |
6620493 | <reponame>A-Ortiz-L/hyperspectral-imaging-cnn-final-degree-work
from google.cloud import storage
from google.cloud.exceptions import NotFound
from logging import getLogger
log = getLogger(__name__)
class GoogleStorage:
    """Thin convenience wrapper around the google-cloud-storage client."""

    def __init__(self):
        self.client = storage.Client()
        self.storage_list = {}

    def get_bucket(self, bucket_name: str):
        """Return the bucket named ``bucket_name``, or False if it does not exist."""
        try:
            return self.client.get_bucket(bucket_name)
        except NotFound:
            log.warning(f'Could not find bucket={bucket_name}')
            return False

    def download_blob(self, bucket_name: str, source_blob_name: str, destination_file_name: str) -> bool:
        """Download a blob to a local file; return True on success."""
        bucket = self.get_bucket(bucket_name)
        if not bucket or not bucket.get_blob(source_blob_name):
            log.warning(f'Could not download blob={source_blob_name} on bucket={bucket_name}')
            return False
        bucket.blob(source_blob_name).download_to_filename(destination_file_name)
        log.info('Blob {} downloaded to {}.'.format(
            source_blob_name,
            destination_file_name))
        return True
| StarcoderdataPython |
256966 | <gh_stars>10-100
import os
import tempfile
import pytest
FIXTURES_ROOT = os.path.join(os.path.dirname(__file__), "fixtures")
@pytest.fixture()
def fixture_file():
    """Return a callable mapping a fixture file name to its absolute path."""
    def _path(name):
        return os.path.join(FIXTURES_ROOT, name)
    return _path
@pytest.fixture(scope="session")
def image_diff_reference_dir():
    """Directory holding reference screenshots for image-diff comparisons."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "screenshots")
@pytest.fixture(scope="function")
def temp_file():
    """Yield a factory of temporary file paths, removing them on teardown.

    The factory accepts an ``extension`` (used as the file suffix) and a
    ``prefix``; every path handed out is unlinked after the test if a
    file was created there.
    """
    temp_files = []

    def tempfile_factory(extension=".html", prefix="xlsx2html_"):
        # BUGFIX: the `prefix` argument was previously ignored -- a
        # hard-coded prefix was always passed to mktemp.
        tf = tempfile.mktemp(suffix=extension, prefix=prefix)
        temp_files.append(tf)
        return tf

    yield tempfile_factory

    for tf in temp_files:
        if os.path.exists(tf):
            os.unlink(tf)
@pytest.fixture(scope="session")
def splinter_webdriver(request):
    """Webdriver name from the command line, defaulting to chrome."""
    configured = request.config.option.splinter_webdriver
    return configured or "chrome"
@pytest.fixture(scope="session")
def splinter_webdriver_executable(request, splinter_webdriver):
    """Webdriver executable directory."""
    executable = request.config.option.splinter_webdriver_executable
    if not executable and splinter_webdriver == "chrome":
        # Fall back to the bundled chromedriver binary for chrome runs.
        from chromedriver_binary import chromedriver_filename
        executable = chromedriver_filename
    if executable:
        return os.path.abspath(executable)
    return None
def pytest_addoption(parser):
    """Register the --skip-webtest command line flag.

    When given, tests marked `webtest` are excluded from the run (the
    mark expression is adjusted in pytest_configure).
    """
    parser.addoption(
        "--skip-webtest",
        action="store_true",
        dest="skip_webtest",
        default=False,
        help="skip marked webtest tests",
    )
def pytest_configure(config):
    """Combine any user-supplied -m expression with 'not webtest' when
    --skip-webtest was given.
    """
    parts = []
    if config.option.markexpr:
        parts.append(config.option.markexpr)
    if config.option.skip_webtest:
        parts.append("not webtest")
    if parts:
        setattr(config.option, "markexpr", " and ".join(parts))
| StarcoderdataPython |
12808159 | <reponame>Amplo-GmbH/AutoML<filename>tests/unit/api/__init__.py
import pytest
from pathlib import Path
from tests import rmtree
__all__ = ['TestAPI']
class TestAPI:
    """Base class for API tests that need an isolated working directory."""

    # Directory tests synchronise data into; recreated fresh per test.
    sync_dir = Path('./test_dir')

    @pytest.fixture(autouse=True)
    def rmtree_sync_dir(self):
        """Remove sync_dir both before and after every test (autouse)."""
        rmtree(self.sync_dir)
        yield
        rmtree(self.sync_dir)
| StarcoderdataPython |
1627057 | from __future__ import print_function
import os, os.path, shutil, re, glob
import platform as plat
import subprocess as sp
import Tkinter as tk
import tkSimpleDialog as tkSD
import tkMessageBox as tkMB
import tkFont
import ttk
# General GENIE utilities.
import utils as U
# Most of the GUI code is in these modules...
from gui.tooltip import *
from gui.filetreeview import *
from gui.job_folder import *
from gui.job import *
from gui.panels import *
from gui.dialogs import *
from gui.after import *
from gui.util import *
#----------------------------------------------------------------------
# GENIE configuration
# NOTE(review): `sys` is not imported by name in this module's import
# block; it presumably arrives via one of the `from gui.* import *`
# wildcard imports -- verify.
if not U.read_cgenie_config():
    sys.exit('GENIE not set up: run the setup-cgenie script!')
# Platform setup, including any runtime environment variables needed
# to control model output buffering.  The platform file is executed in
# this module's namespace and may define a `runtime_env` dict of
# environment variables to export.
platform = U.discover_platform()
execfile(os.path.join(U.cgenie_root, 'platforms', platform))
if 'runtime_env' in locals():
    for k, v in locals()['runtime_env'].iteritems(): os.environ[k] = v
#----------------------------------------------------------------------
#
# MAIN TKINTER APPLICATION CLASS
#
class Application(AfterHandler, ttk.Frame):
def __init__(self, master=None):
    """Build the main application window and populate the job tree.

    master: optional Tk root widget to attach to.
    """
    ttk.Frame.__init__(self, master)
    AfterHandler.__init__(self, master)
    # Child model processes awaiting wait(); reaped periodically to
    # avoid accumulating zombie processes (see run_job).
    self.reapable = set()
    # Set up monospaced and bold fonts.
    self.normal_font = tkFont.nametofont('TkDefaultFont')
    self.mono_font = tkFont.nametofont('TkFixedFont')
    self.bold_font = self.normal_font.copy()
    sz = self.normal_font.cget('size')
    self.bold_font.configure(weight='bold', size=int(1.5*sz))
    # Initialise job member and find all available model
    # configurations.
    self.job = Job()
    self.restart_jobs = []
    self.find_configs()
    # Set up UI: this creates the main window (with a menu bar,
    # the "job tree", a button toolbar and a notebook for the main
    # part of the window) plus all the "panels" that appear in the
    # notebook part of the UI.
    self.grid(sticky=tk.N+tk.E+tk.S+tk.W)
    self.create_widgets()
    # Creating this JobFolder object populates the job tree in the
    # UI.
    self.job_folder = JobFolder(U.cgenie_jobs, 'My Jobs', self.tree, self)
    # Find jobs suitable for use as restarts and update the setup
    # panel to use them.
    self.restart_jobs = self.job_folder.find_restart_jobs()
    self.panels['setup'].update()
#------------------------------------------------------------------
#
# MAIN BUTTON CALLBACKS
#
def new_job(self):
    """Create a new job (button press callback).

    Prompts for a job name, creates the job directory with a
    placeholder configuration file, and selects the new job in the
    tree.
    """
    # Get folder location for new job: walk up from the current
    # selection to the nearest enclosing folder.
    loc = self.tree.selection()[0]
    while not self.job_folder.is_folder(loc):
        loc = self.tree.parent(loc)
    # Get new job name and check that it does not already exist.
    job_name = tkSD.askstring("New job", "Name for new job")
    if not job_name: return
    jobdir = os.path.join(loc, job_name)
    jobid = os.path.relpath(jobdir, self.job_folder.base_path)
    if os.path.exists(jobdir):
        tkMB.showerror('Error', job_name + ' already exists!')
        return
    # Create job folder.
    try:
        os.mkdir(jobdir)
        os.mkdir(os.path.join(jobdir, 'config'))
    except Exception as e:
        tkMB.showerror('Error', "Couldn't create directory: " + jobdir)
        return
    # Write initial placeholder job configuration file: '?' values
    # mark settings still to be filled in via the setup panel.
    with open(os.path.join(jobdir, 'config', 'config'), 'w') as fp:
        print('base_config: ?', file=fp)
        print('user_config: ?', file=fp)
        print('run_length: ?', file=fp)
        print('t100: ?', file=fp)
    # Add job entry to tree and select.
    self.job_folder.add_job(jobid, True)
    self.tree.see(jobdir)
    self.tree.selection_set(jobdir)
def new_folder(self):
    """Create a new folder (button press callback).

    Prompts for a folder name, creates the directory on disk and adds
    a matching entry to the job tree.
    """
    # Get folder location for new folder: walk up from the selection
    # to the nearest tree item that has children.
    loc = self.tree.selection()[0]
    while len(self.tree.get_children(loc)) == 0:
        loc = self.tree.parent(loc)
    # Get new folder name and check that it does not already exist.
    folder_name = tkSD.askstring("New folder", "Name for new folder")
    if not folder_name: return
    folder = os.path.join(loc, folder_name)
    p = os.path.join(U.cgenie_jobs, folder)
    if os.path.exists(p):
        tkMB.showerror('Error', folder_name + ' already exists!')
        return
    # Create new folder.
    try:
        os.mkdir(p)
    except Exception as e:
        tkMB.showerror('Error', "Couldn't create directory: " + p)
        return
    # Add folder entry to tree and select.
    # BUGFIX: os.relpath does not exist (the helper lives in os.path),
    # so the original line raised AttributeError at runtime.
    self.job_folder.add_folder(os.path.relpath(folder, U.cgenie_jobs), True)
    self.tree.selection_set(p)
def move_rename(self):
    """Move or rename a folder or job (button press callback)."""
    # Find current full path to the selected element in the job
    # tree and determine whether it's a job or a folder.
    full_path = self.tree.selection()[0]
    is_folder = self.job_folder.is_folder(full_path)
    # Run the move/rename dialog and act on the result (d.result is
    # falsy if the user cancelled).
    d = MoveRenameDialog(full_path, is_folder,
                         self.job_folder.possible_folders())
    if d.result:
        new_full_path = os.path.join(d.new_folder, d.new_name)
        try:
            self.job_folder.move(full_path, new_full_path)
            self.tree.see(new_full_path)
            self.tree.selection_set(new_full_path)
        except Exception as e:
            print(e)
            tkMB.showwarning('Move/rename failed', 'Oops', parent=self)
def delete_job(self):
    """Delete a job or folder from tree and disk (button press callback).

    Asks for confirmation first: deleting a folder recursively removes
    every job beneath it.
    """
    # Determine whether a job or a folder is selected.
    p = self.tree.selection()[0]
    if self.job_folder.is_folder(p):
        msg = 'Are you sure you want to delete this folder?\n\n'
        msg += 'This will delete all jobs beneath the folder!\n\n'
        msg += 'This action is IRREVERSIBLE!'
    else:
        msg = 'Are you sure you want to delete this job?\n\n'
        msg += 'This action is IRREVERSIBLE!'
    # Confirmation dialog -- single job or folder.
    chk = tkMB.askokcancel('Confirm deletion', msg)
    if not chk: return
    # Find adjacent item ID for post-delete selection.
    post = self.tree.next(p)
    if not post: post = self.tree.prev(p)
    # Recursively delete on disk.
    try:
        shutil.rmtree(p)
    except Exception as e:
        tkMB.showerror('Error', "Couldn't delete directory: " + p)
        return
    # Delete from tree and select the adjacent item.
    self.tree.selection_set(post)
    self.job_folder.delete(p)
def clone_job(self):
    """Clone a job (button press callback).

    The clone gets a name derived from the original ("X-CLONE",
    "X-CLONE2", ...); the user can rename it afterwards.
    """
    # Derive a unique name for the clone.
    src = self.tree.selection()[0]
    suffix = 1
    dst = src + '-CLONE'
    while os.path.exists(dst):
        suffix += 1
        dst = src + '-CLONE' + str(suffix)
    # Clone the job on disk and in the job tree, make it visible
    # and select it.
    self.job_folder.clone(src, dst)
    self.tree.see(dst)
    self.tree.selection_set(dst)
def clear_job(self):
    """Clear all output data for the selected job (button press callback).

    Asks for confirmation, then removes status/command/log files, model
    output, "run segment" records and GUI restart files, leaving the
    job's configuration intact.
    """
    # Confirmation dialog.
    p = self.tree.selection()[0]
    msg = 'Are you sure you want to clear\n'
    msg += 'all output data for this job?\n\n'
    msg += 'This action is IRREVERSIBLE!'
    chk = tkMB.askokcancel('Confirm deletion', msg)
    if not chk: return
    # Clear panels: this MUST be done before trying to delete any
    # files so that any data files get closed.  Otherwise Windows
    # won't delete them...
    for pan in self.panels.itervalues(): pan.clear()
    # Clean everything up: status, command and log files, model
    # output, "run segment" configuration storage and GUI restart
    # files.
    if os.path.exists(os.path.join(p, 'status')):
        os.remove(os.path.join(p, 'status'))
    if os.path.exists(os.path.join(p, 'command')):
        os.remove(os.path.join(p, 'command'))
    if os.path.exists(os.path.join(p, 'run.log')):
        os.remove(os.path.join(p, 'run.log'))
    for d, ds, fs in os.walk(os.path.join(p, 'output')):
        for f in fs: os.remove(os.path.join(d, f))
    if os.path.exists(os.path.join(p, 'config', 'seglist')):
        os.remove(os.path.join(p, 'config', 'seglist'))
    if os.path.exists(os.path.join(p, 'config', 'segments')):
        shutil.rmtree(os.path.join(p, 'config', 'segments'))
    for f in glob.iglob(os.path.join(p, 'gui_restart_*.nc')):
        os.remove(f)
    # Update record of job data and reflect the changes in the
    # panel views.
    self.update_job_data()
def run_job(self):
    """Run (or resume) the selected job (button press callback).

    Copies the built model executable into the job directory, writes a
    GUI_RESTART command file if resuming from a pause, then launches
    the model as a child process with output appended to run.log.
    """
    # Check for existence of genie-ship.exe executable and build
    # if necessary (BuildExecutableDialog returns a falsy result on
    # cancel/failure).
    exe = os.path.join(U.cgenie_jobs, 'MODELS', U.cgenie_version,
                       platform, 'ship', 'genie.exe')
    runexe = os.path.join(self.job.jobdir, 'genie-ship.exe')
    if not os.path.exists(exe):
        d = BuildExecutableDialog(self, self.job.jobdir)
        if not d.result: return
    if os.path.exists(runexe): os.remove(runexe)
    shutil.copy(exe, runexe)
    # Set up GUI_RESTART command file if this is a restart after a
    # pause (the model reads it on startup to resume its clocks).
    command = os.path.join(self.job.jobdir, 'command')
    if os.path.exists(command): os.remove(command)
    jpath = os.path.relpath(self.job.jobdir, self.job_folder.base_path)
    if self.job_folder.status[jpath] == 'PAUSED':
        st, koverall, dum, genie_clock = self.job.status_params()
        with open(command, 'w') as fp:
            print('GUI_RESTART', koverall, genie_clock, file=fp)
    # Start executable with stdout and stderr directed to run.log
    # in job directory.  Add the resulting process to the
    # "reapable" set.  We check this list periodically and reap
    # (i.e. wait for) any processes that have finished.  On Linux,
    # you need to do this to make sure that you don't end up with
    # a load of defunct "zombie" processes hanging around, waiting
    # for their parent to notice that they've finished.  (This is
    # only really a problem because the existence of a zombie
    # process still marks the executable that the process was
    # running as being in use, which means you can't overwrite it.
    # If you run a job to completion from the GUI, then extend its
    # run length and try to continue it, this means that you can't
    # update the GENIE executable, as we do above in the line that
    # says "shutil.copy(exe, runexe)".  If you try to do that, you
    # get an error saying something like "Text file busy".  The
    # solution is to explicitly clean up these child processes
    # when they finish.  If the GUI exits before the model
    # processes that it starts, those processes become orphaned,
    # which is fine, since the kernel reparents them to the init
    # process and they can happily continue running.)
    with open(os.path.join(self.job.jobdir, 'run.log'), 'a') as fp:
        try:
            pipe = sp.Popen(runexe, cwd=self.job.jobdir,
                            stdout=fp, stderr=sp.STDOUT)
            self.reapable.add(pipe)
        except Exception as e:
            tkMB.showerror('Error', 'Failed to start GENIE executable!')
def pause_job(self):
    """Pause a running job (button press callback).

    Signals the model by writing a 'command' file in the job
    directory; the model polls for this file and pauses when it
    reads PAUSE.
    """
    command_path = os.path.join(self.job.jobdir, 'command')
    with open(command_path, 'w') as fp:
        fp.write('PAUSE\n')
#----------------------------------------------------------------------
#
# JOB TREE MANAGEMENT
#
def item_selected(self, event=None):
    """Callback for item selection in job tree."""
    # If we have a real job selected in the tree, make a Job
    # object for it and recalculate the possible restart jobs for
    # the setup panel.  Items with children, and known folders, are
    # treated as folders (self.job becomes None).
    jobid = self.tree.selection()[0]
    if (len(self.tree.get_children(jobid)) != 0 or
        self.job_folder.is_folder(jobid)):
        self.job = None
    else:
        self.job = Job(jobid, self.job_folder)
    self.restart_jobs = self.job_folder.find_restart_jobs()
    # Let all the panels know we have a new selected job.
    for p in self.panels.itervalues(): p.set_job(self.job)
    # Set the action button states depending on the job status.
    self.set_job_buttons()
# Buttons that change state depending on the state of the
# currently selected job.  Some buttons (add job, add folder,
# etc.) are always enabled.
switchable_buttons = ['move_rename', 'delete_job', 'clear_job',
                      'clone_job', 'run_job', 'pause_job']
# Enabled buttons for different states of selected job; keys are the
# status strings reported by Job.status.
state_buttons = { 'UNCONFIGURED': ['move_rename', 'clear_job', 'delete_job',
                                   'clone_job'],
                  'RUNNABLE': ['move_rename', 'clear_job', 'delete_job',
                               'clone_job', 'run_job'],
                  'RUNNING': ['pause_job'],
                  'PAUSED': ['move_rename', 'clear_job', 'delete_job',
                             'clone_job', 'run_job'],
                  'COMPLETE': ['move_rename', 'clear_job', 'delete_job',
                               'clone_job'],
                  'ERRORED': ['move_rename', 'clear_job', 'delete_job',
                              'clone_job'] }
def set_job_buttons(self):
    """Enable or disable toolbar action buttons depending on the state
    of the selected job (or folder).
    """
    sel = self.tree.selection()[0]
    if self.job == None:
        # A folder is selected: for folders other than the
        # top-level "My Jobs" folder, we can move/rename or delete
        # the folder.
        for k, v in self.tool_buttons.iteritems():
            if k in self.switchable_buttons:
                e = ((k == 'move_rename' or k == 'delete_job')
                     and self.tree.parent(sel) != '')
                enable(v, e)
    else:
        # A job is selected: the actions that are enabled depend
        # on the job status (see state_buttons above).
        on_buttons = self.state_buttons[self.job.status]
        for k, v in self.tool_buttons.iteritems():
            if k in self.switchable_buttons:
                e = k in on_buttons
                enable(v, e)
def set_menu_state(self):
"""Enable or disable menu items depending on the state of the selected
job.
"""
sel = self.tree.selection()[0]
if self.job == None:
# A folder is selected: for folders other than the
# top-level "My Jobs" folder, we can move/rename or delete
# the folder.
for k, v in self.tool_buttons.iteritems():
if k in self.switchable_buttons:
e = ((k == 'move_rename' or k == 'delete_job')
and self.tree.parent(sel) != '')
self.job_menu.entryconfig(self.menu_items[k],
state=tk.NORMAL if e
else tk.DISABLED)
else:
# A job is selected: the actions that are enabled depend
# on the job status.
on_buttons = self.state_buttons[self.job.status]
for k, v in self.tool_buttons.iteritems():
if k in self.switchable_buttons:
e = k in on_buttons
self.job_menu.entryconfig(self.menu_items[k],
state=tk.NORMAL if e
else tk.DISABLED)
def update_job_data(self):
"""Runs on a timer to update job data in panels, maintain action
button states and clean up child processes.
"""
if self.job: self.job.set_status()
self.panels['status'].update()
self.panels['output'].update()
self.set_job_buttons()
self.reap()
self.after(500, self.update_job_data)
#----------------------------------------------------------------------
#
# UI SETUP
#
    def create_widgets(self):
        """UI layout: paned window with the job tree on the left; toolbar,
        notebook panels and main menu on the right.  Also starts the
        500 ms update_job_data timer.
        """
        # The main window is paned, so you can drag the divider
        # between the job tree and the main notebook part of the
        # window.
        self.pane = ttk.PanedWindow(self, orient=tk.HORIZONTAL)
        # The job tree is populated by the creation of the JobFolder
        # object in the application constructor.  Here we just set it
        # up as an empty tree and put it into the paned view.
        #
        # Setting the rowheight attribute in the global Treeview style
        # May be better to configure a style specific to this filetreeview?
        s = ttk.Style()
        s.configure('Treeview', rowheight=int(self.bold_font.cget('size')*2.0))
        #
        self.tree = FileTreeview(self.pane, selectmode='browse')
        self.tree.bind('<<TreeviewSelect>>', self.item_selected)
        self.pane.add(self.tree)
        # The right-hand part of the main window has the button
        # toolbar and the panel notebook, so we make a frame to
        # contain them.
        self.main_frame = ttk.Frame(self.pane)
        self.pane.add(self.main_frame)
        # Create toolbar buttons -- the names of the tools here
        # (e.g. "new_job") refer both to the methods that are called
        # when the buttons are pressed and to the image files that are
        # used to make the buttons (which live in
        # <cgenie_root>/tools/images).  Each of the buttons has
        # floating tooltip help, implemented using a helper class.
        self.toolbar = ttk.Frame(self.main_frame)
        self.tool_buttons = { }
        # Third element of each entry: whether the action opens a dialog
        # (used below to append "..." to the menu label).
        tool_info = [['new_job',     'New job', True],
                     ['new_folder',  'New folder', True],
                     ['move_rename', 'Move/rename job', True],
                     ['delete_job',  'Delete job', True],
                     ['clear_job',   'Clear job output', True],
                     ['clone_job',   'Clone job', True],
                     ['spacer', '', False],
                     ['run_job',     'Run job', False],
                     ['pause_job',   'Pause job', False]]
        for t, title, dia in tool_info:
            if t == 'spacer':
                f = ttk.Frame(self.toolbar, height=16)
                f.pack()
            else:
                img = tk.PhotoImage(file=os.path.join(U.cgenie_root, 'tools',
                                                      'images', t + '.gif'))
                b = ttk.Button(self.toolbar, image=img,
                               command=getattr(self, t))
                # Keep a reference to the image so it isn't garbage-collected.
                b.image = img
                b.pack()
                ToolTip(b, title)
                self.tool_buttons[t] = b
        # Set up default notebook panels.
        self.notebook = ttk.Notebook(self.main_frame)
        self.panels = { }
        self.panels['status'] = StatusPanel(self.notebook, self)
        self.panels['setup'] = SetupPanel(self.notebook, self)
        self.panels['namelists'] = NamelistPanel(self.notebook, self)
        self.panels['output'] = OutputPanel(self.notebook, self)
        self.panels['plots'] = PlotPanel(self.notebook, self)
        # Enable window resizing and place widgets.
        top = self.winfo_toplevel()
        top.rowconfigure(0, weight=1)
        top.columnconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.pane.columnconfigure(0, weight=1)
        self.pane.rowconfigure(0, weight=1)
        self.main_frame.columnconfigure(0, weight=0)
        self.main_frame.columnconfigure(1, weight=1)
        self.main_frame.rowconfigure(0, weight=1)
        self.pane.grid(column=0, row=0, sticky=tk.N+tk.E+tk.S+tk.W)
        self.toolbar.grid(column=0, row=0, sticky=tk.N+tk.E+tk.S+tk.W)
        self.notebook.grid(column=1, row=0, sticky=tk.N+tk.E+tk.S+tk.W)
        # Create a main menu using mostly the same items as appear in
        # the toolbar.  Having a menu as well as a toolbar means that
        # it's possible to add less frequently used actions to the
        # menu so that the main part of the GUI doesn't get too
        # cluttered.
        self.menu = tk.Menu(top)
        self.menu_items = { }
        top['menu'] = self.menu
        # postcommand keeps the menu's enabled/disabled state in sync
        # with the selected job each time the menu is opened.
        self.job_menu = tk.Menu(self.menu, tearoff=0,
                                postcommand=self.set_menu_state)
        self.menu.add_cascade(label='Job', menu=self.job_menu)
        it = 0
        for t, title, dia in tool_info:
            if t == 'spacer':
                self.job_menu.add_separator()
            else:
                if dia: title += '...'
                c = getattr(self, t)
                self.job_menu.add_command(label=title, command=c)
                self.menu_items[t] = it
            it += 1
        self.job_menu.add_separator()
        self.job_menu.add_command(label='Quit', command=self.quit)
        # Start the background update timer.
        self.after(500, self.update_job_data)
#----------------------------------------------------------------------
#
# UTILITY FUNCTIONS
#
def find_configs(self):
"""Find all base and user configuration files"""
# Base configuration files -- all in one directory.
bs = os.listdir(os.path.join(U.cgenie_data, 'base-configs'))
bs = filter(lambda s: s.endswith('.config'), bs)
self.base_configs = map(lambda s: s.rpartition('.')[0], bs)
self.base_configs.sort()
# User configuration files -- need to walk the directory
# hierarchy here.
us = []
udir = os.path.join(U.cgenie_data, 'user-configs')
for d, ds, fs in os.walk(udir):
for f in fs:
us.append(os.path.relpath(os.path.join(d, f), udir))
self.user_configs = us
self.user_configs.sort()
def reap(self):
"""Reap child processes"""
# Check the status of all child processes that have been
# recorded as reapable, reap those that have finished (which
# is indicated by the Popen.poll method returning something
# other than None) by waiting for them (which does nothing
# here except make sure they're removed from the kernel's
# process table), then removing them from our "reapable" set.
reaped = set()
for ch in self.reapable:
if ch.poll() != None:
ch.wait()
reaped.add(ch)
self.reapable -= reaped
#----------------------------------------------------------------------
#
# MAIN PROGRAM
#
# The "main program" here just initialises the Tkinter toolkit,
# creates the main Application object, sets a couple of top-level
# properties (window title and geometry and a window manager protocol
# handler to deal with window close events), then fires off the
# Tkinter event loop. Everything else that happens is driven by
# callbacks from menu or button actions or other GUI component
# interactions.
root = tk.Tk()
app = Application(root)
app.master.title("cGENIE GUI")
app.master.geometry("1024x768")
# Route window-close events through app.quit so application shutdown
# runs instead of the window simply being destroyed.
root.protocol("WM_DELETE_WINDOW", app.quit)
app.mainloop()
| StarcoderdataPython |
8164255 | <reponame>mapsme/mwm.py<filename>setup.py
from setuptools import setup
from os import path
from mwm import __version__
here = path.abspath(path.dirname(__file__))
# Distribution metadata for the "mwm" package: bundles mwm/types.txt as
# package data and installs the "mwmtool" console script.
setup(
    name='mwm',
    version=__version__,
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['mwm'],
    package_data={'mwm': ['types.txt']},
    url='https://github.com/mapsme/mwm.py',
    license='Apache License 2.0',
    description='Library to read binary MAPS.ME files.',
    long_description=open(path.join(here, 'README.rst')).read(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        'Environment :: Console',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    entry_points={
        'console_scripts': ['mwmtool = mwm.mwmtool:main']
    },
)
| StarcoderdataPython |
1820732 | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load QM7b dataset (the original comment said Tox21, but
# load_qm7b_from_mat is what is actually called here).
tasks, datasets, transformers = dc.molnet.load_qm7b_from_mat()
train_dataset, valid_dataset, test_dataset = datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
# Batch size of models
batch_size = 50
# DTNN graph model: embedding -> two interaction steps -> gather.
graph_model = dc.nn.SequentialDTNNGraph(n_distance=100)
graph_model.add(dc.nn.DTNNEmbedding(n_embedding=20))
graph_model.add(dc.nn.DTNNStep(n_embedding=20, n_distance=100))
graph_model.add(dc.nn.DTNNStep(n_embedding=20, n_distance=100))
graph_model.add(dc.nn.DTNNGather(n_embedding=20))
n_feat = 20
model = dc.models.MultitaskGraphRegressor(
    graph_model,
    len(tasks),
    n_feat,
    batch_size=batch_size,
    learning_rate=1e-3,
    learning_rate_decay_time=1000,
    optimizer_type="adam",
    beta1=.9,
    beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| StarcoderdataPython |
363927 | # Copyright 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from PyQt5 import QtCore, QtGui, QtWidgets
import os
import stat
import subprocess
import threading
import time
import webbrowser
# --- Module-wide mutable state shared by the GUI and worker threads ---
run_list = {}       # program basename -> [stdin to feed, expected stdout]
compiled_list = {}  # user -> names of .exe files found under Compilados/<user>
original_wd = os.getcwd()  # project root, captured before any os.chdir()
user_count = 0      # workers finished so far in the current clone/compile pass
user_number = 0     # total workers launched in the current pass
progress_count = 0  # compilation progress bar position
progress_max = 0
run_total = 0       # number of program runs scheduled by _run
run_count = 1       # current run number (1-based) while executing programs
users_file_info = {}  # user -> [[compiled file names], [correct-output names]]
users_compiled = {}   # NOTE(review): appears unused in this part of the file
output_verification = -1  # -1 = waiting, 1 = marked correct, 0 = marked wrong
compiled = False    # True once a compile pass has populated users_file_info
# Message queues: filled by worker threads, drained by GUI timer callbacks.
clone_buffer = []
compile_buffer = []
output_buffer = []
def rmtree(path):  # alternative to shutil.rmtree()
    """Recursively delete *path*.

    Unlike shutil.rmtree, each file's read-only flag is cleared before
    unlinking, so write-protected files (e.g. git object files on
    Windows) do not abort the removal.
    """
    # Bottom-up walk: files and sub-directories are gone before their
    # parent directory is removed.
    for folder, subdirs, filenames in os.walk(path, topdown=False):
        for fname in filenames:
            target = os.path.join(folder, fname)
            os.chmod(target, stat.S_IWUSR)  # make writable first
            os.remove(target)
        for dname in subdirs:
            os.rmdir(os.path.join(folder, dname))
    os.rmdir(path)
def _clone(user):
    """Worker thread: clone one user's GitHub repository.

    *user* is a [username, repository] pair; the clone lands in
    Usuarios/<username> under the original working directory, replacing
    any previous clone.  Progress and error messages are appended to
    clone_buffer, which the GUI drains from a timer.
    """
    user_path = original_wd + "\\Usuarios\\" + user[0]
    os.chdir(original_wd + "\\Usuarios\\")
    # Start from a clean checkout: remove any previous clone first.
    if os.path.exists(user_path):
        rmtree(user_path)
    clone_buffer.append("#Clonando repositório de %s..." % user[0])
    p = subprocess.Popen(["git", "clone", "http://github.com/%s/%s" % (user[0], user[1]), user[0]],
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         creationflags=0x08000000)
    clone_response = p.communicate()
    # git reports failures on stderr with a leading "fatal".
    if 'fatal' in clone_response[1].decode('latin-1'):
        # (Typo fix: "respositório" -> "repositório".)
        clone_buffer.append("#Erro ao clonar repositório de %s. Erro: %s" %
                            (user[0], clone_response[1].decode('latin-1')))
    else:
        clone_buffer.append("-Repositório de %s clonado com sucesso." % user[0])
    # NOTE(review): user_count += 1 runs on several threads with no lock;
    # a lost update would leave the final message unsent -- confirm this
    # is acceptable.
    global user_number, user_count
    user_count += 1
    if user_count == user_number:
        clone_buffer.append("==Clonagem finalizada.")
# def queue_compile(users, c_files_list):
# for user in users:
# _compile(user, c_files_list)
def _compile(user, c_files_list):
    """Worker thread: compile one user's C files with g++.

    Walks Usuarios/<user> for .c files; every file whose name appears in
    *c_files_list* is compiled to Compilados/<user>/<name>.exe.  Results
    are written to logs/<user>_log.txt and streamed to compile_buffer
    for the GUI timer to display.  Successfully compiled names are
    recorded in users_file_info[user][0]; the last worker to finish sets
    the module-level `compiled` flag.
    """
    global progress_count, compiled
    if user not in users_file_info:
        users_file_info[user] = [[], []]
    user_c_files = []
    user_log = open(original_wd + "\\logs\\%s_log.txt" % user, "w")
    user_log.write("Compilando\n" + 60 * "-" + "\n")
    compile_buffer.append("--" + user + " iniciado.")
    for root, dirs, files in os.walk(os.path.join(original_wd, "Usuarios", user)):
        for name in files:
            if name[-2:] == ".c":
                user_c_files.append(name)
                if name in c_files_list:
                    comp_process = subprocess.Popen(["g++", "-o",
                                                     os.path.join(original_wd,
                                                                  "Compilados\\%s\\%s.exe" % (user, name[0:-2])),
                                                     os.path.join(root, name)], stdin=subprocess.PIPE,
                                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                                    creationflags=0x08000000)
                    comp_response = comp_process.communicate()[1].decode('latin-1')
                    # Empty stderr means g++ succeeded.  (Bug fix: the
                    # original used `comp_response is ""`, an identity
                    # comparison that only works by interning accident.)
                    if comp_response == "":
                        compile_buffer.append("#%s: %s compilado com sucesso." % (user, name))
                        user_log.write("#%s compilado com sucesso.\n" % name)
                        users_file_info[user][0].append(name)
                    else:
                        compile_buffer.append("--Erro ao compilar " + name + ". Erro: \n\n" + comp_response + "\n\n")
                        user_log.write("\n--Erro ao compilar " + name + ". Erro: \n===============\n" + comp_response
                                       + "\n===============\n\n")
                    progress_count += 1
    user_log.write("\n")
    # Report any requested files that were missing from the clone.
    for c_file in c_files_list:
        if c_file not in user_c_files:
            compile_buffer.append("#%s: %s não encontrado.\n" % (user, c_file))
            user_log.write("#%s não encontrado.\n" % c_file)
            time.sleep(1)
            progress_count += 1
    compile_buffer.append("--%s finalizado.\n" % user)
    user_log.write(60 * "-" + "\n")
    user_log.close()
    # NOTE(review): unsynchronized counter shared between worker threads.
    global user_number, user_count
    user_count += 1
    if user_count == user_number:
        compile_buffer.append("==Compilação finalizada.")
        compiled = True
def _run(run_list, user_list):
    """Worker thread: run each compiled program for every user.

    *run_list* (shadows the module-level global of the same name) maps
    program basename -> [stdin string, expected stdout].  Each program's
    output is pushed to output_buffer for the GUI to show, then this
    thread busy-waits on output_verification until the user marks the
    output correct (1) or incorrect (0) via the GUI buttons.
    """
    global output_verification, compiled_list, run_total, run_count
    # Rebuild the per-user list of executables from disk and count how
    # many of them are scheduled to run (for progress reporting).
    compiled_list = {}
    for user in user_list:
        compiled_list[user] = []
        if not compiled:
            users_file_info[user] = [[], []]
        for root, dirs, files in os.walk(os.getcwd() + "\\Compilados\\" + user):
            for name in files:
                if name[-4:] == ".exe":
                    compiled_list[user].append(name)
                    if name[:-4] in run_list:
                        run_total += 1
    for user in user_list:
        user_log = open(original_wd + '\\logs\\%s_log.txt' % user, 'a')
        user_log.write("Rodando\n" + 60*'-' + '\n')
        for name in compiled_list[user]:
            if name[0:-4] in run_list:
                user_log.write('#' + name + '\n')
                output_buffer.append('#%s: %s' % (user, name))
                time_out = 0
                prog_input, prog_output = run_list[name[0:-4]][0], run_list[name[0:-4]][1]
                run_process = subprocess.Popen(["%s" % os.path.join(original_wd, "Compilados", user, name)],
                                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE, creationflags=0x08000000)
                run_process.stdin.write(bytes(prog_input, 'UTF-8'))
                try:
                    run_response = run_process.communicate(timeout=1)[0].decode('latin-1')
                # NOTE(review): this broad except is presumably meant for
                # subprocess.TimeoutExpired; the child is not killed on
                # timeout, so a hung program keeps running -- confirm.
                except Exception:
                    output_buffer.append("====Tempo de execução excedido.")
                    user_log.write("==Tempo de execução excedido.\n")
                    time_out = 1
                if not time_out:
                    user_log.write("--Entrada fornecida: '%s'\n" % prog_input)
                    try:
                        user_log.write("--Saída do programa:\n" + 45 * "\\" + "\n%s\n"
                                       % run_response + 45 * "/" + "\n")
                    except Exception:
                        user_log.write("--Saída inesperada.\n")
                    user_log.write("--Saída esperada: '%s'\n" % prog_output)
                    output_buffer.append("--Entrada fornecida: '%s'\n" % prog_input)
                    output_buffer.append("--Saída do programa:\n" + 45 * "\\" + "\n%s\n"
                                         % run_response + 45 * "/" + "\n")
                    output_buffer.append("--Saída esperada: '%s'\n" % prog_output)
                    # Busy-wait until the GUI thread records the verdict
                    # (1 = correct, 0 = incorrect) for this run.
                    while 1:
                        if output_verification == 1:
                            user_log.write("==Saída correta!\n\n")
                            run_count += 1
                            users_file_info[user][1].append(name)
                            output_verification = -1
                            break
                        elif output_verification == 0:
                            user_log.write("==Saída incorreta!\n\n")
                            run_count += 1
                            output_verification = -1
                            break
                        time.sleep(.5)
                else:
                    # Timed out: still wait for an acknowledgement click
                    # before moving on to the next program.
                    output_buffer.append("Pressionar qualquer botão para continuar.")
                    while 1:
                        if output_verification == 1 or output_verification == 0:
                            output_verification = -1
                            run_count += 1
                            break
                        time.sleep(.5)
            elif name[-4:] == '.exe':
                # Not scheduled to run: count it as "correct" output.
                users_file_info[user][1].append(name)
        if not compiled:
            for file in compiled_list[user]:
                users_file_info[user][0].append(file[:-4] + '.c')
        output_buffer.append("%s finalizado.\n" % user)
        user_log.write(60 * "-" + "\n")
        user_log.close()
    output_buffer.append("Finalizado.\n")
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the four-tab UI (Clonar / Compilar / Rodar / Resultados),
        wire every widget signal to its handler and create the timers
        that drain the worker-thread message buffers.  The structure
        follows the Qt Designer / pyuic pattern (setupUi +
        retranslateUi + connectSlotsByName).
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(380, 510)
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 381, 501))
        self.tabWidget.setObjectName("tabWidget")
        # --- Tab "Clonar": user/repository entry, clone tree and log ---
        self.cloneTab = QtWidgets.QWidget()
        self.cloneTab.setObjectName("cloneTab")
        self.lineUserEntry = QtWidgets.QLineEdit(self.cloneTab)
        self.lineUserEntry.setGeometry(QtCore.QRect(10, 10, 111, 20))
        self.lineUserEntry.setObjectName("lineUserEntry")
        self.btnAddUser = QtWidgets.QPushButton(self.cloneTab)
        self.btnAddUser.setGeometry(QtCore.QRect(10, 40, 111, 21))
        self.btnAddUser.setObjectName("btnAddUser")
        self.btnRemoveUser = QtWidgets.QPushButton(self.cloneTab)
        self.btnRemoveUser.setGeometry(QtCore.QRect(260, 130, 101, 21))
        self.btnRemoveUser.setObjectName("btnRemoveUser")
        self.lineRepEntry = QtWidgets.QLineEdit(self.cloneTab)
        self.lineRepEntry.setGeometry(QtCore.QRect(140, 10, 41, 20))
        self.lineRepEntry.setObjectName("lineRepEntry")
        self.lineListEntry = QtWidgets.QLineEdit(self.cloneTab)
        self.lineListEntry.setGeometry(QtCore.QRect(10, 70, 111, 20))
        self.lineListEntry.setObjectName("lineListEntry")
        self.btnAddList = QtWidgets.QPushButton(self.cloneTab)
        self.btnAddList.setGeometry(QtCore.QRect(10, 100, 111, 21))
        self.btnAddList.setObjectName("btnAddList")
        self.btnClone = QtWidgets.QPushButton(self.cloneTab)
        self.btnClone.setGeometry(QtCore.QRect(140, 40, 221, 81))
        self.btnClone.setObjectName("btnClone")
        self.btnRemoveAll = QtWidgets.QPushButton(self.cloneTab)
        self.btnRemoveAll.setGeometry(QtCore.QRect(260, 160, 101, 21))
        self.btnRemoveAll.setObjectName("btnRemoveAll")
        self.textCloneLog = QtWidgets.QTextEdit(self.cloneTab)
        self.textCloneLog.setGeometry(QtCore.QRect(10, 330, 351, 121))
        self.textCloneLog.setObjectName("textCloneLog")
        self.textCloneLog.setReadOnly(1)
        self.treeCloneUsers = QtWidgets.QTreeWidget(self.cloneTab)
        self.treeCloneUsers.setGeometry(QtCore.QRect(10, 130, 241, 192))
        self.treeCloneUsers.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
        self.treeCloneUsers.setObjectName("treeCloneUsers")
        self.treeCloneUsers.header().setDefaultSectionSize(138)
        self.pushButton = QtWidgets.QPushButton(self.cloneTab)
        self.pushButton.setGeometry(QtCore.QRect(260, 190, 101, 51))
        self.pushButton.setObjectName("pushButton")
        self.tabWidget.addTab(self.cloneTab, "")
        # --- Tab "Compilar": file lists, progress bar and compile log ---
        self.compileTab = QtWidgets.QWidget()
        self.compileTab.setObjectName("compileTab")
        self.listUsers = QtWidgets.QListWidget(self.compileTab)
        self.listUsers.setGeometry(QtCore.QRect(10, 30, 111, 181))
        self.listUsers.setObjectName("listUsers")
        self.labelUsers = QtWidgets.QLabel(self.compileTab)
        self.labelUsers.setGeometry(QtCore.QRect(10, 10, 47, 13))
        self.labelUsers.setObjectName("labelUsers")
        self.lineFileName = QtWidgets.QLineEdit(self.compileTab)
        self.lineFileName.setGeometry(QtCore.QRect(130, 160, 111, 21))
        self.lineFileName.setObjectName("lineFileName")
        self.btnAddFile = QtWidgets.QPushButton(self.compileTab)
        self.btnAddFile.setGeometry(QtCore.QRect(130, 190, 111, 21))
        self.btnAddFile.setObjectName("btnAddFile")
        self.btnAddFileList = QtWidgets.QPushButton(self.compileTab)
        self.btnAddFileList.setGeometry(QtCore.QRect(250, 190, 111, 21))
        self.btnAddFileList.setObjectName("btnAddFileList")
        self.lineEdit = QtWidgets.QLineEdit(self.compileTab)
        self.lineEdit.setGeometry(QtCore.QRect(250, 160, 111, 20))
        self.lineEdit.setObjectName("lineEdit")
        self.btnCompile = QtWidgets.QPushButton(self.compileTab)
        self.btnCompile.setGeometry(QtCore.QRect(10, 220, 351, 41))
        self.btnCompile.setObjectName("btnCompile")
        self.listFiles = QtWidgets.QListWidget(self.compileTab)
        self.listFiles.setGeometry(QtCore.QRect(130, 30, 111, 121))
        self.listFiles.setObjectName("listFiles")
        self.labelFile = QtWidgets.QLabel(self.compileTab)
        self.labelFile.setGeometry(QtCore.QRect(130, 10, 81, 16))
        self.labelFile.setObjectName("labelFile")
        self.textCompileLog = QtWidgets.QTextEdit(self.compileTab)
        self.textCompileLog.setGeometry(QtCore.QRect(10, 300, 351, 131))
        self.textCompileLog.setObjectName("textCompileLog")
        self.textCompileLog.setReadOnly(1)
        self.progressBar = QtWidgets.QProgressBar(self.compileTab)
        self.progressBar.setGeometry(QtCore.QRect(10, 270, 361, 23))
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        self.btnRemoveFile = QtWidgets.QPushButton(self.compileTab)
        self.btnRemoveFile.setGeometry(QtCore.QRect(250, 30, 111, 23))
        self.btnRemoveFile.setObjectName("btnRemoveFile")
        self.btnRemoveAll_2 = QtWidgets.QPushButton(self.compileTab)
        self.btnRemoveAll_2.setGeometry(QtCore.QRect(250, 60, 111, 23))
        self.btnRemoveAll_2.setObjectName("btnRemoveAll_2")
        self.comboUser = QtWidgets.QComboBox(self.compileTab)
        self.comboUser.setGeometry(QtCore.QRect(10, 440, 121, 22))
        self.comboUser.setObjectName("comboUser")
        self.btnVerifyLog = QtWidgets.QPushButton(self.compileTab)
        self.btnVerifyLog.setGeometry(QtCore.QRect(140, 440, 221, 23))
        self.btnVerifyLog.setObjectName("btnVerifyLog")
        self.btnVerifyLog.setDisabled(1)
        self.tabWidget.addTab(self.compileTab, "")
        # --- Tab "Rodar": input/output configuration and run output ---
        self.runTab = QtWidgets.QWidget()
        self.runTab.setObjectName("runTab")
        self.labelInput = QtWidgets.QLabel(self.runTab)
        self.labelInput.setGeometry(QtCore.QRect(10, 150, 47, 13))
        self.labelInput.setObjectName("labelInput")
        self.labelOutput = QtWidgets.QLabel(self.runTab)
        self.labelOutput.setGeometry(QtCore.QRect(140, 150, 111, 16))
        self.labelOutput.setObjectName("labelOutput")
        self.lineInput = QtWidgets.QLineEdit(self.runTab)
        self.lineInput.setGeometry(QtCore.QRect(10, 170, 111, 20))
        self.lineInput.setObjectName("lineInput")
        self.lineOutput = QtWidgets.QLineEdit(self.runTab)
        self.lineOutput.setGeometry(QtCore.QRect(140, 170, 111, 20))
        self.lineOutput.setObjectName("lineOutput")
        self.tableFiles = QtWidgets.QTreeWidget(self.runTab)
        self.tableFiles.setGeometry(QtCore.QRect(10, 10, 351, 91))
        self.tableFiles.setObjectName("tableFiles")
        self.tableFiles.header().setDefaultSectionSize(116)
        self.comboFiles = QtWidgets.QComboBox(self.runTab)
        self.comboFiles.setGeometry(QtCore.QRect(10, 120, 101, 21))
        self.comboFiles.setObjectName("comboFiles")
        self.checkNoOutput = QtWidgets.QCheckBox(self.runTab)
        self.checkNoOutput.setGeometry(QtCore.QRect(140, 120, 141, 17))
        self.checkNoOutput.setObjectName("checkNoOutput")
        self.btnUpdate = QtWidgets.QPushButton(self.runTab)
        self.btnUpdate.setGeometry(QtCore.QRect(260, 150, 101, 41))
        self.btnUpdate.setObjectName("btnUpdate")
        self.textFileOutput = QtWidgets.QTextEdit(self.runTab)
        self.textFileOutput.setGeometry(QtCore.QRect(10, 250, 351, 171))
        self.textFileOutput.setObjectName("textFileOutput")
        self.textFileOutput.setReadOnly(1)
        self.btnRun = QtWidgets.QPushButton(self.runTab)
        self.btnRun.setGeometry(QtCore.QRect(10, 200, 351, 41))
        self.btnRun.setObjectName("btnRun")
        self.btnRight = QtWidgets.QPushButton(self.runTab)
        self.btnRight.setGeometry(QtCore.QRect(10, 430, 171, 31))
        self.btnRight.setObjectName("btnRight")
        self.btnWrong = QtWidgets.QPushButton(self.runTab)
        self.btnWrong.setGeometry(QtCore.QRect(190, 430, 171, 31))
        self.btnWrong.setObjectName("btnWrong")
        self.tabWidget.addTab(self.runTab, "")
        # --- Tab "Resultados": per-user results and report generation ---
        self.resultsTab = QtWidgets.QWidget()
        self.resultsTab.setObjectName("resultsTab")
        self.treeUsers = QtWidgets.QTreeWidget(self.resultsTab)
        self.treeUsers.setGeometry(QtCore.QRect(10, 10, 351, 181))
        self.treeUsers.setObjectName("treeUsers")
        self.treeUsers.header().setCascadingSectionResizes(False)
        self.treeUsers.header().setDefaultSectionSize(124)
        self.comboUser_2 = QtWidgets.QComboBox(self.resultsTab)
        self.comboUser_2.setGeometry(QtCore.QRect(10, 200, 111, 21))
        self.comboUser_2.setObjectName("comboUser_2")
        self.treeFiles = QtWidgets.QTreeWidget(self.resultsTab)
        self.treeFiles.setGeometry(QtCore.QRect(10, 230, 161, 181))
        self.treeFiles.setObjectName("treeFiles")
        self.treeFiles.header().setDefaultSectionSize(59)
        self.comboFile = QtWidgets.QComboBox(self.resultsTab)
        self.comboFile.setGeometry(QtCore.QRect(130, 200, 111, 22))
        self.comboFile.setObjectName("comboFile")
        self.btnRectify = QtWidgets.QPushButton(self.resultsTab)
        self.btnRectify.setGeometry(QtCore.QRect(250, 230, 111, 23))
        self.btnRectify.setObjectName("btnRectify")
        self.btnLogs = QtWidgets.QPushButton(self.resultsTab)
        self.btnLogs.setGeometry(QtCore.QRect(180, 420, 181, 41))
        self.btnLogs.setObjectName("btnLogs")
        self.btnVerify = QtWidgets.QPushButton(self.resultsTab)
        self.btnVerify.setGeometry(QtCore.QRect(250, 200, 111, 23))
        self.btnVerify.setObjectName("btnVerify")
        self.textOutput = QtWidgets.QTextEdit(self.resultsTab)
        self.textOutput.setGeometry(QtCore.QRect(180, 260, 181, 151))
        self.textOutput.setObjectName("textOutput")
        self.textOutput.setReadOnly(1)
        self.lineLog = QtWidgets.QLineEdit(self.resultsTab)
        self.lineLog.setGeometry(QtCore.QRect(60, 430, 113, 20))
        self.lineLog.setObjectName("lineLog")
        self.tabWidget.addTab(self.resultsTab, "")
        MainWindow.setCentralWidget(self.centralWidget)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        # Later tabs stay disabled until the previous step has been done.
        self.compileTab.setDisabled(1)
        self.runTab.setDisabled(1)
        self.resultsTab.setDisabled(1)
        ## Tab : Clonar
        self.btnAddUser.clicked.connect(self.add_user)
        self.btnAddList.clicked.connect(self.add_user_list)
        self.btnRemoveUser.clicked.connect(self.remove_user)
        self.btnRemoveAll.clicked.connect(self.remove_all)
        self.pushButton.clicked.connect(self.update_compiling)
        self.btnClone.clicked.connect(self.clone_users)
        # Timer that drains clone_buffer into the clone log.
        self.clone_timer = QtCore.QTimer()
        self.clone_timer.setInterval(1000)
        self.clone_timer.timeout.connect(self.update_clone_log)
        ## Tab : Compilar
        self.btnAddFile.clicked.connect(self.add_file)
        self.btnAddFileList.clicked.connect(self.add_file_list)
        self.btnRemoveFile.clicked.connect(self.remove_file)
        self.btnRemoveAll_2.clicked.connect(self.remove_all_files)
        self.btnCompile.clicked.connect(self.compile_files)
        self.btnVerifyLog.clicked.connect(self.open_log)
        # Timer that drains compile_buffer into the compile log.
        self.compile_timer = QtCore.QTimer()
        self.compile_timer.setInterval(1000)
        self.compile_timer.timeout.connect(self.update_compile_log)
        ## Tab : Rodar
        self.btnUpdate.clicked.connect(self.update_files)
        self.tableFiles.itemClicked.connect(self.new_tree_selection_run)
        self.comboFiles.currentTextChanged.connect(self.new_combo_selection_run)
        self.btnRun.clicked.connect(self.run_files)
        self.btnRight.clicked.connect(self.right_answer)
        self.btnWrong.clicked.connect(self.wrong_answer)
        # Timer that drains output_buffer into the run-output view.
        self.output_timer = QtCore.QTimer()
        self.output_timer.setInterval(100)
        self.output_timer.timeout.connect(self.update_file_output)
        ## Tab : Resultados
        self.comboUser_2.currentTextChanged.connect(self.new_combo_selection_results)
        self.btnVerify.clicked.connect(self.verify_output)
        self.btnRectify.clicked.connect(self.rectify_result)
        self.treeUsers.itemClicked.connect(self.new_tree_selection_results)
        self.btnLogs.clicked.connect(self.save_log)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Corretor MTP v1.0"))
self.lineUserEntry.setText(_translate("MainWindow", "Usuário do Github"))
self.btnAddUser.setText(_translate("MainWindow", "Adicionar"))
self.btnRemoveUser.setText(_translate("MainWindow", "Remover"))
self.lineRepEntry.setText(_translate("MainWindow", "MTP"))
self.lineListEntry.setText(_translate("MainWindow", "lista_usuarios.txt"))
self.btnAddList.setText(_translate("MainWindow", "Adicionar lista"))
self.btnClone.setText(_translate("MainWindow", "Clonar\nrepositórios"))
self.btnRemoveAll.setText(_translate("MainWindow", "Remover todos"))
self.treeCloneUsers.headerItem().setText(0, _translate("MainWindow", "Usuário"))
self.treeCloneUsers.headerItem().setText(1, _translate("MainWindow", "Repositório"))
self.pushButton.setText(_translate("MainWindow", "Atualizar\nlista para\ncompilação"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.cloneTab), _translate("MainWindow", "Clonar"))
self.labelUsers.setText(_translate("MainWindow", "Usuários:"))
self.lineFileName.setText(_translate("MainWindow", "exemplo.c"))
self.btnAddFile.setText(_translate("MainWindow", "Adicionar programa"))
self.btnAddFileList.setText(_translate("MainWindow", "Adicionar lista"))
self.lineEdit.setText(_translate("MainWindow", "lista_programas.txt"))
self.btnCompile.setText(_translate("MainWindow", "Compilar"))
self.labelFile.setText(_translate("MainWindow", "Programas:"))
self.btnRemoveFile.setText(_translate("MainWindow", "Remover"))
self.btnRemoveAll_2.setText(_translate("MainWindow", "Remover todos"))
self.btnVerifyLog.setText(_translate("MainWindow", "Verificar log de compilação"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.compileTab), _translate("MainWindow", "Compilar"))
self.labelInput.setText(_translate("MainWindow", "Input:"))
self.labelOutput.setText(_translate("MainWindow", "Output esperado:"))
self.lineInput.setText(_translate("MainWindow", "1 2 3"))
self.lineOutput.setText(_translate("MainWindow", "Hello World!"))
self.tableFiles.headerItem().setText(0, _translate("MainWindow", "Programa"))
self.tableFiles.headerItem().setText(1, _translate("MainWindow", "Input"))
self.tableFiles.headerItem().setText(2, _translate("MainWindow", "Output"))
self.checkNoOutput.setText(_translate("MainWindow", "Desconsiderar Output"))
self.btnUpdate.setText(_translate("MainWindow", "Atualizar"))
self.btnRun.setText(_translate("MainWindow", "Rodar"))
self.btnRight.setText(_translate("MainWindow", "Saída correta"))
self.btnWrong.setText(_translate("MainWindow", "Saída incorreta"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.runTab), _translate("MainWindow", "Rodar"))
self.treeUsers.headerItem().setText(0, _translate("MainWindow", "Usuário"))
self.treeUsers.headerItem().setText(1, _translate("MainWindow", "Compilados"))
self.treeUsers.headerItem().setText(2, _translate("MainWindow", "Saída correta"))
self.treeFiles.headerItem().setText(0, _translate("MainWindow", "Programa"))
self.treeFiles.headerItem().setText(1, _translate("MainWindow", "Saída correta?"))
self.btnRectify.setText(_translate("MainWindow", "Retificar correção"))
self.btnLogs.setText(_translate("MainWindow", "Gerar relatório"))
self.btnVerify.setText(_translate("MainWindow", "Verificar outuput"))
self.lineLog.setText(_translate("MainWindow", "notas_lab1.txt"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.resultsTab), _translate("MainWindow", "Resultados"))
def add_user(self):
current_users = []
for i in range(self.treeCloneUsers.topLevelItemCount()):
current_users.append(self.treeCloneUsers.topLevelItem(i).text(0))
user_text = [self.lineUserEntry.text(), self.lineRepEntry.text()]
if user_text[0] is not "":
if user_text[0] not in current_users:
user = QtWidgets.QTreeWidgetItem(user_text)
self.treeCloneUsers.addTopLevelItem(user)
self.lineUserEntry.clear()
self.lineRepEntry.setText("MTP")
def add_user_list(self):
if os.path.isfile(self.lineListEntry.text()):
user_list = []
user_list_file = open(self.lineListEntry.text(), 'r')
current_users = []
for i in range(self.treeCloneUsers.topLevelItemCount()):
current_users.append(self.treeCloneUsers.topLevelItem(i).text(0))
for line in user_list_file:
username, repname = line.split()
user_list.append([username, repname, 0, 0])
user_list_file.close()
for user in user_list:
if user[0] not in current_users:
self.treeCloneUsers.addTopLevelItem(QtWidgets.QTreeWidgetItem([user[0], user[1]]))
def remove_user(self):
user_to_remove = self.treeCloneUsers.selectedItems()
if len(user_to_remove) != 0:
self.treeCloneUsers.takeTopLevelItem(self.treeCloneUsers.indexOfTopLevelItem(user_to_remove[0]))
    def remove_all(self):
        """Clear every user from the clone tree."""
        self.treeCloneUsers.clear()
def update_compiling(self):
    """Rebuild the compile-tab user list and combo from the clone tree."""
    self.compileTab.setDisabled(0)
    self.listUsers.clear()
    self.comboUser.clear()
    total = self.treeCloneUsers.topLevelItemCount()
    for index in range(total):
        name = self.treeCloneUsers.topLevelItem(index).text(0)
        self.listUsers.addItem(name)
        self.comboUser.addItem(name)
def clone_users(self):
    # Clone every listed user's repository, one background thread per user.
    # clone_timer periodically flushes clone_buffer via update_clone_log().
    if self.treeCloneUsers.topLevelItemCount() != 0:
        self.clone_timer.start()
        users_to_clone = []
        for i in range(self.treeCloneUsers.topLevelItemCount()):
            users_to_clone.append([self.treeCloneUsers.topLevelItem(i).text(0),
                                   self.treeCloneUsers.topLevelItem(i).text(1)])
        # Shared counters read by the _clone worker threads.
        global user_number, user_count
        user_number = len(users_to_clone)
        user_count = 0
        if not os.path.exists(original_wd + "\\Usuarios"):
            os.mkdir("Usuarios")
        # NOTE(review): the relative mkdir and this chdir assume the current
        # working directory is still original_wd at this point — confirm.
        os.chdir(os.getcwd() + "\\Usuarios")
        for user in users_to_clone:
            thread_clone = threading.Thread(target=_clone, args=[user])
            thread_clone.start()
        self.btnClone.setDisabled(1)
def update_clone_log(self):
    """Flush pending lines from clone_buffer into the clone log text box.

    Stops the polling timer when the end-of-clone marker is seen.
    """
    pending = len(clone_buffer)
    for i in range(pending):
        self.textCloneLog.append(clone_buffer[i])
        if clone_buffer[i] == "==Clonagem finalizada.":
            self.clone_timer.stop()
    # Remove the flushed lines with one O(n) slice delete; the original
    # pop(0) loop was quadratic in the number of buffered lines.
    del clone_buffer[:pending]
def add_file(self):
    """Add the file named in lineFileName to the file list, table and combo."""
    self.runTab.setDisabled(0)
    current_files = []
    for i in range(self.listFiles.count()):
        current_files.append(self.listFiles.item(i).text())
    file_name = self.lineFileName.text()
    # Compare by value: the original `is not ""` was an identity test, which
    # is unreliable for strings returned by Qt.
    if file_name != "":
        if file_name not in current_files:
            self.listFiles.addItem(file_name)
            self.tableFiles.addTopLevelItem(
                QtWidgets.QTreeWidgetItem([file_name, '-', '-']))
            self.comboFiles.addItem(file_name)
            # Drop the " " placeholder entry once a real file exists.
            if self.comboFiles.findText(" ") != -1:
                self.comboFiles.removeItem(self.comboFiles.findText(" "))
            self.lineFileName.clear()
    if not self.tableFiles.topLevelItemCount() == 0:
        self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
        self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
def add_file_list(self):
    """Bulk-add files from a list file with "name:input:output:run" lines."""
    os.chdir(original_wd)
    if os.path.isfile(self.lineEdit.text()):
        file_list = []
        self.runTab.setDisabled(0)
        current_files = []
        for i in range(self.listFiles.count()):
            current_files.append(self.listFiles.item(i).text())
        # 'with' guarantees the handle is closed even if a line is malformed.
        with open(self.lineEdit.text(), 'r') as file_list_file:
            for line in file_list_file:
                file_name, file_input, file_output, file_run = line.split(":")
                file_list.append([file_name, file_input, file_output,
                                  file_run.split()[0]])
        for file in file_list:
            if file[0] not in current_files:
                self.listFiles.addItem(file[0])
                self.comboFiles.addItem(file[0])
                if self.comboFiles.findText(" ") != -1:
                    self.comboFiles.removeItem(self.comboFiles.findText(" "))
                # run flag '1' means the entry carries real input/output.
                if file[3] == '1':
                    self.tableFiles.addTopLevelItem(
                        QtWidgets.QTreeWidgetItem([file[0], file[1], file[2]]))
                else:
                    self.tableFiles.addTopLevelItem(
                        QtWidgets.QTreeWidgetItem([file[0], '-', '-']))
        # Guard: an empty list file leaves the table without rows, and the
        # original topLevelItem(0) access would have crashed on None.
        if self.tableFiles.topLevelItemCount():
            self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
            self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
def remove_file(self):
    # Remove the selected file from list, combo and table; the three widgets
    # share the same row ordering, so one list row addresses all of them.
    file_to_remove = self.listFiles.selectedItems()
    if len(file_to_remove) != 0:
        if self.listFiles.count() == 1:
            # Keep the combo non-empty by inserting a " " placeholder before
            # the last real entry is removed.
            self.comboFiles.addItem(" ")
        self.comboFiles.removeItem(self.listFiles.row(file_to_remove[0]))
        self.tableFiles.takeTopLevelItem(self.listFiles.row(file_to_remove[0]))
        self.listFiles.takeItem(self.listFiles.row(file_to_remove[0]))
        if self.tableFiles.topLevelItem(0):
            self.lineInput.setText(self.tableFiles.topLevelItem(0).text(1))
            self.lineOutput.setText(self.tableFiles.topLevelItem(0).text(2))
        else:
            # No files left: restore the example input/output defaults.
            self.lineInput.setText("1 2 3")
            self.lineOutput.setText("Hello World!")
def remove_all_files(self):
    """Clear every file widget and restore the default example input/output."""
    self.listFiles.clear()
    self.tableFiles.clear()
    self.comboFiles.clear()
    # After tableFiles.clear() there are no rows, so the original branch that
    # read topLevelItem(0) was unreachable; only the defaults apply.
    self.lineInput.setText("1 2 3")
    self.lineOutput.setText("Hello World!")
def compile_files(self):
    # Compile every listed file for every listed user, one worker thread per
    # user; compile_timer polls compile_buffer via update_compile_log().
    if self.listUsers.count() != 0 and self.listFiles.count() != 0:
        self.compile_timer.start()
        if not os.path.exists(original_wd + "\\logs"):
            os.mkdir(original_wd + "\\logs")
        users_to_compile = []
        for i in range(self.listUsers.count()):
            users_to_compile.append(self.listUsers.item(i).text())
        c_files = []
        for i in range(self.listFiles.count()):
            c_files.append(self.listFiles.item(i).text())
        # Shared progress counters consumed by update_compile_log().
        global progress_count, progress_max
        progress_max = len(users_to_compile) * len(c_files)
        progress_count = 0
        global user_number, user_count
        user_number = len(users_to_compile)
        user_count = 0
        if not os.path.exists(original_wd + "\\Compilados"):
            os.mkdir("Compilados")
        os.chdir(original_wd + "\\Compilados")
        self.textCompileLog.append("Compilando...\n")
        for user in users_to_compile:
            # Each user gets a private output directory for binaries/logs.
            if not os.path.exists(original_wd + "\\Compilados\\" + user):
                os.mkdir(original_wd + "\\Compilados\\" + user)
            thread_compile = threading.Thread(target=_compile, args=[user, c_files])
            thread_compile.start()
        # Restore the working directory for the rest of the application.
        os.chdir(original_wd)
def update_compile_log(self):
    """Flush compile_buffer into the log box and refresh the progress bar.

    Stops the polling timer when the end-of-compilation marker is seen.
    """
    pending = len(compile_buffer)
    for i in range(pending):
        self.textCompileLog.append(compile_buffer[i])
        if compile_buffer[i] == "==Compilação finalizada.":
            self.compile_timer.stop()
            self.btnVerifyLog.setDisabled(0)
    # One O(n) slice delete instead of the original quadratic pop(0) loop.
    del compile_buffer[:pending]
    self.progressBar.setValue(100 * progress_count // progress_max)
def open_log(self):
    """Open the selected user's compile log, or warn when it is missing."""
    user_name = self.comboUser.currentText()
    # os.path.join replaces the hand-built "\\" concatenation so the path is
    # assembled with the platform separator.
    log_path = os.path.join(original_wd, "Compilados", user_name,
                            "%s_log.txt" % user_name)
    if os.path.isfile(log_path):
        webbrowser.open(log_path)
    else:
        window = QtWidgets.QMessageBox()
        window.move(600, 200)
        QtWidgets.QMessageBox.warning(window, 'Erro', "Log não encontrado",
                                      QtWidgets.QMessageBox.Ok)
def update_files(self):
    """Write the input/output fields back into the currently selected row."""
    index = self.comboFiles.currentIndex()
    if index == -1:
        return
    row = self.tableFiles.topLevelItem(index)
    if self.checkNoOutput.isChecked():
        new_input, new_output = '-', '-'
    else:
        new_input, new_output = self.lineInput.text(), self.lineOutput.text()
    row.setData(1, 0, new_input)
    row.setData(2, 0, new_output)
def new_tree_selection_run(self):
    """Mirror a table selection into the file combo box."""
    selected = self.tableFiles.selectedItems()
    if not selected:
        return
    self.tableFiles.clearSelection()
    row = self.tableFiles.indexOfTopLevelItem(selected[0])
    self.comboFiles.setCurrentIndex(row)
def new_combo_selection_run(self):
    """Mirror the combo selection into the table and the I/O fields."""
    if not self.tableFiles.topLevelItemCount():
        return
    self.tableFiles.clearSelection()
    self.tableFiles.topLevelItem(self.comboFiles.currentIndex()).setSelected(1)
    chosen = self.tableFiles.selectedItems()[0]
    self.lineInput.setText(chosen.text(1))
    self.lineOutput.setText(chosen.text(2))
def run_files(self):
    # Build {program_name_without_".c": [stdin, expected_stdout]} from the
    # table rows that define an expected output, then run every program for
    # every user in a background thread (_run fills output_buffer).
    global run_list
    run_list = {}
    self.output_timer.start()
    for i in range(self.tableFiles.topLevelItemCount()):
        if self.tableFiles.topLevelItem(i).text(2) != '-':
            if self.tableFiles.topLevelItem(i).text(1) == '-':
                # No stdin defined, only an expected output.
                # [:-2] drops the ".c" extension from the program name.
                run_list[self.tableFiles.topLevelItem(i).text(0)[:-2]] \
                    = ["", self.tableFiles.topLevelItem(i).text(2)]
            else:
                run_list[self.tableFiles.topLevelItem(i).text(0)[:-2]] = [self.tableFiles.topLevelItem(i).text(1),
                                                                          self.tableFiles.topLevelItem(i).text(2)]
    user_list = []
    for i in range(self.listUsers.count()):
        user_list.append(self.listUsers.item(i).text())
    if run_list and user_list:
        thread_run = threading.Thread(target=_run, args=[run_list, user_list])
        thread_run.start()
        # Give the worker a head start before polling for its first output.
        threading.Timer(2.0, self.update_file_output).start()
    self.resultsTab.setDisabled(0)
def update_file_output(self):
    # Drain output_buffer (filled by the _run worker thread) into the output
    # box; the Right/Wrong verdict buttons stay enabled while a manual
    # answer is pending.
    if output_buffer:
        for line in output_buffer:
            self.textFileOutput.append(line)
            self.btnRight.setDisabled(0)
            self.btnWrong.setDisabled(0)
            if line == "Finalizado.\n":
                # Run finished: stop polling and publish per-user results
                # (compiled count, correct count) to the results tab.
                self.output_timer.stop()
                self.btnRight.setDisabled(1)
                self.btnWrong.setDisabled(1)
                for user in users_file_info:
                    info = [user, str(len(users_file_info[user][0])), str(len(users_file_info[user][1]))]
                    self.treeUsers.addTopLevelItem(QtWidgets.QTreeWidgetItem(info))
                    self.comboUser_2.addItem(user)
                for item in run_list:
                    self.comboFile.addItem(item + '.exe')
        output_buffer.clear()
def right_answer(self):
    """Record a manual "output correct" verdict for the current run."""
    global output_verification, run_total, run_count
    self.textFileOutput.clear()
    message = "%d/%d\n==Saída correta!" % (run_count, run_total)
    self.textFileOutput.append(message)
    output_verification = 1
    self.btnRight.setDisabled(1)
    self.btnWrong.setDisabled(1)
def wrong_answer(self):
    """Record a manual "output incorrect" verdict for the current run."""
    global output_verification, run_total, run_count
    self.textFileOutput.clear()
    message = "%d/%d\n==Saída incorreta!" % (run_count, run_total)
    self.textFileOutput.append(message)
    output_verification = 0
    self.btnRight.setDisabled(1)
    self.btnWrong.setDisabled(1)
def new_combo_selection_results(self):
    """Select the chosen user in the tree and rebuild their program list."""
    if not self.treeUsers.topLevelItemCount():
        return
    self.treeUsers.clearSelection()
    self.treeUsers.topLevelItem(self.comboUser_2.currentIndex()).setSelected(1)
    self.treeFiles.clear()
    user = self.comboUser_2.currentText()
    compiled, correct = users_file_info[user][0], users_file_info[user][1]
    for program in compiled:
        base = program[:-2]
        if base in run_list:
            verdict = "Sim" if base + ".exe" in correct else "Não"
            self.treeFiles.addTopLevelItem(
                QtWidgets.QTreeWidgetItem([base + '.exe', verdict]))
def new_tree_selection_results(self):
    """Mirror a results-tree selection into the user combo box."""
    selected = self.treeUsers.selectedItems()
    if not selected:
        return
    self.treeUsers.clearSelection()
    index = self.treeUsers.indexOfTopLevelItem(selected[0])
    self.comboUser_2.setCurrentIndex(index)
def verify_output(self):
    """Re-run the selected user's program with the stored input and show its
    actual output alongside the expected one."""
    cur_program = self.comboFile.currentText()
    cur_user = self.comboUser_2.currentText()
    self.textOutput.clear()
    if cur_program[0:-4] + '.c' not in users_file_info[cur_user][0]:
        self.textOutput.append("%s não compilado para %s." % (cur_program, cur_user))
    else:
        time_out = 0
        prog_input, prog_output = run_list[cur_program[0:-4]][0], run_list[cur_program[0:-4]][1]
        # creationflags=0x08000000 is CREATE_NO_WINDOW (Windows only).
        run_process = subprocess.Popen(["%s" % os.path.join(original_wd, "Compilados", cur_user, cur_program)],
                                       stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, creationflags=0x08000000)
        run_process.stdin.write(bytes(prog_input, 'UTF-8'))
        try:
            run_response = run_process.communicate(timeout=1)[0].decode('latin-1')
        except subprocess.TimeoutExpired:
            # Only a real timeout means "tempo de execução excedido"; the
            # original blanket `except Exception` mislabelled every other
            # failure as a timeout. Kill and reap the stuck child.
            run_process.kill()
            run_process.communicate()
            self.textOutput.append("====Tempo de execução excedido.")
            time_out = 1
        if not time_out:
            self.textOutput.append("--Entrada fornecida: '%s'\n" % prog_input)
            self.textOutput.append("--Saída do programa:\n" + 45 * "\\" + "\n%s\n"
                                   % run_response + 45 * "/" + "\n")
            self.textOutput.append("--Saída esperada: '%s'\n" % prog_output)
def rectify_result(self):
    # Toggle the "correct output" verdict for the selected user/program and
    # keep the per-user correct-count column (column 2) in sync.
    cur_program = self.comboFile.currentText()
    cur_user = self.comboUser_2.currentText()
    tree_item = self.treeUsers.topLevelItem(self.comboUser_2.currentIndex())
    if cur_program in users_file_info[cur_user][1]:
        # Currently marked correct: unmark and decrement the count.
        users_file_info[cur_user][1].remove(cur_program)
        self.treeUsers.editItem(tree_item, 2)
        tree_item.setText(2, str(int(tree_item.text(2)) - 1))
    else:
        # Only programs that actually compiled can be marked correct.
        if cur_program[:-4] + '.c' in users_file_info[cur_user][0]:
            users_file_info[cur_user][1].append(cur_program)
            tree_item.setText(2, str(int(tree_item.text(2)) + 1))
    # Refresh the per-file verdict tree for the selected user.
    self.new_combo_selection_results()
def save_log(self):
    """Write the per-user report (compiled count : correct count) to the file
    named in lineLog.

    The original try/finally displayed "Relatório gerado com sucesso" even
    when opening or writing the file failed; the success message is now only
    shown after the file was written.
    """
    with open(self.lineLog.text(), 'w') as log:
        for user in users_file_info:
            # Pad the user name to a 20-char column, then align the counts.
            log.write(user + (20 - len(user)) * " " + " :" +
                      (2 - len(users_file_info[user][0]) // 10) * " " +
                      str(len(users_file_info[user][0])) + " : " +
                      str(len(users_file_info[user][1])) + "\n")
    self.btnLogs.setText("Relatório gerado com sucesso")
    self.btnLogs.setDisabled(1)
if __name__ == "__main__":
    # Standard Qt bootstrap: build the UI and hand control to the event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
6679978 | import unittest
from gemstones import *
class TestCommonGems(unittest.TestCase):
    """Checks common_gems() against the sample rocks from the exercise."""

    def test_given(self):
        rocks = ['abcdde', 'baccd', 'eeabg']
        expected = 2
        self.assertEqual(expected, common_gems(rocks))
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
6409197 | """Connect to a remote Tamr when SSH is proxied via OKTA Advanced Server Access"""
import paramiko
import os
import subprocess
import tamr_toolbox as tbox
# Set your connection parameters
hostname = "10.99.9.999"
username = "my.username"

# Login to Okta ASA.
# If there is no current session a browser window will open to complete authentication
subprocess.run(["sft", "login"])

# Get ssh proxy information from ssh config file and use it to create a connection client
ssh_config = paramiko.SSHConfig()
with open(os.path.expanduser("~/.ssh/config")) as ssh_config_file:
    ssh_config.parse(ssh_config_file)
host_ssh_config = ssh_config.lookup(hostname)

remote_client = paramiko.SSHClient()
remote_client.load_host_keys(host_ssh_config["userknownhostsfile"])
# The ProxyCommand from the ssh config tunnels the connection through the
# Okta ASA proxy; authentication is handled by the proxy, hence password="".
remote_client.connect(
    host_ssh_config["hostname"],
    username=username,
    password="",
    sock=paramiko.ProxyCommand(host_ssh_config["proxycommand"]),
)

# Use the created client with Tamr-Toolbox functions
user_defined_configs = tbox.sysadmin.instance.get_configs(
    user_defined_only=True,
    tamr_install_dir="/home/ubuntu",
    remote_client=remote_client,
    impersonation_username="ubuntu",
)
print(user_defined_configs)
| StarcoderdataPython |
4847018 | <reponame>hvy/pfi-internship2016<filename>assignment4_adagrad.py<gh_stars>1-10
import math
import time
from utils import dataset, randomizer, exporter
from assignment3 import optimize_sgd
from assignment4 import Autoencoder
class AutoencoderAdaGrad(Autoencoder):
    """Simple Autoencoder implementation with one hidden layer, that uses the
    identity function f(x) = x as its activation function and optimizes with
    Stochastic Gradient Descent (SGD).

    The learning rates for each parameters is adapted independently using
    AdaGrad.
    """

    def __init__(self, n_in, n_units, lr=0.01, mean=0, stddev=0.01):
        """Model constructor, initializing the parameters.

        Args:
            n_in (int): Number of input units, i.e. size of the input vector.
            n_units (int): Number of hidden units.
            lr (float): Initial learning rate.
            mean (float): Mean of the initial parameters.
            stddev (float): Standard deviation of the initial random
                parameters.
        """
        super().__init__(n_in, n_units, lr, mean, stddev)

        # AdaGrad gradient history initialization.
        # Accumulates squared gradients per parameter (W1/b1/W2/b2 shapes).
        self.eps = 1e-8  # Smoothing term.
        self.gW1_history = [[0] * n_in for _ in range(n_units)]
        self.gb1_history = [0] * n_units
        self.gW2_history = [[0] * n_units for _ in range(n_in)]
        self.gb2_history = [0] * n_in

    def optimize(self, h, y, gW1, gb1, gW2, gb2):
        """Optimizes (modifies) the parameters of this model using AdaGrad
        and SGD.

        Args:
            h (list): Activations of the hidden layer.
            y (list): Activations of the output layer.
            gW1 (list): Computed gradients of `W1`.
            gb1 (list): Computed gradients of `b1`.
            gW2 (list): Computed gradients of `W2`.
            gb2 (list): Computed gradients of `bw`.
        """
        # Scale each gradient by its accumulated history before the plain
        # SGD step (per-parameter adaptive learning rate).
        gW2, self.gW2_history = self.adagrad_iter(gW2, self.gW2_history,
                                                  self.eps)
        gb2, self.gb2_history = self.adagrad_iter(gb2, self.gb2_history,
                                                  self.eps)
        gW1, self.gW1_history = self.adagrad_iter(gW1, self.gW1_history,
                                                  self.eps)
        gb1, self.gb1_history = self.adagrad_iter(gb1, self.gb1_history,
                                                  self.eps)

        self.W2 = optimize_sgd(self.W2, gW2, self.lr)
        self.b2 = optimize_sgd(self.b2, gb2, self.lr)
        self.W1 = optimize_sgd(self.W1, gW1, self.lr)
        self.b1 = optimize_sgd(self.b1, gb1, self.lr)

    def adagrad_iter(self, g, g_history, eps=1e-8):
        """Helper method for AdaGrad. Update history, then compute the new
        gradients.

        Args:
            g (list): The newly computed gradients. 1 or 2 dimensional.
            g_history (list): The accumulated history of the grdients. Same
                dimensions as `g`.
            eps (float): Smoothing term. Usually in the range 1e-4 to 1e-8.

        Returns:
            list: The adapted newly computed gradients.
            list: The updated gradient history.
        """
        assert len(g) == len(g_history)

        # Both g and g_history are mutated in place and also returned.
        if isinstance(g[0], list):  # 2 dimensional
            assert len(g[0]) == len(g_history[0])
            for i in range(len(g)):
                for j in range(len(g[i])):
                    g_history[i][j] += g[i][j] ** 2
                    g[i][j] /= eps + math.sqrt(g_history[i][j])
        else:  # 1 dimensional
            for i in range(len(g)):
                g_history[i] += g[i] ** 2
                g[i] /= eps + math.sqrt(g_history[i])

        return g, g_history
if __name__ == '__main__':
    # Train an Autoencoder model, with AdaGrad
    N, D, xs = dataset.read('data/dataset.dat')

    mean = 0
    stddev = math.sqrt(1 / D)  # scale initial weights with the input size

    n_hidden_units = 5
    n_epochs = 20

    # The initial learning rate should be much higher e.g. 1.0 than in normal
    # SGD without AdaGrad, since the adaption will take care of the scaling.
    initial_learning_rate = 1

    model = AutoencoderAdaGrad(n_in=D, n_units=n_hidden_units,
                               lr=initial_learning_rate, mean=mean,
                               stddev=stddev)

    for epoch in range(n_epochs):
        # Shuffle each epoch, train one pass, then measure the average loss
        # over the whole dataset without training.
        randomizer.shuffle(xs)
        for x in xs:
            model(x, train=True)
        total_loss = 0
        for x in xs:
            loss = model(x, train=False)
            total_loss += loss
        average_loss = total_loss / N
        print('Epoch: {} Avg. loss: {}'.format(epoch + 1, average_loss))

    # out_filename = 'output/assignment4_adagrad_params_' + str(int(time.time()))
    # exporter.export_model(out_filename, model)
| StarcoderdataPython |
4844042 | <gh_stars>0
# Pop the last item of the list below.
lst = [11, 100, 99, 1000, 999]
# list.pop() with no argument already removes and returns the last element;
# there is no need for pop(len(lst) - 1).
popped_item = lst.pop()
print(popped_item)
print(lst)
# =======================
# Remove "broccoli" from the list using .pop and .index methods.
lst = ["milk", "banana", "eggs", "bread", "broccoli", "lemons"]
item = lst.pop(lst.index('broccoli'))
print(lst, item)
# =========================
# Save Italy's GDP in a separate variable and remove it from the dictionary.
GDP_2018 = {"US": 21, "China": 16, "Japan": 5, "Germany": 4, "India": 3,
            "France": 3, "UK": 3, "Italy": 2}
# dict.pop returns the removed value, so a single call both saves and
# removes (the original did a .get followed by a redundant .pop).
italy_gdp = GDP_2018.pop('Italy')
print(GDP_2018)
print(italy_gdp, "trillion USD")
9601250 | <reponame>tsuru/varnishapi
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import codecs
import httplib2
import os
import varnish
from feaas import storage
# Bundled VCL template rendered for each bound application (see
# BaseManager.vcl_template / write_vcl below).
VCL_TEMPLATE_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), "..",
                                                 "misc", "default.vcl"))
# Helper script installed on instances to periodically dump VCL programs.
DUMP_VCL_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), "..",
                                             "misc", "dump_vcls.bash"))
class BaseManager(object):
    """Manages varnish-backed service instances: lifecycle, binds and VCL.

    Persistence is delegated to the injected storage object; VCL programs are
    pushed to instances over the varnish management port (6082). Subclasses
    implement the provisioning hooks at the bottom (start/terminate/scale).
    """

    def __init__(self, storage):
        self.storage = storage

    def new_instance(self, name):
        # Refuse duplicate names before persisting the new instance.
        self._check_duplicate(name)
        instance = storage.Instance(name)
        self.storage.store_instance(instance)
        return instance

    def _check_duplicate(self, name):
        # Raises InstanceAlreadyExistsError when the name is taken; the
        # "not found" exception is the success path here.
        try:
            self.storage.retrieve_instance(name=name)
            raise storage.InstanceAlreadyExistsError()
        except storage.InstanceNotFoundError:
            pass

    def bind(self, name, app_host):
        instance = self.storage.retrieve_instance(name=name)
        bind = storage.Bind(app_host, instance)
        self.storage.store_bind(bind)

    def unbind(self, name, app_host):
        # Drop the custom VCL from every unit before forgetting the bind.
        instance = self.storage.retrieve_instance(name=name)
        for unit in instance.units:
            self.remove_vcl(unit.dns_name, unit.secret)
        bind = storage.Bind(app_host, instance)
        self.storage.remove_bind(bind)

    def write_vcl(self, instance_addr, secret, app_addr):
        # Render the template for this app and activate it as "feaas".
        vcl = self.vcl_template() % {"app_host": app_addr}
        try:
            handler = varnish.VarnishHandler("{0}:6082".format(instance_addr),
                                             secret=secret)
            handler.vcl_inline("feaas", vcl.encode("iso-8859-1", "ignore"))
            handler.vcl_use("feaas")
            handler.quit()
        except AssertionError as e:
            # Varnish answers "106 Already a VCL program named ..." when the
            # program already exists; treat that as success, re-raise others.
            if len(e.args) > 0 and "106 Already a VCL program named" in e.args[0]:
                return
            raise e

    def remove_vcl(self, instance_addr, secret):
        # Switch back to the boot VCL, then discard our "feaas" program.
        handler = varnish.VarnishHandler("{0}:6082".format(instance_addr),
                                         secret=secret)
        handler.vcl_use("boot")
        handler.vcl_discard("feaas")
        handler.quit()

    def vcl_template(self):
        # Flatten the template file into a single escaped, quoted line
        # suitable for varnish's inline VCL command.
        with codecs.open(VCL_TEMPLATE_FILE, encoding="utf-8") as f:
            content = f.read()
            content = content.replace("\n", " ")
            content = content.replace('"', r'\"')
            content = content.replace("\t", "")
            return '"%s"' % content.strip()

    def remove_instance(self, name):
        # Soft delete: mark the instance "removed" and store it back.
        instance = self.storage.retrieve_instance(name=name)
        instance.state = "removed"
        self.storage.store_instance(instance)

    def info(self, name):
        # Only the first unit's address is reported.
        instance = self.storage.retrieve_instance(name=name)
        return [{"label": "Address",
                 "value": instance.units[0].dns_name}]

    def status(self, name):
        instance = self.storage.retrieve_instance(name=name)
        return instance.state

    def scale_instance(self, name, quantity):
        # Validate and enqueue a scale job; the actual resizing happens
        # asynchronously via physical_scale() in subclasses.
        if quantity < 1:
            raise ValueError("quantity must be a positive integer")
        instance = self.storage.retrieve_instance(name=name)
        if instance.state == "scaling":
            raise ValueError("instance is already scaling")
        if quantity == len(instance.units):
            raise ValueError("instance already have %d units" % quantity)
        self.storage.store_scale_job({"instance": name, "quantity": quantity,
                                      "state": "pending"})

    def get_user_data(self, secret):
        # Prefer an externally hosted user-data script when configured.
        if "USER_DATA_URL" in os.environ:
            url = os.environ.get("USER_DATA_URL")
            h = httplib2.Http()
            _, user_data = h.request(url)
            # NOTE(review): httplib2 returns the body as bytes on Python 3;
            # this str-based replace assumes a text response — confirm.
            return user_data.replace("VARNISH_SECRET_KEY", secret)
        # Otherwise build a cloud-init style shell script that installs the
        # configured packages and sets up varnish plus the VCL dump cron job.
        user_data_lines = None
        packages = os.environ.get("API_PACKAGES")
        if packages:
            user_data_lines = ["apt-get update",
                               "apt-get install -y {0}".format(packages),
                               "sed -i -e 's/-T localhost:6082/-T :6082/' /etc/default/varnish",
                               "sed -i -e 's/-a :6081/-a :8080/' /etc/default/varnish",
                               "echo {0} > /etc/varnish/secret".format(secret),
                               "service varnish restart",
                               "cat > /etc/cron.hourly/dump_vcls <<'END'",
                               open(DUMP_VCL_FILE).read(),
                               "END",
                               "chmod +x /etc/cron.hourly/dump_vcls"]
        if user_data_lines:
            return "\n".join(user_data_lines) + "\n"

    def start_instance(self, name):
        # Provisioning hook implemented by concrete managers.
        raise NotImplementedError()

    def terminate_instance(self, name):
        # Provisioning hook implemented by concrete managers.
        raise NotImplementedError()

    def physical_scale(self, instance, quantity):
        # Provisioning hook implemented by concrete managers.
        raise NotImplementedError()
| StarcoderdataPython |
3371345 | <gh_stars>0
from time import time
### CONFIGURATION ###
WIN_WIDTH = 1280
WIN_HEIGHT = 860
FPS = 40
BACKGROUND_COLOR = (7, 7, 7)
FOOD_COLOR = (50, 50, 255)
POISON_COLOR = (255, 50, 50)

SAVE_TO_CSV = False
STARTTIME = str(int(time()))  # unique timestamp used in the csv file name
SAVE_DELAY = 20 * 1000  # in milliseconds
HEADER1 = ["Time", "Fitness", "Age", "Gen", "Childs", "FoodEaten", "PoisonEaten",
           "MaxVel_MaxHP", "FoodAttraction", "PoisonAttraction",
           "FoodDistance", "PoisonDistance", "MaxSteerForce", "DirAngleMult"]
# HEADER2 mirrors HEADER1 with Mean/Median columns for every stat except Time.
HEADER2 = []
for header in HEADER1:
    HEADER2.extend([header] if header == "Time"
                   else ['Mean' + header, 'Median' + header])

# Switches spawn mode, between Continuous: False, and By Gen: True
SPAWN_MODE = False
TOTAL_CREATURES = 17
MIN_CREATURE_SIZE = 7
MAX_CREATURE_SIZE = 53
# Chance per frame to spawn a brand-new creature for genetic variation
# (continuous mode only). Kept low so breeding dominates; when the
# population hits zero, creatures respawn in bulk instead.
NEW_CREATURE_CHANCE = 0.003
DNA_SIZE = 7  # number of values in the dna.

# The values below are affected by the fitness of the creature:
# breed_chance = x / (BREED_CHANCE_VALUE + x); x --> fitness
BREED_CHANCE_VALUE = 850
MAX_MUTATION_VALUE = 0.2  # how much a property changes when mutated
MUTATION_CHANCE = 0.1  # per-property mutation chance, independent of fitness

# Keeps newly spawned creatures from overlapping food/poison immediately.
# (MAX_CREATURE_SIZE // 2) + 1 still lets big creatures squeeze between
# closely packed items; larger gaps look ugly and unrealistic.
DISTANCE_BETWEEN_SPRITES = (MAX_CREATURE_SIZE // 2) + 1

TOTAL_POISON = 76
TOTAL_FOOD = 67
HEALTH_DEGENERATION = 12.3  # hp lost per second
POISON_VALUE = -52  # negative value as poison is bad!
FOOD_VALUE = 20

# Values that vary with DNA changes but are capped at these maxima.
MAX_STEER_FORCE = 4
MAX_PERCEPTION_DIST = 300  # max evolvable food/poison sight distance
# Higher dir_angle_mult gives higher priority to targets in front.
MIN_DIR_ANGLE_MULT = 1
MAX_DIR_ANGLE_MULT = 5

# Evolutionary trade-off: creatures split a fixed budget between max
# velocity and max health (more maxhealth means bigger size, less maxvel).
TOTAL_MAXVEL_MAXHP_POINTS = 220
# Creatures should not spawn with HP below this — tiny values make no
# sense because health degeneration kills them almost instantly.
MIN_HP = 30

# When no food or poison is in sight, the creature wanders.
# Wander ring properties:
WANDER_RING_DISTANCE = (WIN_WIDTH + WIN_HEIGHT) // 8
WANDER_RING_RADIUS = (WIN_WIDTH + WIN_HEIGHT) // 4
WANDER_RING_WAIT = 2000
| StarcoderdataPython |
8190646 | <reponame>tonitick/horovod
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mpi4py import MPI
import horovod.torch as hvd
import torch
import time
import os
import signal
from common import env
def test():
    """Exercise Horovod's stall detection with deliberately staggered ranks.

    Non-zero ranks sleep before joining the allreduce, so the collective is
    expected to stall and be shut down by Horovod itself.
    """
    # Hard kill the process if the stall-shutdown machinery itself hangs.
    signal.alarm(45)
    with env(HOROVOD_STALL_CHECK_TIME_SECONDS="2",
             HOROVOD_STALL_SHUTDOWN_TIME_SECONDS="5"):
        hvd.init()
        tensor = torch.IntTensor([[1, 2], [3, 4]])
        if hvd.rank() != 0:
            time.sleep(10 * hvd.rank())
        try:
            summed = hvd.allreduce(tensor, average=False)
        except Exception:
            # The stalled allreduce is expected to raise; the original bare
            # `except:` would also have swallowed SystemExit and
            # KeyboardInterrupt.
            pass
        finally:
            hvd.shutdown()
if __name__ == "__main__":
    # Run the stall test when launched directly (e.g. under mpirun).
    test()
5136306 | <reponame>TomFaulkner/News-At-Command-Line<filename>news/config_reader.py
import os
from contextlib import suppress
import yaml
from appdirs import AppDirs
from .__version__ import __app_name__
from .constants import constants
# Platform-appropriate per-user config directory for this application.
dirs = AppDirs(__app_name__)
class ConfigurationReader:
    """Loads the user config file, creating it with defaults on first run.

    Exposes the parsed values as `APIKEY` and `limit` attributes.
    """

    def __init__(self):
        try:
            with open(f'{dirs.user_config_dir}/config.yml') as ymlfile:
                # safe_load: yaml.load without an explicit Loader is
                # deprecated and allows arbitrary object construction from
                # the config file.
                cfg = yaml.safe_load(ymlfile)
        except FileNotFoundError:
            # First run: create the config directory and write the defaults.
            with suppress(FileExistsError):
                os.makedirs(dirs.user_config_dir)
            with open(f'{dirs.user_config_dir}/config.yml', 'w') as ymlfile:
                ymlfile.write(yaml.dump(constants['config_defaults']))
            cfg = constants['config_defaults']
        self.APIKEY = cfg['api_key']
        self.limit = cfg['article_limit']
| StarcoderdataPython |
86584 | <reponame>heatonk/caldera_pathfinder
import abc
class ScannerInterface(abc.ABC):
    """Abstract base for scanner plugins; concrete scanners implement scan()."""

    @abc.abstractmethod
    def scan(self):
        # Subclasses perform their scan here; no return contract is visible
        # from this definition.
        pass
| StarcoderdataPython |
212473 | import pandas as pd
import sys
import matplotlib.pyplot as plt
if len(sys.argv) != 4:
print("give the input file, output file, and title")
df = pd.read_csv(sys.argv[1], skipinitialspace=True)
output_filename = sys.argv[2]
df.plot(x="timestep", legend=False)
plt.ylim(ymin=0)
plt.title(sys.argv[3])
plt.savefig(output_filename, bbox_inches="tight")
| StarcoderdataPython |
1747472 | <reponame>dskkato/superannotate-python-sdk<gh_stars>0
'''
'''
import json
import logging
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from PIL import Image
import numpy as np
import cv2
from ..baseStrategy import baseStrategy
from ....common import id2rgb, write_to_json
logger = logging.getLogger("superannotate-python-sdk")
class CocoBaseStrategy(baseStrategy):
    """Common machinery for SuperAnnotate <-> COCO dataset conversion.

    Subclasses supply `conversion_algorithm`; this base handles the COCO
    skeleton, category creation, per-image bookkeeping and SA-format I/O.
    """
    # Maps lowercase project type to the SA annotation file suffix.
    project_type_to_json_ending = {
        'pixel': '___pixel.json',
        'vector': '___objects.json'
    }

    def __init__(self, args):
        self.total_images_num = 0
        super().__init__(args)

    def set_num_total_images(self, num):
        self.total_images_num = num

    def get_num_total_images(self):
        return self.total_images_num

    def _create_categories(self, path_to_classes):
        """Build COCO category entries from an SA classes JSON mapping."""
        s_class = namedtuple('Class', ['class_name', 'id'])
        with open(path_to_classes, 'r') as fp:
            classes = json.load(fp)
        categories = [
            self._create_single_category(s_class(item, classes[item]))
            for item in classes
        ]
        return categories

    def _create_single_category(self, item):
        category = {
            'id': item.id,
            'name': item.class_name,
            'supercategory': item.class_name,
            'isthing': 1,
            'color': id2rgb(item.id)
        }
        return category

    def _make_id_generator(self):
        # Infinite 1-based id counter.
        cur_id = 0
        while True:
            cur_id += 1
            yield cur_id

    def _create_skeleton(self):
        """Return an empty COCO-format dict with info/licenses filled in."""
        out_json = {
            'info':
                {
                    'description':
                        'This is {} dataset.'.format(self.dataset_name),
                    'url':
                        'https://superannotate.ai',
                    'version':
                        '1.0',
                    'year':
                        datetime.now().year,
                    'contributor':
                        'Superannotate AI',
                    'date_created':
                        datetime.now().strftime("%d/%m/%Y")
                },
            'licenses':
                [
                    {
                        'url': 'https://superannotate.ai',
                        'id': 1,
                        'name': 'Superannotate AI'
                    }
                ],
            'images': [],
            'annotations': [],
            'categories': []
        }
        return out_json

    def convert_from_old_sa_to_new(self, old_json_data, project_type):
        """Convert the legacy list-based SA annotation format to the new
        dict-based one (metadata/instances/tags/comments)."""
        new_json_data = {
            "metadata": {},
            "instances": [],
            "tags": [],
            "comments": []
        }
        meta_keys = [
            "name", "width", "height", "status", "pinned", "isPredicted",
            "projectId", "annotatorEmail", "qaEmail"
        ]
        if project_type == "pixel":
            meta_keys.append("isSegmented")
        new_json_data["metadata"] = dict.fromkeys(meta_keys)

        metadata = new_json_data["metadata"]
        for item in old_json_data:
            object_type = item.get("type")
            # add metadata
            if object_type == "meta":
                meta_name = item["name"]
                if meta_name == "imageAttributes":
                    metadata["height"] = item.get("height")
                    metadata["width"] = item.get("width")
                    metadata["status"] = item.get("status")
                    metadata["pinned"] = item.get("pinned")
                if meta_name == "lastAction":
                    metadata["lastAction"] = dict.fromkeys(
                        ["email", "timestamp"]
                    )
                    metadata["lastAction"]["email"] = item.get("userId")
                    metadata["lastAction"]["timestamp"] = item.get("timestamp")
            # add tags
            elif object_type == "tag":
                new_json_data["tags"].append(item.get("name"))
            # add comments
            elif object_type == "comment":
                item.pop("type")
                item["correspondence"] = item["comments"]
                for comment in item["correspondence"]:
                    comment["email"] = comment["id"]
                    comment.pop("id")
                item.pop("comments")
                new_json_data["comments"].append(item)
            # everything else is an annotation instance
            else:
                new_json_data["instances"].append(item)
        return new_json_data

    def _parse_json_into_common_format(self, sa_annotation_json, fpath):
        """
        If the annotation format ever changes this function will handle it and
        return something optimal for the converters. Additionally, if anything
        important is absent from the current json, this function fills it.
        """
        if isinstance(sa_annotation_json, list):
            sa_annotation_json = self.convert_from_old_sa_to_new(
                sa_annotation_json, self.project_type
            )
        # Guarantee the top-level keys exist.
        if 'metadata' not in sa_annotation_json:
            sa_annotation_json['metadata'] = {}
        if 'tags' not in sa_annotation_json:
            sa_annotation_json['tags'] = []
        if 'instances' not in sa_annotation_json:
            sa_annotation_json['instances'] = []
        if 'comments' not in sa_annotation_json:
            sa_annotation_json['comments'] = []
        # Derive the image name from the annotation file name if missing.
        if 'name' not in sa_annotation_json[
                'metadata'] or sa_annotation_json['metadata']['name'] is None:
            fname = fpath.name
            fname = fname[:-len(
                self.project_type_to_json_ending[self.project_type.lower()]
            )]
            sa_annotation_json['metadata']['name'] = fname
        sa_annotation_json['metadata']['image_path'] = str(
            Path(fpath).parent / sa_annotation_json['metadata']['name']
        )
        sa_annotation_json['metadata']['annotation_json'] = fpath
        if self.task == 'panoptic_segmentation':
            panoptic_mask = str(
                Path(self.export_root) /
                (sa_annotation_json['metadata']['name'] + '.png')
            )
            sa_annotation_json['metadata']['panoptic_mask'] = panoptic_mask
        if self.project_type == 'Pixel':
            sa_annotation_json['metadata']['sa_bluemask_path'] = str(
                Path(self.export_root) /
                (sa_annotation_json['metadata']['name'] + '___save.png')
            )
        # Fill missing/invalid dimensions by inspecting the actual image.
        if not isinstance(
                sa_annotation_json['metadata'].get('height', None), int
        ) or not isinstance(
                sa_annotation_json['metadata'].get('width', None), int):
            image_height, image_width = self.get_image_dimensions(
                sa_annotation_json['metadata']['image_path']
            )
            sa_annotation_json['metadata']['height'] = image_height
            sa_annotation_json['metadata']['width'] = image_width
        return sa_annotation_json

    def get_image_dimensions(self, image_path):
        """Return (height, width) of the image at image_path."""
        img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        if img is not None:
            dimensions = img.shape
            return dimensions[0], dimensions[1]
        # Fall back to PIL for formats OpenCV cannot read. PIL's Image.size
        # is a (width, height) property, not a method — the original called
        # img.size(), which raised TypeError and never produced dimensions.
        img = Image.open(image_path)
        img_width, img_height = img.size
        return img_height, img_width

    def _prepare_single_image_commons_pixel(self, id_, metadata):
        """Per-image data for Pixel projects: image info plus mask arrays."""
        ImgCommons = namedtuple(
            'ImgCommons',
            ['image_info', 'ann_mask', 'sa_bluemask_rgb', 'flat_mask']
        )
        sa_bluemask_path = metadata['sa_bluemask_path']
        image_info = self._make_image_info(
            metadata['name'], metadata['height'], metadata['width'], id_
        )
        sa_bluemask_rgb = np.asarray(
            Image.open(sa_bluemask_path).convert('RGB'), dtype=np.uint32
        )
        ann_mask = np.zeros(
            (image_info['height'], image_info['width']), dtype=np.uint32
        )
        # Pack R,G,B into a single 24-bit integer per pixel so mask colors
        # can be compared with one equality test.
        flat_mask = (sa_bluemask_rgb[:, :, 0] <<
                     16) | (sa_bluemask_rgb[:, :, 1] <<
                            8) | (sa_bluemask_rgb[:, :, 2])
        res = ImgCommons(image_info, ann_mask, sa_bluemask_rgb, flat_mask)
        return res

    def _prepare_single_image_commons_vector(self, id_, metadata):
        """Per-image data for Vector projects: image info only."""
        ImgCommons = namedtuple('ImgCommons', ['image_info'])
        image_info = self._make_image_info(
            metadata['name'], metadata['height'], metadata['width'], id_
        )
        res = ImgCommons(image_info)
        return res

    def _prepare_single_image_commons(self, id_, metadata):
        # Dispatch on the project type set on the strategy instance.
        res = None
        if self.project_type == 'Pixel':
            res = self._prepare_single_image_commons_pixel(id_, metadata)
        elif self.project_type == 'Vector':
            res = self._prepare_single_image_commons_vector(id_, metadata)
        return res

    def _make_image_info(self, pname, pheight, pwidth, id_):
        """Build a COCO 'images' entry."""
        image_info = {
            'id': id_,
            'file_name': pname,
            'height': pheight,
            'width': pwidth,
            'license': 1
        }
        return image_info

    def _create_sa_classes(self, json_path):
        """Derive SA classes (with random colors) from the COCO categories."""
        # 'with' closes the file; the original open() leaked the handle.
        with open(json_path) as fp:
            json_data = json.load(fp)
        classes_list = json_data["categories"]

        classes = []
        for data in classes_list:
            color = np.random.choice(range(256), size=3)
            hexcolor = "#%02x%02x%02x" % tuple(color)
            classes_dict = {
                'name': data["name"],
                'color': hexcolor,
                'attribute_groups': []
            }
            classes.append(classes_dict)
        return classes

    def to_sa_format(self):
        """Convert the COCO export under export_root into SA format."""
        json_data = self.export_root / (self.dataset_name + ".json")
        sa_classes = self._create_sa_classes(json_data)
        (self.output_dir / 'classes').mkdir(parents=True, exist_ok=True)
        write_to_json(self.output_dir / 'classes' / 'classes.json', sa_classes)
        self.conversion_algorithm(json_data, self.output_dir)

    def make_anno_json_generator(self):
        """Yield each SA annotation json under export_root, normalized via
        _parse_json_into_common_format; also records the total image count."""
        json_data = None
        if self.project_type == 'Pixel':
            jsons = list(Path(self.export_root).glob('*pixel.json'))
        elif self.project_type == 'Vector':
            jsons = list(Path(self.export_root).glob('*objects.json'))
        self.set_num_total_images(len(jsons))
        print()
        for fpath in jsons:
            with open(fpath, 'r') as fp:
                json_data = json.load(fp)
                json_data = self._parse_json_into_common_format(
                    json_data, fpath
                )
                yield json_data
1924637 | import re
import requests
from ScraperBase import ScraperBase
from Common import GetClinicsData, Status, SaveHtmlToTable, LIMITED_THRESHOLD
import logging
import json
class HealthMartPharmacies(ScraperBase):
    """Scraper for COVID vaccine availability at Health Mart pharmacies.

    For each configured clinic it queries the PDHI screening API and
    records YES/LIMITED/NO availability based on the number of visible
    appointment time slots.
    """
    def __init__(self):
        # Public site (informational; scraping goes through the API below).
        self.URL = "https://healthmartcovidvaccine.com"
        # API URL: https://scrcxp.pdhi.com/ScreeningEvent/fed87cd2-f120-48cc-b098-d72668838d8b/GetLocations/98072?state=WA
        # Template URL; INSERT_LOCATION_ID is substituted per location.
        self.ApiUrl = "https://scrcxp.pdhi.com/Screenings/INSERT_LOCATION_ID/VisibleTimeSlots"
        self.LocationName = "Health Mart Pharmacies"
        # Clinic rows (with per-clinic scraper_config/alternateUrl) from the shared table.
        self.clinics = GetClinicsData(key_filter='health_mart')
    @staticmethod
    def NormalizeAddress(address):
        """Removes punctuation, whitespace, and capitalization that might make addresses unequal"""
        address = address.lower()
        characters_to_remove = '-,. '
        return re.sub(f'[{characters_to_remove}]', '', address)
    def MakeGetRequest(self):
        """Query availability for every clinic and save one status row each.

        Returns a list of "key : status" strings (used for single-file testing).
        """
        results = []
        for table_row in self.clinics:
            # Make outbound GET to the API URL for the zip code of the location in question
            # resp = requests.get(table_row['alternateUrl'], verify = False)
            # API call now requires userAge: e.g., '&userAge=42' fixes things.
            # If alternateUrl doesn't include that, then add it in here. Assumes
            # that if the url doesn't end in WA, then the correct userAge stuff
            # was already appended in airtable.
            url = table_row['alternateUrl']
            if url.endswith("WA"):
                url += '&userAge=42'
            resp = requests.get(url, verify = False)
            # resp = requests.get(table_row['alternateUrl'])
            respJson = resp.json()
            status = Status.NO
            # Loop through returned locations that all have availability
            for location in respJson:
                # Make sure a returned location matches the table_row we are currently checking
                table_address = self.NormalizeAddress(table_row['scraper_config']) # Street address
                location_address = self.NormalizeAddress(location['address1'])
                if location_address in table_address:
                    # Pick off the locationId to use in the subsequent API call.
                    locationId = location['locationId']
                    # Call the API endppoint to get the visibleTimeSlots list.
                    apiResp = requests.get(self.ApiUrl.replace('INSERT_LOCATION_ID', str(locationId)), verify = False)
                    apiJson = apiResp.json()
                    numVisibleTimeSlots = len(apiJson['visibleTimeSlots'])
                    # print(f"loc: {location_address}, locationId: {locationId}, #slots: {numVisibleTimeSlots}")
                    # More slots than the threshold -> YES; any slots -> LIMITED.
                    if numVisibleTimeSlots > LIMITED_THRESHOLD:
                        status = Status.YES
                    elif numVisibleTimeSlots > 0:
                        status = Status.LIMITED
                    break
            self.Keys = [table_row['key']]
            self.SaveToTable(status, resp.text)
            results.append(f'{self.Keys[0]} : {status.name}')
        return results # Used for single file testing
    @SaveHtmlToTable
    def SaveToTable(self, status, html):
        """Persist the latest status/html for self.Keys via the SaveHtmlToTable decorator."""
        return self.Keys, status, html
# Manual test entry point: run the scraper once and log each clinic's status.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    scraper = HealthMartPharmacies()
    results = scraper.MakeGetRequest()
    for result in results:
        logging.debug(result)
| StarcoderdataPython |
class Solution(object):
    """LeetCode 202 'Happy Number' solver."""

    def isHappy(self, n):
        """Return True if *n* is a happy number.

        Repeatedly replaces n with the sum of the squares of its decimal
        digits until a single digit remains. Among single-digit numbers,
        only 1 and 7 are happy, so the reduction can stop there.
        """
        def sum_of_digit_squares(num):
            # Fixed: removed the leftover debug print executed on every
            # digit, and stopped shadowing the builtin `sum`.
            total = 0
            while num:  # num != 0
                total += pow(num % 10, 2)
                num //= 10
            return total
        while n > 9:
            n = sum_of_digit_squares(n)
        return n == 1 or n == 7
print(Solution().isHappy(19)) | StarcoderdataPython |
185531 | from __future__ import print_function
import codecs
import json
import os
from esphome.core import CORE, EsphomeError
from esphome.py_compat import safe_input
def read_config_file(path):
    # type: (basestring) -> unicode
    """Return the text contents of the configuration file at *path*.

    When running under the VSCode integration (and the file is the main
    config, or ace mode is off), the content is requested from the editor
    over the JSON message protocol instead of the filesystem.
    """
    use_vscode_channel = CORE.vscode and (
        not CORE.ace or os.path.abspath(path) == os.path.abspath(CORE.config_path))
    if use_vscode_channel:
        request = {
            'type': 'read_file',
            'path': path,
        }
        print(json.dumps(request))
        response = json.loads(safe_input())
        assert response['type'] == 'file_response'
        return response['content']
    try:
        with codecs.open(path, encoding='utf-8') as fh:
            return fh.read()
    except IOError as exc:
        raise EsphomeError(u"Error accessing file {}: {}".format(path, exc))
    except UnicodeDecodeError as exc:
        raise EsphomeError(u"Unable to read file {}: {}".format(path, exc))
| StarcoderdataPython |
9690681 | <filename>graph/migrations/0015_auto_20210215_0350.py
# Generated by Django 3.1.4 on 2021-02-14 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: redefines the optional 'csv' CharField on
    # the 'source' model (max_length=300, blank/null allowed, label 'CSV').
    dependencies = [
        ('graph', '0014_auto_20210214_2110'),
    ]
    operations = [
        migrations.AlterField(
            model_name='source',
            name='csv',
            field=models.CharField(blank=True, max_length=300, null=True, verbose_name='CSV'),
        ),
    ]
| StarcoderdataPython |
11394494 | <reponame>multiii/TinyDBOperations
from distutils.core import setup
# Read the long description for PyPI from the README.
with open(('README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# distutils package metadata; version and download_url must be bumped together.
setup(
    name='TinyDBOperations',
    packages=['TinyDBOperations'],
    version='0.1',
    license='MIT',
    description='A python wrapper used to perform additional TinyDB operations',
    long_description=long_description,
    author='multiii',
    author_email='<EMAIL>',
    url='https://github.com/multiii/TinyDBOperations',
    download_url='https://github.com/multiii/TinyDBOperations/archive/refs/tags/0.1.tar.gz',
    keywords=['TinyDB', 'tinydb', 'operations'],
    install_requires=[
        'tinydb',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
| StarcoderdataPython |
121836 | <gh_stars>0
# Author: <NAME>
# Creation date: 12 Aug 2021
from mlt.data import DAMatrix
from mlt.metric import AccuracyMetric
from mlt.metric import ArgmaxMeanMetric
from mlt.metric import EmpArgmaxMetric
from mlt.metric import AverageRankMetric
import numpy as np
### Test cases ###
# Two datasets x two algorithms: algorithm 1 always outperforms algorithm 0.
perfs = np.array([
    [0, 1],
    [0, 1],
])
# Fixture matrix whose known best algorithm index is 1.
da_te = DAMatrix(perfs=perfs)
da_te.best_algo = 1
# Predicted distribution over the two algorithms (60% mass on the best one).
dist_pred = np.array(
    [0.4, 0.6]
)
def test_AccuracyMetric():
    """Accuracy equals the probability mass placed on the true best algorithm."""
    metric = AccuracyMetric()
    score = metric(dist_pred, da_te)
    assert score == 0.6
def test_ArgmaxMeanMetric():
    """ArgmaxMeanMetric of the fixture prediction should be 0.6."""
    metric = ArgmaxMeanMetric()
    score = metric(dist_pred, da_te)
    assert score == 0.6
def test_EmpArgmaxMetric():
    """EmpArgmaxMetric of the fixture prediction should be 0.6."""
    metric = EmpArgmaxMetric()
    score = metric(dist_pred, da_te)
    assert score == 0.6
def test_AverageRankMetric():
    """AverageRankMetric of the fixture prediction should be 0.2."""
    metric = AverageRankMetric()
    score = metric(dist_pred, da_te)
    assert score == 0.2
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    test_AccuracyMetric()
    test_ArgmaxMeanMetric()
    test_EmpArgmaxMetric()
    test_AverageRankMetric()
205316 | #!/usr/bin/python
import os, time, sys
import re
import urlparse
import requests
from bs4 import BeautifulSoup
agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36"
class Spider():
    """Crawler for the Sina news daily archive pages (Python 2 code).

    Fetches the archive index for a given date, follows every article
    link on it, and appends each article's body paragraphs to a per-day
    text file under results/.
    """
    def __init__(self, year, month, day):
        self.host = "http://news.sina.com.cn/"
        # Archive index URL for the date, e.g. .../news1000_20060406.shtml
        self.url = "http://news.sina.com.cn/old1000/news1000_" + str(year) + str(month).zfill(2) + str(day).zfill(2) + ".shtml"
        self.year = year
        self.month = month
        self.day = day
        # One shared session so the UA/Referer headers apply to every request.
        self.s = requests.Session()
        self.s.headers.update({'User-Agent': agent})
        self.s.headers.update({'Referer': self.host })
    def parse_list(self):
        """Fetch the day's index page and scrape every linked article."""
        print "getting url: " + self.url,
        sys.stdout.flush()
        try:
            r = self.s.get(self.url, timeout=5)
        except:
            # Back off and give up on this day if the index fetch fails.
            time.sleep(10)
            return
        print " done"
        sys.stdout.flush()
        # print r.content
        # Pages are GBK-encoded; ignore undecodable bytes.
        soup = BeautifulSoup(r.content.decode('gbk','ignore'), 'lxml' )
        tags = soup.select("ul li a")
        print "tags got"
        for _tag in tags:
            # Extract the href value by scanning for the closing quote.
            tag = str(_tag)
            start = len("<a href=\"")
            end = start + 1
            while tag[end] != '"':
                end += 1
            url = tag[start:end]
            if not url.startswith("http"):
                url = "http://news.sina.com.cn" + url
            if url.find("cgi-bin") != -1:
                continue
            print "Crawl:", url
            # Retry each article with growing back-off; stop after 5 tries.
            num_try = 0
            get_content = 0
            while get_content == 0:
                try:
                    print "\tgetting page: " + url + ", num-try = " + str(num_try) + "..."
                    sys.stdout.flush()
                    page_r = self.s.get(url, timeout=3)
                    get_content = 1
                    print "\tdone"
                    sys.stdout.flush()
                except:
                    time.sleep(2 + num_try)
                    num_try += 1
                    if num_try > 5:
                        break
            if num_try > 10:
                continue
            page_soup = BeautifulSoup(page_r.content.decode('gbk', 'ignore'), 'lxml')
            # Articles keep their body inside <font id="zoom">.
            find_res = page_soup.find_all("font", id="zoom")
            if len(find_res) == 0:
                print "No Content"
                continue
            # print page_r.content
            content = ""
            for tag in page_soup.select("#zoom > p"):
                print tag
                content += str(tag)
            # Append to the per-day results file, e.g. results/20060406.txt
            f = open("results/" + str(self.year) + str(self.month).zfill(2) + str(self.day).zfill(2) + ".txt", "a")
            f.write(content)
            f.close()
        print r.content[:20]
def main():
print "Welcome!"
year = 2006
month = 4
os.system("mkdir results")
for day in range(6, 10):
if month == 2 and day > 28:
break
spider = Spider(year, month, day)
spider.parse_list()
pass
if __name__ == "__main__":
main()
| StarcoderdataPython |
1609640 | <gh_stars>1-10
from .Base import BaseTagExtension
from shuttl.Models.ContentBlocks.MultipleBlock import MultipleBlock
from shuttl.Models.FileTree.FileObjects.Template import Template
## The class for Repeat tags
class RepeatTagExtension(BaseTagExtension):
    """Template-tag extension that renders a repeatable (multiple) content block."""
    ## What tags trigger this extension
    tags = {'repeat'}
    endTag = "endrepeat"
    ## Render the named MultipleBlock within a copy of the current context
    # \param context the context rendering the content
    # \param name the name of the multiple block to fetch or create
    # \return the rendered content for the block
    def _action(self, context, name):
        # Copy so block rendering cannot mutate the caller's context.
        context = dict(**context)
        block = MultipleBlock.GetOrCreate(name, self.page, self.template_code)
        return block.renderContent(context, template=self.template)
5137353 | <filename>gmodsScripts/gmodsTLptopol.py
#!/usr/bin/env python
"""
Requirements: Python 3 or higher
Antechamber and related AmberTools
OpenBabel (strongly recommended for use with acpype)
acpype (latest version recommended with all its requirements)
Gromacs (Compulsory)
flask (Compulsory)
flaskwebgui (recommended)
pyfladesk (recommended)
This code is released under GNU General Public License V3.
<<< NO WARRANTY AT ALL!!! >>>
It was inspired by:
- CS50 online training for which this code serves as part of the final project
- PLEASE Read the README.md file and also follow instructions on the GUI and/or Terminal
<NAME>, B.Pharm. M.Sc. (Pharmacology) and Ph.D. (Science) Biochemistry
Department of Pharmacology, Faculty of Pharmacy
Obafemi Awolowo Universiy, Ile-Ife, Nigeria.
>>http://www.oauife.edu.ng<<
<EMAIL>; <EMAIL>
"""
import sys
if sys.version_info[0] < 3:
raise Exception("Python 3 or a more recent version is required.")
import os
import subprocess
from pathlib import Path
import time
import shutil
import random
import string
import math
import glob
from colored import fore, back, style
from gmodsScripts.gmodsHelpers import indexoflines, insertdetails, printWarning, printNote
def TLtopol(xrecfile, tlpcomtop, tff):
    """Build an alternative Gromacs topology file, 'tlptopol.top'.

    Merges a tleap/acpype-generated topology (*tlpcomtop*) with the
    forcefield header and position-restraint/system trailer taken from a
    pdb2gmx-generated topology (*xrecfile*), then removes or remaps
    atomtypes that duplicate entries in the selected forcefield's
    ffnonbonded.itp (with special handling for OPLS atom names).

    Interactive: may prompt the user (input()) for the forcefield choice
    and for the forcefield directory path. Works in the current directory
    and writes/renames files there.

    :param xrecfile: path to the pdb2gmx topol.top file
    :param tlpcomtop: path to the tleap/acpype combined topology file
    :param tff: forcefield directory name (e.g. 'amber99.ff') detected earlier
    :return: 'tlptopol.top' on success, otherwise *tlpcomtop* unchanged
    """
    tlpcwdir = Path.cwd()  # NOTE(review): captured but never used below
    tltopol = "tlptopol.top"
    tltopolopen = open(tltopol, "+a")
    # Insert the header information into the tlptopol.top file
    tltopolheaders = ["; This file was generated by modifying tleap/acpype generated topology file on one hand", "; And by adding selected pdb2gmx generated details on the other hands", "; The modification was done by removing duplicate atomtypes and/or replacing", "; them with equivalent compatible atoms as found in ffnonbonded.itp file of the selected forcefield", "; It was created to be used as alternative to pdb2gmx generated topologies when necessary", "; If used successfully it should have been renamed from tlptopol.top to topol.top for subsequent MDS"]
    for header in tltopolheaders:
        tltopolopen.write(header)
        tltopolopen.write('\n')
    tltopolopen.write('\n')
    # Insert the forcefiled parameter header from pdb2gmx topol.top file into tlptopol.top file
    xrecindex = indexoflines(xrecfile)
    tsindex = xrecindex['system']
    xrecfileopen = open(xrecfile, "r")
    xrecreadlines = xrecfileopen.readlines()
    at_id = 0
    for xL in xrecreadlines:
        xLat = xL.split()
        if ('Include' in xLat and 'forcefield' in xLat and 'parameters' in xLat):
            # Copy the '#include ...forcefield.itp' line plus the line after it.
            tltopolopen.write(xL)
            at_id += 1
            tltopolopen.write(xrecreadlines[at_id])
            tltopolopen.write('\n')
            break
        else:
            at_id += 1
    # Insert the content of tleap/acpype generated topol file from atomtypes into tlptopol.top
    tlpcindex = indexoflines(tlpcomtop)
    tlpcopen = open(tlpcomtop, "r")
    tlpcreadlines = tlpcopen.readlines()
    taindex = tlpcindex['atomtypes']
    for tline in tlpcreadlines:
        if not tline in tlpcreadlines[0:int(taindex)]:
            tltopolopen.write(tline)
    tltopolopen.close()
    tlpcopen.close()
    # Create a new file and populate it with relevant details found at the end of pdb2gmx topol file
    tlmnfile = "tlptopol_mn.itp"
    tlmnfileopen = open(tlmnfile, "+a")
    xrecfileopen.seek(0)
    xn = 0
    for xline in xrecreadlines:
        xlinelist = xline.split()
        if "POSRES" in xlinelist:
            # Copy from just before the POSRES line up to the [ system ] section.
            tlmnfileopen.write('\n')
            xnid = xn - 1
            while xnid < int(tsindex):
                tlmnfileopen.write(xrecreadlines[xnid])
                xnid += 1
            tlmnfileopen.write('\n')
            break
        else:
            xn += 1
    tlmnfileopen.close()
    xrecfileopen.close()
    # Insert the details into the tlptopol.top file
    insertL = "[ system ]"
    insertdetails(tltopol, tlmnfile, insertL)
    # We shall check for and remove duplicates in atomtypes between gmx standard and amber/tleap generated
    #Determine the appropriate forcefield directory selected at run time
    tlpindex = indexoflines(tltopol)
    atlp = int(tlpindex['atomtypes'])
    mtlp = int(tlpindex['moleculetype'])
    topol = open(tltopol, "r")
    topolreadlines = topol.readlines()
    nf = 1
    ff = ""
    for tline in topolreadlines:
        if ('Include' in tline.split() and 'forcefield' in tline.split() and 'parameters' in tline.split()):
            # The forcefield directory name is the '*.ff' path component of
            # the quoted include target on the following line.
            ln1 = topolreadlines[nf].split()
            ln2 = ln1[1].split('"')[1].split('/')
            for fd in ln2:
                if Path(fd).suffix == ".ff":
                    ff = fd
            break
        else:
            nf += 1
    print("Your detected forcefiled directory is", ff)
    if not ff == tff:
        # Mismatch with the earlier detection: let the user choose.
        print("However", ff, "does not match what was detected earlier", tff)
        print("It is recommended to use the forcefield detected earlier")
        print("To use the recommended forcefield {", tff, "}, type YES/y")
        print("To continue with currently detected forcefield {", ff, "}, press ENTER")
        response = input("Response: ")
        if (response.lower() == "yes" or response.lower() == "y"):
            ff = tff
            print("The detected directory has been changed to", ff)
    else:
        print(ff, "matched the earlier detected forcefield.")
    # Get the absolute path to the forcefield directory and copy ffnonbonded.itp file
    gmxtopdir = " "
    try:
        gmxtopdir = os.path.join(os.environ.get('GMXDATA'), 'top', ff)
    except:
        # GMXDATA not set (os.path.join raises on None): ask the user.
        print("We are unable to autodetect your forcefield directory")
        print("Please supply absolute path to your forcefield directory")
        print("Usually /path-to-gromacs/share/gromacs/top/your-selected-forcefield.ff")
        while True:
            gmxtopdir = input("Path to Forcefield: ")
            if not os.path.isdir(gmxtopdir):
                print("Directory you supplied does not exist. Please check and try again")
            else:
                break
    if not os.path.isdir(gmxtopdir):
        print(gmxtopdir, "that was autodetected, is not a valid forcefield directory")
        print("Please supply the correct absolute path to your forcefield directory")
        print("Usually /path-to-gromacs/share/gromacs/top/your-selected-forcefield.ff")
        while True:
            gmxtopdir = input("Path to Forcefield: ")
            if not os.path.isdir(gmxtopdir):
                print("Directory you supplied does not exist. Please check and try again")
            else:
                break
    print("Your topology directory is", gmxtopdir)
    time.sleep(5)
    lsgmxtopdir = os.listdir(gmxtopdir)
    for tp in lsgmxtopdir:
        if tp == "ffnonbonded.itp":
            shutil.copy(os.path.join(gmxtopdir, tp), './')
    # Get the atomtypes present in ffnonbonded.itp as list
    listffb = []
    fn = 0
    ffb = open("ffnonbonded.itp", "r")
    ffbreadlines = ffb.readlines()
    if ff[0:4].lower() == "opls":
        # OPLS: skip ahead to 'opls_128', then collect the *second* column
        # (bonded/type names) of opls_* entries.
        for fb in ffbreadlines:
            if 'opls_128' in fb.split():
                break
            else:
                fn += 1
        ffb.seek(0)
        for fb in ffbreadlines[fn:]:
            try:
                if not (fb.split() == [] or fb.split()[0] == "[" or fb.split()[0] == ";" or fb.split()[0][0] == '#' or fb.split()[0][0] == ';' or fb.split()[1] in listffb):
                    if fb.split()[0].split('_')[0].lower() == 'opls':
                        listffb.append(fb.split()[1])
            except IndexError:
                pass
    else:
        # Other forcefields: collect the first column (atomtype names).
        for fb in ffbreadlines[fn:]:
            try:
                if not (fb.split() == [] or fb.split()[0] == "[" or fb.split()[0] == ";" or fb.split()[0][0] == '#' or fb.split()[0][0] == ';' or fb.split()[0] in listffb):
                    listffb.append(fb.split()[0])
            except IndexError:
                pass
    ffb.seek(0)
    # Check the tlptopol.top file to remove duplicate by comparing with ffnonbonded.itp list of atomtypes
    print("Checking for and removing duplicate atomtypes from tlptopol.top...")
    time.sleep(5)
    mtlp = mtlp - 1
    topol.seek(0)
    ntopol = open("ntlptopol.top", "+a")
    for aline in topolreadlines[0:atlp]:
        ntopol.write(aline)
    topol.seek(0)
    # Copy the [ atomtypes ] section, dropping types already in the forcefield.
    for bline in topolreadlines[atlp:mtlp]:
        try:
            if not (bline.split() == [] or bline.split()[0].lower() in listffb or bline.split()[0].upper() in listffb):
                ntopol.write(bline)
        except IndexError:
            pass
    ntopol.write('\n')
    topol.seek(0)
    if ff[0:4].lower() == "opls":
        # OPLS only: rewrite the [ atoms ] section up to [ bonds ], replacing
        # each atom's type with the matching opls_* name from ffnonbonded.itp.
        btlp = 0
        for bd in topolreadlines:
            if 'bonds' in bd.split() and bd.split()[0] == '[' and bd.split()[1] == 'bonds':
                break
            else:
                btlp += 1
        topol.seek(0)
        for bdline in topolreadlines[mtlp:btlp]:
            if not (bdline.split() == [] or bdline.split()[0][0] == ';' or bdline.split()[0][0] == '['):
                matom = bdline.split()[1]
                ffb.seek(0)
                replace = " "
                bfound = 0
                for fline in ffbreadlines[fn:]:
                    if fline.split()[1] == matom or fline.split()[1].lower() == matom or fline.split()[1] == matom.upper() and 'opls' in fline.split()[0].split('_'):
                        replace = bdline.replace(matom, fline.split()[0], 1)
                        bfound += 1
                        break
                if not bfound > 0:
                    ntopol.write(bdline)
                else:
                    ntopol.write(replace)
            else:
                ntopol.write(bdline)
        ntopol.write('\n')
        topol.seek(0)
        for cline in topolreadlines[btlp:]:
            ntopol.write(cline)
    else:
        # Non-OPLS: copy the remainder of the file unchanged.
        for cline in topolreadlines[mtlp:]:
            ntopol.write(cline)
    ffb.close()
    topol.close()
    ntopol.close()
    # Now we shall backup the tlptopol.top and rename ntlptopol.top as tlptopol.top
    os.rename('tlptopol.top', '##tlptopol##')
    os.rename('ntlptopol.top', 'tlptopol.top')
    # Check to be sure tlptopol.top has been successfully generated
    checktlp = os.listdir()
    if not "tlptopol.top" in checktlp:
        printWarning("Something went wrong. Generating tlptopol.top file was not successful")
        print("If you need it later, check and correct any error, then rerun")
        return tlpcomtop
    else:
        printNote("tlptopol.top has been generated successfully")
        printNote("PLEASE NOTE:")
        print("Two topology files are now included in solvation folder - topol.top and tlptopol.top")
        print("By default, topol.top will be used. If topol.top failed, tlptopol will be used")
        print("If not used now, it will be copied into the gmxmds subfolder")
        print("To use it later, rename to topol.top and backup the original topol.top")
        time.sleep(10)
    return 'tlptopol.top'
| StarcoderdataPython |
95837 | <gh_stars>10-100
from app import app
from flask_ngrok import run_with_ngrok
# run_with_ngrok(app)
# app.run() | StarcoderdataPython |
5180212 | <filename>pfff/build.py
"""PEP 517 Build backend interface.
"""
import os
import flit.buildapi
import flit.inifile
import requirementslib
def _convert_requirement(r):
    """Render a requirementslib requirement as a PEP 508-style string."""
    version = ' ({})'.format(r.specifiers) if r.specifiers else ''
    return '{}{}{}{}'.format(
        r.req.line_part, r.extras_as_pip, version, r.markers_as_pip)
def _collect_pipfile_requires():
    """Collect (default, dev) requirement strings from the local Pipfile."""
    default = []
    develop = []
    by_section = {'packages': default, 'dev-packages': develop}
    pipfile = requirementslib.Pipfile.load(os.getcwd())
    for section in pipfile.sections:
        bucket = by_section.get(section.name)
        if bucket is None:
            continue
        for r in section.requirements:
            if r.is_named:
                bucket.append(_convert_requirement(r))
    return default, develop
# Keep a handle on Flit's original implementation before the monkey-patch below.
_read_pkg_ini = flit.inifile.read_pkg_ini
def read_pkg_ini(path):
    """Wrap Flit's read_pkg_ini, merging Pipfile requirements into the metadata."""
    pipfile_reqs = dict(zip(
        ('requires_dist', 'dev_requires'), _collect_pipfile_requires()))
    result = _read_pkg_ini(path)
    for key, extra in pipfile_reqs.items():
        combined = result['metadata'].get(key, [])
        combined.extend(extra)
        # Mirror the merged list into both views of the configuration.
        result['metadata'][key] = combined
        result['raw_config']['metadata'][key] = combined
    return result
# Monkey-patch Flit so builds also pick up requirements from the Pipfile.
flit.inifile.read_pkg_ini = flit.buildapi.read_pkg_ini = read_pkg_ini
# Re-export Flit's PEP 517 hooks (build_wheel, build_sdist, ...) as ours.
from flit.buildapi import * # noqa
6613417 | <gh_stars>1-10
import pickle
class Human:
    """Simple record of a person, populated interactively from stdin."""
    def __init__(self):
        # NOTE(review): construction reads from stdin; fine for this demo script.
        self.name=input("Enter your name : ")
        self.age=input("Enter your age : ")
    def disp(self):
        """Print a one-line greeting with the stored name and age."""
        print("Hello {}, You are {} year old!".format(self.name,self.age))
# Number of Human records to collect and persist.
num=int(input("Enter the number of people to be entered : "))
# Each Human is pickled as a separate object appended to the same file.
with open("HumanMulti.dat", "w+b") as f:
    for i in range(num):
        insaan=Human()
        pickle.dump(insaan,f)
# Read the objects back one at a time until EOFError marks the end of file.
with open("HumanMulti.dat", "rb") as f:
    while True:
        try:
            maanav=pickle.load(f)
            maanav.disp()
        except EOFError:
            print("Done with object")
            break
| StarcoderdataPython |
3485212 | T = int(input())
for _ in range(T):
H, W, N = map(int, input().split())
floor = (N-1) % H + 1
number = (N-1) // H + 1
print(floor * 100 + number)
| StarcoderdataPython |
6591577 | import unittest
from araugment import augment
class TestSimple(unittest.TestCase):
    """Smoke test for the back-translation augmenter."""
    def test_run(self):
        # Only checks that back_translate runs without raising on Arabic input.
        augment.back_translate("اهلا وسهلا كيف حالك؟")
# Run the unittest test runner when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
395329 | <reponame>dashcare/irrexplorer
from os import environ
import pytest
pytestmark = pytest.mark.asyncio
IRRD_MEMBEROF_EMPTY_RESPONSE = {"data": {"asSet": [], "autNum": []}} # type: ignore
IRRD_MEMBEROF_VALID_RESPONSE = {
"data": {
"asSet": [
{"rpslPk": "AS-DIRECT", "objectClass": "as-set", "source": "TEST"},
],
"autNum": [
{
"rpslPk": "AS213279",
"source": "TEST",
"mntBy": ["TEST-MNT"],
"memberOfObjs": [
{"rpslPk": "AS-VALID-MNTNER", "source": "TEST", "mbrsByRef": ["TEST-MNT"]},
{
"rpslPk": "AS-VALID-ANY",
"source": "TEST",
"mbrsByRef": [
"OTHER-MNT",
"ANY",
],
},
{
"rpslPk": "AS-NOT-VALID-EXCLUDE",
"source": "TEST",
"mbrsByRef": ["OTHER-MNT"],
},
],
}
],
},
}
async def test_asn_valid(client, httpserver):
    """A valid member-of reply yields the direct, mntner- and ANY-authorized sets."""
    environ["IRRD_ENDPOINT"] = httpserver.url_for("/graphql")
    httpserver.expect_request("/graphql").respond_with_json(IRRD_MEMBEROF_VALID_RESPONSE)
    response = await client.get("/api/sets/member-of/64500")
    assert response.status_code == 200
    payload = response.json()
    assert payload["irrsSeen"] == ["TEST"]
    expected_sets = {"AS-VALID-ANY", "AS-VALID-MNTNER", "AS-DIRECT"}
    assert set(payload["setsPerIrr"]["TEST"]) == expected_sets
async def test_asn_no_data(client, httpserver):
    """An empty IRRd reply produces an empty result payload."""
    environ["IRRD_ENDPOINT"] = httpserver.url_for("/graphql")
    httpserver.expect_request("/graphql").respond_with_json(IRRD_MEMBEROF_EMPTY_RESPONSE)
    response = await client.get("/api/sets/member-of/64500")
    assert response.status_code == 200
    assert response.json() == {"irrsSeen": [], "setsPerIrr": {}}
| StarcoderdataPython |
8021333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import urlparse
import tldextract
import time
import cherrypy
import rocksdb
from collections import defaultdict
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from baseline.ccdownloader import CCDownloader
def split_uri(uri, encoding='idna'):
    """Split *uri* into (encoded domain, public suffix, path[?query]).

    The domain is the registrable name without its public suffix, encoded
    with *encoding* (IDNA by default); the path keeps any query string.
    """
    if not (uri.startswith("http://") or
            uri.startswith("https://")):
        uri = "http://%s" % uri
    parsed_uri = urlparse.urlparse(uri)
    if not parsed_uri.netloc:
        # Bug fix: the original wrote `"http://%s" + uri`, concatenating the
        # literal format string instead of interpolating with `%`.
        parsed_uri = urlparse.urlparse("http://%s" % uri)
    netloc = parsed_uri.netloc
    assert netloc, "Cannot parse uri:%s\n" % uri
    extracted = tldextract.extract(netloc)
    path = "%s" % (parsed_uri.path)
    if parsed_uri.query:
        path = "%s?%s" % (path, parsed_uri.query)
    return extracted.domain.encode(encoding), extracted.suffix, path
def json_error(status, message, traceback, version):
    """CherryPy error handler: render the error details as pretty-printed JSON."""
    payload = dict(status=status, message=message,
                   traceback=traceback, version=version)
    return json.dumps(payload, sort_keys=True, indent=4)
class DBInterface(object):
    """CherryPy web service exposing Common-Crawl URL-index lookups.

    Each RocksDB directory holds one crawl's index; keys look like
    "<domain> <url> <crawl>" and values are JSON records with the WARC
    filename/offset/length of the captured page. (Python 2 code:
    iterkeys/it.next/iteritems are py2 RocksDB iterator idioms.)
    """
    def __init__(self, db_directories, pretty=False, verbose=0,
                 max_results=10000):
        self.dbs = {}
        self.ccdownloader = CCDownloader()
        for db_directory in db_directories:
            opts = rocksdb.Options()
            opts.create_if_missing = False
            opts.max_open_files = 100
            opts.num_levels = 6
            db = rocksdb.DB(db_directory, opts, read_only=True)
            # Peek at the first key to learn which crawl this DB holds.
            it = db.iterkeys()
            it.seek_to_first()
            key = it.next()
            tld, url, crawl = key.split(" ", 2)
            assert crawl not in self.dbs, "Multiple dbs for %s\n" % crawl
            sys.stderr.write("DB at %s holds crawl %s\n" %
                             (db_directory, crawl))
            self.dbs[crawl] = db
        self.pretty = pretty
        self.verbose = verbose
        self.max_results = max_results
    def _dump_json(self, data, pretty=False):
        """Serialize *data* to JSON (+newline), indented when pretty-printing."""
        if self.pretty or pretty:
            return json.dumps(data, indent=2) + "\n"
        return json.dumps(data) + "\n"
    @cherrypy.expose
    def crawls(self, **kwargs):
        """GET /crawls: list the crawl identifiers served by this instance."""
        cherrypy.response.headers['Content-Type'] = 'application/json'
        pretty = kwargs.get("pretty", 0) > 0
        result = {"crawls": sorted(self.dbs.keys())}
        return self._dump_json(result, pretty)
    @cherrypy.expose
    def query_prefix(self, **kwargs):
        """GET /query_prefix?url=...: find index entries whose key starts with the URL.

        Optional kwargs: crawl, pretty, verbose, exact, html, max_results.
        """
        cherrypy.response.headers['Content-Type'] = 'application/json'
        start_time = time.time()
        query_url = kwargs["url"]
        if not (query_url.startswith("http://") or
                query_url.startswith("https://")):
            query_url = "http://%s" % query_url
        query_crawl = kwargs.get("crawl", "")
        if query_crawl:
            assert query_crawl in self.dbs.keys()
        pretty = kwargs.get("pretty", 0) > 0
        verbose = kwargs.get("verbose", 0) > 0
        exact = kwargs.get("exact", 0) > 0
        get_html = kwargs.get("html", 0) > 0
        max_results = int(kwargs.get("max_results", self.max_results))
        query_domain, query_suffix, query_path = split_uri(query_url)
        db_key = "%s %s" % (query_domain, query_url)
        result = {"query_domain": query_domain,
                  "query_crawl": query_crawl,
                  "query_path": query_path,
                  "db_key": db_key}
        n_results = 0
        relevant_crawls = [query_crawl] if query_crawl else self.dbs.keys()
        # result["skipped_keys"] = []
        result["locations"] = defaultdict(list)
        for db_crawl in relevant_crawls:
            db = self.dbs[db_crawl]
            it = db.iteritems()
            it.seek(db_key)
            for key, value in it:
                key = key.decode("utf-8")
                if not key.startswith(db_key):
                    # We've gone too far
                    break
                tld, uri, crawl = key.split(" ", 2)
                assert crawl == db_crawl
                if exact and uri != query_url:
                    continue
                n_results += 1
                if n_results > max_results:
                    break
                data = json.loads(value)
                # work around stupid error
                if 'offset:' in data:
                    data['offset'] = data.pop('offset:')
                data['offset'] = int(data['offset'])
                data['length'] = int(data['length'])
                data["crawl"] = db_crawl
                result["locations"][uri].append(data)
        if verbose:
            result["time"] = "%.2fs" % (time.time() - start_time)
        if get_html:
            # Optionally fetch the captured HTML for every hit (slow).
            for uri in result["locations"]:
                for data in result["locations"][uri]:
                    data["html"] = self.get_html(data) # .encode("utf-8")
        return self._dump_json(result, pretty)
    def get_html(self, data):
        """Download just the HTML for one index record from Common Crawl."""
        html = self.ccdownloader.download(data["filename"],
                                          data["offset"],
                                          data["length"],
                                          html_only=True)
        return html
    @cherrypy.expose
    def query_tld(self, **kwargs):
        """GET /query_tld?tld=...: find index entries whose netloc contains the TLD.

        Optional kwargs: crawl, pretty, verbose, max_results.
        """
        cherrypy.response.headers['Content-Type'] = 'application/json'
        start_time = time.time()
        query_tld = kwargs["tld"]
        query_crawl = kwargs.get("crawl", "")
        if query_crawl:
            assert query_crawl in self.dbs.keys()
        pretty = kwargs.get("pretty", 0) > 0
        verbose = kwargs.get("verbose", 0) > 0
        max_results = int(kwargs.get("max_results", self.max_results))
        tld, _suffix, _path = split_uri(query_tld)
        db_key = "%s " % (tld)
        result = {"query_tld": query_tld, "db_key": db_key}
        n_results = 0
        relevant_crawls = [query_crawl] if query_crawl else self.dbs.keys()
        result["locations"] = defaultdict(list)
        for db_crawl in relevant_crawls:
            db = self.dbs[db_crawl]
            it = db.iteritems()
            it.seek(db_key)
            for key, value in it:
                key = key.decode("utf-8")
                if not key.startswith(db_key):
                    # We've gone too far
                    break
                tld, uri, crawl = key.split(" ", 2)
                assert crawl == db_crawl
                # This allows to query a suffix (e.g. .com) as well
                if query_tld not in urlparse.urlparse(uri).netloc:
                    continue
                n_results += 1
                if n_results > max_results:
                    break
                data = json.loads(value)
                # work around stupid error
                if 'offset:' in data:
                    data['offset'] = data.pop('offset:')
                data["crawl"] = db_crawl
                result["locations"][uri].append(data)
        if verbose:
            result["time"] = "%.2fs" % (time.time() - start_time)
        return self._dump_json(result, pretty)
# CLI entry point: parse server options and start the CherryPy service.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-ip',
                        help='server ip to bind to, default: localhost',
                        default="127.0.0.1")
    parser.add_argument('-port',
                        help='server port to bind to, default: 8080',
                        type=int,
                        default=8080)
    parser.add_argument('-nthreads',
                        help='number of server threads, default: 8',
                        type=int,
                        default=8)
    parser.add_argument('-maxresults',
                        help='maximum number of return results.',
                        type=int,
                        default=10000)
    parser.add_argument('-pretty',
                        action='store_true',
                        help='pretty print json')
    parser.add_argument('-logprefix',
                        help='logfile prefix, default: write to stderr')
    parser.add_argument('-verbose',
                        help='verbosity level, default: 0',
                        type=int,
                        default=0)
    parser.add_argument('db', nargs='+', help='leveldb root directories')
    # parser.add_argument('url', help='url to search for')
    args = parser.parse_args(sys.argv[1:])
    cherrypy.config.update({'server.request_queue_size': 1000,
                            'server.socket_port': args.port,
                            'server.thread_pool': args.nthreads,
                            'server.socket_host': args.ip})
    # Render uncaught errors through the JSON error handler above.
    cherrypy.config.update({'error_page.default': json_error})
    cherrypy.config.update({'log.screen': True})
    cherrypy.config.update({'tools.sessions.on': True,
                            'tools.encode.on': True})
    if args.logprefix:
        cherrypy.config.update({'log.access_file': "%s.access.log"
                                % args.logprefix,
                                'log.error_file': "%s.error.log"
                                % args.logprefix})
    cherrypy.quickstart(DBInterface(args.db,
                                    pretty=args.pretty,
                                    verbose=args.verbose,
                                    max_results=args.maxresults))
3567860 | <reponame>reip-project/reip-pipelines<gh_stars>0
from interface import *
from plasma import save_data, load_data, save_meta, load_meta, save_both, load_both
import pyarrow as pa
import numpy as np
import pyarrow.plasma as plasma
import multiprocessing as mp
import time
import copy
class UniqueID:
    """Process-local generator of sequential 20-byte plasma object IDs."""
    _id = 0
    @staticmethod
    def Gen():
        """Return a fresh plasma.ObjectID built from an incrementing counter."""
        UniqueID._id += 1
        # Right-align the counter in a fixed 20-character field: plasma
        # object IDs must be exactly 20 bytes long.
        padded = "%20s" % str(UniqueID._id)
        return plasma.ObjectID(padded.encode("utf-8"))
class SharedPointer:
    """Monotonic counter in shared memory, interpreted as a ring-buffer index."""
    def __init__(self, ring_size):
        # Unlocked shared integer: callers coordinate access themselves.
        self.counter = mp.Value('i', 0, lock=False)
        self.ring_size = ring_size
    @property
    def value(self):
        """Raw monotonic counter value."""
        return self.counter.value
    @value.setter
    def value(self, new_value):
        self.counter.value = new_value
    @property
    def pos(self):
        """Position within the ring (counter modulo ring size)."""
        _, remainder = divmod(self.counter.value, self.ring_size)
        return remainder
    @property
    def loop(self):
        """Number of complete passes around the ring."""
        full_loops, _ = divmod(self.counter.value, self.ring_size)
        return full_loops
class BufferStore(Sink):
    """Ring-buffer Sink backed by the Apache Arrow plasma object store.

    Pre-generates a fixed ring of object IDs, writes (data, meta) pairs into
    plasma under them, and lets multiple Customer readers consume at their
    own pace; fully consumed slots are batch-deleted from the store.
    """
    def __init__(self, size, debug=False, **kw):
        self.size = size + 1 # need extra slot because head == tail means empty
        # Pre-allocated object IDs for every ring slot (data/meta kept for
        # the commented-out separate-save path; both_ids is what's used).
        self.data_ids = [UniqueID.Gen() for i in range(self.size)]
        self.meta_ids = [UniqueID.Gen() for i in range(self.size)]
        self.both_ids = [UniqueID.Gen() for i in range(self.size)]
        self.head = SharedPointer(self.size)
        self.tail = SharedPointer(self.size)
        self.debug = debug
        self.customers = []
        self.pipes = []
        self.client = plasma.connect("/tmp/plasma")
        print("Store Connected. Warming up...")
        # First put/get round-trip is slow; do it once up front.
        t0 = time.time()
        ret = self.client.get(self.client.put("warm-up"))
        assert (ret == "warm-up")
        print("Warmed up in %.4f sec" % (time.time()- t0))
        super().__init__(**kw)
    def full(self):
        """Advance the tail past fully-consumed slots; return True when the ring is full."""
        if len(self.customers) > 0:
            # The slowest customer bounds how far the tail may advance.
            new_value = min([customer.value for customer in self.customers])
            to_delete = []
            for v in range(self.tail.value, new_value):
                # to_delete.append(self.data_ids[v % self.size])
                # to_delete.append(self.meta_ids[v % self.size])
                to_delete.append(self.both_ids[v % self.size])
            # Batch deletions to amortize the plasma delete call.
            # NOTE(review): tail only advances together with a deletion batch
            # (>1/5 of the ring) -- confirm this matches the intended policy.
            if len(to_delete) > self.size / 5:
                # print("Deleting:", len(to_delete))
                self.client.delete(to_delete)
                self.tail.value = new_value
        return (self.head.value - self.tail.value) >= (self.size - 1)
    def _put(self, buffer):
        """Store one (data, meta) pair in the current head slot and advance head."""
        data, meta = buffer
        # save_data(self.client, data, id=self.data_ids[self.head.pos])
        # save_meta(self.client, meta, id=self.meta_ids[self.head.pos])
        save_both(self.client, data, meta, id=self.both_ids[self.head.pos], debug=self.debug)
        self.head.value += 1
    def gen_source(self, **kw):
        """Register and return a new Customer reader starting at the current tail."""
        self.customers.append(SharedPointer(self.size))
        self.customers[-1].value = self.tail.value
        return Customer(self, len(self.customers) - 1, **kw)
class Customer(Source):
    """Read-side view (a Source) onto a BufferStore ring buffer.

    Each customer owns a cursor held by the parent store (indexed by
    ``id``) and a lazily-created plasma client connection.
    """

    def __init__(self, store, id, **kw):
        self.store = store
        self.id = id
        # Opened lazily on first _get() so the object can be handed to a
        # child process before any connection exists.
        self.client = None
        super().__init__(**kw)

    def _cursor(self):
        """Shared pointer tracking this customer's read position."""
        return self.store.customers[self.id]

    def empty(self):
        """True when everything written so far has been consumed."""
        return self._cursor().value == self.store.head.value

    def last(self):
        """True when at most one unread buffer remains."""
        return (self.store.head.value - self._cursor().value) <= 1

    def next(self):
        """Advance the read cursor by one slot."""
        self._cursor().value += 1

    def _get(self):
        """Load the (data, meta) pair for the current slot from plasma."""
        if self.client is None:
            self.client = plasma.connect("/tmp/plasma")
            print("Customer Connected")
        slot_id = self.store.both_ids[self._cursor().pos]
        data, meta = load_both(self.client, slot_id, debug=self.store.debug)
        return data, meta
def run(customers):
    """Consumer-side demo: drain three customers with different strategies.

    Intended to run in a child process; prints every buffer each
    customer yields, in order c0, c1, c2, then re-drains c2 after a
    short pause so the producer can publish one more buffer.
    """
    print("Started")
    c0, c1, c2 = customers
    for label, customer in (("c0:", c0), ("c1:", c1), ("c2:", c2)):
        print(label)
        while not customer.empty():
            print(customer.get())
            customer.next()
    # Give the producer a moment to publish, then drain the
    # Latest-strategy customer a second time.
    time.sleep(0.75)
    print("c2':")
    while not c2.empty():
        print(c2.get())
        c2.next()
    time.sleep(1.5)
    print("Done")
if __name__ == '__main__':
    # Demo/stress driver.  Requires a plasma store server listening on
    # /tmp/plasma (e.g. `plasma_store -m <bytes> -s /tmp/plasma`).
    bs = BufferStore(100)
    print(issubclass(type(bs), Sink))
    print(bs.client.store_capacity())
    print(bs.client.list())
    # Start from an empty store so the printed listings are meaningful.
    bs.client.delete(bs.client.list())
    print(bs.client.list())
    # Three readers exercising the different consumption strategies.
    c0 = bs.gen_source()
    c1 = bs.gen_source(strategy=Source.Skip, skip=1)
    c2 = bs.gen_source(strategy=Source.Latest)
    for i in range(10):
        bs.put((str(i), {"buffer": i}))
    process = mp.Process(target=run, args=([c0, c1, c2], ))
    # BUG FIX: was `process.deamon = True` — a typo that silently set an
    # unused attribute.  `daemon` must be spelled correctly and set
    # before start() so the child dies with the parent.
    process.daemon = True
    process.start()
    time.sleep(0.5)
    # Publish one more buffer while the consumer is sleeping; only the
    # Latest-strategy customer should pick it up in its second drain.
    bs.put((str(200), {"buffer": 200}))
    process.join()
    time.sleep(5)
| StarcoderdataPython |
6450928 | <gh_stars>0
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge slice operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from common import NgraphTest
class TestSliceOperations(NgraphTest):
    """Checks that tf.slice / Python strided-slice results computed under
    the nGraph bridge match plain NumPy slicing of the same input (or,
    where NumPy cannot express it, match stock TensorFlow)."""
    def test_slice(self):
        """array_ops.slice with explicit begin/size pairs (-1 size = rest of dim)."""
        inp = np.random.rand(4, 4).astype("f")
        slice_ts = []
        expected = []
        a = np.array([float(x) for x in inp.ravel(order="C")])
        a.shape = (4, 4)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        slice_ts.append(array_ops.slice(x, [0, 0], [2, 2]))
        slice_ts.append(array_ops.slice(x, [0, 0], [-1, -1]))
        slice_ts.append(array_ops.slice(x, [2, 2], [-1, -1]))
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals = self.with_ngraph(run_test)
        expected.append(inp[:2, :2])
        expected.append(inp[:, :])
        expected.append(inp[2:, 2:])
        for v, e in zip(slice_vals, expected):
            np.testing.assert_array_equal(v, e)
    def test_strided_slice(self):
        """Python slicing syntax on a 2-D tensor: strides, reversal,
        degenerate/empty intervals, ellipsis and newaxis."""
        inp = np.random.rand(4, 5).astype("f")
        slice_ts = []
        expected = []
        a = np.array([float(x) for x in inp.ravel(order="C")])
        a.shape = (4, 5)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        slice_ts.append(x[:])
        slice_ts.append(x[:, :])
        slice_ts.append(x[1:, :-2])
        slice_ts.append(x[::2, :-2])
        slice_ts.append(x[1, :])
        slice_ts.append(x[:, 1])
        slice_ts.append(x[1, 1])
        slice_ts.append(x[0])
        slice_ts.append(x[0][1])
        slice_ts.append(x[-1])
        # Various ways of representing identity slice
        slice_ts.append(x[:, :])
        slice_ts.append(x[::, ::])
        slice_ts.append(x[::1, ::1])
        # Reverse in each dimension independently
        slice_ts.append(x[::-1, :])
        slice_ts.append(x[:, ::-1])
        ## negative index tests i.e. n-2 in first component
        slice_ts.append(x[-2::-1, ::1])
        # degenerate by offering a forward interval with a negative stride
        slice_ts.append(x[0:-1:-1, :])
        # degenerate with a reverse interval with a positive stride
        slice_ts.append(x[-1:0, :])
        # empty interval in every dimension
        slice_ts.append(x[-1:0, 2:3:-1])
        slice_ts.append(x[2:2, 2:3:-1])
        # stride greater than range
        slice_ts.append(x[1:3:7, :])
        # ellipses and new axis
        slice_ts.append(x[:, tf.newaxis])
        slice_ts.append(x[...])
        slice_ts.append(x[1:2, ...])
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals = self.with_ngraph(run_test)
        expected.append(inp[:])
        expected.append(inp[:, :])
        expected.append(inp[1:, :-2])
        expected.append(inp[::2, :-2])
        expected.append(inp[1, :])
        expected.append(inp[:, 1])
        expected.append(inp[1, 1])
        expected.append(inp[0])
        expected.append(inp[0][1])
        expected.append(inp[-1])
        #TODO: support ellipses and new_axis correctly
        # Various ways of representing identity slice
        expected.append(inp[:, :])
        expected.append(inp[::, ::])
        expected.append(inp[::1, ::1])
        # Reverse in each dimension independently
        expected.append(inp[::-1, :])
        expected.append(inp[:, ::-1])
        ## negative index tests i.e. n-2 in first component
        expected.append(inp[-2::-1, ::1])
        # degenerate by offering a forward interval with a negative stride
        expected.append(inp[0:-1:-1, :])
        # degenerate with a reverse interval with a positive stride
        expected.append(inp[-1:0, :])
        # empty interval in every dimension
        expected.append(inp[-1:0, 2:3:-1])
        expected.append(inp[2:2, 2:3:-1])
        # stride greater than range
        expected.append(inp[1:3:7, :])
        # ellipses and new axis
        expected.append(inp[:, tf.newaxis])
        expected.append(inp[...])
        expected.append(inp[1:2, ...])
        for v, e in zip(slice_vals, expected):
            np.testing.assert_array_equal(v, e)
    def test_strided_slice_2(self):
        """Degenerate intervals on a 3-D tensor (reverse-stride empty slice)."""
        inp = np.random.rand(3, 2, 3).astype("f")
        slice_ts = []
        expected = []
        a = np.array([float(x) for x in inp.ravel(order="C")])
        a.shape = (3, 2, 3)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        slice_ts.append(x[-1:0, 2:2, 2:3:-1])
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals = self.with_ngraph(run_test)
        expected.append(inp[-1:0, 2:2, 2:3:-1])
        for v, e in zip(slice_vals, expected):
            np.testing.assert_array_equal(v, e)
    def test_strided_slice_3(self):
        """Out-of-range negative bounds on a 3-D tensor yield empty result."""
        inp = np.random.rand(3, 2, 3).astype("f")
        slice_ts = []
        expected = []
        a = np.array([float(x) for x in inp.ravel(order="C")])
        a.shape = (3, 2, 3)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        slice_ts.append(x[-1:0, -10:-10, 2:3:-1])
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals = self.with_ngraph(run_test)
        expected.append(inp[-1:0, -10:-10, 2:3:-1])
        for v, e in zip(slice_vals, expected):
            np.testing.assert_array_equal(v, e)
    def test_strided_slice_4(self):
        """Mixed in-range / clamped-negative bounds on a 3-D tensor."""
        inp = np.random.rand(3, 2, 3).astype("f")
        slice_ts = []
        expected = []
        a = np.array([float(x) for x in inp.ravel(order="C")])
        a.shape = (3, 2, 3)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        slice_ts.append(x[-1:0, 0:-10, 2:3:-1])
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals = self.with_ngraph(run_test)
        expected.append(inp[-1:0, 0:-10, 2:3:-1])
        for v, e in zip(slice_vals, expected):
            np.testing.assert_array_equal(v, e)
    # array_ops_test.StridedSliceTest.testTensorIndexing
    def test_strided_slice_5(self):
        """Tensor-valued slice bounds after an ellipsis; nGraph output is
        compared against stock TensorFlow rather than NumPy."""
        a = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        bar = tf.constant(2)
        bar2 = tf.constant(3)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        slice_ts = [
            x[..., bar:bar2], x[..., bar], x[..., 3], x[..., 2**64 // 2**63]
        ]
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals_ng = self.with_ngraph(run_test)
        slice_vals_tf = self.without_ngraph(run_test)
        for v, e in zip(slice_vals_ng, slice_vals_tf):
            np.testing.assert_array_equal(v, e)
    def test_strided_slice_zerodim(self):
        """Slicing across a zero-length dimension."""
        inp = np.random.rand(4, 0, 5).astype("f")
        slice_ts = []
        expected = []
        a = np.array([float(x) for x in inp.ravel(order="C")])
        a.shape = (4, 0, 5)
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        #(slicing an empty dim by empty slice)
        slice_ts.append(x[1:2, 2:2, 1:2])
        #(slicing an empty dim by non empty slice)
        slice_ts.append(x[1:2, 1:2, 1:2])
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: a})
        slice_vals = self.with_ngraph(run_test)
        expected.append(inp[1:2, 2:2, 1:2])
        expected.append(inp[1:2, 1:2, 1:2])
        for v, e in zip(slice_vals, expected):
            np.testing.assert_array_equal(v, e)
    def test_incorrect_strided_slice(self):
        """Slicing a scalar (0-dim) input must raise an out-of-range error."""
        inp = 0
        slice_ts = []
        x = tf.compat.v1.placeholder(dtype=dtypes.float32)
        #(slicing an empty dim by empty slice)
        slice_ts.append(x[1:1])
        def run_test(sess):
            return sess.run(slice_ts, feed_dict={x: inp})
        with pytest.raises(Exception) as excinfo:
            slice_vals = self.with_ngraph(run_test)
        assert "Index out of range using input dim 1; input has only 0 dims" in excinfo.value.message
| StarcoderdataPython |
3214079 | """
Created by Fanghl on 2020/9/10 13:21
"""
# SQLALCHEMY_DATABASE_URI = 'mysql+cymysql://root:123456@127.0.0.1:3306/ginger'
SQLALCHEMY_DATABASE_URI = 'mysql+cymysql://root:123456@172.16.31.10:3306/ginger'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_COMMIT_TEARDOWN = True
SECRET_KEY = '<PASSWORD> is a secret' | StarcoderdataPython |
4927271 | import art
def encrypt(text, shift):
    """Caesar-encrypt *text* by *shift* positions and print the result.

    Fixes over the previous version:
    - The shift now wraps with modulo arithmetic, so any non-negative
      shift works (the old single ``newPos -= len(alphabet)`` raised
      IndexError once ``shift`` reached roughly two alphabet lengths).
    - Characters not present in ``alphabet`` (spaces, digits,
      punctuation) are passed through unchanged instead of raising
      ValueError from ``alphabet.index``.
    """
    output = ""
    for letter in text:
        if letter in alphabet:
            output += alphabet[(alphabet.index(letter) + shift) % len(alphabet)]
        else:
            output += letter
    print(output)
def decrypt(text, shift):
    """Caesar-decrypt *text* by *shift* positions and print the result.

    Fixes over the previous version:
    - The shift now wraps with modulo arithmetic; the old code relied
      on Python's negative indexing, which raises IndexError once
      ``pos - shift`` drops below ``-len(alphabet)``.
    - Characters not present in ``alphabet`` are passed through
      unchanged instead of raising ValueError.
    """
    output = ""
    for letter in text:
        if letter in alphabet:
            output += alphabet[(alphabet.index(letter) - shift) % len(alphabet)]
        else:
            output += letter
    print(output)
# The 26 lowercase letters: shared lookup table for encrypt()/decrypt().
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
loop = True
# 'art' is a third-party ASCII-art package; its logo is the banner.
print(art.logo)
# Interactive loop: ask direction / message / shift until the user quits.
while loop:
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    if direction == "encode":
        encrypt(text, shift)
    elif direction == "decode":
        decrypt(text, shift)
    # NOTE(review): any answer other than exactly "yes" ends the loop,
    # and an unrecognized direction silently does nothing.
    again = input("Do again? ('yes' or 'no'): ")
    if again == "yes":
        pass
    else:
        loop = False
11387542 | <reponame>bgerxx/woodpecker
import os
import sys
import traceback
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import apibinding.inventory as inventory
class zstack_kvm_volume_file_checker(checker_header.TestChecker):
    '''Check that a KVM volume's backing file exists on its storage
    backend.  Calls self.judge(True) when the file is found and
    self.judge(False) otherwise.'''
    def check(self):
        """Dispatch to the backend-specific existence check based on the
        volume's primary-storage type (iSCSI / NFS / local / Ceph)."""
        super(zstack_kvm_volume_file_checker, self).check()
        volume = self.test_obj.volume
        volume_installPath = volume.installPath
        if not volume_installPath:
            test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
            return self.judge(False)
        ps_uuid = volume.primaryStorageUuid
        ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
        if test_lib.lib_is_ps_iscsi_backend(ps_uuid):
            self.check_iscsi(volume, volume_installPath, ps)
        elif ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            self.check_nfs(volume, volume_installPath)
        elif ps.type == inventory.LOCAL_STORAGE_TYPE:
            host = test_lib.lib_get_local_storage_volume_host(volume.uuid)
            if not host:
                return self.judge(False)
            self.check_file_exist(volume, volume_installPath, host)
        elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
            self.check_ceph(volume, volume_installPath, ps)
    def check_iscsi(self, volume, volume_installPath, ps):
        """Check file existence for an iSCSI filesystem-backed volume on
        the host that owns the backend."""
        host = test_lib.lib_find_host_by_iscsi_ps(ps)
        if not host:
            test_util.test_logger('Check result: can not find Host, who owns iscsi filesystem backend. [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
            return self.judge(False)
        test_lib.lib_install_testagent_to_host(host)
        # installPath has the form '<iscsi part>;file://<fs path>' — take
        # the filesystem half for the on-host existence check.
        volume_file_path = volume_installPath.split(';')[1].split('file://')[1]
        self.check_file_exist(volume, volume_file_path, host)
    def check_ceph(self, volume, volume_installPath, ps):
        """Check that the rbd image backing the volume exists by running
        'rbd info' over SSH on a Ceph monitor host."""
        monHost = ps.mons[0].hostname
        # Try to find SSH credentials for the monitor in the environment;
        # otherwise fall back to the test default account.
        for key in os.environ.keys():
            if monHost in os.environ.get(key):
                ceph_host, username, password = \
                        test_lib.lib_get_ceph_info(os.environ.get(key))
                break
        else:
            ceph_host = monHost
            username = 'root'
            password = 'password'
        volume_installPath = volume_installPath.split('ceph://')[1]
        command = 'rbd info %s' % volume_installPath
        if test_lib.lib_execute_ssh_cmd(ceph_host, username, password, command, 10):
            test_util.test_logger('Check result: [volume:] %s [file:] %s exist on ceph [host name:] %s .' % (volume.uuid, volume_installPath, ceph_host))
            return self.judge(True)
        else:
            test_util.test_logger('Check result: [volume:] %s [file:] %s does NOT exist on ceph [host name:] %s .' % (volume.uuid, volume_installPath, ceph_host))
            return self.judge(False)
    def check_nfs(self, volume, volume_installPath):
        """Check file existence for an NFS-backed volume on a host of the
        same zone (the NFS mount is shared)."""
        host = test_lib.lib_get_volume_object_host(self.test_obj)
        if not host:
            test_util.test_logger('Check result: can not find Host, who is belonged to same Zone Uuid of [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
            return self.judge(False)
        self.check_file_exist(volume, volume_installPath, host)
    def check_file_exist(self, volume, volume_installPath, host):
        """Run a shell test on *host* via the test agent to see whether
        the file at *volume_installPath* exists; judge accordingly."""
        cmd = host_plugin.HostShellCmd()
        file_exist = "file_exist"
        # '[ -f path ] && echo file_exist' prints the marker only when the
        # regular file is present.
        cmd.command = '[ -f %s ] && echo %s' % (volume_installPath, file_exist)
        rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, host_plugin.HOST_SHELL_CMD_PATH), cmd)
        rsp = jsonobject.loads(rspstr)
        output = jsonobject.dumps(rsp.stdout)
        if file_exist in output:
            test_util.test_logger('Check result: [volume:] %s [file:] %s exist on [host name:] %s .' % (volume.uuid, volume_installPath, host.managementIp))
            return self.judge(True)
        else:
            test_util.test_logger('Check result: [volume:] %s [file:] %s does not exist on [host name:] %s .' % (volume.uuid, volume_installPath, host.managementIp))
            return self.judge(False)
class zstack_kvm_volume_attach_checker(checker_header.TestChecker):
    '''
    Check if volume is really attached to vm in libvirt system.
    '''
    def check(self):
        """Query the VM's block-device status through the host test agent
        and judge True iff the volume's install path appears in it."""
        super(zstack_kvm_volume_attach_checker, self).check()
        volume = self.test_obj.volume
        if not volume.vmInstanceUuid:
            test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
            return self.judge(False)
        if not self.test_obj.target_vm:
            test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.' % volume.uuid)
            return self.judge(False)
        vm = self.test_obj.target_vm.vm
        volume_installPath = volume.installPath
        if not volume_installPath:
            test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
            return self.judge(False)
        host = test_lib.lib_get_vm_host(vm)
        cmd = vm_plugin.VmStatusCmd()
        cmd.vm_uuids = [vm.uuid]
        rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
        rsp = jsonobject.loads(rspstr)
        output = jsonobject.dumps(rsp.vm_status[vm.uuid])
        # Normalize the install path to the form libvirt reports for each
        # storage backend before searching the status dump.
        if volume_installPath.startswith('iscsi'):
            # 'iscsi://host/iqn...' -> 'iqn...' with the trailing LUN
            # suffix stripped.
            volume_installPath = volume_installPath.split(';')[0].split('/iqn')[1]
            volume_installPath = 'iqn%s' % volume_installPath
            volume_installPath = volume_installPath[:-2]
        elif volume_installPath.startswith('ceph'):
            volume_installPath = volume_installPath.split('ceph://')[1]
        elif volume_installPath.startswith('fusionstor'):
            volume_installPath = volume_installPath.split('fusionstor://')[1]
        if volume_installPath in output:
            test_util.test_logger('Check result: [volume:] %s [file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
            return self.judge(True)
        else:
            test_util.test_logger('Check result: [volume:] %s [file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
            return self.judge(False)
| StarcoderdataPython |
3559175 | from scipy.io import loadmat
import numpy as np
def load_weights(filename):
    """Read pretrained network weights from a MATLAB ``.mat`` file.

    The file must contain matrices named 'Theta1' and 'Theta2'.
    Theta2's rows are rotated down by one (np.roll along axis 0) before
    both matrices are flattened (C order) and concatenated into a
    single 1-D parameter vector, which is returned.
    """
    mat = loadmat(filename)
    theta2_rolled = np.roll(mat['Theta2'], 1, axis=0)
    return np.concatenate([mat['Theta1'].ravel(), theta2_rolled.ravel()])
| StarcoderdataPython |
1625949 | """
Design a program equivalent to Microsoft Paint. This contains shapes, print/load/save functionality, etc.
"""
| StarcoderdataPython |
4824176 | <gh_stars>1000+
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Survo puzzle Google CP Solver.
http://en.wikipedia.org/wiki/Survo_Puzzle
'''
Survo puzzle is a kind of logic puzzle presented (in April 2006) and studied
by <NAME>. The name of the puzzle is associated to Mustonen's
Survo system which is a general environment for statistical computing and
related areas.
In a Survo puzzle the task is to fill an m * n table by integers 1,2,...,m*n
so
that each of these numbers appears only once and their row and column sums are
equal to integers given on the bottom and the right side of the table.
Often some of the integers are given readily in the table in order to
guarantee uniqueness of the solution and/or for making the task easier.
'''
See also
http://www.survo.fi/english/index.html
http://www.survo.fi/puzzles/index.html
References:
Mustonen, S. (2006b). "On certain cross sum puzzles"
http://www.survo.fi/papers/puzzles.pdf
Mustonen, S. (2007b). "Enumeration of uniquely solvable open Survo puzzles."
http://www.survo.fi/papers/enum_survo_puzzles.pdf
<NAME>: "Some comments on magic squares and Survo puzzles"
http://www.helsinki.fi/~kvehkala/Kimmo_Vehkalahti_Windsor.pdf
R code: http://koti.mbnet.fi/tuimala/tiedostot/survo.R
Compare with the following models:
* Choco : http://www.hakank.org/choco/SurvoPuzzle.java
* Comet : http://www.hakank.org/comet/survo_puzzle.co
* ECLiPSE : http://www.hakank.org/eclipse/survo_puzzle.ecl
* Gecode : http://www.hakank.org/gecode/survo_puzzle.cpp
* Gecode/R: http://www.hakank.org/gecode_r/survo_puzzle.rb
* JaCoP : http://www.hakank.org/JaCoP/SurvoPuzzle.java
* MiniZinc: http://www.hakank.org/minizinc/survo_puzzle.mzn
* Tailor/Essence': http://www.hakank.org/tailor/survo_puzzle.eprime
* Zinc: http://www.hakank.org/minizinc/survo_puzzle.zinc
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main(r=0, c=0, rowsums=None, colsums=None, game=None):
  """Solve a Survo puzzle with Google CP Solver and print all solutions.

  Parameters
  ----------
  r, c : int
      Number of rows / columns.  r == 0 selects the built-in default
      3x4 instance.
  rowsums, colsums : list[int] or None
      Required sum of each row / column.
  game : list[list[int]] or None
      Partially filled grid; 0 marks an empty cell, positive entries
      are fixed clues.
  """
  # The old signature used mutable list defaults (rowsums=[], ...),
  # which are shared across calls — replaced with None sentinels.
  if rowsums is None:
    rowsums = []
  if colsums is None:
    colsums = []
  if game is None:
    game = []
  # Create the solver.
  solver = pywrapcp.Solver("Survo puzzle")
  #
  # data
  #
  if r == 0:
    r = 3
    c = 4
    rowsums = [30, 18, 30]
    colsums = [27, 16, 10, 25]
    game = [[0, 6, 0, 0], [8, 0, 0, 0], [0, 0, 3, 0]]
  print("r:", r, "c:", c)
  # declare variables: x[i, j] is the value placed in cell (i, j)
  x = {}
  for i in range(r):
    for j in range(c):
      x[(i, j)] = solver.IntVar(1, r * c, "x %i %i" % (i, j))
  #
  # constraints
  #
  # set the clues
  for i in range(r):
    for j in range(c):
      if game[i][j] > 0:
        solver.Add(x[i, j] == game[i][j])
  # every number 1..r*c appears exactly once
  xflat = [x[(i, j)] for i in range(r) for j in range(c)]
  solver.Add(solver.AllDifferent(xflat))
  #
  # row sums and column sums must match the given targets
  #
  for i in range(r):
    solver.Add(rowsums[i] == solver.Sum([x[i, j] for j in range(c)]))
  for j in range(c):
    solver.Add(colsums[j] == solver.Sum([x[i, j] for i in range(r)]))
  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add([x[(i, j)] for i in range(r) for j in range(c)])
  collector = solver.AllSolutionCollector(solution)
  solver.Solve(
      solver.Phase(xflat, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
      [collector])
  num_solutions = collector.SolutionCount()
  print("\nnum_solutions: ", num_solutions)
  if num_solutions > 0:
    for s in range(num_solutions):
      xval = [collector.Value(s, x[(i, j)]) for i in range(r) for j in range(c)]
      for i in range(r):
        for j in range(c):
          print("%2i" % (xval[i * c + j]), end=" ")
        print()
      print()
    print()
    print("num_solutions:", num_solutions)
    print("failures:", solver.Failures())
    print("branches:", solver.Branches())
    print("WallTime:", solver.WallTime())
  else:
    print("No solutions found")
#
# Read a problem instance from a file
#
def read_problem(file):
  """Parse a Survo puzzle instance file.

  Expected line-oriented format:
    1. number of rows r
    2. number of columns c
    3. comma-separated row sums
    4. comma-separated column sums
    5. r lines of comma-separated grid rows (0 = empty cell)

  Returns [r, c, rowsums, colsums, game].

  NOTE: the parameter name 'file' shadows the builtin but is kept for
  backward compatibility with existing keyword callers.
  """
  # 'with' guarantees the handle is closed; the previous version opened
  # the file and never closed it.
  with open(file, "r") as f:
    r = int(f.readline())
    c = int(f.readline())
    rowsums = [int(t) for t in f.readline().rstrip().split(",")]
    colsums = [int(t) for t in f.readline().rstrip().split(",")]
    game = []
    for i in range(r):
      # Truncate to c entries, matching the old fixed-size row copy.
      values = [int(t) for t in f.readline().rstrip().split(",")]
      game.append(values[:c])
  return [r, c, rowsums, colsums, game]
if __name__ == "__main__":
if len(sys.argv) > 1:
file = sys.argv[1]
[r, c, rowsums, colsums, game] = read_problem(file)
main(r, c, rowsums, colsums, game)
else:
main()
| StarcoderdataPython |
5070064 | <gh_stars>1-10
from typing import Literal, Optional, Union
from pydantic import AnyHttpUrl, BaseModel
from .model_signature import ModelSignature
class Endpoint(BaseModel):
    """Pydantic model describing a remote model-serving endpoint."""
    # Fully-qualified http(s) URL of the endpoint (validated by pydantic).
    url: AnyHttpUrl
    # HTTP verb used to invoke the endpoint; restricted to POST/GET/PUT.
    method: Union[Literal["POST"], Literal["GET"], Literal["PUT"]]
    # Optional signature describing the model's expected inputs/outputs.
    signature: Optional[ModelSignature]
270180 | import numpy as np
import torch
class ReplayBuffer(object):
    """Fixed-capacity FIFO replay buffer for off-policy RL.

    Transitions are stored in pre-allocated NumPy arrays and overwritten
    circularly once ``max_size`` is reached.  ``sample`` returns torch
    tensors when a ``device`` was given at construction, otherwise a
    plain dict of NumPy arrays.
    """
    def __init__(self, state_dim, action_dim, device=None, max_size=int(1e6)):
        self.max_size = max_size
        # ptr: next write slot; size: number of valid entries (<= max_size).
        self.ptr = 0
        self.size = 0
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        # Stored as 1 - done so it can multiply bootstrap targets directly.
        self.not_done = np.zeros((max_size, 1))
        self.device = None
        if device is not None:
            self.device = device
    def add(self, state, action, next_state, reward, done):
        """Insert one transition, overwriting the oldest when full."""
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
    def sample(self, batch_size=256):
        """Sample a random batch (with replacement) of stored transitions."""
        ind = np.random.randint(0, self.size, size=batch_size)
        # Device lets you switch running code from either torch or tf
        if self.device:
            return (
                torch.FloatTensor(self.state[ind]).to(self.device),
                torch.FloatTensor(self.action[ind]).to(self.device),
                torch.FloatTensor(self.next_state[ind]).to(self.device),
                torch.FloatTensor(self.reward[ind]).to(self.device),
                torch.FloatTensor(self.not_done[ind]).to(self.device)
            )
        else:
            # NOTE(review): the 'done' key actually carries not_done
            # (1 - done) values — misleading name, but renaming would
            # break existing consumers.
            return dict(obs1=self.state[ind],
                        acts=self.action[ind],
                        obs2=self.next_state[ind],
                        rews=self.reward[ind],
                        done=self.not_done[ind])
    def save(self, save_folder):
        """Persist the filled portion of the buffer as .npy files with a
        common path prefix."""
        np.save(f"{save_folder}_state.npy", self.state[:self.size])
        np.save(f"{save_folder}_action.npy", self.action[:self.size])
        np.save(f"{save_folder}_next_state.npy", self.next_state[:self.size])
        np.save(f"{save_folder}_reward.npy", self.reward[:self.size])
        np.save(f"{save_folder}_not_done.npy", self.not_done[:self.size])
        np.save(f"{save_folder}_ptr.npy", self.ptr)
    def load(self, save_folder, size=-1):
        """Restore a buffer previously written by save(); size > 0 caps
        the number of transitions loaded."""
        reward_buffer = np.load(f"{save_folder}_reward.npy")
        # Adjust crt_size if we're using a custom size
        size = min(int(size), self.max_size) if size > 0 else self.max_size
        self.size = min(reward_buffer.shape[0], size)
        self.state[:self.size] = np.load(f"{save_folder}_state.npy")[:self.size]
        self.action[:self.size] = np.load(f"{save_folder}_action.npy")[:self.size]
        self.next_state[:self.size] = np.load(f"{save_folder}_next_state.npy")[:self.size]
        self.reward[:self.size] = reward_buffer[:self.size]
        self.not_done[:self.size] = np.load(f"{save_folder}_not_done.npy")[:self.size]
    def load_yuchen_demo(self, save_folder, size=-1):
        """Load demonstration trajectories from a .npz archive with keys
        'o' (observations), 'u' (actions), 'r' (rewards), 'done'."""
        demo_data = np.load(f"{save_folder}.npz")
        # Adjust crt_size if we're using a custom size
        size = min(int(size), self.max_size) if size > 0 else self.max_size
        # The data is trajectory per demo so (40, 40, x)
        # The buffer expects (flattened_trajectories, x)
        self.size = min(demo_data["r"].shape[0] * demo_data["r"].shape[1] , size)
        # The state obs in the demo data has extra entry dimension, why? next_state?
        self.size_obs = min(demo_data["o"].shape[0] * demo_data["o"].shape[1] , size)
        # Flatten these
        self.state[:self.size_obs] = demo_data["o"].reshape(-1, demo_data["o"].shape[-1])
        self.action[:self.size] = demo_data["u"].reshape(-1, demo_data["u"].shape[-1])
        # For training the potential, this does not matter as its not used
        # self.next_state[:self.size_obs] = demo_data["o"].reshape(-1, demo_data["o"].shape[-1])
        self.reward[:self.size] = demo_data["r"].reshape(-1, demo_data["r"].shape[-1])
        self.not_done[:self.size] = demo_data["done"].reshape(-1, demo_data["done"].shape[-1])
| StarcoderdataPython |
1606707 | import re
import pandas as pd
dev_size = 20000
train_dir_csv = './train.csv'
train_balanced_dir_csv = './train_balanced.csv'
dev_dir_csv = './dev.csv'
train_dir_tsv = './train.tsv'
dev_dir_tsv = './dev.tsv'
def remove_link_and_slash_split(sen):
    """Replace URLs with the token ' link ' and surround '/' between
    alphabetic words (3+ letters on each side) with spaces so they
    tokenize separately.  Returns the transformed sentence.
    """
    # The regexes are now raw strings: the old plain strings contained
    # unrecognized escapes such as \/ and \w, which raise
    # DeprecationWarning/SyntaxWarning on modern Python.  The pattern
    # text (and thus the behavior) is unchanged.
    regex_link = r'(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?'
    regex_slash_3 = r'[a-zA-Z]{3,}/[a-zA-Z]{3,}/[a-zA-Z]{3,}'
    regex_slash_2 = r'[a-zA-Z]{3,}/[a-zA-Z]{3,}'
    sen = re.sub(regex_link, ' link ', sen)
    # Handle three-part a/b/c first so the two-part pass does not split
    # it again (the inserted spaces stop the second pattern matching).
    for word in re.findall(regex_slash_3, sen, re.S):
        sen = sen.replace(word, word.replace('/', ' / '))
    for word in re.findall(regex_slash_2, sen, re.S):
        sen = sen.replace(word, word.replace('/', ' / '))
    return sen
def remove_formula(sen):
    """Replace [math]...[/math] spans (and malformed variants) in *sen*
    with the token ' formula '.  At most 100 spans are processed."""
    for i in range(100):
        # Well-formed span with a closing tag ('[/math]' or the literal
        # backslash variant '[\math]').
        judge1 = '[math]' in sen and '[/math]' in sen
        judge2 = '[math]' in sen and '[\math]'in sen
        # Malformed: two opening '[math]' tags with no closer between.
        judge3 = '[math]' in sen and '[math]'in sen.replace('[math]', '', 1)
        if judge1 or judge2:
            index1 = sen.find('[math]')
            # +7 == len('[/math]'); max() picks whichever closer exists
            # (find returns -1 for the missing one).
            index2 = max(sen.find('[\math]'), sen.find('[/math]'))+7
            # Swap if the closer precedes the opener in the string.
            (index1, index2) = (min(index1, index2), max(index1, index2))
            sen = sen.replace(sen[index1: index2], ' formula ')
        elif judge3:
            index1 = sen.find('[math]')
            # Locate the second '[math]'; +6 == len('[math]') so the span
            # covers up to and including the second opener.
            index2 = sen.replace('[math]', ' ', 1).find('[math]') + 6
            sen = sen.replace(sen[index1: index2], ' formula ')
        else:
            # No (more) formula markers: done.
            break
    return sen
def preprocessing(file_dir):
    """Load the train CSV, clean question text, split off a dev set of
    ``dev_size`` rows, and oversample positive (target == 1) training
    rows once to reduce class imbalance.

    Returns (train_df, dev_df).
    """
    # pd.read_csv already returns a DataFrame; the old
    # pd.DataFrame(pd.read_csv(...)) wrapper only made a redundant copy.
    df = pd.read_csv(file_dir, encoding='utf-8', engine='python')
    df['question_text'] = df['question_text'].fillna('').apply(
        lambda x: remove_link_and_slash_split(remove_formula(x)))
    # Shuffle before the dev/train split.
    df = df.sample(frac=1.0)
    dev_df = df[:dev_size]
    train_df = df[dev_size:]
    # Append one extra copy of the positive class.  pd.concat replaces
    # DataFrame.append, which was removed in pandas 2.x; the result is
    # identical.
    df_target_1 = train_df[train_df['target'] == 1]
    train_df = pd.concat([train_df, df_target_1])
    train_df = train_df.sample(frac=1.0)
    return train_df, dev_df
# Clean/split the raw training data and persist both splits as CSV.
train_df, dev_df = preprocessing(train_dir_csv)
train_df.to_csv(train_balanced_dir_csv)
dev_df.to_csv(dev_dir_csv)
# Re-read the balanced training CSV and convert it to TSV
# (target <TAB> qid <TAB> text per line).
train_df = pd.DataFrame(pd.read_csv(train_balanced_dir_csv, encoding='utf-8', engine='python'))
# NOTE(review): 'id' shadows the builtin of the same name.
id = train_df['qid']
text = train_df['question_text'].fillna('').values
target = train_df['target']
size = train_df.shape[0]
ftrain = open(train_dir_tsv, 'w', encoding='utf-8')
for i in range(size):
    # Flatten embedded newlines/tabs (up to 100 occurrences each) so a
    # record never spans multiple TSV lines or columns.
    line = text[i].replace('\n', ' ', 100).replace('\t', ' ', 100)
    ftrain.write(str(target[i])+'\t'+str(id[i])+'\t'+line+'\n')
ftrain.close()
# Same conversion for the dev split.
dev_df = pd.DataFrame(pd.read_csv(dev_dir_csv, encoding='utf-8', engine='python'))
dev_id = dev_df['qid']
dev_text = dev_df['question_text'].fillna('').values
dev_target = dev_df['target']
fdev = open(dev_dir_tsv, 'w', encoding='utf-8')
for i in range(dev_size):
    line = dev_text[i].replace('\n', ' ', 100).replace('\t', ' ', 100)
    fdev.write(str(dev_target[i])+'\t'+str(dev_id[i])+'\t'+line+'\n')
fdev.close()
11292484 | #
# Author: <NAME>
# and <NAME> <<EMAIL>)
# Lincense: Academic Free License (AFL) v3.0
#
import numpy as np
from math import pi
from mpi4py import MPI
try:
from scipy import comb
except ImportError:
from scipy.special import comb
import prosper.em as em
import prosper.utils.parallel as parallel
import prosper.utils.tracing as tracing
from prosper.utils.datalog import dlog
from prosper.em.camodels import CAModel
class BSC_ET(CAModel):
    """Binary Sparse Coding
    Implements learning and inference of a Binary Sparse coding model under a variational approximation
    Attributes
    ----------
    comm : MPI communicator
    D : int
        number of features
    gamma : int
        approximation parameter for maximum number of non-zero states
    H : int
        number of latent variables
    Hprime : int
        approximation parameter for latent space truncation
    K : int
        number of different values the latent variables can take
    no_states : (..., Hprime) ndarray
        number of different states of latent variables except singleton states and zero state
    single_state_matrix : ((K-1)*H, H) ndarray
        matrix that holds all possible singleton states
    state_abs : (no_states, ) ndarray
        number of non-zero elements in the rows of the state_matrix
    state_matrix : (no_states, Hprime) ndarray
        latent variable states taken into account during the em algorithm
    states : (K,) ndarray
        the different values that a latent variable can take must include 0 and one more integer
    to_learn : list
        list of strings included in model_params.keys() that specify which parameters are going to be optimized
    References
    ----------
    [1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> (2010). Binary Sparse Coding. Proc. LVA/ICA 2010, LNCS 6365, 450-457.
    [2] <NAME> and <NAME> (2010). Expectation Truncation and the Benefits of Preselection in Training Generative Models. Journal of Machine Learning Research 11:2855-2900.
    """
    # NOTE(review): the mutable list default for `to_learn` is shared across all
    # calls; harmless only as long as no caller mutates it in place.
    def __init__(self, D, H, Hprime, gamma, to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
        CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)
    @tracing.traced
    def generate_from_hidden(self, model_params, my_hdata):
        """ Generate data according to the MCA model while the latents are
        given in my_hdata['s'].
        This method does _not_ obey gamma: The generated data may have more
        than gamma active causes for a given datapoint.
        """
        # NOTE(review): the docstring says "MCA" but the combination below is the
        # linear sum of active causes, i.e. the BSC generative model — presumably
        # the docstring was copied from the MCA implementation; confirm upstream.
        W = model_params['W'].T
        pies = model_params['pi']
        sigma = model_params['sigma']
        H, D = W.shape
        s = my_hdata['s']
        my_N, _ = s.shape
        # Create output arrays, y is data
        y = np.zeros( (my_N, D) )
        for n in range(my_N):
            # Linear superposition: each active cause adds its dictionary row
            for h in range(H):
                if s[n,h]:
                    y[n] += W[h]
        # Add noise according to the model parameters
        y += np.random.normal( scale=sigma, size=(my_N, D) )
        # Build return structure
        return { 'y': y, 's': s }
    @tracing.traced
    def select_Hprimes(self, model_params, data):
        """
        Return a new data-dictionary which has been annotated with
        a data['candidates'] dataset. A set of self.Hprime candidates
        will be selected.
        """
        my_y = data['y']
        W = model_params['W'].T
        Hprime = self.Hprime
        my_N, D = my_y.shape
        # NOTE(review): np.int is deprecated/removed in NumPy >= 1.24; this
        # should become plain `int` (left unchanged here — docs-only pass).
        candidates = np.zeros( (my_N, Hprime), dtype=np.int )
        for n in range(my_N):
            # Cosine similarity between each dictionary element and datapoint n;
            # the Hprime most similar causes become this point's candidates.
            sim = np.inner(W,my_y[n])/ np.sqrt(np.diag(np.inner(W,W)))/ np.sqrt(np.inner(my_y[n],my_y[n]))
            candidates[n] = np.argsort(sim)[-Hprime:]
        data['candidates'] = candidates
        return data
    @tracing.traced
    def E_step(self, anneal, model_params, my_data):
        """ BSC E_step
        my_data variables used:
        my_data['y'] Datapoints
        my_data['can'] Candidate H's according to selection func.
        Annealing variables used:
        anneal['T'] Temperature for det. annealing
        anneal['N_cut_factor'] 0.: no truncation; 1. trunc. according to model
        Returns a dict with 'logpj': (my_N, 1 + H + no_states) array of
        unnormalized log posterior values per datapoint (column 0: all-zero
        state; columns 1..H: singleton states; remaining: multi-cause states
        restricted to the candidate set).
        """
        comm = self.comm
        my_y = my_data['y'].copy()
        my_cand = my_data['candidates']
        my_N, D = my_data['y'].shape
        H = self.H
        SM = self.state_matrix # shape: (no_states, Hprime)
        state_abs = self.state_abs # shape: (no_states,)
        W = model_params['W'].T
        pies = model_params['pi']
        sigma = model_params['sigma']
        # NOTE(review): bare except silently defaults mu to zeros on *any*
        # error; a KeyError-only catch would be safer.
        try:
            mu = model_params['mu']
        except:
            mu = np.zeros(D)
            model_params['mu'] = mu
        # Precompute
        beta = 1./anneal['T'] # inverse annealing temperature
        pre1 = -1./2./sigma/sigma # Gaussian log-likelihood prefactor
        pil_bar = np.log( pies/(1.-pies) ) # log prior odds of an active cause
        # Allocate return structures
        F = np.empty( [my_N, 1+H+self.no_states] )
        pre_F = np.empty( [my_N, 1+H+ self.no_states] )
        denoms = np.zeros(my_N)
        # Pre-fill pre_F: prior term, scaled per state by its number of active causes
        pre_F[:,0] = 0.
        pre_F[:,1:H+1] = pil_bar
        pre_F[:,1+H:] = pil_bar * state_abs # is (no_states,)
        # Iterate over all datapoints
        tracing.tracepoint("E_step:iterating")
        for n in range(my_N):
            y = my_data['y'][n,:] - mu
            cand = my_data['candidates'][n,:]
            # Zero active hidden causes
            log_prod_joint = pre1 * (y**2).sum()
            F[n,0] = log_prod_joint
            # Hidden states with one active cause
            log_prod_joint = pre1 * ((W-y)**2).sum(axis=1)
            F[n,1:H+1] = log_prod_joint
            # Handle hidden states with more than 1 active cause
            W_ = W[cand] # is (Hprime x D)
            Wbar = np.dot(SM,W_)
            log_prod_joint = pre1 * ((Wbar-y)**2).sum(axis=1)
            F[n,1+H:] = log_prod_joint
        if anneal['anneal_prior']:
            F = beta * (pre_F + F)
        else:
            F = pre_F + beta * F
        return { 'logpj': F }
    @tracing.traced
    def M_step(self, anneal, model_params, my_suff_stat, my_data):
        """ BSC M_step
        my_data variables used:
        my_data['y'] Datapoints
        my_data['candidates'] Candidate H's according to selection func.
        Annealing variables used:
        anneal['T'] Temperature for det. annealing
        anneal['N_cut_factor'] 0.: no truncation; 1. trunc. according to model
        Returns the updated model parameter dict {'W', 'pi', 'sigma', 'mu'};
        parameters not listed in self.to_learn are passed through unchanged.
        """
        comm = self.comm
        H, Hprime = self.H, self.Hprime
        gamma = self.gamma
        W = model_params['W'].T
        pies = model_params['pi']
        sigma = model_params['sigma']
        mu = model_params['mu']
        # Read in data:
        my_y = my_data['y'].copy()
        candidates = my_data['candidates']
        logpj_all = my_suff_stat['logpj']
        all_denoms = np.exp(logpj_all).sum(axis=1)
        my_N, D = my_y.shape
        N = comm.allreduce(my_N)
        # Joerg's data noise idea
        data_noise_scale = anneal['data_noise']
        if data_noise_scale > 0:
            my_y += my_data['data_noise']
        SM = self.state_matrix # shape: (no_states, Hprime)
        # To compute et_loglike:
        my_ldenom_sum = 0.0
        ldenom_sum = 0.0
        # Precompute factor for pi update: A/B are the truncated-binomial
        # normalizers over at most `gamma` active causes (ET correction).
        A_pi_gamma = 0
        B_pi_gamma = 0
        for gamma_p in range(gamma+1):
            A_pi_gamma += comb(H,gamma_p) * (pies**gamma_p) * ((1-pies)**(H-gamma_p))
            B_pi_gamma += gamma_p * comb(H,gamma_p) * (pies**gamma_p) * ((1-pies)**(H-gamma_p))
        E_pi_gamma = pies * H * A_pi_gamma / B_pi_gamma
        # Truncate data: keep only the datapoints best explained by the model
        if anneal['Ncut_factor'] > 0.0:
            tracing.tracepoint("M_step:truncating")
            #alpha = 0.9 # alpha from ET paper
            #N_use = int(alpha * (N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor'])))
            N_use = int(N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor']))
            cut_denom = parallel.allsort(all_denoms)[-N_use]
            which = np.array(all_denoms >= cut_denom)
            candidates = candidates[which]
            logpj_all = logpj_all[which]
            my_y = my_y[which]
            my_N, D = my_y.shape
            N_use = comm.allreduce(my_N)
        else:
            N_use = N
        dlog.append('N', N_use)
        # Calculate truncated Likelihood
        L = H * np.log(1-pies) - 0.5 * D * np.log(2*pi*sigma**2) - np.log(A_pi_gamma)
        Fs = np.log(np.exp(logpj_all).sum(axis=1)).sum()
        L += comm.allreduce(Fs)/N_use
        dlog.append('L',L)
        # Precompute
        pil_bar = np.log( pies/(1.-pies) )
        corr_all = logpj_all.max(axis=1) # shape: (my_N,)
        # Subtract the per-row max before exponentiating for numerical stability
        pjb_all = np.exp(logpj_all - corr_all[:, None]) # shape: (my_N, no_states)
        # Allocate
        my_Wp = np.zeros_like(W) # shape (H, D)
        my_Wq = np.zeros((H,H)) # shape (H, H)
        my_pi = 0.0 #
        my_sigma = 0.0 #
        #my_mup = np.zeros_like(W) # shape (H, D)
        #my_muq = np.zeros((H,H)) # shape (H, H)
        my_mus = np.zeros(H) # shape D
        data_sum = my_y.sum(axis=0) # sum over all data points for mu update
        ## Calculate mu
        #for n in xrange(my_N):
        #tracing.tracepoint("Calculationg offset")
        #y = my_y[n,:] # length D
        #cand = candidates[n,:] # length Hprime
        #logpj = logpj_all[n,:] # length no_states
        #corr = corr_all[n] # scalar
        #pjb = pjb_all[n, :]
        ## Zero active hidden cause (do nothing for the W and pi case)
        ## this_Wp += 0. # nothing to do
        ## this_Wq += 0. # nothing to do
        ## this_pi += 0. # nothing to do
        ## One active hidden cause
        #this_mup = np.outer(pjb[1:(H+1)],y)
        #this_muq = pjb[1:(H+1)] * np.identity(H)
        #this_mus = pjb[1:(H+1)]
        ## Handle hidden states with more than 1 active cause
        #this_mup[cand] += np.dot(np.outer(y,pjb[(1+H):]),SM).T
        #this_muq_tmp = np.zeros_like(my_muq[cand])
        #this_muq_tmp[:,cand] = np.dot(pjb[(1+H):] * SM.T,SM)
        #this_muq[cand] += this_muq_tmp
        #this_mus[cand] += np.inner(SM.T,pjb[(1+H):])
        #denom = pjb.sum()
        #my_mup += this_mup / denom
        #my_muq += this_muq / denom
        #my_mus += this_mus / denom
        ## Calculate updated mu
        #if 'mu' in self.to_learn:
        #tracing.tracepoint("M_step:update mu")
        #mup = np.empty_like(my_mup)
        #muq = np.empty_like(my_muq)
        #mus = np.empty_like(my_mus)
        #all_data_sum = np.empty_like(data_sum)
        #comm.Allreduce( [my_mup, MPI.DOUBLE], [mup, MPI.DOUBLE] )
        #comm.Allreduce( [my_muq, MPI.DOUBLE], [muq, MPI.DOUBLE] )
        #comm.Allreduce( [my_mus, MPI.DOUBLE], [mus, MPI.DOUBLE] )
        #comm.Allreduce( [data_sum, MPI.DOUBLE], [all_data_sum, MPI.DOUBLE] )
        #mu_numer = all_data_sum - np.dot(mus,np.dot(np.linalg.inv(muq), mup))
        #mu_denom = my_N - np.dot(mus,np.dot(np.linalg.inv(muq), mus))
        #mu_new = mu_numer/ mu_denom
        #else:
        #mu_new = mu
        # Iterate over all datapoints, accumulating sufficient statistics
        tracing.tracepoint("M_step:iterating")
        for n in range(my_N):
            y = my_y[n,:]-mu # length D
            cand = candidates[n,:] # length Hprime
            pjb = pjb_all[n, :]
            this_Wp = np.zeros_like(my_Wp) # numerator for current datapoint (H, D)
            this_Wq = np.zeros_like(my_Wq) # denominator for current datapoint (H, H)
            this_pi = 0.0 # numerator for pi update (current datapoint)
            # Zero active hidden cause (do nothing for the W and pi case)
            # this_Wp += 0. # nothing to do
            # this_Wq += 0. # nothing to do
            # this_pi += 0. # nothing to do
            # One active hidden cause
            this_Wp = np.outer(pjb[1:(H+1)],y)
            this_Wq = pjb[1:(H+1)] * np.identity(H)
            this_pi = pjb[1:(H+1)].sum()
            this_mus = pjb[1:(H+1)].copy()
            # Handle hidden states with more than 1 active cause
            this_Wp[cand] += np.dot(np.outer(y,pjb[(1+H):]),SM).T
            this_Wq_tmp = np.zeros_like(my_Wq[cand])
            this_Wq_tmp[:,cand] = np.dot(pjb[(1+H):] * SM.T,SM)
            this_Wq[cand] += this_Wq_tmp
            this_pi += np.inner(pjb[(1+H):], SM.sum(axis=1))
            this_mus[cand] += np.inner(SM.T,pjb[(1+H):])
            denom = pjb.sum()
            my_Wp += this_Wp / denom
            my_Wq += this_Wq / denom
            my_pi += this_pi / denom
            my_mus += this_mus / denom
        # Calculate updated W
        if 'W' in self.to_learn:
            tracing.tracepoint("M_step:update W")
            Wp = np.empty_like(my_Wp)
            Wq = np.empty_like(my_Wq)
            comm.Allreduce( [my_Wp, MPI.DOUBLE], [Wp, MPI.DOUBLE] )
            comm.Allreduce( [my_Wq, MPI.DOUBLE], [Wq, MPI.DOUBLE] )
            #W_new = np.dot(np.linalg.inv(Wq), Wp)
            #W_new = np.linalg.solve(Wq, Wp) # TODO check and switch to this one
            # NOTE(review): parsing the version as float(np.__version__[2:]) is
            # fragile — it assumes a "1.x" prefix and breaks for NumPy >= 2.0
            # or multi-digit minor versions; consider packaging.version instead.
            rcond = -1
            if float(np.__version__[2:]) >= 14.0:
                rcond = None
            W_new = np.linalg.lstsq(Wq, Wp, rcond=rcond)[0] # TODO check and switch to this one
        else:
            W_new = W
        # Calculate updated pi
        if 'pi' in self.to_learn:
            tracing.tracepoint("M_step:update pi")
            pi_new = E_pi_gamma * comm.allreduce(my_pi) / H / N_use
        else:
            pi_new = pies
        # Calculate updated sigma
        if 'sigma' in self.to_learn:
            tracing.tracepoint("M_step:update sigma")
            # Loop for sigma update:
            for n in range(my_N):
                y = my_y[n,:]-mu # length D
                cand = candidates[n,:] # length Hprime
                logpj = logpj_all[n,:] # length no_states
                corr = logpj.max() # scalar
                pjb = np.exp(logpj - corr)
                # Zero active hidden causes
                this_sigma = pjb[0] * (y**2).sum()
                # Hidden states with one active cause
                this_sigma += (pjb[1:(H+1)] * ((W-y)**2).sum(axis=1)).sum()
                # Handle hidden states with more than 1 active cause
                SM = self.state_matrix # is (no_states, Hprime)
                W_ = W[cand] # is (Hprime x D)
                Wbar = np.dot(SM,W_)
                this_sigma += (pjb[(H+1):] * ((Wbar-y)**2).sum(axis=1)).sum()
                denom = pjb.sum()
                my_sigma += this_sigma/ denom
            sigma_new = np.sqrt(comm.allreduce(my_sigma) / D / N_use)
        else:
            sigma_new = sigma
        # Calculate updated mu:
        if 'mu' in self.to_learn:
            tracing.tracepoint("M_step:update mu")
            mus = np.empty_like(my_mus)
            all_data_sum = np.empty_like(data_sum)
            comm.Allreduce( [my_mus, MPI.DOUBLE], [mus, MPI.DOUBLE] )
            comm.Allreduce( [data_sum, MPI.DOUBLE], [all_data_sum, MPI.DOUBLE] )
            mu_new = all_data_sum/my_N - np.inner(W_new.T/my_N,mus)
        else:
            mu_new = mu
        # Feed the annealing scheduler the current value of each tracked
        # parameter; `exec` looks the name up in this local scope.
        for param in anneal.crit_params:
            exec('this_param = ' + param)
            anneal.dyn_param(param, this_param)
        dlog.append('N_use', N_use)
        return { 'W': W_new.T, 'pi': pi_new, 'sigma': sigma_new, 'mu': mu_new }
| StarcoderdataPython |
9768069 | from InquirerPy import inquirer
from InquirerPy.validator import PathValidator
def main():
    """Interactively prompt for a file to upload and a download directory."""
    # Source must be an existing file; the prompt rejects anything else.
    upload_path = inquirer.filepath(
        message="Enter file to upload:",
        default="~/",
        validate=PathValidator(is_file=True, message="Input is not a file"),
        only_files=True,
    ).execute()
    # Destination must be an existing directory.
    download_path = inquirer.filepath(
        message="Enter path to download:",
        validate=PathValidator(is_dir=True, message="Input is not a directory"),
        only_directories=True,
    ).execute()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11231240 | <filename>src/fastG.py
# examples/Python/Advanced/fast_global_registration.py
import open3d as o3d
from global_registration import *
import numpy as np
import copy
import sys
import time
def execute_fast_global_registration(source_down, target_down, source_fpfh,
                                     target_fpfh, voxel_size):
    """Run Open3D fast global registration on downsampled clouds + FPFH features."""
    # The correspondence distance is tied directly to the downsampling voxel size.
    threshold = voxel_size
    print(":: Apply fast global registration with distance threshold %.3f" % threshold)
    options = o3d.registration.FastGlobalRegistrationOption(
        maximum_correspondence_distance=threshold,
        iteration_number=1000,
        division_factor=3,
    )
    return o3d.registration.registration_fast_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, options)
def execute_global_registration(source_down, target_down, source_fpfh,
                                target_fpfh, voxel_size):
    """RANSAC-based global registration from FPFH feature correspondences."""
    # Looser than the FGR variant: twice the downsampling voxel size.
    threshold = voxel_size * 2
    # Prune candidate correspondences by edge-length ratio and distance.
    checkers = [
        o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
        o3d.registration.CorrespondenceCheckerBasedOnDistance(threshold),
    ]
    estimation = o3d.registration.TransformationEstimationPointToPoint(False)
    criteria = o3d.registration.RANSACConvergenceCriteria(4000000, 500)
    return o3d.registration.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, threshold,
        estimation, 4, checkers, criteria)
def registration_ransac_based_on_correspondence(source, target, corr, voxel_size):
    """Wrap Open3D RANSAC registration given explicit point correspondences."""
    # Convert raw Nx3 arrays into Open3D point-cloud containers.
    src_cloud = o3d.geometry.PointCloud()
    src_cloud.points = o3d.utility.Vector3dVector(source)
    tgt_cloud = o3d.geometry.PointCloud()
    tgt_cloud.points = o3d.utility.Vector3dVector(target)
    pairs = o3d.utility.Vector2iVector(corr)
    return o3d.registration.registration_ransac_based_on_correspondence(
        src_cloud, tgt_cloud, pairs, voxel_size)
def FGR(source, target, voxel_size=0.03):
    """Estimate the rigid transform aligning `source` onto `target`.

    Both inputs are Nx3 point arrays; returns a 4x4 transformation matrix.
    """
    # Wrap the raw arrays as Open3D point clouds.
    src_cloud = o3d.geometry.PointCloud()
    src_cloud.points = o3d.utility.Vector3dVector(source)
    tgt_cloud = o3d.geometry.PointCloud()
    tgt_cloud.points = o3d.utility.Vector3dVector(target)
    # Downsample and compute FPFH features for each cloud.
    src_down, src_fpfh = preprocess_point_cloud(src_cloud, voxel_size)
    tgt_down, tgt_fpfh = preprocess_point_cloud(tgt_cloud, voxel_size)
    # NOTE(review): despite the FGR name this delegates to the RANSAC-based
    # routine, matching the original implementation.
    result = execute_global_registration(src_down, tgt_down,
                                         src_fpfh, tgt_fpfh, voxel_size)
    return result.transformation
if __name__ == "__main__":
    # NOTE(review): the comment claims 5cm but the value is 0.008 — if units
    # are meters this is 8mm; confirm against the dataset's scale.
    voxel_size = 0.008 # means 5cm for the dataset
    # Dataset root directory is the single command-line argument.
    path = sys.argv[1]
    file_txt = path +'object.txt'
    with open(file_txt, 'r') as file:
        allDirs = file.readlines()
        allDirs = [x.strip() for x in allDirs]
    # Register every ordered pair of distinct objects against each other.
    for i in range(len(allDirs)):
        for j in range(len(allDirs)):
            if i == j:
                continue
            # Model = object i, data = object j; both live under <path>/mat/.
            modelFName = path + 'mat/'
            modelFName += allDirs[i]
            dataFName = path + 'mat/'
            dataFName += allDirs[j]
            # Result file: <path>/output/fgr/outliers_<model-stem>_<data>.
            outputFname = path + 'output/fgr/'
            outputFname += 'outliers_'
            outputFname += allDirs[i][:-4]
            outputFname += '_'
            outputFname += allDirs[j]
            source, target, source_down, target_down, source_fpfh, target_fpfh = \
                prepare_dataset(dataFName, modelFName, voxel_size)
            # start = time.time()
            result_fast = execute_fast_global_registration(source_down, target_down,
                                                           source_fpfh, target_fpfh,
                                                           voxel_size)
            # print("Fast global registration took %.3f sec.\n" % (time.time() - start))
            # print(result_fast)
            # draw_registration_result(source_down, target_down,
            #                          result_fast.transformation)
            # Serialize the 4x4 transform into a 7x3 layout: rows 1-3 hold the
            # transposed rotation, column 0 of rows 4-6 holds the translation.
            trans = np.zeros((7, 3))
            trans[1:4, :] = result_fast.transformation[:3, :3].transpose()
            trans[4:, 0] = result_fast.transformation[:3, 3]
            np.savetxt(outputFname, trans)
8126573 | """
Version information for NetworkX, created during installation.
Do not add this file to the repository.
"""
import datetime
# Build-time release identifiers; this file is generated during installation.
version = '1.7'
date = 'Wed Jul 4 17:53:34 2012'
# Was NetworkX built from a development version? If so, remember that the major
# and minor versions reference the "target" (rather than "current") release.
dev = False
# Format: (name, major, min, revision)
version_info = ('networkx', 1, 7, '70eea5d9e665')
# Format: a 'datetime.datetime' instance
date_info = datetime.datetime(2012, 7, 4, 17, 53, 34, 228318)
# Format: (vcs, vcs_tuple)
vcs_info = ('mercurial', ('70eea5d9e665', 'tip'))
| StarcoderdataPython |
3410576 | from selenium.webdriver.common.by import By
from time import sleep
def get_gas_cost(driver, mail_address, password):
    """Log into the Tokyo Gas member site via `driver` and print the latest bill.

    Prints usage period, billed amount, usage volume, and year-over-year /
    month-over-month comparisons to stdout.
    """
    # Login page: follow the call-to-action link to the credential form.
    driver.get('https://members.tokyo-gas.co.jp/')
    driver.get(driver.find_element(By.CLASS_NAME, 'mtg-button-cta').get_attribute('href'))
    driver.find_element(By.XPATH, '//input[@name="loginId"]').send_keys(mail_address)
    driver.find_element(By.XPATH, '//input[@name="password"]').send_keys(password)
    # Submit and wait for the top page to render.
    driver.find_element(By.ID, 'submit-btn').click()
    sleep(5)
    # Navigate to the usage-total page.
    driver.get(driver.find_element(By.CLASS_NAME, 'mtg-content-number-link').get_attribute('href'))
    sleep(5)
    # Navigate to the per-month detail page.
    driver.get(driver.find_element(By.CLASS_NAME, 'mtg-button-link').get_attribute('href'))
    sleep(5)

    def text_at(xpath):
        # Shorthand: visible text of the element located by `xpath`.
        return driver.find_element(By.XPATH, xpath).text

    usage_term = text_at('//*[@id="gas"]/div[2]/div[1]/div/div[3]/p[2]/span[1]')
    usage_days = text_at('//*[@id="gas"]/div[2]/div[1]/div/div[3]/p[2]/span[2]')
    billing_amount = text_at('//*[@id="gas"]/div[2]/div[2]/div[1]/div[1]/p[2]/span')
    usage_amount = text_at('//*[@id="gas"]/div[2]/div[2]/div[2]/div[1]/p[2]/span')
    yoy = text_at('//*[@id="gas"]/div[2]/div[2]/div[2]/div[2]/p/span')
    yoy_days = text_at('//*[@id="gas"]/div[2]/div[2]/div[2]/div[2]/div/p[2]')
    mom = text_at('//*[@id="gas"]/div[2]/div[2]/div[2]/div[3]/p/span')
    mom_days = text_at('//*[@id="gas"]/div[2]/div[2]/div[2]/div[3]/div/p[2]')
    print('使用期間:' + usage_term)
    print('使用日:' + usage_days)
    print('請求額:' + billing_amount)
    print('使用量:' + usage_amount)
    print('前年同月:' + yoy + '(' + yoy_days + ')')
    print('前月:' + mom + '(' + mom_days + ')')
| StarcoderdataPython |
3261699 | __version__ = "0.6.0"
# Tuple form of the release string, e.g. "0.6.0" -> (0, 6, 0).
__version_info__ = tuple(int(i) for i in __version__.split("."))
| StarcoderdataPython |
11287174 | # -*- coding: utf-8 -*-
from __future__ import division
import os,sys,datetime
import requests, json
import BeautifulSoup
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import pandas.io.data as web
from data_model import *
from data_handler import *
from data_writer import *
from services import *
from stock_finder import *
class DataCrawler:
    """Download Korean stock codes and daily prices and store them via the
    registered 'dbwriter'/'dbhandler' services. (Python 2 code.)"""
    def __init__(self,wait_sec=5):
        self.wait_sec = wait_sec
        self.dbwriter = services.get('dbwriter')
        self.dbhandler = services.get('dbhandler')
    # Fetch the full list of stock codes for a market from the Koscom
    # issue-search service; the response body is HTML.
    def downloadCode(self,market_type):
        url = 'http://datamall.koscom.co.kr/servlet/infoService/SearchIssue'
        html = requests.post(url, data={'flag':'SEARCH', 'marketDisabled': 'null', 'marketBit':market_type})
        return html.content
    # Parse the HTML, extract each code and company name, collect them in a
    # StockCode container, and return the container.
    def parseCodeHTML(self,html,market_type):
        soup = BeautifulSoup.BeautifulSoup(html)
        options = soup.findAll('option')
        codes = StockCode()
        for a_option in options:
            #print a_tr
            if len(a_option)==0:
                continue
            # Option text layout: 1 marker char, 6-char code, then company name.
            code = a_option.text[1:7]
            company = a_option.text[8:]
            full_code = a_option.get('value')
            codes.add(market_type,code,full_code,company)
        return codes
    # Alternative parser for the table-based (id="tbl1") page layout.
    def parseCodeHTML2(self,html,market_type):
        soup = BeautifulSoup(html)
        table = soup.find('table', {'id':'tbl1'})
        trs = table.findAll('tr')
        codes = StockCode()
        for a_tr in trs:
            #print a_tr
            cols = a_tr.findAll('td')
            if len(cols)==0:
                continue
            #print cols
            code = cols[0].text[1:]
            company = cols[1].text.replace(";", "")
            full_code = cols[2].text
            codes.add(market_type,code,full_code,company)
        return codes
    # Refresh the code table for both KOSPI and KOSDAQ markets.
    def updateAllCodes(self):
        for market_type in ['kospiVal','kosdaqVal']:
            html = self.downloadCode(market_type)
            codes = self.parseCodeHTML(html,market_type)
            self.dbwriter.updateCodeToDB(codes)
    # Download daily OHLC data for one code from Yahoo; returns a DataFrame
    # or None on any failure.
    def downloadStockData(self,market_type,code,year1,month1,date1,year2,month2,date2):
        def makeCode(market_type,code):
            # Yahoo ticker suffix: .KS for KOSPI (market_type 1), .KQ otherwise.
            if market_type==1:
                return "%s.KS" % (code)
            return "%s.KQ" % (code)
        start = datetime(year1, month1, date1)
        end = datetime(year2, month2, date2)
        # NOTE(review): bare except swallows every error (including Ctrl-C);
        # narrowing to the data-reader's exceptions would be safer.
        try:
            df = web.DataReader(makeCode(market_type,code), "yahoo", start, end)
            return df
        except:
            print "!!! Fatal Error Occurred"
            return None
    # Number of price rows already stored for a code (0 means "not fetched yet").
    # NOTE(review): SQL is built by string interpolation; fine for internal
    # codes but should use parameterized queries if inputs can be untrusted.
    def getDataCount(self,code):
        sql = "select code from prices where code='%s'" % (code)
        rows = self.dbhandler.openSql(sql).fetchall()
        return len(rows)
    # Download and store price history for every code of a market that has no
    # data yet; `start_index` lets a previous interrupted run be resumed.
    def updateAllStockData(self,market_type,year1,month1,date1,year2,month2,date2,start_index=1):
        print "Start Downloading Stock Data : %s , %s%s%s ~ %s%s%s" % (market_type,year1,month1,date1,year2,month2,date2)
        sql = "select * from codes"
        sql += " where market_type=%s" % (market_type)
        if start_index>1:
            sql += " and id>%s" % (start_index)
        rows = self.dbhandler.openSql(sql).fetchall()
        self.dbhandler.beginTrans()
        index = 1
        for a_row in rows:
            #print a_row
            code = a_row[2]
            company = a_row[5]
            data_count = self.getDataCount(code)
            if data_count == 0:
                print "... %s of %s : Downloading %s data " % (index,len(rows),company)
                df_data = self.downloadStockData(market_type,code,year1,month1,date1,year2,month2,date2)
                if df_data is not None:
                    df_data_indexed = df_data.reset_index()
                    self.dbwriter.updatePriceToDB(code,df_data_indexed)
            index += 1
            #return
        self.dbhandler.endTrans()
        print "Done!!!"
if __name__ == "__main__":
    # Wire up the service registry the crawler depends on.
    services.register('dbhandler',DataHandler())
    services.register('dbwriter',DataWriter())
    crawler = DataCrawler()
    # Smoke test: fetch and parse the KOSDAQ ('2') code listing.
    html_codes = crawler.downloadCode('2')
    print html_codes.__class__
    crawler.parseCodeHTML(html_codes,'2')
    #crawler.updateAllCodes()
    #crawler.updateAllStockData(1,2010,1,1,2015,12,1,start_index=1)
    #finder = StockFinder()
    #finder.setTimePeriod('20150101','20151130')
    #print finder.doStationarityTest('price_close')
6407666 | from switch_sort import switch_sort
def test_first():
    # [1, 2, 4, 3] needs a single switch.
    result = switch_sort([1, 2, 4, 3])
    assert result == 1
def test_second():
    # An already-sorted list needs no switches.
    result = switch_sort([1, 2, 3, 4])
    assert result == 0
def test_third():
    # [3, 4, 2, 1] requires three switches.
    result = switch_sort([3, 4, 2, 1])
    assert result == 3
def test_fourth():
    # [1, 3, 4, 2] requires two switches.
    result = switch_sort([1, 3, 4, 2])
    assert result == 2
def test_five():
    # Three-element rotation takes two switches.
    result = switch_sort([3, 1, 2])
    assert result == 2
def test_six():
    # A fully reversed list of four takes two switches.
    result = switch_sort([4, 3, 2, 1])
    assert result == 2
def test_seven():
    # A fully reversed list of five also takes two switches.
    result = switch_sort([5, 4, 3, 2, 1])
    assert result == 2
def test_eight():
    # Two swapped elements need exactly one switch.
    result = switch_sort([2, 1])
    assert result == 1
def test_nine():
    # [5, 3, 4, 1, 2] requires four switches.
    result = switch_sort([5, 3, 4, 1, 2])
    assert result == 4
def test_ten():
    # [5, 4, 3, 1, 2] requires three switches.
    result = switch_sort([5, 4, 3, 1, 2])
    assert result == 3
def main():
    """Run every switch_sort test case in order.

    Raises AssertionError on the first failing case, mirroring the original
    sequence of individual calls.
    """
    # Iterate instead of ten hand-written calls; order matches the original.
    tests = (
        test_first, test_second, test_third, test_fourth, test_five,
        test_six, test_seven, test_eight, test_nine, test_ten,
    )
    for test in tests:
        test()


if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.