text stringlengths 38 1.54M |
|---|
# Smoke test for the libfoolang Python bindings: parse a file and evaluate a
# property on the root node of the resulting analysis unit.
import libfoolang
ctx = libfoolang.AnalysisContext()
foo = ctx.get_from_file('foo.txt')
# NOTE(review): HasExamplePresent and p_prop come from the generated
# libfoolang API — confirm against the language spec that produced it.
assert foo.root.is_a(libfoolang.HasExamplePresent)
print(foo.root.p_prop)
print('Done.')
|
import hashlib
import numpy as np
from utils.myPrint import PRINT_blue
from utils.myPrint import PRINT_red
def check(data1, data2, name1='data1', name2='data2'):
    """Compare two array-likes for exact content equality and report it.

    function:
        to check the data is equal or not.
    parameters:
        data1: array-like, must, first data.
        data2: array-like, must, second data.
        name1: str, the name of data1 (used in the message).
        name2: str, the name of data2 (used in the message).
    return:
        str, a message stating EQUAL or NOT EQUAL (also printed in color).
    """
    # BUG FIX: hashing str(np.array(...)) was unreliable — numpy truncates the
    # printed form of large arrays with "...", so two different large arrays
    # could produce identical hashes. Hash the raw flattened bytes instead.
    m1 = hashlib.md5()
    m2 = hashlib.md5()
    m1.update(np.asarray(data1).flatten().tobytes())
    m2.update(np.asarray(data2).flatten().tobytes())
    if m1.hexdigest() == m2.hexdigest():
        res = f"{name1} and {name2} are EQUAL"
        PRINT_blue(res)
        return res
    else:
        res = f"{name1} and {name2} are NOT EQUAL"
        PRINT_red(res)
        return res
def checkBool(data1, data2):
    """Return True when the two array-likes have identical flattened content.

    function:
        to check the data is equal or not.
    parameters:
        data1: array-like, must, first data.
        data2: array-like, must, second data.
    return:
        bool, True/False.
    """
    # BUG FIX: hashing str(np.array(...)) was unreliable — numpy truncates the
    # printed form of large arrays with "...", so two different large arrays
    # could compare as equal. Hash the raw flattened bytes instead.
    m1 = hashlib.md5()
    m2 = hashlib.md5()
    m1.update(np.asarray(data1).flatten().tobytes())
    m2.update(np.asarray(data2).flatten().tobytes())
    return m1.hexdigest() == m2.hexdigest()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 15:49:29 2019
@author: skondaveeti
Each roll is equally likely, so it will show 1,2,3,4,5,6 with equal probability. Thus their average of 3.5 is the expected payoff.
Now let's suppose we have 2 rolls. If on the first roll, I roll a 6, I would not continue. The next throw could only maintain my winnings of 6 (with 1/6 chance) or make me lose. Similarly, if I threw a 5 or a 4 on the first roll, I would not continue, because my expected payoff on the last throw would be a 3.5. However, if I threw a 1, 2 or 3, I would take that second roll. This is again because I expect to win 3.5.
So in the 2 roll game, if I roll a 4,5,6, I keep those rolls, but if I throw a 1,2,3, we will roll again.
Thus I have a 1/2 chance of keeping a 4,5,6, or a 1/2 chance of rerolling the dice.
Rerolling the dice have an expected return of 3.5. As the 4,5,6 are equally likely, rolling a 4,5 or 6 has expected return 5.
Thus my expected payout on 2 rolls is .5(5)+.5(3.5)=4.25.
Now we go to the 3 roll game. If I roll a 5 or 6, I keep my roll. But now, even a 4 is undesirable, because by rerolling, I'd be playing the 2 roll game, which has expected payout of 4.25. So now the expected payout is 1/3(5.5)+2/3(4.25)=4.66
"""
import math
def expected_winnings(n, k):
    """Expected payoff of the optimal-stopping dice game.

    n: number of faces on the die.
    k: number of rolls allowed; after each roll the player may stop and keep
       the shown value or reroll into the game with one fewer roll.
    Returns the expected winnings under the optimal keep/reroll strategy.
    """
    values = [0.0] * k
    keep_count = n  # how many of the top faces are worth keeping at this stage
    for rolls in range(1, k + 1):
        # With probability keep_count/n we keep (average of the kept faces);
        # the recurrence below folds that together with the reroll branch.
        expected = avgSumNum(n, keep_count) / rolls
        if rolls > 1:
            expected += (rolls - 1) / rolls * values[rolls - 2]
        values[rolls - 1] = expected
        keep_count = math.ceil(keep_count / 2)
    return values[k - 1]


def avgSumNum(N, M):
    """Average of the M largest values among 1..N."""
    top_sum = N * (N + 1) / 2 - (N - M) * (N - M + 1) / 2
    return top_sum / M
# Driver code
if __name__ == "__main__":
    # n: number of die faces; k: number of allowed rolls.
    n, k = 6, 3
    # Expected value of the optimal 3-roll game on a d6 (4.666..., see docstring).
    print(expected_winnings(n, k))
from pyspark.sql import SparkSession
from pyspark.sql.functions import window, column, desc, col
import time
if __name__ == "__main__":
    # Single-machine Spark session for the structured-streaming example.
    spark = SparkSession.builder.master("local").appName("structured-streaming").getOrCreate()
    # Keep shuffles cheap locally (the default of 200 partitions is overkill).
    spark.conf.set("spark.sql.shuffle.partitions", "1")
    data_path = "/home/jameslin/Github-Project/Spark-The-Definitive-Guide/data/retail-data/by-day/*.csv"
    print(f"data_path: {data_path}")
    # Read every daily retail CSV into one DataFrame; inferSchema requires an
    # extra pass over the files.
    df = spark.read.csv(
        data_path,
        header=True,
        inferSchema=True,
    )
    # df.createOrReplaceTempView("retail_data")
    df.show()
    # Capture the inferred schema so a later streaming read could reuse it.
    staticSchema = df.schema
    df.printSchema()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-u
"""
Purpose : Exceptions for our pythons wrapper
"""
class NotValidEmail(Exception):
    """Raised when the supplied email address is not a valid email."""
    pass
class BreachNotFound(Exception):
    """Raised when the requested breach name cannot be found."""
    pass
class UnvalidParameters(Exception):
    """Raised when the parameters passed to the wrapper are invalid."""
    pass
|
__author__ = 'Eidan Wasser'
from Utils import Config
from selenium.webdriver.common.by import By
from selenium.common.exceptions import ElementNotVisibleException, NoSuchWindowException
import unittest, time
class GooglePlus(unittest.TestCase):
    """Sign in to the app through the Google+ OAuth popup and verify the result.

    Flow: sign out first if a session is still active, click the Google+
    sign-up button, authenticate inside the popup window, approve the OAuth
    access request if prompted, then verify the greeting text and that the
    profile picture was sourced from Google.
    """

    def test(self):
        driver = Config.get_driver()
        google = None
        # Handle of the main app window so we can switch back after the popup.
        mainWindow = driver.current_window_handle
        self.verificationErrors = []
        # If the Google+ button never becomes visible, assume a user is still
        # signed in from a previous run and sign them out via settings first.
        try: Config.wait_for_element_visibility(Config.singUp_GooglePlusButton, trys=10)
        except ElementNotVisibleException:
            if not Config.is_element_present(Config.settingsPane):
                Config.find_element(Config.openSettings).click()
            Config.find_element(Config.settings_profile).click()
            Config.find_element(Config.profile_signOut).click()
            Config.wait_for_element_visibility(Config.singUp_GooglePlusButton)
        Config.find_element(Config.singUp_GooglePlusButton).click()
        try:
            google = driver.window_handles
            google.remove(mainWindow)
            # BUG FIX: window_handles is a list — the original passed the whole
            # list to switch_to_window; switch to the remaining popup handle.
            driver.switch_to_window(google[0])
            # NOTE(review): test credentials are hard-coded below; consider
            # moving them into configuration.
            Config.wait_for_element([By.ID, "Email"])
            Config.find_element([By.ID, "Email"]).clear()
            Config.find_element([By.ID, "Email"]).send_keys("anydoeidan15@gmail.com")
            # Newer Google sign-in shows the password field on a second page.
            if Config.is_element_present([By.ID, "Passwd"]) == False:
                Config.find_element([By.ID, "next"]).click()
                Config.wait_for_element([By.CSS_SELECTOR, "input#Passwd"])
            Config.find_element([By.CSS_SELECTOR, "input#Passwd"]).clear()
            Config.find_element([By.CSS_SELECTOR, "input#Passwd"]).send_keys("mobiitnow")
            Config.find_element([By.ID, "signIn"]).click()
            time.sleep(3)
            # First-time sign-ins must approve the OAuth access request; wait
            # until the approve button is enabled before clicking.
            if Config.is_element_present([By.XPATH, "//button[@id=\"submit_approve_access\"]"]):
                Config.wait_for_element([By.XPATH, "//button[@id=\"submit_approve_access\" and @disabled]"], present=False)
                Config.find_element([By.ID, "submit_approve_access"]).click()
        except NoSuchWindowException:
            driver.switch_to_window(mainWindow)
        except: self.verificationErrors.append("Google authentication error")
        else: driver.switch_to_window(mainWindow)
        # Skip the optional intro screen if it appears.
        try:
            Config.wait_for_element([By.CSS_SELECTOR, "div#skip.textButton"])
            Config.find_element([By.CSS_SELECTOR, "div#skip.textButton"]).click()
        except: pass
        Config.wait_for_element(Config.main_hayush)
        try: self.assertEqual("HI ANY", Config.find_element(Config.main_hayush).text)
        except AssertionError as e: self.verificationErrors.append(str(e))
        # After a G+ login the profile picture URL should reference Google.
        try: Config.find_element(Config.profilePic).value_of_css_property("background-image").index("google")
        except ValueError as e: self.verificationErrors.append(e)
|
# Copyright 2014-2022 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This special test module is responsible for running the 'managed_packages' tests in a remote environment,
for example ec2 instance or docker container.
"""
import argparse
import pathlib as pl
import logging
import sys
import tarfile
from typing import List, Type, Dict
sys.path.append(str(pl.Path(__file__).parent.parent.parent.parent.parent))
from agent_build_refactored.managed_packages.managed_packages_builders import (
LinuxAIOPackagesBuilder,
ALL_PACKAGE_BUILDERS,
)
from agent_build_refactored.utils.constants import CpuArch
from agent_build_refactored.utils.common import init_logging
from tests.end_to_end_tests.run_in_remote_machine import (
run_test_in_docker,
run_tests_in_ec2,
)
from tests.end_to_end_tests.run_in_remote_machine import DISTROS
from tests.end_to_end_tests.managed_packages_tests.remote_machine_tests.tools import (
create_packages_repo_root,
get_packages_stable_version,
is_builder_creates_aio_package,
)
from tests.end_to_end_tests.managed_packages_tests.remote_machine_tests.conftest import (
add_cmd_args,
)
from tests.end_to_end_tests.run_in_remote_machine import RemoteTestDependenciesBuilder
# Configure logging once at import time for this script.
init_logging()
logger = logging.getLogger(__name__)
# Name of the tarball holding the packages repository root; shared between
# the dependencies builder (which produces it) and main() (which ships it).
PACKAGES_ROOT_TARBALL_NAME = "packages_repo_root.tar"
class RemotePackageTestDependenciesBuilder(RemoteTestDependenciesBuilder):
    """Builds everything a remote machine needs to run the packages tests.

    PACKAGE_BUILDER is specialised per registered package builder (see the
    registry loop below); build() produces the common test dependencies plus
    a tarball of the packages repository root.
    """

    PACKAGE_BUILDER: Type[LinuxAIOPackagesBuilder]

    def __init__(
        self,
        package_type: str,
        packages_source_type: str,
        packages_source: str,
        package_builder_name: str,
    ):
        # BUG FIX: the original called super(RemoteTestDependenciesBuilder, self)
        # — naming the PARENT class skips RemoteTestDependenciesBuilder.__init__
        # entirely and dispatches to its base instead. Use plain super().
        super().__init__()
        self.package_type = package_type
        self.packages_source_type = packages_source_type
        self.packages_source = packages_source
        self.package_builder_name = package_builder_name

    def build(self):
        """Build the test dependencies and the packages-repo-root tarball."""
        is_aio = is_builder_creates_aio_package(
            package_builder_name=self.package_builder_name
        )
        if is_aio:
            # If it's already AIO package, then just use itself
            dependencies_arch = self.__class__.PACKAGE_BUILDER.ARCHITECTURE
        else:
            # If it's non-aio, then we use x86_64 builder.
            dependencies_arch = CpuArch.x86_64
        self._build_dependencies(
            architecture=dependencies_arch,
        )
        stable_version_package_version = get_packages_stable_version()
        packages_root = create_packages_repo_root(
            packages_source_type=self.packages_source_type,
            packages_source=self.packages_source,
            package_builder=self.__class__.PACKAGE_BUILDER,
            package_type=self.package_type,
            stable_packages_version=stable_version_package_version,
            output_dir=self.work_dir,
        )
        # Pack the repo root so it can be copied into the remote machine.
        packages_root_tarball_path = self.result_dir / PACKAGES_ROOT_TARBALL_NAME
        with tarfile.open(packages_root_tarball_path, "w") as tf:
            tf.add(packages_root, arcname="/")
# Registry mapping each package builder's name to a dependencies-builder
# class specialised for it.
remote_test_dependency_builders: Dict[
    str, Type[RemotePackageTestDependenciesBuilder]
] = {}
for package_builder in ALL_PACKAGE_BUILDERS.values():
    # A fresh subclass per builder. The class body is evaluated immediately on
    # each iteration, so PACKAGE_BUILDER binds the current loop value (no
    # late-binding pitfall here).
    class _RemotePackageTestDependenciesBuilder(RemotePackageTestDependenciesBuilder):
        NAME = "remote_test_dependency_builder"
        PACKAGE_BUILDER = package_builder
    remote_test_dependency_builders[
        package_builder.NAME
    ] = _RemotePackageTestDependenciesBuilder
def main(
    package_type: str,
    packages_source_type: str,
    packages_source: str,
    distro_name,
    remote_machine_type,
    package_builder_name,
    other_cmd_args: List[str],
):
    """Build the test dependencies and run the managed-packages tests remotely.

    package_type: package format under test, forwarded to the builder/tests.
    packages_source_type / packages_source: where the packages under test
        come from; repackaged into a repo-root tarball for the remote machine.
    distro_name: key into DISTROS selecting the target distribution.
    remote_machine_type: "ec2" runs on an EC2 instance, anything else in docker.
    package_builder_name: key into remote_test_dependency_builders.
    other_cmd_args: extra CLI args forwarded verbatim to the remote pytest run.
    """
    use_aio_package = is_builder_creates_aio_package(
        package_builder_name=package_builder_name
    )
    dependencies_builder_cls = remote_test_dependency_builders[package_builder_name]
    # AIO packages run on their own architecture; non-AIO tests run on x86_64
    # (mirrors the arch choice inside the dependencies builder itself).
    if use_aio_package:
        arch = dependencies_builder_cls.PACKAGE_BUILDER.ARCHITECTURE
    else:
        arch = CpuArch.x86_64
    dependencies_builder = dependencies_builder_cls(
        package_type=package_type,
        packages_source_type=packages_source_type,
        packages_source=packages_source,
        package_builder_name=package_builder_name,
    )
    dependencies_builder.build()
    distro = DISTROS[distro_name]
    # The repo-root tarball is shipped into the remote machine and the
    # in-machine test run is pointed at it via --packages-source.
    packages_root_tarball = dependencies_builder.result_dir / PACKAGES_ROOT_TARBALL_NAME
    in_docker_packages_root_tarball = f"/tmp/{PACKAGES_ROOT_TARBALL_NAME}"
    command = [
        "tests/end_to_end_tests/managed_packages_tests/remote_machine_tests",
        "--builder-name",
        package_builder_name,
        "--package-type",
        package_type,
        "--distro-name",
        distro_name,
        "--remote-machine-type",
        remote_machine_type,
        "--packages-source-type",
        "repo-tarball",
        "--packages-source",
        in_docker_packages_root_tarball,
        *other_cmd_args,
    ]
    try:
        if remote_machine_type == "ec2":
            run_tests_in_ec2(
                ec2_image=distro.ec2_images[arch],
                ec2_instance_size_id=distro.ec2_instance_size_id,
                command=command,
                pytest_runner_path=dependencies_builder.portable_pytest_runner_path,
                source_tarball_path=dependencies_builder.agent_source_tarball_path,
                file_mappings={
                    packages_root_tarball: in_docker_packages_root_tarball,
                },
            )
        else:
            run_test_in_docker(
                docker_image=distro.docker_image,
                command=command,
                architecture=arch,
                pytest_runner_path=dependencies_builder.portable_pytest_runner_path,
                source_tarball_path=dependencies_builder.agent_source_tarball_path,
                file_mappings={
                    packages_root_tarball: in_docker_packages_root_tarball,
                },
            )
    except Exception as e:
        # Log for CI visibility, then re-raise so the process exits non-zero.
        logger.error(f"Remote test failed. Error: {str(e)}")
        raise
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Reuse the shared option definitions so flags stay consistent with the
    # pytest conftest that consumes them on the remote side.
    add_cmd_args(parser=parser, is_pytest_parser=False)
    parser.add_argument("test_path")
    # Unknown args are collected and forwarded verbatim to the remote run.
    args, other_argv = parser.parse_known_args()
    main(
        package_type=args.package_type,
        packages_source_type=args.packages_source_type,
        packages_source=args.packages_source,
        distro_name=args.distro_name,
        remote_machine_type=args.remote_machine_type,
        package_builder_name=args.builder_name,
        other_cmd_args=other_argv,
    )
|
def tri_area(width, height):
    """Area of a triangle with the given base (width) and height."""
    return (width * height) / 2
def box_area(width, height):
    """Area of a rectangle with the given width and height."""
    area = width * height
    return area
def print_area(width, height):
    """Print the triangle and rectangle areas for the given dimensions."""
    triangle = tri_area(width, height)
    rectangle = box_area(width, height)
    print("๊ฐ๋ก : ", width, " ์ธ๋ก : ", height, " ์ผ๊ฐํ์ ๋์ด : ", triangle)
    print("๊ฐ๋ก : ", width, " ์ธ๋ก : ", height, " ์ฌ๊ฐํ์ ๋์ด : ", rectangle)
if __name__ == '__main__':
    # Demonstrate with two sample sizes.
    for w, h in ((3, 5), (6, 10)):
        print_area(w, h)
class MainPage:
    """A navigation page entry identified by an MQTT topic."""

    # Discriminator read via .type to tell pages apart from devices.
    type = "Page"

    def __init__(self, topic=None, name=None, icon="home"):
        # Identity and presentation attributes for the page.
        self.topic, self.name, self.icon = topic, name, icon
class Device:
    """A switchable device addressed by an MQTT topic.

    Attributes:
        topic: MQTT topic string identifying the device.
        name: human-readable device name.
        icon: UI icon name.
        toggle: current on/off state.
        online_status: whether the device is currently reachable.
        time: timestamp of the last update ('' until one arrives).
    """
    type = "Device"
    def __init__(self, topic = None, name = None, icon="zap"):
        self.topic = topic
        self.name = name
        self.icon = icon
        self.toggle = False
        self.online_status = False
        self.time = ''
    def on(self):
        """Record that the device was switched on."""
        self.toggle = True
    def off(self):
        """Record that the device was switched off."""
        self.toggle = False
    def is_on(self):
        """Return the current on/off state."""
        return self.toggle
    def online(self):
        """Mark the device as reachable."""
        self.online_status = True
    def offline(self):
        """Mark the device as unreachable."""
        self.online_status = False
    def is_online(self):
        """Return whether the device is currently reachable."""
        return self.online_status
class DeviceSensor(Device):
    """A Device that additionally reports power-meter readings."""

    # Kept for backward compatibility with code reading the class attribute;
    # instances get their own copy in __init__ (see bug-fix note there).
    power = {'Total': 0, 'Yesterday': 0, 'Today': 0, 'Power': 0, 'Factor': 0, 'Voltage': 0, 'Current': 0}
    type = "DeviceSensor"

    def __init__(self, topic=None, name=None, icon="zap"):
        super().__init__(topic, name, icon)
        # BUG FIX: 'power' was only a class attribute, so the default reading
        # dict was shared between every DeviceSensor instance; any in-place
        # mutation leaked across sensors. Give each instance its own copy.
        self.power = dict(DeviceSensor.power)

    def new_power(self, power, time):
        """Store the latest power reading dict and its timestamp."""
        self.power = power
        self.time = time

    def last_power(self):
        """Most recent instantaneous power value."""
        return self.power['Power']

    def power_today(self):
        """Energy consumed today, per the latest reading."""
        return self.power['Today']

    def last_time(self):
        """Timestamp of the latest reading."""
        return self.time
class DictObjects:
    """A registry of device/page objects keyed by their MQTT topic."""

    def __init__(self, device=None):
        # BUG FIX: 'data' was a class attribute, so every DictObjects instance
        # shared one registry dict; make the registry per-instance.
        self.data = {}
        if device is not None:
            self.add(device)

    def add(self, device):
        """Register device under its topic; False if the topic is taken."""
        if self.check(device.topic):
            return False
        self.data[device.topic] = device
        return True

    def check(self, topic):
        """Return whether something is registered under topic."""
        return topic in self.data

    def list_of_topics(self):
        """View of all registered topics."""
        return self.data.keys()

    def type_device(self, topic):
        """The 'type' discriminator of the object registered under topic."""
        return self.data[topic].type
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
from django.conf import settings
import uuid
class Migration(migrations.Migration):
    """Create the CloudAdministrator model (table 'cloud_administrator').

    Links a user (AUTH_USER_MODEL) to a core.Provider; each row is tagged
    with a random, immutable UUID.
    """

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CloudAdministrator', fields=[
                ('id', models.AutoField(
                    verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('uuid', models.CharField(
                    default=uuid.uuid4, unique=True, max_length=36, editable=False)), ('provider', models.ForeignKey(
                    to='core.Provider')), ('user', models.ForeignKey(
                    to=settings.AUTH_USER_MODEL)), ], options={
                'db_table': 'cloud_administrator', }, bases=(
                models.Model,), ), ]
|
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/custom-actions
# This is a simple example for a custom action which utters "Hello World!"
from time import strftime
from typing import Any, Text, Dict, List
#
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
#
#
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
#
# dispatcher.utter_message(text="Hello World!")
#
# return []
import json
from datetime import date
def obtener_datos_arch(ruta):
    """Load and return the JSON content of the file at 'ruta'.

    ruta: path (str or path-like) of the JSON file to read.
    return: the deserialized JSON object (typically a dict).
    """
    # 'with' guarantees the handle is closed even if json.load raises;
    # the original leaked the descriptor on a parse error.
    with open(str(ruta), "r") as a_file:
        return json.load(a_file)
def guardar_datos_arch(ruta, datos):
    """Serialize 'datos' as JSON into the file at 'ruta' (overwriting it).

    ruta: path (str or path-like) of the JSON file to write.
    datos: any JSON-serializable object.
    """
    # 'with' guarantees the handle is flushed and closed even on error;
    # the original leaked the descriptor if json.dump raised.
    with open(str(ruta), "w") as a_file:
        json.dump(datos, a_file)
def borrar_tarea(id):
    """Close out a finished task.

    Unassigns it from its employee, archives it in tareasTerminadas.json,
    and removes it from tareasEnDesarrollo.json.
    """
    key = str(id)
    en_desarrollo = obtener_datos_arch("tareasEnDesarrollo.json")
    tarea = en_desarrollo[key]

    # Unassign the completed task from the employee's profile (-1 = no task).
    empleados = obtener_datos_arch("employees.json")
    empleados[str(tarea["empleado"])]["tarea"] = -1
    guardar_datos_arch("employees.json", empleados)

    # Archive the completed task among the finished ones.
    terminadas = obtener_datos_arch("tareasTerminadas.json")
    terminadas[key] = tarea
    guardar_datos_arch("tareasTerminadas.json", terminadas)

    # Drop the completed task from the in-progress file.
    del en_desarrollo[key]
    guardar_datos_arch("tareasEnDesarrollo.json", en_desarrollo)
def definir_actitud(idEmpleado):
    """Classify an employee's attitude from complaints and absences.

    Returns one of "problematico", "irresponsable", "quejoso" or "normal",
    checked in that priority order.
    """
    quejas = obtener_datos_arch("quejas.json")
    empleados = obtener_datos_arch("employees.json")

    # Many complaints FROM coworkers about this employee -> "problematico".
    recibidas = sum(1 for queja in quejas if quejas[queja]["id"] == idEmpleado)
    if recibidas > 5:
        return "problematico"

    # Many absences -> "irresponsable".
    if len(empleados[idEmpleado]["ausencias"]) > 10:
        return "irresponsable"

    # Files many complaints of their own -> "quejoso".
    if empleados[idEmpleado]["cant quejas"] > 3:
        return "quejoso"

    return "normal"
def asignar_tarea(empleadoID):
    """Find the first unassigned task compatible with the employee and assign it.

    A task is compatible when its rank matches the employee's rank and the
    employee has every required skill. On success the task is moved from
    tareas.json into tareasEnDesarrollo.json (stamped with assignee and start
    date) and its id is returned; returns -1 when nothing is compatible.
    """
    # Load the required data: the unassigned-task pool and the employee roster.
    tareas = obtener_datos_arch("tareas.json")
    empleados = obtener_datos_arch("employees.json")
    for tarea in tareas:
        tieneTodas = True
        # NOTE(review): reconstructed nesting — the assignment branch below is
        # assumed to sit inside this rank check; confirm against the original.
        if tareas[tarea]["rango"] == empleados[empleadoID]["rango"]:
            for habilidadReq in tareas[tarea]["habilidades necesarias"]:
                if tieneTodas == False:
                    break
                # for/else: the else runs only when no employee skill matched
                # the required one, flagging the task as incompatible.
                for habilidadEmp in empleados[empleadoID]["habilidades"]:
                    if habilidadReq == habilidadEmp:
                        tieneTodas = True
                        break
                else:
                    tieneTodas = False
            if tieneTodas == True:
                # Record the assignment on the employee (task ids stored as str).
                empleados[empleadoID]["tarea"] = str(tarea)
                guardar_datos_arch("employees.json", empleados)
                # Move the task to the in-progress file with assignee and date.
                enDesarrollo = obtener_datos_arch("tareasEnDesarrollo.json")
                enDesarrollo[tarea] = tareas[tarea]
                enDesarrollo[tarea]["empleado"] = str(empleadoID)
                hoy = date.today()
                enDesarrollo[tarea]["fecha inicio"] = hoy.strftime("%d-%m-%Y")
                guardar_datos_arch("tareasEnDesarrollo.json", enDesarrollo)
                # Remove it from the unassigned pool. Popping during iteration
                # is safe here only because we return immediately afterwards.
                tareas.pop(tarea)
                guardar_datos_arch("tareas.json", tareas)
                return tarea
    return int(-1)
# Action to register an absence.
class ActionFaltar(Action):
    """Custom Rasa action: register an employee absence with its reason."""

    def name(self) -> Text:
        return "action_faltar"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # Company limit of absences before HR gets involved.
        MAX_FALTAS = 6
        # Closing remark appended to the confirmation, keyed by attitude.
        respuestas = {"problematico": "Recorda tener una buena actitud y un trabajo ordenado cuando regreses!", "irresponsable": "Recorda que faltar mucho al trabajo puede afectar tu desempenio y tener consecuencias graves.", "quejoso": ":)", "normal": ":)"}
        a_file = open("employees.json", "r")
        json_object = json.load(a_file)
        a_file.close()
        intencion = str(tracker.latest_message["intent"]["name"])
        if (intencion == "justificar"):
            # The whole user message is stored as the absence reason.
            motivo = str(tracker.latest_message["text"])
            try:
                employee = json_object[str(tracker.get_slot("documento"))]
                actitud = definir_actitud(tracker.get_slot("documento"))
            except:
                dispatcher.utter_message(text="Perdon, no tenemos ese ID en nuestra base de datos.")
                return[]
            # Count including the absence being registered right now.
            cantidad_ausencias = len(employee["ausencias"]) + 1
            if (cantidad_ausencias > MAX_FALTAS):
                msg = "Registramos correctamente tu ausencia, en total faltaste " + str(cantidad_ausencias) + " dias, eso excede el limite de la empresa. Un empleado de Recursos Humanos estara en contacto contigo. " + respuestas[actitud]
                dispatcher.utter_message(text=msg)
            else:
                msg = "Registramos correctamente tu ausencia, en total faltaste " + str(cantidad_ausencias) + " dias. " + respuestas[actitud]
                dispatcher.utter_message(text=msg)
            # Persist the absence as {date: reason} on the employee record.
            hoy = date.today()
            d1 = hoy.strftime("%d-%m-%Y")
            employee["ausencias"].append({d1:motivo})
            json_object[str(tracker.get_slot("documento"))] = employee
            a_file = open("employees.json", "w")
            json.dump(json_object, a_file)
            a_file.close()
        else:
            # Unexpected intent: echo it back (presumably a debugging aid).
            dispatcher.utter_message(text=intencion)
        return[]
class ActionHorarios(Action):
    """Custom Rasa action: queue a request to change working hours."""

    def name(self) -> Text:
        return "action_horarios"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        a_file = open("employees.json", "r")
        json_object = json.load(a_file)
        a_file.close()
        # Requested schedule, as whole hours taken from the dialogue slots.
        hora_incio = int(tracker.get_slot("inicio"))
        hora_fin = int(tracker.get_slot("fin"))
        if (hora_fin <= hora_incio):
            dispatcher.utter_message(text="Lo siento, ese horario no es valido")
            return[]
        else:
            # Current schedule is stored as a [start, end] pair.
            horarios_actuales = json_object[str(tracker.get_slot("documento"))]["horario"]
            cant_horas = horarios_actuales[1] - horarios_actuales[0]
            cant_horas_pedidas = hora_fin - hora_incio
            # The request is queued on the employee's profile, not applied here.
            json_object[str(tracker.get_slot("documento"))]["pedidos"].append({"cambiar horas": [hora_incio, hora_fin]})
            a_file = open("employees.json", "w")
            json.dump(json_object, a_file)
            a_file.close()
            # Warn when the requested span is shorter than the current one.
            if (cant_horas_pedidas < cant_horas):
                dispatcher.utter_message(text="Ya registre tu pedido, tene en cuenta que la cantidad de horas que solicitaste son menores a las que tenes en este momento.")
            else:
                dispatcher.utter_message(text="Tu pedido fue registrado!")
            return[]
class ActionQueja(Action):
    """Custom Rasa action: file a complaint against a coworker."""

    def name(self) -> Text:
        return "action_queja"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        intencion = str(tracker.latest_message["intent"]["name"])
        # Name -> employee-id lookup table.
        a_file = open("indiceEmpleados.json", "r")
        empleados = json.load(a_file)
        a_file.close()
        # NOTE(review): quejas.json is treated as a JSON list here (.append
        # below), while definir_actitud() indexes it like a mapping — confirm
        # which format the file actually uses.
        a_file = open("quejas.json", "r")
        quejas = json.load(a_file)
        a_file.close()
        companero = str(tracker.get_slot("companero"))
        if (intencion == "motivo"):
            try:
                id_companero = empleados[companero]
            except:
                dispatcher.utter_message(text="Perdon, no puedo encontrar ese companero, lo habras escrito bien?")
                return[]
            # The whole user message is stored as the complaint reason.
            motivo = str(tracker.latest_message["text"])
            hoy = date.today()
            d1 = hoy.strftime("%d-%m-%Y")
            quejas.append({"fecha": d1, "empleado": companero, "id": id_companero, "motivo": motivo})
            a_file = open("quejas.json", "w")
            json.dump(quejas, a_file)
            a_file.close()
            dispatcher.utter_message(text="Tu queja ha sido regitrada!")
            return[]
        else:
            dispatcher.utter_message(text="Perdon no entendi eso, podrias reformular?")
            return[]
class ActionDias(Action):
    """Custom Rasa action: grant extra days for the employee's current task."""

    def name(self) -> Text:
        return "action_dias"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # Hard cap on the total days a task may have assigned.
        MAX_DAYS = 20
        empleados = obtener_datos_arch("employees.json")
        tareasEnDesarrollo = obtener_datos_arch("tareasEnDesarrollo.json")
        idEmpleado = tracker.get_slot("documento")
        idTarea = empleados[str(idEmpleado)]["tarea"]
        diasAdicionales = tracker.get_slot("dias")
        # NOTE(review): asignar_tarea() stores assigned task ids as strings
        # while the "no task" marker is the int -1, so this == -1 check may
        # miss a str "-1" — confirm how the file values are typed.
        if (idTarea == -1):
            dispatcher.utter_message(text="Parece que todavia no tenes ninguna tarea asignada. Si queres, me podes pedir que te asigne una.")
            return []
        else:
            tarea = tareasEnDesarrollo[str(idTarea)]
            dias = tarea["dias asignados"]
            if (int(dias) + int(diasAdicionales) > MAX_DAYS):
                if (int(dias) == MAX_DAYS):
                    msg = "No puedo asignarte mas dias, tu tarea ya tiene el maximo de dias extras."
                else:
                    msg = "No puedo asignarte " + str(diasAdicionales) + "extra, se pasa del limite de la empresa. Como maximo te puedo dar " + str(MAX_DAYS - int(dias)) + "dias mas."
            else:
                msg = "Perfecto, se te asignaron " + str(diasAdicionales) + " dias extra."
                # Persist the new total only when the request is granted.
                tareasEnDesarrollo[str(idTarea)]["dias asignados"] = int(dias) + int(diasAdicionales)
                guardar_datos_arch("tareasEnDesarrollo.json", tareasEnDesarrollo)
            dispatcher.utter_message(text=msg)
            return []
class ActionPedirTarea(Action):
    """Custom Rasa action: assign a new task to the employee if they have none."""

    def name(self) -> Text:
        return "action_pedir_tarea"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        empleados = obtener_datos_arch("employees.json")
        tareas = obtener_datos_arch("tareasEnDesarrollo.json")
        idEmpleado = tracker.get_slot("documento")
        idTarea = empleados[idEmpleado]["tarea"]
        # BUG FIX: asignar_tarea() stores assigned task ids as STRINGS while
        # the "no task" marker is the int -1, so the original `> 0` comparison
        # raised TypeError whenever a task was assigned. Test the marker
        # explicitly (both typed forms) instead.
        if idTarea not in (-1, "-1"):
            msg = "Parece que ya tenes una tarea sin terminar, la de " + tareas[str(idTarea)]["nombre"] + " del proyecto " + tareas[str(idTarea)]["proyecto"]
            dispatcher.utter_message(text=msg)
        else:
            tareaAsignada = asignar_tarea(idEmpleado)
            if tareaAsignada != -1:
                # BUG FIX: reload the in-progress file — asignar_tarea() just
                # moved the task into it, so the copy loaded above is stale
                # and would raise KeyError on the new task id.
                tareas = obtener_datos_arch("tareasEnDesarrollo.json")
                msg = "Encontre una tarea para vos, la de " + tareas[str(tareaAsignada)]["nombre"] + " del proyecto " + tareas[str(tareaAsignada)]["proyecto"]
                dispatcher.utter_message(text=msg)
            else:
                dispatcher.utter_message("Ufa, parece que tenemos ninguna tarea para vos en este momento :(")
        return[]
class ActionFinalizarTarea(Action):
    """Custom Rasa action: mark the employee's current task as finished."""

    def name(self) -> Text:
        return "action_finalizar_tarea"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        empleados = obtener_datos_arch("employees.json")
        idEmpleado = tracker.get_slot("documento")
        idTarea = empleados[idEmpleado]["tarea"]
        # BUG FIX: asignar_tarea() stores assigned task ids as STRINGS while
        # the "no task" marker is the int -1, so the original `< 0` comparison
        # raised TypeError whenever a task was assigned. Test the marker
        # explicitly (both typed forms) instead.
        if idTarea in (-1, "-1"):
            msg = "Parece que no tenes ninguna tarea asignada todavia. Te recomiendo que me pidas que te asigne una."
            dispatcher.utter_message(text=msg)
        else:
            borrar_tarea(idTarea)
            dispatcher.utter_message("Ya di por finalizada tu tarea, si queres me podes pedir otra.")
        return[]
|
def sum_sfarot(num):
    """Print the digit sum of a three-digit number."""
    hundreds, rest = divmod(num, 100)
    tens, ones = divmod(rest, 10)
    print(hundreds + tens + ones)
# Read a three-digit number from the user and print its digit sum.
sum_sfarot(int(input("enter number in 3 numbers: ")))
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 17:12:47 2020
@author: anish pratheepkumar
code to run the quadcopter model autonomously on a track using trained CNN model
"""
#import essential libraries
import sim
import sys
#import os
#import matplotlib.pyplot as plt
import cv2
import numpy as np
import time
from keras.models import load_model
#Loading trained CNN model with weights
PATH = '/home/anish/anaconda_py3_copelia'
QuadNet = load_model(PATH + '/Model/Quad_Net_Wt.h5')
time.sleep(0.5)
#just in case, close all opened connections to CoppeliaSim
sim.simxFinish(-1)
#connect to CoppeliaSim remote API on the default local port
clientID=sim.simxStart('127.0.0.1',19999,True,True,5000,5)
#verify that connection is established with coppeliasim (-1 means failure)
if clientID!=-1:
    print ('Connected to remote API server')
else:
    print("Not connected to remote API server")
    sys.exit("Could not connect")
#getting object handles for quad control (the target the quad follows)
err_code,target_handle = sim.simxGetObjectHandle(clientID,'Quadricopter_target',sim.simx_opmode_blocking)
#initialise empty argument containers for calling the LUA function at the server
inputInts=[] #dtype table (inside it string)
inputFloats=[] #dtype table (inside it floats)
inputStrings=[] #dtype table inside it strings
inputBuffer='' #dtype string
# Main control loop: grab a camera frame, classify it, nudge the quad target.
while True:
    ###Getting Image using LUAcode in server(coppeliaSim))###
    res,retTable1,retTable2,retTable3,retString=sim.simxCallScriptFunction(clientID,'Vision_sensor',sim.sim_scripttype_childscript,
    'getImage',inputInts,inputFloats,inputStrings,inputBuffer,sim.simx_opmode_blocking)
    if res==sim.simx_return_ok:
        image = retString
        resolution = retTable1
        #Image Processing
        image = np.array(image, dtype = np.uint8) #signedint -> unsigned int now each value range 0-255
        image.resize([resolution[0],resolution[1],3]) #reshape flat buffer to HxWx3 (e.g. 512*512*3)
        image = np.flip(image,0) #camera frames arrive vertically flipped
        image = cv2.resize(image,(int(256/2),int(256/2))) #resize image to model input dimension 128x128
        image = image[None,:,:,:] #add a batch dimension for the CNN
        #using QuadNet to predict the quad motion (assumed classes: 0=left, 1=right, otherwise forward)
        y_pred = QuadNet.predict(image)
        cls_pred = np.argmax(y_pred,axis=1)
        cls = np.squeeze(cls_pred)
        #print (cls)
        #getting current pos & orien of the quad, expressed relative to itself
        err_code, target_orien_body = sim.simxGetObjectOrientation(clientID, target_handle, target_handle, sim.simx_opmode_blocking)
        err_code, target_pos_body = sim.simxGetObjectPosition(clientID, target_handle, target_handle, sim.simx_opmode_blocking)
        #condtion for motion control of the quad (setting pos&orien based on QuadNetwork prediction)
        if cls == 0:
            #move Left: step forward and yaw left (+0.02618 rad ~ 1.5 deg)
            target_pos_body[0] = target_pos_body[0] + (0.018)
            target_orien_body[2] = target_orien_body[2] + 0.02618
            err_code = sim.simxSetObjectOrientation(clientID, target_handle, target_handle, target_orien_body, sim.simx_opmode_oneshot)
            err_code = sim.simxSetObjectPosition(clientID, target_handle, target_handle, target_pos_body, sim.simx_opmode_oneshot)
        elif cls == 1:
            #move Right: step forward and yaw right (-0.0349 rad ~ 2 deg)
            target_pos_body[0] = target_pos_body[0] + (0.018)
            target_orien_body[2] = target_orien_body[2] - 0.0349
            err_code = sim.simxSetObjectOrientation(clientID, target_handle, target_handle, target_orien_body, sim.simx_opmode_oneshot)
            err_code = sim.simxSetObjectPosition(clientID, target_handle, target_handle, target_pos_body, sim.simx_opmode_oneshot)
        else:
            #move forward: step forward with no yaw change
            target_pos_body[0] = target_pos_body[0] + (0.018)
            target_orien_body[2] = target_orien_body[2] + 0.0
            err_code = sim.simxSetObjectOrientation(clientID, target_handle, target_handle, target_orien_body, sim.simx_opmode_oneshot)
            err_code = sim.simxSetObjectPosition(clientID, target_handle, target_handle, target_pos_body, sim.simx_opmode_oneshot)
    #time.sleep(0.025)
|
#Code to find sum and average of elements in list and to multiply all elements in list
def sumAndAvg(lst):
    """Print the sum, average and product of the numbers in lst.

    lst: non-empty sequence of numbers.
    raises: ValueError if lst is empty (the average is undefined).
    """
    # Explicit guard instead of the original's ZeroDivisionError on [].
    if not lst:
        raise ValueError("lst must not be empty")
    # Accumulate without shadowing the builtin `sum` (the original did).
    total = 0
    product = 1
    for item in lst:
        total += item
        product *= item
    avg = total / len(lst)
    print("Sum of elements in list is {}".format(total))
    print("Average of elements in list is {}".format(avg))
    print("Multiply of elements in list is {}".format(product))
# Demo call with a sample list.
sumAndAvg([1,3,4,5,6,7,8,9,12])
|
# Intro-to-Python demo: printing, comments, variables and concatenation.
print("Hello, Themyscira!")
# This is a comment that won't be interpreted as a command.
# Associate the variable diana with the value "Wonder Woman 1984"
diana="Wonder Woman 1984"
# Print a message with the true identity of Diana
print("I believe Diana is actually "+diana)
# Define a power (function) to chant a phrase
def chant(phrase):
    """Print the phrase repeated three times as a single line."""
    print(phrase * 3)

# Invoke the power chant on the phrase "Wonder Woman 1984!"
chant("Wonder Woman 1984! ")
def lassoLetter(letter, shiftAmount):
    """Caesar-shift a single letter by shiftAmount, wrapping within a-z.

    The input is lowercased first, so the result is always a lowercase
    letter; negative shifts wrap backwards through the alphabet.
    """
    ALPHABET_SIZE = 26
    base = ord('a')
    offset = ord(letter.lower()) - base
    shifted = (offset + shiftAmount) % ALPHABET_SIZE
    return chr(base + shifted)
#!/usr/bin/python
import sys, os, string, re
#******************************
# check that the patch numbers are in order
# August 30, 2006
# Annette Roll (help from Mike Harris)
##########################################
# function to determine missing patches
def getMissingPatches(num_patches, s_patches):
    # For each patch, compare the current patch value with the next higher
    # patch value; if the next isn't exactly 1 greater, print every value in
    # the gap (current+1 .. next-1) as a missing patch.
    # s_patches: sorted list of patch-number strings (may carry whitespace).
    count = 0
    while count < num_patches - 1:
        current_patch = string.atoi(string.strip(s_patches[count]))
        next_patch = string.atoi(string.strip(s_patches[count + 1]))
        if current_patch + 1 != next_patch:
            patch_diff = next_patch - current_patch - 1
            while patch_diff > 0:
                current_patch += 1
                print "Missing patch " + str(current_patch)
                patch_diff -= 1
        count += 1
#Collect a list of patch files, make sure the total number of patches
#matches the patch number of the last file in the directory.
#
#Returns 1 on success, 0 on failure.
def checkPatches(path, debug=0):
    patchpath = path
    os.chdir(patchpath)
    #Collect the patch numbers, sort, and count.
    # NOTE(review): list.sort() on strings is lexicographic, not numeric, and
    # the grep pattern's '.' is an unescaped regex dot — this assumes
    # zero-padded patch numbers and well-formed names; confirm the scheme.
    s_patches = os.popen("find | grep .mvlpatch | awk -F - {'print $2'}").readlines()
    s_patches.sort()
    num_patches = len(s_patches)
    # If the first patch + number of patches != highest patch number, fail. Else succeed.
    if string.atoi(s_patches[0]) + (num_patches - 1) != string.atoi(s_patches[num_patches - 1]):
        if debug:
            print "\npatches out of sequence, determining missing patches..."
            getMissingPatches(num_patches, s_patches)
            return 0
        else:
            return 0
    else:
        if debug:
            print "\npatches in sequnce, no missing patches"
            print "first patch value is %s" % string.strip(s_patches[0])
            print "last patch value is %s" % string.strip(s_patches[num_patches - 1])
            print "number of patches is %s" % num_patches
            print
            return 1
        else:
            return 1
if __name__ == "__main__":
    # Exactly one argument: the directory holding the patch files.
    if len(sys.argv) != 2:
        print "usage: %s %s" % (sys.argv[0], "<patch path>")
        sys.exit(1)
    checkPatches(sys.argv[1], debug=1)
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import ticker
from matplotlib.ticker import MaxNLocator,MultipleLocator, FormatStrFormatter, FuncFormatter, ScalarFormatter
import matplotlib as mpl
from matplotlib import cm
def gradient_fill_between(x, y1, y2=0, values=None, cmap=None, where=None, ax=None):
    """
    Same as matplotlib fill_between but uses a colormap with values to color in the region.

    x       1-d array of x positions
    y1, y2  curves bounding the region (scalar or array; scalars broadcast)
    values  values mapped through *cmap* to color each strip (default 0..1 ramp)
    cmap    colormap instance or name (default viridis)
    where   boolean mask selecting which strips to draw (default all)
    ax      target axes (default current axes)
    """
    N = len(x)
    if ax is None:
        ax = plt.gca()
    # Broadcast scalar bounds to full-length arrays.
    if np.isscalar(y1):
        y1 = np.full_like(x, y1)
    if np.isscalar(y2):
        y2 = np.full_like(x, y2)
    if where is None:
        where = np.full_like(x, True, dtype=bool)
    if values is None:
        values = np.linspace(0, 1, N)
    if cmap is None:
        cmap = mpl.cm.viridis
    elif isinstance(cmap, str):
        cmap = cm.get_cmap(cmap)
    # One quadrilateral strip per adjacent pair of samples.
    # (Removed the unused local dx = x[1] - x[0].)
    verts = []
    for i in range(N-1):
        if where[i]:
            verts.append([(x[i],y1[i]), (x[i+1],y1[i+1]), (x[i+1],y2[i+1]), (x[i],y2[i]) ])
    colors = cmap(values)
    collection = mpl.collections.PolyCollection(verts, edgecolors=colors, facecolors=colors)
    ax.add_collection(collection)
def pcolor_z_info(data, xdata, ydata, ax=None):
    """Attach an interactive coordinate formatter to *ax* so hovering over
    a pcolor plot also reports the underlying z value.

    data   2-d data
    xdata  1-d x data
    ydata  1-d y data
    ax     axis (defaults to the current axes)

    Returns the formatter callable (also installed as ax.format_coord).
    """
    if not ax:
        ax = plt.gca()
    n_rows, n_cols = data.shape

    def format_coord(xpos, ypos):
        # Nearest grid indices to the cursor position.
        col_idx = np.argmin(np.abs(xpos - xdata))
        row_idx = np.argmin(np.abs(ypos - ydata))
        if 0 <= col_idx < n_cols and 0 <= row_idx < n_rows:
            return 'x=%1.4g, y=%1.4g, z=%1.4g' % (xpos, ypos, data[row_idx, col_idx])
        return 'x=%1.4g, y=%1.4g' % (xpos, ypos)

    ax.format_coord = format_coord
    return format_coord
def modify_legend(**kwargs):
    """Rebuild the current axes' legend, overriding selected properties.

    Reads every visual setting off the existing legend object, merges
    *kwargs* on top, then calls plt.legend() again.  No-op when the axes
    has no legend.

    NOTE(review): this reaches into several private matplotlib attributes
    (_loc, _ncol, _mode, ...) whose spelling varies across matplotlib
    versions -- verify against the pinned matplotlib version.
    """
    l = plt.gca().legend_
    # Nothing to modify if no legend has been created yet.
    if l == None:
        return
    # Snapshot of the current legend's settings, used as defaults.
    defaults = dict(
        loc = l._loc,
        numpoints = l.numpoints,
        markerscale = l.markerscale,
        scatterpoints = l.scatterpoints,
        scatteryoffsets = l._scatteryoffsets,
        prop = l.prop,
        # fontsize = None,
        borderpad = l.borderpad,
        labelspacing = l.labelspacing,
        handlelength = l.handlelength,
        handleheight = l.handleheight,
        handletextpad = l.handletextpad,
        borderaxespad = l.borderaxespad,
        columnspacing = l.columnspacing,
        ncol = l._ncol,
        mode = l._mode,
        fancybox = type(l.legendPatch.get_boxstyle())==matplotlib.patches.BoxStyle.Round,
        shadow = l.shadow,
        title = l.get_title().get_text() if l._legend_title_box.get_visible() else None,
        framealpha = l.get_frame().get_alpha(),
        bbox_to_anchor = l.get_bbox_to_anchor()._bbox,
        bbox_transform = l.get_bbox_to_anchor()._transform,
        frameon = l._drawFrame,
        handler_map = l._custom_handler_map,
    )
    # "fontsize" is accepted as a convenience alias for prop's size.
    if "fontsize" in kwargs and "prop" not in kwargs:
        defaults["prop"].set_size(kwargs["fontsize"])
    defaults.update(kwargs)
    plt.legend(**defaults)
def fitted_colorbar(im, size="3%", pad=0.15, label=None, ax=None, **kwargs):
    """ Add a colorbar whose height matches the parent axes.

    im     the image (returned by pcolormesh/imshow)
    size   the width, as a percentage ("x%")
    pad    spacing between figure and colorbar
    label  colorbar label
    ax     parent axes (defaults to the current axes)
    """
    target = plt.gca() if ax is None else ax
    # Carve a slim axes off the right-hand side of the parent.
    cax = make_axes_locatable(target).append_axes("right", size=size, pad=pad)
    extra = dict(kwargs)
    if label:
        extra["label"] = label
    return plt.colorbar(im, cax=cax, **extra)
def colorbar(cmap, vmin, vmax, label=None, **kwargs):
    """Adds a colorbar to the plot (useful when using colormaps outside of colormeshes).

    cmap   colormap
    vmin   minimum value
    vmax   maximum value
    label  colorbar label
    """
    norm = plt.Normalize(vmin=vmin, vmax=vmax)
    mappable = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    # Pretend data has been set so plt.colorbar accepts the mappable.
    mappable._A = []
    bar = plt.colorbar(mappable)
    if label:
        bar.set_label(label, **kwargs)
    return bar
def top_colorbar(size="3%", pad=0.15, shrink=1.0, aspect=20, label=None, **colorbar_kw):
    """ Add a colorbar to the top of the figure.

    size    the width, as a percentage ("x%") -- currently unused, kept
            for interface compatibility
    pad     spacing between figure and colorbar
    label   colorbar label
    """
    cax, kw = mpl.colorbar.make_axes(plt.gca(), location='top', shrink=shrink, pad=pad, aspect=aspect)
    if label:
        kw['label'] = label
    kw.update(colorbar_kw)
    cb = plt.colorbar(cax=cax, format='%1.1f', **kw)
    # Ticks and label go above the bar, not below it.
    cb.ax.xaxis.set_ticks_position('top')
    cb.ax.xaxis.set_label_position('top')
    # Limit tick clutter on the horizontal bar.
    cb.locator = ticker.MaxNLocator(nbins=5)
    cb.update_ticks()
    return cb
def scientific_axis(precision=1, power=None, ax=None, show_multiplier=True):
    """Format the y-axis in scientific notation with a shared multiplier.

    precision        floating point precision of the tick labels
    power            set the power-of-ten explicitly (default: derived
                     from the current upper y limit)
    ax               axis to be used (default: current axes)
    show_multiplier  if True, draw the x10^n multiplier above the axis
    """
    if not ax:
        ax = plt.gca()
    # Determine the exponent n from the top y limit unless given explicitly.
    if power is None:
        ymin, ymax = ax.get_ylim()
        # Parse the exponent out of the scientific representation of ymax.
        # (Fix: the old '+'/'-' search found the mantissa's minus sign for
        # negative ymax; splitting at 'e' handles all sign combinations,
        # since int() accepts '+03' and '-03'.)
        rep = "{0:.{1}e}".format(ymax, precision)
        n = int(rep[rep.find('e') + 1:])
    else:
        n = power
    # Tick labels show the mantissa only.
    def formatter(xtick, pos):
        return '{0:.{1}f}'.format(xtick/10**n, precision)
    ax.yaxis.set_major_formatter( FuncFormatter(formatter) )
    # Draw the common multiplier just above the top-left corner of the axes.
    if show_multiplier:
        bbox = ax.get_position()
        x, y = bbox.corners()[1]
        buff = bbox.height*0.01
        plt.figtext(x, y+buff, r'$\times \, \mathregular{{10^{{ {0} }} }}$'.format(n))
def set_axis_formatter(format_str = '%.1f', axis='both'):
    """Apply a printf-style major tick formatter to the x and/or y axis.

    format_str  printf-style format string for tick labels
    axis        'x', 'y', or 'both'
    """
    # A fresh formatter instance per axis: matplotlib binds formatters to
    # the axis they are installed on.
    if axis in ('both', 'x'):
        plt.gca().xaxis.set_major_formatter(FormatStrFormatter(format_str))
    if axis in ('both', 'y'):
        plt.gca().yaxis.set_major_formatter(FormatStrFormatter(format_str))
def set_num_ticks(min_ticks, max_ticks, axis='both', prune=None, ax=None):
    """ Set bounds on the number of ticks """
    if ax is None:
        ax = plt.gca()
    targets = []
    if axis in ('both', 'x'):
        targets.append(ax.xaxis)
    if axis in ('both', 'y'):
        targets.append(ax.yaxis)
    # A fresh locator per axis: locators are bound to one axis each.
    for axis_obj in targets:
        axis_obj.set_major_locator(MaxNLocator(min_n_ticks=min_ticks, nbins=max_ticks, prune=prune))
def axis_equal_aspect(ax=None):
    """Set the axes aspect ratio so the x and y spans render equally."""
    if not ax:
        ax = plt.gca()
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    ax.set_aspect((xmax - xmin) / (ymax - ymin))
|
import random

# Ask for the die's range; fall back to 1-20 on bad or missing input.
# (Fix: bare `except` also swallowed KeyboardInterrupt; catch only the
# errors int()/input() can raise here.)
try:
    min_value = int(input('Enter the minimum value of the die: '))
    max_value = int(input('Enter the maximum value of the die: '))
except (ValueError, EOFError):
    print('Input invalid, program will revert to default.')
    min_value = 1
    max_value = 20

again = True
while again:
    print(random.randint(min_value, max_value))
    another_roll = input('Want to roll the die again? ')
    # Any answer other than yes/y stops the loop.
    again = another_roll.lower() in ('yes', 'y')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 20:14:57 2020
@author: Pinar
@description: creating figs for NeurIPS
"""
import gym
import random
import string
import os
import numpy as np
from diabetes import *
from constants import *
from gym.envs.registration import register
# Register a custom simglucose gym environment for the patient configured
# in constants, then instantiate it once at module level (shared by the
# sampling functions below).
register(
    id='Pinar_SimGlucose-v0',
    entry_point='SimGlucose.simglucose.envs:Pinar_T1DSimEnv',
    kwargs={'patient_name':PATIENT}
)
env = gym.make('Pinar_SimGlucose-v0')
def compute_min_max_return():
    """Estimate the smallest and largest single-step rewards by sampling
    uniformly random (CR, CF) actions for ten million episodes.

    Returns (min_reward, max_reward), seeded from MIN_RETURN/MAX_RETURN.
    """
    lowest = MIN_RETURN
    highest = MAX_RETURN
    for step in range(int(1e+7)):
        # Periodic progress report every 1000 iterations.
        if (step % 1000) == 0:
            print('Iteration: ', step)
            print('Min return: ', lowest)
            print('Max return: ', highest)
        cr = np.random.uniform(low=CR_LOW, high=CR_HIGH)
        cf = np.random.uniform(low=CF_LOW, high=CF_HIGH)
        env.reset()
        observation, reward, done, info = env.step([cr, cf])
        highest = max(highest, reward)
        lowest = min(lowest, reward)
    return lowest, highest
def compute_min_max_return_from_data(path, mini, maxi):
    """Scan a tab-separated log file and fold its rewards into the running
    (mini, maxi) bounds.

    path  TSV file with a header row; reward is the fifth column
    mini  current minimum reward
    maxi  current maximum reward

    Returns the updated (mini, maxi) tuple.
    """
    with open(path, 'r') as f:
        next(f) # skip header
        for line in f:
            [cr, cf, sampled_cr, sampled_cf, reward] = line.split('\t')
            # float() ignores a trailing '\n' by itself; the old
            # reward[:-1] slice corrupted the value when the final line
            # had no trailing newline (it chopped a digit instead).
            r = float(reward)
            if r > maxi:
                maxi = r
            if r < mini:
                mini = r
    return mini, maxi
def investigate_params():
    """Report the smallest J and the extreme rewards across all evaluation
    policy CSVs in the NeurIPS_fig1 folder.

    NOTE(review): compute_J (defined below) takes two policy parameters
    (cr_c, cf_c), but it is called here with a single file path -- this
    looks written against an older, file-based compute_J and would raise
    TypeError as-is.  Confirm which compute_J is intended.
    """
    #mini, maxi = compute_min_max_return_from_data('/Users/Pinar/Desktop/behavior_policy.csv', MIN_RETURN, MAX_RETURN)
    #print('---------------------------------')
    #compute_J('/Users/Pinar/Desktop/behavior_policy.csv')
    print('---------------------------------')
    filenames = os.listdir('/Users/Pinar/Desktop/NeurIPS_fig1/diabetes_eval_policies/')
    datasets = [filename for filename in filenames if filename.endswith('.csv')]
    # Running extremes; initialized to sentinels outside the expected range.
    mini_J = 100000
    mini = 100000000000
    maxi = -10
    for file in datasets:
        m = compute_J('/Users/Pinar/Desktop/NeurIPS_fig1/diabetes_eval_policies/'+file)
        if m < mini_J:
            mini_J = m
        mini, maxi = compute_min_max_return_from_data('/Users/Pinar/Desktop/NeurIPS_fig1/diabetes_eval_policies/'+file, mini, maxi)
    print('Smallest J: ', mini_J)
    print('Smallest reward: ', mini)
    print('Largest reward: ', maxi)
# Mega distribution from which all policy parameters are chosen from
# Uniform distribution used
def pick_policy_distribution():
    """Draw a (CR, CF) policy-parameter pair uniformly from the ranges in
    constants."""
    return (np.random.uniform(low=CR_LOW, high=CR_HIGH),
            np.random.uniform(low=CF_LOW, high=CF_HIGH))
# Create datasets
def create_datasets(cr_c_b, cf_c_b):
    """Generate NUM_DATASETS behavior-policy log files under
    PATH/data/diabetes_b_policy/.

    Each file holds NUM_POINTS_PER_DATASET rows: the behavior policy's
    (CR, CF) mode, the sampled action, and the observed reward.
    """
    os.mkdir(PATH+'data/diabetes_b_policy/')
    for _ in range(0, NUM_DATASETS):
        # Random 10-letter file name.
        stem = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
        with open(PATH+'data/diabetes_b_policy/'+stem+'.csv', 'w+') as handle:
            handle.write('CR\tCF\tSampledCR\tSampledCF\tReward\n')
            for _ in range(0, NUM_POINTS_PER_DATASET):
                # Actions drawn from a triangular distribution whose mode
                # is the behavior policy's parameter.
                cr = np.random.triangular(left=CR_LOW, mode=cr_c_b, right=CR_HIGH, size=None)
                cf = np.random.triangular(left=CF_LOW, mode=cf_c_b, right=CF_HIGH, size=None)
                env.reset()
                observation, reward, done, info = env.step([cr, cf])
                row = [cr_c_b, cf_c_b, cr, cf, reward]
                handle.write('\t'.join(str(v) for v in row) + '\n')
                handle.flush()
def compute_J(cr_c, cf_c):
    """Monte-Carlo estimate of the normalized expected return J for the
    triangular policy centered at (cr_c, cf_c).

    Side effect: logs the NUM_SAMPLES_FOR_J sampled trajectories to a
    randomly named CSV under PATH/data/.
    """
    stem = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
    log_path = PATH+'data/'+stem+'_J_'+str(cr_c)+'_'+str(cf_c)+'.csv'
    # Sample and log the trajectories.
    with open(log_path, 'w+') as handle:
        handle.write('CR\tCF\tSampledCR\tSampledCF\tReward\n')
        for _ in range(0, NUM_SAMPLES_FOR_J):
            cr = np.random.triangular(left=CR_LOW, mode=cr_c, right=CR_HIGH, size=None)
            cf = np.random.triangular(left=CF_LOW, mode=cf_c, right=CF_HIGH, size=None)
            env.reset()
            observation, reward, done, info = env.step([cr, cf])
            handle.write(str(cr_c)+'\t'+str(cf_c)+'\t'+str(cr)+'\t'+str(cf)+'\t'+str(reward)+'\n')
            handle.flush()
    # Read the log back and average the normalized rewards.
    total_reward = 0
    num_samples = 0
    with open(log_path, 'r') as handle:
        next(handle) # skip header
        for line in handle:
            reward = line.split('\t')[-1]
            total_reward += normalize_return(float(reward[:-1])) # strip '\n'
            num_samples += 1
    return total_reward / num_samples
def safety_tests(K, datasets, cr_behavior, cr_evaluation, cf_behavior, cf_evaluation, highest_cr_ratio, highest_cf_ratio, attacker_reward):
    """Run the CH safety test with plain IS and WIS weights (no Panacea
    clipping) for every dataset and attack size, appending the results to
    results/without_panacea.csv.

    K                         iterable of attack sizes (trajectory counts)
    datasets                  behavior-policy CSV file names
    *_behavior/*_evaluation   policy random variables for the IS weights
    highest_*_ratio           per-dimension IS-ratio bounds; their product
                              is used both for the injected trajectories
                              and as the CH bound b for IS
    attacker_reward           reward value attached to injected trajectories
    """
    print('Computing results for safety tests...')
    with open(PATH+'results/without_panacea.csv', 'w+') as f:
        f.write('k\tEstimator\tResult\tProblem\n')
        for file in datasets:
            is_weights, rewards = compute_IS_weights(file, cr_behavior, cr_evaluation, cf_behavior, cf_evaluation)
            for k in K: # number of trajectories added inc:
                # Inject k adversarial trajectories at the maximum IS ratio.
                adversarial_is_weights, adversarial_rewards = add_adversarial_trajectories(is_weights, rewards, (highest_cr_ratio * highest_cf_ratio), attacker_reward, k)
                ch_wis_weights = create_wis_weights(adversarial_is_weights)
                # CH bound: b is the max IS weight for IS, 1 for WIS.
                ch_is = CH(adversarial_is_weights * adversarial_rewards, DELTA, b=highest_cr_ratio*highest_cf_ratio)
                ch_wis = CH(ch_wis_weights * adversarial_rewards, DELTA, b=1)
                f.write(str(k)+'\tIS\t'+str(ch_is)+'\tDiabetes\n')
                f.write(str(k)+'\tWIS\t'+str(ch_wis)+'\tDiabetes\n')
                f.flush()
def panacea(K, datasets, cr_behavior, cr_evaluation, cf_behavior, cf_evaluation, highest_cr_ratio, highest_cf_ratio, lowest_cr_ratio, lowest_cf_ratio, attacker_reward):
    """Run the CH safety test with Panacea weight clipping for both IS and
    WIS, appending results to results/with_panacea.csv.

    For each dataset, estimator, attack size k (k=0 is skipped) and each
    per-estimator alpha, compute the clipping threshold c, inject the
    adversarial trajectories, clip the IS weights at c, and record the
    resulting CH bound.
    """
    #I_max_diabetes = 1879.306869629937
    #I_min_diabetes = 0.09010563601580672
    print('Computing results for Panacea...')
    with open(PATH+'results/with_panacea.csv', 'w+') as f:
        f.write('k\tEstimator\tpanaceaAlpha\tClip\tResult\tProblem\n')
        for file in datasets:
            for weighting in ['IS', 'WIS']:
                is_weights, rewards = compute_IS_weights(file, cr_behavior, cr_evaluation, cf_behavior, cf_evaluation)
                for k in K[1:]: # number of trajectories added to D of size 1500
                    # Alpha grid depends on the estimator.
                    if weighting == 'IS':
                        Alpha = ALPHA_IS
                    else:
                        Alpha = ALPHA_WIS
                    for alpha in Alpha:
                        # Compute clipping weight based on weighting
                        if weighting == 'IS':
                            c = compute_c('CH', 'IS', alpha, k, 1500)
                        else:
                            c = compute_c('CH', 'WIS', alpha, k, 1500, lowest_cr_ratio*lowest_cf_ratio)
                        assert c > 0
                        adversarial_is_weights, adversarial_rewards = add_adversarial_trajectories(is_weights, rewards, (highest_cr_ratio * highest_cf_ratio), attacker_reward, k)
                        adversarial_is_weights[adversarial_is_weights > c] = c # clip IS weights only
                        if weighting == 'IS':
                            ch_is = CH(adversarial_is_weights * adversarial_rewards, DELTA, b=c)
                            f.write(str(k)+'\tIS\t'+str(alpha)+'\t'+str(c)+'\t'+str(ch_is)+'\tDiabetes\n')
                            f.flush()
                        else:
                            ch_wis_weights = create_wis_weights(adversarial_is_weights)
                            ch_wis = CH(ch_wis_weights * adversarial_rewards, DELTA, b=1)
                            f.write(str(k)+'\tWIS\t'+str(alpha)+'\t'+str(c)+'\t'+str(ch_wis)+'\tDiabetes\n')
                            f.flush()
def run_diabetes(arg):
    """Top-level driver for the diabetes experiments.

    arg == '1': sample a fresh behavior policy, generate its datasets, and
    search for an evaluation policy with strictly lower J.  Any other arg
    reuses the fixed policies used in the paper.  Then runs the plain
    safety tests and the Panacea variants for k = 0..150 injected
    trajectories.

    Returns (J_behavior, J_eval).
    """
    if arg == '1': # pick random behavior and evaluation policy
        print('Behavior policy selected.')
        cr_c_b, cf_c_b = pick_policy_distribution() # behavior policy
        print('Creating datasets for behaviour policy...')
        create_datasets(cr_c_b, cf_c_b)
        print('Computing performance of behaviour policy...')
        J_behavior = compute_J(cr_c_b, cf_c_b)
        print('J(\pi_b): ', J_behavior)
        print('Finding a suitable evaluation policy...')
        # Rejection-sample until the evaluation policy performs worse.
        flag = True
        while flag:
            cr_c_e, cf_c_e = pick_policy_distribution() # evalution policy
            J_eval = compute_J(cr_c_e, cf_c_e)
            if J_eval < J_behavior:
                flag = False
        print('Evaluation policy selected.')
        print('J(\pi_e): ', J_eval)
        cr_behavior, cf_behavior, cr_evaluation, cf_evaluation = create_rvs(cr_c_b, cf_c_b, cr_c_e, cf_c_e)
    else: # recreate results in paper
        print('Behavior policy selected.')
        print('Creating datasets for behaviour policy...')
        #create_datasets(CR_MEAN, CF_MEAN)
        print('Computing performance of behaviour policy...')
        #J_behavior = 0.21880416377956427
        J_behavior = compute_J(CR_MEAN, CF_MEAN)
        print('J(\pi_b): ', J_behavior)
        print('Computing performance of evaluation policy...')
        # Fixed evaluation-policy parameters from the paper.
        cr_c_e, cf_c_e = [22.15929135188824, 49.95430132738277] # evalution policy
        #J_eval = 0.14460582621276313
        J_eval = compute_J(cr_c_e, cf_c_e)
        print('J(\pi_e): ', J_eval)
        cr_behavior, cf_behavior, cr_evaluation, cf_evaluation = create_rvs(CR_MEAN, CF_MEAN, cr_c_e, cf_c_e)
    # Datasets of behavior policy
    filenames = os.listdir(PATH+'data/diabetes_b_policy/')
    datasets = [filename for filename in filenames if filename.endswith('.csv')]
    # Attacker adds k copies of this for CH+IS and CH+WIS
    highest_cr_ratio = attacker_strategy(CR_LOW, CR_HIGH, cr_behavior, cr_evaluation)
    highest_cf_ratio = attacker_strategy(CF_LOW, CF_HIGH, cf_behavior, cf_evaluation)
    lowest_cr_ratio = attacker_strategy(CR_LOW, CR_HIGH, cr_behavior, cr_evaluation, False)
    lowest_cf_ratio = attacker_strategy(CF_LOW, CF_HIGH, cf_behavior, cf_evaluation, False)
    attacker_reward = 1
    # Attack sizes 0..150 inclusive.
    K = np.arange(0, 151, 1)
    safety_tests(K, datasets, cr_behavior, cr_evaluation, cf_behavior, cf_evaluation, highest_cr_ratio, highest_cf_ratio, attacker_reward)
    panacea(K, datasets, cr_behavior, cr_evaluation, cf_behavior, cf_evaluation, highest_cr_ratio, highest_cf_ratio, lowest_cr_ratio, lowest_cf_ratio, attacker_reward)
    return J_behavior, J_eval
|
def count_consonants(str):
    """Return how many characters of *str* are English consonants.

    Case-insensitive; note 'y' is not counted as a consonant here.
    (Parameter name shadows the builtin but is kept for interface
    compatibility.)
    """
    return sum(1 for ch in str.lower() if ch in 'qwrtpsdfghjklzxcvbnm')
|
# Global verbosity filter: "all", "temp", "debug", or anything else for
# user-level messages only.
debug_level = "all"

def log(message, type="TEMP"):
    """Print *message* at the given level ('temp'/'t', 'debug'/'d',
    'user'/'u'), honoring the module-level debug_level filter.

    Messages whose level/filter combination does not match are dropped.
    """
    #print("logging..."+type)
    if (type == 'temp' or type == 't') and (debug_level == "all" or debug_level == 'temp'):
        print("\ntest value: " + message + "----------\n")
    # Fix: the original "a or b and (...)" let type=='debug' bypass the
    # debug_level check entirely, because 'and' binds tighter than 'or'.
    elif (type == 'debug' or type == 'd') and (debug_level == "all" or debug_level == 'debug' or debug_level == 'temp'):
        print("->" + message)
    elif type == "user" or type == 'u':  # user messages show at all debug levels
        print(": " + message)
import numpy as np
from api import *
import tensorflow as tf
import math
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^-z); elementwise for numpy arrays."""
    return 1.0 / (1.0 + np.exp(-z))
def relu(z):
    """Elementwise rectified linear unit: max(0, z).

    Fix: the original loop mutated the caller's array in place and only
    handled 1-D sequences; np.maximum returns a new array and works for
    any array shape.
    """
    return np.maximum(z, 0)
def iterate_nn(input):
    """Forward pass through the module-level `weights` MLP.

    ReLU after every layer except the last, which stays linear.
    (Parameter name shadows the builtin but is kept for interface
    compatibility.)
    """
    last_layer = len(weights) - 1
    for layer, w in enumerate(weights):
        if layer == 0:
            h = relu(np.dot(input, w))
        elif layer == last_layer:
            h = np.dot(h, w)
        else:
            h = relu(np.dot(h, w))
    return h
def update_traj(current, next_point):
    """Slide the trajectory window one 3-d point to the left and write
    *next_point* into the freed last slot.

    The trailing 4 entries (non-point data) are left untouched.
    Mutates and returns *current*.
    """
    num_of_points = int((len(current) - 4) / 3)
    # Drop the oldest point by shifting every later point down one slot.
    for k in range(num_of_points - 1):
        current[k*3:(k+1)*3] = current[(k+1)*3:(k+2)*3]
    current[(num_of_points-1)*3:num_of_points*3] = next_point
    return current
def getPoint(filename, length):
    """Read rows from build_cmake/<folder>/<filename> and return a flat
    list: the six state values of each of the first *length* rows,
    followed by four extra values taken from the first row after that
    window.

    NOTE(review): `folder` and `csv` are not defined in this file --
    presumably provided by `from api import *`; confirm.  Also note the
    function returns None when the file has at most *length* rows (the
    `else` branch is never reached).
    """
    count = 0
    data = np.zeros((6,length))
    output = []
    print (filename)
    with open('build_cmake/' + folder + '/' + filename, 'rt') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in spamreader:
            # Buffer the first `length` rows' six leading columns.
            if count < length:
                data[0,count] = float(row[0])
                data[1,count] = float(row[1])
                data[2,count] = float(row[2])
                data[3,count] = float(row[3])
                data[4,count] = float(row[4])
                data[5,count] = float(row[5])
                count += 1
            else:
                # Flatten the buffered window, append 4 extras from the
                # current row, and return immediately.
                for i in range(length):
                    for j in range(6):
                        output.append(float(data[j,i]))
                for i in range(4):
                    output.append(float(row[i+6]))
                return output
# Load the 7 pre-trained MLP weight matrices (shared by iterate_nn).
weights = []
for i in range(7):
    w = np.load('nn_weights/w_' + str(i) + '_49000.npy')
    weights.append(w)
# Resolve the data file for the given parameters and read the seed window.
# NOTE(review): fileFromVars/loadData/cm/plt/pylab are not defined in this
# file -- presumably supplied by `from api import *`; confirm.
file = fileFromVars(0.0, 0.0, 0.0, 0.0, 26)
print (file)
start = getPoint(file,2)
print (start)
data = loadData(0,0,file)
size = 100
#import pdb; pdb.set_trace()
# Reference trajectory scatter, colored by time step.
colors = cm.rainbow(np.linspace(0, 1, size))
fig = plt.figure()
ax = plt.axes()
ax.scatter(data[:size,0],data[:size,1],c=colors)
# pylab.show()
# Roll the network forward `size` steps, adding unit Gaussian noise to
# each prediction, and integrate the outputs into an (x, y) path.
# NOTE(review): the /60.0 below presumably converts per-frame deltas at
# 60 fps into positions -- confirm.
test_data = np.array(start[3:])
colors = cm.rainbow(np.linspace(0, 1, size))
# fig = plt.figure()
# ax = plt.axes()
start_x = start[0]
start_y = start[1]
# fig2 = plt.figure(num=0)
# ax = plt.axes()
# ax.scatter(x,y,c=colors)
# pylab.show()
x = []
y = []
#x.append(start[0])
#y.append(start[1])
for i in range(size):
    output = iterate_nn(test_data)
    output += np.random.normal(0, 1, 3)
    test_data = update_traj(test_data,output)
    print (output)
    #print (test_data)
    #ax.scatter(output[0], output[1], output[2], c=colors)
    start_x = start_x + output[0]/60.0
    start_y = start_y + output[1]/60.0
    x.append(start_x)
    y.append(start_y)
ax.scatter(x,y,c=colors)
pylab.show()
|
class Solution:
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number (T0=0, T1=1, T2=1)."""
        seq = (0, 1, 1)
        # The first three terms are fixed.
        if n < 3:
            return seq[n]
        a, b, c = seq
        # Roll the three-term window forward until c holds T(n).
        for _ in range(n - 2):
            a, b, c = b, c, a + b + c
        return c
import numpy as np
import ctypes
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
import test_math
# Problem size: m x n dense matrix with ~25% entries zeroed (treated as
# missing values below).
m = int(1e1)
n = int(9e0)
nz = int(.25*m*n)
nthreads = 4
np.random.seed(123)
X = np.random.gamma(1,1, size=(m,n))
X[np.random.randint(m, size=nz), np.random.randint(n, size=nz)] = 0
# Guarantee no all-missing row or column, so the bias means are defined.
all_NA_row = (X == 0).sum(axis = 1) == n
X[all_NA_row, 0] = 1.234
all_NA_col = (X == 0).sum(axis = 0) == m
X[0, all_NA_col] = 5.678
# Sparse copies in each storage format, built before zeros become NaN.
Xcoo = coo_matrix(X)
Xcsr = csr_matrix(Xcoo)
Xcsc = csc_matrix(Xcoo)
X[X == 0] = np.nan
# Reference values computed with numpy, for comparison with test_math.
glob_mean = np.nanmean(X)
bias_B = np.nanmean(X - glob_mean, axis=0)
X_minusB = X - glob_mean - bias_B.reshape((1,-1))
bias_A_AB = np.nanmean(X_minusB, axis=1)
bias_A_A = np.nanmean(X - glob_mean, axis=1)
# Typed empty placeholders passed for the unused inputs of each case.
empty_1d = np.empty(0, dtype=ctypes.c_double)
empty_2d = np.empty((0,0), dtype=ctypes.c_double)
empty_int = np.empty(0, dtype=ctypes.c_int)
empty_size_t = np.empty(0, dtype=ctypes.c_size_t)
def get_biases():
    """Call test_math.py_initialize_biases with the input representation
    selected by the module-level xtype/user_bias/item_bias/has_trans flags.

    Returns (glob_mean, resA, resB).

    NOTE(review): the CSC arrays below are gated on xtype=="csr" (not
    "csc"), so CSR and CSC are passed together in the "csr" case --
    confirm that is what the C routine expects.
    """
    biasA = np.empty(m, dtype=ctypes.c_double)
    biasB = np.empty(n, dtype=ctypes.c_double)
    glob_mean, resA, resB = test_math.py_initialize_biases(
        biasA if user_bias else empty_1d,
        biasB if item_bias else empty_1d,
        X.copy() if xtype=="dense" else empty_2d,
        Xcoo.row.astype(ctypes.c_int) if xtype!="dense" else empty_int,
        Xcoo.col.astype(ctypes.c_int) if xtype!="dense" else empty_int,
        Xcoo.data.astype(ctypes.c_double).copy() if xtype!="dense" else empty_1d,
        Xcsr.indptr.astype(ctypes.c_size_t) if xtype=="csr" else empty_size_t,
        Xcsr.indices.astype(ctypes.c_int) if xtype=="csr" else empty_int,
        Xcsr.data.astype(ctypes.c_double).copy() if xtype=="csr" else empty_1d,
        Xcsc.indptr.astype(ctypes.c_size_t) if xtype=="csr" else empty_size_t,
        Xcsc.indices.astype(ctypes.c_int) if xtype=="csr" else empty_int,
        Xcsc.data.astype(ctypes.c_double).copy() if xtype=="csr" else empty_1d,
        m, n, user_bias, item_bias,
        has_trans,
        nthreads
    )
    return glob_mean, resA, resB
# Exhaustive sweep over input format, bias flags, and transposed layout.
xtry = ["dense", "sparse", "csr"]
btry = [False,True]
ttry = [False,True]
for xtype in xtry:
    for user_bias in btry:
        for item_bias in btry:
            for has_trans in ttry:
                # The transposed layout only applies to the dense input.
                if (has_trans) and (xtype!="dense"):
                    continue
                res_mean, resA, resB = get_biases()
                # Differences against the numpy reference values.
                diff0 = (glob_mean - res_mean)**2
                if user_bias:
                    if item_bias:
                        diff1 = np.linalg.norm(resA - bias_A_AB)
                    else:
                        diff1 = np.linalg.norm(resA - bias_A_A)
                else:
                    diff1 = 0.
                if item_bias:
                    diff2 = np.linalg.norm(resB - bias_B)
                else:
                    diff2 = 0.
                is_wrong = (diff0>1e-1) or (diff1>1e0) or (diff2>1e0) or np.isnan(diff0) or np.isnan(diff1) or np.isnan(diff2)
                if is_wrong:
                    print("\n\n\n****ERROR BELOW****", flush=True)
                print("[X %s] [b:%d,%d] [t:%d] - err:%.2f, %.2f, %.2f"
                      % (xtype[0], user_bias, item_bias, has_trans, diff0, diff1, diff2),
                      flush=True)
                if is_wrong:
                    print("****ERROR ABOVE****\n", flush=True)
|
import secrets
from typing import Any, Dict, List, Optional, Union
from pydantic import AnyHttpUrl, BaseSettings, EmailStr, HttpUrl, PostgresDsn, validator
class Settings(BaseSettings):
    """Application settings, loaded from the environment via pydantic."""
    # URL prefix for the version-1 API routes.
    API_V1_STR: str = "/api/v1"

    class Config:
        # Environment variable names must match field names exactly.
        case_sensitive = True

# Single shared settings instance used across the application.
settings = Settings()
|
# -*- coding: utf-8 -*-
# coding:utf-8
__author__ = 'lancelrq'
import json
from wejudge.core import *
from wejudge.utils import *
from wejudge.utils import tools
from wejudge import const
import apps.oauth2.libs as libs
import apps.education.libs as EduLibs
from django.http.response import HttpResponseRedirect
def authorize_education(request, sid):
    """
    OAuth2 authorization page for the education (teaching) system.
    :param request: incoming HTTP request
    :param sid: school id used to resolve the education controller
    :return: rendered consent page when not yet authorized, otherwise a
             redirect to the OAuth2 callback URL
    """
    wejudge_session = WeJudgeEducationSession(request)  # create the session
    response = WeJudgeResponse(wejudge_session)  # create the response
    education_manager = EduLibs.EducationController(request, response, sid)
    manager = libs.Oauth2Service(request, response)
    rel, url, ctx = manager.authorize()
    # Breadcrumb navigation (labels are user-facing strings, left as-is).
    response.set_navlist([
        const.apps.EDUCATION,
        [education_manager.school.name, 'education.school', (education_manager.school.id,)],
        ['WeJudgeๅผๆพๅนณๅฐ'],
        [manager.client.appname],
        ['ๅบ็จๆๆ']
    ])
    if not rel:
        # Not yet authorized: render the consent page.
        ctx.update({
            "school": education_manager.school,
            "page_name": "INDEX"
        })
        return response.render_page(request, 'oauth2/authorize/education.tpl', ctx)
    else:
        return HttpResponseRedirect(url)
@WeJudgePOSTRequire
def access_token(request):
    """
    AccessToken exchange endpoint (POST only).
    :param request: incoming POST request
    :return: JSON response with the access-token payload
    """
    response = JsonResponse({})
    manager = libs.Oauth2Service(request, response)
    rel = manager.oauth2_success(manager.access_token())
    response.content = json.dumps(rel)
    return response
@WeJudgePOSTRequire
def valid_access_token(request):
    """
    AccessToken validation endpoint (POST only).
    :param request: incoming POST request
    :return: JSON response with the validation result
    """
    response = JsonResponse({})
    manager = libs.Oauth2Service(request, response)
    rel = manager.oauth2_success(manager.valid_access_token())
    response.content = json.dumps(rel)
    return response
@WeJudgePOSTRequire
def refresh_token(request):
    """
    Refresh access token endpoint (POST only).
    :param request: incoming POST request
    :return: JSON response with the refreshed token payload
    """
    response = JsonResponse({})
    manager = libs.Oauth2Service(request, response)
    rel = manager.oauth2_success(manager.refresh_token())
    response.content = json.dumps(rel)
    return response
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
##*************************************************************************************************************
##*************************************************************************************************************
## ** File name:    init_hive_database.py
## ** Description:  database initialization script
## **
## **
## ** Input:
## ** Output:
## **
## **
## ** Author:       (name garbled by mis-encoding in the original header)
## ** Created:      2017-07-26
## ** Modified:
## ** Change log:
## **
## **
## **
## **
## **
## **
## ** ---------------------------------------------------------------------------------------
## **
## ** ---------------------------------------------------------------------------------------
## **
## ** Invocation:   pyspark init_hive_database.py $version
## **               e.g. pyspark test_pyspark_log.py v2.0
## **
## ******************************************************************************************
## ** XXXXXXXXXXXXX Company
## ** All Rights Reserved.
## ******************************************************************************************
## **
## ** Parameter notes:
## **   1. version     version directory to initialize
## **   2. start_time  start time
## **   3. bak_time    backup timestamp
# Imported packages
import os,sys
import datetime
import time
import math
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SQLContext,Row
from pyspark.sql.types import *
from pyspark.sql import HiveContext
# Validate the command-line arguments: a version directory is required.
if len(sys.argv)<2 or sys.argv[1].strip()=="":
    print "่พๅ
ฅๅๆฐ้่ฏฏ๏ผ่ฏท่พๅ
ฅ้่ฆๅไฝฟๅ็็ๆฌ็ฎๅฝ๏ผ"
    sys.exit(1)
# Initialization parameters derived from argv and the current time.
version=sys.argv[1]
startTime=datetime.datetime.now()
# Unix-timestamp suffix appended to every backup table name.
bakTime=str(int(time.time()))
# Database that receives the pre-change table backups.
bakDbName="tmp_db"
# Directory of this script; all version files are resolved relative to it.
currentPath=sys.argv[0][0:sys.argv[0].rfind("/")+1]
addDdlFile=currentPath+version+"/ddl_scripts/ddl_add_tables.hql"
modifiedDdlFile=currentPath+version+"/ddl_scripts/ddl_modified_tables.hql"
dataPath=currentPath+version+"/data_files/"
dbValidationCfg=currentPath+version+"/validation_cfg/db_table_cfg.txt"
tabValidationCfg=currentPath+version+"/validation_cfg/table_record_cfg.txt"
#============================================================================================
# ๅไฝฟๅ Spark
#============================================================================================
def initSparkContext():
    """Create a yarn-client SparkContext and return (HiveContext, SparkContext)."""
    # (The print message below is mis-encoded Chinese, preserved as-is.)
    print "ๅไฝฟๅ SparkContext"
    conf=SparkConf().setMaster("yarn-client").setAppName("init_hive_database")
    sc = SparkContext(conf=conf)
    return HiveContext(sc),sc
#============================================================================================
# ้ๆพ่ตๆบ
#============================================================================================
def closeSpackContext(sc,startTime):
    """Stop the SparkContext and log the total run time in seconds."""
    sc.stop()
    endTime=datetime.datetime.now()
    print '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+']*************็จๅบ่ฟ่กๆปๆถ้ฟไธบ'+str((endTime-startTime).seconds) + '็ง๏ผ*******************'
    print "======็ปๆ ้ๆพ่ตๆบ====="
#============================================================================================
# ๆง่กsql
#============================================================================================
def execSql(sqlText,flag):
    """Run *sqlText* through the module-level sqlContext.

    flag==1 additionally prints the statement and a success log line.
    Returns (0, DataFrame) on success, (-1, -1) on failure.
    """
    try:
        # Print the statement before executing when flag is 1.
        if flag==1:
            print "sql่ฏญๅฅ๏ผ"+sqlText
            resultData = sqlContext.sql(sqlText)
            print '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+']*************ๆง่ก่ฏญๅฅๆๅ*******************'
            return 0,resultData
        else:
            resultData = sqlContext.sql(sqlText)
            return 0,resultData
    except Exception,e:
        print '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+']*************ๆง่ก่ฏญๅฅๅคฑ่ดฅ*******************'
        print Exception,":",e
        return -1,-1
#============================================================================================
# ๅคไปฝ่กจ
#============================================================================================
def backUp(dbName,tableName):
    """Back up dbName.tableName into the tmp_db backup database via CTAS.

    Returns 1 when the source table does not exist, 0 when a backup for
    this run already exists or was created, -1 when the CTAS failed.
    """
    print "ๅคไปฝ่กจ๏ผ"+dbName+"."+tableName
    descTab="desc "+dbName+"."+tableName
    returnCode,resultData=execSql(descTab,1)
    # Source table does not exist: nothing to back up.
    if returnCode==(-1):
        print dbName+"."+tableName,"่กจไธๅญๅจ๏ผ"
        return 1
    bakTabName=bakDbName+"."+tableName+bakTime
    descTab="desc "+bakTabName
    returnCode,resultData=execSql(descTab,1)
    # A backup from this run already exists: done.
    if returnCode==0:
        return returnCode
    backUpSsql="create table "+bakTabName+" as select * from "+dbName+"."+tableName
    returnCode,resultData=execSql(backUpSsql,1)
    return returnCode
#============================================================================================
# ๆๅ่กจ่ๆฌโโไปฅๅๆก่ฏญๅฅๆง่ก
#============================================================================================
def splitSql(flag,fileName):
    """Read an HQL script, split it into statements at ';', and execute
    each one (backing up the affected table first where applicable).

    flag==0  create/drop-table scripts; flag!=0  alter-table scripts.
    Returns [ok_tables, failed_tables] as a pair of sets of db.table names.
    """
    print "ๆง่กDDL่ๆฌ"
    sqlText=""
    tmpStr=""
    tabList=[set(),set()]
    for line in open(fileName):
        # Skip comment lines (anything containing '--').
        if line.find('--')<0:
            index=line.find(';')
            if index>=0:
                sqlText=sqlText+line[0:index]
                # A line may hold the tail of one statement and the head
                # of the next; keep the remainder for later.
                tmpStr=line[index+1:]
                returnCode=0
                # flag==0: create-table (or drop-table) scripts.
                if flag==0:
                    # Recognize, assemble, and execute a CREATE statement.
                    if sqlText.find("create ")>=0 and sqlText.find(" table ")>=0:
                        tmpStr1=sqlText[0:sqlText.find("(")].replace("create ","").replace("table ","").strip()
                        dbName=tmpStr1[0:tmpStr1.find(".")]
                        tabName=tmpStr1[tmpStr1.find(".")+1:]
                        # NOTE(review): returnCode is always 0 at this
                        # point in the create branch, so this message can
                        # never print -- looks like dead code; confirm.
                        if returnCode==-1:
                            print tmpStr1,"่กจๅคไปฝๅคฑ่ดฅ๏ผ่ฏทๆๅจๆง่กๅคไปฝๅๅๅปบ่กจๆไฝ๏ผ"
                        returnCode,resultData=execSql(sqlText,1)
                        if returnCode ==0:
                            tabList[0].add(tmpStr1)
                        else:
                            tabList[1].add(tmpStr1)
                    else:
                        # DROP statement: strip keywords to get db.table.
                        tmpStr1=sqlText.replace("drop ","").replace("table ","").replace("if ","").replace("exists ","").strip()
                        dbName=tmpStr1[0:tmpStr1.find(".")]
                        tabName=tmpStr1[tmpStr1.find(".")+1:]
                        returnCode=backUp(dbName,tabName)
                        # Drop the table only after a successful backup.
                        if returnCode==0:
                            execSql(sqlText,1)
                # flag!=0: alter-table scripts.
                else:
                    tmpStr1=sqlText.replace("alter ","").replace("table ","").strip()
                    tmpStr1=tmpStr1[0:tmpStr1.find(" ")]
                    dbName=tmpStr1[0:tmpStr1.find(".")]
                    tabName=tmpStr1[tmpStr1.find(".")+1:]
                    returnCode=backUp(dbName,tabName)
                    # Alter the table only after a successful backup.
                    if returnCode==0:
                        returnCode,resultData=execSql(sqlText,1)
                        if returnCode ==0:
                            tabList[0].add(tmpStr1)
                        else:
                            tabList[1].add(tmpStr1)
                    else:
                        print tmpStr1,"่กจๅคไปฝๅคฑ่ดฅ๏ผ่ฏทๆๅจๆง่กๅคไปฝๅไฟฎๆน่กจๆไฝ๏ผ"
                # Carry the remainder over as the start of the next statement.
                sqlText=tmpStr
            else:
                # No ';' yet: the statement continues on the next line.
                sqlText =sqlText+line
    return tabList
#============================================================================================
# ๆฐๆฎๅไฝฟๅ
#============================================================================================
def loadData(tabCfgFile):
    """For each 'db.table:count' line in *tabCfgFile*, back the table up
    and then overwrite it from the matching data_files/<table>.txt."""
    print "ๆฐๆฎๅฏผๅ
ฅ"
    for line in open(tabCfgFile):
        tmpStr1=line[0:line.find(":")]
        dbName=tmpStr1[0:tmpStr1.find(".")]
        tabName=tmpStr1[tmpStr1.find(".")+1:]
        tabRecordCnt=line[line.find(":")+1:]
        returnCode=backUp(dbName,tabName)
        # Load only after a successful backup.
        if returnCode==0:
            dataFile=dataPath+tabName+".txt"
            sqlText="load data local inpath '{dataFile}' overwrite into table {dbName}.{tabName}".format(dataFile=dataFile,tabName=tabName,dbName=dbName)
            execSql(sqlText,1)
        else:
            print tmpStr1,"่กจๅคไปฝๅคฑ่ดฅ๏ผ่ฏทๆๅจๆง่กๅคไปฝๅๅไฝฟๅ่กจๆไฝ๏ผ"
#============================================================================================
# ๆฐๆฎๅบ่กจ้ช่ฏ
#============================================================================================
def validationDbTab(dbCfgFile):
    """Compare each configured database's expected table count ('db:count'
    per line) against `show tables` and print pass/fail per database."""
    print "ๆฐๆฎๅบ่กจไธชๆฐ้ช่ฏ"
    for line in open(dbCfgFile):
        dbName=line[0:line.find(":")]
        tabCnt=int(line[line.find(":")+1:])
        execSql("use "+dbName,0)
        returnCode,resultData=execSql("show tables",0)
        if returnCode==0:
            curentTabCnt=len(resultData.collect())
            if curentTabCnt==tabCnt:
                print "--------",dbName,"ๅบ๏ผ้ช่ฏ้่ฟ๏ผ้
็ฝฎ็่กจไธชๆฐ",tabCnt,"๏ผๅฝๅๅบ่กจไธชๆฐ",curentTabCnt,"--------"
            else:
                print "--------",dbName,"ๅบ๏ผ้ช่ฏไธ้่ฟ๏ผ้
็ฝฎ็่กจไธชๆฐ",tabCnt,"๏ผๅฝๅๅบ่กจไธชๆฐ",curentTabCnt,"--------"
#============================================================================================
# ่กจๆฐๆฎ่ฎฐๅฝ้ช่ฏ
#============================================================================================
def validationTabRecord(tabCfgFile):
    """Compare each configured table's expected row count
    ('db.table:count' per line) against a live count(*) and print
    pass/fail per table."""
    print "ๆฐๆฎ่กจ่ฎฐๅฝ้ช่ฏ"
    for line in open(tabCfgFile):
        tmpStr1=line[0:line.find(":")]
        dbName=tmpStr1[0:tmpStr1.find(".")]
        tabName=tmpStr1[tmpStr1.find(".")+1:]
        tabRecordCnt=int(line[line.find(":")+1:])
        sqlText="select count(*) as cnt from "+dbName+"."+tabName
        returnCode,resultData=execSql(sqlText,0)
        if returnCode==0:
            curentTabRecordCnt=resultData.collect()[0][0]
            if curentTabRecordCnt==tabRecordCnt:
                print "--------",tmpStr1,"่กจ๏ผ้ช่ฏ้่ฟ๏ผ้
็ฝฎ็่กจ่ฎฐๅฝๆฐ",tabRecordCnt,"๏ผๅฝๅๅบ่กจ่ฎฐๅฝๆฐ",curentTabRecordCnt,"--------"
            else:
                print "--------",tmpStr1,"่กจ๏ผ้ช่ฏไธ้่ฟ๏ผ้
็ฝฎ็่กจ่ฎฐๅฝๆฐ",tabRecordCnt,"๏ผๅฝๅๅบ่กจ่ฎฐๅฝๆฐ",curentTabRecordCnt,"--------"
#============================================================================================
# Print the execution result of the DDL scripts
#============================================================================================
def printResult(item,tabList):
    """Print the success/failure table sets for category *item*.

    :param item: category label (e.g. 'new' or 'modified') printed as a header.
    :param tabList: sequence of table-name sets; the first set is reported as
        successes, every subsequent set as failures.
    """
    # flag==1 only for the first set (successes); drops to 0 afterwards.
    flag=1
    print item,"่กจ"
    for tabSet in tabList:
        if flag==1:
            print "ๆๅ{0}ไธช๏ผ".format(len(tabSet))
        else:
            print "ๅคฑ่ดฅ{0}ไธช๏ผ".format(len(tabSet))
        for tab in tabSet:
            print "--------",tab
        flag=0
    print "**********"
#============================================================================================
# Program entry point
#============================================================================================
if __name__=="__main__":
    sqlContext,sc=initSparkContext()
    try:
        print "version:",version,"\nstartTime:",startTime.strftime('%Y-%m-%d %H:%M:%S'),"\nbakTime",bakTime
        print "===================================================================================="
        # New tables
        addTabList=splitSql(0,addDdlFile)
        print "===================================================================================="
        # Modified tables
        modifiedTabList=splitSql(1,modifiedDdlFile)
        print "===================================================================================="
        # Restore backed-up data
        loadData(tabValidationCfg)
        print "===================================================================================="
        # Report execution results
        printResult("ๆฐๅข",addTabList)
        printResult("ไฟฎๆน",modifiedTabList)
        print "===================================================================================="
        # Data validation
        #validationDbTab(dbValidationCfg)
        validationTabRecord(tabValidationCfg)
    except Exception,e:
        print Exception,":",e
        sys.exit(1)
    finally:
        # Always release the Spark context, even on failure.
        closeSpackContext(sc,startTime)
|
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
#!/usr/bin/env python
import pickle as pickle
import glob, os
import numpy as np
import argparse
import time
import seaborn as sns
import matplotlib
#matplotlib.use('Agg')
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pandas as pd
import sys,inspect
import utils
from utils import *
import sys
sys.path.append('../Methods/Models/Utils')
import global_utils
if __name__ == '__main__':
    # Check for command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--sysname", help="type of chaotic system", type=str, default='Lorenz3D')
    parser.add_argument('--tidx', type=int, default=2)
    parser.add_argument('--used', type=int, default=0)
    parser.add_argument('--plot', type=int, default=0, help="0: RMSE, 1: spectral")
    args = parser.parse_args()
    sysname, tidx = args.sysname, args.tidx
    # Resolve paths relative to this script's parent directory.
    current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    results_dir = os.path.dirname(current_dir) + "/Results"
    print(results_dir)
    eval_path = os.path.join(results_dir, '{}/Evaluation_Data'.format(sysname))
    print(eval_path)
    model_path = os.path.join(results_dir, '{}/Trained_Models'.format(sysname))
    print(model_path)
    fig_path = os.path.join(results_dir, '{}/Eval_Figures'.format(sysname))
    if os.path.isdir(fig_path) == False:
        os.mkdir(fig_path)
    # Maximum Lyapunov exponent and time step per system (used to rescale time
    # axes into Lyapunov units).
    maxLyp = 1.0
    dt = 0.01
    if sysname == 'Lorenz3D':
        maxLyp = 0.9056
    elif 'Lorenz96_F10' in sysname:
        maxLyp = 2.27
    elif 'Lorenz96_F8' in sysname:
        maxLyp = 1.68
    elif 'KuramotoSivashinskyGP64' in sysname:
        maxLyp = 20
        dt = 0.25
    # list of models: each entry is [result-folder name, plot label]
    models_1000 = [\
        ['hqrc_pinv-RDIM_1-N_used_1000-DL_200-Nqr_5-A_0.1-J_2.0-fJ_1-V_10-NL_1-IPL_500-IUL_0-REG_1e-11-AU_0-NICS_100', 'HQR,V=10'],
        ['hqrc_pinv-RDIM_1-N_used_1000-DL_200-Nqr_5-A_0.1-J_2.0-fJ_1-V_15-NL_1-IPL_500-IUL_0-REG_1e-07-AU_0-NICS_100', 'HQR,V=15'],
        ['hqrc_pinv-RDIM_1-N_used_1000-DL_200-Nqr_5-A_0.0-J_2.0-fJ_1-V_20-NL_1-IPL_500-IUL_0-REG_1e-09-AU_0-NICS_100', 'HQR,V=20'],
        #['hqrc_pinv-RDIM_1-N_used_1000-DL_200-Nqr_5-A_0.0-J_2.0-fJ_1-V_25-NL_1-IPL_500-IUL_0-REG_1e-09-AU_0-NICS_100', 'HQR,V=25'],
        ['ESN_pinv-RDIM_1-N_used_1000-SIZE_80-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_200-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN-80'],
        ['ESN_pinv-RDIM_1-N_used_1000-SIZE_120-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_200-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN-120'],
        ['ESN_pinv-RDIM_1-N_used_1000-SIZE_150-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_200-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN-150'],
        #['ESN_pinv-RDIM_1-N_used_1000-SIZE_5000-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_200-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN-5000'],
        ['RNN-lstm-RDIM_1-N_used_1000-NLAY_1-SLAY_150-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'LSTM-150'],
        ['RNN-lstm-RDIM_1-N_used_1000-NLAY_2-SLAY_1000-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'LSTM-1000(2)'],
        ['RNN-lstm-RDIM_1-N_used_1000-NLAY_1-SLAY_3000-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'LSTM-3000'],
        ['RNN-gru-RDIM_1-N_used_1000-NLAY_1-SLAY_80-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'GRU-80'],
        ['RNN-gru-RDIM_1-N_used_1000-NLAY_3-SLAY_120-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'GRU-120(3)'],
        ['RNN-gru-RDIM_1-N_used_1000-NLAY_3-SLAY_3000-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'GRU-3000(3)'],
    ]
    models_10000 = [\
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_10-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-09-AU_0-NICS_100', 'HQR,V=10'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_100', 'HQR,V=15'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_20-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_100', 'HQR,V=20'],
        ['ESN_pinv-RDIM_1-N_used_10000-SIZE_150-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_1000-REG_1e-09-NICS_100', 'ESN-150'],
        ['ESN_pinv-RDIM_1-N_used_10000-SIZE_500-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN-500'],
        ['ESN_pinv-RDIM_1-N_used_10000-SIZE_1000-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN-1000'],
        ['RNN-lstm-RDIM_1-N_used_10000-NLAY_3-SLAY_100-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_1000-NL_1-NICS_100', 'LSTM-150(3)'],
        ['RNN-lstm-RDIM_1-N_used_10000-NLAY_1-SLAY_500-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_500-NL_1-NICS_100', 'LSTM-500'],
        ['RNN-lstm-RDIM_1-N_used_10000-NLAY_1-SLAY_1000-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_500-NL_1-NICS_100', 'LSTM-1000'],
        ['RNN-gru-RDIM_1-N_used_10000-NLAY_1-SLAY_100-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_500-NL_1-NICS_100', 'GRU-100'],
        ['RNN-gru-RDIM_1-N_used_10000-NLAY_1-SLAY_1000-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_500-NL_1-NICS_100', 'GRU-1000'],
        ['RNN-gru-RDIM_1-N_used_10000-NLAY_1-SLAY_3000-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_1000-NL_1-NICS_100', 'GRU-3000'],
    ]
    models_100000 = [\
        ['hqrc_pinv-RDIM_1-N_used_100000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_10-NL_1-IPL_2000-IUL_0-REG_1e-07-AU_0-NICS_100', 'HQR,V=10'],
        ['hqrc_pinv-RDIM_1-N_used_100000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_15-NL_1-IPL_2000-IUL_0-REG_1e-09-AU_0-NICS_100', 'HQR,V=15'],
        ['hqrc_pinv-RDIM_1-N_used_100000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_20-NL_1-IPL_2000-IUL_0-REG_1e-09-AU_0-NICS_100', 'HQR,V=20'],
        ['ESN_pinv-RDIM_1-N_used_100000-SIZE_500-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_2000-REG_1e-07-NICS_100', 'ESN-500'],
        ['ESN_pinv-RDIM_1-N_used_100000-SIZE_1000-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_2000-REG_1e-07-NICS_100', 'ESN-1000'],
        ['ESN_pinv-RDIM_1-N_used_100000-SIZE_1500-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_2000-REG_1e-07-NICS_100', 'ESN-1500'],
        ['RNN-lstm-RDIM_1-N_used_100000-NLAY_2-SLAY_80-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_1000-NL_1-NICS_100', 'LSTM-80(2)'],
        ['RNN-lstm-RDIM_1-N_used_100000-NLAY_2-SLAY_100-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_1000-NL_1-NICS_100', 'LSTM-100(2)'],
        ['RNN-lstm-RDIM_1-N_used_100000-NLAY_1-SLAY_150-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_2000-IPL_2000-NL_1-NICS_100','LSTM-150'],
        ['RNN-gru-RDIM_1-N_used_100000-NLAY_3-SLAY_150-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_2000-IPL_2000-NL_1-NICS_100', 'GRU-150(3)'],
        ['RNN-gru-RDIM_1-N_used_100000-NLAY_1-SLAY_80-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_2000-IPL_2000-NL_1-NICS_100', 'GRU-80'],
        ['RNN-gru-RDIM_1-N_used_100000-NLAY_1-SLAY_1500-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_2000-IPL_2000-NL_1-NICS_100', 'GRU-1500']
    ]
    models_compare = [\
        ['hqrc_pinv-RDIM_1-N_used_1000-DL_200-Nqr_5-A_0.1-J_2.0-fJ_1-V_15-NL_1-IPL_500-IUL_0-REG_1e-07-AU_0-NICS_100', 'HQR,10^3'],
        ['ESN_pinv-RDIM_1-N_used_1000-SIZE_80-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_200-NL_1-IPL_500-REG_1e-09-NICS_100', 'ESN,10^3'],
        ['RNN-lstm-RDIM_1-N_used_1000-NLAY_1-SLAY_3000-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'LSTM,10^3'],
        ['RNN-gru-RDIM_1-N_used_1000-NLAY_3-SLAY_3000-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_100-IPL_500-NL_1-NICS_100', 'GRU,10^3'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_100', 'HQR,10^4'],
        ['ESN_pinv-RDIM_1-N_used_10000-SIZE_150-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_1000-REG_1e-09-NICS_100', 'ESN,10^4'],
        ['RNN-lstm-RDIM_1-N_used_10000-NLAY_3-SLAY_100-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_1000-NL_1-NICS_100', 'LSTM,10^4'],
        ['RNN-gru-RDIM_1-N_used_10000-NLAY_1-SLAY_1000-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_500-NL_1-NICS_100', 'GRU,10^4'],
        ['hqrc_pinv-RDIM_1-N_used_100000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_15-NL_1-IPL_2000-IUL_0-REG_1e-09-AU_0-NICS_100', 'HQR,10^5'],
        ['ESN_pinv-RDIM_1-N_used_100000-SIZE_500-D_10.0-RADIUS_0.9-SIGMA_1.0-DL_2000-NL_1-IPL_2000-REG_1e-07-NICS_100', 'ESN,10^5'],
        ['RNN-lstm-RDIM_1-N_used_100000-NLAY_2-SLAY_80-ISH_statefull-SL_16-PL_4-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_1000-IPL_1000-NL_1-NICS_100', 'LSTM,10^5'],
        ['RNN-gru-RDIM_1-N_used_100000-NLAY_3-SLAY_150-ISH_statefull-SL_16-PL_16-LR_0.001-DKP_1.0-ZKP_1.0-HSPL_2000-IPL_2000-NL_1-NICS_100', 'GRU,10^5'],
    ]
    models_alpha = [\
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_100', 'alpha=0.0'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.1-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_100', 'alpha=0.1'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.3-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_100', 'alpha=0.3'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.5-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-09-AU_0-NICS_100', 'alpha=0.5'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.7-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-09-AU_0-NICS_100', 'alpha=0.7'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.9-J_2.0-fJ_1-V_15-TAU_4.0-NL_1-IPL_1000-IUL_0-REG_1e-11-AU_0-NICS_100', 'alpha=0.9'],
    ]
    models_demo = [\
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_0-V_10-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_2', 'V=10,alpha=0.0'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.1-J_2.0-fJ_0-V_10-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_2', 'V=10,alpha=0.1'],
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_0-V_15-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_2', 'V=15,alpha=0.0'],
        #['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.1-J_2.0-fJ_0-V_15-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_2', 'V=15,alpha=0.1']
        ['hqrc_pinv-RDIM_1-N_used_10000-DL_2000-Nqr_5-A_0.0-J_2.0-fJ_0-V_5-NL_1-IPL_1000-IUL_0-REG_1e-07-AU_0-NICS_2', 'V=5,alpha=0.0']
    ]
    # Select which model group to evaluate; 'samples' picks the subset shown
    # in the per-sample attractor/weight plots.
    trained_models = None
    if args.used == 5:
        models = [[os.path.join(eval_path, m[0]), m[1]] for m in models_demo]
        trained_models = [[os.path.join(model_path, m[0]), m[1]] for m in models_demo]
        samples = ['V=10,alpha=0.0', 'V=10,alpha=0.1', 'V=15,alpha=0.0', 'V=5,alpha=0.0']
        title = 'demo'
    elif args.used == 4:
        models = [[os.path.join(eval_path, m[0]), m[1]] for m in models_alpha]
        samples = ['alpha=0.0', 'alpha=0.1', 'alpha=0.5', 'alpha=0.9']
        title = 'compare connection strength'
    elif args.used == 3:
        models = [[os.path.join(eval_path, m[0]), m[1]] for m in models_compare]
        samples = ['HQR,10^3', '']
        title = 'compare num samples'
    elif args.used == 2:
        models = [[os.path.join(eval_path, m[0]), m[1]] for m in models_100000]
        samples = ['HQR,V=15', 'ESN-500', 'LSTM-80(2)', 'GRU-150(3)']
        title = '10^5'
    elif args.used == 1:
        models = [[os.path.join(eval_path, m[0]), m[1]] for m in models_10000]
        samples = ['HQR,V=10', 'ESN-500', 'LSTM-500', 'GRU-1000']
        title = '10^4'
    else:
        models = [[os.path.join(eval_path, m[0]), m[1]] for m in models_1000]
        samples = ['HQR,V=10', 'ESN-150', 'LSTM-150', 'GRU-120(3)']
        title = '1000'
    # Per-label aggregates gathered from the pickled evaluation results.
    rmse_dict = dict()
    vpt_dict = dict()
    targets = dict()
    outputs = dict()
    sp_outputs = dict()
    sp_targets = dict()
    Wouts = dict()
    coeffs = dict()
    for i in range(len(models)):
        rfolder, label = models[i][0], models[i][1]
        fname = os.path.join(rfolder, 'results.pickle')
        if os.path.isfile(fname):
            with open(fname, 'rb') as rfile:
                try:
                    rs = pickle.load(rfile)
                except:
                    # Unreadable/corrupt result file: skip this model.
                    continue
            #print(rs.keys())
            qs = QResults()
            qs.rmnse_avg_test = rs['rmnse_avg_TEST']
            qs.rmnse_avg_train = rs['rmnse_avg_TRAIN']
            qs.n_pred_005_avg_test = rs['num_accurate_pred_005_avg_TEST']
            qs.n_pred_005_avg_train = rs['num_accurate_pred_005_avg_TRAIN']
            qs.n_pred_050_avg_test = rs['num_accurate_pred_050_avg_TEST']
            qs.n_pred_050_avg_train = rs['num_accurate_pred_050_avg_TRAIN']
            qs.model_name = rs['model_name']
            #if qs.rmnse_avg_test != np.inf and qs.rmnse_avg_train != np.inf:
            #print(rs.keys())
            #print(qs.model_name)
            #print('train={}, test={}'.format(qs.rmnse_avg_train, qs.rmnse_avg_test))
            #qs.info()
            pred_test = rs['predictions_all_TEST']
            truth_test = rs['truths_all_TEST']
            M = len(pred_test)
            print('{} Number of test'.format(qs.model_name), M)
            # NRMSE curve and valid-prediction-time per test trajectory.
            rmsels = []
            vpts = []
            for j in range(M):
                rmsels.append(calNRMSE(pred_test[j], truth_test[j]))
                vpts.append(calVPT(pred_test[j], truth_test[j], maxLyp=maxLyp))
            rmse_dict[label] = np.mean(np.array(rmsels), axis=0)
            print(rmse_dict[label].shape)
            vpt_dict[label] = np.array(vpts)
            # Keep trajectory 'tidx' for the example plots.
            targets[label] = truth_test[tidx]
            outputs[label] = pred_test[tidx]
            # For frequency
            sp_outputs[label] = rs['sp_pred_TEST']
            sp_targets[label] = rs['sp_true_TEST']
        else:
            print('Not found {}'.format(fname))
        if trained_models is not None:
            # Also load trained-model weights (demo mode only).
            rfolder, label = trained_models[i][0], trained_models[i][1]
            fname = os.path.join(rfolder, 'data.pickle')
            if os.path.isfile(fname):
                print('File existed: ', fname)
                with open(fname, 'rb') as rfile:
                    #try:
                    rs = pickle.load(rfile)
                    #except:
                    #    continue
                print(rs.keys())
                coeffs[label] = np.array(rs['coeffs'])
                # Drop the bias row of W_out, one row per coefficient group.
                Wouts[label] = rs['W_out'][:-1].reshape((coeffs[label].shape[0], -1))
                print(Wouts[label].shape, coeffs[label].shape)
            else:
                print('Not found saved model {}'.format(fname))
    # PLOTTING
    cmap = plt.get_cmap("RdBu")
    ecmap = plt.get_cmap("summer_r")
    #plt.style.use('seaborn-colorblind')
    plt.rc('font', family='serif')
    plt.rc('mathtext', fontset='cm')
    plt.rcParams['font.size']=9
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    #fig, axs = plt.subplots(1, 2, figsize=(16, 4))
    #fig.subplots_adjust(hspace=0.4, wspace = 0.4)
    #axs = axs.ravel()
    fig = plt.figure(figsize=(18, 16))
    fig.subplots_adjust(hspace=0.6, wspace = 0.2)
    # Plot box plot of VPT distributions over all models.
    ax1 = plt.subplot2grid((7,4), (0,0), colspan=4, rowspan=2)
    df = pd.DataFrame(vpt_dict)
    #sns.stripplot(ax=ax, data=df, jitter=True, linewidth=1, alpha=0.9, marker='o', size=6)
    sns.boxplot(data=df, ax = ax1)
    ax1.set_ylim([0, 8.0])
    ax1.set_ylabel('VPT')
    # Plot test samples: time series plus delay-embedded 3D attractor.
    ns = len(samples)
    for i in range(ns):
        label = samples[i]
        if label not in targets.keys():
            continue
        ax = plt.subplot2grid((7, 4), (2,i), colspan=1, rowspan=1)
        # Time axis rescaled into Lyapunov-time units.
        ts = np.array(range(len(targets[label]))) * dt / maxLyp
        ax.plot(ts, targets[label], label='Target')
        ax.plot(ts, outputs[label], label='Prediction')
        ax.set_title(label)
        ax.set_xlabel('$t\Lambda_1$', fontsize=16)
        #ax.set_xlim([0, 5])
        # Delay embedding with lag 'tau' for the 3D attractor view.
        bx = plt.subplot2grid((7, 4), (3,i), projection='3d', colspan=1, rowspan=2)
        tau = 10
        data = targets[label]
        data_lag0 = data[:-2].flatten()
        data_lag1 = np.roll(data, -tau)[:-2].flatten()
        data_lag2 = np.roll(data, -2 * tau)[:-2].flatten()
        bx.plot3D(data_lag0, data_lag1, data_lag2, label='Target')
        data = outputs[label]
        data_lag0 = data[:-2].flatten()
        data_lag1 = np.roll(data, -tau)[:-2].flatten()
        data_lag2 = np.roll(data, -2 * tau)[:-2].flatten()
        bx.plot3D(data_lag0, data_lag1, data_lag2, label='Predict')
        bx.set_xticks([])
        bx.set_yticks([])
        bx.set_zticks([])
        bx.w_xaxis.set_pane_color((0.0, 0.0, 0.0, 0.9))
        bx.w_yaxis.set_pane_color((0.0, 0.0, 0.0, 0.9))
        bx.w_zaxis.set_pane_color((0.0, 0.0, 0.0, 0.9))
        bx.grid(False)
        if i == 0:
            bx.legend()
        if len(Wouts.keys()) > 0:
            # Density plots of output weights and feedback coefficients.
            cx = plt.subplot2grid((7, 4), (5,i), colspan=1, rowspan=1)
            for j in range(Wouts[label].shape[0]):
                Wout = Wouts[label][j].ravel()
                sns.kdeplot(Wout, legend=False, shade=True, ax=cx, label='Wo={}'.format(j+1))
            dx = plt.subplot2grid((7, 4), (6,i), colspan=1, rowspan=1)
            for j in range(coeffs[label].shape[0]):
                coef = coeffs[label][j].ravel()
                sns.kdeplot(coef, legend=False, shade=True, ax=dx, label='Wf={}'.format(j+1))
            if i == 0:
                cx.legend()
                dx.legend()
    #bx.set_facecolor('k')
    # ax2 = plt.subplot2grid((3,6), (0,4), colspan=2, rowspan=3)
    # if args.plot == 0:
    #     # Plot NRMSE curves
    #     for k in rmse_dict.keys():
    #         rmse = rmse_dict[k]
    #         ts = np.array(range(len(rmse))) * dt / maxLyp
    #         ax2.plot(ts, rmse, label=k)
    # else:
    #     # Plot spectral
    #     for i in range(4):
    #         label = samples[i]
    #         if label not in sp_targets.keys():
    #             continue
    #         sp_truth_test = sp_targets[label]
    #         sp_pred_test = sp_outputs[label]
    #         M = int(len(sp_pred_test) / 5)
    #         fids = range(M)
    #         ax2.scatter(fids, sp_pred_test[fids], marker='o', facecolors='none', edgecolor=colors[i], label=label)
    #         #fx.plot(fids, sp_pred_test)
    #         if i == 0:
    #             ax2.plot(fids, sp_truth_test[fids], color='k', label='Target')
    #     ax2.legend()
    # #ax1.set_ylim([0, 1.0])
    # #ax2.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
    # ax2.set_title('used {} samples'.format(title))
    # Plot frequency
    # Save the figure in several formats (PNG kept opaque).
    outbase = 'tidx_{}_{}_v2'.format(tidx, title)
    outbase = os.path.join(fig_path, outbase)
    for ftype in ['pdf', 'svg', 'png']:
        transparent = (ftype != 'png')
        plt.savefig('{}_{}_rs4.{}'.format(outbase, sysname, ftype), bbox_inches='tight', transparent=transparent, dpi=600)
    plt.show()
|
#!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2017, Gregor von Laszewski, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
"""
Cloudmesh CMD5 setup.
"""
import io
from setuptools import find_packages, setup
def readfile(filename):
    """Return the full contents of *filename* decoded as UTF-8.

    :param filename: path of the file to read
    :return: file content as a single string
    """
    with io.open(filename, encoding="utf-8") as handle:
        content = handle.read()
    return content
# Runtime dependencies, one per line.
# NOTE(review): 'requiers' is a long-standing typo of 'requires'; renaming it
# would be a code change, so the name is kept as-is.
requiers = """
oyaml
docopt
requests
colorama
tabulate
munch
pillow
markdown==3.0
cloudmesh-common
cloudmesh-git
ebooklib
""".splitlines()
# dependency_links = ['http://github.com/nicolaiarocci/eve.git@develop']
# The package version is maintained in a separate VERSION file.
version = readfile("VERSION").strip()
with open('README.md') as f:
    long_description = f.read()
# Package metadata.
NAME = "cyberaide-bookmanager"
DESCRIPTION = "Creating Books from content in different git and other cloud services."
AUTHOR = "Gregor von Laszewski"
AUTHOR_EMAIL = "laszewski@gmail.com"
URL = "https://github.com/cyberaide/bookmanager"
setup(
    name=NAME,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=version,
    license="Apache 2.0",
    url=URL,
    packages=find_packages(),
    keywords='pandoc book proceedings markdown epub',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: MacOS X",
        "Environment :: OpenStack",
        "Environment :: Other Environment",
        "Environment :: Plugins",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows :: Windows 10",
        "Operating System :: Unix",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Internet",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: User Interfaces",
        "Topic :: System",
        "Topic :: System :: Distributed Computing",
        "Topic :: System :: Shells",
        "Topic :: Utilities",
    ],
    include_package_data=True,
    install_requires=requiers,
    tests_require=[
        "flake8",
        "coverage",
        "bump2version",
        "wheel",
        "twine",
        "invoke",
        "pytest"
    ],
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'bookmanager = bookmanager.command:main',
        ],
    }
)
|
from collections import deque
import numpy as np
class ExperienceMemory(object):
    """Fixed-size ring buffer of (state, action, reward, done) transitions.

    States are stored as uint8 frames of shape (84, 84, history) to save
    memory; float32 states in [0, 1] are rescaled to [0, 255] on insert and
    samples are returned rescaled back to float32 in [0, 1].
    """

    def __init__(self, max_memory_length, history=4):
        """Pre-allocate flat numpy arrays for *max_memory_length* transitions.

        :param max_memory_length: ring-buffer capacity.
        :param history: number of stacked frames per state.
        """
        self.history = history
        self.state_memory = np.zeros([max_memory_length, 84, 84, self.history], dtype=np.uint8)
        self.action_memory = np.zeros([max_memory_length], dtype=np.uint8)
        self.reward_memory = np.zeros([max_memory_length], dtype=np.float16)
        self.done_memory = np.zeros([max_memory_length], dtype=np.uint8)
        self.counter = 0  # number of valid entries (saturates at capacity)
        self.ptr = 0      # next write position
        self.max_memory_length = max_memory_length

    def get_length(self):
        """Return the number of transitions currently stored."""
        return self.counter

    # Store a new memory
    def store_transition(self, state, action, reward, done):
        """Write one transition at the current ring position and advance it."""
        idx = self.ptr % self.max_memory_length
        if state.dtype == np.float32:
            # Normalized float frames are packed into uint8.
            self.state_memory[idx] = (state * 255.).astype(np.uint8)
        else:
            self.state_memory[idx] = state
        self.action_memory[idx] = action
        self.reward_memory[idx] = reward
        self.done_memory[idx] = int(done)
        self.ptr = (self.ptr + 1) % self.max_memory_length
        self.counter = min(self.counter + 1, self.max_memory_length)

    def store_multiple_transitions(self, states, actions, rewards, dones):
        """Store a batch of transitions, one call per row of *states*."""
        for ind in range(states.shape[0]):
            self.store_transition(states[ind], actions[ind], rewards[ind], dones[ind])

    # Get out batch_size samples from the memory
    def sample_buffer(self, batch_size):
        """Sample *batch_size* distinct transitions uniformly (without replacement).

        The most recently written slot (index ptr-1; -1 wraps to the last slot
        when ptr == 0) is excluded because its successor state is not the next
        observation but the oldest entry in the ring.

        :return: (states, actions, rewards, next_states, dones) as float32 /
            int32 numpy arrays; states are rescaled to [0, 1].
        """
        prob_choose_inds = np.ones([self.counter], dtype=np.float32)
        prob_choose_inds[self.ptr - 1] = 0
        prob_choose_inds = prob_choose_inds / np.sum(prob_choose_inds)
        sample_id = np.random.choice(self.counter, size=[batch_size], replace=False, p=prob_choose_inds)
        states = self.state_memory[sample_id % self.counter].astype(np.float32) / 255.
        actions = self.action_memory[sample_id % self.counter].astype(np.int32)
        rewards = self.reward_memory[sample_id % self.counter].astype(np.float32)
        next_states = self.state_memory[(sample_id + 1) % self.counter].astype(np.float32) / 255.
        dones = self.done_memory[sample_id % self.counter].astype(np.int32)
        return states, actions, rewards, next_states, dones
if __name__ == '__main__':
    from tqdm import tqdm

    # Smoke test: fill a large buffer completely, then keep writing past
    # capacity to exercise the ring-buffer wrap-around while sampling.
    test_size = 512000
    e = ExperienceMemory(test_size)
    pbar = tqdm(total = test_size)
    while (e.get_length() < test_size):
        pbar.update(1)
        e.store_transition( np.random.randn( 84, 84, 4 ), 1, 0., 0 )
    print("Memory should be full now")
    input("Press enter to continue")
    for i in range(10000):
        pbar.update(1)
        e.store_transition( np.random.randn( 84, 84, 4 ), 1, 0., 0 )
        s = e.sample_buffer(32)
    print("Done!")
|
#!/bin/env python
# pylint: disable=invalid-name
"""This module contains the Atom class"""
# Minimal periodic table: atomic number -> element record.
PERIODIC_TABLE = {
    1: {'symbol': 'H'},
    2: {'symbol': 'He'},
    3: {'symbol': 'Li'},
    4: {'symbol': 'Be'},
    5: {'symbol': 'B'},
    6: {'symbol': 'C'},
    7: {'symbol': 'N'},
    8: {'symbol': 'O'},
    9: {'symbol': 'F'},
    10: {'symbol': 'Ne'},
    11: {'symbol': 'Na'},
    12: {'symbol': 'Mg'},
}
# Isotope data keyed by element symbol:
# isotope name -> [mass (amu), natural abundance].
LIST_OF_ISO = {
    'H': {'H': [1.007825, 0.99985], 'D': [2.01410178, 0.00015]},
    'He': {'3He': [3.0160293, 0.00000137], '4He': [4.002602, 0.99999863]},
    'C': {'12C': [12.0, 0.9893], '13C': [13.0033548378, 0.0107]},
    'O': {'16O': [15.9949, 0.9976], '17O': [16.9991315, 0.00039],
          '18O': [17.999160, 0.00201]},  # 18O mass fixed (was placeholder 18)
}
def elements(z):
    """Return the periodic-table record for atomic number *z*."""
    entry = PERIODIC_TABLE[z]
    return entry
def iso(z):
    """Return the isotope table for element symbol *z* (e.g. 'H', 'O')."""
    table = LIST_OF_ISO[z]
    return table
class Atom(object):  # should input be number or string for Carbon, 'C' or '6'
    """An atom identified by its atomic number Z."""

    def __init__(self, Z=0, name=''):
        """Initialize internal variables.

        :param Z: atomic number (0 means "unspecified").
        :param name: optional display name.
        """
        self.name = name
        # Always assign Z. The original only set it when Z > 0, so Atom()
        # never had the attribute and every method raised AttributeError.
        self.Z = Z

    def __eq__(self, other):
        """Two atoms are equal when their atomic numbers match."""
        return self.Z == other.Z

    def symbol(self):
        """Return the atomic symbol, e.g. 'H' for Z == 1."""
        return elements(self.Z)['symbol']

    def iso(self):
        """Return the isotope table for this atom's element."""
        return iso(elements(self.Z)['symbol'])

    def mass(self):
        """Return the average atomic mass weighted by isotope abundance."""
        info = iso(elements(self.Z)['symbol'])
        amu = 0
        for isotope_mass, abundance in info.values():
            amu += isotope_mass * abundance
        return amu
if __name__ == '__main__':
    # Demo. Parenthesized print works under both Python 2 and Python 3
    # (the original Python-2-only print statements broke on Python 3).
    atom = Atom(1)
    print(atom.symbol())
    print(atom.iso())
    print(atom.mass())
|
import sys
import unittest
import tests.mockanki
from unittest.mock import patch, call, Mock, MagicMock
from ankiscript.addin import Addin
# NOTE(review): these 'anki' stubs are installed AFTER ankiscript.addin has
# already been imported above; presumably tests.mockanki (imported first)
# makes that safe — verify, otherwise the stubbing below comes too late.
sys.modules['anki'] = MagicMock()
sys.modules['anki.httpclient'] = MagicMock()
import anki.httpclient
class AddinTest(unittest.TestCase):
    """Tests for ankiscript.addin.Addin install/isInstalled behavior.

    Note: @patch decorators are applied bottom-up, so the mock arguments of
    each test method are listed in the reverse order of the decorators.
    """

    @patch('ankiscript.addin.mw.addonManager.addonName')
    @patch('ankiscript.addin.mw.addonManager.allAddons')
    def test_addin_is_installed(self, allAddons, addonName):
        """isInstalled() is True when an addon with a matching name exists."""
        allAddons.return_value = [Mock()]
        addonName.return_value = 'testing'
        self.assertTrue(Addin('testing').isInstalled())

    @patch('ankiscript.addin.mw.addonManager.allAddons')
    def test_addin_is_not_installed(self, allAddons):
        """isInstalled() is False when the addon manager reports no addons."""
        allAddons.return_value = ''
        self.assertFalse(Addin('testing').isInstalled())

    @unittest.skip('DownloadError type not found')
    @patch('ankiscript.addin.mw.addonManager')
    @patch('anki.httpclient.HttpClient')
    @patch('ankiscript.addin.addons.download_and_install_addon')
    @patch('ankiscript.addin.askUser')
    @patch('ankiscript.addin.mw.addonManager.allAddons')
    def test_addin_install(self, allAddons, askUser, download_and_install_addon, HttpClient, addonManager):
        """install() downloads the addon when the user confirms."""
        allAddons.return_value = ''
        askUser.return_value = True
        download_and_install_addon.return_value = ('198750399', '')
        addin = Addin('testing')
        addin.install('198750399', Mock())
        download_and_install_addon.assert_called_with(addonManager, HttpClient, '198750399')
        #HttpClient

    @patch('ankiscript.addin.addons.download_and_install_addon')
    @patch('ankiscript.addin.askUser')
    @patch('ankiscript.addin.mw.addonManager.allAddons')
    def test_addin_install_not_done_if_user_cancels(self, allAddons, askUser, download_and_install_addon):
        """install() performs no download when the user declines the prompt."""
        allAddons.return_value = ''
        askUser.return_value = False
        addin = Addin('testing')
        addin.install('198750399', Mock())
        download_and_install_addon.assert_not_called()

    @patch('ankiscript.addin.mw.addonManager.addonName')
    @patch('ankiscript.addin.mw.addonManager.allAddons')
    def test_addin_install_when_already_installed(self, allAddons, addonName):
        """install() is a no-op (no error) when the addon is already present."""
        allAddons.return_value = [Mock()]
        addonName.return_value = 'testing'
        Addin('testing').install('198750399', Mock())
|
def longest_common_substring(seq_1, seq_2):
    """Return the longest substring of *seq_1* that also occurs in *seq_2*.

    Scans candidate lengths from longest to shortest, so the first hit is a
    longest common substring (ties broken by leftmost position in seq_1).
    Returns '' when the sequences share no characters.

    The original sketch here was not valid Python (missing colons, 'return'
    outside a function); this is the working implementation of its idea.
    """
    for length in reversed(range(len(seq_1) + 1)):
        for start in range(len(seq_1) - length + 1):
            candidate = seq_1[start:start + length]
            if candidate in seq_2:
                return candidate
    return ''


if __name__ == '__main__':
    seq_1 = 'ATCACAGT'
    seq_2 = 'GACGCACG'
    print(longest_common_substring(seq_1, seq_2))
|
import unittest
from hdlConvertor import ParseException
from hdlConvertor.language import Language
from hdlConvertor import hdlAst
from tests.basic_tc import BasicTC, parseFile as _parseFile
def parseFile(fname):
    """Parse *fname* as VHDL via the shared BasicTC helper."""
    return _parseFile(fname, Language.VHDL)
class VhdlConversionTC(BasicTC):
    """Conversion tests for the VHDL front-end of hdlConvertor."""

    def test_dump_mux(self):
        _, ast = parseFile("mux.vhd")
        str(ast)

    def test_package_array_const(self):
        _, ast = parseFile("package_array_const.vhd")
        str(ast)
        package = ast.objs[0]
        self.assertIsInstance(package, hdlAst.HdlNamespace)
        self.assertEqual(package.name, 'array_const_pkg')

    def test_package_component(self):
        _, ast = parseFile("package_component.vhd")
        str(ast)
        # first 4 objects are libraries and 'use' clauses
        package = ast.objs[4]
        self.assertIsInstance(package, hdlAst.HdlNamespace)
        self.assertEqual(package.name, 'components_pkg')

    def test_package_constants(self):
        _, ast = parseFile("package_constants.vhd")
        str(ast)
        # first 4 objects are libraries and 'use' clauses
        package = ast.objs[4]
        self.assertIsInstance(package, hdlAst.HdlNamespace)
        self.assertEqual(package.name, 'constants_pkg')

    def test_fourbit_adder(self):
        _, ast = parseFile("fourbit_adder.vhd")
        str(ast)

    def test_mux2i(self):
        _, ast = parseFile("mux2i.vhd")
        str(ast)

    def test_ram(self):
        self.parseWithRef("ram.vhd", Language.VHDL)

    def test_malformed(self):
        with self.assertRaises(ParseException):
            parseFile("malformed.vhdl")

    def test_arch_with_assig(self):
        self.parseWithRef("arch_with_assig.vhd", Language.VHDL)

    def test_with_select(self):
        self.parseWithRef("with_select.vhd", Language.VHDL)

    def test_call(self):
        self.parseWithRef("call.vhd", Language.VHDL)

    def test_type_attribute_designator(self):
        self.parseWithRef("type_attribute_designator.vhd", Language.VHDL)

    def test_library_declaration(self):
        _, ast = parseFile("ram.vhd")
        library = ast.objs[0]
        self.assertEqual(str(type(library)),
                         "<class 'hdlConvertor.hdlAst._structural.HdlLibrary'>")
        self.assertEqual(library.name, 'ieee')
if __name__ == "__main__":
    suite = unittest.TestSuite()
    # To run a single test, add it explicitly instead of the whole class:
    # suite.addTest(VhdlConversionTC('test_with_select'))
    suite.addTest(unittest.makeSuite(VhdlConversionTC))
    runner = unittest.TextTestRunner(verbosity=3)
    runner.run(suite)
|
class Out_put_vis():
    """Plot OHLC time series to PNG files in a hard-coded output directory.

    Arrays are indexed [time, 0, channel] where channel 0..3 is
    open/high/low/close (inferred from the original filenames).
    """

    # NOTE(review): output path is machine-specific; consider parameterizing.
    _OUT_DIR = 'C:/Users/Pranesh/Desktop/share/'
    _CHANNELS = ('open', 'high', 'low', 'close')

    def plot_fig(self, input1, pred, n):
        """Plot input (blue) vs. prediction (red) per OHLC channel.

        :param input1: ground-truth array, shape [T, 1, 4].
        :param pred: prediction array, shape [T', 1, 4].
        :param n: string suffix appended to each output filename.
        """
        plt.figure(1)
        t = np.arange(1, 10000, 1)
        p = input1.shape[0]
        # Clip the prediction to the available time axis.
        l = min(t.shape[0], pred.shape[0])
        for k, channel in enumerate(self._CHANNELS):
            plt.plot(t[:p], input1[:p, 0, k], 'b--', t[:l], pred[:l, 0, k], 'r--')
            plt.savefig(self._OUT_DIR + channel + '_1' + n + '.png')
            plt.clf()

    def plot_gen(self, pred):
        """Plot a generated prediction (red) per OHLC channel.

        :param pred: prediction array, shape [T, 1, 4].
        """
        l = pred.shape[0]
        t = np.arange(1, l + 1, 1)
        for k, channel in enumerate(self._CHANNELS):
            plt.plot(t[:l], pred[:l, 0, k], 'r--')
            plt.savefig(self._OUT_DIR + channel + 'p_1.png')
            plt.clf()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
# from scipy.stats import norm
import scipy.stats as st
from mpmath import mp
mp.dps = 30
# ###########################################DATOS#########################
# Sample #1: monthly SST averages for the 160E-80W band, re-indexed onto a
# (Lat, Lon) MultiIndex with dates as columns.
# muestra #1
S0 = pd.read_csv(
    'POP/Datos/lon160E-80W/SST_N_PROM_80W.csv', index_col=['major', 'minor'])
# SST_80W_masc = pd.read_csv('MAR(1)\Datos\lon160E-80W\SST_MASK_PROM_80W.csv',index_col=[0])
date = pd.date_range('1982', '2016', freq='M')
a = sorted(S0.index.levels[0], reverse=True)
b = S0.index.levels[1]
multin = pd.MultiIndex.from_product([a, b], names=['Lat', 'Lon'])
SST_80W = pd.DataFrame(S0.values, index=multin, columns=date)
# muestra#2: same treatment for the 165E-125W band.
S1 = pd.read_csv(
    'POP/Datos/lon165E-125W/SST_N_PROM_125W.csv', index_col=['major', 'minor'])
a1 = sorted(S1.index.levels[0], reverse=True)
b1 = S1.index.levels[1]
multin1 = pd.MultiIndex.from_product([a1, b1], names=['Lat', 'Lon'])
SST_125W = pd.DataFrame(S1.values, index=multin1, columns=date)
# January rows of the transposed frame.
pos = np.where(SST_80W.T.index.month == 1)[0]
# FIX: DataFrame.ix was removed in pandas 1.0; .iloc performs the same
# positional selection. Also reuse `pos` instead of recomputing it.
d0 = SST_80W.T.iloc[pos]
d1 = d0.T
# Separo los datos por meses
def B(matriz):
    """Return (B1, a): a Cholesky factor of `matriz` after diagonal loading.

    Repeatedly adds |Re(E[y])| * I (E = sorted eigenvalues of the original
    matrix) until the smallest eigenvalue of the loaded matrix exceeds 1e-3,
    for at most 5 rounds, then factors the result. `a` counts the rounds.
    NOTE(review): assumes a fixed 100x100 matrix and shadows the module-level
    names d/d1/o -- confirm both before reuse.
    """
    E = mp.eig(matriz, left=False, right=False)
    E = mp.eig_sort(E)
    bd = matriz
    a = 0
    for y in range(5):
        a = a + 1
        # Loading matrix: |Re(E[y])| on every entry where the identity had 1.
        d = np.identity(100)
        d[d == 1.0] = np.abs(mp.re(E[y]))
        d1 = mp.matrix(d)
        bd = bd + d1
        o = mp.eig(bd, left=False, right=False)
        o = mp.eig_sort(o)
        # Stop once the matrix is safely positive definite.
        if mp.re(o[0]) > 0.001:
            break
    B1 = mp.cholesky(bd)
    return B1, a
def Mes(matriz, i):
    """Extract month `i` (1..12) samples from `matriz` (dates on the index).

    Returns (d.T, m): the month's samples as an mpmath matrix with variables
    on the rows, and the per-row mean vector.
    """
    # FIX: DataFrame.ix was removed in pandas 1.0; .iloc performs the same
    # positional row selection used here.
    d0 = matriz.iloc[np.where(matriz.index.month == i)[0]]
    d = mp.matrix(d0.values)
    m = mp.matrix([np.mean(d.T[ii, :]) for ii in range(d.T.rows)])
    return d.T, m
# Split both longitude bands into per-month sample matrices and their means
# (Spanish month abbreviations: Ene=Jan, Abr=Apr, Ago=Aug, Dic=Dec).
Ene_80W, M_ene = Mes(SST_80W.T, 1)
Feb_80W, M_Feb = Mes(SST_80W.T, 2)
Mar_80W, M_Mar = Mes(SST_80W.T, 3)
Abr_80W, M_Abr = Mes(SST_80W.T, 4)
May_80W, M_may = Mes(SST_80W.T, 5)
Jun_80W, M_Jun = Mes(SST_80W.T, 6)
Jul_80W, M_Jul = Mes(SST_80W.T, 7)
Ago_80W, M_Ago = Mes(SST_80W.T, 8)
Sep_80W, M_Sep = Mes(SST_80W.T, 9)
Oct_80W, M_Oct = Mes(SST_80W.T, 10)
Nov_80W, M_Nov = Mes(SST_80W.T, 11)
Dic_80W, M_Dic = Mes(SST_80W.T, 12)
Ene_125W, M125W_ene = Mes(SST_125W.T, 1)
Feb_125W, M125W_Feb = Mes(SST_125W.T, 2)
Mar_125W, M125W_Mar = Mes(SST_125W.T, 3)
Abr_125W, M125W_Abr = Mes(SST_125W.T, 4)
May_125W, M125W_may = Mes(SST_125W.T, 5)
Jun_125W, M125W_Jun = Mes(SST_125W.T, 6)
Jul_125W, M125W_Jul = Mes(SST_125W.T, 7)
Ago_125W, M125W_Ago = Mes(SST_125W.T, 8)
Sep_125W, M125W_Sep = Mes(SST_125W.T, 9)
Oct_125W, M125W_Oct = Mes(SST_125W.T, 10)
Nov_125W, M125W_Nov = Mes(SST_125W.T, 11)
Dic_125W, M125W_Dic = Mes(SST_125W.T, 12)
# Worked example: MAR(1) step from February (past) to March (present).
Y = Mar_80W  # present month
X = Feb_80W  # past month
# Estimate M0 and M1 (sample covariance and lag-1 cross-covariance over the
# 100-ish time samples; the /100 normalisation assumes that count -- TODO confirm).
M0_act = ((Y * Y.T) / 100)
M0_in = (M0_act**-1)
MP = (M0_act * M0_in)  # sanity check: should be close to the identity
o = MP[0:5, 0:5]
o0 = M0_in[0:5, 0:5]
M1_act = ((Y * X.T) / 100)
M0_ant = ((X * X.T) / 100)
A = M1_act * (M0_ant ** -1)  # MAR(1) transition operator
BBT = M0_act - (A * M1_act.T)  # noise covariance B*B^T
####
E = mp.eig(BBT, left=False, right=False)
E = mp.eig_sort(E)
# First diagonal-loading pass: shift BBT by |smallest eigenvalue| * I so the
# Cholesky factorisation below succeeds (hard-coded value taken from a run).
d = np.identity(100)
d[d == 1.0] = abs(-24.7174268506955680247383180094514)
d1 = mp.matrix(d)
BBT0 = BBT + d1
E0 = mp.eig(BBT0, left=False, right=False)
E0 = mp.eig_sort(E0)
# Second, much smaller loading pass for remaining numerical negativity.
d0 = np.identity(100)
d0[d0 == 1.0] = abs(-0.00000062521499573893501604735293439538)
d2 = mp.matrix(d0)
BBT1 = BBT0 + d2
E1 = mp.eig(BBT1, left=False, right=False)
E1 = mp.eig_sort(E1)
# Cholesky factors of the two loaded covariances.
B0 = mp.cholesky(BBT0)
B1 = mp.cholesky(BBT1)
# The model: simulate March from the first February sample.
x_ant = Feb_80W.T[0, :].T
x0 = x_ant - M_Feb  # anomaly with respect to the February mean
alea = mp.randmatrix(100, 1)  # random innovation term
med_sim = M_Mar
mes_sim = ((A * x0) + (B1 * alea)) + med_sim
# Residual checks: both should be ~0 if B1*B1^H reproduces the covariance.
s0 = mp.chop(BBT0 - B1 * B1.H)
s1 = mp.chop(BBT1 - B1 * B1.H)
Z_ant = np.array(x_ant, dtype=float)
Z_sim = np.array(mes_sim, dtype=float)
# Two-sample Kolmogorov-Smirnov test between observed and simulated fields.
SK = st.ks_2samp(Z_ant, Z_sim)
# funciones
def parametros(Y, X):
    """Estimate the MAR(1) parameters for one month-to-month transition.

    Y holds the "present" month samples and X the "past" month samples; the
    /100 normalisation matches the sample count used elsewhere in this script.
    Returns (A, BBT): the transition operator and the noise covariance B*B^T.
    """
    cov_present = (Y * Y.T) / 100
    cross_cov = (Y * X.T) / 100
    cov_past = (X * X.T) / 100
    transition = cross_cov * (cov_past ** -1)
    noise_cov = cov_present - (transition * cross_cov.T)
    return transition, noise_cov
# Y = presente (present month), X = pasado (past month): estimate the MAR(1)
# parameters for every consecutive month pair of the 80W band.
A_00, BBT_00 = parametros(Ene_80W, Dic_80W)
A_01, BBT_01 = parametros(Feb_80W, Ene_80W)
A_02, BBT_02 = parametros(Mar_80W, Feb_80W)
A_03, BBT_03 = parametros(Abr_80W, Mar_80W)
A_04, BBT_04 = parametros(May_80W, Abr_80W)
A_05, BBT_05 = parametros(Jun_80W, May_80W)
A_06, BBT_06 = parametros(Jul_80W, Jun_80W)
A_07, BBT_07 = parametros(Ago_80W, Jul_80W)
A_08, BBT_08 = parametros(Sep_80W, Ago_80W)
A_09, BBT_09 = parametros(Oct_80W, Sep_80W)
A_10, BBT_10 = parametros(Nov_80W, Oct_80W)
A_11, BBT_11 = parametros(Dic_80W, Nov_80W)
# Diagonal-loaded Cholesky factor for the Dec->Jan noise covariance.
B_0 = B(BBT_00)
|
# List-slicing demo: friends[a:b] is inclusive of index a, exclusive of b.
friends = ["Tim", "Sasa", "Casey", "Craig", "Gigot"]
print(friends[2:4])  # prints friends[2] and friends[3], but not [4]
print(friends[1:])  # prints from friends[1] to the end of the list (all but friends[0]).
print(friends[:4])  # prints from the start of the list and prints friends[0] thru friends[3], but not friends[4]
print(friends[0][1:])  # slicing a string element works the same way: "im"
print(friends[4][1:4])  # prints the [1] through [3] (but not [4]) characters of friends[4]
print(friends[-3:])  # starts at the end of the list and prints the last 3 entries.
print(friends[-3:-1])  # starts at the end of the list and prints from the 3rd from the last entry up to, but not including, the last from the list.
print(friends[:-2])  # starts at the beginning of the list and prints all but the last 2 entries.
print(friends[-4:3])  # negative start with positive stop: prints friends[1] and friends[2] only.
print(friends)
|
# ##################################################################################################
# Copyright (c) 2020 - Fundaรงรฃo CERTI
# All rights reserved.
# ##################################################################################################
import numpy
import rasterio as rio
import pytest
from qda_modelos import total_suspended_solids_turbidity as turbidity
class TestTSSTurbidityZhangEtAl2010:
    """Tests for the Zhang et al. (2010) TSS/turbidity model.

    `setup_bands` is a pytest fixture (defined elsewhere) mapping resolution
    keys ("10m"/"20m") to dicts of Sentinel-2 band arrays.
    """

    def test_expected_result_type(self, setup_bands):
        """The model must return a numpy array."""
        R20m_bands = setup_bands["20m"]
        B04 = R20m_bands["B04"]
        B02 = R20m_bands["B02"]
        zhang_et_al_2010_result = turbidity.zhang_et_al_2010(B04, B02)
        assert isinstance(zhang_et_al_2010_result, numpy.ndarray)

    def test_expected_result_shape(self, setup_bands):
        """The output raster must keep the input band's shape."""
        R20m_bands = setup_bands["20m"]
        B04 = R20m_bands["B04"]
        B02 = R20m_bands["B02"]
        zhang_et_al_2010_result = turbidity.zhang_et_al_2010(B04, B02)
        assert zhang_et_al_2010_result.shape == B04.shape

    def test_expected_error_for_wrong_number_of_bands(self, setup_bands):
        """Calling with a single band must raise TypeError."""
        R20m_bands = setup_bands["20m"]
        B04 = R20m_bands["B04"]
        with pytest.raises(TypeError):
            turbidity.zhang_et_al_2010(B04)

    def test_expected_error_for_bands_of_different_shapes(self, setup_bands):
        """Bands at different resolutions (different shapes) must raise ValueError."""
        # Dropped the unused `R20m_bands` intermediate that the original kept.
        B04 = setup_bands["20m"]["B04"]
        B02 = setup_bands["10m"]["B02"]
        with pytest.raises(ValueError):
            turbidity.zhang_et_al_2010(B04, B02)
|
import random
from math import exp
from agent.agent import Agent
class ReinforcementLearning(Agent):
    """Tabular Q-learning trading agent.

    q(state, action):
      States: every combination of owned stocks -> 2**n_stocks rows.
      Actions: per stock i, buy (even id 2*i) or sell (odd id 2*i + 1),
      giving 2*n_stocks columns.
    """
    type = "RL"

    def __init__(self, central_bank, initial_cash=1000, soft_max=False):
        """`soft_max` selects soft-max action selection instead of e-greedy."""
        super().__init__(central_bank, initial_cash)
        self.current_step = 0
        self.q = []                      # Q-table: q[state][action]
        self.discount = 0.9              # future-reward discount factor (gamma)
        self.total = 1000000             # steps over which epsilon anneals toward 0.1
        self.learningRate = 0.8          # Q-update step size (alpha)
        self.epsilon = 0.9               # exploration parameter / soft-max temperature scale
        self.rand_factor = 0.05          # probability of taking a fully random action
        self.reward_modifier = 100
        self.init_q_values()
        self.original_state = 0          # state observed when the last action was taken
        self.original_action = 0         # last action taken
        self.dec = (self.epsilon - 0.1) / self.total  # per-step epsilon decay
        # BUG FIX: this was hard-coded to False, silently ignoring the
        # constructor argument, so soft-max selection could never be enabled.
        self.soft_max = soft_max

    def init_q_values(self):
        """Allocate the zero-initialised Q-table (2**n states x 2*n actions)."""
        num_col = 2 * len(self.central_bank.get_all_stock())
        num_lines = 2 ** len(self.central_bank.get_all_stock())
        for _ in range(num_lines):
            self.q.append([0 for _ in range(num_col)])

    def get_state(self):
        """Encode the owned-stock set as an integer, one bit per stock.

        NOTE(review): the bit is "0" when the stock IS owned and "1" when it
        is not -- this looks inverted, but only consistency matters for the
        table lookup; confirm before relying on the encoding's meaning.
        """
        l = len(self.central_bank.get_all_stock())
        owned_stocks = set(self.stocks_owned.keys())
        s = "".join(["0" if i in owned_stocks else "1" for i in range(l)])
        return int(s, 2)

    def learn(self):
        """Standard Q-learning update for the previously taken action."""
        u = self.reward()
        prev_q = self.get_q(self.original_state, self.original_action)
        # Decay epsilon but keep a small exploration floor.
        self.epsilon = max(self.epsilon - self.dec, 0.05)
        # Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
        pred_error = u + self.discount * self.get_max_q(self.get_state()) - prev_q
        new_q = prev_q + (self.learningRate * pred_error)
        self.q[self.original_state][self.original_action] = new_q
        return

    def _decide(self):
        """Pick and execute the next action, remembering (state, action) for learn()."""
        self.original_state = self.get_state()
        # NOTE(review): epsilon also decays (with a floor) in learn(); confirm
        # this additional un-floored decay is intended.
        self.epsilon -= self.dec
        if random.uniform(0, 1) < self.rand_factor:
            act = self.do_random_action(self.get_available_actions())
        elif self.soft_max:
            act = self.do_soft_max()
        else:
            act = self.do_e_greedy()
        self.original_action = act

    def get_available_actions(self):
        """All currently legal actions: affordable buys plus sells of owned stocks."""
        owned_stocks = set(self.stocks_owned.keys())
        l = len(self.central_bank.get_all_stock())
        buy_actions = [2 * i for i in range(l) if self.central_bank.stocks[i].price <= self.cash]
        sell_actions = [2 * i + 1 for i in range(l) if i in owned_stocks]
        return [*buy_actions, *sell_actions]

    def do_e_greedy(self):
        """Epsilon-greedy selection: usually the best-Q valid action.

        NOTE(review): rand_factor is also applied in _decide(), so the random
        branch here doubles the exploration chance -- confirm intended.
        """
        valid_actions = self.get_available_actions()
        if random.uniform(0, 1) < self.rand_factor:
            return self.do_random_action(valid_actions)
        state = self.get_state()
        act = self.get_max_action_q(state, valid_actions)
        self.do_action(act)
        return act

    def do_soft_max(self):
        """Soft-max (Boltzmann) selection over the valid actions' Q-values."""
        valid_actions = self.get_available_actions()
        act = -1
        n = len(valid_actions)
        # Cumulative distribution of exp(Q / temperature), temperature scaled by epsilon.
        temperature = self.epsilon * 100.0
        cumulative = [exp(self.get_q(self.get_state(), valid_actions[0]) / temperature)]
        for i in range(1, n):
            cumulative.append(exp(self.get_q(self.get_state(), valid_actions[i]) / temperature) + cumulative[i - 1])
        cut = random.random() * cumulative[n - 1]
        for i in range(n):
            if cut <= cumulative[i]:
                act = valid_actions[i]
                break
        if act >= 0:
            self.do_action(act)
        return act

    def get_random_available_action(self):
        """Return (without executing) a uniformly random valid action."""
        valid_actions = self.get_available_actions()
        action = valid_actions[random.randint(0, len(valid_actions) - 1)]
        return action

    def do_random_action(self, valid_actions):
        """Execute and return a uniformly random action from valid_actions."""
        action = valid_actions[random.randint(0, len(valid_actions) - 1)]
        self.do_action(action)
        return action

    def do_action(self, action):
        """Execute an action id: even ids buy stock action//2, odd ids sell it."""
        stock_id = action // 2
        if action % 2:
            # Odd id: sell a random quantity of what we hold.
            max_sell = self.how_many_can_i_sell(stock_id)
            to_sell = random.randint(0, max_sell)
            self.sell(stock_id, to_sell)
        else:
            # Even id: buy a random affordable quantity.
            # NOTE(review): the -1 keeps some cash in reserve; confirm intended.
            max_buy = self.how_many_can_i_buy(stock_id) - 1
            to_buy = random.randint(0, max_buy)
            self.buy(stock_id, to_buy)

    def reward(self):
        """Reward = change in total portfolio value since the previous step."""
        current_value = self.value_history[-1]
        pre_value = self.value_history[-2]
        return current_value - pre_value

    def get_q(self, original_state, original_action):
        """Q-value for a (state, action) pair."""
        return self.q[original_state][original_action]

    def get_max_q(self, state):
        """Highest Q-value available in `state` over all actions."""
        return max(self.q[state])

    def get_max_action_q(self, state, valid_actions):
        """Return the valid action with the highest Q-value (first wins ties)."""
        # Renamed the local that previously shadowed the builtin `max`.
        best_q = float("-inf")
        best_action = -1
        line = self.q[state]
        for action in valid_actions:
            if line[action] > best_q:
                best_q = line[action]
                best_action = action
        return best_action
|
# encoding=utf-8
__author__ = 'xiaowang'
__date__ = '17/2/16'
from itertools import *
# natuals = count(1, 2)
# for n in natuals:
# print n
# cs = cycle('ABC')
# for c in cs:
# print c
# ns = repeat('A', 10)
# x = [n for n in ns]
# print x
# natuals = count(1)
# ns = takewhile(lambda x:x<=10, natuals)
# ls = [i for i in ns]
# print ls
# for c in chain('ABC', 'XYZ'):
# print c
# for key, group in groupby('123334'):
# print key,list(group)
# for x in imap(lambda x, y: x*y, [1, 2, 3], count(1)):
# print x
# for x in ifilter(lambda x:x>2, [1, 2, 3, 4,5]):
# print x
|
from django.contrib import admin
from .models import Order,OrderDetails
# Expose the order models in the Django admin site.
for model in (Order, OrderDetails):
    admin.site.register(model)
'''
Regular-expression string matching demo (original notes were in Chinese:
"substring matching / backward search"): match vs. findall vs. finditer.
'''
import re

if __name__ == "__main__":
    text1 = '11/27/2012'
    text2 = 'Nov 27, 2012'
    # match() only anchors at the start of the string.
    datepat = re.compile(r'\d+/\d+/\d+')
    if datepat.match(text1):
        print("match text1")
    if datepat.match(text2):
        print("match text2")
    text = 'Today is 11/27/2012. PyCon starts 3/13/2013.'
    # Capture groups: group() is the whole match, groups() the captured parts.
    datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
    match_obj = datepat.match(text1)
    print(match_obj.group())
    print(match_obj.groups())
    # findall() returns all group tuples; finditer() yields match objects lazily.
    match_obj = datepat.findall(text)
    print(match_obj)
    matchs = datepat.finditer(text)
    for match_obj in matchs:
        print(match_obj.group())
    # Module-level shortcut, compiling the pattern on the fly.
    match_obj = re.findall(r'(\d+)/(\d+)/(\d+)', text)
    print(match_obj)
import json
from typing import List, Dict
import stanza
from tqdm import tqdm
import numpy as np
from data_processing.class_defs import SquadExample, SquadMultiQAExample, RepeatQExample, RepeatQFeature
from data_processing.dataset import Dataset
from defs import UNKNOWN_TOKEN
class RepeatQDataset:
    """Dataset wrapper for the RepeatQ model: loads examples from JSON, maps
    words/features to ids, pads everything, and computes copy indicators."""

    def __init__(self,
                 ds_json_path,
                 vocabulary: Dict[str, int],
                 feature_vocab: Dict[str, int],
                 unk_token=UNKNOWN_TOKEN,
                 pad_sequences=True,
                 pad_id=0,
                 data_limit=-1,
                 use_pos_features=True,
                 use_ner_features=True,
                 reduced_ner_indicators=False):
        """
        Dataset to use in conjunction with the RepeatQ model.
        :param ds_json_path: Path to a JSON file containing facts, base questions and target questions.
        :param vocabulary: A dictionary which maps words to their ids (used to convert words to ids).
        :param feature_vocab: A dictionary mapping feature words to ids.
        :param unk_token: The unknown token/word. Default is the one used by NQG (<unk>).
        :param pad_sequences: If sequences are to be padded to create a rectangular dataset. Default is True.
        :param pad_id: The id of the padding token (default is 0).
        :param data_limit: Number of examples to keep
        :param use_pos_features: Whether to use POS features or not.
        :param use_ner_features: Whether to use NER features or not.
        :param reduced_ner_indicators: If True, collapses "answer" NER tags (BA/IA) into plain ones (BN/IN).
        """
        super(RepeatQDataset, self).__init__()
        self.ds_path = ds_json_path
        self.vocab = vocabulary
        self.feature_vocab = feature_vocab
        self.unk_token = unk_token
        self.pad_sequences = pad_sequences
        self.pad_id = pad_id
        self.use_pos_features = use_pos_features
        self.use_ner_features = use_ner_features
        self.reduced_ner_indicators = reduced_ner_indicators
        self.ds = self.read_dataset(data_limit)

    def read_dataset(self, data_limit):
        """Load the JSON dataset, optionally truncated to `data_limit` examples."""
        with open(self.ds_path, mode='r') as f:
            data = RepeatQExample.from_json(json.load(f))
        if data_limit < 0:
            return data
        return data[:data_limit]

    def get_dataset(self) -> List[RepeatQExample]:
        """Convert, pad and index every example in place; returns self.ds."""
        # BUG FIX: examples with an empty rephrased question used to be skipped
        # while building the lists below but were kept in self.ds, so the
        # index-aligned loop at the end read misaligned rows (or ran past the
        # end of the shorter lists). Drop them from self.ds up front instead.
        self.ds = [ex for ex in self.ds if ex.rephrased_question != ""]
        base_questions, base_questions_features, facts_list, facts_features, targets = [], [], [], [], []
        # If a target word is present in the base question
        is_from_base_question = []
        max_fact_length, max_nb_facts = 0, 0
        for example in tqdm(self.ds):
            target = self.words_to_ids(example.rephrased_question.split())
            targets.append(target)
            base_question = self.words_to_ids(example.base_question.split())
            base_questions.append(base_question)
            base_questions_features.append(self.features_to_ids(example.base_question_features))
            # Keep at most the first 3 facts (and their features) per example.
            facts = [self.words_to_ids(fact.split(' ')) for fact in example.facts][:3]
            facts_features.append([self.features_to_ids(fact_features) for fact_features in example.facts_features][:3])
            max_fact_length = max(max_fact_length, max(len(fact) for fact in facts))
            max_nb_facts = max(max_nb_facts, len(facts))
            facts_list.append(facts)
            is_from_base_question.append([True if w in base_question else False for w in target])
        base_questions = self.sequence_padding(base_questions)
        targets = self.sequence_padding(targets)
        is_from_base_question = self.sequence_padding(is_from_base_question)
        # NOTE(review): facts are padded to a fixed length of 32 even though
        # max_fact_length is computed above -- confirm 32 is the intended cap.
        facts_list = np.array(self.matrix_padding(facts_list, max_length=32, max_width=max_nb_facts))
        for k in range(len(self.ds)):
            base_question = base_questions[k]
            target = targets[k]
            # Index in the base question if the word comes from the base question, -1 otherwise
            copy_indicators = [np.where(base_question == w)[0][0] if w != 0 and w in base_question else -1 for w in target]
            # Same for facts but offset by the base question's length. Each fact is also offset by the fact that comes
            # before itself. The final logits order is: [vocabulary, base question, fact 1, fact 2, ..., fact l]
            offset = len(base_question)
            for fact in facts_list[k]:
                for i in range(len(copy_indicators)):
                    if target[i] != 0 and target[i] in fact:
                        copy_indicators[i] = np.where(fact == target[i])[0][0] + offset
                offset += len(fact)
            # Updates the data with the padded features and inputs
            self.ds[k].base_question = base_question
            self.ds[k].base_question_features = base_questions_features[k]
            self.ds[k].facts = facts_list[k]
            self.ds[k].facts_features = facts_features[k]
            # BUG FIX: store this example's own indicator row; previously the
            # whole padded matrix was assigned to every example.
            self.ds[k].is_from_base_question = is_from_base_question[k]
            self.ds[k].target_question_copy_indicator = copy_indicators
            self.ds[k].rephrased_question = target
        return self.ds

    def words_to_ids(self, sentence: List[str]):
        """Map lower-cased words to vocabulary ids, falling back to the unk id."""
        return [self.vocab.get(word.lower(), self.vocab[self.unk_token]) for word in sentence]

    def features_to_ids(self, feature: RepeatQFeature):
        """Return (pos_ids, ner_ids) for one feature struct; empty lists when disabled."""
        if self.reduced_ner_indicators:
            feature.entity_tags = feature.entity_tags.replace("BA", "BN").replace("IA", "IN")
        if self.use_pos_features:
            pos_features = [self.feature_vocab[tag] for tag in feature.pos_tags.split()]
        else:
            pos_features = []
        if self.use_ner_features:
            entity_features = [self.feature_vocab[tag] for tag in feature.entity_tags.split()]
        else:
            entity_features = []
        return pos_features, entity_features

    def sequence_padding(self, sequences: List[List[int]], max_length=None):
        """
        Pads linear sequences to the longest sequence length, or truncates/pads
        to `max_length` when it is given.
        """
        if max_length is None:
            max_length = max([len(seq) for seq in sequences])
        padded_sequences = list(
            seq[:max_length] + [self.pad_id for _ in range(max_length - len(seq))] for seq in sequences)
        return np.array(padded_sequences)

    def matrix_padding(self, matrices: List[List[List[int]]], max_length, max_width):
        """
        Pads in 2-dimensions, to the max sequence length on the first axis and to the max number of sequences per
        "batch" of sequences on the second axis.
        Ex:
        [[[1 8 39 4 19]
        [93 8 3]],
        [[7 4]]]
        becomes
        [[[1 8 39 4 19]
        [93 8 3 0 0]],
        [[7 4 0 0 0],
        [0 0 0 0 0]]]
        """
        # First dimension
        matrices = [self.sequence_padding(matrix, max_length=max_length) for matrix in matrices]
        # Second dimension
        pad_seq = [self.pad_id for _ in range(max_length)]
        matrices = np.array([matrix if len(matrix) == max_width else np.append(
            matrix,
            [pad_seq for _ in range(max_width - len(matrix))],
            axis=0
        ) for matrix in matrices])
        return matrices
|
class ventaDetalle:
    """A single line item of a sale: product, unit price and quantity."""

    def __init__(self, pro, pre, cant):
        # Attribute names kept in Spanish -- callers access them directly.
        self.producto = pro   # product being sold
        self.precio = pre     # unit price
        self.cantidad = cant  # quantity sold

    def __repr__(self):
        """Debug-friendly representation (added; no callers relied on the default)."""
        return (f"{type(self).__name__}(pro={self.producto!r}, "
                f"pre={self.precio!r}, cant={self.cantidad!r})")
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
import sys
sys.path.append('../Controladores')
from main_controller import *
class MainWindows(QtGui.QWidget):
    """Main window: a row of buttons to add/remove people, wired to MainControlador."""

    def __init__(self):
        super(MainWindows, self).__init__()
        # The controller receives this view so its handlers can read/update it.
        self.controlador = MainControlador(self)
        self.init_ui()

    def init_ui(self):
        """Build the horizontal button bar, connect handlers, and show the window."""
        self.label = QtGui.QLabel('Cantidad de personas')
        h_layout = QtGui.QHBoxLayout()
        h_layout.addWidget(self.label)
        # One button per controller action (labels intentionally in Spanish).
        button_subir = QtGui.QPushButton('Subir persona')
        button_subirmuchas = QtGui.QPushButton('Subir muchas personas')
        button_bajar = QtGui.QPushButton('Bajar persona')
        button_bajarmuchas = QtGui.QPushButton('Bajar muchas personas')
        button_subir_varias = QtGui.QPushButton('Subir estas Personas')
        button_bajar_varias = QtGui.QPushButton('Bajar estas Personas')
        h_layout.addWidget(button_subir)
        h_layout.addWidget(button_subirmuchas)
        h_layout.addWidget(button_bajar)
        h_layout.addWidget(button_bajarmuchas)
        h_layout.addWidget(button_subir_varias)
        h_layout.addWidget(button_bajar_varias)
        # Free-text numeric input used by the "varias" handlers.
        # NOTE(review): created but never added to the layout -- confirm intended.
        self.ingreso_numero = QtGui.QLineEdit(self)
        button_subir.clicked.connect(self.controlador.handler_subir_persona)
        button_subirmuchas.clicked.connect(self.controlador.handler_subir_muchas_personas)
        button_bajar.clicked.connect(self.controlador.handler_bajar_persona)
        button_bajarmuchas.clicked.connect(self.controlador.handler_bajar_muchas_personas)
        button_subir_varias.clicked.connect(self.controlador.handler_subir_varias_personas)
        button_bajar_varias.clicked.connect(self.controlador.handler_bajar_varias_personas)
        self.setLayout(h_layout)
        self.setWindowTitle('Proyecto del auto')
        self.setGeometry(200, 200, 200, 200)
        self.show()
# Application bootstrap: create the Qt app, show the main window, and hand
# control to the event loop until the user quits.
app = QtGui.QApplication(sys.argv)
windows = MainWindows()
sys.exit(app.exec_())
|
#!/usr/bin/env python
"""
Copyright (C) 2022 Andy Piltser-Cowan <awc34@cornell.edu>.
Released under Creative Commons Attribution-Sharealike License 4.0
Available at https://creativecommons.org/licenses/by-sa/4.0/
Contact the author if a different license is desired.
This should be considered alpha-quality software, the author makes no warranty
as to its quality. Expect bugs.
"""
import re
import argparse
from pathlib import Path
import subprocess # nosec
import shlex
from warnings import warn
import os
import tempfile
from config import * # pylint: disable=wildcard-import, unused-wildcard-import
def read_names_file(filename: str) -> dict:  # pylint: disable=too-many-locals
    """
    Reads in the list .tex file and returns a dict with macronames as keys
    and (full_name, file_name, blues, greens) as values.
    Full_name and file_name are strings.
    Blues and Greens are each Lists of strings.
    TODO (Andy): make the values a NamedTuple
    """
    player_characters: list[str]
    parsed_characters: dict[str, tuple]
    parsed_characters = {}
    # One multi-line \NEW{PC}{...} block per character, terminated by a lone `}`.
    re_for_character = re.compile(r"^\\NEW{PC}.*?^}$", re.M | re.S)
    re_for_macroname = re.compile(r"(?:^\\NEW{PC}{\\c)(\w+)(?:})")
    # breakpoint()
    re_for_filename = re.compile(r"(?:\\s\\MYfile\s+{)(\w+\.\w+)")
    re_for_name = re.compile(r"(?:\\s\\MYname\s+{)([ -~\s]+?)(?:}|\\suf)")
    # Blues/greens: first the whole \MYblues / \MYgreens line is extracted,
    # then the individual \b.../\g... macros are pulled out of that line.
    re_for_blues_line = re.compile(r"(?:\\s\\MYblues\s+{\\b)[ -~\s]*?\n")
    re_for_greens_line = re.compile(r"(?:\\s\\MYgreens\s+{\\g)[ -~\s]*?\n")
    re_blues_from_line = re.compile(r"(?:\\b)([\w]+)")
    re_greens_from_line = re.compile(r"(?:\\g)([\w]+)")
    # The comment below disables a pylint warning for not specifying file
    # encoding. This is safe for a text-mode read in Python 3.10
    # pylint: disable=W1514
    with open(filename, "r") as file:
        player_characters = re.findall(re_for_character, file.read())
    for character in player_characters:
        macroname = str(re.findall(re_for_macroname, character)[0])
        full_name = str(re.findall(re_for_name, character)[0])
        blues = re.findall(re_for_blues_line, character)
        if len(blues) > 0:
            blues = re.findall(re_blues_from_line, blues[0])
        greens = re.findall(re_for_greens_line, character)
        if len(greens) > 0:
            greens = re.findall(re_greens_from_line, greens[0])
        try:
            file_name = str(re.findall(re_for_filename, character)[0])
        except IndexError:
            # A character may legitimately lack \MYfile; warn and carry None.
            file_name = None
            warn(f"Warning! Missing file name for {macroname}")
        parsed_characters[macroname] = full_name, file_name, blues, greens
    return parsed_characters
def build_cmd(charname: str, filename: str, draft=False) -> list[str]:
    """Build the pdflatex argv used by the render_pdf() functions.

    TEXCMD/JOBFLAG/OUTDIR/RUNFLAGS/INCLUDES/POSIX all come from config.py.
    Raises TypeError for non-string charname/filename.
    """
    if not isinstance(charname, str):
        raise TypeError("Buildcmd received a 'charname' that was not a string!")
    if not isinstance(filename, str):
        raise TypeError("Buildcmd received a filename that was not a string!")
    job_flags = JOBFLAG + charname.replace(" ", "_") + "_Charsheet"
    command = TEXCMD + job_flags + OUTDIR + RUNFLAGS
    if draft:
        # BUG FIX: the separating space was missing, so "--draft" could fuse
        # with the last token of RUNFLAGS and never reach pdflatex as a flag.
        # (shlex.split collapses repeated whitespace, so an extra space is safe.)
        command += " --draft"
    command = command + INCLUDES + " " + filename
    arrgh = shlex.split(command, posix=POSIX)  # POSIX is a bool defined in config.py
    return arrgh
def render_pdf(charname: str, texfile: str, draft=False) -> int:
    """Render the character sheet `texfile` to PDF with pdflatex.

    Returns the pdflatex return code; raises FileNotFoundError when the
    sheet source is missing under GAMEBASE/Charsheets.
    """
    sheet_path = os.path.join(GAMEBASE, "Charsheets", texfile)
    # Guard clause: fail fast when the .tex source is absent.
    if not Path.exists(Path(sheet_path)):
        raise FileNotFoundError(f"Error! Character sheet file {sheet_path} does not exist!")
    cmd = build_cmd(charname, sheet_path, draft)
    return subprocess.run(cmd, check=True).returncode  # nosec
def render_pdf_from_list(
    charname: str, macroname: str, compendium=False, production=False
) -> int:
    """
    Same as render_pdf, but instead of looking up a character file name,
    this one invokes listchar-PRINT.tex.
    "Compendium" and "production" are the flags that file defines.
    NOTE(review): the compendium/production parameters are currently unused
    (see the pylint disable below) -- confirm whether they should be wired in.
    """
    # pylint: disable=unused-argument
    list_file_path = os.path.join(GAMEBASE, "Production", "listchar-PRINT.tex")
    exists = Path.exists(Path(list_file_path))
    if exists:
        # Write a tiny wrapper .tex document that expands the character macro,
        # then compile it; the temp file is cleaned up on context exit.
        with tempfile.NamedTemporaryFile(mode="wt", suffix=".tex") as temp_file:
            text_to_write = (
                r"\documentclass[listchar]{GL2020}"
                r"\begin{document}"
                f"\\c{macroname}{{}}"
                r"\end{document}"
            )
            temp_file.write(text_to_write)
            # flush() so pdflatex sees the full contents before compilation.
            temp_file.flush()
            arrgh = build_cmd(charname, temp_file.name)
            retcode = subprocess.run(arrgh, check=True).returncode  # nosec
            return retcode
    raise FileNotFoundError(f"Error! File {list_file_path} not found!")
def main() -> int:
    """
    Parse out command line arguments and call other functions to select and render
    the appropriate .tex file for the macro selected by the user.
    Usage: $ name_pdfs.py MyCharacterMaccro
    Where MyCharacterMacro is one of the names defined in char-LIST.tex
    Returns 0/pdflatex's code on success, 1 when the macro name is unknown.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("name")
    parser.add_argument("--listfile", default=GAMEBASE + "/Lists/char-LIST.tex")
    arguments = parser.parse_args()
    name = arguments.name
    # macroname -> (full_name, file_name, blues, greens)
    output = read_names_file(arguments.listfile)
    try:
        # return render_pdf(output[name][0], output[name][1])
        return render_pdf_from_list(output[name][0], name)
    except KeyError:
        print(f"Fatal error: macroname {name} not found!")
        return 1
if __name__ == "__main__":
    # FIX: main() returns an exit status (0 or 1) that was previously
    # discarded; propagate it to the shell without needing `sys`.
    raise SystemExit(main())
|
#!/usr/bin/env python
import re
import urllib2
import crawler
url = "http://en.wikipedia.org/wiki/List_of_venture_capital_firms"
web_page = urllib2.urlopen(url)
#print web_page
vc_dict = dict()
crawler.crawl(url, vc_dict)
working_dict = vc_dict['http://en.wikipedia.org/wiki/List_of_venture_capital_firms']
#retrieve all the wiki links
for key in working_dict['sub_pages']:
#/w/ denotes a non-existement page, and I dont care about anchors, so only look at /wiki/ links
if '/wiki/' is not in key:
continue
#now crawl that new one
url = 'http://en.wikipedia.org' + key
sub_dict = {}
crawler.crawl(url, sub_dict)
#let's find out if it's actually a venture capital firm
page_text_list =
names = set(sub_dict['title'].split(" "))
bad_words = set(['a','an','Wikipedia,','free','-','the','encyclopedia','and'])
names = names.difference(bad_words)
for link in sub_dict[url]['external_urls']
for name in names:
re_string = re.compile("www\..*"name
for line in web_page:
# print "next line:"
# print line
if "external text" in line:
re_string = re.compile("<th><a .*>(.*)</a>")
vc_name = re.search(re_string, line)
if vc_name is not None:
vc_name = vc_name.group(1)
#print vc_name
re_string = re.compile('href="(.{,100}?)".*?>')
vc_url = re.search(re_string, line)
if vc_url is not None:
vc_url = vc_url.group(1)
vc_dict[vc_name] = vc_url
#print vc_dict[vc_name]
else:
continue
else:
re_string = re.compile("<th><a.*>(.*)</a>")
vc_name = re.search(re_string, line)
if vc_name is not None:
vc_name = vc_name.group(1)
#print vc_name
re_string = re.compile('href="(.{,100}?)".*?>')
wiki_url = re.search(re_string, line)
if wiki_url is not None:
wiki_url = "http://en.wikipedia.org" + wiki_url.group(1)
# print wiki_url
else:
continue
try:
second_page = urllib2.urlopen(wiki_url)
second_page_iter = iter(second_page)
for line in second_page_iter:
if "Website</th>" in line:
line = next(second_page_iter)
re_string = re.compile('href="(.{,100}?)".*?>')
vc_url = re.search(re_string, line)
if vc_url is not None:
vc_url = vc_url.group(1)
vc_dict[vc_name] = vc_url
# print vc_dict[vc_name]
break
except:
print " "
for key in vc_dict.keys():
print "%s : %s" % (key, vc_dict[key])
|
# Generated by Django 2.2.4 on 2020-12-17 03:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `description` TextField to app_fav_books.Book.

    preserve_default=False: 'default desc' only back-fills existing rows
    during this migration and is not kept as the field's default.
    """

    dependencies = [
        ('app_fav_books', '0002_book'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='description',
            field=models.TextField(default='default desc'),
            preserve_default=False,
        ),
    ]
|
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
from matplotlib import pyplot as plt
import math
# Sample 2-D points, column-wise: point i is (x_points[i], y_points[i]).
x_points = [2,2,8,5,7,6,1,4]
y_points = [10,5,4,8,5,4,2,9]
# Alternative data set kept for experiments:
# x_points = [0.4,0.22,0.35,0.26,0.08,0.45]
# y_points = [0.53,0.38,0.32,0.19,0.41,0.3]
points = np.array([x_points,y_points])  # shape (2, n): row 0 = x, row 1 = y
def get_eucledian_distance(x1, x2, y1, y2):
    """Euclidean distance between (x1, y1) and (x2, y2), rounded to 2 decimals.

    (Name spelling kept as-is: callers elsewhere use it.)
    """
    dx = x1 - x2
    dy = y1 - y2
    return round(math.sqrt(dx * dx + dy * dy), 2)
def init_distance_matrix(points):
    """Build the symmetric pairwise Euclidean-distance matrix.

    `points` is a (2, n) array: row 0 holds x coordinates, row 1 holds y.
    Returns an (n, n) numpy array with zeros on the diagonal.
    """
    n = len(points[0])
    distances = np.zeros((n, n))
    for i in range(n):
        xi, yi = points[0][i], points[1][i]
        for j in range(n):
            distances[i][j] = get_eucledian_distance(xi, points[0][j], yi, points[1][j])
    return distances
def get_min_in_matrix(matrix):
    """Return [row, col] of the smallest non-zero entry of a square matrix.

    Falls back to [1, 1] when every entry is zero (original behaviour kept).
    """
    dimension = len(matrix[0])
    # FIX: replaced the magic 9999 sentinel (which broke for matrices whose
    # distances all exceed it) with +inf, and renamed the local that
    # shadowed the builtin `min`.
    best = float("inf")
    best_pos = [1, 1]
    for row in range(dimension):
        for column in range(dimension):
            value = matrix[row][column]
            if value != 0 and value < best:
                best = value
                best_pos[0] = row
                best_pos[1] = column
    return best_pos
def get_clustered_matrix(distance_matrix, flag):
    """Hierarchical clustering: repeatedly merge the two closest clusters
    until a 2x2 matrix remains, printing each intermediate matrix.

    flag == 'min' selects single linkage; any other value selects complete
    linkage (max).
    """
    mini = get_min_in_matrix(distance_matrix)
    cluster1_index = mini[0]  # the merged cluster is stored at the lower index
    cluster2_index = mini[1]
    dimension = len(distance_matrix[0]) - 1
    clustered_matrix = np.zeros((dimension, dimension))
    if dimension == 2:
        # Base case: build the final 2x2 matrix and stop.
        for row in range(dimension):
            for column in range(dimension):
                if row != cluster1_index and row != cluster2_index:  # unchanged cluster: copy
                    clustered_matrix[row][column] = distance_matrix[row][column]
                else:
                    if flag == 'min':
                        clustered_matrix[row][column] = min(distance_matrix[cluster1_index][column], distance_matrix[cluster2_index][column])
                    else:
                        clustered_matrix[row][column] = max(distance_matrix[cluster1_index][column], distance_matrix[cluster2_index][column])
        print('\n')
        print('[updated matrix]')
        print(clustered_matrix)
        print('\n\n\n')
        return clustered_matrix
    else:
        for row in range(dimension):
            for column in range(dimension):
                if row != cluster1_index and row != cluster2_index:  # unchanged cluster: copy
                    clustered_matrix[row][column] = distance_matrix[row][column]
                else:
                    # BUG FIX: this branch previously always used min(), so the
                    # 'max' (complete-linkage) flag was ignored on every merge
                    # except the final 2x2 one.
                    if flag == 'min':
                        clustered_matrix[row][column] = min(distance_matrix[cluster1_index][column], distance_matrix[cluster2_index][column])
                    else:
                        clustered_matrix[row][column] = max(distance_matrix[cluster1_index][column], distance_matrix[cluster2_index][column])
        print('cluster distance from all points')
        print(clustered_matrix[cluster1_index])
        print('\n')
        print('[updated matrix]')
        print(clustered_matrix)
        print('\n\n\n')
        return get_clustered_matrix(clustered_matrix, flag)
# Run the agglomerative clustering once with single linkage and once with
# complete linkage, printing every intermediate matrix.
distance_matrix = init_distance_matrix(points)
print('=======================using min==================================')
m = get_clustered_matrix(distance_matrix,'min')
print('=======================using max==================================')
m = get_clustered_matrix(distance_matrix,'max')
# X = np.array([[2,10],
# [2,5],
# [8,4],
# [5,8],
# [7,5],
# [6,4],
# [1,2],
# [4,9]
# ])
# single = linkage(X, 'single')
# fig = plt.figure(figsize=(5,5))
# dn = dendrogram(single)
# complete = linkage(X, 'complete')
# fig = plt.figure(figsize=(5,5))
# dn = dendrogram(complete)
# plt.show()
# print(single[0])
|
# -*- coding: UTF-8 -*-
from . import bp_2DVectors
from flask import render_template
@bp_2DVectors.route('/',methods=['GET'])
def index():
    # Landing page of the 2-D vectors blueprint.
    return render_template('2DVectors/index.html')
@bp_2DVectors.route('/2DVectorsRep',methods=['GET'])
def VectorsRep2D():
    # Static page: 2-D vector representation.
    return render_template('2DVectors/2DVectorsRep.html')
@bp_2DVectors.route('/2DVectorOperations',methods=['GET'])
def VectorOperations2D():
    # Static page: 2-D vector operations.
    return render_template('2DVectors/2DVectorOperations.html')
|
#!/usr/bin/env python
from collections import namedtuple
from .quaternion import Quaternion
class Point(namedtuple('Point', ('x', 'y', 'z'))):
    """Immutable 3-D point (a namedtuple subclass)."""

    def __new__(cls, x=0, y=0, z=0):
        return super(Point, cls).__new__(cls, x, y, z)

    def rotate(self, q: "Quaternion"):
        """Return a NEW Point: this point rotated by unit quaternion `q`.

        BUG FIX: the original assigned the result to self.__x/__y/__z, but a
        namedtuple is immutable -- those writes only created name-mangled
        attributes in the instance dict and the coordinates never changed.
        The rotated point is now returned. (Annotation made a forward
        reference so the class does not require Quaternion at definition
        time; any object with w/x/y/z attributes works.)
        """
        # p' = q * p * conj(q), expanded component-wise.
        ix = q.w * self.x + q.y * self.z - q.z * self.y
        iy = q.w * self.y + q.z * self.x - q.x * self.z
        iz = q.w * self.z + q.x * self.y - q.y * self.x
        iw = -q.x * self.x - q.y * self.y - q.z * self.z
        return Point(
            ix * q.w + iw * -q.x + iy * -q.z - iz * -q.y,
            iy * q.w + iw * -q.y + iz * -q.x - ix * -q.z,
            iz * q.w + iw * -q.z + ix * -q.y - iy * -q.x,
        )
|
"""
Util classes for HTTP/REST
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
# pylint: disable=C0103
class HTTPStatusCodes(object):
    """
    Constants for HTTP status codes
    """
    # 2xx: success
    HTTP_200_OK = 200
    HTTP_201_CREATED = 201
    HTTP_202_ACCEPTED = 202
    HTTP_204_NO_CONTENT = 204
    # 3xx: redirection
    HTTP_304_NOT_MODIFIED = 304
    # 4xx: client errors
    HTTP_400_BAD_REQUEST = 400
    HTTP_401_UNAUTHORIZED = 401
    HTTP_402_PAYMENT_REQUIRED = 402
    HTTP_403_FORBIDDEN = 403
    HTTP_404_NOT_FOUND = 404
    HTTP_405_METHOD_NOT_ALLOWED = 405
    HTTP_406_NOT_ACCEPTABLE = 406
    HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407
    HTTP_408_REQUEST_TIMEOUT = 408
    HTTP_409_CONFLICT = 409
    HTTP_410_GONE = 410
    HTTP_411_LENGTH_REQUIRED = 411
    HTTP_412_PRECONDITION_FAILED = 412
    HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413
    HTTP_414_REQUEST_URI_TOO_LARGE = 414
    HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415
    HTTP_416_REQUEST_RANGE_NOT_SATISFIABLE = 416
    HTTP_417_EXPECTATION_FAILED = 417
    HTTP_418_IM_A_TEAPOT = 418
    HTTP_422_UNPROCESSABLE_ENTITY = 422
    HTTP_423_LOCKED = 423
    HTTP_424_FAILED_DEPENDENCY = 424
    HTTP_426_UPGRADE_REQUIRED = 426
    HTTP_428_PRECONDITION_REQUIRED = 428
    HTTP_429_TOO_MANY_REQUESTS = 429
    HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
    # 5xx: server errors
    HTTP_500_INTERNAL_SERVER_ERROR = 500
    HTTP_501_NOT_IMPLEMENTED = 501
    HTTP_503_SERVICE_UNAVAILABLE = 503
    HTTP_504_GATEWAY_TIMEOUT = 504
    HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505
    HTTP_506_VARIANT_ALSO_NEGOTIATES = 506
    HTTP_507_INSUFFICIENT_STORAGE = 507
    HTTP_508_LOOP_DETECTED = 508
    HTTP_509_BANDWIDTH_LIMIT_EXCEEDED = 509
    HTTP_510_NOT_EXTENDED = 510
    HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511
# Mapping of vAPI standard errors to HTTP error codes:
# https://wiki.eng.vmware.com/VAPI/Specs/VMODL2toREST/Reference/REST-error-mapping
# Mapping of vAPI standard errors to HTTP error codes:
# https://wiki.eng.vmware.com/VAPI/Specs/VMODL2toREST/Reference/REST-error-mapping
# Most client-side errors collapse to 400; authentication, availability
# and timeout errors keep their dedicated HTTP status codes.
vapi_to_http_error_map = {
    'com.vmware.vapi.std.errors.already_exists':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.already_in_desired_state':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.feature_in_use':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.internal_server_error':
        HTTPStatusCodes.HTTP_500_INTERNAL_SERVER_ERROR,
    'com.vmware.vapi.std.errors.invalid_argument':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.invalid_element_configuration':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.invalid_element_type':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.invalid_request':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.not_found':
        HTTPStatusCodes.HTTP_404_NOT_FOUND,
    'com.vmware.vapi.std.errors.not_allowed_in_current_state':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.operation_not_found':
        HTTPStatusCodes.HTTP_404_NOT_FOUND,
    'com.vmware.vapi.std.errors.resource_busy':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.resource_in_use':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.resource_inaccessible':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.service_unavailable':
        HTTPStatusCodes.HTTP_503_SERVICE_UNAVAILABLE,
    'com.vmware.vapi.std.errors.timed_out':
        HTTPStatusCodes.HTTP_504_GATEWAY_TIMEOUT,
    'com.vmware.vapi.std.errors.unable_to_allocate_resource':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
    'com.vmware.vapi.std.errors.unauthenticated':
        HTTPStatusCodes.HTTP_401_UNAUTHORIZED,
    'com.vmware.vapi.std.errors.unauthorized':
        HTTPStatusCodes.HTTP_403_FORBIDDEN,
    'com.vmware.vapi.std.errors.unsupported':
        HTTPStatusCodes.HTTP_400_BAD_REQUEST,
}
# Mapping of HTTP error codes to vAPI standard errors:
# https://wiki.eng.vmware.com/VAPI/Specs/REST/error_mapping
# Mapping of HTTP error codes to vAPI standard errors:
# https://wiki.eng.vmware.com/VAPI/Specs/REST/error_mapping
# Note this is not the inverse of vapi_to_http_error_map: several HTTP
# codes share one vAPI error, and codes with no precise counterpart fall
# back to generic errors such as invalid_request or error.
http_to_vapi_error_map = {
    HTTPStatusCodes.HTTP_400_BAD_REQUEST:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_401_UNAUTHORIZED:
        'com.vmware.vapi.std.errors.unauthenticated',
    HTTPStatusCodes.HTTP_402_PAYMENT_REQUIRED:
        'com.vmware.vapi.std.errors.unauthorized',
    HTTPStatusCodes.HTTP_403_FORBIDDEN:
        'com.vmware.vapi.std.errors.unauthorized',
    HTTPStatusCodes.HTTP_404_NOT_FOUND:
        'com.vmware.vapi.std.errors.not_found',
    HTTPStatusCodes.HTTP_405_METHOD_NOT_ALLOWED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_406_NOT_ACCEPTABLE:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_407_PROXY_AUTHENTICATION_REQUIRED:
        'com.vmware.vapi.std.errors.unauthenticated',
    HTTPStatusCodes.HTTP_408_REQUEST_TIMEOUT:
        'com.vmware.vapi.std.errors.timed_out',
    HTTPStatusCodes.HTTP_409_CONFLICT:
        'com.vmware.vapi.std.errors.concurrent_change',
    HTTPStatusCodes.HTTP_410_GONE:
        'com.vmware.vapi.std.errors.not_found',
    HTTPStatusCodes.HTTP_411_LENGTH_REQUIRED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_412_PRECONDITION_FAILED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_413_REQUEST_ENTITY_TOO_LARGE:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_414_REQUEST_URI_TOO_LARGE:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_415_UNSUPPORTED_MEDIA_TYPE:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_416_REQUEST_RANGE_NOT_SATISFIABLE:
        'com.vmware.vapi.std.errors.resource_inaccessible',
    HTTPStatusCodes.HTTP_417_EXPECTATION_FAILED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_418_IM_A_TEAPOT:
        'com.vmware.vapi.std.errors.error',
    HTTPStatusCodes.HTTP_422_UNPROCESSABLE_ENTITY:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_423_LOCKED:
        'com.vmware.vapi.std.errors.resource_busy',
    HTTPStatusCodes.HTTP_424_FAILED_DEPENDENCY:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_426_UPGRADE_REQUIRED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_428_PRECONDITION_REQUIRED:
        'com.vmware.vapi.std.errors.concurrent_change',
    HTTPStatusCodes.HTTP_429_TOO_MANY_REQUESTS:
        'com.vmware.vapi.std.errors.service_unavailable',
    HTTPStatusCodes.HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_500_INTERNAL_SERVER_ERROR:
        'com.vmware.vapi.std.errors.internal_server_error',
    HTTPStatusCodes.HTTP_501_NOT_IMPLEMENTED:
        'com.vmware.vapi.std.errors.error',
    HTTPStatusCodes.HTTP_503_SERVICE_UNAVAILABLE:
        'com.vmware.vapi.std.errors.service_unavailable',
    HTTPStatusCodes.HTTP_504_GATEWAY_TIMEOUT:
        'com.vmware.vapi.std.errors.timed_out',
    HTTPStatusCodes.HTTP_505_HTTP_VERSION_NOT_SUPPORTED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_506_VARIANT_ALSO_NEGOTIATES:
        'com.vmware.vapi.std.errors.internal_server_error',
    HTTPStatusCodes.HTTP_507_INSUFFICIENT_STORAGE:
        'com.vmware.vapi.std.errors.unable_to_allocate_resource',
    HTTPStatusCodes.HTTP_508_LOOP_DETECTED:
        'com.vmware.vapi.std.errors.internal_server_error',
    HTTPStatusCodes.HTTP_509_BANDWIDTH_LIMIT_EXCEEDED:
        'com.vmware.vapi.std.errors.unable_to_allocate_resource',
    HTTPStatusCodes.HTTP_510_NOT_EXTENDED:
        'com.vmware.vapi.std.errors.invalid_request',
    HTTPStatusCodes.HTTP_511_NETWORK_AUTHENTICATION_REQUIRED:
        'com.vmware.vapi.std.errors.unauthenticated',
}
# List of HTTP status codes representing successful operations
# List of HTTP status codes representing successful operations.
# 304 is included: a Not Modified response means the cached
# representation is still valid, i.e. the request succeeded.
successful_status_codes = [
    HTTPStatusCodes.HTTP_200_OK,
    HTTPStatusCodes.HTTP_201_CREATED,
    HTTPStatusCodes.HTTP_202_ACCEPTED,
    HTTPStatusCodes.HTTP_204_NO_CONTENT,
    HTTPStatusCodes.HTTP_304_NOT_MODIFIED,
]
|
# create plots
import matplotlib.pyplot as plt
from datetime import datetime
# dictionary for conversion of variables (velicina)
prevodnik = {
    'kumulativni_pocet_nakazenych': 'Kumulativnรญ poฤet nakaลพenรฝch',
    # Typo fix: label previously read 'Kumulatnvnรญ'; aligned with the
    # 'Kumulativnรญ' spelling used by the other entries.
    'kumulativni_pocet_vylecenych': 'Kumulativnรญ poฤet vylรฉฤenรฝch',
    'kumulativni_pocet_umrti': 'Kumulativnรญ poฤet รบmrtรญ',
    'kumulativni_pocet_testu': 'Kumulativnรญ poฤet testลฏ',
}
def create_plot(x, y, velicina, date_modified):
    """Plot a COVID time series and save it as a PNG file.

    Parameters:
        x: iterable of ISO date strings ("YYYY-MM-DD") for the x axis
        y: values for the y axis
        velicina: variable key (a key of `prevodnik`); selects the
            y-axis label and the output file name prefix
        date_modified: string appended to the output file name
    """
    # Open a fresh figure window for this chart.
    plt.figure(figsize=(10, 6))
    # Reformat ISO dates to the Czech day-first format.
    x_reformated = [
        datetime.strptime(datum, "%Y-%m-%d").strftime("%d.%m.%Y")
        for datum in x
    ]
    # Plot the series.
    plt.plot(x_reformated, y, linestyle='dashed', color='r')
    # Tick every 14 days. Fix: rotation is passed as the number 90 —
    # the original passed the string '90', which matplotlib only
    # accepts by coercing it to float.
    tick_positions = range(0, len(x_reformated), 14)
    plt.xticks(tick_positions,
               [x_reformated[w] for w in tick_positions],
               rotation=90, fontsize=10)
    # Axis labels and title (Czech, user-facing).
    plt.xlabel('datum [14 dnรญ]', fontsize=12)
    plt.ylabel(prevodnik[velicina], fontsize=12)
    plt.title('Vรฝvoj epidemie koronaviru v ฤR', fontsize=16)
    # Fit everything into the figure and limit the x range to the data.
    plt.tight_layout()
    plt.xlim([x_reformated[0], x_reformated[-1]])
    plt.grid(True)
    # Save as "<velicina>_chart_<date_modified>.png".
    # Typo fix in the message: 'Succesfully' -> 'Successfully'.
    plot_name = velicina + '_chart_' + date_modified + '.png'
    plt.savefig(plot_name, dpi=200)
    print(f'Successfully created {plot_name}')
|
from django.db import models
from datetime import datetime,timezone;
import string;
import random;
def getPromoCode():
    """Generate a random 8-character alphanumeric promo code.

    NOTE(review): uses the non-cryptographic `random` module; if promo
    codes must be unguessable, switch to `secrets.choice`.
    """
    alpha_numeric = string.ascii_letters + string.digits
    return ''.join(random.choice(alpha_numeric) for _ in range(8))
# Time of module import, in UTC (timezone-aware).
# NOTE(review): evaluated once at import time. If this is used as a
# model-field default, callers likely want a callable evaluated per
# save (e.g. django.utils.timezone.now) — confirm against usage.
now = datetime.now(timezone.utc)
import socket
import sys
import select
import getpass
import getopt
import time
#Klient TCP
# Parse command-line options: -p port, -s server, -l login nickname,
# -i (suppress public messages).
try:
    opts, ar = getopt.getopt(sys.argv[1:], 'p:s:l:i')
except getopt.GetoptError as ge:
    print(repr(ge))
    sys.exit()
# Defaults: port 55500, localhost, OS user name as nickname,
# public messages shown (wyl_wys == 1).
value = ''
port = int(55500)
serwer = 'localhost'
pseudonim = getpass.getuser()
wyl_wys = 1
if len(opts) > 0:
    for opt, arg in opts:
        if opt in ('-p'):
            value = int(arg)
            port = value
        elif opt in ('-s'):
            value = str(arg)
            serwer = value
        elif opt in ('-l'):
            value = str(arg.strip())
            pseudonim = value
        elif opt in ('-i'):
            # -i: hide broadcast (PUB_MSG) traffic.
            wyl_wys = 2
else:
    # "brak wybranych opcji" = "no options selected".
    # NOTE(review): indentation reconstructed — this 'else' is assumed
    # to pair with 'if len(opts) > 0' (exit when started without any
    # flags); confirm against the original file.
    print('brak wybranych opcji')
    sys.exit(1)
# Connect and log in with the chosen nickname (LLOGIN protocol command).
g = socket.socket()
g.connect((serwer, port))
pseudo = 'LLOGIN' + ' ' + str(pseudonim) + '\r\n'
g.sendall(pseudo.encode('utf-8'))
# Main loop: multiplex between the server socket and stdin.
while True:
    r, w, x = select.select([g, sys.stdin], [], [])
    if g in r:
        odp = g.recv(1024)
        od = odp.decode('utf-8')
        od = od.split()
        if len(odp) > 0:
            if od[0] == 'PUB_MSG':
                # Public messages are shown unless -i was given.
                if wyl_wys != 2:
                    print('>>>', odp.decode('utf-8'))
            else:
                print('>>>', odp.decode('utf-8'))
        if not odp:
            # Empty read: the server closed the connection.
            print('Koniec pracy serwera')
            sys.exit(3)
    if sys.stdin in r:
        wprow = input()
        wpro = wprow.split()
        if len(wpro) > 0:
            n = wpro[0]
            if n == '/rename':
                # /rename <nick>: re-send the login command with a new nick.
                if len(wpro) > 1:
                    n1 = wpro[1]
                    wys = ''
                    wys = 'LLOGIN' + ' ' + str(n1)
                    wys = str(wys)
                    wys = wys.encode('utf-8')
                    g.sendall(wys)
            elif n == '/list':
                # /list: ask the server for the user list.
                wys = ''
                wys = 'LLIST'
                wys = str(wys)
                wys = wys.encode('utf-8')
                g.sendall(wys)
            elif n == '/priv':
                # /priv <nick> <message...>: send a private message.
                if len(wpro) > 2:
                    n1 = wpro[1]
                    wys = ''
                    for w in wpro[2:]:
                        wys = wys + ' ' + str(w)
                    wy = 'PPRIV ' + str(n1) + ' ' + wys
                    wy = str(wy)
                    wy = wy.encode('utf-8')
                    g.sendall(wy)
            else:
                # Anything else is sent to the server verbatim.
                wprow = wprow.encode('utf-8')
                g.sendall(wprow)
|
import datetime
# Module-level simulator state, populated by initsc() before any of the
# pin helpers are used. render: renderer object; pin: pin-state object
# (its attributes pinN / pinAN are read by digitalRead); starttime:
# timestamp captured at simulation start, used by millis().
render = 0
pin = 0
starttime = 0
def initsc(renderA, pinA, starttimeA):
    """Store the renderer, pin object and start time in module globals."""
    global render, pin, starttime
    render, pin, starttime = renderA, pinA, starttimeA
def digitalRead(pinno):
    """Return the boolean state (.st) of a digital pin.

    Pin numbers above 13 map to the analog pins pinA0, pinA1, ...
    """
    global pin
    attr = 'pinA' + str(pinno - 14) if pinno > 13 else 'pin' + str(pinno)
    return bool(vars(pin)[attr].st)
def pinMode(pinno, mode):
    """Configure a pin: "INPUT" selects input mode, anything else output."""
    global pin
    configure = pin.input if mode == "INPUT" else pin.output
    configure(pinno)
def digitalWrite(pinno, mode):
    """Drive a digital pin: True -> 5 V, False -> 0 V, other values ignored."""
    global render
    global pin
    if mode == True:
        level = 5
    elif mode == False:
        level = 0
    else:
        return
    pin.setvol(pinno, level)
def analogWrite(pinno, mode):
    """Set an analog output: maps a 0-255 PWM value to 0-5 V (255/51 == 5)."""
    global render
    global pin
    voltage = mode / 51
    pin.setvol(pinno, voltage)
def millis():
    # Elapsed time since the start time recorded by initsc().
    # NOTE(review): despite the Arduino-style name, this returns a
    # datetime.timedelta, not an integer millisecond count — confirm
    # that callers expect a timedelta.
    return(datetime.datetime.now() - starttime)
def analogRead(pinA):
    """Return the analog reading of a pin, coerced to int."""
    raw = pin.analogread(pinA)
    return int(raw)
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class DolbyVisionMetadataSource(Enum):
    """Closed set of Dolby Vision metadata source identifiers; the member
    values mirror the API's wire-format string constants."""
    INPUT_STREAM = "INPUT_STREAM"
    EMBEDDED = "EMBEDDED"
|
class Basic:
    """Static CRUD helpers for the Cliente/Endereco tables in the
    'dadosCliente.bd' SQLite database.

    Each method opens its own short-lived connection and is guaranteed
    to close it (try/finally), even when a query raises — the original
    leaked the connection on any SQL error.
    """

    @staticmethod
    def pesquisar(id_client):
        """Return all rows for one client joined with its address."""
        import sqlite3
        conexao = sqlite3.connect('dadosCliente.bd')
        try:
            cursor = conexao.cursor()
            cursor.execute(
                '''select * from Cliente inner join Endereco
                   on Cliente.id_endereco = Endereco.id_endereco
                   where id_cliente = ?''',
                [id_client])
            return cursor.fetchall()
        finally:
            conexao.close()

    @staticmethod
    def excluir(id_to_delet, id_to_endereco):
        """Delete a client and its address (address row first), commit."""
        import sqlite3
        conexao = sqlite3.connect('dadosCliente.bd')
        try:
            cursor = conexao.cursor()
            cursor.execute('DELETE FROM Endereco WHERE id_endereco = ?',
                           [id_to_endereco])
            cursor.execute('DELETE FROM Cliente WHERE id_cliente = ?',
                           [id_to_delet])
            conexao.commit()
        finally:
            conexao.close()
        print('excluido com sucesso')

    @staticmethod
    def all():
        """Return every client joined with its address.

        Fix: the original first ran a redundant 'select * from Cliente'
        whose result was discarded before the join query.
        """
        import sqlite3
        conexao = sqlite3.connect('dadosCliente.bd')
        try:
            cursor = conexao.cursor()
            return cursor.execute(
                '''select * from Cliente INNER JOIN Endereco
                   ON Cliente.id_endereco = Endereco.id_endereco''').fetchall()
        finally:
            conexao.close()

    @staticmethod
    def atualizar(id_client):
        """Return one client row (as sqlite3.Row) joined with its address."""
        import sqlite3
        conexao = sqlite3.connect('dadosCliente.bd')
        conexao.row_factory = sqlite3.Row  # rows addressable by column name
        try:
            cursor = conexao.cursor()
            cursor.execute(
                '''select * from Cliente inner join Endereco
                   on Cliente.id_endereco = Endereco.id_endereco
                   where id_cliente = ?''',
                [id_client])
            return cursor.fetchone()
        finally:
            conexao.close()

    @staticmethod
    def inserindo_att_cliente(id, nome, sobrenome, nascimento, sexo):
        """Update a client's personal data and commit."""
        import sqlite3
        conexao = sqlite3.connect('dadosCliente.bd')
        try:
            conexao.execute(
                '''UPDATE Cliente SET nome_cliente= ?, sobrenome = ?,
                   data_nascimento = ?, sexo = ? where id_cliente = ?''',
                [nome, sobrenome, nascimento, sexo, id])
            conexao.commit()
        finally:
            conexao.close()
        print('Cliente atualizado com sucesso')

    @staticmethod
    def inserindo_att_endereco(logradouro, numero, bairro, cidade, estado, cep, id):
        """Update an address record and commit."""
        import sqlite3
        conexao = sqlite3.connect('dadosCliente.bd')
        try:
            conexao.execute(
                '''UPDATE Endereco SET logradouro= ?, numero = ?, bairro = ?,
                   cidade = ?, estado = ?, cep = ?
                   where id_endereco = ?''',
                [logradouro, numero, bairro, cidade, estado, cep, id])
            conexao.commit()
        finally:
            conexao.close()
        print('endereรงo atualizado com sucesso')
|
from numpy import *
# Read a string and print: first char, last char, length, lowercase,
# uppercase, and the string repeated 500 times.
text = input("insira a string:")
first_char = text[0]
last_char = text[-1]
length = len(text)
lowered = text.lower()
uppered = text.upper()
repeated = text * 500
for value in (first_char, last_char, length, lowered, uppered, repeated):
    print(value)
|
def add_submission(user: str, language: str, points: int, results: dict):
    """Record one exam submission and return the updated results dict.

    Appends `points` to the user's score list and increments the
    submission counter for `language`.

    Bug fix: the original indexed results['submissions'] with the global
    name `l` (leaked from the script's output loop) instead of the
    `language` parameter, so the function only worked by accident and
    raised NameError when called standalone.
    """
    results['users'].setdefault(user, []).append(points)
    results['submissions'][language] = results['submissions'].get(language, 0) + 1
    return results
# Read submissions of the form "user-language-points" until the sentinel
# line "exam finished"; a line containing "banned" (e.g. "user-banned")
# removes that user's results entirely.
command, results_data = input(), {"users": {}, "submissions": {}}
while command != "exam finished":
    command = command.split("-")
    if "banned" in command:
        u = command[0]
        del results_data['users'][u]
    else:
        u, l, p = command
        results_data = add_submission(u, l, int(p), results_data)
    command = input()
# Users ordered by best score (descending) then name; languages ordered
# by submission count (descending) then name.
print(f"Results:")
for u, p in sorted(results_data['users'].items(), key=lambda x: (-max(x[1]), x[0])):
    print(f"{u} | {max(p)}")
print(f"Submissions:")
for l, c in sorted(results_data['submissions'].items(), key=lambda x: (-x[1], x[0])):
    print(f"{l} - {c}")
|
# Greedy change-making: for each of t amounts, print the minimum number
# of banknotes needed using the denominations below (largest first).
notes = [100, 50, 10, 5, 2, 1]
t = int(input())
for _ in range(t):
    amount = int(input())
    total_notes = 0
    for note in notes:
        total_notes += amount // note
        amount %= note
    print(total_notes)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Apply seaborn defaults with a 12x8-inch figure size for all plots.
sns.set(rc={"figure.figsize" : (12, 8)})
def plot_history(history):
    """Plot train/validation curves for logcosh, MAE and MSE vs. epoch.

    Parameters:
        history: a Keras-style History object exposing a `.history` dict
            of metric lists and an `.epoch` list.

    Improvement: the original repeated the same figure-building code
    three times; the metrics are now driven by a single loop.
    """
    hist = pd.DataFrame(history.history)
    hist["epoch"] = history.epoch

    # (y-axis label, train column, validation column) for each figure.
    metrics = (
        ("Logcosh Error", "logcosh", "val_logcosh"),
        ("Mean Abs Error", "mean_absolute_error", "val_mean_absolute_error"),
        ("Mean Square Error", "mean_squared_error", "val_mean_squared_error"),
    )
    for ylabel, train_col, val_col in metrics:
        plt.figure()
        plt.xlabel("Epoch")
        plt.ylabel(ylabel)
        plt.plot(hist["epoch"], hist[train_col], label="Train Error")
        plt.plot(hist["epoch"], hist[val_col], label="Val Error")
        plt.legend()
    plt.show()
def norm(df, stats=None, method=None):
    """Min-max scale every column except 'Identifier' into [0, 1].

    The divisor is floored at 1e-6 so constant columns do not divide
    by zero. `stats` and `method` are accepted for interface
    compatibility but unused.
    """
    as_float = df.astype(float)
    scaled = as_float.copy()
    for column in as_float.columns:
        if column == 'Identifier':
            continue
        values = as_float[column]
        low = values.min()
        span = max(1e-6, values.max() - low)
        scaled[column] = (values - low) / span
    return scaled
def denorm(df, stats, method=None):
    """Invert min-max scaling on the 'Temperature' column in place.

    `stats` must provide 'max' and 'min' columns; the first row's values
    define the original range. Returns the mutated DataFrame.
    """
    low = stats['min'].iloc[0]
    span = stats['max'].iloc[0] - low
    df['Temperature'] = df['Temperature'].map(lambda v: v * span + low)
    return df
def add_axes_to_data(x):
    """Expand a 1-D array to shape (1, 1, len(x), 1) without copying data."""
    # np.newaxis is just None, so index with None directly.
    return x[None, None, :, None]
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
# Importando as Bibliotecas
from bs4 import BeautifulSoup
import urllib.request
import os
import time
import re
import json
import csv
class SmartWatcher():  # Web-crawler robot class
    def __init__(self):
        # Re-run the interactive menu until the user answers "1"
        # ("finish") at the final prompt inside main().
        while True:
            resposta = self.main()
            if resposta == "1":
                break
    def main(self):
        # Interactive menu: choose the target page, then the output mode.
        url1 = 'https://www.vultr.com/products/cloud-compute/#pricing/'  # first target page
        url2 = 'https://www.digitalocean.com/pricing/'  # second target page
        print("Olรก, este รฉ o robรด SmartWatcher, que extrai informaรงรตes das pรกginas alvo.")
        print("O SmartWatcher pode adquirir informaรงรตes de dois links diferentes. Selecione o link desejado.")
        print("Link 1: {}".format(url1))
        print("Link 2: {}".format(url2))
        opcao = input(("Selecione a opรงรฃo desejada (1 ou 2): "))  # pick which page to scrape
        if int(opcao)==1 or int(opcao)==2:
            if int(opcao)==1:  # the selected link is fed to the BeautifulSoup pipeline
                url = url1
                value_check = 10  # expected number of pricing entries on this page
            else:
                url = url2
                value_check = 6
            print("Certo! Agora, de forma deseja mostrar os dados obtidos?")
            print("Opรงรฃo 1: --print")  # print to the terminal
            print("Opรงรฃo 2: --save_json")  # save to a JSON file
            print("Opรงรฃo 3: --save_csv")  # save to a CSV file
            modo = input(("Selecione a opรงรฃo desejada (1, 2 ou 3): "))
            if int(modo)==1 or int(modo)==2 or int(modo)==3:  # cast the option string to int
                if int(modo)==1:
                    print("Certo! Os dados serรฃo mostrados no terminal.")
                    self.print_tela(url,value_check)
                elif int(modo)==2:
                    print("Certo! Os dados serรฃo salvados no formato JSON.")
                    self.save_json(url,value_check)
                else:
                    print("Certo! Os dados serรฃo salvados no formato CSV.")
                    self.save_csv(url,value_check)
                resposta = input(("Deseja finalizar a rotina (1), ou realizar novas leituras(2)? "))  # finish, or go back to the start of the menu
                if int(resposta)==1 or int(resposta)==2:
                    os.system('clear')
                    return resposta
                else:
                    print("Opรงรฃo invรกlida, reiniciando o sistema.")  # invalid input: restart the menu loop
                    time.sleep(1.5)
                    os.system('clear')
                    return resposta
            else:
                print("Opรงรฃo invรกlida, reiniciando o sistema.")  # invalid output mode: restart
                time.sleep(1.5)
                os.system('clear')
        else:
            print("Opรงรฃo invรกlida, reiniciando o sistema.")  # invalid link choice: restart
            time.sleep(1.5)
            os.system('clear')
    # Terminal progress bar helper.
    def printProgressBar (self,iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = 'โ', printEnd = "\r"):
        percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))  # progress percentage
        filledLength = int(length * iteration // total)  # filled portion of the bar
        bar = fill * filledLength + '-' * (length - filledLength)  # the rendered bar
        print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)  # full line, carriage-return overwrite
        # Emit a newline once complete.
        if iteration == total:
            print()
    def find_pattern(self,texts,pattern):
        # Apply the regex to the element's text and return the first
        # match; returns None implicitly when nothing matches.
        if re.search(pattern, texts.text) is not None:
            for result in re.finditer(pattern, texts.text):
                return result[0]
    def webscraper(self,url):
        # Scrape storage/CPU/memory/bandwidth/price lists from the page.
        cpu = []  # CPU values
        memory =[]  # memory values
        storage = []  # SSD values
        bandwidth = []  # bandwidth values
        price = []  # price values
        user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'  # request header parameter
        headers={'User-Agent':user_agent,}  # request headers
        request=urllib.request.Request(url,None,headers)  # request with headers and address
        html = urllib.request.urlopen(request)  # fetch the url
        bs = BeautifulSoup(html, 'lxml')  # parse the fetched page
        if url == "https://www.vultr.com/products/cloud-compute/#pricing/":
            results = bs.select('div strong')  # elements holding the requested fields (storage, CPU, memory, bandwidth, price)
            counter = 0  # which field filter to apply next
            count = False  # when set, advance the counter
            reset = False  # when set, reset the counter
            for typing in results:
                if counter==0:  # counter 0: regex for storage values
                    pattern = re.compile(r'[0-9]{2,3}\b GB')
                    storage_data = self.find_pattern(typing,pattern)
                    if storage_data != None:
                        storage.append(storage_data)
                        count = True
                elif counter==1:  # counter 1: regex for CPU values
                    pattern = re.compile(r'[0-9]{1,2}\b CPU')
                    cpu_data = self.find_pattern(typing,pattern)
                    if cpu_data != None:
                        cpu.append(cpu_data)
                        count = True
                elif counter==2:  # counter 2: regex for memory values
                    pattern= re.compile(r'[0-9]{1,4}\b ((\bMB\b)|(\bGB\b))')
                    memory_data = self.find_pattern(typing,pattern)
                    if memory_data != None:
                        memory.append(memory_data)
                        count = True
                elif counter==3:  # counter 3: regex for bandwidth values
                    pattern = re.compile(r'([0-9]*[.])?[0-9]+\b TB')
                    bandwidth_data = self.find_pattern(typing,pattern)
                    if bandwidth_data != None:
                        bandwidth.append(bandwidth_data)
                        count = True
                elif counter==4:  # counter 4: regex for price values
                    pattern = re.compile(r'[\$]([0-9]*[.])?[0-9]+')
                    price_data = self.find_pattern(typing,pattern)
                    if price_data != None:
                        price.append(price_data)
                        reset = True
                if count == True:  # advance to the next field filter
                    counter+=1
                    count = False
                if reset == True:  # full record read: start over at field 0
                    counter=0
                    reset = False
            complete = [storage,cpu,memory,bandwidth,price]  # all field lists, returned as the result
            return complete
        else:
            results = bs.select('div span.largePrice')  # elements holding the price field
            for typing in results:
                price.append("$"+typing.text)
            results2 = bs.select('div li.priceBoxItem div ul li')  # elements holding storage, CPU, memory and bandwidth
            counter = 0  # which field filter to apply next
            count = False  # when set, advance the counter
            reset = False  # when set, reset the counter
            for typing2 in results2:
                if counter==0:  # counter 0: regex for memory, then CPU
                    pattern= re.compile(r'[0-9]{1,4}\b GB')
                    memory_data = self.find_pattern(typing2,pattern)
                    if memory_data != None:
                        memory.append(memory_data)
                    pattern = re.compile(r'[0-9]{1,2}\b ((\bCPU\b)|(\bCPUs\b))')
                    cpu_data = self.find_pattern(typing2,pattern)
                    if cpu_data != None:
                        cpu.append(cpu_data)
                        count = True
                elif counter==1:  # counter 1: regex for storage values
                    pattern = re.compile(r'[0-9]{2,3}\b GB SSD')
                    storage_data = self.find_pattern(typing2,pattern)
                    if storage_data != None:
                        storage.append(storage_data)
                        count = True
                elif counter==2:  # counter 2: regex for bandwidth values
                    pattern = re.compile(r'[0-9]{1,4}\b ((\bGB\b)|(\bTB\b))')
                    bandwidth_data = self.find_pattern(typing2,pattern)
                    if bandwidth_data != None:
                        bandwidth.append(bandwidth_data)
                        reset = True
                if count == True:  # advance to the next field filter
                    counter+=1
                    count = False
                if reset == True:  # full record read: start over at field 0
                    counter=0
                    reset = False
            complete = [storage,cpu,memory,bandwidth,price]  # all field lists, returned as the result
            return complete
    def print_tela(self,url,value_check):  # show results on the terminal
        self.printProgressBar(0, value_check, prefix = 'Progresso:', suffix = 'Completo', length = 50)  # progress bar at 0%
        data = self.webscraper(url)  # scrape the site content
        if (len(data[0])==value_check)and(len(data[1])==value_check)and(len(data[2])==value_check)and(len(data[3])==value_check)and(len(data[4])==value_check):  # sanity-check the scrape so indexing below cannot fail
            for item in range(value_check):
                time.sleep(0.1)
                print("Instance {}: |Storage: {}|CPU: {}|Memory: {}| Bandwidth: {}| Price: {}".format(item+1,data[0][item],data[1][item],data[2][item],data[3][item],data[4][item]))  # scraped values printed to the terminal
                self.printProgressBar(item + 1, value_check, prefix = 'Progresso:', suffix = 'Completo', length = 50)  # update the progress bar
        else:
            print("Houve alguma falha na leitura, o sistema serรก reiniciado")
    def save_json(self,url,value_check):  # save results as a JSON file
        arquivo = input(("Digite o nome do arquivo JSON: "))+".json"  # pick the file name
        results = self.webscraper(url)  # scrape the site content
        data = {}
        self.printProgressBar(0, value_check, prefix = 'Progresso:', suffix = 'Completo', length = 50)  # progress bar at 0%
        for item in range(value_check):  # arrange the data in JSON form
            instance = 'Instance {}'.format(item+1)
            data[instance] = {
                'Storage': results[0][item],
                'CPU': results[1][item],
                'Memory': results[2][item],
                'Bandwidth': results[3][item],
                'Price': results[4][item]
            }
            self.printProgressBar(item + 1, value_check, prefix = 'Progresso:', suffix = 'Completo', length = 50)  # update the progress bar
        with open(arquivo, "w") as outfile:  # write the JSON file
            json.dump(data, outfile)
    def save_csv(self,url,value_check):  # save results as a CSV file
        arquivo = input(("Digite o nome do arquivo CSV: "))+".csv"  # pick the file name
        results = self.webscraper(url)  # scrape the site content
        f = csv.writer(open(arquivo, 'w'))  # create the CSV file
        f.writerow(['Instances','Storage', 'CPU', 'Memory', 'Bandwidth', 'Price'])
        self.printProgressBar(0, value_check, prefix = 'Progresso:', suffix = 'Completo', length = 50)  # progress bar at 0%
        for item in range(value_check):  # arrange the data in table rows
            instance = 'Instance {}'.format(item+1)
            f.writerow([instance,results[0][item],results[1][item],results[2][item],results[3][item],results[4][item]])  # write one data row
            self.printProgressBar(item + 1, value_check, prefix = 'Progresso:', suffix = 'Completo', length = 50)  # update the progress bar
# Run the interactive crawler only when executed as a script:
# SmartWatcher.__init__ enters a blocking input() loop, so constructing
# it unconditionally at import time would hang any importer of this module.
if __name__ == "__main__":
    runrobot = SmartWatcher()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
from sklearn.cluster import KMeans, AgglomerativeClustering, FeatureAgglomeration, MeanShift, SpectralClustering, estimate_bandwidth
from itertools import combinations
# Read DF
# Load the precomputed bearing feature table; the first CSV column is the index.
df_bear = pd.read_csv('bearing_final_data.csv', index_col=0)
# Feature Normalization
# Min-max scale all features to [0, 1]. fit_transform returns a bare
# ndarray, so wrap it back into a DataFrame (column labels become 0..n-1).
min_max_scaler = preprocessing.MinMaxScaler()
df_bear = min_max_scaler.fit_transform(df_bear)
df_bear = pd.DataFrame(df_bear)
# Function to iterate through all possible combinations between 2 features
def iter_feature(arr, r):
    """Return all r-length combinations of `arr`, materialized as a list."""
    return [*combinations(arr, r)]
# Get Column names in dataset as list (integer labels after the
# ndarray-to-DataFrame round-trip performed during normalization).
arr = df_bear.columns.to_list()
#Function for Silhouette scores and Plots
def scores_and_plots(cluster_model, model_name, df, feat_one, feat_two):
    """Fit *cluster_model* on *df*, print the average silhouette score, and
    draw (1) a per-cluster silhouette plot and (2) a scatter of the clustered
    points for features *feat_one*/*feat_two*.

    NOTE(review): reads the globals ``n_clusters`` (the cluster count being
    reported) and ``df_vib_amp`` (the frame used for the scatter) instead of
    deriving them from its arguments -- callers must keep those globals in
    sync with *df*.
    """
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)
    # Clustering Model fit
    clusterer = cluster_model
    cluster_labels = clusterer.fit_predict(df)
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed clusters
    silhouette_avg = silhouette_score(df, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(df, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        color = cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    ax2.set_xticks([-0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8])
    # 2nd Plot showing the actual clusters formed
    colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
    # NOTE(review): plots the global df_vib_amp, not the df argument.
    ax2.scatter(df_vib_amp[feat_one], df_vib_amp[feat_two], marker='.', s=30, lw=0, alpha=0.7,
                c=colors, edgecolor='k')
    # Labeling the clusters
    # NOTE(review): `centers` is never used, and models without centroids
    # (AgglomerativeClustering, MeanShift-after-fit variants, SpectralClustering)
    # have no cluster_centers_, so this line raises AttributeError for them.
    centers = clusterer.cluster_centers_
    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")
    plt.suptitle((f"Silhouette analysis for {model_name} clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')
# KMEANS CLUSTERING MODEL
# Cluster every combination of r features (r = 2..6) and run a silhouette
# analysis for each candidate cluster count.  The original code repeated this
# whole block once per r with hand-unrolled tuple indexing, and it constructed
# the KMeans model once *before* the `for n_clusters` loop -- so the very
# first pass referenced an undefined `n_clusters` and later passes reused a
# stale value.  The model is now built fresh inside the loop.
model_name = 'KMeans'
range_n_clusters = [2, 3, 4, 5, 6, 7]
for r in (2, 3, 4, 5, 6):
    feature_comb = iter_feature(arr, r)
    for tup in feature_comb:
        # Join the selected feature columns into one frame (index-aligned,
        # equivalent to the original chained .to_frame().join(...) calls).
        df_vib_amp = df_bear[list(tup)].copy()
        df_vib_amp = df_vib_amp.reset_index()
        print("Features: " + " and ".join(str(feat) for feat in tup))
        df = df_vib_amp  # scores_and_plots also reads the df_vib_amp global
        for n_clusters in range_n_clusters:
            cluster_model = KMeans(n_clusters=n_clusters, init='k-means++', random_state=10)
            # Only the first two features of the combination are plotted.
            scores_and_plots(cluster_model, model_name, df, tup[0], tup[1])
# MODEL COMPARISON
# Reset df with 2 best features for model comparison
# NOTE(review): df_bear was rebuilt from an ndarray and has integer column
# labels, so indexing by 'a2_x_mean' will raise KeyError -- confirm which
# frame these names come from.
df_compare = df_bear['a2_x_mean'].to_frame().join(df_bear['a2_x_amp_max'].to_frame())
df_compare = df_compare.reset_index()
feature_one = 'a2_x_mean'
feature_two = 'a2_x_amp_max'
print(f"Features: a2_x_mean and a2_x_amp_max")
# AGGLOMERATIVE CLUSTERING
df = df_compare
model_name = 'AgglomerativeClustering'
# NOTE(review): the model is built *before* the loop, so it uses whatever
# n_clusters is left over from the previous sweep (7), not each loop value.
# Also, scores_and_plots accesses cluster_centers_, which this model lacks.
cluster_model = AgglomerativeClustering(n_clusters=n_clusters)
range_n_clusters = [2, 3, 4, 5, 6, 7]
for n_clusters in range_n_clusters:
    scores_and_plots(cluster_model, model_name, df, feature_one, feature_two)
# MEANSHIFT CLUSTERING
df = df_compare
model_name = 'MeanShift Clustering'
bandwidth = estimate_bandwidth(df, quantile=0.2, n_samples=600)
cluster_model = MeanShift(bandwidth=bandwidth, bin_seeding=True)
scores_and_plots(cluster_model, model_name, df, feature_one, feature_two)
# SPACTRAL CLUSTERING
# NOTE(review): "Spactral" is a typo for "Spectral" (appears in plot titles).
df = df_compare
model_name = 'Spactral Clustering'
# NOTE(review): same stale-n_clusters issue as above; SpectralClustering
# also has no cluster_centers_ attribute.
cluster_model = SpectralClustering(n_clusters=n_clusters, assign_labels='discretize', random_state=42)
range_n_clusters = [2, 3, 4, 5, 6, 7]
for n_clusters in range_n_clusters:
    scores_and_plots(cluster_model, model_name, df, feature_one, feature_two)
import os
import os.path
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from base import BaseDataLoader
from torch.utils.data import Dataset
import torch
path_to_img = "C:/Users/10138/Documents/yottacloud/code/water-meter-detect/data/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/JPEGImages/"
def read_images(root, train=True):
    """Parse a VOC-style annotation list file.

    Each line is an image name followed by 5 numbers per box:
    ``x1 y1 x2 y2 class``.  Returns (fnames, boxes, labels) where ``boxes``
    is a list of float Tensors of shape [n_boxes, 4] and ``labels`` a list
    of LongTensors (class index shifted by +1, presumably so 0 can mean
    background -- TODO confirm).
    """
    # NOTE(review): concatenates without path separators, producing e.g.
    # "<root>VOC2007train.txt" -- confirm that is the intended file layout.
    txt_fname = root+('VOC2007')+('train.txt' if train else 'test.txt')
    with open(txt_fname) as f:
        lines = f.readlines()
    fnames = []
    boxes = []
    labels = []
    for line in lines:
        splited = line.split()
        fnames.append(splited[0])
        num_boxes = (len(splited) - 1) // 5  # 5 values per annotated box
        box = []
        label = []
        for i in range(num_boxes):
            x = float(splited[1 + 5 * i])
            y = float(splited[2 + 5 * i])
            x2 = float(splited[3 + 5 * i])
            y2 = float(splited[4 + 5 * i])
            c = splited[5 + 5 * i]
            box.append([x, y, x2, y2])
            label.append(int(c) + 1)  # shift class ids by +1
        boxes.append(torch.Tensor(box))
        labels.append(torch.LongTensor(label))
    return fnames, boxes, labels
def encoder(boxes, labels):
    """
    boxes (tensor) [[x1,y1,x2,y2],[]] -- coordinates normalized to [0,1]
    labels (tensor) [...]
    return 7x7x30 YOLO-style target: per grid cell, two box slots
    ([dx,dy] at 0:2/5:7, [w,h] at 2:4/7:9, confidence at 4/9) and
    one-hot class channels from index 10 up (labels[i] + 9).
    """
    grid_num = 7
    target = torch.zeros((grid_num, grid_num, 30))
    cell_size = 1. / grid_num
    wh = boxes[:, 2:] - boxes[:, :2]          # box width/height
    cxcy = (boxes[:, 2:] + boxes[:, :2]) / 2  # box center
    for i in range(cxcy.size()[0]):
        cxcy_sample = cxcy[i]
        ij = (cxcy_sample / cell_size).ceil() - 1  # grid cell containing the center
        target[int(ij[1]), int(ij[0]), 4] = 1      # confidence, box slot 1
        target[int(ij[1]), int(ij[0]), 9] = 1      # confidence, box slot 2
        target[int(ij[1]), int(ij[0]), int(labels[i]) + 9] = 1  # one-hot class
        # Top-left corner of the matched grid cell, in relative coordinates
        # (comment translated from the original Chinese).
        xy = ij * cell_size
        delta_xy = (cxcy_sample - xy) / cell_size  # center offset within the cell
        target[int(ij[1]), int(ij[0]), 2:4] = wh[i]
        target[int(ij[1]), int(ij[0]), :2] = delta_xy
        target[int(ij[1]), int(ij[0]), 7:9] = wh[i]
        target[int(ij[1]), int(ij[0]), 5:7] = delta_xy
    return target
def subMean(bgr, mean):
    """Subtract a per-channel *mean* from the image array *bgr*."""
    offset = np.array(mean, dtype=np.float32)
    return bgr - offset
class WaterMeterDataset(Dataset):
    """Detection dataset producing (image, 7x7x30 YOLO target) pairs.

    The annotation txt file has lines of the form
    ``image_name.jpg x y w h c x y w h c`` -- five numbers per target, so
    that example line describes an image with two targets.
    (Docstring translated from the original Chinese.)
    """
    def __init__(self, root, trsfm, train=True):
        self.root = root
        self.train = train
        # NOTE(review): attribute name is misspelled ("tranform") but is
        # used consistently below, so renaming would touch all call sites.
        self.tranform = trsfm
        self.fnames, self.boxes, self.labels = read_images(root, train=self.train)
        self.num_samples = len(self.boxes)
        self.mean = (123, 117, 104)  # RGB per-channel mean subtracted from each image
    def __getitem__(self, idx):
        # Load the image (BGR), then normalize boxes and encode the target.
        fname = self.fnames[idx]
        img = cv2.imread(os.path.join(path_to_img+fname))
        boxes = self.boxes[idx].clone()
        labels = self.labels[idx].clone()
        # # debug
        # box_show = boxes.numpy().reshape(-1)
        # print(box_show)
        # img_show = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # pt1 = (int(box_show[0]), int(box_show[1]))
        # pt2 = (int(box_show[2]), int(box_show[3]))
        # cv2.rectangle(img_show, pt1=pt1, pt2=pt2, color=(0, 255, 0), thickness=1)
        # plt.figure()
        #
        # cv2.rectangle(img, pt1=(10, 10), pt2=(100, 100), color=(0, 255, 0), thickness=1)
        # plt.imshow(img_show)
        # plt.show()
        # # debug
        h, w, _ = img.shape
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = subMean(img, self.mean)
        img = Image.fromarray(np.uint8(img))
        img = self.tranform(img)
        boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes)  # pixel coords -> [0,1]
        target = encoder(boxes, labels)
        return img, target
    def __len__(self):
        return self.num_samples
class WaterMeterDataLoader(BaseDataLoader):
    """
    Water-Meter data loading demo using BaseDataLoader
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
        # Resize to the 448x448 input the YOLO-style encoder expects, then
        # convert to tensor and normalize.
        trsfm = transforms.Compose([
            transforms.Resize((448, 448)),
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        self.data_dir = data_dir
        self.dataset = WaterMeterDataset(self.data_dir, trsfm, train = training)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
|
import os
import sys
import re
import shlex
# Pre-compiled patterns for scanning verbose linker output.
# Fix: the patterns now use raw strings -- '\S' in a plain string is an
# invalid escape sequence (DeprecationWarning, SyntaxError in future Pythons).
GCC_DRIVER_LINE = re.compile(r'^Driving:')   # gcc driver echo line
POSIX_STATIC_EXT = re.compile(r'\S+\.a')     # reference to a static archive
POSIX_LIB_FLAGS = re.compile(r'-l\S+')       # -l<lib> link flag

def is_output_verbose(out):
    """Return True if *out* looks like verbose link output, i.e. it mentions
    static archives or -l flags on lines other than gcc 'Driving:' echoes."""
    for line in out.splitlines():
        if not GCC_DRIVER_LINE.search(line):
            if POSIX_STATIC_EXT.search(line) or POSIX_LIB_FLAGS.search(line):
                return True
    return False
# linkflags which match those are ignored
# NOTE(review): the entries look like shell wildcards but are compiled as
# regexes below, where '*' means "repeat the previous character" -- e.g.
# r'-lang*' matches '-lan', '-lang', '-langg', ... Confirm this is intended.
LINKFLAGS_IGNORED = [r'-lang*', r'-lcrt[a-zA-Z0-9]*\.o', r'-lc$', r'-lSystem',
                     r'-libmil', r'-LIST:*', r'-LNO:*']
if os.name == 'nt':
    # On Windows, additionally ignore common MS / MinGW system libraries.
    LINKFLAGS_IGNORED.extend([r'-lfrt*', r'-luser32',
                              r'-lkernel32', r'-ladvapi32', r'-lmsvcrt',
                              r'-lshell32', r'-lmingw', r'-lmoldname'])
else:
    LINKFLAGS_IGNORED.append(r'-lgcc*')
# Pre-compiled patterns (matched with .match, i.e. anchored at the start).
RLINKFLAGS_IGNORED = [re.compile(f) for f in LINKFLAGS_IGNORED]
def _match_ignore(line):
    """True if the link flag *line* matches any ignored-flag pattern.

    Replaces the original build-a-list-then-test idiom with any(), which
    short-circuits on the first match instead of scanning every pattern.
    """
    return any(pattern.match(line) for pattern in RLINKFLAGS_IGNORED)
def parse_flink(output):
    """Given the output of verbose link of fortran compiler, this
    returns a list of flags necessary for linking using the standard
    linker."""
    # TODO: On windows ?
    flags = []
    for raw_line in output.splitlines():
        if GCC_DRIVER_LINE.match(raw_line):
            continue  # skip gcc driver echo lines
        _parse_f77link_line(raw_line, flags)
    return flags
# Options whose argument arrives as the NEXT token (-L, -R, -u, -Y, -z alone).
SPACE_OPTS = re.compile('^-[LRuYz]$')
# Options with the argument fused into the same token (-L<path>, -R<path>).
NOSPACE_OPTS = re.compile('^-[RL]')
def _parse_f77link_line(line, final_flags):
    """Tokenize one line of verbose Fortran link output and append the link
    flags worth keeping to *final_flags* (also returned)."""
    # Bug fix: the original encoded `line` to bytes here, but Python 3's
    # shlex requires str input (bytes break tokenization), so that step
    # is removed.
    lexer = shlex.shlex(line, posix = True)
    lexer.whitespace_split = True
    t = lexer.get_token()
    tmp_flags = []
    while t:
        def parse(token):
            # Here we go (convention for wildcard is shell, not regex !)
            # 1 TODO: we first get some root .a libraries
            # 2 TODO: take everything starting by -bI:*
            # 3 Ignore the following flags: -lang* | -lcrt*.o | -lc |
            # -lgcc* | -lSystem | -libmil | -LANG:=* | -LIST:* | -LNO:*)
            # 4 take into account -lkernel32
            # 5 For options of the kind -[[LRuYz]], as they take one argument
            # after, the actual option is the next token
            # 6 For -YP,*: take and replace by -Larg where arg is the old
            # argument
            # 7 For -[lLR]*: take
            # step 3
            if _match_ignore(token):
                pass
            # step 4
            elif token.startswith('-lkernel32') and sys.platform == 'cygwin':
                tmp_flags.append(token)
            # step 5
            elif SPACE_OPTS.match(token):
                # Consume (and currently discard) the option's argument token.
                t = lexer.get_token()
                # FIXME: this does not make any sense ... pull out
                # what we need from this section
                #if t.startswith('P,'):
                #    t = t[2:]
                #    for opt in t.split(os.pathsep):
                #        tmp_flags.append('-L%s' % opt)
            # step 6
            elif NOSPACE_OPTS.match(token):
                tmp_flags.append(token)
            # step 7
            elif POSIX_LIB_FLAGS.match(token):
                tmp_flags.append(token)
            else:
                # ignore anything not explicitely taken into account
                pass
            t = lexer.get_token()
            return t
        t = parse(t)
    final_flags.extend(tmp_flags)
    return final_flags
|
# Faรงa um Programa que leia trรชs nรบmeros e mostre o maior deles.
# (Read three numbers and print the largest one.)
a = float(input('Informar o 1ยฐ nรบmero: '))
b = float(input('Informar o 2ยฐ nรบmero: '))
c = float(input('Informar o 3ยฐ nรบmero: '))
# Bug fix: the original if/elif only compared b and c against a, so for
# input (1, 2, 3) it printed 2.  max() considers all three values.
print(max(a, b, c))
|
from random import choice
from random import randint
import csv
def Utili():
    """Generate random demo records and dump them to Boys/Girls/Gifts CSVs."""
    BT = ['Miser','Geek','Generous']        # boy type categories
    GT = ['Choosy','Normal','Desperate']    # girl type categories
    GFT= ['Essential','Luxury','Utility']   # gift type categories
    # Each record: an id string, several random numeric attributes (field
    # meanings are not documented here -- TODO confirm), and a random type.
    Boy = [('B'+str(i),randint(2,20),randint(44,120),randint(100,300),randint(1,16),choice(BT))for i in range(1,51)]
    Girl = [('G'+str(i),randint(2,18),randint(36,100),randint(90,250),choice(GT))for i in range(1,26)]
    Gift = [('GFT'+str(i),randint(70,200),randint(90,150),choice(GFT))for i in range(1,100)]
    Create('Boys.csv',Boy)
    Create('Girls.csv',Girl)
    Create('Gifts.csv',Gift)
def Create(file, l):
    """Write the rows in *l* to CSV file *file* (one row per item).

    Fixes: the original opened the file without closing it and without
    newline='', which produces blank rows between records on Windows.
    """
    with open(file, "w", newline="") as f:
        writer = csv.writer(f, delimiter=",")
        writer.writerows(l)
|
from torch import nn
from torch.nn import MaxPool2d
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.activation import Sigmoid, ReLU
def dcn_vgg(input_channels):
    """Build a VGG-style convolutional backbone as an nn.Sequential.

    Five conv stages (64, 128, 256, 512, 512 channels) separated by 2x2
    max-pooling, all 3x3 convolutions with no padding.

    Fix: the original was a SyntaxError -- the commas after each ReLU()
    preceding a MaxPool2d/Conv2d were missing.
    """
    model = nn.Sequential(
        Conv2d(input_channels, 64, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(64, 64, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        Conv2d(64, 128, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(128, 128, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        Conv2d(128, 256, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(256, 256, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        Conv2d(256, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU(),
        Conv2d(512, 512, kernel_size=(3, 3), padding=0),
        ReLU()
    )
    return model
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 order for servers
"""
import boto
import boto.ec2
from boto.mashups.server import Server, ServerSet
from boto.mashups.iobject import IObject
from boto.pyami.config import Config
from boto.sdb.persist import get_domain, set_domain
import time
from boto.compat import StringIO
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge']
class Item(IObject):
    """One line item of an EC2 order: everything needed to launch a set of
    identical instances (region, AMI, instance type, quantity, security
    groups, keypair, config).  Attributes left unset are filled in
    interactively by enter()."""

    def __init__(self):
        self.region = None
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = None
        self.config = None
        # Bug fix: userdata was never initialized, so set_userdata() /
        # get_userdata() raised AttributeError on first use.
        self.userdata = {}

    def set_userdata(self, key, value):
        self.userdata[key] = value

    def get_userdata(self, key):
        return self.userdata[key]

    def set_region(self, region=None):
        """Use *region*, or interactively choose one of the EC2 regions."""
        if region:
            self.region = region
        else:
            l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
            self.region = self.choose_from_list(l, prompt='Choose Region')

    def set_name(self, name=None):
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        if instance_type:
            self.instance_type = instance_type
        else:
            self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')

    def set_quantity(self, n=0):
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')

    def set_ami(self, ami=None):
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        """Append *group*, or interactively pick a security group."""
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
            self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        """Push credentials and SDB persistence settings into self.config."""
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        """Serialize self.config into the string passed as EC2 user-data."""
        s = StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        """Fill every attribute from *params*, prompting for any that are
        missing, then refresh the config with credentials/persistence info."""
        self.region = params.get('region', self.region)
        if not self.region:
            self.set_region()
        self.ec2 = self.region.connect()
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
class Order(IObject):
    """An EC2 order: a list of Items plus display and placement logic."""

    def __init__(self):
        self.items = []
        self.reservation = None

    def add_item(self, **params):
        """Create a new Item (prompting for any missing params) and add it."""
        item = Item()
        item.enter(**params)
        self.items.append(item)

    def display(self):
        """Print a plain-text summary of every item in the order."""
        print('This Order consists of the following items')
        print()
        print('QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair')
        for item in self.items:
            print('%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type,
                                              item.ami.id, item.groups, item.key.name))

    def place(self, block=True):
        """Launch every item; if *block*, poll until all instances run.

        Returns the single Server when only one instance was launched,
        otherwise the whole ServerSet.
        """
        if get_domain() is None:
            print('SDB Persistence Domain not set')
            domain_name = self.get_string('Specify SDB Domain')
            set_domain(domain_name)
        s = ServerSet()
        for item in self.items:
            r = item.ami.run(min_count=1, max_count=item.quantity,
                             key_name=item.key.name, user_data=item.get_userdata_string(),
                             security_groups=item.groups, instance_type=item.instance_type,
                             placement=item.zone.name)
            if block:
                states = [i.state for i in r.instances]
                # Bug fix: the original used `if`, which waited a single
                # 15 s tick and then proceeded even if instances were still
                # pending.  Poll until every instance reports 'running'.
                while states.count('running') != len(states):
                    print(states)
                    time.sleep(15)
                    states = [i.update() for i in r.instances]
            for i in r.instances:
                server = Server()
                server.name = item.name
                server.instance_id = i.id
                server.reservation = r
                server.save()
                s.append(server)
        if len(s) == 1:
            return s[0]
        else:
            return s
|
import pytest
from zig.main_components import Graph
class TestDomElements:
    """Unit tests for attribute passing through Graph/DomElement."""

    # test passing of attributes to DomElement works
    def test_div_creation(self):
        test_id = "123"
        test_figure = "figure"
        test_section = []  # expected default when no section is passed
        # HOW TO TEST ALL attributes efficiently?
        graph = Graph(test_figure, id=test_id)
        assert graph.figure == test_figure
        assert graph.id == test_id
        # check for section
        assert graph.section == test_section
|
# SECURITY(review): hard-coded Twitter API credentials committed to source.
# These should be revoked and loaded from environment variables or a secrets
# store instead of being checked into version control.
consumer_key = 'xgkA30ZnNtxpx5rZ4M2ZeADyt'
consumer_secret = 'xUiCmURo5NVXYVeNrL2pvO8br4B355zGBWrAwOCfa1L1yweZtE'
access_token = '1184086168608088064-jPANfIKR0OsxoubRlO6t8RhdyJ7LCf'
access_secret = 'PhgH1FVI2Gkg2JFYchlcuS2PHz0Y6XGpjD1ifIbUpP932'
|
import webbrowser
class Movie():
    """Represents a movie with a poster image and a YouTube trailer."""

    def __init__(self, title, poster_url, trailer_url):
        """
        Initialize a Movie object
        title = a string of the movie title
        poster_url = a string containing a URL to a poster image
        trailer_url = a string containing a youtube URL trailer
        """
        self.title = title
        self.poster_image_url = poster_url
        self.trailer_youtube_url = trailer_url

    def show_trailer(self):
        """Open the movie trailer in the default web browser."""
        # Bug fix: the original opened self.trailer_url, an attribute that
        # __init__ never sets (it stores trailer_youtube_url), so every
        # call raised AttributeError.
        webbrowser.open(self.trailer_youtube_url)
|
#!/usr/bin/python3
import RPi.GPIO as GPIO
import pigpio
import time
servo = 2  # GPIO pin driving the servo signal line

# more info at http://abyz.me.uk/rpi/pigpio/python.html#set_servo_pulsewidth
pwm = pigpio.pi()
pwm.set_mode(servo, pigpio.OUTPUT)
pwm.set_PWM_frequency( servo, 50 )  # standard 50 Hz servo frame

# Sweep the servo back and forth forever (interrupt with Ctrl-C).
while 1:
    for i in range(500, 2500, 50):  # pulse width in microseconds
        # NOTE(review): 180/2500*i looks off for a 500-2500 us range; the
        # usual mapping is 180*(i-500)/2000 degrees -- confirm intent.
        print( "{} deg".format(180/2500*i) )
        pwm.set_servo_pulsewidth( servo, i ) ;
        time.sleep( 0.5 )
    for i in range(2500, 500, -50):
        print( "{} deg".format(180/2500*i) )
        pwm.set_servo_pulsewidth( servo, i ) ;
        time.sleep( 0.5 )

# turning off servo
# NOTE(review): unreachable -- the `while 1` loop above never exits.
pwm.set_PWM_dutycycle( servo, 0 )
pwm.set_PWM_frequency( servo, 0 )
|
# Generated by Django 3.1.6 on 2021-03-19 05:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: drops descriptions.grid and retargets
    descriptions.criteria at assessmentcriterias (CASCADE on delete)."""

    dependencies = [
        ('Feedback_Management_System', '0013_auto_20210319_1107'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='descriptions',
            name='grid',
        ),
        migrations.AlterField(
            model_name='descriptions',
            name='criteria',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Feedback_Management_System.assessmentcriterias'),
        ),
    ]
|
import h5py
import numpy as np
import os
import matplotlib.pyplot as plt
import math
import affine
import h5py
import argparse
import sklearn
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture as GMM
from sklearn.decomposition import PCA
import pickle
import pandas as pd
# Cluster FFT-derived trajectory features: load per-trajectory features from
# an HDF5 file, reduce the FFT features with PCA, append time-domain angle
# features, and fit a Gaussian mixture model.
parser = argparse.ArgumentParser()
parser.add_argument('--readfile', help = 'Of the format fftfeat.h5')
parser.add_argument('--savefile', help = 'Just the name, eg. 7')
parser.add_argument('--n_clusters', help = 'Number of clusters')
parser.add_argument('--n_trajs', help = 'Number of trajectories to consider for training clustering')
args = parser.parse_args()
n_trajs = int(args.n_trajs)
# Input features, output labels file, and model dump paths.
hf = h5py.File('/projects/kumar-lab/mehtav/fft/'+ args.readfile, 'r')
hfw = h5py.File('/projects/kumar-lab/mehtav/fft/'+ args.savefile + '_fftlabels.h5', 'w')
gmmsave = '/projects/kumar-lab/mehtav/fft/' + args.savefile + 'gmm' + '.sav'
pcasave = '/projects/kumar-lab/mehtav/fft/' + args.savefile + 'pca' + '.sav'
key_list = [key for key in hf.keys()]
pointdata = []
fft_features = []
time_features = []
traj_key_list = []
# train = []
for traj in range(n_trajs):
    print(traj)
    key = key_list[traj]
    # print(hf[key].keys())
    pointdata.append(hf.get(key+ '/points')[()]) # 240,7,2
    traj_key_list.append(hf.get(key+ '/traj_key')[()])
    fft_features.append(hf.get(key+ '/fft_features')[()])
    time_features.append(hf.get(key+ '/rangles')[()])
    # train = train + fft_features.tolist()
# Drop the last FFT window; trim 8 frames from each end of the angle series.
fft_features = np.array(fft_features)[:,:-1,:]
time_features = np.array(time_features)[:, 8:-8]
print(fft_features.shape, time_features.shape)
# Flatten (trajectory, window) into a single sample axis for fitting.
train = np.reshape(fft_features, (fft_features.shape[0]*fft_features.shape[1], fft_features.shape[2]*fft_features.shape[3]))
time_features = np.reshape(time_features, (time_features.shape[0]*time_features.shape[1], time_features.shape[2]))
print(train.shape)
print('Fitting PCA')
pca = PCA(n_components=6, random_state=0).fit(train)
train_pca = pca.transform(train)
print('Fitted with explained variance of ', pca.explained_variance_ratio_.sum())
pickle.dump(pca, open(pcasave, 'wb'))
print(train_pca.shape)
# Concatenate PCA-reduced FFT features with the time-domain features.
train_time_fft = np.concatenate((train_pca, time_features ), axis = 1)
print(train_time_fft.shape)
print('Fitting GMM')
gmm = GMM(n_components=int(args.n_clusters), random_state=0).fit(train_time_fft)
# kmeans = KMeans(n_clusters=int(args.n_clusters), random_state=0).fit(train_time_fft)
labels = gmm.predict(train_time_fft)
# NOTE(review): relies on sklearn.metrics being importable via the bare
# `import sklearn` at the top -- confirm it is loaded in this environment.
db_score = (sklearn.metrics.davies_bouldin_score(train_time_fft, labels))
print("Converged: ", str(gmm.converged_) )
print('Fitted with DB score: ', db_score)
pickle.dump(gmm, open(gmmsave, 'wb'))
# Reshape flat labels back to (trajectory, window).
labels = np.reshape(labels, (fft_features.shape[0], fft_features.shape[1]))
unique_elements, counts_elements = np.unique(labels, return_counts=True)
print(labels.shape)
print("Unique elements: ", unique_elements)
print("Frequency: ", counts_elements)
def list2string(list):
    """Serialize a sequence as 'a,b,c,' -- note the trailing comma, which
    downstream consumers of the CSV label column rely on.

    Replaces the original quadratic string-concatenation loop with a join.
    (The parameter shadows the builtin `list`; kept for interface
    compatibility with existing callers.)
    """
    return "".join(str(item) + "," for item in list)
# One comma-separated label string per trajectory, saved alongside its key.
labels_string = [list2string(labels[i]) for i in range(labels.shape[0])]
video_labels = pd.DataFrame({"traj_key": traj_key_list, "label": labels_string})
video_labels.to_csv('/projects/kumar-lab/mehtav/fft/' + args.savefile + 'videolabels' + '.csv')
# Write per-trajectory points and labels into the output HDF5 file.
for traj in range(n_trajs):
    key = key_list[traj]
    g = hfw.create_group(key)
    pointtraj = hf.get(key+ '/points')[()] # 240,7,2
    # fft_features = hf.get(key+ '/fft_features')[()]
    g.create_dataset('points', data = pointtraj)
    # g.create_dataset('fft_features', data = fft_features)
    g.create_dataset('labels', data = labels[traj])
# NOTE(review): the read handle `hf` is never closed.
hfw.close()
from sgmon.log import get_logger
import requests
from requests.exceptions import RequestException
logger = get_logger(__name__)

class HTTPClientError(Exception):
    """Raised when an underlying requests call fails (see handle_exception)."""
    pass
def handle_exception(func):
    """
    Decorator to catch any requests RequestException and re-raise it as
    HTTPClientError, preserving the original exception as the cause.
    """
    from functools import wraps  # local import keeps module-level deps unchanged

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except RequestException as err:
            raise HTTPClientError("Exception into {}(): {}".format(
                func.__name__, err)) from err
    return wrapped
class HTTPClient(object):
    """Thin wrapper around a requests.Session bound to a single URL."""

    def __init__(self, url):
        self.url = url
        self.ses = requests.Session()

    def set_headers(self, headers):
        """Merge *headers* into the session's default headers."""
        self.ses.headers.update(headers)

    def has_failed(self, response):
        """Return True for 4xx/5xx status codes."""
        status_code = str(response.status_code)
        return status_code.startswith('4') or \
            status_code.startswith('5')

    @handle_exception
    def get(self):
        """GET self.url and return the decoded JSON body.

        NOTE(review): any non-200 response is silently mapped to [] --
        callers cannot distinguish "empty result" from "request failed".
        """
        response = self.ses.get(self.url)
        if response.status_code != 200:
            return []
        logger.info("Get success: {}".format(response))
        return response.json()

    @handle_exception
    def post(self, data):
        """POST *data* to self.url; raises on 4xx/5xx, else returns body text."""
        response = self.ses.post(self.url, data=data)
        if self.has_failed(response):
            logger.error("Post failed: %s", response)
            response.raise_for_status()
        return response.text
|
from odoo import models, fields, api
import logging
_logger = logging.getLogger(__name__)
class DocumentRejection(models.TransientModel):
    _name = 'document.rejection'
    _description = "Reject Documents"

    # Document being rejected; pre-filled from context (see default_get).
    document_id = fields.Many2one(
        'document.management', string="Document", readonly="True")
    note = fields.Text(string="Reject Reason", required="True")

    def action_reject_document(self):
        """This method is used to log the reject reason into the mail.message model
        so that a history of the document can be viewed"""
        state = self.env.context.get('state_new', False)
        self.document_id.message_post(
            body=("Document Rejected Due To. : %s") % (self.note), subject="Rejected")
        self.document_id.write({'state': state})

    @api.model
    def default_get(self, fields):
        """Pre-fill document_id from the 'parent_id' key of the context."""
        res = super(DocumentRejection, self).default_get(fields)
        try:
            res["document_id"] = self.env.context.get('parent_id', False)
        except Exception as e:
            # NOTE(review): broad except + print swallows errors silently;
            # consider logging through _logger instead.
            print(str(e))
        return res
|
import cv2
import numpy as np
img = cv2.imread('images/Frog.jpg')
new_img = np.zeros(img.shape, dtype='uint8')  # black canvas for drawing the contours
# Find the object's contours (comment translated from the original Russian):
# grayscale -> Gaussian blur -> Canny edges -> contour extraction.
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (5, 5), 0)
img = cv2.Canny(img, 100, 140)
con, hir = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(new_img, con, -1, (230, 111, 148), 1)
cv2.imshow('Result', new_img)
cv2.waitKey(0)
|
# Input from sys arguments: read two numbers and print their sum.
import sys

x = sys.argv[1]
y = sys.argv[2]
# Bug fix: the original printed sys.argv[0] (the script name) under the
# label "sys.argv[1]=".
print("sys.argv[1]=", sys.argv[1])
print("x=", x)
print("y=", y)
# Bug fix: argv values are strings, so x+y concatenated ("2"+"3" -> "23");
# convert to numbers before adding.
print("sum= ", float(x) + float(y))
|
import multiprocessing as mp
from threading import Thread
from common.constants import SCENE_GRID_BLOCK_WIDTH, SCENE_GRID_BLOCK_HEIGHT, SCENE_WIDTH, SCENE_HEIGHT
from common.enums.climb_state import ClimbState
from common.enums.collision_control_methods import CCMethods
from common.enums.direction import Direction
from common.enums.layout_block import LayoutBlock
from server.models.collision.pipe_message import Message
class CollisionControl(mp.Process):
    """Collision-detection service that runs in its own process.

    Requests arrive as Message objects over pipes. Every public handler
    answers by sending a Message(CCMethods.EMPTY, result) back on the
    requesting client's pipe endpoint.
    """

    def __init__(self, endpoint):
        """Store the server pipe end and start the process immediately."""
        mp.Process.__init__(self)
        self.server_endpoint = endpoint
        self.endpoints = []
        # NOTE(review): threads created in add_endpoint are never appended
        # here -- confirm whether this list is intentionally unused.
        self.threads = []
        self.start()

    def add_endpoint(self, pipe_endpoint):
        """Register a client pipe and serve it from a dedicated thread."""
        self.endpoints.append(pipe_endpoint)
        index = self.endpoints.index(pipe_endpoint)
        thread = Thread(target=self.do_work, args=[index])
        thread.start()

    def do_work(self, index):
        """Serve one client pipe until an 'end' message arrives."""
        while True:
            msg = self.endpoints[index].recv()
            if msg.func == "end":
                break
            self.dispatch(msg, index=index)

    def run(self):
        """Process main loop: serve the server pipe until 'end'."""
        while True:
            msg = self.server_endpoint.recv()
            if msg.func == "end":
                break
            self.dispatch(msg)

    def dispatch(self, msg: Message, index=None):
        """Route a message to the handler method named by msg.func."""
        handler = getattr(self, msg.func, None)
        if handler:
            if index is None:
                handler(*msg.args)
            else:
                handler(index, *msg.args)

    def check_end_of_screen_left(self, index, x):
        """Reply True when x has reached the left screen edge."""
        ret_val = False
        if x <= 0:
            ret_val = True
        self.endpoints[index].send(Message(CCMethods.EMPTY, ret_val))

    def check_falling(self, index, player_x, player_y, direction, layout):
        """Reply True when the player should start falling.

        Looks at the layout block under the player's leading edge for the
        given horizontal movement direction.
        """
        ret_value = False
        # Grid row at the player's feet (sprite is 35 px tall).
        y = int((player_y + 35) / SCENE_GRID_BLOCK_HEIGHT)
        if direction == Direction.LEFT:
            x = int((player_x + 20) / SCENE_GRID_BLOCK_WIDTH)
        elif direction == Direction.RIGHT:
            x = int(player_x / SCENE_GRID_BLOCK_WIDTH)
        else:
            # NOTE(review): unlike every other handler, no reply is sent on
            # the pipe in this branch -- confirm callers never block here.
            return False
        if layout[y][x] is None:
            ret_value = True
        elif layout[y][x] == LayoutBlock.Platform or layout[y][x] == LayoutBlock.Ladder:
            # Block exists but starts below the feet: still airborne.
            if (y * SCENE_GRID_BLOCK_HEIGHT) > player_y + 35:
                ret_value = True
        self.endpoints[index].send(Message(CCMethods.EMPTY, ret_value))

    def _check_climbing(self, index, player_x, player_y, layout, feet_offset):
        """Shared ladder test for climbing up/down; replies with a ClimbState.

        feet_offset is the pixel offset from player_y to the feet row used
        when locating the current block (34 for up, 35 for down).
        """
        # default return value, if it's NONE, don't climb
        ret_value = ClimbState.NONE
        # horizontal center of the 26 px wide player sprite
        player_x_center = player_x + 13
        # grid column / row the player currently occupies (feet level)
        x = int(player_x / SCENE_GRID_BLOCK_WIDTH)
        y = int((player_y + feet_offset) / SCENE_GRID_BLOCK_HEIGHT)
        if layout[y][x] == LayoutBlock.Ladder:
            # only the middle of the ladder tile is climbable
            ladder_x_from = x * SCENE_GRID_BLOCK_WIDTH + 5
            ladder_x_to = x * SCENE_GRID_BLOCK_WIDTH + 30
            if (ladder_x_from < player_x_center) and (ladder_x_to > player_x_center):
                # re-check the block at head level
                y = int((player_y + 3) / SCENE_GRID_BLOCK_HEIGHT)
                if layout[y][x] is None:
                    # nothing above the head: the climb is finishing
                    ret_value = ClimbState.FINISH
                else:
                    ret_value = ClimbState.CLIMB
        self.endpoints[index].send(Message(CCMethods.EMPTY, ret_value))

    def check_climbing_up(self, index, player_x, player_y, layout):
        """Reply with the ClimbState for an upward climb attempt."""
        self._check_climbing(index, player_x, player_y, layout, 34)

    def check_climbing_down(self, index, player_x, player_y, layout):
        """Reply with the ClimbState for a downward climb attempt."""
        self._check_climbing(index, player_x, player_y, layout, 35)

    def check_end_of_screen_right(self, index, x):
        """Reply True when x has reached the right screen edge."""
        ret_val = False
        if x >= SCENE_WIDTH - 26:
            ret_val = True
        self.endpoints[index].send(Message(CCMethods.EMPTY, ret_val))

    def check_end_of_screen_vertical(self, index, y):
        """Reply True when y has reached the bottom screen edge."""
        ret_val = False
        if y >= SCENE_HEIGHT - 60:
            ret_val = True
        self.endpoints[index].send(Message(CCMethods.EMPTY, ret_val))

    def _check_sprite_collision(self, index, obj_x, obj_y, obj_w, obj_h, p_x, p_y):
        """Axis-aligned overlap test between an object box and the player.

        The player box is 26x35 px. Replies True on the pipe when the two
        boxes overlap (touching edges count as overlap, matching the
        original per-sprite checks this helper replaces).
        """
        ret_val = True
        obj_x_to = obj_x + obj_w
        obj_y_to = obj_y + obj_h
        player_x_to = p_x + 26
        player_y_to = p_y + 35
        # Separated vertically: object entirely above or entirely below.
        if ((obj_y < p_y) and (obj_y_to < p_y)) or (
                (obj_y > player_y_to) and (obj_y_to > player_y_to)):
            ret_val = False
        # Separated horizontally: object entirely left or entirely right.
        if ((obj_x < p_x) and (obj_x_to < p_x)) or (
                (obj_x > player_x_to) and (obj_x_to > player_x_to)):
            ret_val = False
        self.endpoints[index].send(Message(CCMethods.EMPTY, ret_val))

    def check_barrel_collision(self, index, b_x, b_y, p_x, p_y):
        """Reply True when a 29x29 barrel overlaps the player."""
        self._check_sprite_collision(index, b_x, b_y, 29, 29, p_x, p_y)

    def check_princess_collision(self, index, pr_x, pr_y, p_x, p_y):
        """Reply True when the 34x37 princess sprite overlaps the player."""
        self._check_sprite_collision(index, pr_x, pr_y, 34, 37, p_x, p_y)

    def check_coin_collision(self, index, c_x, c_y, p_x, p_y):
        """Reply True when a 40x40 coin overlaps the player."""
        self._check_sprite_collision(index, c_x, c_y, 40, 40, p_x, p_y)

    def check_gorilla_collision(self, index, g_x, g_y, p_x, p_y):
        """Reply True when the 58x55 gorilla sprite overlaps the player."""
        self._check_sprite_collision(index, g_x, g_y, 58, 55, p_x, p_y)
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time

# Path to the local ChromeDriver binary used to launch Chrome.
chromedriver = 'C:/Users/jinyoung/Pictures/Crawling/janjaemi/chromedriver'
driver = webdriver.Chrome(chromedriver)
driver.get('https://python.org')
# Fail fast if "Python" is not in the page title.
assert "Python" in driver.title
# <input id="id-search-field" name="q">
# Elements can be located by the value of their name attribute:
#   find_element_by_name('q')  - first matching element
#   find_elements_by_name(...) - every matching element
elem = driver.find_element_by_name('q')
# clear()            : reset the input field's text
# send_keys(keyword) : type keyboard input into the element
#   - Keys.RETURN is the enter key
#   - use dir(Keys) to list the names mapped to each key
# Reset the search box.
elem.clear()
# Send the search keyword.
elem.send_keys('Python')
# Press enter.
elem.send_keys(Keys.RETURN)
# Check driver.page_source with assert to confirm a keyword's presence,
# pause with time.sleep() so the browser contents can be inspected,
# and close the browser with driver.quit() when finished.
assert "No results found." not in driver.page_source
# Explicitly wait for a moment.
time.sleep(1)
h3s = driver.find_elements_by_tag_name("h3")
for h3 in h3s:
    print(h3.text)
import boto3
import os
# All instance management happens in this single fixed region.
region = 'eu-west-1'
# Module-level client so it is reused across warm Lambda invocations.
ec2 = boto3.client('ec2', region_name=region)
def handler(event, context):
    """Lambda entry point: start or stop opted-in EC2 instances.

    The triggering CloudWatch rule's ARN (event["resources"][0]) is compared
    against the START_EVENT_ARN / STOP_EVENT_ARN environment variables to
    decide the action. Instances opt in via the tags named by START_TAG /
    STOP_TAG with value 'true'.

    Returns a dict with 'status' and 'message' keys.
    """
    print(event)
    start_event_arn = os.environ["START_EVENT_ARN"]
    stop_event_arn = os.environ["STOP_EVENT_ARN"]
    event_arn = event["resources"][0]
    if event_arn == stop_event_arn:
        # Tag filter name is 'tag:<key>' -- the previous trailing ':' made
        # the filter match nothing.
        filters = [{
            'Name': f'tag:{os.environ["STOP_TAG"]}',
            'Values': ['true']
        }]
        # describe_instances returns a list of reservations, each holding
        # its own list of instances -- flatten before iterating.
        reservations = ec2.describe_instances(Filters=filters)['Reservations']
        instances = [i for r in reservations for i in r['Instances']]
        print(f'Stopping {len(instances)} instances...')
        for instance in instances:
            # Bind the id before branching so the else branch can log it too.
            instance_id = instance['InstanceId']
            # Instance state lives under 'State', not 'Status'.
            if instance['State']['Name'] == 'running':
                print(f'Stopping {instance_id}...')
                ec2.stop_instances(InstanceIds=[instance_id])
            else:
                print(f'{instance_id} is not running.')
        return {
            'status': 200,
            'message': "Instances stopped"
        }
    if event_arn == start_event_arn:
        filters = [{
            'Name': f'tag:{os.environ["START_TAG"]}',
            'Values': ['true']
        }]
        reservations = ec2.describe_instances(Filters=filters)['Reservations']
        instances = [i for r in reservations for i in r['Instances']]
        print(f'Starting {len(instances)} instances...')
        for instance in instances:
            instance_id = instance['InstanceId']
            if instance['State']['Name'] != 'running':
                print(f'Starting {instance_id}...')
                ec2.start_instances(InstanceIds=[instance_id])
            else:
                print(f'{instance_id} is already running.')
        return {
            'status': 200,
            'message': "Instances started"
        }
    return {
        'status': 400,
        'message': 'Bad request: Unhandled event'
    }
|
import pytest
from parso.grammar import load_grammar
from parso import utils
def test_load_inexisting_grammar():
    """Versions without a bundled grammar file raise NotImplementedError."""
    # A far-future release and an ancient one are equally unavailable
    # (neither is likely to ever matter in practice).
    for missing_version in ('15.8', '1.5'):
        with pytest.raises(NotImplementedError):
            load_grammar(version=missing_version)
@pytest.mark.parametrize(('string', 'result'), [
    # bare major versions fall back to a default minor
    ('2', (2, 7)),
    ('3', (3, 6)),
    # anything past major.minor is ignored
    ('1.1', (1, 1)),
    ('1.1.1', (1, 1)),
    ('300.1.31', (300, 1)),
])
def test_parse_version(string, result):
    """Version strings parse to a (major, minor) tuple."""
    parsed = utils._parse_version(string)
    assert parsed == result
@pytest.mark.parametrize('string', ['1.', 'a', '#', '1.3.4.5'])
def test_invalid_grammar_version(string):
    """Malformed version strings are rejected with ValueError."""
    with pytest.raises(ValueError):
        load_grammar(version=string)
def test_grammar_int_version():
    """A numeric (non-string) version must raise TypeError."""
    non_string_version = 3.8
    with pytest.raises(TypeError):
        load_grammar(version=non_string_version)
|
import pandas as pd
import matplotlib.pyplot as plt

# Load the CD interest-rate statistics; the first column is the index.
df = pd.read_csv('./data/bok_statistics_CD_2.csv', header=0, index_col=0)
print(df.head(), '\n')

# Overlay histograms of the CD rate and its change on the same figure.
for column_name in ('CD_rate', 'change'):
    df[column_name].plot(kind='hist')
plt.show()
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from numpy.random import randn
from pandas import Series, DataFrame
import scipy
from scipy import stats

# Load the classic mtcars dataset and give the columns readable names.
address = 'mtcars.csv'
cars = pd.read_csv(address)
cars.columns = [
    'car_name', 'mpg', 'cyl', 'disp', 'hp', 'drat',
    'wt', 'qsec', 'vs', 'am', 'gear', 'carb',
]

# Per-column summaries kept for reference:
#   cars.sum(), cars.sum(axis=1)            - column / row totals
#   cars.median(), cars.mean(), cars.max()  - central tendency / extremes
#   cars.std(), cars.var()                  - spread
mpg = cars.mpg
#   mpg.idxmax()                            - row label of best fuel economy
gear = cars.gear
#   gear.value_counts()                     - frequency of each gear count

# Full descriptive statistics for every numeric column.
print(cars.describe())
|
def is_pangram(sentence):
    """Return True if *sentence* contains every letter a-z, ignoring case.

    The original recomputed sentence.lower() once per alphabet letter;
    lowering once up front avoids that repeated work.
    """
    lowered = sentence.lower()
    return all(chr(code) in lowered for code in range(ord('a'), ord('z') + 1))
|
# Generated by Django 2.1 on 2019-01-29 17:39
from django.db import migrations, models
# Auto-generated by Django; avoid hand-editing once applied.
class Migration(migrations.Migration):
    """Initial migration: creates the Customer table."""
    # First migration for this app, so there are no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=128, verbose_name='First name')),
                ('last_name', models.CharField(blank=True, max_length=128, verbose_name='Last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='Email')),
                # Phone is the unique contact field used by the driver.
                ('phone', models.CharField(help_text='Phone for driver to contact', max_length=16, unique=True, verbose_name='Phone')),
                ('address', models.TextField(help_text='Address for pizza delivery', verbose_name='Address')),
            ],
            options={
                # Default queryset ordering for Customer.
                'ordering': ('first_name', 'last_name'),
            },
        ),
    ]
|
from django.core.mail import send_mail
from django.utils import timezone
from swatusers.models import UserTask
from subprocess import check_call
from swatluu import geotools
import numpy as np
import os
import shutil
# Environment for subprocess calls (gdal_translate): make sure binaries in
# /usr/local/bin are found first on PATH.
env = os.environ.copy()
env['PATH'] = '{0}{1}{2}'.format('/usr/local/bin', os.pathsep, env['PATH'])
class LUUCheckerProcess(object):
    def __init__(self, logger, data=""):
        """
        A LUU Checker process requested by a user. It contains the core
        variables and methods for creating the new composite landuse layer.
        """
        # set initial paths for the input data
        if data == '':
            # No task data supplied: leave every field as an empty placeholder.
            self.subbasin_shapefile_filepath = ''
            self.subbasin_shapefile_filename = ''
            self.base_landuse_raster_filepath = ''
            self.base_landuse_raster_adf_filepath = ''
            self.base_landuse_raster_filename = ''
            self.new_landuse_raster_filepaths = ''
            self.new_landuse_raster_filenames = ''
            self.landuse_percent = ''
            self.process_root_dir = ''
            self.output_dir = ''
            self.output_directory = ''
            self.temp_output_directory = ''
            self.task_id = ''
            self.user_email = ''
            self.user_first_name = ''
            # NOTE(review): '' here vs [] in the populated branch below --
            # confirm nothing appends to error_log on this code path.
            self.error_log = ''
            self.subbasin_count = ''
            self.base_raster_array = ''
            self.base_raster_mask = ''
            self.status = [0, 'Everything checks out.']
            self.new_landuse_layer = ''
        else:
            self.subbasin_shapefile_filepath = data[
                'subbasin_shapefile_filepath']
            self.subbasin_shapefile_filename = data[
                'subbasin_shapefile_filename']
            self.base_landuse_raster_filepath = data[
                'base_landuse_raster_filepath']
            self.base_landuse_raster_adf_filepath = data[
                'base_landuse_raster_adf_filepath']
            self.base_landuse_raster_filename = data[
                'base_landuse_raster_filename']
            self.new_landuse_raster_filepaths = data[
                'new_landuse_raster_filepaths']
            self.new_landuse_raster_filenames = data[
                'new_landuse_raster_filenames']
            self.landuse_percent = data['landuse_percent']
            self.process_root_dir = data['process_root_dir']
            self.output_dir = data['output_dir']
            self.output_directory = data['output_directory']
            self.temp_output_directory = data['temp_output_directory']
            self.task_id = data['task_id']
            self.user_email = data['user_email']
            self.user_first_name = data['user_first_name']
            self.error_log = []
            self.subbasin_count = ''
            self.base_raster_array = []
            self.base_raster_mask = []
            self.status = [0, 'Everything checks out.']
            self.new_landuse_layer = ''
        self.tool_name = 'LUU Checker'
        self.logger = logger

    def start(self):
        """
        Directs the work flow of the LUU Checker task. Essentially these steps
        are taken:
        1) Create output and temporary output directory structure
        2) Convert base landuse raster to geotiff
        3) Read base landuse raster into numpy array
        4) Convert subbasin shapefile into geotiff
        5) Create Emerging LULC (EL) Report
        6) Loop through each new landuse raster
            L1a) Update EL Report with new entry for current new landuse raster
            L1b) Convert new landuse raster to geotiff
            L1c) Read new landuse raster into numpy array
            L1d) Loop through each subbasin
                L2a) Compare (for current subbasin) lulc codes in base landuse
                     raster and new landuse raster
                L2b) Isolate lulc codes that are emerging in new landuse raster
                L2c) Update EL Report with emerging lulc codes
                L2d) Loop through each emerging lulc code
                    L3a) Randomize the indicies in a copy of the base landuse
                         raster that correspond with the current subbasin
                    L3b) Calculate how many pixels should be reclassified to
                         the new lulc code by using the subbasin size and the
                         user provided percentage value
                    L3c) Use the randomized indicies to inject the new lulc
                         into the base landuse raster
        7) Create composite landuse raster using the updated base landuse raster
        8) Close the EL Report
        9) Remove temporary output directory

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        try:
            self.setup_logger()
            self.logger.info('Processing started.')
            # create output directory structure
            self.create_output_dir()
        except Exception as e:
            print(e)
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            # message now reflects both operations inside the try block
            raise Exception('Unable to initialize logger or create output directories.')
        # convert base landuse raster from .adf to .tif
        try:
            output_filepath = self.temp_output_directory + '/' + os.path.basename(
                self.base_landuse_raster_filepath) + '.tif'
            geotools.convert_adf_to_tif(
                self.base_landuse_raster_filepath,
                output_filepath)
        except Exception:
            self.logger.error('Unable to convert raster from .adf to .tif.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception('Unable to convert raster from .adf to .tif.')
        self.logger.info('Converting base raster geotiff into numpy array.')
        try:
            # read base landuse raster (.tif) into numpy array
            base_raster_array = geotools.read_raster(
                self.temp_output_directory + '/' + \
                self.base_landuse_raster_filename + '.tif')[0]
            # construct shapefile layer information
            rows = str(len(base_raster_array))
            cols = str(len(base_raster_array[0]))
            layer_info = {
                "attribute_name": "Subbasin",
                "extent": [cols, rows],
                "layername": "subs1",
            }
        except Exception:
            self.logger.error('Unable to read the base landuse raster.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception('Unable to read the base landuse raster.')
        self.logger.info(
            'Getting count of subbasins from the subbasin shapefile.')
        try:
            # get number of subbasins in shapefile
            total_subbasin_count = len(
                geotools.read_shapefile(self.subbasin_shapefile_filepath))
        except Exception:
            self.logger.error('Unable to read the subbasin shapefile.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception('Unable to read the subbasin shapefile.')
        # path and filename for the soon to be created subbasin geotiff
        output_tif_filepath = self.temp_output_directory + '/subbasin.tif'
        # create geotiff raster of the subbasin shapefile
        self.logger.info('Converting subbasin .shp to .tif.')
        try:
            # convert shapefile to raster
            geotools.rasterize_shapefile(layer_info,
                                         self.subbasin_shapefile_filepath,
                                         output_tif_filepath)
        except Exception:
            self.logger.error(
                'Error converting shapefile to raster. Please make ' + \
                'sure you uploaded file.shp.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception(
                'Error converting shapefile to raster. Please make ' + \
                'sure you uploaded file.shp.')
        self.logger.info('Converting subbasin geotiff into numpy array.')
        try:
            # read rasterized shapefile into numpy array
            rasterized_shapefile = \
                geotools.read_raster(self.temp_output_directory + '/subbasin.tif')[
                    0]
        except Exception:
            self.logger.error(
                'Unable to read the rasterized subbasin geotiff.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception('Unable to read the rasterized subbasin geotiff.')
        self.logger.info('Opening Emerging_LULC_Report for writing.')
        try:
            # remove emerging lulcs report if it already exists
            # NOTE(review): this removes 'Emerging_LULCs.txt' but the report
            # created below is 'Emerging_LULC_Report.txt' -- confirm which
            # filename is the legacy one.
            if os.path.isfile(self.output_directory + '/Emerging_LULCs.txt'):
                os.remove(self.output_directory + '/Emerging_LULCs.txt')
            # create emerging_lulcs text file to store new landuse information
            # NOTE(review): this handle is only closed on the success path;
            # any raise below leaks it until process exit.
            emerging_lulc_report = open(
                self.output_directory + '/Emerging_LULC_Report.txt', 'w')
        except Exception:
            self.logger.error(
                'Unable to create emerging_lulcs text file to store new landuse information.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception(
                'Unable to create emerging_lulcs text file to store new landuse information.')
        self.logger.info('Begin looping through new landuse layers.\n\n')
        # Keep track of indices already used for new code injection
        # Key: subbasin id, Val: indices
        injection_history = {}
        try:
            # loop through each new landuse layer selected by the user
            for landuse_layer in self.new_landuse_raster_filepaths:
                self.logger.info(
                    'LANDUSE LAYER:' + os.path.basename(landuse_layer))
                # write the landuse layer name to report
                emerging_lulc_report.write(landuse_layer + '\n')
                # convert the new landuse layer raster to array
                geotools.convert_adf_to_tif(landuse_layer,
                                            self.temp_output_directory + '/' + os.path.basename(
                                                landuse_layer) + '.tif')
                self.logger.info(
                    'Converting new landuse geotiff into numpy array.')
                self.logger.info(
                    self.temp_output_directory + '/' + os.path.basename(
                        landuse_layer) + '.tif')
                # read new landuse raster (.tif) into numpy array
                new_landuse_raster = geotools.read_raster(
                    self.temp_output_directory + '/' + \
                    os.path.basename(landuse_layer) + '.tif')[0]
                self.logger.info('Begin looping through subbasins.')
                # create feature layers based off the FID field & then use as mask
                # to extract subbasin landuse information
                for i in range(0, total_subbasin_count):
                    self.logger.info('SUBBASIN #' + str(i + 1) + ':')
                    # write the subbasin number to report
                    emerging_lulc_report.write('Subbasin ' + str(i + 1) + '\n')
                    self.logger.info('Finding indicies in base raster array ' +
                                     'that correspond with current subbasin.')
                    # find indicies where the value < 255 (remove the NoData)
                    idx = np.nonzero(rasterized_shapefile == i + 1)
                    # find lulc codes in the new layer that aren't in the base layer
                    new_lulc_codes = self.find_unique_lulc_codes(
                        idx,
                        base_raster_array,
                        new_landuse_raster)
                    # write the emerging lulc to report
                    emerging_lulc_report.write(str(new_lulc_codes) + '\n\n')
                    # inject new lulc codes into the base raster array
                    base_raster_array, injection_history = self.inject_new_lulc_codes(
                        i + 1,
                        idx,
                        new_lulc_codes,
                        base_raster_array,
                        injection_history)
                self.logger.info('End looping through subbasins.')
        except Exception:
            self.logger.error(
                'An error occurred while creating the emerging lulc report.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception(
                'An error occurred while creating the emerging lulc report.')
        self.logger.info('End looping through new landuse layers.\n\n')
        try:
            # convert the updated base raster array (composite) to geotiff
            self.create_composite_raster(
                base_raster_array,
                self.base_landuse_raster_adf_filepath,
                self.temp_output_directory + '/base_new1.tif')
        except Exception:
            self.logger.error(
                'An error occurred while creating the composite raster.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception(
                'An error occurred while creating the composite raster.')
        self.logger.info('Closing Emerging_LULC_Report for writing.')
        # close emerging lulc report
        emerging_lulc_report.close()
        self.logger.info('Removing temporary output directories.')
        # remove temporary output directory
        if os.path.exists(self.temp_output_directory):
            shutil.rmtree(self.temp_output_directory)
        self.logger.info('Processing completed.\n\n')

    def setup_logger(self):
        """Write the task header (id, user) to the process log."""
        # Initialize logger for requested process and set header
        self.logger.info('Task ID: ' + self.task_id)
        self.logger.info('User: ' + self.user_email)
        self.logger.info('Task started.')
        self.logger.info('Initializing variables.')

    def create_output_dir(self):
        """
        Create output directory and its sub-folders. Remove any
        pre-existing output directory. Set up output directory.
        """
        self.logger.info('Creating output directory structure.')
        # create temporary and output folders
        if os.path.exists(self.output_directory):
            shutil.rmtree(self.output_directory)
        if not os.path.exists(self.output_directory):
            os.makedirs(self.output_directory)
        if os.path.exists(self.temp_output_directory):
            shutil.rmtree(self.temp_output_directory)
        if not os.path.exists(self.temp_output_directory):
            os.makedirs(self.temp_output_directory)

    def find_unique_lulc_codes(self, idx, base_raster_array,
                               new_landuse_raster):
        """
        Finds the unique landuse/landcover values in the base landuse raster
        array and the new landuse raster array. Then it identifies any
        landuse/landcover values present in the new landuse raster array that
        are not in the base landuse raster array.

        Parameters
        ----------
        idx: tuple
            Contains row and column indicies for the subbasin
        base_raster_array: array
            Base landuse raster as numpy array
        new_landuse_raster: array
            New landuse raster as numpy array

        Returns
        -------
        new_lulc_codes: array
            Single dimensional array containing landuse/landcover values
            completely unique to the new landuse raster array
        """
        self.logger.info(
            'Finding the landuse/landcover values unique to the new landuse raster.')
        # parse the idx to separate variables
        row, col = idx
        # find unique LULC in base landuse array
        base_raster_lulc_codes = np.unique(base_raster_array[row, col])
        # find unique LULC in new landuse array
        new_raster_lulc_codes = np.unique(new_landuse_raster[row, col])
        # now compare new landuse raster's lulc codes with base raster's lulc codes
        new_lulc_codes = np.setdiff1d(new_raster_lulc_codes,
                                      base_raster_lulc_codes)
        return new_lulc_codes

    def inject_new_lulc_codes(self, subid, idx, new_lulc_codes, base_raster_array, injection_history):
        """
        Takes the lulc codes that emerged in the new landuse raster and then
        injects instances of each code into the base landuse raster. The
        number of pixels to be reclassified to the new lulc codes is determined
        by the size of the subbasin and the percentage value chosen by the user.

        Parameters
        ----------
        subid: int
            Subbasin id
        idx: tuple
            Contains row and column indicies for the subbasin
        new_lulc_codes: array
            Single dimensional array containing landuse/landcover values
            completely unique (emerging) to the new landuse raster array
        base_raster_array: array
            Original base landuse raster as numpy array
        injection_history: dictionary
            Contains randomized indices used by each subbasin

        Returns
        -------
        base_raster_array: array
            Base landuse raster updated with emerging lulcs
        injection_history: dictionary
            Updated randomized-index bookkeeping per subbasin
        """
        self.logger.info(
            'Injecting emerging lulc codes into base raster array.')
        # if newLULC > 0, this implies landuses are present in the new LULC
        if np.size(new_lulc_codes, axis=0) > 0:
            # this implies that an alternative LULC is required containing few cells with additional landuses
            base_raster_array_temp = base_raster_array
            row, col = idx
            # randomize the positions within this subbasin
            index_array = list(range(0, np.shape(idx)[1]))
            np.random.shuffle(index_array)
            # If key does not already exist in dictionary, add it
            if subid not in injection_history.keys():
                injection_history[subid] = index_array
            # Number of new code injections to make
            number_of_new_lulc_cells = int(round(
                (float(self.landuse_percent) / 100.0) * np.shape(idx)[1]))
            # Make sure we have enough randomized indices to handle request
            if len(injection_history[subid]) < number_of_new_lulc_cells:
                # Resets randomized indices
                injection_history[subid] = index_array
            previous_new_lulc_ending_index = 0
            for j in range(0, len(new_lulc_codes)):
                newLULC_idx = injection_history[subid][
                    previous_new_lulc_ending_index:number_of_new_lulc_cells + previous_new_lulc_ending_index]
                previous_new_lulc_ending_index = previous_new_lulc_ending_index + number_of_new_lulc_cells
                base_raster_array[row[newLULC_idx], col[newLULC_idx]] = \
                    new_lulc_codes[j]
                # Remove index from injection history to prevent its re-use
                # NOTE(review): pop(injection_index) with an increasing index
                # skips every other entry as the list shifts, and the slice
                # offset above already advances past consumed entries --
                # this bookkeeping looks suspect; confirm intended behavior
                # before changing it.
                for injection_index in range(0, len(newLULC_idx)):
                    injection_history[subid].pop(injection_index)
        return base_raster_array, injection_history

    def create_composite_raster(self, base_raster_array,
                                base_landuse_raster_adf_filepath,
                                composite_raster_filepath):
        """Write the updated base array to disk and compress it to base_new.tif."""
        self.logger.info('Creating composite landuse raster.')
        geotools.create_raster(base_raster_array,
                               base_landuse_raster_adf_filepath,
                               composite_raster_filepath)
        try:
            # LZW-compress and set the NoData value while converting
            check_call(
                ['gdal_translate', '-co', 'compress=lzw', '-a_nodata', '255',
                 '-of', 'GTiff',
                 self.temp_output_directory + '/base_new1.tif',
                 self.output_directory + '/base_new.tif'], env=env)
        except Exception:
            self.logger.error(
                'Error converting new base raster to geotiff.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)

    def copy_results_to_depot(self):
        """
        Copies output from process over to web directory for user's consumption.
        """
        self.logger.info('Copying output directory to user directory on depot.')
        # If output directory already exists in web directory, remove it
        if os.path.exists(self.output_dir):
            shutil.rmtree(self.output_dir)
        # Copy output over to web directory
        shutil.copytree(self.output_directory, self.output_dir)

    def clean_up_input_data(self):
        """ Removes input data from tmp directory. """
        self.logger.info('Removing input files from tmp.')
        try:
            shutil.rmtree(self.process_root_dir)
        except PermissionError:
            # fixed: previously referenced an undefined module-level `logger`
            self.logger.warning("Unable to remove the input data from /tmp.")

    def email_user_link_to_results(self):
        """
        Emails the user a link to their data that just finished processing. The
        user is informed their data will expire at midnight three days from the
        present date.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.logger.info('Sending user email with link to their data.')
        subject = self.tool_name + ' data is ready'
        message = 'Hi ' + self.user_first_name + ',<br><br>'
        message += 'Your data has finished processing. Please sign in to '
        message += 'the SWAT Tools website and go to the '
        message += '<strong>Task Status</strong> page (found in the navigation menu). '
        message += 'There you will find a record of your completed '
        message += 'task and a link to download the results data. '
        message += 'The link will expire on ' + self.get_expiration_date()
        message += ' (48 hours).<br><br>Sincerely,<br>SWAT Tools'
        try:
            send_mail(
                subject,
                "",
                'SWAT Tools User',
                [self.user_email],
                fail_silently=False,
                html_message=message)
        except Exception:
            # fixed: the previous message here was copy-pasted from the
            # raster conversion step and misreported the failure
            self.logger.error('Unable to send the results email to the user.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            self.email_error_alert_to_user()
            raise Exception('Unable to send the results email to the user.')

    def email_error_alert_to_user(self):
        """
        Emails the user when an error occurs that prevents their data from
        being processed.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.logger.info(
            'Sending user email informing them an error has occurred.')
        subject = self.tool_name + ' error'
        message = 'An error has occurred within ' + self.tool_name + ' while processing your data. '
        message += 'Please verify your inputs are not missing any required files. '
        message += 'If the problem persists, please sign in to SWAT Tools and use '
        message += 'the Contact Us form to request assistance from the SWAT Tools '
        message += 'Admins.'
        message += '<br><br>Sincerely,<br>SWAT Tools'
        try:
            send_mail(
                subject,
                "",
                'SWAT Tools User',
                [self.user_email],
                fail_silently=False,
                html_message=message)
        except Exception:
            self.logger.error(
                'Error sending the user the email informing ' +
                'them of an error occurrence while processing their data.')
            UserTask.objects.filter(task_id=self.task_id).update(task_status=2)
            raise Exception('Error sending the user the email informing ' +
                            'them of an error occurrence while processing their data.')

    def get_expiration_date(self):
        """
        Uses Python's datetime to calculate expiration date for processed data.

        Parameters
        ----------
        None

        Returns
        -------
        date_string: string
            Date (mm-dd-YYYY) 48 hours from the present in string format.
        """
        # fixed: log/docstring said "three days" while the code adds 48 hours
        self.logger.info('Calculating the date 48 hours from now.')
        return (
            timezone.datetime.now() + timezone.timedelta(hours=48)).strftime(
            "%m-%d-%Y %H:%M:%S %Z")

    def update_task_status_in_database(self):
        """
        Adds current task to the database. Helps with removal of task's data
        after it expires.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.logger.info('Updating the user\'s task status.')
        UserTask.objects.filter(
            task_id=self.task_id).update(
            task_status=1,
            time_completed=timezone.datetime.now())
|
#!/usr/bin/python
import sys
import csv

# For each VCF given on the command line, drop whichever of the two sample
# columns (10 or 11) holds a missing genotype ("./.") and write the result
# to <name>onecol.vcf.
for filename in sys.argv[1:]:
    # context managers guarantee both files are closed even on error
    # (the original left them open if a row raised)
    with open(filename, 'r') as f, \
            open(filename[:-4] + "onecol.vcf", 'w') as out:
        reader = csv.reader(f, delimiter='\t', quotechar=None, doublequote=False)
        outcsv = csv.writer(out, delimiter='\t', quotechar=None)
        for row in reader:
            # header lines are copied through untouched
            if row[0].startswith("#"):
                outcsv.writerow(row)
                continue
            if row[9] == "./.":
                row.pop(9)
            elif row[10] == "./.":
                row.pop(10)
            outcsv.writerow(row)
|
def duplicate_count(text):
    """Return the number of distinct characters, compared case-insensitively,
    that occur more than once in *text*.

    Rewritten for Python 3: the original used the py2-only `print text`
    statement and the long-removed `sets.Set`, and printed its input as a
    debug side effect.
    """
    lowered = text.lower()
    # iterate distinct characters only, so each is counted at most once
    return sum(1 for ch in set(lowered) if lowered.count(ch) > 1)
|
# -*- coding: utf-8 -*-
from unittest import mock
from oauthlib.oauth2 import TokenExpiredError
from wbia.utils import call_houston
def disabled_test_call_houston(request):
    """Exercise ``call_houston`` end to end with mocked OAuth plumbing.

    Covers four scenarios against a mocked OAuth2 session:
    1. first call — a token is fetched and a session created;
    2. second call — the cached session is reused, no new token fetch;
    3. TokenExpiredError mid-request — token refetched, request retried;
    4. HTTP 401 response — token refetched, request retried.

    Prefixed ``disabled_`` so pytest does not collect it.
    ``request`` is the pytest fixture used for teardown finalizers.
    """
    # Patch the OAuth client class; undo automatically at teardown.
    client_patch = mock.patch('wbia.utils.BackendApplicationClient')
    BackendApplicationClient = client_patch.start()
    request.addfinalizer(client_patch.stop)
    session = mock.Mock()
    session_patch = mock.patch('wbia.utils.OAuth2Session', return_value=session)
    OAuth2Session = session_patch.start()
    request.addfinalizer(session_patch.stop)
    # Supply the client credentials through a fake environment lookup.
    getenv_patch = mock.patch(
        'wbia.utils.os.getenv',
        side_effect={
            'HOUSTON_CLIENT_ID': 'houston-client-id',
            'HOUSTON_CLIENT_SECRET': 'houston-client-secret',
        }.get,
    )
    getenv_patch.start()
    request.addfinalizer(getenv_patch.stop)
    response = mock.Mock()
    session.request.return_value = response
    # Case 1: Call houston for the first time
    result = call_houston(
        'houston+http://houston:5000/api/v1/users/me',
        misc=10,
    )
    # A client and session are created exactly once, the token fetched once.
    assert BackendApplicationClient.call_count == 1
    assert OAuth2Session.call_count == 1
    assert session.fetch_token.call_count == 1
    assert session.fetch_token.call_args == mock.call(
        token_url='http://houston:5000/api/v1/auth/tokens',
        client_id='houston-client-id',
        client_secret='houston-client-secret',
    )
    # The houston+ scheme prefix is stripped; extra kwargs pass through.
    assert session.request.call_count == 1
    assert session.request.call_args == mock.call(
        'GET',
        'http://houston:5000/api/v1/users/me',
        misc=10,
    )
    assert result == response
    BackendApplicationClient.reset_mock()
    OAuth2Session.reset_mock()
    session.request.reset_mock()
    session.fetch_token.reset_mock()
    # Case 2: Call houston again
    result = call_houston('houston+http://houston:5000/favicon.ico')
    # Session and token are cached: no new client/session/token fetch.
    assert not BackendApplicationClient.called
    assert not OAuth2Session.called
    assert not session.fetch_token.called
    assert session.request.call_count == 1
    assert session.request.call_args == mock.call(
        'GET',
        'http://houston:5000/favicon.ico',
    )
    assert result == response
    session.request.reset_mock()
    # Case 3: Token expired
    def session_request(*args, **kwargs):
        # First attempt raises; the retry (call_count > 1) succeeds.
        if session.request.call_count == 1:
            raise TokenExpiredError
        return response
    session.request.side_effect = session_request
    result = call_houston('houston+https://houston:5000/')
    assert not BackendApplicationClient.called
    assert not OAuth2Session.called
    # Expiry triggers exactly one token refetch and one request retry.
    assert session.fetch_token.call_count == 1
    assert session.fetch_token.call_args == mock.call(
        token_url='https://houston:5000/api/v1/auth/tokens',
        client_id='houston-client-id',
        client_secret='houston-client-secret',
    )
    assert session.request.call_count == 2
    assert session.request.call_args_list == [
        mock.call('GET', 'https://houston:5000/'),
        mock.call('GET', 'https://houston:5000/'),
    ]
    session.reset_mock()
    # Case 4: Token not expired but 401 returned
    def session_request(*args, **kwargs):
        # First attempt comes back unauthorized; retry returns the mock 200.
        if session.request.call_count == 1:
            response.status_code = 401
        return response
    session.request.side_effect = session_request
    result = call_houston('houston+https://houston:5000/')
    assert not BackendApplicationClient.called
    assert not OAuth2Session.called
    # A 401 is treated like an expired token: refetch and retry once.
    assert session.fetch_token.call_count == 1
    assert session.fetch_token.call_args == mock.call(
        token_url='https://houston:5000/api/v1/auth/tokens',
        client_id='houston-client-id',
        client_secret='houston-client-secret',
    )
    assert session.request.call_count == 2
    assert session.request.call_args_list == [
        mock.call('GET', 'https://houston:5000/'),
        mock.call('GET', 'https://houston:5000/'),
    ]
|
import os
import sys

# Make the project directory and its parent importable.
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(PROJECT_DIR, os.pardir)
sys.path.append(PARENT_DIR)
sys.path.append(PROJECT_DIR)

# Data stores and third-party service credentials, all environment-driven
# with the previous literals kept as fallbacks for backward compatibility.
MONGO_DB = os.getenv('MONGO_DB') or 'instance_db'
MONGO_URL = os.getenv('MONGO_URL') or 'mongodb://127.0.0.1:27017/instance_db'
SMS_ACCOUNT = os.getenv('SMS_ACCOUNT') or ''
SMS_TOKEN = os.getenv('SMS_TOKEN') or ''
SMS_TENC_ID = os.getenv('SMS_TENC_ID') or ''
SMS_TENC_KEY = os.getenv('SMS_TENC_KEY') or ''
COS_SECRET_ID = os.getenv("COS_SECRET_ID") or "123"
COS_SECRET_KEY = os.getenv("COS_SECRET_KEY") or "456"
# SECURITY: the email settings were previously hardcoded only — they are now
# overridable from the environment like every other credential here. The
# literal password fallback is kept for compatibility but should be rotated
# and removed from source control.
EMAIL_SMTP = os.getenv('EMAIL_SMTP') or 'smtp.exmail.qq.com'
EMAIL_SMTP_PORT = os.getenv('EMAIL_SMTP_PORT') or '465'
EMAIL_ADMIN = os.getenv('EMAIL_ADMIN') or 'help@xiusl.com'
EMAIL_ADMIN_PWD = os.getenv('EMAIL_ADMIN_PWD') or 'He110120.'
QINIU_ACCESS = os.getenv('QINIU_ACCESS')
QINIU_SECRET = os.getenv('QINIU_SECRET')
from django.db import models
from django.conf import settings
from .constants import WidthChoices
class Comment(models.Model):
    """A named, timestamped free-text comment."""
    name = models.CharField(max_length=100)  # commenter's display name
    content = models.TextField()  # comment body
    created = models.DateTimeField(auto_now_add=True)  # set once on insert

    class Meta:
        ordering = ('-created',)  # newest comments first by default

    def __str__(self):
        # Shown in the admin and shell; the comment is identified by author name.
        return self.name
class ImageUpload(models.Model):
    """An image uploaded by a user, displayed at a preset width."""
    # Uploader; deleting the user cascades to their images.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE,
                             related_name='images_created')
    image = models.ImageField(upload_to='comments/')  # stored under MEDIA_ROOT/comments/
    # Display width restricted to project-defined choices; NAM_MUOI is the
    # default — presumably a percentage/pixel preset, confirm in constants.py.
    width = models.IntegerField(choices=WidthChoices.CHOICES, default=WidthChoices.NAM_MUOI)
|
import matplotlib.pyplot as plt
import numpy as np

# 100 evenly spaced sample points over [-5, 5].
x=np.linspace(-5,5,100)
print ('type(x) = ',type(x),' len = ', len(x) )
plt.plot(x,np.sin(x)) # use NumPy's vectorized sine function
plt.ylabel('fonction sinus')
plt.xlabel("l'axe des abcisses")
plt.grid()
plt.show()
|
# -*- coding: utf-8 -*-
from flask.ext.wtf import Form
from wtforms import (
StringField,
PasswordField,
TextAreaField
)
from wtforms import validators
from wtforms.fields.html5 import EmailField
class AcademyForm(Form):
    """Registration/edit form for an academy listing.

    Every field is required via ``validators.data_required``. Labels,
    validation messages and placeholders are Korean UI strings (mojibake
    in this encoding); they are runtime text and are kept verbatim.
    """
    academy_name = StringField(
        u'ํ์์ด๋ฆ',
        [validators.data_required(u'ํ์์ด๋ฆ์ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'ํ์์ด๋ฆ์ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    teacher_name = StringField(
        u'๊ฐ์ฌ์ด๋ฆ',
        [validators.data_required(u'๊ฐ์ฌ์ด๋ฆ์ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'๊ฐ์ฌ์ด๋ฆ์ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    academy_introduce = TextAreaField(
        u'ํ์์๊ฐ',
        [validators.data_required(u'ํ์์๊ฐ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'ํ์์๊ฐ๋ฅผ ํด์ฃผ์ธ์.'}
    )
    teacher_introduce = TextAreaField(
        u'๊ฐ์ฌ์๊ฐ',
        [validators.data_required(u'๊ฐ์ฌ์๊ฐ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'๊ฐ์ฌ์๊ฐ๋ฅผ ํด์ฃผ์ธ์.'}
    )
    curriculum_introduce = TextAreaField(
        u'์ปค๋ฆฌํ๋ผ์๊ฐ',
        [validators.data_required(u'์ปค๋ฆฌํ๋ผ์๊ฐ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ปค๋ฆฌํ๋ผ์ ์๋ดํด์ฃผ์ธ์.'}
    )
    academy_address = StringField(
        u'ํ์์ฃผ์',
        [validators.data_required(u'ํ์์ฃผ์๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'ํ์์ฃผ์๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    welcome_line = StringField(
        u'ํ์์ธ์ฌ',
        [validators.data_required(u'ํ์ค์ธ์ฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'ํ์ค์ธ์ฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    phone_number = StringField(
        u'์ฐ๋ฝ์ฒ',
        [validators.data_required(u'์ฐ๋ฝ์ฒ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ฐ๋ฝ์ฒ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    class_time = TextAreaField(
        u'์์์๊ฐ',
        [validators.data_required(u'์์์๊ฐ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์์์๊ฐ์ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    class_fee = StringField(
        u'์๊ฐ๋ฃ',
        [validators.data_required(u'์๊ฐ๋ฃ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์๊ฐ๋ฃ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    homepage = StringField(
        u'ํํ์ด์ง',
        [validators.data_required(u'ํํ์ด์ง๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'ํํ์ด์ง๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    location = StringField(
        u'์ง์ญ',
        [validators.data_required(u'์ง์ญ์ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ง์ญ์ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    category = StringField(
        u'์นดํ๊ณ ๋ฆฌ',
        [validators.data_required(u'์นดํ๊ณ ๋ฆฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์นดํ๊ณ ๋ฆฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    image_1 = StringField(
        u'์ด๋ฏธ์ง1',
        [validators.data_required(u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    image_2 = StringField(
        u'์ด๋ฏธ์ง2',
        [validators.data_required(u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    image_3 = StringField(
        u'์ด๋ฏธ์ง3',
        [validators.data_required(u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    image_4 = StringField(
        u'์ด๋ฏธ์ง4',
        [validators.data_required(u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    image_5 = StringField(
        u'์ด๋ฏธ์ง5',
        [validators.data_required(u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ด๋ฏธ์ง ๋งํฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    teacher_image = StringField(
        u'์ ์๋์ฌ์ง',
        [validators.data_required(u'์ ์๋ ์ฌ์ง ๋งํฌ๋ฅผ ์๋ ฅํ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ ์๋ ์ฌ์ง ๋งํฌ๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    # NOTE(review): 'academy_address' is declared twice in this class; this
    # later declaration silently replaces the one above — confirm which set
    # of label/messages is intended before removing either.
    academy_address = StringField(
        u'์ฃผ์',
        [validators.data_required(u'์ฃผ์๋ฅผ ์๋ ฅํ์ฌ ์์น๋ฅผ ์ฐพ์์์ฃผ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ฃผ์๋ฅผ ์๋ ฅํด์ฃผ์ธ์.'}
    )
    academy_latlng = StringField(
        u'',
        [validators.data_required(u'์์น๋ฅผ ์ง๋์์ ํด๋ฆญํด์, ๋ง์ปค๋ฅผ ํ์ํด์ฃผ์๊ธฐ ๋ฐ๋๋๋ค.')],
        description={'placeholder': u'์ด ํญ๋ชฉ์ ์๋์๋ ฅํ์ง ์์ต๋๋ค.'}
    )
import numpy as np
import torch
from torch import nn
from sklearn.metrics import r2_score
import scipy
import matplotlib.pylab as plt
def mape_error(y_pred, y_true):
    """Mean absolute percentage error, as a fraction of the true values.

    Note the argument order: the prediction comes first, the reference
    series second; the error is normalized by *y_true*.
    """
    truth = np.array(y_true)
    guess = np.array(y_pred)
    return np.abs((truth - guess) / truth).mean()
# =============================================== L1 NORM =====================================================
def l1_norm_error(source, candidate):
    """Mean element-wise relative L1 error: mean(|source - candidate| / source).

    Zero entries of *source* are replaced by 1e-30 (after the numerator is
    computed) to avoid division by zero, matching the original behaviour.

    Fix: works on a float copy, so the caller's *source* array is no longer
    mutated in place by the zero replacement.
    """
    source = np.array(source, dtype=float)  # copy; never touch caller's data
    candidate = np.asarray(candidate)
    error = np.abs(source - candidate)
    source[source == 0] = 1e-30
    return (error / source).mean()
def mae_error(source, candidate, N):
    """Mean relative L1 error scaled by 1e6 / N.

    Same normalization as :func:`l1_norm_error`, then divided by *N* and
    multiplied by 1,000,000 (a parts-per-million style scaling).

    Fix: works on a float copy, so the caller's *source* array is no longer
    mutated in place by the zero replacement.
    """
    source = np.array(source, dtype=float)  # copy; never touch caller's data
    candidate = np.asarray(candidate)
    error = np.abs(source - candidate)
    source[source == 0] = 1e-30
    error = (error / source).mean()
    return (error / N) * 1000000
# =============================================== RMSLE =====================================================
def rmsle_error(source, candidate):
    """Root mean squared log-error: sqrt(mean(log10((source+1)/(candidate+1))^2)).

    (Uses base-10 logs, not the natural log of the textbook RMSLE.)

    Fix: the 1e-30 epsilon is added out-of-place; the original's
    ``candidate += 1e-30`` silently mutated the caller's array.
    """
    candidate = np.asarray(candidate, dtype=float) + 1e-30
    error = np.log10((source + 1) / (candidate + 1))
    error = error * error
    return np.sqrt(error.mean())
def rmsle_(source, candidate, N=1):
    """Root mean squared error between *source* and *candidate*, divided by *N*.

    Despite the name, no logarithm is applied — this is a plain (scaled) RMSE.

    Fix: the 1e-30 epsilon is added out-of-place; the original's
    ``candidate += 1e-30`` silently mutated the caller's array.
    """
    candidate = np.asarray(candidate, dtype=float) + 1e-30
    error = (source - candidate) ** 2
    return np.sqrt(error.mean()) / N
# =============================================== R2 =====================================================
def r2(pred, y_true):
    """Wrap sklearn's ``r2_score`` on length-aligned arrays.

    *y_true* is truncated to the prediction length before scoring; the
    original argument order (prediction passed first) is preserved.
    """
    trimmed_truth = y_true[:len(pred)]
    return r2_score(pred, trimmed_truth)
# =============================================== GRADIENT SMOOTH =====================================================
class GradientSmoothLoss(nn.Module):
    """Loss that penalizes a prediction's rate of change beyond a decaying budget."""

    def __init__(self, refGrad, future, decayFunc=None):
        '''
        Function that minimizes the rate of change of a time series prediction,
        as the times evolves. It tries to give a desired "shape".
        :param refGrad: the maximum gradient that is used for scaling
        :param future: number of future predictions in the timeseries
        :param decayFunc: decay function for weights (the weights decrease as time increases, such that the last
        timestamps will have a smoother rate of change)
        '''
        super().__init__()
        self.future = future
        self.refGrad = refGrad
        # compute decay weights: a length-`future` profile over [0, 1],
        # shaped by the default linear decay or the user-supplied function.
        decay = np.linspace(0, 1, future)
        decay = self.__linear_decay(decay) if decayFunc is None \
            else decayFunc(decay)
        decay = torch.from_numpy(decay)
        # NOTE(review): torch.from_numpy yields float64 here; forward() mixes
        # it with the input's dtype via type promotion — confirm intended.
        self.decay = decay * refGrad

    # =============================================== LINEAR DECAY =====================================================
    def __linear_decay(self, linSpace):
        # Weights fall linearly from 0.8 down to 0.3 across the horizon.
        return 0.8 - linSpace * 0.5

    # =============================================== FORWARD ==========================================================
    def forward(self, inTensor, clampVal = 0.25):
        '''
        :param inTensor: input tensor on which to apply the loss
        :param clampVal: clamp errors before averaging for better stability
        :return:
        '''
        # Move the decay weights to the input's device (no-op after first call).
        self.decay = self.decay.to(inTensor.device)
        # Discrete gradient along dim 1 (assumed to be the time axis — confirm).
        gradOut = inTensor[:, 1:] - inTensor[:, :-1]
        # Only the portion of |gradient| exceeding the allowed decay is penalized.
        gradOut = gradOut.abs() - self.decay
        gradOut = torch.clamp(gradOut, min=0, max=clampVal)
        gradOut = gradOut.mean()
        return gradOut
|
#!/usr/bin/python
# Copyright (C) 2017-2020 Alex Manuskin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import absolute_import
from setuptools import setup
import s_tui.helper_functions as AUX
setup(
name="s-tui",
packages=['s_tui', 's_tui.sources', 's_tui.sturwid'],
version=AUX.__version__,
author="Alex Manuskin",
author_email="amanusk@tuta.io",
description="Stress Terminal UI stress test and monitoring tool",
long_description=open('README.md', 'r').read(),
long_description_content_type='text/markdown',
license="GPLv2",
url="https://github.com/amanusk/s-tui",
keywords=['stress', 'monitoring', 'TUI'], # arbitrary keywords
entry_points={
'console_scripts': ['s-tui=s_tui.s_tui:main']
},
classifiers=[
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: System :: Monitoring',
],
install_requires=[
'urwid>=2.0.1',
'psutil>=5.6.0',
],
)
|
"""This class defines a device that measures the angle of a radio transmission comes from"""
from math import asin, sqrt, pow

# Speed of light in vacuum (m/s), used to convert time deltas to distance.
C = 299792458


class Radiogoniometer:
    """Estimates the bearing of a transmission from the arrival-time
    difference between two probes (time-difference-of-arrival)."""

    def __init__(self, probes=None):
        """Constructor.

        :param probes: optional list of probe objects; defaults to an empty
            list (built here to avoid a shared mutable default).
        """
        if probes:
            self.probes = probes
        else:
            self.probes = []

    def getprobes(self):
        """Probes getter"""
        return self.probes

    def setprobes(self, probes):
        """Probes setter"""
        self.probes = probes

    def gettimelapseangle(self):
        """Return the arrival angle in radians, or None when it cannot be
        computed (fewer than two probes, falsy probes, or co-located probes).

        Fix: previously raised IndexError when fewer than two probes were
        configured; now returns None like the other degenerate cases.
        """
        if len(self.probes) < 2:
            return None
        probe1 = self.probes[0]
        probe2 = self.probes[1]
        if not (probe1 and probe2):
            return None
        # Pop one measurement from each probe (probe1 first, as before).
        measure1 = probe1.popmeasure()
        measure2 = probe2.popmeasure()
        timestep = measure2.getarrivaltime() - measure1.getarrivaltime()
        pos1 = probe1.getposition()
        pos2 = probe2.getposition()
        distance = sqrt(pow((pos1.getx() - pos2.getx()), 2) +
                        pow((pos1.gety() - pos2.gety()), 2) +
                        pow((pos1.getz() - pos2.getz()), 2))
        if distance == 0:
            return None
        # asin still raises ValueError when |C*dt/d| > 1 (inconsistent
        # measurements); that behaviour is deliberately unchanged.
        return asin((C * timestep) / distance)
|
from __future__ import print_function
try:
    import time
    import os
    import sys
    import pickle
    import argparse
    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from plots import plot_bar_probs
    from plots import plot_confusion_matrix
    from plots import plot_rotation_metrics
    from riconv.layers import Convolution2D_4
    from riconv.load_data import load_image, MyPreProcessor
    from riconv.config_utils import ModelConfigurator, DataDirStruct, ModelDirStruct
    from keras.models import model_from_json
    from sklearn.metrics import classification_report, confusion_matrix
except ImportError as e:
    print(e)
    # Fix: re-raise the original exception. The old `raise ImportError`
    # replaced it with a blank one, discarding the failing module's name
    # and traceback.
    raise
# Layer dictionary for loading json weights w/custom layer
# (passed to keras.models.model_from_json as its custom-objects mapping).
custom_layer_dict = {'Convolution2D_4': Convolution2D_4}
def print_dict(dct):
    """Print each ``key: value`` pair of *dct*, keys in reverse-sorted order."""
    ordered = sorted(dct.items(), reverse=True)
    for key, value in ordered:
        print(f"{key}: {value}")
def main():
    """CLI entry point: load a trained Keras model from config and report
    per-class probabilities for one or more input images under a set of
    rotations (a single random-range rotation, or a fixed sweep).

    Results per image are printed, pickled (fixed sweep only) and plotted.
    """
    global args
    parser = argparse.ArgumentParser(description="Convolutional NN Inference Script")
    parser.add_argument("-c", "--config", dest="configfile", default='config.yml', help="Path to yaml configuration file")
    parser.add_argument("-m", "--modelname", dest="modelname", type=str, required=True, help="Model name to test (only one)")
    parser.add_argument("-i", "--input", dest="input", nargs="*", required=True, help="Path to image directory or single image for inference")
    parser.add_argument("-s", "--seed", dest="rngseed", default=123, type=int, help="RNG Seed to test different samples")
    rot_parse = parser.add_mutually_exclusive_group()
    rot_parse.add_argument("-r", "--rand_rot_angle", dest="rand_rot_angle", default=0., type=float, help="Random image rotation angle range [deg]")
    rot_parse.add_argument("-f", "--fixed_rot_angle", dest="fixed_rot_angle", nargs=3, type=float, help="(low, high, spacing) fixed image rotation angle [deg]")
    args = parser.parse_args()
    # Get requested image samples
    img_list = args.input
    n_images = len(img_list)
    images = []
    shapes = []
    for img_path in img_list:
        img, shape = load_image(img_path)
        images.append(img)
        shapes.append(shape)
    # Determine which rotation to apply: default is a single random range...
    run_fixed_rotation = False
    i_results_prefix = 'random'
    rot_angle_list = [args.rand_rot_angle]
    rot_comment = "Random rotation range (deg): [-{}, {}]".format(rot_angle_list[0],
                                                                  rot_angle_list[0])
    # ...unless a fixed sweep (low, high, step) was requested.
    if args.fixed_rot_angle is not None:
        i_results_prefix = 'fixed'
        run_fixed_rotation = True
        ang_range = args.fixed_rot_angle
        # NOTE(review): np.arange excludes the upper bound ang_range[1].
        rot_angle_list = np.arange(ang_range[0], ang_range[1], ang_range[2])
        rot_comment = "Fixed rotation angle(s) (deg): {}".format(rot_angle_list)
    # Get configuration file
    hconfig = ModelConfigurator(args.configfile)
    # Class names
    class_labels = hconfig.labels
    # Get requested model
    modelname = args.modelname
    print('\nTesting {} over following rotations: {} ...\n'.format(modelname, rot_angle_list))
    # Set model config parameters
    hconfig.model_config(modelname)
    # Extract model path from config
    model_dir_struct = ModelDirStruct(main_dir=hconfig.model_outpath, test_model=True)
    ## Load model to test
    # Load pretrained model architecture (JSON) from file
    json_file = open(model_dir_struct.model_file, 'r')
    trained_model_json = json_file.read()
    json_file.close()
    trained_model = model_from_json(trained_model_json, custom_layer_dict)
    # Load weights into model
    trained_model.load_weights(model_dir_struct.weights_file)
    print("Loaded model from disk")
    # Compile trained model
    trained_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    # Loop over images
    for iidx, test_image in enumerate(images):
        # Image basename (no extension), used as the output-file prefix
        ibase = os.path.splitext(os.path.basename(img_list[iidx]))[0]
        # Dictionary of test results
        out_dict = {}
        out_dict['theta'] = np.array(rot_angle_list, dtype='float32')
        # List of probabilities for each rotation
        prob_rot_list = []
        # Run over rotation angles in list,
        # or just single value used for random range
        for i, rot_angle in enumerate(rot_angle_list):
            print('On {} angle {}'.format(i_results_prefix, rot_angle))
            # Preprocess image to the model's expected (H, W, C) shape
            img_shape = (hconfig.height, hconfig.width, hconfig.channels)
            assert img_shape[0] <= shapes[iidx][0] and img_shape[1] <= shapes[iidx][1], \
                "Model expected shape {} not equal to or less than loaded image shape {}" \
                .format(img_shape, shapes[iidx])
            prep = MyPreProcessor(img_shape=img_shape,
                                  rescale=1./255,
                                  fixed_rot_angle_deg=rot_angle)
            proc_image = prep.preprocess_img(test_image)
            # Add the batch dimension expected by predict()
            proc_image = np.expand_dims(proc_image, axis=0)
            # Choose same batch: reseed so any randomness repeats per angle
            np.random.seed(args.rngseed)
            test_prefix = 'test_%s_rot_%.0f'%(i_results_prefix, rot_angle)
            # Predict classification
            Y_pred = trained_model.predict(proc_image)
            y_predict = np.argmax(Y_pred, axis=1)
            print('Prediction Probabilities: {}'.format(Y_pred))
            print('Class Prediction: {}'.format(y_predict))
            # Classification probability for each orientation
            prob_rot_list.append(Y_pred)
        # Transpose list -> rows become per-class probability curves over angle
        prob_rot_arr = np.array(prob_rot_list, dtype='float32')
        class_prob_arr = prob_rot_arr.T
        # Save to dictionary, one '<label>_probability' entry per class
        for lidx, label in enumerate(hconfig.labels):
            class_probs = class_prob_arr[lidx][0]
            # Model's accuracies
            newkey = label + '_probability'
            out_dict[newkey] = class_probs
            print('Probabilities for class {} with model {}: {}'.format(label, modelname, class_probs))
        print('\nRotations and class probabilities for all')
        print_dict(out_dict)
        print('Saved some figures in {}'.format(model_dir_struct.plots_dir))
        # Only the fixed sweep is persisted/plotted; the random case is print-only
        if run_fixed_rotation:
            rot_seq = rot_angle_list[0]
            rot_names = '%s'%rot_seq
            if len(rot_angle_list) > 1:
                rot_seq = (rot_angle_list[0], len(rot_angle_list)-2, rot_angle_list[-1])
                rot_names = '_'.join(map(str, rot_seq)).replace(" ", "")
            # Prefix
            pprefix = ibase + '_rot_' + i_results_prefix + \
                      '_test_' + rot_names
            # Save to pickel file
            pklname = pprefix + '.pkl'
            filename = os.path.join(model_dir_struct.main_dir, pklname)
            with open(filename, 'wb') as file_pi:
                pickle.dump(out_dict, file_pi)
            print("\nSaved rotation test to disk: {}\n".format(filename))
            # Plot some prediction probabilites for some rotations
            plot_bar_probs(out_dict, hconfig.labels, pprefix, model_dir_struct.plots_dir)
            # Plot rotation metrics
            plot_rotation_metrics(out_dict, ['Probability'], pprefix, model_dir_struct.plots_dir)
# Script entry point.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.