seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
import pygame, sys, os
from pygame.locals import *

pygame.init()
# BUG FIX: was `pygame.dispay` (typo) — AttributeError at startup.
DISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption('Drawing')

# Basic RGB colour triples.
black = [0, 0, 0]
white = [255, 255, 255]
red = [255, 0, 0]
green = [0, 255, 0]
blue = [0, 0, 255]

DISPLAYSURF.fill(white)
# BUG FIX: the missing comma between the colour and the point tuple made
# `green((...))` a call expression (TypeError: 'list' object is not callable).
pygame.draw.polygon(DISPLAYSURF, green, ((146, 0), (291, 106), (236, 236)))

pixobj = pygame.PixelArray(DISPLAYSURF)
pixobj[380][280] = black
pixobj[382][282] = black
pixobj[384][284] = black
del pixobj  # release the surface lock held by the PixelArray

while True:
    # BUG FIX: the event queue lives in pygame.event, not pygame.events.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()
38371228067 | """name: Lyle Martin
"""
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.button import Button
class CreateWidgetApp(App):
    """Kivy app that adds one Button per stored name to the 'boxes' container."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.names = ['Lyle', 'David', 'Martin']

    def build(self):
        """Load the kv layout, populate it, and return the root widget."""
        self.title = "Dynamic Widgets"
        self.root = Builder.load_file('dynamic_widgets.kv')
        self.create_widgets()
        return self.root

    def create_widgets(self):
        """Attach one Button per name to the root's 'boxes' container."""
        for name in self.names:
            self.root.ids.boxes.add_widget(Button(text=name))
# Instantiate the app and start the Kivy event loop.
CreateWidgetApp().run()
33105364661 | #!/usr/bin/env python3
from os import system,path,listdir,geteuid
from shutil import move,copy,copytree
import gzip
import subprocess
import sys
import urllib.request
def space(n):
    """Print n blank lines (visual spacing in the console UI)."""
    print("\n" * n, end="")
def sudo_check():
    """Ensure we run as root; otherwise re-exec the script under sudo and exit."""
    if geteuid() != 0:
        print("We're not Root")
        subprocess.call(['/usr/bin/sudo', './setup.py'])
        exit(1)
    print("We're Root!")
    print("Moving on...")
def get_img():
    """Download the gzipped CentOS base image into vm_space, verify, then unpack."""
    print('Downloading CentOS base image..')
    response = urllib.request.urlopen(
        'https://s3-us-west-2.amazonaws.com/jacobjeffers/git/TryLinux_centos.img.gz')
    image_bytes = response.read()
    with open(vm_space + 'TryLinux_centos.img.gz', 'wb') as file:
        file.write(image_bytes)
    print('image has been downloaded; checking integrity.. ')
    check_img()
    print('Extracting image.. ')
    extract_img()
def check_img():
    """Verify the downloaded image against a known MD5; retry the download on mismatch."""
    import hashlib
    # Use a with-block so the file handle is closed (the original leaked it).
    with open(vm_space + 'TryLinux_centos.img.gz', 'rb') as f:
        new_md5 = hashlib.md5(f.read()).hexdigest()
    old_md5 = 'f3e3bd285dbc727848991917e2e9a8c1'
    if old_md5 == new_md5:
        print('The image is good')
    else:
        print('The image did not download correctly; Trying again')
        # BUG FIX: was `vmspace` (NameError) — the module global is `vm_space`.
        system('rm -f ' + vm_space + 'TryLinux_centos.img.gz')
        get_img()
def extract_img():
    """Gunzip the downloaded image, then refresh the owning libvirt storage pool."""
    system('sudo gunzip -qf ' + vm_space + 'TryLinux_centos.img.gz')
    print('img has been extracted')
    # pool_set() returns a one-element list; str()[2:-2] strips "['...']".
    pool_name = str(pool_set())[2:-2]
    system('sudo virsh pool-refresh ' + pool_name)
def refresh_pool(l):
    """Ask libvirt to rescan storage pool `l` for new volumes."""
    cmd = 'virsh pool-refresh --pool ' + str(l)
    system(cmd)
def get_pool_name():
    """Return the libvirt pool name owning the first *.img file found in vm_space.

    Returns None when vm_space contains no *.img file.
    """
    for entry in listdir(vm_space):
        if entry.endswith('.img'):
            out = subprocess.check_output(
                'sudo virsh vol-pool ' + vm_space + entry, shell=True)
            # Strip the b'...\n' repr decoration from the bytes object.
            return str(out)[2:-5]
def get_storage_pool_info():
    """Prompt for the VM storage pool path and exit unless it is an existing directory."""
    vm_space = input("Where is your VM storage pool: ")
    if not path.exists(vm_space):
        space(2)
        print(" That directory does not exist.")
        space(2)
        exit(1)
    if not path.isdir(vm_space):
        space(2)
        print(" That is not a directory")
        space(2)
        exit(1)
    print(" OK!!")
    return vm_space
def pool_list_fix(old):
    """Pick pool names out of whitespace-split `virsh pool-list` output.

    Names sit at indices 4, 7, 10, ... (every third field after the table
    header); a field ending in a double newline marks the end of the table.

    BUG FIX: the original stepped the index by 3 inside a fixed
    `range(len(old) - 1)` loop, so it indexed past the end of the list
    (IndexError) whenever the terminator field was not reached in time.
    """
    global new
    if len(old) > 4:
        new = []
        i = 4
        while i < len(old):
            if old[i][-2:] == "\n\n":
                break
            new.append(old[i])
            i = i + 3
    # For short input the previous (module-global) result is returned, as
    # before — auto_get_pool_info() relies on this when it re-fixes an
    # already-fixed single-element list.
    return new
def pool_set():
    """Run `virsh pool-list` and return the cleaned list of pool-name fields."""
    import re
    output = subprocess.check_output('virsh pool-list', shell=True)
    # Keep only fields longer than one char (drops separators/blanks).
    pools = [field for field in output.decode().split(" ") if len(field) > 1]
    return pool_list_fix(pools)
def auto_get_pool_info(w):
    """Autodetect the libvirt storage pool, creating a default one if none exist.

    w == 'pool'     -> return the chosen pool's name
    w == 'vm_space' -> return the chosen pool's filesystem path
    """
    pool_list = []
    pools = pool_set()
    if not pools:
        print('there are no storage pools; creating one..')
        # BUG FIX: build_pool() returns None — the original bound that to
        # vm_space. Just create the pool and re-query the list.
        build_pool()
        pools = pool_set()
    # NOTE(review): pool_set() already returns a fixed list; re-fixing relies
    # on pool_list_fix() echoing back its previous result for short input.
    # Kept for compatibility with the original flow.
    pool_list = pool_list_fix(pools)
    if len(pool_list) > 1:
        print('There is more than one storage pool: ')
        for i in range(len(pool_list)):
            a = i + 1
            print(str(a) + ": " + pool_list[i])
        c = input('Which one Should I use:')
        c = int(c) - 1
        pool = str(pool_list[c])
    elif len(pool_list) == 1:
        # str(['name'])[2:-2] unwraps the single-element list repr.
        pool = str(pool_list)[2:-2]
    else:
        # BUG FIX: previously `pool` was unbound here, crashing below with a
        # confusing NameError. Fail explicitly instead.
        print('no storage pool could be found; aborting')
        exit(1)
    responce = subprocess.check_output('virsh pool-dumpxml --pool ' + pool + ' | grep path', shell=True)
    responce = str(responce)
    # Slice the path payload out of the "b'      <path>...</path>\n'" repr.
    vm_space = responce[12:-10]
    if w == 'pool':
        return pool
    elif w == 'vm_space':
        return vm_space
    else:
        print('bad call to auto_get_pool_info(). need to specify what var to retrieve. vm_space or pool')
def build_pool():
    """Define, start, and autostart the default TryLinux_images libvirt pool."""
    for args in (['pool-define', '../vm_space/new_pool.xml'],
                 ['pool-start', 'TryLinux_images'],
                 ['pool-autostart', 'TryLinux_images']):
        subprocess.call(['sudo', 'virsh'] + args)
def install_dep():
    """Install libvirt, QEMU/KVM and PHP 7.0 via apt, non-interactively."""
    packages = ['php7.0', 'libvirt-bin', 'qemu-kvm', 'virtinst',
                'bridge-utils', 'cpu-checker']
    subprocess.call(['sudo', 'apt', '-y', 'install'] + packages)
def build_vars():
    """Create ../web/vars.php pointing the PHP frontend at the VM storage dir."""
    if path.exists('../web/vars.php'):
        print("vars file is present")
        return
    print("Creating vars file")
    system('touch ../web/vars.php')
    with open("../web/vars.php", "w") as vars:
        vars.write("<?php\n")
        vars.write("//the directory for the virtual machine storage\n")
        vars.write('$dir = \"' + vm_space + '\";\n')
        vars.write("?>\n")
def build_start(pool):
    """Create ../web/start.sh, a helper that clones a base volume within `pool`."""
    if path.exists('../web/start.sh'):
        print("start file is present")
        return
    print("Creating start file")
    system('touch ../web/start.sh')
    lines = [
        '#!/bin/bash\n',
        'name=$1\n',
        'dist=$2\n',
        '/usr/bin/virsh vol-clone $dist.img $name.img --pool ' + pool + ' > /dev/null 2>&1\n',
    ]
    with open("../web/start.sh", "w") as start:
        start.writelines(lines)
# Module-level state shared with pool_list_fix() (holds the last parsed pool list).
new = []

# ---------------- interactive setup flow ----------------
#system('clear')
space(1)
print("This is the setup program for Try_Linux!")
space(1)
print(" This setup program needs root access to install")
print(" and setup all necissary files.")
print(" I will be check for admin privileges now.")
space(2)
input("Press Enter to continue.")
sudo_check()

#system('clear')
print("Checking dependencies...")
dep = 0
install = False
# Single-pass dependency check (loop exits after one iteration either way).
while(dep < 1):
    # os.system returns the exit status; non-zero means `which` found nothing.
    virt = system('which virsh')
    php = system('which php')
    if virt > 0 or php > 0:
        space(5)
        print(" Dependency Check failed!")
        space(2)
        print(" Please make sure libvirt and PHP7.0 are installed")
        space(2)
        # NOTE(review): `install` is rebound from bool to the raw input string
        # here; the `install == False` test below therefore takes the manual
        # path only when the check passed on the first try.
        install = input('Do you want me to install Libvirt and PHP?(y/n) ')
        if install == "y" or install == "Y":
            install_dep()
        dep = dep + 1
    else:
        space(5)
        print(" Check Passed! Moving on")
        dep = dep + 1

space(1)
input("Press Enter when you're ready to begin!")
#system('clear')
space(5)
if install == False:
    # Dependencies were already present: ask the user for the pool path.
    print(" The VM storage pool is where the images that libvirt")
    print(" creates. If you are unsure run the command:")
    print(" $: virsh pool-list")
    print(" This will list the current storage pools. If there is")
    print(" only one run the following command and replace <pool>")
    print(" with the name of the storage pool")
    print(" $: virsh pool-xmldump <pool> | grep path")
    print(" This will output the path to the storage pool. You")
    print(" can copy and paste it here.")
    space(1)
    vm_space = get_storage_pool_info()
else:
    # We (possibly) installed libvirt ourselves: autodetect/create the pool.
    vm_space = auto_get_pool_info('vm_space')

print("Making Directory stucture and installing files")
# Normalise to a trailing slash so later concatenations form valid paths.
if vm_space[len(vm_space) - 1] != "/":
    vm_space += "/"

# Hidden .Try_Linux working tree inside the storage pool.
if not path.isdir(vm_space + '.Try_Linux/config.d'):
    system('sudo mkdir -p ' + vm_space + '.Try_Linux/config.d')
    print("Creating config.d directory")
else:
    print("config.d dir is present")
if not path.isdir(vm_space + '.Try_Linux/MID'):
    system('sudo mkdir -p ' + vm_space + '.Try_Linux/MID')
    print("Creating MID directory")
else:
    print("MID dir is present")
if not path.exists(vm_space + '.Try_Linux/recycle.sh'):
    copy('../vm_space/recycle.sh', vm_space + '.Try_Linux/')
    print("Copying recycle.sh to " + vm_space + '.Try_Linux/')
else:
    print("recycle.sh is present")

# Make folder permissions world-writable so the web frontend can use them.
if path.isdir(vm_space + '.Try_Linux'):
    system('sudo chmod 777 ' + vm_space + '.Try_Linux')
if path.isdir(vm_space + '.Try_Linux/MID'):
    system('sudo chmod 777 ' + vm_space + '.Try_Linux/MID')
if path.isdir(vm_space + '.Try_Linux/config.d'):
    system('sudo chmod 777 ' + vm_space + '.Try_Linux/config.d')
if path.exists(vm_space + '.Try_Linux/recycle.sh'):
    system('sudo chmod 777 ' + vm_space + '.Try_Linux/recycle.sh')

# Periodic cleanup of VM configs and recycled machines.
if not path.exists('/etc/cron.d/Try_Linux'):
    print("Creating cron file")
    system('touch /etc/cron.d/Try_Linux')
    with open("/etc/cron.d/Try_Linux", "w") as cron:
        cron.write("#-------------------------------------------------------#")
        cron.write("#Try_Linux cleanup for virtual machines and config files#")
        cron.write("#-------------------------------------------------------#")
        cron.write("\n")
        cron.write("*/15 * * * * root /bin/rm -f /srv/storage/virtual_machines/config.d/*" + "\n")
        cron.write("*/5 * * * * root /srv/storage/virtual_machines/recycle.sh" + "\n")
else:
    print("cron file is present")

#system('clear')
input('Completed Sucsessfully! Press Enter to continue')
space(2)
refresh_now = True #testing without Download
vm_list = listdir(vm_space)
if vm_list == []:
    print("You have no base images!")
    print("Moving base CentOS image to " + vm_space)
    get_img()
    refresh_now = True
else:
    print("your current virtual machines are:")
    for i in range(len(vm_list)):
        # Skip hidden entries such as .Try_Linux.
        if not vm_list[i][0] == ".":
            print(vm_list[i])
    download = input('Do you want to download the CentOS image?(y/n) ')
    if download == 'yes' or download == 'y' or download == "Y":
        get_img()
        refresh_now = True
if refresh_now == True:
    r = get_pool_name()
    # print('r is ' + str(r))
    refresh_pool(r)
    # NOTE(review): `r` is only bound on this path; the web-helper generation
    # below depends on it, so it is kept inside this branch (refresh_now is
    # currently hard-coded True above).
    build_vars()
    build_start(r)

# Install the web frontend and open up permissions.
webdir = input('Where is your Web folder: ')
if not path.exists(webdir + 'Try_Linux'):
    system('sudo mkdir -p ' + webdir + 'Try_Linux')
system('sudo cp -ru ../web/* ' + webdir + 'Try_Linux/')
system('sudo chmod 777 ' + webdir + 'Try_Linux/*')
system('sudo chmod 777 ' + vm_space)
system('sudo chmod 777 ' + vm_space + '.Try_Linux')
system('sudo chmod 777 ' + vm_space + '.Try_Linux/*')
space(2)
print(" all done")
space(2)
| bigogre55/Try_Linux | setup/setup.py | setup.py | py | 9,681 | python | en | code | 0 | github-code | 13 |
6112575807 | # thanks to: https://github.com/lucidrains/byol-pytorch/blob/master/byol_pytorch/byol_pytorch.py
import torch
import torchvision.models
from torch import nn
import copy
import collections
import src.optimizers.loss as losses
def load_model(config, model_name, checkpoint_path=None):
    """Build a BYOL or Downstream model and optionally restore a checkpoint.

    Returns None for an unrecognised model_name.
    """
    model = None
    if model_name == 'BYOL':
        model = BYOL(config)
    elif model_name == 'Downstream':
        model = DownstreamNetwork(config)
    if checkpoint_path is not None:
        print('>> load checkppoints ...')
        # Always map to CPU so checkpoints load regardless of training device.
        state = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        model.load_state_dict(state['model_state_dict'], strict=False)
    return model
class EMA():
    """Exponential moving average with decay factor `beta`."""

    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        """Return beta*old + (1-beta)*new; pass `new` through when old is None."""
        return new if old is None else old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
    """In-place EMA update of ma_model's parameters toward current_model's."""
    for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
        ma_params.data = ema_updater.update_average(ma_params.data, current_params.data)
def set_requires_grad(model, val):
    """Enable or disable gradient tracking for every parameter of `model`."""
    for param in model.parameters():
        param.requires_grad = val
class MLPNetwork(nn.Module):
    """Two-layer MLP head: Linear -> BatchNorm1d -> ReLU -> Linear."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLPNetwork, self).__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)
class Encoder(nn.Module):
    """Backbone encoder: a torchvision ResNet without its final FC layer, flattened.

    BUG FIX: the original instantiated a *fresh* ResNet for every layer it
    copied (nine separate model constructions per encoder), which is very
    slow and, with pre_trained=False, stitches together layers from
    differently-initialised networks. Build the backbone once and reuse its
    submodules.
    """

    def __init__(self, vision_model_name='resnet50', pre_trained=True):
        super(Encoder, self).__init__()
        factories = {
            'resnet50': torchvision.models.resnet50,
            'resnet34': torchvision.models.resnet34,
            'resnet18': torchvision.models.resnet18,
        }
        self.encoder = nn.Sequential()
        if vision_model_name in factories:
            backbone = factories[vision_model_name](pretrained=pre_trained)
            self.encoder.add_module(
                "encoder_layer",
                nn.Sequential(
                    backbone.conv1,
                    backbone.bn1,
                    backbone.relu,
                    backbone.maxpool,
                    backbone.layer1,
                    backbone.layer2,
                    backbone.layer3,
                    backbone.layer4,
                    backbone.avgpool,
                )
            )
        self.flatten = nn.Flatten()

    def forward(self, x):
        out = self.encoder(x)
        return self.flatten(out)
class BYOL(nn.Module):
    """Bootstrap Your Own Latent.

    An online network (encoder -> projector -> predictor) is trained so that
    its predictions match the projections of a frozen EMA "target" copy of
    itself on a second augmented view.
    """

    def __init__(self, config):
        super(BYOL, self).__init__()
        self.config = config
        self.target_ema_updater = EMA(beta=config['ema_decay'])
        self.online_encoder = Encoder(vision_model_name=config['vision_model_name'], pre_trained=config['pre_trained'])
        self.online_projector = MLPNetwork(input_dim=config['input_dim'],
                                           output_dim=config['output_dim'], hidden_dim=config['hidden_dim'])
        # NOTE(review): the predictor's input_dim is hidden_dim while the
        # projector outputs output_dim — this only lines up because the config
        # sets hidden_dim == output_dim; confirm intent.
        self.online_predictor = MLPNetwork(input_dim=config['hidden_dim'],
                                           output_dim=config['output_dim'], hidden_dim=config['hidden_dim'])
        # Target network is created lazily (first forward / first EMA update).
        self.target_encoder = None
        self.target_projector = None

    def get_target_network(self):
        """(Re)create the target network as a frozen deep copy of the online one."""
        self.target_encoder = copy.deepcopy(self.online_encoder)
        set_requires_grad(self.target_encoder, False)
        self.target_projector = copy.deepcopy(self.online_projector)
        set_requires_grad(self.target_projector, False)

    def update_target_network(self):
        """EMA-step the target toward the online weights, creating it if absent."""
        if self.target_encoder is not None and self.target_projector is not None:
            update_moving_average(self.target_ema_updater, self.target_encoder, self.online_encoder)
            update_moving_average(self.target_ema_updater, self.target_projector, self.online_projector)
        else:
            self.get_target_network()

    def forward(self, x1, x2):
        """Return the mean symmetric BYOL loss for two augmented views x1, x2."""
        out_encoder_x1 = self.online_encoder(x1)
        out_encoder_x2 = self.online_encoder(x2)
        out_projector_x1 = self.online_projector(out_encoder_x1)
        out_projector_x2 = self.online_projector(out_encoder_x2)
        out_predictor_x1 = self.online_predictor(out_projector_x1)
        out_predictor_x2 = self.online_predictor(out_projector_x2)
        # Target forward passes never receive gradients.
        with torch.no_grad():
            if self.target_encoder is None or self.target_projector is None:
                self.get_target_network()
            out_target_encoder_x1 = self.target_encoder(x1)
            out_target_encoder_x2 = self.target_encoder(x2)
            out_target_projector_x1 = self.target_projector(out_target_encoder_x1)
            out_target_projector_x2 = self.target_projector(out_target_encoder_x2)
        # Symmetric loss: each view's prediction regresses the other view's
        # (detached) target projection.
        loss01 = losses.loss_fn(out_predictor_x1, out_target_projector_x2.detach())
        loss02 = losses.loss_fn(out_predictor_x2, out_target_projector_x1.detach())
        loss = loss01 + loss02
        return loss.mean()
class DownstreamNetwork(nn.Module):
    """Classification head on top of a pre-trained encoder.

    BUG FIX: the original hard-coded ``self.encoder = None`` and offered no
    way to supply one, so ``forward`` always crashed (None is not callable)
    and the in-file demo ``DownstreamNetwork(config=..., encoder=...)``
    raised TypeError. The encoder is now an optional constructor argument
    (backward compatible — existing ``DownstreamNetwork(config)`` calls still
    work, and load_model() may assign ``.encoder`` afterwards).
    """

    def __init__(self, config, encoder=None):
        super(DownstreamNetwork, self).__init__()
        # Feature extractor; must be set (here or later) before forward().
        self.encoder = encoder
        self.input_dim = config['input_dim']
        self.hidden_dim = config['hidden_dim']
        self.class_num = config['class_num']
        self.classifier = nn.Sequential(
            collections.OrderedDict(
                [
                    ('linear01-1', nn.Linear(self.input_dim, self.hidden_dim)),
                    ('bn01', nn.BatchNorm1d(self.hidden_dim)),
                    ('act01', nn.ReLU()),
                    ('linear01-2', nn.Linear(self.hidden_dim, self.hidden_dim)),
                    ('linear02-1', nn.Linear(self.hidden_dim, self.hidden_dim)),
                    ('bn02', nn.BatchNorm1d(self.hidden_dim)),
                    ('act02', nn.ReLU()),
                    ('linear02-2', nn.Linear(self.hidden_dim, self.hidden_dim)),
                    ('linear03-1', nn.Linear(self.hidden_dim, self.hidden_dim)),
                    ('bn03', nn.BatchNorm1d(self.hidden_dim)),
                    ('act03', nn.ReLU()),
                    ('linear03-2', nn.Linear(self.hidden_dim, self.class_num)),
                ]
            )
        )

    def forward(self, x):
        if self.encoder is None:
            raise RuntimeError('DownstreamNetwork has no encoder; pass one to __init__ or set .encoder')
        out = self.encoder(x)
        return self.classifier(out)
if __name__ == '__main__':
    # Smoke test: run a dummy batch through the online encoder and a
    # downstream head (hidden_dim == output_dim so BYOL's dims line up).
    test_config = {
        "vision_model_name": "resnet34",
        "pre_trained": True,
        "ema_decay": 0.99,
        "input_dim": 512,
        "hidden_dim": 4096,
        "output_dim": 4096,
        "class_num": 10
    }
    test_encoder = BYOL(config=test_config)
    input_data = torch.rand(2, 3, 512, 512)
    output_data = test_encoder.online_encoder(input_data)
    # NOTE(review): this call requires DownstreamNetwork.__init__ to accept an
    # `encoder` keyword — confirm the constructor signature supports it.
    test_downstream = DownstreamNetwork(config=test_config, encoder=test_encoder.online_encoder)
    output_data = test_downstream(input_data)
    print(output_data.size())
| waverDeep/ImageBYOL | src/models/model.py | model.py | py | 9,147 | python | en | code | 0 | github-code | 13 |
# Car speeds and distances (fuzzy-set membership demo).
# First plot:  car speed,            x = 0..100, step 20 (km/h)
# Second plot: distance to the car,  x = 20..80, step 20 (m)
# Third plot:  pedestrian speed,     x = 0..12.5, step 2.5 (km/h)
# Fourth plot: (added)
import numpy as np
import skfuzzy as sk
from matplotlib import pyplot as plt

# Universe of discourse for the car speed: 0..119 km/h.
velAuto = np.arange(0, 120)

# Triangular membership functions: low / medium / high speed.
velAuto_baja = sk.trimf(velAuto, [0, 0, 40])
velAuto_media = sk.trimf(velAuto, [30, 60, 100])
velAuto_alta = sk.trimf(velAuto, [80, 120, 120])

# Plot the three membership curves on one figure.
plt.plot(velAuto,velAuto_baja)
#plt.plot(temp,temp_mediaBaja)
plt.plot(velAuto,velAuto_media)
#plt.plot(velAuto,temp_mediaAlta)
plt.plot(velAuto,velAuto_alta)
plt.show()
| felipeflourwears/Fundamentacion-Robotica | Control Inteligente/FuzzySets/actividad.py | actividad.py | py | 843 | python | es | code | 0 | github-code | 13 |
15603269902 | from copy import deepcopy
def n_arr(sizes):
    """Recursively build a nested list filled with the literal string '""'.

    Dimensions are consumed right-to-left: the last entry of `sizes` is the
    outermost length, the first the innermost. Each branch is deep-copied so
    sub-lists are independent.
    """
    if not sizes:
        return []
    outer, inner_sizes = sizes[-1], sizes[:-1]
    if not inner_sizes:
        return ['""'] * outer
    template = n_arr(inner_sizes)
    return [deepcopy(template) for _ in range(outer)]
if __name__ == '__main__':
    # Demo: a 2x2x2 cube of '""' placeholders.
    print(n_arr([2, 2, 2]))
| MaksTresh/python-hw-course | hw23/main.py | main.py | py | 375 | python | en | code | 0 | github-code | 13 |
26727342571 | from selenium import webdriver
from NotABot import Logger
# Module-level Chrome instance shared by both parser functions below.
driver = webdriver.Chrome('C:\\chromedriver_win32\\chromedriver.exe')
def Site_parser(URL):
    """Scrape currency rows from the page's rates table.

    Returns a list of (name, rate_text, change_text) tuples sorted by rate
    text descending, or the int 2 when the table cannot be located.
    """
    a = []  # per-row [rate, change] text pairs
    b = []  # per-row [name] text
    driver.get(URL)
    try:
        # NOTE(review): brittle positional CSS selector — breaks on any site
        # layout change; confirm against the current page markup.
        table = driver.find_element_by_css_selector('body > div.layout-wrapper.padding-top-default.bg-white.position-relative > \
div.layout-columns-wrapper > main > div.tabs-big > div:nth-child(2) > div:nth-child(1) > section > div > \
div:nth-child(1) > div > div:nth-child(2)')
    except Exception:
        Logger.log.exception("Error!Sigment is not found")
        # Sentinel error value (int, not a list) — callers must check for it.
        return (2)
    else:
        for x in table.get_property('children'):
            # Only children carrying data-* attributes are real rate rows.
            if (len(x.get_property('dataset')) != 0):
                a.append([y.get_property('innerText') for y in
                          x.find_elements_by_class_name('table-flex__rate.font-size-large.text-nowrap')][0:2])
                b.append([y.get_property('innerText') for y in x.find_elements_by_class_name('font-bold')][0:1])
        data = []
        for i in range(len(a)):
            # (currency name, rate truncated to 7 chars, change value)
            data.append((b[i][0], (a[i][0][0:7]), a[i][1]))
        # Sort by the rate *string*, descending.
        data.sort(key=lambda i: i[1], reverse=1)
        return data
def Site_parser2(URL, current):
    """Find the table row for currency code `current` (lower-case, 3 letters).

    Returns (row_text_minus_last_8_chars, date_label) on success, or the
    sentinel (2, "0") when the table/date or the currency is not found.
    """
    driver.get(URL)
    try:
        # NOTE(review): brittle positional CSS selectors — verify against the
        # current page markup.
        table = driver.find_element_by_css_selector('body > div.layout-wrapper.padding-top-default.bg-white.position-relative > div.layout-columns-wrapper > main > \
div.widget > table > tbody').get_property('children')
        date = driver.find_element_by_css_selector(
            'body > div.layout-wrapper.padding-top-default.bg-white.position-relative > div.layout-columns-wrapper > main > div.widget > header > h2 > span').text
    except Exception:
        Logger.log.exception("Error!Sigment in a currency by date is not found")
        return (2, "0")
    for x in table:
        if x.text.lower()[0:3] == current:
            # Drop the trailing 8 characters of the row text (decoration).
            data = (x.text[0:len(x.text) - 8])
            return (data, date)
    return (2, "0")
| deimosfox/ExchangeRatesTelegramBot | NotABot/SiteParser.py | SiteParser.py | py | 1,977 | python | en | code | 0 | github-code | 13 |
16778985513 | #!/usr/bin/env python
# Read the number of test cases, then classify each year as leap or normal.
n = int(input())
for case_no in range(1, n + 1):
    print(f"Case {case_no}: ", end="")
    year = int(input())
    # Gregorian rule: divisible by 4 and not by 100, or divisible by 400.
    is_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
    print("a leap year" if is_leap else "a normal year")
| 10946009/yuihuang_zj_ans | syntax/python/zj-d072.py | zj-d072.py | py | 251 | python | en | code | 0 | github-code | 13 |
72329907217 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import zipfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import json
import tensorflow as tf
dataset_urls = ["http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Amazon_Instant_Video_5.json.gz",
"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Books_5.json.gz",
"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Movies_and_TV_5.json.gz",
"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz",
"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_CDs_and_Vinyl_5.json.gz"
]
def _build_vocab(filename, vocab_dir, vocab_name):
    """Reads a file to build a vocabulary.

    Args:
      filename: file to read list of words from (one JSON review per line;
        words come from the "reviewText" field).
      vocab_dir: directory where to save the vocabulary.
      vocab_name: vocab file name.

    Returns:
      text encoder.
    """
    vocab_path = os.path.join(vocab_dir, vocab_name)
    if not tf.gfile.Exists(vocab_path):
        data = []
        with tf.gfile.GFile(filename, "r") as f:
            for line in f:
                r = json.loads(line)
                data.extend(r["reviewText"].split())
        # Most-frequent first; ties broken alphabetically.
        counter = collections.Counter(data)
        count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*count_pairs))
        encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
        encoder.store_to_file(vocab_path)
    else:
        # Vocabulary already built: just load it.
        encoder = text_encoder.TokenTextEncoder(vocab_path)
    return encoder
def _maybe_download_corpus(tmp_dir, vocab_type, dataset_url, dir_name):
    """Download and unpack the corpus.

    Args:
      tmp_dir: directory containing dataset.
      vocab_type: which vocabulary are we using (currently unused here).
      dataset_url: URL of the gzipped JSON-lines review file.
      dir_name: subdirectory of tmp_dir to unpack into.

    Returns:
      Path of the extracted *.txt training file (or None if none found).
    """
    # if vocab_type == text_problems.VocabType.CHARACTER:
    #
    #   dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext"
    #                  "/wikitext-103-raw-v1.zip")
    #   dir_name = "wikitext-103-raw"
    # else:
    #   dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext"
    #                  "/wikitext-103-v1.zip")
    #   dir_name = "wikitext-103"
    fname = os.path.basename(dataset_url)
    compressed_filepath = generator_utils.maybe_download(tmp_dir, fname,
                                                         dataset_url)
    unpacked_dir = os.path.join(tmp_dir, dir_name)
    if not tf.gfile.Exists(unpacked_dir):
        tf.gfile.MakeDirs(unpacked_dir)
    # NOTE(review): joining compressed_filepath with an (effectively absolute)
    # second component makes os.path.join discard the first part when tmp_dir
    # is absolute — presumably intended to land in unpacked_dir; confirm.
    unpacked_file = os.path.join(compressed_filepath, unpacked_dir + "/" + os.path.splitext(fname)[0])
    generator_utils.gunzip_file(compressed_filepath, unpacked_file)
    # Convert the JSON-lines reviews into a plain-text file, one review per line.
    txt = os.path.splitext(unpacked_file)[0] + ".txt"
    if not tf.gfile.Exists(txt):
        with open(unpacked_file, "rb") as jf, open(txt, "w") as wf:
            for line in jf:
                wf.write(json.loads(line)["reviewText"] + "\n")
    files = os.path.join(tmp_dir, dir_name, "*.txt")
    train_file, valid_file, test_file = None, None, None
    # Only a single training file is produced; the last *.txt match wins.
    for f in tf.gfile.Glob(files):
        # fname = os.path.basename(f)
        # if "train" in fname:
        train_file = f
        # elif "valid" in fname:
        #   valid_file = f
        # elif "test" in fname:
        #   test_file = f
    # assert train_file, "Training file not found"
    # assert valid_file, "Validation file not found"
    # assert test_file, "Testing file not found"
    return train_file  # , valid_file, test_file
@registry.register_problem
class Amzlm(text_problems.Text2SelfProblem):
    """amz dataset token-level (Amazon Instant Video reviews by default)."""

    def __init__(self, *args, **kwargs):
        super(Amzlm, self).__init__(*args, **kwargs)
        # Subclasses override these two to select a different review category.
        self.dataset_url = dataset_urls[0]
        self.dir_name = "amzlm_videos"

    @property
    def dataset_splits(self):
        # 8/1/1 train/eval/test shard split.
        return [{
            "split": problem.DatasetSplit.TRAIN,
            "shards": 8,
        }, {
            "split": problem.DatasetSplit.EVAL,
            "shards": 1,
        }, {
            "split": problem.DatasetSplit.TEST,
            "shards": 1,
        }]

    def dataset_filename(self):
        return self.dir_name

    @property
    def is_generate_per_split(self):
        # If we have pre-existing data splits for (train, eval, test) then we set
        # this to True, which will have generate_samples be called for each of the
        # dataset_splits.
        #
        # If we do not have pre-existing data splits, we set this to False, which
        # will have generate_samples be called just once and the Problem will
        # automatically partition the data into dataset_splits.
        return False

    @property
    def vocab_type(self):
        # return text_problems.VocabType.TOKEN
        return text_problems.VocabType.SUBWORD

    @property
    def approx_vocab_size(self):
        return 2**15  # 32768

    def generate_samples(self, data_dir, tmp_dir, dataset_split):
        """Yield {"targets": <lower-cased, whitespace-normalised review line>}."""
        del dataset_split
        train_file = _maybe_download_corpus(
            tmp_dir, self.vocab_type, self.dataset_url, self.dir_name)
        filepath = train_file
        # Token vocabularies are built eagerly; subword vocab is handled upstream.
        if self.vocab_type == text_problems.VocabType.TOKEN:
            _build_vocab(train_file, data_dir, self.vocab_filename)

        def _generate_samples():
            with tf.gfile.GFile(filepath, "r") as f:
                for line in f:
                    # Collapse internal whitespace; skip empty lines.
                    line = " ".join(line.strip().split())
                    if line:
                        # yield {"targets": json.loads(line)["reviewText"]}
                        yield {"targets": line.lower()}

        return _generate_samples()
@registry.register_problem
class AmzlmBook(Amzlm):
    # Amazon Books reviews variant of the Amzlm problem.

    def __init__(self, *args, **kwargs):
        super(AmzlmBook, self).__init__(*args, **kwargs)
        self.dataset_url = dataset_urls[1]
        self.dir_name = "amzlm_books"

    def dataset_filename(self):
        return self.dir_name
@registry.register_problem
class AmzlmCd(Amzlm):
    # Amazon CDs & Vinyl reviews variant of the Amzlm problem.

    def __init__(self, *args, **kwargs):
        super(AmzlmCd, self).__init__(*args, **kwargs)
        self.dataset_url = dataset_urls[4]
        self.dir_name = "amzlm_cds"

    def dataset_filename(self):
        return self.dir_name
@registry.register_problem
class AmzlmMovie(Amzlm):
    # Amazon Movies & TV reviews variant of the Amzlm problem.

    def __init__(self, *args, **kwargs):
        super(AmzlmMovie, self).__init__(*args, **kwargs)
        self.dataset_url = dataset_urls[2]
        self.dir_name = "amzlm_movies"

    def dataset_filename(self):
        return self.dir_name
@registry.register_problem
class AmzlmElec(Amzlm):
    # Amazon Electronics reviews variant of the Amzlm problem.

    def __init__(self, *args, **kwargs):
        super(AmzlmElec, self).__init__(*args, **kwargs)
        self.dataset_url = dataset_urls[3]
        self.dir_name = "amzlm_elecs"

    def dataset_filename(self):
        return self.dir_name
41633302193 | from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import numpy as np
import pandas as pd
import joblib
from starter.ml.data import process_data
from starter.ml.model import inference
# FastAPI application instance; endpoints are registered on it below.
app = FastAPI()
# Input data
# Input data
class InputData(BaseModel):
    """One census record as accepted by the /predict-income endpoint.

    Field names use underscores; they are mapped to the training data's
    (hyphenated) column names inside the endpoint. Literal types restrict
    categorical fields to the values seen in the census dataset.
    """
    age: int
    workclass: Literal['State-gov',
                       'Self-emp-not-inc',
                       'Private',
                       'Federal-gov',
                       'Local-gov',
                       'Self-emp-inc',
                       'Without-pay',
                       'Never-worked']
    fnlgt: int  # census "final weight" (fnlwgt)
    education: Literal['Bachelors',
                       'HS-grad',
                       '11th',
                       'Masters',
                       '9th',
                       'Some-college',
                       'Assoc-acdm',
                       'Assoc-voc',
                       '7th-8th',
                       'Doctorate',
                       'Prof-school',
                       '5th-6th',
                       '10th',
                       'Preschool',
                       '12th',
                       '1st-4th']
    education_num: int
    marital_status: Literal['Never-married',
                            'Married-civ-spouse',
                            'Divorced',
                            'Married-spouse-absent',
                            'Separated',
                            'Married-AF-spouse',
                            'Widowed']
    occupation: Literal['Adm-clerical',
                        'Exec-managerial',
                        'Handlers-cleaners',
                        'Prof-specialty',
                        'Other-service',
                        'Sales',
                        'Craft-repair',
                        'Transport-moving',
                        'Farming-fishing',
                        'Machine-op-inspct',
                        'Tech-support',
                        'Protective-serv',
                        'Armed-Forces',
                        'Priv-house-serv']
    relationship: Literal['Not-in-family', 'Husband', 'Wife', 'Own-child', 'Unmarried',
                          'Other-relative']
    race: Literal['White', 'Black', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo',
                  'Other']
    sex: Literal['Male', 'Female']
    capital_gain: int
    capital_loss: int
    hours_per_week: int
    native_country: Literal['United-States', 'Cuba', 'Jamaica', 'India', '?', 'Mexico',
                            'Puerto-Rico', 'Honduras', 'England', 'Canada', 'Germany', 'Iran',
                            'Philippines', 'Poland', 'Columbia', 'Cambodia', 'Thailand',
                            'Ecuador', 'Laos', 'Taiwan', 'Haiti', 'Portugal',
                            'Dominican-Republic', 'El-Salvador', 'France', 'Guatemala',
                            'Italy', 'China', 'South', 'Japan', 'Yugoslavia', 'Peru',
                            'Outlying-US(Guam-USVI-etc)', 'Scotland', 'Trinadad&Tobago',
                            'Greece', 'Nicaragua', 'Vietnam', 'Hong', 'Ireland', 'Hungary',
                            'Holand-Netherlands']

    class Config:
        # Example payload shown in the OpenAPI docs.
        schema_extra = {
            "example": {
                "age": 30,
                "workclass": 'Private',
                "fnlgt": 88416,
                "education": 'Masters',
                "education_num": 13,
                "marital_status": "Married-spouse-absent",
                "occupation": "Tech-support",
                "relationship": "Wife",
                "race": "White",
                "sex": "Female",
                "capital_gain": 2000,
                "capital_loss": 0,
                "hours_per_week": 35,
                "native_country": 'United-States'
            }
        }
# Load model artifacts once at startup: the trained classifier plus the
# encoder and label binarizer it was fitted with (presumably a categorical
# encoder / LabelBinarizer — confirm against the training pipeline).
model = joblib.load('./model/model.pkl')
encoder = joblib.load('./model/encoder.pkl')
lb = joblib.load('./model/lb.pkl')
# GET on the root giving a welcome message.
@app.get("/")
async def say_hello():
    """Root endpoint: welcome message and a short model description."""
    greeting = "Hello! I`m Najla. This is my project on machine learning pipelines."
    description = "This model predicts whether income exceeds $50K/yr based on census data."
    return {"greeting": greeting, "description": description}
# POST on predict-income taking data and giving inference
@app.post("/predict-income")
async def predict(input: InputData):
    """Run the trained model on one census record; returns the income class label."""
    # Values in the column order the model was trained on.
    input_data = np.array([[
        input.age,
        input.workclass,
        input.fnlgt,
        input.education,
        input.education_num,
        input.marital_status,
        input.occupation,
        input.relationship,
        input.race,
        input.sex,
        input.capital_gain,
        input.capital_loss,
        input.hours_per_week,
        input.native_country]])
    # NOTE(review): column naming mixes hyphen/underscore styles
    # ("marital-status" vs "education_num") and uses "fnlwgt" for the request
    # field "fnlgt" — presumably this matches what process_data/encoder were
    # fitted on; confirm against the training code.
    columns = [
        "age",
        "workclass",
        "fnlwgt",
        "education",
        "education_num",
        "marital-status",
        "occupation",
        "relationship",
        "race",
        "sex",
        "capital_gain",
        "capital_loss",
        "hours-per-week",
        "native-country"]
    input_df = pd.DataFrame(data=input_data, columns=columns)
    cat_features = [
        "workclass",
        "education",
        "marital-status",
        "occupation",
        "relationship",
        "race",
        "sex",
        "native-country",
    ]
    # Apply the same preprocessing used at training time (inference mode).
    X, _, _, _ = process_data(
        input_df,
        categorical_features=cat_features,
        encoder=encoder,
        lb=lb,
        training=False)
    y = inference(model, X)
    # Map the binary prediction back to its string label.
    pred = lb.inverse_transform(y)[0]
    return {"Income prediction": pred}
if __name__ == "__main__":
    # Dev server with auto-reload when executed directly.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
28127517162 | import sys
from .Player import Player
sys.path.append('../')
from const import *
from utils import *
from rule_checker import rule_checker, get_opponent_stone, get_legal_moves
from board import make_point, board, get_board_length, make_empty_board, parse_point
class AlphaBetaPlayer(Player):
    """Go player that chooses moves via alpha-beta-pruned minimax search."""

    def __init__(self, depth="1"):
        # Bug fix: depth may arrive as a string (e.g. "1" from config). The
        # search cutoff compares it against an int ply counter
        # (`depth == max_depth`), and `0 == "1"` is always False, so the
        # intended depth limit never fired. Coerce once here; callers passing
        # either int or numeric string keep working.
        self.depth = int(depth)
        super().__init__()

    def register(self):
        """Return this player's name; protocol violation if called twice."""
        if self.receive_flag or self.register_flag:
            self.go_crazy()
        else:
            self.register_flag = True
        return self.name

    def receive_stones(self, stone):
        """Accept a stone color; must follow register() exactly once."""
        if not is_stone(stone):
            self.go_crazy()
        if self.receive_flag or not self.register_flag:
            self.go_crazy()
        self.receive_flag = True
        self.stone = stone

    def go_crazy(self):
        """Flag a protocol violation.

        NOTE(review): `crazy` is assumed to come from the `const` wildcard
        import — confirm; otherwise this raises NameError.
        """
        self.crazy_flag = True
        return crazy

    def end_game(self):
        self.receive_flag = False
        return "OK"

    def make_a_move(self, boards):
        """Return the best move found by searching self.depth plies."""
        move = (self.ab_minimax(0, self.depth, True, NEG_INF, POS_INF, boards))
        return move[1]

    def heuristic(self, curr_board):
        # Board evaluation: this player's current score on the board.
        return board(curr_board).calculate_score()[self.stone]

    def ab_minimax(self, depth, max_depth, is_maximizer, alpha, beta, boards):
        """Alpha-beta minimax over legal moves; returns [score, move].

        NOTE(review): both branches place self.stone — one might expect the
        minimizer to place the opponent's stone; confirm against the game
        rules before changing.
        """
        curr_board = boards[0]
        if is_maximizer:
            legal_moves = get_legal_moves(boards, self.stone)
        else:
            legal_moves = get_legal_moves(boards, get_opponent_stone(self.stone))
        if (depth == max_depth) or (len(legal_moves) == 0):
            # Leaf: evaluate. The move slot is a placeholder; callers
            # overwrite it with `result[1] = move` on the way back up.
            return [self.heuristic(curr_board), "hello"]
        updated_board = curr_board
        if is_maximizer:
            max_eval = [alpha, None]
            for move in legal_moves:
                if move != "pass":
                    updated_board = board(curr_board).place(self.stone, move)
                    updated_history = update_board_history(updated_board, boards)
                    result = self.ab_minimax(depth + 1, max_depth, not is_maximizer, alpha, beta, updated_history)
                    result[1] = move
                    max_eval = max(max_eval, result, key=lambda x: x[0])
                    alpha = max(alpha, result[0])
                    if beta <= alpha:
                        break  # beta cutoff
            return max_eval
        else:
            min_eval = [beta, None]
            for move in legal_moves:
                if move != "pass":
                    updated_board = board(curr_board).place(self.stone, move)
                    updated_history = update_board_history(updated_board, boards)
                    result = self.ab_minimax(depth + 1, max_depth, not is_maximizer, alpha, beta, updated_history)
                    result[1] = move
                    min_eval = min(min_eval, result, key=lambda x: x[0])
                    beta = min(beta, result[0])
                    if beta <= alpha:
                        break  # alpha cutoff
            return min_eval
| MicahThompkins/go_project | Deliverables/10/10.1/tournament/player_pkg/AlphaBetaPlayer.py | AlphaBetaPlayer.py | py | 2,943 | python | en | code | 0 | github-code | 13 |
# Points on a line moving left/right; print the earliest collision time, or -1.
n = int(input())  # number of points (only consumed for input format)
d = input().strip()  # direction string: 'L' or 'R' per point
p = [*map(int, input().strip().split())]  # positions, increasing order
if 'RL' not in d:
    # No right-mover immediately followed by a left-mover -> never collide.
    print(-1)
else:
    di = iter(d)
    t = 2e9  # sentinel larger than any feasible meeting time
    i = 0
    for _ in di:
        if d[i : i + 2] == 'RL':
            # An adjacent R,L pair meets halfway between their positions.
            t = min(t, (p[i + 1] - p[i]) // 2)
            next(di)  # consume the partner so the pair isn't re-counted
            i += 1
        i += 1
print(t) | userr2232/PC | Codeforces/A/699.py | 699.py | py | 319 | python | en | code | 1 | github-code | 13 |
2199252767 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib.parse import urlparse, urlsplit
# Maps "<section>.<region>" path keys to zaobao.com category labels.
mapping = {
    'realtime.china': '即时报道;中港台即时',
    'realtime.world': '即时报道;国际即时',
    'news.china': '新闻;中国新闻',
    'news.world': '新闻;国际新闻'
}
# mapping2 = {
#     'realtime': {
#         'china': '中港台即时',
#         'world': '国际即时'
#     },
#     'news': {
#         'china': '中国新闻',
#         'world': '国际新闻'
#     }
#
# }
# Experiment: inspect how a story URL's path decomposes into
# section/region/story segments for use with the mapping above.
url = 'http://www.zaobao.com/realtime/china/story20190105-921429'
o = urlparse(url)
print(o.path)
print(o.path.strip('/').split('/'))
| a289237642/companySpider | news/news/testcode/zaobao.py | zaobao.py | py | 645 | python | en | code | 0 | github-code | 13 |
370918942 | import pandas as pd
import math
from sklearn.model_selection import train_test_split, GridSearchCV
import xgboost as xgb
from sklearn.metrics import mean_squared_error,r2_score
# 1. 加载数据 (load the slump test dataset)
file_path = '../datas/slump_test.txt'
df = pd.read_csv(file_path, sep=',')
# print(help(pd.read_csv))
# print(df.head(5))
# 2. 获取特征矩阵X和目标属性Y (columns 1-7 are features, last column is target)
x = df.iloc[:, 1:8]
y = df.iloc[:, -1]
# 3. 数据分割 — NOTE: test_size=0.9 keeps only 10% of rows for training.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.9, random_state=28)
print('训练集大小:{}'.format(x_train.shape))
print('测试集大小:{}'.format(x_test.shape))
'''
开始使用xgboost的相关API
'''
# 一:直接使用xgboost的相关API
# a. 数据转换 (wrap splits in DMatrix)
dtrain = xgb.DMatrix(data=x_train, label=y_train)
dtest = xgb.DMatrix(data=x_test)
# b. 模型参数构建
params = {'max_depth': 2, 'eta': 0.1, 'objective': 'reg:linear'}  # eta是学习率
num_boost_round = 2  # 迭代次数
# c. 模型训练
model = xgb.train(params=params, dtrain=dtrain, num_boost_round=num_boost_round)
# d. 模型保存
model.save_model('xgb.model')
print(model)
# a. 加载模型预测 (reload the saved booster and score it)
model2 = xgb.Booster()
model2.load_model('xgb.model')
print(model2)
# Bug fix: these metrics compare y_test against predictions on dtest — they
# are *test-set* metrics, but were previously labelled "训练集" (training set).
print('测试集MSE:{}'.format(mean_squared_error(y_test, model2.predict(dtest))))
print('测试集R2:{}'.format(r2_score(y_test, model2.predict(dtest))))
| yyqAlisa/python36 | 自学/sklearn self-study/Ensemble learing/XGBoost案例代码.py | XGBoost案例代码.py | py | 1,329 | python | en | code | 0 | github-code | 13 |
41841594272 | # import PIL module
from screeninfo import get_monitors
from PIL import Image
import numpy as np
import cv2 as cv
# Screen dimensions of the primary monitor.
screen_width = get_monitors()[0].width
screen_height = get_monitors()[0].height
# Base UI unit: button height is 10% of the screen height.
unite = int(screen_height*.1)
def paste_buttons(back,front_left=None,front_right=None,front_midel =None):
    """Alpha-paste up to three button images onto a background image.

    back: path to the background image file.
    front_left / front_right / front_midel: optional paths to button images;
    each is resized to 1.5*unite x unite and pasted onto the background.
    Returns the composited image as a NumPy array for OpenCV.

    NOTE(review): front_midel is pasted at the same top-right position as
    front_right, so supplying both makes the "middle" button cover the right
    one — confirm whether it was meant to be centred.
    NOTE(review): Image.ANTIALIAS is deprecated/removed in recent Pillow
    releases (use Image.LANCZOS).
    """
    background = Image.open(back)
    background = background.convert("RGBA")
    if front_left :
        position = (0, 0)  # top-left corner
        frontImage = Image.open(front_left)
        frontImage = frontImage.resize((int(unite*1.5), unite), Image.ANTIALIAS)
        frontImage = frontImage.convert("RGBA")
        # Third argument uses the button's own alpha channel as paste mask.
        background.paste(frontImage, position, frontImage)
    if front_right :
        frontImage2 = Image.open(front_right)
        frontImage2 = frontImage2.resize((int(unite*1.5), unite), Image.ANTIALIAS)
        frontImage2 = frontImage2.convert("RGBA")
        # Right-aligned with a 1% margin from the right edge.
        position = (int((background.width*.99)-(frontImage2.width)), int(0))
        background.paste(frontImage2, position, frontImage2)
    if front_midel :
        frontImage3 = Image.open(front_midel )
        frontImage3 = frontImage3.resize((int(unite*1.5), unite), Image.ANTIALIAS)
        frontImage3 = frontImage3.convert("RGBA")
        position = (int((background.width*.99)-(frontImage3.width)), int(0))
        background.paste(frontImage3, position, frontImage3)
    # Convert the PIL image to a NumPy array and swap the R/B channels so the
    # result matches OpenCV's BGR channel ordering.
    img = np.asarray(background)
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    return img
| Eslamomar007/LaRose | paste_buttons.py | paste_buttons.py | py | 1,580 | python | en | code | 0 | github-code | 13 |
8801596830 | import torch
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from os.path import isfile, join
from os import listdir
from os import system
from PIL import Image
from matplotlib import pyplot as plt
from model import ConvolutionalNeuralNetwork, ConvolutionalNeuralNetwork2Layers, ConvolutionalNeuralNetwork4Layers, ConvolutionalNeuralNetwork1Layers, FlexibleCNN
from dataset import ImageDataset
from time import time
from torchsummary import summary
import numpy as np
from math import floor, fabs
# =============================================== Parameters ===============================================
seed = 3
lr = 0.1
numOfEpoch = 210
batch_size = 32
evalEvery = 10
# =============================================== Parameters ===============================================
# =============================================== Misc Functions ===============================================
# load the images in ./Mypics
def organizeFolders(myImages, myFileNames, path):
    """Save PIL images into per-letter subfolders of `path`.

    File names are expected to look like "<prefix>_<letter>_<index>" so that
    splitting on "_" yields the letter at position 1 and the index at 2.
    NOTE(review): directories are created via `system("mkdir ...")`, which is
    shell-dependent and unsafe for untrusted names — os.makedirs would be
    more robust.
    """
    # make folders and organize into different folders depends on letters
    current = myFileNames[0].split("_")[1]
    system("mkdir " + path + current)
    for i in range(0, len(myImages)):
        splited = myFileNames[i].split("_")
        if splited[1] != current:
            # New letter seen: create its folder (assumes file names arrive
            # grouped by letter — TODO confirm).
            current = splited[1]
            system("mkdir " + path + current)
        myImages[i].save(path + current + "/" + splited[1] + splited[2])
# show four images in four subplots
def imshowFour(imgList, labelTitle, mean = 2, std = 0.5): # should have 4 images
    """Show four image tensors in a 2x2 matplotlib grid with titles.

    NOTE(review): `mean` and `std` are accepted but never used in the body —
    presumably intended for un-normalizing before display; confirm or remove.
    """
    coord = [(2,2,1), (2,2,2), (2,2,3), (2,2,4)]  # the four 2x2 subplot slots
    counter = 0
    for img in imgList:
        npimg = img.numpy()
        plt.subplot(coord[counter][0],coord[counter][1],coord[counter][2])
        # Transpose CxHxW tensors to the HxWxC layout matplotlib expects.
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.title(labelTitle[counter])
        counter = counter + 1
    plt.show()
# will calculate accuracy given prediction and labels
def evaluateAccuracy(predictions, labels):
    """Return the fraction of prediction rows matching their one-hot label.

    Relies on the module-level `oneHotDict` (class index -> one-hot row).
    A row counts as correct only when every component is within 0.5 of the
    one-hot target.
    """
    # model.eval()
    accCounter = 0
    for i in range (0, predictions.size()[0]):
        difference = predictions[i,:].double() - torch.DoubleTensor(oneHotDict[labels[i].item()])
        difference = difference.squeeze()
        # Element-wise absolute value. NOTE: this inner loop shadows the
        # outer `i`; harmless here because the outer range iterator reassigns
        # it each pass, but confusing to read.
        for i in range(0, difference.size()[0]):
            difference[i] = fabs(difference[i])
        ones = torch.ones(difference.size())
        zeros = torch.zeros(difference.size())
        # 0 where |diff| < 0.5, 1 otherwise; a sum of 0 means all components hit.
        difference = torch.where((difference) < 0.5, zeros, ones)
        if difference.sum() == 0:
            accCounter = accCounter + 1
    return accCounter/predictions.size()[0]
def evalulate(model, valLoader):
    """Return the exact-match accuracy of `model` over `valLoader`.

    A sample counts as correct only when every output component lies within
    0.5 of the corresponding label component.
    """
    correct = 0
    for _, (data, labels) in enumerate(valLoader):
        outputs = model(data.type(torch.FloatTensor))
        for row in range(outputs.size()[0]):
            gap = (outputs[row, :].double() - labels[row, :].double()).abs()
            # 0 where the component is close enough, 1 where it misses.
            misses = torch.where(gap < 0.5, torch.zeros(gap.size()), torch.ones(gap.size()))
            if misses.sum() == 0:
                correct += 1
    return float(correct) / len(valLoader.dataset)
def evalulateWithLoss(model, valLoader, lossFunc):
    """Return (exact-match accuracy, mean per-sample loss) over `valLoader`.

    Uses the module-level `oneHotDict` (class index -> one-hot row): a row
    counts as correct only when every component is within 0.5 of the one-hot
    target. Puts the model in eval mode as a side effect.
    """
    accCounter = 0
    lossCounter = 0
    for i, batch in enumerate(valLoader):
        data, labels = batch
        data = data.type(torch.FloatTensor)
        model.eval()
        predictions = model(data)
        lossCounter = lossCounter + lossFunc(input=predictions.squeeze(), target=labels.long())
        for i in range(0, predictions.size()[0]):
            difference = predictions[i, :].double() - torch.DoubleTensor(oneHotDict[labels[i].item()])
            difference = difference.squeeze()
            for i in range (0, difference.size()[0]):
                difference[i] = fabs(difference[i])
            ones = torch.ones(difference.size())
            zeros = torch.zeros(difference.size())
            difference = torch.where((difference) < 0.5, zeros, ones)
            if difference.sum() == 0:
                accCounter = accCounter + 1
    # Bug fix: previously returned `loss/len(...)`, but `loss` is not defined
    # in this function — at call time it resolved to the module-level
    # training-loop `loss` (the last *training* batch), not the accumulated
    # validation loss gathered in lossCounter.
    return (float(accCounter)/len(valLoader.dataset), lossCounter/len(valLoader.dataset))
def isTrainingFile(path):
    """True unless `path` mentions one of the hold-out numbers 39-45.

    Note this is a plain substring test, so e.g. "139" also matches "39".
    """
    return not any(str(n) in path for n in range(39, 46))
def isValidationFile(path):
    """True when `path` mentions one of the hold-out numbers 39-45 (substring test)."""
    return any(str(n) in path for n in range(39, 46))
def generateConfusionMatrix(model, valLoader):
    """Print a 10x10 confusion matrix of argmax predictions over `valLoader`."""
    predictionList = []
    labelList = []
    for k, batch in enumerate(valLoader):
        data, labels = batch
        predictions = model(data)
        for i in range(0, predictions.size()[0]):
            # Argmax by scanning for the index (0-9) holding the max value.
            maxVal = torch.max(predictions[i, :])
            for j in range (0, 10):
                if predictions[i, j].item() == maxVal:
                    predictionList.append(j)
                    break
            labelList.append(labels[i].item())
    print(confusion_matrix(labelList, predictionList, labels=[0,1,2,3,4,5,6,7,8,9]))
# =============================================== Misc Functions ===============================================
# ===============================================Getting the data readt================================================
myDireNames = []
myFileNames = []
for f in listdir("./asl_images"):
if f != '.DS_Store':
myDireNames.append("./asl_images/" + f + "/")
for dir in myDireNames:
for img in listdir(dir):
myFileNames.append(dir+img)
# also try to find mean and std by getting a list of the numpy version of the images
myNPImages = []
myImages = []
for pic in myFileNames:
if ".jpg" in pic:
img = Image.open(pic)
npIMG = np.array(img)
myNPImages.append(npIMG)
# calculate mean and std of the images to normalize, these are actually not necessary
myNPImages = np.array(myNPImages)
means = [0,0,0]
stds = [0,0,0]
for i in range (0, 3):
means[i] = np.mean(myNPImages[:,:,:,i])/255
stds[i] = np.std(myNPImages[:,:,:,i])/255
# load images into folders, I've already called it
# organizeFolders(myImages, myFileNames, "./MyProcessedPics/")
# ===============================================Getting the data into python================================================
# =============================================== getting the data loaders ready================================================
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(means, stds)])
myValidationDataSet = torchvision.datasets.ImageFolder(root="./asl_images/", transform = transform, is_valid_file = isValidationFile)
myTrainingDataSet = torchvision.datasets.ImageFolder(root="./asl_images/", transform = transform, is_valid_file = isTrainingFile)
# instead of modifying the dataset object, I'm simply going to make a dictionary that can reference oneHOT values from numbers here
oneh_encoder = OneHotEncoder(categories='auto')
letterDict = myTrainingDataSet.class_to_idx # from
inv_letterDict = {v: k for k, v in letterDict.items()}
oneHotDictKey = np.array(list(myTrainingDataSet.class_to_idx.values()))
oneHotDictVal = oneh_encoder.fit_transform(np.array(list(myTrainingDataSet.class_to_idx.values())).reshape((len(myTrainingDataSet.class_to_idx.values()), 1))).todense()
oneHotDict = {}
for i in range(0, oneHotDictKey.shape[0]):
oneHotDict[oneHotDictKey[i]] = oneHotDictVal[i]
myTrainingDataLoader = DataLoader(myTrainingDataSet, batch_size=len(myTrainingDataSet), shuffle=True)
myValidationDataLoader = DataLoader(myValidationDataSet, batch_size=len(myValidationDataSet), shuffle=True)
counter = 0
trainingImageData = []
trainingLabelData = []
validationImageData = []
validationLabelData = []
# this for loop extract the label and images from the original dataloader, so I can one-hot-encode it.
for i, batch in enumerate(myTrainingDataLoader):
feat, label = batch
for i in range (0, label.squeeze().shape[0]):
# trainingLabelData.append(np.array(oneHotDict[label[i].item()]))
trainingLabelData.append(np.array(label[i].item()))
trainingImageData.append(np.array(feat[i]))
# try to encode the data:
for i, batch in enumerate(myValidationDataLoader):
feat, label = batch
for i in range (0, label.squeeze().shape[0]):
# validationLabelData.append(np.array(oneHotDict[label[i].item()]))
validationLabelData.append(np.array(label[i].item()))
validationImageData.append(np.array(feat[i]))
trainingLabelData = np.array(trainingLabelData).squeeze()
trainingImageData = np.array(trainingImageData).squeeze()
validationLabelData = np.array(validationLabelData).squeeze()
validationImageData = np.array(validationImageData).squeeze()
print(trainingLabelData.shape)
print(trainingImageData.shape)
print(validationLabelData.shape)
print(validationImageData.shape)
trainingDataSet = ImageDataset(trainingImageData, trainingLabelData)
trainingDataLoader = DataLoader(trainingDataSet, batch_size=batch_size, shuffle=True)
validationDataSet = ImageDataset(validationImageData, validationLabelData)
validationDataLoader = DataLoader(validationDataSet, batch_size=batch_size, shuffle=True)
# ========================e======================= getting the data loaders ready================================================
# =============================================== training ================================================
torch.manual_seed(seed=seed)
model = ConvolutionalNeuralNetwork4Layers()
print(summary(model, input_size=(3, 56, 56)))
# lossFunc = torch.nn.MSELoss()
lossFunc = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
lossTrend = []
accuracyTrend = []
epochTrend = []
vlossTrend = []
vaccuracyTrend = []
vepochTrend = []
startTime = time()
for epoch in range(0, numOfEpoch):
for i, batch in enumerate(trainingDataLoader):
feat, label = batch
optimizer.zero_grad()
prediction = model(feat)
loss = lossFunc(input=prediction.squeeze(), target=label.long())
loss.backward()
optimizer.step()
#record data
if i == floor(trainingImageData.shape[0]/batch_size - 1):
epochTrend.append(epoch)
accuracyTrend.append(evaluateAccuracy(prediction, label))
lossTrend.append(loss/batch_size)
if epoch%evalEvery == 0:
accuracy, loss = evalulateWithLoss(model, validationDataLoader, lossFunc)
vaccuracyTrend.append(accuracy)
vlossTrend.append(loss)
vepochTrend.append(epoch)
print("accuracy of epoch " + str(epoch) + " is " + str(accuracy))
endtime = time()
print(evalulateWithLoss(model, validationDataLoader, lossFunc)[0])
print(endtime-startTime)
generateConfusionMatrix(model, validationDataLoader)
# torch.save(model.state_dict(),"MyBestSmall.pt")
epochTrend = np.array(epochTrend)
accuracyTrend = np.array(accuracyTrend)
lossTrend = np.array(lossTrend)
vaccuracyTrend = np.array(vaccuracyTrend)
vepochTrend = np.array(vepochTrend)
plt.subplot(2, 1, 1)
plt.title("Loss and Accuracy of training data vs Epochs")
plt.plot(epochTrend, lossTrend, label = "training")
plt.plot(vepochTrend, vlossTrend, 'bo', label = "validation")
plt.legend(loc = 'lower right')
plt.xlabel("Epochs")
plt.ylabel("loss")
plt.subplot(2, 1, 2)
plt.plot(epochTrend, accuracyTrend, label = "training")
plt.plot(vepochTrend, vaccuracyTrend, 'bo', label = "validation")
plt.legend(loc = 'lower right')
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.show()
# =============================================== training ================================================ | ece324-2019/-hashtag | Assignment_4_2/main_2.py | main_2.py | py | 11,992 | python | en | code | 0 | github-code | 13 |
from django.urls import path
from . import views

# Blog routes: index, article detail by slug, and the POST-driven
# like/dislike endpoints (class-based views).
urlpatterns = [
    path('', views.blog_home, name='blog_home'),
    path('post/<slug:slug>/', views.view_blog_article, name='view_post'),
    path('like/<slug:slug>/', views.PostLikes.as_view(), name='post_likes'),
    path(
        'dislike/<slug:slug>/',
        views.PostDislikes.as_view(),
        name='post_dislikes'
    ),
]
| SamuelUkachukwu/PICKnSTRUM | blog/urls.py | urls.py | py | 389 | python | en | code | 1 | github-code | 13 |
3759861646 | # coding=utf-8
import urllib
import urllib2
import json
from utils import xml_to_json
import logging
# from google.appengine.api import urlfetch
# urlfetch.set_default_fetch_deadline(45)
base_url = 'http://rs.mgimo.ru'
url = base_url + "/ReportServer/Pages/ReportViewer.aspx?%2freports%2f%D0%A0%D0%B0%D1%81%D0%BF%D0%B8%D1%81%D0%B0%D0%BD%D0%B8%D0%B5+%D1%8F%D0%B7%D1%8B%D0%BA%D0%BE%D0%B2%D0%BE%D0%B9+%D0%B3%D1%80%D1%83%D0%BF%D0%BF%D1%8B&rs:Command=Render"
headers = {
'HTTP_USER_AGENT': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml; q=0.9,*/*; q=0.8',
'Content-Type': 'application/x-www-form-urlencoded'
}
def _find_viewstate(data):
val = 'id="__VIEWSTATE" value="'
beg = data.index(val) + len(val)
end = data.index('"', beg)
return data[beg:end]
def _find_eventvalid(data):
val = 'id="__EVENTVALIDATION" value="'
beg = data.index(val) + len(val)
end = data.index('"', beg)
return data[beg:end]
def _find_export_url(data):
val = 'OpType=Export'
back = fow = data.index(val)
while data[back] != '"':
back -= 1
while data[fow] != '"':
fow += 1
return data[back + 1: fow]
def _render_payload(obj, data):
    """Build the ReportViewer postback form fields.

    `obj` supplies the report parameters; `data` is the previous page's HTML,
    from which the ASP.NET state fields are re-extracted.
    """
    fields = [
        (r'__EVENTTARGET', r'ReportViewerControl$ctl00$ctl05$ctl00'),
        (r'__VIEWSTATE', _find_viewstate(data)),
        (r'__VIEWSTATEGENERATOR', r'177045DE'),
        (r'__EVENTARGUMENT', ''),
        (r'__LASTFOCUS', ''),
        (r'__EVENTVALIDATION', _find_eventvalid(data)),
        (r'ReportViewerControl$ctl00$ctl03$ctl00', obj['date']),
        (r'ReportViewerControl$ctl00$ctl05$ctl00', obj['program_type']),
        (r'ReportViewerControl$ctl00$ctl07$ctl00', obj['faculty']),
        (r'ReportViewerControl$ctl00$ctl09$ctl00', obj['department']),
        (r'ReportViewerControl$ctl00$ctl11$ctl00', obj['course']),
        (r'ReportViewerControl$ctl00$ctl13$ctl00', obj['academic_group']),
        (r'ReportViewerControl$ctl00$ctl15$ctl00', obj['lang_group']),
        (r'ReportViewerControl$ctl00$ctl00', r'Просмотр отчета'),
        (r'ReportViewerControl$ctl04', ''),
        (r'ReportViewerControl$ctl05', ''),
        (r'ReportViewerControl$ctl06', r'1'),
        (r'ReportViewerControl$ctl07', r'0'),
    ]
    return tuple(fields)
def _scrape_last_page(obj):
    """Return the report page HTML after submitting all form parameters.

    Performs an initial GET, then re-posts the form 7 times — presumably one
    postback per dependent dropdown level (date, program type, faculty,
    department, course, academic group, language group); TODO confirm.
    (Python 2 only: uses urllib2/urllib.urlencode.)
    """
    data = urllib2.urlopen(url, timeout=60).read()
    logging.debug(data)
    for i in range(7):
        payload = _render_payload(obj, data)
        encodedFields = urllib.urlencode(payload)
        req = urllib2.Request(url, encodedFields, headers)
        f = urllib2.urlopen(req, timeout=60)
        data = f.read()
    return data
def get_timetable(obj):
    """Fetch the timetable for the parameters in `obj` and return JSON text.

    Drives the ReportViewer postbacks via _scrape_last_page, follows the
    page's XML export link, and converts the XML payload to JSON.
    (Python 2 only: str.decode('unicode-escape') is not valid on Python 3 str.)
    """
    data = _scrape_last_page(obj)
    # Appending 'XML' selects the XML export format for the report.
    export_url = base_url + _find_export_url(data) + 'XML'
    logging.debug('export url')
    logging.debug(export_url)
    f = urllib2.urlopen(export_url, timeout=60)
    data = f.read()
    js = xml_to_json(data)
    return json.dumps(js).decode('unicode-escape')
def get_cached_timetable(obj):
    """Stub: intended to return a cached timetable (not implemented)."""
    pass

# NOTE(review): this scrape runs at *import time* with a hard-coded date and
# parameter set — consider guarding with `if __name__ == '__main__':`.
page = get_timetable({
    'date': '27.02.2017',
    'program_type': '1',
    'faculty': '1',
    'department': '1',
    'course': '1',
    'academic_group': '1',
    'lang_group': '1',
})
| swoopyy/MGIMO-timetable | backend/scrapper.py | scrapper.py | py | 3,311 | python | en | code | 0 | github-code | 13 |
34183126799 | import json
import csv
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from rest_framework import serializers, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from django.http.response import HttpResponse
from django.shortcuts import render
from rest_framework import generics, status
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import (DestroyModelMixin, ListModelMixin,
RetrieveModelMixin)
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework.views import APIView
from .models import *
from .serializers import *
def indexpage(request):
return render(request, 'assesment_system/index.html')
def basepage(request):
return render(request, 'assesment_system/base.html')
fs = FileSystemStorage(location='tmp/')
class CandidateRegister(APIView):
    """Create a candidate account from POSTed registration data."""

    serializer_class = CustomUserSerializer

    def post(self, request):
        serializer = CustomUserSerializer(data=request.data)
        print(serializer)  # debug trace of the bound serializer
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
def candidateregister(request):
return render(request, 'assesment_system/CandidateRegister.html')
class candidateList(APIView):
def get(self, request):
tasks = Candidate.objects.all()
serialized = CustomUserSerializer(tasks, many=True)
return Response(serialized.data)
class user_details_operation(generics.RetrieveUpdateDestroyAPIView):
queryset=Candidate.objects.all()
serializer_class=CandidateSerializer
def candidatelogin(request):
return render(request, 'assesment_system/login.html')
#Csv file upload
class CandidateViewSet(viewsets.ModelViewSet):
    """CRUD for candidates plus a bulk CSV upload action."""

    queryset = Candidate.objects.all()
    serializer_class = CustomUserSerializer

    @action(detail=False, methods=['POST'])
    def upload_data(self, request):
        """Bulk-create Candidate rows from an uploaded CSV file.

        Expected columns (after a header row):
        email, user_name, first_name, last_name.
        """
        file = request.FILES["file"]
        content = file.read()  # these are bytes
        file_content = ContentFile(content)
        file_name = fs.save(
            "_tmp.csv", file_content
        )
        tmp_file = fs.path(file_name)
        product_list = []
        # Close the CSV handle deterministically; it was previously left open.
        with open(tmp_file, errors="ignore") as csv_file:
            reader = csv.reader(csv_file)
            next(reader)  # skip header row
            # Bug fix: the old loop was `for email, row in enumerate(reader)`,
            # binding the row *index* to `email` before immediately re-binding
            # it from the row unpacking — iterate the rows directly.
            for row in reader:
                (
                    email,
                    user_name,
                    first_name,
                    last_name,
                ) = row
                product_list.append(
                    Candidate(
                        email=email,
                        user_name=user_name,
                        first_name=first_name,
                        last_name=last_name,
                    )
                )
        Candidate.objects.bulk_create(product_list)
        return Response("Successfully upload the data")
#Aptitude
class ApptitudeAPI(generics.GenericAPIView):
serializer_class = AptitudeSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
apptitude = serializer.save()
return Response({
"apptitude": AptitudeSerializer(apptitude, context=self.get_serializer_context()).data
})
class ApptitudeList(APIView):
def get(self, request):
tasks = Aptitude.objects.all()
serialized = AptitudeSerializer(tasks, many=True)
return Response(serialized.data)
class Show(GenericAPIView,RetrieveModelMixin):
queryset=Aptitude.objects.all()
serializer_class=AptitudeSerializer
def get(self,request,*args,**kwargs):
return self.retrieve(request,*args,**kwargs)
class ReasoningShow(GenericAPIView,RetrieveModelMixin):
queryset=Reasoning.objects.all()
serializer_class=ReasoningSerializer
def get(self,request,*args,**kwargs):
return self.retrieve(request,*args,**kwargs)
def homepage(request):
return render(request, 'assesment_system/QuestionPage.html')
class VerbalAPI(generics.GenericAPIView):
serializer_class= VerbalSerializer
def post(self,request,*args,**kwargs):
serializer=self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
verbal= serializer.save()
return Response({
"verbal":VerbalSerializer(verbal,context=self.get_serializer_context()).data
})
class ProfileList(APIView):
def get(self,request):
tasks= Verbal.objects.all()
serialized= VerbalSerializer(tasks,many=True)
return Response(serialized.data)
def homepage1(request):
return render(request,'assesment_system/questionpaper.html')
class VerbalCreateApi(generics.CreateAPIView):
queryset=Verbal.objects.all()
serializer_class=VerbalSerializer
class VerbalListApi(generics.ListAPIView):
queryset=Verbal.objects.all()
serializer_class=VerbalSerializer
class VerbalUpdateApi(generics.RetrieveUpdateDestroyAPIView):
queryset=Verbal.objects.all()
serializer_class=VerbalSerializer
class VerbalDeleteApi(generics.DestroyAPIView):
queryset=Verbal.objects.all()
serializer_class=VerbalSerializer
class User_Verbal_mapperAPI(generics.CreateAPIView):
serializer_class = User_Verbal_mapper_Serializer
queryset=User_Verbal_mapper.objects.all()
class User_Verbal_mapperList(APIView):
def get(self, request):
tasks = User_Verbal_mapper.objects.all()
serialized = User_Verbal_mapper_Serializer(tasks, many=True)
return Response(serialized.data)
#Self development Curd operations
class Self_development_User_mapperDeleteApi(generics.DestroyAPIView):
queryset = Self_development_User_mapper.objects.all()
serializer_class = Self_development_User_mapperSerializer
class Self_development_User_mapperList(APIView):
def get(self, request):
tasks = Self_development_User_mapper.objects.all()
serialized = Self_development_User_mapperSerializer(tasks, many=True)
return Response(serialized.data)
class Self_development_User_mapperApi(generics.CreateAPIView):
serializer_class = Self_development_User_mapperSerializer
queryset = Self_development_User_mapper.objects.all()
#####################################
class Self_developmentCreateApi(generics.CreateAPIView):
queryset=Self_development.objects.all()
serializer_class=Self_developmentSerializer
class Self_developmentShow(GenericAPIView,RetrieveModelMixin):
queryset=Self_development.objects.all()
serializer_class=Self_developmentSerializer
def get(self,request,*args,**kwargs):
return self.retrieve(request,*args,**kwargs)
class Self_developmentUpdateApi(generics.RetrieveUpdateDestroyAPIView):
queryset=Self_development.objects.all()
serializer_class=Self_developmentSerializer
class Self_developmentDeleteApi(generics.DestroyAPIView):
queryset=Self_development.objects.all()
serializer_class=Self_developmentSerializer
#############################################################
class Self_developmentAPI(generics.GenericAPIView):
serializer_class= Self_developmentSerializer
def post(self,request,*args,**kwargs):
serializer=self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self_development= serializer.save()
return Response({
"self_development":Self_developmentSerializer(self_development,context=self.get_serializer_context()).data
})
# NOTE(review): the four classes below re-define identically-named,
# identically-bodied classes declared earlier in this module; these later
# definitions shadow the earlier ones. The duplicates should be deleted.
class Self_developmentCreateApi(generics.CreateAPIView):
    queryset=Self_development.objects.all()
    serializer_class=Self_developmentSerializer

class Self_developmentShow(GenericAPIView,RetrieveModelMixin):
    queryset=Self_development.objects.all()
    serializer_class=Self_developmentSerializer
    def get(self,request,*args,**kwargs):
        return self.retrieve(request,*args,**kwargs)

class Self_developmentUpdateApi(generics.RetrieveUpdateDestroyAPIView):
    queryset=Self_development.objects.all()
    serializer_class=Self_developmentSerializer

class Self_developmentDeleteApi(generics.DestroyAPIView):
    queryset=Self_development.objects.all()
    serializer_class=Self_developmentSerializer
class User_selfdevelop_mapperAPI(generics.GenericAPIView):
    """Bulk-create user/self-development answer mappings from a JSON list."""

    # Bug fix: this endpoint previously used User_Verbal_mapper_Serializer
    # (the verbal-section mapper); use the self-development serializer that
    # the matching list view (User_selfdevelop_mapperList) already uses.
    serializer_class = User_selfdevelop_mapperSerializer

    def post(self, request):
        serializer = self.get_serializer(data=request.data, many=True)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response()
class User_selfdevelop_mapperList(APIView):
def get(self, request):
tasks = User_selfdevelop_mapper.objects.all()
serialized = User_selfdevelop_mapperSerializer(tasks, many=True)
return Response(serialized.data)
#by gaurav
# class ReasoningAPI(generics.CreateAPIView):
# queryset = Reasoning.objects.all()
# serializer_class = ReasoningSerializer
def QuestionPage(request):
return render(request, 'assesment_system/question_page.html')
class ReasoningUpdateApi(generics.RetrieveUpdateDestroyAPIView):
queryset = Reasoning.objects.all()
serializer_class = ReasoningSerializer
class User_Reasoning_mapperAPI(generics.CreateAPIView):
serializer_class = User_Reasoning_mapper_Serializer
queryset = User_Reasoning_mapper.objects.all()
class User_Reasoning_mapperList(APIView):
def get(self, request):
tasks = User_Reasoning_mapper.objects.all()
serialized = User_Reasoning_mapper_Serializer(tasks, many=True)
return Response(serialized.data)
class ReasoningCreateApi(generics.CreateAPIView):
queryset = Reasoning.objects.all()
serializer_class = ReasoningSerializer
class ReasoningViewApi(generics.ListAPIView):
queryset = Reasoning.objects.all()
serializer_class = ReasoningSerializer
class crud(generics.RetrieveUpdateDestroyAPIView):
queryset=Aptitude.objects.all()
serializer_class=AptitudeSerializer
class User_Aptitude_mapperAPI(generics.CreateAPIView):
queryset = User_Aptitude_mapper.objects.all()
serializer_class = User_Aptitude_mapper_Serializer
class User_Apptitude_mapper_crud(generics.RetrieveUpdateDestroyAPIView):
queryset=User_Aptitude_mapper.objects.all()
serializer_class=User_Aptitude_mapper_Serializer
class User_Aptitude_mapperList(APIView):
def get(self, request):
tasks = User_Aptitude_mapper.objects.all()
serialized = User_Aptitude_mapper_Serializer(tasks, many=True)
return Response(serialized.data)
class UserFeedback(generics.CreateAPIView):
queryset = user_feedback.objects.all()
serializer_class = User_Feedback_Serializer
class UserFeedbackList(APIView):
def get(self, request):
tasks = user_feedback.objects.all()
serialized = User_Feedback_Serializer(tasks, many=True)
return Response(serialized.data)
def userfeedback(request):
return render(request, 'assesment_system/feedback.html')
def checkanswer(request):
    """Score all stored aptitude answers and persist one Result row.

    Compares each User_Aptitude_mapper answer against the matching Aptitude
    question's correct answer, then saves the correct/wrong totals.
    """
    cresult = 0
    wresult = 0
    post1 = User_Aptitude_mapper.objects.all()
    for i in post1:
        qid = i.q_id
        user_answer = i.user_answer
        post2 = Aptitude.objects.filter(q_id=qid)
        for j in post2:
            q_ans = j.q_ans
            if user_answer == q_ans:
                cresult = cresult + 1
            else:
                # Bug fix: the wrong-answer counter never advanced — it was
                # `user_wresult = wresult + 1`, leaving `wresult` stuck at 0
                # (and raising NameError when every answer was correct).
                wresult = wresult + 1
    # Bug fix: create the Result once after scoring, instead of building a
    # row on every iteration of the answers loop.
    b = Result(user_cresult=cresult, user_wresult=wresult)
    b.save()
    return render(request, 'assesment_system/index.html')
class ResultList(APIView):
def get(self, request):
tasks = Result.objects.all()
serialized = Result_Serializer(tasks, many=True)
return Response(serialized.data)
def Resultlist(request):
productData =Result.objects.all()
print("products are ",productData)
return render(request,'assesment_system/result.html',{"products": productData})
class UserLoginView(generics.GenericAPIView):
    """Log a candidate in by matching username and password.

    SECURITY(review): passwords are stored and compared in plain text here —
    this should use Django's auth framework / hashed passwords
    (check_password) instead.
    """

    permission_classes = (AllowAny,)
    serializer_class = CandidateloginSerializer

    def post(self, request):
        serializer = self.serializer_class(data=request.data)
        # NOTE: the serializer is never validated; credentials are read from
        # the raw initial_data, so missing keys raise KeyError.
        username = serializer.initial_data['username']
        password = serializer.initial_data['password']
        # (Removed an unused `status_code`/`response` dict that was built but
        # never returned.)
        user = Candidate.objects.filter(username=username, password=password)
        if user:
            return HttpResponse("successfully logged in")
        return HttpResponse("wrong username or password")
def ExamDashboard(request):
return render(request,'assesment_system/exam_dashboard.html')
class CandidateList(GenericAPIView, ListModelMixin):
queryset = Candidate.objects.all()
serializer_class = CandidateSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class CandidateRetrive(GenericAPIView, RetrieveModelMixin):
queryset = Candidate.objects.all()
serializer_class = CandidateSerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class CandidateUpdate(generics.RetrieveUpdateDestroyAPIView):
queryset = Candidate.objects.all()
serializer_class = CandidateSerializer
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
class CandidateDestroy(GenericAPIView, DestroyModelMixin):
queryset = Candidate.objects.all()
serializer_class = CandidateSerializer
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class UserFeedackList(APIView):
def get(self,request):
tasks=user_feedback.objects.all()
serailzed= user_feedback_Serializer(tasks,many=True)
return Response(serailzed.data)
# class RegisterListView(APIView):
# def get(self, request):
# tasks = Register.objects.all()
# serialized = RegisterSerializer(tasks, many=True)
# return Response(serialized.data)
# class RegisterCreateApi(generics.CreateAPIView):
# queryset = Candidate.objects.all()
# serializer_class = CandidateSerializer
# class RegisterUpdateApi(generics.RetrieveUpdateDestroyAPIView):
# queryset = Register.objects.all()
# serializer_class = RegisterSerializer
def index(request):
return render(request, 'assesment_system/index.html')
def ResultView(request):
    """Score the user's reasoning answers, persist the tally, show the result page."""
    correct = 0
    wrong = 0
    for mapping in User_Reasoning_mapper.objects.all():
        # Compare each stored user answer with the matching question's key.
        for question in Reasoning.objects.filter(question_id=mapping.question_id):
            if mapping.user_answer == question.answer:
                correct += 1
            else:
                wrong += 1
    print(f"Your {correct} answer is correct and {wrong} answer is incorrect.")
    Result(user_cresult=correct, user_wresult=wrong).save()
    return render(request, 'assesment_system/result.html')
class BlacklistTokenUpdateView(APIView):
    """Logout endpoint: blacklist the submitted JWT refresh token."""
    permission_classes = [AllowAny]
    authentication_classes = ()
    def post(self, request):
        """Blacklist request.data["refresh_token"]; 205 on success, 400 otherwise."""
        try:
            refresh_token = request.data["refresh_token"]
            token = RefreshToken(refresh_token)
            token.blacklist()
            return Response(status=status.HTTP_205_RESET_CONTENT)
        # Broad catch: a missing "refresh_token" key and an invalid/expired
        # token both collapse into a generic 400 (`e` is intentionally unused).
        except Exception as e:
            return Response(status=status.HTTP_400_BAD_REQUEST)
| codingwarriors01/PsychomatricAssessment | Assesment/views.py | views.py | py | 16,257 | python | en | code | 0 | github-code | 13 |
14994549713 | # Exercício 2.2
# Dada uma sequência de números inteiros diferentes de zero, terminada por
# um zero, calcular a sua soma. Por exemplo, para a sequência:
# 12 17 4 -6 8 0
# o seu programa deve escrever o número 35.
# link: https://panda.ime.usp.br/aulasPython/static/aulasPython/aula02.html
def main():
    """Read integers until a terminating 0, then print their sum."""
    total = 0
    while True:
        numero = int(input('Informe um número: '))
        if numero == 0:
            break
        total += numero
    print(f'A soma dos números informados é: {total}')
#-----------------------------------------------
# a linha a seguir inicia a execução do programa
main() | josenaldo/python-learning | exercicios/extras/ex-s03e2.2-soma.py | ex-s03e2.2-soma.py | py | 643 | python | pt | code | 1 | github-code | 13 |
3895730767 | from tkinter import *
import random
import datetime
from tkinter import messagebox, filedialog
operador = ""
precios_comida = [1.32, 1.65, 2.31, 3.22, 1.22, 1.99, 2.05, 2.65, 1, 2]
precios_bebida = [0.25, 0.99, 1.21, 1.54, 1.08, 1.10, 2.00, 1.58, 1, 2]
precios_postres = [1.54, 1.68, 1.32, 1.97, 2.55, 2.14, 1.94, 1.74, 1, 2]
# funciones
def click_boton(numero):
global operador
operador = operador + str(numero)
visor_calculadora.delete(0, END)
visor_calculadora.insert(END, operador)
def borrar():
global operador
operador = ""
visor_calculadora.delete(0, END)
def obtener_resultado():
    """Evaluate the expression accumulated in `operador` and display the result.

    SECURITY NOTE: eval() executes arbitrary Python.  Input here only comes
    from the calculator buttons, but do not reuse this with free-form text.
    """
    global operador
    resultado = str(eval(operador))
    visor_calculadora.delete(0, END)
    visor_calculadora.insert(0, resultado)
    # Clear the pending expression so the next key press starts fresh.
    operador = ""
# revisar Check
def revisar_check():
x = 0
for c in cuadros_comida:
if variables_comida[x].get() == 1:
cuadros_comida[x].config(state=NORMAL)
if cuadros_comida[x].get() == "0":
cuadros_comida[x].delete(0, END)
cuadros_comida[x].focus()
else:
cuadros_comida[x].config(state=DISABLED)
texto_comida[x].set("0")
x += 1
x = 0
for c in cuadros_bebidas:
if variables_bebidas[x].get() == 1:
cuadros_bebidas[x].config(state=NORMAL)
if cuadros_bebidas[x].get() == "0":
cuadros_bebidas[x].delete(0, END)
cuadros_bebidas[x].focus()
else:
cuadros_bebidas[x].config(state=DISABLED)
texto_bebidas[x].set("0")
x += 1
x = 0
for c in cuadros_postres:
if variables_postres[x].get() == 1:
cuadros_postres[x].config(state=NORMAL)
if cuadros_postres[x].get() == "0":
cuadros_postres[x].delete(0, END)
cuadros_postres[x].focus()
else:
cuadros_postres[x].config(state=DISABLED)
texto_postres[x].set("0")
x += 1
# definicion del total
def total():
sub_total_comida = 0
p = 0
for cantidad in texto_comida:
sub_total_comida = sub_total_comida + (float(cantidad.get()) * precios_comida[p])
p += 1
sub_total_bebidas = 0
p = 0
for cantidad in texto_bebidas:
sub_total_bebidas = sub_total_bebidas + (float(cantidad.get()) * precios_bebida[p])
p += 1
sub_total_postres = 0
p = 0
for cantidad in texto_postres:
sub_total_postres = sub_total_postres + (float(cantidad.get()) * precios_postres[p])
p += 1
sub_total = sub_total_comida + sub_total_bebidas + sub_total_postres
impuesto = sub_total * 0.15
total = sub_total + impuesto
var_costo_comida.set("$ " + str(round(sub_total_comida, 2)))
var_costo_bebida.set("$ " + str(round(sub_total_bebidas, 2)))
var_costo_postres.set("$ " + str(round(sub_total_postres, 2)))
var_subtotal.set("$ " + str(round(sub_total, 2)))
var_impuesto.set("$ " + str(round(impuesto, 2)))
var_total.set("$ " + str(round(total, 2)))
# definicion de recibo
def recibo():
    """Render an itemized receipt for the current order into the text widget.

    BUG FIX: the beverage loop was nested inside the food loop (so it ran
    once per selected dish) and the food lines reused a stale index `x`, so
    item names and prices did not match the chosen products.  Each product
    family now iterates with its own enumerate() index.
    """
    texto_recibo.delete(1.0, END)
    num_recibo = f'N#-{random.randint(1000, 9999)}'
    fecha = datetime.datetime.now()
    fecha_recibo = f'{fecha.day}/{fecha.month}/{fecha.year}-{fecha.hour}:{fecha.minute}'
    texto_recibo.insert(END, f'Datos:t\t{num_recibo}\t\t{fecha_recibo}\n')
    texto_recibo.insert(END, f'*' * 47 + '\n')
    texto_recibo.insert(END, 'Items\t\tCant.\tCosto Items\n')
    texto_recibo.insert(END, f'-' * 54 + '\n')
    # One independent index per family keeps name/price lookups aligned.
    for x, comida in enumerate(texto_comida):
        if comida.get() != '0':
            texto_recibo.insert(END, f'{lista_comidas[x]}\t\t{comida.get()}\t'
                                     f'${int(comida.get()) * precios_comida[x]}\n')
    for x, bebida in enumerate(texto_bebidas):
        if bebida.get() != '0':
            texto_recibo.insert(END, f'{lista_bebidas[x]}\t\t{bebida.get()}\t'
                                     f'$ {int(bebida.get()) * precios_bebida[x]}\n')
    for x, postres in enumerate(texto_postres):
        if postres.get() != '0':
            texto_recibo.insert(END, f'{lista_postres[x]}\t\t{postres.get()}\t'
                                     f'$ {int(postres.get()) * precios_postres[x]}\n')
    texto_recibo.insert(END, f'-' * 54 + '\n')
    texto_recibo.insert(END, f' Costo de la Comida:\t\t\t{var_costo_comida.get()}\n')
    texto_recibo.insert(END, f' Costo de la Bebida:\t\t\t{var_costo_bebida.get()}\n')
    texto_recibo.insert(END, f' Costo de los Postres:\t\t\t{var_costo_postres.get()}\n')
    texto_recibo.insert(END, f'-' * 54 + '\n')
    texto_recibo.insert(END, f'Subtotal:\t\t\t{var_subtotal.get()}\n')
    texto_recibo.insert(END, f'Impuestos: \t\t\t{var_impuesto.get()}\n')
    texto_recibo.insert(END, f'Total: \t\t\t{var_total.get()}\n')
    texto_recibo.insert(END, f'*' * 47 + '\n')
    texto_recibo.insert(END, 'Los esperamos pronto')
# Definimos Guardar
def guardar():
info_recibo = texto_recibo.get(1.0, END)
archivo = filedialog.asksaveasfile(mode='w', defaultextension='.txt')
archivo.write(info_recibo)
archivo.close()
messagebox.showinfo('Atencion', 'su recibo ha sido guardado')
# reset
def resetear():
texto_recibo.delete(0.1, END)
for texto in texto_comida:
texto.set('0')
for texto in texto_bebidas:
texto.set('0')
for texto in texto_postres:
texto.set('0')
for cuadros in cuadros_comida:
cuadros.config(state=DISABLED)
for cuadros in cuadros_bebidas:
cuadros.config(state=DISABLED)
for cuadros in cuadros_postres:
cuadros.config(state=DISABLED)
for v in variables_comida:
v.set(0)
for v in variables_bebidas:
v.set(0)
for v in variables_postres:
v.set(0)
var_costo_comida.set('')
var_costo_bebida.set('')
var_costo_postres.set('')
var_subtotal.set('')
var_impuesto.set('')
var_total.set('')
# iniciar app
aplicacion = Tk()
aplicacion.geometry("1300x630+0+0")
# No maximizar
aplicacion.resizable(1, 1)
# titulo
aplicacion.title("Restaurante")
aplicacion.config(bg="DarkSlateGray")
# panel superior
panel_superior = Frame(aplicacion, bd=1, relief=RAISED)
panel_superior.pack(side=TOP)
# titulos
etiqueta_titulo = Label(panel_superior, text="Restaurante",
font=("Arial", 30, "bold"), bg="DarkOrange4", width=10)
etiqueta_titulo.grid(row=0, column=0)
# panel izquierdo
panel_izquierdo = Frame(aplicacion, bd=1, relief=FLAT)
panel_izquierdo.pack(side=LEFT)
# panel de costos
panel_costos = Frame(panel_izquierdo, bd=1, relief=FLAT, bg="Azure", padx=50)
panel_costos.pack(side=BOTTOM)
# panel de comida
panel_comida = LabelFrame(panel_izquierdo, text="Comida", font=("Dosis", 19, "bold"),
bd=1, relief=FLAT, bg="Azure4")
panel_comida.pack(side=LEFT)
# panel de bebidas
panel_bebidas = LabelFrame(panel_izquierdo, text="Bebidas", font=("Dosis", 19, "bold"),
bd=1, relief=FLAT, bg="Azure")
panel_bebidas.pack(side=LEFT)
# panel de postres
panel_postres = LabelFrame(panel_izquierdo, text="Postres", font=("Dosis", 19, "bold"),
bd=1, relief=FLAT, bg="Azure2")
panel_postres.pack(side=LEFT)
# panel derecho
panel_derecha = Frame(aplicacion, bd=1, relief=FLAT)
panel_derecha.pack(side=RIGHT)
# panel calculadora
panel_calculadora = Frame(panel_derecha, bd=1, relief=FLAT, bg="burlywood")
panel_calculadora.pack()
# panel recibo
panel_recibo = Frame(panel_derecha, bd=1, relief=FLAT, bg="burlywood")
panel_recibo.pack()
# panel de botones
panel_botones = Frame(panel_derecha, bd=1, relief=FLAT, bg='burlywood')
panel_botones.pack()
# lista de productos
lista_comidas = ["Hamburguesa", "Pizza", "Hot Dog", "Tacos", "Burritos", "Torta", "Torta Cubana", "Torta Hawaiana",
"Torta de Jamon", "Torta de Queso"]
lista_bebidas = ["Coca Cola", "Pepsi", "Fanta", "Sprite", "Agua", "Agua Mineral", "Agua de Sabor", "Agua de Frutas",
"Agua de Jamaica", "Agua de Horchata"]
lista_postres = ["Pastel de Chocolate", "Pastel de Vainilla", "Pastel de Fresa", "Pastel de Limon", "Pastel de Nuez",
"Pastel de Zanahoria", "Pastel de Moka", "Pastel de Cafe", "Pastel de Tres Leches", "Pastel de Queso"]
# generar items de comida
variables_comida = []
cuadros_comida = []
texto_comida = []
contador = 0
# iniciar llenado de datos
for comida in lista_comidas:
# crear checkbox
variables_comida.append("")
variables_comida[contador] = IntVar()
comida = Checkbutton(panel_comida,
text=comida.title(),
font=("Arial", 15, "bold"),
onvalue=1,
offvalue=0,
variable=variables_comida[contador],
command=revisar_check
)
comida.grid(row=contador, column=0, sticky=W)
# crear cuadro de texto
cuadros_comida.append("")
texto_comida.append("")
texto_comida[contador] = StringVar()
texto_comida[contador].set("0")
cuadros_comida[contador] = Entry(panel_comida,
font=("Arial", 15, "bold"),
bd=1,
width=6,
state=DISABLED,
textvariable=texto_comida[contador])
cuadros_comida[contador].grid(row=contador, column=1)
contador += 1
# generar items de bebidas
variables_bebidas = []
cuadros_bebidas = []
texto_bebidas = []
contador = 0
for bebida in lista_bebidas:
# Checkbox
variables_bebidas.append("")
variables_bebidas[contador] = IntVar()
bebida = Checkbutton(panel_bebidas,
text=bebida.title(),
font=("Arial", 15, "bold"),
onvalue=1,
offvalue=0,
variable=variables_bebidas[contador],
command=revisar_check
)
bebida.grid(row=contador, column=0, sticky=W)
# Cuadro de texto
cuadros_bebidas.append("")
texto_bebidas.append("")
texto_bebidas[contador] = StringVar()
texto_bebidas[contador].set("0")
cuadros_bebidas[contador] = Entry(panel_bebidas,
font=("Arial", 15, "bold"),
bd=1,
width=6,
state=DISABLED,
textvariable=texto_bebidas[contador])
cuadros_bebidas[contador].grid(row=contador, column=1)
contador += 1
# generar items de postres
variables_postres = []
cuadros_postres = []
texto_postres = []
contador = 0
for postre in lista_postres:
# Checkbox
variables_postres.append("")
variables_postres[contador] = IntVar()
postre = Checkbutton(panel_postres,
text=postre.title(),
font=("Arial", 15, "bold"),
onvalue=1,
offvalue=0,
variable=variables_postres[contador],
command=revisar_check
)
postre.grid(row=contador, column=0, sticky=W)
# Cuadro de texto
cuadros_postres.append("")
texto_postres.append("")
texto_postres[contador] = StringVar()
texto_postres[contador].set("0")
cuadros_postres[contador] = Entry(panel_postres,
font=("Arial", 15, "bold"),
bd=1,
width=6,
state=DISABLED,
textvariable=texto_postres[contador])
cuadros_postres[contador].grid(row=contador, column=1)
contador += 1
# variables dela calculadora
var_costo_comida = StringVar()
var_costo_bebida = StringVar()
var_costo_postres = StringVar()
var_subtotal = StringVar()
var_impuesto = StringVar()
var_total = StringVar()
# etiqueta de costo de comida y costo de entrada
etiqueta_costo_comida = Label(panel_costos,
text="Costo de Comida:",
font=("Arial", 15, "bold"),
bg="azure4",
fg="white")
etiqueta_costo_comida.grid(row=0, column=0)
texto_costo_comida = Entry(panel_costos,
font=("Arial", 15, "bold"),
bd=1,
width=10,
state='readonly',
textvariable=var_costo_comida)
texto_costo_comida.grid(row=0, column=1, padx=41)
etiqueta_costo_bebida = Label(panel_costos,
text="Costo de Bebida:",
font=("Arial", 15, "bold"),
bg="azure4",
fg="white")
etiqueta_costo_bebida.grid(row=1, column=0)
texto_costo_bebida = Entry(panel_costos,
font=("Arial", 15, "bold"),
bd=1,
width=10,
state='readonly',
textvariable=var_costo_bebida)
texto_costo_bebida.grid(row=1, column=1, padx=41)
etiqueta_costo_postres = Label(panel_costos,
text='Costo Postres',
font=('Dosis', 12, 'bold'),
bg='azure4',
fg='white')
etiqueta_costo_postres.grid(row=2, column=0)
texto_costo_postres = Entry(panel_costos,
font=('Dosis', 12, 'bold'),
bd=1,
width=10,
state='readonly',
textvariable=var_costo_postres)
texto_costo_postres.grid(row=2, column=1, padx=41)
# etiqueta de subtotal, impuesto y total
etiqueta_subtotal = Label(panel_costos,
text="Subtotal:",
font=("Arial", 15, "bold"),
bg="azure4",
fg="white")
etiqueta_subtotal.grid(row=0, column=2)
texto_subtotal = Entry(panel_costos,
font=("Arial", 15, "bold"),
bd=1,
width=10,
state='readonly',
textvariable=var_subtotal)
texto_subtotal.grid(row=0, column=3, padx=41)
etiqueta_impuesto = Label(panel_costos,
text="Impuesto:",
font=("Arial", 15, "bold"),
bg="azure4",
fg="white")
etiqueta_impuesto.grid(row=1, column=2)
texto_impuesto = Entry(panel_costos,
font=("Arial", 15, "bold"),
bd=1,
width=10,
state='readonly',
textvariable=var_impuesto)
texto_impuesto.grid(row=1, column=3, padx=41)
etiqueta_total = Label(panel_costos,
text="Total:",
font=("Arial", 15, "bold"),
bg="azure4",
fg="white")
etiqueta_total.grid(row=2, column=2)
texto_total = Entry(panel_costos,
font=("Arial", 15, "bold"),
bd=1,
width=10,
state='readonly',
textvariable=var_total)
texto_total.grid(row=2, column=3, padx=41)
# creacion de botones
botones = ['total', 'recibo', 'guardar', 'resetear']
botones_creados = []
columnas = 0
for boton in botones:
boton = Button(panel_botones,
text=boton.title(),
font=("Arial", 15, "bold"),
fg="white",
bg="azure4",
bd=1,
width=9)
botones_creados.append(boton)
boton.grid(row=0, column=columnas)
columnas += 1
botones_creados[0].config(command=total)
botones_creados[1].config(command=recibo)
botones_creados[2].config(command=guardar)
botones_creados[3].config(command=resetear)
# area de recibo
texto_recibo = Text(panel_recibo,
font=('Dosis', 12, 'bold'),
bd=1,
width=42,
height=10)
texto_recibo.grid(row=0,
column=0)
# calculadora
visor_calculadora = Entry(panel_calculadora,
font=('Dosis', 12, 'bold'),
width=32,
bd=1)
visor_calculadora.grid(row=0,
column=0,
columnspan=4)
botones_calculadora = ['7', '8', '9', '+', '4', '5', '6', '-',
'1', '2', '3', 'x', 'R', 'B', '0', '/']
botones_guardados = []
fila = 1
columna = 0
for boton in botones_calculadora:
boton = Button(panel_calculadora,
text=boton.title(),
font=('Dosis', 12, 'bold'),
fg='black',
bg='azure4',
bd=1,
width=8)
botones_guardados.append(boton)
boton.grid(row=fila, column=columna)
if columna == 3:
fila += 1
columna += 1
if columna == 4:
columna = 0
botones_guardados[0].config(command=lambda: click_boton('7'))
botones_guardados[1].config(command=lambda: click_boton('8'))
botones_guardados[2].config(command=lambda: click_boton('9'))
botones_guardados[3].config(command=lambda: click_boton('+'))
botones_guardados[4].config(command=lambda: click_boton('4'))
botones_guardados[5].config(command=lambda: click_boton('5'))
botones_guardados[6].config(command=lambda: click_boton('6'))
botones_guardados[7].config(command=lambda: click_boton('-'))
botones_guardados[8].config(command=lambda: click_boton('1'))
botones_guardados[9].config(command=lambda: click_boton('2'))
botones_guardados[10].config(command=lambda: click_boton('3'))
botones_guardados[11].config(command=lambda: click_boton('*'))
botones_guardados[12].config(command=obtener_resultado)
botones_guardados[13].config(command=borrar)
botones_guardados[14].config(command=lambda: click_boton('0'))
botones_guardados[15].config(command=lambda: click_boton('/'))
# eviar cierrre
aplicacion.mainloop()
| Oviwan999/Arabots-Classes2 | Clase Phyton/Clase Presencial restauriante.py | Clase Presencial restauriante.py | py | 18,457 | python | es | code | 0 | github-code | 13 |
15584052354 | from decimal import Decimal
import requests
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from flask import Flask, abort, jsonify
from markupsafe import escape
from order_service.connector import ConnectorFactory
app = Flask(__name__)
connector = ConnectorFactory().get_connector()
user_host = os.getenv('USERS_SERVICE', '127.0.0.1:8080')
stock_host = os.getenv('STOCK_SERVICE', '127.0.0.1:8080')
payment_host = os.getenv('PAYMENT_SERVICE', '127.0.0.1:8080')
@app.route('/orders/create/<user_id>', methods=['POST'])
def create_order(user_id):
"""
creates an order for the given user, and returns an order_id
:param user_id: id of user to create order for
:return: the order’s id
"""
res = requests.get(f"http://{user_host}/users/find/{user_id}")
if not res.ok:
abort(404)
order_id = connector.create_order(user_id)
res = {
"order_id": order_id
}
return jsonify(res)
@app.route('/orders/remove/<order_id>', methods=['DELETE'])
def delete_order(order_id):
"""
deletes an order by ID
:param order_id: id of order to be deleted
"""
try:
connector.delete_order(escape(order_id))
except ValueError:
abort(404)
return jsonify({"success": True}), 200
@app.route('/orders/find/<order_id>', methods=['GET'])
def retrieve_order(order_id):
try:
order_paid, order_items, order_userid, \
total_cost = connector.get_order_info(escape(order_id))
response = {
"order_id": order_id,
"paid": str(order_paid),
"items": order_items,
"user_id": order_userid,
"total_cost": str(total_cost)
}
return jsonify(response)
except ValueError:
abort(404)
@app.route('/orders/findByUser/<user_id>', methods=['GET'])
def retrieve_order_by_user(user_id):
try:
order_ids = connector.get_order_ids_by_user(user_id)
return jsonify({'order_ids': order_ids})
except ValueError:
abort(404)
@app.route('/orders/deleteByUser/<user_id>', methods=['DELETE'])
def delete_order_by_user(user_id):
try:
order_ids = connector.get_order_ids_by_user(user_id)
for order_id in order_ids:
connector.delete_order(escape(order_id))
except ValueError:
abort(404)
return jsonify({"success": True}), 200
@app.route('/orders/addItem/<order_id>/<item_id>', methods=['POST'])
def add_item(order_id, item_id):
    """Add one unit of an item to an open order, fetching its price on first add.

    Returns the new quantity of the item in the order; 404 if the order is
    already paid or the item is unknown to the stock service.
    """
    try:
        order_paid, _, _, _ = connector.get_order_info(escape(order_id))
        if order_paid:
            raise ValueError('Order already completed')
        item_in, price = connector.find_item(order_id, item_id)
        if not item_in:
            response = requests.get(f"http://{stock_host}/stock/find/{item_id}")
            # BUG FIX: an error answer from the stock service previously made
            # response.json()['price'] blow up with a 500; fail with 404 instead.
            if not response.ok:
                raise ValueError('Item not found in stock')
            price = Decimal(response.json()['price'])
        item_num = connector.add_item(order_id=order_id, item_id=item_id, item_price=price)
        return jsonify({'item_amount': str(item_num)})
    except ValueError:
        abort(404)
@app.route('/orders/removeItem/<order_id>/<item_id>', methods=['DELETE'])
def remove_item(order_id, item_id):
try:
order_paid, order_items, user_id, total_cost = connector.get_order_info(escape(order_id))
if order_paid:
raise ValueError('Order already completed')
if str(item_id) not in order_items:
raise ValueError('Item not in order')
item_num = connector.remove_item(order_id, item_id)
return jsonify({'item_amount': str(item_num)})
except ValueError as error:
abort(404, error.args[0])
@app.route('/orders/checkout/<order_id>', methods=['POST'])
def checkout(order_id):
    """Check out an order: charge the user, then reserve the stock.

    Makes the payment via the payment service and subtracts the stock via
    the stock service; on success the order is marked paid.  Any failure
    (already paid, insufficient credit, insufficient stock) answers 400
    with the reason as the message.
    """
    try:
        order_paid, order_items, user_id, total_cost = connector.get_order_info(escape(order_id))
        if order_paid:
            raise ValueError("Order already completed")
        # Charge first; reserve_items() rolls the payment back itself if the
        # stock reservation fails partway through.
        pay_order(user_id, order_id, total_cost)
        reserve_items(order_id, user_id, order_items)
        connector.set_paid(order_id=order_id)
        return jsonify({'status': 'success'})
    except ValueError as error:
        abort(400, error.args[0])
def pay_order(user_id, order_id, amount):
response = requests.post(f'http://{payment_host}/payment/pay/{user_id}/{order_id}/{amount}')
if not response.ok:
raise ValueError("Not enough credit")
def rollback_payment(user_id, order_id):
return requests.post(f'http://{payment_host}/payment/cancel/{user_id}/{order_id}')
def reserve_item(item_id, number):
return requests.post(f'http://{stock_host}/stock/subtract/{item_id}/{number}')
def reserve_items(order_id, user_id, items):
reserved_items = []
for item in list(set(items)):
item_num = connector.get_item_num(order_id=order_id, item_id=item)
response = reserve_item(item, item_num)
if response.ok:
reserved_items.append(item)
else:
rollback_payment(user_id, order_id)
rollback_items(order_id, reserved_items)
raise ValueError("Not enough stock")
def rollback_item(item_id, number):
return requests.post(f'http://{stock_host}/stock/add/{item_id}/{number}')
def rollback_items(order_id, item_ids):
for item_id in list(set(item_ids)):
number = connector.get_item_num(order_id=order_id, item_id=item_id)
rollback_item(item_id, number)
| YusufnoorEWI/scylladb_postgres_group-5 | order_service/service.py | service.py | py | 5,608 | python | en | code | 0 | github-code | 13 |
def remove_dup(arr):
    """Compact a sorted list in place so each value appears once.

    The input must already be sorted (otherwise sort it first).  Duplicates
    are overwritten towards the front; the tail beyond the unique prefix is
    left untouched (same in-place contract as before).  Now also returns the
    unique prefix so callers can use the result directly.
    """
    print("input arr {}".format(arr))
    insert_pos = 1
    for i in range(1, len(arr)):
        if arr[i] != arr[insert_pos - 1]:
            arr[insert_pos] = arr[i]
            insert_pos += 1
    print("modified arr {}".format(arr))
    # Guard: for an empty input the unique prefix is empty, not arr[:1].
    return arr[:insert_pos] if arr else []
remove_dup([-1, -1, -1, 0, 0, 0, 1, 1, 1])
remove_dup([1, 2, 3, 4, 5])
remove_dup([1, 2, 2, 4, 5, 5, 5])
| atulkumar-mittal/cp | arrays/remove_duplicates.py | remove_duplicates.py | py | 435 | python | en | code | 0 | github-code | 13 |
25320356819 | import re
from functools import reduce
from itertools import groupby
import numpy as np
in_file = "input.txt"
def read_input_lines():
with open(in_file, 'r') as f:
data = [x.strip() for x in f.readlines()]
data.append("")
return data
def format_input(in_list):
ticketdata_list = (list(data_line) for _, data_line in groupby(in_list, key=''.__ne__))
return [a + b for a, b in zip(ticketdata_list, ticketdata_list)]
def extract_rules(rules_raw):
    """Parse rule lines into {field name: ((lo1, hi1), (lo2, hi2))}."""
    pattern = re.compile("^(\w+|\w+ \w+): (\d+)-(\d+) or (\d+)-(\d+)$")
    rules = {}
    for line in rules_raw[:-1]:  # the final entry is the blank separator line
        name, lo1, hi1, lo2, hi2 = re.match(pattern, line).groups()
        rules[name] = ((int(lo1), int(hi1)), (int(lo2), int(hi2)))
    return rules
def validate_tickets(tickets_raw, comp_list):
    """Drop invalid nearby tickets; return (sum of invalid values, valid tickets).

    A ticket is invalid when any of its fields is outside *comp_list*, the
    set of all values allowed by some rule.
    """
    invalid_values = []
    bad_indices = set()
    ticket_list = tickets_raw[1:-1]  # drop the header line and trailing blank
    for idx, ticket in enumerate(ticket_list):
        for value in (int(d) for d in ticket.split(',')):
            if value not in comp_list:
                invalid_values.append(value)
                bad_indices.add(idx)
    # Pop from the back so earlier indices stay valid while removing.
    for idx in sorted(bad_indices, reverse=True):
        ticket_list.pop(idx)
    return sum(invalid_values), ticket_list
def puzzle1():
in_list = read_input_lines()
rules_raw, own_ticket, tickets_raw = format_input(in_list)
own_ticket = own_ticket[1:-1]
rule_dict = extract_rules(rules_raw=rules_raw)
comp_list = []
for rule in rule_dict.values():
for rule_set in rule:
comp_list += [x for x in range(rule_set[0], rule_set[1] + 1)]
comp_list = set(comp_list)
result, tickets = validate_tickets(tickets_raw=tickets_raw, comp_list=comp_list)
print(f"Ticket scanning error rate: {result}, Valid tickets: {len(tickets)}")
return rule_dict, tickets, own_ticket
def puzzle2():
rule_dict, tickets, own_ticket = puzzle1()
tickets += own_ticket
own_ticket = [int(d) for d in own_ticket[0].split(',')]
ticket_arr = []
for ticket in tickets:
ticket_arr.append([int(d) for d in ticket.split(',')])
ticket_arr = np.array(ticket_arr)
field_dict = {}
for i, column in enumerate(ticket_arr.T):
possible_fields = []
for key, rule in rule_dict.items():
lower = rule[0]
upper = rule[1]
if all(lower[0] <= value <= lower[1] or upper[0] <= value <= upper[1] for value in column):
possible_fields.append(key)
field_dict[i] = possible_fields
id_list = []
field_list = []
while field_dict.keys():
temp = field_dict.copy()
for id, field in temp.items():
if len(field) == 1:
id_list.append(id)
field_list.append(field[0])
field_dict.pop(id)
for name in field:
if name in field_list:
field.remove(name)
field_dict = dict(zip(field_list, id_list))
temp = []
for key, column in field_dict.items():
if key.startswith("departure "):
temp.append(own_ticket[column])
print(f"Product of 'departure' fields: {reduce(lambda x, y: x * y, temp)}")
return field_dict
if __name__ == '__main__':
a = puzzle2()
| voidlessVoid/advent_of_code_2020 | day_16/dominik/main.py | main.py | py | 3,350 | python | en | code | 0 | github-code | 13 |
45195018406 | from random import randint
def partition(T, p, r):
    """Lomuto partition of T[p..r] around pivot T[r]; return the pivot's final index."""
    pivot = T[r]
    boundary = p  # first slot of the "greater than pivot" region
    for j in range(p, r):
        if T[j] <= pivot:
            T[boundary], T[j] = T[j], T[boundary]
            boundary += 1
    T[boundary], T[r] = T[r], T[boundary]
    return boundary


def quickersort(T, p, r):
    """Sort T[p..r] in place.

    Recurses only into the smaller half and loops on the larger one, which
    bounds the recursion depth at O(log n).  Returns T for convenience.
    """
    while p < r:
        split = partition(T, p, r)
        if split - p < r - split:
            quickersort(T, p, split - 1)
            p = split + 1
        else:
            quickersort(T, split + 1, r)
            r = split - 1
    return T
# Demo run: sort the sample array end to end.
arr = [20, 8, 7, 5, 4, 4, 3, 2, 2, 1, 0, -2, -3, -3, -5]
# BUG FIX: the upper bound was the hard-coded literal 14 (and `n = 10` was
# unused and wrong); derive it from the list so edits to `arr` stay safe.
T = quickersort(arr, 0, len(arr) - 1)
print(T)
| kkorta/ASD | SortingAlgorithms/quickosrt.py | quickosrt.py | py | 618 | python | en | code | 0 | github-code | 13 |
43010057746 | import tkinter as tk
from tkinter import ttk,messagebox
from tkinter.constants import ANCHOR, CENTER, LEFT, N, NW, RIGHT, TOP, W
game = tk.Tk()
game.title('Falcon Game')
game.geometry('300x350+500+50')
a = '\U0001F600'
entry_1 = tk.Label(game,text=a,font='arial 50')
entry_1.grid(row=1,column=0,padx=10)
entry_2 = tk.Label(game,text=a,font='arial 50')
entry_2.grid(row=1,column=1,padx=10)
entry_3 = tk.Label(game,text=a,font='arial 50')
entry_3.grid(row=1,column=2,padx=10)
money = [0,0,0,0,0,0,0,0,0,0]
money_lable = ttk.Label(game,text=f'The Amount Of Money Is : {len(money)}$')
money_lable.grid(row=0,column=0,columnspan=3)
def start():
    """Play one spin: spend 1$, roll three emojis, pay out on a triple.

    When the bank is empty the spin is refused and "End Game" is shown.
    """
    import random  # idiom fix: was re-imported before every randrange call
    if len(money) != 0:
        emoji = ('\U0001F600', '\U0001F60D', '\U0001F911')  # ,'\U0001F60E'
        last_range = 3
        money.pop()  # every spin costs 1$
        print(money)
        a = random.randrange(0, last_range)
        b = random.randrange(0, last_range)
        c = random.randrange(0, last_range)
        money_lable = ttk.Label(game, text=f'The Amount Of Money Is : {len(money)}$')
        money_lable.grid(row=0, column=0, columnspan=3)
        entry_1 = tk.Label(game, text=emoji[a], font='arial 50')
        entry_1.grid(row=1, column=0, padx=10)
        entry_2 = tk.Label(game, text=emoji[b], font='arial 50')
        entry_2.grid(row=1, column=1, padx=10)
        entry_3 = tk.Label(game, text=emoji[c], font='arial 50')
        entry_3.grid(row=1, column=2, padx=10)
        if a == b and b == c:
            messagebox.showinfo('Message', 'You Are Win ! And You Win 10$ To Play Game !')
            # 11 credits = the 1$ just spent plus the advertised 10$ prize.
            for i in range(0, 11):
                money.append(0)
    else:
        # Note: this else pairs with the outer money check, matching the
        # original control flow -- losing spins show no message at all.
        messagebox.showinfo('Message', 'End Game')
submit = ttk.Button(game,text='Start',command=start)
submit.grid(row=2,column=0,padx=10,pady=20)
game.mainloop() | mdsahil369/Python-Problem-Solve | 3 emoji wining game.py | 3 emoji wining game.py | py | 1,818 | python | en | code | 0 | github-code | 13 |
10587837536 | import numpy as np
def distance(a, b):
    """Return the Euclidean distance between two vectors."""
    return np.sqrt(np.sum(np.power(a - b, 2)))


def rand_center(data, k):
    """Draw k random centroids, uniformly inside the bounding box of *data*."""
    m = data.shape[1]  # feature dimension
    centroids = np.zeros((k, m))
    for j in range(m):
        d_min, d_max = np.min(data[:, j]), np.max(data[:, j])
        centroids[:, j] = d_min + (d_max - d_min) * np.random.rand(k)
    return centroids


def converged(centroids1, centroids2):
    """Convergence test: True when both centroid sets contain the same points."""
    set1 = set([tuple(c) for c in centroids1])
    set2 = set([tuple(c) for c in centroids2])
    return set1 == set2


def sse(data, centroids, label):
    """Within-cluster sum of squared errors for the given assignment."""
    n = data.shape[0]
    # BUG FIX: np.float was removed in NumPy 1.24 -- use the builtin float.
    SSE = np.zeros(n, dtype=float)
    for i in range(n):
        SSE[i] = distance(data[i], centroids[label[i]]) ** 2
    return np.sum(SSE)


def k_means(data, k=2, max_iter=300):
    """Lloyd's k-means clustering.

    Returns (centroids, label, sse) where label[i] is the index of the
    cluster assigned to data[i].
    """
    n = data.shape[0]
    centroids = rand_center(data, k)
    # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
    label = np.zeros(n, dtype=int)
    finished = False
    count = 0
    while (not finished) and count < max_iter:
        count += 1
        old_centroids = np.copy(centroids)
        for i in range(n):
            min_dist, min_index = np.inf, -1
            for j in range(k):
                dist = distance(data[i], centroids[j])
                if dist < min_dist:
                    min_dist, min_index = dist, j
            # BUG FIX: the assignment used to sit inside the j-loop as
            # `label[i] = j`, so every point ended up labelled k-1.
            label[i] = min_index
        for j in range(k):
            members = data[label == j]
            if len(members) != 0:  # keep an empty cluster's centroid in place
                centroids[j] = np.mean(members, axis=0)
        finished = converged(old_centroids, centroids)
    return centroids, label, sse(data, centroids, label)
return centroids, label, sse(data, centroids, label) | IOTDB-Elites/ClassificationAndClustering | clustering/kmeans/k_means.py | k_means.py | py | 1,792 | python | en | code | 0 | github-code | 13 |
14042599067 | # URL du site
# Base URL of the site being scraped.
main_url = "https://www.cessionpme.com"
# Départements to process, mapping each département code to the site's
# internal search value (e.g. "64" -> "93").
departement_imo = {
    "64": "93",
    "33": "87"
}
# Property categories to process (label -> site category id).
rubrique_imo = {
    "Locaux, Entrepôts, Terrains": "2",
    "Bureaux, Coworking": "52"
}
# Transaction type: "V" = property for sale ("vente").
nature_imo= "V"
72053136977 | import threading
import tkinter as tk
from functools import partial
from tkinter import messagebox
import numpy as np
import Controller as gc
# thread
class GuiInterface:
    """Tkinter front end for the Caro (gomoku-style) board.

    Widget callbacks are routed to the Controller's Event object; the board
    itself is a sizeRow x sizeCol grid of tk.Buttons kept in `arrButton`,
    keyed by (row, col) tuples.
    """
    # Class-level shared state: controller event hub, button grid, move
    # history (list of (x, y)), board dimensions and the occupancy matrix.
    event = gc.Event()
    arrButton = {}
    memory = []
    sizeRow = 13
    sizeCol = 13
    checked = np.zeros((sizeRow, sizeCol))
    def setCondition(self, condition):
        # Forward the game condition to the controller event.
        self.event.setCondition(condition)
    def setCondition_Choise(self, condition_choise):
        # Forward the chosen game-mode condition to the controller event.
        self.event.setCondition_Choise(condition_choise)
    def setGuiI(self, guiI):
        # Remember the GUI instance that button callbacks are bound to.
        self.guiI = guiI
    def callInfinite(self):
        # Enter the Tk main loop; blocks until the window is closed.
        self.root.mainloop()
    def drawReset(self, x, y):
        """Place the "reset game" button at pixel position (x, y)."""
        self.reset = tk.Button(self.window, text="reset game", font=('arial', 15, 'bold'), relief="raised", bg='gray',
                               foreground="white",
                               command=partial(self.event.clickedReset, guiI=self.guiI))
        self.reset.place(x=x, y=y)
    def drawUndo(self, x, y):
        """Place the "undo" button at pixel position (x, y)."""
        self.undo = tk.Button(self.window, text="undo", font=('arial', 15, 'bold'), relief="raised", bg='gray',
                              foreground="white",
                              command=partial(self.event.clickedUndo, guiI=self.guiI)
                              )
        self.undo.place(x=x, y=y)
    def drawAutoAi(self, x, y):
        """Place the "AI VS AI" button at pixel position (x, y)."""
        # NOTE(review): this reuses self.undo, clobbering the undo-button
        # reference stored by drawUndo -- presumably unintended; confirm.
        self.undo = tk.Button(self.window, text="AI VS AI", font=('arial', 15, 'bold'), relief="raised", bg='gray',
                              foreground="white",
                              command=partial(self.event.AivsAi, guiI=self.guiI)
                              )
        self.undo.place(x=x, y=y)
    def drawPlayAi(self, x, y):
        """Place the "AI VS USER" button at pixel position (x, y)."""
        # NOTE(review): also stored in self.undo -- see drawAutoAi.
        self.undo = tk.Button(self.window, text="AI VS USER", font=('arial', 15, 'bold'), relief="raised", bg='gray',
                              foreground="white",
                              command=partial(self.event.AivsUser, guiI=self.guiI)
                              )
        self.undo.place(x=x, y=y)
    def drawBox(self):
        """Create the root window and the fixed-size frame everything sits in."""
        self.root = tk.Tk()
        self.window = tk.Frame(self.root, width=640, height=640, background="bisque")
        self.window.place(x=0, y=0)
        self.window.pack(fill=None, expand=False)
        self.root.title("Welcome to Caro")
        self.root.geometry("640x600")
        self.root.eval('tk::PlaceWindow . center')
        # self.root.resizable(0, 0)
    def drawContourLines(self):
        """Lay out the sizeRow x sizeCol grid of cell buttons.

        Cells are spaced 35px horizontally and 42px apart vertically; each
        button's command reports its (x, y) cell to the controller.
        """
        self.place_y = 30
        for x in range(self.sizeRow):
            self.place_x = 30
            for y in range(self.sizeCol):
                self.arrButton[x, y] = tk.Button(self.window, font=('arial', 15, 'bold'), height=1, width=2,
                                                 borderwidth=1, relief="solid", bg='white'
                                                 , command=partial(self.event.clicked, x=x, y=y, guiI=self.guiI)
                                                 )
                self.arrButton[x, y].grid(row=x, column=y)
                self.arrButton[x, y].place(x=self.place_x, y=self.place_y)
                self.place_x += 35
            self.place_y += 42
        # Reset the cursor so drawChessBoard can position the side buttons.
        self.place_y = 30
        self.place_x = 30
    def drawChessBoard(self):
        """Build the full UI: window, board grid and the four side buttons."""
        self.drawBox()
        self.drawContourLines()
        self.drawReset(self.place_x * (self.sizeCol + 4), self.place_y)
        self.drawUndo(self.place_x * (self.sizeCol + 4), self.place_y + 42)
        self.drawAutoAi(self.place_x * (self.sizeCol + 4), self.place_y + 84)
        self.drawPlayAi(self.place_x * (self.sizeCol + 4), self.place_y + 126)
    def changeColor(self):
        """Highlight the latest move in green and revert the previous highlight."""
        sz = len(self.memory)
        print("size = ", sz)
        if sz > 0:
            x = self.memory[sz - 1][0]
            y = self.memory[sz - 1][1]
            self.arrButton[x, y].configure(bg="green")
        if sz > 1:
            x = self.memory[sz - 2][0]
            y = self.memory[sz - 2][1]
            self.arrButton[x, y].configure(bg="white")
    def isDifferent(self, x, y, id):
        # True when cell (x, y) is occupied by a player other than `id`.
        # NOTE(review): the parameter shadows the builtin id().
        return self.checked[x][y] > 0 and self.checked[x][y] != id
    def getArrButton(self, x, y):
        # Return the text currently shown on cell (x, y).
        return self.arrButton[x, y]['text']
    def setArrButton(self, x, y, val):
        # Set the text shown on cell (x, y).
        self.arrButton[x, y]['text'] = val
    def on_closing(self):
        # Ask for confirmation before destroying the window.
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            self.root.destroy()
    def showTableCaro(self):
        """Build the board and install the confirm-on-close handler."""
        self.drawChessBoard()
        self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
| tryCod3/PyThon-Caro | Enviroment/MyGame/Gui.py | Gui.py | py | 4,477 | python | en | code | 0 | github-code | 13 |
25259027504 | #!/usr/bin/env python3
import argparse
import sys
def get_fh(filename, mode):
    """
    Open *filename* for reading ('r') or writing ('w') and return the handle.

    All opening of files in this program should go through this helper.

    Arguments:
        filename -- path of the file to open
        mode     -- 'r' to read or 'w' to write; anything else is rejected

    Returns:
        The open file handle, or None when an IOError / ValueError
        occurred (the error is printed instead of propagating).
    """
    fh = None
    try:
        # BUG FIX: the original compared an undefined name 'comm' instead
        # of the 'mode' parameter, raising NameError on every call.
        if mode == 'r':
            fh = open(filename, 'r')
        elif mode == 'w':
            fh = open(filename, 'w')
        else:
            raise ValueError('Command should be r or w')
    except IOError as e:
        print(e)
    except ValueError as e:
        print(e)
    return fh
def get_header_and_sequence_lists(fh):
    """
    Parse a FASTA stream into parallel lists of headers and sequences.

    Arguments:
        fh -- an iterable of lines (typically an open FASTA file handle)

    Returns:
        (header, sequence) lists of equal length; exits via
        _check_size_of_lists when they disagree.
    """
    header = []
    sequence = []
    linestore = ""  # accumulates sequence lines of the current record
    firstline = True
    for line in fh:
        if line.startswith('>'):
            # Flush the previous record's sequence (there is none before
            # the first header line).
            if firstline:
                firstline = False
            else:
                sequence.append(linestore)
            header.append(line.strip())
            linestore = ""
        else:
            linestore = linestore + line.strip()
    # Flush the final record.  Guarding on 'firstline' also fixes the
    # original NameError on completely empty input, where 'linestore'
    # was appended without ever having been assigned.
    if not firstline:
        sequence.append(linestore)
    _check_size_of_lists(header, sequence)
    return header, sequence
def _check_size_of_lists(header_list, sequence_list):
"""
function will read in the header and sequence list (returned from get_header_and_sequence_lists).
function will initiate sys.exit if the size of the lists passed in are not the same.
"_" indicates for internal use.
"""
if len(header_list) != len(sequence_list):
print("Exit")
sys.exit(0)
else:
return True
def _get_accession(string):
accession = string.split()
accession = accession[0]
accession=accession[1:]
return accession
def _get_nt_occurrence(letter, string):
return string.count(letter)
def main():
    """Command-line entry point: read a FASTA file (--infile) and write
    per-sequence nucleotide statistics (counts, length, GC%) to --outfile."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--infile', dest = 'infile', type = str, help = 'Path to the file to open', required = True)
    parser.add_argument('--outfile', dest = 'outfile', type = str, help = 'Path to the output file', required = True)
    args = parser.parse_args()
    fh_in = get_fh(args.infile, "r")
    header, sequence = get_header_and_sequence_lists(fh_in)
    fh_in.close()
    fh_out = get_fh(args.outfile, "w")
    fh_out.write("Number Accession A's G's C's T's N's Length GC%\n")
    for i in range(len(header)):
        accession = _get_accession(header[i])
        num_As = _get_nt_occurrence('A', sequence[i])
        num_Gs = _get_nt_occurrence('G', sequence[i])
        num_Cs = _get_nt_occurrence('C', sequence[i])
        num_Ts = _get_nt_occurrence('T', sequence[i])
        num_Ns = _get_nt_occurrence('N', sequence[i])
        # GC% = (G + C) / length * 100
        # NOTE(review): a zero-length sequence raises ZeroDivisionError here.
        gc = (num_Gs + num_Cs)*100/(len(sequence[i]))
        fh_out.write('{:5} {:10} {:5} {:5} {:5} {:5} {:5} {:5} {:5.2f}\n'.format(i+1, accession, num_As, num_Gs, num_Cs, num_Ts, num_Ns, len(sequence[i]), gc))
    fh_out.close()
if __name__ == '__main__':
main()
| Parinitha-kompala/Nucleotide-Analysis-from-FASTA-files | nucleotide_statistics_from_fasta22.py | nucleotide_statistics_from_fasta22.py | py | 2,914 | python | en | code | 0 | github-code | 13 |
43403295483 | import os
#Manually fix the GPU
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import sys
#Add EDM to path to load models properly
# change this line to point to your specific path
sys.path.append("/home/sravula/MRI_Sampling_Diffusion/edm")
from utils.exp_utils import set_all_seeds, parse_args, parse_config
from learners.gradientlearner import MaskLearner
def main():
    """Entry point: parse CLI args and YAML config, seed all RNGs, then
    either evaluate (--test / --baseline) or run the meta-optimization."""
    args = parse_args(globals()['__doc__'])
    hparams = parse_config(args.config)
    print("\nWriting to ", os.path.join(hparams.save_dir, args.doc), '\n')
    set_all_seeds(hparams.seed)
    learner = MaskLearner(hparams, args)
    # --test and --baseline both run evaluation only; anything else trains.
    if args.test or args.baseline:
        learner.test()
    else:
        learner.run_meta_opt()
    return 0
if __name__ == '__main__':
sys.exit(main())
| Sriram-Ravula/MRI_Sampling_Diffusion | main.py | main.py | py | 813 | python | en | code | 4 | github-code | 13 |
28773531340 | """Auxiliary functions."""
try:
import cPickle as pickle
except ImportError:
import pickle
import re
import os
from SPARQLWrapper import SPARQLWrapper, JSON
YAGO_ENPOINT_URL = "https://linkeddata1.calcul.u-psud.fr/sparql"
RESOURCE_PREFIX = 'http://yago-knowledge.org/resource/'
def safe_mkdir(dir_name):
    """Create *dir_name* if it does not already exist.

    The original stat-then-mkdir sequence had a race window (TOCTOU) and
    also silently accepted a plain file sitting at the path.  Attempting
    the mkdir directly and tolerating only an already-existing directory
    is atomic and remains py2/py3 compatible like the rest of the module.
    """
    try:
        os.mkdir(dir_name)
    except OSError:
        # Re-raise real failures (missing parent, permissions, a file in
        # the way); stay quiet only when the directory already exists.
        if not os.path.isdir(dir_name):
            raise
def pickle_to_file(object_, filename):
    """Serialize *object_* into *filename*, always with the highest protocol."""
    with open(filename, 'wb') as out_stream:
        pickle.dump(object_, out_stream, pickle.HIGHEST_PROTOCOL)
def pickle_from_file(filename):
    """Deserialize and return the object stored in *filename*."""
    with open(filename, 'rb') as in_stream:
        loaded = pickle.load(in_stream)
    return loaded
def get_input_files(input_dirpath, pattern):
    """Yield full paths of regular files in *input_dirpath* whose names
    match *pattern* (re.match, i.e. anchored at the start)."""
    for candidate in os.listdir(input_dirpath):
        full_path = os.path.join(input_dirpath, candidate)
        if re.match(pattern, candidate) and os.path.isfile(full_path):
            yield full_path
# TODO(mili): Would a pandas DataFrame be a better return type here?
def query_sparql(query, endpoint):
    """Run a query against a SPARQL endpoint.
    Returns:
        A double list with only the values of each requested variable in
        the query. The first row in the result contains the name of the
        variables.
    """
    sparql = SPARQLWrapper(endpoint)
    sparql.setReturnFormat(JSON)
    sparql.setQuery(query)
    # Blocking network call; raises on connection/endpoint errors.
    response = sparql.query().convert()
    bindings = response['results']['bindings']
    variables = response['head']['vars']
    # First row is the header with the variable names.
    result = [variables]
    for binding in bindings:
        row = []
        for variable in variables:
            row.append(binding[variable]['value'])
        result.append(row)
    return result
def download_category(category_name, limit):
    """Return up to *limit* (entity, wikipediaUrl) rows for *category_name*.

    (The previous docstring mentioned storing results in a directory; the
    function only queries the YAGO endpoint and returns the rows.)
    """
    query = """SELECT DISTINCT ?entity ?wikiPage WHERE {
        ?entity rdf:type <http://yago-knowledge.org/resource/%s> .
        ?entity <http://yago-knowledge.org/resource/hasWikipediaUrl> ?wikiPage
    } LIMIT %s""" % (category_name, limit)
    return query_sparql(query, YAGO_ENPOINT_URL)
def get_categories_from_file(category_filename):
    """Return the lines of *category_filename* as a list (split on '\\n')."""
    with open(category_filename, 'r') as category_file:
        content = category_file.read()
    return content.split('\n')
def query_subclasses(category_name, populated=True):
    """Return the direct subclasses of *category_name* from the YAGO endpoint.

    When *populated* is True only subclasses with at least one entity are
    returned.  The header row of query_sparql's result is stripped ([1:]).
    """
    query = """SELECT DISTINCT ?subCategory WHERE {
        ?subCategory rdfs:subClassOf <%s%s> .
    """ % (RESOURCE_PREFIX, category_name)
    if populated:
        query += """
        ?entity rdf:type ?subCategory .
    }"""
    else:
        query += '}'
    return query_sparql(query, YAGO_ENPOINT_URL)[1:]
def add_subcategories(category_name, graph, ancestors=[]):
    """Updates the children categories and level of category name.

    Recursively queries YAGO for subclasses of *category_name* and adds
    them to *graph* (a networkx DiGraph), keeping for each node the
    ancestor edge with the longest path.

    NOTE(review): the mutable default 'ancestors=[]' is never mutated in
    place (only 'ancestors + [x]'), so it is safe here, but fragile.
    NOTE(review): 'len(graph.predecessors(...))' assumes predecessors()
    returns a list (networkx 1.x); in networkx 2.x it is an iterator and
    len() would fail -- confirm the pinned networkx version.
    """
    def add_ancestor(category_name):
        # Edge from the most recent ancestor, weighted by recursion depth.
        graph.add_edge(ancestors[-1], category_name, path_len=len(ancestors))
    response = query_subclasses(category_name)
    for result in response:
        child_category = result[0].replace(RESOURCE_PREFIX, '')
        # Skip Wikipedia-derived categories; only WordNet classes are kept.
        if 'wikicat' in child_category:
            continue
        add_subcategories(child_category, graph,
                          ancestors=ancestors + [category_name])
    if category_name not in graph:
        if len(ancestors):
            add_ancestor(category_name)
        else:
            graph.add_node(category_name)
        return
    # We have seen the node before
    if len(graph.predecessors(category_name)) == 0: # There is no ancestor yet.
        if len(ancestors): # it's not the first recursive call
            add_ancestor(category_name)
    else: # There is a previous ancestor
        added = False
        for prev_ancestor in graph.predecessors(category_name):
            if prev_ancestor in ancestors:
                added = True
                if len(ancestors) > graph.get_edge_data(
                        prev_ancestor, category_name)['path_len']:
                    # The current ancestor has a longer path
                    graph.remove_edge(prev_ancestor, category_name)
                    add_ancestor(category_name)
        # The new ancestor doesn't overlap with any previous ancestor's path.
        if not added and len(ancestors):
            add_ancestor(category_name)
| MIREL-UNC/mirel-scripts | yago_scripts/utils.py | utils.py | py | 4,600 | python | en | code | 0 | github-code | 13 |
70268962578 | from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
import sys
# Runtime dependencies of the ducky package.
install_requires = [
    'colorama',
    'enum34',
    'tabulate',
    'six',
    'mako',
    'Twisted',
    'autobahn',
    'ply'
]
# Extra packages needed only to run the test suite (via the Tox command).
tests_requires = [
    'tox',
    'virtualenv'
]
class Tox(TestCommand):
    """setuptools 'test' command that delegates to tox (python setup.py test)."""
    user_options = [('tox-args=', 'a', 'Arguments to pass to tox')]
    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.tox_args = None
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No in-process test discovery: tox owns the whole run.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported here because tox may not be installed at setup time.
        import tox
        import shlex
        args = self.tox_args
        if args:
            args = shlex.split(self.tox_args)
        errno = tox.cmdline(args=args)
        sys.exit(errno)
setup(name = 'ducky',
version = '4.0',
description = 'Simple virtual CPU/machine simulator',
long_description = 'Ducky is a simple virtual CPU/machine simulator, with modular design and interesting features.',
url = 'https://github.com/happz/ducky',
download_url = 'https://github.com/happz/ducky/tarball/4.0',
author = 'Milos Prchlik',
author_email = 'happz@happz.cz',
license = 'MIT',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Forth',
'Programming Language :: Assembly',
'Topic :: Software Development :: Assemblers',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Interpreters'
],
keywords = 'virtual CPU simulator',
packages = [
'ducky',
'ducky.asm',
'ducky.cpu',
'ducky.cpu.coprocessor',
'ducky.mm',
'ducky.devices',
'ducky.tools'
],
entry_points = {
'console_scripts': [
'ducky-as = ducky.tools.as:main',
'ducky-ld = ducky.tools.ld:main',
'ducky-vm = ducky.tools.vm:main',
'ducky-objdump = ducky.tools.objdump:main',
'ducky-coredump = ducky.tools.coredump:main',
'ducky-profile = ducky.tools.profile:main',
'ducky-img = ducky.tools.img:main',
'ducky-defs = ducky.tools.defs:main'
]
},
package_dir = {'ducky': 'ducky'},
zip_safe = False,
install_requires = install_requires,
tests_require = tests_requires,
cmdclass = {
'test': Tox
}
)
| happz/ducky-legacy | setup.py | setup.py | py | 2,992 | python | en | code | 5 | github-code | 13 |
43082687746 | #! /usr/bin/env python
# # -*- coding: utf-8 -*
"""
rcs-keywords-post-checkout
This module provides code to act as an event hook for the git
post-checkout event. It detects which files have been changed
and forces the files to be checked back out within the
repository. If the checkout event is a file based event, the
hook exits without doing any work. If the event is a branch
based event, the files are checked again after the the commit
information is available after the merge has completed.
"""
import sys
import os
import errno
import subprocess
__author__ = "David Rotthoff"
__email__ = "drotthoff@gmail.com"
__version__ = "git-rcs-keywords-1.1.0"
__date__ = "2021-02-07 10:51:24"
__credits__ = []
__status__ = "Production"
def execute_cmd(cmd, cmd_source=None):
    """Execute the supplied program.
    Arguments:
        cmd -- string or list of strings of commands. A single string may
               not contain spaces.
        cmd_source -- The function requesting the program execution.
                      Default value of None.
    Returns:
        Process stdout file handle
    """
    # Ensure there are no embedded spaces in a string command
    # NOTE(review): exits silently with status 1, no message -- confirm
    # this is the intended behaviour for a git hook.
    if isinstance(cmd, str) and ' ' in cmd:
        exit(1)
    # Execute the command
    try:
        cmd_handle = subprocess.Popen(cmd,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
        (cmd_stdout, cmd_stderr) = cmd_handle.communicate()
        # Forward anything the child wrote on stderr, line by line.
        if cmd_stderr:
            for line in cmd_stderr.strip().decode("utf-8").splitlines():
                sys.stderr.write("%s\n" % line)
    # If the command fails, notify the user and exit immediately
    # NOTE(review): Popen/communicate never raise CalledProcessError (only
    # the subprocess.check_* helpers do), so that handler is dead code.
    except subprocess.CalledProcessError as err:
        sys.stderr.write(
            "{0} - Program {1} called by {2} not found! -- Exiting."
            .format(str(err), str(cmd), str(cmd_source))
        )
        raise
    except OSError as err:
        sys.stderr.write(
            "{0} - Program {1} called by {2} not found! -- Exiting."
            .format(str(err), str(cmd), str(cmd_source))
        )
        raise
    # Return from the function
    return cmd_stdout
def check_for_cmd(cmd):
    """Make sure that a program necessary for using this script is
    available.
    Arguments:
        cmd -- string or list of strings of commands. A single string may
               not contain spaces.
    Returns:
        Nothing
    """
    # Ensure there are no embedded spaces in a string command
    if isinstance(cmd, str) and ' ' in cmd:
        exit(1)
    # Execute the command; execute_cmd exits/raises if it cannot be run.
    execute_cmd(cmd=cmd, cmd_source='check_for_cmd')
    # Return from the function
    return
def git_ls_files():
    """Find files that are relevant based on all files for the
    repository branch.
    Arguments:
        None
    Returns:
        A list of filenames.
        NOTE(review): actually returns the raw stdout bytes of
        'git ls-files', not a list -- callers must split/decode.
    """
    cmd = ['git', 'ls-files']
    # Get a list of all files in the current repository branch
    cmd_stdout = execute_cmd(cmd=cmd, cmd_source='git_ls_files')
    # Return from the function
    return cmd_stdout
def get_checkout_files(first_hash, second_hash):
    """Find files that have been modified over the range of the supplied
    commit hashes.
    Arguments:
        first_hash - The starting hash of the range
        second_hash - The ending hash of the range
    Returns:
        A list of filenames.
    """
    file_list = []
    # Get the list of files impacted. If argv[1] and argv[2] are the same
    # commit, then pass the value only once otherwise the file list is not
    # returned
    if first_hash == second_hash:
        cmd = ['git',
               'diff-tree',
               '-r',
               '--name-only',
               '--no-commit-id',
               '--diff-filter=ACMRT',
               first_hash]
    else:
        cmd = ['git',
               'diff-tree',
               '-r',
               '--name-only',
               '--no-commit-id',
               '--diff-filter=ACMRT',
               first_hash,
               second_hash]
    # Fetch the list of files modified by the last commit
    cmd_stdout = execute_cmd(cmd=cmd, cmd_source='get_checkout_files')
    # Convert the stdout stream to a list of files
    file_list = cmd_stdout.decode('utf8').splitlines()
    # Deal with unmodified repositories
    # NOTE(review): exits the whole hook (status 0) when the first entry
    # is literally 'clean' -- confirm which git output produces that.
    if file_list and file_list[0] == 'clean':
        exit(0)
    # Only return regular files.
    file_list = [i for i in file_list if os.path.isfile(i)]
    # if VERBOSE_FLAG:
    #     sys.stderr.write('  %d real files found for processing\n'
    #                      % len(file_list))
    # Return from the function
    return file_list
def remove_modified_files(files):
    """Filter the found files to eliminate any that have changes that have
    not been checked in.
    Arguments:
        files - list of files to checkout
    Returns:
        A list of files to checkout that do not have pending changes.
    """
    cmd = ['git', 'status', '-s']
    # Get the list of files that are modified but not checked in
    cmd_stdout = execute_cmd(cmd=cmd, cmd_source='remove_modified_files')
    # Convert the stream output to a list of output lines
    modified_files_list = cmd_stdout.decode('utf8').splitlines()
    # Deal with unmodified repositories
    if not modified_files_list:
        return files
    # Pull the file name (second field) of the output line and
    # remove any double quotes
    modified_files_list = [l.split(None, 1)[-1].strip('"')
                           for l in modified_files_list]
    # Remove any modified files from the list of files to process
    # (O(n*m) scan; fine for typical checkout sizes)
    if modified_files_list:
        files = [f for f in files if f not in modified_files_list]
    # Return from the function
    return files
def check_out_file(file_name):
    """Checkout file that was been modified by the latest branch checkout.
    Arguments:
        file_name -- the file name to be checked out for smudging
    Returns:
        Nothing.
    """
    # Remove the file if it currently exists, so 'git checkout -f'
    # re-creates it and runs the smudge filter.
    try:
        os.remove(file_name)
    except OSError as err:
        # Ignore a file not found error, it was being removed anyway
        if err.errno != errno.ENOENT:
            exit(err.errno)
    cmd = ['git', 'checkout', '-f', '%s' % file_name]
    # Check out the file so that it is smudged
    execute_cmd(cmd=cmd, cmd_source='check_out_files')
    # Return from the function
    return
def main():
    """Main program.
    Arguments:
        argv: command line arguments (git passes prev-HEAD, new-HEAD and
              a branch-checkout flag to post-checkout hooks)
    Returns:
        Nothing
    """
    # If argv[3] is zero (file checkout rather than branch checkout),
    # then exit the hook as there is no need to re-smudge the file.
    # (The commit info was already available)
    if sys.argv[3] == '0':
        exit(0)
    # Check if git is available.
    check_for_cmd(cmd=['git', '--version'])
    # Get the list of files impacted.
    files = get_checkout_files(first_hash=sys.argv[1], second_hash=sys.argv[2])
    # Filter the list of modified files to exclude those modified since
    # the commit
    files = remove_modified_files(files=files)
    # Force a checkout of the remaining file list
    files_processed = 0
    if files:
        files.sort()
        for file_name in files:
            check_out_file(file_name=file_name)
            files_processed += 1
# Execute the main function
if __name__ == '__main__':
main()
| mdrotthoff/git-rcs-keywords | dist/rcs-post-checkout.py | rcs-post-checkout.py | py | 7,461 | python | en | code | 3 | github-code | 13 |
14583375235 | from umqtt.simple import MQTTClient
from machine import Pin
led=Pin(5, Pin.OUT)
led.value(1)
def msg(a,b):
    # MQTT message callback: 'a' is the topic, 'b' the payload (both bytes).
    data=(str(b,'utf-8'))
    print(data)
    # NOTE(review): value(0) for "on" assumes an active-low LED wiring
    # (common on ESP8266 boards) -- confirm for this circuit.
    if "LED on" in data:
        led.value(0)
    if "LED off" in data:
        led.value(1)
def client():
    # Connect to the public broker, wait for exactly one message on topic
    # 'test2' (dispatched to msg()), then disconnect.  The surrounding
    # while-loop reconnects for every message.
    server="test.mosquitto.org"
    c = MQTTClient("umqtt_client", server)
    c.set_callback(msg)
    c.connect()
    c.subscribe("test2")
    # wait_msg() blocks until one message arrives.
    c.wait_msg()
    # NOTE(review): check_msg() right after wait_msg() is redundant here.
    c.check_msg()
    c.disconnect()
while True:
client()
| freedomwebtech/raspberry-pi-4-voice-homeautomation-part2 | esp8266sub.py | esp8266sub.py | py | 509 | python | en | code | 1 | github-code | 13 |
31234485909 | def homework_1(nums): # 請同學記得把檔案名稱改成自己的學號(ex.1104813.py)
max = 1
count = 1
for i in range(len(nums)):
if i==(len(nums)-1):
break
x = nums[i]
if x == nums[i+1]:
count+=1
a = count
if a > max:
max = a
else:
count = 1
return max
if __name__ == '__main__':
lst = [0, 0, 1, 1, 1, 1, 0, 0, 0, 1]
print(homework_1(lst))
| daniel880423/Member_System | file/hw1/1100434/s1100434_10.py | s1100434_10.py | py | 508 | python | en | code | 0 | github-code | 13 |
6799671932 | """This module handles the windowing functions for ImageWatcher."""
from src.ui.imageviewerkeyhandler import ImageViewerKeyHandler
from src.ui.utils import get_resolution_linux, print_cb_data
import dearpygui.dearpygui as dpg
import logging
import time
from PIL import Image
class ImageViewerWindow:
    """dearpygui-based borderless image viewer window.

    Owns the viewport, the single primary window, keyboard handling and
    drag-to-move behaviour.

    NOTE(review): self.fullscreen, self.img_id, self.logo_id and
    self.img_path are read by methods below but never initialised in
    __init__ -- presumably set by ImageViewerKeyHandler callbacks or a
    subclass; confirm, otherwise the first access raises AttributeError.
    """
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        # Physical screen size (Linux-specific helper).
        self.clientW, self.clientH = get_resolution_linux()
        self.w, self.h = (1024, 768)
        self.vp_min_width, self.vp_min_height = (640, 480)
        self.image_w, self.image_h = (0, 0)
        # Debounce timer for window dragging (seconds since epoch, 0 = idle).
        self.dragTimer = 0.0
        # create viewport takes in config options too!
        self.img_paths = []
        self.init_viewport()
        self.init_window()
        self.key_handler = ImageViewerKeyHandler(
            self.img_paths, self.quit, self.toggle_fullscreen, self.set_image)
        self.init_dpg()
        self.currW = dpg.get_viewport_width()
        self.currH = dpg.get_viewport_height()
    def handle_windowdrag(self, sender, dragpos, user_data):
        """Move the undecorated viewport with the mouse drag (debounced)."""
        if self.dragTimer == 0:
            self.dragTimer = time.time()
        if(time.time() - self.dragTimer > 0.01):
            currX, currY = dpg.get_viewport_pos()
            posX = dragpos[1] + currX
            posY = dragpos[2] + currY
            # NOTE(review): all([...]) yields a bool, so '> 0.0' compares a
            # bool to a float -- this checks truthiness, not positivity.
            if all([posX, posY, currX, currY]) > 0.0:
                dpg.set_viewport_pos(
                    [posX, posY])
                self.stored_pos = dpg.get_viewport_pos()
            self.dragTimer = 0.0
    def init_dpg(self):
        """Register global handlers (keys, drag) and pin the window on top."""
        dpg.setup_registries()
        dpg.set_viewport_always_top(True)
        dpg.add_key_press_handler(callback=self.key_handler.handle_keys)
        dpg.add_mouse_drag_handler(callback=self.handle_windowdrag)
    def init_viewport(self):
        """Create and show the undecorated, transparent-background viewport."""
        dpg.create_viewport(
            title='ImageWatcher',
            width=self.w,
            height=self.h,
            decorated=False, resizable=True,
            clear_color=[0, 0, 0, 0],
            min_width=self.vp_min_width,
            min_height=self.vp_min_height,
            x_pos=self.clientW//2,
            y_pos=self.clientH//2)
        self.viewport = dpg.get_viewport_title()
        dpg.set_viewport_clear_color([0.0, 0.0, 0.0, 0.0])
        dpg.setup_dearpygui(viewport=self.viewport)
        logging.debug(f"setup viewport with id: {self.viewport}")
        # self.center_viewport()
        self.logger.debug(
            "clientW: {} \n clientH: {}".format(self.clientW, self.clientH))
        dpg.set_viewport_clear_color([0, 0.0, 0.0, 0.0])
        dpg.show_viewport(self.viewport)
        # Remember the initial geometry for toggling out of fullscreen later.
        self.stored_pos = dpg.get_viewport_pos()
        self.logger.debug(f"init viewport pos: {self.stored_pos}")
        self.stored_w = dpg.get_viewport_width()
        self.stored_h = dpg.get_viewport_height()
    def init_window(self):
        """Create the primary window, strip its chrome via a theme, and hook
        resizes to resize_image_to_viewport."""
        with dpg.window(label="Imagewatcher",
                        collapsed=True,
                        modal=False,
                        width=self.w,
                        height=self.h,
                        id="main_window",
                        no_scrollbar=True,
                        no_background=True) as w:
            self.window = w
        # Zero padding/scrollbar and fully transparent background/border.
        with dpg.theme(id="theme_id"):
            dpg.add_theme_style(dpg.mvStyleVar_ScrollbarSize,
                                0)
            dpg.add_theme_style(dpg.mvStyleVar_WindowPadding,
                                0)
            dpg.add_theme_color(dpg.mvThemeCol_WindowBg,
                                (15, 15, 15, 0))
            dpg.add_theme_color(dpg.mvThemeCol_Border,
                                (0, 0, 0, 0))
        dpg.set_item_theme("main_window", "theme_id")
        dpg.set_primary_window("main_window", True)
        dpg.add_resize_handler(
            self.window, callback=self.resize_image_to_viewport)
    def center_viewport(self, xoff=0, yoff=0):
        """Move the viewport to the screen center, minus optional offsets."""
        self.currW = dpg.get_viewport_width()
        self.currH = dpg.get_viewport_height()
        self.logger.debug(f"currH: {self.currW} \n currH: {self.currH}")
        dpg.set_viewport_pos(
            [self.clientW//2-xoff, self.clientH//2 - yoff])
    def resize_viewport(self, width, height):
        """Resize the viewport to fit an image; return the position to use.

        NOTE(review): 'self.img_path is not ""' is an identity test against
        a literal (SyntaxWarning on 3.8+); '!=' was almost certainly meant.
        """
        if not self.fullscreen or dpg.is_viewport_decorated():
            if self.img_path is not "":
                dpg.set_viewport_min_width(width)
                dpg.set_viewport_min_height(height)
                dpg.set_viewport_height(height)
                dpg.set_viewport_width(width)
            pos = [0, 0]
        elif self.fullscreen or dpg.is_viewport_decorated():
            pos = [self.clientW//2 - width//2,
                   self.clientH//2 - height//2]
        logging.info(f"setting pos: {pos}")
        return pos
    def store_vp_dims(self):
        """Remember the windowed-mode geometry before going fullscreen.

        NOTE(review): 'if x or y == 0' parses as '(x) or (y == 0)', so the
        method returns whenever x is non-zero; 'x == 0 or y == 0' looks
        intended -- confirm before relying on stored positions.
        """
        x, y = dpg.get_viewport_pos()
        w, h = (dpg.get_viewport_width(), dpg.get_viewport_height())
        if x or y == 0:
            return
        if w > self.clientW or h >= self.clientH:
            return
        if x > 0 or y > 0:
            self.stored_pos = [x, y]
            self.logger.debug(f"storing pos {x,y}")
        self.stored_w = dpg.get_viewport_width()
        self.stored_h = dpg.get_viewport_height()
    def toggle_fullscreen(self):
        """ Toggles fullscreen off and on for the current viewport"""
        # TODO: modify to send a fullscreen event to the inherited class
        # TODO: simplify logic
        self.fullscreen = not self.fullscreen
        if self.fullscreen:
            self.store_vp_dims()
            # Short sleeps let dearpygui apply geometry changes in order.
            time.sleep(0.1)
            self.logger.debug(
                f"stored pos: {self.stored_pos} stored_dim: {'x'.join((str(self.stored_w), str(self.stored_h)))}")
            dpg.set_viewport_resizable(True)
            dpg.configure_viewport(dpg.get_viewport_title(
            ), width=self.clientW, height=self.clientH, x_pos=0, y_pos=0)
            time.sleep(0.1)
            # for some reason need 200 padding
            dpg.set_viewport_pos([0, self.clientH-self.clientH+200])
            self.logger.debug(
                f"vp pos: {dpg.get_viewport_pos()}")
        else:
            self.logger.debug(
                f"window mode, setting to stored pos: {self.stored_pos} stored_dim: {'x'.join((str(self.stored_w), str(self.stored_h)))}")
            time.sleep(0.2)
            dpg.set_viewport_resizable(True)
            dpg.configure_viewport(dpg.get_viewport_title(
            ), width=self.stored_w, height=self.stored_h, x_pos=self.stored_pos[0], y_pos=self.stored_pos[1])
            time.sleep(0.2)
            dpg.set_viewport_pos(self.stored_pos)
    def resize_image_to_viewport(self, sender, size, user_data):
        """Re-center/resize the displayed image (or the logo when no image
        is loaded) after the window was resized."""
        w, h = size
        if not self.img_id:
            dpg.set_item_pos(
                self.logo_id, [w//2 - self.image_w//2, h//2 - self.image_h//2])
            self.position_intro_text()
            return
        dpg.set_item_width(self.img_id, self.image_w)
        dpg.set_item_height(self.img_id, self.image_h)
        if self.fullscreen:
            dpg.set_item_pos(
                self.img_id, [w//2 - self.image_w//2, h//2 - self.image_h//2])
        else:
            if dpg.is_viewport_resizable():
                dpg.set_item_pos(
                    self.img_id, [w//2 - self.image_w//2, h//2 - self.image_h//2])
            else:
                dpg.set_item_pos(
                    self.img_id, [0, 0])
        # Diagnostic logging of aspect ratios only; no further layout work.
        image_w = dpg.get_item_width(self.img_id)
        image_h = dpg.get_item_height(self.img_id)
        image_ratio = image_w / image_h
        window_ratio = w/h
        self.logger.info(f"image_ratio: {image_ratio}")
        self.logger.info(f"image wxh: {image_w}x{image_h}")
        self.logger.info(f"window wxh: {w}x{h}")
        self.logger.info(f"window_ratio: {window_ratio}")
    def quit(self):
        """Log and stop the dearpygui main loop."""
        self.logger.info("ImageWatcher quitting...")
        dpg.stop_dearpygui()
| burstMembrane/imagewatcher | src/ui/imageviewerwindow.py | imageviewerwindow.py | py | 7,968 | python | en | code | 0 | github-code | 13 |
25540710754 | from rest_framework import serializers
from heroes.models import (
Resource,
Town,
Class,
SecondarySkill,
Spell,
Creature,
Specialty,
Hero,
)
class ResourceSerializer(serializers.ModelSerializer):
    """Flat representation of a Resource (id, name, picture)."""
    class Meta:
        model = Resource
        fields = ("id", "name", "picture_url")
class TownSerializer(serializers.ModelSerializer):
    """Flat representation of a Town (id, name, picture)."""
    class Meta:
        model = Town
        fields = ("id", "name", "picture_url")
class ClassSerializer(serializers.ModelSerializer):
    """Hero class with its primary stats."""
    class Meta:
        model = Class
        fields = ("id", "name", "attack", "defense", "power", "knowledge")
class SecondarySkillSerializer(serializers.ModelSerializer):
    """Secondary skill; the numeric level is rendered as its display name."""
    class Meta:
        model = SecondarySkill
        fields = (
            "id",
            "name",
            "level",
            "description",
            "picture_url",
        )
    def to_representation(self, instance):
        result = super(SecondarySkillSerializer, self).to_representation(instance)
        # Map the stored integer to a human-readable level name.
        # NOTE(review): an unexpected level value raises KeyError here.
        level = {0: "Base", 1: "Advance", 2: "Expert"}
        result["level"] = level[result["level"]]
        return result
class SpellSerializer(serializers.ModelSerializer):
    """Spell; the magic school id is rendered as its display name."""
    class Meta:
        model = Spell
        fields = (
            "id",
            "name",
            "level",
            "magic_school",
            "description_base",
            "description_advance",
            "description_expert",
            "picture_url",
        )
    def to_representation(self, instance):
        result = super(SpellSerializer, self).to_representation(instance)
        # None means the spell belongs to every school.
        school = {0: "Fire", 1: "Air", 2: "Earth", 3: "Water", None: "All schools"}
        result["magic_school"] = school[result["magic_school"]]
        return result
class CreatureSerializer(serializers.ModelSerializer):
    """Full creature stat block, including cost and AI value."""
    class Meta:
        model = Creature
        fields = (
            "id",
            "name",
            "town",
            "level",
            "upgrade",
            "attack",
            "defense",
            "min_damage",
            "max_damage",
            "hp",
            "speed",
            "growth",
            "ai_value",
            "gold",
            "picture_url",
        )
class SpecialtySerializer(serializers.ModelSerializer):
    """Specialty with raw related ids; only one of the relations is set."""
    class Meta:
        model = Specialty
        fields = ("id", "creature", "resource", "spell", "secondary_skill", "name")
        read_only_fields = ("name",)
    def to_representation(self, instance):
        result = super(SpecialtySerializer, self).to_representation(instance)
        # Drop the unset relations so the payload carries only the one used.
        return {key: value for key, value in result.items() if value is not None}
class SpecialtyListSerializer(serializers.ModelSerializer):
    """Minimal specialty representation for list endpoints."""
    class Meta:
        model = Specialty
        fields = ("id", "name")
        read_only_fields = ("name",)
class SpecialtyDetailSerializer(SpecialtySerializer):
    """Specialty with its relations expanded to nested objects."""
    creature = CreatureSerializer(many=False, read_only=True)
    resource = ResourceSerializer(many=False, read_only=True)
    spell = SpellSerializer(many=False, read_only=True)
    secondary_skill = SecondarySkillSerializer(many=False, read_only=True)
class HeroSerializer(serializers.ModelSerializer):
    """Hero with raw related ids; validates skill consistency on write."""
    class Meta:
        model = Hero
        fields = (
            "id",
            "name",
            "hero_class",
            "specialty",
            "secondary_skill_first",
            "secondary_skill_second",
            "spell",
            "picture_url",
        )
    def to_representation(self, instance):
        result = super(HeroSerializer, self).to_representation(instance)
        # Omit unset optional fields from the payload.
        return {key: value for key, value in result.items() if value is not None}
    def validate(self, attrs):
        data = super(HeroSerializer, self).validate(attrs=attrs)
        # Model-level cross-field check; raises if the class/specialty/skill
        # combination is not allowed.
        Hero.validate_skill(
            attrs["hero_class"],
            attrs["specialty"],
            attrs["secondary_skill_first"],
            attrs["secondary_skill_second"],
        )
        return data
class HeroListSerializer(HeroSerializer):
    """Hero for list endpoints: relations rendered as their names."""
    hero_class = serializers.SlugRelatedField(
        many=False, read_only=True, slug_field="name"
    )
    specialty = serializers.StringRelatedField(many=False)
    secondary_skill_first = serializers.SlugRelatedField(
        many=False, read_only=True, slug_field="name"
    )
    secondary_skill_second = serializers.SlugRelatedField(
        many=False, read_only=True, slug_field="name"
    )
    spell = serializers.SlugRelatedField(many=False, read_only=True, slug_field="name")
class HeroDetailSerializer(HeroSerializer):
    """Hero for detail endpoints: relations expanded to nested objects."""
    hero_class = ClassSerializer(many=False, read_only=True)
    specialty = SpecialtyDetailSerializer(many=False, read_only=True)
    secondary_skill_first = SecondarySkillSerializer(many=False, read_only=True)
    secondary_skill_second = SecondarySkillSerializer(many=False, read_only=True)
    spell = SpellSerializer(many=False, read_only=True)
| volodymyr-vereshchak/heroes3-rest-api | heroes/serializers.py | serializers.py | py | 4,891 | python | en | code | 0 | github-code | 13 |
24654334231 | __author__ = 'Michael Kaldawi'
"""
Programmer: Michael Kaldawi
Class: CE 4348.501
Assignment: P01 (Program 1)
Program Description:
This program implements a prime number finder utilizing the sieve
of Eratosthenes, multiprocessing, and communication via pipes.
"""
# Note: we are using numpy for our array processing to speed up
# runtime. numpy needs to be installed/ imported for this
# program to work.
from multiprocessing import Process, Pipe
import math
import cProfile
import numpy as np
# This function finds primes between 'start' and 'end' indices.
# The function returns a 1x(end-start) array of boolean values
# indicating primes as 'True'.
def find_primes(start, end, conn=None):
    """Sieve of Eratosthenes over [0, end).

    Builds a boolean array where True marks a prime, then either returns
    the slice beginning at *start* or sends it over *conn*.

    Arguments:
        start -- first index of interest; the result array is array[start:]
        end   -- exclusive upper bound of the sieve
        conn  -- optional multiprocessing Connection; when given, the result
                 is sent through it instead of being returned

    Returns:
        {'start': start, 'array': <bool array for start..end-1>} when conn
        is None; otherwise nothing (the dict is sent through conn).
        Note: indices 0 and 1 are left True, so callers should use start >= 2.
    """
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported dtype.
    array = np.ones(shape=end, dtype=bool)
    # Cross out multiples of every base up to and INCLUDING floor(sqrt(end)).
    # The original used range(2, int(math.sqrt(end))), which skipped the
    # last base and let squares such as 961 = 31**2 (for end=1000) survive.
    for i in range(2, int(math.sqrt(end)) + 1):
        if array[i]:
            # Start at i**2: smaller multiples were removed by smaller bases.
            j = i ** 2
            while j < end:
                array[j] = False
                j += i
    if conn is None:
        return {'start': start, 'array': array[start:]}
    # Child-process mode: ship the result to the parent and close the pipe.
    conn.send({'start': start, 'array': array[start:]})
    conn.close()
# This function prints the prime numbers marked by True in a
# passed True/ False array.
def print_primes(start, array):
    """Print every number marked True in *array*, offset by *start*,
    together with its flag; keeps a running count of primes found."""
    total = 0
    for offset, is_prime in enumerate(array):
        if is_prime:
            print(start + offset, is_prime)
            total += 1
    # print(total)
# a function to print the prime numbers marked by True in a
# passed True/ False array into a file
def write_primes(file_name, mode, start, array):
    """Write each number marked True in *array*, offset by *start*, to a
    file, one per line.

    Arguments:
        file_name -- path of the output file
        mode      -- open() mode, e.g. 'w' to truncate or 'a' to append
        start     -- number corresponding to index 0 of *array*
        array     -- iterable of booleans marking primes
    """
    total = 0
    pos = start
    # 'with' guarantees the handle is flushed and closed; the original
    # opened the file and never closed it (resource leak).
    with open(file_name, mode) as f:
        for flag in array:
            if flag:
                f.write(str(pos) + "\n")
                total += 1
            pos += 1
    # f.write("total: " + str(total))
# Due to the nature of the profiling package cProfile, we require
# an additional function to start the child process.
def start_child(process):
    """Launch *process*; kept as a separate function so cProfile can
    attribute the start-up cost of each child to a single call."""
    process.start()
# This function calculates the prime numbers between
# 2 and 1,000, then starts 10 child processes to complete
# the calculation of prime numbers between 1,001 and 1,000,000.
# The parent process requests for the calculated primes from
# the child processes via Pipes. The child processes then
# return the calculated primes via the pipes.
def main():
    """Find primes up to ~1,000,000 using 10 child processes.

    The parent sieves [2, 1000) itself, then spawns children for the
    higher ranges, collecting each child's boolean array over a Pipe
    and finally writing the flagged numbers to ``primes.txt``.
    """
    # 'data' stores all boolean arrays. 'children' is an
    # array of child processes.
    data = []
    children = []
    # Find the primes between 2 and 1000.
    for i in find_primes(start=2, end=1000)['array']:
        data.append(i)
    # Make 10 pipes and 10 corresponding child processes.
    # NOTE(review): the ranges are not contiguous — process 0 covers
    # [1001, 100000) but process 1 starts at 1*100001, process 2 at
    # 2*100001, etc., so values such as 100000 and 200001 fall in gaps;
    # confirm the intended partitioning.  Each child also re-sieves from
    # 0 internally, so the work is not actually split.
    for process in range(0, 10):
        parent_conn, child_conn = Pipe()
        if process == 0:
            children.append(Process(target=find_primes,
                                    args=(1001, 100000, child_conn)))
        else:
            children.append(Process(target=find_primes, args=(
                process*100001, (process+1)*100000, child_conn)))
        # Start each child process. Profile the run time of each process.
        cProfile.runctx('start_child(children[process])',
                        globals(), locals())
        # Request each boolean array from the child processes,
        # and append the arrays to 'data'.  recv() blocks until the
        # child sends, so children effectively run one at a time.
        for i in parent_conn.recv()['array']:
            data.append(i)
        children[process].join()
    # write the prime numbers to 'primes.txt'
    cProfile.runctx('write_primes(file_name="primes.txt", '
                    'mode="w", start=2, array=data)',
                    globals(), locals())
# This is our 'main' function. The first line of code
# executed in this program is 'main()'
#
# This function executes main().
# Only the parent process can run this function.
if __name__ == '__main__':
main() | michael-kaldawi/Prime-Number-Multiprocessing | PipeTest.py | PipeTest.py | py | 4,121 | python | en | code | 1 | github-code | 13 |
17057594194 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PageVisitDataResponse(object):
    """Response payload holding visit statistics (PV/UV) for one page URL."""

    # Serializable attribute names, shared by both dict helpers below.
    _FIELDS = ('page_pv', 'page_uv', 'url')

    def __init__(self):
        self._page_pv = None
        self._page_uv = None
        self._url = None

    @property
    def page_pv(self):
        return self._page_pv

    @page_pv.setter
    def page_pv(self, value):
        self._page_pv = value

    @property
    def page_uv(self):
        return self._page_uv

    @page_uv.setter
    def page_uv(self, value):
        self._page_uv = value

    @property
    def url(self):
        return self._url

    @url.setter
    def url(self, value):
        self._url = value

    def to_alipay_dict(self):
        """Serialize truthy fields, delegating to nested ``to_alipay_dict``."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Falsy fields (None, 0, '') are omitted, matching the
                # generated-SDK convention.
                continue
            serializer = getattr(value, 'to_alipay_dict', None)
            params[name] = serializer() if serializer else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from dict *d*; returns None for empty input."""
        if not d:
            return None
        o = PageVisitDataResponse()
        for name in PageVisitDataResponse._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/PageVisitDataResponse.py | PageVisitDataResponse.py | py | 1,702 | python | en | code | 241 | github-code | 13 |
# Initialization: k-means image-segmentation demo script.
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from skimage import io
# from __future__ import print_function
# %matplotlib inline
plt.rcParams['figure.figsize'] = (15.0, 12.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Auto-reload external modules (notebook-only magics, kept commented).
# %load_ext autoreload
# %autoreload 2
# Seed the RNG so clustering results are reproducible.
np.random.seed(0)
# Load and display the input image.
img = io.imread('train.jpg')
H, W, C = img.shape
plt.imshow(img)
plt.axis('off')
plt.show()
from segmentation import color_features
np.random.seed(0)
features=color_features(img)
# Sanity checks (disabled).
# assert features.shape == (H * W, C),\
#     "Incorrect shape! Check your implementation."
#
# assert features.dtype == np.float,\
#     "dtype of color_features should be float."
from segmentation import kmeans_fast
assignments=kmeans_fast(features,8)
segments = assignments.reshape((H, W))
# Display the color-only segmentation result.
plt.imshow(segments, cmap='viridis')
plt.axis('off')
plt.show()
from segmentation import color_position_features
np.random.seed(0)
features = color_position_features(img)
# Sanity checks on the color+position features.
# NOTE(review): ``np.float`` was removed in NumPy >= 1.24; this assert
# will raise AttributeError on modern NumPy — should be plain ``float``.
assert features.shape == (H * W, C + 2),\
    "Incorrect shape! Check your implementation."
assert features.dtype == np.float,\
    "dtype of color_features should be float."
assignments = kmeans_fast(features, 8)
segments = assignments.reshape((H, W))
# Display the color+position segmentation result.
plt.imshow(segments, cmap='viridis')
plt.axis('off')
plt.show()
from utils import visualize_mean_color_image
visualize_mean_color_image(img, segments)
print('2'*100)
from utils import load_dataset, compute_segmentation
from segmentation import evaluate_segmentation
# Load the small evaluation dataset (images + ground-truth masks).
imgs, gt_masks = load_dataset('./data')
# Segmentation parameters.
num_segments = 3
clustering_fn = kmeans_fast
feature_fn = color_features
scale = 0.5
mean_accuracy = 0.0
segmentations = []
for i, (img, gt_mask) in enumerate(zip(imgs, gt_masks)):
    # Compute a segmentation for this image
    segments = compute_segmentation(img, num_segments,
                                    clustering_fn=clustering_fn,
                                    feature_fn=feature_fn,
                                    scale=scale)
    segmentations.append(segments)
    # Evaluate this segmentation against the ground-truth mask.
    accuracy = evaluate_segmentation(gt_mask, segments)
    print('Accuracy for image %d: %0.4f' % (i, accuracy))
    mean_accuracy += accuracy
mean_accuracy = mean_accuracy / len(imgs)
print('Mean accuracy: %0.4f' % mean_accuracy)
| lixixi89055465/py_stu | segment/train.py | train.py | py | 2,657 | python | en | code | 1 | github-code | 13 |
22984748178 | """
Реализуйте класс Version, описывающий версию программного обеспечения.
При создании экземпляра класс должен принимать один аргумент:
version — строка из трех целых чисел, разделенных точками и описывающих версию ПО.
Например, 2.8.1. Если одно из чисел не указано, оно считается равным нулю.
Например, версия 2 равнозначна версии 2.0.0, а версия 2.8 равнозначна версии 2.8.0
Экземпляр класса Version должен иметь следующее формальное строковое представление:
Version('<версия ПО в виде трех целых чисел, разделенных точками>')
И следующее неформальное строковое представление:
<версия ПО в виде трех целых чисел, разделенных точками>
Также экземпляры класса Version должны поддерживать между собой все операции сравнения с помощью операторов
==, !=, >, <, >=, <=. Два Version объекта считаются равными, если все три числа в их версиях совпадают.
Version объект считается больше другогоVersion объекта, если первое число в его версии больше.
Или если второе число в его версии больше, если первые числа совпадают.
Или если третье число в его версии больше, если первые и вторые числа совпадают.
Примечание 1. Если объект, с которым выполняется операция сравнения, некорректен, метод, реализующий эту операцию,
должен вернуть константу NotImplemented.
Примечание 2. Дополнительная проверка данных на корректность не требуется.
Гарантируется, что реализованный класс используется только с корректными данными.
Примечание 3. Никаких ограничений касательно реализации класса Version нет, она может быть произвольной.
"""
from functools import total_ordering
@total_ordering
class Version:
    """A three-part software version, e.g. ``Version('2.8.1')``.

    Missing components default to zero, so ``'2.8'`` equals ``'2.8.0'``.
    Ordering compares the component lists lexicographically; comparing
    against a non-Version object yields NotImplemented, and
    ``total_ordering`` derives the remaining comparison operators.
    """

    def __init__(self, version_):
        parts = [int(piece) for piece in version_.split('.')]
        while len(parts) < 3:
            parts.append(0)
        self.version = parts[:3]

    def _dotted(self):
        # Shared textual form used by both __str__ and __repr__.
        return '.'.join(str(number) for number in self.version)

    def __repr__(self):
        return f"Version({self._dotted()!r})"

    def __str__(self):
        return self._dotted()

    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.version == other.version

    def __lt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self.version < other.version
# INPUT DATA:
# TEST_1:
print("\nтест 1")
print(Version('3.0.3') == Version('1.11.28'))
print(Version('3.0.3') < Version('1.11.28'))
print(Version('3.0.3') > Version('1.11.28'))
print(Version('3.0.3') <= Version('1.11.28'))
print(Version('3.0.3') >= Version('1.11.28'))
# TEST_2:
print("\nтест 2")
print(Version('3') == Version('3.0'))
print(Version('3') == Version('3.0.0'))
print(Version('3.0') == Version('3.0.0'))
# TEST_3:
print("\nтест 3")
versions = [Version('2'), Version('2.1'), Version('1.9.1')]
print(sorted(versions))
print(min(versions))
print(max(versions))
# TEST_4:
print("\nтест 4")
versions = [Version('162.5'), Version('68.3'), Version('173.8'), Version('108.9'), Version('159.6'), Version('145.7'),
Version('187.6'), Version('137.7'), Version('33.7'), Version('22.4'), Version('199.4'), Version('122.1'),
Version('47.4'), Version('10.2'), Version('164.9'), Version('191.6'), Version('139.9'), Version('184.4'),
Version('94.9'), Version('188.6'), Version('56.8'), Version('138.7'), Version('83.2'), Version('59.4'),
Version('189.7'), Version('128.5'), Version('6.6'), Version('111.2'), Version('5.6'), Version('188.8'),
Version('64.9'), Version('76.6'), Version('85.5'), Version('195.6'), Version('12.8'), Version('66.7'),
Version('121.7'), Version('20.3'), Version('9.8'), Version('140.8'), Version('70.3'), Version('12.3'),
Version('97.9'), Version('10.4'), Version('98.5'), Version('74.1'), Version('164.8'), Version('55.1'),
Version('147.7'), Version('39.2'), Version('27.4'), Version('50.3'), Version('174.7'), Version('196.9'),
Version('106.3'), Version('89.1'), Version('59.9'), Version('189.4'), Version('45.7'), Version('158.2'),
Version('147.5'), Version('3.2'), Version('49.9'), Version('173.6'), Version('63.9'), Version('8.2'),
Version('29.4'), Version('15.7'), Version('85.2'), Version('109.2'), Version('152.9'), Version('49.6'),
Version('53.5'), Version('26.7'), Version('135.9'), Version('155.3'), Version('134.7'), Version('159.4'),
Version('99.3'), Version('188.9'), Version('197.4'), Version('99.2'), Version('160.5'), Version('183.7'),
Version('74.2'), Version('184.7'), Version('139.8'), Version('199.2'), Version('122.1'), Version('198.7'),
Version('190.1'), Version('200.2'), Version('40.3'), Version('150.4'), Version('20.2'), Version('186.7'),
Version('47.2'), Version('57.5'), Version('72.8'), Version('23.1')]
print(sorted(versions))
print(min(versions))
print(max(versions))
# TEST_5:
print("\nтест 5")
version = Version('12')
not_supported = ['12.0.0', 12.0, (12, 0), {12: 0}, True, False]
for obj in not_supported:
print(obj == version)
# TEST_6:
print("\nтест 6")
version = Version('25')
print(version.__eq__(1))
print(version.__ne__(1.1))
print(version.__gt__(range(5)))
print(version.__lt__([1, 2, 3]))
print(version.__ge__({4, 5, 6}))
print(version.__le__({1: 'one'}))
| Archangel-Ray/OOP_Generation-Python_course-on-Stepik | 5. Магические методы/5.3 Сравнение объектов/04 сравнить строку из трёх чисел.py | 04 сравнить строку из трёх чисел.py | py | 6,421 | python | ru | code | 0 | github-code | 13 |
8961097609 | import pytest
from asynctest import TestCase, logging
from Trader import Trader
from OrderRequest import OrderRequest, OrderRequestStatus, OrderRequestType, OrderRequestList, SegmentedOrderRequestList
import jsonpickle
POLONIEX = 'poloniex'
BINANCE = 'binance'
KRAKEN = 'kraken'
BITSTAMP = 'bitstamp'
COINBASEPRO = 'coinbasepro'
BITTREX = 'bittrex'
ETH_EUR = 'ETH/EUR'
ETH_BTC = 'ETH/BTC'
BTC_USD = 'ETH/BTC'
logging.getLogger('Trader').setLevel(logging.DEBUG)
rootLogger = logging.getLogger()
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s - [%(filename)s:%(funcName)s:%(lineno)s]')
ch.setFormatter(formatter)
# add the handlers to the logger
rootLogger.addHandler(ch)
class TestClass(TestCase):
    """End-to-end tests for Trader against live exchange credentials.

    All test methods are skipped by default (``e2e test``); they exercise
    real exchange connections configured via AWS Parameter Store.
    """
    # Run against live (non-sandbox) exchanges.
    isSandboxMode = False

    async def setUp(self):
        # Shorten trade-order TTL and status polling so tests finish fast.
        Trader.TTL_TRADEORDER_S = 1
        Trader.FETCH_ORDER_STATUS_TIMEOUT = 5
        self.trader = Trader(is_sandbox_mode=TestClass.isSandboxMode)
        await self.trader.initExchangesFromAWSParameterStore()

    async def tearDown(self):
        # BUGFIX: this hook was named ``TearDown`` (capital T), so the
        # unittest/asynctest runner never invoked it and the exchange
        # connections were leaked after every test.
        await self.trader.close_exchanges()

    async def __test_fetch_balances(self, exch):
        # Helper: assert the exchange is present and has a non-empty balance.
        assert (exch in self.trader.getBalances()) is True
        assert len(self.trader.getBalances()) > 0
        assert len(self.trader.getBalances()[exch]) > 0

    @pytest.mark.skip(reason="e2e test")
    @pytest.mark.asyncio
    async def test_fetch_balances(self):
        assert self.trader.isSandboxMode() is False
        await self.__test_fetch_balances(KRAKEN)
        await self.__test_fetch_balances(BITSTAMP)
        await self.__test_fetch_balances(BITTREX)
        await self.__test_fetch_balances(COINBASEPRO)

    @pytest.mark.skip(reason="e2e test")
    @pytest.mark.asyncio
    async def test_execute_trades(self):
        # Auto-confirm the interactive prompt inside Trader.execute.
        self.trader.input = lambda x: 'ok'
        or11 = OrderRequest('NINCSILYEN', ETH_BTC, volumeBase=0.1, limitPrice=0.02, meanPrice=0.02, requestType=OrderRequestType.BUY)
        orl1 = OrderRequestList([or11])
        # orl2 = OrderRequestList([or21, or22])
        stl = SegmentedOrderRequestList('uuid', [orl1])
        await self.trader.execute(stl)

    @pytest.mark.skip(reason="e2e test")
    @pytest.mark.asyncio
    async def test_store_balances(self):
        Trader.storeFreeBalances(None, None, {
            "testexchage": {
                "TEST": 0.1650193171279469,
            }
        })
| gbarany/crypto-arbitrage-finder | src/Trader_e2e_test.py | Trader_e2e_test.py | py | 2,487 | python | en | code | 4 | github-code | 13 |
39859177360 | from typing import Optional
import flet as ft
from switchpokepilot.mainwindow.state import MainWindowState
from switchpokepilot.mainwindow.ui.command_area import CommandArea
from switchpokepilot.mainwindow.ui.log_area import LogArea
from switchpokepilot.mainwindow.ui.video_area import VideoArea
class ToolsArea(ft.UserControl):
    """Tabbed side panel hosting the log, command, and video sub-areas."""

    def __init__(self,
                 window_state: MainWindowState,
                 width: int,
                 height: int):
        super().__init__(width=width, height=height)
        self._window_state = window_state
        # Vertical space reserved for the tab indicator strip; each tab's
        # content gets the remaining height.
        self._indicators_height = 20
        self._container: Optional[ft.Container] = None
        self._area: Optional[ft.Tabs] = None
        self._tabs: list[ft.Tab] = []

    def build(self):
        """Create the Tabs control (log / command / video) inside a Container."""
        self._tabs = [
            ft.Tab(icon=ft.icons.COMMENT,
                   content=LogArea(window_state=self._window_state,
                                   width=self.width,
                                   height=self.height - self._indicators_height)),
            ft.Tab(icon=ft.icons.NAVIGATION,
                   content=CommandArea(window_state=self._window_state,
                                       width=self.width,
                                       height=self.height - self._indicators_height)),
            ft.Tab(icon=ft.icons.VIDEOCAM,
                   content=VideoArea(window_state=self._window_state,
                                     width=self.width,
                                     height=self.height - self._indicators_height)),
        ]
        self._area = ft.Tabs(selected_index=0,
                             width=self.width,
                             height=self.height,
                             animation_duration=300,
                             tabs=self._tabs)
        self._container = ft.Container(content=self._area,
                                       width=self.width,
                                       height=self.height,
                                       padding=ft.padding.all(0),
                                       margin=ft.margin.all(0),
                                       alignment=ft.alignment.top_left)
        return self._container

    def resize(self, width: int, height: int):
        """Propagate a new size to every tab's content and the containers.

        Each child area is assumed to expose its own ``resize`` method
        with the same signature.
        """
        self.width = width
        self.height = height
        for tab in self._tabs:
            if tab.content is not None:
                tab.content.resize(width, height - self._indicators_height)
                tab.content.update()
        for control in [self._area, self._container]:
            control.width = width
            control.height = height - self._indicators_height
            control.update()
        self.update()
| carimatics/switch-poke-pilot | switchpokepilot/mainwindow/ui/tools_area.py | tools_area.py | py | 2,693 | python | en | code | 3 | github-code | 13 |
73314022099 |
import os
import sys
import time
import numpy as np
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from datetime import datetime
import scipy.io as scio
from util.SetRandomSeed import set_seed, worker_init
from util.SaveChkp import save_checkpoint
from util.MakeDataList import makelist
import util.SetDistTrain as utils
from yueli.code.nlos_cs_nips2023.pro.Train_ import train
from tqdm import tqdm
import torch.nn.functional as F
from models.utils_pytorch import phasor_1_10
from models.utils_pytorch import fk_1_10
from models.utils_pytorch import lct_1_10
import cv2
cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
lsmx = torch.nn.LogSoftmax(dim=1)
smx = torch.nn.Softmax(dim=1)
from pro.Loss import criterion_KL, criterion_L2
from skimage.metrics import structural_similarity as ssim
def main():
    """Run a traditional NLOS reconstruction (phasor/RSD) over a dataset.

    Loads each .mat measurement from ``rw_path``, downsamples it to the
    configured spatial resolution, reconstructs a volume with the model,
    and writes max-intensity front views as PNGs to ``out_path``.
    Requires CUDA and the local dataset paths below.
    """
    # baseline
    spatial = 128
    temp_bin = 512
    # Time-bin width in metres; 0.0096 presumably matches the hardware
    # bin size for 512 bins — TODO confirm.
    bin_len = ( 512 // temp_bin ) * 0.0096
    model = phasor_1_10.phasor(spatial=128, crop=temp_bin, bin_len=bin_len, sampling_coeff=2.0, cycles=5,dnum=1)
    # Alternative reconstruction back-ends (kept for experimentation):
    # model = fk_1_10.lct_fk(spatial=128, crop=512, bin_len=bin_len,dnum=1)
    # model = lct_1_10.lct(spatial=128, crop=512, bin_len=bin_len,method='lct',dnum=1)
    model.cuda()
    model = torch.nn.DataParallel(model)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Total Numbers of parameters are: {}".format(num_params))
    print("+++++++++++++++++++++++++++++++++++++++++++")
    print("Start eval...")
    # NOTE(review): the first assignment is immediately overwritten —
    # only the 10-minute measurement set is actually used.
    rw_path = '/data/yueli/dataset/align_fk_256_512/'
    rw_path = '/data/yueli/dataset/align_fk_256_512_meas_10min'
    out_path = f'/data/yueli/nlos_sp_output/nips_cs/traditional_algos/fk_meas_10min/compressed_tradition_out_resize_before/sptial{spatial}/rsd/'
    # out_path = f'/data/yueli/nlos_sp_output/nips_cs/traditional_algos/fk_data/interp128/sptial{spatial}/'
    if not os.path.exists(out_path):
        os.makedirs(out_path, exist_ok=True)
    all_file = []
    files = os.listdir(rw_path)
    for fi in files:
        fi_d = os.path.join(rw_path, fi)
        all_file.append(fi_d)
    ims = []
    for i in range(len(all_file)):
        transient_data = scio.loadmat(all_file[i])['final_meas'] #sig final_meas measlr
        # Assumes each .mat holds a 256x256xT transient — TODO confirm.
        M_wnoise = np.asarray(transient_data).astype(np.float32).reshape([1,256,256,-1]) # 1, 1, 64, 64,2048 8ps
        # Spatially bin 2x2 (sum) down to 128x128.
        M_wnoise = M_wnoise[:,::2,:,:] + M_wnoise[:,1::2,:,:]
        M_wnoise = M_wnoise[:,:,::2,:] + M_wnoise[:,:,1::2,:]
        # Further spatial subsampling down to ``spatial`` points per side.
        ds = 128 // spatial
        M_wnoise = M_wnoise[:,ds//2::ds,ds//2::ds,:]
        M_wnoise = np.ascontiguousarray(M_wnoise)
        # Reorder to (batch, time, h, w) for the model.
        M_wnoise = np.transpose(M_wnoise, (0, 3, 1, 2))
        M_mea = torch.from_numpy(M_wnoise[None])
        # Resize back up to 512x128x128 before reconstruction.
        M_mea = F.interpolate(M_mea,[512,128,128])
        # resized_mea = M_mea.detach().cpu().numpy()[0,0].transpose([1,2,0])
        # scio.savemat(out_path + files[i][:-4] + f'{spatial}to128.mat',{'resized_mea':resized_mea})
        print(M_mea.size())
        with torch.no_grad():
            model.eval()
            re = model(M_mea,[0,0,0],[temp_bin,temp_bin,temp_bin])
        volumn_MxNxN = re.detach().cpu().numpy()[0, -1]
        # zdim = volumn_MxNxN.shape[0] * 100 // 128
        # volumn_MxNxN = volumn_MxNxN[:zdim]
        # print('volumn min, %f' % volumn_MxNxN.min())
        # print('volumn max, %f' % volumn_MxNxN.max())
        # Clamp negatives, then project: max-intensity (front) view and
        # argmax (depth) view, both normalized to 8-bit.
        volumn_MxNxN[volumn_MxNxN < 0] = 0
        front_view = np.max(volumn_MxNxN, axis=0)
        front_view = front_view / np.max(front_view)
        front_view = (front_view*255).astype(np.uint8)
        # front_view = cv2.resize(front_view, (128, 128))
        depth_view = np.argmax(volumn_MxNxN, axis=0)
        depth_view = depth_view.astype(np.float32)/np.max(depth_view)
        depth_view = (depth_view*255).astype(np.uint8)
        # depth_view = cv2.resize(depth_view, (128, 128))
        # cv2.imwrite(out_path + files[i] + f'_dep_pred_temp{temp_bin}.png',depth_view)
        cv2.imwrite(out_path + files[i] + f'_int_pred_temp{temp_bin}.png',front_view)
        # dep_np = dep_re.squeeze(0).data.cpu().numpy()
        # dep_np = (dep_np).clip(0,1)
        # dep_np = dep_np/np.max(dep_np)
        # cv2.imwrite(out_path + files[i] + f'_dep_pred_temp{temp_bin}.png',dep_np.squeeze(0)*255)
        # int_re = int_re.squeeze(0).data.cpu().numpy()
        # int_re = (int_re).clip(0,1)
        # int_re = int_re/np.max(int_re)
        # cv2.imwrite(out_path + files[i] + f'_int_pred_temp{temp_bin}.png',int_re.squeeze(0)*255)
        # import matplotlib.pyplot as plt
        # plt.imshow(dep_np.squeeze())
        # plt.savefig(out_path + files[i] + '_pred_512.png')
        # scio.savemat(out_path + files[i] + '_pred_512.mat',{'dep_pre':dep_np})
if __name__=="__main__":
    print("+++++++++++++++++++++++++++++++++++++++++++")
    print("Sleeping...")
    # Delayed-start toggle: multiply by N hours to postpone the run;
    # currently 3600*0 == 0, i.e. no delay.
    time.sleep(3600*0)
    print("Wake UP")
    print("+++++++++++++++++++++++++++++++++++++++++++")
    print("Execuating code...")
    main()
| Depth2World/Under-scanning_NLOS | validate_utils/tra_algo.py | tra_algo.py | py | 5,353 | python | en | code | 1 | github-code | 13 |
37154804684 | from __future__ import print_function, absolute_import, division
import six
import operator
import itertools
import warnings
import mmap
from distutils.version import LooseVersion
import sys
import pytest
import astropy
from astropy.io import fits
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs import _wcs
from astropy.tests.helper import assert_quantity_allclose
from astropy.convolution import Gaussian2DKernel, Tophat2DKernel
import numpy as np
from .. import (SpectralCube, VaryingResolutionSpectralCube, BooleanArrayMask,
FunctionMask, LazyMask, CompositeMask)
from ..spectral_cube import (OneDSpectrum, Projection,
VaryingResolutionOneDSpectrum,
LowerDimensionalObject)
from ..np_compat import allbadtonan
from .. import spectral_axis
from .. import base_class
from .. import utils
from . import path
from .helpers import assert_allclose, assert_array_equal
# needed to test for warnings later
warnings.simplefilter('always', UserWarning)
warnings.simplefilter('error', utils.UnsupportedIterationStrategyWarning)
warnings.simplefilter('error', utils.NotImplementedWarning)
warnings.simplefilter('error', utils.WCSMismatchWarning)
try:
import yt
YT_INSTALLED = True
YT_LT_301 = LooseVersion(yt.__version__) < LooseVersion('3.0.1')
except ImportError:
YT_INSTALLED = False
YT_LT_301 = False
try:
import scipy
scipyOK = True
except ImportError:
scipyOK = False
import os
# if ON_TRAVIS is set, we're on travis.
on_travis = bool(os.environ.get('ON_TRAVIS'))
from radio_beam import Beam, Beams
NUMPY_LT_19 = LooseVersion(np.__version__) < LooseVersion('1.9.0')
def cube_and_raw(filename):
    """Return ``(SpectralCube, raw ndarray)`` for a test FITS file."""
    data_path = path(filename)
    cube = SpectralCube.read(data_path, format='fits', mode='readonly')
    raw = fits.getdata(data_path)
    return cube, raw
def test_arithmetic_warning(recwarn):
    """Arithmetic on a small cube should warn about loading it into memory."""
    cube, data = cube_and_raw('vda_Jybeam_lower.fits')

    assert not cube._is_huge

    # make sure the small cube raises a warning about loading into memory
    cube + 5*cube.unit

    w = recwarn.list[-1]

    assert 'requires loading the entire cube into' in str(w.message)
def test_huge_disallowed():
    """Operations on a 'huge' cube must raise unless explicitly allowed.

    The memory threshold is lowered (instead of using a genuinely large
    cube) so the test itself stays cheap; it is restored in ``finally``.
    """
    cube, data = cube_and_raw('vda_Jybeam_lower.fits')
    cube = SpectralCube(data=data, wcs=cube.wcs)

    assert not cube._is_huge

    # We need to reduce the memory threshold rather than use a large cube to
    # make sure we don't use too much memory during testing.
    from .. import cube_utils
    OLD_MEMORY_THRESHOLD = cube_utils.MEMORY_THRESHOLD

    try:
        cube_utils.MEMORY_THRESHOLD = 10

        assert cube._is_huge

        with pytest.raises(ValueError) as exc:
            cube + 5*cube.unit
        assert 'entire cube into memory' in exc.value.args[0]

        with pytest.raises(ValueError) as exc:
            cube.max(how='cube')
        assert 'entire cube into memory' in exc.value.args[0]

        # Opting in via allow_huge_operations suppresses the guard.
        cube.allow_huge_operations = True

        # just make sure it doesn't fail
        cube + 5*cube.unit
    finally:
        cube_utils.MEMORY_THRESHOLD = OLD_MEMORY_THRESHOLD
class BaseTest(object):
    """Shared fixture: a masked single-beam cube from 'adv.fits'.

    Exposes ``self.c`` (cube masked with ``d > 0.5``), ``self.mask``,
    and ``self.d`` (the raw ndarray) to subclasses.
    """

    def setup_method(self, method):
        c, d = cube_and_raw('adv.fits')
        mask = BooleanArrayMask(d > 0.5, c._wcs)
        c._mask = mask
        self.c = c
        self.mask = mask
        self.d = d
class BaseTestMultiBeams(object):
    """Shared fixture: same as BaseTest but using the multi-beam cube
    'adv_beams.fits' (per-channel beams)."""

    def setup_method(self, method):
        c, d = cube_and_raw('adv_beams.fits')
        mask = BooleanArrayMask(d > 0.5, c._wcs)
        c._mask = mask
        self.c = c
        self.mask = mask
        self.d = d
translist = [('advs', [0, 1, 2, 3]),
('dvsa', [2, 3, 0, 1]),
('sdav', [0, 2, 1, 3]),
('sadv', [0, 1, 2, 3]),
('vsad', [3, 0, 1, 2]),
('vad', [2, 0, 1]),
('vda', [0, 2, 1]),
('adv', [0, 1, 2]),
]
translist_vrsc = [('vda_beams', [0, 2, 1])]
class TestSpectralCube(object):
@pytest.mark.parametrize(('name', 'trans'), translist + translist_vrsc)
def test_consistent_transposition(self, name, trans):
"""data() should return velocity axis first, then world 1, then world 0"""
c, d = cube_and_raw(name + '.fits')
expected = np.squeeze(d.transpose(trans))
assert_allclose(c._get_filled_data(), expected)
@pytest.mark.parametrize(('file', 'view'), (
('adv.fits', np.s_[:, :,:]),
('adv.fits', np.s_[::2, :, :2]),
('adv.fits', np.s_[0]),
))
def test_world(self, file, view):
p = path(file)
d = fits.getdata(p)
wcs = WCS(p)
c = SpectralCube(d, wcs)
shp = d.shape
inds = np.indices(d.shape)
pix = np.column_stack([i.ravel() for i in inds[::-1]])
world = wcs.all_pix2world(pix, 0).T
world = [w.reshape(shp) for w in world]
world = [w[view] * u.Unit(wcs.wcs.cunit[i])
for i, w in enumerate(world)][::-1]
w2 = c.world[view]
for result, expected in zip(w2, world):
assert_allclose(result, expected)
@pytest.mark.parametrize('view', (np.s_[:, :,:],
np.s_[:2, :3, ::2]))
def test_world_transposes_3d(self, view):
c1, d1 = cube_and_raw('adv.fits')
c2, d2 = cube_and_raw('vad.fits')
for w1, w2 in zip(c1.world[view], c2.world[view]):
assert_allclose(w1, w2)
@pytest.mark.parametrize('view',
(np.s_[:, :,:],
np.s_[:2, :3, ::2],
np.s_[::3, ::2, :1],
np.s_[:], ))
def test_world_transposes_4d(self, view):
c1, d1 = cube_and_raw('advs.fits')
c2, d2 = cube_and_raw('sadv.fits')
for w1, w2 in zip(c1.world[view], c2.world[view]):
assert_allclose(w1, w2)
@pytest.mark.parametrize(('name','masktype','unit'),
itertools.product(('advs', 'dvsa', 'sdav', 'sadv', 'vsad', 'vad', 'adv',),
(BooleanArrayMask, LazyMask, FunctionMask, CompositeMask),
('Hz', u.Hz),
)
)
def test_with_spectral_unit(self, name, masktype, unit):
cube, data = cube_and_raw(name + '.fits')
cube_freq = cube.with_spectral_unit(unit)
if masktype == BooleanArrayMask:
# don't use data here:
# data haven't necessarily been rearranged to the correct shape by
# cube_utils.orient
mask = BooleanArrayMask(cube.filled_data[:].value>0,
wcs=cube._wcs)
elif masktype == LazyMask:
mask = LazyMask(lambda x: x>0, cube=cube)
elif masktype == FunctionMask:
mask = FunctionMask(lambda x: x>0)
elif masktype == CompositeMask:
mask1 = FunctionMask(lambda x: x>0)
mask2 = LazyMask(lambda x: x>0, cube)
mask = CompositeMask(mask1, mask2)
cube2 = cube.with_mask(mask)
cube_masked_freq = cube2.with_spectral_unit(unit)
assert cube_freq._wcs.wcs.ctype[cube_freq._wcs.wcs.spec] == 'FREQ-W2F'
assert cube_masked_freq._wcs.wcs.ctype[cube_masked_freq._wcs.wcs.spec] == 'FREQ-W2F'
assert cube_masked_freq._mask._wcs.wcs.ctype[cube_masked_freq._mask._wcs.wcs.spec] == 'FREQ-W2F'
# values taken from header
rest = 1.42040571841E+09*u.Hz
crval = -3.21214698632E+05*u.m/u.s
outcv = crval.to(u.m, u.doppler_optical(rest)).to(u.Hz, u.spectral())
assert_allclose(cube_freq._wcs.wcs.crval[cube_freq._wcs.wcs.spec],
outcv.to(u.Hz).value)
assert_allclose(cube_masked_freq._wcs.wcs.crval[cube_masked_freq._wcs.wcs.spec],
outcv.to(u.Hz).value)
assert_allclose(cube_masked_freq._mask._wcs.wcs.crval[cube_masked_freq._mask._wcs.wcs.spec],
outcv.to(u.Hz).value)
@pytest.mark.parametrize(('operation', 'value'),
((operator.add, 0.5*u.K),
(operator.sub, 0.5*u.K),
(operator.mul, 0.5*u.K),
(operator.truediv, 0.5*u.K),
(operator.div if hasattr(operator,'div') else operator.floordiv, 0.5*u.K),
))
def test_apply_everywhere(self, operation, value):
c1, d1 = cube_and_raw('advs.fits')
# append 'o' to indicate that it has been operated on
c1o = c1._apply_everywhere(operation, value)
d1o = operation(u.Quantity(d1, u.K), value)
assert np.all(d1o == c1o.filled_data[:])
# allclose fails on identical data?
#assert_allclose(d1o, c1o.filled_data[:])
@pytest.mark.parametrize(('name', 'trans'), translist)
def test_getitem(self, name, trans):
c, d = cube_and_raw(name + '.fits')
expected = np.squeeze(d.transpose(trans))
assert_allclose(c[0,:,:].value, expected[0,:,:])
assert_allclose(c[:,:,0].value, expected[:,:,0])
assert_allclose(c[:,0,:].value, expected[:,0,:])
# Not implemented:
#assert_allclose(c[0,0,:].value, expected[0,0,:])
#assert_allclose(c[0,:,0].value, expected[0,:,0])
assert_allclose(c[:,0,0].value, expected[:,0,0])
assert_allclose(c[1,:,:].value, expected[1,:,:])
assert_allclose(c[:,:,1].value, expected[:,:,1])
assert_allclose(c[:,1,:].value, expected[:,1,:])
# Not implemented:
#assert_allclose(c[1,1,:].value, expected[1,1,:])
#assert_allclose(c[1,:,1].value, expected[1,:,1])
assert_allclose(c[:,1,1].value, expected[:,1,1])
c2 = c.with_spectral_unit(u.km/u.s, velocity_convention='radio')
assert_allclose(c2[0,:,:].value, expected[0,:,:])
assert_allclose(c2[:,:,0].value, expected[:,:,0])
assert_allclose(c2[:,0,:].value, expected[:,0,:])
# Not implemented:
#assert_allclose(c2[0,0,:].value, expected[0,0,:])
#assert_allclose(c2[0,:,0].value, expected[0,:,0])
assert_allclose(c2[:,0,0].value, expected[:,0,0])
assert_allclose(c2[1,:,:].value, expected[1,:,:])
assert_allclose(c2[:,:,1].value, expected[:,:,1])
assert_allclose(c2[:,1,:].value, expected[:,1,:])
# Not implemented:
#assert_allclose(c2[1,1,:].value, expected[1,1,:])
#assert_allclose(c2[1,:,1].value, expected[1,:,1])
assert_allclose(c2[:,1,1].value, expected[:,1,1])
@pytest.mark.parametrize(('name', 'trans'), translist_vrsc)
def test_getitem_vrsc(self, name, trans):
c, d = cube_and_raw(name + '.fits')
expected = np.squeeze(d.transpose(trans))
# No pv slices for VRSC.
assert_allclose(c[0,:,:].value, expected[0,:,:])
# Not implemented:
#assert_allclose(c[0,0,:].value, expected[0,0,:])
#assert_allclose(c[0,:,0].value, expected[0,:,0])
assert_allclose(c[:,0,0].value, expected[:,0,0])
assert_allclose(c[1,:,:].value, expected[1,:,:])
# Not implemented:
#assert_allclose(c[1,1,:].value, expected[1,1,:])
#assert_allclose(c[1,:,1].value, expected[1,:,1])
assert_allclose(c[:,1,1].value, expected[:,1,1])
c2 = c.with_spectral_unit(u.km/u.s, velocity_convention='radio')
assert_allclose(c2[0,:,:].value, expected[0,:,:])
# Not implemented:
#assert_allclose(c2[0,0,:].value, expected[0,0,:])
#assert_allclose(c2[0,:,0].value, expected[0,:,0])
assert_allclose(c2[:,0,0].value, expected[:,0,0])
assert_allclose(c2[1,:,:].value, expected[1,:,:])
# Not implemented:
#assert_allclose(c2[1,1,:].value, expected[1,1,:])
#assert_allclose(c2[1,:,1].value, expected[1,:,1])
assert_allclose(c2[:,1,1].value, expected[:,1,1])
# @pytest.mark.xfail(raises=AttributeError)
@pytest.mark.parametrize(('name', 'trans'), translist_vrsc)
def test_getitem_vrsc(self, name, trans):
c, d = cube_and_raw(name + '.fits')
expected = np.squeeze(d.transpose(trans))
assert_allclose(c[:,:,0].value, expected[:,:,0])
class TestArithmetic(object):
    """Scalar and cube-cube arithmetic tests on an unmasked 'adv' cube.

    The cube data is overwritten with ``arange`` values so expected
    results are trivially predictable.
    NOTE(review): ``test_cube_add`` duplicates ``test_add_cubes``.
    """

    def setup_method(self, method):
        self.c1, self.d1 = cube_and_raw('adv.fits')
        # make nice easy-to-test numbers
        self.d1.flat[:] = np.arange(self.d1.size)
        self.c1._data.flat[:] = np.arange(self.d1.size)

    @pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
    def test_add(self,value):
        # Scalar addition: quantities must carry the cube's unit (K).
        d2 = self.d1 + value
        c2 = self.c1 + value*u.K
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K

    def test_add_cubes(self):
        d2 = self.d1 + self.d1
        c2 = self.c1 + self.c1
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K

    @pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
    def test_subtract(self, value):
        d2 = self.d1 - value
        c2 = self.c1 - value*u.K
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K

        # regression test #251: the _data attribute must not be a quantity
        assert not hasattr(c2._data, 'unit')

    def test_subtract_cubes(self):
        # Self-subtraction must be exactly zero everywhere.
        d2 = self.d1 - self.d1
        c2 = self.c1 - self.c1
        assert np.all(d2 == c2.filled_data[:].value)
        assert np.all(c2.filled_data[:].value == 0)
        assert c2.unit == u.K

        # regression test #251: the _data attribute must not be a quantity
        assert not hasattr(c2._data, 'unit')

    @pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
    def test_mul(self, value):
        # Scalar multiplication preserves the unit.
        d2 = self.d1 * value
        c2 = self.c1 * value
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K

    def test_mul_cubes(self):
        # Cube*cube squares the unit (K**2).
        d2 = self.d1 * self.d1
        c2 = self.c1 * self.c1
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K**2

    @pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
    def test_div(self, value):
        d2 = self.d1 / value
        c2 = self.c1 / value
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K

    def test_div_cubes(self):
        # Self-division yields 1 (dimensionless) except 0/0 -> NaN.
        d2 = self.d1 / self.d1
        c2 = self.c1 / self.c1
        assert np.all((d2 == c2.filled_data[:].value) | (np.isnan(c2.filled_data[:])))
        assert np.all((c2.filled_data[:] == 1) | (np.isnan(c2.filled_data[:])))
        assert c2.unit == u.dimensionless_unscaled

    @pytest.mark.parametrize(('value'),
                             (1,1.0,2,2.0))
    def test_pow(self, value):
        # Exponentiation raises the unit to the same power.
        d2 = self.d1 ** value
        c2 = self.c1 ** value
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K**value

    def test_cube_add(self):
        c2 = self.c1 + self.c1
        d2 = self.d1 + self.d1
        assert np.all(d2 == c2.filled_data[:].value)
        assert c2.unit == u.K
class TestFilters(BaseTest):
    """Masking, comparison-mask, and flattening tests on the BaseTest cube
    (mask is ``d > 0.5``)."""

    def test_mask_data(self):
        c, d = self.c, self.d
        # Excluded voxels fill with NaN by default, or a supplied value.
        expected = np.where(d > .5, d, np.nan)
        assert_allclose(c._get_filled_data(), expected)

        expected = np.where(d > .5, d, 0)
        assert_allclose(c._get_filled_data(fill=0), expected)

    @pytest.mark.parametrize('operation', (operator.lt, operator.gt, operator.le, operator.ge))
    def test_mask_comparison(self, operation):
        # Comparing a cube against a Quantity yields a mask that, combined
        # with the existing mask, matches the equivalent ndarray mask.
        c, d = self.c, self.d
        dmask = operation(d, 0.6) & self.c.mask.include()
        cmask = operation(c, 0.6*u.K)
        assert (self.c.mask.include() & cmask.include()).sum() == dmask.sum()
        np.testing.assert_almost_equal(c.with_mask(cmask).sum().value,
                                       d[dmask].sum())

    def test_flatten(self):
        # flattened() returns only the included (mask-passing) values.
        c, d = self.c, self.d
        expected = d[d > 0.5]
        assert_allclose(c.flattened(), expected)

    def test_flatten_weights(self):
        # Weights multiply the flattened values element-wise.
        c, d = self.c, self.d
        expected = d[d > 0.5] ** 2
        assert_allclose(c.flattened(weights=d), expected)

    def test_slice(self):
        # Slicing the cube then flattening matches slicing the raw array.
        c, d = self.c, self.d
        expected = d[:3, :2, ::2]
        expected = expected[expected > 0.5]
        assert_allclose(c[0:3, 0:2, 0::2].flattened(), expected)
class TestNumpyMethods(BaseTest):
    """Checks that cube reductions (sum/max/min/median/...) agree with the
    equivalent numpy nan-aware reductions on the raw masked array."""

    def _check_numpy(self, cubemethod, array, func):
        # Compare a cube reduction against the reference numpy reduction for
        # every axis choice.
        # NOTE(review): `how` is iterated but never forwarded to
        # `cubemethod`, so the inner loop repeats identical calls —
        # possibly `cubemethod(axis=axis, how=how)` was intended; confirm
        # before changing, since not every method accepts every `how`.
        for axis in [None, 0, 1, 2]:
            for how in ['auto', 'slice', 'cube', 'ray']:
                expected = func(array, axis=axis)
                actual = cubemethod(axis=axis)
                assert_allclose(actual, expected)

    def test_sum(self):
        # Masked voxels (<= 0.5) become NaN and must be ignored by sum.
        d = np.where(self.d > 0.5, self.d, np.nan)
        self._check_numpy(self.c.sum, d, allbadtonan(np.nansum))
        # Need a secondary check to make sure it works with no
        # axis keyword being passed (regression test for issue introduced in
        # 150)
        assert np.all(self.c.sum().value == np.nansum(d))

    def test_max(self):
        d = np.where(self.d > 0.5, self.d, np.nan)
        self._check_numpy(self.c.max, d, np.nanmax)

    def test_min(self):
        d = np.where(self.d > 0.5, self.d, np.nan)
        self._check_numpy(self.c.min, d, np.nanmin)

    def test_argmax(self):
        # Fill with a very low value so masked voxels never win argmax.
        d = np.where(self.d > 0.5, self.d, -10)
        self._check_numpy(self.c.argmax, d, np.nanargmax)

    def test_argmin(self):
        # Fill with a very high value so masked voxels never win argmin.
        d = np.where(self.d > 0.5, self.d, 10)
        self._check_numpy(self.c.argmin, d, np.nanargmin)

    @pytest.mark.parametrize('iterate_rays', (True,False))
    def test_median(self, iterate_rays):
        # Make sure that medians ignore empty/bad/NaN values
        m = np.empty(self.d.shape[1:])
        for y in range(m.shape[0]):
            for x in range(m.shape[1]):
                ray = self.d[:, y, x]
                # the cube mask is for values >0.5
                ray = ray[ray > 0.5]
                m[y, x] = np.median(ray)
        scmed = self.c.median(axis=0, iterate_rays=iterate_rays)
        assert_allclose(scmed, m)
        assert not np.any(np.isnan(scmed.value))
        assert scmed.unit == self.c.unit

    @pytest.mark.skipif('NUMPY_LT_19')
    def test_bad_median_apply(self):
        # this is a test for manually-applied numpy medians, which are different
        # from the cube.median method that does "the right thing"
        #
        # for regular median, we expect a failure, which is why we don't use
        # regular median.
        scmed = self.c.apply_numpy_function(np.median, axis=0)
        # this checks whether numpy <=1.9.3 has a bug?
        # as far as I can tell, np==1.9.3 no longer has this bug/feature
        #if LooseVersion(np.__version__) <= LooseVersion('1.9.3'):
        #    # print statements added so we get more info in the travis builds
        #    print("Numpy version is: {0}".format(LooseVersion(np.__version__)))
        #    assert np.count_nonzero(np.isnan(scmed)) == 5
        #else:
        #    print("Numpy version is: {0}".format(LooseVersion(np.__version__)))
        assert np.count_nonzero(np.isnan(scmed)) == 6
        scmed = self.c.apply_numpy_function(np.nanmedian, axis=0)
        assert np.count_nonzero(np.isnan(scmed)) == 0
        # use a more aggressive mask to force there to be some all-nan axes
        m2 = self.c>0.65*self.c.unit
        scmed = self.c.with_mask(m2).apply_numpy_function(np.nanmedian, axis=0)
        assert np.count_nonzero(np.isnan(scmed)) == 1

    @pytest.mark.parametrize('iterate_rays', (True,False))
    def test_bad_median(self, iterate_rays):
        # This should have the same result as np.nanmedian, though it might be
        # faster if bottleneck loads
        scmed = self.c.median(axis=0, iterate_rays=iterate_rays)
        assert np.count_nonzero(np.isnan(scmed)) == 0
        m2 = self.c>0.65*self.c.unit
        scmed = self.c.with_mask(m2).median(axis=0, iterate_rays=iterate_rays)
        assert np.count_nonzero(np.isnan(scmed)) == 1

    @pytest.mark.parametrize(('pct', 'iterate_rays'),
                             (zip((3,25,50,75,97)*2,(True,)*5 + (False,)*5)))
    def test_percentile(self, pct, iterate_rays):
        # Reference computed ray-by-ray from the unmasked values only.
        m = np.empty(self.d.sum(axis=0).shape)
        for y in range(m.shape[0]):
            for x in range(m.shape[1]):
                ray = self.d[:, y, x]
                ray = ray[ray > 0.5]
                m[y, x] = np.percentile(ray, pct)
        scpct = self.c.percentile(pct, axis=0, iterate_rays=iterate_rays)
        assert_allclose(scpct, m)
        assert not np.any(np.isnan(scpct.value))
        assert scpct.unit == self.c.unit

    @pytest.mark.parametrize('method', ('sum', 'min', 'max', 'std', 'mad_std',
                                        'median', 'argmin', 'argmax'))
    def test_transpose(self, method):
        # Reductions must be invariant to the on-disk axis ordering
        # ('adv' vs 'vad' fixture files).
        c1, d1 = cube_and_raw('adv.fits')
        c2, d2 = cube_and_raw('vad.fits')
        for axis in [None, 0, 1, 2]:
            assert_allclose(getattr(c1, method)(axis=axis),
                            getattr(c2, method)(axis=axis))
            # check that all these accept progressbar kwargs
            assert_allclose(getattr(c1, method)(axis=axis, progressbar=True),
                            getattr(c2, method)(axis=axis, progressbar=True))
class TestSlab(BaseTest):
    """Tests of spectral-channel lookup and spectral_slab extraction."""

    def test_closest_spectral_channel(self):
        c = self.c
        ms = u.m / u.s
        # Exact channel velocities map to their own index ...
        assert c.closest_spectral_channel(-321214.698632 * ms) == 0
        assert c.closest_spectral_channel(-319926.48366321 * ms) == 1
        assert c.closest_spectral_channel(-318638.26869442 * ms) == 2
        # ... intermediate values snap to the nearest channel, and
        # out-of-range values clamp to the first/last channel.
        assert c.closest_spectral_channel(-320000 * ms) == 1
        assert c.closest_spectral_channel(-340000 * ms) == 0
        assert c.closest_spectral_channel(0 * ms) == 3

    def test_spectral_channel_bad_units(self):
        # A non-spectral unit is rejected outright.
        with pytest.raises(u.UnitsError) as exc:
            self.c.closest_spectral_channel(1 * u.s)
        assert exc.value.args[0] == "'value' should be in frequency equivalent or velocity units (got s)"
        # A frequency against a velocity axis needs an explicit conversion.
        with pytest.raises(u.UnitsError) as exc:
            self.c.closest_spectral_channel(1. * u.Hz)
        assert exc.value.args[0] == "Spectral axis is in velocity units and 'value' is in frequency-equivalent units - use SpectralCube.with_spectral_unit first to convert the cube to frequency-equivalent units, or search for a velocity instead"

    def test_slab(self):
        ms = u.m / u.s
        c2 = self.c.spectral_slab(-320000 * ms, -318600 * ms)
        assert_allclose(c2._data, self.d[1:3])
        assert c2._mask is not None

    def test_slab_reverse_limits(self):
        # Limits given high-to-low must select the same channels.
        ms = u.m / u.s
        c2 = self.c.spectral_slab(-318600 * ms, -320000 * ms)
        assert_allclose(c2._data, self.d[1:3])
        assert c2._mask is not None

    def test_slab_preserves_wcs(self):
        # regression test: taking a slab must not mutate the parent's WCS
        ms = u.m / u.s
        crpix = list(self.c._wcs.wcs.crpix)
        self.c.spectral_slab(-318600 * ms, -320000 * ms)
        assert list(self.c._wcs.wcs.crpix) == crpix
class TestSlabMultiBeams(BaseTestMultiBeams, TestSlab):
    """Re-run every :class:`TestSlab` test against the multi-beam fixture
    provided by ``BaseTestMultiBeams``; no extra behavior is added."""
    pass
class TestRepr(BaseTest):
    """Pin the exact multi-line repr of a cube (shape, unit, axis ranges)."""

    def test_repr(self):
        assert repr(self.c) == """
SpectralCube with shape=(4, 3, 2) and unit=K:
 n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg
 n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg
 n_s: 4 type_s: VOPT unit_s: km / s range: -321.215 km / s: -317.350 km / s
""".strip()

    def test_repr_withunit(self):
        # Overriding the private unit must show up in the repr header.
        self.c._unit = u.Jy
        assert repr(self.c) == """
SpectralCube with shape=(4, 3, 2) and unit=Jy:
 n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg
 n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg
 n_s: 4 type_s: VOPT unit_s: km / s range: -321.215 km / s: -317.350 km / s
""".strip()
@pytest.mark.skipif('not YT_INSTALLED')
class TestYt():
    """Checks for exporting a SpectralCube to a yt dataset via ``to_yt``."""

    def setup_method(self, method):
        self.cube = SpectralCube.read(path('adv.fits'))
        # Without any special arguments
        self.ytc1 = self.cube.to_yt()
        # With spectral factor = 0.5
        self.spectral_factor = 0.5
        self.ytc2 = self.cube.to_yt(spectral_factor=self.spectral_factor)
        # With nprocs = 4
        self.nprocs = 4
        self.ytc3 = self.cube.to_yt(nprocs=self.nprocs)

    def test_yt(self):
        # The following assertions just make sure everything is
        # kosher with the datasets generated in different ways
        ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3
        ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset
        assert_array_equal(ds1.domain_dimensions, ds2.domain_dimensions)
        assert_array_equal(ds2.domain_dimensions, ds3.domain_dimensions)
        assert_allclose(ds1.domain_left_edge.value, ds2.domain_left_edge.value)
        assert_allclose(ds2.domain_left_edge.value, ds3.domain_left_edge.value)
        # The spectral_factor=0.5 dataset is compressed along the spectral
        # axis, so rescale that axis before comparing widths.
        assert_allclose(ds1.domain_width.value,
                        ds2.domain_width.value*np.array([1,1,1.0/self.spectral_factor]))
        assert_allclose(ds1.domain_width.value, ds3.domain_width.value)
        assert self.nprocs == len(ds3.index.grids)
        # Force index construction so field definitions get populated.
        ds1.index
        ds2.index
        ds3.index
        unit1 = ds1.field_info["fits","flux"].units
        unit2 = ds2.field_info["fits","flux"].units
        unit3 = ds3.field_info["fits","flux"].units
        # The flux unit strings must parse into valid yt quantities.
        ds1.quan(1.0,unit1)
        ds2.quan(1.0,unit2)
        ds3.quan(1.0,unit3)

    @pytest.mark.skipif('YT_LT_301', reason='yt 3.0 has a FITS-related bug')
    def test_yt_fluxcompare(self):
        # Now check that we can compute quantities of the flux
        # and that they are equal
        ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3
        ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset
        dd1 = ds1.all_data()
        dd2 = ds2.all_data()
        dd3 = ds3.all_data()
        flux1_tot = dd1.quantities.total_quantity("flux")
        flux2_tot = dd2.quantities.total_quantity("flux")
        flux3_tot = dd3.quantities.total_quantity("flux")
        flux1_min, flux1_max = dd1.quantities.extrema("flux")
        flux2_min, flux2_max = dd2.quantities.extrema("flux")
        flux3_min, flux3_max = dd3.quantities.extrema("flux")
        assert flux1_tot == flux2_tot
        assert flux1_tot == flux3_tot
        assert flux1_min == flux2_min
        assert flux1_min == flux3_min
        assert flux1_max == flux2_max
        assert flux1_max == flux3_max

    def test_yt_roundtrip_wcs(self):
        # Now test round-trip conversions between yt and world coordinates:
        # a random point inside each domain must map yt -> world -> yt.
        ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3
        ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset
        yt_coord1 = ds1.domain_left_edge + np.random.random(size=3)*ds1.domain_width
        world_coord1 = ytc1.yt2world(yt_coord1)
        assert_allclose(ytc1.world2yt(world_coord1), yt_coord1.value)
        yt_coord2 = ds2.domain_left_edge + np.random.random(size=3)*ds2.domain_width
        world_coord2 = ytc2.yt2world(yt_coord2)
        assert_allclose(ytc2.world2yt(world_coord2), yt_coord2.value)
        yt_coord3 = ds3.domain_left_edge + np.random.random(size=3)*ds3.domain_width
        world_coord3 = ytc3.yt2world(yt_coord3)
        assert_allclose(ytc3.world2yt(world_coord3), yt_coord3.value)
def test_read_write_rountrip(tmpdir):
    """Write a cube to FITS and read it back; data, shape, and (for old wcslib)
    the WCS header must round-trip.

    NOTE: the typo in the function name ("rountrip") is kept so existing
    test selections (``-k``) keep matching.
    """
    cube = SpectralCube.read(path('adv.fits'))
    tmp_file = str(tmpdir.join('test.fits'))
    cube.write(tmp_file)
    cube2 = SpectralCube.read(tmp_file)

    # BUG FIX: the original asserted ``cube.shape == cube.shape``, which is
    # trivially true; the intent is to compare against the re-read cube.
    assert cube.shape == cube2.shape
    assert_allclose(cube._data, cube2._data)
    if (((hasattr(_wcs, '__version__')
          and LooseVersion(_wcs.__version__) < LooseVersion('5.9'))
         or not hasattr(_wcs, '__version__'))):
        # see https://github.com/astropy/astropy/pull/3992 for reasons:
        # we should upgrade this for 5.10 when the absolute accuracy is
        # maximized
        assert cube._wcs.to_header_string() == cube2._wcs.to_header_string()
    # in 5.11 and maybe even 5.12, the round trip fails. Maybe
    # https://github.com/astropy/astropy/issues/4292 will solve it?
@pytest.mark.parametrize(('memmap', 'base'),
                         ((True, mmap.mmap),
                          (False, None)))
def test_read_memmap(memmap, base):
    """With memmap=True the data bottoms out in an mmap buffer; without,
    the array owns its memory (root base is None)."""
    cube = SpectralCube.read(path('adv.fits'), memmap=memmap)
    # Walk the chain of .base references down to the underlying buffer.
    root = cube.base
    while hasattr(root, 'base'):
        root = root.base
    if base is not None:
        assert isinstance(root, base)
    else:
        assert root is None
def _dummy_cube():
    """Build a tiny 1x1x5 SpectralCube masked to strictly positive values."""
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-HEL']

    def positive_only(data, wcs, view=()):
        # FunctionMask callback: include only values above zero.
        return data[view] > 0

    data = np.array([[[0, 1, 2, 3, 4]]])
    return SpectralCube(data, wcs=wcs, mask=FunctionMask(positive_only))
def test_with_mask():
    """with_mask composes a new mask with the cube's existing one."""
    def below_three(data, wcs, view=()):
        return data[view] < 3

    original = _dummy_cube()
    combined = original.with_mask(FunctionMask(below_three))
    # The original mask excludes 0; the composed one also excludes >= 3.
    assert_allclose(original._get_filled_data(), [[[np.nan, 1, 2, 3, 4]]])
    assert_allclose(combined._get_filled_data(), [[[np.nan, 1, 2, np.nan, np.nan]]])
def test_with_mask_with_boolean_array():
    """A raw boolean ndarray is wrapped into a BooleanArrayMask."""
    cube = _dummy_cube()
    boolmask = cube._data > 2
    masked = cube.with_mask(boolmask, inherit_mask=False)
    assert isinstance(masked._mask, BooleanArrayMask)
    # The wrapper must reuse the cube's WCS and the exact array we passed in.
    assert masked._mask._wcs is cube._wcs
    assert masked._mask._mask is boolmask
def test_with_mask_with_good_array_shape():
    """A lower-dimensional boolean mask is broadcast up to the cube shape."""
    cube = _dummy_cube()
    # FIX: ``np.bool`` was a deprecated alias for the builtin ``bool``
    # (removed in numpy 1.24); use the builtin directly.
    mask = np.zeros((1, 5), dtype=bool)
    cube2 = cube.with_mask(mask, inherit_mask=False)
    assert isinstance(cube2._mask, BooleanArrayMask)
    np.testing.assert_equal(cube2._mask._mask, mask.reshape((1, 1, 5)))
def test_with_mask_with_bad_array_shape():
    """A mask that cannot broadcast to the data shape raises ValueError."""
    cube = _dummy_cube()
    # FIX: ``np.bool`` was a deprecated alias for the builtin ``bool``
    # (removed in numpy 1.24); use the builtin directly.
    mask = np.zeros((5, 5), dtype=bool)
    with pytest.raises(ValueError) as exc:
        cube.with_mask(mask)
    assert exc.value.args[0] == ("Mask shape is not broadcastable to data shape: "
                                 "(5, 5) vs (1, 1, 5)")
class TestMasks(BaseTest):
    """Comparison operators on a cube must produce masks equivalent to the
    same comparison on the raw array."""

    @pytest.mark.parametrize('op', (operator.gt, operator.lt,
                             operator.le, operator.ge))
    def test_operator_threshold(self, op):
        # choose thresh to exercise proper equality tests
        thresh = self.d.ravel()[0]
        # Replace the cube's mask in place with the comparison result, then
        # check that flattening yields exactly the numpy-selected values.
        m = op(self.c, thresh*u.K)
        self.c._mask = m
        expected = self.d[op(self.d, thresh)]
        actual = self.c.flattened()
        assert_allclose(actual, expected)
def test_preserve_spectral_unit():
    # astropy.wcs has a tendency to change spectral units from e.g. km/s to
    # m/s, so we have a workaround - check that it works.
    cube, data = cube_and_raw('advs.fits')

    freq_cube = cube.with_spectral_unit(u.GHz)
    # Internally the WCS stores Hz, but the user-facing axis must stay GHz.
    assert freq_cube.wcs.wcs.cunit[2] == 'Hz'
    assert freq_cube.spectral_axis.unit is u.GHz

    # The preferred unit must survive derived-cube construction.
    new_cube = freq_cube.with_fill_value(fill_value=3.4)
    assert new_cube.spectral_axis.unit is u.GHz
def test_endians():
    """
    Test that the endianness checking returns something in Native form
    (this is only needed for non-numpy functions that worry about the
    endianness of their data)
    WARNING: Because the endianness is machine-dependent, this may fail on
    different architectures!  This is because numpy automatically converts
    little-endian to native in the dtype parameter; I need a workaround for
    this.
    """
    pytest.importorskip('bottleneck')
    # Identical data, explicitly big- and little-endian on disk.
    big = np.array([[[1],[2]]], dtype='>f4')
    lil = np.array([[[1],[2]]], dtype='<f4')
    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype[0] = 'RA'
    mywcs.wcs.ctype[1] = 'DEC'
    mywcs.wcs.ctype[2] = 'VELO'
    bigcube = SpectralCube(data=big, wcs=mywcs)
    xbig = bigcube._get_filled_data(check_endian=True)
    lilcube = SpectralCube(data=lil, wcs=mywcs)
    xlil = lilcube._get_filled_data(check_endian=True)
    # With the check enabled, both come back in native ('=') byte order.
    assert xbig.dtype.byteorder == '='
    assert xlil.dtype.byteorder == '='
    # Without it, the big-endian array keeps its '>' order (little-endian
    # was already converted to native by numpy, per the warning above).
    xbig = bigcube._get_filled_data(check_endian=False)
    xlil = lilcube._get_filled_data(check_endian=False)
    assert xbig.dtype.byteorder == '>'
    assert xlil.dtype.byteorder == '='
def test_header_naxis():
    """The header reflects the 3D cube even when the file had a 4th axis."""
    cube, data = cube_and_raw('advs.fits')
    assert cube.header['NAXIS'] == 3  # NOT data.ndim == 4
    assert 'NAXIS4' not in cube.header
    # NAXISn keywords are in FITS (fastest-first) order, i.e. reversed
    # relative to the numpy shape of the raw 4D data.
    for fits_axis, numpy_axis in ((1, 3), (2, 2), (3, 1)):
        assert cube.header['NAXIS%i' % fits_axis] == data.shape[numpy_axis]
def test_slicing():
    cube, data = cube_and_raw('advs.fits')
    # just to check that we're starting in the right place
    assert cube.shape == (2,3,4)

    # An integer index drops an axis; a slice keeps it.
    twod = cube[:, 1, :]
    assert twod.shape == (2, 4)
    subcube = cube[1:2, :, :]
    assert subcube.shape == (1, 3, 4)
    # make sure this works. Not sure what keys to test for...
    subcube.header

    # Missing trailing axes default to full slices.
    for view in (cube[:, :, :], cube[:, :], cube[:]):
        assert view.shape == (2, 3, 4)
    assert cube[:1, :1, :1].shape == (1, 1, 1)
@pytest.mark.parametrize(('view','naxis'),
                         [((slice(None), 1, slice(None)), 2),
                          ((1, slice(None), slice(None)), 2),
                          ((slice(None), slice(None), 1), 2),
                          ((slice(None), slice(None), slice(1)), 3),
                          ((slice(1), slice(1), slice(1)), 3),
                          ((slice(None, None, -1), slice(None), slice(None)), 3),
                         ])
def test_slice_wcs(view, naxis):
    # An integer index drops a WCS axis (naxis 3 -> 2); slices — including
    # length-1 and reversed ones — keep all three axes.
    cube, data = cube_and_raw('advs.fits')
    sl = cube[view]
    assert sl.wcs.naxis == naxis
def test_slice_wcs_reversal():
    # Reversing the spectral axis twice must restore the original axis and
    # world coordinates exactly.
    cube, data = cube_and_raw('advs.fits')
    view = (slice(None,None,-1), slice(None), slice(None))
    rcube = cube[view]
    rrcube = rcube[view]
    # One reversal flips the channel spacing sign ...
    np.testing.assert_array_equal(np.diff(cube.spectral_axis),
                                  -np.diff(rcube.spectral_axis))
    # ... and two reversals are the identity.
    np.testing.assert_array_equal(rrcube.spectral_axis.value,
                                  cube.spectral_axis.value)
    np.testing.assert_array_equal(rcube.spectral_axis.value,
                                  cube.spectral_axis.value[::-1])
    np.testing.assert_array_equal(rrcube.world_extrema.value,
                                  cube.world_extrema.value)
    # check that the lon, lat arrays are *entirely* unchanged
    np.testing.assert_array_equal(rrcube.spatial_coordinate_map[0].value,
                                  cube.spatial_coordinate_map[0].value)
    np.testing.assert_array_equal(rrcube.spatial_coordinate_map[1].value,
                                  cube.spatial_coordinate_map[1].value)
def test_spectral_slice_preserve_units():
    """A 1D spectral slice inherits the cube's preferred spectral unit."""
    cube, data = cube_and_raw('advs.fits')
    cube = cube.with_spectral_unit(u.km/u.s)
    spectrum = cube[:, 0, 0]
    for obj in (cube, spectrum):
        assert obj._spectral_unit == u.km/u.s
        assert obj.spectral_axis.unit == u.km/u.s
def test_header_units_consistent():
    # CUNIT3/CDELT3 in the exported header must track with_spectral_unit.
    cube, data = cube_and_raw('advs.fits')
    cube_ms = cube.with_spectral_unit(u.m/u.s)
    cube_kms = cube.with_spectral_unit(u.km/u.s)
    cube_Mms = cube.with_spectral_unit(u.Mm/u.s)
    assert cube.header['CUNIT3'] == 'km s-1'
    assert cube_ms.header['CUNIT3'] == 'm s-1'
    assert cube_kms.header['CUNIT3'] == 'km s-1'
    assert cube_Mms.header['CUNIT3'] == 'Mm s-1'
    # CDELT3 rescales by the unit ratio.
    # Wow, the tolerance here is really terrible...
    assert_allclose(cube_Mms.header['CDELT3'], cube.header['CDELT3']/1e3,rtol=1e-3,atol=1e-5)
    assert_allclose(cube.header['CDELT3'], cube_kms.header['CDELT3'],rtol=1e-2,atol=1e-5)
    assert_allclose(cube.header['CDELT3']*1e3, cube_ms.header['CDELT3'],rtol=1e-2,atol=1e-5)
    # Frequency conversions update CUNIT3 as well.
    cube_freq = cube.with_spectral_unit(u.Hz)
    assert cube_freq.header['CUNIT3'] == 'Hz'
    cube_freq_GHz = cube.with_spectral_unit(u.GHz)
    assert cube_freq_GHz.header['CUNIT3'] == 'GHz'
def test_spectral_unit_conventions():
    # The three Doppler conventions must agree at the rest value (0 km/s)
    # but differ everywhere else.
    cube, data = cube_and_raw('advs.fits')
    cube_frq = cube.with_spectral_unit(u.Hz)
    cube_opt = cube.with_spectral_unit(u.km/u.s,
                                       rest_value=cube_frq.spectral_axis[0],
                                       velocity_convention='optical')
    cube_rad = cube.with_spectral_unit(u.km/u.s,
                                       rest_value=cube_frq.spectral_axis[0],
                                       velocity_convention='radio')
    cube_rel = cube.with_spectral_unit(u.km/u.s,
                                       rest_value=cube_frq.spectral_axis[0],
                                       velocity_convention='relativistic')
    # should all be exactly 0 km/s
    for x in (cube_rel.spectral_axis[0], cube_rad.spectral_axis[0],
              cube_opt.spectral_axis[0]):
        np.testing.assert_almost_equal(0,x.value)
    # Away from the rest value the conventions diverge pairwise.
    assert cube_rel.spectral_axis[1] != cube_rad.spectral_axis[1]
    assert cube_opt.spectral_axis[1] != cube_rad.spectral_axis[1]
    assert cube_rel.spectral_axis[1] != cube_opt.spectral_axis[1]
    # Each cube records which astropy doppler equivalency it used.
    assert cube_rel.velocity_convention == u.doppler_relativistic
    assert cube_rad.velocity_convention == u.doppler_radio
    assert cube_opt.velocity_convention == u.doppler_optical
def test_invalid_spectral_unit_conventions():
    """An unrecognized velocity convention raises a descriptive ValueError."""
    cube, data = cube_and_raw('advs.fits')
    with pytest.raises(ValueError) as exc:
        cube.with_spectral_unit(u.km/u.s,
                                velocity_convention='invalid velocity convention')
    expected_msg = ("Velocity convention must be radio, optical, "
                    "or relativistic.")
    assert exc.value.args[0] == expected_msg
@pytest.mark.parametrize('rest', (50, 50*u.K))
def test_invalid_rest(rest):
    # A rest value that is unitless or has a non-spectral unit must be
    # rejected with a descriptive ValueError.
    cube, data = cube_and_raw('advs.fits')
    with pytest.raises(ValueError) as exc:
        cube.with_spectral_unit(u.km/u.s,
                                velocity_convention='radio',
                                rest_value=rest)
    assert exc.value.args[0] == ("Rest value must be specified as an astropy "
                                 "quantity with spectral equivalence.")
def test_airwave_to_wave():
    # Convert an air-wavelength (AWAV) axis to vacuum wavelength and check
    # it matches the reference air_to_vac conversion.
    cube, data = cube_and_raw('advs.fits')
    # Rewrite the fixture's spectral axis in place to be AWAV in metres
    # (NOTE(review): mutating private _wcs/_spectral_unit fields — this
    # depends on the cube not caching the spectral axis beforehand).
    cube._wcs.wcs.ctype[2] = 'AWAV'
    cube._wcs.wcs.cunit[2] = 'm'
    cube._spectral_unit = u.m
    cube._wcs.wcs.cdelt[2] = 1e-7
    cube._wcs.wcs.crval[2] = 5e-7
    ax1 = cube.spectral_axis
    ax2 = cube.with_spectral_unit(u.m).spectral_axis
    np.testing.assert_almost_equal(spectral_axis.air_to_vac(ax1).value,
                                   ax2.value)
@pytest.mark.parametrize(('func','how','axis'),
                         itertools.product(('sum','std','max','min','mean'),
                                           ('slice','cube','auto'),
                                           (0,1,2)
                                          ))
def test_twod_numpy(func, how, axis):
    # Check that a numpy function returns the correct result when applied along
    # one axis
    # This is partly a regression test for #211
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K
    proj = getattr(cube,func)(axis=axis, how=how)
    # data has a redundant 1st axis
    dproj = getattr(data,func)(axis=(0,axis+1)).squeeze()
    # A single-axis reduction of a 3D cube is a 2D Projection that keeps
    # the cube's unit.
    assert isinstance(proj, Projection)
    np.testing.assert_equal(proj.value, dproj)
    assert cube.unit == proj.unit
@pytest.mark.parametrize(('func','how','axis'),
                         itertools.product(('sum','std','max','min','mean'),
                                           ('slice','cube','auto'),
                                           ((0,1),(1,2),(0,2))
                                          ))
def test_twod_numpy_twoaxes(func, how, axis):
    # Check that a numpy function returns the correct result when applied along
    # one axis
    # This is partly a regression test for #211
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K
    # Averaging over a mixed spatial+spectral pair warns; capture and check
    # the message first, then recompute the result outside the catcher.
    if func == 'mean' and axis != (1,2):
        with warnings.catch_warnings(record=True) as wrn:
            spec = getattr(cube,func)(axis=axis, how=how)
        assert 'Averaging over a spatial and a spectral' in str(wrn[-1].message)
    spec = getattr(cube,func)(axis=axis, how=how)
    # data has a redundant 1st axis
    dspec = getattr(data.squeeze(),func)(axis=axis)
    if axis == (1,2):
        # Collapsing both spatial axes yields a 1D spectrum with units.
        assert isinstance(spec, OneDSpectrum)
        assert cube.unit == spec.unit
        np.testing.assert_almost_equal(spec.value, dspec)
    else:
        np.testing.assert_almost_equal(spec, dspec)
def test_preserves_header_values():
    # Check that the non-WCS header parameters are preserved during projection
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K
    cube._header['OBJECT'] = 'TestName'

    projection = cube.sum(axis=0, how='auto')
    assert isinstance(projection, Projection)
    # The custom keyword must survive both on the object and on its HDU.
    assert projection.header['OBJECT'] == 'TestName'
    assert projection.hdu.header['OBJECT'] == 'TestName'
def test_preserves_header_meta_values():
    # Check that additional parameters in meta are preserved
    cube, data = cube_and_raw('advs.fits')
    cube.meta['foo'] = 'bar'
    assert cube.header['FOO'] == 'bar'
    # check that long keywords are also preserved (FITS keywords are
    # limited to 8 characters, so long ones go into COMMENT cards)
    cube.meta['too_long_keyword'] = 'too_long_information'
    assert 'too_long_keyword=too_long_information' in cube.header['COMMENT']
    # Checks that the header is preserved when passed to LDOs
    for ldo in (cube.sum(axis=0, how='auto'), cube[:,0,0]):
        assert isinstance(ldo, LowerDimensionalObject)
        assert ldo.header['FOO'] == 'bar'
        assert ldo.hdu.header['FOO'] == 'bar'
        # make sure that the meta preservation works on the LDOs themselves too
        ldo.meta['bar'] = 'foo'
        assert ldo.header['BAR'] == 'foo'
        assert 'too_long_keyword=too_long_information' in ldo.header['COMMENT']
@pytest.mark.parametrize('func',('sum','std','max','min','mean'))
def test_oned_numpy(func):
    # Check that a numpy function returns an appropriate spectrum
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K
    # Collapsing both spatial axes must yield a OneDSpectrum in the cube's
    # unit that matches the raw-array reduction.
    spec = getattr(cube,func)(axis=(1,2))
    dspec = getattr(data,func)(axis=(2,3)).squeeze()
    assert isinstance(spec, OneDSpectrum)
    # data has a redundant 1st axis
    np.testing.assert_equal(spec.value, dspec)
    assert cube.unit == spec.unit
def test_oned_slice():
    # Check that a slice returns an appropriate spectrum
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K

    spectrum = cube[:, 0, 0]
    assert isinstance(spectrum, OneDSpectrum)
    # data has a redundant 1st axis
    np.testing.assert_equal(spectrum.value, data[0, :, 0, 0])
    assert spectrum.unit == cube.unit
    assert spectrum.header['BUNIT'] == cube.header['BUNIT']
def test_oned_slice_beams():
    # Check that a slice returns an appropriate spectrum
    cube, data = cube_and_raw('sdav_beams.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K

    spectrum = cube[:, 0, 0]
    assert isinstance(spectrum, VaryingResolutionOneDSpectrum)
    # data has a redundant 1st axis
    np.testing.assert_equal(spectrum.value, data[:, 0, 0, 0])
    assert spectrum.unit == cube.unit
    assert spectrum.header['BUNIT'] == cube.header['BUNIT']
    # Per-channel beams must ride along with the sliced spectrum.
    assert hasattr(spectrum, 'beams')
    assert 'BMAJ' in spectrum.hdulist[1].data.names
def test_subcube_slab_beams():
    # A spectral slice of a multi-beam cube must renumber its channel table
    # and carry along the corresponding subset of beams.
    cube, data = cube_and_raw('sdav_beams.fits')
    slcube = cube[1:]
    assert all(slcube.hdulist[1].data['CHAN'] == np.arange(slcube.shape[0]))
    try:
        # Make sure Beams has been sliced correctly
        assert all(cube.beams[1:] == slcube.beams)
    except TypeError:
        # in 69eac9241220d3552c06b173944cb7cdebeb47ef, radio_beam switched to
        # returning a single value
        assert cube.beams[1:] == slcube.beams
# collapsing to one dimension raywise doesn't make sense and is therefore
# not supported.
@pytest.mark.parametrize('how', ('auto', 'cube', 'slice'))
def test_oned_collapse(how):
    # Check that an operation along the spatial dims returns an appropriate
    # spectrum
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K

    spectrum = cube.mean(axis=(1, 2), how=how)
    assert isinstance(spectrum, OneDSpectrum)
    # data has a redundant 1st axis
    np.testing.assert_equal(spectrum.value, data.mean(axis=(0, 2, 3)))
    assert spectrum.unit == cube.unit
    assert spectrum.header['BUNIT'] == cube.header['BUNIT']
def test_oned_collapse_beams():
    # Check that an operation along the spatial dims returns an appropriate
    # spectrum
    cube, data = cube_and_raw('sdav_beams.fits')
    cube._meta['BUNIT'] = 'K'
    cube._unit = u.K

    spectrum = cube.mean(axis=(1, 2))
    assert isinstance(spectrum, VaryingResolutionOneDSpectrum)
    # data has a redundant 1st axis
    np.testing.assert_equal(spectrum.value, data.mean(axis=(1, 2, 3)))
    assert spectrum.unit == cube.unit
    assert spectrum.header['BUNIT'] == cube.header['BUNIT']
    # Per-channel beams must survive the spatial collapse.
    assert hasattr(spectrum, 'beams')
    assert 'BMAJ' in spectrum.hdulist[1].data.names
def test_preserve_bunit():
    """BUNIT from the FITS header must define the cube's unit."""
    cube, data = cube_and_raw('advs.fits')
    assert cube.header['BUNIT'] == 'K'

    # FIX: use a context manager so the HDUList opened here is closed (the
    # original leaked the open file handle returned by fits.open).
    with fits.open(path('advs.fits')) as hdul:
        hdu = hdul[0]
        hdu.header['BUNIT'] = 'Jy'
        cube = SpectralCube.read(hdu)
        assert cube.unit == u.Jy
        assert cube.header['BUNIT'] == 'Jy'
def test_preserve_beam():
    """The beam parsed from the FITS header is attached to the cube."""
    cube, data = cube_and_raw('advs.fits')
    expected_beam = Beam.from_fits_header(path("advs.fits"))
    assert cube.beam == expected_beam
def test_beam_attach_to_header():
    """An explicitly supplied beam is written back into header and meta."""
    cube, data = cube_and_raw('adv.fits')
    beamless_header = cube._header.copy()
    del beamless_header["BMAJ"], beamless_header["BMIN"], beamless_header["BPA"]

    newcube = SpectralCube(data=data, wcs=cube.wcs, header=beamless_header,
                           beam=cube.beam)
    for key in ("BMAJ", "BMIN", "BPA"):
        assert cube.header[key] == newcube.header[key]
    # Should be in meta too
    assert newcube.meta['beam'] == cube.beam
def test_beam_custom():
    # with_beam must attach a beam to a beamless cube and update both the
    # header keywords and meta; re-attaching a different beam must replace it.
    cube, data = cube_and_raw('adv.fits')
    header = cube._header.copy()
    beam = Beam.from_fits_header(header)
    del header["BMAJ"], header["BMIN"], header["BPA"]
    newcube = SpectralCube(data=data, wcs=cube.wcs, header=header)
    # newcube should now not have a beam
    assert not hasattr(newcube, "beam")
    # Attach the beam
    newcube = newcube.with_beam(beam=beam)
    assert newcube.beam == cube.beam
    # Header should be updated
    assert cube.header["BMAJ"] == newcube.header["BMAJ"]
    assert cube.header["BMIN"] == newcube.header["BMIN"]
    assert cube.header["BPA"] == newcube.header["BPA"]
    # Should be in meta too
    assert newcube.meta['beam'] == cube.beam
    # Try changing the beam properties
    newbeam = Beam(beam.major * 2)
    newcube2 = newcube.with_beam(beam=newbeam)
    assert newcube2.beam == newbeam
    # Header should be updated
    assert newcube2.header["BMAJ"] == newbeam.major.value
    assert newcube2.header["BMIN"] == newbeam.minor.value
    assert newcube2.header["BPA"] == newbeam.pa.value
    # Should be in meta too
    assert newcube2.meta['beam'] == newbeam
def test_multibeam_custom():
    # with_beams must replace the per-channel beam table of a multi-beam cube.
    cube, data = cube_and_raw('vda_beams.fits')
    # Make a new set of beams that differs from the original.
    new_beams = Beams([1.] * cube.shape[0] * u.deg)
    # Attach the beam
    newcube = cube.with_beams(new_beams)
    try:
        assert all(new_beams == newcube.beams)
    except TypeError:
        # in 69eac9241220d3552c06b173944cb7cdebeb47ef, radio_beam switched to
        # returning a single value
        assert new_beams == newcube.beams
@pytest.mark.xfail(raises=ValueError, strict=True)
def test_multibeam_custom_wrongshape():
    """Attaching a beam table of the wrong length must raise ValueError."""
    cube, data = cube_and_raw('vda_beams.fits')
    # A full-length replacement table, truncated to the wrong length.
    replacement = Beams([1.] * cube.shape[0] * u.deg)
    cube.with_beams(replacement[:1])
def test_multibeam_slice():
    """Slicing a multi-beam cube must carry the matching beams along."""
    cube, data = cube_and_raw('vda_beams.fits')
    assert isinstance(cube, VaryingResolutionSpectralCube)
    np.testing.assert_almost_equal(cube.beams[0].major.value, 0.4)
    np.testing.assert_almost_equal(cube.beams[0].minor.value, 0.1)
    np.testing.assert_almost_equal(cube.beams[3].major.value, 0.4)

    # A spectral sub-cube keeps the corresponding per-channel beams.
    scube = cube[:2,:,:]
    np.testing.assert_almost_equal(scube.beams[0].major.value, 0.4)
    np.testing.assert_almost_equal(scube.beams[0].minor.value, 0.1)
    np.testing.assert_almost_equal(scube.beams[1].major.value, 0.3)
    np.testing.assert_almost_equal(scube.beams[1].minor.value, 0.2)

    # A 2D channel slice records its single beam in the header.
    flatslice = cube[0,:,:]
    np.testing.assert_almost_equal(flatslice.header['BMAJ'],
                                   (0.4/3600.))

    # Test returning a VRODS
    spec = cube[:, 0, 0]
    assert (cube.beams == spec.beams).all()

    # And make sure that Beams gets sliced for part of a spectrum.
    # BUG FIX: the original compared against ``spec.beams[0]``, which is
    # trivially equal to ``cube.beams[0]`` because ``spec`` is the full
    # spectrum; the freshly sliced ``spec_part`` is what this check is
    # meant to exercise.
    spec_part = cube[:1, 0, 0]
    assert cube.beams[0] == spec_part.beams[0]
def test_basic_unit_conversion():
    """K -> mK conversion scales the data by exactly 1e3."""
    cube, data = cube_and_raw('advs.fits')
    assert cube.unit == u.K
    converted = cube.to(u.mK)
    np.testing.assert_almost_equal(converted.filled_data[:].value,
                                   cube.filled_data[:].value * 1e3)
def test_basic_unit_conversion_beams():
    """Scalar unit conversion also works on a multi-beam cube."""
    cube, data = cube_and_raw('vda_beams.fits')
    # want beams, but we want to force the unit to be something non-beamy
    cube._unit = u.K
    cube._meta['BUNIT'] = 'K'
    assert cube.unit == u.K

    converted = cube.to(u.mK)
    np.testing.assert_almost_equal(converted.filled_data[:].value,
                                   cube.filled_data[:].value * 1e3)
def test_beam_jtok_array():
    # Jy/beam -> K conversion must apply the per-channel Jy->K factors,
    # both with an explicit equivalency and with the automatic one.
    cube, data = cube_and_raw('advs.fits')
    cube._meta['BUNIT'] = 'Jy / beam'
    cube._unit = u.Jy/u.beam
    equiv = cube.beam.jtok_equiv(cube.with_spectral_unit(u.GHz).spectral_axis)
    jtok = cube.beam.jtok(cube.with_spectral_unit(u.GHz).spectral_axis)
    Kcube = cube.to(u.K, equivalencies=equiv)
    np.testing.assert_almost_equal(Kcube.filled_data[:].value,
                                   (cube.filled_data[:].value *
                                    jtok[:,None,None]).value)
    # test that the beam equivalencies are correctly automatically defined
    Kcube = cube.to(u.K)
    np.testing.assert_almost_equal(Kcube.filled_data[:].value,
                                   (cube.filled_data[:].value *
                                    jtok[:,None,None]).value)
def test_multibeam_jtok_array():
    # For a multi-beam cube the Jy/beam -> K conversion must use each
    # channel's own beam; only the automatic equivalency path is tested.
    cube, data = cube_and_raw('vda_beams.fits')
    assert cube.meta['BUNIT'].strip() == 'Jy / beam'
    assert cube.unit.is_equivalent(u.Jy/u.beam)
    #equiv = [bm.jtok_equiv(frq) for bm, frq in zip(cube.beams, cube.with_spectral_unit(u.GHz).spectral_axis)]
    jtok = u.Quantity([bm.jtok(frq) for bm, frq in zip(cube.beams, cube.with_spectral_unit(u.GHz).spectral_axis)])
    # don't try this, it's nonsense for the multibeam case
    # Kcube = cube.to(u.K, equivalencies=equiv)
    # np.testing.assert_almost_equal(Kcube.filled_data[:].value,
    #                                (cube.filled_data[:].value *
    #                                 jtok[:,None,None]).value)
    # test that the beam equivalencies are correctly automatically defined
    Kcube = cube.to(u.K)
    np.testing.assert_almost_equal(Kcube.filled_data[:].value,
                                   (cube.filled_data[:].value *
                                    jtok[:,None,None]).value)
def test_beam_jtok():
    # regression test for an error introduced when the previous test was solved
    # (the "is this an array?" test used len(x) where x could be scalar)
    cube, data = cube_and_raw('advs.fits')
    # technically this should be jy/beam, but astropy's equivalency doesn't
    # handle this yet
    cube._meta['BUNIT'] = 'Jy'
    cube._unit = u.Jy
    # Use a single (median-frequency) scalar conversion factor.
    equiv = cube.beam.jtok_equiv(np.median(cube.with_spectral_unit(u.GHz).spectral_axis))
    jtok = cube.beam.jtok(np.median(cube.with_spectral_unit(u.GHz).spectral_axis))
    Kcube = cube.to(u.K, equivalencies=equiv)
    np.testing.assert_almost_equal(Kcube.filled_data[:].value,
                                   (cube.filled_data[:].value *
                                    jtok).value)
def test_varyres_moment():
    # Moment maps over a multi-beam cube warn about arithmetic beam
    # averaging and record the average beam in meta.
    cube, data = cube_and_raw('vda_beams.fits')
    assert isinstance(cube, VaryingResolutionSpectralCube)
    # the beams are very different, but for this test we don't care
    cube.beam_threshold = 1.0
    with warnings.catch_warnings(record=True) as wrn:
        warnings.simplefilter('default')
        m0 = cube.moment0()
    assert "Arithmetic beam averaging is being performed" in str(wrn[-1].message)
    assert_quantity_allclose(m0.meta['beam'].major, 0.35*u.arcsec)
def test_append_beam_to_hdr():
    """Beam keywords from the file header are carried over to cube.header."""
    cube, data = cube_and_raw('advs.fits')
    orig_hdr = fits.getheader(path('advs.fits'))
    for key in ('BMAJ', 'BMIN', 'BPA'):
        assert cube.header[key] == orig_hdr[key]
def test_cube_with_swapped_axes():
    """
    Regression test for #208: a cube whose FITS axes are stored in a
    swapped order must still support masked data access.
    """
    cube, data = cube_and_raw('vda.fits')
    # Check that masking works (this should apply a lazy mask)
    cube.filled_data[:]
def test_jybeam_upper():
    """An upper-case 'JY/BEAM' BUNIT must parse to Jy/beam with a beam."""
    cube, data = cube_and_raw('vda_JYBEAM_upper.fits')
    assert cube.unit == u.Jy/u.beam
    assert hasattr(cube, 'beam')
    # Solid angle of a 1" FWHM circular Gaussian beam.
    expected_sr = (((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value
    np.testing.assert_almost_equal(cube.beam.sr.value, expected_sr)
def test_jybeam_lower():
    """A mixed-case 'Jybeam' BUNIT must parse to Jy/beam with a beam."""
    cube, data = cube_and_raw('vda_Jybeam_lower.fits')
    assert cube.unit == u.Jy/u.beam
    assert hasattr(cube, 'beam')
    # Solid angle of a 1" FWHM circular Gaussian beam.
    expected_sr = (((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value
    np.testing.assert_almost_equal(cube.beam.sr.value, expected_sr)
# Regression test for #257 (https://github.com/radio-astro-tools/spectral-cube/pull/257)
def test_jybeam_whitespace():
    """Regression test for #257: whitespace inside the BUNIT string must not
    break Jy/beam parsing."""
    cube, data = cube_and_raw('vda_Jybeam_whitespace.fits')
    assert cube.unit == u.Jy/u.beam
    assert hasattr(cube, 'beam')
    # Solid angle of a 1" FWHM circular Gaussian beam.
    expected_sr = (((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value
    np.testing.assert_almost_equal(cube.beam.sr.value, expected_sr)
def test_beam_proj_meta():
    """Beam metadata must propagate to moments, slices, and projections."""
    cube, data = cube_and_raw('advs.fits')
    moment = cube.moment0(axis=0)
    # regression test for #250
    assert 'beam' in moment.meta
    assert 'BMAJ' in moment.hdu.header
    slc = cube[0,:,:]
    assert 'beam' in slc.meta
    proj = cube.max(axis=0)
    assert 'beam' in proj.meta
def test_proj_meta():
    """BUNIT metadata must propagate to moments, slices, and projections."""
    cube, data = cube_and_raw('advs.fits')
    derived = (cube.moment0(axis=0), cube[0,:,:], cube.max(axis=0))
    for obj in derived:
        assert 'BUNIT' in obj.meta
        assert obj.meta['BUNIT'] == 'K'
def test_pix_sign():
    """Pixel sizes from _pix_size_slice must stay positive even after the
    WCS cdelt and pc matrices have their signs flipped."""
    cube, data = cube_and_raw('advs.fits')
    def _assert_positive_pixel_sizes():
        # Axes 0..2 = (spectral, y, x); each size must be > 0.
        for axis in range(3):
            assert cube._pix_size_slice(axis) > 0
    _assert_positive_pixel_sizes()
    cube.wcs.wcs.cdelt *= -1
    _assert_positive_pixel_sizes()
    cube.wcs.wcs.pc *= -1
    _assert_positive_pixel_sizes()
def test_varyres_moment_logic_issue364():
    """ regression test for issue364: cube.moment(order=0) without an axis
    kwarg must still trigger the beam-averaging warning """
    cube, data = cube_and_raw('vda_beams.fits')
    assert isinstance(cube, VaryingResolutionSpectralCube)
    # the beams are very different, but for this test we don't care
    cube.beam_threshold = 1.0
    with warnings.catch_warnings(record=True) as wrn:
        warnings.simplefilter('default')
        # note that cube.moment(order=0) is different from cube.moment0()
        # because cube.moment0() calls cube.moment(order=0, axis=(whatever)),
        # but cube.moment doesn't necessarily have to receive the axis kwarg
        m0 = cube.moment(order=0)
    if six.PY2:
        # sad face, tests do not work
        pass
    else:
        assert "Arithmetic beam averaging is being performed" in str(wrn[-1].message)
    # note that this is just a sanity check; one should never use the average beam
    assert_quantity_allclose(m0.meta['beam'].major, 0.35*u.arcsec)
def test_mask_bad_beams():
    """
    Prior to #543, this tested two different scenarios of beam masking. After
    that, the tests got mucked up because we can no longer have minor>major in
    the beams.
    """
    cube, data = cube_and_raw('vda_beams.fits')
    # make sure all of the beams are initially good (finite)
    assert np.all(cube.goodbeams_mask)
    # make sure cropping the cube maintains the mask
    assert np.all(cube[:3].goodbeams_mask)
    # middle two beams have same area
    masked_cube = cube.mask_out_bad_beams(0.01,
                                          reference_beam=Beam(0.3*u.arcsec,
                                                              0.2*u.arcsec,
                                                              60*u.deg))
    # Only the two channels matching the reference beam survive.
    assert np.all(masked_cube.mask.include()[:,0,0] == [False,True,True,False])
    assert np.all(masked_cube.goodbeams_mask == [False,True,True,False])
    # Statistics must ignore the masked-out channels.
    mean = masked_cube.mean(axis=0)
    assert np.all(mean == cube[1:3,:,:].mean(axis=0))
    #doesn't test anything any more
    # masked_cube2 = cube.mask_out_bad_beams(0.5,)
    # mean2 = masked_cube2.mean(axis=0)
    # assert np.all(mean2 == (cube[2,:,:]+cube[1,:,:])/2)
    # assert np.all(masked_cube2.goodbeams_mask == [False,True,True,False])
def test_convolve_to():
    """Smoke test: convolving a multi-beam cube to a common beam must not raise."""
    cube, data = cube_and_raw('vda_beams.fits')
    result = cube.convolve_to(Beam(0.5*u.arcsec))
def test_convolve_to_with_bad_beams():
    """convolve_to must refuse a target beam smaller than the largest beam
    present, and succeed once the offending channels are masked out."""
    cube, data = cube_and_raw('vda_beams.fits')
    convolved = cube.convolve_to(Beam(0.5*u.arcsec))
    with pytest.raises(ValueError) as exc:
        # should not work: biggest beam is 0.4"
        convolved = cube.convolve_to(Beam(0.35*u.arcsec))
    assert exc.value.args[0] == "Beam could not be deconvolved"
    # middle two beams are smaller than 0.4
    masked_cube = cube.mask_channels([False, True, True, False])
    # should work: biggest beam is 0.3 arcsec (major)
    convolved = masked_cube.convolve_to(Beam(0.35*u.arcsec))
    # this is a copout test; should really check for correctness...
    assert np.all(np.isfinite(convolved.filled_data[1:3]))
def test_jybeam_factors():
    """Per-channel Jy/beam -> K factors for a varying-resolution cube."""
    cube, data = cube_and_raw('vda_beams.fits')
    expected = [15111171.12641629, 10074201.06746361,
                10074287.73828087, 15111561.14508185]
    assert_allclose(cube.jtok_factors(), expected)
def test_channelmask_singlebeam():
    """mask_channels must translate the channel flags into the include mask."""
    cube, data = cube_and_raw('adv.fits')
    flags = [False, True, True, False]
    masked_cube = cube.mask_channels(flags)
    assert np.all(masked_cube.mask.include()[:,0,0] == flags)
def test_mad_std():
    """mad_std along the spectral axis, with and without a mask.

    Requires astropy >= 2; earlier versions raise NotImplementedError.
    """
    cube, data = cube_and_raw('adv.fits')
    if int(astropy.__version__[0]) < 2:
        with pytest.raises(NotImplementedError) as exc:
            cube.mad_std()
    else:
        # mad_std run manually on data
        result = np.array([[0.15509701, 0.45763670],
                           [0.55907956, 0.42932451],
                           [0.48819454, 0.25499305]])
        np.testing.assert_almost_equal(cube.mad_std(axis=0).value, result)
        # Masking one value changes only the affected pixel's statistic.
        mcube = cube.with_mask(cube < 0.98*u.K)
        result2 = np.array([[0.15509701, 0.45763670],
                            [0.55907956, 0.23835865],
                            [0.48819454, 0.25499305]])
        np.testing.assert_almost_equal(mcube.mad_std(axis=0).value, result2)
def test_mad_std_params():
    """mad_std `how` options: 'cube' and 'ray' agree along axis=0; 'slice'
    and multi-axis 'ray' are unimplemented and must raise."""
    cube, data = cube_and_raw('adv.fits')
    # mad_std run manually on data
    result = np.array([[0.15509701, 0.45763670],
                       [0.55907956, 0.42932451],
                       [0.48819454, 0.25499305]])
    np.testing.assert_almost_equal(cube.mad_std(axis=0, how='cube').value, result)
    np.testing.assert_almost_equal(cube.mad_std(axis=0, how='ray').value, result)
    with pytest.raises(NotImplementedError) as exc:
        cube.mad_std(axis=0, how='slice')
    with pytest.raises(NotImplementedError) as exc:
        cube.mad_std(axis=1, how='slice')
    with pytest.raises(NotImplementedError) as exc:
        cube.mad_std(axis=(1,2), how='ray')
    # stats.mad_std(data, axis=(1,2))
    # NOTE(review): this repeats the axis=0/'ray' check from above --
    # possibly a different `how` or axis was intended here.
    np.testing.assert_almost_equal(cube.mad_std(axis=0, how='ray').value, result)
def test_caching():
    """world_extrema must be computed once and then served from the per-cube
    cache, keyed on the wrapped property function."""
    cube, data = cube_and_raw('adv.fits')
    assert len(cube._cache) == 0
    worldextrema = cube.world_extrema
    assert len(cube._cache) == 1
    # see https://stackoverflow.com/questions/46181936/access-a-parent-class-property-getter-from-the-child-class
    world_extrema_function = base_class.SpatialCoordMixinClass.world_extrema.fget.wrapped_function
    # Identity check: the cached object itself is returned, not a recompute.
    assert cube.world_extrema is cube._cache[(world_extrema_function, ())]
    np.testing.assert_almost_equal(worldextrema.value,
                                   cube.world_extrema.value)
def test_spatial_smooth_g2d():
    """Smooth each channel with a Gaussian 2D kernel and compare against
    precomputed reference slices."""
    cube, data = cube_and_raw('adv.fits')
    #
    # Gaussian 2D smoothing test
    #
    g2d = Gaussian2DKernel(3)
    cube_g2d = cube.spatial_smooth(g2d)
    # Check first slice
    result0 = np.array([[ 0.06653894, 0.06598313],
                        [ 0.07206352, 0.07151016],
                        [ 0.0702898 , 0.0697944 ]])
    np.testing.assert_almost_equal(cube_g2d[0].value, result0)
    # Check third slice
    result2 = np.array([[ 0.04217102, 0.04183251],
                        [ 0.04470876, 0.04438826],
                        [ 0.04269588, 0.04242956]])
    np.testing.assert_almost_equal(cube_g2d[2].value, result2)
def test_spatial_smooth_preserves_unit():
    """
    Regression test for issue527: spatial_smooth must preserve the cube unit.
    """
    cube, data = cube_and_raw('adv.fits')
    cube._unit = u.K
    #
    # Gaussian 2D smoothing test
    #
    g2d = Gaussian2DKernel(3)
    cube_g2d = cube.spatial_smooth(g2d)
    assert cube_g2d.unit == u.K
def test_spatial_smooth_t2d():
    """Smooth each channel with a Tophat 2D kernel and compare against
    precomputed reference slices."""
    cube, data = cube_and_raw('adv.fits')
    #
    # Tophat 2D smoothing test
    #
    t2d = Tophat2DKernel(3)
    cube_t2d = cube.spatial_smooth(t2d)
    # Check first slice
    result0 = np.array([[ 0.14864167, 0.14864167],
                        [ 0.14864167, 0.14864167],
                        [ 0.14864167, 0.14864167]])
    np.testing.assert_almost_equal(cube_t2d[0].value, result0)
    # Check third slice
    result2 = np.array([[ 0.09203958, 0.09203958],
                        [ 0.09203958, 0.09203958],
                        [ 0.09203958, 0.09203958]])
    np.testing.assert_almost_equal(cube_t2d[2].value, result2)
def test_spatial_smooth_median():
    """Spatial median filter (ksize=3), checked against precomputed slices."""
    pytest.importorskip('scipy.ndimage')
    cube, data = cube_and_raw('adv.fits')
    cube_median = cube.spatial_smooth_median(3)
    # Check first slice
    result0 = np.array([[ 0.54671028, 0.54671028],
                        [ 0.89482735, 0.77513282],
                        [ 0.93949894, 0.89482735]])
    np.testing.assert_almost_equal(cube_median[0].value, result0)
    # Check third slice
    result2 = np.array([[ 0.38867729, 0.35675333],
                        [ 0.38867729, 0.35675333],
                        [ 0.35675333, 0.54269608]])
    np.testing.assert_almost_equal(cube_median[2].value, result2)
@pytest.mark.parametrize('num_cores', (None, 1))
def test_spectral_smooth_median(num_cores):
    """Width-3 spectral median filter, exercising both serial num_cores values."""
    pytest.importorskip('scipy.ndimage')
    cube, data = cube_and_raw('adv.fits')
    cube_spectral_median = cube.spectral_smooth_median(3, num_cores=num_cores)
    # Spectrum at pixel (1,1) after the median filter.
    result = np.array([0.77513282, 0.35675333, 0.35675333, 0.98688694])
    np.testing.assert_almost_equal(cube_spectral_median[:,1,1].value, result)
def test_spectral_smooth_median_4cores():
    """Same spectral median filter but parallelized over 4 joblib workers;
    the result must match the serial path."""
    pytest.importorskip('joblib')
    pytest.importorskip('scipy.ndimage')
    cube, data = cube_and_raw('adv.fits')
    cube_spectral_median = cube.spectral_smooth_median(3, num_cores=4)
    # Spectrum at pixel (1,1) after the median filter.
    result = np.array([0.77513282, 0.35675333, 0.35675333, 0.98688694])
    np.testing.assert_almost_equal(cube_spectral_median[:,1,1].value, result)
def update_function():
    # Defined at module level (not nested in a test) so joblib can pickle it
    # and ship it to worker processes in the parallel smoothing test below.
    print("Update Function Call")
@pytest.mark.skipif('on_travis')
def test_smooth_update_function_parallel(capsys):
    """The update callback must fire once per processed ray when smoothing
    with joblib parallelism (num_cores=4)."""
    pytest.importorskip('joblib')
    pytest.importorskip('scipy.ndimage')
    cube, data = cube_and_raw('adv.fits')
    # this is potentially a major disaster: if update_function can't be
    # pickled, it won't work, which is why update_function is (very badly)
    # defined outside of this function
    cube_spectral_median = cube.spectral_smooth_median(3, num_cores=4,
                                                       update_function=update_function)
    sys.stdout.flush()
    captured = capsys.readouterr()
    # 6 calls -- presumably one per spatial pixel of the 3x2 map; confirm
    # against the adv.fits dimensions if this changes.
    assert captured.out == "Update Function Call\n"*6
@pytest.mark.skipif('on_travis')
def test_smooth_update_function_serial(capsys):
    """Serial path: a locally-defined (unpicklable) callback is acceptable
    because nothing is shipped to worker processes."""
    pytest.importorskip('scipy.ndimage')
    cube, data = cube_and_raw('adv.fits')
    # Local on purpose -- shadows the module-level update_function to show
    # the serial path never needs to pickle the callback.
    def update_function():
        print("Update Function Call")
    cube_spectral_median = cube.spectral_smooth_median(3, num_cores=1, parallel=False,
                                                       update_function=update_function)
    captured = capsys.readouterr()
    assert captured.out == "Update Function Call\n"*6
@pytest.mark.skipif('not scipyOK')
def test_parallel_bad_params():
    """Contradictory parallel/num_cores combinations must raise or warn."""
    cube, data = cube_and_raw('adv.fits')
    # parallel=False with num_cores>1 is a hard error.
    with pytest.raises(ValueError) as exc:
        cube.spectral_smooth_median(3, num_cores=2, parallel=False,
                                    update_function=update_function)
    assert exc.value.args[0] == ("parallel execution was not requested, but "
                                 "multiple cores were: these are incompatible "
                                 "options.  Either specify num_cores=1 or "
                                 "parallel=True")
    # parallel=True with num_cores=1 only warns.
    with warnings.catch_warnings(record=True) as wrn:
        cube.spectral_smooth_median(3, num_cores=1, parallel=True,
                                    update_function=update_function)
    assert ("parallel=True was specified but num_cores=1. "
            "Joblib will be used to run the task with a "
            "single thread.") in str(wrn[-1].message)
def test_initialization_from_units():
    """
    Regression test for issue 447: a cube constructed from another cube's
    filled data must inherit the unit attached to that data.
    """
    cube, data = cube_and_raw('adv.fits')
    rebuilt = SpectralCube(data=cube.filled_data[:], wcs=cube.wcs)
    assert rebuilt.unit == cube.unit
def test_varyres_spectra():
    """1D extractions from a varying-resolution cube keep per-channel beams."""
    cube, data = cube_and_raw('vda_beams.fits')
    assert isinstance(cube, VaryingResolutionSpectralCube)
    # Both a pixel spectrum and a spatially-averaged spectrum must carry beams.
    for spectrum in (cube[:,0,0], cube.mean(axis=(1,2))):
        assert isinstance(spectrum, VaryingResolutionOneDSpectrum)
        assert hasattr(spectrum, 'beams')
def test_median_2axis():
    """
    As of this writing the bottleneck.nanmedian did not accept an axis that is a
    tuple/list so this test is to make sure that is properly taken into account.
    """
    cube, data = cube_and_raw('adv.fits')
    cube_median = cube.median(axis=(1, 2))
    # Per-channel medians over the two spatial axes.
    result0 = np.array([0.83498009, 0.2606566 , 0.37271531, 0.48548023])
    np.testing.assert_almost_equal(cube_median.value, result0)
def test_varyres_mask():
    """identify_bad_beams/mask_out_bad_beams must flag deviant beams and
    propagate the good-beams mask through slicing and spectrum extraction."""
    cube, data = cube_and_raw('vda_beams.fits')
    # Make the first and last beams strongly deviant from the middle two.
    cube._beams.major.value[0] = 0.9
    cube._beams.minor.value[0] = 0.05
    cube._beams.major.value[3] = 0.6
    cube._beams.minor.value[3] = 0.09
    # mask out one beam
    goodbeams = cube.identify_bad_beams(0.5, )
    assert all(goodbeams == np.array([False, True, True, True]))
    mcube = cube.mask_out_bad_beams(0.5)
    assert hasattr(mcube, '_goodbeams_mask')
    assert all(mcube.goodbeams_mask == goodbeams)
    assert len(mcube.beams) == 3
    # The mask must survive extracting a single-pixel spectrum.
    sp_masked = mcube[:,0,0]
    assert hasattr(sp_masked, '_goodbeams_mask')
    assert all(sp_masked.goodbeams_mask == goodbeams)
    assert len(sp_masked.beams) == 3
    try:
        assert mcube.unmasked_beams == cube.beams
    except ValueError:
        # older versions of beams
        assert np.all(mcube.unmasked_beams == cube.beams)
    try:
        # check that slicing works too
        assert mcube[:5].unmasked_beams == cube[:5].beams
    except ValueError:
        assert np.all(mcube[:5].unmasked_beams == cube[:5].beams)
| mevtorres/astrotools | spectral_cube/tests/test_spectral_cube.py | test_spectral_cube.py | py | 69,166 | python | en | code | 0 | github-code | 13 |
1985561854 | #!/usr/bin/env -S ipython --matplotlib=auto
#%%
import matplotlib.pyplot as pp
import numpy as np
import os
# Resolve nft-rates.csv relative to this script so it works from any CWD.
path = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(path, 'nft-rates.csv')
# Load the rate table, skipping the first two (header) rows.
data = np.genfromtxt(path, delimiter=',')[2:]
# One column block per NFT tier -- presumably every 6th column holds a
# tier's cumulative multiple; confirm against the CSV header.
year = data[:, 0]
unit = data[:, 6]  # NOTE(review): loaded but never plotted below
kilo = data[:,12]
mega = data[:,18]
giga = data[:,24]
tera = data[:,30]
peta = data[:,36]
ones = np.ones(len(year))
# Log-scale line per tier; distinct line styles keep them readable in b&w.
pp.semilogy(year, peta, color='black', linestyle='solid')
pp.semilogy(year, tera, color='black', linestyle='dashdot')
pp.semilogy(year, giga, color='black', linestyle='dashed')
pp.semilogy(year, mega, color='black', linestyle=(0, (3, 2, 1, 2)))
pp.semilogy(year, kilo, color='black', linestyle='dotted')
# Shade the bands between adjacent tiers with increasing opacity.
pp.fill_between(year, peta, tera, color='red', alpha=0.2)
pp.fill_between(year, tera, giga, color='red', alpha=0.4)
pp.fill_between(year, giga, mega, color='red', alpha=0.6)
pp.fill_between(year, mega, kilo, color='red', alpha=0.8)
pp.fill_between(year, kilo, ones, color='red', alpha=1.0)
pp.title('ROI on XPower NFTs', fontweight='bold')
pp.legend(['Peta NFT', 'Tera NFT', 'Giga NFT', 'Mega NFT', 'Kilo NFT'])
pp.xticks([0, 10, 20, 30, 40], map(str, [0, 10, 20, 30, 40]))
pp.yticks([1, 2, 4, 6, 8, 10], map(str, [1, 2, 4, 6, 8, 10]))
pp.ylabel('Multiple [XPower]')
pp.xlabel('Years')
pp.grid()
#%%
if __name__ == '__main__':
    # Block so the window stays open when run as a plain script.
    pp.show(block=True)
| blackhan-software/xpower-hh | params/nft-rates/nft-rates.py | nft-rates.py | py | 1,362 | python | en | code | 6 | github-code | 13 |
10739185005 | '''
Get ariana's pics in JinRiTouTiao, some problems can't
be solved, weird! Try to use MongoDB, and download pics.
'''
import json
from urllib.parse import urlencode
import requests
from requests.exceptions import RequestException
import pymongo
import os
from hashlib import md5
from multiprocessing import Pool
from json.decoder import JSONDecodeError
# MongoDB connection settings and the collection used to store results.
MONGO_URL = 'localhost'
MONGO_DB = 'toutiao'
MONGO_TABLE = 'toutiao'
# Pagination range: offsets GROUP_START*20 .. (GROUP_END-1)*20 are fetched.
GROUP_START = 0
GROUP_END = 20
# connect=False defers the actual connection until first use -- presumably
# so the client survives being shared across multiprocessing workers; confirm.
client = pymongo.MongoClient(MONGO_URL, connect=False)
db = client[MONGO_DB]
def get_one_page(offset, keyword):
    """Fetch one page of Toutiao search results as raw JSON text.

    :param offset: pagination offset (multiples of 20)
    :param keyword: search term
    :return: the response body on HTTP 200, otherwise None
    """
    # Query parameters expected by the search_content endpoint.
    data = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': 20,
        'cur_tab': 1,
        'from': 'search_tab',
        'pd': 'synthesis'
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(data)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Request error.')
        return None
def parse_one_page(html):
    """Yield {'title', 'url'} dicts for every titled item in a results page.

    Malformed JSON, a falsy payload, or a payload without a 'data' key all
    yield nothing.
    """
    try:
        payload = json.loads(html)
    except JSONDecodeError:
        return
    if not payload or 'data' not in payload:
        return
    for entry in payload['data']:
        title = entry.get('title')
        if title is not None:
            yield {
                'title': title,
                'url': entry.get('article')
            }
def save_to_mongo(result):
    """Insert one parsed result into the module-level MongoDB collection.

    Returns True when the insert reports success, False otherwise.
    NOTE(review): ``insert`` is deprecated in modern pymongo in favour of
    ``insert_one`` -- confirm the installed pymongo version before changing.
    """
    if db[MONGO_TABLE].insert(result):
        print('Insert succeed')
        return True
    return False
def download_img(url):
    """Download an image and hand the raw bytes to save_img.

    Always returns None; request failures are reported but swallowed.
    """
    try:
        response = requests.get(url)
        if response.status_code == 200:
            save_img(response.content)
        return None
    except RequestException:
        print('Request error.')
        return None
def save_img(content):
    """Write raw image bytes to ``<cwd>/<md5(content)>.jpg``.

    The MD5 digest of the content is the file name, so identical images are
    de-duplicated; an existing file is left untouched.

    Fix: removed the redundant ``f.close()`` inside the ``with`` block --
    the context manager already closes the file.
    """
    file_path = "{0}/{1}.{2}".format(os.getcwd(), md5(content).hexdigest(),
                                     'jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)
def main(offset):
    """Fetch, print, and persist one result page at the given offset.

    NOTE(review): the search keyword 'ariana' is hard-coded here.
    """
    html = get_one_page(offset, 'ariana')
    for item in parse_one_page(html):
        print(item['title'], item['url'])
        save_to_mongo(item)
if __name__ == '__main__':
    # Page offsets: 0, 20, ..., (GROUP_END - 1) * 20.
    groups = [x*20 for x in range(GROUP_START, GROUP_END)]
    # Bug fix: the pool was created but never closed/joined, leaking worker
    # processes; the context manager terminates and reaps workers on exit.
    with Pool() as pool:
        pool.map(main, groups)
22803961239 | import requests
from bs4 import BeautifulSoup
import numpy as np
from scipy.stats import poisson
def obtener_Liga(nombre_pais):
    """Look up a country's league page on fbref and return its URL.

    Scans the "countries" table on the squads index for the first row whose
    header cell contains ``nombre_pais`` (capitalized) and follows its link.
    Returns the absolute league URL, or None when the table or country is
    not found.
    """
    print('Buscando equipo...')
    # Index page listing every country/league.
    base_url = 'https://fbref.com/en/squads'
    # Fetch and parse the index page.
    response = requests.get(base_url)
    soup = BeautifulSoup(response.content, features='html.parser')
    # The per-country listing lives in the table with id="countries".
    tabla_resultados = soup.find('table', id="countries")
    # Bail out if the page layout changed and the table is missing.
    if tabla_resultados is None:
        print('No se encontraron resultados para el país.')
        return
    filas_resultados = tabla_resultados.find_all('tr')
    # Scan rows for the first country whose name contains the query.
    liga_link = None
    for fila in filas_resultados:
        # The first header cell of each row holds the country name.
        liga = fila.find_all('th')[0].text.strip()
        if nombre_pais.capitalize() in liga:
            print(liga)
            # base_url[:17] == 'https://fbref.com'; the href is site-relative.
            liga_link = base_url[:17]+fila.find_next('a')['href']
            break
    # Report the outcome (URL or not-found message).
    if liga_link is None:
        print('No se encontró la liga.')
    else:
        print(liga_link)
    return liga_link
def obtener_Equipo(nombre_equipo, liga_link):
    """Find a club on an fbref league page by (substring, case-insensitive) name.

    :param nombre_equipo: club-name fragment to search for
    :param liga_link: URL of the league's clubs page
    :return: (club name, squad URL) tuple; (None, None) when no club matches

    Bug fix: ``equipo`` was left unassigned when no row matched, so the
    final ``return equipo, equipo_link`` raised UnboundLocalError instead
    of signalling "not found".
    """
    print('Buscando equipo...')
    # Base URL for building each club's squad page from its fbref id.
    url_base = 'https://fbref.com/en/squads/'
    # Fetch and parse the league page.
    response = requests.get(liga_link)
    soup = BeautifulSoup(response.content, features='html.parser')
    # The clubs listing lives in the table with id="clubs".
    tabla_resultados = soup.find('table', id="clubs")
    if tabla_resultados is None:
        print('No se encontraron resultados.')
        return
    filas_resultados = tabla_resultados.find_all('tr')
    # Scan the rows for the first club whose name contains the query.
    equipo = None
    equipo_link = None
    for fila in filas_resultados:
        # The first header cell of each row holds the club name.
        clubs = fila.find_all('th')[0].text.strip()
        if nombre_equipo.lower() in clubs.lower():
            equipo = clubs
            print(equipo)
            # The 8-character fbref club id is embedded in the row's first link.
            club_id = fila.find_next('a')['href'][11:19]
            equipo_link = url_base + club_id
            break
    if equipo_link is None:
        print('No se encontró el equipo.')
    return equipo, equipo_link
def obtener_datos_equipo(nombre_equipo, nombre_pais):
    """Scrape a club's last matches from its fbref match log.

    Returns (resultados, resultados_local, resultados_visitante, equipo):
    up to 10 most-recent played matches overall, at home, and away, each as
    a (date, opponent, W/D/L code, goals for, goals against) tuple, plus
    the resolved club name.
    """
    # Resolve the club's squad-page URL via the league index.
    equipo, url = obtener_Equipo(nombre_equipo, obtener_Liga(nombre_pais))
    print(url)
    # Fetch and parse the squad page.
    response = requests.get(url)
    soup = BeautifulSoup(response.content, features='html.parser')
    # The match log lives in the table with id="matchlogs_for".
    tabla_resultados = soup.find('table', id="matchlogs_for")
    filas_resultados = tabla_resultados.find_all('tr')
    # Walk the rows newest-first, collecting played matches only.
    resultados, resultados_local, resultados_visitante = [], [], []
    for fila in reversed(filas_resultados[1:]): # newest matches first
        # Data cells of the row.
        columnas = fila.find_all('td')
        # W/D/L result code; empty for matches not yet played.
        resultado = columnas[5].text.strip()
        # Skip fixtures that have no result yet.
        if resultado:
            # Match date lives in the row's header cell.
            fecha = fila.find('th').text.strip()
            """
            # Eliminar la bandera a los equipos
            if (columnas[8].text.strip()[2] == ' '):
                oponente = columnas[8].text.strip()[3:]
            elif (columnas[8].text.strip()[3] == ' '):
                oponente = columnas[8].text.strip()[4:]
            else:"""
            oponente = columnas[8].text.strip()
            # First character of the score cells -- assumes single-digit
            # goal counts; TODO confirm for high-scoring matches.
            goles_favor = columnas[6].text.strip()[0]
            goles_contra = columnas[7].text.strip()[0]
            # Home/Away venue flag.
            local_visitante = columnas[4].text.strip()
            if local_visitante == 'Home' and len(resultados_local) < 10:
                resultados_local.append((fecha, oponente, resultado, goles_favor, goles_contra))
            elif local_visitante == 'Away' and len(resultados_visitante) < 10:
                resultados_visitante.append((fecha, oponente, resultado, goles_favor, goles_contra))
            if len(resultados) < 10:
                resultados.append((fecha, oponente, resultado, goles_favor, goles_contra))
        # Stop once all three windows hold their last 10 matches.
        if len(resultados) >= 10 and len(resultados_local) >= 10 and len(resultados_visitante) >= 10:
            break
    # Dead debug-printing code kept for reference.
    """print("Últimos 10 partidos jugados:")
    for resultado in reversed(resultados):
        print(f'Fecha: {resultado[0]}, Oponente: {resultado[1]}, Resultado: {resultado[2]} {resultado[3]}-{resultado[4]}')
    print("Últimos 10 partidos como local:")
    for resultado in reversed(resultados_local):
        print(f'Fecha: {resultado[0]}, Oponente: {resultado[1]}, Resultado: {resultado[2]} {resultado[3]}-{resultado[4]}')
    print("Últimos 10 partidos como visitante:")
    for resultado in reversed(resultados_visitante):
        print(f'Fecha: {resultado[0]}, Oponente: {resultado[1]}, Resultado: {resultado[2]} {resultado[3]}-{resultado[4]}')"""
    return resultados, resultados_local, resultados_visitante, equipo
def obtener_resultados(resultados, resultados_local, resultados_visitante):
    """Build and print per-window statistics from the scraped match lists.

    Each returned list has the shape
    [P(win), P(draw), P(loss), (scored, scored/match), (conceded, conceded/match)].
    Returns (overall, home, away) in that order.

    TODO(review): calcular_goles is recomputed four times per window; its
    result could be hoisted into a local.
    """
    probabilidades, probabilidades_local, probabilidades_visitante = [], [], []
    # Overall window: W/D/L probabilities plus goal totals and per-match rates.
    probabilidades = calcular_probabilidad(resultados)
    probabilidades.append((calcular_goles(resultados)[0], round(calcular_goles(resultados)[0]/len(resultados),1)))
    probabilidades.append((calcular_goles(resultados)[1],round(calcular_goles(resultados)[1]/len(resultados),1)))
    # Home window.
    probabilidades_local = calcular_probabilidad(resultados_local)
    probabilidades_local.append((calcular_goles(resultados_local)[0],round(calcular_goles(resultados_local)[0]/len(resultados_local),1)))
    probabilidades_local.append((calcular_goles(resultados_local)[1],round(calcular_goles(resultados_local)[1]/len(resultados_local),1)))
    # Away window.
    probabilidades_visitante = calcular_probabilidad(resultados_visitante)
    probabilidades_visitante.append((calcular_goles(resultados_visitante)[0],round(calcular_goles(resultados_visitante)[0]/len(resultados_visitante),1)))
    probabilidades_visitante.append((calcular_goles(resultados_visitante)[1],round(calcular_goles(resultados_visitante)[1]/len(resultados_visitante),1)))
    print(f'Resultados (Últimos 10 partidos):')
    print(f'Goles anotados: {probabilidades[3][0]} ({probabilidades[3][1]}) || Goles recibidos: {probabilidades[4][0]} ({probabilidades[4][1]})')
    print(f'Victoria: {probabilidades[0]*100}%')
    print(f'Empate: {probabilidades[1]*100}%')
    print(f'Derrota: {probabilidades[2]*100}%')
    print(f'Resultados como local (Últimos 10 partidos):')
    print(f'Goles anotados: {probabilidades_local[3][0]} ({probabilidades_local[3][1]}) || Goles recibidos: {probabilidades_local[4][0]} ({probabilidades_local[4][1]})')
    print(f'Victoria: {probabilidades_local[0]*100}%')
    print(f'Empate: {probabilidades_local[1]*100}%')
    print(f'Derrota: {probabilidades_local[2]*100}%')
    print(f'Resultados como visitante: (Últimos 10 partidos)')
    print(f'Goles anotados: {probabilidades_visitante[3][0]} ({probabilidades_visitante[3][1]}) || Goles recibidos: {probabilidades_visitante[4][0]} ({probabilidades_visitante[4][1]})')
    print(f'Victoria: {probabilidades_visitante[0]*100}%')
    print(f'Empate: {probabilidades_visitante[1]*100}%')
    print(f'Derrota: {probabilidades_visitante[2]*100}%')
    return probabilidades, probabilidades_local, probabilidades_visitante
def calcular_goles(resultados):
    """Return (goals scored, goals conceded) summed over the result tuples.

    Each tuple stores goals for at index 3 and goals against at index 4,
    as strings (scraped text), so both are converted with int().
    """
    anotados = sum(int(partido[3]) for partido in resultados)
    recibidos = sum(int(partido[4]) for partido in resultados)
    return anotados, recibidos
def calcular_probabilidad(resultados):
    """Return [P(win), P(draw), P(loss)] from a list of result tuples.

    The result code is at index 2: 'w'/'W' counts as a win, 'l'/'L' as a
    loss, anything else as a draw.

    Bug fix: the fractions were previously divided by a hard-coded 10, which
    is wrong whenever fewer than 10 matches are available (early season) --
    the probabilities no longer summed to 1. They are now divided by the
    actual number of results; an empty list yields [0.0, 0.0, 0.0].
    """
    v = e = d = 0
    for r in resultados:
        codigo = r[2].lower()
        if codigo == 'w':
            v += 1
        elif codigo == 'l':
            d += 1
        else:
            e += 1
    total = len(resultados)
    if total == 0:
        return [0.0, 0.0, 0.0]
    return [v/total, e/total, d/total]
def calcular_probabilidad_apuesta(equipo,pais,local_visitante):
    """Scrape a team's recent form and compute weighted bet probabilities.

    :param equipo: club name fragment
    :param pais: country name (used to find the league)
    :param local_visitante: 'local' for home analysis, anything else for away

    WARNING(review): the two branches return different arities -- the home
    branch returns (probabilidad, equipo, resultados) while the away branch
    returns (probabilidad, probabilidad_handicap, equipo, resultados).
    Callers must know which branch they triggered.
    """
    probabilidad, probabilidad_handicap = 0, 0
    general, local, visitante, equipo = obtener_datos_equipo(equipo,pais)
    probabilidades, probabilidades_local, probabilidades_visitante = obtener_resultados(general,local,visitante)
    resultados = [probabilidades, probabilidades_local, probabilidades_visitante]
    probabilidad_ganar, probabilidad_empatar, probabilidad_perder = 0, 0, 0
    #print("Probando resultados:")
    #print(resultados)
    # Weights: 0.6 on overall form, 0.4 on venue-specific form.
    pesos = [0.6, 0.4]
    if local_visitante.lower() == 'local':
        # Weighted probability of winning.
        probabilidad_ganar = (pesos[0] * probabilidades[0]) + (pesos[1] * probabilidades_local[0])
        # Weighted probability of drawing.
        probabilidad_empatar = (pesos[0] * probabilidades[1]) + (pesos[1] * probabilidades_local[1])
        # Probability of losing is the complement.
        probabilidad_perder = 1 - (probabilidad_ganar + probabilidad_empatar)
        #print(f'La probabilidad de acertar al 1 X = {(probabilidad_ganar+probabilidad_empatar)*100}%')
        probabilidad = [probabilidad_ganar, probabilidad_empatar, probabilidad_perder]
        #print(resultados)
        return probabilidad, equipo, resultados
    else:
        # Weighted probability of winning (away form this time).
        probabilidad_ganar = (pesos[0] * probabilidades[0]) + (pesos[1] * probabilidades_visitante[0])
        # Weighted probability of drawing.
        probabilidad_empatar = (pesos[0] * probabilidades[1]) + (pesos[1] * probabilidades_visitante[1])
        # Probability of losing is the complement.
        probabilidad_perder = 1 - (probabilidad_ganar + probabilidad_empatar)
        #print(f'La probabilidad de acertar al X 2 = {(probabilidad_ganar+probabilidad_empatar)*100}%')
        # Handicap estimate (only computed for the away case).
        probabilidad_handicap = calcular_handicap(general, visitante, pesos)
        print(f'La probabilidad de acertar con handicap de +2 = {probabilidad_handicap*100}%')
        probabilidad = [probabilidad_ganar, probabilidad_empatar, probabilidad_perder]
        #print(resultados)
        return probabilidad, probabilidad_handicap, equipo, resultados
def calcular_handicap(general, visitante, pesos, handicap=2):
    """Weighted probability that the team "wins" after adding a goal handicap.

    A match counts as favourable when goals-for + handicap > goals-against.
    The overall window is weighted by ``pesos[1]`` and the away window by
    ``pesos[0]`` (matching the original implementation).

    Generalization (backward-compatible): the previously hard-coded +2
    handicap is now the ``handicap`` keyword parameter, defaulting to 2.
    """
    # Count matches in each window that survive the handicap.
    favorables_general = sum(
        1 for r in general if int(r[3]) + handicap > int(r[4]))
    favorables_visitante = sum(
        1 for v in visitante if int(v[3]) + handicap > int(v[4]))
    # Weighted average of the two favourable fractions.
    return (pesos[1] * favorables_general / len(general)
            + pesos[0] * favorables_visitante / len(visitante))
# probabilidad = [probabilidad_ganar, probabilidad_empatar, probabilidad_perder]
# probabilidad_handicap = number
# equipo = string
# resultados = [probabilidades, probabilidades_local, probabilidades_visitante]
def calcular_doble_oportunidad(probabilidades):
    """Return (best double-chance probability, option index).

    Options are indexed 0 = win-or-draw (1X), 1 = win-or-lose (12),
    2 = draw-or-lose (X2). Ties on the top probability are broken in
    favour of options containing the draw and the home side: 1X first,
    then X2, then 12.
    """
    ganar_empatar = probabilidades[0] + probabilidades[1]
    ganar_perder = probabilidades[0] + probabilidades[2]
    empatar_perder = probabilidades[1] + probabilidades[2]
    opciones = [ganar_empatar, ganar_perder, empatar_perder]
    mejor = max(opciones)
    # How many options share the top probability?
    if opciones.count(mejor) > 1:
        # Tie-break, prioritising draws and the home side.
        if ganar_empatar == ganar_perder or ganar_empatar == empatar_perder:
            indice = 0
        elif ganar_perder == empatar_perder:
            indice = 2
        else:
            indice = 1
    else:
        indice = opciones.index(mejor)
    return mejor, indice
def calcular_resultado_probable (local, visitante):
    """Pick a double-chance forecast ('1X' or 'X2') for a match.

    ``local`` and ``visitante`` are the tuples returned by
    calcular_probabilidad_apuesta; element [0] of each is the weighted
    [P(win), P(draw), P(loss)] list. The numbered prints tag which branch
    of the decision ladder fired (debug output).
    """
    # Double-chance option indices: ganar_empatar = 0, ganar_perder = 1, empatar_perder = 2
    mejor_prob_local = calcular_doble_oportunidad(local[0])
    mejor_prob_visita = calcular_doble_oportunidad(visitante[0])
    pronostico = "N/A"
    if mejor_prob_local[1] == 0 and mejor_prob_visita[1] == 2:
        # Both sides point the same way: strong home-or-draw signal.
        pronostico = '1X'
        print("Probabilidad Alta!")
        print("Primero")
    elif mejor_prob_local[1] == 0 and mejor_prob_visita[1] == 0:
        # Favour the visitor only if its win rate clearly exceeds the home side's.
        if (visitante[0][0]-local[0][0]) > 0.2:
            pronostico = 'X2'
        else:
            pronostico = '1X'
        print("Segundo")
    elif mejor_prob_local[1] == 0 and mejor_prob_visita[1] == 1:
        if mejor_prob_visita[0] > mejor_prob_local[0]:
            # Visitor wins more than the home side and wins more than it loses.
            if visitante[0][0] > local[0][0] and visitante[0][0] > visitante[0][2]:
                pronostico = 'X2'
            else:
                pronostico = '1X'
        else:
            pronostico = '1X'
        print("Tercero")
    elif mejor_prob_local[1] == 1 and mejor_prob_visita[1] == 1:
        # Back the visitor only when the home side loses more than it wins.
        if local[0][2] > local[0][0] and local[0][2] > visitante[0][2]:
            pronostico = 'X2'
        else:
            pronostico = '1X'
        print("Cuarto")
    elif mejor_prob_local[1] == 1 and mejor_prob_visita[1] == 0:
        if local[0][2] > 0.5:
            pronostico = 'X2'
        elif mejor_prob_visita[0] > mejor_prob_local[0]:
            # Visitor wins more than the home side and wins more than it loses.
            if visitante[0][0] > local[0][0] and visitante[0][0] > visitante[0][2]:
                pronostico = 'X2'
            else:
                pronostico = '1X'
        else:
            pronostico = '1X'
        print("Quinto")
    elif mejor_prob_local[1] == 1 and mejor_prob_visita[1] == 2:
        # Home side wins at least as often as it loses.
        if local[0][0] >= local[0][2]:
            pronostico = "1X"
        # Visitor loses more than it draws.
        elif visitante[0][2] > visitante[0][1]:
            pronostico = "1X"
        else:
            pronostico = "X2"
        print("Sexto")
    elif mejor_prob_local[1] == 2 and mejor_prob_visita[1] == 0:
        # Both sides point the same way: strong away-or-draw signal.
        pronostico = "X2"
        print("Probabilidad Alta!")
        print("Septimo")
    elif mejor_prob_local[1] == 2 and mejor_prob_visita[1] == 1:
        # Visitor wins at least as often as it loses.
        if visitante[0][0] >= visitante[0][2]:
            pronostico = "X2"
        # Home side loses more than it draws.
        elif local[0][2] > local[0][1]:
            pronostico = "X2"
        else:
            pronostico = "1X"
        print("Octavo")
    else:
        # Fallback: whoever loses less gets the benefit of the doubt.
        if visitante[0][2] < local[0][2]:
            pronostico = "X2"
        else:
            pronostico = "1X"
        print("Noveno")
    return pronostico
def prob_goles(goles_local, goles_visitante):
    """Poisson pmf of each observed goal count, using the sample mean as rate.

    Returns a (home probabilities, away probabilities) pair of arrays.
    """
    def _pmf_con_media_muestral(goles):
        # Rate parameter = mean of the observed counts.
        return poisson.pmf(goles, np.mean(goles))
    return _pmf_con_media_muestral(goles_local), _pmf_con_media_muestral(goles_visitante)
def resultado_exacto(local,visitante):
    """Combine home and away W/D/L probabilities into a '1'/'X'/'2' pick.

    ``local`` and ``visitante`` are [P(win), P(draw), P(loss)] lists; each
    outcome's score averages the home side's chance with the away side's
    complementary chance. Draw wins any tie; otherwise the side with the
    higher win probability does.
    """
    resultado = "N/A"
    prob = []
    # arreglo[0] = home win, arreglo[1] = draw, arreglo[2] = away win.
    arreglo = []
    arreglo.append((local[0]+visitante[2])/2)
    arreglo.append((local[1]+visitante[1])/2)
    arreglo.append((local[2]+visitante[0])/2)
    print(arreglo)
    # Find the argmax, recording tie indices along the way.
    # NOTE(review): the tie check compares against the *current* max, so a
    # later index equal to an earlier (already superseded) max can still be
    # appended; verify this is intended before relying on exact tie output.
    max_prob = 0
    for i in range(len(arreglo)):
        if arreglo[i] > arreglo[max_prob]:
            max_prob = i
        elif arreglo[i] == arreglo[max_prob] and i > 0:
            print("Hay dos probabilidades iguales")
            prob.append(i)
    # The argmax is always appended last, so len(prob) == 1 means no tie.
    prob.append(max_prob)
    print(max_prob)
    print(f"Probabilidades: {prob}")
    if len(prob) == 1:
        if prob[0] == 0:
            resultado = "1"
        elif prob[0] == 1:
            resultado = "X"
        else:
            resultado = "2"
    else:
        # Tie: prefer the draw if it is involved, else the higher win chance.
        if 1 in prob:
            resultado = "X"
        else:
            if local[0] >= visitante[0]:
                resultado = "1"
            else:
                resultado = "2"
    return resultado
16084629172 | """
Author: Missy Shi
Course: math 458
Date: 04/23/2020
Project: A3 - 2
Description:
Implementation of Fast Powering Algorithm
Task:
Compute the last five digits of the number 2 ** (10 ** 15)
"""
def bin_expansion(p: int) -> int:
""" find binary expansions from given exponent p """
count = 0
while p > 1:
count += 1
p = p - (2 ** count)
return count
def mod(num: int, m: int) -> int:
""" compute given number(num) mod modulus(m) """
while num - m > 0:
num -= m
return num
def q2():
""" Compute n ** p (mod m) """
n = 2
p = 10 ** 15
m = 100000
cl = [] # list of binary expansions
while p > 0:
count = bin_expansion(p)
p = p - 2 ** count
cl.append(count)
pl = [] # list of n ** binary expansions
for c in cl:
pl.append(mod(n ** (2 ** c), m))
result = 1
for i in pl:
result *= i
print(mod(result, m))
return
def main():
""" main program runner """
q2()
if __name__ == '__main__':
main() | missystem/math-crypto | fastpowering.py | fastpowering.py | py | 1,066 | python | en | code | 0 | github-code | 13 |
74599209618 | # 원형 큐 디자인
# https://leetcode.com/problems/design-circular-queue/
# page.259
class MyCircularQueue:
def __init__(self, k: int):
self.front = 0
self.rear = 0
self.q = [None] * k
self.maxlen = k
def enQueue(self, value: int) -> bool:
if self.q[self.rear] is None:
self.q[self.rear] = value
self.rear = (self.rear + 1) % self.maxlen
return True
else:
return False
def deQueue(self) -> bool:
if self.q[self.front] is not None:
self.q[self.front] = None
self.front = (self.front + 1) % self.maxlen
return True
else:
return False
def Front(self) -> int:
if self.q[self.front] is not None:
return self.q[self.front]
else:
return -1
def Rear(self) -> int:
if self.q[ (self.rear + self.maxlen-1) % self.maxlen ] is not None:
return self.q[ (self.rear + self.maxlen-1) % self.maxlen ]
else:
return -1
def isEmpty(self) -> bool:
if self.front == self.rear and self.q[self.front] is None:
return True
else:
return False
def isFull(self) -> bool:
if self.front == self.rear and self.q[self.front] is not None:
return True
else:
return False
| ChanYoung-dev/python | HelloWorld/2. CodingTEST/Level 1/circular-que.py | circular-que.py | py | 1,396 | python | en | code | 0 | github-code | 13 |
20042539057 | # Echo server program
from pynput import keyboard
from threading import Thread
from threading import Event
import hashlib
import pyaudio
import datetime
import socket
import time
start=1
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 4
WIDTH = 2
delimiter = "|:|:|";
seqFlag = 0
ta = 0
class packet():
checksum = 0;
length = 0;
seqNo = 0;
msg = 0;
def make(self, data):
self.msg = data
self.length = str(len(data))
self.checksum=hashlib.sha1(data).hexdigest()
# create ouput stream
def createOuputStream():
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(WIDTH),
channels=CHANNELS,
rate=RATE,
output=True,
frames_per_buffer=CHUNK)
return stream,p
class Connect(Thread):
def __init__(self, HOST, PORT):
super(Connect, self).__init__()
# create socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s.bind((HOST, PORT))
# create ouput stream
self.stream,self.p = createOuputStream()
self.__flag = Event()
self.__flag.set()
self.__running = Event()
self.__running.set()
def run(self):
while self.__running.isSet():
self.s.settimeout(None)
data= "".encode()
i=1
self.s.settimeout(0.2)
while True:
try:
data,addr = self.s.recvfrom(65535)
if not data:
continue
data+=data
i=i+1
print (i)
self.stream.write(data)
except:
#print('*')
break
# close output stream
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def timeout(self,time):
self.s.settimeout(time)
def send(self,data):
b = self.s.sendto(data,('127.0.0.1',50008))
return b
def recv(self):
ack,address = self.s.recvfrom(100)
return ack,address
def close(self):
self.s.close()
# create input stream and record
class Record(Thread):
def __init__(self, *args, **kwargs):
super(Record, self).__init__(*args, **kwargs)
p = pyaudio.PyAudio()
self.stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
self.frames = []
self.__flag = Event()
self.__flag.set()
self.__running = Event()
self.__running.set()
def run(self):
while self.__running.isSet():
pkt = packet()
data = self.stream.read(CHUNK)
self.frames.append(data)
x = 0
start_time=time.time()
resend = 0;
while x < (len(data) / 2048):
msg = data;
pkt.make(msg);
finalPacket = str(pkt.checksum) + delimiter + str(pkt.seqNo) + delimiter + str(pkt.length) + delimiter# + str(pkt.msg)
finalPacket = finalPacket.encode() + pkt.msg
sent = connect.send(finalPacket)
print('Sent {} bytes back to {}, awaiting acknowledgment..'.format(sent, "('127.0.0.1',50008)"))
connect.timeout(0.001)
try:
ack,address = connect.recv()
ack = ack.decode()
print("got ack")
except:
print ("Time out reached, resending ...",x);
resend += 1;
print("Resend:",resend)
continue;
if ack.split(",")[0] == str(pkt.seqNo):
pkt.seqNo = int(not pkt.seqNo)
print ("Acknowledged by: " + ack + "\nAcknowledged at: " + str(
datetime.datetime.utcnow()) + "\nElapsed: " + str(time.time() - start_time))
x += 1
print("x = ",x)
self.__flag.wait()
def pause(self):
self.__flag.clear()
def resume(self):
self.__flag.set()
def stop(self):
self.__flag.set()
self.__running.clear()
# key press
def on_press(key):
global start
if (key == keyboard.KeyCode.from_char('s')):
if start==1:
print('- Started recording -'.format(key))
start=0
try:
audio.start()
except:
audio.resume() #ç¹¼ç?
else:
print('.')
else:
print('incorrect character {0}, press s'.format(key))
# key release
def on_release(key):
global start
print('{0} released'.format(key))
if (key == keyboard.KeyCode.from_char('s')):
print('{0} stop'.format(key))
audio.pause()
start=1
if (key == keyboard.KeyCode.from_char('e')):
keyboard.Listener.stop
connect.close()
return False
if __name__ == '__main__':
print('start....')
HOST = '127.0.0.1'
PORT = 50009
# New Record Tread
audio = Record()
# New Connect Thread and listens
connect = Connect(HOST,PORT)
connect.start()
print('server ready and listening')
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
| MikeLin0206/Network | NetworkProtocol/RUDP/PressToTalk/server.py | server.py | py | 5,772 | python | en | code | 0 | github-code | 13 |
10973470469 | import pandas as pd
from time import time_ns
import timing
from tqdm import tqdm
from random import shuffle
# TODO: Put timing in this!!!
# TODO: Use ordered list for logn times
# note that neighbors for sake of this research are in-neighbors only.
# 1. number of active neighboors
# 2. Personal Network Exposure
# "This value is defined as the ratio of number of active neighbors to
# total number of neighbors. (An Empirical Evaluation of Social Influence Metrics
# 3. Average in neighbor count of active neighbors (we SHOULD check this... see if theres a correlation
# AIC
# 4. Average out neighbor count of active neighbors (Why not?)
def get_active_neighbors(prev_posts, neighbors, t, t_fos):
active_neighbors = set()
t_fos = t - t_fos
for post in prev_posts:
user = post[0]
date = post[1]
if user in neighbors and t >= date > t_fos:
active_neighbors.add(user)
return active_neighbors
def get_NAN(prev_posts, neighbors, t, t_fos):
active_neighbors = get_active_neighbors(prev_posts, neighbors, t, t_fos)
return len(active_neighbors)
def get_active_neighbors_and_hubs(prev_posts, in_neighbors, t, t_sus, t_fos, net):
active_neighbors = set()
hubs_count = 0
t_fos = t - t_fos
for user, date in prev_posts:
if user in in_neighbors and t >= date > t_fos:
if user not in active_neighbors:
active_neighbors.add(user)
if len(get_out_neighbors_at_time(net.out_edges(user), t, t_sus, net)) > 50:
hubs_count += 1
return active_neighbors, hubs_count
def get_NAN_and_HUB(prev_posts, in_neighbors, t, t_sus, t_fos, net):
active_neighbors, hubs = get_active_neighbors_and_hubs(prev_posts, in_neighbors, t, t_sus, t_fos, net)
return active_neighbors, len(active_neighbors), hubs
def get_active_neighbors_and_NAN(prev_posts, neighbors, t, t_fos):
active_neighbors = get_active_neighbors(prev_posts, neighbors, t, t_fos)
return active_neighbors, len(active_neighbors)
# TODO: Make the hub threshold configurable from a higher level
def get_HUB(active_neighbors, t, t_sus, net):
hub_count = 0
for neighbor in active_neighbors:
out_count = get_out_neighbors_at_time(net.out_edges(neighbor), t, t_sus, net)
if len(out_count) > 50:
hub_count += 1
return hub_count
def get_f1(positive_users, user, net):
# Return who in network is active
neighbors = net.in_edges(user)
active_neighbors = 0
for n in neighbors:
if n in positive_users:
active_neighbors += 1
return active_neighbors
def get_PNE(NAN, neighbors):
if neighbors == 0:
return 0
return NAN / neighbors
# Return PNE - active neighbor count over total number of users.
# Counting self user as active neighbor?
def get_f2(active_neighbors, user, net):
x = net.in_degree(user)
if x == 0:
return 0
else:
return active_neighbors / x
def get_f3(users, net): # G.out_degree(1) average
summation = 0
for usr in users:
summation += (net.in_degree(usr))
return summation / len(users)
def get_in_neighbors_at_time(in_edges, t, t_sus, net):
# don't look at edges after t (in the future)
t_sus = t - t_sus
neighbors = set()
for neighbor, user in in_edges:
data = net.get_edge_data(neighbor, user)
for i in data:
# prob dont need this if statement
if data:
date = data.get(i)['date']
if t >= date > t_sus:
neighbors.add(neighbor)
break
return neighbors
def get_in_prev_post_count_at_time(in_edges, t, t_sus, net):
# don't look at edges after t (in the future)
t_sus = t - t_sus
posts = 0
for neighbor, user in in_edges:
data = net.get_edge_data(neighbor, user)
for i in data:
# prob dont need this if statement
if data:
date = data.get(i)['date']
if t >= date > t_sus:
posts += 1
break
return posts
def get_out_neighbors_at_time(out_edges, t, t_sus, net):
# don't look at edges after t (in the future)
t_sus = t - t_sus
neighbors = set()
if out_edges:
for user, neighbor in out_edges:
data = net.get_edge_data(user, neighbor)
for i in data:
# prob dont need this if statement
if data:
date = data.get(i)['date']
if t >= date > t_sus:
neighbors.add(neighbor)
break
return neighbors
def get_out_neighbors(out_edges):
neighbors = set()
for user, neighbor in out_edges:
neighbors.add(neighbor)
return neighbors
def get_root_user(prev_posts, t, t_fos):
t_fos = t - t_fos
for user, date in prev_posts:
if t >= date > t_fos:
return user
return None
def get_negative_user(pos_user, prev_posts, prev_posters, root_neighbors, t, t_sus, t_fos, net, in_dataset_negative):
for user in root_neighbors:
if user in prev_posts or user in in_dataset_negative:
continue
else:
in_neighbors = get_in_neighbors_at_time(net.in_edges(user), t, t_sus, net)
if in_neighbors:
active_neighbors = get_active_neighbors(prev_posts, in_neighbors, t, t_fos)
if pos_user in active_neighbors:
continue
if len(active_neighbors) >= 1 and user not in prev_posters:
return user, in_neighbors, len(active_neighbors)
return None, None, None
def get_balanced_dataset(thread_list, thread_info, N, t_sus, t_fos, features_bits, positive_users):
global start
start = time_ns()
net = N
data = []
# if not features_bits.any():
# print("No features in configuration. Model requires at least 1 feature to run.")
# return 1
for thread in tqdm(thread_list):
thread_posts = thread_info[thread]
prev_posts = []
prev_posters = set()
active_users_total = positive_users[thread]
in_dataset = set() # we are only looking for the first time a user engages with a post in a time period.
in_dataset_negative = set()
for user, time in thread_posts:
prev_posts.append((user, time))
prev_posters.add(user)
if len(prev_posters) == len(active_users_total):
break
# can't have active neighbors without previous posts
if len(prev_posts) > 1:
# skip user if they are already in dataset for this topic
if user in in_dataset or time < time - t_fos:
continue
in_neighbors = get_in_neighbors_at_time(net.in_edges(user), time, t_sus, net)
if features_bits[2]:
active_neighbors, NAN, HUB = get_NAN_and_HUB(prev_posts, in_neighbors, time, t_sus, t_fos, net)
else:
NAN = get_NAN(prev_posts, in_neighbors, time, t_fos)
if NAN < 1:
in_dataset.add(user) # Count them as added w/out adding them... they're an "innovator"
continue
if features_bits[1]:
PNE = get_PNE(NAN, len(in_neighbors))
if features_bits[3]:
PPP = get_in_prev_post_count_at_time(net.in_edges(user), time, t_sus, net)
# make negative sample
root_user = get_root_user(prev_posts, time, t_fos)
if not root_user:
continue
root_neighbors = get_out_neighbors_at_time(net.out_edges(root_user), time, t_sus, net)
# someone who has not posted in the thread but has 2 active neighbors wrt thread (root + 1 additional)
negative_user, in_neighbors_negative, NAN_negative = get_negative_user(user, prev_posts, prev_posters, root_neighbors, time, t_sus, t_fos, net,in_dataset_negative)
if not negative_user:
continue
if features_bits[2]:
#in_neighbors_negative = get_in_neighbors_at_time(net.in_edges(negative_user), time, t_sus, net)
negative_active_neighbors, NAN_negative, HUB_negative = get_NAN_and_HUB(prev_posts, in_neighbors_negative, time, t_sus, t_fos, net)
# else:
# NAN_negative = get_NAN(prev_posts, in_neighbors_negative, time, t_fos)
if NAN_negative < 1:
continue
if features_bits[1]:
PNE_negative = get_PNE(NAN_negative, len(in_neighbors_negative))
# if features_bits[2]:
# HUB_negative = get_HUB(negative_active_neighbors, time, t_sus, net)
# only appends if both samples were good? change this?
if features_bits[3]:
PPP_negative = get_in_prev_post_count_at_time(net.in_edges(negative_user), time, t_sus, net)
data_row = [user]
if features_bits[0]:
data_row.append(NAN)
if features_bits[1]:
data_row.append(PNE)
if features_bits[2]:
data_row.append(HUB)
if features_bits[3]:
data_row.append(PPP)
data_row.append(1)
data.append(data_row)
# data.append([user, NAN, PNE, 1])
# currently, each user should get a positive record.
in_dataset.add(user)
# print(data_row)
negative_data_row = [negative_user]
if features_bits[0]:
negative_data_row.append(NAN_negative)
if features_bits[1]:
negative_data_row.append(PNE_negative)
if features_bits[2]:
negative_data_row.append(HUB_negative)
if features_bits[3]:
negative_data_row.append(PPP_negative)
negative_data_row.append(0)
in_dataset_negative.add(negative_user)
data.append(negative_data_row)
# print(negative_data_row)
# print(negative_user, len(in_neighbors_negative), NAN_negative, PNE_negative, 0)
columns = ['user_id']
data_message = "Compiled Data: "
if features_bits[0]:
columns.append('NAN')
data_message += "NAN "
if features_bits[1]:
columns.append('PNE')
data_message += "PNE "
if features_bits[2]:
columns.append('HUB')
data_message += "HUB "
if features_bits[3]:
columns.append('PPP')
data_message += "PPP "
columns.append('Class')
df = pd.DataFrame(data, columns=columns)
df.to_csv('dataset.csv', header=True, index=False)
timing.print_timing(data_message)
return df
def get_ratio(thread_info, net, t_sus, t_fos, user_amount):
global start
start = time_ns()
ratio = 0.0
count = 0
for thread in thread_info:
thread_posts = thread_info[thread]
prev_posts = []
prev_posters = set()
in_dataset = set()
for user, time in thread_posts:
prev_posts.append((user, time))
prev_posters.add(user)
if user in in_dataset:
continue
# get ratio of posters over whole all users
ratio += (len(prev_posters) / user_amount)
count += 1
in_dataset.add(user)
return ratio / count
| clcannon/CalSysUserEngagement | getFeatures.py | getFeatures.py | py | 11,753 | python | en | code | 0 | github-code | 13 |
31930300219 | #código adaptado dessa fonte:
#https://github.com/ashik0007/mlpr1_iris
from sklearn import naive_bayes, svm, neighbors, ensemble
#import das bibliotecas numpy e pandas
import numpy as np
import pandas as pd
# (1).
df = pd.read_csv('iris.data.csv')
data = np.array(df)
# (2).
np.random.shuffle(data)
# (3).
X_train = np.array(data[0:105, 0:4])
y_train = np.array(data[0:105, 4])
# (4).
X_test = np.array(data[105:150, 0:4])
y_test = np.array(data[105:150, 4])
# (5).
clf = svm.SVC(C=10, kernel='poly')
# (6).
clf.fit(X_train, y_train)
# (7).
esperado = clf.predict(X_test)
precisao = clf.score(X_test, y_test)
# (8).
print('Precisão: ', precisao * 100, '%')
print('Saídas: ', y_test)
print('Saídas esperadas: ', esperado) | KevinTome/Unifesp | inteligencia_artificial/aprendizado_de_maquina_2/analise_MLP.py | analise_MLP.py | py | 743 | python | pt | code | 0 | github-code | 13 |
6911465555 | from services.pdf_service import PdfService
def main():
# Example usage of the tokenization module
directory = 'C:/Users/john/OneDrive/Documents/FORBIDDEN KNOWLEDGE'
# Create an instance of PdfService
pdf_service = PdfService(directory)
# Process the PDFs using PdfService
pdf_service.process_pdfs()
# Example usage of the training module
# data = pdf_service.get_tokens() # Assuming you have a method to retrieve the tokens
# model = train(data)
# Use the trained model for further processing
if __name__ == "__main__":
main()
| threewisewomen/aesirmimir | main.py | main.py | py | 578 | python | en | code | 0 | github-code | 13 |
6922626368 | '''
Interviewing.io #Reference:
https://interviewing.io/recordings/Java-Google-2/
'''
'''
1st approach: Sort everything then query the 3rd element
Time Complexity: O(nlogn)
Can we do better? Yes. Lets try a dynamic approach,
as we iterate we sort. In the end we will traverse
the list only once, So time complexity has to be n.
'''
class Integers:
'''
nlogn Time Complexity
'''
def third_smallest_sort_approach(self,numbers):
assert (len(numbers)>0)
return sorted(numbers)[2]
def third_smallest_linear_approach(self,numbers):
assert (len(numbers)>0)
return None
'''
smallest =
second_smallest =
third_smallest =
return third_smallest
'''
'''
2 pointers approach. Declare 1 pointer on left, one on right
O(logn)
'''
def third_smallest_finest_approach(self,numbers):
'''
This approach is the finest since
we will dynamically sort the array.
'''
left = 1 #index of first number
right = len(numbers)-1 #index of last number
while (left < right):
if numbers[left-1] > numbers[left]:
#perform swap here
numbers[left-1],numbers[left] = numbers[left], numbers[left-1]
left +=1 #move left to the right
if numbers[right] < numbers[right-1]:
numbers[right], numbers[right-1] = numbers[right-1], numbers[right]
right -=1
return numbers[2]
class Solution_nth():
def smallest_n(self,number,n):
left = 0
right = len(number)-1
current = 1
'''
loop over the list only once
only under the condition that current <=right
'''
while (current<=right):
if number[current] < number[left]:
number[left],number[current]= number[current], number[left]
left +=1
elif number[right] < number[current]:
number[current],number[right]=number[right],number[current]
right -=1
else:
current +=1
return number[n]
if __name__ == "__main__":
numbers = [1,4,3,8,2]
current_Integers = Integers()
print ('The third smallest number is:',current_Integers.third_smallest_sort_approach(numbers))
print ('The third smallest number based on simultaneous sorting is:',current_Integers.third_smallest_finest_approach(numbers))
generic_solution = Solution_nth()
n = 3
print ('The third smallest number based on generic n approach is:',generic_solution.smallest_n(numbers,3))
| Oushesh/CODING_INTERVIEW | interviewing.io/GoogleEngineer_3rd_Smallest_Number.py | GoogleEngineer_3rd_Smallest_Number.py | py | 2,643 | python | en | code | 0 | github-code | 13 |
6014520026 | from PIL import ImageTk, Image
import tkinter
def titleBar(topF):
img = ImageTk.PhotoImage(Image.open(
r"C:\Users\Admin\Downloads\fol\fol\iiit.jpg").resize((400, 120), Image.ANTIALIAS))
logo = tkinter.Label(topF, bg="red", borderwidth=0,
image=img, height=120, width=400)
logo.image = img
logo.place(relx=0, rely=0)
| soham04/Library_Management_System | student_package/topF_module.py | topF_module.py | py | 367 | python | en | code | 0 | github-code | 13 |
18770698979 | # 키패드 누르기 문제
# n: numbers 의 길이
# 시간복잡도 : O(n), 공간 복잡도 : O()
# destination 과의 거리를 비교해주는 함수
def compare_distance(left_hand: int, right_hand: int, destination: int, hand: str) -> int:
ly, lx = keypad[left_hand]
ry, rx = keypad[right_hand]
dy, dx = keypad[destination]
if abs(dy - ly) + abs(dx - lx) < abs(dy - ry) + abs(dx - rx):
return -1
elif abs(dy - ly) + abs(dx - lx) > abs(dy - ry) + abs(dx - rx):
return 1
if hand == 'left':
return -1
return 1
# 키페드의 위치를 저장하는 dictionary
keypad = {10: (3, 0), 0: (3, 1), 11: (3, 2)}
_key = 1
for y in range(3):
for x in range(3):
keypad[_key] = (y, x)
_key += 1
def solution(numbers: list, hand: str) -> str:
answer = ''
left, right = 10, 11
for number in numbers:
# 1,4,7 의 경우 왼손으로 친다.
if number in (1, 4, 7):
left = number
answer += 'L'
continue
# 3,6,9 의 경우 오른손으로 친다.
if number in (3, 6, 9):
right = number
answer += 'R'
continue
# 왼손과 오른손 중 더 가까운 곳 혹은 주손에 따른 타자를 칠 손을 결정
if compare_distance(left, right, number, hand) < 0:
left = number
answer += 'L'
else:
right = number
answer += 'R'
return answer
| galug/2023-algorithm-study | level_1/press_keypad.py | press_keypad.py | py | 1,491 | python | ko | code | null | github-code | 13 |
26454942323 | def count_drop(server):
dropCount = 0
numThreads = 0
for i in range(len(server)):
if server[i] > 0:
numThreads = numThreads + server[i]
elif server[i] == -1:
if numThreads > 0:
print("thread less->")
numThreads = numThreads - 1
else:
dropCount = dropCount + 1
return dropCount
if __name__ == '__main__':
inp = int(input())
arr = []
for i in range(inp):
arr.append(int(input()))
print(count_drop(arr)) | Eyakub/Problem-solving | HackerRank/Python/request_processing_server.py | request_processing_server.py | py | 542 | python | en | code | 3 | github-code | 13 |
71612528338 | with open('input.txt') as file:
nums = [int(num) for num in file]
cals = {}
for i, num in enumerate(nums):
diff = 2020 - num
if diff in cals:
print(num * nums[cals[diff]])
break
cals[num] = i
| FilipBudac/adventofcode | 2020/01/day1.py | day1.py | py | 225 | python | en | code | 0 | github-code | 13 |
7326516314 | import base64
from io import BytesIO
from django.shortcuts import render, get_object_or_404, redirect
from .forms import *
from .models import *
from .miband4_func import get_activity_logs, sleeping, sleep_graph, colors
from datetime import datetime
from django.contrib import messages
from django.contrib.auth import login, logout
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
def user_logout(request):
logout(request)
return redirect('home')
def user_login(request):
if request.method == 'POST':
form = UserLoginForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
return redirect('home')
else:
form = UserLoginForm()
return render(request, 'activity/login.html', {'form': form})
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save()
print(user, dir(user))
login(request, user)
user_save = UserDeviceData(user=user.username)
user_save.save()
messages.success(request, 'Вы успешно зарегистрировались')
return redirect('entering-device-data')
else:
messages.error(request, 'Ошибка регистрации')
else:
form = UserRegisterForm()
return render(request, 'activity/register.html', {"form": form})
def entering_device_data_view(request):
if request.method == 'POST':
form = UserDeviceDataForm(request.POST)
if form.is_valid():
last_user_username = User.objects.order_by('-id')[0].username
user = UserDeviceData.objects.get(user=last_user_username)
user.mac_address = form.data['mac_address']
user.auth_key = form.data['auth_key']
user.save()
messages.success(request, 'Данные сохранены')
return redirect('home')
else:
messages.error(request, 'Данные не сохранены')
else:
form = UserDeviceDataForm()
return render(request, 'activity/entering_user_device_data.html', {"form": form})
def index(request):
articles = Articles.objects.all()
return render(request, 'activity/index.html', context={'articles': articles, 'title': 'Мониторинг сна'})
def syncs(request):
username = request.user.username
user_data = UserDeviceData.objects.filter(user=username)[0]
with open("activity/current_user.txt", "w") as f:
f.write(user_data.user)
if Activity.objects.filter(user=username).exists():
last_date_time = Activity.objects.order_by('-id')[0].date_time
start_date_time = datetime.fromtimestamp(last_date_time.timestamp() - 180)
res = get_activity_logs(
start_date_time,
mac_address=user_data.mac_address,
auth_key=user_data.auth_key
)
else:
last_date_time = datetime.strptime('03.01.2016 16:30:00', "%d.%m.%Y %H:%M:%S")
res = get_activity_logs(
last_date_time,
mac_address=user_data.mac_address,
auth_key=user_data.auth_key
)
if not res:
messages.error(request, 'Не удалось установить соединение с браслетом. Обновите MAC-адрес и ключ аутентификации')
return redirect('index')
else:
messages.success(request, 'Данные успешно обновлены!')
return redirect('index')
def one_article(request, article_id):
article = get_object_or_404(Articles, pk=article_id)
return render(request, 'activity/article.html', context={'article': article})
def analysis(request):
print('clicked analysis')
try:
print('from try block')
bruh = Activity.objects.filter(
date_time=datetime(
datetime.now().year,
datetime.now().month,
datetime.now().day-5,
20, 0
).strftime("%Y-%m-%d %H:%M:%S"),
user=request.user.username
)[0].is_sleeping
except IndexError:
print('from except')
messages.info(request, 'Синхронизируйте данные браслета!')
return redirect('home')
if bruh is None:
print('yeah, babe, bruh is none')
sleeping(request.user.username)
count_of_periods = sleep_graph(user=request.user.username)
index_ = np.arange(16)
fig = plt.figure(figsize=(8, 2))
plt.title('Анализ сна за 23.05')
ax = fig.add_subplot(3, 1, 1)
ax.xaxis.set_major_locator(ticker.MultipleLocator(120))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(60))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel('Время')
ax.set_yticklabels(['сон'])
ax.set_xticklabels(['18', '20', '22', '0', '2', '4', '6', '8', '10'])
series = []
for i in range(len(count_of_periods)):
series.append(count_of_periods[i][1])
left = 0
for i in range(len(count_of_periods)):
ax.barh(index_, series[i], left=left, color=colors[count_of_periods[i][0]])
left += series[i]
buffer = BytesIO()
plt.savefig(buffer, format='png')
buffer.seek(0)
image_png = buffer.getvalue()
buffer.close()
graphic = base64.b64encode(image_png)
graphic = graphic.decode('utf-8')
return render(request, 'activity/analysis.html', context={"graphic": graphic})
def parameters(request):
if request.method == 'POST':
form = UserDeviceDataForm(request.POST)
if form.is_valid():
UserDeviceData.objects.filter(user=request.user.username).update(
mac_address=form.data['mac_address'],
auth_key=form.data['auth_key']
)
messages.success(request, 'Данные обновлены')
return redirect('home')
else:
messages.error(request, 'Данные не обновлены')
else:
form = UserDeviceDataForm()
return render(request, 'activity/parameters.html', {"form": form})
| krisrrr/SIMS2_0 | activity/views.py | views.py | py | 6,286 | python | en | code | 0 | github-code | 13 |
31042809956 | # databaseAccessExample.py
#
# Example access script for SQLite database
#
# More resources:
# https://www.tutorialspoint.com/sqlite/sqlite_python.htm
# https://www.w3schools.com/sql/
#
import sqlite3
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import datetime as dt
databaseFile = "D:\\UKRIT_2021\\Database\\ukirt_2021-09-07.db"
index = 0
magnitude,colors = [],[]
sat_name,norad=[],[]
jlist,klist,zlist,hlist,ylist = [],[],[],[],[] #temporary lists to hold magnitudes per satellite visit
jmean,kmean,zmean,hmean,ymean = [],[],[],[],[] #lists to hold mean values of magnitudes for each visit (from the lists above)
elongation_list,elongation_mean = [],[] #elongation list and mean as the above
zminusy,yminusj,jminush,hminusk = [],[],[],[] #plotted values/color indices
zyelong,yjelong,jhelong,hkelong = [],[],[],[] #Since some mean values may be empty (no observations), a matching elongation list is made for each color index
#not currently used, but will perhaps be later
def Colorize(filterband):
color = ''
if filterband == 'Z':
color = 'blue'
elif filterband == 'Y':
color = 'gold'
elif filterband == 'J':
color = '#66AABB'
elif filterband == 'H':
color = 'crimson'
elif filterband == 'K':
color = 'purple'
return(color)
# Open database file (will create new one if not exist)
try:
print("Opening database...")
conn = sqlite3.connect(databaseFile)
except Exception as e:
print("Failed!")
print(e)
exit()
print("Success!")
# Cursor for accessing the DB
cursor = conn.cursor()
# Print only norad_id, name from sats table only for HS-376 models
cursor.execute( "SELECT norad_id, name FROM sats WHERE type LIKE 'HS-376%';" )
for s in cursor:
sat_name.append(s[1])
norad.append(s[0])
for i in range(len(norad)):
magnitude,phase_angle,colors = [],[],[]
select_string_statement="""SELECT sats.norad_id, images.filter, images.start_time, targets.magnitude, targets.sun_elong_predicted
FROM ((sats INNER JOIN targets ON sats.id = targets.target_id) INNER JOIN images ON targets.image_id = images.id)
WHERE (sats.norad_id = """+str(norad[i])+"""
AND targets.rejected is NULL
AND targets.inst_mag IS NOT NULL
)
ORDER BY images.start_time;"""
# Print select items from combination of sats and targets and images tables
cursor.execute(select_string_statement)
for s in cursor:
if s[0] == 15308: #current just one satellite.will loop over more later if desired
if index == 0: #initialize first time
group_start_time = dt.datetime.strptime(s[2], '%Y-%m-%d %H:%M:%S.%f')
current_time = dt.datetime.strptime(s[2], '%Y-%m-%d %H:%M:%S.%f')
timedifference = (current_time - group_start_time).total_seconds()
if timedifference >= 0 and timedifference < 1800: #finding >= zero was subtle
#make lists of magnitudes in each band
if s[1] == "J":
jlist.append(s[3])
if s[1] == "H":
hlist.append(s[3])
if s[1] == "Y":
ylist.append(s[3])
if s[1] == "K":
klist.append(s[3])
if s[1] == "Z":
zlist.append(s[3])
elongation_list.append(s[4]) #also record elongation
else:
group_start_time = current_time #reset the group start time so that there so no observations are missed
#calculate mean values. If no values, append placeholder so all lists are the same length
if jlist:
jmean.append(np.median(np.array(jlist)))
else:
jmean.append([])
if hlist:
hmean.append(np.median(np.array(hlist)))
else:
hmean.append([])
if ylist:
ymean.append(np.median(np.array(ylist)))
else:
ymean.append([])
if klist:
kmean.append(np.median(np.array(klist)))
else:
kmean.append([])
if zlist:
zmean.append(np.median(np.array(zlist)))
else:
zmean.append([])
elongation_mean.append(np.median(np.array(elongation_list)))
#reset temporary lists
jlist,klist,zlist,hlist,ylist,elongation_list = [],[],[],[],[],[]
#Regardless of condition, values are appended in each part of the IF/ELSE statement
#These then are the first in the next grouping. There is probably a better way since this is in both.
if s[1] == "J":
jlist.append(s[3])
if s[1] == "H":
hlist.append(s[3])
if s[1] == "Y":
ylist.append(s[3])
if s[1] == "K":
klist.append(s[3])
if s[1] == "Z":
zlist.append(s[3])
elongation_list.append(s[4]) #also record elongation
#for bookeeping... but used at the moment
index=index+1
# the last bit of data after the last else statement (Enters if..but never gets to Else...so lists are not averaged)
if jlist:
jmean.append(np.median(np.array(jlist)))
else:
jmean.append([])
if hlist:
hmean.append(np.median(np.array(hlist)))
else:
hmean.append([])
if ylist:
ymean.append(np.median(np.array(ylist)))
else:
ymean.append([])
if klist:
kmean.append(np.median(np.array(klist)))
else:
kmean.append([])
if zlist:
zmean.append(np.median(np.array(zlist)))
else:
zmean.append([])
elongation_mean.append(np.median(np.array(elongation_list)))
print('++++++++++++++++++++++++++++')
print('VISITS')
print(len(elongation_mean))
print('++++++++++++++++++++++++++++')
#creating the points to plot. Checks if no value in either filter (skips if so)Thus the matching of elongation
for i in range(len(elongation_mean)):
if zmean[i] and ymean[i]:
zminusy.append(zmean[i] - ymean[i])
zyelong.append(elongation_mean[i])
if ymean[i] and jmean[i]:
yminusj.append(ymean[i] - jmean[i])
yjelong.append(elongation_mean[i])
if jmean[i] and hmean[i]:
jminush.append(jmean[i] - hmean[i])
jhelong.append(elongation_mean[i])
if hmean[i] and kmean[i]:
hminusk.append(hmean[i] - kmean[i])
hkelong.append(elongation_mean[i])
fig, (ax1) = plt.subplots(figsize=(12,6))
fig.suptitle('Satellite 15308', size=25)
ax1.set_ylabel('Color Indices',size=20)
ax1.set_xlabel('Elongation (degrees)', size = 20)
ax1.grid(True)
ax1.scatter(zyelong,zminusy,color='blue',s = 4)
ax1.scatter(yjelong,yminusj,color='gold',s = 4)
ax1.scatter(jhelong,jminush,color='#66AABB',s = 4)
ax1.scatter(hkelong,hminusk,color='crimson',s = 4)
ax1.set_xlim([50,180])
ax1.set_ylim([-2, 2])
zy = mpatches.Patch(color='blue', label='Z-Y',linewidth=.5)
yj = mpatches.Patch(color='gold', label='Y-J',linewidth=.5)
jh= mpatches.Patch(color='#66AABB', label='J-H',linewidth=.5)
hk = mpatches.Patch(color='crimson', label='H-K',linewidth=.5)
plt.rcParams["legend.fontsize"] = 12
ax1.legend(handles=[zy,yj,jh,hk])
plt.savefig('D:\\UKRIT_2021\\xx_PLOTS_xx\\test.png')
plt.close('all')
# Commit changes to database and close
conn.commit()
conn.close()
| ngc1535git/UKIRT_database | queryDB_1.py | queryDB_1.py | py | 6,884 | python | en | code | 0 | github-code | 13 |
27643795894 | import csv, pickle
import pandas as pd
input_file = "raw_data/buchwald.csv"
# input_file = "raw_data/uspto_raw_head5k.txt"
# output_file = "processed_data/buchwald.csv"
output_file = "processed_data/buchwald.pkl"
with open(input_file, 'r') as csvfile:
rows = list(csv.reader(csvfile, delimiter = ','))[1:]
# with open(output_file, 'w') as csvfile:
# fieldnames = ["ID", "X", "Y"]
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# writer.writeheader()
# for i,row in enumerate(rows):
# reaction_id = "reactions_" + str(i+1)
# reactant, catalyst, product = row[0].split('>')
# reactions_dict = {"reactant": reactant, "catalyst": catalyst, "product": product}
# yields = row[1]
# writer.writerow({'ID': reaction_id, 'X': reactions_dict, 'Y': yields})
reaction_id_lst, reaction_dict_lst, yields_lst = [], [], []
for i,row in enumerate(rows):
reaction_id = "reactions_" + str(i+1)
reaction_id_lst.append(reaction_id)
reactant, catalyst, product = row[0].split('>')
reactions_dict = {"reactant": reactant, "catalyst": catalyst, "product": product}
reaction_dict_lst.append(reactions_dict)
yields = float(row[1])
yields_lst.append(yields)
data = {"ID": reaction_id_lst, "X": reaction_dict_lst, "Y": yields_lst}
df = pd.DataFrame(data, columns = ["ID", "X", "Y"])
pickle.dump(df, open(output_file, 'wb'))
# with open(output_file, 'r') as csvfile:
# reader = list(csv.reader(csvfile, delimiter=','))[1:]
# for row in reader:
# reactions_dict = row[1]
# print(reactions_dict)
| kexinhuang12345/data_process | data_process/buchwald_yield.py | buchwald_yield.py | py | 1,525 | python | en | code | 1 | github-code | 13 |
39532118985 | import statistics
import numpy as np
import pandas as pd
from osu_analysis import BeatmapIO, ReplayIO, StdMapData, StdReplayData, StdScoreData, Gamemode
from app.misc.Logger import Logger
from app.misc.utils import Utils
from app.misc.osu_utils import OsuUtils
class ScoreNpy():
    """Builds and slices per-note score DataFrames from beatmap/replay pairs.

    Each row describes one scored hit: where/when the map expected it
    (``*_MAP`` columns) and where/when the player hit (``*_HIT`` columns),
    indexed by (map MD5, replay timestamp, mods, note index).
    """
    logger = Logger.get_logger(__name__)
    # Positions of the four levels in the DataFrame MultiIndex.
    IDX_MAP = 0
    IDX_ENTRY = 1
    IDX_MOD = 2
    IDX_NOTE = 3
    COLUMNS = [
        'MD5', 'TIMESTAMP', 'MODS', 'IDXS',
        'CS', 'AR', 'T_MAP', 'X_MAP', 'Y_MAP', 'T_HIT', 'X_HIT', 'Y_HIT', 'TYPE_MAP', 'TYPE_HIT'
    ]
    @staticmethod
    def __get_map_data_from_file(file_name):
        """Open a .osu file from disk and parse it; returns None and logs on error."""
        try: beatmap = BeatmapIO.open_beatmap(file_name)
        except Exception as e:
            ScoreNpy.logger.error(Utils.get_traceback(e, 'Error opening map'))
            return None
        return ScoreNpy.__get_map_data_from_object(beatmap)
    @staticmethod
    def __get_map_data_from_object(beatmap):
        """Extract std map data from a beatmap object; None if non-osu!std or unreadable."""
        if beatmap.gamemode != Gamemode.OSU:
            ScoreNpy.logger.info(f'{Gamemode(beatmap.gamemode)} gamemode is not supported')
            return None
        try: map_data = StdMapData.get_map_data(beatmap)
        except Exception as e:
            ScoreNpy.logger.error(Utils.get_traceback(e, 'Error reading map'))
            return None
        return map_data
    @staticmethod
    def __get_replay_data_from_file(file_name):
        """Open a .osr replay from disk and parse it; returns None and logs on error."""
        try: replay = ReplayIO.open_replay(file_name)
        except Exception as e:
            ScoreNpy.logger.error(Utils.get_traceback(e, 'Error opening replay'))
            return None
        return ScoreNpy.__get_replay_data_from_object(replay)
    @staticmethod
    def __get_replay_data_from_object(replay):
        """Extract std replay data from a replay object; None on parse error."""
        try: replay_data = StdReplayData.get_replay_data(replay)
        except Exception as e:
            ScoreNpy.logger.error(Utils.get_traceback(e, 'Error reading replay'))
            return None
        return replay_data
    @staticmethod
    def __process_mods(map_data, replay_data, replay):
        """Rescale the 'time' columns in place for rate-changing mods.

        DT/NC compress times by 1.5x, HT stretches by 1.5x; HR is
        recognized but deliberately a no-op here.
        """
        if replay.mods.has_mod('DT') or replay.mods.has_mod('NC'):
            map_data['time'] /= 1.5
            replay_data['time'] /= 1.5
            return
        if replay.mods.has_mod('HT'):
            map_data['time'] *= 1.5
            replay_data['time'] *= 1.5
            return
        if replay.mods.has_mod('HR'):
            # Do nothing
            pass
    @staticmethod
    def __get_data(map_data, replay_data, map_md5, timestamp, mods, cs, ar):
        """Score the replay against the map and assemble the indexed DataFrame.

        Hit/miss windows are fixed at +/-100 ms; AR/CS are converted to
        ms / px via OsuUtils before scoring.
        """
        # Process score data
        settings = StdScoreData.Settings()
        settings.ar_ms = OsuUtils.ar_to_ms(ar)
        settings.hitobject_radius = OsuUtils.cs_to_px(cs)/2
        settings.pos_hit_range = 100 # ms point of late hit window
        settings.neg_hit_range = 100 # ms point of early hit window
        settings.pos_hit_miss_range = 100 # ms point of late miss window
        settings.neg_hit_miss_range = 100 # ms point of early miss window
        score_data = StdScoreData.get_score_data(replay_data, map_data, settings)
        size = score_data.shape[0]
        df = pd.DataFrame()
        df['MD5'] = [ map_md5 ] * size
        df['TIMESTAMP'] = np.full(size, int(timestamp))
        df['MODS'] = np.full(size, mods)
        df['IDXS'] = np.arange(size)
        df['CS'] = np.full(size, cs)
        df['AR'] = np.full(size, ar)
        df['T_MAP'] = score_data['map_t']
        df['X_MAP'] = score_data['map_x']
        df['Y_MAP'] = score_data['map_y']
        df['T_HIT'] = score_data['replay_t']
        df['X_HIT'] = score_data['replay_x']
        df['Y_HIT'] = score_data['replay_y']
        df['TYPE_MAP'] = score_data['action']
        df['TYPE_HIT'] = score_data['type']
        df.set_index(['MD5', 'TIMESTAMP', 'MODS', 'IDXS'], inplace=True)
        return df
    @staticmethod
    def get_blank_data() -> pd.DataFrame:
        """Return an empty DataFrame with the standard columns and MultiIndex."""
        df = pd.DataFrame(columns = ScoreNpy.COLUMNS)
        df.set_index(['MD5', 'TIMESTAMP', 'MODS', 'IDXS'], inplace=True)
        return df
    @staticmethod
    def get_first_entry(score_data: pd.DataFrame, groupby: list = ['MD5', 'TIMESTAMP', 'MODS']) -> pd.DataFrame:
        """Return the rows of the first (md5, timestamp, mods) group only."""
        for entry in score_data.groupby(groupby):
            # Gives (idx, data)
            return entry[1]
    @staticmethod
    def get_entries(score_data: pd.DataFrame, groupby: list = ['MD5', 'TIMESTAMP', 'MODS']) -> pd.DataFrame:
        """
        This is not expected to be used directly, but rather
        to serve as an example on how to get entries
        """
        for entry in score_data.groupby(groupby):
            yield entry[1]
    @staticmethod
    def get_idx_md5s(score_data: pd.DataFrame) -> pd.Index:
        """Index values of the map-MD5 level (one per row)."""
        return score_data.index.get_level_values(ScoreNpy.IDX_MAP)
    @staticmethod
    def get_idx_timestamps(score_data: pd.DataFrame) -> pd.Index:
        """Index values of the replay-timestamp level (one per row)."""
        return score_data.index.get_level_values(ScoreNpy.IDX_ENTRY)
    @staticmethod
    def get_idx_mods(score_data: pd.DataFrame) -> pd.Index:
        """Index values of the mods level (one per row)."""
        return score_data.index.get_level_values(ScoreNpy.IDX_MOD)
    @staticmethod
    def get_idx_notes(score_data: pd.DataFrame) -> pd.Index:
        """Index values of the note-index level (one per row)."""
        return score_data.index.get_level_values(ScoreNpy.IDX_NOTE)
    @staticmethod
    def get_num_maps(score_data: pd.DataFrame) -> int:
        """Number of distinct maps (unique MD5s) present in the data."""
        return score_data.index.unique(level=ScoreNpy.IDX_MAP).shape[0]
    @staticmethod
    def get_num_entries(score_data: pd.DataFrame) -> int:
        """Number of distinct replay timestamps present in the data."""
        return score_data.index.unique(level=ScoreNpy.IDX_ENTRY).shape[0]
    @staticmethod
    def get_first_entry_md5(score_data: pd.DataFrame) -> str:
        """MD5 component of the first row's index."""
        return score_data.index.values[0][ScoreNpy.IDX_MAP]
    @staticmethod
    def get_first_entry_timestamp(score_data: pd.DataFrame) -> int:
        """Timestamp component of the first row's index."""
        return score_data.index.values[0][ScoreNpy.IDX_ENTRY]
    @staticmethod
    def get_first_entry_mod(score_data: pd.DataFrame) -> int:
        """Mods component of the first row's index."""
        return score_data.index.values[0][ScoreNpy.IDX_MOD]
    @staticmethod
    def compile_data(beatmap, replay):
        """Parse a beatmap/replay pair and return (map_data, replay_data, score df).

        NOTE(review): although str filenames are accepted for parsing, the
        accesses below (``replay.mods``, ``replay.timestamp``,
        ``beatmap.metadata`` ...) assume object inputs and would raise
        AttributeError for str arguments -- confirm the str branches are
        actually exercised anywhere.
        """
        if type(beatmap) is not str:
            map_data = ScoreNpy.__get_map_data_from_object(beatmap)
        else:
            map_data = ScoreNpy.__get_map_data_from_file(beatmap)
        if type(replay) is not str:
            replay_data = ScoreNpy.__get_replay_data_from_object(replay)
        else:
            replay_data = ScoreNpy.__get_replay_data_from_file(replay)
        ScoreNpy.__process_mods(map_data, replay_data, replay)
        # Get data
        # Some replays carry timestamps outside the platform's valid range;
        # fall back to epoch 0 in that case.
        try: timestamp = replay.timestamp.timestamp()
        except OSError:
            timestamp = 0
        return map_data, replay_data, ScoreNpy.__get_data(
            map_data,
            replay_data,
            beatmap.metadata.beatmap_md5,
            timestamp,
            replay.mods.value,
            beatmap.difficulty.cs,
            beatmap.difficulty.ar
        )
| abraker-osu/osu-play-analyzer | app/data_recording/score_npy.py | score_npy.py | py | 6,923 | python | en | code | 2 | github-code | 13 |
33004282725 | import io
import time
import picamera
import logging
import socketserver
from threading import Condition
from http import server
from google.cloud import vision
from google.cloud.vision_v1 import types
from google.oauth2 import service_account
# Constants
# Google Cloud Vision client authenticated with a local service-account key.
# NOTE(review): the key path is hard-coded to this specific machine.
credentials = service_account.Credentials.from_service_account_file('/home/pdemotion/Desktop/ui/ServiceAccountToken.json')
client = vision.ImageAnnotatorClient(credentials=credentials)
# Face-annotation likelihood fields inspected when classifying the emotion.
EMOTION_RANGE = ["joy_likelihood", "sorrow_likelihood", "anger_likelihood"]
# This Class handles the output from Picamera.
class StreamingOutput(object):
    """Buffers MJPEG chunks written by picamera and publishes whole frames.

    picamera calls :meth:`write` with pieces of the MJPEG stream; any chunk
    starting with the JPEG start-of-image marker begins a new frame, at which
    point the previous frame's bytes are published via :attr:`frame` /
    :attr:`last_frame` and waiters on :attr:`condition` are notified.
    """

    def __init__(self):
        # BUG FIX: this method was named ``_init_`` (single underscores), so
        # Python never called it and ``write`` crashed with AttributeError on
        # the missing ``buffer``/``condition`` attributes.
        self.frame = None
        self.last_frame = None
        self.buffer = io.BytesIO()
        self.condition = Condition()

    def write(self, buf):
        """Accumulate *buf*; on a JPEG SOI marker, publish the completed frame."""
        if buf.startswith(b'\xff\xd8'):
            # New frame, copy the existing buffer's content and notify all
            # clients it's available.
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.last_frame = self.frame
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)
# This Class handles client actions and responses sent by the server.
class StreamingHandler(server.BaseHTTPRequestHandler):
    """Serves the UI page, the MJPEG camera stream and the button endpoints.

    Relies on the module-level ``output`` (camera frame buffer) and
    ``client`` (Google Cloud Vision) objects.
    """
    def do_GET(self):
        if self.path == '/':
            # Redirect the bare root to the UI page.
            self.send_response(301)
            self.send_header('Location', '/index.html')
            self.end_headers()
        elif self.path == '/index.html':
            # Serve the static UI page from the working directory.
            with open('index.html', 'rb') as f:
                content = f.read()
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len(content))
            self.end_headers()
            self.wfile.write(content)
        elif self.path == '/ScanIcon.png':
            # Serve the scan icon. NOTE(review): the Content-Type says JPEG
            # for a .png file -- browsers usually sniff it, but confirm.
            with open('ScanIcon.png', 'rb') as f:
                content = f.read()
            self.send_response(200)
            self.send_header('Content-Type', 'image/jpeg')
            self.send_header('Content-Length', len(content))
            self.end_headers()
            self.wfile.write(content)
        elif self.path == '/stream.mjpg':
            # Handle Video feed display, emulating 'true mirror' behaviour
            self.send_response(200)
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
            self.end_headers()
            try:
                # Push one JPEG part per camera frame until the client drops.
                while True:
                    with output.condition:
                        output.condition.wait()
                        frame = output.frame
                    self.wfile.write(b'--FRAME\r\n')
                    self.send_header('Content-Type', 'image/jpeg')
                    self.send_header('Content-Length', len(frame))
                    self.end_headers()
                    self.wfile.write(frame)
                    self.wfile.write(b'\r\n')
            except Exception as e:
                logging.warning('Removed streaming client %s: %s', self.client_address, str(e))
        elif self.path == '/centre':
            # Handle centre button click event: run face/emotion detection on
            # the most recent camera frame via Google Cloud Vision.
            self.send_response(200)
            time.sleep(1)
            emotion = None
            # Store last frame to send for emotion recognition
            frame = output.last_frame
            if frame:
                image = types.Image(content=frame)
                response = client.face_detection(image=image)
                # Analize response
                face_annotations = response.face_annotations
                for _, face in enumerate(face_annotations):
                    # Only likelihoods strictly above POSSIBLE count; the last
                    # face examined wins if several are detected.
                    max_likelihood = types.Likelihood.POSSIBLE
                    for attr in EMOTION_RANGE:
                        likelihood = getattr(face, attr)
                        if likelihood > max_likelihood:
                            max_likelihood = likelihood
                            emotion = attr.split("_")[0].title()
            else:
                logging.warning("Could not acces a valid frame")
            # Send emotion to the webpage
            if emotion is None:
                emotion = "No emotion"
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            self.wfile.write(emotion.encode('utf-8'))
        elif self.path == '/center':
            # Handle center button click event
            # NOTE(review): '/center' is a stub distinct from the working
            # '/centre' endpoint above -- possibly leftover; confirm which
            # spelling the UI actually requests.
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'Center button pressed')
        elif self.path == '/right':
            # Handle right button click event
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'Right button pressed')
        else:
            # Unknown path: 404.
            self.send_error(404)
            self.end_headers()
# Streaming server instance used for hosting
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    """Threaded HTTP server hosting the camera stream.

    Each client connection is handled on its own daemon thread, and the
    listening address can be rebound immediately after a restart.
    """
    daemon_threads = True
    allow_reuse_address = True
# Get video feed from the camera and send it to the html page
with picamera.PiCamera(resolution='640x480', framerate=32) as camera:
    output = StreamingOutput()
    #Uncomment the next line to change your Pi's Camera rotation (in degrees)
    #camera.rotation = 90
    camera.start_recording(output, format='mjpeg')
    try:
        address = ('', 8000)
        # NOTE(review): this rebinds the name ``server`` (previously the
        # http.server module) to the server instance. It works because the
        # handler classes are already defined, but renaming would be safer.
        server = StreamingServer(address, StreamingHandler)
        server.serve_forever()
    finally:
        # Always stop the camera, even if serve_forever() raises.
        camera.stop_recording()
        camera.close()
| AlexandruSto/Smart_Mirror | homepage.py | homepage.py | py | 5,683 | python | en | code | 0 | github-code | 13 |
20993682701 | import json
import glob
# Helper function to extract individual name from filepath
def remove_prefix(my_string, prefix, suffix):
    """Strip an exact leading *prefix* and trailing *suffix* from *my_string*.

    BUG FIX: the original used ``str.lstrip``/``str.rstrip``, which remove any
    run of characters drawn from the argument *as a set*, not the literal
    substring -- e.g. an individual name beginning with any letter that also
    appears in the directory prefix was silently truncated.
    """
    if prefix and my_string.startswith(prefix):
        my_string = my_string[len(prefix):]
    if suffix and my_string.endswith(suffix):
        my_string = my_string[:-len(suffix)]
    return my_string
file_list = glob.glob("../PoplarVCFsAnnotated/*.filter.vcf")

data = {}

# Open the output database with a context manager so the handle is always
# closed (the original opened 'main_db.json' for writing and never closed it).
# NOTE(review): ``data``/``curr_individual`` are built but never written out
# here -- the aggregation step appears unfinished.
with open('main_db.json', 'w') as destf:
    for f in file_list:
        individual = remove_prefix(f, '../PoplarVCFsAnnotated/', '.filter.vcf')
        curr_individual = {}
        print("reading "+individual)
| LZhang98/snp-viewer | glob_test.py | glob_test.py | py | 515 | python | en | code | 0 | github-code | 13 |
73282811219 | import os
import base64
import zipfile
import logging
import collections
from bs4 import BeautifulSoup
# Module-level logger used for parse warnings (e.g. missing covers).
logger = logging.getLogger(__name__)
class FB2:
    """Parser for FB2 e-books (plain ``.fb2`` or zipped ``.fb2.zip``).

    ``generate_references`` loads the XML immediately; ``generate_metadata``
    and ``generate_content`` then populate :attr:`metadata` and
    :attr:`content` (a list of ``[level, title, html]`` entries).
    """
    def __init__(self, filename):
        self.filename = filename
        self.zip_file = None
        self.xml = None          # BeautifulSoup tree, set by generate_references
        self.metadata = None     # namedtuple, set by generate_metadata
        self.content = []        # [level, title, html] chapter entries
        self.generate_references()
    def generate_references(self):
        """Read the book file (unzipping if needed) and build the XML tree.

        NOTE(review): if a ``.fb2.zip`` contains no ``.fb2`` member,
        ``book_text`` is unbound and this raises NameError; plain files are
        read in text mode with the platform default encoding -- confirm.
        """
        if self.filename.endswith('.fb2.zip'):
            this_book = zipfile.ZipFile(
                self.filename, mode='r', allowZip64=True)
            # Use the first .fb2 member found in the archive.
            for i in this_book.filelist:
                if os.path.splitext(i.filename)[1] == '.fb2':
                    book_text = this_book.read(i.filename)
                    break
        else:
            with open(self.filename, 'r') as book_file:
                book_text = book_file.read()
        self.xml = BeautifulSoup(book_text, 'lxml')
    def generate_metadata(self):
        """Populate :attr:`metadata` (title, author, year, isbn, tags, cover)."""
        # All metadata can be parsed in one pass
        all_tags = self.xml.find('description')
        title = all_tags.find('book-title').text
        if title == '' or title is None:
            # Fall back to the file name without extension.
            title = os.path.splitext(
                os.path.basename(self.filename))[0]
        author = all_tags.find(
            'author').getText(separator=' ').replace('\n', ' ')
        if author == '' or author is None:
            author = '<Unknown>'
        else:
            author = author.strip()
        # TODO
        # Account for other date formats
        try:
            year = int(all_tags.find('date').text)
        except ValueError:
            year = 9999
        # ISBN and tags are not extracted from FB2 metadata yet.
        isbn = None
        tags = None
        cover = self.generate_book_cover()
        Metadata = collections.namedtuple(
            'Metadata', ['title', 'author', 'year', 'isbn', 'tags', 'cover'])
        self.metadata = Metadata(title, author, year, isbn, tags, cover)
    def generate_content(self, temp_dir):
        """Build :attr:`content` from <section> elements and extract images.

        Embedded base64 images are written to *temp_dir* and their
        references rewritten to ``<img src=...>`` tags; the cover (if any)
        is inserted as the first entry.
        """
        # TODO
        # Check what's up with recursion levels
        # Why is the TypeError happening in get_title
        def get_title(element):
            # Return (title text, title XML) of the first <title> child, or
            # (None, None) if the element is not iterable.
            this_title = '<No title>'
            title_xml = '<No title xml>'
            try:
                for i in element:
                    if i.name == 'title':
                        this_title = i.getText(separator=' ')
                        this_title = this_title.replace('\n', '').strip()
                        title_xml = str(i.unwrap())
                        break
            except TypeError:
                return None, None
            return this_title, title_xml
        def recursor(level, element):
            # Depth-first walk of nested <section>s; only leaf sections below
            # the top level contribute content entries.
            children = element.findChildren('section', recursive=False)
            if not children and level != 1:
                this_title, title_xml = get_title(element)
                self.content.append(
                    [level, this_title, title_xml + str(element)])
            else:
                for i in children:
                    recursor(level + 1, i)
        first_element = self.xml.find('section') # Recursive find
        siblings = list(first_element.findNextSiblings('section', recursive=False))
        siblings.insert(0, first_element)
        for this_element in siblings:
            this_title, title_xml = get_title(this_element)
            # Do not add chapter content in case it has sections
            # inside it. This prevents having large Book sections that
            # have duplicated content
            section_children = this_element.findChildren('section')
            chapter_text = str(this_element)
            if section_children:
                chapter_text = this_title
            self.content.append([1, this_title, chapter_text])
            recursor(1, this_element)
        # Extract all images to the temp_dir
        for i in self.xml.find_all('binary'):
            image_name = i.get('id')
            image_path = os.path.join(temp_dir, image_name)
            image_string = f'<image l:href="#{image_name}"'
            replacement_string = f'<p></p><img src=\"{image_path}\"'
            for j in self.content:
                j[2] = j[2].replace(
                    image_string, replacement_string)
            try:
                image_data = base64.decodebytes(i.text.encode())
                with open(image_path, 'wb') as outimage:
                    outimage.write(image_data)
            except AttributeError:
                pass
        # Insert the book cover at the beginning
        # NOTE(review): this entry is a tuple while chapter entries are
        # lists -- the image-rewrite loop above mutates j[2], so order of
        # operations matters here; confirm consumers accept both.
        cover_image = self.generate_book_cover()
        if cover_image:
            cover_path = os.path.join(
                temp_dir, os.path.basename(self.filename)) + ' - cover'
            with open(cover_path, 'wb') as cover_temp:
                cover_temp.write(cover_image)
            self.content.insert(
                0, (1, 'Cover', f'<center><img src="{cover_path}" alt="Cover"></center>'))
    def generate_book_cover(self):
        """Return the cover image bytes, or None (logged) if absent."""
        cover = None
        try:
            cover_image_xml = self.xml.find('coverpage')
            for i in cover_image_xml:
                cover_image_name = i.get('l:href')
                cover_image_data = self.xml.find_all('binary')
                for i in cover_image_data:
                    if cover_image_name.endswith(i.get('id')):
                        cover = base64.decodebytes(i.text.encode())
        except (AttributeError, TypeError):
            # Catch TypeError in case no images exist in the book
            logger.warning('Cover not found: ' + self.filename)
        return cover
| BasioMeusPuga/Lector | lector/readers/read_fb2.py | read_fb2.py | py | 5,534 | python | en | code | 1,479 | github-code | 13 |
9857583041 | #Exercicio 4
#Faça um programa em Python que solicite ao usuário sua altura e sexo,
#calcule e imprima o seu peso ideal. Utilize a seguinte convenção:
#▪ Para homens: (72.7*h) – 58
#▪ Para mulheres: (62.1*h) – 44.7
alt = float(input('Digite sua altura em metros: '))
sexo = input('Digite o seu genero h/m: ')
if sexo == 'H' or 'h':
peso = (72.7*alt)-58
elif sexo == 'M' or 'm':
peso = (62.1*alt)-44.7
else:
peso = 0
if peso == 0:
print('Sexo invalido')
else:
print(f'O peso ideal {peso:.2f} para essa pessoa') | Lipesti/Exercicios6 | exerc4.py | exerc4.py | py | 567 | python | pt | code | 0 | github-code | 13 |
9638945222 | import requests
import xml.etree.ElementTree as ET
import tkinter as tk
import ssl
# WARNING(security): this disables TLS certificate verification for every
# HTTPS connection made by this process, exposing it to man-in-the-middle
# attacks. Prefer installing the proper CA bundle instead.
ssl._create_default_https_context = ssl._create_unverified_context

def fetch_rates(event=None):
    """Fetch today's TCMB exchange-rate XML and show buy/sell for the
    currency code typed into the entry box (bound to Enter and the button).
    """
    selected_currency = currency_entry.get()
    url = "https://www.tcmb.gov.tr/kurlar/today.xml"
    response = requests.get(url=url)
    tree = ET.fromstring(response.content)
    # Scan all <Currency> elements for the requested code (e.g. "USD").
    for currency in tree.findall(".//Currency"):
        currency_code = currency.get("Kod")
        if currency_code == selected_currency:
            banknote_buying = currency.find("BanknoteBuying").text
            banknote_selling = currency.find("BanknoteSelling").text
            # Spread between the selling and buying rates.
            result = float(banknote_selling) - float(banknote_buying)
            result_label.config(text=f"Alış: {banknote_buying}\nSatış : {banknote_selling}\nAlış Satış Farkı: {result}")

# Minimal Tk UI: an entry, a fetch button and a result label.
root = tk.Tk()
root.title("TL Karşılığı Birim Fiyat")
currency_entry = tk.Entry(root)
currency_entry.pack()
currency_entry.bind("<Return>", fetch_rates)
fetch_button = tk.Button(root, text="Dövizin TL karşılık fiyatını getir", command=fetch_rates)
fetch_button.pack()
result_label = tk.Label(root, text="")
result_label.pack()
root.mainloop()
| unsatisfieddeveloper/currencyApi | currencyTracker.py | currencyTracker.py | py | 1,195 | python | en | code | 0 | github-code | 13 |
27309135585 | from clef.esdoc import get_doc, get_wdcc, errata, retrieve_error, citation
from esdoc_fixtures import *
from code_fixtures import dids6
#import pytest
def test_esdoc_urls():
    """Placeholder: ES-DOC URL checks not implemented yet."""
    #dids=[]
    assert True
def test_get_model_doc():
    """Placeholder: model-document retrieval checks not implemented yet."""
    assert True
@pytest.mark.xfail
def test_get_doc():
    """get_doc builds ES-DOC search URLs for model/experiment/mip queries.

    Marked xfail: the expected URLs track a live external service.
    NOTE(review): ``pytest`` is only in scope via the star import from
    esdoc_fixtures (the direct import above is commented out) -- confirm.
    """
    base = 'https://api.es-doc.org/2/document/search-name?client=ESDOC-VIEWER-DEMO&encoding=html&'
    assert get_doc('model', 'MIROC6', 'CMIP6') == ( base +
        'project=CMIP6&name=MIROC6&type=CIM.2.SCIENCE.MODEL')
    assert get_doc('experiment', 'historical') == ( base +
        'project=CMIP6&name=historical&type=cim.2.designing.NumericalExperiment')
    assert get_doc('mip', 'FAFMIP') == ( base +
        'project=CMIP6&name=FAFMIP&type=cim.2.designing.Project')
def test_get_wdcc():
    """get_wdcc resolves CMIP6/CMIP5 dataset ids to WDCC URLs + responses,
    and returns (None, None) for unsupported projects (e.g. CORDEX).
    NOTE(review): hits the live cera-www.dkrz.de service.
    """
    did='CMIP6.CMIP.MRI.MRI-ESM2-0.historical.none.r1i1p1f1'
    url, res6 = get_wdcc(did)
    json6 = res6.json()
    assert url == 'https://cera-www.dkrz.de/WDCC/ui/cerasearch/cerarest/cmip6?input=CMIP6.CMIP.MRI.MRI-ESM2-0.historical'
    assert json6['identifier']['id'] == '10.22033/ESGF/CMIP6.6842'
    did='cmip5.output1.MIROC.MIROC5.historical.mon.atmos.Amon.r1i1p1.v20111028'
    url, res5 = get_wdcc(did)
    json5 = res5.json()
    assert url == ("https://cera-www.dkrz.de/WDCC/ui/cerasearch/solr/select?" +
        "rows=1&wt=json&q=entry_name_s:cmip5*output1*MIROC*MIROC5")
    assert json5['response']['docs'][0]['entry_name_s'] == "cmip5 output1 MIROC MIROC5"
    did='cordex.something.or.other'
    assert get_wdcc(did) == (None, None)
@pytest.mark.xfail
def test_errata():
    """errata maps a tracking handle to its errata ids (None if none).
    Marked xfail: depends on the live errata service."""
    assert ( errata('hdl:21.14100/e4193a02-6405-49b6-8ad3-65def741a4dd') ==
        ["b6302400-3620-c8f1-999b-d192c0349084","2f6b5963-f87e-b2df-a5b0-2f12b6b68d32"])
    assert errata('hdl:21.14100/7d16d79b-77c8-446c-9039-36c6803752f2') is None
@pytest.mark.xfail
def test_retrieve_error(test_error):
    """retrieve_error returns the full record for an errata id
    (``test_error`` fixture holds the expected record). Marked xfail:
    depends on the live errata service."""
    assert retrieve_error('ce889690-1ef3-6f46-9152-ccb27fc42490') == test_error
def test_citation(dids6, citations):
    """citation builds the citation list for the dids6 fixture ids."""
    assert citation(dids6) == citations
| coecms/clef | test/test_esdoc.py | test_esdoc.py | py | 2,084 | python | en | code | 7 | github-code | 13 |
30784723375 | # -*- coding: utf-8 -*-
# Count word occurrences in the POS-tagged corpus and dump the
# word -> frequency table, one "word count" pair per line.
with open("GDN_pos_filtered.txt") as fichier:
    dico = dict()
    for ligne in fichier:
        # BUG FIX: str.split() with no argument splits on any whitespace run
        # and drops the trailing newline; the original split(" ") glued "\n"
        # onto the last word of every line and could count empty strings.
        for w in ligne.split():
            dico[w] = dico.get(w, 0) + 1

# Deliberately NOT a context manager: the line following this block
# (distribution.close()) closes the handle.
distribution = open("distribution_gdn.txt", "w")
for word in dico:
    distribution.write(word + " " + str(dico[word]) + "\n")
distribution.close() | nicolasdugue/hackatal2019 | EtudeEmbeddings/distrib.py | distrib.py | py | 352 | python | en | code | 3 | github-code | 13 |
6148837196 | import uos
import settings
from time import sleep_ms
from machine import Pin
from primitives.pushbutton import Pushbutton
from homie.node import HomieNode
from homie.device import HomieDevice
from homie.property import HomieProperty
from homie.constants import TRUE, FALSE, BOOLEAN
def reset(led):
    """Arm the watchdog and blink *led* until the device reboots.

    The WDT is fed exactly once; the endless blink loop never feeds it
    again, so the watchdog times out and resets the board (NOTE(review):
    that appears to be the intent -- confirm the WDT default timeout on
    this port).
    """
    import machine
    wdt = machine.WDT()
    wdt.feed()
    while True:
        led(not led())
        sleep_ms(250)
class SmartSocket(HomieNode):
    """Homie node driving a Gosund SP1 smart plug: relay pin 14, blue LED
    pin 1, red LED pin 13, push button on pin 3 (short press toggles,
    long press triggers :func:`reset`)."""
    def __init__(self, name="Relay 16A"):
        super().__init__(id="relay", name=name, type="Gosund SP1")
        # disable REPL so we can use the blue led
        uos.dupterm(None, 1)
        self.led_b = Pin(1, Pin.OUT, value=1) # Blue LED
        self.led_r = Pin(13, Pin.OUT, value=1) # Red LED
        self.relay = Pin(14, Pin.OUT)
        # MQTT-settable boolean "power" property; on_power_msg applies it.
        self.p_power = HomieProperty(
            id="power",
            name="Power",
            settable=True,
            datatype=BOOLEAN,
            default=FALSE,
            on_message=self.on_power_msg,
        )
        self.add_property(self.p_power)
        self.button = Pushbutton(Pin(3, Pin.IN))
        self.button.release_func(self.toggle, ())
        self.button.long_func(reset, (self.led_r,))
    def off(self):
        """Drive the relay pin low; blue LED off, red LED on."""
        self.relay(0)
        self.led_b(0)
        self.led_r(1)
    def on(self):
        """Drive the relay pin high; blue LED on, red LED off."""
        self.relay(1)
        self.led_b(1)
        self.led_r(0)
    def on_power_msg(self, topic, payload, retained):
        """Apply an incoming MQTT 'power' payload ('true'/'false')."""
        if payload == FALSE:
            self.off()
        elif payload == TRUE:
            self.on()
    def toggle(self):
        """Button handler: flip the relay and sync the Homie power property."""
        if self.p_power.value == TRUE:
            self.off()
            self.p_power.value = FALSE
        else:
            self.on()
            self.p_power.value = TRUE
def main():
    """Create the Homie device with a single SmartSocket node and run it."""
    homie = HomieDevice(settings)
    homie.add_node(SmartSocket())
    homie.run_forever()

if __name__ == "__main__":
    sleep_ms(500)  # brief settle delay before bringing the device up
    main()
| microhomie/microhomie | examples/gosund/main.py | main.py | py | 1,906 | python | en | code | 78 | github-code | 13 |
18609597674 | # -*- coding: utf-8 -*-
"""
Created on Mon May 31 18:20:27 2021
@author: dongting
"""
import os
import time
import socket
import numpy
import time
"""
Dynamixel Initialaztion
"""
# if os.name == 'nt':
# import msvcrt
# def getch():
# # return msvcrt.getch().decode()
# return msvcrt.getch()
# else:
# import sys, tty, termios
# fd = sys.stdin.fileno()
# old_settings = termios.tcgetattr(fd)
# def getch():
# try:
# tty.setraw(sys.stdin.fileno())
# ch = sys.stdin.read(1)
# finally:
# termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# return ch
from dynamixel_sdk import *
# Control table address
ADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model
ADDR_PRO_GOAL_POSITION = 116
ADDR_PRO_PRESENT_POSITION = 132
ADDR_PRO_PRESENT_VELOCITY = 128
ADDR_PRO_PRESENT_CURRENT = 126
ADDR_PRO_PRPFILE_VELOCITY = 112
ADDR_PRO_OPERATING_MODE = 11
ADDR_PRO_VELOCITY_LIMIT = 44
ADDR_PRO_GOAL_VELOCITY = 104
DXL_OPERATING_is_CURRENT = 0
DXL_OPERATING_is_VELOCITY = 1
DXL_OPERATING_is_POSITION = 3
DXL_OPERATING_is_EXTENDED_POSITION = 4
DXL_OPERATING_is_PWM = 3
# Protocol version
PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
DXL_ID_1 = 0 # Dynamixel ID : 1
BAUDRATE = 57600 # Dynamixel default baudrate : 57600
#DEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller
DEVICENAME = 'COM4' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
#DXL_MINIMUM_POSITION_VALUE = 695 # Dynamixel will rotate between this value
#DXL_MAXIMUM_POSITION_VALUE = 1445 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
#DXL_MOVING_STATUS_THRESHOLD = 10 # Dynamixel moving status threshold
DXL_PROFILE_SPEED = 200
DXL_PRO_GOAL_VELOCITY = 20
DXL_PRO_GOAL_CURRENT = 20
portHandler = PortHandler(DEVICENAME)
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
quit()
# Enable Dynamixel Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel_1 has been successfully connected")
# Position Control
dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_OPERATING_MODE,DXL_OPERATING_is_POSITION)
dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
# dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
DXL_PROFILE_SPEED = 200
DXL_GOAL_POSITION = 1500
dxl_comm_result_1, dxl_error_1 = packetHandler.write4ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_PRPFILE_VELOCITY,DXL_PROFILE_SPEED)
dxl_comm_result_1, dxl_error_1 = packetHandler.write4ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_GOAL_POSITION, DXL_GOAL_POSITION)
# Velocity Control
# Still developing this part......
dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, ADDR_PRO_OPERATING_MODE,DXL_OPERATING_is_VELOCITY)
# # dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, 98,0)
# dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, 44, 255)
# # dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1, 112, 4720)
# dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1,104,100)
# dxl_comm_result_1, dxl_error_1 = packetHandler.write1ByteTxRx(portHandler, DXL_ID_1,65,1)
| DuxtX/code_equipment | python/idealab_equipment/control_dynamixel_servo.py | control_dynamixel_servo.py | py | 4,900 | python | en | code | 0 | github-code | 13 |
39859190810 | from typing import Callable, Self
import flet as ft
class Button(ft.ElevatedButton):
def __init__(self,
text: str,
on_click: Callable[[Self, ft.ControlEvent], None] | None = None,
visible: bool = True,
disabled: bool = False):
super().__init__(
text=text,
on_click=lambda e: on_click(self, e),
style=ft.ButtonStyle(
side={
ft.MaterialState.DEFAULT: ft.BorderSide(2)
},
shape={
ft.MaterialState.DEFAULT: ft.RoundedRectangleBorder(radius=10),
}
),
visible=visible,
disabled=disabled,
)
| carimatics/switch-poke-pilot | switchpokepilot/ui/button.py | button.py | py | 751 | python | en | code | 3 | github-code | 13 |
18927276001 | # we have to decide which are dark beans
# and which are light beans
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# from skimage.filters import threshold_otsu
from joblib import Parallel, delayed
from demo_new_trial_light_end_to_end_catch import mask_for_beans_light_catch
from demo_new_trial_dark_end_to_end import mask_for_beans_dark
path = r"D:\Jellybean\Relevant\jellybean_data-master"
os.chdir(path)
folder_names = os.listdir()
catch = []
for i in folder_names:
path_temp = path + "\\" + i
os.chdir(path_temp)
first_file = path_temp + "\\" + os.listdir()[0]
# read the file
test_image_colored = cv2.imread(first_file)
# convert to yuv
# the y channel is the light/dark channel
test_image = cv2.cvtColor(test_image_colored, cv2.COLOR_BGR2YUV)
# extract the y component
test_image = test_image[:,:,0]
# get the lowest value of the pixel
lowest_value = np.min(test_image.ravel())
# catch
catch_dict = {"name":i, "pixel": lowest_value}
# append
catch.append(catch_dict)
# convert to a dataframe
df_val = pd.DataFrame(catch).sort_values("pixel", ascending = False).reset_index(drop= True)
# threshold_otsu(df_val["pixel"])
plt.hist(df_val["pixel"],np.max(df_val["pixel"]),[np.min(df_val["pixel"]),np.max(df_val["pixel"])])
plt.show()
# above red apple can go to light_val
# get the paths for the light_val
path = r"D:\Jellybean\Relevant\jellybean_data-master"
light_val = df_val[:30]["name"]
light_paths = []
for i in light_val:
path_temp = path + "\\" + i
os.chdir(path_temp)
first_file = path_temp + "\\" + os.listdir()[0]
last_file = path_temp + "\\" + os.listdir()[len(os.listdir())-1]
# append
light_paths.append(first_file)
light_paths.append(last_file)
Parallel(n_jobs=6, verbose = 10, backend = "loky")(delayed(mask_for_beans_light_catch)(i,resize_factor = 3.5) for i in light_paths)
# red apple and belowcan go into the dark script
# get the paths for the dark_val
path = r"D:\Jellybean\Relevant\jellybean_data-master"
dark_val = df_val[30:]["name"]
dark_paths = []
for i in dark_val:
path_temp = path + "\\" + i
os.chdir(path_temp)
first_file = path_temp + "\\" + os.listdir()[0]
last_file = path_temp + "\\" + os.listdir()[len(os.listdir())-1]
# append
dark_paths.append(first_file)
dark_paths.append(last_file)
Parallel(n_jobs=6, verbose = 10, backend = "loky")(delayed(mask_for_beans_dark)(i,resize_factor = 0.7) for i in dark_paths)
| srvanderplas/jellybean | Codes_Jellybean/decide_Scripts.py | decide_Scripts.py | py | 2,679 | python | en | code | 0 | github-code | 13 |
25372728427 | import psycopg2
from psycopg2 import sql
from datetime import datetime
def create_customers_table():
    """Create the ``customers`` table if it does not already exist.

    Uses the module-level ``conn`` psycopg2 connection defined at the
    bottom of this file.
    """
    # NOTE: the trailing comma makes this a 1-tuple; without it `commands`
    # was a bare string and the loop below iterated character by character.
    commands = (
        """
        CREATE TABLE IF NOT EXISTS customers(
            id INTEGER PRIMARY KEY,
            first_name text,
            last_name text,
            created_at timestamptz NOT NULL,
            info jsonb,
            priority int4
        )
        """,
    )
    with conn.cursor() as cursor:
        for command in commands:
            # execute() lives on the cursor, not on the connection object
            cursor.execute(command)
def generate_customers_rows(conn, table_name, n):
    """Insert a few sample rows into *table_name* via *conn*.

    The column list matches the schema produced by
    ``create_customers_table``.  ``n`` is accepted for interface
    compatibility but is not currently used (three fixed rows are written).
    """
    cursor = conn.cursor()
    conn.autocommit = True  # commit each statement immediately
    values = [
        ('Sergey', 'Rom', datetime.now()),
        ('Ivan', 'Ivanov', datetime.now()),
        ('Petr', 'Petrov', datetime.now()),
    ]
    # The table name is an identifier, not a literal, so it must be wrapped
    # in sql.Identifier; the column names now match the tuples above.
    insert = sql.SQL('INSERT INTO {} (first_name, last_name, created_at) VALUES {}').format(
        sql.Identifier(str(table_name)), sql.SQL(',').join(map(sql.Literal, values))
    )
    cursor.execute(insert)
# module-level connection used by the helpers above (local dev credentials)
conn = psycopg2.connect(dbname='postgres', user='postgres', password='postgres', host='localhost', port=5433)
generate_customers_rows(conn, 10, 10) | saromanov/postgesql-experiments | generator/generator.py | generator.py | py | 1,073 | python | en | code | 0 | github-code | 13 |
23748743145 | import os
import time
import datetime
import logging
log = logging.getLogger(__name__)
import numpy as np
import tensorflow as tf
from brooksrfigan.generator import Unet_default
from brooksrfigan.discriminator import ConvNet_default
bce_loss = tf.keras.losses.BinaryCrossentropy()
mae_loss = tf.keras.losses.MeanAbsoluteError()
train_img_metric = tf.keras.metrics.MeanAbsoluteError()
train_gen_metric = tf.keras.metrics.BinaryCrossentropy()
train_disc_metric = tf.keras.metrics.BinaryCrossentropy()
val_img_metric = tf.keras.metrics.MeanAbsoluteError()
val_gen_metric = tf.keras.metrics.BinaryCrossentropy()
val_disc_metric = tf.keras.metrics.BinaryCrossentropy()
generator_optimizer = tf.keras.optimizers.Adam()
discriminator_optimizer = tf.keras.optimizers.Adam()
def s_to_hms(total_seconds):
    """Format a (non-negative) duration in seconds as an 'Hh Mm Ss' string."""
    minutes, seconds = divmod(int(total_seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return f'{hours}h {minutes}m {seconds}s'
def generator_loss(disc_fake_output, generated_masks, real_masks, lam):
    """Adversarial BCE term plus lambda-weighted L1 reconstruction term."""
    adversarial = bce_loss(tf.ones_like(disc_fake_output), disc_fake_output)
    reconstruction = mae_loss(generated_masks, real_masks)
    return adversarial + lam * reconstruction
def discriminator_loss(disc_real_output, disc_fake_output):
    """BCE loss with real masks labelled 1 and generated masks labelled 0."""
    return (bce_loss(tf.ones_like(disc_real_output), disc_real_output)
            + bce_loss(tf.zeros_like(disc_fake_output), disc_fake_output))
@tf.function
def train_step(im_batch, mask_batch, generator, discriminator, gen_loss_lambda):
    """Run one joint optimization step for the generator and discriminator.

    Forward passes are recorded on two separate gradient tapes so each
    network's loss can be differentiated independently; gradients are
    applied with the module-level optimizers and the training metrics are
    updated in place.
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_masks = generator(im_batch, training=True)
        disc_real_out = discriminator(mask_batch, training=True)
        disc_fake_out = discriminator(generated_masks, training=True)
        gen_loss = generator_loss(disc_fake_out, generated_masks, mask_batch, gen_loss_lambda)
        disc_loss = discriminator_loss(disc_real_out, disc_fake_out)
    # gradients are taken outside the `with` block, once recording has stopped
    generator_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
    discriminator_grads = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_grads, discriminator.trainable_variables))
    train_img_metric.update_state(mask_batch, generated_masks) # How good the masks look
    train_gen_metric.update_state(tf.ones_like(disc_fake_out), disc_fake_out) # How good the generator is at tricking the discriminator
    train_disc_metric.update_state(tf.zeros_like(disc_fake_out), disc_fake_out) # How good the discriminator is at spotting fakes
@tf.function
def validate_step(im_batch, mask_batch, generator, discriminator):
    """Evaluate one validation batch: no gradients, only metric updates."""
    generated_masks = generator(im_batch, training=False)
    disc_fake_out = discriminator(generated_masks, training=False)
    val_img_metric.update_state(mask_batch, generated_masks) # How good the images look
    val_gen_metric.update_state(tf.ones_like(disc_fake_out), disc_fake_out) # How good the generator is at tricking the discriminator
    val_disc_metric.update_state(tf.zeros_like(disc_fake_out), disc_fake_out) # How good the discriminator is at spotting fakes
def train(dataset, epochs=10, batch_size=32, validation_multi=1, gen_loss_lambda=100, generator_model=None, discriminator_model=None, tblogdir='./tensorboard_log/',enable_tensorboard=False):
    """
    The primary function of this package. Interfaces with Tensorflow to produce a trained generator model and discriminator model. The trained generator model can be used to generate accurate flag masks that
    retain the characteristics of those found in the training set. By default, the function will train a generator and a discriminator using the basic models found in brooksrfigan.generator and brooksrfigan.discriminator,
    though an option exists to specify a different Keras model. Details of the training process can be logged to tensorboard through the *enable_tensorboard* parameter, and the location of the stored information is
    controlled by passing a filepath to *tblogdir*. Also outputs useful information to the python logger during training.

    .. role:: python(code)
       :language: python

    :param dataset: A two component `tf.data.Dataset <https://www.tensorflow.org/api_docs/python/tf/data/Dataset>`_ instance containing the image and ground truth mask cutouts. Normally created by something like:
        :python:`dataset = tf.data.Dataset.from_tensor_slices((images,masks))` where *images* and *masks* are numpy arrays created by preprocessing.make_cutouts, for example.
    :param epochs: An integer specifying the number of loops to perform over the full training set. A validation run is performed at the end of each epoch.
    :param batch_size: How many images to process in each training step. The choice of batch_size is pretty much down to how much memory you have on your system. Higher RAM capacity systems can process more batches at once.
    :param validation_multi: How many multiples of *batch_size* to set aside as a validation set. For example, with a batch size of 16, setting *validation_multi = 4* would keep aside 64 images for validation at
        the end of each epoch
    :param gen_loss_lambda: An integer specifying the weight associated with the mean absolute error in the generator loss. Leave this as default unless you know what you're doing!
    :param generator_model: Accepts a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`_ instance for a custom generator model. Leaving unspecified will use the model defined in brooksrfigan.generator.
    :param discriminator_model: Same as *generator_model*, but for a discriminator. Leaving unspecified will use the model defined in brooksrfigan.discriminator.
    :param tblogdir: A string indicating the filepath to store tensorboard information. Defaults to the current working directory.
    :param enable_tensorboard: A toggle option for tensorboard logging.

    :returns: trained generator model, trained discriminator model
    """
    # isinstance(None, ...) is False, so the explicit `!= None` test was
    # redundant; the default branches now call the factories imported at the
    # top of this file (the previous `brooksrfigan.generator.Unet_default`
    # reference raised NameError because only the bare names are imported).
    if isinstance(generator_model, tf.keras.Model):
        generator = generator_model
    else:
        generator = Unet_default((128,1024,1))
    if isinstance(discriminator_model, tf.keras.Model):
        discriminator = discriminator_model
    else:
        discriminator = ConvNet_default((128,1024,1))
    if enable_tensorboard:
        # keep three fixed samples so generator progress can be visualized
        tbsamples = dataset.take(3).batch(3)
        for x, y in tbsamples: # single-batch dataset: loop just unpacks it
            tbimages = x
            tbmasks = y
        tstart = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        train_writer = tf.summary.create_file_writer(tblogdir+tstart+'/Training_Metrics')
        val_writer = tf.summary.create_file_writer(tblogdir+tstart+'/Validation_Metrics')
        img_writer = tf.summary.create_file_writer(tblogdir+tstart+'/Generator_Guesses')
        with img_writer.as_default():
            tf.summary.image("Input Images", tbimages, max_outputs=3, step=0)
            tf.summary.image("Input Masks", tbmasks, max_outputs=3, step=0)
    log.info('-- Splitting data into training and validation sets...')
    val_ds = dataset.take(int(batch_size*validation_multi))
    train_ds = dataset.skip(int(batch_size*validation_multi))
    val_ds = val_ds.shuffle(10, reshuffle_each_iteration=False).batch(batch_size)
    train_ds = train_ds.shuffle(10, reshuffle_each_iteration=False).batch(batch_size)
    train_starttime = time.time()
    for epoch in range(epochs):
        epoch_startime = time.time()
        log.info('')
        log.info('---- Starting training epoch {}/{}'.format(epoch+1, epochs))
        for step, (im_batch, mask_batch) in enumerate(train_ds):
            train_step(im_batch, mask_batch, generator, discriminator, gen_loss_lambda)
        log.info('Iterations - {} Generated image accuracy - {:.4f} Generator performance - {:.4f} Discriminator performance - {:.4f}'.format(step, train_img_metric.result(), train_gen_metric.result(), train_disc_metric.result()))
        if enable_tensorboard:
            with train_writer.as_default(step=epoch):
                tf.summary.scalar('Image MAE',train_img_metric.result())
                # tag typo fixed: was 'Generator Performace'
                tf.summary.scalar('Generator Performance',train_gen_metric.result())
                tf.summary.scalar('Discriminator Performance',train_disc_metric.result())
        train_img_metric.reset_state()
        train_gen_metric.reset_state()
        train_disc_metric.reset_state()
        log.info('-- Finished training iterations, validating...')
        for step, (im_batch, mask_batch) in enumerate(val_ds):
            validate_step(im_batch, mask_batch, generator, discriminator)
        log.info('Validation metrics: Generated image accuracy - {:.4f} Generator performance - {:.4f} Discriminator performance - {:.4f}'.format(val_img_metric.result(), val_gen_metric.result(), val_disc_metric.result()))
        if enable_tensorboard:
            with val_writer.as_default(step=epoch):
                tf.summary.scalar('Image MAE',val_img_metric.result())
                tf.summary.scalar('Generator Performance',val_gen_metric.result())
                tf.summary.scalar('Discriminator Performance',val_disc_metric.result())
        val_img_metric.reset_state()
        val_gen_metric.reset_state()
        val_disc_metric.reset_state()
        if enable_tensorboard:
            with img_writer.as_default():
                tf.summary.image("Generator guesses", generator(tbimages, training=False), max_outputs=3, step=epoch)
        log.info('-- Time taken for this epoch: {}'.format(s_to_hms(time.time() - epoch_startime)))
        if epoch < (epochs - 1):
            log.info('-- ETA: {}'.format(s_to_hms((time.time() - epoch_startime)*((epochs - 1) - epoch))))
    log.info('')
    log.info('Finished training in {}'.format(s_to_hms(time.time() - train_starttime)))
return generator, discriminator | JakeEBrooks/BrooksRFIGAN | brooksrfigan/training.py | training.py | py | 10,254 | python | en | code | 0 | github-code | 13 |
16809194124 | from collections import namedtuple
import pytest
from hypothesis import settings as Settings
from hypothesis.stateful import Bundle, RuleBasedStateMachine, precondition, rule
from hypothesis.strategies import booleans, integers, lists
# Minimal binary-tree representation used by the stateful machines below.
Leaf = namedtuple("Leaf", ("label",))
Split = namedtuple("Split", ("left", "right"))
class BalancedTrees(RuleBasedStateMachine):
    """Deliberately failing machine: split() can produce unbalanced trees."""

    trees = Bundle("BinaryTree")

    @rule(target=trees, x=booleans())
    def leaf(self, x):
        return Leaf(x)

    @rule(target=trees, left=trees, right=trees)
    def split(self, left, right):
        return Split(left, right)

    @rule(tree=trees)
    def test_is_balanced(self, tree):
        # leaves are trivially balanced; otherwise check both subtrees
        if isinstance(tree, Leaf):
            return
        assert abs(self.size(tree.left) - self.size(tree.right)) <= 1
        self.test_is_balanced(tree.left)
        self.test_is_balanced(tree.right)

    def size(self, tree):
        if isinstance(tree, Leaf):
            return 1
        return 1 + self.size(tree.left) + self.size(tree.right)
class DepthCharge:
    """Linked node whose depth is one more than its parent's (0 for a root)."""

    def __init__(self, value):
        self.depth = 0 if value is None else value.depth + 1
class DepthMachine(RuleBasedStateMachine):
    """Deliberately failing machine: charge() chains can exceed depth 3.

    Note: uses the legacy ``targets=(bundle,)`` spelling of ``target=``.
    """
    charges = Bundle("charges")
    @rule(targets=(charges,), child=charges)
    def charge(self, child):
        return DepthCharge(child)
    @rule(targets=(charges,))
    def none_charge(self):
        return DepthCharge(None)
    @rule(check=charges)
    def is_not_too_deep(self, check):
        assert check.depth < 3
class RoseTreeStateMachine(RuleBasedStateMachine):
    """Deliberately failing machine: rose trees can grow deeper than 5."""

    nodes = Bundle("nodes")

    @rule(target=nodes, source=lists(nodes))
    def bunch(self, source):
        return source

    @rule(source=nodes)
    def shallow(self, source):
        def depth(tree):
            # empty list has depth 0; otherwise 1 + deepest child
            return 1 + max(map(depth, tree)) if tree else 0

        assert depth(source) <= 5
class NotTheLastMachine(RuleBasedStateMachine):
    """Deliberately failing machine: bye() may receive a value that is not
    the most recently created one."""
    stuff = Bundle("stuff")
    def __init__(self):
        super().__init__()
        self.last = None        # most recent object produced by hi()
        self.bye_called = False # bye() may only fire once
    @rule(target=stuff)
    def hi(self):
        result = object()
        self.last = result
        return result
    @precondition(lambda self: not self.bye_called)
    @rule(v=stuff)
    def bye(self, v):
        assert v == self.last
        self.bye_called = True
class PopulateMultipleTargets(RuleBasedStateMachine):
    """Deliberately failing machine: one rule populates two bundles, so
    fail() becomes reachable and always raises."""
    b1 = Bundle("b1")
    b2 = Bundle("b2")
    @rule(targets=(b1, b2))
    def populate(self):
        return 1
    @rule(x=b1, y=b2)
    def fail(self, x, y):
        raise AssertionError
class CanSwarm(RuleBasedStateMachine):
    """This test will essentially never pass if you choose rules uniformly at
    random, because every time the snake rule fires we return to the beginning,
    so we will tend to undo progress well before we make enough progress for
    the test to fail.
    This tests our swarm testing functionality in stateful testing by ensuring
    that we can sometimes generate long runs of steps which exclude a
    particular rule.
    """
    def __init__(self):
        super().__init__()
        self.seen = set()  # distinct moves observed since the last snake()
    # The reason this rule takes a parameter is that it ensures that we do not
    # achieve "swarming" by just restricting the alphabet for single byte
    # decisions, which is a thing the underlying conjecture engine will
    # happily do on its own without knowledge of the rule structure.
    @rule(move=integers(0, 255))
    def ladder(self, move):
        self.seen.add(move)
        assert len(self.seen) <= 15
    @rule()
    def snake(self):
        self.seen.clear()
# All machines above are intentionally broken; each test below asserts that
# Hypothesis finds and shrinks the failure.
bad_machines = (
    BalancedTrees,
    DepthMachine,
    RoseTreeStateMachine,
    NotTheLastMachine,
    PopulateMultipleTargets,
    CanSwarm,
)
# Raise the example budget so every machine reliably fails.
for m in bad_machines:
    m.TestCase.settings = Settings(m.TestCase.settings, max_examples=1000)
# BalancedTrees is comparatively slow, so some tests use this cheaper subset.
cheap_bad_machines = list(bad_machines)
cheap_bad_machines.remove(BalancedTrees)
with_cheap_bad_machines = pytest.mark.parametrize(
    "machine", cheap_bad_machines, ids=[t.__name__ for t in cheap_bad_machines]
)
@pytest.mark.parametrize(
    "machine", bad_machines, ids=[t.__name__ for t in bad_machines]
)
def test_bad_machines_fail(machine):
    """Every broken machine must fail, with a shrunk run of at most 50 steps."""
    test_class = machine.TestCase
    try:
        test_class().runTest()
    except AssertionError as err:
        # count the reproduction steps attached to the failure notes
        step_lines = [
            note for note in err.__notes__ if "Step " in note or "state." in note
        ]
        assert 1 <= len(step_lines) <= 50
    else:
        raise RuntimeError("Expected an assertion error")
| HypothesisWorks/hypothesis | hypothesis-python/tests/nocover/test_stateful.py | test_stateful.py | py | 4,631 | python | en | code | 7,035 | github-code | 13 |
15021570770 | """
Program to step towards cosmological analysis
By creating a slightly more complicated model using real parameters, corresponding data points and performing chi squared analysis
author: Rhys Seeburger
"""
#import relevant packages
import numpy as np
import matplotlib.pyplot as plt
from chainconsumer import ChainConsumer
import scipy.stats as sci
import time
#start timer (total runtime is reported at the very end of the script)
start_time = time.time()
# define the model function
def func(omega, sigma8, c):
    """Toy observable: sqrt(omega) * sigma8 * c plus a small omega-dependent offset."""
    amplitude = omega**0.5 * sigma8
    return amplitude * c + omega * 10**-5.5
# define the chi-squared statistic
def chi_2(omega, sigma8, c, y, cov):
    """Return (y - model) . cov^-1 . (y - model) for the model at (omega, sigma8)."""
    residual = np.asarray(y) - np.asarray(func(omega, sigma8, c))
    return np.dot(np.dot(residual, np.linalg.inv(cov)), np.transpose(residual))
# define S8 in terms of omega and sigma8
def s_8(omega, sigma8):
    """Return S8 = sigma8 * sqrt(omega)."""
    return sigma8 * omega**0.5
# invert S8: recover sigma8 given omega and S8
def sigma_8(omega, s8):
    """Return sigma8 = S8 / sqrt(omega)."""
    return s8 / omega**0.5
#main body of program
#choose true values for omega and sigma8
omega = 0.3
sigma8 = 0.8
s8 = s_8(omega,sigma8)
std = 1
#create cholesky matrix from file
filename="covariance.ascii"
file=open(filename)
cov=np.loadtxt(file,comments='#')
L = np.linalg.cholesky(cov)
#create datapoints from file
# NOTE(review): `input` shadows the builtin of the same name
filename="input_function.ascii"
file=open(filename)
input=np.loadtxt(file,comments='#')
theta = input[:,0]
c_raw = input[:,1]
y_model = func(omega,sigma8,c_raw)
#add errors using cholesky decomposition
# nReal correlated noise realisations: err = L @ standard normal draws
length = 9
nReal = 10000
err = np.dot(L,np.random.normal(0.,1,(length,nReal)))
y_plot_arr=np.zeros((length,nReal))
for iReal in range(nReal):
    y_plot_arr[:,iReal] = err[:,iReal] + y_model
#create plot of the model against the first noisy realisation
plt.plot(theta,y_model,"r-",label="model")
plt.xscale("log")
plt.yscale("log")
plt.errorbar(theta,y_plot_arr[:,0],yerr=np.sqrt(np.diag(cov)),ecolor='b',
    fmt='d',markeredgecolor='b',mew=1,markerfacecolor='none',
    markersize=6,label='data')
plt.xlabel(r"$\theta$")
plt.ylabel(r"$\xi_+$")
plt.title(r"$\xi_+$" +" vs "+ r"$\theta$")
plt.legend()
plt.show()
# plt.savefig('Plots/data.pdf',bbox_inches='tight')
#now apply chi_2 with sigma8
#set up a grid of om and sig8 values to step around
steps = 100
om_t = np.repeat(np.linspace(0.0,0.99,steps),steps)
sig8_t = np.tile(np.linspace(0.0,0.99,steps),steps)
chis = []
#calculate chi2 at each point of the grid
for i in range(0,steps**2):
    chi = chi_2(om_t[i], sig8_t[i], c_raw, y_plot_arr[:,0], cov)
    chis.append(chi)
#convert into correct format for chainconsumer
chisq = np.array(chis).reshape(steps,steps)
om = np.array(om_t[0::steps])
sig8 = np.array(sig8_t[0:steps])
pdf = np.array(np.exp(-0.5*chisq))
#plot using chainconsumer
c = ChainConsumer()
c.add_chain([om,sig8], parameters=[r"$\Omega_m$", r"$\sigma_8$"], weights=pdf, grid=True)
fig = c.plotter.plot(display=True,truth=[omega,sigma8])
"""
#now apply chi_2 with s8
#set up a grid of om and s8 values to step around
steps = 100
om_t = np.repeat(np.linspace(0.01,0.7,steps),steps)
s8_t = np.tile(np.linspace(s_8(omega,0.3),s_8(omega,1.3),steps),steps)
sig8_t = []
chis = []
#calculate chi2 at each point of the grid
for i in range(0,steps**2):
    sig8 = sigma_8(om_t[i],s8_t[i])
    chi = chi_2(om_t[i], sig8, c_raw, y_plot_arr[:,0], cov)
    sig8_t.append(sig8)
    chis.append(chi)
#convert into correct format for chainconsumer
chisq = np.array(chis).reshape(steps,steps)
om = np.array(om_t[0::steps])
ess8 = np.array(s8_t[0:steps])
pdf = np.array(np.exp(-0.5*chisq))
#plot using chainconsumer
c = ChainConsumer()
c.add_chain([om,ess8], parameters=[r"$\Omega_m$", r"$S_8$"], weights=pdf, grid=True)
fig = c.plotter.plot(display=True,truth=[omega,s8])
"""
#find minimum chi_2 and omega and sig8 at that chi_2
chimin = np.argmin(chis)
omin = om_t[chimin]
sigmin = sig8_t[chimin]
print("omin is ", omin)
print("sigmin is ", sigmin)
"""
#test to see if min chisquared was computed correctly
kaisq = chi_2(omin,sigmin,c_raw,y_plot_arr[:,0],cov)
print(kaisq)
print(np.amin(chis))
"""
#calculate y values for this omin and sigmin and plot
y_chimin = func(omin,sigmin,c_raw)
plt.plot(theta,y_model,"r:",label="model")
plt.plot(theta,y_chimin,"g-",label="min chisq")
plt.xscale("log")
plt.yscale("log")
plt.errorbar(theta,y_plot_arr[:,0],yerr=np.sqrt(np.diag(cov)),ecolor='b',
    fmt='d',markeredgecolor='b',mew=1,markerfacecolor='none',
    markersize=6,label='data')
plt.xlabel(r"$\theta$")
plt.ylabel(r"$\xi_+$")
plt.title(r"$\xi_+$" +" vs "+ r"$\theta$")
plt.legend()
plt.show()
#chi_2 function plot
#compute chisquared for the realisations
x = np.linspace(0.,30.,num=100)
y_chis =[]
for i in range(0,nReal):
    y_chi = chi_2(omega, sigma8, c_raw, y_plot_arr[:,i], cov)
    y_chis.append(y_chi)
y_chisq = np.array(y_chis)
#plot this histogram against chi2 pdfs for 7/8/9 degrees of freedom
# NOTE(review): `normed=` was removed in newer matplotlib; use `density=` there
plt.hist(y_chisq,bins=100,normed=1,histtype='stepfilled', label="data")
plt.plot(x,sci.chi2.pdf(x,7),"k--",label="7 dof")
plt.plot(x,sci.chi2.pdf(x,8),ls="--",color="orange",label="8 dof")
plt.plot(x,sci.chi2.pdf(x,9),ls="--",color="r",label="9 dof")
plt.legend()
plt.ylim(0,0.15)
plt.show()
#timekeeping
print("My program took", time.time() - start_time, "s to run")
| RhysSeeburger/2019_summer | omsig_model.py | omsig_model.py | py | 5,289 | python | en | code | 1 | github-code | 13 |
5213747275 | #coding=utf-8
#Version: python3.6.0
#Tools: Pycharm 2017.3.2
_author_ = ' Hermione'
# Read lines until the total number of characters (counting one newline per
# line entered) exceeds 10, then classify the characters read as letters,
# digits, whitespace or other.  `count` tracks the number of Enter presses.
count=0
alp=0
dig=0
blank=0
oth=0
s=[]
while True:
    a=list(input())
    count+=1
    s.extend(a)
    # stop once characters plus newlines exceed 10; the last newline is
    # not counted, hence the decrement before breaking
    if len(s)+count>10:
        count-=1
        break
for i in s:
    if i.isalpha():
        alp+=1
    elif i.isdigit():
        dig+=1
    elif i.isspace():
        blank+=1
    else:
        oth+=1
print("letter = {}, blank = {}, digit = {}, other = {}".format(alp,blank,dig,oth))
# 本题重点在于回车的数量如何计算,count的数量就是回车的数量 | Harryotter/zhedaPTApython | ZheDapython/z4/z4.14.py | z4.14.py | py | 558 | python | en | code | 1 | github-code | 13 |
72032906258 |
import torch
from torch.utils.data import DataLoader
from pytorch_lightning.core.lightning import LightningModule
from torch.optim import RMSprop
from torch.optim.lr_scheduler import CosineAnnealingLR
from datasets import MapDataset
from models import SurfaceMapModel
from models import InterMapModel
from loss import SSDLoss
from loss import IsometricMapLoss
from loss import CircleBoundaryLoss
from loss import SDFLoss
class InterSurfaceMap(LightningModule):
    """Lightning module that trains a neural map between two surface domains.

    The inter-domain map (``self.net``) is optimized while the 2D -> 3D
    surface map (``self.meta``) stays fixed; the loss combines isometric,
    boundary, SDF and landmark terms.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.net = InterMapModel() # neural map between domains
        self.meta = SurfaceMapModel() # surface map (fixed) 2D -> 3D
        self.map_loss = IsometricMapLoss() # isometric energy
        self.lands_loss = SSDLoss()
        self.bound_loss = CircleBoundaryLoss()
        self.sdf_loss = SDFLoss()
    def train_dataloader(self):
        """Build the training dataloader from the configured map dataset."""
        self.dataset = MapDataset(self.config.dataset)
        dataloader = DataLoader(self.dataset, batch_size=None, shuffle=True,
                                num_workers=self.config.dataset.num_workers)
        return dataloader
    def configure_optimizers(self):
        """RMSprop on the inter-map parameters with cosine-annealed LR."""
        LR = 1.0e-4
        optimizer = RMSprop(self.net.parameters(), lr=LR)
        restart = int(self.config.dataset.num_epochs)
        scheduler = CosineAnnealingLR(optimizer, T_max=restart)
        return [optimizer], [scheduler]
    def training_step(self, batch, batch_idx):
        """Compute the combined map/boundary/SDF/landmark loss for one batch."""
        source = batch['source_g'] # Nx2
        weights_g = batch['weights_g'] # weights source
        weights_f = batch['weights_f'] # weights target
        boundary = batch['boundary_g'] # Bx2
        R = batch['R'] # 2x2
        lands_g = batch['lands_g'] # Lx2
        lands_f = batch['lands_f'] # Lx2
        C_g = batch['C_g'] # float (area normalization factor)
        C_f = batch['C_f'] # float (area normalization factor)
        # activate gradient for autodiff
        source.requires_grad_(True)
        rot_src = source.matmul(R.t()) # pre apply rotation
        rot_bnd = boundary.matmul(R.t())
        rot_lands = lands_g.matmul(R.t())
        map_src = self.net(rot_src) # forward source to target domain
        map_bnd = self.net(rot_bnd) # forward boundary
        map_lands = self.net(rot_lands) # forward landmarks
        G = self.meta(source, weights_g) * C_g # forward surface source
        F = self.meta(map_src, weights_f) * C_f # forward surface target
        loss_map = self.map_loss(F, map_src, source, G)
        loss_bnd = self.bound_loss(map_bnd)
        loss_lands = self.lands_loss(map_lands, lands_f)
        loss_sdf = self.sdf_loss(map_src)
        # boundary/SDF and landmark terms are heavily weighted constraints
        loss = loss_map + 1.0e4 * (loss_bnd+loss_sdf) + 1.0e6 * loss_lands
        return loss
| luca-morreale/neural_surface_maps | mains/intersurface_map_train.py | intersurface_map_train.py | py | 2,824 | python | en | code | 53 | github-code | 13 |
7042586918 | from tensorflow import keras
import tensorflow as tf
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from plotting import plot_pred_dots
import scipy.sparse
# current working directory, guaranteed to end with a trailing slash
_cwd_raw = os.getcwd()
cwd = _cwd_raw if _cwd_raw.endswith('/') else _cwd_raw + '/'
MODEL_PATH = cwd + 'models/'          # saved keras models
IMG_PATH = cwd + 'plots/res_plots/'   # output figures
MOVIE_PATH = '/home/macleanlab/mufeng/tfrecord_data_processing/'  # dot-movie tfrecords
# --- command-line interface and module-level configuration ---
parser = argparse.ArgumentParser(description='Args for generating the spikes')
parser.add_argument('model_name', type=str, help='Name of the model')
parser.add_argument('num_dot_movies', type=int, help='Number of dot movies to check')
parser.add_argument('make_positive', type=str, help='How do you want to make firing rates positive, rec or abs')
parser.add_argument('--num_natural_movies', default=20, type=int, help='Number of natural movies to check')
args = parser.parse_args()
model_name = args.model_name
num_dot_movies = args.num_dot_movies
num_natural_movies = args.num_natural_movies
makepositive = args.make_positive
# trial timing: 240 frames at 60 fps -> 4 s -> 4000 ms per trial
FRAMES_PER_TRIAL = 240
FRAMERATE = 60
MS_PER_TRIAL = (FRAMES_PER_TRIAL // FRAMERATE) * 1000
# trained CNN whose intermediate layer provides the firing rates below
model = keras.models.load_model(MODEL_PATH+model_name)
def create_dataset(paths, max_seq_len=4800, encoding='png', pool=None):
    """
    Read in .tfrecord datasets of the drifting dots movies.

    paths: list of .tfrecord file paths
    max_seq_len: truncate each decoded frame sequence to this length
    encoding: frame encoding; only 'png' is handled here
    pool: unused (kept for interface compatibility)
    """
    feature_description = {
        'frames': tf.io.VarLenFeature(tf.string),
        'change_label': tf.io.VarLenFeature(tf.int64),
        'coherence_label': tf.io.VarLenFeature(tf.int64),
        'direction_label': tf.io.VarLenFeature(tf.int64),
        'dominant_direction': tf.io.VarLenFeature(tf.int64),
        'trial_coherence': tf.io.VarLenFeature(tf.int64),
        'is_changes': tf.io.VarLenFeature(tf.int64),
    }
    data_set = tf.data.TFRecordDataset(paths)
    def _parse_example(_x):
        # decode one serialized example into (frames, label dict)
        _x = tf.io.parse_single_example(_x, feature_description)
        if encoding == 'png':
            _frames = tf.map_fn(lambda a: tf.io.decode_png(a), _x['frames'].values, dtype=tf.uint8)[:max_seq_len]
        _label = _x['coherence_label'].values
        _change_label = _x['change_label'].values
        _direction_label = _x['direction_label'].values
        _dominant_direction = _x['dominant_direction'].values
        _trial_coherence = _x['trial_coherence'].values
        _is_changes = _x['is_changes'].values
        return _frames, dict(tf_op_layer_coherence=_label, # 0 or 1, length 4800
                             tf_op_layer_change=_change_label, # 0 or 1, length 4800
                             tf_op_dir=_direction_label, # 1,2,3,4, length 4800
                             tf_dom_dir=_dominant_direction, # 1,2,3,4, length 10 (i.e. per trial/grey screen)
                             tf_trial_coh=_trial_coherence, # 0 or 1, length 10
                             tf_is_changes=_is_changes,
                             )
    data_set = data_set.map(_parse_example, num_parallel_calls=24)
    return data_set
def read_dot(num_movies):
    """
    Select a random subset of drifting dots movies
    num_movies: number of 4800 frames-long drifting dots movies to select. Each contains 10 trials and 10 grey screens

    Returns (all_movies, all_trial_cohs, all_coh_labels, all_is_changes).
    """
    # each tfrecord file corresponds to one movie (4800 frames)
    # file_names = [os.path.expanduser(f'preprocessed/processed_data_{i+1}.tfrecord') for i in range(num_movies)]
    file_names = [MOVIE_PATH + f'preprocessed/processed_data_{i+1}.tfrecord' for i in range(num_movies)]
    data_set = create_dataset(file_names, 4800).batch(1)
    # ex[0] the frames (tensor), shape [1, 4800, 36, 64, 3]
    # ex[1] a dictionary, containing coh level, change label and direction
    k = 1
    movies = []
    trial_cohs = [] # original coherence level for each trial
    coh_labels = [] # coherence level at each frame
    is_changes = [] # whether coherence changes for each trial
    for ex in data_set:
        trials = []
        print("Movie ", k)
        trial_coh = ex[1]['tf_trial_coh'].numpy()[0] # len = 10, coh vector of this movie
        trial_cohs.append(trial_coh)
        coh_label = ex[1]['tf_op_layer_coherence'].numpy()[0] # len = 4800, for each frame.
        coh_labels.append(coh_label)
        is_change = ex[1]['tf_is_changes'].numpy()[0]
        is_changes.append(is_change)
        # split the 4800-frame movie into 20 four-second segments
        # (alternating trial / grey screen)
        start_frames = np.arange(0, 80, 4)
        end_frames = np.arange(4, 84, 4)
        framerate = 60
        for i in range(2*len(trial_coh)): # 20
            trial = ex[0][:, start_frames[i]*framerate:end_frames[i]*framerate] # [1, 240, 36, 64, 3]
            trials.append(trial)
        # concatenate the trials and grey screens into a large tensor
        movie = tf.concat(trials, axis=0) # [20, 240, 36, 64, 3]
        movies.append(movie)
        k+=1
    all_movies = tf.concat(movies, axis=0) # [num_movies*20,240,36,64,3]
    all_trial_cohs = np.stack(trial_cohs) # [num_movies, 10]
    all_coh_labels = np.stack(coh_labels) # [num_movies, 4800]
    all_is_changes = np.stack(is_changes) # [num_movies, 10]
    return all_movies, all_trial_cohs, all_coh_labels, all_is_changes
def read_natural(num_natural_movies):
    """Load `num_natural_movies` natural movies (240 frames each).

    A fixed RandomState seed keeps the selected subset reproducible
    across runs.
    """
    archive = np.load('natural_data.npz', mmap_mode='r+')
    frames = archive['x']
    labels = archive['y']
    rng = np.random.RandomState(4)  # fix a random set of movies
    chosen = rng.randint(frames.shape[0], size=num_natural_movies)
    return frames[chosen], labels[chosen]
def plot_firing_rates(all_movies, makepositive, plot_grey=True, stim='natural'):
    """
    Plot the 16 neurons' firing rates to drifting dots or natural movies, rectified
    all_movies: input
    makepositive: 'rec' (ReLU), 'abs' (absolute value), else raw output
    plot_grey: if False, drop every other segment (the grey screens)
    stim: 'natural' or 'dots' (used only in the output filename)
    """
    # tap the model at layer 12, which yields the (time, 16) rate code
    intermediate_layer_model = keras.Model(inputs=model.input,
                                           outputs=model.layers[12].output)
    # responses given one movie
    batch_size = 2
    dataset = tf.data.Dataset.from_tensor_slices(all_movies).batch(batch_size)
    batch_rates = []
    for d in dataset:
        output = intermediate_layer_model(d) # batch_size, 60, 16
        if makepositive == 'rec':
            batch_rate = tf.nn.relu(output).numpy() # batch_size, 60, 16
        elif makepositive == 'abs':
            batch_rate = tf.math.abs(output).numpy() # batch_size, 60, 16
        else:
            batch_rate = output.numpy()
        batch_rates.append(batch_rate)
    f_rates = np.concatenate(batch_rates, axis=0) # 20, 60, 16
    if plot_grey == False:
        f_rates = f_rates[::2] # 10, 60, 16
    print("(num movies, compressed seq len, num cells): ", f_rates.shape)
    num_neurons = f_rates.shape[2]
    rec_res = np.reshape(f_rates, (-1, num_neurons)) # 600, 16
    # one subplot per neuron, each with its own spectral color
    NUM_COLORS = rec_res.shape[1]
    cm = plt.get_cmap('nipy_spectral')
    cmap = [cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)]
    fig = plt.figure(figsize=(12,12))
    for i in range(rec_res.shape[1]):
        ax = plt.subplot(rec_res.shape[1], 1, i+1)
        ax.set_ylim([0, 1])
        ax.plot(rec_res[:, i], alpha=1, c=cmap[i])
    fig.text(0.5, 0.06, 'time', ha='center')
    fig.text(0.06, 0.5, 'avg. firing rates over 4 frames', va='center', rotation='vertical')
    plt.savefig(IMG_PATH+f'responses_{stim}_{model_name}_{makepositive}', dpi=300)
    if False: # plot all neurons in one frame (disabled)
        plt.figure(figsize=(12, 1))
        for i in range(rec_res.shape[1]):
            plt.plot(rec_res[:, i], alpha=0.6)
        plt.xlabel('time')
        plt.savefig(IMG_PATH+'responses_all.png', dpi=200)
def plot_dot_predictions():
    """
    Plot true optic flow vs predicted optic flow by the model
    """
    # delegates to plotting.plot_pred_dots using the module-level model
    plot_pred_dots(model)
def spike_generation(all_movies, makepositive):
    """
    Generate spikes based on firing rates
    Input: (num_dot_movies*20, 240, 36, 64, 3)
    Output: (num_dot_movies*10, 4080, 16), binary
    """
    # tap the model at layer 12 for the rate code
    intermediate_layer_model = keras.Model(inputs=model.input,
                                           outputs=model.layers[12].output)
    # load the data into the model in batches
    batch_size = 2
    dataset = tf.data.Dataset.from_tensor_slices(all_movies).batch(batch_size)
    batch_rates = []
    for d in dataset:
        output = intermediate_layer_model(d) # batch_size, 60, 16
        if makepositive == 'rec':
            batch_rate = tf.nn.relu(output).numpy() # batch_size, 60, 16
        elif makepositive == 'abs':
            batch_rate = tf.math.abs(output).numpy() # batch_size, 60, 16
        batch_rates.append(batch_rate)
    f_rates = np.concatenate(batch_rates, axis=0) # 20*num_dot_movies, 60, 16
    f_rates = f_rates[::2] # 10*num_dot_movies, 60, 16 (drop the grey screens)
    num_neurons = f_rates.shape[2]
    # dilate the 60-step rate code to milliseconds (~4080 ms per trial)
    f_rates_r = np.repeat(f_rates, int(MS_PER_TRIAL/f_rates.shape[1])+2, axis=1) # 10*num_dot_movies, 4080, 16
    # random matrix between [0,1] for Poisson process
    # this step is stochastic, may need to introduce a seed
    random_matrix = np.random.rand(f_rates_r.shape[0], f_rates_r.shape[1], num_neurons)
    spikes = (f_rates_r - random_matrix > 0)*1.
    return spikes
def raster_plot(all_movies):
    """
    Generate raster plot of one trial
    """
    # take the first movie's spike train only
    spikes = spike_generation(all_movies, makepositive)[0]
    example = np.reshape(spikes, (-1, 16)) # 4080, 16
    plt.figure(figsize=(6,3))
    plt.pcolormesh(example.T, cmap='cividis')
    plt.title(f'{makepositive} natural')
    plt.xlabel('Time (frames)')
    plt.ylabel('Neurons')
    plt.savefig(IMG_PATH+f'raster_plot_{makepositive}.png', dpi=200)
def main(num_dot_movies, num_natural_movies):
    """
    Generate spike trains from drifting-dot movies and save them to disk.

    Difference between two inputs:
    num_dot_movies:
        number of drifting dot movies, each containing 10 moving dots and 10 grey screens, total length = 4800 frames
    num_natural_movies:
        number of natural movies, total length = 240 frames
    The binary spike train matrix is saved as a scipy sparse matrix (.npz)
    """
    print("Reading movies...")
    dot_movies, all_trial_cohs, all_coh_labels, all_is_changes = read_dot(num_dot_movies)
    # Keep only the moving-dot trials (every other trial), drop grey screens.
    trial_coh_labels = all_coh_labels.reshape((-1, 20, FRAMES_PER_TRIAL))[:, ::2] # num_dot_movies, 10, 240
    trial_coh_labels = trial_coh_labels.reshape((-1, 10*FRAMES_PER_TRIAL)) # num_dot_movies, 2400
    # natural_movies, natural_y = read_natural(num_natural_movies)
    if True: # generate spikes
        print(f'Generating spikes using {makepositive}')
        spikes = spike_generation(dot_movies, makepositive)
        print("Shape of spike train: ", spikes.shape)
        print(f"Reshaping the spike train to a matrix with shape ({spikes.shape[0]}x{spikes.shape[1]}, {spikes.shape[2]})...")
        spikes_sparse = scipy.sparse.csc_matrix(spikes.reshape((-1, spikes.shape[2])))
        # dilate the coherence vectors from frames to ms
        # sanity check: 2400*17 = 10*4080 = 40800ms per movie
        coh_labels_ms = np.repeat(trial_coh_labels, int(MS_PER_TRIAL/FRAMES_PER_TRIAL)+1, axis=1)
        print("Shape of ms-by-ms coherence levels: ", coh_labels_ms.shape)
        coh_labels_sparse = scipy.sparse.csc_matrix(coh_labels_ms) # num_dot _movies, 40800
        save_path = 'CNN_outputs'
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        # NOTE(review): training_mode is unbound for any other model_name —
        # confirm model_name is always one of these two values.
        if model_name == 'ch_model4':
            training_mode = 'natural'
        elif model_name == 'ch_model6':
            training_mode = 'mixed'
        # spikes, [num_dot_movies*10*4080, 16]
        scipy.sparse.save_npz(save_path+f'/spike_train_{training_mode}_{makepositive}.npz', spikes_sparse)
        # frame-by-frame coherence of each movie, [num_dot_movies, 40800], sparsified
        scipy.sparse.save_npz(save_path+f'/coherences_{training_mode}_{makepositive}.npz', coh_labels_sparse)
        # trial-by-trial coherence changes (0 or 1), [num_dot_movies, 10]
        np.save(save_path+f'/changes_{training_mode}_{makepositive}.npy', all_is_changes)
        plot_firing_rates(dot_movies[0:20], makepositive=makepositive, plot_grey=True, stim='dot')
    if False: # generate plots
        # plot using the first drifting dots movie
        # plot_firing_rates(natural_movies, rectify=False, plot_grey=True, stim='natural')
        # plot using the first drifting dots movie
        # plot_firing_rates(dot_movies[0:20], rectify=False, plot_grey=True, stim='dot')
        raster_plot(dot_movies[0:20])
    # natural_movies, natural_directions = read_natural('x_all.npy', 'y_all.npy', num_natural_movies)
    # plot_firing_rates(natural_movies, stim='natural')
    # plot_dot_predictions()
    # angles = np.arctan2(natural_directions[:,1], natural_directions[:,0]) * 180 / np.pi
    # distance = np.sqrt(natural_directions[:,0]**2 + natural_directions[:,1]**2)
if __name__ == '__main__':
    # Script entry point; counts are module-level settings defined above.
    main(num_dot_movies, num_natural_movies)
| C16Mftang/front-end-CNN | firing_rate.py | firing_rate.py | py | 12,893 | python | en | code | 0 | github-code | 13 |
42014946135 | #!/home/halvard/miniconda3/bin/python
import pprint
import subprocess
import sys
import os
# Helper shell script that prints the list of running Jupyter sessions.
GET_RUNNING_KERNELS_SCRIPT = os.environ['HOME'] + '/bin/get_running_kernels.sh'
# use -a to print all kernels, including those without GPUs
args = " ".join(sys.argv[1:])
print_all = args == "-a"
def get_gpu_processes():
    """Return a list of dicts describing processes using the GPU.

    Parses the human-readable output of ``nvidia-smi -q``: everything
    after the "Processes" header is treated as "key : value" lines, and a
    new process record is started at each "GPU instance ID" line.
    """
    out = subprocess.run(['nvidia-smi', '-q'], stdout=subprocess.PIPE)
    raw_processes = [s.strip() for s in
                     out.stdout.decode("utf-8").split("\n")]
    raw_processes = [s for s in raw_processes if s]
    ind = raw_processes.index("Processes")
    raw_processes = raw_processes[1+ind:]

    processes = []
    process = None
    for line in raw_processes:
        if line.startswith("GPU instance ID"):
            if process is not None:
                processes.append(process)
            process = {}
        else:
            # Split on the first ':' only -- values (e.g. process names or
            # paths) may themselves contain colons, which previously made
            # the 2-way unpack raise ValueError.
            key, val = map(str.strip, line.split(":", 1))
            process[key] = val
    if process is not None:
        processes.append(process)
    return processes
def match_process_and_jupyter_kernel(processes):
    """Map Jupyter kernel ids to indices into *processes*.

    Using the PID of each python process, extracts the jupyter kernel id
    from the command line (``ps -o args``) used to run the process; the
    kernel id is the part of the connection-file path after 'kernel-' and
    before the first '.'.
    """
    kernel_id_to_process_num = {}
    for i,process in enumerate(processes):
        if "python" in process['Name']:
            # Ask ps for the full command line of this PID.
            cmd = subprocess.run(['ps', '-p', process['Process ID'], '-o',
                                  'args'], stdout=subprocess.PIPE
                                 ).stdout.decode("UTF-8")
            # Last token is assumed to be the kernel connection-file path
            # -- TODO confirm this holds for all launch styles.
            path = cmd.split()[-1]
            kid = path[7+path.find('kernel'):].split(".")[0]
            kernel_id_to_process_num[kid] = i
    return kernel_id_to_process_num
def get_running_jupyter_sessions():
    """Return the running Jupyter sessions reported by the helper script.

    NOTE(review): the helper's stdout is passed to eval(), which executes
    arbitrary code if the script or its output is ever untrusted --
    consider json.loads or ast.literal_eval instead.
    """
    sessions = subprocess.run([GET_RUNNING_KERNELS_SCRIPT], stdout=subprocess.PIPE)
    sessions = eval(sessions.stdout.decode("utf-8"))
    return sessions
def print_kernel_GPU_memory_usage(processes, kernel_id_to_process_num, sessions):
    """Print one line per Jupyter session: GPU memory used and notebook path.

    Sessions without a matching GPU process are shown only when the
    module-level ``print_all`` flag is set.
    """
    for sess in sessions:
        kid = sess['kernel']['id']
        idx = kernel_id_to_process_num.get(kid)
        if idx is not None:
            mem = processes[idx]['Used GPU Memory']
        elif print_all:
            mem = "No GPU"
        else:
            continue
        print(f"{mem:10}", sess['path'])
def main():
    """Collect GPU process info, map it to Jupyter kernels, and print usage."""
    gpu_processes = get_gpu_processes()
    kernel_id_to_process_num = match_process_and_jupyter_kernel(gpu_processes)
    jupyter_sessions = get_running_jupyter_sessions()
    print_kernel_GPU_memory_usage(gpu_processes, kernel_id_to_process_num,
                                  jupyter_sessions)
if __name__ == "__main__":
    main()
| halvarsu/bin | print_jupyter_kernel_GPU_usage.py | print_jupyter_kernel_GPU_usage.py | py | 2,783 | python | en | code | 0 | github-code | 13 |
10773390853 | #!/usr/bin/env python
# coding: utf-8
import requests
import json
import datetime
import os
def save_tokens(filename, tokens):
    """Serialize the *tokens* dict to *filename* as JSON."""
    with open(filename, mode="w") as handle:
        json.dump(tokens, handle)
def load_tokens(filename):
    """Read and return the JSON token payload stored in *filename*."""
    with open(filename) as handle:
        return json.load(handle)
def update_tokens(app_key, filename):
    """Refresh the Kakao access token stored in *filename*.

    Uses the saved refresh_token to request a new access token. On
    success the old token file is backed up with a timestamp suffix and
    the updated tokens are written back; returns the token dict.
    Returns None when the refresh request fails.
    """
    tokens = load_tokens(filename)
    url = "https://kauth.kakao.com/oauth/token"
    data = {
        "grant_type": "refresh_token",
        "client_id": app_key,
        "refresh_token": tokens['refresh_token']
    }
    response = requests.post(url, data=data)
    if response.status_code != 200:
        print('error', response.json())
        tokens = None
    else:
        print(tokens)
        # Bug fix: the format was "%Y%m%D_%H%M%S" -- %D expands to
        # mm/dd/yy, injecting '/' into the backup filename. %d (day of
        # month) is what was intended.
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_filename = filename + '.' + now
        os.rename(filename, backup_filename)
        tokens['access_token'] = response.json()['access_token']
        save_tokens(filename, tokens)
    return tokens
def send_message(filename, template):
    """POST *template* to the Kakao "memo" (message-to-self) API.

    Reads the access token from *filename* and returns the raw
    ``requests.Response`` object.
    """
    tokens = load_tokens(filename)
    url = "https://kapi.kakao.com/v2/api/talk/memo/default/send"
    headers = {"Authorization": "Bearer " + tokens['access_token']}
    payload = {"template_object": json.dumps(template)}
    return requests.post(url, headers=headers, data=payload)
| bibersay/Toy-project | toy_project/_6_dont_sleep/kakao_utils.py | kakao_utils.py | py | 1,399 | python | en | code | 0 | github-code | 13 |
26192782996 | import os
import json
from typing import Iterable
from harquery.query import parse
from harquery.endpoint import HeadersBase
class HeadersPreset(HeadersBase):
    """A named, file-backed collection of HTTP headers inside a workspace."""

    def __init__(self, workspace: 'Workspace', name: str):
        self._workspace = workspace
        self._name = name
        path = os.path.join(
            workspace._path, "presets", "headers", name + ".json")
        with open(path, "r") as f:
            self._data = json.load(f)

    @staticmethod
    def new(workspace: 'Workspace', name: str, data: list = None):
        """Create and persist a new headers preset; fails if it already exists."""
        path = os.path.join(
            workspace._path, "presets", "headers", name + ".json")
        if os.path.isfile(path):
            raise FileExistsError("Headers preset '{0}' already exists".format(name))
        if data is None:
            data = []
        with open(path, "w") as f:
            json.dump(data, f)
        print("Added new headers preset: '{0}'".format(name))
        return HeadersPreset(workspace, name)

    def save(self) -> None:
        """Write the current header data back to the preset file."""
        path = os.path.join(
            self._workspace._path, "presets", "headers", self._name + ".json")
        with open(path, "w") as f:
            json.dump(self._data, f)
        print("Saved headers preset: '{0}'".format(self._name))

    def __repr__(self):
        repr_str = "headers > presets > {0}\n".format(self._name)
        count = 0
        for header in self._data:
            repr_str += " {0}: {1}\n".format(
                header["name"], header["value"])
            count += 1
        if count == 0:
            return "headers > presets > {0}: (empty)".format(self._name)
        # Drop the trailing newline. (The original had an unreachable
        # `return repr_str` after the if/else; dead code removed.)
        return repr_str[:-1]

    __str__ = __repr__
class FiltersPreset:
    """A named, file-backed list of parsed filter queries inside a workspace."""

    def __init__(self, workspace: 'Workspace', name: str):
        self._workspace = workspace
        self._name = name
        path = os.path.join(
            workspace._path, "presets", "filters", name + ".json")
        with open(path, "r") as f:
            self._obj = json.load(f)

    @staticmethod
    def new(workspace: 'Workspace', name: str, data: list = None):
        """Create and persist a new filters preset; fails if it already exists."""
        path = os.path.join(
            workspace._path, "presets", "filters", name + ".json")
        if os.path.isfile(path):
            raise FileExistsError("Filters preset '{0}' already exists".format(name))
        if data is None:
            data = []
        with open(path, "w") as f:
            json.dump(data, f)
        print("Added new filters preset: '{0}'".format(name))
        return FiltersPreset(workspace, name)

    def add(self, query: str) -> None:
        """Append one filter, given either a query string or a parsed dict."""
        if isinstance(query, str):
            doc = parse(query)
        elif isinstance(query, dict):
            doc = query
        else:
            raise TypeError
        current_filters = [item["string"] for item in self._obj]
        if doc["string"] in current_filters:
            raise ValueError("duplicate filter '{0}'".format(doc["string"]))
        self._obj.append(doc)
        print("added filter: {0}".format(doc["string"]))

    def take(self, filters: Iterable):
        """Copy in any filters not already present, skipping duplicates.

        Bug fix: the membership set is updated as filters are appended so
        a filter repeated *within* the input iterable is only taken once
        (the original list snapshot admitted such duplicates).
        """
        count = 0
        current_filters = {item["string"] for item in self._obj}
        for filt in filters:
            if filt["string"] not in current_filters:
                self._obj.append(filt)
                current_filters.add(filt["string"])
                count += 1
        print("Took {0} filters".format(count))

    def drop(self, index: int) -> None:
        """Remove the filter at *index* (negative indices count from the end)."""
        if index < 0:
            index = len(self._obj) + index
        if index < 0 or index >= len(self._obj):
            raise IndexError
        drop_query = self._obj[index]["string"]
        self._obj.pop(index)
        print("removed filter: {0}".format(drop_query))

    def clear(self) -> None:
        """Remove all filters from the preset (in memory only)."""
        self._obj = []
        print("removed all filters")

    def save(self) -> None:
        """Write the current filter list back to the preset file."""
        path = os.path.join(
            self._workspace._path, "presets", "filters", self._name + '.json')
        with open(path, "w") as f:
            json.dump(self._obj, f)
        print("Saved filters preset: '{0}'".format(self._name))

    def __iter__(self):
        for filt in self._obj:
            yield filt

    def __repr__(self):
        repr_str = "presets > filters > {0}\n".format(self._name)
        count = 0
        for i in range(len(self._obj)):
            query = self._obj[i]["string"]
            repr_str += "[{0}] {1}\n".format(i, query)
            count += 1
        if count == 0:
            return "presets > filters > {0}: (empty)".format(self._name)
        else:
            return repr_str[:-1]

    __str__ = __repr__
74718330896 | from abc import ABC
from typing import Optional, List
import marshy
from marshy.types import ExternalItemType
from servey.security.authorization import Authorization
from servey.security.authorizer.jwt_authorizer_abc import (
JwtAuthorizerABC,
date_from_jwt,
)
from persisty.security.permission import Permission
from persisty.security.permission_authorization import PermissionAuthorization
class JwtPermissionAuthorizerABC(JwtAuthorizerABC, ABC):
    """JWT authorizer that also (de)serializes persisty permissions."""

    @staticmethod
    def authorization_from_decoded(decoded: ExternalItemType):
        """Build a PermissionAuthorization from a decoded JWT payload."""
        scope = decoded.get("scope")
        scopes = tuple()
        if scope:
            scopes = scope.split(" ")
        authorization = PermissionAuthorization(
            subject_id=decoded.get("sub"),
            not_before=date_from_jwt(decoded, "nbf"),
            expire_at=date_from_jwt(decoded, "exp"),
            scopes=frozenset(scopes),
            permissions=marshy.load(
                Optional[List[Permission]], decoded.get("permissions")
            ),
        )
        return authorization

    @staticmethod
    def payload_from_authorization(authorization: Authorization, iss: str, aud: str):
        """Build a JWT payload, adding a "permissions" claim when present.

        Bug fix: the original called zero-argument super() inside a
        @staticmethod, which fails at runtime (there is no instance to
        bind); delegate to the base class explicitly instead.
        """
        payload = JwtAuthorizerABC.payload_from_authorization(authorization, iss, aud)
        permissions = getattr(authorization, "permissions", None)
        if permissions:
            payload["permissions"] = marshy.dump(permissions)
        return payload
| tofarr/persisty | persisty/security/jwt_permission_authorizer_abc.py | jwt_permission_authorizer_abc.py | py | 1,417 | python | en | code | 1 | github-code | 13 |
13578668300 | """Test runway.core.providers.aws._account."""
# pylint: disable=no-self-use
from runway.core.providers.aws import AccountDetails
class TestAccountDetails(object):
    """Tests for runway.core.providers.aws._account.AccountDetails."""

    def test_aliases(self, runway_context):
        """Aliases come from the iam list_account_aliases call."""
        expected = ["test", "runway-test"]
        stubber = runway_context.add_stubber("iam")
        stubber.add_response(
            "list_account_aliases",
            {"AccountAliases": expected, "IsTruncated": False},
        )
        account = AccountDetails(runway_context)
        with stubber:
            assert account.aliases == expected

    def test_id(self, runway_context):
        """Account id comes from the sts get_caller_identity call."""
        expected_id = "123456789012"
        stubber = runway_context.add_stubber("sts")
        stubber.add_response(
            "get_caller_identity",
            {
                "UserId": "test-user",
                "Account": expected_id,
                "Arn": "arn:aws:iam::{}:user/test-user".format(expected_id),
            },
        )
        account = AccountDetails(runway_context)
        with stubber:
            assert account.id == expected_id
| muni77-sh/runway | tests/unit/core/providers/aws/test_account.py | test_account.py | py | 1,121 | python | en | code | null | github-code | 13 |
20049395966 | import tkinter as tk
from game_logic.bot import Bot
from display.gameboard import Gameboard
from display.player_hand import Player_hand
class Menu:
    """Top-level tkinter window for the '6 Qui Prend!' game: welcome screen,
    menu toolbar, gameboard and player-hand frames."""

    def __init__(self, game):
        """
        Create the welcome window.
        Args:
            game (Game): The game to launch.
        """
        self.game = game
        self.window = tk.Tk()
        self.window.title("6 Qui Prend!")
        self.window.geometry("800x600")
        # Give the game a back-reference to this menu.
        self.game.menu = self
        self.gameboard_frame = tk.Frame(self.window, bg="white")
        self.player_frame = tk.Frame(self.window, bg="white")
        self.toolbar = tk.Frame(self.window, bg="#f0f0f0")
        self.gameboard = Gameboard(self.game, self.gameboard_frame)
        self.player_hand = Player_hand(self.game, self.player_frame)
        # NOTE(review): `welcome` is a local; the PhotoImage stays alive only
        # because this frame is active for the duration of mainloop() below.
        welcome = tk.PhotoImage(file="display/images/welcome.png")
        self.welcome_label = tk.Label(self.window, image=welcome)
        self.welcome_label.pack()
        # Any key press leaves the welcome screen and opens the menu.
        self.window.bind("<Key>", lambda event: self.menu())
        self.window.mainloop()

    def menu(self):
        """
        Create the menu window.
        """
        # Destroy the welcome window
        self.welcome_label.destroy()
        self.clear_toolbar()
        self.window.unbind("<Key>")
        self.toolbar.pack(fill=tk.Y, side=tk.LEFT, expand=False)
        self.gameboard_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.player_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        # Play button
        tk.Button(self.toolbar,
                  text="Play",
                  command=self.play,
                  anchor='w',
                  bg='#ff6666',
                  activebackground='#c44343').pack(fill='x', ipady=10, pady=3)
        # Configuration player button
        tk.Button(self.toolbar,
                  text="Player configuration",
                  command=self.player_conf,
                  anchor='w',
                  bg='#f0f0f0',
                  activebackground='#bdbdbd').pack(fill='x', ipady=10, pady=3)
        # Rules button
        tk.Button(self.toolbar,
                  text="Rules",
                  command=self.rules,
                  anchor='w',
                  bg='#f0f0f0',
                  activebackground='#bdbdbd').pack(fill='x', ipady=10, pady=3)
        # Quit button
        tk.Button(self.toolbar,
                  text="Quit",
                  command=self.window.quit,
                  anchor='w',
                  bg='#f0f0f0',
                  activebackground='#bdbdbd').pack(fill='x', ipady=10, pady=3)

    def clear_toolbar(self):
        """
        Destroy all the widgets in the toolbar.
        """
        for widget in self.toolbar.winfo_children():
            widget.destroy()

    def play(self):
        """Refresh the board/hand display and start the game loop."""
        self.window_update()
        self.game.play(self)

    def player_conf(self):
        """
        Player configuration window.
        """
        def add_bot(bot_name, difficulty):
            # Add a bot with the chosen name/difficulty and redraw the panel.
            self.game.players.append(Bot(bot_name, difficulty))
            self.player_conf()

        def delete_bot(bot_name):
            # Remove the first player whose name matches, then redraw.
            for i in range(len(self.game.players)):
                if self.game.players[i].name == bot_name:
                    self.game.players.pop(i)
                    break
            self.player_conf()

        def show_player_list():
            # Render the current players (with difficulty for bots).
            tk.Label(self.toolbar, text="List of players").pack(fill='x', ipady=10, pady=3)
            players = tk.Listbox(self.toolbar, height=5)
            players.pack(fill='x', ipady=10, pady=3)
            for player in self.game.players:
                if player.difficulty != None:
                    players.insert(tk.END, player.name+" ("+player.difficulty+")")
                else:
                    players.insert(tk.END, player.name)

        self.clear_toolbar()
        # Back button
        tk.Button(self.toolbar,
                  text="Back",
                  command=self.menu,
                  anchor='w',
                  bg='#f0f0f0',
                  activebackground='#bdbdbd').pack(fill='x', ipady=10, pady=3)
        # Bot configuration
        tk.Label(self.toolbar, text="Bot name").pack(anchor='w', padx=20, pady=3)
        bot_name = tk.Entry(self.toolbar)
        bot_name.pack(anchor='w', padx=30, pady=3)
        tk.Label(self.toolbar, text="Difficulty").pack(anchor='w', padx=20, pady=3)
        difficulty = tk.StringVar()
        tk.Radiobutton(self.toolbar, text="Easy", variable=difficulty, value="Easy").pack(anchor='w', padx=30, pady=3)
        tk.Radiobutton(self.toolbar, text="Medium", variable=difficulty, value="Medium").pack(anchor='w', padx=30, pady=3)
        tk.Radiobutton(self.toolbar, text="Hard", variable=difficulty, value="Hard").pack(anchor='w', padx=30, pady=3)
        tk.Button(self.toolbar,
                  text="Add",
                  command=lambda: add_bot(bot_name.get(), difficulty.get()),
                  width=10,
                  bg='#f0f0f0',
                  activebackground='#bdbdbd').pack(anchor='w', padx=20, pady=3)
        tk.Button(self.toolbar,
                  text="Delete",
                  command=lambda: delete_bot(bot_name.get()),
                  width=10,
                  bg='#f0f0f0',
                  activebackground='#bdbdbd').pack(anchor='w',ipadx=10, padx=20, pady=3)
        show_player_list()

    def rules(self):
        # TODO: display the game rules (not implemented yet).
        pass

    def window_update(self):
        """
        Update the window.
        """
        self.gameboard.display_rows()
        self.player_hand.display_hand()
| T0UT0UM/6QP | display/main.py | main.py | py | 5,606 | python | en | code | 0 | github-code | 13 |
15703036625 | import cv2
import os
# Create the VideoCapture object for the default camera (index 0)
cap = cv2.VideoCapture(0)
# Check that the camera opened correctly
if not cap.isOpened():
    print("Error al abrir la cámara.")
    exit()
# Counters: photos taken so far and total number of photos to capture
contador = 0
limite_fotos = 5
# Destination folder for the captured frames (Windows path)
ruta_guardado = 'D:\\Escritorio\\LAB'
# Capture loop: one saved image per iteration
while contador < limite_fotos:
    # Read the next frame
    ret, frame = cap.read()
    # Check whether the frame could be read
    if not ret:
        print("Error al capturar el fotograma.")
        break
    # Build a unique file name for this shot
    nombre_archivo = f"foto{contador}.png"
    ruta_completa = os.path.join(ruta_guardado, nombre_archivo)
    # Save the frame to the chosen location
    cv2.imwrite(ruta_completa, frame)
    print(f"Foto guardada en: {ruta_completa}")
    # Increment the counter
    contador += 1
# Release camera and GUI resources
cap.release()
cv2.destroyAllWindows()
| NotAndeer/PythonScripts | CAMARA/cam-foto.py | cam-foto.py | py | 1,009 | python | es | code | 0 | github-code | 13 |
11363827023 | import logging, config, command, news_sender
from storage import storage
from aiogram import Bot, Dispatcher, executor
# Module-level bootstrap: load configuration and storage before starting.
config.init()
storage.init()
logging.info(f'Started bot with config: config = {config.bot_config}')
def start():
    """Wire up the aiogram bot: register every command handler, launch the
    periodic news sender, then start long polling (blocks until stopped)."""
    bot = Bot(token=config.bot_config.token)
    dispatcher = Dispatcher(bot)
    for command_name, handler in command.commands.items():
        dispatcher.register_message_handler(handler, commands=[command_name])
    news_sender.start_send_news(config.bot_config.timeout_seconds, bot)
    executor.start_polling(dispatcher, skip_updates=True)
start()
| vitalii-honchar/hacker-bot | src/bot.py | bot.py | py | 558 | python | en | code | 0 | github-code | 13 |
# Demonstrate basic file-reading APIs on the 'wasteland' text file.
# Use a context manager so the handle is closed even if a read fails
# (the original opened/closed the file manually).
with open('wasteland', mode='rt', encoding='utf-8') as g:
    g.read()  # entire file
    g.seek(0)  # points to the start of the file
    g.readline()  # reads a single line
    g.seek(0)
    l = g.readlines()  # remaining lines as a list
    print(l)
def arithmetic_arranger(problems, results=False):
    """Arrange arithmetic problems vertically and side by side.

    Args:
        problems: list of strings like "32 + 698". Operator must be '+'
            or '-', operands at most four digits, at most five problems.
        results: when True, append a fourth line with each answer.

    Returns:
        The arranged problems as a single multi-line string, or an error
        message string when the input violates a constraint.
    """
    if len(problems) > 5:
        return 'Error: Too many problems.'
    if not problems:
        return ''

    tops, bottoms, dashes, totals = [], [], [], []
    for problem in problems:
        first, operator, second = problem.split()
        # Validation order matches the original: digit length, operator,
        # then digits-only.
        if len(first) > 4 or len(second) > 4:
            return 'Error: Numbers cannot be more than four digits.'
        if operator not in '+-':
            return "Error: Operator must be '+' or '-'."
        if not first.isnumeric() or not second.isnumeric():
            return 'Error: Numbers must only contain digits.'

        # Column width: widest operand plus room for the sign and a space.
        width = max(len(first), len(second)) + 2
        tops.append(first.rjust(width))
        bottoms.append(operator + second.rjust(width - 1))
        dashes.append('-' * width)
        if results:
            value = int(first) + int(second) if operator == '+' else int(first) - int(second)
            totals.append(str(value).rjust(width))

    # Join columns with four spaces; strip trailing whitespace per line.
    # (Joining by row index also fixes the original's last-line detection,
    # which compared line *contents* against lines[-1].)
    rows = [tops, bottoms, dashes] + ([totals] if results else [])
    return '\n'.join('    '.join(row).rstrip() for row in rows)
29913478016 | from botocore.exceptions import ClientError
import boto3
import configparser
import json
import pandas as pd
import s3fs
from time import sleep
def create_role(iam, role_name):
    """Create an IAM role that Redshift can assume.

    Returns the create_role response, or None when the API call fails
    (the ClientError is printed).
    """
    print("Creating IAM role...")
    trust_policy = {
        'Statement': [
            {
                'Action': 'sts:AssumeRole',
                'Effect': 'Allow',
                'Principal': {'Service': 'redshift.amazonaws.com'},
            }
        ],
        'Version': '2012-10-17',
    }
    try:
        return iam.create_role(
            Path='/',
            RoleName=role_name,
            Description='Allows Redshift clusters to call AWS services on your behalf.',
            AssumeRolePolicyDocument=json.dumps(trust_policy),
        )
    except ClientError as e:
        print(e)
def attach_role_policy(iam, role_name):
    """Attach AmazonS3ReadOnlyAccess to *role_name*; return the HTTP status code."""
    print("Attaching policy...")
    response = iam.attach_role_policy(
        RoleName=role_name,
        PolicyArn='arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess',
    )
    return response['ResponseMetadata']['HTTPStatusCode']
def create_cluster(redshift, roles, config_dict):
    """Create a Redshift cluster and block until it is available.

    NOTE(review): if create_cluster fails, the error is printed and the
    code still polls describe_clusters; the polling loop below is also
    unbounded (no timeout).
    """
    try:
        response = redshift.create_cluster(
            ClusterType=config_dict['DWH']['cluster_type'],
            NodeType=config_dict['DWH']['node_type'],
            NumberOfNodes=int(config_dict['DWH']['num_nodes']),
            DBName=config_dict['DWH']['db'],
            ClusterIdentifier=config_dict['DWH']['cluster_identifier'],
            MasterUsername=config_dict['DWH']['db_user'],
            MasterUserPassword=config_dict['DWH']['db_password'],
            IamRoles=roles
        )
    except ClientError as e:
        print(e)
    props = redshift.describe_clusters(ClusterIdentifier=config_dict['DWH']['cluster_identifier'])['Clusters'][0]
    print('Waiting for cluster {} to be created...\n(this can take a few mins)'.format(config_dict['DWH']['cluster_identifier']))
    is_created = False
    # Poll once per second until ClusterStatus reads 'available'.
    while not is_created:
        sleep(1)
        props = redshift.describe_clusters(ClusterIdentifier=config_dict['DWH']['cluster_identifier'])['Clusters'][0]
        is_created = props['ClusterStatus'] == 'available'
    print('Cluster {} created.'.format(config_dict['DWH']['cluster_identifier']))
    return props
def prettify_redshift_props(props):
    """Return selected cluster properties as a two-column (key, value) DataFrame.

    Only a fixed subset of interesting keys is kept, in the order they
    appear in *props*.
    """
    # -1 was deprecated in pandas 1.0 and is rejected by newer releases;
    # None is the documented way to disable column-width truncation.
    pd.set_option('display.max_colwidth', None)
    keysToShow = ['ClusterIdentifier', 'NodeType', 'ClusterStatus', 'MasterUsername', 'DBName', 'Endpoint', 'NumberOfNodes', 'VpcId']
    x = [(k, v) for k, v in props.items() if k in keysToShow]
    return pd.DataFrame(data=x, columns=['key', 'value'])
def authorize_ingress(ec2, props, port):
    '''
    Update cluster security group to allow access through Redshift port.

    NOTE(review): this opens the port to 0.0.0.0/0 (the whole internet)
    on the VPC's default security group -- acceptable for a throwaway
    lab cluster only.
    '''
    print("Authorizing Ingres...")
    try:
        vpc = ec2.Vpc(id=props['VpcId'])
        # First security group of the cluster's VPC (the default one).
        defaultSg = list(vpc.security_groups.all())[0]
        defaultSg.authorize_ingress(
            GroupName='default',
            CidrIp='0.0.0.0/0',
            IpProtocol='TCP',
            FromPort=int(port),
            ToPort=int(port)
        )
    except ClientError as e:
        print(e)
def main():
    """Read dwh.cfg, create the IAM role and Redshift cluster, write the
    resulting endpoint/ARN back into dwh.cfg, and open the cluster port."""
    print('Parsing config file...')
    config = configparser.ConfigParser()
    config.read('dwh.cfg') # Note: this transforms keys in your config file to lowercase
    # Creating dictionary from config object to make it easier to work with
    config_dict = {sect: dict(config.items(sect)) for sect in config.sections()}
    # Create variables from subset of dictionary
    AWS_KEY = config_dict['AWS']['key']
    AWS_SECRET = config_dict['AWS']['secret']
    DWH_CLUSTER_TYPE = config_dict['DWH']['cluster_type']
    DWH_NUM_NODES = int(config_dict['DWH']['num_nodes'])
    DWH_NODE_TYPE = config_dict['DWH']['node_type']
    DWH_CLUSTER_IDENTIFIER = config_dict['DWH']['cluster_identifier']
    DWH_DB = config_dict['DWH']['db']
    DWH_DB_USER = config_dict['DWH']['db_user']
    DWH_DB_PASSWORD = config_dict['DWH']['db_password']
    DWH_PORT = int(config_dict['DWH']['port'])
    DWH_IAM_ROLE_NAME = config_dict['DWH']['iam_role_name']
    # Print a summary of key-values that will be used to create cluster
    df = pd.DataFrame({
        'Param':
            ['DWH_CLUSTER_TYPE', 'DWH_NUM_NODES', 'DWH_NODE_TYPE', \
             'DWH_CLUSTER_IDENTIFIER', 'DWH_DB', 'DWH_DB_USER', \
             'DWH_DB_PASSWORD', 'DWH_PORT', 'DWH_IAM_ROLE_NAME'],
        'Value':
            [DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, \
             DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, \
             DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]
    })
    print(df)
    # Initialize AWS Clients
    print("Initializing AWS clients...")
    ec2 = boto3.resource(
        'ec2',
        region_name='us-west-2',
        aws_access_key_id=AWS_KEY,
        aws_secret_access_key=AWS_SECRET
    )
    s3 = boto3.resource(
        's3',
        region_name='us-west-2',
        aws_access_key_id=AWS_KEY,
        aws_secret_access_key=AWS_SECRET
    )
    iam = boto3.client(
        'iam',
        region_name='us-west-2',
        aws_access_key_id=AWS_KEY,
        aws_secret_access_key=AWS_SECRET
    )
    redshift = boto3.client(
        'redshift',
        region_name='us-west-2',
        aws_access_key_id=AWS_KEY,
        aws_secret_access_key=AWS_SECRET
    )
    # Create cluster
    role = create_role(iam, DWH_IAM_ROLE_NAME)
    attach_role_policy(iam, DWH_IAM_ROLE_NAME)
    role_arn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
    redshift_props = create_cluster(redshift, [role_arn], config_dict)
    if redshift_props:
        print(prettify_redshift_props(redshift_props))
        DWH_ENDPOINT = redshift_props['Endpoint']['Address']
        DWH_ROLE_ARN = redshift_props['IamRoles'][0]['IamRoleArn']
        print('DWH_ENDPOINT :: ', DWH_ENDPOINT)
        config.set('CLUSTER', 'host', str(DWH_ENDPOINT))
        print("--> config['CLUSTER']['host'] updated with new endpoint")
        print('DWH_ROLE_ARN :: ', DWH_ROLE_ARN)
        config.set('IAM_ROLE', 'arn', DWH_ROLE_ARN)
        print("--> config['IAM_ROLE']['arn'] updated with new ARN")
        # Persist the endpoint/ARN so downstream ETL scripts can use them.
        with open('dwh.cfg', 'w') as configfile:
            config.write(configfile)
        print("--> config file 'dwh.cfg' updated with new endpoint and ARN")
        authorize_ingress(ec2, redshift_props, DWH_PORT)
if __name__ == "__main__":
    # Script entry point.
    main()
| tommytracey/udacity_data_engineering | p3_data_warehouse_redshift/create_cluster.py | create_cluster.py | py | 6,638 | python | en | code | 2 | github-code | 13 |
# Import the library to control the Raspberry Pi GPIO pins
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# Use physical board pin numbering
GPIO.setmode(GPIO.BOARD)
# Import the time library (for the blink delay)
import time
# Physical pin the LED is wired to
led_pin = 8
# Set up the pin as an output (starts low/off)
GPIO.setup(led_pin, GPIO.OUT)
# Turn on the LED
print(True)
GPIO.output(led_pin, True)
# Wait for 0.5 seconds
time.sleep(0.5)
# Turn off the LED
print(False)
GPIO.output(led_pin, False)
| mattvenn/raspi-workshop | www/flash.py | flash.py | py | 404 | python | en | code | 2 | github-code | 13 |
25666877236 | import json
import platform
from collections import OrderedDict, namedtuple
from pathlib import Path
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
import urllib
import requests
import subprocess
import logging
import pkg_resources as pkg
def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for kernel size *k* (int or per-dimension list),
    unless an explicit padding *p* is given."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]
class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> use it as-is; anything else -> identity.
        if act is True:
            self.act = nn.SiLU()
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Standard path: convolution, batch norm, then activation."""
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        """Path used after conv+BN fusion: convolution then activation only."""
        return self.act(self.conv(x))
def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
    """Assert that every given file path has one of the acceptable suffixes.

    *file* may be a single path or a list/tuple of paths; *suffix* may be
    a single string or a sequence. Paths with no suffix are skipped.
    """
    if not file or not suffix:
        return
    if isinstance(suffix, str):
        suffix = [suffix]
    files = file if isinstance(file, (list, tuple)) else [file]
    for f in files:
        s = Path(f).suffix.lower()
        if s:
            assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
    # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
    file = Path(file)
    assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
    try:  # url1: torch's downloader (shows a progress bar)
        print(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file))
        assert file.exists() and file.stat().st_size > min_bytes, assert_msg  # check
    except Exception as e:  # url2: fall back to curl, which can retry/resume
        # Path.unlink(missing_ok=True) requires Python >= 3.8.
        file.unlink(missing_ok=True)  # remove partial downloads
        print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
        os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -")  # curl download, retry and resume on fail
    finally:
        # Final sanity check either way; delete anything undersized.
        if not file.exists() or file.stat().st_size < min_bytes:  # check
            file.unlink(missing_ok=True)  # remove partial downloads
            print(f"ERROR: {assert_msg}\n{error_msg}")
        print('')
STDOUT = -2
def attempt_download(file, repo='ultralytics/yolov5'):  # from utils.downloads import *; attempt_download()
    """Download *file* if it does not already exist.

    Accepts either a URL (downloaded directly) or a known GitHub release
    asset name (fetched from the latest release of *repo*). Returns the
    local path as a string.
    """
    file = Path(str(file).strip().replace("'", ''))

    if not file.exists():
        # URL specified
        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.
        if str(file).startswith(('http:/', 'https:/')):  # download
            url = str(file).replace(':/', '://')  # Pathlib turns :// -> :/
            file = name.split('?')[0]  # parse authentication https://url.com/file.txt?auth...
            if Path(file).is_file():
                print(f'Found {url} locally at {file}')  # file already exists
            else:
                safe_download(file=file, url=url, min_bytes=1E5)
            return file

        # GitHub assets
        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
        try:
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
            tag = response['tag_name']  # i.e. 'v1.0'
        except Exception:  # fallback plan (was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit)
            assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
                      'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
            try:
                tag = subprocess.check_output('git tag', shell=True, stderr=STDOUT).decode().split()[-1]
            except Exception:  # git unavailable or no tags (was a bare `except:`)
                tag = 'v6.0'  # current release

        if name in assets:
            safe_download(file,
                          url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                          # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}',  # backup url (optional)
                          min_bytes=1E5,
                          error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')

    return str(file)
def set_logging(name=None, verbose=True):
    """Reset root logging and return a logger named ``name``.

    All existing root handlers are removed first so repeated calls do not
    duplicate output.  During multi-GPU training only ranks -1/0 log at INFO;
    other ranks are restricted to WARNING.
    """
    # Iterate over a *copy*: removing from logging.root.handlers while
    # iterating it skips every other handler and leaves some installed.
    for h in list(logging.root.handlers):
        logging.root.removeHandler(h)
    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
    return logging.getLogger(name)


LOGGER = set_logging(__name__)  # define globally (used in train.py, val.py, detect.py, etc.)
class Ensemble(nn.ModuleList):
    """A list of models whose inference outputs are concatenated so that a
    single NMS pass arbitrates between all candidate detections."""

    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        # Gather the inference output (element 0) of every member model.
        outputs = [member(x, augment, profile, visualize)[0] for member in self]
        # Alternatives: torch.stack(outputs).max(0)[0] (max ensemble) or
        # .mean(0) (mean ensemble); concatenation + NMS is used instead.
        merged = torch.cat(outputs, 1)  # nms ensemble
        return merged, None  # (inference, train) output pair
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
    """Compare an installed version string against a required one.

    With ``pinned`` the versions must match exactly, otherwise ``current``
    only has to meet ``minimum``.  ``hard`` turns a failure into an
    AssertionError; ``verbose`` logs a warning on failure.  Returns the
    boolean comparison result.
    """
    current, minimum = pkg.parse_version(current), pkg.parse_version(minimum)
    if pinned:
        result = current == minimum
    else:
        result = current >= minimum
    s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'
    if hard:
        # Hard requirement: fail fast with the descriptive message.
        assert result, s
    if verbose and not result:
        LOGGER.warning(s)
    return result
class Detect(nn.Module):
    """YOLOv5 detection head.

    Applies one 1x1 conv per feature level and, at inference time, decodes
    the raw outputs into absolute xywh boxes using per-level cell grids,
    anchors and strides.
    """
    stride = None  # strides computed during build
    onnx_dynamic = False  # ONNX export parameter
    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        """Build the head: nc classes, per-level anchor pairs, input channels
        per level in ``ch``; ``inplace`` selects in-place decode ops."""
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        self.anchor_grid = [torch.zeros(1)] * self.nl  # init anchor grid
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.inplace = inplace  # use in-place ops (e.g. slice assignment)
    def forward(self, x):
        """Run the head on the list of per-level feature maps ``x``.

        Training: returns the reshaped raw predictions per level.
        Inference: returns (decoded boxes concatenated over levels, raw x).
        """
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference
                # Rebuild the grid when the feature-map size changed (or for dynamic ONNX export).
                if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
                y = x[i].sigmoid()
                if self.inplace:
                    y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i]  # xy
                    y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
                    xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i]  # xy
                    wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, y[..., 4:]), -1)
                z.append(y.view(bs, -1, self.no))
        return x if self.training else (torch.cat(z, 1), x)
    def _make_grid(self, nx=20, ny=20, i=0):
        """Return the (1,na,ny,nx,2) cell-offset grid and the matching anchor
        grid (anchors scaled to pixels by the level stride) for level ``i``."""
        d = self.anchors[i].device
        if check_version(torch.__version__, '1.10.0'):  # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility
            yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij')
        else:
            yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)])
        grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float()
        anchor_grid = (self.anchors[i].clone() * self.stride[i]) \
            .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float()
        return grid, anchor_grid
def attempt_load(weights, map_location=None, inplace=True, fuse=True):
    """Load one checkpoint or an ensemble of checkpoints as an eval-mode model.

    weights: a single path or a list of paths; map_location: forwarded to
    torch.load; inplace/fuse: compatibility and conv+bn fusion switches.
    Returns the single model, or an Ensemble when several weights are given.
    NOTE(review): torch.load unpickles the checkpoint — only load trusted files.
    """
    from models.yolo import Detect, Model
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location=map_location)  # load
        if fuse:
            model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model
        else:
            model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval())  # without layer fuse
    # Compatibility updates
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
            m.inplace = inplace  # pytorch 1.7.0 compatibility
        if type(m) is Detect:
            if not isinstance(m.anchor_grid, list):  # new Detect Layer compatibility
                delattr(m, 'anchor_grid')
                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
    if len(model) == 1:
        return model[-1]  # return model
    else:
        print(f'Ensemble created with {weights}\n')
        for k in ['names']:
            setattr(model, k, getattr(model[-1], k))
        model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
        return model  # return ensemble
class DetectMultiBackend(nn.Module):
    # YOLOv5 MultiBackend class for python inference on various backends
    def __init__(self, weights='yolov5s.pt', device=None, dnn=False):
        """Load ``weights`` with whichever backend its file suffix selects.

        weights: model path (suffix picks the backend, see table below);
        device: torch device used for PyTorch/TensorRT tensors; dnn: prefer
        OpenCV DNN for .onnx files.  All locals created here (model/session/
        interpreter handles, backend flags, stride, names) become attributes
        via the ``self.__dict__.update(locals())`` at the bottom.
        """
        # Usage:
        #   PyTorch:      weights = *.pt
        #   TorchScript:            *.torchscript
        #   CoreML:                 *.mlmodel
        #   TensorFlow:             *_saved_model
        #   TensorFlow:             *.pb
        #   TensorFlow Lite:        *.tflite
        #   ONNX Runtime:           *.onnx
        #   OpenCV DNN:             *.onnx with dnn=True
        #   TensorRT:               *.engine
        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        suffix = Path(w).suffix.lower()
        suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
        check_suffix(w, suffixes)  # check weights have acceptable suffix
        pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes)  # backend booleans
        stride, names = 64, [f'class{i}' for i in range(1000)]  # assign defaults
        w = attempt_download(w)  # download if not local
        if jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files)
            if extra_files['config.txt']:
                d = json.loads(extra_files['config.txt'])  # extra_files dict
                stride, names = int(d['stride']), d['names']
        elif pt:  # PyTorch
            model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)
            stride = int(model.stride.max())  # model stride
            names = model.module.names if hasattr(model, 'module') else model.names  # get class names
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements(('opencv-python>=4.5.4',))
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            cuda = torch.cuda.is_available()
            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '8.0.0', verbose=True)  # version requirement
            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            bindings = OrderedDict()
            # Pre-allocate one device tensor per engine binding and record its pointer.
            for index in range(model.num_bindings):
                name = model.get_binding_name(index)
                dtype = trt.nptype(model.get_binding_dtype(index))
                shape = tuple(model.get_binding_shape(index))
                data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)
                bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))
            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
            context = model.create_execution_context()
            batch_size = bindings['images'].shape[0]
        else:  # TensorFlow model (TFLite, pb, saved_model)
            if pb:  # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
                LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')
                import tensorflow as tf
                def wrap_frozen_graph(gd, inputs, outputs):
                    # Wrap a TF1 GraphDef as a callable TF2 concrete function.
                    x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), [])  # wrapped
                    return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),
                                   tf.nest.map_structure(x.graph.as_graph_element, outputs))
                graph_def = tf.Graph().as_graph_def()
                graph_def.ParseFromString(open(w, 'rb').read())
                frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
            elif saved_model:
                LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')
                import tensorflow as tf
                model = tf.keras.models.load_model(w)
            elif tflite:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
                if 'edgetpu' in w.lower():
                    LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
                    import tflite_runtime.interpreter as tfli
                    delegate = {'Linux': 'libedgetpu.so.1',  # install https://coral.ai/software/#edgetpu-runtime
                                'Darwin': 'libedgetpu.1.dylib',
                                'Windows': 'edgetpu.dll'}[platform.system()]
                    interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])
                else:
                    LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
                    import tensorflow as tf
                    interpreter = tf.lite.Interpreter(model_path=w)  # load TFLite model
                interpreter.allocate_tensors()  # allocate
                input_details = interpreter.get_input_details()  # inputs
                output_details = interpreter.get_output_details()  # outputs
        self.__dict__.update(locals())  # assign all variables to self
    def forward(self, im, augment=False, visualize=False, val=False):
        """Run one batch ``im`` (torch BCHW) through the active backend.

        Returns the raw predictions; with ``val=True`` returns (pred, [])
        to match the PyTorch validation interface.
        """
        # YOLOv5 MultiBackend inference
        b, ch, h, w = im.shape  # batch, channel, height, width
        if self.pt or self.jit:  # PyTorch
            y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)
            return y if val else y[0]
        elif self.coreml:  # CoreML
            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
            im = Image.fromarray((im[0] * 255).astype('uint8'))
            # im = im.resize((192, 320), Image.ANTIALIAS)
            y = self.model.predict({'image': im})  # coordinates are xywh normalized
            box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels
            # NOTE(review): np.float was removed in NumPy 1.24 — use float or np.float64.
            conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
            y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
        elif self.onnx:  # ONNX
            im = im.cpu().numpy()  # torch to numpy
            if self.dnn:  # ONNX OpenCV DNN
                self.net.setInput(im)
                y = self.net.forward()
            else:  # ONNX Runtime
                y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
        elif self.engine:  # TensorRT
            assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)
            self.binding_addrs['images'] = int(im.data_ptr())
            self.context.execute_v2(list(self.binding_addrs.values()))
            y = self.bindings['output'].data
        else:  # TensorFlow model (TFLite, pb, saved_model)
            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)
            if self.pb:
                y = self.frozen_func(x=self.tf.constant(im)).numpy()
            elif self.saved_model:
                y = self.model(im, training=False).numpy()
            elif self.tflite:
                input, output = self.input_details[0], self.output_details[0]
                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model
                if int8:
                    scale, zero_point = input['quantization']
                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale
                self.interpreter.set_tensor(input['index'], im)
                self.interpreter.invoke()
                y = self.interpreter.get_tensor(output['index'])
                if int8:
                    scale, zero_point = output['quantization']
                    y = (y.astype(np.float32) - zero_point) * scale  # re-scale
            # TF outputs are normalized; scale back to pixel coordinates.
            y[..., 0] *= w  # x
            y[..., 1] *= h  # y
            y[..., 2] *= w  # w
            y[..., 3] *= h  # h
        y = torch.tensor(y) if isinstance(y, np.ndarray) else y
        return (y, []) if val else y
    def warmup(self, imgsz=(1, 3, 640, 640), half=False):
        """Prime the backend with one dummy inference (GPU backends only)."""
        # Warmup model by running inference once
        if self.pt or self.engine or self.onnx:  # warmup types
            if isinstance(self.device, torch.device) and self.device.type != 'cpu':  # only warmup GPU models
                im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float)  # input image
                self.forward(im)  # warmup
| skarlett992/yolo_v5_tracking | src/yolov5_utils.py | yolov5_utils.py | py | 20,026 | python | en | code | 0 | github-code | 13 |
74564453138 | #!/usr/bin/env python
"""
_AutoIncrementCheck_
AutoIncrement Check
Test to properly set the autoIncrement value
First, find the highest jobID either in wmbs_job or in wmbs_highest_job
Then reset AUTO_INCREMENT to point to that.
"""
__all__ = []
import logging
from WMCore.Database.DBFormatter import DBFormatter
class AutoIncrementCheck(DBFormatter):
    """Keep wmbs_job's AUTO_INCREMENT ahead of every existing job id.

    Finds the highest id currently in wmbs_job, compares it (and a
    caller-supplied floor) against the table's current AUTO_INCREMENT, and
    bumps the counter when it lags behind.
    """

    highestSQL = """SELECT IFNULL(MAX(id), 0) FROM wmbs_job"""
    currentSQL = """SELECT Auto_increment FROM information_schema.tables WHERE table_name='wmbs_job' AND table_schema=DATABASE()"""
    alterSQL = "ALTER TABLE wmbs_job AUTO_INCREMENT = :value"

    def execute(self, input = 0, conn = None, transaction = False):
        """Raise AUTO_INCREMENT to max(input, highest existing id) + 1.

        ``input`` is an externally-known minimum job id (e.g. from
        wmbs_highest_job); nothing happens if the counter is already high enough.
        """
        fetched = {}
        for label, sql in (("highest", self.highestSQL), ("current", self.currentSQL)):
            rows = self.dbi.processData(sql, {}, conn = conn, transaction = transaction)
            fetched[label] = rows[0].fetchall()[0][0]
        target = max(input + 1, fetched["highest"] + 1)
        if target > fetched["current"]:
            self.dbi.processData(self.alterSQL, {'value': target},
                                 conn = conn, transaction = transaction)
        return
| dmwm/WMCore | src/python/WMCore/WMBS/MySQL/Jobs/AutoIncrementCheck.py | AutoIncrementCheck.py | py | 1,389 | python | en | code | 44 | github-code | 13 |
5323200784 | import numpy as np
from timeit import default_timer as timer
from numba import cuda
import numpy as np
import math
def mandel(x, y, max_iters):
    """Return a (smoothed) escape-time count for the point x + iy.

    The point c is iterated under z -> z*z + c.  If |z|^2 reaches 4 within
    ``max_iters`` steps, a fractional iteration count is returned for
    smooth colouring; otherwise ``max_iters`` is returned (the point is a
    Mandelbrot-set membership candidate).
    """
    c = complex(x, y)
    z = 0.0j
    for step in range(max_iters):
        z = z * z + c
        if z.real * z.real + z.imag * z.imag >= 4:
            # Continuous (banding-free) iteration count based on the next iterate.
            return step + 1 - math.log(math.log(abs(z * z + c))) / math.log(2)
    return max_iters
# Device-side version of mandel() for use inside the CUDA kernel.
# NOTE(review): the declared int32 return type truncates the fractional
# "smooth" count mandel() can return — presumably acceptable for the uint8
# image buffer, but confirm if smooth colouring is actually wanted.
mandel_gpu = cuda.jit('int32(float64, float64, float64)', device=True)(mandel)
@cuda.jit('(float64, float64, float64, float64, uint8[:,:], uint32)')
def mandel_kernel(min_x, max_x, min_y, max_y, image, iters):
    """CUDA kernel: fill ``image`` with Mandelbrot escape counts.

    Each thread walks the image in a grid-stride pattern on both axes,
    mapping pixel (x, y) linearly onto [min_x, max_x] x [min_y, max_y] and
    storing mandel_gpu's count into the uint8 buffer (values wrap mod 256).
    """
    height = image.shape[0]
    width = image.shape[1]
    pixel_size_x = (max_x - min_x) / width
    pixel_size_y = (max_y - min_y) / height
    startX, startY = cuda.grid(2)
    gridX = cuda.gridDim.x * cuda.blockDim.x;
    gridY = cuda.gridDim.y * cuda.blockDim.y;
    for x in range(startX, width, gridX):
        real = min_x + x * pixel_size_x
        for y in range(startY, height, gridY):
            imag = min_y + y * pixel_size_y
            image[y, x] = mandel_gpu(real, imag, iters)
def run_mandel(min_x, max_x, min_y, max_y):
    """Allocate the image, launch the CUDA kernel over the given complex-plane
    bounds, and return the filled 1334x2000 uint8 iteration-count matrix.

    NOTE(review): iter_lvl=1000 exceeds the uint8 range, so stored counts wrap
    modulo 256 — confirm this colour banding is intended.
    """
    gimage = np.zeros((1334,2000), dtype = np.uint8) #Pre-determined image size 1334x2000
    blockdim = (32, 8)  # threads per block (x, y)
    griddim = (32,16)   # blocks per grid (x, y)
    iter_lvl = 1000
    start = timer()
    d_image = cuda.to_device(gimage)  # copy the empty buffer to the GPU
    mandel_kernel[griddim, blockdim](min_x, max_x, min_y, max_y, d_image, iter_lvl)
    gimage = d_image.copy_to_host()   # bring the result back
    dt = timer() - start
    print("Mandelbrot created on GPU in %f s" % dt)
    return gimage
def calc_dim(xpos, ypos, xmin, xmax, ymin, ymax, zoom=10):
    """Compute the complex-plane bounds of a zoomed view centred on a clicked pixel.

    xpos, ypos: clicked pixel (image lower-left is (0, 0)).
    xmin..ymax: bounds of the currently displayed region.
    zoom: magnification factor; the new window spans 1/zoom of the current
        extent in each axis (default 10, matching the previous hard-coded zoom).
    Returns (min_x, max_x, min_y, max_y) for the new view.
    """
    ypixels, xpixels = 1334, 2000  # pre-determined image size, matches run_mandel
    half_y, half_x = ypixels / 2, xpixels / 2
    # Size of one pixel in complex-plane units.
    pixel_size_x = (xmax - xmin) / xpixels
    pixel_size_y = (ymax - ymin) / ypixels
    # Complex-plane coordinates of the clicked pixel -> new view centre.
    xcenter = xmin + xpos * pixel_size_x
    ycenter = ymin + ypos * pixel_size_y
    # New boundaries: half the (zoomed) window on each side of the centre.
    min_x = xcenter - half_x / zoom * pixel_size_x
    max_x = xcenter + half_x / zoom * pixel_size_x
    min_y = ycenter - half_y / zoom * pixel_size_y
    max_y = ycenter + half_y / zoom * pixel_size_y
    return min_x, max_x, min_y, max_y
652639867 | import sys
import matplotlib.pyplot as plt
import numpy as np
sys.path.insert(1, '../')
from pmt_he_study.models import *
from ReadRED import sndisplay as sn
tdc2ns = 0.390625  # TDC sample -> ns (400 ns window / 1024 samples, cf. the x-axis builders below)
adc2mv = 0.610352  # ADC count -> mV (presumably 2500 mV / 4096 counts — confirm digitiser spec)
def get_template():
    """Load the reference PMT pulse template (first CSV column) as a numpy array."""
    path = "/Users/williamquinn/Desktop/commissioning/template_1_0_1_run_104.csv"
    with open(path, "r") as temp_file:
        values = [float(line.split(",")[0].strip()) for line in temp_file.readlines()]
    return np.array(values)
def plot_shape_dist(om_id, shapes, run):
    """Histogram the matched-filter shape indices for one optical module and
    save the figure; also prints split counts/means above and below 0.9."""
    shapes = np.array(shapes)
    print(len(shapes[shapes >= 0.9]), len(shapes[shapes < 0.9]))
    print(np.average(shapes[shapes > 0.9]), np.average(shapes))
    plt.figure(figsize=figsize)
    freq, bin_edges = np.histogram(shapes, range=(0.90, 1), bins=20)
    width = bin_edges[2] - bin_edges[1]
    bin_centres = bin_edges[:-1] + width / 2
    plt.bar(bin_centres, freq, width=width)
    plt.xlabel('MF Shape Index')
    # Dashed line marks the mean over *all* shapes (including those below 0.9).
    plt.axvline(np.average(shapes), ls='--', color='k',
                label='Mean = {:.4f}'.format(np.average(shapes)))
    plt.xlim(0.9, 1)
    plt.legend(loc='best')
    plt.ylabel("Counts")
    plt.title("PMT Pulse shape index OM - " + om_id)
    plt.tight_layout()
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_shape_dist_{}.pdf".format(run, om_id))
def plot_amp_vs_shape(amps, shapes, run, om_id):
    """Scatter pulse amplitude (mV) against matched-filter shape index and save the figure."""
    plt.figure(figsize=figsize)
    plt.plot(amps, shapes, 'C0.', alpha=0.5)
    plt.xlabel("Amplitude /mV")
    plt.ylabel("Shape Index")
    plt.title("PMT Pulse Amplitude vs Shape Index - " + om_id)
    plt.tight_layout()
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_amp_vs_shape_{}.pdf".format(run, om_id))
def plot_amp_vs_fwhm(amps, fwhms, run, om_id):
    """Scatter pulse amplitude (mV) against pulse FWHM (ns) and save the figure."""
    plt.figure(figsize=figsize)
    plt.plot(amps, fwhms, 'C1.', alpha=0.5)
    plt.xlabel("Amplitude /mV")
    plt.ylabel("FWHM /ns")
    plt.title("PMT Pulse Amplitude vs Pulse Width - " + om_id)
    plt.tight_layout()
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_amp_vs_fwhm_{}.pdf".format(run, om_id))
def plot_fwhm_vs_shape(fwhms, shapes, run, om_id):
    """Scatter pulse FWHM (ns) against matched-filter shape index and save the figure."""
    plt.figure(figsize=figsize)
    plt.plot(fwhms, shapes, 'C3.', alpha=0.5)
    plt.xlabel("FWHM /ns")
    plt.ylabel("Shape Index")
    plt.title("PMT Pulse Width vs Shape Index - " + om_id)
    plt.tight_layout()
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_fwhm_vs_shape_{}.pdf".format(run, om_id))
def plot_fwhm_shape_vs_amp(amps, shapes, fwhms, run, om_id):
    """Two side-by-side scatters sharing the amplitude x-axis:
    shape index (left) and FWHM (right, labels on the right edge)."""
    fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=figsize)
    fig.suptitle("PMT Pulse Amplitude vs Shape - " + om_id)
    axes[0].plot(amps, shapes, 'C0.', alpha=0.5, markersize=1.5)
    axes[0].set_ylabel("Shape Index")
    axes[1].plot(amps, fwhms, 'C1.', alpha=0.5, markersize=1.5)
    axes[1].yaxis.tick_right()
    axes[1].yaxis.set_label_position("right")
    axes[1].set_ylabel("FWHM /ns")
    fig.supxlabel("Amplitude /mV")
    plt.tight_layout()
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_fwhm_shape_vs_amp_{}.pdf".format(run, om_id))
def plot_av_shapes(avs, stds, ref_om, run):
    """Draw two sndisplay calorimeter maps: per-OM average shape index and its
    standard deviation.  The reference OM (if given) is forced to 1 in both maps."""
    sncalo_0 = sn.calorimeter("average_shape_{}".format(run), with_palette=True)
    sncalo_0.draw_content = False
    sncalo_0.draw_omid = False
    print(avs)  # NOTE(review): looks like leftover debug output — remove if noisy
    for om, val in enumerate(avs):
        if om == ref_om and ref_om is not None:
            sncalo_0.setcontent(om, 1)
        elif val is None or val < 0.9:
            # Skip OMs without data or with clearly bad average shapes.
            pass
        else:
            sncalo_0.setcontent(om, val)
    sncalo_0.setrange(0.95, 1)
    sncalo_0.draw()
    sncalo_0.save("/Users/williamquinn/Desktop/commissioning")
    del sncalo_0
    # Second map: standard deviation of the shape index (no range clipping here).
    sncalo_1 = sn.calorimeter("std_shape_{}".format(run), with_palette=True)
    sncalo_1.draw_content = False
    sncalo_1.draw_omid = False
    for om, val in enumerate(stds):
        if om == ref_om and ref_om is not None:
            sncalo_1.setcontent(om, 1)
        elif val is None:
            pass
        else:
            sncalo_1.setcontent(om, val)
    sncalo_1.draw()
    sncalo_1.save("/Users/williamquinn/Desktop/commissioning")
    del sncalo_1
def plot_waveform(waveform, amplitude, peak, template, om_id, run, shape):
    """Overlay one baseline-subtracted pulse (25 ns before to 175 ns after the
    peak sample) with the amplitude-scaled template, and save the figure."""
    plt.figure(figsize=figsize)
    start = peak - int(25 / tdc2ns)
    stop = peak + int(175 / tdc2ns)
    x = [i * 400 / 1024 for i in range(1024)][start:stop]  # sample index -> ns
    plt.plot(x, waveform, ".", label='PMT Pulse', markersize=1.5)
    # Normalise the template to unit (negative) peak, then scale to the pulse amplitude.
    plt.plot(x, amplitude * template/(-1*np.min(template)), label='Template')
    plt.xlabel("Timestamp /ns")
    plt.ylabel('Voltage /mV')
    plt.title("PMT Pulse - {} Shape Index: {:.4f}".format(om_id, shape))
    plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_pulse_vs_template_{}.pdf".format(run, om_id))
def plot_om_type_shape(avs, run):
    """Histogram the per-OM average shape index, split by OM hardware type.

    avs: per-OM average shape indices (None where the OM has no data).
    run: run number used in the output file name.
    """
    grouped = [[] for _ in range(4)]  # one list of averages per OM type
    for om, av in enumerate(avs):
        if av is None:  # use identity test, not `== None` (PEP 8 E711)
            continue
        # om_type presumably returns (type index, sub-type) — from pmt_he_study.models.
        om_t, st = om_type(om)
        grouped[om_t].append(av)
    labels = ['5" MW', '8" MW', '5" XW', '5" GV']
    plt.figure(figsize=figsize)
    for index in range(4):
        freq, bin_edges = np.histogram(grouped[index], bins=10, range=(0.95, 1))
        width = bin_edges[2] - bin_edges[1]
        bin_centres = bin_edges[:-1] + width/2
        plt.bar(bin_centres, freq, width=width, alpha=0.5, label=labels[index])
    plt.ylabel("No. OMs")
    plt.xlabel("Shape Index")
    plt.legend(loc='best')
    plt.tight_layout()
    # (file-name typo "om_tupe" kept so existing outputs are overwritten consistently)
    plt.savefig("/Users/williamquinn/Desktop/commissioning/{}_om_tupe_shape_dist.pdf".format(run))
def main():
    """Loop over a ROOT file of digitised PMT events, matched-filter every
    qualifying pulse against the template (up to 1000 per OM), and produce
    per-OM shape-index summary plots."""
    args = io_parse_arguments()
    input_file = args.i
    # Run number parsed from a file name of the form .../run_<N>.root
    run = int(input_file.split("/")[-1].split(".")[0].split("run_")[-1])
    template = get_template()
    file = ROOT.TFile(input_file, "READ")
    tree = file.T
    # Per-OM accumulators, indexed 0..711.
    n_counter = [0 for i in range(712)]
    shapes = [[] for i in range(712)]
    mf_amplitudes = [[] for i in range(712)]
    amplitudes = [[] for i in range(712)]
    fwhms = [[] for i in range(712)]
    n_events = tree.GetEntries()
    i_e = 0
    for event in tree:
        i_e += 1
        if i_e % 10000 == 0:
            print(i_e, "/", n_events)
        for index, om in enumerate(list(event.OM_ID)):
            if run == 104:
                om = om + 260  # run-104 OM numbering offset
            # NOTE(review): om == 712 passes this guard but would overflow the
            # 712-long lists below — should this be `om >= 712`?
            if om > 712:
                continue
            if n_counter[om] < 1000:  # cap pulses analysed per OM
                # Each event stores one 1024-sample waveform per OM, concatenated.
                waveform = list(event.waveform)[index*1024: 1024*(index + 1)]
                baseline = get_baseline(waveform, 100)
                amplitude = get_amplitude(waveform, baseline)
                peak = get_peak(waveform)
                # Require >50 ADC pulses with room for the 25 ns/175 ns window around the peak.
                if -1 * amplitude > 50 and int(25 / tdc2ns) < peak < (1024-int(175 / tdc2ns)):
                    fwhm = get_fwhm_timestamp(waveform, baseline, peak, -1 * amplitude) * tdc2ns
                    x = [i * 400 / 1024 for i in range(1024)][peak - int(25 / tdc2ns):peak + int(175 / tdc2ns)]
                    temp = waveform[peak - int(25 / tdc2ns):peak + int(175 / tdc2ns)]
                    temp = (np.array(temp) - baseline) * adc2mv  # baseline-subtract, convert to mV
                    amplitude = -1* amplitude * adc2mv
                    try:
                        mf_shape, mf_amp = mf_waveform(waveform=temp, template=template)
                    except:  # NOTE(review): bare except hides all matched-filter failures
                        continue
                    # Inspect anomalous (low-shape) pulses on the reference OM.
                    if mf_shape < 0.9 and om_id_string(om) == 'M:1.1.1':
                        plot_waveform(temp, amplitude, peak, template, om_id_string(om), run, mf_shape)
                    shapes[om].append(mf_shape)
                    mf_amplitudes[om].append(mf_amp)
                    amplitudes[om].append(amplitude)
                    fwhms[om].append(fwhm)
                    n_counter[om] += 1
    # Per-OM mean and spread of the shape index (None when no pulses passed cuts).
    avs = [None for i in range(712)]
    stds = [None for i in range(712)]
    ref_om = None
    for om in range(712):
        om_id = om_id_string(om)
        if len(shapes[om]) == 0:
            av = None
            std = None
        else:
            av = np.average(shapes[om])
            std = np.std(shapes[om])
        avs[om] = av
        stds[om] = std
        if om_id == 'M:1.1.1':
            plot_shape_dist(om_id, shapes[om], run)
            # plot_fwhm_shape_vs_amp(amplitudes[om], shapes[om], fwhms[om], run, om_id)
            # plot_amp_vs_shape(amplitudes[om], shapes[om], run, om_id)
            # plot_amp_vs_fwhm(amplitudes[om], fwhms[om], run, om_id)
            # plot_fwhm_vs_shape(fwhms[om], shapes[om], run, om_id)
        if om_id == 'M:1.0.1':
            ref_om = om
    # plot_av_shapes(avs, stds, ref_om, run)
    plot_om_type_shape(avs, run)
    file.Close()
# Script entry point: run the full shape analysis when executed directly.
if __name__ == "__main__":
    main()
| SuperNEMO-DBD/PMT-ShapeAnalysis | commissioning/pmt_shape_analysis.py | pmt_shape_analysis.py | py | 8,720 | python | en | code | 0 | github-code | 13 |
31741782145 | # encoding: utf-8
"""
@author: nanjixiong
@time: 2020/6/28 22:07
@file: example04.py
@desc:
"""
import numpy
# Load the CSV as strings, skipping the header row; result is a 2-D str array.
world_alcohol = numpy.genfromtxt("./world_alcohol.txt", delimiter=',', dtype=str, skip_header=1)
print(world_alcohol)
# Scalar indexing: [row, column].
uruguay_other_1986 = world_alcohol[1, 4]
print(uruguay_other_1986)
third_country = world_alcohol[2, 2]
# NOTE(review): this prints uruguay_other_1986 a second time — probably
# third_country was intended here.
print(uruguay_other_1986)
print(third_country)
# 1-D slicing: elements 0..2 (stop index excluded).
vector = numpy.array([5, 10, 15, 20])
print(vector[0:3])
matrix = numpy.array([
    [5, 10, 15],
    [20, 25, 30],
    [35, 40, 45]
])
# 2-D slicing: whole column, column range, and row+column ranges.
print(matrix[:, 1])
print(matrix[:,0:2])
print(matrix[1:3,0:2])
# Boolean indexing: the comparison yields a mask that selects matching elements.
vector=numpy.array([5,10,15,20])
equal_to_len=vector==10
print(vector[equal_to_len])
# NOTE(review): this array is built but never assigned — the result is discarded.
numpy.array([
    [5,10,15],
    [20,25,30],
    [35,40,45],
])
# Boolean mask over a column, then used to select whole rows.
second_column_25=(matrix[:,1]==25)
print(second_column_25)
print(matrix[second_column_25, :])
72987579857 | # -*- coding: utf-8 -*-
# @author: Darren Vong
import urllib
import urllib2
import json
from bs4 import BeautifulSoup
from utils import find_recursive_dict_key
# Browser-like User-Agent sent with every request below — presumably so
# Wikipedia does not reject the default urllib2 agent; confirm if required.
AGENT_NAME = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0"
headers = {"User-Agent": AGENT_NAME}  # shared header dict for all requests in this module
def get_num_links(soup):
    """Count the anchor tags in the parsed page that carry an href attribute."""
    anchors = soup.find_all("a", href=True)
    return len(anchors)
def get_num_imgs(soup):
    """Count every <img> tag found in the parsed page."""
    images = soup.find_all("img")
    return len(images)
def get_num_references(soup):
    """Count the <li> entries inside the page's ordered reference list."""
    return len(soup.select("ol.references > li"))
def get_img_url(page_title):
    """Return the URL of the first image listed for a Wikipedia page.

    Queries the MediaWiki API twice: once for the page's image names, then
    for the URL of the first one.  Falls back to a Wikipedia-logo image URL
    when the page lists no images.  (Python 2 code: urllib.quote/urllib2.)
    """
    img_exists_url = (u"https://en.wikipedia.org/w/api.php?action=query&"+
                      u"titles=%s&prop=images&format=json" % (urllib.quote(page_title)))
    request = urllib2.Request(img_exists_url, None, headers)
    feed = urllib2.urlopen(request)
    img_exists_obj = json.load(feed)
    # find_recursive_dict_key walks the nested API response for the given key.
    img_names_list = find_recursive_dict_key(img_exists_obj, u"images")
    if isinstance(img_names_list, list):
        # Ensures utf-8 representation of characters are used
        img_name = img_names_list[0]["title"].encode('utf8')
        img_url_info_link = (u"https://en.wikipedia.org/w/api.php?action=query&titles=%s&prop=imageinfo&iiprop=url&format=json" % (urllib.quote(img_name, safe='')))
        img_req = urllib2.Request(img_url_info_link, None, headers)
        img_link_feed = urllib2.urlopen(img_req)
        img_url_cont_obj = json.load(img_link_feed)
        img_url = find_recursive_dict_key(img_url_cont_obj, u"imageinfo")[0][u"url"]
    else:  # No appropriate imgs found, returns link to wikipedia logo img
        img_url = u"https://usmentor.qbcontent.com/wp-content/uploads/2014/07/wikipedia-logo1.jpg"
    return img_url
def get_page_id(page_title):
    """Return the MediaWiki page id for ``page_title``, or -1 if not found.

    NOTE(review): the query requests prop=images (copied from get_img_url)
    even though only the pageid is needed — confirm whether a lighter query
    would do.  The result is also looked up twice in the response.
    """
    id_exists_url = (u"https://en.wikipedia.org/w/api.php?action=query&"+
                     u"titles=%s&prop=images&format=json" % (urllib.quote(page_title)))
    request = urllib2.Request(id_exists_url, None, headers)
    feed = urllib2.urlopen(request)
    id_exists_obj = json.load(feed)
    # Truthiness test: a pageid of 0 (if it ever occurred) would map to -1.
    if find_recursive_dict_key(id_exists_obj, u"pageid"):
        return find_recursive_dict_key(id_exists_obj, u"pageid")
    else:
        return -1  # id not found
def get_page_data(page_title):
    """Fetch a Wikipedia page and return its "top trumps" statistics.

    Returns a dict with imageCount, linkCount, refCount, imageURL and id.
    On HTTP errors (e.g. page not found) all counts are -1 and the image
    URL falls back to a Wikipedia-logo image.
    """
    try:
        address = u"https://en.wikipedia.org/wiki/%s" % page_title
        request = urllib2.Request(address, None, headers)
        page = urllib2.urlopen(request).read()
    except urllib2.HTTPError:  # Page not found
        return dict(imageCount=-1, linkCount=-1, refCount=-1,
                    imageURL=u"https://usmentor.qbcontent.com/wp-content/uploads/2014/07/wikipedia-logo1.jpg",
                    id=-1)
    soup = BeautifulSoup(page, "html.parser")
    num_of_links = get_num_links(soup)
    num_of_imgs = get_num_imgs(soup)
    num_of_refs = get_num_references(soup)
    # These two issue further API requests of their own.
    img_url = get_img_url(page_title)
    page_id = get_page_id(page_title)
    return dict(imageCount=num_of_imgs, linkCount=num_of_links,
                refCount=num_of_refs, imageURL=img_url, id=page_id)
| frazerbw/wikitrumps | server/page_data_extractor.py | page_data_extractor.py | py | 3,055 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.