index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
9,300 | f5a474cdc8aa22322b252b980c0334a9db21bd5c | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 17:14:14 2018
@author: Winry
"""
import pandas as pd

# Show every column when frames are printed.
pd.set_option('display.max_columns', None)

# pd.read_csv accepts a path and manages the file handle itself; the original
# opened handles manually and never closed the second one.
df = pd.read_csv("data_11_8.csv")
Newtaxiout_time = df['Newtaxiout_time']
time = df['time']
df2 = pd.read_csv("df_append.csv")


def _count_in_window(series):
    """For each row of df, count values of `series` strictly between
    Newtaxiout_time and Newtaxiout_time + time*60.

    `time` is presumably in minutes (hence the *60) — TODO confirm.
    """
    res = []
    for i in range(len(df)):
        lo = Newtaxiout_time[i]
        hi = time[i] * 60 + lo
        res.append(int(((series > lo) & (series < hi)).sum()))
    return res


# The two original copy-pasted loops collapse into one shared helper.
df['append1_res'] = _count_in_window(df2["Newappend1"])
df['append2_res'] = _count_in_window(df2["Newappend2"])
df.to_csv('df_11_9.csv',index=False) |
9,301 | 244191087fcab2a6f03bf024708484b9838731ed | import sys
import pygame
import os
import random
import subprocess
# Frame rate, enemy-spawn timer event id & first delay (ms), pause flag and
# the score/coin/kill counters.
# NOTE(review): NEWENEMYSPAWN = 30 is used as a raw event type below — it may
# collide with a builtin pygame event id; pygame.USEREVENT is the usual base.
FPS, NEWENEMYSPAWN, fst_spawn, not_paused, coins, enemies_count, killed, score = 50, 30, 2000, True, 0, 0, 0, 0
# Spawn-rate divisors for the three enemy types (see Enemy.__init__).
MiniG_rate, EnemyG_rate, MetalM_rate = 1, 5, 15
WEAPONS_LIST = ['Green laser gun', 'Purple laser gun', 'Plasma gun']
def load_image(name, colorkey=None):
    """Load an image from the data/ directory.

    :param name: file name inside 'data'
    :param colorkey: None for per-pixel alpha; a color (or -1 = top-left
        pixel's color) to use as a transparent colorkey.
    """
    fullname = os.path.join('data', name)
    # Bug fix: the original called .convert() unconditionally, which strips
    # per-pixel alpha BEFORE the convert_alpha() branch could preserve it.
    image = pygame.image.load(fullname)
    if colorkey is not None:
        image = image.convert()
        if colorkey == -1:
            colorkey = image.get_at((0, 0))
        image.set_colorkey(colorkey)
    else:
        image = image.convert_alpha()
    return image
def info_print():
    """Draw the HUD panel (score / kills / coins) in the top-left corner."""
    global score, killed, coins
    font = pygame.font.Font(None, 30)
    text_coord = 2
    # Three nested rectangles give the panel a bevelled border.
    pygame.draw.rect(screen, (100, 100, 100), (0, 0, 200, 100), 3)
    pygame.draw.rect(screen, (150, 150, 150), (3, 3, 194, 94), 3)
    pygame.draw.rect(screen, (250, 250, 250), (5, 5, 190, 90))
    text = [f'Счёт: {score}',
            f'Убито: {killed}',
            f'Монеты: {coins}']
    for line in text:
        string_rendered = font.render(line, 1, (50, 50, 50))
        intro_rect = string_rendered.get_rect()
        text_coord += 10
        intro_rect.top = text_coord
        intro_rect.x = 10
        text_coord += intro_rect.height
        screen.blit(string_rendered, intro_rect)
class Board:
    """Background playing field: a cell grid plus two decorative zig-zag strips."""

    def __init__(self, screen, width, height):
        self.width = width
        self.height = height
        self.board = [[0] * width for _ in range(height)]
        self.left = 0
        self.top = 0
        self.cell_size = 70  # pixels per cell
        self.screen = screen

    def set_view(self, left, top, cell_size):
        """Reposition the grid origin and change the cell size."""
        self.left = left
        self.top = top
        self.cell_size = cell_size

    def render(self):
        """Draw the bevelled grid (rows >= 2) and the zig-zag strips.

        tp/pp are triangle vertex lists mutated in place while each strip is
        drawn, then shifted 35px right for the next tooth.
        NOTE(review): the polygon calls use the module-level `screen`, not
        self.screen — confirm that is intentional.
        """
        tp, pp = [[0, 140], [17, 105], [35, 140]], [[17, 105], [35, 140], [52, 105]]
        for y in range(self.height):
            for x in range(self.width):
                if y >= 2:
                    # Three nested rects per cell give a bevelled look.
                    pygame.draw.rect(self.screen, (100, 100, 100), (
                        x * self.cell_size, y * self.cell_size, self.cell_size, self.cell_size),
                        1)
                    pygame.draw.rect(self.screen, (150, 150, 150), (
                        x * self.cell_size + 1, y * self.cell_size + 1, self.cell_size - 2,
                        self.cell_size - 2), 2)
                    pygame.draw.rect(self.screen, (250, 250, 250), (
                        x * self.cell_size + 3, y * self.cell_size + 3, self.cell_size - 4,
                        self.cell_size - 4))
        # Lower (pp) zig-zag strip.
        for i in range(self.width * 2 - 1):
            pygame.draw.polygon(screen, (0, 230, 200), pp)
            pp[0][1] += 2
            pp[0][0] += 4
            pp[1][1] -= 3
            pp[2][1] += 2
            pp[2][0] -= 4
            pygame.draw.polygon(screen, (0, 125, 200), pp)
            pp[0][1] += 4
            pp[0][0] += 6
            pp[1][1] -= 7
            pp[2][1] += 4
            pp[2][0] -= 6
            pygame.draw.polygon(screen, (0, 230, 200), pp)
            # Undo the vertex offsets, then slide one tooth to the right.
            pp[0][1] -= 6
            pp[0][0] -= 10
            pp[1][1] += 10
            pp[2][1] -= 6
            pp[2][0] += 10
            for point in pp:
                point[0] += 35
        # Upper (tp) zig-zag strip.
        for i in range(self.width * 2):
            pygame.draw.polygon(screen, (100, 100, 100), tp)
            tp[0][1] -= 2
            tp[0][0] += 4
            tp[1][1] += 4
            tp[2][1] -= 2
            tp[2][0] -= 4
            pygame.draw.polygon(screen, (150, 150, 150), tp)
            tp[0][1] -= 2
            tp[0][0] += 4
            tp[1][1] += 4
            tp[2][1] -= 2
            tp[2][0] -= 4
            pygame.draw.polygon(screen, (250, 250, 250), tp)
            tp[0][1] += 4
            tp[0][0] -= 8
            tp[1][1] -= 8
            tp[2][1] += 4
            tp[2][0] += 8
            for point in tp:
                point[0] += 35
class Bullet(pygame.sprite.Sprite):
    """A projectile fired by the player; sprite depends on the weapon kind."""

    def __init__(self, enemy_sprites, x, damage, kind):
        # Bullets register themselves in the module-level bullet_sprites group.
        super().__init__(bullet_sprites)
        self.damage = damage
        if kind == 'Green laser gun':
            self.image = load_image("green.png", -1)
        elif kind == 'Purple laser gun':
            self.image = load_image("purple.png", -1)
        elif kind == 'Plasma gun':
            self.image = pygame.transform.scale(load_image("plasma.png", -1), (25, 25))
        self.rect = self.image.get_rect()
        # Spawn just above the player; +30 roughly centres the shot on the ship.
        self.coords = self.rect.x, self.rect.y = x + 30, 665
        self.mask = pygame.mask.from_surface(self.image)
        self.fly(enemy_sprites)

    def fly(self, enemy_sprites):
        """Advance one pixel upward; despawn at y < 140 or on a mask hit."""
        if self.rect.y >= 140:
            self.rect.y -= 1
            for enemy in enemy_sprites:
                if pygame.sprite.collide_mask(enemy, self):
                    self.hit(enemy)
        else:
            self.kill()

    def hit(self, enemy):
        """Apply damage to the enemy and remove this bullet."""
        enemy.hp -= self.damage
        self.kill()
class Weapon:
    """Player armament; damage, price and special ability depend on `kind`."""

    def __init__(self, player, kind):
        self.kind = kind
        self.ability = None
        self.player = player
        if kind == 'Green laser gun':
            self.damage, self.price = 2, 0
        elif kind == 'Purple laser gun':
            self.damage, self.price = 4, 50
        elif kind == 'Plasma gun':
            self.damage, self.price = 8, 150
            self.ability = 'Rage'

    def shoot(self, enemy_sprites):
        """Spawn a bullet at the owner's current x position."""
        Bullet(enemy_sprites, self.player.rect.x, self.damage, self.kind)
class Player(pygame.sprite.Sprite):
    """The player's ship: sits on a fixed row and steps sideways in 70px cells."""

    def __init__(self, group):
        super().__init__(group)
        self.weapon = Weapon(self, 'Green laser gun')
        self.image = load_image("player.jpg", -1)
        self.rect = self.image.get_rect()
        self.coords = self.rect.x, self.rect.y = 75, 635
        self.mask = pygame.mask.from_surface(self.image)

    def shoot(self, enemy_sprites):
        """Delegate firing to the currently equipped weapon."""
        self.weapon.shoot(enemy_sprites)

    def move(self, side):
        """Step one cell left or right, clamped to the playing field."""
        step = 0
        if side == 'right' and self.rect.x < 630:
            step = 70
        elif side == 'left' and self.rect.x > 35:
            step = -70
        self.rect.x += step
class Enemy(pygame.sprite.Sprite):
    """A descending enemy; the type escalates with total enemies spawned."""
    # NOTE(review): `global` at class scope has no effect — these names are
    # read as module globals inside __init__ regardless.
    global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate

    def __init__(self, group):
        super().__init__(group)
        if enemies_count >= 30 and enemies_count % MetalM_rate == 0:
            self.type = 'MM'  # Metal Man: toughest, wide 120x140 sprite
            self.hp = 24
            self.image = pygame.transform.scale(load_image("Metal_Man.png", -1), (120, 140))
            self.rect = self.image.get_rect()
            self.coords = self.rect.x, self.rect.y = random.randrange(10, 560, 70), 140
            self.mask = pygame.mask.from_surface(self.image)
        elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:
            self.type = 'EG'  # Enemy glider: mid-tier
            self.hp = 6
            self.image = pygame.transform.scale(load_image('Enemy_glider.png', -1), (70, 70))
            self.rect = self.image.get_rect()
            self.coords = self.rect.x, self.rect.y = random.randrange(0, 700, 70), 140
            self.mask = pygame.mask.from_surface(self.image)
        else:
            self.type = 'MG'  # Mini glider: default/basic enemy
            self.hp = 4
            self.image = pygame.transform.scale(load_image('Mini_glider.png', -1), (70, 70))
            self.rect = self.image.get_rect()
            self.coords = self.rect.x, self.rect.y = random.randrange(0, 700, 70), 140
            self.mask = pygame.mask.from_surface(self.image)

    def death_check(self):
        """Award score/coins (and speed the game up for 'MM') once hp <= 0."""
        global killed, score, coins, FPS
        if self.hp <= 0:
            killed += 1
            if self.type == 'MM':
                score += 30
                coins += 15
                FPS += 10  # the game accelerates after each Metal Man kill
            elif self.type == 'EG':
                score += 15
                coins += 5
            elif self.type == 'MG':
                score += 10
                coins += 2
            self.kill()

    def move(self):
        """Descend one pixel per frame."""
        self.rect.y += 1
def game_over():
    """Game-over screen: show final stats, restart on 'A', quit on Esc."""
    global FPS, not_paused, score, killed, coins

    def text_print():
        # NOTE(review): this local shadows the enclosing function's name.
        game_over = ' GAME OVER'
        intro_text = ["",
                      "Нажми клавишу A",
                      "чтобы сыграть еще раз",
                      '',
                      'Нажми на кнопку "Esc", ',
                      'чтобы выйти из игры',
                      f'Счёт: {score}',
                      f'Убито: {killed}',
                      f'Монеты: {coins}']
        fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
        screen.blit(fon, (0, 0))
        font = pygame.font.Font(None, 50)
        text_coord = 40
        string_rendered = font.render(game_over, 1, pygame.Color('white'))
        intro_rect = string_rendered.get_rect()
        text_coord += 10
        intro_rect.top = text_coord
        intro_rect.x = 10
        text_coord += intro_rect.height
        screen.blit(string_rendered, intro_rect)
        font = pygame.font.Font(None, 30)
        for line in intro_text:
            string_rendered = font.render(line, 1, pygame.Color('white'))
            intro_rect = string_rendered.get_rect()
            text_coord += 10
            intro_rect.top = text_coord
            intro_rect.x = 10
            text_coord += intro_rect.height
            intro_rect.x += 10
            screen.blit(string_rendered, intro_rect)

    FPS = 30  # slow the loop back down for the menu
    pygame.mouse.set_visible(True)
    text_print()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                terminate()
            elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        terminate()
                    if event.key == 97:  # 'a': relaunch the game in a new process
                        pygame.quit()
                        subprocess.call("python" + " проект.py", shell=True)
        if not_paused:
            pygame.display.flip()
        clock.tick(FPS)
    terminate()  # NOTE(review): unreachable — the loop above never breaks
def terminate():
    """Shut pygame down and exit the process."""
    pygame.quit()
    sys.exit()
def start_screen(screen, width, height):
    """Intro screen: any key starts the game, Esc opens the pause menu."""
    global FPS, not_paused

    def text_print():
        intro_text = [" SPACE SOLDIER", "",
                      " Нажми любую клавишу,",
                      " чтобы начать игру",
                      ' Нажимай на кнопки стрелок, чтобы перемещать персонажа',
                      ' Не дай врагу пролететь мимо тебя!',
                      ' Нажми на кнопку "Esc", ',
                      ' чтобы открыть меню паузы',
                      ' или попасть в магазин']
        fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
        font = pygame.font.Font(None, 30)
        text_coord = 50
        screen.blit(fon, (0, 0))
        for line in intro_text:
            string_rendered = font.render(line, 1, pygame.Color('black'))
            intro_rect = string_rendered.get_rect()
            text_coord += 10
            intro_rect.top = text_coord
            intro_rect.x = 10
            text_coord += intro_rect.height
            screen.blit(string_rendered, intro_rect)

    pygame.mouse.set_visible(True)
    text_print()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                terminate()
            elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        pause_menu(screen, width, height)
                        text_print()  # redraw the intro after the menu returns
                    else:
                        # Any other key: hide the cursor and start the game.
                        pygame.mouse.set_visible(False)
                        return
        if not_paused:
            pygame.display.flip()
        clock.tick(FPS)
    terminate()  # NOTE(review): unreachable
def pause_menu(screen, width, height):
    """Pause screen: 'S' opens the shop, 'C' or Esc resumes the game."""
    global FPS, not_paused

    def text_print():
        intro_text = ["Нажми на кнопку 'S',",
                      "чтобы открыть магазин",
                      '',
                      "Нажми на кнопку 'C',",
                      "чтобы продолжжить игру",
                      '',
                      "УПРАВЛЕНИЕ",
                      '',
                      'Нажимай на кнопки стрелок, чтобы перемещать персонажа',
                      '',
                      'Не дай врагу пролететь мимо тебя!',
                      '',
                      'Нажми на кнопку "Esc", ',
                      'чтобы закрыть меню паузы']
        fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
        font = pygame.font.Font(None, 30)
        text_coord = 50
        screen.blit(fon, (0, 0))
        for line in intro_text:
            string_rendered = font.render(line, 1, pygame.Color('black'))
            intro_rect = string_rendered.get_rect()
            text_coord += 10
            intro_rect.top = text_coord
            intro_rect.x = 10
            text_coord += intro_rect.height
            screen.blit(string_rendered, intro_rect)

    pygame.mouse.set_visible(True)
    fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
    screen.blit(fon, (0, 0))
    text_print()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                terminate()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    not_paused = True
                    pygame.mouse.set_visible(False)
                    return
                if event.key == 115:  # 's': open the shop overlay
                    shop(screen, width, height)
                if event.key == 99:  # 'c': continue the game
                    return
        pygame.display.flip()
        clock.tick(FPS)
    terminate()  # NOTE(review): unreachable
def shop(screen, width, height):
    """Shop screen: shows the current weapon and the next upgrade; 'U' buys it.

    Fix: the Plasma gun's advertised damage was 6 here, but Weapon.__init__
    actually assigns damage=8 for 'Plasma gun' — the display now matches.
    """
    global FPS, not_paused, WEAPONS_LIST, coins

    def text_print():
        intro_text = [" Нажми на кнопку 'U',",
                      "чтобы улучшить свое оружие",
                      'Нажми на кнопку "Esc", ',
                      'чтобы выйти из магазина', '',
                      'Текущее оружие:',
                      f'{player.weapon.kind}',
                      'Наносимый урон:',
                      f'{player.weapon.damage}',
                      'Следующее улучшение:',
                      f'{next_weapon}',
                      'Урон:',
                      f'{next_damage}',
                      'Стоимость:',
                      f'{next_price}',
                      'Ваши монеты:',
                      f'{coins}']
        fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
        font = pygame.font.Font(None, 30)
        text_coord = 50
        screen.blit(fon, (0, 0))
        for line in intro_text:
            string_rendered = font.render(line, 1, pygame.Color('black'))
            intro_rect = string_rendered.get_rect()
            text_coord += 10
            intro_rect.top = text_coord
            intro_rect.x = 10
            text_coord += intro_rect.height
            screen.blit(string_rendered, intro_rect)

    if player.weapon.kind != 'Plasma gun':
        next_weapon = WEAPONS_LIST[WEAPONS_LIST.index(player.weapon.kind) + 1]
        if next_weapon == 'Purple laser gun':
            next_damage = 4
            next_price = 50
        else:
            # Plasma gun — must agree with Weapon.__init__ (damage 8).
            next_damage = 8
            next_price = 150
    else:
        next_weapon = 'Вы имеете лучшее оружие'
        next_damage = 'Наносимый урон максимальный'
        next_price = 'Покупать больше нечего'
    pygame.mouse.set_visible(True)
    fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
    screen.blit(fon, (0, 0))
    text_print()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                terminate()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.mouse.set_visible(False)
                    screen.blit(fon, (0, 0))
                    return
                # 'u': buy the next weapon if affordable and not maxed out.
                if event.key == 117 and player.weapon.kind != 'Plasma gun' and coins >= next_price:
                    coins -= next_price
                    player.weapon = Weapon(player, WEAPONS_LIST[WEAPONS_LIST.index(player.weapon.kind) + 1])
        pygame.display.flip()
        clock.tick(FPS)
    terminate()  # NOTE(review): unreachable
pygame.init()
size = width, height = 700, 700
screen = pygame.display.set_mode(size)
pygame.display.set_caption('SPACE SOLDIER')
pygame.display.set_icon(load_image("icon.png", -1))
fon1 = pygame.transform.scale(load_image('fon1.png'), (700, 400))
board = Board(screen, 10, 10)
pygame.mouse.set_visible(True)
# Sprite groups: enemies, the single player ship, and in-flight bullets.
enemy_sprites = pygame.sprite.Group()
player_sprites = pygame.sprite.Group()
bullet_sprites = pygame.sprite.Group()
player = Player(player_sprites)
enemy_li = [Enemy(enemy_sprites)]
clock = pygame.time.Clock()
start_screen(screen, width, height)
# Fire the NEWENEMYSPAWN event every fst_spawn ms.
pygame.time.set_timer(NEWENEMYSPAWN, fst_spawn)
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            terminate()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                not_paused = False
                pause_menu(screen, width, height)
            if not_paused:
                # NOTE(review): 275/276 look like pygame-1.x right/left arrow
                # keycodes, and 115 is 's' — confirm against the pygame
                # version in use (pygame 2 uses K_RIGHT/K_LEFT/K_s).
                if event.key == 275:
                    player.move('right')
                elif event.key == 276:
                    player.move('left')
                if event.key == 115:
                    player.shoot(enemy_sprites)
        if not_paused and event.type == NEWENEMYSPAWN:
            enemy_li.append(Enemy(enemy_sprites))
            enemies_count += 1
    if not_paused:
        screen.blit(fon1, (0, 0))
        board.render()
        player_sprites.draw(screen)
        enemy_sprites.draw(screen)
        bullet_sprites.draw(screen)
        for enemy in enemy_sprites:
            # Metal Men are taller, so they trigger game over a bit earlier.
            if enemy.type != 'MM':
                lim = 630
            else:
                lim = 560
            if enemy.rect.y <= lim:
                enemy.move()
            else:
                game_over()
            for bullet in bullet_sprites:
                bullet.fly(enemy_sprites)
            enemy.death_check()
        info_print()
    pygame.display.flip()
    clock.tick(FPS)
terminate()
|
9,302 | 2d48a343ca7f0f8ba7de8b520aad71d774d9b4ba | # Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
class VirtualArray(common.CoprHDResource):
    """Client-side helper for CoprHD virtual array ('varray') REST resources."""

    # Commonly used URIs for the 'varrays' module
    URI_VIRTUALARRAY = '/vdc/varrays'
    URI_VIRTUALARRAY_BY_VDC_ID = '/vdc/varrays?vdc-id={0}'
    URI_VIRTUALARRAY_URI = '/vdc/varrays/{0}'

    def varray_query(self, name):
        """Returns the UID of the varray specified by the name."""
        if common.is_uri(name):
            return name
        for uri in self.varray_list():
            varray = self.varray_show(uri)
            if varray and varray['name'] == name:
                return varray['id']
        raise common.CoprHdError(common.CoprHdError.NOT_FOUND_ERR,
                                 (_("varray %s: not found") % name))

    def varray_list(self, vdcname=None):
        """Returns all the varrays in a vdc.

        :param vdcname: Name of the Virtual Data Center
        :returns: JSON payload of varray list
        """
        if vdcname is None:
            vdcrestapi = VirtualArray.URI_VIRTUALARRAY
        else:
            vdcrestapi = VirtualArray.URI_VIRTUALARRAY_BY_VDC_ID.format(vdcname)
        (s, h) = common.service_json_request(
            self.ipaddr, self.port, "GET",
            vdcrestapi, None)
        o = common.json_decode(s)
        return [item['id'] for item in o['varray']]

    def varray_show(self, label):
        """Makes REST API call to retrieve varray details based on name."""
        uri = self.varray_query(label)
        (s, h) = common.service_json_request(
            self.ipaddr, self.port, "GET",
            VirtualArray.URI_VIRTUALARRAY_URI.format(uri),
            None)
        o = common.json_decode(s)
        # Inactive (deleted) varrays are reported as absent.
        if o.get('inactive') is True:
            return None
        return o
|
9,303 | b7687240413441e1d3ed0085e5953f8089cbf4c9 | # Generated by Django 2.1.7 on 2020-01-09 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjusts Banner.show_type choices/labels; no schema-shape change."""

    # Must follow the previous goods migration so the field already exists.
    dependencies = [
        ('goods', '0004_auto_20200109_0713'),
    ]

    operations = [
        migrations.AlterField(
            model_name='banner',
            name='show_type',
            field=models.IntegerField(choices=[(1, '首页轮播'), (2, '最新活动')], default=1, verbose_name='展示控制'),
        ),
    ]
|
9,304 | a9ea3db019435733b5782d69450942373bb828e5 | def calc(*numbers):
sum=0
for n in numbers:
sum=sum+n*n
return sum
print(calc(*[1,2,3])) |
9,305 | e99cf5a7058db984b323af1375003e4e21e36612 | import random
from connectfour.agents.monte_carlo import Node, MTCS
from connectfour.agents.agent import Agent
MAX_DEPTH = 3
class MonteCarloAgent(Agent):
    """Agent that chooses moves via depth-limited Monte-Carlo tree search."""

    def __init__(self, name):
        super().__init__(name)

    def get_move(self, board):
        # Run MCTS, then diff old vs. new board to recover the (row, col) played.
        best_move = self.find_best_move(board)
        return self._find_move_from_new_board_state(board.board, best_move.state.board)

    def find_best_move(self, board, factor=2.0):
        """
        Returns the best move using MonteCarlo Tree Search
        """
        o = Node(board)
        return MTCS(MAX_DEPTH, o, factor, self.id)

    def _find_move_from_new_board_state(self, old, new):
        """
        Making a move in Connect Four makes exactly one change to the board.
        Searching through all x,y positions for a difference between the old and
        new board will tell us exactly where a move was made.
        """
        for x in range(len(old)):
            for y in range(len(old[0])):
                if old[x][y] != new[x][y]:
                    return x, y
        # there is no difference between old and new board states
        return -1, -1
class RandomAgent(Agent):
    """Agent that drops its token into a uniformly random non-full column."""

    def __init__(self, name):
        super().__init__(name)

    def get_move(self, board):
        """
        RandomAgent always returns a valid (ie. partially empty) column to place token in
        """
        while True:
            # Bug fix: random.randint is inclusive on BOTH ends, so the
            # original randint(0, board.width) could pick the out-of-range
            # column index `width`. randrange excludes the upper bound.
            col = random.randrange(board.width)
            row = board.try_move(col)
            if row >= 0:
                break
        return row, col
|
9,306 | 9e814e3f1162e248c5d778c2df9960b199854a27 | n = int(input('Informe um numero: '))
print('----------------')
# The ten hard-coded print calls collapse into a single loop over 1..10;
# output is byte-for-byte the same.
for m in range(1, 11):
    print('{} x {:2} = {:2}'.format(n, m, m * n))
print('----------------')
|
9,307 | 1cb320cf57823511b0398adce097b770b2131eb6 | import os
from django.core.asgi import get_asgi_application
# Point Django at the development settings before the app registry loads.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecommerce.settings.development')
# ASGI callable picked up by the application server (daphne/uvicorn/etc.).
application = get_asgi_application()
|
9,308 | 88d0ced41a8f176a8a12bba6406b4162ea6dfc52 | import sqlite3
# cur.execute('CREATE TABLE admin(username TEXT,password TEXT)')
# conn.commit()
# cur.execute("INSERT INTO admin VALUES('nilesh','nilesh')")
# conn.commit()
def verif_admin(username, password):
    """Check an admin login against the `admin` table.

    Returns True when `username` exists and `password` matches; False on a
    mismatch, an unknown user, or any database error.
    """
    try:
        conn = sqlite3.connect('SuperMarket.db')
        cur = conn.cursor()
        # Parameterized query: the original interpolated `username` straight
        # into the SQL (injection risk) and printed the credentials.
        rows = cur.execute('SELECT password FROM admin WHERE username = ?',
                           (username,)).fetchall()
        conn.close()
        return bool(rows) and password == rows[0][0]
    except sqlite3.Error:
        # Narrowed from a bare `except:` that hid every kind of failure.
        return False
def add_product(id_, name, quantity, cost):
    """Insert a new product row.

    Returns (ok, message). Rejects an all-empty submission, non-integer
    quantity/cost, and duplicate product ids.
    """
    if id_ == '' and name == '' and quantity == '' and cost == '':
        return False, " You Cannot Leave It Empty "
    # Validate the numeric fields before touching the database.
    try:
        quantity = int(quantity)
        cost = int(cost)
    except ValueError:
        return False, " Quantity and Cost are Integers "
    try:
        conn = sqlite3.connect('SuperMarket.db')
        cur = conn.cursor()
        # Parameterized queries replace the original f-string SQL (injection risk).
        check = cur.execute("SELECT * FROM products WHERE id = ?", (id_,)).fetchall()
        if check:
            conn.close()  # the original leaked the connection on this path
            return False, " This Product Already Exist Try Updating "
        cur.execute('INSERT INTO products VALUES(?,?,?,?)',
                    (id_, name, quantity, cost))
        conn.commit()
        conn.close()
        return True, " Product Added Successfully "
    except sqlite3.Error:
        return False, " Failed Connecting Database "
def get_product_detail(prod_id):
    """Fetch (rowid, id, name, quantity, cost) rows for `prod_id`.

    Returns (True, rows) on a hit, (False, message) otherwise.
    """
    if prod_id == '':
        return False, " Enter Product Id "
    conn = sqlite3.connect('SuperMarket.db')
    cur = conn.cursor()
    # Parameterized query replaces the original f-string SQL (injection risk).
    data = cur.execute("SELECT rowid,* FROM products WHERE id=?",
                       (prod_id,)).fetchall()
    conn.close()
    if not data:
        return False, " Product Don't Exist "
    return True, data
def update_delete_product(rowid, id_, name, quantity, cost, qry):
    """Update ('update') or delete ('delete') the product at `rowid`.

    Returns (ok, message); returns None for an unrecognized `qry`, matching
    the original fall-through behaviour.
    """
    if id_ == '' and name == '' and quantity == '' and cost == '':
        return False, " You Cannot Leave It Empty "
    # Validate numeric fields up front (the original also required ints for
    # deletes; that behaviour is preserved).
    try:
        quantity = int(quantity)
        cost = int(cost)
    except ValueError:
        return False, " Quantity and Cost are Integers "
    try:
        conn = sqlite3.connect('SuperMarket.db')
        cur = conn.cursor()
        try:
            # Parameterized queries replace the original f-string SQL.
            if qry == 'update':
                cur.execute(
                    "UPDATE products SET id = ?, name = ?, quantity = ?, cost = ? WHERE rowid = ?",
                    (id_, name, quantity, cost, rowid))
                conn.commit()
                return True, " Product Updated Successfully "
            if qry == "delete":
                cur.execute("DELETE FROM products WHERE rowid = ?", (rowid,))
                conn.commit()
                return True, " Product Deleted Successfully "
        finally:
            # The original's close was unreachable after the returns above.
            conn.close()
    except sqlite3.Error:
        return False, " Failed Connecting Database "
def showProducts_all():
    """Return (True, rows) with every row of the products table."""
    conn = sqlite3.connect('SuperMarket.db')
    try:
        data = conn.execute("SELECT * FROM products").fetchall()
    finally:
        conn.close()  # the original never closed the connection
    return True, data
def added_to_cart(prod_id, qry):
    """Add ('add') or remove ('remove') a product to/from the cart table.

    Returns (ok, message, cart_rows_or_1). Adding decrements the product's
    stock by one; removing restores the reserved quantity to stock.
    NOTE(review): SQL here is built with f-strings (injection risk) — should
    use parameterized queries like the rest of the module; left untouched to
    preserve the exact statement text.
    """
    if prod_id == '':
        return False, " Please Enter Product Id ", 1
    else:
        conn = sqlite3.connect('SuperMarket.db')
        cur = conn.cursor()
        if qry == "add":
            # Lazily create the cart table; the bare except swallows the
            # "table already exists" error.
            try:
                cur.execute("""CREATE TABLE cart(
                id TEXT,
                name TEXT,
                quantity INTEGER,
                cost INTEGER) """)
            except:
                pass
            data = cur.execute(f"""SELECT * FROM products WHERE id = '{prod_id}'""").fetchall()
            cart_check = cur.execute(f"""SELECT * FROM cart WHERE id = '{prod_id}' """).fetchall()
            if len(cart_check) == 0:
                # First add: insert a quantity-1 cart line at unit cost.
                cur.execute(f"""INSERT INTO cart VALUES('{data[0][0]}','{data[0][1]}',1,{data[0][3]})""")
                conn.commit()
                cur.execute(f"""UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'""")
                conn.commit()
                all_prods = cur.execute("SELECT * FROM cart").fetchall()
                return True, " Product Added To Cart Successfully ", all_prods
            elif len(cart_check) > 0:
                # Already in cart: bump quantity and accumulate the line cost.
                cur.execute(
                    f"""UPDATE cart SET quantity = {(cart_check[0][2] + 1)},cost={(cart_check[0][3] + data[0][3])} WHERE id ='{prod_id}'""")
                conn.commit()
                cur.execute(f"""UPDATE products SET quantity = {(data[0][2] - 1)} WHERE id ='{prod_id}'""")
                conn.commit()
                all_prods = cur.execute("SELECT * FROM cart").fetchall()
                return True, " Product Added To Cart Successfully ", all_prods
        if qry == "remove":
            cart_check = cur.execute(f"""SELECT * FROM cart WHERE id = '{prod_id}' """).fetchall()
            if len(cart_check) == 0:
                all_prods = cur.execute("SELECT * FROM cart").fetchall()
                return True, " Product Doesn't Exist ", all_prods
            elif len(cart_check) > 0:
                # Return the reserved units to stock, then drop the cart line.
                data = cur.execute(f"""SELECT * FROM products WHERE id = '{prod_id}'""").fetchall()
                cur.execute(f"UPDATE products SET quantity = {(data[0][2]+cart_check[0][2])} WHERE id ='{prod_id}'")
                conn.commit()
                cur.execute(f"DELETE FROM cart WHERE id = '{prod_id}'")
                conn.commit()
                all_prods = cur.execute("SELECT * FROM cart").fetchall()
                return True, " Product Deleted Successfully ", all_prods
        # NOTE(review): unreachable for 'add'/'remove' — every branch returns.
        conn.close()
def get_cost():
    """Return the total of the `cost` column across all cart rows."""
    conn = sqlite3.connect('SuperMarket.db')
    try:
        rows = conn.execute("SELECT * FROM cart").fetchall()
    finally:
        conn.close()  # the original never closed the connection
    # cost is column index 3 of each cart row.
    return sum(row[3] for row in rows)
def done_Drp():
    """Drop the cart table (called once checkout is finished)."""
    conn = sqlite3.connect('SuperMarket.db')
    try:
        conn.execute("DROP TABLE cart")
        conn.commit()
    finally:
        conn.close()  # the original never closed the connection
|
9,309 | 7e29220752b4a52be34cdf0c734695d1052d0414 | '''
Handprint module for handling credentials.
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2018-2022 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from .base import Credentials
from .amazon_auth import AmazonCredentials
from .google_auth import GoogleCredentials
from .microsoft_auth import MicrosoftCredentials
|
9,310 | a75691af17f6d1effd469d5c2ded340c71521ee1 | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.views import LoginView
from django.shortcuts import render
from django.views import View
from django.views.generic import CreateView
from resume.forms import NewResumeForm
from vacancy.forms import NewVacancyForm
class MenuView(View):
    """Landing page; exposes the user's auth state to the template."""

    def get(self, request, *args, **kwargs):
        context = {
            'is_authenticated': request.user.is_authenticated,
            'username': request.user.username,
        }
        return render(request, 'main.html', context=context)
class MySignupView(CreateView):
    """Signup page backed by Django's built-in UserCreationForm."""
    form_class = UserCreationForm
    success_url = 'login'  # relative redirect to the login page after signup
    template_name = 'signup.html'
class MyLoginView(LoginView):
    """Login page; already-authenticated users skip straight past it."""
    redirect_authenticated_user = True
    template_name = 'login.html'
class HomeView(View):
    """Home page; staff users get the vacancy form, everyone else the resume form."""

    def get(self, request, *args, **kwargs):
        form = NewVacancyForm() if request.user.is_staff else NewResumeForm()
        context = {
            'form': form,
            'is_authenticated': request.user.is_authenticated,
            'is_staff': request.user.is_staff,
            'username': request.user.username,
        }
        return render(request, 'home.html', context=context)
|
9,311 | ca7b3b5df860d3c3fb0953857ad950affdcc671d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drops the legacy location subdivision columns in favour of the
    sub_district_type foreign key."""

    dependencies = [
        ('location', '0005_auto_20170303_1625'),
    ]

    operations = [
        # Remove the six old free-form subdivision fields.
        migrations.RemoveField(
            model_name='location',
            name='block',
        ),
        migrations.RemoveField(
            model_name='location',
            name='mandapam',
        ),
        migrations.RemoveField(
            model_name='location',
            name='others',
        ),
        migrations.RemoveField(
            model_name='location',
            name='sub_district',
        ),
        migrations.RemoveField(
            model_name='location',
            name='taluka',
        ),
        migrations.RemoveField(
            model_name='location',
            name='tehsil',
        ),
        # Keep sub_district_type but make the FK nullable/optional.
        migrations.AlterField(
            model_name='location',
            name='sub_district_type',
            field=models.ForeignKey(related_name='location', blank=True, to='location.SubDistrictType', null=True, on_delete=django.db.models.deletion.CASCADE),
        ),
    ]
|
9,312 | 499baaa8c739c1bd846edc944e510542d76bbed5 | from collections import deque
def my_queue(n=5):
    """Return an empty deque bounded to at most `n` items (oldest evicted first)."""
    # The original had an unreachable `pass` after this return — removed.
    return deque([], n)
if __name__ == '__main__':
mq = my_queue()
for i in range(10):
mq.append(i)
print((i, list(mq)))
"""Queue size does not go beyond n int, this outputs:
(0, [0])
(1, [0, 1])
(2, [0, 1, 2])
(3, [0, 1, 2, 3])
(4, [0, 1, 2, 3, 4])
(5, [1, 2, 3, 4, 5])
(6, [2, 3, 4, 5, 6])
(7, [3, 4, 5, 6, 7])
(8, [4, 5, 6, 7, 8])
(9, [5, 6, 7, 8, 9])
""" |
9,313 | d2754099adebdb4bd2b028fdf9015571ad773754 | """
챕터: day4
주제: 반복문(for문)
문제: 1에서 100까지 합을 구하여 출력하시오.
작성자: 한현수
작성일: 2018.9.20.
"""
# Accumulate 1..100 with a for loop (the range can start at 1, since
# adding 0 changes nothing).
result = 0
for number in range(1, 101):
    result += number
print(result) |
9,314 | 3c7280bbd23bd3472915da0760efbfd03bfe995d | # # -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import urllib
import urllib2
import cookielib
from excel import *
from user import *
List = []
# Log in to the DLUT education portal; credentials come from user().
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})
loginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'
result = opener.open(loginUrl, postdata)
# Fetch the course-selection page (actionType=6) using the logged-in session.
gradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'
result = opener.open(gradeUrl)
# NOTE(review): `etree` is not imported here directly — it presumably comes
# in via the star imports from excel/user above; confirm against those files.
html = etree.HTML(result.read().decode('gbk'))
schedule = html.xpath('//td[@class="pageAlign"]/table[@border="1"]')
write_schedule(cut(get_son(schedule[0],List))) |
9,315 | 757a69f9ceaa3434c6d9f8b1fcdbadd991190f29 | # encoding = utf-8
import hmac
import time
from hashlib import sha1
def get_signature(now_):
    """Return the hex HMAC-SHA1 signature over
    grant_type + client_id + source + timestamp, as required by the login API.
    """
    payload = ''.join(['password',
                       'c3cef7c66a1843f8b3a9e6a1e3160e20',
                       'com.zhihu.web',
                       now_])
    mac = hmac.new(key='d1b964811afb40118a12068ff74a12f4'.encode('utf-8'),
                   msg=payload.encode('utf-8'),
                   digestmod=sha1)
    return mac.hexdigest()
# The API expects a millisecond-resolution timestamp string.
timestamp = str(int(time.time() * 1000))
signature = get_signature(timestamp)
print(signature)
|
9,316 | f276e33cde2e043fc8f81403e499544aa816a639 | class Member:
not_allowed_name = ["Shit", "Hell", "Baloot"]
users_num = 0
    def __init__(self, first_name, middle_name, last_name, gender):
        # Store the name parts and gender; fname is validated lazily in
        # full_name(), not here.
        self.fname = first_name
        self.mname = middle_name
        self.lname = last_name
        self.gender = gender
        # Class-wide user counter shared across all instances.
        Member.users_num += 1
    @classmethod
    def show_users_count(cls):
        """Print the shared user counter; no instance required."""
        print(f"We Have {cls.users_num} Users In Our System.")
@staticmethod
def say_hello(): # static method not linked to class or instances
print(f"Hello From Static Method")
def full_name(self): # instance method
if self.fname in Member.not_allowed_name:
raise ValueError("Name is Not Allowed")
else:
return f"{self.fname} {self.mname} {self.lname}"
def welcome(self):
if self.gender == "Male":
return f"Hello Mr {self.fname}"
elif self.gender == "Female":
return f"Hello Mrs {self.fname}"
else:
return f"Hello {self.fname}"
    def get_all_info(self):
        """Combine welcome() and full_name() into a single summary string."""
        return f"{self.welcome()}, Your Full Name Is: {self.full_name()}"
    def delete_user(self):
        """Decrement the shared counter and report the deletion."""
        Member.users_num -= 1
        return f"Users {self.fname} Is Deleted"
# Demo: the counter starts at 0 before any members exist.
print(Member.users_num)
member_one = Member("Osama", "Mohammed", "Elzero", "Male")
member_two = Member("Mohammed", "Mohammed", "Saad", "Male")
member_three = Member("Hala", "Mohammed", "Saad", "Female")
member_four = Member("Shit", "Hell", "Metal", "DD")
# print(member_one.fname, member_one.mname, member_one.lname)
# print(member_two.mname)
# print(member_three.lname)
# print(member_one.full_name())
# print(member_three.welcome())
# print(dir(Member))
# print(member_three.get_all_info())
# print(member_four.get_all_info()) # Value Error
print(Member.users_num)
print(member_four.delete_user())
print(Member.users_num)
print("#" * 100)
Member.show_users_count()
Member.say_hello()
#print("#" * 100)
#print(member_three.full_name()) # Both same
#print(Member.full_name(member_three)) # Both same (Backend)
|
9,317 | 9d190face528d1a237f4c92bfb94a399f61a5af2 | import csv
import Feature_extraction as urlfeature
import trainer as tr
import warnings
warnings.filterwarnings("ignore")
def resultwriter(feature, output_dest):
    """Write [url, feature_dict] pairs to a CSV file.

    The header comes from the first item's dict keys; subsequent rows are
    assumed to share the same keys.
    """
    flag = True
    # newline='' is required for csv writer targets — without it the csv
    # module emits blank lines between rows on Windows.
    with open(output_dest, 'w', newline='') as f:
        for item in feature:
            w = csv.DictWriter(f, item[1].keys())
            if flag:
                w.writeheader()
                flag = False
            w.writerow(item[1])
def process_URL_list(file_dest,
                     output_dest):
    """Read 'url,label' lines from file_dest, extract features per URL, and
    write them — with the label in a 'malicious' column — to output_dest."""
    feature = []
    with open(file_dest) as file:
        for line in file:
            url = line.split(',')[0].strip()
            malicious_bool = line.split(',')[1].strip()
            if url != '':
                print('working on: ' + url)  # showoff
                ret_dict = urlfeature.feature_extract(url)
                ret_dict['malicious'] = malicious_bool
                feature.append([url, ret_dict]);
    resultwriter(feature, output_dest)
def process_test_list(file_dest,
                      output_dest):
    """Extract features for a plain list of URLs (one per line, no label
    column — e.g. query.txt) and write them to output_dest."""
    # NOTE(review): `f` is declared global but never used in this function —
    # looks like dead code; confirm before removing.
    global f
    feature = []
    with open(file_dest) as file:
        for line in file:
            url = line.strip()
            if url != '':
                print('working on: ' + url)  # progress output
                ret_dict = urlfeature.feature_extract(url)
                feature.append([url, ret_dict]);
    resultwriter(feature, output_dest)
# change
def process_test_url(url,
                     output_dest):
    """Extract features for a single URL and write a one-row CSV."""
    feature = []
    url = url.strip()
    if url != '':
        print('working on: ' + url)
        ret_dict = urlfeature.feature_extract(url)
        feature.append([url, ret_dict])
    resultwriter(feature, output_dest)
def main():
    """Train on the five prepared train/test feature CSV splits."""
    for i in range(1, 6):
        s = 'comp/train_Data' + str(i) + '.csv'
        k = 'comp/test_features' + str(i) + '.csv'
        tr.train(s, k)
|
9,318 | 282bccf20cfb114e31c5465c110819796bf81bc0 | from types import *
class Tokenizer:
    """Splits an arithmetic expression string into token objects.

    Emits Symbol for + - * /, OpenParen/CloseParen for brackets, Number for
    digit runs, Undefined for anything else, and Eof at end of input.
    """

    def __init__(self, buf):
        self.buf = buf
        self.index = 0

    def token(self):
        """Return the character at the current position."""
        return self.buf[self.index]

    def move(self, value):
        """Advance the cursor by *value* characters."""
        self.index += value

    def skip_whitespaces(self):
        """Skip over any run of whitespace at the cursor."""
        while self.index < len(self.buf) and self.token().isspace():
            self.move(1)

    def next(self):
        """Consume and return the next token (Eof once input is exhausted)."""
        self.skip_whitespaces()
        if self.index >= len(self.buf):
            return Eof()
        ch = self.token()
        if ch in '+-*/':
            self.move(1)
            return Symbol(ch)
        if ch == '(':
            self.move(1)
            return OpenParen()
        if ch == ')':
            self.move(1)
            return CloseParen()
        if ch.isnumeric():
            # accumulate a multi-digit decimal number
            value = 0
            while self.index < len(self.buf) and self.token().isnumeric():
                value = value * 10 + int(self.token())
                self.move(1)
            return Number(value)
        self.move(1)
        return Undefined(ch)
|
9,319 | 9fff345dedcfc7051a258bc471acf07aece95bcf | import sys
from photo_dl.request import request
from photo_dl.request import MultiRequest
class Jav_ink:
    """Scraper for www.jav.ink: resolves category/search pages to albums
    and albums to individual photo URLs."""
    def __init__(self):
        self.parser_name = 'jav_ink'
        self.domain = 'https://www.jav.ink'
        # de-duplication map: article id -> seen flag
        self.album_flag = {}
    @staticmethod
    def category2albums(category_url):
        """Collect album URLs from every page of a category listing."""
        # normalize away any trailing /page/N/ suffix before paginating
        category_url = category_url[:category_url.find('/page/')]
        category_html = request(category_url)
        albums = category_html.xpath('//*[@id="infinite-articles"]/li[contains(@class, "post")]/a/@href')
        pages = category_html.xpath('//*[@class="pages"]/text()')
        if pages:
            # pages text looks like "Page 1 of N" — take N
            pages = pages[0]
            pages = pages[pages.find('of') + 3:]
            urls = []
            for page in range(1, int(pages) + 1):
                urls.append('%s/page/%d/' % (category_url, page))
            urls = [{'url': url} for url in urls]
            threads = MultiRequest(urls=urls, progress=False).run()
            for thread in threads:
                albums.extend(thread.response.xpath('//*[@id="infinite-articles"]\
                /li[contains(@class, "post")]/a/@href'))
                del thread
        return albums
    def album2photos(self, album_url, album_html):
        """Parse one album page into {'album_name', 'photos', ...} or an error dict.

        Returns None for an album id already seen (duplicate suppression).
        """
        photos = []
        album_id = album_html.xpath('//article/div/@id')
        if not album_id:
            return {'error': {'url': album_url, 'info': 'not supported'}}
        album_id = album_id[0]
        if album_id in self.album_flag:
            return
        self.album_flag[album_id] = 1
        album_name = album_html.xpath('//*[contains(@class, "article-title")]/text()')
        photos_html = album_html.xpath('//*[@class="gallery-item"]')
        for photo_html in photos_html:
            photo_url = photo_html.xpath('.//a/@href')[0]
            photo_name = photo_url[photo_url.rfind('/') + 1:]
            photos.append({'photo_url': photo_url, 'photo_name': photo_name})
        if len(album_name) == 0:
            # fall back to the URL slug when the page carries no title
            album_name = album_url.split('/')[-2]
        else:
            album_name = album_name[0]
        album = {'parser_name': self.parser_name, 'album_name': album_name, 'photos': photos}
        return album
    def url2albums(self, url):
        """Resolve *url* (album, category, or search) into parsed album dicts."""
        albums_url = []
        if '/category/' in url or '/?s=' in url:
            albums_url.extend(self.category2albums(url))
        else:
            albums_url.append(url)
        albums = []
        urls = [{'url': url} for url in albums_url]
        threads = MultiRequest(urls=urls, name=url).run()
        for thread in threads:
            try:
                album = self.album2photos(thread.url, thread.response)
                if album is not None:
                    albums.append(album)
            except SystemExit:
                sys.exit()
            except:
                # keep going; record the failed album instead of aborting
                albums.append({'error': {'url': thread.url, 'info': 'parse error'}})
            del thread
        return albums
|
9,320 | e714755d660ba809f7958cad4f0b9f95b0a0ffdc | from django.apps import AppConfig
class SmashbotspainConfig(AppConfig):
    """Django application configuration for the ``smashbotspain`` app."""
    name = 'smashbotspain'
|
9,321 | 795bd22fb805069b342915638c52900ea52a4939 | from UI.Window import Window
class PolygonApplication:
    """Entry point wiring the polygon UI window to its mouse-click handler."""
    def __init__(self):
        # Window title (French): "Determination of the perimeter, area and
        # centroid of a polygon".
        self.window = Window("Détermination des périmètre, surface et centre de gravité d'un polygone")
        self.window.addMouseClickListener(self.window.onClick)
    def start(self):
        """Show the window and hand control to the UI event loop."""
        self.window.show()
|
9,322 | bffd211a2d2dc3dd9b596f69909be7f0437ab0c8 | import nltk
# Accumulator for the tweet fields of interest; each key maps to a parallel
# list that is appended to per tweet.
tw_dict = {'created_at':[],
           'id':[],
           'id_str':[],
           'full_text':[],
           'entities':[],
           'source':[],
           'user':[],
           'lang':[]}
def Preprocessing(instancia):
    """Normalize a Portuguese tweet: strip hashtags, mentions, punctuation
    and stopwords.

    Args:
        instancia: raw tweet text.
    Returns:
        The cleaned text with remaining tokens joined by single spaces.
    """
    import re  # fix: the module only imports nltk, so `re` was an undefined name
    # Remove unwanted tokens: hashtags, then @mentions plus punctuation.
    instancia = re.sub(r"#\S+", "", instancia)
    instancia = re.sub(r"@\S+", "", instancia).lower().replace('.','').replace(';','').replace('-','').replace(':','').replace(')','').replace('"','').replace(',','')
    # Drop frequent Portuguese stopwords that carry no signal.
    stopwords = set(nltk.corpus.stopwords.words('portuguese'))
    palavras = [i for i in instancia.split() if not i in stopwords]
    return (" ".join(palavras))
9,323 | 842f8b4de0378a2c83d22f3fd54ba4857d249597 | PRECISAO = 3
MAX_ITER = 20
def gauss_jacobi(entrada,*valores_iniciais):
tamanho = len(entrada[0])
variaveis = [*valores_iniciais[:tamanho]]
variaveism1 = [None] * (tamanho-1)
for _ in range(0,MAX_ITER):
print(variaveis)
for linha in range(tamanho-1):
soma = 0
for coluna in range(tamanho-1):
if(linha!=coluna):
soma += -entrada[linha][coluna]*variaveis[coluna]
variaveism1[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)
if(all([variaveism1[i]==variaveis[i] for i in range(tamanho-1)])):
break
variaveis = [*variaveism1]
return variaveis
def gauss_seidel(entrada,*valores_iniciais):
tamanho = len(entrada[0])
variaveis = [*valores_iniciais[:tamanho]]
antigo = [None] * (tamanho-1)
for _ in range(0,MAX_ITER):
print(variaveis)
for linha in range(tamanho-1):
soma = 0
for coluna in range(tamanho-1):
if(linha!=coluna):
soma += -entrada[linha][coluna]*variaveis[coluna]
variaveis[linha] = round((entrada[linha][tamanho-1]+soma)/entrada[linha][linha],PRECISAO)
if(all([antigo[i]==variaveis[i] for i in range(tamanho-1)])):
break
antigo = [*variaveis]
return variaveis
def main():
print()
entrada = [
[10,2,1,7],
[1,5,1,-8],
[2,3,10,6]
]
saida = gauss_jacobi(entrada,0,0,0)
print(saida)
print()
saida = gauss_seidel(entrada,0,0,0)
print(saida)
if __name__=="__main__":
main() |
9,324 | 9bd6da909baeb859153e3833f0f43d8cbcb66200 | # coding=utf-8
import sys
# Python 2 script: concatenate per-phone HMM definition files (hmm3/hmm_*.hmm)
# into a single master macro file hmmsdef.mmf, keeping the full header only
# from the first model.
if len(sys.argv) == 2:
    filepath = sys.argv[1]
    pRead = open(filepath,'r')#wordlist.txt
    pWrite = open("..\\pro\\hmmsdef.mmf",'w')
    time = 0
    for line in pRead:
        if line != '\n':
            line = line[0: len(line) - 1] # strip the trailing \n
            if line == "sil ":
                line = line[0: len(line) - 1]
            print line
            everyHmmfilepath = "..\\..\\model\\hmm3\\hmm_" + line + ".hmm"
            pHmmRead = open(everyHmmfilepath,'r')
            if time == 0:
                pWrite.write(pHmmRead.read()) # read() consumes the rest of the file
                pWrite.write("\n")
                time = 1
            else:
                # skip the 3 header lines of every subsequent model file
                for i in range(3):
                    pHmmRead.readline()
                pWrite.write(pHmmRead.read())
                pWrite.write("\n")
else :
    print "the agres must be one"
9,325 | 65bfb59a255b42854eec8b55b28711737cfc46c2 | #basic API start
from flask import Flask, jsonify, abort, request
from cruiseItem import cruiseItem
from sqlalchemy import create_engine
from json import dumps
db_connect = create_engine('sqlite:///Carnivorecruise.sqlite')
app = Flask(__name__)
# Fix: the serializer hook previously called o.to_joson(), which looks like a
# typo of to_json(); TODO(review): confirm the method name on cruiseItem.
app.json_encoder.default = lambda self, o: o.to_json()
app.app_context()
# Module-level caches for query results (currently only shadowed by locals in
# the helpers below).
InventoryArr = {}
HistoryArr = {}
def get_cruiseitemArr():
    """Fetch every row from the CruiseItem table.

    Returns the raw row tuples; the Flask routes are responsible for JSON
    serialization. The previous version returned a jsonify() Response here,
    which the route then tried to jsonify a second time.
    """
    conn = db_connect.connect()  # connect to database
    try:
        query = conn.execute("select * from CruiseItem")
        rows = query.cursor.fetchall()
    finally:
        conn.close()  # fix: don't leak pooled connections
    print(rows)
    return rows
def get_cruiseitemArr_byLoc(Location):
    """Fetch CruiseItem rows departing from *Location*.

    Fixes: uses a bound parameter instead of string interpolation (SQL
    injection risk) and returns the fetched rows — the old code jsonify'd
    the result-proxy object, which is not serializable.
    """
    conn = db_connect.connect()  # connect to database
    try:
        query = conn.execute(
            "select * from Cruiseitem where fromLocation = ?", (str(Location),))
        rows = query.cursor.fetchall()
    finally:
        conn.close()
    print(rows)
    return rows
def get_cruiseHistory():
    """Fetch every row from the cruiseHistory table.

    Fix: the previous version fetched and printed the rows but never
    returned them, so callers always got None.
    """
    conn = db_connect.connect()  # connect to database
    try:
        rows = conn.execute("select * from cruiseHistory").cursor.fetchall()
    finally:
        conn.close()
    print(rows)
    return rows
@app.route('/inventory', methods=['GET'])
def get_cruiseitems():
    # GET /inventory — full inventory listing.
    # NOTE(review): jsonify() is applied to the helper's result here, so the
    # helper should return plain rows, not an already-jsonified Response.
    return jsonify(status="ok",InventoryArr=get_cruiseitemArr())
@app.route('/inventory/location/<Location>', methods=['GET'])
def get_cruiseitems_by_location(Location):
    # GET /inventory/location/<Location> — inventory filtered by departure
    # location. Same serialization caveat as get_cruiseitems above.
    return jsonify(status="ok", InventoryArr=get_cruiseitemArr_byLoc(Location))
if __name__ == '__main__':
app.run("0.0.0.0", 80)
|
9,326 | f9310aa6c26ec10041dac272fa17ac21f74c21ac | # -*- coding: utf-8 -*-
from wordcloud import WordCloud, ImageColorGenerator
import numpy as np
from PIL import Image
def word2cloud(text: str, mask_image: "Image.Image" = None):
    """Render *text* as a word-cloud PIL image.

    Args:
        text: source text (whitespace-tokenized by WordCloud).
        mask_image: optional PIL image whose shape and colors constrain the
            cloud. Fix: the annotation previously named the ``Image`` module
            instead of the ``Image.Image`` class.
    Returns:
        A PIL RGBA image with transparent background.
    """
    # Fix: compare against None with `is`, not `==`.
    if mask_image is None:
        wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode='RGBA',
                       background_color=None).generate(text)
    else:
        mask = np.array(mask_image)  # masks work best with sharp, high-contrast shapes
        image_colors = ImageColorGenerator(mask)  # sample colors from the mask
        wc = WordCloud(mask=mask, color_func=image_colors,
                       width=800, height=600,
                       font_path='simhei.ttf', mode='RGBA',
                       background_color=None).generate(text)
    img_res = wc.to_image()
    return img_res
# Word sizing above is only approximate; for finer control, compute word
# frequencies with jieba first:
# freq=jieba.analyse.extract_tags(text, topK=200, withWeight=True)
# freq={w[0]:w[1] for w in freq}
# WordCloud(...).generate_from_frequencies(freq)
# plt.imshow(wc,interpolation='bilinear') # bilinear interpolation smooths colors
# plt.axis('off')
# plt.show()
#wc.to_file('wordcloud.png') # save to disk
9,327 | f23bc0c277967d8e7a94a49c5a81ed5fb75d36cc | from mpi4py import MPI
import matplotlib
from tmm import coh_tmm
import pandas as pd
import os
from numpy import pi
from scipy.interpolate import interp1d
from joblib import Parallel, delayed
import numpy as np
import glob
import matplotlib.pyplot as plt
import pickle as pkl
import seaborn as sns
from scipy.optimize import minimize
import json
from tqdm import tqdm
DATABASE = './data'
INSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']
METALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']
num_workers = 8
def cal_reward(R, T, A, target):
    '''
    Calculate reward based on given spectrums.
    We calculate the reward using averaged (1-mse).

    Args:
        R, T, A: numpy array. Reflection, transmission, and
            absorption spectrums, respectively.
        target: dict. {'R':np.array, 'T':np.array, 'A':np.array}

    Returns:
        reward: float. Mean of (1 - mean absolute error) over the target keys.
    '''
    spectra = {'R': R, 'T': T, 'A': A}
    total = 0
    for key, goal in target.items():
        # any key other than 'R'/'T' is treated as absorption, as before
        measured = spectra.get(key, A)
        total += 1 - np.abs(measured.squeeze() - goal).mean()
    return total / len(target)
class Memory:
    """Rollout buffer: parallel lists of transitions for policy updates."""

    _FIELDS = ('actions', 'states', 'logprobs', 'rewards', 'is_terminals')

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, [])

    def clear_memory(self):
        """Empty every buffer in place (list identities are preserved)."""
        for field in self._FIELDS:
            getattr(self, field).clear()
def batch_spectrum(env, names_list, thickness_list):
    """Evaluate many candidate stacks in parallel with joblib.

    Args:
        env: simulator exposing .spectrum(names, thickness, theta, plot).
        names_list: list of material-name lists, one per candidate.
        thickness_list: list of thickness lists, aligned with names_list.
    Returns:
        (Rs, Ts, As): arrays of shape (num_candidates, num_wavelengths).
    """
    def spectrum(args):
        '''
        Inputs:
            1. names: list of lists, each list correspond to the structures
            2. thickness: list of lists
        '''
        names, thickness = args
        R, T, A = env.spectrum(names, thickness, 0, False)
        return R, T, A
    res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)
                                       for args in
                                       zip(names_list, thickness_list))
    # res stacks to (num_candidates, 3, num_wavelengths); split by channel
    res = np.array(res)
    Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
    return Rs, Ts, As
def merge_layers(categories, thicknesses):
    '''
    Merges consecutive layers with the same material types.

    Args:
        categories: material id per layer.
        thicknesses: layer thicknesses with np.inf ambient sentinels at both
            ends (one more entry than categories on each side).
    Returns:
        (merged categories, merged thicknesses with np.inf re-attached)
    '''
    inner = thicknesses[1:-1]  # strip the semi-infinite ambient entries
    merged_cats = [categories[0]]
    merged_thick = [inner[0]]
    for cat, thick in zip(categories[1:], inner[1:]):
        if cat == merged_cats[-1]:
            # same material as the previous layer: just grow it
            merged_thick[-1] += thick
        else:
            merged_cats.append(cat)
            merged_thick.append(thick)
    return merged_cats, [np.inf] + merged_thick + [np.inf]
def get_structure(categories, values, materials, ds, continuous=False,
                  max_value=400):
    '''
    Given categories and values, return the structure in the form
    (name (str), thickness (nm)).

    Args:
        categories: material indices into *materials*.
        values: discrete thickness ids (keys of *ds*) or, when continuous,
            floats in [0, 1] scaled to nanometers.
        materials: list mapping category index -> material name.
        ds: dict mapping a discrete value id -> thickness in nm.
        continuous: interpret *values* as continuous fractions if True.
        max_value: upper clamp (nm) for continuous thicknesses.
    Returns:
        (names, thickness): material names and thicknesses with np.inf
        ambient sentinels attached at both ends.
    '''
    # Cleanup: removed the dead nested `threshold` function (empty body) and
    # deduplicated the clamp expression across the material branches.
    names = [materials[item] for item in categories]
    if not continuous:
        thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
    else:
        thickness = []
        for category, value in zip(categories, values):
            name = materials[category]
            # material-dependent minimum thickness (nm)
            if name == 'Ag':
                lower = 15
            elif name in METALS:
                lower = 5
            elif name in INSULATORS:
                lower = 1
            else:
                raise ValueError('Material not known')
            thickness.append(
                min(max(lower, int(value * max_value//2)), max_value))
        thickness = [np.inf] + thickness + [np.inf]
    return names, thickness
class DesignTracker():
    def __init__(self, epochs, **kwargs):
        """
        Track the best design (layers, thicknesses, merit) found per epoch.

        Args:
            epochs: number of epochs to pre-allocate slots for; pass -1 to
                start with empty lists and use store(..., append_mode=True).
            **kwargs: extra settings; save_state expects 'output_dir'.
        """
        # Bug fix: the original set the empty lists for epochs == -1 and then
        # unconditionally overwrote them; the overwrite now sits in an else.
        if epochs == -1:
            self.layer_ls = []
            self.thick_ls = []
            self.max_ret_ls = []
        else:
            self.layer_ls = [0] * epochs
            self.thick_ls = [0] * epochs
            self.max_ret_ls = [0] * epochs
        self.kwargs = kwargs
        self.current_e = 0

    def store(self, layers, thicknesses, ret, e, append_mode=False):
        """Record a design for epoch *e*, keeping only the best merit seen."""
        if append_mode:
            self.layer_ls.append(layers)
            self.thick_ls.append(thicknesses)
            self.max_ret_ls.append(ret)
        else:
            if ret >= self.max_ret_ls[e]:
                self.layer_ls[e] = layers
                self.thick_ls[e] = thicknesses
                self.max_ret_ls[e] = ret

    def save_state(self):
        """Pickle this tracker, one file per MPI rank, into kwargs['output_dir']."""
        # save buffer from all processes
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
        pkl.dump(self, open(filename, 'wb'))

    def print_progress(self):
        """Return human-readable 'mat d nm|... , Merit x' lines for filled epochs."""
        progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
        read_progress = []
        for i in range(len(progress)):
            if progress[i] == (0, 0, 0):
                break  # remaining epochs are untouched placeholders
            read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
        return read_progress
def print_progress(progress):
    """Render raw (layers, thicknesses, merit) tuples into display rows, in place.

    Each entry becomes ['mat1 d1 nm|mat2 d2 nm|...', merit]; the mutated
    list is also returned for convenience.
    """
    for idx, entry in enumerate(progress):
        print(entry, 0)
        layers, thicknesses, merit = entry
        rendered = '|'.join('{} {} nm'.format(mat, d)
                            for mat, d in zip(layers, thicknesses))
        progress[idx] = [rendered, merit]
    return progress
class TMM_sim():
    def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
        '''
        Transfer-matrix simulator: returns R/T/A spectra for a layer stack.

        Args:
            mats: material names the designer may use (the shared default
                list is never mutated here, so it is safe).
            wavelength: wavelengths in microns to evaluate.
            substrate: substrate material name, or 'Glass'/'Air'.
            substrate_thick: substrate thickness in nm.
        '''
        self.mats = mats
        # include substrate in the nk table unless it is handled analytically
        self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
        self.wavelength = wavelength
        self.nk_dict = self.load_materials()
        self.substrate = substrate
        self.substrate_thick = substrate_thick

    def load_materials(self):
        '''
        Load material nk data and interpolate it onto self.wavelength.

        Return:
            nk_dict: dict, key -- material name, value: complex refractive
            index sampled at self.wavelength.
        '''
        nk_dict = {}
        for mat in self.all_mats:
            nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
            nk.dropna(inplace=True)
            wl = nk['wl'].to_numpy()
            index = (nk['n'] + nk['k'] * 1.j).to_numpy()
            mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
            mat_nk_fn = interp1d(
                mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
            nk_dict[mat] = mat_nk_fn(self.wavelength)
        return nk_dict

    def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
        '''
        Compute reflection/transmission/absorption spectra for a stack.

        Input:
            materials: list of layer material names (ambient excluded)
            thickness: list of thicknesses in nm with np.inf sentinels at
                both ends. NOTE: mutated in place when a substrate is
                present (its thickness is inserted before the last entry).
            theta: degree, the incidence angle
            plot: if True, draw the spectra
            title: if True (and plot), add a stack-description title
        Return:
            R, T, A: numpy arrays over self.wavelength
        '''
        degree = pi/180
        if self.substrate != 'Air':
            thickness.insert(-1, self.substrate_thick)  # substrate thickness
        R, T, A = [], [], []
        for i, lambda_vac in enumerate(self.wavelength * 1e3):
            # refractive-index profile: ambient, layers, substrate, ambient
            if self.substrate == 'Glass':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
            elif self.substrate == 'Air':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
            else:
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
            res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
            R.append(res['R'])
            T.append(res['T'])
        R, T = np.array(R), np.array(T)
        A = 1 - R - T
        if plot:
            self.plot_spectrum(R, T, A)
            if title:
                thick = thickness[1:-1]
                title = ' | '.join(['{}nm {}'.format(d, m)
                                    for d, m in zip(thick, materials)])
                # Bug fix: `is not 'Air'` compared string identity (a
                # SyntaxWarning on modern Python); use != for equality.
                if self.substrate != 'Air':
                    title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
                else:
                    title = 'Air | ' + title + ' | Air'
                plt.title(title, **{'size': '10'})
        return R, T, A

    def plot_spectrum(self, R, T, A):
        """Plot R, T and A versus wavelength (nm) with averaged-value legend."""
        plt.plot(self.wavelength * 1000, R, self.wavelength *
                 1000, T, self.wavelength * 1000, A, linewidth=3)
        plt.ylabel('R/T/A')
        plt.xlabel('Wavelength (nm)')
        plt.legend(['R: Average = {:.2f}%'.
                    format(np.mean(R)*100),
                    'T: Average = {:.2f}%'.
                    format(np.mean(T)*100),
                    'A: Average = {:.2f}%'.
                    format(np.mean(A)*100)])
        plt.grid('on', linestyle='--')
        plt.ylim([0, 1])
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
    """Plot MaxEpRet and AverageEpRet (+/- std band) from a progress.txt file.

    Args:
        file: path to a tab-separated spinningup progress.txt.
        x: column to use for the x-axis (e.g. 'Epoch' or 'Time').
        ax: pair of axes to draw into; created when None.
        color, alpha: line styling.
    Returns:
        The loaded DataFrame.
    """
    df = pd.read_csv(file, sep="\t")
    width = 0.5
    # x = 'Time'
    if ax is None:
        fig, ax = plt.subplots(2,1)
    sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
    # ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
    sns.lineplot(x=x, y='AverageEpRet', data=df,
                 ax=ax[1], color=color, alpha=alpha)
    # shaded band: average +/- width/2 standard deviations
    plt.fill_between(df[x],
                     df['AverageEpRet']-width/2*df['StdEpRet'],
                     df['AverageEpRet']+width/2*df['StdEpRet'],
                     alpha=0.3, color=color)
    return df
def combine_tracker(folder):
    '''
    Merge the per-MPI-rank design trackers found in *folder* into one.

    If a merged pickle already exists it is loaded and returned; otherwise
    the per-epoch best (by merit) across ranks is assembled and, once the
    tracker is fully populated, cached back to disk.
    '''
    trackers = []
    if 'design_tracker_merged.pkl' in os.listdir(folder):
        tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
        combined_tracker = pkl.load(open(tracker_file, 'rb'))
        return combined_tracker
    for file in os.listdir(folder):
        if file.startswith('design_tracker_'):
            tracker_file = os.path.join(folder, file)
            trackers.append(pkl.load(open(tracker_file, 'rb')))
    combined_tracker = DesignTracker(len(trackers[0].layer_ls))
    # for each epoch pick the rank that achieved the highest merit
    max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)
    for e in range(len(trackers[0].layer_ls)):
        combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
        combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
        combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]
    if combined_tracker.layer_ls[-1] != 0:
        # only cache once the last epoch slot has been filled in
        tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
        pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file), 'wb'))
    return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
    """Overlay training curves for several experiments and print best designs.

    Args:
        exp_ls: experiment names under ../spinningup/data/.
        seed_ls: seed per experiment (folder suffix _s<seed>).
        color, alpha: per-experiment line styling, aligned with exp_ls.
        x: column used as the x-axis.
    Returns:
        List of per-experiment readable progress lists.
    """
    root = '../spinningup/data/'
    progress_ls = []
    max_ret_ls = []
    params = {'size':14}
    matplotlib.rc('font', **params)
    fig, ax = plt.subplots(2,1, figsize=(10,8))
    for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
        folder = os.path.join(root, exp, exp+'_s{}'.format(seed))
        progress_file = os.path.join(folder, 'progress.txt')
        df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)
        tracker = combine_tracker(folder)
        progress = tracker.print_progress()
        print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))
        progress_ls.append(progress)
        max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
    ax[0].legend(max_ret_ls)
    ax[1].legend(exp_ls)
    plt.show()
    return progress_ls
def load_exp_res(folder):
    """Load every run under *folder* into one concatenated DataFrame.

    Each row combines the run's progress.txt metrics with its repeated
    hyperparameters from config.json and the best design found by the run.
    """
    subfolders = [item for item in glob.glob(folder+'/*')]
    def read_hyper(file_name, rep=10):
        # Flatten config.json into a DataFrame, repeating every value `rep`
        # times so it can be concatenated column-wise with the progress rows.
        with open(os.path.join(file_name, 'config.json')) as f:
            hypers = json.load(f)
        hypers_dict = {}
        for k, v in hypers.items():
            if k.startswith('logger'):
                continue
            elif isinstance(v, dict):
                for kk, vv in v.items():
                    if isinstance(vv, list):
                        hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep
                    else:
                        hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep
            else:
                hypers_dict[k] = [v] * rep
        hyper_df = pd.DataFrame(hypers_dict)
        return hyper_df
    first=True # first pandas file to load
    for subfolder in tqdm(subfolders):
        runs = glob.glob(subfolder+'/*')
        num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\t'))
        for run in runs:
            tracker = combine_tracker(run)
            progress = tracker.print_progress()
            best_design = progress[np.argmax(tracker.max_ret_ls)]
            if first:
                df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
                hyper_df = read_hyper(run, rep=len(df))
                best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))
                df = pd.concat([df, hyper_df, best_designs_df], axis=1)
                first = False
            else:
                df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
                hyper_df = read_hyper(run, rep=len(df_))
                best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))
                df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)
                # append this run's rows below the accumulated frame
                df = pd.concat([df, df_], axis=0)
    return df
def finetune(simulator, m0, x0, target, display=False, bounds=None):
    '''
    Finetune the structure using quasi-Newton's method.

    Args:
        simulator: TMM_sim-like object exposing .spectrum().
        m0: materials list given by the upstream RL
        x0: thicknesses given by the upstream RL
        target: dict of target spectra, as accepted by cal_reward.
        display: if true, then plot the spectrum before and after the finetuning.
        bounds: per-layer (low, high) thickness bounds; defaults to (15, 200).

    Returns:
        x_opt: finetuned thickness list
        res: the scipy OptimizeResult from minimize()
    '''
    def objective_func(x):
        # minimize (1 - merit), i.e. maximize the spectral match
        R, T, A = simulator.spectrum(m0, [np.inf]+list(x)+[np.inf])
        return 1-cal_reward(R, T, A, target)
    if bounds is None:
        bounds = [(15, 200)] * len(x0)
    res = minimize(objective_func, x0, bounds=bounds, options={'disp':True})
    # thicknesses are deployed as integer nanometers
    x_opt = [int(item) for item in res.x]
    if display:
        plt.figure()
        simulator.spectrum(m0, [np.inf]+x0+[np.inf], title=True, plot=True)
        plt.figure()
        simulator.spectrum(m0, [np.inf]+x_opt+[np.inf], title=True, plot=True)
    return x_opt, res
|
9,328 | 83bbb6433d1577be869bf840bdd42aa86e415da6 | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
    """Render index.html with a server-supplied headline."""
    # The template receives the value under the variable name `headline`.
    return render_template("index.html", headline="Hello world from a variable!")
# Reuse the same index.html template with different content.
@app.route("/bye/")
def bye():
    """Render index.html with a goodbye headline."""
    return render_template("index.html", headline="Goodbye!")
9,329 | 1158ab95ac67d62459284267a8cc9f587daf89b1 | from zipfile import ZipFile
import reference_new_stdds
import reader
import os
def runall(path):
    """Run the extraction pipeline for one XML file and zip the CSV outputs.

    Args:
        path: file name of the XML under Examples/xmls/ (not a full path).
    """
    print("==========================")
    """get the current path """
    abs_file_path = os.path.abspath(__file__)
    parent_dir = os.path.dirname(abs_file_path)
    parent_dir = os.path.dirname(parent_dir)
    """ path that stores xml files"""
    xml_path = parent_dir.replace("\\", "/") + "/Examples/xmls/"+path
    # print(xml_path)
    """ call RIE module"""
    ref_list = reference_new_stdds.get_contri_info(xml_path)
    reference_new_stdds.write_excel(ref_list)
    """ call reader module"""
    reader.write_csv(path)
    # Create a zip file bundling all generated CSVs
    with ZipFile(parent_dir.replace("\\", "/")+'/Output/xmlOutput/XMLOutput.zip', 'w') as zipObj:
        # # Add multiple files to the zip
        zipObj.write(parent_dir.replace("\\", "/")+'/Output/xmlOutput/TNC_Taxonomic_name_usage_XmlOutput.csv', "TNC_Taxonomic_name_usage_XmlOutput.csv")
        zipObj.write(parent_dir.replace("\\", "/")+'/Output/xmlOutput/TNC_Typification_XmlOutput.csv', "TNC_Typification_XmlOutput.csv")
        zipObj.write(parent_dir.replace("\\", "/")+'/Output/xmlOutput/{}_XmlOutput.csv'.format(path.replace(".xml","")), "{}_XmlOutput.csv".format(path.replace(".xml","")))
        zipObj.write(parent_dir.replace("\\", "/") + '/Output/xmlOutput/BibliographicResource.csv', "BibliographicResource.csv")
# Example invocation executed on import/run of this module.
runall("A_new_genus_and_two_new_species_of_miniature_clingfishes.xml")
|
9,330 | 39fb8d9f93be1e6c1ed2a425d14061737d643ab6 | from .hailjwt import JWTClient, get_domain, authenticated_users_only
# Public API re-exported from the hailjwt package.
__all__ = [
    'JWTClient',
    'get_domain',
    'authenticated_users_only'
]
|
9,331 | 94348aed0585024c70062e9201fb41aae2122625 | # NumPy(Numerical Python) 是 Python 语言的一个扩展程序库,
# 支持大量的维度数组与矩阵运算,此外也针对数组运算提供大量的数学函数库。 |
9,332 | 13e89e13f88ac306a62be3390f5292665f128a4d | #encoding: utf-8
"""
Desc:
Author: Makoto OKITA
Date: 2016/09/03
"""
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
import itertools
"""
基本処理
"""
class RnnAnalize(Chain):
    """LSTM classifier: embeds an id sequence and predicts one class label."""
    def __init__(self, v, k, y):
        # v: vocabulary size, k: embedding/hidden size, y: number of classes
        super(RnnAnalize, self).__init__(
            embed = L.EmbedID(v, k),
            H = L.LSTM(k, k),
            W = L.Linear(k, y),
        )
    def __call__(self, x, y):
        """Accumulate softmax cross-entropy of label y over every step of x.

        Returns (accumulated loss, logits after the last step)."""
        accum_loss = None
        v, k = self.embed.W.data.shape
        self.H.reset_state()
        for i in range(len(x)):
            nx = Variable(np.array([x[i]], dtype=np.int32))
            ny = Variable(np.array([y], dtype=np.int32))
            wx = self.embed(nx)
            wh = self.H(wx)
            ww = self.W(wh)
            loss = F.softmax_cross_entropy(ww, ny)
            accum_loss = loss if accum_loss is None else accum_loss + loss
        return accum_loss, ww
    def forward(self, x):
        """Feed the sequence through; return the logits after the last token."""
        for i in range(len(x)):
            nx = Variable(np.array([x[i]], dtype=np.int32))
            wx = self.embed(nx)
            wh = self.H(wx)
            ww = self.W(wh)
        return ww
"""
学習・予測処理
"""
class AnazlizeTrainer():
    """Wraps RnnAnalize with an optimizer for training, prediction and persistence."""
    def __init__(self, v, k, y):
        self.model = RnnAnalize(v, k, y)
        #self.model.compute_accuracy = False # training may be faster when accuracy is not needed?
        self.optimizer = optimizers.Adam() # Adam, AdaGrad, AdaDelta, RMSpropGraves, SGD, MomentumSGD
        self.optimizer.setup(self.model)
        #self.optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005)) #??? weight-decay regularization ??? not persisted by save!?
    ### Training
    def practice(self, x, y):
        """One optimizer step on sequence x with label y; returns (loss, logits)."""
        self.model.H.reset_state()
        self.model.zerograds()
        loss, y = self.model(x, y)
        loss.backward()
        #loss.unchain_backward() # truncate
        self.optimizer.update()
        return loss, y
    ### Prediction
    def predict(self, x):
        """Return softmax class probabilities for sequence x."""
        self.model.H.reset_state()
        self.model.zerograds()
        y = self.model.forward(x)
        return F.softmax(y)
    def save(self, filename):
        # save both the model and the optimizer state
        serializers.save_npz(filename +'_model.dat', self.model)
        serializers.save_npz(filename +'_optimizer.dat', self.optimizer)
    def load(self, filename):
        serializers.load_npz(filename +'_model.dat', self.model)
        serializers.load_npz(filename +'_optimizer.dat', self.optimizer)
## Test Main — Python 2 smoke-test driver: `train` fits on toy sequences,
## `predict` reads id sequences from stdin and ranks the classes.
if __name__ == "__main__":
    import sys
    import io
    import re
    arg1 = sys.argv[1] if len(sys.argv) == 2 else None
    trainData = [[4], [1,2,3], [10,11,12], [1,22,23], [1], [5],[6],[7],[8],[9] ]
    #for data in baseData:
    #    for i in itertools.permutations(data):
    #        trainData.append( list(i) )
    print trainData
    print len(trainData)
    #dim_in = 1000
    #dim_mid = 100
    #dim_out = len(trainData)
    dim_in = len(trainData)
    dim_mid = 50
    dim_out = len(trainData)
    epoch = 1
    ## Training
    if arg1 == 'train':
        print "training..."
        train = AnazlizeTrainer(dim_in, dim_mid, dim_out)
        for j in range(epoch):
            i = 0
            for ids in trainData:
                #pp(ids)
                if True:
                    # train on every permutation of each sequence
                    for l in itertools.permutations(ids):
                        x = list(l)
                        #print(x)
                        #loss, y = train.practice(x[::-1], i)
                        loss, y = train.practice(x, i)
                else:
                    loss, y = train.practice(ids[::-1], i)
                    #loss, y = train.practice(ids, i)
                #print loss.data
                i += 1
            #if j % 10 == 0:
            #    print loss.data
            print loss.data
        train.save('train_analize')
    ## Prediction
    elif arg1 == 'predict':
        print 'predict...'
        train = AnazlizeTrainer(dim_in, dim_mid, dim_out)
        train.load('train_analize')
        while True:
            #train = AnazlizeTrainer(dim_in, dim_mid, dim_out)
            #train.load('train_analize')
            ids = map(int, raw_input().split())
            print ids
            # the sequence is fed reversed, mirroring training experiments
            y = train.predict(ids[::-1])
            print y.data.argmax(1)[0]
            rank = y.data.argsort()[0]
            uprank = map(int, rank[::-1])
            print uprank
            #print y.data[0]
            for i in uprank:
                print '%d, %2f' % (i, y.data[0][i])
            print ''
9,333 | 1983340b3ce7ba8b631ba090871bea1ef7044943 |
import sys
from pypsi.pipes import ThreadLocalStream
from pypsi.shell import Shell
from pypsi.core import pypsi_print
from nose.tools import *
class PypsiTestShell(Shell):
    """Minimal concrete Shell used as the fixture under test."""
    pass
class TestShellBootstrap(object):
    """Checks that creating a Shell swaps the std streams and print for
    pypsi's wrappers, and that restore() puts the originals back."""
    def setUp(self):
        # capture the real streams before Shell() replaces them
        self.real_stdout = sys.stdout
        self.real_stderr = sys.stderr
        self.real_stdin = sys.stdin
        self.real_print = print
        self.shell = PypsiTestShell()
    def tearDown(self):
        self.shell.restore()
    def test_bootstrap_streams(self):
        # nose-style generator test: yields one check per stream and aspect
        for attr in ('stdout', 'stderr', 'stdin'):
            yield self._test_bootstrap_stream_type, attr
            yield self._test_bootstrap_stream_instance, attr
    def _test_bootstrap_stream_type(self, attr):
        assert_is_instance(getattr(sys, attr), ThreadLocalStream)
    def _test_bootstrap_stream_instance(self, attr):
        # the wrapper must still target the original underlying stream
        assert_equal(
            getattr(sys, attr)._get_target_stream(),
            getattr(self, 'real_' + attr)
        )
    def test_bootstrap_print(self):
        assert_equal(print, pypsi_print)
    def test_restore_print(self):
        self.shell.restore()
        assert_equal(print, self.real_print)
    def test_restore_streams(self):
        for attr in ('stdout', 'stderr', 'stdin'):
            yield self._test_restore_stream_type, attr
            yield self._test_restore_stream_instance, attr
    def _test_restore_stream_type(self, attr):
        self.shell.restore()
        assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)
    def _test_restore_stream_instance(self, attr):
        self.shell.restore()
        assert_equal(
            getattr(sys, attr),
            getattr(self, 'real_'+attr)
        )
|
9,334 | 88d8d04dd7117daed0e976f3abc52c5d7bf18434 | import logging
import os
from os.path import exists, abspath, join, dirname
from os import mkdir
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["MP_NUM_THREADS"] = "1"
from smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner
from plannin_experiment import PlanningExperiment
# Quiet down noisy third-party loggers.
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
# Bug fix: the attribute is `propagate`; the old `propogate` typo silently
# created a dead attribute and left propagation to the root logger enabled.
logging.getLogger("smallab").propagate = False
from smallab.specification_generator import SpecificationGenerator
from smallab.runner.runner import ExperimentRunner
from smallab.runner_implementations.main_process_runner import MainRunner
from itertools import product
from sample_sim.memory_mapper_utility import map_memory
from smallab.file_locations import get_experiment_save_directory
import sys
import numpy as np
if __name__ == '__main__':
    # Experiment launcher: builds baseline + proposed-method specifications
    # and runs them via smallab, optionally across GPUs with multiprocessing.
    if "experiments" in os.getcwd():
        os.chdir("../..")
    this_dir = dirname(abspath(__file__))
    # ensure scratch directories exist
    for dir_name in ('.cache', '.params'):
        path = join(this_dir, dir_name)
        if not exists(path):
            mkdir(path)
    if len(sys.argv) > 1:
        name = sys.argv[1]
    else:
        name = "IPP_POMCP"
    num_seeds = 5
    num_steps = 200
    # parameters shared by every specification
    base_specs = {
        "plot": False,
        "file": ["fn:sbo"],
        "seed": list(range(num_seeds)),
        "objective_c": 10,  # 10 for sbo, 100 for validation envs
        "state_space_dimensionality": [[50,50,200]],  # for fn:sbo, [[62, 70, 5]],  # for validation envs
        "rollout_number_goal": [num_steps * 150],  # z_steps * 150
        "alpha_param": 6,
        "beta_param": 1,
        "epsilon": 10,
        "delta": 0.1,
        "sample_observations": False,
        "use_expected_improvement": False,
        "planning_steps": [num_steps],
    }
    # baseline: fixed rollout allocation, commit for a fixed number of steps
    gen_baseline = base_specs.copy()
    gen_baseline.update({
        "plan_commitment_algorithm": "n_steps",
        "plan_threshold": [1],
        "rollout_allocation_method": ["fixed"],
        "waste_unused_rollouts": [False],
    })
    specs_baseline = SpecificationGenerator().generate(gen_baseline)
    # proposed: t-test plan commitment with beta-ugapeb rollout allocation
    gen_our_best = base_specs.copy()
    gen_our_best.update({
        "plan_commitment_algorithm":"tTest",
        "plan_threshold":[0.05],
        "rollout_allocation_method": ["beta-ugapeb"],
        "waste_unused_rollouts": [True],
    })
    specs_our_best = SpecificationGenerator().generate(gen_our_best)
    specifications = []
    specifications += specs_baseline
    specifications += specs_our_best
    print(f"Expt {name}:\t{len(specifications)/num_seeds} specs to run, over {num_seeds} seeds")
    for spec in specifications:
        if spec["seed"] == 0:
            print(spec)
    runner = ExperimentRunner()
    map_memory(base_specs["file"], base_specs["state_space_dimensionality"])
    DEBUG = False
    if DEBUG:
        # single-process run with exceptions surfaced, for debugging
        runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=True,
                   specification_runner=MainRunner(), use_dashboard=False, force_pickle=True, context_type="fork")
    else:
        gpus = 4
        jobs_per_gpu = 2
        # resource slots: one (gpu, job) pair per concurrent worker
        resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))
        runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=False,
                   specification_runner=MultiprocessingRunner(), context_type="fork", use_dashboard=True,
                   force_pickle=True)
|
9,335 | ccb3ec8e367881710c437e7ae53082a1bb0137e5 | from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import random
import time
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
import os
from scipy.ndimage import filters
import urllib
import sys
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from threading import Thread
import traceback
def timeout(func, args=(), kwargs=None, timeout_duration=1, default=None):
    '''Run func(*args, **kwargs) in a thread; give up after timeout_duration seconds.

    Returns the call's result, `default` if the call raised, or False if the
    worker thread is still running when the timeout expires.

    Adapted from:
    http://code.activestate.com/recipes/473878-timeout-function-using-threading/
    '''
    import threading
    kwargs = {} if kwargs is None else kwargs  # avoid a shared mutable default

    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = None

        def run(self):
            try:
                self.result = func(*args, **kwargs)
            except:
                # any failure in the worker maps to the caller-provided default
                self.result = default

    it = InterruptableThread()
    it.start()
    it.join(timeout_duration)
    # Fix: isAlive() was removed in Python 3.9; is_alive() exists since 2.6.
    if it.is_alive():
        return False
    else:
        return it.result
# Python 2-only helper (urllib.URLopener was removed in Python 3); kept for
# the legacy download path used elsewhere in this script.
testfile = urllib.URLopener()
def intensityImg(im):
    """Collapse an RGB image to luminance, scaled so the peak value is 1."""
    red, green, blue = im[:, :, 0], im[:, :, 1], im[:, :, 2]
    luma = red * 0.30 + green * 0.59 + blue * 0.11
    # normalize color intensities to [0, 1]
    return luma / np.max(luma)
def processImage(local_file_in, local_file_out, face_coords, bounds_ratio):
    """Crop the face box out of local_file_in, resize it to 64x64 and save a
    PNG next to local_file_out; the input file is deleted on success.

    face_coords is (x1, y1, x2, y2); bounds_ratio trims the crop width so
    all faces share one aspect ratio.  Python 2 code: `hdiff/2` below relies
    on integer division.  Errors are reported and swallowed (best effort).
    """
    try:
        img = imread(local_file_in)
        #TODO image_bounds
        real_height = face_coords[3] - face_coords[1]
        new_height = (face_coords[2] - face_coords[0]) * bounds_ratio
        # how many pixel rows exceed the uniform aspect ratio
        hdiff = int(real_height - new_height)
        img_processed = Image.fromarray(
            img[
                face_coords[1]:face_coords[3],
                face_coords[0]+hdiff/2:face_coords[2]-(hdiff/2),
                :]
        )
        img_thumb = img_processed.resize((64, 64),
            resample=Image.BILINEAR)
        # swap the extension for .png regardless of the source format
        img_thumb.save(".".join(local_file_out.split(".")[:-1]) + ".png", "png")
        os.remove(local_file_in)
    except Exception as e:
        print("error processing %s -> %s %s" %
            (local_file_in, local_file_out, face_coords))
        # NOTE(review): traceback.print_exc takes a `limit`, not the
        # exception object — passing `e` here is likely a misuse; confirm.
        traceback.print_exc(e)
        print
        print
def make_actor_dirs(localpath, actor_name):
    """Create <localpath>/{unprocessed,processed}/<Actor_Name> directories,
    skipping any that already exist (spaces in the name become underscores)."""
    print "making actor dirs for %s in %s" % (actor_name, localpath)
    name = actor_name.replace(" ","_")
    dir_unprocessed = os.path.join(localpath, "unprocessed")
    dir_processed = os.path.join(localpath, "processed")
    if not os.path.exists(dir_unprocessed):
        os.mkdir(dir_unprocessed)
    if not os.path.exists(dir_processed):
        os.mkdir(dir_processed)
    actor_dirname = os.path.join(dir_unprocessed, name)
    if not os.path.exists(actor_dirname):
        os.mkdir(actor_dirname)
    actor_dirname = os.path.join(dir_processed, name)
    if not os.path.exists(actor_dirname):
        os.mkdir(actor_dirname)
def doAll(path, localpath):
    """Download every face image listed in the tab-separated file `path`
    and crop/normalise it into <localpath>/processed/<Actor_Name>/.

    Pass 1 scans all bounding boxes for the limiting aspect ratio; pass 2
    downloads (with a 3s timeout) and processes each image.
    Line format appears to be: name \t ? \t ? \t url \t "x1,y1,x2,y2" —
    TODO confirm against the dataset spec.  Python 2 code.
    """
    seen_actors = set()
    bounds_ratio = 0.0
    smallest_width = -1
    # Pass 1: widest width/height ratio and smallest face width, for info.
    for line in open(path):
        spl = line.split("\t")
        coords = map(lambda a: int(a), spl[4].split(","))
        width = coords[2] - coords[0]
        c_ratio = float(width) / (coords[3] - coords[1])
        if c_ratio > bounds_ratio:
            bounds_ratio = c_ratio
        if smallest_width == -1 or width < smallest_width:
            smallest_width = width
    print "bounds_ratio: %s, width:%spx"%(bounds_ratio, smallest_width)
    # Pass 2: fetch and process each listed image (1-based counter names files).
    for i,line in enumerate(open(path), 1):
        # A version without timeout (uncomment in case you need to
        # unsupress exceptions, which timeout() does)
        # testfile.retrieve(line.split()[4], "unprocessed/"+filename)
        # timeout is used to stop downloading images which take too long to download
        # helper variables
        spl = line.split("\t")
        person_name = spl[0]
        if person_name not in seen_actors:
            seen_actors.add(person_name)
            make_actor_dirs(localpath, person_name)
        person_name = person_name.replace(" ","_")
        face_coords = map(lambda a: int(a), spl[4].split(","))
        url = spl[3]
        extension = url.split('.')[-1]
        local_file = os.path.join(
            person_name, str(i) + "." + extension)
        local_file_full = os.path.join(
            localpath, "unprocessed", local_file)
        # print local_file_full
        #load the file with timeout
        timeout(testfile.retrieve,
            (url, local_file_full), {}, 3)
        # on fail, print msg and continue
        if not os.path.isfile(local_file_full):
            print "..fetching file failed <%s>"%(url)
        # otherwise, process the image
        else:
            # print("processing " + local_file)
            # print url, face_coords
            processImage(
                local_file_full,
                os.path.join(localpath, "processed", local_file),
                face_coords, bounds_ratio)
            # print "created processed/%s"%(local_file)
if __name__ == "__main__":
    # CLI entry point: <data file> <local path>
    if len(sys.argv) < 3:
        # NOTE(review): the usage string has %s placeholders but no
        # formatting arguments — prints literally; confirm intent.
        print "usage: %s <data file> <local path>"
        sys.exit(1)
    doAll(sys.argv[1], sys.argv[2])
|
9,336 | 8e71ea23d04199e8fb54099c404c5a4e9af6c4b1 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import datetime
#takes in a sorted data frame holding the actuals, the predicted values (sorted descending) and the percentile of each obsevation
#and returns a new dataframe with all of the appropriate calculations
def lift_calculations(df):
    """Augment *df* (sorted by predicted score, descending) with cumulative
    gain/lift statistics, in place, and return it."""
    n_rows = len(df)
    n_positives = len(df[df['actual'] == 1])
    # running sample index and cumulative count of actual positives
    df['sample_num'] = range(n_rows)
    df['actual_sum'] = df['actual'].cumsum()
    # percentage of the sample covered up to and including each row
    df['per_sample_covered'] = ((df['sample_num'] + 1) * 100) / n_rows
    # percentage of all positives captured so far
    df['per_pos_captured'] = (df['actual_sum'] / n_positives) * 100
    # lift: capture rate relative to random targeting at the same coverage
    df['prop_pos_captured_from_all'] = df['per_pos_captured'] / df['per_sample_covered']
    return df
#creates a plot of cumulative positive gain
#takes in a dataframe with all of the relevant statistics already calculated
def gain_plot(df,figsize=None,x_range=None,y_range=None,legend='on'):
    """Plot the cumulative % of true positives captured vs % of sample
    covered (gain curve), with a random-targeting diagonal for reference."""
    if figsize:
        fig = plt.figure(figsize=figsize)
    # Prepend a (0, 0) point so the curve starts at the origin.
    # BUG FIX: Series.append was removed in pandas 2.0; use pd.concat.
    xs = pd.concat([pd.Series([0]), df['per_sample_covered']])
    ys = pd.concat([pd.Series([0]), df['per_pos_captured']])
    plt.plot(xs, ys)
    plt.plot([0, 100], [0, 100])  # random-targeting baseline
    plt.title('Cumulative True Positives Captured vs Random (Gain Curve)',fontsize=20)
    plt.xlabel('% of Sample Covered',fontsize=15)
    plt.ylabel('% of True Positives Captured',fontsize=15)
    if x_range:
        plt.xlim(x_range[0],x_range[1])
    if y_range:
        plt.ylim(y_range[0],y_range[1])
    if legend=='on':
        plt.legend(['Predictive Targeting','Random Targeting'],fontsize=12,loc=2)
def lift_plot(df,figsize=None,x_range=None,y_range=None,legend='on'):
    """Plot the lift curve: at each coverage cutoff, how many times better
    than random targeting the model's capture rate is."""
    if figsize:
        fig = plt.figure(figsize=figsize)
    coverage = df['per_sample_covered']
    plt.plot(coverage, df['prop_pos_captured_from_all'])
    plt.plot([coverage.min(), 100], [1, 1])  # random baseline: lift == 1
    plt.title('Lift Curve', fontsize=20)
    plt.xlabel('% of Customers', fontsize=15)
    plt.ylabel('Lift', fontsize=15)
    if x_range:
        plt.xlim(x_range[0], x_range[1])
    if y_range:
        plt.ylim(y_range[0], y_range[1])
    if legend == 'on':
        plt.legend(['Predictive Targeting', 'Random Targeting'], fontsize=12)
#a function which takes in an array of predicted values and returns the percentile associated with each one
def percentile_gen(arr_y_pred):
    """Assign each prediction to a percentile bucket (1..100)."""
    # Rank with method='first' so ties get distinct ranks and qcut never
    # sees overlapping bin edges (important when many scores are identical).
    ranks = pd.Series(arr_y_pred).rank(method='first')
    buckets = pd.qcut(ranks, 100, labels=range(1, 101))
    return np.array(buckets)
#a function which takes in an array of actual test values and the model predicted values and stacks them together
#then sorts them and puts them into a dataframe
def data_prep(arr_y_test,arr_y_pred):
    """Stack actuals, predictions and percentiles into one DataFrame sorted
    by predicted probability (descending) with lift statistics attached."""
    # bucket every observation into a percentile
    percentiles = percentile_gen(arr_y_pred)
    #print(percentiles.shape)
    # join the three columns side by side
    stacked = np.hstack((arr_y_test.reshape((len(arr_y_test), 1)),
                         arr_y_pred.reshape((len(arr_y_pred), 1)),
                         percentiles.reshape((len(percentiles), 1))))
    data_df = pd.DataFrame(stacked)
    data_df.columns = ['actual', 'prob', 'percentile']
    data_df.actual = data_df.actual.astype(int)
    data_df.prob = data_df.prob.astype('float64')
    # best-scored observations first
    data_df = data_df.sort_values(by='prob', ascending=False)
    # attach cumulative gain/lift metrics
    return lift_calculations(data_df)
#a function which plots the lift curve for the model
def lift_curve(arr_y_test,arr_y_pred,figsize=None,x_range=None,y_range=None,legend='on'):
    """Prepare the data and display the model's lift curve."""
    prepared = data_prep(arr_y_test, arr_y_pred)
    #print(prepared.groupby('percentile').size())
    lift_plot(prepared, figsize=figsize, x_range=x_range, y_range=y_range, legend=legend)
    plt.show()
#a function which plots the gain curve for the model
def gain_curve(arr_y_test,arr_y_pred,figsize=None,x_range=None,y_range=None,legend='on'):
    """Prepare the data and display the model's gain curve."""
    prepared = data_prep(arr_y_test, arr_y_pred)
    gain_plot(prepared, figsize=figsize, x_range=x_range, y_range=y_range, legend=legend)
    plt.show()
#a function which returns two numpy arrays:
#the first one is the percent of samples covered (X-value)
#the second being the lift values for the correponding the sample (Y-value)
def lift_values_generator(arr_y_test,arr_y_pred):
    """Return (percent-of-sample-covered, lift) series for external plotting."""
    prepared = data_prep(arr_y_test, arr_y_pred)
    return prepared.per_sample_covered, prepared.prop_pos_captured_from_all
#a function which plots multiple lift curves all on the same graph
#the first parameter is the x axis which represents %of the sample covered
#the second parameter is a list of lists, where each one presents the lift
#curve for a particular model, the last parameter holds the labels for the lift
#curves in the corresponding order
def plot_lift_curves(percent_sample_covered,list_of_lift_metrics,labels,figsize=None,x_range=None,y_range=None,legend='on'):
    """Overlay several models' lift curves on a single set of axes.

    `list_of_lift_metrics` holds one lift series per model; `labels` names
    them in the same order.
    """
    if figsize:
        plt.figure(figsize=figsize)
    # one curve per model
    for lift_scores in list_of_lift_metrics:
        plt.plot(percent_sample_covered, lift_scores)
    # baseline for random guessing at lift == 1
    plt.plot([percent_sample_covered.min(), 100], [1, 1])
    plt.title('Lift Curves Comparison', fontsize=20)
    plt.xlabel('% of Customers', fontsize=15)
    plt.ylabel('Lift', fontsize=15)
    if x_range:
        plt.xlim(x_range[0], x_range[1])
    if y_range:
        plt.ylim(y_range[0], y_range[1])
    if legend == 'on':
        plt.legend(labels + ['Random Guessing'], fontsize=12, loc='best')
|
9,337 | c0c8f40e43f1c27f8efa47cfc366c6076b77b9c9 | import sys
minus = "-"
plus = "+"
divis = "/"
multi = "*"
power = "^"
unary = "-"
br_op = "("
br_cl = ")"
operations = [power, divis, multi, minus, plus]
digits = ['1','2','3','4','5','6','7','8','9','0','.']
def find_close_pos(the_string):
    """Return the index of the ')' that balances the first '(' in the_string.

    Returns None when the parentheses never balance.
    """
    open_count = 0
    close_count = 0
    for i in range(len(the_string)):
        if the_string[i] == br_op :
            open_count = open_count + 1
        if the_string[i] == br_cl :
            close_count = close_count + 1
        # BUG FIX: only report balance once at least one '(' has been seen.
        # The original returned 0 immediately for inputs such as "2+(3)"
        # because 0 == 0 held before any parenthesis was encountered.
        if open_count > 0 and close_count == open_count:
            return i
def parse(the_string):
    """Tokenize a flat (paren-free) expression into [float, op, float, ...].

    Handles a leading unary minus and a minus directly after another
    operator (e.g. "3*-2").  Assumes the expression ends with a number;
    operator symbols come from the module-level `operations` list.
    """
    parsed_string = []
    number = ""
    for i in range(len(the_string)):
        if the_string[i] == "-" and i == 0: ### string = "-2 + blablabla"
            number += the_string[i]
        elif the_string[i] in operations and the_string[i-1] not in operations: ### ^
            # binary operator: flush the pending number, then the operator
            parsed_string.append(float(number))
            parsed_string.append(the_string[i])
            number = ""
        elif the_string[i] == "-" and the_string[i-1] in operations: ### ^-
            # unary minus after an operator: part of the next number
            number += the_string[i]
        elif the_string[i] in digits: ### 2
            number += the_string[i]
    # flush the trailing number (raises ValueError if the input ends with
    # an operator — callers rely on compute()'s try/except for that)
    parsed_string.append(float(number))
    return parsed_string
def single_operation(parsed_string):
    """Evaluate a single [operand, operator, operand] triple; None if the
    operator is not recognised."""
    left, op, right = parsed_string[0], parsed_string[1], parsed_string[2]
    dispatch = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "/": lambda a, b: a / b,
        "*": lambda a, b: a * b,
        "^": lambda a, b: a ** b,
    }
    if op in dispatch:
        return dispatch[op](left, right)
def compute(the_string):
    """Recursively evaluate an arithmetic expression string.

    Parenthesised sub-expressions are reduced first (innermost via
    recursion), then the flat token list is folded one operator at a time
    in the precedence order of the module-level `operations` list.
    Returns None on any parse/evaluation error (deliberate best effort).
    """
    try:
        the_string = the_string.replace(" ", "") ### delete space chars
        while br_op in the_string :
            open_pos = the_string.index(br_op)
            close_pos = find_close_pos(the_string)
            old = the_string[open_pos:close_pos+1]
            new = compute(the_string[open_pos+1:close_pos])
            the_string = the_string.replace(old, str(new))
        parsed_string = parse(the_string)
        for operation in operations:
            while operation in parsed_string:
                if operation == power:
                    # '^' is right-associative: reduce the rightmost occurrence
                    pos = len(parsed_string) - parsed_string[::-1].index(operation)
                else:
                    # BUG FIX: '-' and '/' must associate left-to-right.
                    # The original always reduced the rightmost occurrence,
                    # so "2-3-4" evaluated as 2-(3-4) = 3 instead of -5.
                    pos = parsed_string.index(operation) + 1
                res = single_operation(parsed_string[pos-2:pos+1])
                parsed_string[pos-2:pos+1] = [res]
        return parsed_string[0]
    except:
        pass
def read_file(path):
    """Return the non-blank lines of *path* (trailing newlines preserved).

    Uses a context manager so the file handle is closed deterministically;
    the original left the handle open for the garbage collector.
    """
    with open(path, 'r') as handle:
        return [line for line in handle if line.strip() != ""]
def main(path):
    """Evaluate every non-blank line of *path* as an arithmetic expression
    and print each result rounded to 5 places, trimmed of trailing zeros.
    Python 2 code (print statements).  On any error the offending line and
    the exception type are printed and processing stops."""
    try:
        for line in read_file(path):
            print str(round(float(compute(line)),5)).rstrip('0').rstrip('.')
    except:
        print line
        print "Unexpected error:", sys.exc_info()[0]
if __name__ == '__main__':
    # CLI: calculator.py <file with one expression per line>
    path = sys.argv[1]
    main(path)
|
9,338 | 2027904401e5be7b1c95eebec3a1e6a88c25660c | from Socket import Socket
import threading
class Server(Socket):
    """TCP chat server: accepts clients and broadcasts every received
    message to all connected users (one receive thread per client)."""
    def __init__(self):
        super(Server, self).__init__()
        print("server listening")
        # connected client sockets
        self.users = []
    def set_up(self):
        """Bind, listen, and enter the accept loop (blocks forever)."""
        # NOTE(review): address/port are hard-coded; confirm deployment target.
        self.bind(("192.168.0.109", 1337))
        self.listen(0)
        self.accept_sockets()
    def send_data(self, data):
        """Broadcast raw bytes to every user, dropping dead connections."""
        for user in self.users:
            try:
                user.send(data)
            except ConnectionResetError:
                # peer vanished: remove it from the broadcast list
                self.users.pop(self.users.index(user))
                pass
    def listen_socket(self, listened_socket=None):
        """Receive loop for one client; kicks clients that repeatedly send
        empty messages (simple anti-spam)."""
        countForDel = 0
        while True:
            data = listened_socket.recv(2048)
            # [0:-2] strips what is presumably a trailing "\r\n" — confirm
            if data.decode("utf-8")[0:-2] == '':
                countForDel += 1
                if countForDel > 5:
                    print("deleting user: Antispam")
                    self.users.pop(self.users.index(listened_socket))
                    # terminate this client's thread via the exception
                    raise ConnectionResetError
            print(f"User sent {data}")
            self.send_data(data)
    def accept_sockets(self):
        """Accept clients forever; each gets a dedicated receive thread."""
        while True:
            user_socket, address = self.accept()
            print(f"User <{address[0]}> connected!")
            self.users.append(user_socket)  # register the new user
            print(len(self.users))
            listen_accepted_user = threading.Thread(
                target=self.listen_socket,
                args=(user_socket,))
            listen_accepted_user.start()
if __name__ == '__main__':
    # Start the chat server and begin accepting clients (blocks forever).
    server = Server()
    server.set_up()
|
9,339 | 00ed68c68d51c5019fde0c489cd133be3d6985c3 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 12:39:59 2015
@author: user
Needs to be run after the basic analysis which loads all the data into workspace
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def AverageLeftRight(EyeData):
    """Average the two eyes' gaze coordinates into avg_x/avg_y, in place.

    Samples where one eye was not detected (coordinate < -100, apparently
    the tracker's sentinel — confirm) fall back to the other eye alone.
    """
    for eyes in EyeData:
        # Take the average of two eyes to get more accurate gaze position
        eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2
        eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2
        # Do not take the average if one of the eyes was not detected;
        # use .loc[mask, col] to avoid chained-assignment pitfalls.
        eyes.loc[eyes.right_x < -100, 'avg_x'] = eyes.loc[eyes.right_x < -100, 'left_x']
        eyes.loc[eyes.right_y < -100, 'avg_y'] = eyes.loc[eyes.right_y < -100, 'left_y']
        eyes.loc[eyes.left_x < -100, 'avg_x'] = eyes.loc[eyes.left_x < -100, 'right_x']
        # BUG FIX: the original wrote right_y into avg_x (and keyed on
        # left_y), leaving avg_y wrong whenever the left eye was missing.
        eyes.loc[eyes.left_y < -100, 'avg_y'] = eyes.loc[eyes.left_y < -100, 'right_y']
        # NOTE(review): the original ended with `eyes = eyes.loc[eyes.avg_x > 0]`,
        # which only rebound the loop variable and had no effect; dropped.
def PlotXY(EyeData):
    """Plot the averaged x and y gaze traces side by side, one figure per
    recording."""
    for eyes in EyeData:
        fig = plt.figure()
        fig.suptitle('Separate Components')
        x_axes = fig.add_subplot(121)
        y_axes = fig.add_subplot(122)
        x_axes.plot(eyes.avg_x)
        y_axes.plot(eyes.avg_y)
|
9,340 | 56e8cdec854b3b7a2f925e70d7d59a73b76f9952 | from collections import defaultdict
from mask import Mask
from utils import bits_to_decimal
def get_program(filename):
    """Parse the puzzle input into a list of Mask objects, each carrying
    the memory writes that follow it."""
    program = []
    current = None
    with open(filename, 'r') as handle:
        for raw in handle:
            raw = raw[:-1]  # drop the trailing newline character
            if 'mask' in raw:
                # a new mask begins; flush the previous one first
                if current is not None:
                    program.append(current)
                current = Mask(raw)
            elif 'mem' in raw:
                current.add_mem(raw)
    # the final mask is never followed by another 'mask' line
    program.append(current)
    return program
def run_program_v1(program):
    """Replay the program with version-1 masking: each write stores the
    masked bits at its address (later writes win)."""
    addresses = defaultdict(int)
    for mask in program:
        for mem in mask.mems:
            addresses[mem[0]] = mask.apply_v1(mem[1])
    return addresses
def run_program_v2(program):
    """Replay the program with version-2 masking: each write expands into
    writes at every floating address the mask generates."""
    addresses = defaultdict(int)
    for mask in program:
        for mem in mask.mems:
            addresses.update(mask.apply_v2(mem))
    return addresses
if __name__ == "__main__":
    # Solve both puzzle parts from input.txt and print the answers.
    program = get_program('input.txt')
    addresses_v1 = run_program_v1(program)
    part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])
    print(f'Part One Answer: {part_one}')
    addresses_v2 = run_program_v2(program)
    part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])
    print(f'Part Two Answer: {part_two}')
|
9,341 | 39475626b7e3e0f4c8143b300c002a2eb50cc23a | """Gaussian mixture model, with Stochastic EM algorithm."""
import numpy as np
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky
from Core.gllim import MyGMM
class SEMGaussianMixture(MyGMM):
    """Gaussian mixture fitted with the Stochastic EM (SEM) algorithm.

    Note: observations are named Y here instead of X in the parent class.
    """
    def _compute_Z_conditionnal_density(self,Y):
        """
        Compute the conditional probabilities of Z_i given Y_i.
        :param Y: observations (n_samples, n_features)
        :return: row-stochastic matrix (n_samples, n_components)
        """
        proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)
        s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))
        return proba_cond / s # normalize each row to sum to 1
    def _draw_conditionnal_Z(self,Y):
        """
        Draw one sample of Z given Y.
        :param Y: observations (n_samples, n_features)
        :return: Z (n_samples, n_components), one-hot rows: Zik = 1 iff Zi == ek
        """
        M = self._compute_Z_conditionnal_density(Y)
        # Inverse-CDF sampling per row: compare a uniform draw against the
        # cumulative probabilities, then one-hot encode the chosen component.
        s = M.cumsum(axis=1)
        r = np.random.rand(M.shape[0])[:,np.newaxis]
        zi = (s < r).sum(axis=1)[:,np.newaxis]
        I = np.empty(M.shape)
        I[:] = np.arange(M.shape[1])
        return (I == zi).astype(float)
    def threshold(self,Z,n_features):
        # 1 only if every component received at least n_features + 1 samples,
        # keeping the M-step covariance estimates well-defined.
        pik = Z.sum(axis=0)
        return (pik >= (n_features + 1)).prod()
    def _m_step(self, Y, log_resp):
        """M step.
        Parameters
        ----------
        Y : array-like, shape (n_samples, n_features)
        log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in Y.
        """
        # Redraw Z until every component is sufficiently populated.
        Z = self._draw_conditionnal_Z(Y)
        while not self.threshold(Z,Y.shape[1]): # population threshold
            Z = self._draw_conditionnal_Z(Y)
            print("Ajustement au seuil")
        n_samples, _ = Y.shape
        self.weights_, self.means_, self.covariances_ = (
            _estimate_gaussian_parameters(Y, Z, self.reg_covar,
                                          self.covariance_type))
        self.weights_ /= n_samples
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type)
        self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
    """Gaussian mixture fitted with SAEM: each M-step interpolates between
    the stochastic (SEM) and deterministic (EM) parameter updates with a
    step size gamma(r) that decays over the iterations."""
    def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
        super()._print_verbose_msg_iter_end(n_iter,diff_ll)
        self.current_iter = n_iter + 1 # next iteration index
    def _m_step(self, Y, log_resp):
        """M step.
        Parameters
        ----------
        Y : array-like, shape (n_samples, n_features)
        log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in Y.
        """
        # At most 10 redraws to satisfy the population threshold.
        Z = self._draw_conditionnal_Z(Y)
        i = 0
        while i < 10 and not self.threshold(Z, Y.shape[1]): # population threshold
            Z = self._draw_conditionnal_Z(Y)
            i += 1
            print("Ajustement au seuil")
        n_samples, _ = Y.shape
        # Stochastic (SEM) estimate from the sampled hard assignments Z.
        SEMweights_, SEMmeans_, SEMcovariances_ = (
            _estimate_gaussian_parameters(Y, Z, self.reg_covar,
                                          self.covariance_type))
        SEMweights_ /= n_samples
        # Deterministic (EM) estimate from the soft responsibilities.
        EMweights_, EMmeans_, EMcovariances_ = (
            _estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,
                                          self.covariance_type))
        EMweights_ /= n_samples
        # Convex combination controlled by the decaying step size gamma(r).
        r = self.current_iter
        gr = self.gamma(r)
        self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
        self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
        self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type)
        self._m_step_callback(Y)
    @staticmethod
    def gamma(r):
        # Step size 1/sqrt(r + 1): full SEM weight early, vanishing later.
        return 1 / np.sqrt( r + 1)
|
9,342 | f1fdba1c07a29aa22ee8d0dcbd6f902aa2e8b4c2 | from django.shortcuts import render, HttpResponse, redirect
from ..login.models import *
from ..dashboard.models import *
def display(request, id):
    """Render the detail page for a single Job."""
    job = Job.objects.get(id=int(id))
    return render(request, 'handy_helper_exam/display.html', {'job': job})
|
9,343 | df19aa720993c2385a6d025cf7ec8f3935ee4191 | ####################################################################
# a COM client coded in Python: talk to MS-Word via its COM object
# model; uses either dynamic dispatch (run-time lookup/binding),
# or the static and faster type-library dispatch if makepy.py has
# been run; install the windows win32all extensions package to use
# this interface; Word runs hidden unless Visible is set to 1 (and
# Visible lets you watch, but impacts interactive Word sessions);
####################################################################
from sys import argv
# Target directory for the generated .doc files; overridable via argv[1].
docdir = 'C:\\temp\\'
if len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\
from win32com.client import Dispatch # early or late binding
word = Dispatch('Word.Application') # connect/start word
word.Visible = 1 # else word runs hidden
# create and save new doc file
newdoc = word.Documents.Add() # call word methods
spot = newdoc.Range(0,0)
spot.InsertBefore('Hello COM client world!') # insert some text
newdoc.SaveAs(docdir + 'pycom.doc') # save in doc file
newdoc.SaveAs(docdir + 'copy.doc')
newdoc.Close()
# open and change a doc file: replace the first 'COM' with 'Automation'
olddoc = word.Documents.Open(docdir + 'copy.doc')
finder = word.Selection.Find
finder.text = 'COM'
finder.Execute()
word.Selection.TypeText('Automation')
olddoc.Close()
# and so on: see Word's COM interface specs
|
9,344 | c4898f3298c2febed476f99fe08bc5386527a47e | """
Convert file containing histograms into the response function
"""
import h5py
import wx
import numpy as np
import matplotlib.pyplot as plt
#############################################################################
# Select the file cantoning histograms,
# which will be converted to response function
app = wx.App ()
openFileDialog = wx.FileDialog (None, "Chose file containing histograms to get repose function", "", "", \
    "HDF5 files (*.hdf5)|*.hdf5", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
# Check whether user canceled
if openFileDialog.ShowModal() == wx.ID_CANCEL :
    raise ValueError("HDF5 file is not selected")
hist_filename = openFileDialog.GetPath()
del app
#############################################################################
# Adding up all histograms
with h5py.File(hist_filename, 'r') as F :
    for histogram in F["histograms"].values() :
        try :
            sum_histogram += histogram[...]
        except NameError :
            # first histogram initialises the accumulator
            sum_histogram = histogram[...]
    # Loading resolution
    Resolution = F["parameters/Resolution"][...]
#############################################################################
# Remove zeros past the last populated bin
# NOTE(review): slicing with [:cut_off] drops the last non-zero bin itself;
# [:cut_off + 1] may have been intended — confirm.
cut_off = np.nonzero(sum_histogram)[0].max()
sum_histogram = sum_histogram[:cut_off]
#############################################################################
# Normalize to max 1
# BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
sum_histogram = sum_histogram.astype(float)
sum_histogram /= sum_histogram.max()
# Delete the background (anything below 3% of the peak)
sum_histogram[ np.nonzero(sum_histogram < 0.03) ] = 0
#############################################################################
# plot
plt.plot( 1e-3*Resolution*np.arange(sum_histogram.size), sum_histogram )
plt.title ("Total histogram with resolution %d ps" % Resolution )
plt.xlabel("time (ns)")
plt.ylabel("counts")
plt.show()
# Save
sum_histogram.tofile("response_function_%dps.dat" % Resolution)
|
9,345 | 7cd6a8a106c21e8e377666d584e19d30c607b7d2 | # import os,sys
# BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.append(BASE_DIR)
from lib import common
from conf import settings
import random
import pickle
import os
import xlrd
import time
class Base:
    """Common persistence mixin: pickle objects into files named by id
    under the subclass's DB_PATH directory."""
    def save(self):
        """Serialize this object to <DB_PATH>/<id>."""
        # NOTE(review): the file handle is never closed explicitly;
        # relies on the garbage collector.
        file_path=r'%s/%s' %(self.DB_PATH,self.id)
        pickle.dump(self,open(file_path,'wb'))
    @classmethod
    def get_obj_by_id(cls,id):
        """Load and return the pickled object with the given id."""
        file_path=r'%s/%s' %(cls.DB_PATH,id)
        return pickle.load(open(file_path,'rb'))
class Subject(Base):
    """An exam question (four choices) persisted under QUESTION_PATH."""
    DB_PATH=settings.QUESTION_PATH
    def __init__(self,type,comment,choice,right_res,score=5):
        self.id=common.create_id()
        self.type=type            # question category label
        self.comment=comment      # question text
        self.choice=choice        # list of the four answer strings
        self.right_res=right_res  # set of correct option letters, e.g. {'A'}
        self.score=score          # points awarded for a correct answer
    @classmethod
    def create_from_file(cls,src_file):
        """Import questions from an Excel workbook (first sheet).

        Layout (rows start at index 2): a row with column 0 set starts a
        new question (type, text); following rows carry one choice each in
        column 2, with column 3 == 1 marking a correct choice.  Assumes
        every question has exactly 4 choices — TODO confirm.
        """
        data=xlrd.open_workbook(src_file)
        table=data.sheets()[0]
        subject={
            'type':None,
            'comment':None,
            'choice':[],
            'res':set(),
        }
        for i in range(2,table.nrows):
            row=table.row_values(i)
            # four choices collected: persist the question and reset
            if len(subject['choice'])==4:
                obj=cls(
                    subject['type'],
                    subject['comment'],
                    subject['choice'],
                    subject['res']
                )
                obj.save()
                subject={
                    'type':None,
                    'comment':None,
                    'choice':[],
                    'res':set()
                }
            if row[0]:
                subject['type']=row[0]
                subject['comment']=row[1]
            else:
                subject.setdefault('choice').append(row[2])
                if row[3] == 1:
                    # correct answer: record its leading letter, uppercased
                    res_str=row[2].strip()
                    res=res_str[0].upper()
                    subject['res'].add(res)
        # for-else: flush the final question once the loop completes
        else:
            obj=cls(
                subject['type'],
                subject['comment'],
                subject['choice'],
                subject['res']
            )
            obj.save()
    @classmethod
    def filter_question(cls):
        """Return 3 questions sampled at random from the question store."""
        id_l=os.listdir(settings.QUESTION_PATH)
        r_id_l=random.sample(id_l,3)
        return [cls.get_obj_by_id(id) for id in r_id_l]
    def __str__(self):
        return '<type: %s comment: %s>' %(self.type,self.comment)
class Customer(Base):
    """A quiz participant, persisted under CUSTOMER_PATH."""
    DB_PATH=settings.CUSTOMER_PATH
    def __init__(self,name,sex,age,phone):
        self.id=common.create_id()
        self.name=name
        self.sex=sex
        self.age=age
        self.phone=phone  # used as the lookup key in Record.get_obj_by_phone
class Record(Base):
    """One customer's completed quiz: answers, score, submission time."""
    DB_PATH=settings.RECORD_PATH
    def __init__(self,customer_id,record_list,total_score):
        self.id=common.create_id()
        self.customer_id=customer_id
        self.record_list=record_list
        self.total_score=total_score
        # submission timestamp, e.g. '2024-01-31 12:00:00'
        self.sub_time=time.strftime('%Y-%m-%d %X')
    @classmethod
    def get_obj_by_phone(cls,phone):
        """Return the first record whose customer has the given phone,
        or None (linear scan over all pickled records)."""
        records=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for record in records:
            customer_obj=Customer.get_obj_by_id(record.customer_id)
            if phone == customer_obj.phone:
                return record
class Prize(Base):
    """A prize that can be awarded, persisted under PRIZE_PATH."""
    DB_PATH=settings.PRIZE_PATH
    def __init__(self,name):
        self.id=common.create_id()
        self.name=name
    @classmethod
    def create_prize(cls):
        """Interactively register prizes until the operator answers N."""
        while True:
            name=input('奖品名: ').strip()
            if not name:continue
            obj=Prize(name)
            obj.save()
            choice=input('继续(Y/N)?: ').strip()
            if choice == 'N' or choice == 'n':
                break
    @classmethod
    def get_obj_by_name(cls,name):
        """Return the prize with the given name, or None (linear scan)."""
        prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for prize in prizes:
            if prize.name == name:
                return prize
    def __str__(self):
        return '<%s>' %self.name
class Customer2Prize(Base):
    """Link object recording which prize a customer has won."""
    DB_PATH=settings.C2P_PATH
    def __init__(self,customer_id,prize_id):
        self.id=common.create_id()
        self.customer_id=customer_id
        self.prize_id=prize_id
    @classmethod
    def get_obj_by_customer_id(cls,customer_id):
        """Return the link for the given customer, or None (linear scan)."""
        prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for prize in prizes:
            if prize.customer_id == customer_id:
                return prize
    @classmethod
    def draw_prize(cls,customer_id):
        '''
        Prize probabilities (as documented by the original author):
        0/100 Europe ten-country tour
        1/100 iPhone 7 Plus
        10/100 Mac computer
        50/100 collector's Alex photo album
        39/100 an Egon autograph
        '''
        # NOTE(review): the code below awards the Europe tour at num == 1
        # although the table above gives it probability 0 and the iPhone
        # 1/100 — the iPhone is never awarded; confirm intended behaviour.
        num=random.randint(1,100)
        if num == 1:
            # 1/100: Europe tour (see note above)
            prize_name='欧洲十国游'
        if num >1 and num <=11:
            # 10/100: Mac computer
            prize_name='mac电脑'
        if num > 11 and num <=61:
            # 50/100: collector's Alex photo album
            prize_name='珍藏版alex写真集一套'
        if num > 61:
            # 39/100: Egon autograph
            prize_name='egon签名一个'
        prize=Prize.get_obj_by_name(prize_name)
        obj=cls(customer_id,prize.id)
        obj.save()
        return prize_name
if __name__ == '__main__':
    # Manual entry point: interactively register prizes.
    # Subject.create_from_file(r'/Users/jieli/PycharmProjects/爬虫/t1/AnswerSys/test.xlsx')
    # res=Subject.filter_question()
    # for i in res:
    #     print(i)
    Prize.create_prize()
9,346 | f4e287f5fce05e039c54f1108f6e73020b8d3d8f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 RAPP
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: klpanagi@gmail.com, etsardou@iti.gr
## @file ServiceController/ServiceControllerSync.py
#
# @copyright Rapp Projecty EU 2015
# @author Konstantinos Panayiotou, [klpanagi@gmail.com]
#
from ServiceControllerBase import *
# high-level interface for asynchronously executing callables.
from concurrent.futures import as_completed
class AsyncHandler(object):
    """Handle for awaiting the result of an asynchronously executed call."""
    def __init__(self, future):
        """! Constructor

        @param future - Future object wrapping the pending call.
        """
        self.__future = future

    def wait(self, timeout=None):
        """! Wait for response.
        @param timeout - Optional argument. Set timeout for waiting for a response.
        @returns resp - Response object
        """
        # FIX: the docstring above was a stray class-level string in the
        # original, and `timeout` was accepted but never forwarded, so the
        # call always blocked indefinitely.  Now honours the timeout.
        resp = self.__future.result(timeout)
        return resp
|
9,347 | 796fada5dcd45ace8240760ac7e9bad41953ab56 | """
Chess state handling model.
"""
from concurrent.futures import ThreadPoolExecutor
from itertools import count
from json import dumps
from .base_board import BaseBoard, NoBoard
from .table_board import TableBoard
from .table_game import TableGame
__all__ = ['Board', 'NoBoard']
class Board(BaseBoard):
    """
    Chess board interaction model.
    """
    # Glyph table indexed by piece nibble; the last two entries are the
    # empty dark/light squares.
    EMOJI = [
        '⌛', '‼',
        '♝', '♗', '♚', '♔', '♞', '♘', '♟', '♙', '♛', '♕', '♜', '♖', '▪', '▫']
    def __init__(self, board=None, _id=None, active_player=True):
        """
        Set up board.
        """
        super().__init__(board, _id, active_player)
        # thread pool for background work (presumably player notifications
        # — confirm against BaseBoard.poke_player)
        self.executor = ThreadPoolExecutor()
    def __repr__(self):
        """
        Output the raw view of board.
        """
        return f'Board({ self.board !r})'
    def __str__(self):
        """
        Output the emoji view of board.
        """
        # For the inactive player the board is rendered rotated and with
        # each piece's low (colour) bit flipped, so each side sees itself
        # playing from the bottom.
        if self._active_player:
            def piece_to_index(piece):
                return (piece & 0xF)
        else:
            def piece_to_index(piece):
                return (piece & 0xE) | (0 if piece & 1 else 1)
        return '\n'.join(map(
            lambda posY, row: ''.join(map(
                lambda posX, piece: self.EMOJI[
                    piece_to_index(piece)
                    if piece else
                    14 + ((posY + posX) % 2)],
                count(), row)),
            count(),
            self.board if self._active_player else reversed(
                [reversed(row) for row in self.board])))
    def add_player_v1(self, dbsession, player):
        """
        Player 2 joins game.
        """
        assert player
        if self.player1:
            # Second arrival completes the game: persist the game row and
            # the initial board state, then notify player 1.
            self.player2 = player
            table_game = TableGame(
                game=self.id,
                player_one=self.player1,
                player_two=self.player2,
                one_won=True,
                two_won=True)
            table_board = TableBoard(
                board_state=dumps(tuple(map(tuple, self.board))),
                move_num=self._board.move_count,
                player=self.active_player(),
                game=self.id)
            table_board.game_link.append(table_game)
            dbsession.add(table_game)
            dbsession.add(table_board)
            self.poke_player(False)
            return {}
        # First arrival just claims the player-1 seat.
        self.player1 = player
        return {}
    def slice_cursor_v1(self, cursor=None, lookahead=1, complete=False):
        """
        Retrieve REST cursor slice.
        """
        return self.cursor_delegate.slice_cursor_v1(self._board, cursor, int(lookahead), complete)
    def update_state_v1(self, dbsession, state):
        """
        Make a move to a new state on the board.
        """
        moving_player = self.active_player()
        board = self.update(state)
        table_game = dbsession.query(TableGame).filter(
            TableGame.game == board.id).first()
        table_board = TableBoard(
            board_state=dumps(tuple(map(tuple, board.board))),
            move_num=board._board.move_count,
            player=board.active_player(),
            game=board.id)
        if table_game:  # TODO(grandquista)
            table_board.game_link.append(table_game)
        dbsession.add(table_board)
        if board:
            # game continues: hand the turn to the other player
            board.poke_player(False)
            return {'end': False}
        # Game over: record the outcome.
        # NOTE(review): both win flags cleared when both kings remain —
        # presumably a draw; confirm the one_won/two_won semantics.
        board.poke_player(True, moving_player)
        if board._board.has_kings():
            table_game.one_won = False
            table_game.two_won = False
        elif moving_player == table_game.player_one:
            table_game.two_won = False
        else:
            table_game.one_won = False
        board.close()
        return {'end': True}
|
9,348 | d4b432735a112ccb293bf2f40929846b4ce34cd0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import optparse
import logging
from pyspark import SparkContext
from pyspark import SparkConf
logger = logging.getLogger(__name__)
def create_context(appName):
    """Create and return a SparkContext carrying custom configuration."""
    logger.info("Creating Spark context - may take some while")
    # Custom setting: don't fail jobs whose output directory already exists.
    conf = SparkConf()
    conf.set("spark.hadoop.validateOutputSpecs", "false")
    return SparkContext(appName=appName, conf=conf)
def parse_options():
    """Parse command-line options and return the resulting options object."""
    cli = optparse.OptionParser(description='PySpark WordCount.')
    cli.add_option('-i', '--input', action='store', nargs=1,
                   default='s3://dimajix-training/data/alice/',
                   help='Input file or directory')
    cli.add_option('-o', '--output', action='store', nargs=1,
                   default='alice-counts',
                   help='Output file or directory')
    opts, _ = cli.parse_args()
    return opts
def run():
    """Parse args, build the Spark context and run the word-count pipeline.
    Side effects only: writes the sorted counts to opts.output."""
    opts = parse_options()
    logger.info("Creating Spark Context")
    sc = create_context(appName="WordCount")
    logger.info("Starting processing")
    # Classic word count: tokenize, drop empties, count per word,
    # sort by frequency descending, write out.
    sc.textFile(opts.input) \
        .flatMap(lambda x: x.split()) \
        .filter(lambda x: x != "") \
        .map(lambda x: (x,1)) \
        .reduceByKey(lambda x,y: x+y) \
        .sortBy(lambda x: x[1], ascending=False) \
        .saveAsTextFile(opts.output)
    logger.info("Successfully finished processing")
def main():
    """Configure INFO-level logging, then run the word-count job."""
    logging.basicConfig(level=logging.INFO)
    logging.getLogger('').setLevel(logging.INFO)
    logger.info("Starting main")
    run()
    logger.info("Successfully finished main")
if __name__ == "__main__":
    main()
9,349 | 1cd82883e9a73cfbe067d58c30659b9b2e5bf473 | data=[1,4,2,3,6,8,9,7]
def partition(data,l,h):
    """Partition data[l..h] (inclusive) around pivot data[l], in place,
    and return the pivot's final index.  Elements <= pivot end up on the
    left of the returned index, elements > pivot on the right."""
    i=l
    j=h
    pivot=data[l]
    while(i<j):
        # advance i past elements <= pivot (bounded at h-1)
        while(data[i]<=pivot and i<=h-1):
            i=i+1
        # retreat j past elements > pivot (bounded at l+1)
        while(data[j]>pivot and j>=l+1):
            j=j-1
        if(i<j):
            data[i],data[j]=data[j],data[i]
    # place the pivot at its sorted position
    data[l],data[j]=data[j],data[l]
    return j
def quickSort(data,l,h):
    """Sort data[l..h] in place with recursive quicksort."""
    if l >= h:
        # zero or one element: already sorted
        return
    pivot_index = partition(data, l, h)
    quickSort(data, l, pivot_index - 1)
    quickSort(data, pivot_index + 1, h)
# Sort the sample data in place and show the result.
quickSort(data,0,len(data)-1)
print(data)
|
9,350 | a325feba1c2bb588321429a045133d6eede9e8cf | #!/usr/bin/python
# pymd2mc.xyzfile
"""
"""
__author__ = 'Mateusz Lis'
__version__= '0.1'
from optparse import OptionParser
import sys
from time import time
from constants import R, T
from energyCalc import EnergyCalculator
from latticeProjector import LatticeProjectorSimple
from lattices import HexLattice
from structures.xyzfile import XYZFile
from utils import delLine, clearFile
def main():
    # Compute omega_AB per trajectory frame, write them to the output file,
    # and report the mean and standard deviation. (Python 2 module.)
    options = parseCommandLine()
    inFile = XYZFile(options.inXyzFilename)
    clearFile(options.outDatFilename)
    outFile = open(options.outDatFilename, 'w')
    i = 0
    startTime = time()
    omegas = []
    sumOmegas = 0L  # Python 2 long literal
    calc = EnergyCalculator(inFile, R, T)
    while True:
        i += 1
        if options.verbose:
            # overwrite the previous progress line on the terminal
            delLine()
            print i,
        omega = calc.getNextEnergy(options.symbol)
        if omega is None:
            # no more frames in the trajectory
            break
        omega , sim, diff = omega
        if omega > -10**4 and omega < 10**10:
            # keep only values inside the plausible range for statistics
            omegas.append(omega)
        # NOTE(review): sumOmegas accumulates ALL omegas, including filtered
        # ones, so it may differ from sum(omegas) printed below — confirm intent.
        sumOmegas += omega
        outFile.write("%d %f %f %f \n" % (i, omega, sim, diff))
    outFile.close()
    if options.verbose:
        print "Done. Execution time=%f" % (time() - startTime)
        print "omegas" ,sumOmegas, (sum(omegas))
    lenOmegas = len(omegas)
    # NOTE(review): raises ZeroDivisionError if no omega passed the filter.
    midOmega = (sum(omegas)/len(omegas))
    print "Result omegaAB = %f" % midOmega
    # population standard deviation of the accepted omegas
    sd = 0
    for omega in omegas:
        sd += (midOmega - omega)**2
    sd /= len(omegas)
    sd **= (1./2.)
    print "Standard deviation = %f" % sd
def parseCommandLine():
    """
    Declare the supported command line options and parse sys.argv.

    :return: the parsed options object
    """
    opt_parser = OptionParser(usage="%prog ", version="%prog " + __version__,
                          description='''
    This program calculates omegaAB value from a hexagonal lattice trajectory
     stored in xyz file (see for more details)''')
    opt_parser.add_option("-f", "--traj", dest="inXyzFilename",default = "hexTraj.xyz",
                      help="xyz input trajectory file (default traj.xyz)", metavar="INXYZFILE")
    opt_parser.add_option("-r", "--reference", dest="symbol",default = "P11",
                      help="reference particle name", metavar="ADATOM")
    opt_parser.add_option("-o", "--output", dest="outDatFilename", default="omega.dat",
                      help="output dat file with omega values for each frame. WARNING: it will be overriden", metavar="OUTXYZFILE")
    opt_parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default=True,
                      help="don't print status messages to stdout")
    parsed, _ = opt_parser.parse_args()
    return parsed
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
9,351 | 8e5d05d925d47a85ad7c211f26af7951be048d32 | import cv2
import numpy as np
import show_imgs as si
IMG_PATH = "../sample_imgs"
def blur():
    """Demonstrate mean blurring with cv2.filter2D versus cv2.blur.

    For every kernel size, a normalized box kernel is applied with
    filter2D and compared against the equivalent cv2.blur call; both
    result grids are displayed via show_imgs.
    """
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
    filter_imgs = {}
    blur_imgs = {}
    for ksize in kernel_sizes:
        title = f"ksize: {ksize}"
        # normalized box kernel: every coefficient equals 1/(w*h)
        kernel = np.ones(ksize)
        kernel /= kernel.size
        filter_imgs[title] = cv2.filter2D(image, -1, kernel)
        blur_imgs[title] = cv2.blur(image, ksize)
    # display both grids; the return values were unused locals before and
    # are intentionally dropped
    si.show_imgs(filter_imgs, "cv2.filter2D", 3)
    si.show_imgs(blur_imgs, "cv2.blur", 3)
def gaussian():
    """Compare plain box blur against Gaussian blur on the sample image."""
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    smoothing_kernel = (5, 5)
    blur_imgs = {
        "original": image,
        "blur": cv2.blur(image, smoothing_kernel),
        "GaussianBlur": cv2.GaussianBlur(image, smoothing_kernel, 0),
    }
    result_img = si.show_imgs(blur_imgs, "GaussianBlur", 3, 1000)
def bilateral():
    """Contrast Gaussian smoothing with edge-preserving bilateral filtering."""
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    smoothing_kernel = (5, 5)
    blur_imgs = {
        "original": image,
        "gaussian": cv2.GaussianBlur(image, smoothing_kernel, 0),
        # larger sigma values smooth more aggressively while keeping edges
        "bilateral (5,50,50)": cv2.bilateralFilter(image, 5, 50, 50),
        "bilateral (5,150,150)": cv2.bilateralFilter(image, 5, 150, 150),
    }
    result_img = si.show_imgs(blur_imgs, "Bilateral Filter", 2)
if __name__ == "__main__":
# gaussian()
bilateral()
|
9,352 | 86c1aee21639958f707f99bc2468e952ad6c1859 | from app import config
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
# Engine bound to the application's configured database URI.
engine = create_engine(config.DB_URI)
# Thread-local (scoped) session factory; call Session() to obtain the
# session for the current thread.
Session = scoped_session(sessionmaker(bind=engine))
|
9,353 | 1eeb7a539f43e9fb013494e2aa0d81b4eab0ae1a | import csv
from sys import argv
import re
import sys
datasave = []
# `is not` compares object identity, not value (and only works here by the
# CPython small-int cache); use != for the argument-count check.
if len(argv) != 3:  # stop usage if not correct input
    print('Usage: python dna.py data.csv sequence.txt')
    sys.exit()
# Read the whole STR database (header row included) into datasave.
with open(argv[1], 'r') as csv_file:
    datafile = csv.reader(csv_file)
    datasave = list(datafile)
# Number of STR columns: the first column holds the person's name.
rowlength = len(datasave[0]) - 1
seqfile = open(argv[2], 'r')  # DNA sequence file, read later
countvector = []
def STR(x):
    """Return the x-th STR name for the database given on the command line.

    Uses the small STR list for databases/small.csv and the large list for
    databases/large.csv; any other database path falls back to the large
    list (matching the original behaviour, which pre-selected it).

    :param x: zero-based STR column index
    :return: the STR subsequence string
    """
    large = ["AGATC", "TTTTTTCT", "AATG", "TCTAG", "GATA", "TATC", "GAAA", "TCTG"]
    small = ["AGATC", "AATG", "TATC"]
    if argv[1] == 'databases/small.csv':
        return small[x]
    return large[x]
seqfile2 = seqfile.read()
# Reminder: x occurrences of the repeated pattern means the STR is repeated
# x-1 additional times in the task's wording (2 occurrences = repeated once).
for i in range(rowlength):
    newcount = 0
    STR1 = STR(i)
    # Grow the repeat count until STR1 repeated `newcount` times no longer
    # appears in the sequence; record the last successful count.
    # (The unused duplicate re.findall call and `Bfound` were removed.)
    while True:
        if re.findall(STR1 * newcount, seqfile2) == []:
            countvector.append(newcount - 1)
            break
        newcount += 1
# Compare the counts against the CSV rows, which hold strings; the previous
# str()/replace()/split() round-trip produced exactly these values.
search_list = [str(c) for c in countvector]
rowcount = 0
rowplacement = 0
for row in datasave:
    truecount = 0
    for i in range(rowlength):
        if search_list[i] in datasave[rowcount]:
            truecount += 1  # this STR count matches the row
    if truecount == rowlength:  # every STR count matched this person
        rowplacement = rowcount
        print(datasave[rowplacement][0])
        break
    rowcount += 1
# `is not` on ints is a fragile identity comparison; use != instead.
if truecount != rowlength and rowplacement == 0:
    print('No match')
9,354 | 1257b90781a213ca8e07f67a33b8e847d0525653 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
    """A user-authored blog post with an optional image, likes and dislikes."""
    title = models.CharField(max_length=40)
    content = models.TextField()
    # auto_now refreshes the timestamp on every save, not only on creation
    date_published = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    img = models.ImageField(upload_to='post_img', null=True, blank=True)
    like = models.ManyToManyField(User, related_name='like_user', blank=True)
    dislike = models.ManyToManyField(User, related_name='dislike_user',blank=True)
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A top-level comment a user leaves on a post."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment_box = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.user.username
class Comment_to_comment(models.Model):
    """A reply attached to an existing comment."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)
    def __str__(self):
        # shows the parent comment's text, not this reply's own text
        return self.from_comment.comment_box
class Points(models.Model):
    """An integer score attached to a post."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
|
9,355 | 80bf208f1d658b639d650af8208a744ed2dd258f | import functools
import requests
import time
import argparse
class TracePoint:
    """Collects trace records ("class,timestamp,function" strings) and renders
    them as a dagre-d3 style directed graph with HTML labels.

    All state lives in class-level lists shared by every instance.
    """
    # class names seen, in first-seen order (index drives node colour)
    classes = []
    # function signatures seen, in first-seen order (index drives label colour)
    funcs = []
    # raw trace records as "class,timestamp,function" strings
    flow = []
    @staticmethod
    def clear():
        # reset all shared trace state
        TracePoint.classes = []
        TracePoint.funcs = []
        TracePoint.flow = []
    def __init__(self, cls, func, t):
        # record one trace event; registries keep first-seen order
        if cls not in TracePoint.classes:
            TracePoint.classes.append(cls)
        # NOTE(review): this membership test checks `cls`, not `func` — looks
        # like a bug; `funcs` can miss entries or collect duplicates. Confirm.
        if cls not in TracePoint.funcs:
            TracePoint.funcs.append(func)
        TracePoint.flow.append(",".join([cls,t, func]))
    def render_flow(self):
        # Yield one edge definition per consecutive pair of trace records,
        # skipping edges that were already emitted.
        first = TracePoint.flow[0]
        recods = set()
        for no,i in enumerate(TracePoint.flow[1:]):
            cls,t, func = i.split(',',2)
            fcls,ft, ffunc = first.split(',', 2)
            # node names are the function names without their argument list
            fn = func.split("(")[0]
            ffn = ffunc.split("(")[0]
            label = "{l} -> {c}".format(l=ffn, c=fn)
            if label in recods:
                continue
            recods.add(label)
            lc,_ = self.get_color(cls, func)
            yield """{l} -> {c} [label="<span style='color:gray;'>{t}</span>|<span style='font-size:18px;color:red'>{no}</span>" labelType="html" lineInterpolate=basis arrowheadStyle="fill: {lc}" style="stroke: {lc}; stroke-width: 1px;"];""".format(no=no,l=ffn, c=fn, t=time.ctime(float(t)), lc=lc)
            first = i
    def render_var(self, one):
        # Render one node definition with an HTML label for a trace record.
        cls,t, func = one.strip().split(",", 2)
        color, color_f = self.get_color(cls, func)
        fn = func.split("(")[0]
        tmp = """{func_name} [labelType="html" label="<span style='font-size:28px;color:{color_f}'>{func}</span><span style='color:{color};'>class:{cls}</span>"];""".format(func_name=fn, color=color,color_f=color_f,cls=cls, func=func)
        return tmp
    def get_color(self, cls, func):
        # Derive deterministic hex colours from the registry indices; pads
        # short hex strings with zeros to get a valid #RGB value.
        base = 4096 // len(TracePoint.classes)
        base_f = 4096 // len(TracePoint.funcs)
        c = hex(base * TracePoint.classes.index(cls)).replace("0x", "#")
        c_f = hex(base_f * TracePoint.funcs.index(func)).replace("0x", "#")
        if len(c) < 4:
            c = c + '0'* (4- len(c))
        if len(c_f) < 4:
            c_f = c_f + '0'* (4- len(c_f))
        return c,c_f
    def __repr__(self):
        # Assemble the full digraph source: node definitions then edges.
        TEMP = """
        digraph {
        /* Note: HTML labels do not work in IE, which lacks support for <foreignObject> tags. */
        node [rx=7 ry=7 labelStyle="font: 300 14px 'Helvetica Neue', Helvetica"]
        edge [labelStyle="font: 300 14px 'Helvetica Neue', Helvetica"]
        %s
        }
        """
        fcon = "\n\t".join([self.render_var(i) for i in TracePoint.flow])
        lcon = "\n\t".join(self.render_flow())
        return TEMP % (fcon + lcon)
def trace(cls):
    """Decorator factory: wrap a function so every call prints its name and
    arguments before delegating to the original.

    The ``cls`` argument is accepted but not used by the wrapper.
    """
    def decorator(target):
        @functools.wraps(target)
        def wrapper(*args, **kwargs):
            print(target.__name__, args, "|", kwargs)
            return target(*args, **kwargs)
        return wrapper
    return decorator
def trace_cls(method):
    """Class decorator factory: instrument attribute access on a class.

    Every access to an attribute defined directly on the class is logged —
    either POSTed to a local debug server (when `method` contains "http://")
    or appended to the file named by `method` as "class,timestamp,signature".
    """
    def _trace_cls(cls):
        # Get the original implementation
        orig_getattribute = cls.__getattribute__
        # Make a new definition
        def new_getattribute(self, name):
            if name in cls.__dict__:
                f = getattr(cls, name)
                # NOTE(review): co_varnames includes locals as well as
                # parameters, so the rendered signature may list extra names.
                args = "(%s)" % ', '.join(f.__code__.co_varnames)
                t = str(time.time())
                if "http://" in method:
                    # forward the record to the debug server (fixed port)
                    requests.post("http://localhost:12222/", data={
                        'class':cls.__name__,
                        'fun':name + args,
                        'time':t,
                    })
                else:
                    # append the record to the trace file
                    with open(method, "a+") as fp:
                        s = ",".join([cls.__name__,t,name + args])
                        fp.write(s + "\n")
            return orig_getattribute(self, name)
        # Attach to the class and return
        cls.__getattribute__ = new_getattribute
        return cls
    return _trace_cls
def main():
    """Replay a saved trace file by POSTing each record to the debug server."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-l","--load",default=None,help="loadfile")
    parser.add_argument("--url", default='http://localhost:12222',help="debug server")
    args = parser.parse_args()
    with open(args.load) as trace_file:
        for record in trace_file:
            cls_name, stamp, signature = record.strip().split(',', 2)
            payload = {
                'class': cls_name,
                'fun': signature,
                'time': stamp,
            }
            requests.post(args.url, data=payload)
# Run the trace replay when executed as a script.
if __name__ == '__main__':
    main()
|
9,356 | 3e4771d074218fb0a77332ee61a4cc49f1c301b7 | # SPDX-License-Identifier: Apache-2.0
# Copyright (C) 2020 ifm electronic gmbh
#
# THE PROGRAM IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND.
#
"""
This module provides the recording control GUI service for the nexxT framework.
"""
import logging
from pathlib import Path
from nexxT.Qt.QtCore import Qt, QStorageInfo
from nexxT.Qt.QtGui import QAction, QIcon, QTextOption
from nexxT.Qt.QtWidgets import QApplication, QStyle, QWidget, QBoxLayout, QToolBar, QFileDialog
from nexxT.core.Utils import assertMainThread, ElidedLabel
from nexxT.core.Exceptions import PropertyCollectionPropertyNotFound
from nexxT.interface import Services
from nexxT.services.SrvRecordingControl import MVCRecordingControlBase
logger = logging.getLogger(__name__)
class MVCRecordingControlGUI(MVCRecordingControlBase):
    """
    This service implements a GUI frontend for the recording service:
    menu actions and a dock widget with start/stop/choose-directory
    controls plus live status labels.
    """
    def __init__(self, config):
        """Create actions, menu entries and the dock widget; wire signals.

        :param config: the nexxT configuration object (provides guiState
                       and load/save notifications)
        """
        assertMainThread()
        super().__init__(config)
        # state
        self._directory = str(Path('.').absolute())
        # gui
        srv = Services.getService("MainWindow")
        # persist/restore the chosen directory with the configuration
        config.configLoaded.connect(self._restoreState)
        config.configAboutToSave.connect(self._saveState)
        self._config = config
        recMenu = srv.menuBar().addMenu("&Recording")
        style = QApplication.style()
        # theme icons with bundled/standard-icon fallbacks
        self.actStart = QAction(QIcon.fromTheme("media-record", QIcon(":icons/media-record.svg")),
                                "Start Recording", self)
        self.actStop = QAction(QIcon.fromTheme("media-playback-stop", style.standardIcon(QStyle.SP_MediaStop)),
                               "Stop Recording", self)
        self.actSetDir = QAction(QIcon.fromTheme("document-open-folder", style.standardIcon(QStyle.SP_DirIcon)),
                                 "Choose directory ...", self)
        # actions stay disabled until a filter advertises recording features
        self.actStart.setEnabled(False)
        self.actStop.setEnabled(False)
        self.actSetDir.setEnabled(False)
        self.actStart.triggered.connect(self._startTriggered)
        self.actStop.triggered.connect(self._stopTriggered)
        self.actSetDir.triggered.connect(self._setDir)
        recMenu.addAction(self.actStart)
        recMenu.addAction(self.actStop)
        recMenu.addAction(self.actSetDir)
        self.dockWidget = srv.newDockWidget("RecordingControl", None, Qt.LeftDockWidgetArea,
                                            defaultLoc="PlaybackControl")
        self.dockWidgetContents = QWidget(self.dockWidget)
        self.dockWidget.setWidget(self.dockWidgetContents)
        toolLayout = QBoxLayout(QBoxLayout.TopToBottom, self.dockWidgetContents)
        toolLayout.setContentsMargins(0, 0, 0, 0)
        toolBar = QToolBar()
        toolLayout.addWidget(toolBar)
        toolBar.addAction(self.actStart)
        toolBar.addAction(self.actStop)
        toolBar.addAction(self.actSetDir)
        # elided labels so long paths / status lines do not widen the dock
        self._directoryLabel = ElidedLabel(self._directory, parent=self.dockWidgetContents)
        to = self._directoryLabel.textOption()
        to.setWrapMode(QTextOption.NoWrap)
        self._directoryLabel.setTextOption(to)
        self._directoryLabel.setElideMode(Qt.ElideMiddle)
        self._statusLabel = ElidedLabel("(disabled)", parent=self.dockWidgetContents)
        to = self._statusLabel.textOption()
        to.setWrapMode(QTextOption.NoWrap)
        self._statusLabel.setTextOption(to)
        self._statusLabel.setElideMode(Qt.ElideMiddle)
        toolLayout.addWidget(self._directoryLabel)
        toolLayout.addWidget(self._statusLabel, stretch=100)
        #toolLayout.addStretch(100)
        # base-class signals carrying recording progress / errors
        self.statusUpdate.connect(self._onUpdateStatus)
        self.notifyError.connect(self._onNotifyError)
    def _startTriggered(self):
        # start recording into the configured directory and flip the actions
        self.startRecording(self._directory)
        self.actStart.setEnabled(False)
        self.actStop.setEnabled(True)
    def _stopTriggered(self):
        # stop the running recording and flip the actions back
        self.stopRecording()
        self.actStart.setEnabled(True)
        self.actStop.setEnabled(False)
    def _setDir(self):
        # let the user pick the recording target directory
        tdir = QFileDialog.getExistingDirectory(parent=self.dockWidget,
                                                caption="Select recording target directory",
                                                dir=self._directory)
        if tdir != "" and tdir is not None:
            self._directory = str(Path(tdir).absolute())
            self._directoryLabel.setText(self._directory)
    def _supportedFeaturesChanged(self, featureset):
        # enable the GUI only while at least one filter supports recording
        if len(featureset) > 0 and not self.actSetDir.isEnabled():
            self.actStart.setEnabled(True)
            self.actStop.setEnabled(False)
            self.actSetDir.setEnabled(True)
            self._statusLabel.setText("inactive")
        elif len(featureset) == 0 and self.actSetDir.isEnabled():
            self.actStart.setEnabled(False)
            self.actStop.setEnabled(False)
            self.actSetDir.setEnabled(False)
            self._statusLabel.setText("(disabled)")
    def _onUpdateStatus(self, _, file, length, bytesWritten):
        # Update (or remove) the status line for `file`; negative length /
        # bytesWritten mean "unknown" and are mapped to None.
        lines = self._statusLabel.text().split("\n")
        if length < 0:
            length = None
        if bytesWritten < 0:
            bytesWritten = None
        updated = False
        # human-readable number of bytes written so far
        if bytesWritten is None:
            bw = "??"
        elif bytesWritten < 1024:
            bw = f"{bytesWritten:3d} bytes"
        elif bytesWritten < 1024*1024:
            bw = f"{bytesWritten/1024:.1f} kb"
        elif bytesWritten < 1024*1024*1024:
            bw = f"{bytesWritten/1024/1024:.1f} Mb"
        else:
            bw = f"{bytesWritten/1024/1024/1024:.1f} Gb"
        # human-readable recording length
        if length is None:
            sl = "?? s"
        elif length < 60:
            sl = f"{length:.1f} sec"
        else:
            sl = f"{length/60:.1f} min"
        # estimate remaining recording time from free disk space
        bytesAv = QStorageInfo(file).bytesAvailable()
        if length is not None and bytesWritten is not None and bytesAv >= 0 and bytesWritten > 0:
            timeAv = length*bytesAv/bytesWritten - length
            if timeAv < 60:
                av = f"{timeAv:.1f} sec"
            elif timeAv < 3600:
                av = f"{timeAv/60:.1f} min"
            else:
                av = "> 1 hour"
        else:
            av = "?? s"
        if length is not None or bytesWritten is not None:
            newl = Path(file).name + ": " + sl + " | " + bw + " R: " + av
        else:
            # both unknown: the file's status line shall be removed
            newl = None
        if newl is not None:
            # replace the existing line for this file, or append a new one
            for i, l in enumerate(lines):
                if l.startswith(Path(file).name + ":"):
                    updated = True
                    lines[i] = newl
                    break
            if not updated:
                lines.append(newl)
            if lines[0] == "inactive":
                lines = lines[1:]
        else:
            # drop this file's line; fall back to "inactive" when empty
            toDel = None
            for i, l in enumerate(lines):
                if l.startswith(Path(file).name + ":"):
                    toDel = i
                    break
            if toDel is not None:
                lines = lines[:toDel] + lines[toDel+1:]
            if len(lines) == 0:
                lines.append("inactive")
        self._statusLabel.setText("\n".join(lines))
    def _onNotifyError(self, originFilter, errorDesc):
        # show an error line for the originating filter in the status label
        lines = self._statusLabel.text().split("\n")
        newl = originFilter.objectName() + ": " + "ERROR: " + errorDesc
        updated = False
        for i, l in enumerate(lines):
            if l.startswith(originFilter.objectName() + ":"):
                updated = True
                lines[i] = newl
                break
        if not updated:
            lines.append(newl)
        if lines[0] == "inactive":
            lines = lines[1:]
        self._statusLabel.setText("\n".join(lines))
    def _defineProperties(self):
        # make sure the directory property exists in the gui state
        propertyCollection = self._config.guiState()
        propertyCollection.defineProperty("RecordingControl_directory",
                                          str(Path('.').absolute()),
                                          "Target directory for recordings")
    def _saveState(self):
        """
        Saves the state of the playback control
        :return:
        """
        assertMainThread()
        self._defineProperties()
        propertyCollection = self._config.guiState()
        try:
            propertyCollection.setProperty("RecordingControl_directory", self._directory)
        except PropertyCollectionPropertyNotFound:
            pass
    def _restoreState(self):
        """
        Restores the state of the playback control from the given property collection
        :return:
        """
        assertMainThread()
        self._defineProperties()
        propertyCollection = self._config.guiState()
        logger.debug("before restore dir=%s", self._directory)
        d = propertyCollection.getProperty("RecordingControl_directory")
        if Path(d).exists():
            self._directory = d
            self._directoryLabel.setText(self._directory)
        logger.debug("after restore dir=%s", self._directory)
|
9,357 | 59233cd45000cd6d6ad0876eb3812599392d7c05 | # -*- coding:utf-8 -*-
__author__ = 'leandro'
from datetime import *
from PyQt4 import QtGui, QtCore
from baseDatos.ventas.venta import NotaCredito
from gui import CRUDWidget,MdiWidget
from ventanas import Ui_vtnDevolucionDeCliente, Ui_vtnReintegroCliente, Ui_vtnVentaContado
from baseDatos.obraSocial import ObraSocial as ObraSocialModel
from baseDatos.productos import Producto as ProductoModel
from baseDatos.productos import Medicamento as MedicamentoModel
from baseDatos.productos import Monodroga as MonodrogaModel
from baseDatos.obraSocial import Descuento as DescuentoModel
from baseDatos.productos import Lote as LoteModel
from baseDatos.productos import LoteProducto as LoteProductoModel
from baseDatos.ventas import Factura as FacturaModel
from baseDatos.ventas import DetalleFactura as DetalleFacturaModel
from baseDatos.ventas import NotaCredito as NotaCreditoModel
from baseDatos.ventas import DetalleNotaCredito as DetalleNCModel
from baseDatos.ventas import CobroCliente as CobroClienteModel
from genComprobantes import generarNotaCredito,generarFactura
from validarDatos import ValidarDatos
from ventanas import Ui_Dialog
from gui.signals import PoolOfWindows
class DevolucionDeCliente(CRUDWidget, Ui_vtnDevolucionDeCliente):
    """
    Class that models the customer-return workflow (Devolucion al Cliente):
    looks up an invoice, lets the user return items lot by lot, and issues
    the resulting credit note. (Python 2 / PyQt4 module.)
    """
    plazo = 7  # days allowed for a return after the invoice was issued
    def __init__(self,mdi):
        # Wire up widget signals and initialize per-operation state.
        MdiWidget.__init__(self, mdi)
        self.sesion = self.mdi().window().getSesionBD()
        self.validadores()
        self.btnBuscar.pressed.connect(self.buscarFactura)
        self.tableFactura.doubleClicked.connect(self.devolverDetalle)
        self.btnAceptar.pressed.connect(self.confirmarOperacion)
        self.btnCancelar.pressed.connect(self.cancelarOperacion)
        self.lineNumero.returnPressed.connect(self.buscarFactura)
        self.facturaSeleccionada = None
        self.notaCredito = None
        self.productosSeleccionados = 0
        self.detallesDevueltos = {}
        self.lotesDevueltos = {}
        self.data = {}
    def validadores(self):
        # Attach input validators to the required fields.
        camposRequeridos = [getattr(self,"lineNumero")]
        ValidarDatos.setValidador(camposRequeridos)
    def buscarFactura(self):
        """
        Looks up the invoice for the entered number and, when it is
        eligible for a return, loads its detail lines into the table.
        :return:
        """
        if not self.lineNumero.isEnabled() and self.facturaSeleccionada != None:
            QtGui.QMessageBox.information(self,"Aviso","Ya se ha seleccionado una factura")
        elif not self.lineNumero.isEnabled():
            # no invoice loaded yet: re-enable the input for a new search
            self.lineNumero.setEnabled(True)
            self.lineNumero.clear()
            self.limpiarTabla(self.tableFactura)
        else:
            self.numeroFacturaActual=str(self.lineNumero.text())
            if len(self.numeroFacturaActual)==0:
                QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("No se ha ingresado número de factura"))
            else:
                self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)
                # eligibility checks: exists, no credit note yet, within the
                # return window, and not yet settled with the social security
                if self.facturaSeleccionada==None:
                    QtGui.QMessageBox.warning(self,"Aviso","La factura seleccionada no existe")
                elif self.facturaSeleccionada.getNC()!=None:
                    QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("La factura ya ha posee una Nota de Crédito"))
                    self.facturaSeleccionada = None
                elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():
                    QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("El tiempo permitido para la devolución ha expirado"))
                elif self.facturaSeleccionada.estaLiquidada(self.sesion):
                    print self.facturaSeleccionada.estaLiquidada(self.sesion)
                    QtGui.QMessageBox.information(self,"Aviso","La factura se encuentra liquidada a la Obra Social")
                else:
                    self.lineNumero.setEnabled(False)
                    self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.sesion),
                                       ["nro_linea","producto","cantidad","importe"])
    def obtenerValoresItem(self,row):
        """
        Collects the cell values of one row of the
        invoice-details table.
        :param row: row number
        :return: list with the row's cell values
        """
        values=[]
        for col in range(0,self.tableFactura.columnCount()):
            values.append(self.tableFactura.item(row,col).text())
        return values
    def armarItem(self,item,cantidad,key):
        """
        Builds and stores the credit-note detail line that
        corresponds to one returned invoice detail.
        :param item: list with the invoice-detail row values
        :param cantidad: returned quantity
        :param key: key of the returned invoice detail
        :return:
        """
        row=self.tableNC.rowCount()
        self.tableNC.insertRow(row)
        for col, elemento in enumerate(item[1:]):
            self.tableNC.setItem(row,col,QtGui.QTableWidgetItem(item[col+1]))
        self.tableNC.item(row,1).setText(str(cantidad))
        # array with the information of the added item
        self.data[key] = [str(item[1]),cantidad,0,float(item[3])]
    def devolverDetalle(self):
        """
        Adds the invoice detail selected by the user to the
        credit note, asking for quantity and lot until the whole
        detail has been accounted for.
        :return:
        """
        rowActual=self.tableFactura.currentItem().row()
        signal = QtGui.QMessageBox.information(self,"Confirmación","¿Desea devolver este item?",\
                                               QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)
        if signal == QtGui.QMessageBox.Ok:
            producto = int(self.tableFactura.item(rowActual,1).text())
            cantidad_detalle = int(self.tableFactura.item(rowActual,2).text())
            linea = int(self.tableFactura.item(rowActual,0).text())
            nro_factura = int(self.lineNumero.text())
            detalle = FacturaModel.getDetalle(nro_factura,linea,self.sesion)
            lotes_detalle = detalle.devolverLotes(self.sesion)
            # NOTE(review): temp aliases lotes_detalle (no copy is made), so
            # updating temp also mutates lotes_detalle — confirm this is intended.
            temp = lotes_detalle
            finalize_actualizacion = False
            cantidad_restante = cantidad_detalle
            while not finalize_actualizacion:
                cantidad, ok = QtGui.QInputDialog.getInt(self,"Cantidad","Ingrese cantidad del producto",1,1,2000,5)
                if ok == False:
                    # user cancelled: restore the displayed quantity
                    finalize_actualizacion = True
                    self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))
                    break
                lote, ok=QtGui.QInputDialog.getText(self,"Lote","Ingrese lote")
                if ok == False:
                    finalize_actualizacion = True
                    self.tableFactura.item(rowActual,2).setText(str(cantidad_detalle))
                    break
                if not lote in lotes_detalle.keys():
                    QtGui.QMessageBox.information(self,"Aviso","El lote ingresado no es valido para este detalle")
                elif lotes_detalle[str(lote)] == 0:
                    QtGui.QMessageBox.information(self,"Aviso","Los productos de este lote ya han sido devueltos")
                elif cantidad > lotes_detalle[str(lote)]:
                    QtGui.QMessageBox.information(self,"Aviso","La cantidad ingresada es mayor a la esperada para este lote")
                else:
                    temp[str(lote)] -= cantidad
                    cantidad_restante -= cantidad
                    self.tableFactura.item(rowActual,2).setText(str(cantidad_restante))
                    # once every lot reaches zero, the detail is fully returned
                    if sum(map(lambda x: temp[x],temp)) == 0:
                        self.productosSeleccionados +=1
                        key = int(self.tableFactura.item(rowActual,0).text())
                        self.detallesDevueltos[key] = detalle
                        self.armarItem(self.obtenerValoresItem(rowActual),cantidad_detalle,key)
                        self.tableFactura.removeRow(rowActual)
                        finalize_actualizacion = True
    def limpiarVentana(self):
        """
        Clears the window's components.
        :return:
        """
        self.limpiarTabla(self.tableFactura)
        self.lineNumero.setEnabled(True)
        self.lineNumero.clear()
        self.limpiarTabla(self.tableNC)
    def calcularTotal(self):
        """
        Computes the total to refund on the
        credit note.
        :return: total amount to refund
        """
        subtotales=[]
        for row in range(0,self.tableNC.rowCount()):
            subtotales.append(float(self.tableNC.item(row,2).text()))
        return sum(subtotales)
    def confirmarOperacion(self):
        """
        Persists and prints the credit note once the user
        has confirmed the operation.
        :return:
        """
        if self.productosSeleccionados != 0:
            nc = NotaCreditoModel(NotaCreditoModel.generarNumero(self.sesion))
            nc.guardar(self.sesion)
            for nro_lnc, nro_lfactura in enumerate(self.detallesDevueltos):
                detalle_nc = DetalleNCModel(nc.numero,nro_lnc+1,self.facturaSeleccionada.numero,nro_lfactura)
                detalle_nc.setImporte(self.data[nro_lfactura][3])
                detalle_nc.guardar(self.sesion)
                self.detallesDevueltos[nro_lfactura].devolver(self.sesion) # returns the associated invoice detail
            self.facturaSeleccionada.setNC(nc.numero)
            self.facturaSeleccionada.modificar(self.sesion)
            QtGui.QMessageBox.information(self,"Aviso","La factura ha sido devuelta")
            self.objectModified.emit()
            cobros = self.facturaSeleccionada.getCobros(self.sesion)
            # cash-only payments are refunded in cash right away
            if len(cobros) == 1 and cobros[0].tipo == "Efectivo":
                QtGui.QMessageBox.information(self,"Devolucion","El importe en efectivo a entregar es de: $%.2f" % self.calcularTotal())
            # build the dictionary with the data needed to print the credit note
            data = {}
            data["numero"] = nc.numero
            data["fecha"] = nc.fecha_emision
            data["detalles"] = self.data.values()
            generarNotaCredito(data)
            self.facturaSeleccionada=None
            self.productosSeleccionados=0
            self.detallesDevueltos = {}
            self.limpiarVentana()
            self.data = {}
        else:
            QtGui.QMessageBox.information(self,"Devolucion Cliente","No se ha agregado ningun producto para devolver")
    def cancelarOperacion(self):
        """
        Cancels the credit note being built, restores the
        transient state to its original values and clears the window.
        If no credit note was started, it only clears the window.
        :return:
        """
        signal = QtGui.QMessageBox.warning(self,"Advertencia",QtCore.QString.fromUtf8("¿Desea cancelar la operación?"),\
                                           QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)
        if signal == QtGui.QMessageBox.Ok:
            self.data = {}
            self.facturaSeleccionada = None
            self.productosSeleccionados = 0
            self.detallesDevueltos = {}
            self.limpiarVentana()
    def cancelarVentana(self):
        # Reset all transient state and clear the window without asking.
        self.data = {}
        self.facturaSeleccionada = None
        self.productosSeleccionados = 0
        self.detallesDevueltos = {}
        self.limpiarVentana()
class ReintegroCliente(CRUDWidget, Ui_vtnReintegroCliente):
"""
Clase encargada de modelar la funcionalidad de Reintegro al cliente
"""
plazo = 7
def __init__(self, mdi):
MdiWidget.__init__(self, mdi)
self.sesion = self.mdi().window().getSesionBD()
self.cargarObras()
self.validadores()
self.btnBuscarOs.pressed.connect(self.buscarOs)
self.tableOs.itemDoubleClicked.connect(self.obtenerObra)
self.btnBuscarFac.pressed.connect(self.buscarFactura)
self.lineRazon.returnPressed.connect(self.filtrarObra)
self.lineCuit.returnPressed.connect(self.filtrarObra)
self.lineNumeroFac.returnPressed.connect(self.buscarFactura)
self.btnAceptar.pressed.connect(self.confirmarOperacion)
self.btnCancelar.pressed.connect(self.cancelarOperacion)
self.tableFactura.itemDoubleClicked.connect(self.agregarProducto)
self.gbFactura.setEnabled(False)
self.gbNotaCredito.setEnabled(False)
self.detallesReintegrables = []
self.detallesImprimibles = []
self.obraSocial = None
self.facturaSeleccionada = None
def filtrarObra(self):
"""
Filtra la tabla de Obras Sociales de acuerdo
a los criterios de busqueda impuestos
:return:
"""
razon_social = str(self.lineRazon.text())
cuit = str(self.lineCuit.text())
data = self.getAllTabla(self.tableOs)
if razon_social != "":
dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())
else:
dataRazon = data.values()
if cuit != "":
dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)
else:
dataCuit = dataRazon
for dato in data:
self.tableOs.setRowHidden(dato,False)
for dato in data:
if not data[dato] in dataCuit:
self.tableOs.setRowHidden(dato,True)
def cargarObras(self):
"""
Carga las Obras Sociales disponibles
en la tabla correspondiente
:return:
"""
self.cargarObjetos(self.tableOs,
ObraSocialModel.buscarTodos("razon_social", self.sesion).all(),
("razon_social", "cuit", "direccion")
)
def validadores(self):
"""
Setea los validadores correspondientes a
los campos de la ventana
:return:
"""
camposRequeridos = [getattr(self,"lineRazon")]
ValidarDatos.setValidador(camposRequeridos)
camposRequeridos = [getattr(self,"lineCuit")]
ValidarDatos.setValidador(camposRequeridos)
camposRequeridos = [getattr(self,"lineNumeroFac")]
ValidarDatos.setValidador(camposRequeridos)
def buscarOs(self):
"""
Busca una Obra Social de acuerdo
a los criterios del usuario
:return:
"""
if self.lineRazon.isEnabled():
self.filtrarObra()
elif not self.lineRazon.isEnabled() and (self.tableNC.rowCount() != 0 or self.tableFactura.rowCount() != 0):
QtGui.QMessageBox.information(self,"Aviso","Imposible cambiar de Obra Social. Ya se ha seleccionado\
una")
else:
self.gbNotaCredito.setEnabled(False)
self.gbFactura.setEnabled(False)
self.lineRazon.clear()
self.lineRazon.setEnabled(True)
self.lineCuit.clear()
self.lineCuit.setEnabled(True)
self.tableOs.setEnabled(True)
def obtenerObra(self):
"""
Carga la Obra Social seleccionada
en los campos correspondientes.
:return:
"""
rowActual = self.tableOs.currentItem().row()
self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))
self.lineRazon.setEnabled(False)
self.obraSocial=str(self.tableOs.item(rowActual,0).text())
self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))
self.lineCuit.setEnabled(False)
self.tableOs.setEnabled(False)
self.gbFactura.setEnabled(True)
self.gbNotaCredito.setEnabled(True)
def buscarFactura(self):
"""
Busca la factura indica por el usuario.
En caso de no existir, notifica lo mismo
:return:
"""
if not self.lineNumeroFac.isEnabled() and self.tableNC.rowCount() != 0:
QtGui.QMessageBox.information(self,"Aviso","Ya se ha seleccionado una factura")
elif not self.lineNumeroFac.isEnabled():
self.lineNumeroFac.setEnabled(True)
self.lineNumeroFac.clear()
self.limpiarTabla(self.tableFactura)
else:
self.numeroFacturaActual=str(self.lineNumeroFac.text())
if len(self.numeroFacturaActual)==0:
self.showMsjEstado("No se ha ingresado numero de factura")
else:
self.facturaSeleccionada=FacturaModel.existeFactura(int(self.numeroFacturaActual),self.sesion)
if self.facturaSeleccionada==None:
QtGui.QMessageBox.information(self,"Aviso","La factura seleccionada no existe")
elif self.facturaSeleccionada.getObra() != None and self.facturaSeleccionada.getObra() != self.obraSocial:
QtGui.QMessageBox.information(self,"Aviso","La Obra Social seleccionada no corresponde con la factura")
elif self.facturaSeleccionada.getFechaEmision()+timedelta(days=int(self.plazo))<date.today():
QtGui.QMessageBox.information(self,"Aviso","El tiempo permitido para el reintegro ha expirado")
elif self.facturaSeleccionada.estaLiquidada(self.sesion):
QtGui.QMessageBox.information(self,"Aviso","La factura se encuentra liquidada a la Obra Social")
elif self.facturaSeleccionada.getNC()!=None:
QtGui.QMessageBox.information(self,"Aviso","La factura ya posee una Nota de Crédito")
else:
self.lineNumeroFac.setEnabled(False)
if self.facturaSeleccionada.getObra() == None:
self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetalles(self.obraSocial, self.sesion),
["producto","cantidad","importe"])
else:
self.cargarObjetos(self.tableFactura,self.facturaSeleccionada.getDetallesSinDescuento(self.sesion),
["producto","cantidad","importe"])
    def agregarProducto(self):
        """
        Add the currently selected invoice line to the Credit Note table
        and record it for later persistence/printing.
        :return:
        """
        itemActual=self.tableFactura.currentItem()
        producto = int(self.tableFactura.item(itemActual.row(),0).text())
        # Discount the obra social grants on this product (first match).
        descuento = DescuentoModel.buscar(DescuentoModel.obra_social,self.sesion,self.obraSocial).\
            filter(DescuentoModel.producto==producto)[0].descuento
        cantidad = int(self.tableFactura.item(itemActual.row(), 1).text())
        # Refundable amount = invoiced amount * discount fraction.
        importe = float(self.tableFactura.item(itemActual.row(), 2).text()) * descuento
        row = self.tableNC.rowCount()
        self.tableNC.insertRow(row)
        self.tableNC.setItem(row, 0, QtGui.QTableWidgetItem(str(producto)))
        self.tableNC.setItem(row, 1, QtGui.QTableWidgetItem(str(cantidad)))
        self.tableNC.setItem(row, 2, QtGui.QTableWidgetItem(str(importe)))
        # NOTE(review): assumes table row order matches the invoice's detail
        # line numbers (row+1) — confirm against how tableFactura is filled.
        self.detallesReintegrables.append([int(self.numeroFacturaActual),itemActual.row()+1,descuento,importe])
        self.detallesImprimibles.append([producto,cantidad,descuento,importe])
        self.tableFactura.hideRow(itemActual.row())
    def limpiarVentana(self):
        """
        Reset the window to its initial state once the operation finished:
        clears selection state, tables and inputs, and re-enables the
        obra social pickers while disabling the NC group boxes.
        :return:
        """
        self.obraSocial = None
        self.facturaSeleccionada = None
        self.detallesReintegrables = []
        self.detallesImprimibles = []
        self.limpiarTabla(self.tableFactura)
        self.limpiarTabla(self.tableNC)
        self.lineCuit.clear()
        self.lineRazon.clear()
        self.lineNumeroFac.clear()
        self.lineCuit.setEnabled(True)
        self.lineRazon.setEnabled(True)
        self.tableOs.setEnabled(True)
        self.lineNumeroFac.setEnabled(True)
        self.gbFactura.setEnabled(False)
        self.gbNotaCredito.setEnabled(False)
    def confirmarOperacion(self):
        """
        Confirm the operation and persist the Credit Note (header plus one
        detail row per refundable line), link it to the invoice, print it,
        and reset the window.
        :return:
        """
        if self.tableNC.rowCount() == 0 :
            QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("No se han agregado productos a la Nota de Crédito"))
        else:
            ok = QtGui.QMessageBox.information(self,QtCore.QString.fromUtf8("Confirmación"),\
                                               QtCore.QString.fromUtf8("¿Desea generar la Nota Crédito?"),\
                                               QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)
            if (ok==1):
                # NOTE(review): number is generated via `NotaCredito` while the
                # instance is a `NotaCreditoModel` — confirm both names refer to
                # the same class/module elsewhere in the project.
                notaCredito = NotaCreditoModel(NotaCredito.generarNumero(self.sesion))
                notaCredito.guardar(self.sesion)
                for lineaNC, data in enumerate(self.detallesReintegrables):
                    # data = [nro factura, nro linea factura, descuento, importe]
                    detalleNC = DetalleNCModel(notaCredito.numero, lineaNC+1, data[0], data[1])
                    detalleNC.setImporte(data[3])
                    detalleNC.setDescuento(data[2])
                    detalleNC.guardar(self.sesion)
                QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("La Nota de Crédito ha sido generada con éxito"))
                self.facturaSeleccionada.setNC(notaCredito.numero)
                self.facturaSeleccionada.modificar(self.sesion)
                # Build the dictionary used to print the credit note.
                data = {}
                data["numero"] = notaCredito.numero
                data["fecha"] = notaCredito.fecha_emision
                data["detalles"] = self.detallesImprimibles
                generarNotaCredito(data)
                self.limpiarVentana()
            else:
                QtGui.QMessageBox.information(self,"Aviso",QtCore.QString.fromUtf8("La Nota de Crédito no ha sido generada"))
    def cancelarOperacion(self):
        """
        Ask for confirmation and, if accepted, discard the operation in
        progress and reset the window.
        :return:
        """
        ok = QtGui.QMessageBox.information(self,"Confirmacion","¿Desea cancelar la operacion?",\
                                           QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)
        if (ok==1):
            self.limpiarVentana()
    def cancelarVentana(self):
        # Called when the window is closed: just reset all state.
        self.limpiarVentana()
class VentaContado(CRUDWidget, Ui_vtnVentaContado):
    """
    Window that models the cash-sale (Venta al Contado) workflow.
    """
    def __init__(self,mdi):
        """
        Build the VentaContado window: wire up signals, load the obra
        social and product tables, and initialize per-sale state.
        :param mdi: parent MDI area hosting this window
        :return:
        """
        MdiWidget.__init__(self, mdi)
        self.sesion = self.mdi().window().getSesionBD()
        self.validadores()
        self.cargar_obras()
        self.lineMedicamento.returnPressed.connect(self.buscarProd)
        self.lineMonodroga.returnPressed.connect(self.buscarProd)
        self.lineCuit.returnPressed.connect(self.buscarObra)
        self.lineObra.returnPressed.connect(self.buscarObra)
        self.tableObra.itemDoubleClicked.connect(self.cargarObra)
        self.tableProductos.itemDoubleClicked.connect(self.agregarProducto)
        self.btnBuscar.pressed.connect(self.limpiarObra)
        self.btnAceptar.pressed.connect(self.confirmarOperacion)
        self.btnCancelar.pressed.connect(self.cancelarOperacion)
        self.btnEliminar.pressed.connect(self.eliminarDetalle)
        self.rbtnObra.pressed.connect(self.habilitarObras)
        # Obra social widgets start hidden/disabled until rbtnObra is toggled.
        self.btnBuscar.setEnabled(False)
        self.tableObra.setVisible(False)
        self.lineCuit.setEnabled(False)
        self.lineObra.setEnabled(False)
        self.cargarProductosSinObra()
        self.productosAgregados=0
        # detalle -> [[lote, cantidad], ...] used to restore stock on cancel.
        self.lotesVentas={}
        self.facturaCobrada=False
        self.obraSocialSeleccionada=None
        self.formapago = None
        self.factura = None
        # row -> printable data / row -> DetalleFacturaModel for the invoice table.
        self.data = {}
        self.detallesTabla = {}
def buscarProd(self):
"""
Filtra la tabla de Productos de acuerdo
a los criterios de busqueda impuestos
:return:
"""
medicamento = str(self.lineMedicamento.text())
monodroga = str(self.lineMonodroga.text())
data = self.getAllTabla(self.tableProductos)
if medicamento != "":
dataMedic = filter(lambda x: x[1].upper() == medicamento.upper(), data.values())
else:
dataMedic = data.values()
if monodroga != "":
dataMono = filter(lambda x: x[3].upper() == monodroga.upper(), dataMedic)
else:
dataMono = dataMedic
for dato in data:
self.tableProductos.setRowHidden(dato,False)
for dato in data:
if not data[dato] in dataMono:
self.tableProductos.setRowHidden(dato,True)
def buscarObra(self):
"""
Filtra la tabla de Obras Sociales de acuerdo
a los criterios de busqueda impuestos
:return:
"""
razon_social = str(self.lineObra.text())
cuit = str(self.lineCuit.text())
data = self.getAllTabla(self.tableObra)
if razon_social != "":
dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())
else:
dataRazon = data.values()
if cuit != "":
dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)
else:
dataCuit = dataRazon
for dato in data:
self.tableObra.setRowHidden(dato,False)
for dato in data:
if not data[dato] in dataCuit:
self.tableObra.setRowHidden(dato,True)
def actualizar(self):
"""
Actualiza la informacion de la
tabla de Productos
:return:
"""
if self.obraSocialSeleccionada!=None:
self.cargar_productos(self.obraSocialSeleccionada)
else:
self.cargarProductosSinObra()
    def habilitarObras(self):
        """
        Toggle the obra social widgets. If products were already added to
        the invoice, the obra social can no longer be changed and the user
        is notified instead.
        :return:
        """
        if self.productosAgregados != 0:
            QtGui.QMessageBox.information(self,"Aviso","Ya se han agregado productos a la factura")
        else:
            # `pressed` fires before the checked state flips, hence the inverted test.
            if not self.rbtnObra.isChecked():
                self.btnBuscar.setEnabled(True)
                self.lineObra.setEnabled(True)
                self.lineCuit.setEnabled(True)
                self.tableObra.setVisible(True)
            else:
                # Unchecking: drop the obra social and reload undiscounted products.
                self.lineObra.clear()
                self.lineCuit.clear()
                self.btnBuscar.setEnabled(False)
                self.lineObra.setEnabled(False)
                self.lineCuit.setEnabled(False)
                self.tableObra.setVisible(False)
                self.obraSocialSeleccionada=None
                self.cargarProductosSinObra()
    def cargarProductosSinObra(self):
        """
        Fill the product table with every non-discontinued product and a
        discount of 0 (no obra social applied).
        :return:
        """
        self.limpiarTabla(self.tableProductos)
        # Query every product with its barcode, medicament, presentation,
        # monodrug and price, ordered by barcode.
        query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\
            join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\
            join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\
            filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)
        # Populate the table; column 4 (discount) is hard-coded to 0 here.
        for n, obj in enumerate(query):
            self.tableProductos.insertRow(n)
            self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))
            self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))
            self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))
            self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))
            self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))
            self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))
        # Column 6: available stock per product. NOTE(review): assumes
        # buscarTodos (ordered by barcode) yields rows in the same order as
        # the query above — confirm.
        for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):
            self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))
    def cargar_productos(self, obraSocial):
        """
        Fill the product table with every non-discontinued product that has
        a discount agreement with the given obra social.
        :param obraSocial: razon social of the selected obra social
        :return:
        """
        self.limpiarTabla(self.tableProductos)
        # Same query as cargarProductosSinObra but joined with the discount
        # table, so column 4 carries the obra social's discount fraction.
        query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,DescuentoModel.descuento,ProductoModel.importe).\
            join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\
            join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\
            join(DescuentoModel).filter(DescuentoModel.producto==ProductoModel.codigo_barra).\
            filter(DescuentoModel.obra_social==obraSocial,ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)
        for n, obj in enumerate(query):
            self.tableProductos.insertRow(n)
            for m, campo in enumerate(obj):
                self.tableProductos.setItem(n, m, QtGui.QTableWidgetItem(str(campo)))
        # Column 6: available stock per product.
        for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):
            self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))
    def cargarObra(self):
        """
        Load the obra social picked in the table into the line edits, lock
        the selection, and reload the product table with its discounts.
        :return:
        """
        rowActual=self.tableObra.currentItem().row()
        self.lineObra.setText(str(self.tableObra.item(rowActual,0).text()))
        self.lineCuit.setText(str(self.tableObra.item(rowActual,1).text()))
        self.tableObra.hide()
        self.lineObra.setEnabled(False)
        self.lineCuit.setEnabled(False)
        self.obraSocialSeleccionada = str(self.lineObra.text())
        self.cargar_productos(self.obraSocialSeleccionada)
        self.gbProducto.setVisible(True)
def limpiarObra(self):
"""
Permite buscar las obras sociales si aun
no hay ninguna seleccionada.
Limpia los campos correspondientes a las
Obras Sociales, si ya hay una cargada.
:return:
"""
if self.lineObra.isEnabled():
self.buscarObra()
else:
self.lineCuit.clear()
self.lineObra.clear()
self.lineCuit.setEnabled(True)
self.lineObra.setEnabled(True)
self.tableObra.setVisible(True)
def validadores(self):
camposRequeridos = [getattr(self,"lineMonodroga")]
ValidarDatos.setValidador(camposRequeridos)
camposRequeridos = [getattr(self,"lineMedicamento")]
ValidarDatos.setValidador(camposRequeridos)
    def cargar_obras(self):
        """
        Load every obra social in the system into the obra social table
        (razon social, CUIT, address).
        :return:
        """
        self.cargarObjetos(self.tableObra,
                           ObraSocialModel.buscarTodos("razon_social", self.sesion).all(),
                           ("razon_social", "cuit", "direccion")
        )
    def descontarCantidad(self,detalle,producto,cantidad):
        """
        Deduct `cantidad` units of `producto` from its lots (oldest query
        order first), recording which lot supplied how many units so the
        sale can later be undone, and associate those lots with `detalle`.
        :param detalle: invoice detail the deduction belongs to
        :param producto: product barcode
        :param cantidad: units to deduct
        :return:
        """
        query=LoteModel.obtenerLoteProducto(producto,self.sesion)
        valores=[]
        for a in query:
            loteProducto=LoteProductoModel.buscarLoteProducto(self.sesion,producto,a.codigo).first()
            if cantidad<=loteProducto.cantidad:
                # This lot covers the remainder: deduct and stop.
                loteProducto.descontarCantidad(cantidad)
                loteProducto.modificar(self.sesion)
                valores.append([loteProducto,cantidad])
                break
            else:
                # Drain this lot entirely and continue with the next one.
                cantidad-=loteProducto.cantidad
                valores.append([loteProducto,loteProducto.cantidad])
                loteProducto.descontarCantidad(loteProducto.cantidad)
                loteProducto.modificar(self.sesion)
        self.lotesVentas[detalle]=valores
        detalle.agregarLotes(self.sesion,self.lotesVentas[detalle])
    def agregarProducto(self):
        """
        Ask for a quantity and add the selected product to the invoice,
        creating the invoice on the first added product, deducting stock,
        and recording the detail for display/printing.
        :return:
        """
        itemActual=self.tableProductos.currentItem()
        cantidad, ok = QtGui.QInputDialog.getInt(self,"Cantidad","Ingrese cantidad del producto",1,1,2000,5)
        if not ok:
            self.showMsjEstado("No se ha seleccionado cantidad del producto")
        else:
            cantidadProducto=int(self.tableProductos.item(itemActual.row(),6).text())
            if cantidad>cantidadProducto:
                QtGui.QMessageBox.information(self,"Aviso","La cantidad ingresada es mayor que la del stock")
            else:
                # Lazily create the invoice on the first product added.
                if self.productosAgregados == 0 and self.factura == None:
                    self.factura=FacturaModel(FacturaModel.generarNumero(self.sesion))
                    self.factura.guardar(self.sesion)
                self.productosAgregados+=1
                rowItemActual=itemActual.row()
                rows=self.tableFactura.rowCount()
                self.tableFactura.insertRow(rows)
                #--Load the items into the table--*
                producto = int(self.tableProductos.item(rowItemActual,0).text())
                importeActual=float(self.tableProductos.item(rowItemActual,5).text())
                descuentoActual=float(self.tableProductos.item(rowItemActual,4).text())
                # Unit price after the obra social discount (0 when none).
                subtotal=importeActual*(1-descuentoActual)
                ####-------------------------#####
                detalleFactura=DetalleFacturaModel(self.factura.numero,producto,cantidad,
                                                   subtotal*cantidad,descuentoActual,self.productosAgregados
                )
                self.descontarCantidad(detalleFactura,producto,cantidad)
                self.tableFactura.setItem(rows,0,QtGui.QTableWidgetItem(str(detalleFactura.producto)))
                self.tableFactura.setItem(rows,1,QtGui.QTableWidgetItem(str(detalleFactura.cantidad)))
                self.tableFactura.setItem(rows, 2, QtGui.QTableWidgetItem(str("%.2f"%(subtotal*cantidad))))
                detalleFactura.guardar(self.sesion)
                self.detallesTabla[rows] = detalleFactura
                self.data[rows] = [
                    producto, cantidad, subtotal*cantidad, descuentoActual
                ]
                self.actualizar()
                self.objectModified.emit()
    def eliminarDetalle(self):
        """
        Remove the selected invoice line, returning the deducted units to
        their lots and deleting the detail from the database.
        :return:
        """
        itemActual = self.tableFactura.currentItem()
        if itemActual == None:
            self.showMsjEstado("Debe seleccionar un item para dar de baja")
        else:
            detalle = self.detallesTabla[itemActual.row()]
            # Restore the exact quantities taken from each lot.
            for loteVenta in self.lotesVentas[detalle]:
                loteVenta[0].aumentarCantidad(loteVenta[1])
                loteVenta[0].modificar(self.sesion)
            detalle.eliminarLotesAsociados(self.sesion)
            detalle.bajaFisica(self.sesion)
            del self.lotesVentas[detalle]
            del self.data[itemActual.row()]
            # Row is hidden (not removed) so remaining row indices stay valid.
            self.tableFactura.hideRow(itemActual.row())
            self.actualizar()
            self.productosAgregados -=1
            self.objectModified.emit()
    def limpiarVentana(self):
        """
        Reset the window to its initial state: per-sale bookkeeping,
        obra social widgets, and both tables.
        :return:
        """
        self.productosAgregados=0
        self.lotesVentas={}
        self.facturaCobrada=False
        self.obraSocialSeleccionada=None
        self.formapago = None
        self.factura = None
        self.data = {}
        self.detallesTabla = {}
        self.lineObra.clear()
        self.lineObra.setEnabled(True)
        self.lineCuit.clear()
        self.lineCuit.setEnabled(True)
        self.tableObra.setVisible(False)
        self.rbtnObra.setChecked(False)
        self.limpiarTabla(self.tableProductos)
        self.limpiarTabla(self.tableFactura)
        self.cargarProductosSinObra()
def calcularTotal(self):
"""
Calcula el total a pagar
:return Total a Pagar:
"""
subtotales=[]
for row in range(0,self.tableFactura.rowCount()):
subtotales.append(float(self.tableFactura.item(row,2).text()))
importeTotal=sum(subtotales)
return importeTotal
    def confirmarOperacion(self):
        """
        Open the payment dialog and, once the invoice was fully paid
        (flag set by Cobrar), print the invoice, attach the obra social,
        and reset the window; otherwise notify the pending payment.
        :return:
        """
        if self.productosAgregados == 0:
            QtGui.QMessageBox.information(self,"Aviso","No se ha agregado ningun producto")
        else:
            # Modal payment dialog; it sets self.facturaCobrada / self.formapago.
            ventana = Cobrar(self,self.calcularTotal(),self.factura,self.sesion)
            ventana.exec_()
            if self.facturaCobrada:
                QtGui.QMessageBox.information(self,"Venta","La venta se ha realizado con exito")
                data = {}
                data["numero"] = self.factura.numero
                data["fecha"] = self.factura.fecha_emision
                data["detalles"] = self.data.values()
                data["formaPago"] = self.formapago
                generarFactura(data)
                self.factura.setObra(self.obraSocialSeleccionada)
                self.factura.modificar(self.sesion)
                self.limpiarVentana()
            else:
                QtGui.QMessageBox.information(self,"Aviso","La factura aun no ha sido cobrada")
    def cancelarOperacion(self):
        """
        Ask for confirmation and, if accepted, void the invoice, restore
        every deducted lot quantity, delete the details, and reset the
        window.
        :return:
        """
        ok=QtGui.QMessageBox.warning(self,"Aviso","¿Desea cancelar la operación?",\
                                     QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok)
        if ok == QtGui.QMessageBox.Ok:
            if self.factura != None:
                self.factura.anular()
                for detalle in self.lotesVentas:
                    # Return the sold units to the lots they came from.
                    for loteVenta in self.lotesVentas[detalle]:
                        loteVenta[0].aumentarCantidad(loteVenta[1])
                        loteVenta[0].modificar(self.sesion)
                    detalle.eliminarLotesAsociados(self.sesion)
                    detalle.borrar(self.sesion)
                self.objectModified.emit()
            self.limpiarVentana()
    def cancelarVentana(self):
        # Window-close handler: same rollback as cancelarOperacion but
        # without asking the user for confirmation.
        if self.factura != None:
            self.factura.anular()
            for detalle in self.lotesVentas:
                for loteVenta in self.lotesVentas[detalle]:
                    loteVenta[0].aumentarCantidad(loteVenta[1])
                    loteVenta[0].modificar(self.sesion)
                detalle.eliminarLotesAsociados(self.sesion)
                detalle.borrar(self.sesion)
            self.objectModified.emit()
        self.limpiarVentana()
def addHandlerSignal(self):
self.sender = PoolOfWindows.getVentana("VentaConRemito")
self.sender.objectModified.connect(self.actualizar)
self.sender1 = PoolOfWindows.getVentana("AltaProducto")
self.sender1.objectCreated.connect(self.actualizar)
self.sender2 = PoolOfWindows.getVentana("BajaProducto")
self.sender2.objectDeleted.connect(self.actualizar)
self.sender3 = PoolOfWindows.getVentana("ModificarProducto")
self.sender3.objectModified.connect(self.actualizar)
self.sender4 = PoolOfWindows.getVentana("DevolucionDeCliente")
self.sender4.objectModified.connect(self.actualizar)
self.sender5 = PoolOfWindows.getVentana("ModificarRemito")
self.sender5.objectModified.connect(self.actualizar)
self.sender6 = PoolOfWindows.getVentana("BajaRemito")
self.sender6.objectModified.connect(self.actualizar)
self.sender7 = PoolOfWindows.getVentana("FraccionarProducto")
self.sender7.objectModified.connect(self.actualizar)
self.sender8 = PoolOfWindows.getVentana("AltaLote")
self.sender8.objectCreated.connect(self.actualizar)
self.sender9 = PoolOfWindows.getVentana("ModificarLote")
self.sender9.objectModified.connect(self.actualizar)
class Cobrar(QtGui.QDialog, Ui_Dialog):
    """
    Modal dialog that models collecting the payment of an invoice,
    possibly split across cash, credit/debit card and credit notes.
    """
    def __init__(self,ventana_padre, total, factura,sesion):
        """
        Build the payment dialog.
        :param ventana_padre: parent window (VentaContado)
        :param total: total amount to pay
        :param factura: invoice being collected
        :param sesion: database session
        :return:
        """
        QtGui.QDialog.__init__(self,ventana_padre)
        self.setupUi(self)
        self.btnAceptar.pressed.connect(self.confirmar)
        self.btnCancelar.pressed.connect(self.cancelar)
        self.btnEliminar.pressed.connect(self.eliminar)
        self.rbtnEfectivo.pressed.connect(self.cobroEfectivo)
        self.rbtnNC.pressed.connect(self.cobroNC)
        self.rbtnTC.pressed.connect(self.cobroTC)
        self.rbtnTD.pressed.connect(self.cobroTD)
        self.total_a_pagar = total
        self.padre = ventana_padre
        self.factura = factura
        self.sesion = sesion
        self.actualizar_total()
        # row -> [method, amount(, NC number)] of each partial payment.
        self.detalles_cobro = {}
def actualizar_total(self):
"""
Actualiza el importe a pagar en
el line de la ventana
:param total:
:return:
"""
self.lblImporte.setText("Saldo Restante: $%.2f" % self.total_a_pagar)
    def cobroNC(self):
        """
        Collect (part of) the payment with a Credit Note. The note must
        exist and its unspent balance must cover the whole remaining
        amount; on success the remaining balance drops to zero.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            self.rbtnNC.setChecked(True)
            totalFactura = self.total_a_pagar
            numero,ok = QtGui.QInputDialog.getText(self,"Cobro c/Nota de Crédito","Ingrese número de Nota de Crédito")
            if ok:
                notaCredito = NotaCreditoModel.getNotaCredito(self.padre.sesion,int(numero))
                if notaCredito == None:
                    QtGui.QMessageBox.information(self,"Aviso","La Nota de Crédito ingresada no existe")
                elif notaCredito.getTotal(self.padre.sesion) < totalFactura:
                    QtGui.QMessageBox.information(self,"Aviso","El monto de la Nota de Credito es insuficiente")
                elif notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero) < totalFactura:
                    # Note exists but part of it was already spent on other payments.
                    dif = notaCredito.getTotal(self.padre.sesion) - CobroClienteModel.getTotalNC(self.padre.sesion,notaCredito.numero)
                    QtGui.QMessageBox.information(self,"Aviso","La Nota solo posee $" + str(dif))
                else:
                    # Third element (NC number) marks this entry as an NC payment.
                    temp = ["Nota de Crédito",self.total_a_pagar,notaCredito.numero]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar = 0
                    self.actualizar_total()
                    self.actualizar_tabla()
    def cobroTC(self):
        """
        Collect (part of) the payment with a credit card: ask for an
        amount between 0 and the remaining balance and record it.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,"Cobro Tarjeta Crédito","Ingrese monto a pagar",0,0,2000,2)
            if ok:
                if monto_a_pagar > self.total_a_pagar:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado es mayor al total a pagar")
                elif monto_a_pagar == 0:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado no puede ser cero")
                else:
                    temp = ["Tarjeta de Crédito",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar -= monto_a_pagar
                    self.actualizar_total()
                    self.actualizar_tabla()
    def cobroTD(self):
        """
        Collect (part of) the payment with a debit card: ask for an
        amount between 0 and the remaining balance and record it.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,"Cobro Tarjeta Débito","Ingrese monto a pagar",0,0,2000,2)
            if ok:
                if monto_a_pagar > self.total_a_pagar:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado es mayor al total a pagar")
                elif monto_a_pagar == 0:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado no puede ser cero")
                else:
                    temp = ["Tarjeta de Débito",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar -= monto_a_pagar
                    self.actualizar_total()
                    self.actualizar_tabla()
    def cobroEfectivo(self):
        """
        Collect (part of) the payment in cash. Paying at least the
        remaining balance shows the change and settles the invoice;
        a smaller amount is recorded as a partial payment.
        :return:
        """
        if self.total_a_pagar == 0:
            QtGui.QMessageBox.information(self,"Aviso","El saldo restante a pagar es cero")
        else:
            self.rbtnEfectivo.setChecked(True)
            monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,"Cobro Efectivo","Ingrese monto a pagar",0,0,2000,2)
            if ok:
                if monto_a_pagar >= self.total_a_pagar:
                    QtGui.QMessageBox.information(self,"Cobro Efectivo","Su vuelto es:%.2f" % (monto_a_pagar - self.total_a_pagar))
                    temp = ["Efectivo",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar = 0
                elif monto_a_pagar == 0:
                    QtGui.QMessageBox.information(self,"Aviso","El monto ingresado no puede ser cero")
                else:
                    temp = ["Efectivo",monto_a_pagar]
                    self.detalles_cobro[self.tablePagos.rowCount()] = temp
                    self.total_a_pagar -= monto_a_pagar
                self.actualizar_total()
                self.actualizar_tabla()
    def eliminar(self):
        """
        Remove the selected partial payment, adding its amount back to
        the remaining balance.
        :return:
        """
        itemActual = self.tablePagos.currentItem()
        if itemActual == None:
            # NOTE(review): showMsjEstado comes from the MDI widget classes, not
            # QDialog — confirm Cobrar actually has this method (possible AttributeError).
            self.showMsjEstado("Debe seleccionar un para poder eliminar")
        else:
            monto = self.detalles_cobro[itemActual.row()][1]
            del self.detalles_cobro[itemActual.row()]
            self.total_a_pagar += monto
            self.tablePagos.setRowHidden(itemActual.row(),True)
            self.actualizar_total()
def actualizar_tabla(self):
"""
Actualiza la tabla de cobros
:return:
"""
self.padre.limpiarTabla(self.tablePagos)
for row, cobro in enumerate(self.detalles_cobro.values()):
self.tablePagos.insertRow(row)
self.tablePagos.setItem(row,0,QtGui.QTableWidgetItem(cobro[0]))
self.tablePagos.setItem(row,1,QtGui.QTableWidgetItem("$"+str(cobro[1])))
    def confirmar(self):
        """
        Confirm the recorded payments: persist one CobroClienteModel per
        partial payment, set the parent's payment-method label and
        facturaCobrada flag, and accept the dialog. Refuses if a balance
        remains.
        :return:
        """
        if self.total_a_pagar == 0:
            for cobro in self.detalles_cobro.values():
                if len(cobro) == 3:
                    # Credit-note payment: third element is the NC number.
                    cobroCliente = CobroClienteModel(CobroClienteModel.obtenerNumero(self.sesion),self.factura.numero,\
                                                     cobro[0],cobro[1])
                    cobroCliente.setNC(cobro[2])
                else:
                    cobroCliente = CobroClienteModel(CobroClienteModel.obtenerNumero(self.sesion),self.factura.numero,\
                                                     cobro[0],cobro[1])
                cobroCliente.guardar(self.sesion)
            if len(self.detalles_cobro.values())>1:
                self.padre.formapago = "Varios"
            else:
                # NOTE(review): indexing .values()[0] requires Python 2 (list);
                # under Python 3 dict_values is not subscriptable.
                self.padre.formapago = self.detalles_cobro.values()[0][0]
            self.padre.facturaCobrada = True
            self.accept()
        else:
            QtGui.QMessageBox.information(self,"Aviso","Restan $%.2f por pagar" % self.total_a_pagar)
    def cancelar(self):
        """
        Ask for confirmation and, if accepted, drop every recorded
        payment and close the dialog (invoice stays unpaid).
        :return:
        """
        signal = QtGui.QMessageBox.information(self,"Aviso","¿Desea cancelar la operacion?",\
                                               QtGui.QMessageBox.Close | QtGui.QMessageBox.Ok)
        if signal == QtGui.QMessageBox.Ok:
            self.detalles_cobro = {}
            self.padre.limpiarTabla(self.tablePagos)
            self.close()
|
9,358 | 4efd22d132accd0f5945a0c911b73b67654b92e4 | from django.urls import path
from .views import FirstModelView
# Route the 'firstModel' endpoint to its class-based view.
urlpatterns = [
    path('firstModel', FirstModelView.as_view())
]
9,359 | 5f1cbe1019f218d2aad616ea8bbe760ea760534c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Add gumpy path
sys.path.append('../shared')
from gumpy import signal
import numpy as np
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,
                    hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):
    """Run the selected preprocessing steps over an EEG signal.

    Steps run in a fixed order — notch, high-pass, band-pass, normalize,
    artifact removal — and each is skipped unless its boolean flag is True.
    Returns the processed signal.
    """
    if notch:
        data = notch_filter(data, ac_freq, sample_rate)
    if hp_filter:
        data = highpass_filter(data, hp_freq)
    if bp_filter:
        data = bandpass_filter(data, bp_low, bp_high, sample_rate)
    if normalize:
        data = normalize_data(data, 'mean_std')
    if artifact_removal:
        data = remove_artifacts(data)
    return data
def notch_filter(data, ac_freq, sample_rate):
    """Notch out the mains (AC line) frequency, normalized to Nyquist."""
    nyquist = sample_rate / 2
    return signal.notch(data, ac_freq / nyquist)
def highpass_filter(data, hp_freq):
    """Apply a Butterworth high-pass filter with cutoff hp_freq."""
    return signal.butter_highpass(data, hp_freq)
def bandpass_filter(data, bp_low, bp_high, sample_rate):
    """Apply a 5th-order Butterworth band-pass filter between bp_low and bp_high Hz."""
    return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)
def normalize_data(data, strategy):
    """Normalize the signal using the given gumpy strategy (e.g. 'mean_std')."""
    return signal.normalize(data, strategy)
def remove_artifacts(data):
    """Run gumpy artifact removal; input is reshaped to a column, output squeezed back."""
    cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
    return np.squeeze(cleaned)
|
9,360 | ae775e25179546156485e15d05491e010cf5daca | # encoding=utf8
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException
from selenium.webdriver.support.select import Select
import time
import threading
import random
import string
from urllib import parse, request
import re
import json
import datetime
import threading
from datetime import timedelta, date
class BookRoomThread(threading.Thread):
    # Time of day at which to start refreshing the booking page.
    startHour = 23
    startMin = 58
    # SECURITY NOTE: credentials are hard-coded in source — move them to a
    # config file or environment variables.
    userName = '2160652004'
    passWord = '270749'
    roomName = '研究间17'
    # Desired booking window, in the HHMM values the site's selects accept.
    startTime = '800'
    endTime = '2200'
    # Book two days ahead; evaluated once at import time.
    bookDate = date.today()+timedelta(2)
    isSlectDate = False
    def __init__(self, name):
        super().__init__()
        self.__running = threading.Event()  # flag used to stop the thread
        self.__running.set()  # set running to True
        self.name = name
        self.isStart = False  # whether the browser has been opened yet
def stop(self):
if self.driver:
self.driver.close()
self.__running.clear() # 设置为False
    def run(self):  # fixed name required by threading.Thread
        """Main loop: open the browser once, then idle until stopped."""
        while self.__running.isSet():
            if not self.isStart:
                self.openbrowser()
                self.isStart = True
            time.sleep(1)
    # Launch the browser in foreground (visible) mode
    def openChrome(self):
        """Start a visible Chrome instance with infobars disabled."""
        # startup options
        option = webdriver.ChromeOptions()
        option.add_argument('disable-infobars')
        self.driver = webdriver.Chrome(chrome_options=option)
        # open the Chrome browser
        # driver = webdriver.Chrome()
    def goLogin(self):
        """Fill in the credentials and submit the login form.

        Returns True when the form elements were found and submitted,
        False otherwise (errors are printed, not raised).
        """
        try:
            username = self.driver.find_element_by_id("username")
            password = self.driver.find_element_by_id("password")
            username.send_keys(self.userName)
            password.send_keys(self.passWord)
            self.driver.find_element_by_class_name('btn-success').click()
            return True
        except Exception as e:
            print(str(e))
            return False
    def goUserCenter(self):
        """Dismiss the '提醒' (reminder) dialog if present; True on success."""
        try:
            dialogtitle = self.driver.find_element_by_id('ui-id-3')
            if dialogtitle.text == '提醒':
                self.driver.find_element_by_class_name('ui-button-text-only').click()
                return True
            else:
                return False
        except Exception as e:
            print(str(e))
            return False
    def goBookRoomSelection(self):
        """From the home page, open the study-room ('研究小间') section; True on success."""
        try:
            title = self.driver.find_element_by_class_name('h_title')
            if title.text == 'Home Page':
                self.driver.find_element_by_link_text('研究小间').click()
                return True
            else:
                return False
        except Exception as e:
            print(str(e))
            return False
    def inUserCenter(self):
        """On the user-center page, return True if the booking already
        succeeded ('预约成功'); otherwise navigate back to the room list."""
        try:
            title = self.driver.find_element_by_class_name('h_title')
            if title.text.strip() == '个人中心':
                result = self.driver.find_element_by_css_selector('.orange.uni_trans')
                if result.text.strip() == '预约成功':
                    return True
                else:
                    self.driver.find_element_by_link_text('研究小间').click()
                    return False
            else:
                return False
        except Exception as e:
            print(str(e))
            return False
    def changeTheDate(self):
        """Select the target date in the calendar header, then open the
        booking dialog for the configured room.

        If the target date column is not selected yet, click it; if it is
        already selected, alternately click a neighbouring column and back
        (isSlectDate toggles) — presumably to force the page to refresh.
        Returns True unless an exception occurred.
        """
        try:
            datetitles = self.driver.find_elements_by_class_name('cld-h-cell')
            isFindDateTitle = False
            print(self.bookDate)
            for i in range(len(datetitles)):
                if datetitles[i].get_attribute('date') == str(self.bookDate):
                    isFindDateTitle = True
                    if datetitles[i].get_attribute('class').find('cld-d-sel') == -1:
                        datetitles[i].click()
                    else:
                        if self.isSlectDate:
                            self.isSlectDate = False
                            # Click a neighbour column to trigger a reload.
                            if i == 6:
                                datetitles[5].click()
                            else:
                                datetitles[i+1].click()
                        else:
                            self.isSlectDate = True
            if not isFindDateTitle:
                # Target date not visible yet: page ahead in the calendar.
                datetitles[9].click()
            else:
                # Open the booking dialog for the configured room when it
                # still has free slots (more than 2 time cells).
                roomtitles = self.driver.find_elements_by_class_name('cld-obj-qz')
                for i in range(len(roomtitles)):
                    if roomtitles[i].get_attribute('objname') == self.roomName:
                        if len(roomtitles[i].find_elements_by_class_name('cld-ttd')) > 2:
                            roomtitles[i].find_element_by_class_name('cld-ttd-title').click()
                        break
            return True
        except Exception as e:
            print(str(e))
            return False
    def comitBook(self):
        """In the '预约申请' (booking request) dialog, pick the start/end
        times and submit; True on success."""
        try:
            dialogtitle = self.driver.find_element_by_class_name('ui-dialog-title')
            if dialogtitle.text == '预约申请':
                # NOTE(review): index [2] picks the third start/end select on
                # the page — confirm this matches the dialog's layout.
                st = self.driver.find_elements_by_name('start_time')[2]
                et = self.driver.find_elements_by_name('end_time')[2]
                Select(st).select_by_value(self.startTime)
                Select(et).select_by_value(self.endTime)
                self.driver.find_element_by_class_name('submitarea').find_element_by_xpath(
                    "//input[@value='提交']").click()
                return True
            else:
                return False
        except Exception as e:
            print(str(e))
            return False
    def book_room(self):
        """Drive one attempt through the booking steps, then re-arm the timer.

        Each go*/comit* helper returns False when its page state does not
        match, so exactly one step advances per timer tick. NOTE(review):
        the printed diagnostics are shifted by one step relative to the call
        they follow (e.g. 'not go user center' after inUserCenter) — they
        describe the *next* step, confirm before relying on them.
        """
        if self.driver.title == "IC空间管理系统":
            if not self.goLogin():
                print('not login')
                if not self.inUserCenter():
                    print('not in user center')
                    if not self.goUserCenter():
                        print('not go user center')
                        if not self.comitBook():
                            print('not go 研究小间')
                            if not self.changeTheDate():
                                print('not go commit')
                                if not self.goBookRoomSelection():
                                    print('not go Date')
        else:
            print('book success')
            self.driver.close()
            self.stop()
            return
        self.start_timer()
    # Booking entry point
    def operationBook(self):
        """Open the booking site, wait (polling every 10 s) until the
        configured start time of day, refresh the page, then start the
        booking timer."""
        url = "http://seatlib.fjtcm.edu.cn"
        self.driver.get(url)
        while True:
            now = datetime.datetime.now()
            if now.hour > self.startHour or (now.hour == self.startHour and now.minute >= self.startMin):
                self.driver.refresh()
                break
            # poll every 10 seconds
            time.sleep(10)
        self.start_timer()
    def start_timer(self, interval=0.5):
        """Schedule book_room() to run once after *interval* seconds."""
        self.timer = threading.Timer(interval, self.book_room)
        self.timer.start()
    def openbrowser(self):
        """Launch Chrome and kick off the booking workflow."""
        self.openChrome()
        self.operationBook()
|
9,361 | 1b1b646a75fe2ff8d54e66d025b60bde0c9ed2d6 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# ### Bài tập 1.
# - <ins>Yêu cầu</ins>: Ý tưởng cơ bản của thuật toán ``Support Vector Machine`` (``SVM``) là gì? Ý tưởng của thuật toán biên mềm (``soft margin``) ``SVM``. Nêu ý nghĩa của siêu tham số ``C`` trong bài toán cực tiểu hàm mất mát.
#
# 1. Ý tưởng cơ bản của SVM là đưa toàn bộ dataset vào không gian nhiều chiều (n chiều), từ đó tìm ra mặt phẳng thích hợp nhất (hyperplane) để phân chia
# 2. Support Vector Machine thuần (hard margin) thì gặp hai vấn đề chính đó là nó chỉ hoạt động trên dataset ``Linearly Separable`` và thứ 2 đó là nó khá nhạy cảm với biến nhiễu (sensitive to noise). Để tránh vấn đề này, chúng ta cần sử dụng một mô hình linh hoạt
# hơn. Nhiệm vụ của nó là tìm được mặt phẳng vẫn phân loại tốt nhưng chấp nhận sai lệch ở một mức độ chấp nhận được.
# 3. Tham số `C` là hằng số dương giúp cân đối độ lớn của margin và sự hy sinh của các điểm nằm trong vùng không an toàn. Khi $C = \infty $ hoặc rất lớn, Soft Margin SVM trở thành Hard Margin SVM.
# %% [markdown]
# ### Bài tập 2.
# - <ins>Yêu cầu</ins>: Sử dụng mô hình ``SVM`` thuộc thư viện ``sklearn`` để xây dựng mô hình phân loại dựa trên tập dữ liệu huấn luyện ``X_train``, ``y_train``. Hãy nhận xét về tỉ lệ nhãn ``0`` và ``1`` trong bộ dữ liệu đã cho như đoạn code bên dưới. Hãy thử thay đổi giá trị của tham số ``C`` và nhận xét các độ đo ``Recall``, ``Precison``, ``F1-score``, và ``Accuracy`` của mô hình thu được trên tập dữ liệu kiểm tra ``X_test``, ``y_test``.
# - Nguồn tham khảo dữ liệu ``thyroid_sick.csv``: https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
# %%
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import itertools
import numpy as np
from sklearn import preprocessing
# %%
# Load the thyroid dataset; 'classes' is the binary target, everything else a feature.
df = pd.read_csv('thyroid_sick.csv')
X = df[[column_name for column_name in df.columns if column_name != 'classes']]
y = df[['classes']]
# Standardize features to zero mean / unit variance before splitting.
X = preprocessing.StandardScaler().fit(X).transform(X.astype(float))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# %%
# Class balance: row counts per label value (the dataset is imbalanced).
df.pivot_table(index =['classes'], aggfunc='size')
# %% [markdown]
# * Nhận xét:
# %%
from sklearn import svm

# Sweep the soft-margin penalty C over 0.5, 1.0, ..., 10.0 and record the
# test accuracy for each value.
C_parameter = 10.5
C_values = np.arange(0.5, C_parameter, 0.5)
mean_acc = []
for n in C_values:
    # Train Model and Predict (ravel the single-column label frame so
    # sklearn gets a 1-D target instead of a DataFrame column).
    clf = svm.SVC(C=n).fit(X_train, y_train.values.ravel())
    yhat = clf.predict(X_test)
    cnf_matrix = confusion_matrix(y_test, yhat)
    mean_acc.append(float(accuracy_score(y_test, yhat)))
    print("Result with C = " + str(n))
    np.set_printoptions(precision=2)
    print(classification_report(y_test, yhat))
# BUG FIX: the original printed the loop variable `n`, i.e. the *last* C
# tried, not the C that achieved the best accuracy.
best_index = int(np.argmax(mean_acc))
print("The best accuracy was with", max(mean_acc), "with C=", C_values[best_index])
# %% [markdown]
# ### Bài tập 3.
# - <ins>Yêu cầu</ins>: Ý tưởng của hàm ``kernel`` $K(\dots, \dots)$ là gì? Khi nào chúng ta áp dụng hàm ``kernel``? Chúng ta có cần biết biểu thức của hàm $\Phi(x)$ không?
# 1. Kernel SVM là việc đi tìm một hàm số biến đổi dữ liệu $x$ từ không gian feature ban đầu thành dữ liệu trong một không gian mới bằng hàm số $\Phi(\mathbf{x})$. Hàm số này cần thoả mãn mục đích đó là tronng không gian mới, dữ liệu giữa hai classes là phân biệt tuyến tính hoặc gần như phần biệt tuyến tính.
# 2. Chúng ta áp dụng hàm ``kernel`` khi dữ liệu không phân biệt tuyến tính, Với dữ liệu gần phân biệt tuyến tính, linear và poly kernels cho kết quả tốt hơn.
# 3.
# %% [markdown]
# ### Bài tập 4.
# - <ins>Yêu cầu</ins>: Cho điểm dữ liệu trong không gian hai chiều $x = [x_1, x_2]^T$ và hàm biến đổi sang không gian năm chiều $\Phi(x) = [1, \sqrt{2}x_1, \sqrt{2}x_2, x_1^2, \sqrt{2}x_1x_2, x_2^2]^T$. Hãy tính hàm ``kernel`` $K(a, b)$.
#
# \begin{eqnarray}
# \Phi(\mathbf{x})^T\Phi(\mathbf{z}) &=& [1, \sqrt{2} x_1, \sqrt{2} x_2, x_1^2, \sqrt{2} x_1x_2, x_2^2] [1, \sqrt{2} z_1, \sqrt{2} z_2, z_1^2, \sqrt{2} z_1z_2, z_2^2]^T \\
# &=& 1 + 2x_1z_1 + 2x_2z_2 + x_1^2x_2^2 + 2x_1z_1x_2z_2 + x_2^2z_2^2 \\
# &=& (1 + x_1z_1 + x_2z_2)^2 = (1 + \mathbf{x}^T\mathbf{z})^2 = k(\mathbf{x}, \mathbf{z})
# \end{eqnarray}
# %% [markdown]
# ### Bài tập 5.
# - <ins>Yêu cầu</ins>: Giả sử bạn dùng bộ phân loại ``SVM`` với hàm ``kernel`` (radial basis function) ``RBF`` cho tập huấn luyện và thấy mô hình phân loại chưa tốt. Để cải thiện, bạn sẽ giảm hay tăng tham số $\gamma$ trong công thức hàm ``kernel``, tham số ``C`` trong hàm mất mát.
# %% [markdown]
# ### Bài tập 6. (Exercise 9 trang 174, Chapter 5: Support Vector Machines)
# - <ins>Yêu cầu</ins>: Huấn luyện một bộ phân lớp ``SVM`` dựa trên bộ dữ liệu ``MNIST`` (dùng để phân loại hình ảnh các ký tự số có cùng kích thước). Bởi vì bộ phân loại ``SVM`` là bộ phân lớp nhị phân, chúng ta sẽ cần sử dụng chiến thuật ``one-versus-the-rest`` để phân loại tất cả ``10`` ký tự số (trong thực tế chúng ta chỉ dùng chiến thuật ``one-versus-one`` trong các trường hợp dữ liệu nhỏ). Bạn hãy báo cáo độ chính xác (``accuracy``) của mô hình đã huấn luyện trên tập test.
# %%
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.datasets import fetch_openml
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
# Download (and cache) MNIST from OpenML; ~70k 28x28 grayscale digits.
mnist = fetch_openml('mnist_784', version=1, cache=True)
# %%
X = mnist["data"]
y = mnist["target"].astype(np.uint8)
# Standard MNIST split: first 60k rows train, last 10k test.
X_train = X[:60000]
y_train = y[:60000]
X_test = X[60000:]
y_test = y[60000:]
# LinearSVC uses one-vs-rest for multiclass by default.
lin_clf = LinearSVC(random_state=42)
lin_clf.fit(X_train, y_train)
y_pred = lin_clf.predict(X_train)
accuracy_score(y_train, y_pred)
# %%
y_test_predict =lin_clf.predict(X_test)
accuracy_score(y_test, y_test_predict)
# %%
# Standardize the pixel values. BUG FIX: fit the scaler on the training set
# only and *transform* the test set with it — the original called
# fit_transform on the test data, leaking test statistics.
Scaler = StandardScaler()
X_train_scaled = Scaler.fit_transform(X_train.astype(np.float32))
X_test_scaled = Scaler.transform(X_test.astype(np.float32))
# %%
# BUG FIX: retrain on the *scaled* training data; the original refit on the
# unscaled X_train and then predicted on X_test_scaled.
lin_clf = LinearSVC(random_state =42)
lin_clf.fit(X_train_scaled, y_train)
y_pred = lin_clf.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
# %%
y_test_predict = lin_clf.predict(X_test_scaled)
accuracy_score(y_test, y_test_predict)
# Randomized hyper-parameter search for an RBF-kernel SVC on a subsample.
# BUG FIX: removed the duplicated import with the 'reciproca' typo (it raised
# ImportError) and defined svm_clf, which the original never created.
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
from sklearn.svm import SVC
svm_clf = SVC(random_state=42)
param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)}
rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3)
rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])
# %% [markdown]
# ### Bài tập 7. (Exercise 10 trang 174, Chapter 5: Support Vector Machines)
# - <ins>Yêu cầu</ins>: Hãy huấn luyện một mô hình hồi quy tuyến tính với dữ liệu giá nhà ``California housing dataset``.
# %%
# Load the California housing regression dataset and make an 80/20 split.
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
housing = fetch_california_housing()
X = housing["data"]
y = housing["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
9,362 | c889fd081eb606dca08fade03aa0a4d32319f98d | import requests
import json
URL = 'https://www.sms4india.com/api/v1/sendCampaign'

def sendPostRequest(reqUrl, apiKey, secretKey, useType, phoneNo, senderId, textMessage):
    """POST an SMS campaign request to the sms4india API.

    Args:
        reqUrl: API endpoint URL.
        apiKey / secretKey: account credentials.
        useType: 'prod' or 'stage'.
        phoneNo: destination mobile number.
        senderId: registered sender id.
        textMessage: message body.

    Returns:
        The requests.Response from the POST call.

    BUG FIX: the original body used bare, undefined names (a hard-coded
    key/secret/phone/email typed without quotes) instead of the function's
    own parameters, which raised NameError and also leaked credentials.
    """
    req_params = {
        'apikey': apiKey,
        'secret': secretKey,
        'usetype': useType,
        'phone': phoneNo,
        'message': textMessage,
        'senderid': senderId,
    }
    return requests.post(reqUrl, req_params)
# get response
# NOTE: you must provide real apikey, secretkey, usetype, mobile, senderid
# and message values before making the request.
response = sendPostRequest(URL, 'provided-api-key', 'provided-secret', 'prod/stage', 'valid-to-mobile', 'active-sender-id', 'message-text' )
# print response if you want
# BUG FIX: Python-2 print statement was a syntax error under Python 3.
print(response.text)
<?php
// Example: send an SMS campaign through the sms4india HTTP API using cURL.
//post
$url="https://www.sms4india.com/api/v1/sendCampaign";
$message = urlencode("message-text");// urlencode your message
$curl = curl_init();
curl_setopt($curl, CURLOPT_POST, 1);// set post data to true
curl_setopt($curl, CURLOPT_POSTFIELDS, "apikey=[povided-api-key]&secret=[provided-secret-key]&usetype=[stage or prod]&phone=[to-mobile]&senderid=[active-sender-id]&message=[$message]");// post data
// query parameter values must be given without squarebrackets.
// Optional Authentication:
curl_setopt($curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
curl_setopt($curl, CURLOPT_URL, $url);
// return the body as a string instead of echoing it directly
curl_setopt($curl, CURLOPT_RETURNTRANSFER, 1);
$result = curl_exec($curl);
curl_close($curl);
echo $result;
?>
|
9,363 | bf3e7f1aa9fd20b69e751da9ac8970c88b1144eb | """
Test the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.
"""
# STD
import os
import pickle
from copy import deepcopy
from collections import defaultdict
import argparse
from typing import Tuple, Dict, List
# EXT
import numpy as np
from tqdm import tqdm
import torch
# PROJECT
from uncertainty_estimation.utils.model_init import AVAILABLE_MODELS
from uncertainty_estimation.utils.model_init import init_models
from uncertainty_estimation.utils.datahandler import DataHandler
from uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer
# CONST
SCALES = [10, 100, 1000, 10000]
N_FEATURES = 100
RESULT_DIR = "../../data/results"
def run_perturbation_experiment(
    nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None
) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:
    """Runs the perturbation experiment for a single novelty estimator.

    Parameters
    ----------
    nov_an: NoveltyAnalyzer
        The novelty analyzer (handles scaling, imputation, evaluation)
    X_test: np.ndarray
        The test data to use
    scoring_func: str
        Which kind of novelty to evaluate (used for NN ensemble, where you can choose between
        'std' and 'entropy'

    Returns
    -------
    aucs_dict: dict
        a dictionary of lists of OOD detection AUCS for different scales. The list contains the
        detection AUCs for the same scale but different features.
    recall_dict: dict
        a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The
        list contains the recalls for the same scale but different features.

    Notes
    -----
    NOTE(review): ``X_test`` is only used for its column count; the perturbed
    data itself comes from ``nov_an.X_test``. Confirm that this is intended.
    """
    aucs_dict = defaultdict(list)
    recall_dict = defaultdict(list)
    for scale_adjustment in tqdm(SCALES):
        # Draw N_FEATURES distinct feature columns to perturb at this scale.
        random_sample = np.random.choice(
            np.arange(0, X_test.shape[1]), N_FEATURES, replace=False
        )
        for r in random_sample:
            # Multiply a single feature column of the preprocessed test set.
            X_test_adjusted = deepcopy(nov_an.X_test)
            X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
            nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
            nov_an.calculate_novelty(scoring_func=scoring_func)
            aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
            recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
    return aucs_dict, recall_dict
if __name__ == "__main__":
    # Fix RNG seeds for reproducible feature sampling and model init.
    np.random.seed(123)
    torch.manual_seed(123)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_origin", type=str, default="MIMIC", help="Which data to use"
    )
    parser.add_argument(
        "--models",
        type=str,
        nargs="+",
        default=AVAILABLE_MODELS,
        choices=AVAILABLE_MODELS,
        help="Determine the models which are being used for this experiment.",
    )
    parser.add_argument(
        "--result_dir",
        type=str,
        default=RESULT_DIR,
        help="Define the directory that results should be saved to.",
    )
    args = parser.parse_args()
    # Loading the data
    dh = DataHandler(args.data_origin)
    feature_names = dh.load_feature_names()
    train_data, test_data, val_data = dh.load_data_splits()
    y_name = dh.load_target_name()
    # One perturbation experiment per model / scoring function combination.
    for ne, scoring_funcs, name in init_models(
        input_dim=len(feature_names), selection=args.models, origin=args.data_origin
    ):
        print(name)
        nov_an = NoveltyAnalyzer(
            ne,
            train_data[feature_names].values,
            test_data[feature_names].values,
            val_data[feature_names].values,
            train_data[y_name].values,
            test_data[y_name].values,
            val_data[y_name].values,
        )
        nov_an.train()
        for scoring_func in scoring_funcs:
            aucs_dict, recall_dict = run_perturbation_experiment(
                nov_an, test_data[feature_names], scoring_func=scoring_func
            )
            # Results land in <result_dir>/<origin>/perturbation/<model>/detection/<scoring_func>/
            dir_name = os.path.join(
                args.result_dir,
                args.data_origin,
                "perturbation",
                name,
                "detection",
                scoring_func,
            )
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with open(os.path.join(dir_name, "recall.pkl"), "wb") as f:
                pickle.dump(recall_dict, f)
            with open(os.path.join(dir_name, "detect_auc.pkl"), "wb") as f:
                pickle.dump(aucs_dict, f)
|
9,364 | 28532fe798b6a764bec7ea511ba9e66a1d096b6f | #!/usr/bin/python
import argparse
import contextlib
import os.path
import shutil
import subprocess
import sys
import tempfile
from Bio import SeqIO
BOOTSTRAP_MODES = 'a',
# Some utilities
@contextlib.contextmanager
def sequences_in_format(sequences, fmt='fasta', **kwargs):
    """Write *sequences* to a temp file in *fmt* and yield the file's path.

    The file exists only for the duration of the `with` block; **kwargs are
    forwarded to tempfile.NamedTemporaryFile (e.g. prefix=, dir=).
    """
    with tempfile.NamedTemporaryFile(**kwargs) as tf:
        SeqIO.write(sequences, tf, fmt)
        tf.flush()  # ensure data is on disk before callers read via the path
        yield tf.name
@contextlib.contextmanager
def temp_dir(**kwargs):
    """Maintains a temporary directory for the life of the context manager."""
    temp_dir = tempfile.mkdtemp(**kwargs)
    try:
        yield temp_dir
    finally:
        # Cleanup
        # ermm... this is breaking something (maybe bootstrapping replicates?), so leaving out for now
        #shutil.rmtree(temp_dir)
        # NOTE(review): cleanup is deliberately disabled, so every call leaks
        # a directory; revisit once the bootstrapping issue is understood.
        pass
def stripext(f, basename=False):
    """Drop the extension from *f*; with basename=True also drop the directory."""
    if basename:
        return stripext(os.path.basename(f))
    root, _ext = os.path.splitext(f)
    return root
def nonextant_file(path):
    """Return *path* unchanged; raise ValueError if it already exists."""
    if not os.path.exists(path):
        return path
    raise ValueError("Exists: " + path)
def joiner(base_path):
    """Return a function that joins its arguments onto *base_path*."""
    def join_under(*parts):
        return os.path.join(base_path, *parts)
    return join_under
def move(x, y):
    """Copy file *x* to *y* (despite the name, this has always been a copy).

    Uses shutil.copy instead of spawning an external `cp` process: it is
    portable, avoids a fork/exec per file, and failures raise OSError
    directly instead of a CalledProcessError from the shell-out.
    """
    shutil.copy(x, y)
def raxml(sequences, output_tree, stats_path=None, log_path=None, quiet=False,
          executable='raxmlHPC-SSE3', model='GTRGAMMA', threads=None,
          rapid_bootstrap=None, bootstrap_seed=None, tmp_prefix=None, outgroup=None):
    """Run RAxML on *sequences* and write the resulting tree to *output_tree*.

    Optionally copies RAxML's info/log files to *stats_path* / *log_path*
    and runs a rapid-bootstrap analysis. Raises SystemExit with RAxML's
    return code if the subprocess fails.
    """
    name = os.path.basename(os.path.splitext(output_tree)[0])

    def f(n):
        "Gets the RAxML file name associated with a key"
        return 'RAxML_{1}.{0}'.format(name, n)

    with temp_dir(prefix='raxml-') as td:
        with sequences_in_format(sequences, fmt='phylip-relaxed',
                prefix=tmp_prefix, dir=td) as seq_file:
            p = joiner(td)
            # note: -p is needed for some reason now but didn't use to be?
            cmd = [executable, '-n', name, '-m', model, '-s', seq_file, '-p', '9988']
            if threads and threads > 1:
                cmd.extend(('-T', str(threads)))
            if rapid_bootstrap:
                cmd.extend(('-f', 'a', '-x', bootstrap_seed,
                            '-N', rapid_bootstrap))
            if outgroup:
                cmd.extend(('-o', outgroup))
            stdout = stderr = None
            if quiet:
                # BUG FIX: devnull must be opened for *writing* to serve as a
                # child's stdout/stderr (it was opened in read mode).
                stdout = stderr = open(os.devnull, 'w')
            # BUG FIX: materialize the command as a list. On Python 3 map()
            # is lazy, so ' '.join(cmd) below would have exhausted it and
            # check_call would have received an empty command.
            cmd = [str(c) for c in cmd]
            sys.stderr.write("Running: " + ' '.join(cmd) + "\n")
            try:
                subprocess.check_call(cmd, stdout=stdout, stderr=stderr, cwd=td)
            except subprocess.CalledProcessError as e:
                raise SystemExit(e.returncode)
            finally:
                if stdout is not None:
                    stdout.close()
            # Get the result - either bootstrap-annotated tree or result
            key = 'bipartitions' if rapid_bootstrap else 'result'
            move(p(f(key)), output_tree)
            if stats_path:
                move(p(f('info')), stats_path)
            if log_path:
                move(p(f('log')), log_path)
def main():
    """Parse command-line arguments and invoke the raxml() wrapper."""
    parser = argparse.ArgumentParser(description="""Simple wrapper around
            RAxML. Abstracts executable selection and sequence formatting; only keeps
            desired files; name specification. Most arguments are *not* supported""")
    parser.add_argument('alignment_file', type=argparse.FileType('r'),
            help="""Input alignment""")
    parser.add_argument('--input-format', default='fasta',
            help="""Format of input file [default: %(default)s]""")
    parser.add_argument('output_tree', type=nonextant_file, help="""Destination
            for output tree""")
    parser.add_argument('--stats', type=nonextant_file, metavar="<stats file>",
            help="""Save RAxML stats to <stats file>""")
    parser.add_argument('--log', type=nonextant_file, metavar="<log file>",
            help="""Write RAxML log file to <log file>""")
    parser.add_argument('-q', '--quiet', action='store_true',
            help="""Suppress output""")
    bs_group = parser.add_argument_group("Bootstrap Options")
    bs_group.add_argument('--rapid-bootstrap', metavar='N',
            help="""Run rapid bootstrap analysis with N replicates""",
            type=int)
    bs_group.add_argument('-x', '--bootstrap-seed', help="""Bootstrap seed""",
            dest='bootstrap_seed', type=int, default=1)
    rax_group = parser.add_argument_group(title="""RAxML options""")
    rax_group.add_argument('-T', '--threads', help="""Number of
            threads to use [default: 1]""", type=int)
    rax_group.add_argument('--executable', help="""RAxML executable to use.
            [default: raxmlHPC-PTHREADS-SSE3 if threads > 1, raxmlHPC-SSE3
            otherwise]""")
    rax_group.add_argument('-m', '--model', default='GTRGAMMA', help="""RAxML
            model to use [default: %(default)s]""")
    parser.add_argument('-o', '--outgroup',
            help="""Fix output for tree""")

    args = parser.parse_args()
    if not args.executable:
        # BUG FIX: match the documented behavior ("PTHREADS if threads > 1");
        # the old truthiness test picked the PTHREADS build even for -T 1.
        args.executable = ('raxmlHPC-PTHREADS-SSE3'
                           if args.threads and args.threads > 1
                           else 'raxmlHPC-SSE3')

    with args.alignment_file as fp:
        sequences = SeqIO.parse(fp, args.input_format)
        raxml(sequences, args.output_tree, executable=args.executable,
                stats_path=args.stats, quiet=args.quiet,
                threads=args.threads, model=args.model, log_path=args.log,
                rapid_bootstrap=args.rapid_bootstrap,
                bootstrap_seed=args.bootstrap_seed,
                outgroup=args.outgroup,
                tmp_prefix=stripext(fp.name, True))

if __name__ == '__main__':
    main()
|
9,365 | 7f7adc367e4f3b8ee721e42f5d5d0770f40828c9 | from setuptools import setup
import os.path
# Get the long description from the README file
with open('README.rst') as f:
long_description = f.read()
setup(name='logging_exceptions',
version='0.1.8',
py_modules=['logging_exceptions'],
author="Bernhard C. Thiel",
author_email="thiel@tbi.univie.ac.at",
description="Self-logging exceptions: Attach log messages to exceptions and output them conditionally.",
long_description=long_description,
url='https://github.com/Bernhard10/logging_exceptions',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
],
keywords='logging exceptions'
)
|
9,366 | ae27f97b5633309d85b9492e1a0f268847c24cd5 | import random
import numpy as np
import matplotlib.pyplot as plt
import torchvision
def plot_image(img, ax, title):
    """Draw a channels-first (C,H,W) image array on *ax*, transposed to HWC."""
    ax.imshow(np.transpose(img, (1,2,0)) , interpolation='nearest')
    ax.set_title(title, fontsize=20)
def to_numpy(image, vsc):
    """Reshape a flat tensor to (1, C, H, W) using vsc's dimensions and
    return torchvision's image grid as a detached CPU numpy array."""
    return torchvision.utils.make_grid(
        image.view(1, vsc.channels, vsc.height, vsc.width)
    ).cpu().detach().numpy()
def plot_encoding(image, vsc, latent_sz, alpha=None, width=1/7):
    """Plot input image, its latent code (bar chart), and its reconstruction.

    NOTE(review): assumes vsc exposes transform/inverse_transform and a model
    with forward/reparameterize/decode — confirm against the VSC wrapper.
    """
    image = vsc.transform(image).to(vsc.device)
    # decoded, mu, logvar, logspike = vsc.model.forward(image)
    decoded_params = vsc.model.forward(image)
    # Sample a latent code from the posterior parameters and decode it.
    z = vsc.model.reparameterize(*decoded_params[1:])
    img = vsc.inverse_transform(vsc.model.decode(z))
    z = z.cpu().detach().numpy()[0]
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14,5))
    plot_image(to_numpy(image, vsc), ax0, 'Input Image')
    ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')
    ax1.scatter(np.arange(latent_sz), z, color='blue')
    if alpha is not None:
        title = r"Latent Dimension %d - $\alpha$ = %.2f " % (latent_sz, alpha)
    else:
        title = r"Latent Dimension %d" % (latent_sz)
    ax1.set_title(title, fontsize=20)
    plot_image(to_numpy(img, vsc), ax2, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_encoding_tcvae(image, vae, latent_sz, alpha=1, width=1/7):
    """Same visualization as plot_encoding but for a TC-VAE style model
    exposing reconstruct_img (returns samples, params, codes, code params)."""
    xs, x_params, zs, z_params = vae.reconstruct_img(image.to('cuda'))
    img = xs.cpu()[0]
    z = zs.cpu().detach().numpy()[0]
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(14,5))
    plot_image(to_numpy(image, vae), ax0, 'Input Image')
    ax1.bar(np.arange(latent_sz), height=z, width=width, align='center')
    ax1.scatter(np.arange(latent_sz), z, color='blue')
    ax1.set_title(r"Latent Dimension %d - $\alpha$ = %.2f " % \
                 (latent_sz, alpha), fontsize=20)
    plot_image(to_numpy(img, vae), ax2, 'Decoded Image')
    plt.subplots_adjust(hspace=0.5)
def plot_horizontal_traversal(image, vsc, latent_sz, length,
                              delta, threshold=1e-4, plot_all=False,
                              plot_list=None, width=1/4, n_indices=15, plot=True):
    """Build (and optionally plot) a grid of decoded images obtained by
    stepping selected latent dimensions by *delta*, *length* times.

    Returns the traversal grid as a single numpy image (rows = latent dims,
    columns = traversal steps).
    """
    image = vsc.transform(image).to(vsc.device)
    # decoded, mu, logvar, logspike = vsc.model.forward(image)
    decoded_params = vsc.model.forward(image)
    z = vsc.model.reparameterize(*decoded_params[1:])
    img = vsc.inverse_transform(vsc.model.decode(z))
    z_ = z.cpu().detach().numpy()[0]
    if plot:
        plt.bar(np.arange(latent_sz), height=z_, width=width, align='center')
        plt.scatter(np.arange(latent_sz), z_, color='blue')
        plt.show()
    # Only traverse dimensions whose code magnitude exceeds the threshold.
    non_zero = [i for i in range(latent_sz) if np.abs(z_[i]) > threshold]
    # NOTE(review): np.random.choice samples WITH replacement here, so the
    # same dimension can appear more than once — confirm if intended.
    inds = np.random.choice(non_zero, n_indices)
    if plot:
        print(inds)
    if not plot_all:
        non_zero = inds # [ind]
    if plot_list:
        non_zero = plot_list
    if plot:
        print(non_zero)
    hor_traversal = []
    for ind in non_zero:
        images = []
        z1 = z.clone()
        for i in range(length):
            img = to_numpy(vsc.model.decode(z1), vsc)
            img = np.transpose(img, (1,2,0))
            # Draw a 1-pixel white frame around each decoded tile.
            img[:,0] = 1
            img[:,-1] = 1
            img[0,:] = 1
            img[-1,:] = 1
            images.append(img)
            # Step the selected dimension towards (and past) zero.
            z1[0, ind] = z1[0, ind] + delta if z[0,ind] < 0 else z1[0, ind] - delta
        hor_traversal.append(np.concatenate(images, axis=1))
    traversal = np.concatenate(hor_traversal, axis=0)
    if plot:
        plt.figure(figsize=(14,24))
        plt.axis('off')
        plt.imshow(traversal)
        plt.show()
    return traversal
9,367 | 60c3f6775d5112ff178bd3774c776819573887bb | import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
from threading import Thread
# SMTP configuration, read from the environment at import time.
# Any of these may be None if the variable is unset.
FROM = os.getenv('EMAIL_FROM')
TO = os.getenv('EMAIL_TO')
HOST = os.getenv('EMAIL_HOST')
PORT = os.getenv('EMAIL_PORT')
PASSWORD = os.getenv('EMAIL_PASSWORD')
def send_email(body, subject=None):
    """
    Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'

    Args:
        body: plain-text message body.
        subject: optional subject line; defaults to an ERROR LOG header
            stamped with the current time.

    BUG FIX: the old default argument evaluated the timestamp once at module
    import, so every email reused the import-time timestamp. Computing it
    inside the call stamps each email with its actual send time.
    """
    if subject is None:
        subject = f'ERROR LOG [{datetime.strftime(datetime.now(), "%b %d, %Y - %I:%M %p")}]'
    # Send the email on a separate thread so the server doesn't
    # have to wait for it to finish
    thread = Thread(target=_send, args=(body, subject))
    thread.start()
def _send(body, subject):
    """Build the MIME message and deliver it synchronously over SMTP.

    Returns sendmail()'s result (dict of refused recipients, empty on success).
    BUG FIX: the connection is now closed in a finally block, so a failure
    in starttls/login/sendmail no longer leaks the SMTP socket.
    """
    msg = MIMEMultipart()
    msg['From'] = FROM
    msg['To'] = TO
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain'))
    server = smtplib.SMTP(host=HOST, port=int(PORT))
    try:
        server.starttls()
        server.login(FROM, PASSWORD)
        senders = server.sendmail(FROM, TO, msg.as_string())
    finally:
        server.quit()
    return senders
|
9,368 | 6b6b734c136f3c4ed5b2789ab384bab9a9ea7b58 | # Generated by Django 3.0.5 on 2020-05-02 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 'username' CharField to the City model (weatherData app)."""

    dependencies = [
        ('weatherData', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='city',
            name='username',
            # Default backfills existing rows with a placeholder account.
            field=models.CharField(default='test@gmail.com', max_length=100),
        ),
    ]
|
9,369 | da218e6d9ee311eefb8e9ae4dac5053793eb5514 | """
Class for manage tables in Storage and Big Query
"""
# pylint: disable=invalid-name, too-many-locals, too-many-branches, too-many-arguments,line-too-long,R0801,consider-using-f-string
from pathlib import Path
import json
from copy import deepcopy
import textwrap
import inspect
from io import StringIO
from loguru import logger
from google.cloud import bigquery
import ruamel.yaml as ryaml
import requests
import pandas as pd
import google.api_core.exceptions
from basedosdados.upload.base import Base
from basedosdados.upload.storage import Storage
from basedosdados.upload.dataset import Dataset
from basedosdados.upload.datatypes import Datatype
from basedosdados.upload.metadata import Metadata
from basedosdados.exceptions import BaseDosDadosException
class Table(Base):
"""
Manage tables in Google Cloud Storage and BigQuery.
"""
    def __init__(self, dataset_id, table_id, **kwargs):
        """Set up metadata folders and fully-qualified BigQuery names.

        Args:
            dataset_id (str): dataset identifier; dashes normalized to underscores.
            table_id (str): table identifier; dashes normalized to underscores.
            **kwargs: forwarded to Base and Metadata.
        """
        super().__init__(**kwargs)
        self.table_id = table_id.replace("-", "_")
        self.dataset_id = dataset_id.replace("-", "_")
        self.dataset_folder = Path(self.metadata_path / self.dataset_id)
        # NOTE(review): the folder uses the raw table_id (dashes kept) while
        # the BigQuery names below use the normalized one — confirm intended.
        self.table_folder = self.dataset_folder / table_id
        # "<project>.<dataset>.<table>"; staging lives in "<dataset>_staging".
        self.table_full_name = dict(
            prod=f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}",
            staging=f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}",
        )
        self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
        self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
    @property
    def table_config(self):
        """
        Load table_config.yaml

        Re-read from disk on every access, so edits to the YAML are picked up.
        """
        return self._load_yaml(self.table_folder / "table_config.yaml")
    def _get_table_obj(self, mode):
        """
        Get table object from BigQuery

        Args:
            mode (str): Which environment's client to use [prod|staging].

        Raises:
            google.api_core.exceptions.NotFound: if the table does not exist
                (relied upon by table_exists).
        """
        return self.client[f"bigquery_{mode}"].get_table(self.table_full_name[mode])
def _is_partitioned(self):
"""
Check if table is partitioned
"""
## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic
partitions = self.table_config["partitions"]
if partitions is None or len(partitions) == 0:
return False
if isinstance(partitions, list):
# check if any None inside list.
# False if it is the case Ex: [None, 'partition']
# True otherwise Ex: ['partition1', 'partition2']
return all(item is not None for item in partitions)
raise ValueError("Partitions must be a list or None")
    def _load_schema(self, mode="staging"):
        """Load schema from table_config.yaml

        Args:
            mode (bool): Which dataset to create [prod|staging].

        Writes the resulting column list to schema-<mode>.json and returns the
        schema produced by BigQuery's ``schema_from_json``.
        """
        self._check_mode(mode)
        json_path = self.table_folder / f"schema-{mode}.json"
        columns = self.table_config["columns"]
        if mode == "staging":
            new_columns = []
            for c in columns:
                # case is_in_staging are None then must be True
                is_in_staging = (
                    True if c.get("is_in_staging") is None else c["is_in_staging"]
                )
                # append columns declared in table_config.yaml to schema only if is_in_staging: True
                if is_in_staging and not c.get("is_partition"):
                    # staging loads everything as STRING; casting happens in publish.sql
                    c["type"] = "STRING"
                    new_columns.append(c)
            del columns
            columns = new_columns
        elif mode == "prod":
            schema = self._get_table_obj(mode).schema
            # get field names for fields at schema and at table_config.yaml
            column_names = [c["name"] for c in columns]
            schema_names = [s.name for s in schema]
            # check if there are mismatched fields
            not_in_columns = [name for name in schema_names if name not in column_names]
            not_in_schema = [name for name in column_names if name not in schema_names]
            # raise if field is not in table_config
            if not_in_columns:
                raise BaseDosDadosException(
                    "Column {error_columns} was not found in table_config.yaml. Are you sure that "
                    "all your column names between table_config.yaml, publish.sql and "
                    "{project_id}.{dataset_id}.{table_id} are the same?".format(
                        error_columns=not_in_columns,
                        project_id=self.table_config["project_id_prod"],
                        dataset_id=self.table_config["dataset_id"],
                        table_id=self.table_config["table_id"],
                    )
                )
            # raise if field is not in schema
            if not_in_schema:
                raise BaseDosDadosException(
                    "Column {error_columns} was not found in publish.sql. Are you sure that "
                    "all your column names between table_config.yaml, publish.sql and "
                    "{project_id}.{dataset_id}.{table_id} are the same?".format(
                        error_columns=not_in_schema,
                        project_id=self.table_config["project_id_prod"],
                        dataset_id=self.table_config["dataset_id"],
                        table_id=self.table_config["table_id"],
                    )
                )
            # if field is in schema, get field_type and field_mode
            for c in columns:
                for s in schema:
                    if c["name"] == s.name:
                        c["type"] = s.field_type
                        c["mode"] = s.mode
                        break
        ## force utf-8, write schema_{mode}.json
        json.dump(columns, (json_path).open("w", encoding="utf-8"))
        # load new created schema
        return self.client[f"bigquery_{mode}"].schema_from_json(str(json_path))
    def _make_publish_sql(self):
        """Create publish.sql with columns and bigquery_type

        Generates a CREATE VIEW statement that SAFE_CASTs every staging
        column (stored as STRING) to its configured BigQuery type, and
        writes it to <table_folder>/publish.sql.
        """
        ### publish.sql header and instructions
        publish_txt = """
        /*
        Query para publicar a tabela.

        Esse é o lugar para:
            - modificar nomes, ordem e tipos de colunas
            - dar join com outras tabelas
            - criar colunas extras (e.g. logs, proporções, etc.)

        Qualquer coluna definida aqui deve também existir em `table_config.yaml`.

        # Além disso, sinta-se à vontade para alterar alguns nomes obscuros
        # para algo um pouco mais explícito.

        TIPOS:
            - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
            - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
            - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
        */
        """

        # remove triple quotes extra space
        publish_txt = inspect.cleandoc(publish_txt)
        publish_txt = textwrap.dedent(publish_txt)

        # add create table statement
        project_id_prod = self.client["bigquery_prod"].project
        publish_txt += f"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n"

        # sort columns by is_partition, partitions_columns come first
        if self._is_partitioned():
            columns = sorted(
                self.table_config["columns"],
                key=lambda k: (k["is_partition"] is not None, k["is_partition"]),
                reverse=True,
            )
        else:
            columns = self.table_config["columns"]

        # add columns in publish.sql
        for col in columns:
            name = col["name"]
            # columns without an explicit type default to STRING
            bigquery_type = (
                "STRING"
                if col["bigquery_type"] is None
                else col["bigquery_type"].upper()
            )

            publish_txt += f"SAFE_CAST({name} AS {bigquery_type}) {name},\n"
        ## remove last comma
        publish_txt = publish_txt[:-2] + "\n"

        # add from statement
        project_id_staging = self.client["bigquery_staging"].project
        publish_txt += (
            f"FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t"
        )

        # save publish.sql in table_folder
        (self.table_folder / "publish.sql").open("w", encoding="utf-8").write(
            publish_txt
        )
    def _make_template(self, columns, partition_columns, if_table_config_exists, force_columns):
        """Create table_config.yaml via Metadata, then regenerate publish.sql.

        Partition columns are listed first in the generated config.
        """
        # create table_config.yaml with metadata
        self.metadata.create(
            if_exists=if_table_config_exists,
            columns=partition_columns + columns,
            partition_columns=partition_columns,
            force_columns=force_columns,
            table_only=False,
        )

        self._make_publish_sql()
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace("edit#gid=", "export?format=csv&gid=")
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).content.decode("utf-8")))
except Exception as e:
raise BaseDosDadosException(
"Check if your google sheet Share are: Anyone on the internet with this link can view"
) from e
def table_exists(self, mode):
"""Check if table exists in BigQuery.
Args:
mode (str): Which dataset to check [prod|staging].
"""
try:
ref = self._get_table_obj(mode=mode)
except google.api_core.exceptions.NotFound:
ref = None
return bool(ref)
    def update_columns(self, columns_config_url_or_path=None):
        """
        Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. Also regenerate
        publish.sql and autofill type using bigquery_type.

        The sheet must contain the columns:
            - name: column name
            - description: column description
            - bigquery_type: column bigquery type
            - measurement_unit: column mesurement unit
            - covered_by_dictionary: column related dictionary
            - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>
            - temporal_coverage: column temporal coverage
            - has_sensitive_data: the column has sensitive data
            - observations: column observations

        Args:
            columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.
                Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
                Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.

        """
        # Round-trip YAML loader that preserves quoting/indentation of the
        # existing table_config.yaml.
        ruamel = ryaml.YAML()
        ruamel.preserve_quotes = True
        ruamel.indent(mapping=4, sequence=6, offset=4)
        table_config_yaml = ruamel.load(
            (self.table_folder / "table_config.yaml").open(encoding="utf-8")
        )
        if "https://docs.google.com/spreadsheets/d/" in columns_config_url_or_path:
            if (
                "edit#gid=" not in columns_config_url_or_path
                or "https://docs.google.com/spreadsheets/d/"
                not in columns_config_url_or_path
                or not columns_config_url_or_path.split("=")[1].isdigit()
            ):
                raise BaseDosDadosException(
                    "The Google sheet url not in correct format."
                    "The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>"
                )
            df = self._sheet_to_df(columns_config_url_or_path)
        else:
            # Local file: dispatch on extension.
            file_type = columns_config_url_or_path.split(".")[-1]
            if file_type == "csv":
                df = pd.read_csv(columns_config_url_or_path, encoding="utf-8")
            elif file_type in ["xls", "xlsx", "xlsm", "xlsb", "odf", "ods", "odt"]:
                df = pd.read_excel(columns_config_url_or_path)
            else:
                raise BaseDosDadosException(
                    "File not suported. Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported."
                )
        # The sentinel string "NULL" means "keep the existing YAML value".
        df = df.fillna("NULL")
        required_columns = [
            "name",
            "bigquery_type",
            "description",
            "temporal_coverage",
            "covered_by_dictionary",
            "directory_column",
            "measurement_unit",
            "has_sensitive_data",
            "observations",
        ]
        not_found_columns = required_columns.copy()
        for sheet_column in df.columns.tolist():
            for required_column in required_columns:
                if sheet_column == required_column:
                    not_found_columns.remove(required_column)
        if not_found_columns:
            raise BaseDosDadosException(
                f"The following required columns are not found: {', '.join(not_found_columns)}."
            )
        # Iterate the sheet row-wise, in required_columns order.
        columns_parameters = zip(
            *[df[required_column].tolist() for required_column in required_columns]
        )
        for (
            name,
            bigquery_type,
            description,
            temporal_coverage,
            covered_by_dictionary,
            directory_column,
            measurement_unit,
            has_sensitive_data,
            observations,
        ) in columns_parameters:
            for col in table_config_yaml["columns"]:
                if col["name"] == name:
                    col["bigquery_type"] = (
                        col["bigquery_type"]
                        if bigquery_type == "NULL"
                        else bigquery_type.lower()
                    )
                    col["description"] = (
                        col["description"] if description == "NULL" else description
                    )
                    col["temporal_coverage"] = (
                        col["temporal_coverage"]
                        if temporal_coverage == "NULL"
                        else [temporal_coverage]
                    )
                    col["covered_by_dictionary"] = (
                        "no"
                        if covered_by_dictionary == "NULL"
                        else covered_by_dictionary
                    )
                    # directory_column format: <dataset_id>.<table_id>:<column_name>
                    dataset = directory_column.split(".")[0]
                    col["directory_column"]["dataset_id"] = (
                        col["directory_column"]["dataset_id"]
                        if dataset == "NULL"
                        else dataset
                    )
                    table = directory_column.split(".")[-1].split(":")[0]
                    col["directory_column"]["table_id"] = (
                        col["directory_column"]["table_id"]
                        if table == "NULL"
                        else table
                    )
                    column = directory_column.split(".")[-1].split(":")[-1]
                    col["directory_column"]["column_name"] = (
                        col["directory_column"]["column_name"]
                        if column == "NULL"
                        else column
                    )
                    col["measurement_unit"] = (
                        col["measurement_unit"]
                        if measurement_unit == "NULL"
                        else measurement_unit
                    )
                    col["has_sensitive_data"] = (
                        "no" if has_sensitive_data == "NULL" else has_sensitive_data
                    )
                    col["observations"] = (
                        col["observations"] if observations == "NULL" else observations
                    )
        # Write the updated YAML back, preserving formatting.
        with open(self.table_folder / "table_config.yaml", "w", encoding="utf-8") as f:
            ruamel.dump(table_config_yaml, f)

        # regenerate publish.sql
        self._make_publish_sql()
    def init(
        self,
        data_sample_path=None,
        if_folder_exists="raise",
        if_table_config_exists="raise",
        source_format="csv",
        force_columns = False,
        columns_config_url_or_path=None,
    ): # sourcery skip: low-code-quality
        """Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.
        The folder should contain:
        * `table_config.yaml`
        * `publish.sql`
        You can also point to a sample of the data to auto complete columns names.
        Args:
            data_sample_path (str, pathlib.PosixPath): Optional.
                Data sample path to auto complete columns names.
                It supports Comma Delimited CSV, Apache Avro and
                Apache Parquet.
            if_folder_exists (str): Optional.
                What to do if table folder exists
                * 'raise' : Raises FileExistsError
                * 'replace' : Replace folder
                * 'pass' : Do nothing
            if_table_config_exists (str): Optional
                What to do if table_config.yaml and publish.sql exists
                * 'raise' : Raises FileExistsError
                * 'replace' : Replace files with blank template
                * 'pass' : Do nothing
            source_format (str): Optional
                Data source format. Only 'csv', 'avro' and 'parquet'
                are supported. Defaults to 'csv'.
            force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public google sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
                Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
        Raises:
            FileExistsError: If folder exists and replace is False.
            NotImplementedError: If data sample is not in supported type or format.
        """
        # A table can only be initialized inside an already-created dataset folder.
        if not self.dataset_folder.exists():
            raise FileExistsError(
                f"Dataset folder {self.dataset_folder} folder does not exists. "
                "Create a dataset before adding tables."
            )
        # Create the table folder; exist_ok only when the caller asked to replace.
        try:
            self.table_folder.mkdir(exist_ok=(if_folder_exists == "replace"))
        except FileExistsError as e:
            if if_folder_exists == "raise":
                raise FileExistsError(
                    f"Table folder already exists for {self.table_id}. "
                ) from e
            if if_folder_exists == "pass":
                return self
        # Without a data sample we cannot infer columns, unless configs are kept as-is.
        if not data_sample_path and if_table_config_exists != "pass":
            raise BaseDosDadosException(
                "You must provide a path to correctly create config files"
            )
        partition_columns = []
        if isinstance(
            data_sample_path,
            (
                str,
                Path,
            ),
        ):
            # Check if partitioned and get data sample and partition columns
            data_sample_path = Path(data_sample_path)
            if data_sample_path.is_dir():
                # Pick the first matching file inside a hive-partitioned directory.
                # NOTE(review): raises IndexError if the directory contains no
                # f".{source_format}" file — confirm whether a friendlier error is wanted.
                data_sample_path = [
                    f
                    for f in data_sample_path.glob("**/*")
                    if f.is_file() and f.suffix == f".{source_format}"
                ][0]
                # Partition columns come from the `<key>=<value>` path segments.
                partition_columns = [
                    k.split("=")[0]
                    for k in data_sample_path.as_posix().split("/")
                    if "=" in k
                ]
            columns = Datatype(self, source_format).header(data_sample_path)
        else:
            # No sample provided: fall back to a single placeholder column.
            columns = ["column_name"]
        if if_table_config_exists == "pass":
            # Check if config files exists before passing
            if (
                Path(self.table_folder / "table_config.yaml").is_file()
                and Path(self.table_folder / "publish.sql").is_file()
            ):
                pass
            # Raise if no sample to determine columns
            elif not data_sample_path:
                raise BaseDosDadosException(
                    "You must provide a path to correctly create config files"
                )
            else:
                self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
        elif if_table_config_exists == "raise":
            # Check if config files already exist
            if (
                Path(self.table_folder / "table_config.yaml").is_file()
                and Path(self.table_folder / "publish.sql").is_file()
            ):
                raise FileExistsError(
                    f"table_config.yaml and publish.sql already exists at {self.table_folder}"
                )
            # if config files don't exist, create them
            self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
        else:
            # if_table_config_exists == "replace": regenerate the templates unconditionally.
            self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
        # Optionally sync column metadata from an architecture sheet/file.
        if columns_config_url_or_path is not None:
            self.update_columns(columns_config_url_or_path)
        return self
    def create(
        self,
        path=None,
        force_dataset=True,
        if_table_exists="raise",
        if_storage_data_exists="raise",
        if_table_config_exists="raise",
        source_format="csv",
        force_columns=False,
        columns_config_url_or_path=None,
        dataset_is_public=True,
        location=None,
        chunk_size=None,
    ):
        """Creates BigQuery table at staging dataset.
        If you add a path, it automatically saves the data in the storage,
        creates a datasets folder and BigQuery location, besides creating the
        table and its configuration files.
        The new table should be located at `<dataset_id>_staging.<table_id>` in BigQuery.
        It looks for data saved in Storage at `<bucket_name>/staging/<dataset_id>/<table_id>/*`
        and builds the table.
        It currently supports the types:
        - Comma Delimited CSV
        - Apache Avro
        - Apache Parquet
        Data can also be partitioned following the hive partitioning scheme
        `<key1>=<value1>/<key2>=<value2>` - for instance,
        `year=2012/country=BR`. The partition is automatically detected
        by searching for `partitions` on the `table_config.yaml`.
        Args:
            path (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
            force_dataset (bool): Creates `<dataset_id>` folder and BigQuery Dataset if it doesn't exists.
            if_table_exists (str): Optional
                What to do if table exists
                * 'raise' : Raises Conflict exception
                * 'replace' : Replace table
                * 'pass' : Do nothing
            if_storage_data_exists (str): Optional.
                What to do if data already exists on your bucket:
                * 'raise' : Raises Conflict exception
                * 'replace' : Replace table
                * 'pass' : Do nothing
            if_table_config_exists (str): Optional.
                What to do if config files already exist
                * 'raise': Raises FileExistError
                * 'replace': Replace with blank template
                * 'pass': Do nothing
            source_format (str): Optional
                Data source format. Only 'csv', 'avro' and 'parquet'
                are supported. Defaults to 'csv'.
            force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public google sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
                Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
            dataset_is_public (bool): Control if prod dataset is public or not. By default staging datasets like `dataset_id_staging` are not public.
            location (str): Optional. Location of dataset data.
                List of possible region names locations: https://cloud.google.com/bigquery/docs/locations
            chunk_size (int): Optional
                The size of a chunk of data whenever iterating (in bytes).
                This must be a multiple of 256 KB per the API specification.
                If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
        """
        if path is None:
            # Look if table data already exists at Storage
            data = self.client["storage_staging"].list_blobs(
                self.bucket_name, prefix=f"staging/{self.dataset_id}/{self.table_id}"
            )
            # Raise: Cannot create table without external data
            if not data:
                raise BaseDosDadosException(
                    "You must provide a path for uploading data"
                )
        # Add data to storage
        if isinstance(
            path,
            (
                str,
                Path,
            ),
        ):
            Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
                path,
                mode="staging",
                if_exists=if_storage_data_exists,
                chunk_size=chunk_size,
            )
        # Create Dataset if it doesn't exist
        if force_dataset:
            dataset_obj = Dataset(self.dataset_id, **self.main_vars)
            try:
                dataset_obj.init()
            except FileExistsError:
                pass
            dataset_obj.create(
                if_exists="pass", location=location, dataset_is_public=dataset_is_public
            )
        # (Re)build the local table folder and config files from the sample.
        self.init(
            data_sample_path=path,
            if_folder_exists="replace",
            if_table_config_exists=if_table_config_exists,
            columns_config_url_or_path=columns_config_url_or_path,
            source_format=source_format,
            force_columns=force_columns
        )
        # The staging table is an EXTERNAL table over the Storage prefix.
        table = bigquery.Table(self.table_full_name["staging"])
        table.external_data_configuration = Datatype(
            self, source_format, "staging", partitioned=self._is_partitioned()
        ).external_config
        # Lookup if table already exists
        table_ref = None
        try:
            table_ref = self.client["bigquery_staging"].get_table(
                self.table_full_name["staging"]
            )
        except google.api_core.exceptions.NotFound:
            pass
        if isinstance(table_ref, google.cloud.bigquery.table.Table):
            if if_table_exists == "pass":
                return None
            if if_table_exists == "raise":
                raise FileExistsError(
                    "Table already exists, choose replace if you want to overwrite it"
                )
            if if_table_exists == "replace":
                self.delete(mode="staging")
        self.client["bigquery_staging"].create_table(table)
        logger.success(
            "{object} {object_id} was {action}!",
            object_id=self.table_id,
            object="Table",
            action="created",
        )
        return None
    def update(self, mode="all"):
        """Updates BigQuery schema and description.
        Also persists the rendered table description locally under
        `<metadata_path>/<dataset_id>/<table_id>/table_description.txt`.
        Args:
            mode (str): Optional.
                Table of which table to update [prod|staging|all]
        """
        self._check_mode(mode)
        # Normalize "all" into the explicit list of modes to touch.
        mode = ["prod", "staging"] if mode == "all" else [mode]
        for m in mode:
            # Skip modes whose table does not exist in BigQuery.
            try:
                table = self._get_table_obj(m)
            except google.api_core.exceptions.NotFound:
                continue
            # if m == "staging":
            table.description = self._render_template(
                Path("table/table_description.txt"), self.table_config
            )
            # save table description
            with open(
                self.metadata_path
                / self.dataset_id
                / self.table_id
                / "table_description.txt",
                "w",
                encoding="utf-8",
            ) as f:
                f.write(table.description)
            # when mode is staging the table schema already exists
            table.schema = self._load_schema(m)
            # Only prod gets its schema pushed; staging keeps its existing one.
            fields = ["description", "schema"] if m == "prod" else ["description"]
            self.client[f"bigquery_{m}"].update_table(table, fields=fields)
        logger.success(
            " {object} {object_id} was {action}!",
            object_id=self.table_id,
            object="Table",
            action="updated",
        )
def publish(self, if_exists="raise"):
"""Creates BigQuery table at production dataset.
Table should be located at `<dataset_id>.<table_id>`.
It creates a view that uses the query from
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.
Make sure that all columns from the query also exists at
`<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including
the partitions.
Args:
if_exists (str): Optional.
What to do if table exists.
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
Todo:
* Check if all required fields are filled
"""
if if_exists == "replace":
self.delete(mode="prod")
self.client["bigquery_prod"].query(
(self.table_folder / "publish.sql").open("r", encoding="utf-8").read()
).result()
self.update()
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="published",
)
    def delete(self, mode):
        """Deletes table in BigQuery.
        Args:
            mode (str): Table of which table to delete [prod|staging]
        """
        self._check_mode(mode)
        if mode == "all":
            # NOTE(review): assumes self.table_full_name["all"] maps each mode
            # to its fully-qualified table name — confirm against the class
            # attribute; the other branches index it with a plain mode string.
            for m, n in self.table_full_name[mode].items():
                self.client[f"bigquery_{m}"].delete_table(n, not_found_ok=True)
            logger.info(
                " {object} {object_id}_{mode} was {action}!",
                object_id=self.table_id,
                mode=mode,
                object="Table",
                action="deleted",
            )
        else:
            # Single mode: delete just that table (missing tables are tolerated).
            self.client[f"bigquery_{mode}"].delete_table(
                self.table_full_name[mode], not_found_ok=True
            )
            logger.info(
                " {object} {object_id}_{mode} was {action}!",
                object_id=self.table_id,
                mode=mode,
                object="Table",
                action="deleted",
            )
    def append(
        self,
        filepath,
        partitions=None,
        if_exists="replace",
        chunk_size=None,
        **upload_args,
    ):
        """Appends new data to existing BigQuery table.
        As long as the data has the same schema. It appends the data in the
        filepath to the existing table.
        Args:
            filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
            partitions (str, pathlib.PosixPath, dict): Optional.
                Hive structured partition as a string or dict
                * str : `<key>=<value>/<key2>=<value2>`
                * dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
                What to do if data with same name exists in storage
                * 'raise' : Raises Conflict exception
                * 'replace' : Replace table
                * 'pass' : Do nothing
            chunk_size (int): Optional
                The size of a chunk of data whenever iterating (in bytes).
                This must be a multiple of 256 KB per the API specification.
                If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
        """
        if not self.table_exists("staging"):
            raise BaseDosDadosException(
                "You cannot append to a table that does not exist"
            )
        # Appending works by adding more files under the staging Storage prefix;
        # the staging table is external over that prefix (see `create`).
        Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
            filepath,
            mode="staging",
            partitions=partitions,
            if_exists=if_exists,
            chunk_size=chunk_size,
            **upload_args,
        )
        logger.success(
            " {object} {object_id} was {action}!",
            object_id=self.table_id,
            object="Table",
            action="appended",
        )
|
9,370 | 5d97a2afed26ec4826c8bce30c84863d21f86001 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_security import SQLAlchemySessionUserDatastore, Security
# Module-level application setup. Config is loaded from the instance folder
# (instance/config.py) so deployment secrets stay out of version control;
# silent=True tolerates a missing file.
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile("config.py", silent=True)
db = SQLAlchemy(app)
# NOTE(review): blueprint imports are placed after `app`/`db` are created —
# presumably because the view modules import them (circular-import avoidance);
# confirm before reordering.
from .blueprints.cart.views import cart_blueprint
from .blueprints.admin.views import admin_blueprint
from .blueprints.products.views import product_blueprint
from .blueprints.orders.views import order_blueprint
from .blueprints.account.views import account_blueprint
from .blueprints.categories.views import category_blueprint
from .blueprints.static_pages.views import static_blueprint
app.register_blueprint(static_blueprint)
app.register_blueprint(admin_blueprint)
app.register_blueprint(cart_blueprint)
app.register_blueprint(product_blueprint)
app.register_blueprint(account_blueprint)
app.register_blueprint(category_blueprint)
app.register_blueprint(order_blueprint)
from .blueprints.account.models import AccountUser, AccountRole
from .blueprints.account.forms import RegistrationForm, LoginForm
# Probe the database: if querying fails, assume the schema is missing and
# create all tables.
# NOTE(review): `except Exception` also swallows unrelated DB errors (bad
# credentials, network) and `e` is unused — consider narrowing.
try:
    AccountUser.query.first()
except Exception as e:
    db.create_all()
# Wire Flask-Security to the SQLAlchemy-backed user/role models, with custom
# registration and login forms.
user_datastore = SQLAlchemySessionUserDatastore(db.session, AccountUser, AccountRole)
security = Security(
    app, user_datastore, register_form=RegistrationForm, login_form=LoginForm
)
|
9,371 | d28571214805df766c2cc2f45a6b5bea88d7ac18 | #!/usr/bin/env python
from setuptools import setup, find_packages

# Single source of truth for the version: read __version__ from the package
# itself. '1.0.0' is only a fallback if the attribute is absent.
# (The original parsed the file and then unconditionally overwrote the result
# with a hard-coded '1.0.0', making the parse dead code.)
VERSION = '1.0.0'
with open('bace/__init__.py') as fid:
    for line in fid:
        if line.startswith('__version__'):
            # Line looks like: __version__ = '1.2.3' -> take the quoted value.
            VERSION = line.strip().split()[-1][1:-1]
            break
with open('requirements.txt') as fid:
    # Skip blank lines; `if l` alone is always true because lines keep '\n'.
    INSTALL_REQUIRES = [l.strip() for l in fid if l.strip()]
with open('README.md') as fid:
    readme = fid.read()
doclink = """
Documentation
-------------
The full documentation is at http://bace.rtfd.org."""
setup(
    name='bace',
    version=VERSION,
    description='bace',
    long_description=readme + '\n\n' + doclink + '\n\n',
    author='Krzysztof Joachimiak',
    url='https://github.com/krzjoa/bace',
    # ('tests') is just the string 'tests'; the one-element tuple ('tests',)
    # is required, otherwise setuptools excludes nothing.
    packages=find_packages(where='.', exclude=('tests',)),
    package_dir={'bace': 'bace'},
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    license='MIT',
    zip_safe=False,
    keywords='bayes',
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Scientific/Engineering',
    ],
)
|
9,372 | 4ad4cf46be735c6ac26b5b0953d4c2458f37496a | import os, shutil, cv2
from PIL import Image
INP_DIR = '/dataset/test_set_A_full'
# Lọc thư mục data test ra thành 3 thư mục: None, Square (1:1), và phần còn lại (đã được crop ngay chính giữa)
# Trả về path dẫn đến 3 thư mục nói trên
def pre_proc(INP_DIR):
    """Split a test-image directory into three sibling directories.

    * ``<dir>_none``    - animated GIFs (moved out, not processed)
    * ``<dir>_square``  - images already 1:1 (copied unchanged)
    * ``<dir>_cropped`` - everything else, center-cropped to a square

    Returns:
        tuple: (none_dir, square_dir, crop_dir) output paths.
    """
    INP_DIR = INP_DIR + '/'
    NONE_DIR = os.path.dirname(INP_DIR) + '_none'
    SQUARE_DIR = os.path.dirname(INP_DIR) + '_square'
    CROP_DIR = os.path.dirname(INP_DIR) + '_cropped'
    os.makedirs(NONE_DIR, exist_ok=True)
    os.makedirs(SQUARE_DIR, exist_ok=True)
    os.makedirs(CROP_DIR, exist_ok=True)
    # Renamed from `dir` (shadowed the builtin); sorted for deterministic order.
    entries = sorted(os.listdir(INP_DIR))
    for fi in entries:
        print(fi)
        inp_path = os.path.join(INP_DIR, fi)
        # Close the PIL handle before moving/copying: an open handle makes
        # shutil.move fail on Windows and leaks file descriptors elsewhere.
        with Image.open(inp_path) as img:
            img_format = img.format
            width, height = img.size
        if img_format == 'GIF':
            shutil.move(inp_path, os.path.join(NONE_DIR, fi))
            continue
        if width == height:
            shutil.copyfile(inp_path, os.path.join(SQUARE_DIR, fi))
            continue
        # Center-crop the longer axis down to a square.
        img = cv2.imread(inp_path)
        if width > height:
            img = img[:, width//2-height//2:width//2+height//2]
        else:
            img = img[height//2-width//2:height//2+width//2, :]
        cv2.imwrite(os.path.join(CROP_DIR, fi), img)
    return NONE_DIR, SQUARE_DIR, CROP_DIR
# Run the split on the default input directory when executed as a script.
if __name__ == '__main__':
    pre_proc(INP_DIR)
|
9,373 | 02a1f84e72b412636d86b9bdb59856ae8c309255 | '''
Each new term in the Fibonacci sequence is generated by adding the previous two terms.
By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million,
find the sum of the even-valued terms.
'''
def fib(n):
    '''
    Return the n-th Fibonacci number exactly (fib(0) == 0, fib(1) == fib(2) == 1).

    The original used Binet's closed form with hard-coded float constants,
    which silently loses precision as n grows; plain integer iteration is
    exact for any n and just as fast at this scale.
    '''
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Sum the even-valued Fibonacci terms whose value does not exceed four million.
total = 0
limit = 4000000  # renamed from `max`, which shadowed the builtin
for k in range(2, limit):
    x = fib(k)
    if x > limit:
        break
    if x % 2 == 0:
        total += x
# Parenthesized form prints identically under Python 2 and Python 3.
print(total)
|
9,374 | 601d32bf30aa454bbc7d31d6ce4b7296cef0fdfe | """Largest product in a series
Problem 8
The four adjacent digits in the 1000-digit number that have the greatest product
are 9 x 9 x 8 x 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the
greatest product. What is the value of this product?"""
import numpy
series = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725"""
series = [int(s) for s in series if s not in [' ', '\n']]
old_max = 0
for offset in range(13, len(series) + 1):
numbers = series[offset-13:offset]
result = numpy.prod(numbers)
if result > old_max:
old_max = result
print old_max |
9,375 | 438fe1ccf265706e202d7cc6044e57590f29801f | import pytest
from eth_utils import encode_hex, remove_0x_prefix
from ethereum.tester import keys
import os
import json
from microraiden.client.client import CHANNEL_MANAGER_ABI_NAME, TOKEN_ABI_NAME
from microraiden.crypto import privkey_to_addr
@pytest.fixture
def contracts_relative_path():
    """Contracts JSON path, relative to the test directory."""
    return 'data/contracts.json'
@pytest.fixture
def compiled_contracts_path(test_dir, contracts_relative_path):
    """Absolute path of the compiled-contracts JSON."""
    return os.path.join(test_dir, contracts_relative_path)
@pytest.fixture
def compiled_contracts(compiled_contracts_path):
    """Parsed compiled-contracts JSON."""
    # NOTE(review): the file handle opened here is never closed.
    return json.load(open(compiled_contracts_path))
@pytest.fixture
def test_dir():
    """Root test directory (two levels above this file, plus '/../')."""
    return os.path.dirname(os.path.dirname(__file__)) + "/../"
@pytest.fixture(scope='session')
def use_tester(request):
    """Value of the --use_tester command-line option."""
    return request.config.getoption('use_tester')
@pytest.fixture
def api_endpoint():
    """address of a paywall proxy"""
    return 'localhost'
@pytest.fixture
def api_endpoint_port():
    """port the paywall proxy listens on"""
    return 5000
@pytest.fixture
def api_endpoint_address(api_endpoint, api_endpoint_port):
    """host:port string of the paywall proxy."""
    return api_endpoint + ":" + str(api_endpoint_port)
@pytest.fixture
def init_contract_address():
    """Dummy 20-byte hex address (0xaaa...a)."""
    return "0x" + "a" * 40
@pytest.fixture
def manager_state_path():
    """Temp-file location for pickled channel-manager state."""
    return '/tmp/rmp-state.pkl'
@pytest.fixture(scope='session')
def deployer_privkey():
    """Deployer private key as hex without the 0x prefix (tester key #3)."""
    return remove_0x_prefix(encode_hex(keys[3]))
@pytest.fixture(scope='session')
def deployer_address(deployer_privkey):
    """Address derived from the deployer private key."""
    return privkey_to_addr(deployer_privkey)
@pytest.fixture(scope='session')
def contract_abi_path():
    """Absolute path of the contracts ABI JSON shipped with the package."""
    return os.path.join(os.path.dirname(os.path.dirname(__file__)), '../data/contracts.json')
@pytest.fixture(scope='session')
def contract_abis(contract_abi_path):
    """Parsed ABI JSON for all contracts."""
    # NOTE(review): abi_file is never closed.
    abi_file = open(contract_abi_path, 'r')
    return json.load(abi_file)
@pytest.fixture(scope='session')
def channel_manager_abi(contract_abis):
    """ABI of the channel manager contract."""
    return contract_abis[CHANNEL_MANAGER_ABI_NAME]['abi']
@pytest.fixture(scope='session')
def channel_manager_bytecode(contract_abis):
    """Deploy bytecode of the channel manager contract."""
    return contract_abis[CHANNEL_MANAGER_ABI_NAME]['bytecode']
@pytest.fixture(scope='session')
def token_abi(contract_abis):
    """ABI of the token contract."""
    return contract_abis[TOKEN_ABI_NAME]['abi']
@pytest.fixture(scope='session')
def token_bytecode(contract_abis):
    """Deploy bytecode of the token contract."""
    return contract_abis[TOKEN_ABI_NAME]['bytecode']
@pytest.fixture(scope='session')
def kovan_block_time():
    """Average Kovan chain block time in seconds."""
    return 4
|
9,376 | 9d2c0d59b0b2b4e4fca942e648059738053c53d0 | from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(
name='ckanext-MYEXTENSION',
version=version,
description="description",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='ldhspace',
author_email='ldhspace@yahoo.co.kr',
url='www.naver.com',
license='free',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['ckanext', 'ckanext.MYEXTENSION'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points=\
"""
[ckan.plugins]
# Add plugins here, eg
usmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin
""",
)
|
9,377 | 8fbfa53be826b45b53b530a1766f6a68c61f5be9 | from tkinter import *
class Menuutje:
    """Attach a demo menu bar (File and Edit menus) to a Tk window.

    Every entry is wired to the no-op handler `doNothing`.
    """
    def __init__(self, master):
        # Top-level menu bar, installed on the window.
        menu_bar = Menu(master)
        master.config(menu=menu_bar)
        # "File" menu with a few placeholder entries.
        file_menu = Menu(menu_bar)
        menu_bar.add_cascade(label="File", menu=file_menu)
        file_menu.add_command(label="New Game...", command=self.doNothing)
        file_menu.add_command(label="New...", command=self.doNothing)
        file_menu.add_separator()
        file_menu.add_command(label="Exit", command=self.doNothing)
        # "Edit" menu with a single placeholder entry.
        edit_menu = Menu(menu_bar)
        menu_bar.add_cascade(label="Edit", menu=edit_menu)
        edit_menu.add_command(label="Redo", command=self.doNothing)
    def doNothing(self):
        """Placeholder command handler: just prints a message."""
        print("Okay I do nothing..")
class MenuGameRPS:
def __init__(self, master):
menu = Menu(master)
master.config(menu=menu)
subMenu = Menu(menu)
menu.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="New Game...", command=self.newGame)
subMenu.add_separator()
subMenu.add_command(label="Exit", command=self.exitGame)
def exitGame(self):
exit()
def newGame(self):
|
9,378 | 467327b98ab99bdad429943c701c751be4f67940 | import json
import sys
from copy import deepcopy
from argparse import ArgumentParser
# TODO: Ord category's IDs after deletion
def return_cat_name(json_coco, category):
    """Return the category name of a category ID.

    Arguments:
        json_coco {dict} -- json dict loaded from a COCO file
        category {int} -- category ID

    Returns:
        string -- category name

    Raises:
        KeyError: Category ID not found
    """
    for cat in json_coco['categories']:
        if cat['id'] == category:
            return cat['name']
    # The docstring always promised KeyError, but the old code printed a
    # message and called sys.exit() with no argument — killing the whole
    # process with EXIT CODE 0 (success) and hiding the error from callers.
    raise KeyError("Categoria não encontrada: {}".format(category))
def main():
    """Remove a hard-coded list of categories from a COCO json file.

    Reads the input COCO file, drops every category whose name is in
    ``category_names`` together with all annotations pointing at one of those
    categories, and writes the filtered copy to the output path. Raises
    KeyError if an annotation references an unknown category ID.
    """
    parser = ArgumentParser(
        description='Category Filter: Filter a List of Categories from a JSON')
    parser.add_argument('json_file_path', help='JSON file path')
    parser.add_argument('out_file', help='Output filename')
    args = parser.parse_args()
    category_names = ["sports ball", "cell phone", "couch", "elephant", "tie", "spoon", "skis", "apple", "giraffe", "laptop", "tennis racket", "sink", "dog", "fork", "cat", "teddy bear", "train", "skateboard", "toilet", "sandwich", "bed", "keyboard", "baseball glove", "baseball bat", "airplane", "oven", "hot dog", "refrigerator", "frisbee", "mouse", "fire hydrant", "stop sign", "bear", "snowboard", "parking meter", "toothbrush", "microwave", "scissors", "hair drier", "toaster"]
    removed_names = set(category_names)
    # `with` closes the input file (the original leaked the handle).
    with open(args.json_file_path) as ann_file:
        json_coco = json.load(ann_file)
    new_json = deepcopy(json_coco)
    # Filter with comprehensions: the original called list.remove() per hit,
    # which is O(n) per call (quadratic overall) on large annotation lists.
    # Indexing id_to_name keeps the old behavior of failing on unknown IDs.
    id_to_name = {cat['id']: cat['name'] for cat in json_coco['categories']}
    new_json['annotations'] = [
        ann for ann in new_json['annotations']
        if id_to_name[ann['category_id']] not in removed_names
    ]
    new_json['categories'] = [
        cat for cat in new_json['categories'] if cat['name'] not in removed_names
    ]
    with open(args.out_file, "w") as output:
        json.dump(new_json, output)
# Script entry point.
if __name__ == "__main__":
    main()
|
9,379 | 0f257d199ad0285d8619647434451841144af66d | #Tom Healy
#Adapted from Chris Albon https://chrisalbon.com/machine_learning/linear_regression/linear_regression_using_scikit-learn/
#Load the libraries we will need
#This is just to play round with Linear regression more that anything else
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_iris
import warnings
import numpy as np
import pandas as pd

# Silence scipy's internal LAPACK (gelsd) warning, as the original tutorial does.
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")

# Fit an ordinary least-squares model on the iris data: the four measurement
# columns as features, the class label as the (numeric) target.
iris = load_iris()
X = iris.data
y = iris.target
model = LinearRegression().fit(X, y)

# Report the fitted parameters: the intercept (where the fitted hyperplane
# crosses the target axis), then one coefficient per feature.
print(model.intercept_)
print(model.coef_)
9,380 | 17a442a85b910ff47c2f3f01242b7f64a6237146 | from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.applications import InceptionV3
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.layers import Flatten,Dense,Dropout
from keras.preprocessing.image import img_to_array
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import os
#######################################################################################################################
#######################################################################################################################
# ПАРАМЕТРЫ ДЛЯ ИЗМЕНЕНИЯ!!!!!!!!!!!!!!!
path=r"C:\Users\Dmitry\Desktop\CNN_Research_2018\cats_vs_dogs"
image_size=(150, 150) # здесь задавать размер входных изображений
batch_size=32 # выбирать из этих значений [8,16,32]
batch_size2=100
inception_nontrainable_layers_count=205 # количество слоёв InceptionV3, чьи веса мы не меняем при обучении, в процессе переноса обучения(transfer learning)
nb_epoch=1# количество эпох обучения нейронной сети
fc_nb_epoch=10# количество эпох обучения классификационной части сети
n_classes=2# количество классов для обучения
train_path=os.path.join(path,"test")# FIXME train!!!
validation_path=os.path.join(path,'validation')
test_path=os.path.join(path,'test')
path_to_save_np=path# путь для сохранений нумпай-массивов ПОМЕНЯЙТЕ НА СВОЙ ПУТЬ, КУДА ХОТИТЕ СОХРАНЯТЬ 10 ГБ!!!
########################################################################################################################
########################################################################################################################
inc_model=InceptionV3(include_top=False,
weights='imagenet',
input_shape=((3, image_size[0], image_size[1])))
bottleneck_datagen = ImageDataGenerator(rescale=1. / 255) # собственно, генератор
train_generator = bottleneck_datagen.flow_from_directory(train_path,
target_size=image_size,
batch_size=batch_size,
class_mode=None,
shuffle=False)
validation_generator = bottleneck_datagen.flow_from_directory(validation_path,
target_size=image_size,
batch_size=batch_size,
class_mode=None,
shuffle=False)
bottleneck_features_train = inc_model.predict_generator(train_generator,steps=int(len(train_generator.filenames)/batch_size))# пока не разобрался со steps, мб для универсальности опустить этот параметр
np.save(open(path_to_save_np+'/bn_features_train.npy', 'wb'), bottleneck_features_train)
bottleneck_features_validation = inc_model.predict_generator(validation_generator,int(len(validation_generator.filenames)/batch_size))
np.save(open(path_to_save_np+'/bn_features_validation.npy', 'wb'), bottleneck_features_validation)
train_data = np.load(open(os.path.join(path_to_save_np,'bn_features_train.npy'), 'rb'))
train_labels = np.array([0] * int(train_data.shape[0]/2) + [1] * int(train_data.shape[0]/2))
validation_data = np.load(open(os.path.join(path_to_save_np,'bn_features_validation.npy'), 'rb'))
validation_labels = np.array([0] * int(validation_data.shape[0]/2) + [1] * int(validation_data.shape[0]/2)) # за счёт отсутсвия перемешивания(shuffle=False) в генераторе(flow_from_directory)
fc_model = Sequential()
fc_model.add(Flatten(input_shape=train_data.shape[1:]))
fc_model.add(Dense(64, activation='relu', name='dense_one'))
fc_model.add(Dropout(0.5, name='dropout_one'))
fc_model.add(Dense(64, activation='relu', name='dense_two'))
fc_model.add(Dropout(0.5, name='dropout_two'))
if n_classes==2:
fc_model.add(Dense(1, activation='sigmoid', name='output'))
else:
fc_model.add(Dense(n_classes, activation='softmax', name='output'))
fc_model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
fc_model.fit(train_data, train_labels,
nb_epoch=fc_nb_epoch, batch_size=batch_size,
validation_data=(validation_data, validation_labels))
fc_model.save_weights(os.path.join(path_to_save_np,'fc_inception_cats_dogs_250.hdf5')) # сохраняем веса
fc_model.evaluate(validation_data, validation_labels)
################################################################################################
# PART 2 UNITE 2 MODELS
weights_filename=os.path.join(path_to_save_np,'fc_inception_cats_dogs_250.hdf5')
x = Flatten()(inc_model.output)
x = Dense(64, activation='relu', name='dense_one')(x)
x = Dropout(0.5, name='dropout_one')(x)
x = Dense(64, activation='relu', name='dense_two')(x)
x = Dropout(0.5, name='dropout_two')(x)
if n_classes==2:
top_model=Dense(1, activation='sigmoid', name='output')(x)
else:
top_model = Dense(n_classes, activation='softmax', name='output')(x)
model = Model(input=inc_model.input, output=top_model)
model.load_weights(weights_filename, by_name=True) # загрузить веса в определённые слои по имени (by_name=True)
for layer in inc_model.layers[:inception_nontrainable_layers_count]:
layer.trainable = False
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=1e-4, momentum=0.9),
#optimizer='rmsprop',
metrics=['accuracy']) # тонкая настройка (в первый раз использовали RMSProp, во второй раз используем стохастический градиентный бустинг для того, чтобы веса не слищком сильно обновлялись)
filepath=os.path.join(path_to_save_np,"weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5")
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True) # здесь происходит аугментация данных, в частности, горизонтальное отражение
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_path,
target_size=image_size,
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_path,
target_size=image_size,
batch_size=batch_size,
class_mode='binary')
pred_generator=test_datagen.flow_from_directory(validation_path,
target_size=image_size,
batch_size=batch_size2,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=np.ceil(train_data.shape[0]/batch_size),
epochs=2,
validation_data=validation_generator,
validation_steps=np.ceil(validation_data.shape[0]/batch_size),
callbacks=callbacks_list)
model.evaluate_generator(pred_generator, val_samples=batch_size2)# val_samples должен быть равен величине батча в генераторе!!!
imgs,labels=pred_generator.next() # загружает изображения в генератор и присваивает ей label
array_imgs=np.transpose(np.asarray([img_to_array(img) for img in imgs]),(0,2,1,3))
predictions=model.predict(imgs)
rounded_pred=np.asarray([np.round(i) for i in predictions])
print("Accuracy score: "+str(accuracy_score(labels,rounded_pred)))
print("Confusion matrix: ")
print(confusion_matrix(labels,rounded_pred))
print("F1-score(average='macro'): "+str(f1_score(labels, rounded_pred, average='macro')))
print("F1-score(average='micro'): "+str(f1_score(labels, rounded_pred, average='micro')))
print("F1-score(average='weighted'): "+str(f1_score(labels, rounded_pred, average='weighted')))
print("F1-score(average=None): ")
print(f1_score(labels, rounded_pred, average=None))
print("Precision-score(average='macro'):"+str(precision_score(labels, rounded_pred, average='macro')))
print("Precision-score(average='micro'): "+str(precision_score(labels, rounded_pred, average='micro')))
print("Precision-score(average='weighted'): "+str(precision_score(labels, rounded_pred, average='weighted')))
print("Precision-score(average=None): ")
print(precision_score(labels, rounded_pred, average=None))
print("Recall-score(average='macro'):"+str(recall_score(labels, rounded_pred, average='macro')))
print("Recall-score(average='micro'): "+str(recall_score(labels, rounded_pred, average='micro')))
print("Recall-score(average='weighted'): "+str(recall_score(labels, rounded_pred, average='weighted')))
print("Recall-score(average=None): ")
print(recall_score(labels, rounded_pred, average=None))
if n_classes==2:
print("ROC_AUC score:" + str(roc_auc_score(labels, rounded_pred)))
print("F1-score(average='binary'): " + str(f1_score(labels, rounded_pred, average='binary')))
print("Precision-score(average='binary'):" + str(precision_score(labels, rounded_pred, average='binary')))
print("Recall-score(average='binary'):" + str(recall_score(labels, rounded_pred, average='binary'))) |
9,381 | d23700f03e8498a5ff3d1d03d8808048ba79a56b | import os
from os import listdir
from openpyxl import load_workbook, Workbook
# Merge every .xlsx workbook found under <cwd>/xlsx_files into a single
# result.xlsx written next to this script.
ROOT_PATH = os.getcwd()
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
path = f'{ROOT_PATH}/xlsx_files'
files = listdir(path)
result_xlsx = Workbook()
result_sheet = result_xlsx.active
for myfile in files:
    # Only merge real .xlsx files: the old check (`myfile[-4:] != 'xlsx'`)
    # ignored the dot and so also accepted names like "report_xlsx".
    if not myfile.endswith('.xlsx'):
        continue
    tg_xlsx = load_workbook(os.path.join(path, myfile), read_only=True)
    try:
        tg_sheet = tg_xlsx.active
        for row in tg_sheet.iter_rows():
            # append() adds the raw cell values as one new row
            result_sheet.append([cell.value for cell in row])
    finally:
        # read_only workbooks keep the source file handle open until closed
        tg_xlsx.close()
result_xlsx.save(f'{CUR_PATH}/result.xlsx')
9,382 | 4e04e748a97c59a26a394b049c15d96476b98517 | # Generated by Django 2.2.16 on 2020-10-27 14:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``nonce_str`` column to OrderInfo."""

    # must be applied on top of the app's initial schema
    dependencies = [
        ('trades', '0001_initial'),
    ]

    operations = [
        # Nullable, unique random-nonce column (verbose_name translates to
        # "random encrypted string"); presumably a payment nonce -- verify
        # against the OrderInfo model and its payment integration.
        migrations.AddField(
            model_name='orderinfo',
            name='nonce_str',
            field=models.CharField(blank=True, max_length=50, null=True, unique=True, verbose_name='随机加密串'),
        ),
    ]
|
9,383 | aaeca18f3771a6032c0fe51b75502f730c888888 | import FWCore.ParameterSet.Config as cms
# PoolSource listing the Sherpa ADD diphoton samples (mgg 750-2000, Ms3000):
# the original /store/user/skaplan production plus the higher-statistics
# /store/user/hn99 "morestats" extension.
source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_10_1_Kji.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_1_1_oTR.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_2_1_A2c.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_3_1_TNY.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_4_1_F6B.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_5_1_L2p.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_6_1_vz3.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_7_1_c3E.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_8_1_w8E.root',
        '/store/user/skaplan/noreplica/ADDdiPhoton/sherpa/mgg750-2000_Ms3000/sherpaevents_9_1_FNA.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_10_1_KF1.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_11_1_4q7.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_12_1_aQ4.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_13_1_Pwh.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_14_1_1cS.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_15_1_dnc.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_16_1_Kh7.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_17_1_Gt5.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_18_1_Bvh.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_19_1_6KB.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_1_1_uAS.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_20_1_8Ra.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_21_1_l0p.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_22_1_rCA.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_23_1_Ec0.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_24_1_NtE.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_25_1_QNK.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_2_1_kmn.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_3_1_LIi.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_4_1_Q4Z.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_5_1_ap1.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_6_1_KyS.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_7_1_FQo.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_8_1_MdF.root',
        '/store/user/hn99/noreplica/ADDdiPhoton/sherpa_morestats/mgg750-2000_Ms3000/sherpaevents_9_1_NrM.root',
    )
)
|
9,384 | e6694403eecf2c4511c1fce959b5939f5f457bb8 | v1 = 3+4*2
print(v1)
# parentheses force the addition first: (2+6)*2 == 16
v2 = (2+6)*2
print(v2)
# ** is right-associative, so 2**3**2 == 2**(3**2) == 512
v3 = 2**3**2
print(v3)
# / is true division in Python 3, so the result is the float 60.0
v4 = 20+80/2
print(v4)
|
9,385 | 9d7bc2d93b855fbd22a4707a6237ac51069beb53 | """
进程对象属性
"""
from multiprocessing import Process
import time
def tm():
    """Print the current local time three times, two seconds apart."""
    for _ in range(3):
        print(time.ctime())
        time.sleep(2)
p = Process(target=tm,name='Tarena')
# daemon=True makes the child exit together with the parent, so tm()
# will normally be cut short when the main process finishes its prints
p.daemon = True
p.start()
print("Name:",p.name) # process name
print("PID:",p.pid) # process PID
print("is alive:",p.is_alive()) # whether the process is within its lifecycle
9,386 | 3d742505d480493fbc729e7a0febdcab3a7dc041 | from __future__ import annotations
from typing import Generator, Optional
from collections import Counter
from itertools import zip_longest
from re import finditer
codon_table = """UUU F CUU L AUU I GUU V
UUC F CUC L AUC I GUC V
UUA L CUA L AUA I GUA V
UUG L CUG L AUG M GUG V
UCU S CCU P ACU T GCU A
UCC S CCC P ACC T GCC A
UCA S CCA P ACA T GCA A
UCG S CCG P ACG T GCG A
UAU Y CAU H AAU N GAU D
UAC Y CAC H AAC N GAC D
UAA Stop CAA Q AAA K GAA E
UAG Stop CAG Q AAG K GAG E
UGU C CGU R AGU S GGU G
UGC C CGC R AGC S GGC G
UGA Stop CGA R AGA R GGA G
UGG W CGG R AGG R GGG G"""
codons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2]))
def consensus(*args):
    """Return a consensus sequence from n Seq objects."""
    letters = []
    for column in zip_longest(*args):
        tally = Counter(column)
        # drop the zip_longest padding for shorter sequences
        tally.pop(None, None)
        letters.append(tally.most_common(1)[0][0])
    return Seq("".join(letters), args[0].id)
class Base(str):
    """A single nucleotide base.

    A plain ``str`` subclass used as a marker type: ``Seq.__getitem__``
    returns ``Base`` for integer indices, so single characters are
    distinguishable from ``Seq`` sub-sequences.
    """

    pass
class Seq:
    """A nucleotide sequence with an optional identifier.

    Wraps a plain string and layers sequence operators on top of it:
    Hamming distance (``seq1 - seq2``), reverse complement (``~seq``),
    concatenation (``+``), motif search (``seq["ACGT"]``), transcription
    and translation.  Instances are mutable through ``seq[i] = base`` and
    are therefore intentionally unhashable (``__eq__`` without
    ``__hash__``).
    """

    def __init__(self, sequence: str, id: Optional[str] = None, codons: dict = codons):
        # `id` shadows the builtin, but it is part of the public signature.
        self.sequence = sequence
        self.id = id
        # per-instance codon table; defaults to the module-level RNA table
        self.codons = codons

    def __repr__(self):
        # show at most the first 60 characters; "..." marks truncation
        # (only in the id-bearing form, mirroring the original behaviour)
        if not self.id:
            return f"Seq({self.sequence[:60]})"
        concat = ""
        if len(self) > 60:
            concat = "..."
        return f"Seq({self.sequence[:60]}{concat}, id='{self.id}')"

    def __str__(self):
        return self.sequence

    def __len__(self) -> int:
        return len(self.sequence)

    def __invert__(self) -> Seq:
        """``~seq`` returns the reverse complement (DNA alphabet)."""
        return self.reverse_complement()

    def __eq__(self, other) -> bool:
        """Equality compares sequence text only; ids are ignored."""
        return str(self) == str(other)

    def __add__(self, other: Seq) -> Seq:
        """Concatenate two sequences; the id is taken from the left operand."""
        new_sequence = self.sequence + other.sequence
        return Seq(new_sequence, self.id)

    def __sub__(self, other: Seq) -> int:
        """``seq1 - seq2`` is the Hamming distance; when lengths differ,
        every unmatched trailing position counts as one difference."""
        return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))

    def __getitem__(self, index):
        # int   -> a single Base
        # str   -> list of (overlapping) start positions of that motif
        # slice -> a new Seq preserving the id
        if type(index) == int:
            return Base(self.sequence[index])
        if type(index) == str:
            return self.find(index, overlapping=True)
        return Seq(self.sequence[index], self.id)

    def __setitem__(self, index, nt):
        # strings are immutable, so rebuild the sequence around `nt`
        self.sequence = self.sequence[:index] + nt + self.sequence[index + 1 :]

    def __iter__(self):
        # NOTE(review): the instance is its own iterator (one shared cursor
        # `self.n`), so nested iteration over the same object interferes;
        # kept as-is for backward compatibility with callers using __next__.
        self.n = 0
        return self

    def __next__(self):
        if self.n < len(self):
            result = self[self.n]
            self.n += 1
            return result
        else:
            raise StopIteration

    def __contains__(self, other):
        # plain substring containment on the text representations
        if str(other) in str(self):
            return True
        else:
            return False

    @property
    def gc(self) -> float:
        """GC content as a percentage; 0.0 for an empty sequence
        (previously an empty sequence raised ZeroDivisionError)."""
        if not self.sequence:
            return 0.0
        g = self.count("G")
        c = self.count("C")
        return (g + c) / len(self) * 100

    @property
    def counts(self) -> Counter:
        """Letter frequencies of the sequence (Counter is a dict subclass)."""
        return Counter(self.sequence)

    def to_fasta(self, line_length: int = 60) -> str:
        """Render the sequence as a FASTA record wrapped at `line_length`."""
        # kmers(n, n) yields consecutive non-overlapping chunks, including
        # the shorter final chunk -- exactly what line wrapping needs
        formated_sequence = "\n".join(
            [str(s) for s in self.kmers(line_length, line_length)]
        )
        return f">{self.id}\n{formated_sequence}\n"

    def kmers(self, n: int, step: int = 1) -> Generator:
        """Yield windows of length up to `n`, starting every `step` bases.

        Windows whose start lies within `n` of the end are truncated rather
        than skipped; ``to_fasta`` relies on that for its last line.
        """
        return (
            Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step)
        )

    def count(self, string: str, max_diff: int = 0) -> int:
        """Count exact occurrences of `string`; with `max_diff` > 0, count
        every window within that Hamming distance instead (fuzzy match)."""
        if max_diff == 0:
            return self.sequence.count(string)
        other = Seq(string)
        return sum((kmer - other) <= max_diff for kmer in self.kmers(len(other)))

    def substitute(self, old: str, new: str, count: int = -1) -> Seq:
        """Return a copy with up to `count` occurrences of `old` replaced
        by `new` (-1 = replace all)."""
        return Seq(self.sequence.replace(str(old), str(new), count), self.id)

    def find(self, target: str, count: int = -1, overlapping: bool = False) -> list:
        """Return start positions of `target` (interpreted as a regex).

        `count` limits how many matches are collected (-1 = all);
        `overlapping` wraps the pattern in a lookahead so overlapping
        matches are reported too.
        """
        locs = []
        if overlapping and len(target) > 1:
            target = f"(?=({target}))"
        matches = finditer(target, self.sequence)
        for i, match in enumerate(matches, 1):
            locs.append(match.start())
            if i == count:
                break
        return locs

    def find_one(self, target: str) -> Optional[int]:
        """Return the index of the first exact occurrence of `target`, or
        None when absent.  (Annotation fixed: this returns an int, the old
        ``Optional[str]`` hint was wrong.)"""
        loc = self.sequence.find(str(target))
        if loc == -1:
            return None
        return loc

    def reverse_complement(self, rna: bool = False) -> Seq:
        """Return the reverse complement (DNA by default, RNA if rna=True).

        Raises KeyError if the sequence contains a base outside the chosen
        complement table.
        """
        complements = {"A": "T", "T": "A", "G": "C", "C": "G"}
        if rna:
            complements = {"A": "U", "U": "A", "G": "C", "C": "G"}
        revc = "".join(complements[nt] for nt in reversed(self))
        return Seq(revc, self.id)

    def transcribe(self) -> Seq:
        """DNA -> RNA: replace T with U."""
        return Seq(self.sequence.replace("T", "U"), self.id)

    def reverse_transcribe(self) -> Seq:
        """RNA -> DNA: replace U with T."""
        return Seq(self.sequence.replace("U", "T"), self.id)

    def translate(self) -> Seq:
        """
        Return the translated sequence.

        Stop codons are dropped rather than terminating translation.
        Raises KeyError for codons missing from the table -- e.g. DNA
        input or a length that is not a multiple of three.
        """
        AA = "".join(
            self.codons[self.sequence[i : i + 3]]
            for i in range(0, len(self.sequence), 3)
            if self.codons[self.sequence[i : i + 3]] != "Stop"
        )
        return Seq(AA, self.id)

    def startswith(self, seq: str) -> bool:
        """True if the sequence text starts with `seq`."""
        return self.sequence.startswith(str(seq))

    def endswith(self, seq: str) -> bool:
        """True if the sequence text ends with `seq`."""
        return self.sequence.endswith(str(seq))
|
9,387 | 1a28aea824752d18cbd462693f8f8980dba4974e | import re
# "/cmd arg" or "!cmd arg" -> groups (cmd, arg), e.g. "/animefind baka"
# -> (animefind, baka).  Raw string so the \s escape stays valid: a plain
# '\s' is an invalid escape sequence (DeprecationWarning on 3.6+, slated
# to become a SyntaxError).
BASICPATTERN = r'[!/](%s)\s{,1}(.*)'


def basicRegex(strings):
    """Return compiled BASICPATTERN regexes, one per command name.

    Returns [] when *strings* is not a list (defensive behaviour kept
    from the original).
    """
    if not isinstance(strings, list):
        return []
    return [re.compile(BASICPATTERN % s.strip()) for s in strings]
|
9,388 | 07e068dbc1ba1bcb85121ee49f2f9337cae188ba | #!/usr/bin/env python3
""" brightness an image"""
import tensorflow as tf
def change_brightness(image, max_delta):
    """Adjust the brightness of *image* by *max_delta*.

    Thin wrapper around ``tf.image.adjust_brightness``: the delta is
    applied deterministically (it is not randomly sampled).
    """
    return tf.image.adjust_brightness(image, max_delta)
|
9,389 | 475cb57ce5fda0d0389bfa1b9b227a2147e1abde | #------------------------------------------------------------
# Copyright 2016 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
# IMPORTANT
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
import sys
import select
import threading
from threading import Timer
import time
import datetime
import getopt
import os
import json
import re
#////////////////////////////////////////////////////////////
# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS
# OR VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#with firebase support?
#------------------------------------------------------------
_firebase=False
#------------------------------------------------------------
#with thingspeak support?
#------------------------------------------------------------
_thingspeak=False
#plot snr instead of seq
_thingspeaksnr=False
#------------------------------------------------------------
#with sensorcloud support?
#------------------------------------------------------------
_sensorcloud=False
#------------------------------------------------------------
#with grovestreams support?
#------------------------------------------------------------
_grovestreams=False
#------------------------------------------------------------
#with fiware support?
#------------------------------------------------------------
_fiware=False
#////////////////////////////////////////////////////////////
# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY
#////////////////////////////////////////////////////////////
# NOTE: the format of the application key list has changed from
# a list of list, to a list of string that will be process as
# a byte array. Doing so wilL allow for dictionary construction
# using the appkey to retrieve information such as encryption key,...
app_key_list = [
#for testing
'****',
#change here your application key
'\x01\x02\x03\x04',
'\x05\x06\x07\x08'
]
#////////////////////////////////////////////////////////////
#FOR AES DECRYPTION
#////////////////////////////////////////////////////////////
#put your key here, should match the end-device's key
aes_key="0123456789010123"
#put your initialisation vector here, should match the end-device's initialisation vector
aes_iv="\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
#aes_iv="\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
#association between appkey and aes_key
appkey_aeskey = {
'\x01\x02\x03\x04':"0123456789010123",
'\x05\x06\x07\x08':"0123456789010123"
}
#association between appkey and aes_iv
appkey_aesiv = {
'\x01\x02\x03\x04':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00",
'\x05\x06\x07\x08':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
}
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_ISBINARY=0x01
#------------------------------------------------------------
#last pkt information
#------------------------------------------------------------
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
bw=0
cr=0
sf=0
#------------------------------------------------------------
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#with mongoDB support?
#------------------------------------------------------------
_mongodb = False
#------------------------------------------------------------
#log gateway message?
#------------------------------------------------------------
_logGateway=0
#------------------------------------------------------------
#raw output from gateway?
#------------------------------------------------------------
_rawFormat=0
#------------------------------------------------------------
_ourcustomFormat=0;
_lorawanFormat=0
#------------------------------------------------------------
#------------------------------------------------------------
#check for app key?
#------------------------------------------------------------
_wappkey=0
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_aes=0
_hasClearData=0
#------------------------------------------------------------
#open json file to recover gateway_address
#------------------------------------------------------------
#------------------------------------------------------------
#open json file to recover gateway_address
#------------------------------------------------------------
# json.load reads straight from the file object (no manual readlines and
# string concatenation needed) and the context manager guarantees the
# file is closed even if parsing fails.
with open(os.path.expanduser("local_conf.json"), "r") as f:
	json_array = json.load(f)

#set the gateway_address for having different log filenames
_gwaddr = json_array["gateway_conf"]["gateway_ID"]
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
_folder_path = "/home/pi/Dropbox/LoRa-test/"
_gwlog_filename = _folder_path+"gateway_"+str(_gwaddr)+".log"
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwaddr)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
_gw_dht22 = json_array["gateway_conf"]["dht22"]
_date_save_dht22 = None
if(_gw_dht22):
print "Use DHT22 to get gateway temperature and humidity level"
#read values from dht22 in the gateway box
sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
from read_dht22 import get_dht22_values
_temperature = 0
_humidity = 0
# retrieve dht22 values
def save_dht22_values():
	"""Sample the gateway's DHT22 sensor and cache the reading in globals.

	Stores humidity/temperature plus the UTC sampling time; when MongoDB
	support is enabled the reading is also persisted as a DATA_GW_DHT22
	document.

	NOTE(review): the string concatenations below assume
	get_dht22_values() returns the two readings as *strings* -- confirm
	against sensors_in_raspi/dht22/read_dht22.py; numeric returns would
	raise TypeError here.
	"""
	global _temperature, _humidity, _date_save_dht22
	_humidity, _temperature = get_dht22_values()
	_date_save_dht22 = datetime.datetime.utcnow()
	print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
	#save values from the gateway box's DHT22 sensor, if _mongodb is true
	if(_mongodb):
		#saving data in a JSON var
		str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
		#creating document to add
		doc = {
			"type" : "DATA_GW_DHT22",
			"gateway_eui" : _gwaddr,
			"node_eui" : "gw",
			"snr" : "",
			"rssi" : "",
			"cr" : "",
			"datarate" : "",
			"time" : _date_save_dht22,
			# round-trip through json validates str_json_data before storing
			"data" : json.dumps(json.loads(str_json_data))
		}
		#adding the document
		add_document(doc)
def dht22_target():
while True:
print "Getting gateway temperature"
save_dht22_values()
sys.stdout.flush()
global _gw_dht22
time.sleep(_gw_dht22)
#------------------------------------------------------------
#for managing the input data when we can have aes encryption
#------------------------------------------------------------
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0
def getSingleChar():
	"""Return the next input character.

	While the pushback buffer _linebuf is armed, characters come from it;
	once exhausted (or when it was never armed) we fall back to blocking
	reads on stdin.
	"""
	global _has_linebuf, _linebuf_idx
	if _has_linebuf==1 and _linebuf_idx < len(_linebuf):
		c = _linebuf[_linebuf_idx]
		_linebuf_idx = _linebuf_idx + 1
		return c
	# buffer finished (or not active): disarm it and read from stdin
	_has_linebuf = 0
	return sys.stdin.read(1)
def getAllLine():
	"""Return everything left in the pushback buffer and disarm it."""
	global _linebuf_idx, _has_linebuf
	start = _linebuf_idx
	_linebuf_idx = 0
	_has_linebuf = 0
	# hand back the unread tail of the buffer
	return _linebuf[start:]
def fillLinebuf(n):
	"""Read *n* characters from stdin into the pushback buffer and arm it."""
	global _linebuf, _linebuf_idx, _has_linebuf
	_linebuf = sys.stdin.read(n)
	_linebuf_idx = 0
	_has_linebuf = 1
#////////////////////////////////////////////////////////////
# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD
# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU
# REALLY KNOW WHAT YOU ARE DOING
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#for parsing the options
#------------------------------------------------------------
def main(argv):
	"""Parse post_processing_gw's command-line options.

	Every recognised option only flips a module-level feature flag (and,
	for the cloud back-ends, lazily imports the matching helper module),
	so this function works entirely through ``global`` statements and
	returns nothing.
	"""
	try:
		opts, args = getopt.getopt(argv,'iftLam:',[\
		'ignorecomment',\
		'firebase',\
		'thingspeak',\
		'retrythsk',\
		'thingspeaksnr',\
		'fiware',\
		'sensorcloud',\
		'grovestreams',\
		'loggw',\
		'addr',\
		'wappkey',\
		'raw',\
		'aes',\
		'mongodb'])
	except getopt.GetoptError:
		# unknown option: print a one-line usage summary and abort
		print 'post_processing_gw '+\
		'-i/--ignorecomment '+\
		'-f/--firebase '+\
		'-t/--thingspeak '+\
		'--retrythsk '+\
		'--thingspeaksnr '+\
		'--fiware '+\
		'--sensorcloud '+\
		'--grovestreams '+\
		'-L/--loggw '+\
		'-a/--addr '+\
		'--wappkey '+\
		'--raw '+\
		'--aes '+\
		'-m/--mongodb'
		sys.exit(2)
	for opt, arg in opts:
		if opt in ("-i", "--ignorecomment"):
			print("will ignore commented lines")
			global _ignoreComment
			_ignoreComment = 1
		elif opt in ("-f", "--firebase"):
			print("will enable firebase support")
			global _firebase
			_firebase = True
			# cloud helper import is deferred until the option is used
			global firebase_uploadSingleData
			from FireBase import firebase_uploadSingleData
		elif opt in ("-t", "--thingspeak"):
			print("will enable thingspeak support")
			global _thingspeak
			_thingspeak = True
			global thingspeak_uploadSingleData, thingspeak_uploadMultipleData
			from ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData
		# NOTE(review): ("--retrythsk") below is a plain string, not a
		# 1-tuple, so `opt in (...)` is a *substring* test.  It happens to
		# be safe because no option string produced by getopt is a
		# substring of another; the same applies to every single-option
		# branch that follows.
		elif opt in ("--retrythsk"):
			print("will enable thingspeak retry")
			global thingspeak_setRetry
			from ThingSpeak import thingspeak_setRetry
			#set retry to True
			thingspeak_setRetry(True)
		elif opt in ("--thingspeaksnr"):
			print("will plot snr instead of seq")
			global _thingspeaksnr
			_thingspeaksnr = True
		elif opt in ("--fiware"):
			print("will enable fiware support")
			global _fiware
			_fiware = True
		elif opt in ("--sensorcloud"):
			print("will enable sensorcloud support")
			global _sensorcloud
			_sensorcloud = True
			global sensorcloud_uploadSingleData
			from SensorCloud import sensorcloud_uploadSingleData
		elif opt in ("--grovestreams"):
			print("will enable grovestreams support")
			global _grovestreams
			_grovestreams = True
			global grovestreams_uploadSingleData
			from GroveStreams import grovestreams_uploadSingleData
		elif opt in ("-L", "--loggw"):
			print("will log gateway message prefixed by ^$")
			global _logGateway
			_logGateway = 1
		elif opt in ("-a", "--addr"):
			# override the gateway id used in log file names
			global _gwaddr
			_gwaddr = arg
			print("overwrite: will use _"+str(_gwaddr)+" for gateway and telemetry log files")
		elif opt in ("--wappkey"):
			global _wappkey
			_wappkey = 1
			# until a valid app key is seen, incoming data is rejected
			global _validappkey
			_validappkey=0
			print("will check for correct app key")
		elif opt in ("--raw"):
			global _rawFormat
			_rawFormat = 1
			print("raw output from gateway. post_processing_gw will handle packet format")
		elif opt in ("--aes"):
			global _aes
			_aes = 1
			global AES
			from Crypto.Cipher import AES
			print("enable AES encrypted data")
		elif opt in ("-m", "--mongodb"):
			# -m takes an argument: the retention window in months
			print("will enable local MongoDB support, max months to store is "+arg)
			global _mongodb
			_mongodb = True
			global add_document, remove_if_new_month, mongodb_set_max_months
			from MongoDB import add_document, remove_if_new_month, mongodb_set_max_months
			#setting max months
			mongodb_set_max_months(int(arg))
# END
#////////////////////////////////////////////////////////////
if __name__ == "__main__":
main(sys.argv[1:])
#gateway dht22
if (_gw_dht22):
print "Starting thread to measure gateway temperature"
t = threading.Thread(target=dht22_target)
t.daemon = True
t.start()
print "Current working directory: "+os.getcwd()
while True:
sys.stdout.flush()
ch = getSingleChar()
#expected prefixes
# ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
# example: ^p1,16,3,0,234,8,-45
#
# ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
# example: ^r500,5,12
#
# ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
# example: ^$Set LoRa mode 4
#
# ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
# type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
# example: ^l3,4
#
# \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
# example: \$hello -> hello will be logged in the following format
# (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
#
# \& indicates a message that should be logged in the firebase cloud database
# example: \&hello -> hello will be logged in json format
#
# \! indicates a message that should be logged on a thingspeak channel
# example: \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
# \!2#9.4 -> 9.4 will be logged in the default channel at field 2
# \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
#
# you can log other information such as src, seq, len, SNR and RSSI on specific fields
#
# \xFF\xFE indicates radio data prefix
#
#
#------------------------------------------------------------
# '^' is reserved for control information from the gateway
#------------------------------------------------------------
if (ch=='^'):
now = datetime.datetime.utcnow()
ch=sys.stdin.read(1)
if (ch=='p'):
data = sys.stdin.readline()
print now.isoformat()
print "rcv ctrl pkt info (^p): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
dst=arr[0]
ptype=arr[1]
ptypestr="N/A"
if ((ptype & 0xF0)==PKT_TYPE_DATA):
ptypestr="DATA"
if (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:
ptypestr = ptypestr + " IS_BINARY"
if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
ptypestr = ptypestr + " WAPPKEY"
if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
ptypestr = ptypestr + " ENCRYPTED"
if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
ptypestr = ptypestr + " ACK_REQ"
if ((ptype & 0xF0)==PKT_TYPE_ACK):
ptypestr="ACK"
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if (_rawFormat==0):
info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
else:
info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
print info_str
# TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file
if (ch=='r'):
data = sys.stdin.readline()
print "rcv ctrl radio info (^r): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
bw=arr[0]
cr=arr[1]
sf=arr[2]
info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
print info_str
if (ch=='t'):
rcv_timestamp = sys.stdin.readline()
print "rcv timestamp (^t): "+rcv_timestamp
if (ch=='l'):
# TODO: LAS service
print 'not implemented yet'
if (ch=='$' and _logGateway==1):
data = sys.stdin.readline()
print "rcv gw output to log (^$): "+data,
f=open(os.path.expanduser(_gwlog_filename),"a")
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
continue
#------------------------------------------------------------
# '\' is reserved for message logging service
#------------------------------------------------------------
if (ch=='\\'):
now = datetime.datetime.utcnow()
if _validappkey==1:
print 'valid app key: accept data'
ch=getSingleChar()
if (ch=='$'): #log on Dropbox
data = getAllLine()
print "rcv msg to log (\$) on dropbox: "+data,
f=open(os.path.expanduser(_telemetrylog_filename),"a")
f.write(info_str+' ')
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
#/////////////////////////////////////////////////////////////
# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
# WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAM
# IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD
#////////////////////////////////////////////////////////////
elif (ch=='&' and _firebase): #log on Firebase
ldata = getAllLine()
print 'rcv msg to log (\&) on firebase: '+data
firebase_msg = {
'dst':dst,
'type':ptypestr,
'gateway_eui' : _gwaddr,
'node_eui':src,
'seq':seq,
'len':datalen,
'snr':SNR,
'rssi':RSSI,
'cr' : cr,
'datarate' : "SF"+str(sf)+"BW"+str(bw),
'time':now.isoformat(),
'info_str':info_str+' '+now.isoformat()+'> '+ldata,
'data':ldata
}
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#get the data
data = ldata.split('/')
#change data in two arrays : nomenclature_array and value_array
iteration = 0
nomenclature_array = []
value_array = []
while iteration<len(data) :
if (iteration == 0 or iteration%2 == 0) :
nomenclature_array.append(data[iteration])
else :
value_array.append(data[iteration])
iteration += 1
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
iteration = 0
while iteration < len(nomenclature_array) :
#last iteration, do not add "," at the end
if iteration == len(nomenclature_array)-1 :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]
else :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]+", "
iteration += 1
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
sensor_entry='sensor%d'% (src)
msg_entry='msg%d' % (seq)
#upload data to firebase
firebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)
elif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature
ldata = getAllLine()
# get number of '#' separator
nsharp = ldata.count('#')
#no separator
if nsharp==0:
#will use default channel and field
data=['','']
#contains ['', '', "s1", s1value, "s2", s2value, ...]
data_array = data + re.split("/", ldata)
elif nsharp==1:
#only 1 separator
data_array = re.split("#|/", ldata)
#if the first item has length > 1 then we assume that it is a channel write key
if len(data_array[0])>1:
#insert '' to indicate default field
data_array.insert(1,'');
else:
#insert '' to indicate default channel
data_array.insert(0,'');
else:
#contains [channel, field, "s1", s1value, "s2", s2value, ...]
data_array = re.split("#|/", ldata)
#just in case we have an ending CR or 0
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')
#test if there are characters at the end of each value, then delete these characters
i = 3
while i < len(data_array) :
while not data_array[i][len(data_array[i])-1].isdigit() :
data_array[i] = data_array[i][:-1]
i += 2
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
#start from the first nomenclature
iteration = 2
while iteration < len(data_array)-1 :
#last iteration, do not add "," at the end
if iteration == len(data_array)-2 :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]
else :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]+", "
iteration += 2
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
# get number of '/' separator
nslash = ldata.count('/')
index_first_data = 2
if nslash==0:
# old syntax without nomenclature key
index_first_data=2
else:
# new syntax with nomenclature key
index_first_data=3
#------------------
#test for thingspeak
#------------------
if (_thingspeak):
second_data=str(seq)
if (_thingspeaksnr):
second_data=str(SNR)
#data to send to thingspeak
data = []
data.append(data_array[0]) #channel (if '' default)
data.append(data_array[1]) #field (if '' default)
data.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)
#upload data to thingspeak
#JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA
thingspeak_uploadSingleData(data, second_data)
# if you want to upload all data starting at field 1, uncomment next line, and comment previous line
#thingspeak_uploadMultipleData(data_array) # upload all data in the fields
#------------------
#test for FIWARE
#need FIWARE access
#------------------
if (_fiware):
print("FIWARE: upload")
#entity_id = 'test_item_'+now.isoformat()
entity_id = 'sensor%d'% (src)
#send the first sensor value in data_array
cmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]
print("FiWare: will issue python script")
print(cmd)
args = cmd.split()
try:
out = subprocess.check_output(args, shell=False)
except subprocess.CalledProcessError:
print("FiWare: python script failed")
if out.find('"reasonPhrase" : "OK"') > 0:
print("FiWare: Entity updated with ENTITY_ID "+entity_id)
else:
print("FiWare: Entity update failed")
#------------------
#test for sensorcloud
#------------------
if (_sensorcloud) :
#send the first sensor value in data_array
sensorcloud_uploadSingleData(data_array[index_first_data])
#------------------
#test for grovestreams
#------------------
if (_grovestreams):
nomenclatures = []
data = []
if nslash==0:
# old syntax without nomemclature key, so insert only one key
nomenclatures.append("temp")
data.append(data_array[index_first_data])
else:
#completing nomenclatures and data
i=2
while i < len(data_array)-1 :
nomenclatures.append(data_array[i])
data.append(data_array[i+1])
i += 2
#upload data to grovestreams
grovestreams_uploadSingleData(nomenclatures, data, str(src))
# END
#////////////////////////////////////////////////////////////
else: # not a known data logging prefix
#you may want to upload to a default service
#so just implement it here
print('unrecognized data logging prefix: discard data')
getAllLine()
else:
print('invalid app key: discard data')
getAllLine()
continue
# handle binary prefixes
if (ch == '\xFF' or ch == '+'):
#if (ch == '\xFF'):
print("got first framing byte")
ch=getSingleChar()
# data prefix for non-encrypted data
if (ch == '\xFE' or ch == '+'):
#if (ch == '\xFE'):
#the data prefix is inserted by the gateway
#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
print("--> got data prefix")
#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
#if _wappkey is set then we have to first indicate that _validappkey=0
if (_wappkey==1):
_validappkey=0
else:
_validappkey=1
# if we have raw output from gw, then try to determine which kind of packet it is
if (_rawFormat==1):
ch=getSingleChar()
# probably our modified Libelium header where the destination is the gateway
# dissect our modified Libelium format
if ch==1:
dst=ord(ch)
ptype=ord(getSingleChar())
src=ord(getSingleChar())
seq=ord(getSingleChar())
print("Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq))
# now we read datalen-4 (the header length) bytes in our line buffer
fillLinebuf(datalen-HEADER_SIZE)
# TODO: dissect LoRaWAN
# you can implement LoRaWAN decoding if this is necessary for your system
# look at the LoRaWAN packet format specification to dissect the packet in detail
#
# LoRaWAN uses the MHDR(1B)
# ----------------------------
# | 7 6 5 | 4 3 2 | 1 0 |
# ----------------------------
# MType RFU major
#
# the main MType is unconfirmed data up which value is 010
if (ch & 0x40)==0x40:
# Do the LoRaWAN decoding
print("LoRaWAN?")
# for the moment just discard the data
fillLinebuf(datalen-1)
getAllLine()
else:
# now we read datalen bytes in our line buffer
fillLinebuf(datalen)
# encrypted data payload?
if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
print("--> DATA encrypted: encrypted payload size is %d" % datalen)
_hasClearData=0
if _aes==1:
print("--> decrypting")
decrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)
# decrypt
s = decrypt_handler.decrypt(_linebuf)
for i in range(0, len(s)):
print "%.2X " % ord(s[i]),
print "\nEnd"
# get the real (decrypted) payload size
rsize = ord(s[APPKEY_SIZE])
print("--> real payload size is %d" % rsize)
# then add the appkey + the appkey framing bytes
rsize = rsize+APPKEY_SIZE+1
_linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]
for i in range(0, len(_linebuf)):
print "%.2X " % ord(_linebuf[i]),
print "\nEnd"
# normally next read from input will get data from the decrypted _linebuf
print "--> decrypted payload is: ",
print _linebuf[APPKEY_SIZE:]
_hasClearData=1
else:
print("--> DATA encrypted: aes not activated")
# drain stdin of all the encrypted data
enc_data=getAllLine()
print("--> discard encrypted data")
else:
_hasClearData=1
# with_appkey?
if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
print("--> DATA with_appkey: read app key sequence")
the_app_key = getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
print "app key is ",
print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)
if the_app_key in app_key_list:
print("in app key list")
if _wappkey==1:
_validappkey=1
else:
print("not in app key list")
if _wappkey==1:
_validappkey=0
else:
#we do not check for app key
_validappkey=1
print("but app key disabled")
continue
if (ch == '?' and _ignoreComment==1):
sys.stdin.readline()
continue
sys.stdout.write(ch)
|
9,390 | 8fd020e7f1854d29cf903f86d91a3a9ffa9d08d3 | /home/rip-acer-vn7-591g-1/catkin_ws/devel_cb/.private/nmea_navsat_driver/lib/python2.7/dist-packages/libnmea_navsat_driver/__init__.py |
9,391 | 763f552329a0d38900e08081a1017b33cd882868 | import random
tree_age = 1
state = "alive"
value = 1
age_display = "Your tree have an age of: {}".format(tree_age)
state_display = "Your tree is {}.".format(state)
def tree_state(x):
    """Return "alive" or "dead" for a tree of age *x*.

    Up to age 19 the tree always survives; from 20 to 49 it dies with a
    random chance that grows with age; from 50 onward it is always dead.
    """
    if x <= 19:
        return "alive"
    elif x <= 49:
        # One chance in (51 - x) of drawing 50, i.e. dying, so the odds
        # rise with age. The original read the global ``tree_age`` here
        # instead of the parameter, so the function ignored its argument.
        rand = random.randrange(x, 51, 1)
        if rand == 50:
            return "dead"
        return "alive"
    else:
        return "dead"
print("Welcome to your tree garden!")
# Main interactive loop: show the tree, let the player age it or quit.
while value == 1 :
    print(age_display)
    print(state_display)
    print("Please press 1 to increase its age or 2 to quit.")
    action = input("Select 1/2 ")
    if action == "2" :
        value = 2
    elif action == "1" :
        tree_age += 1
        # Capture the return value: the original discarded it, so the
        # global `state` never changed and the tree could never die.
        state = tree_state(tree_age)
        print(state)
        if state == "dead":
            print("Sorry your tree is dead.")
            quit()
        else:
            # Refresh both displays so the next iteration shows the
            # current age and state (state_display was never updated before).
            age_display = "Your tree have an age of: {}".format(tree_age)
            state_display = "Your tree is {}.".format(state)
    else:
        print("Invalid input, please enter the right input.")
if value == 2:
    print("Thanks")
|
9,392 | 32d830f00a9d33b8f7f438c14b522ef186001bf3 | /usr/local/python-3.6/lib/python3.6/abc.py |
9,393 | ab6450ee9038e0c58ca8becf6d2518d5e00b9c90 | """Generic utilities module"""
from . import average
from . import extract_ocean_scalar
from . import git
from . import gmeantools
from . import merge
from . import netcdf
from . import xrtools
__all__ = [
"average",
"extract_ocean_scalar",
"git",
"gmeantools",
"merge",
"netcdf",
"xrtools",
]
|
9,394 | ca403e8820a3e34e0eb11b2fdd5d0fc77e3ffdc4 | # File for the information gain feature selection algorithm
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import mutual_info_classif
# The function which will be called
def get_features(raw_data, raw_ids):
    """Select columns of *raw_data* whose text is informative about *raw_ids*.

    Each candidate column is vectorized with a bag-of-words CountVectorizer
    and scored with the summed mutual information between its token counts
    and the person labels; columns whose total score exceeds 0.5 are kept.
    (The previous docstring was copied from an unrelated ID3 helper and
    described parameters this function does not have.)

    Parameters
    ----------
    raw_data : data convertible to a pandas.DataFrame
        One row per sample, one candidate feature per column.
    raw_ids : sequence
        Person identifier for each row (the classification target).

    Returns
    -------
    list
        Names of the columns with summed mutual information > 0.5.
        NOTE(review): mutual_info_classif is randomized and no random_state
        is set, so results can vary between calls — confirm if determinism
        is required.
    """
    df = pd.DataFrame(raw_data)
    df["person"] = raw_ids
    selected_columns = []
    cv = CountVectorizer(max_df=1, min_df=1,
                         max_features=72, stop_words='english')
    # The label column is constant across the loop; extract it once.
    labels = df["person"].astype(str)
    for column in df:
        if column == "person":
            # The target column is never a candidate feature.
            continue
        counts = cv.fit_transform(df[column].astype(str))
        info_gain = mutual_info_classif(counts, labels,
                                        discrete_features=True)
        # Total (not average, as the old name suggested) mutual information
        # across all vocabulary terms of this column.
        if sum(info_gain) > .5:
            selected_columns.append(column)
    return selected_columns
|
9,395 | 7c9b68b2d32d8e435f332d4412ea1ba899607ec4 | """Derivation of variable ``co2s``."""
import dask.array as da
import iris
import numpy as np
import stratify
from ._baseclass import DerivedVariableBase
def _get_first_unmasked_data(array, axis):
    """Get first unmasked value of an array along an axis."""
    mask = da.ma.getmaskarray(array)
    # Masked cells become -1, valid ones +1, so argmax returns the index of
    # the first unmasked element along `axis` (argmax yields the first
    # occurrence of the maximum).
    numerical_mask = da.where(mask, -1.0, 1.0)
    indices_first_positive = da.argmax(numerical_mask, axis=axis)
    # Build full index grids for every other dimension, then insert the
    # per-cell "first unmasked" index at position `axis` to form a complete
    # fancy-index tuple.
    indices = da.meshgrid(
        *[da.arange(array.shape[i]) for i in range(array.ndim) if i != axis],
        indexing='ij')
    indices.insert(axis, indices_first_positive)
    # NOTE(review): np.array() forces computation of the (lazy) dask input,
    # so the returned array is eager numpy — confirm this is intended.
    first_unmasked_data = np.array(array)[tuple(indices)]
    return first_unmasked_data
class DerivedVariable(DerivedVariableBase):
    """Derivation of variable ``co2s``.

    Use linear interpolation/extrapolation and surface air pressure to
    calculate CO2 mole fraction at surface.

    Note
    ----
    In some cases, ``co2`` data is masked. In these cases, the masked values
    correspond to values where the pressure level is higher than the surface
    air pressure (e.g. the 1000 hPa level for grid cells with high elevation).
    To obtain an unmasked ``co2s`` field, it is necessary to fill these masked
    values accordingly, i.e. with the lowest unmasked value for each grid cell.
    """

    @staticmethod
    def required(project):
        """Declare the variables needed for derivation."""
        required = [{'short_name': 'co2'}, {'short_name': 'ps'}]
        return required

    @staticmethod
    def calculate(cubes):
        """Compute mole fraction of CO2 at surface."""
        co2_cube = cubes.extract_cube(
            iris.Constraint(name='mole_fraction_of_carbon_dioxide_in_air'))
        ps_cube = cubes.extract_cube(
            iris.Constraint(name='surface_air_pressure'))

        # Fill masked data if necessary (interpolation fails with masked data)
        (z_axis,) = co2_cube.coord_dims(co2_cube.coord(axis='Z',
                                                       dim_coords=True))
        mask = da.ma.getmaskarray(co2_cube.core_data())
        if mask.any():
            # Replace each masked cell with the first unmasked value along
            # the vertical axis, i.e. the lowest valid level of that column.
            first_unmasked_data = _get_first_unmasked_data(
                co2_cube.core_data(), axis=z_axis)
            dim_map = [dim for dim in range(co2_cube.ndim) if dim != z_axis]
            first_unmasked_data = iris.util.broadcast_to_shape(
                first_unmasked_data, co2_cube.shape, dim_map)
            co2_cube.data = da.where(mask, first_unmasked_data,
                                     co2_cube.core_data())

        # Interpolation (not supported for dask arrays)
        air_pressure_coord = co2_cube.coord('air_pressure')
        original_levels = iris.util.broadcast_to_shape(
            air_pressure_coord.points, co2_cube.shape,
            co2_cube.coord_dims(air_pressure_coord))
        # Interpolate every column to a single target level: that grid
        # cell's surface pressure.
        target_levels = np.expand_dims(ps_cube.data, axis=z_axis)
        co2s_data = stratify.interpolate(
            target_levels,
            original_levels,
            co2_cube.data,
            axis=z_axis,
            interpolation='linear',
            extrapolation='linear',
        )
        co2s_data = np.squeeze(co2s_data, axis=z_axis)

        # Construct co2s cube: take the lowest-level slice of the co2 cube
        # as a template, overwrite its data, and replace the vertical
        # pressure coordinate with the 2-D surface-pressure field.
        indices = [slice(None)] * co2_cube.ndim
        indices[z_axis] = 0
        co2s_cube = co2_cube[tuple(indices)]
        co2s_cube.data = co2s_data
        if co2s_cube.coords('air_pressure'):
            co2s_cube.remove_coord('air_pressure')
        ps_coord = iris.coords.AuxCoord(ps_cube.data,
                                        var_name='plev',
                                        standard_name='air_pressure',
                                        long_name='pressure',
                                        units=ps_cube.units)
        co2s_cube.add_aux_coord(ps_coord, np.arange(co2s_cube.ndim))
        # Express the result in ppm.
        co2s_cube.convert_units('1e-6')
        return co2s_cube
|
9,396 | 8771f71a69f3afdc5de4d38db6efe61b553ae880 | import cartopy.crs as ccrs
import cartopy.feature as cfeature
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
import xarray as xr
import metpy
from datetime import datetime
import datetime as dt
from metpy.units import units
import scipy.ndimage as ndimage
from metpy.plots import USCOUNTIES
import cartopy
from scipy.ndimage.filters import generic_filter as gf
def mkdir_p(mypath):
    '''Create directory *mypath* (with parents), like ``mkdir -p``.

    Succeeds silently when the directory already exists; any other failure
    (permissions, path exists as a regular file) raises OSError, matching
    the original errno-based implementation.
    '''
    from os import makedirs
    # exist_ok=True replaces the manual EEXIST/isdir dance (Python 3.2+,
    # and this script already uses Python 3 syntax throughout).
    makedirs(mypath, exist_ok=True)
# Timestamp the run: used both to build date strings and report runtime.
startTime = datetime.now()

m_date = '20200903'
m_hour = '12'

year = startTime.year
# Zero-pad month/day/hour to two digits (e.g. 3 -> '03') for URL building.
month = str(startTime.month).zfill(2)
day = str(startTime.day).zfill(2)
hour = str(startTime.hour).zfill(2)

mdate = str(year) + month + day
def get_init_hr(hour):
    """Map a wall-clock hour (int or numeric string) to the most recent
    available GFS cycle, returned as a two-character string.

    Cutoffs are deliberately a few hours after each cycle time (00/06/12/18Z)
    to allow for data-availability lag; very late hours roll over to the
    next day's 00Z run.
    """
    h = int(hour)
    if h < 6:
        return '00'
    if h < 11:
        return '06'
    if h < 17:
        return '12'
    if h < 22:
        return '18'
    return '00'
url = 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs'+mdate+'/gfs_0p25_1hr_'+get_init_hr(hour)+'z'
init_hour = get_init_hr(hour)
'''
for i in range(119):
fhr = i+1
'''
# Create new directory
output_dir = str(year)+str(month)+str(day)+'_'+str(init_hour)+'00'
mkdir_p(output_dir)
mkdir_p(output_dir+'/GFS')
#Parse data using MetPy
ds = xr.open_dataset(url)
init_hr = dt.datetime(int(year),int(month),int(day),int(init_hour))
times = ds['tmp2m'].metpy.time
init_time = ds['time'][0]
lats = np.arange(15,70,0.25)
lons = np.arange(220,330,0.25)
# Render 500 mb height / absolute-vorticity maps for forecast hours 1-119.
for i in range(1,120):
    fc_hr = init_hr+dt.timedelta(hours=1*i)
    # NOTE(review): fc_hr and forecast_hour are computed but never used
    # below; the valid time shown on the maps comes from `time` parsed
    # per step.
    forecast_hour = times[0].values
    data = ds.metpy.parse_cf()
    data = data.isel(time=i)
    #Rename variables to useful things
    data = data.rename({
        'absvprs':'avort',
        'hgtprs':'gph',
        'rhprs':'rh',
        'tmpprs':'temp',
        'ugrdprs':'u',
        'vgrdprs': 'v',
    })
    vertical, = data['temp'].metpy.coordinates('vertical')
    time = data['temp'].metpy.time
    zH5_crs = data['temp'].metpy.cartopy_crs
    # 500 mb fields; winds scaled m/s -> knots (1.94384449), vorticity
    # scaled by 1e5 for plotting in units of 1e-5 s^-1.
    t5 = data['temp'].sel(lev=500.0,lat=lats,lon=lons)
    u5 = data['u'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449
    v5 = data['v'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449
    av5 = data['avort'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1e5
    rh5 = data['rh'].sel(lev=500.0,lat=lats,lon=lons).squeeze()
    h5 = data['gph'].sel(lev=500.0,lat=lats,lon=lons).squeeze()
    x, y = t5.metpy.coordinates('x', 'y')
    lat, lon = xr.broadcast(y, x)
    # Thin the wind barbs to every 5th grid point.
    wind_slice = slice(5,-5,5)
    ########## SET UP FIGURE ##################################################
    fig = plt.figure(figsize=(15,15))
    ax1 = fig.add_subplot(111, projection = zH5_crs)
    ax1.coastlines(resolution='10m')
    ax1.add_feature(cfeature.BORDERS.with_scale('10m'))
    ax1.add_feature(cfeature.STATES.with_scale('10m'))
    #fig.suptitle("NAM Forecast valid at " + time[0].dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=36)
    ########## PLOTTING #######################################################
    h5c = ax1.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
    t5c = ax1.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
    a5c = ax1.contourf(x,y,av5,cmap='autumn_r',levels=range(10,60,2),alpha=0.8,extend='max')
    a5cb = fig.colorbar(a5c, orientation = 'horizontal', aspect = 80, ax = ax1, pad = 0.01,
                        extendrect=False, ticks = range(10,61,5))
    a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)
    ax1.barbs(x[wind_slice],y[wind_slice],u5[wind_slice,wind_slice],v5[wind_slice,wind_slice], length=7)
    #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
    #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
    ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)
    ax1.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
    ax1.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
    ax1.set_extent((265, 300, 25, 50))#, crs = zH5_crs) # Set a title and show the plot
    plt.savefig(output_dir+'/GFS/gfs_hrly_h5vort_'+str(i)+'.png')
    plt.clf()
    plt.close()
    ########## PLOT 2 #######################################################
    # Wider CONUS view with coarser barb thinning (every 10th point).
    wind_slice_s = slice (10,-10,10)
    fig2 = plt.figure(figsize=(15,15))
    ax2 = fig2.add_subplot(111,projection=zH5_crs)
    ax2.coastlines(resolution='50m')
    ax2.add_feature(cfeature.BORDERS.with_scale('50m'))
    ax2.add_feature(cfeature.STATES.with_scale('50m'))
    h5c2 = ax2.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
    t5c2 = ax2.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
    a5c2 = ax2.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)
    a5cb2 = fig2.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax2, pad = 0.01,
                          extendrect=False, ticks = range(10,60,5))
    a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)
    ax2.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)
    #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
    #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
    ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)
    ax2.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
    ax2.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
    ax2.set_extent((225, 300, 20, 65))#, crs = zH5_crs) # Set a title and show the plot
    plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortCONUS_v2_'+str(i)+'.png')
    ########## PLOT 3 #######################################################
    # East-coast view of the same fields.
    wind_slice_s = slice (10,-10,10)
    fig3 = plt.figure(figsize=(15,15))
    ax3 = fig3.add_subplot(111,projection=zH5_crs)
    ax3.coastlines(resolution='50m')
    ax3.add_feature(cfeature.BORDERS.with_scale('50m'))
    ax3.add_feature(cfeature.STATES.with_scale('50m'))
    h5c2 = ax3.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
    t5c2 = ax3.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
    a5c2 = ax3.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)
    a5cb2 = fig3.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax3, pad = 0.01,
                          extendrect=False, ticks = range(10,60,5))
    a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)
    ax3.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)
    #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
    #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
    ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)
    ax3.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
    ax3.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
    ax3.set_extent((260, 320, 20, 65))#, crs = zH5_crs) # Set a title and show the plot
    plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortC_ec_v1_'+str(i)+'.png')
    # NOTE(review): fcst_hr is assigned but never used.
    fcst_hr = str(0)
    print('Hour '+str(i)+' completed!')
    plt.close()
# Report total wall-clock runtime of the whole plotting job.
timeelapsed = datetime.now()-startTime
print(timeelapsed)
# The block below is a disabled scratch test of direct NOMADS access.
'''
url= 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs20200903/gfs_0p25_1hr_12z'
ds = xr.open_dataset(url)
t2m_ds = ds['tmp2m']
init_hr = t2m_ds['time'][0].values
#fc_hr = t2m.ds['time'][i].values
lats = np.arange(20,50,0.25)
lons = np.arange(240,300,0.25)
t2m = t2m_ds.sel(time = init_hr, lat = lats, lon = lons)
print(t2m)
fig = plt.figure(figsize = (12,12))
fig.clf()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_extent((240,300, 20, 50), crs = ccrs.PlateCarree())
t2m_c = ax.contourf(t2m, cmap='RdPu')
plt.savefig('testingnomads6.png')
'''
|
9,397 | dc2deb7d4c9cc126a6d80435fe9dbc16d6ac8941 | config_prefix = "<"
# Image formats accepted for uploads.
config_suported_types = ["PNG", "GIF", "JPEG"]
# Allowed characters for names: alphanumerics and underscore only.
config_pattern = "^[A-Za-z0-9_]*$"
# Total storage budget in bytes (1 GB).
config_max_storage = int(1E9)
config_max_name_length = 20
# NOTE(review): presumably the number of messages emitted per line of
# output — confirm against the code that reads this setting.
config_message_by_line = 2
config_max_message_length = 2000
# Maximum emote file size in bytes (8 MB).
config_max_emote_length = 8*int(1E6)
# ASCII-art reply used to answer a ping.
config_pong = """
,;;;!!!!!;;.
:!!!!!!!!!!!!!!;
:!!!!!!!!!!!!!!!!!;
;!!!!!!!!!!!!!!!!!!!;
;!!!!! P O N G !!!!!!!
;!!!!!!!!!!!!!!!!!!!!'
;!!!!!!!!!!!!!!!!!!!'
:!!!!!!!!!!!!!!!!'
,!!!!!!!!!!!!!''
,;!!!'''''''
.!!!!'
!!!!`
`'
"""
|
9,398 | f2ac9904aaa4c12ef2954b88c37ffd0c97aadf5a | '''
Problem 24
A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:
012 021 102 120 201 210
What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
'''
from itertools import islice, permutations


def nth_lex_permutation(digits, n):
    """Return the n-th (1-based) lexicographic permutation of *digits* as a string.

    ``itertools.permutations`` yields tuples in lexicographic order when its
    input is sorted, so the answer is simply the n-th tuple.
    """
    perm = next(islice(permutations(digits), n - 1, None))
    return ''.join(str(d) for d in perm)


# Problem 24 answer: the millionth permutation of the digits 0-9.
# (The original used Python-2-only ``p.next()`` and the ``print`` statement,
# and skipped the first 999 999 permutations with a manual loop.)
print(nth_lex_permutation(range(10), 1000000))
|
9,399 | a2626b384d0b7320ee9bf7cd75b11925ccc00666 | import itertools
def sevens_in_a_row(arr, n):
    """Return True if *arr* contains a run of at least *n* equal consecutive
    elements.

    NOTE(review): the original iterated over the list's values, used them as
    indices, and printed an undefined name, so it always crashed before
    returning. Its adjacent-element comparison (``arr[i] == arr[i+1]``)
    suggests a "run of n equal items" check, which is implemented here —
    confirm the intended semantics against the caller.
    """
    if n <= 0:
        # A run of length 0 exists trivially.
        return True
    if n == 1:
        # Any single element is a run of length 1.
        return len(arr) > 0
    run = 1
    for prev, cur in zip(arr, arr[1:]):
        run = run + 1 if cur == prev else 1
        if run >= n:
            return True
    return False


def main():
    """Demo: the list starts with three consecutive '1's, so n=3 matches."""
    n = 3
    arr = ['1', '1', '1', '2', '3', '-4']
    print(sevens_in_a_row(arr, n))


if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.