index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,400 | 5aa55a96e414ad6b3ceebbcbd71c23a1fd69f0d1 | from .FactorWarData import Get_FactorWar_Data |
6,401 | 4ef6002480fcaa514f41227978bae76f6e02c22d | name = input("Enter your name: ")
# Greet the player; `name` was read just above.
print("Hi buddy! Today we will play a game " + name + "!")
print("Are you ready?")
# NOTE(review): the answer is stored but never checked — the game starts regardless.
question = input("Are you ready ? Yes or no: ")
print(name + " we are starting!")
# Each list holds one fragment of the final sentence; the player picks one by index.
liste1 = ['My neighbor ', 'My girlfriend ', 'My boyfriend ', 'My dog ']
num = input("Enter a number: ")
# NOTE(review): no bounds checking — an index outside 0-3 raises IndexError,
# and non-numeric input raises ValueError.
liste1 = liste1[int(num)]
liste2 = ['hates ', 'loves ', 'enjoys ', 'ridicules ']
num = input("Enter a number: ")
liste2 = liste2[int(num)]
liste3 = ['with me ', 'with my grandma ', 'with our home staff ', 'with our money ']
num = input("Enter a number: ")
liste3 = liste3[int(num)]
liste4 = ['in every situation ! ', 'until end of the world ! ']
num = input("Enter a number: ")
liste4 = liste4[int(num)]
print(liste1 + liste2 + liste3 + liste4) |
6,402 | 1861c394fb02643d2e6ac8362f3340f512ef6d72 | import gc
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import cv2
import torch
from torch.utils.data import DataLoader
from torch import optim
from torch.optim import lr_scheduler
from dataset.car_dataset import CarDataset
from nn.network import MyUNet
from utils.utils import coords2str, extract_coords
from utils.evaluate_map import compute_map
from utils.utils import visualize
from efficientnet_pytorch import EfficientNet
# Camera intrinsics for the PKU autonomous-driving dataset
# (focal lengths and principal point, in pixels).
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
                          [0, 2305.8757, 1354.9849],
                          [0, 0, 1]], dtype=np.float32)
device = torch.device("cuda")  # NOTE(review): overwritten below with a cuda/cpu fallback
IMG_WIDTH = 1024
IMG_HEIGHT = IMG_WIDTH // 16 * 5  # fixed 16:5 aspect ratio
MODEL_SCALE = 8  # presumably the model's output downscale factor — TODO confirm against MyUNet

if __name__ == "__main__":
    # Dataset locations and ground-truth / submission CSVs.
    ROOT_PATH = "/media/andreis/storage/datasets/pku-autonomous-driving/"
    df = pd.read_csv(ROOT_PATH + "train.csv")
    df_test = pd.read_csv(ROOT_PATH + "sample_submission.csv")
    train_images_dir = ROOT_PATH + "train_images/"
    test_images_dir = ROOT_PATH + "test_images/"
    # Tiny hold-out split, used only for visual inspection below.
    df_train, df_val = train_test_split(df, test_size=0.01, random_state=72)
    df_val_gt = df_val.copy()
    # create dataset objects
    train_dataset = CarDataset(df_train, train_images_dir, camera_matrix)
    val_dataset = CarDataset(df_val, train_images_dir, camera_matrix)
    test_dataset = CarDataset(df_test, test_images_dir, camera_matrix)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load trained weights and switch to inference mode.
    model = MyUNet(10).to(device)
    model.load_state_dict(torch.load("model.pth"))
    model.eval()
    val_loader = DataLoader(dataset=val_dataset, batch_size=1, shuffle=False, num_workers=4)
    #img, mask, regr = val_dataset[0]
    #output = model(torch.tensor(img[None]).to(device))
    #output = output.data.cpu().numpy()
    predictions = []
    for img, _, _, img0 in tqdm(val_loader):
        # CHW tensor -> HWC numpy image (currently unused; kept for debugging).
        img_np = np.moveaxis(torch.squeeze(img).numpy(), 0, 2)
        img0 = torch.squeeze(img0).numpy()#p.moveaxis(torch.squeeze(img).numpy(), 0, 2)
        #print(img_np.shape)
        with torch.no_grad():
            #output = model(torch.tensor(img[None]).to(device))
            output = model(img.to(device))
        output = output.data.cpu().numpy()
        # looping over batch items
        for out in output:
            coords = extract_coords(out)
            print(coords)
            # s = coords2str(coords)
            #predictions.append(s)
            # Draw predicted poses on the original image and show it at 25% size.
            q_img = visualize(img0, coords, camera_matrix)
            print(q_img.shape)
            q_img = cv2.resize(q_img, (int(q_img.shape[1]*0.25), int(q_img.shape[0]*0.25) ))
            # show predictions on image
            cv2.imshow("Prediction", q_img)
            cv2.waitKey()
    # cv2.imshow("Predictions", visualize(img_np, coords, camera_matrix))
    # cv2.waitKey()
    #df_val['PredictionString'] = predictions
    #df_test.to_csv('predictions.csv', index=False)
    #print(df_val.head())
    #def sigmoid(x):
    # return 1 / (1 + np.exp(-x))
    #map = compute_map(df_val_gt, df_val)
    #print(map)
    #logits = output[0,0].data.cpu().numpy()
    #sigmoids = np.apply_along_axis(sigmoid, -1, logits)
    #print(output.shape)
    #print(logits.shape)
    #print(sigmoids.shape)
    #print(sigmoids)
    #print(np.max(sigmoids))
    #points = np.argwhere(logits > 0)
    #print(points)
    #preds = extract_coords(output)
    #img = np.rollaxis(img, 0, 3)
    #print(type(img))
    #cv2.imshow("imagine", img)
    #cv2.imshow("mask", mask)
    #cv2.imshow("regr", regr[:,:,-1])
    #cv2.imshow("predictions", sigmoids)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
|
6,403 | 16e5a44cb4fbe71eaa9c1f5b00505578de0d2cea | from django.contrib import admin
# Register your models here.
from .models import HuyenQuan

# Expose HuyenQuan in the Django admin with the default ModelAdmin options.
admin.site.register(HuyenQuan)
|
6,404 | 74b1cdcb1aaf6cde7e8ce3eeb73cd82689719b00 | # apport hook for oem-config; adds log file
import os.path
def add_info(report):
    """Apport hook: attach the oem-config log file to *report* when present."""
    log_path = '/var/log/oem-config.log'
    if not os.path.exists(log_path):
        return
    # Apport reads a 1-tuple of (filename,) as "include this file's contents".
    report['OemConfigLog'] = (log_path,)
|
6,405 | 751d2a07b97d080988c54511ca13a97a969e06bd | import pygame
import numpy as np
import random
from enum import Enum
from .config import *
class Actions(Enum):
    """Discrete moves available to the agent; values index the Q-table columns."""
    FORWARD = 0
    RIGHT = 1
    LEFT = 2
    BACK = 3
class MazeEnv():
    '''Gym-like grid-maze environment rendered with pygame.

    States are flattened cell indices (row * GRID_WIDTH + col); the goal cell
    is chosen at random in __init__.  SPAWN_STATE and WALLS come from the
    star-import of .config — TODO confirm.
    '''
    def __init__(self, GW, GH, SW, SH):
        # NOTE(review): the layout is published via module globals so the
        # rendering code can read them; multiple instances would share state.
        global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT
        GRID_WIDTH = GW
        GRID_HEIGHT = GH
        SCREEN_WIDTH = SW
        SCREEN_HEIGHT = SH
        BOX_WIDTH = SCREEN_WIDTH/GRID_WIDTH
        BOX_HEIGHT = SCREEN_HEIGHT/GRID_HEIGHT
        # Random goal cell (flattened index).
        WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)
        # Setup ML stuff
        self.pos = np.array(self.getPos(SPAWN_STATE))
        self.action_space = Actions
        self.max_states = GRID_WIDTH * GRID_HEIGHT
        self.max_actions = len(self.action_space)
        # Q-table: one row per cell, one column per action.
        self.Q = np.zeros([GRID_WIDTH*GRID_HEIGHT, len(self.action_space)])
        self.tunnel_vision = False
        # Other
        self.WALLS = list(WALLS)
        self.WIN_STATE = WIN_STATE
        self.SPAWN_STATE = SPAWN_STATE

    def step(self, action):
        """Apply *action*; return (state, reward, done, info) gym-style."""
        self.pos = self.moveDir(self.pos, self.action_space(action))
        reward = -0.04  # small per-step penalty
        done = True
        if self.getState() == self.WIN_STATE:
            reward = 10
        else:
            done = False
        return (self.getState(), reward, done, {})

    def reset(self):
        # Put the agent back on the spawn cell.
        self.pos = np.array(self.getPos(self.SPAWN_STATE))

    def render(self, screen, close=False):
        """Draw the maze, the Q-values (as coloured triangles) and the agent."""
        self.screen = screen
        self.screen.fill((0, 0, 0))
        # Draw the grid
        # font = pygame.font.Font(None, 22)
        for x in range(GRID_WIDTH):
            for y in range(GRID_HEIGHT):
                # Four triangles per cell (top/right/left/bottom), one per action.
                all_points = []
                all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
                all_points.append([[x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
                all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
                all_points.append([[x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
                width = 34
                height = 10
                text_offs = [[(BOX_WIDTH/2-width/2), height/2], [BOX_WIDTH-width, BOX_HEIGHT/2-height/2], [4, BOX_HEIGHT/2-height/2], [BOX_WIDTH/2-width/2, BOX_HEIGHT-height-4]]
                for a in range(4):
                    s = pygame.Surface((BOX_WIDTH,BOX_HEIGHT), pygame.SRCALPHA)
                    s.fill((0, 0, 0, 0))
                    # Colour encodes cell type and Q-value sign/magnitude.
                    if self.getState((x, y)) == self.WIN_STATE:
                        col = (0, 255, 0, 255)
                    elif [x, y] in self.WALLS:
                        col = (128, 128, 128, 255)
                    elif len(self.Q) <= self.getState((x, y)) or len(self.Q[self.getState((x, y))]) <= a:
                        col = (0, 0, 0, 0)
                    elif self.Q[self.getState((x, y))][a] > 0:
                        col = (0, 255, 0, 60 + self.Q[self.getState((x, y))][a] / self.Q.max() * 195)
                    elif self.Q[self.getState((x, y))][a] < 0:
                        col = (255, 0, 0, 60 + self.Q[self.getState((x, y))][a] / self.Q.min() * 195)
                    else:
                        col = (0, 0, 0, 0)
                    if not self.tunnel_vision or self.getState((x, y)) == self.getState():
                        pygame.draw.polygon(s, col, [[all_points[a][b][0]-x*BOX_WIDTH, all_points[a][b][1]-y*BOX_HEIGHT] for b in range(3)])
                    self.screen.blit(s, (x*BOX_WIDTH, y*BOX_HEIGHT))
                    if self.getState((x, y)) != self.WIN_STATE and [x, y] not in self.WALLS:
                        pygame.draw.polygon(self.screen, (255, 255, 255), all_points[a], 2)
                    #if BOX_WIDTH > 80:
                    #trender = font.render("{0:.2f}".format(self.Q[self.getState((x, y)), a]), True, (255, 255, 255))
                    #self.screen.blit(trender, (x*BOX_WIDTH+text_offs[a][0], y*BOX_HEIGHT+text_offs[a][1]))
        # Draw the player
        pygame.draw.circle(self.screen, (0, 0, 255),
                           (int((self.pos[0]+0.5)*BOX_WIDTH),
                            int((self.pos[1]+0.5)*BOX_HEIGHT)),
                           max(10, int(BOX_WIDTH/10)))
        pygame.display.update()

    def moveDir(self, pos, action):
        """Return *pos* moved one cell in *action*'s direction, or unchanged
        when the move would leave the grid or enter a wall."""
        oldPos = list(pos)
        if action == Actions.FORWARD:
            pos[1] -= 1
        elif action == Actions.RIGHT:
            pos[0] += 1
        elif action == Actions.LEFT:
            pos[0] -= 1
        elif action == Actions.BACK:
            pos[1] += 1
        if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1] >= GRID_HEIGHT \
                or self.hitWall(pos):
            pos = oldPos
        return pos

    def hitWall(self, pos):
        # True when *pos* coincides with any wall cell.
        for w in self.WALLS:
            if w[0] == pos[0] and w[1] == pos[1]:
                return True
        return False

    def getState(self, pos=False):
        """Flatten *pos* (defaults to the agent's position) to a state index."""
        if not pos:
            pos = self.pos
        return int(pos[1]*GRID_WIDTH+pos[0])

    def getPos(self, state):
        # Inverse of getState: flattened index -> [col, row].
        return [state % GRID_WIDTH, state // GRID_WIDTH]
6,406 | dd9574ea08beb9bc5f1413afd63c751fd42cba67 | #!/usr/bin/env python3
from pexpect import pxssh
import time
# Open an SSH session and restart the remote application.
s = pxssh.pxssh()
ip = "" #replace ip address
username= "" #replace username
password= "" #replace password
s.login (ip, username, password)
print ("SSH session login successful")
# Stop the app; prompt() blocks until the shell prompt returns.
s.sendline ('application stop')
s.prompt() # match the prompt
print("Stopping the app")
print("\nStarting the app")
s.sendline ('application start')
s.prompt()
print ("\nLogout")
s.logout()
|
6,407 | ab69f4d6afb96d86381bcf507d7810980446c6ea | import msvcrt
import random
import os
def clear():
    ''' It clears the screen.'''
    # Windows-only ("cls"); matches the msvcrt usage elsewhere in this script.
    os.system('cls')
def InitMatrix():
    '''Build a fresh 4x4 board holding two tiles of value 2 in random cells.'''
    board = [[0] * 4 for _ in range(4)]
    for _ in range(2):
        row = random.randint(0, 3)
        col = random.randint(0, 3)
        # Re-roll until an empty cell is found.
        while board[row][col] != 0:
            row = random.randint(0, 3)
            col = random.randint(0, 3)
        board[row][col] = 2
    return board
def ShowMatrix():
    '''Print the global board, one row per line, right-aligned in 6-wide cells.'''
    for row in m:
        for value in row:
            print("{:6d}".format(value), end = " ")
        print()
def MoveDown():
    '''When the down key is pressed it computes to the bottom of the matrix the
    adjacent elements with the same value and moves the other elements in the
    same direction if there are empty cells.'''
    global movement
    for j in range(4):
        # Walk each column bottom-up; x scans the cells above position i.
        for i in range(3, -1, -1):
            x = i - 1
            while x > -1:
                # A different non-zero tile blocks further merges/slides.
                if m[i][j] != 0 and m[i][j] != m[x][j] and m[x][j] != 0:
                    break
                # Merge equal tiles into the lower cell.
                elif m[x][j] == m[i][j] and m[i][j] != 0:
                    aux = m[i][j]
                    m[i][j] = m[x][j] + aux
                    m[x][j] = 0
                    movement = True
                    break
                # Slide a tile down into the empty cell.
                elif m[i][j] == 0 and m[x][j] != 0:
                    m[i][j] = m[x][j]
                    m[x][j] = 0
                    movement = True
                x -= 1
def MoveUp():
    '''It computes to the matrix upper side the adjacent elements with the same
    value and moves the other elements to the same side if there are empty cells
    when the up key is pressed.'''
    global movement
    for j in range(4):
        # Walk each column top-down; x scans the cells below position i.
        for i in range(3):
            x = i + 1
            while x < 4:
                # A different non-zero tile blocks further merges/slides.
                if m[i][j] != 0 and m[i][j] != m[x][j] and m[x][j] != 0:
                    break
                # Merge equal tiles into the upper cell.
                elif m[x][j] == m[i][j] and m[i][j] != 0:
                    aux = m[i][j]
                    m[i][j] = m[x][j] + aux
                    m[x][j] = 0
                    movement = True
                    break
                # Slide a tile up into the empty cell.
                elif m[i][j] == 0 and m[x][j] != 0:
                    m[i][j] = m[x][j]
                    m[x][j] = 0
                    movement = True
                x += 1
def MoveLeft():
    '''It computes to the matrix left side the adjacent elements with the same
    value and moves the other elements to the same side if there are empty cells
    when the left key is pressed.'''
    global movement
    for i in range(4):
        # Walk each row left-to-right; x scans the cells right of position j.
        for j in range(3):
            x = j + 1
            while x < 4:
                # A different non-zero tile blocks further merges/slides.
                if m[i][j] != 0 and m[i][j] != m[i][x] and m[i][x] != 0:
                    break
                # Merge equal tiles into the left cell.
                elif m[i][x] == m[i][j] and m[i][j] != 0:
                    aux = m[i][j]
                    m[i][j] = m[i][x] + aux
                    m[i][x] = 0
                    movement = True
                    break
                # Slide a tile left into the empty cell.
                elif m[i][j] == 0 and m[i][x] != 0:
                    m[i][j] = m[i][x]
                    m[i][x] = 0
                    movement = True
                x += 1
def MoveRight():
    ''' It computes to the matrix right side the adjacent elements with the same
    value and moves the other elements to the same side if there are empty cells
    when the right key is pressed.'''
    global movement
    for i in range(4):
        # Walk each row right-to-left; x scans the cells left of position j.
        for j in range(3, -1, -1):
            x = j - 1
            while x > -1:
                # A different non-zero tile blocks further merges/slides.
                if m[i][j] != 0 and m[i][j] != m[i][x] and m[i][x] != 0:
                    break
                # Merge equal tiles into the right cell.
                elif m[i][x] == m[i][j] and m[i][j] != 0:
                    aux = m[i][j]
                    m[i][j] = m[i][x] + aux
                    m[i][x] = 0
                    movement = True
                    break
                # Slide a tile right into the empty cell.
                elif m[i][j] == 0 and m[i][x] != 0:
                    m[i][j] = m[i][x]
                    m[i][x] = 0
                    movement = True
                x -= 1
def SupplyElem():
    ''' Spawn a new tile only when this turn merged or moved at least one
    tile; otherwise the board stays as it is.'''
    if movement:
        RandomElem()
def RandomElem():
    ''' Drop a new tile (2 or 4, equally likely) into a random empty cell.'''
    row = random.randint(0, 3)
    col = random.randint(0, 3)
    # Re-roll until the chosen cell is empty.
    while m[row][col] != 0:
        row = random.randint(0, 3)
        col = random.randint(0, 3)
    # randrange(2, 5, 2) yields 2 or 4.
    m[row][col] = random.randrange(2, 5, 2)
#===========================================================
# Main game loop: read arrow keys via msvcrt (Windows console) and play
# until 2048 is reached, no move is possible, or ESC is pressed.
m = InitMatrix()
max_value = 2
while max_value <= 2048:
    movement = False
    clear()
    print("2048")
    ShowMatrix()
    keypress = ord(msvcrt.getch())
    # 224 is the extended-key prefix; the next byte identifies the arrow key.
    if keypress == 224:
        keypress = ord(msvcrt.getch())
        if keypress == 75:
            MoveLeft()
            SupplyElem()
        if keypress == 72:
            MoveUp()
            SupplyElem()
        if keypress == 77:
            MoveRight()
            SupplyElem()
        if keypress == 80:
            MoveDown()
            SupplyElem()
    if keypress == 27: # ESC
        break
    min_value = min(map(min, m))
    max_value = max(map(max, m))
    # NOTE(review): a full board where the pressed direction happens to change
    # nothing is treated as a loss even if another direction could still merge
    # tiles — confirm whether that is intended.
    if movement == False and min_value != 0:
        clear()
        print("\nYOU HAVE LOST!")
        print(f"\nSCORE: {max_value}")
        with open("Score.txt", "a") as scorefile: #don't forget to write the path of the directory where the Score file is stored
            scorefile.write(f" {str(max_value)}")
        with open("Score.txt", "r") as scorefile2: #don't forget to write the path of the directory where the Score file is stored
            score_read = scorefile2.read()
        list_score = score_read.split()
        list_score = [int(elem) for elem in list_score]
        print(f"\nHigh score: {max(list_score)}")
        quit()
    if max_value == 2048:
        print("\nYOU HAVE WON!")
        print(f"\nSCORE: {max_value}")
        with open("Score.txt", "a") as scorefile: #don't forget to write the path of the directory where the Score file is stored
            scorefile.write(f" {str(max_value)}")
        with open("Score.txt", "r") as scorefile2: #don't forget to write the path of the directory where the Score file is stored
            score_read = scorefile2.read()
        list_score = score_read.split()
        list_score = [int(elem) for elem in list_score]
        print(f"\nHigh score: {max(list_score)}")
        quit()
|
6,408 | 09b14705a6905470058b5eecc6dd0bb214975c66 | """IDQ Importer Exporter
This script defines Import and Export functions through which it can communicate with
a Informatica Model Repository.
It also provides some related functions, such as:
- Create IDQ folder
- Check in IDQ components
Parts by Laurens Verhoeven
Parts by Jac. Beekers
@Version: 20190412.0 - JBE - Initial version to work with deploy lists
@License: MIT
"""
# MIT License
#
# Copyright (c) 2019 Jac. Beekers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import supporting, logging
from cicd.informatica import buildCommand
from cicd.informatica import executeInfacmd
from cicd.informatica import infaConstants as constants
logger = logging.getLogger(__name__)
def import_infadeveloper(**KeyWordArguments):
    """Import IDQ Components"""
    # Build the infacmd import command from the keyword arguments and run it.
    KeyWordArguments["Tool"] = "Import"
    ImportCommand = buildCommand.build(**KeyWordArguments)
    result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT)
    return result
def export_infadeveloper(**KeyWordArguments):
    """Export IDQ components; logs the generated command before running it."""
    thisproc = "export_infadeveloper"
    KeyWordArguments["Tool"] = "Export"
    ExportCommand = buildCommand.build(**KeyWordArguments)
    supporting.log(logger, logging.INFO, thisproc, "ExportCommand is >" + ExportCommand + "<.")
    result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT)
    return result
def CreateFolder(**KeyWordArguments):
    """Create IDQ Folder"""
    # Returns the raw (output, error) pair from infacmd.
    KeyWordArguments["Tool"] = "CreateFolder"
    CreateFolder = buildCommand.build(**KeyWordArguments)
    output, error = executeInfacmd.execute(CreateFolder)
    return (output, error)
def ListCheckedOutObjects(**KeyWordArguments):
    thisproc = "ListCheckedOutObjects"
    """ List Components that are currently checked out """
    KeyWordArguments["Tool"] = "ListCheckOutObjects"
    ListCheckedOutCommand = buildCommand.build(**KeyWordArguments)
    output, error = executeInfacmd.execute(ListCheckedOutCommand)
    # The output is one object per line, with properties separated by a comma + space.
    # To filter out irrelevant lines, such as "Command successful", we keep only lines that start with "MRS_PATH="
    OutputLines = output.splitlines()
    OutputKeyValuePairLines = [Properties.split(", ") for Properties in OutputLines if
                               Properties.startswith("MRS_PATH=")]
    # ObjectsOLD = [[KVPair.split("=", 1) for KVPair in Line] for Line in OutputKeyValuePairLines]
    # Each object is a dictionary, with properties as keys.
    # Since the date field has a comma in it, it's not parsed properly. For this reason we need the len == 2 filter.
    # If the date is required, the parsing of the output should be adjusted.
    Objects = [dict(KVPair.split("=") for KVPair in Line if len(KVPair.split("=")) == 2) for Line in
               OutputKeyValuePairLines]
    supporting.log(logger, logging.DEBUG, thisproc, output)
    return Objects
def CheckIn(**KeyWordArguments):
    """Check-in IDQ Components"""
    # Single-object check-in; returns the raw (output, error) pair.
    KeyWordArguments["Tool"] = "CheckIn"
    CheckInCommand = buildCommand.build(**KeyWordArguments)
    output, error = executeInfacmd.execute(CheckInCommand)
    return (output, error)
def CheckInMutiple(**KeyWordArguments):
    """Check in multiple IDQ components in one infacmd invocation.

    Expects KeyWordArguments["MultipleObjectPaths"] to hold the list of object
    paths; builds one check-in command per path, joins them with newlines and
    executes them as a single batch.  Returns infacmd's (output, error) pair.
    """
    thisproc = "CheckInMultiple"
    # Direct key access instead of scanning items(): a missing key now fails
    # with a clear KeyError here rather than a NameError further down.
    ObjectPaths = KeyWordArguments["MultipleObjectPaths"]
    KeyWordArguments["Tool"] = "CheckIn"
    CheckInCommands = []
    for ObjectPathName in ObjectPaths:
        KeyWordArguments["ObjectPathName"] = ObjectPathName
        CheckInCommands.append(buildCommand.build(**KeyWordArguments))
    CheckInAllCommand = "\n".join(CheckInCommands)
    timebefore = datetime.datetime.now()
    output, error = executeInfacmd.execute(CheckInAllCommand)
    timeafter = datetime.datetime.now()
    duration = timeafter - timebefore
    # Bug fix: supporting.log takes the logger as its first argument, as in
    # every other call in this module; it was previously omitted here.
    supporting.log(logger, logging.DEBUG, thisproc,
                   "Infacmd took " + str(duration) + " seconds to check-in " + str(len(ObjectPaths)) + " objects")
    # output, error = (CheckInAllCommand, 0)
    return (output, error)
def create_iar_file(**KeyWordArguments):
    """Build and run the infacmd command that creates an IAR artifact file."""
    thisproc = "create_iar_file"
    KeyWordArguments["Tool"] = "CreateIAR"
    create_command = buildCommand.build(**KeyWordArguments)
    supporting.log(logger, logging.INFO, thisproc, "Command is >" + create_command + "<.")
    result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT)
    return result
def deploy_iar_file(**KeyWordArguments):
    """Build and run the infacmd command that deploys an IAR artifact file."""
    thisproc = "deploy_iar_file"
    KeyWordArguments["Tool"] = "DeployIAR"
    deploy_command = buildCommand.build(**KeyWordArguments)
    supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.")
    result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)
    return result
def redeploy_iar_file(**KeyWordArguments):
    """Build and run the infacmd command that redeploys an existing IAR file."""
    thisproc = "redeploy_iar_file"
    KeyWordArguments["Tool"] = "RedeployIAR"
    deploy_command = buildCommand.build(**KeyWordArguments)
    supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.")
    result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)
    return result
def stop_app(**KeyWordArguments):
    """Build and run the infacmd command that stops a deployed application."""
    thisproc = "stop_app"
    KeyWordArguments["Tool"] = "StopApp"
    deploy_command = buildCommand.build(**KeyWordArguments)
    supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.")
    result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)
    return result
def set_app_privileges(**KeyWordArguments):
    """Build and run the infacmd command that sets application privileges."""
    thisproc = "set_app_privileges"
    KeyWordArguments["Tool"] = "AppPrivileges"
    deploy_command = buildCommand.build(**KeyWordArguments)
    supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.")
    result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)
    return result
|
6,409 | dfd5915428dc8f15fb61c5d81f22dfecfe29af15 | from django.urls import reverse
from django.utils.translation import get_language
from drf_dynamic_fields import DynamicFieldsMixin
from geotrek.api.v2.serializers import AttachmentSerializer
from mapentity.serializers import MapentityGeojsonModelSerializer
from rest_framework import serializers as rest_serializers
from rest_framework_gis import fields as rest_gis_fields
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from geotrek.common.serializers import PictogramSerializerMixin, TranslatedModelSerializer
from . import models as sensitivity_models
class RuleSerializer(PictogramSerializerMixin, rest_serializers.ModelSerializer):
    """Serializes a sensitivity Rule with its pictogram URL."""
    class Meta:
        model = sensitivity_models.Rule
        fields = ('id', 'code', 'name', 'pictogram', 'description', 'url')
class SportPracticeSerializer(TranslatedModelSerializer):
    """Minimal (id, name) representation of a SportPractice, translated."""
    class Meta:
        model = sensitivity_models.SportPractice
        fields = ('id', 'name')
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
    """Species with its practices and a 12-entry monthly sensitivity period."""
    practices = SportPracticeSerializer(many=True)
    period = rest_serializers.SerializerMethodField()

    def get_period(self, obj):
        # Collect the twelve monthly attributes period01..period12 into a list.
        return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]

    class Meta:
        model = sensitivity_models.Species
        fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.ModelSerializer):
    """Full SensitiveArea serialization using display variants for
    category/species and the structure's name."""
    category = rest_serializers.CharField(source='category_display')
    structure = rest_serializers.SlugRelatedField('name', read_only=True)
    species = rest_serializers.CharField(source='species_display')

    class Meta:
        model = sensitivity_models.SensitiveArea
        fields = "__all__"
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
    """GeoJSON representation for mapentity views; radius comes annotated."""
    radius = rest_serializers.IntegerField()

    class Meta(MapentityGeojsonModelSerializer.Meta):
        model = sensitivity_models.SensitiveArea
        fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
    """Public-API representation with nested species, rules and attachments."""
    species = SpeciesSerializer()
    kml_url = rest_serializers.SerializerMethodField()
    attachments = AttachmentSerializer(many=True)
    rules = RuleSerializer(many=True)

    def get_kml_url(self, obj):
        # Language-prefixed KML download URL for this area.
        return reverse('sensitivity:sensitivearea_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk})

    class Meta:
        model = sensitivity_models.SensitiveArea
        fields = ('id', 'species', 'description', 'contact', 'published', 'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer, SensitiveAreaAPISerializer):
    """GeoJSON variant of the API serializer, geometry in API_SRID."""
    # Annotated geom field with API_SRID
    geom2d_transformed = rest_gis_fields.GeometryField(read_only=True, precision=7)

    class Meta(SensitiveAreaAPISerializer.Meta):
        geo_field = 'geom2d_transformed'
        fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed', )
|
6,410 | 03943e146c0d64cfe888073e3a7534b6615b023f | import sys
from pcaspy import SimpleServer, Driver
import time
from datetime import datetime
import thread
import subprocess
import argparse
#import socket
#import json
import pdb
class myDriver(Driver):
    """Minimal pcaspy driver; all PV behaviour comes from the base Driver."""
    def __init__(self):
        super(myDriver, self).__init__()
def printDb(prefix):
    # Print every PV name that will be served, with its full prefix.
    # (Python 2 print-statement syntax, matching the rest of this script.)
    global pvdb
    print '=========== Serving %d PVs ==============' % len(pvdb)
    for key in sorted(pvdb):
        print prefix+key
    print '========================================='
    return
if __name__ == '__main__':
    # Build the PV database, then serve Channel Access requests forever.
    global pvdb
    pvdb = {} # start with empty dictionary
    global prefix
    prefix = ''
    parser = argparse.ArgumentParser(prog=sys.argv[0], description='host PVs for TPR')
    parser.add_argument('-P', required=True, help='e.g. SXR or CXI:0 or CXI:1', metavar='PARTITION')
    parser.add_argument('-v', '--verbose', action='store_true', help='be verbose')
    args = parser.parse_args()
    myDriver.verbose = args.verbose
    #
    # Parse the PARTITION argument for the instrument name and station #.
    # If the partition name includes a colon, PV names will include station # even if 0.
    # If no colon is present, station # defaults to 0 and is not included in PV names.
    # Partition names 'AMO' and 'AMO:0' thus lead to different PV names.
    #
    # PVs
    # pvdb[prefix+':ACCSEL' ] = {'type' : 'int', 'value': 0}
    # pvdb[prefix+':FRAMERATE' ] = {'type' : 'float', 'value': 0}
    prefix = ':SRC'
    pvdb[prefix+':LOCK' ] = {'type' : 'int', 'value': 0}
    # One set of lock/delay PVs plus a SCAN subtree for each of the 14 channels.
    for i in range(14):
        prefix = ':CH%u'%(i)
        pvdb[prefix+':LOCK' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':PD' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':DPDPS' ] = {'type' : 'float', 'value': 0}
        pvdb[prefix+':DPDPSA'] = {'type' : 'float', 'value': 0}
        pvdb[prefix+':DPD' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':TXD' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':DTXDPS'] = {'type' : 'float', 'value': 0}
        pvdb[prefix+':DTXD' ] = {'type' : 'int' , 'value': 0}
        prefix = ':CH%u:SCAN'%(i)
        pvdb[prefix+':STAGE' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':TXD' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':PD' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':TXDPS' ] = {'type' : 'float', 'value': 0}
        pvdb[prefix+':PDPS' ] = {'type' : 'float', 'value': 0}
        pvdb[prefix+':CLK' ] = {'type' : 'int' , 'value': 0}
        pvdb[prefix+':DCLK' ] = {'type' : 'int' , 'value': 0}
    printDb(args.P)
    server = SimpleServer()
    server.createPV(args.P, pvdb)
    driver = myDriver()
    try:
        # process CA transactions
        while True:
            server.process(0.1)
    except KeyboardInterrupt:
        print '\nInterrupted'
|
6,411 | cd9d10a3ee3956762d88e76a951023dd77023942 | from Get2Gether.api_routes.schedule import schedule_router
from Get2Gether.api_routes.auth import auth_router
from Get2Gether.api_routes.event import event_router
|
6,412 | c234031fa6d43c19515e27c5b12f8e8338f24a1c | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def insertionSortList(self, head):
        """Sort a singly linked list in ascending order using insertion sort."""
        if not head:
            return head
        # Dummy head so an insertion before the first node needs no special case.
        fh = ListNode(0)
        fh.next = head
        cur = head
        # ptr = ptr.next
        # 1 2 3 5 7 8 4 9
        # move the pre until a value larger than cur.next is found
        # then change the link
        # save the cur.next
        # point cur.next as cur.next.next (skip the moved one)
        # point cur.next.next as pre.next
        # pre.next is now the cur.next
        while cur.next:
            if cur.next.val < cur.val:
                # Out-of-order node: scan from the front for its insertion point.
                pre = fh
                while pre.next.val <= cur.next.val:
                    pre = pre.next
                # Unlink cur.next and splice it in after pre.
                tmp = cur.next
                cur.next = tmp.next
                tmp.next = pre.next
                pre.next = tmp
            else:
                cur = cur.next
        return fh.next
|
6,413 | d46cda5354640e1c87432d39a2e949d6db034edc | # Generated by Django 3.2 on 2021-04-21 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Song.overall_rating and Song.rating_count to
    FloatField(default=0)."""

    dependencies = [
        ('rate', '0003_auto_20210421_1316'),
    ]

    operations = [
        migrations.AlterField(
            model_name='song',
            name='overall_rating',
            field=models.FloatField(default=0),
        ),
        migrations.AlterField(
            model_name='song',
            name='rating_count',
            field=models.FloatField(default=0),
        ),
    ]
|
6,414 | 28091b7251f980f3f63abdb03140edd0d789be8f | name = raw_input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
x = list()
for line in handle:
line.split() ## unnesssecary
if line.startswith("From "):
x.append(line[line.find(" ")+1:line.find(" ",line.find(" ")+1)])
counts = dict()
for name in x:
if name not in counts:
counts[name]=1
else:
counts[name] = counts[name] + 1
### print max of numbers mails and name with max numbers of mails
for names,vals in counts.items():
if vals == max(counts.values()):
print names,vals
|
6,415 | d60a2d4c819f701e8e439b8839415aa2838df185 | # https://www.acmicpc.net/problem/3584
import sys, collections
input = sys.stdin.readline
N = int(input())
for _ in range(N):
n = int(input())
arr = collections.defaultdict(list)
parent = [i for i in range(n + 1)]
for i in range(n - 1):
a, b = map(int, input().split())
arr[a].append(b)
parent[b] = a
node_1, node_2 = map(int, input().split())
p = [i for i, e in enumerate(parent) if i > 0 and i == e]
def bfs(p, goal):
queue = collections.deque()
queue.append([p, [p]])
discoverd = [False] * (n + 1)
while queue:
m, r = queue.popleft()
if not discoverd[m]:
discoverd[m] = True
if m == goal:
return r
for i in arr[m]:
queue.append([i, r + [i]])
for i in p:
a = bfs(i, node_1)
b = bfs(i, node_2)
result = 0
for aa, bb in zip(a,b):
if aa==bb:
result = aa
print(result)
|
6,416 | e2e3b63deba20cd87fdfca81a9f67fa24891a1e0 | '''
Copyright (c) 2011 Jacob K. Schoen (jacob.schoen@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
import datetime
import os
import db
myLogger = logging.getLogger('smugScan')
def getAllPictureInfo(configobj, smugmug, lock):
    """Rebuild the local SmugMug mirror: albums, images and (sub)categories.

    *lock* serialises DB writes across worker processes; *smugmug* is the
    authenticated API client.
    """
    myLogger.info("getAllPictures() parent process:'{0}' process id:'{1}".format(os.getppid(),os.getpid()))
    conn = db.getConn(configobj)
    #start fresh on this
    myLogger.debug("Emptying smugmug tables.")
    _emptySmugMugTables(conn, lock)
    #now get the albums
    myLogger.debug("Getting album info from smugmug.")
    albums = _getAlbums(conn, smugmug, lock)
    for album in albums["Albums"]:
        #and the pictures in each album
        myLogger.debug("geting picture info for album '%s'", album["Title"])
        _getPictures(album, conn, smugmug, lock)
    #get categories
    ids = _getUserCategories(conn, smugmug, lock)
    _getUserSubCategories(conn, smugmug, lock, ids)
    conn.close()
    myLogger.info('Finished Scanning SmugMug')
def _getAlbums(conn, smugmug, lock):
    """Fetch all albums from SmugMug, store each in the DB, return the raw response."""
    albums = smugmug.albums_get(Extras="LastUpdated")
    for album in albums["Albums"]:
        myLogger.debug(album)
        title = album["Title"]
        cat = None
        catid = None
        subCat = None
        subCatid = None
        # Category and SubCategory are optional in the SmugMug response.
        try:
            cat = album["Category"]["Name"]
            catid = album["Category"]["id"]
        except KeyError:
            cat = None
            catid = None
        try:
            subCat = album["SubCategory"]["Name"]
            subCatid = album["SubCategory"]["id"]
        except KeyError:
            subCat = None
            subCatid = None
        # Serialise DB writes across worker processes.
        lock.acquire()
        db.addSmugAlbum(conn,cat, catid, subCat, subCatid, title, datetime.datetime.strptime(album["LastUpdated"],'%Y-%m-%d %H:%M:%S'), album["Key"], album["id"])
        lock.release()
    return albums
def _getPictures(album, conn, smugmug, lock):
    """Fetch every image of *album* and store it in the DB under the lock."""
    pictures = smugmug.images_get(AlbumID=album["id"], AlbumKey=album["Key"], Extras="MD5Sum,LastUpdated,FileName")
    albumId = pictures["Album"]["id"]
    for picture in pictures["Album"]["Images"]:
        lock.acquire()
        db.addSmugImage(conn,albumId, datetime.datetime.strptime(picture["LastUpdated"],'%Y-%m-%d %H:%M:%S'), picture["MD5Sum"], picture["Key"], picture["id"], picture["FileName"])
        lock.release()
def _getUserCategories(conn, smugmug, lock):
    """Mirror the user's top-level categories; return the list of their ids
    (used by _getUserSubCategories)."""
    categories = smugmug.categories_get()["Categories"]
    ids = []
    for category in categories:
        ids.append(category["id"])
        # Hold the lock only around the DB write; release even on error.
        with lock:
            db.addUserCategory(conn, category["Type"], category["id"],
                               category["NiceName"], category["Name"])
    return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
    """Mirror the sub-categories of each category id in *ids*."""
    for categoryid in ids:
        subcategories = smugmug.subcategories_get(CategoryID=categoryid)["SubCategories"]
        for subcategory in subcategories:
            # `with` guarantees release of the lock even if the insert raises.
            with lock:
                db.addUserSubCategory(conn, subcategory["id"], subcategory["NiceName"],
                                      subcategory["Name"], categoryid)
def _emptySmugMugTables(conn, lock):
    """Delete every row from the local SmugMug mirror tables so a rescan
    starts from a clean slate."""
    # One lock hold for the whole wipe keeps the mirror consistent, and the
    # context manager releases the lock even if a DELETE fails.
    with lock:
        for table in ("smug_album", "smug_image", "user_category", "user_subcategory"):
            db.execute(conn, "DELETE FROM " + table)
|
6,417 | 43b519d7db2e46a0bf9317eddac1f5cf6b7b79e3 | import pandas as pd
import json
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
# Topic-model the Amazon "Automotive" 5-core reviews with TF-IDF + NMF,
# label each review with its dominant topic and write the result to CSV.
nlp = spacy.load('en_core_web_sm')  # loaded for parity; not used below
# Load the review dump: one JSON object per line.
with open('Automotive_5.json', 'r') as reviews_fh:
    list_data = [json.loads(line) for line in reviews_fh]
list_data_only_reviews = [item['reviewText'] for item in list_data]
list_data_reviewerid = [item['reviewerID'] for item in list_data]
# Persist the raw review texts; this CSV is then re-read as the model input.
with open('review_file.csv', 'w+') as csv_fh:
    df = pd.DataFrame(list_data_only_reviews, columns=['Reviews'])
    df.to_csv(csv_fh, index=False)
npr = pd.read_csv('review_file.csv')
# TF-IDF, ignoring terms in >80% of documents or in fewer than 5 documents.
tfidf = TfidfVectorizer(max_df=0.8, min_df=5, stop_words='english')
dtm = tfidf.fit_transform(npr['Reviews'].values.astype('U'))
# Factorise the document-term matrix into 20 latent topics.
nmf_model = NMF(n_components=20, random_state=50)
nmf_model.fit(dtm)
# Hoisted: get_feature_names() used to be called twice per word inside the
# loop below.  (Renamed to get_feature_names_out in newer scikit-learn.)
feature_names = tfidf.get_feature_names()
# argsort is ascending, so the last 30 indices are the highest-weight terms.
for i, topic in enumerate(nmf_model.components_):
    print(f"THE TOP 30 WORDS FOR TOPIC #{i}")
    print([feature_names[ix] for ix in topic.argsort()[-30:]
           if len(feature_names[ix]) > 5])
    print('\n')
# Assign each review its most probable topic.
topic_results = nmf_model.transform(dtm)
npr['Topic'] = topic_results.argmax(axis=1)
# Hand-assigned human-readable labels for the 20 NMF topics.
topic_label = {0:'plastic', 1:'winter batteries', 2:'engines', 3:'liquid', 4:'wind', 5:'shipping', 6:'light',
               7:'quality', 8:'instructions', 9:'worked', 10:'rubber', 11:'cleaning', 12:'pressure', 13:'washing',
               14:'recommendation', 15:'advertise', 16:'bucket', 17:'camp', 18:'brush', 19:'travel'}
npr['Topic Label'] = npr['Topic'].map(topic_label)
# Replace the review text column with the reviewer id (original behaviour).
npr = npr.assign(Reviews=list_data_reviewerid)
npr.to_csv('classified_output.csv')
|
6,418 | 760daa908ca92e7fb1393bdf28fee086dc1648ef | from collections import Counter
# Complete the isValid function below.
def isValid(s):
    """Return True if *s* can be made "valid" by removing at most one character.

    A string is valid when every distinct character occurs the same number of
    times (HackerRank "Sherlock and the Valid String").  The previous
    implementation mutated a running expected count and wrongly accepted
    inputs like "abccc"; it also printed debug output.

    :param s: input string; the empty string counts as valid
    """
    if not s:
        return True
    # Frequency of frequencies, e.g. "aabbbcc" -> {2: 2, 3: 1}.
    freq_of_freq = Counter(Counter(s).values())
    if len(freq_of_freq) == 1:
        # Every character already occurs equally often.
        return True
    if len(freq_of_freq) > 2:
        # Three or more distinct counts can never be fixed by one removal.
        return False
    (low, low_n), (high, high_n) = sorted(freq_of_freq.items())
    # One removal works iff either:
    #  - exactly one character occurs once (drop it entirely), or
    #  - exactly one character occurs one time more than all the others.
    return (low == 1 and low_n == 1) or (high == low + 1 and high_n == 1)
if __name__ == '__main__':
    # Known HackerRank sample that cannot be fixed by a single deletion.
    sample = "ibfdgaeadiaefgbhbdghhhbgdfgeiccbi"
    verdict = isValid(sample)
    print(verdict)
6,419 | 61ff5fae02d18d51595e8050d97244574e7d8af1 | from setuptools import setup
# Distribution metadata for the nodepool harness package.
setup(
    name='nodepool_harness',
    version='0.1dev',
    description='Nodepool harness',
    # Ships vendored copies of statsd and apscheduler alongside the package.
    packages=['nodepool_harness', 'statsd', 'apscheduler'],
    install_requires=["PyYAML", "python-novaclient", "paramiko", "sqlalchemy"],
    entry_points = {
        'console_scripts': [
            # CLI entry point: `nh-install-node` runs scripts.install_node.
            'nh-install-node = nodepool_harness.scripts:install_node',
        ]
    }
)
6,420 | aacd5d671090c3305a53d62c3c6c25d4c033f42d | # Spelling bee NYT puzzle solver
# Solve the puzzle: find dictionary words of length >= 4 that use only the
# puzzle letters and contain the mandatory centre letter.
with open('words.txt') as words_fh:
    # Strip, lowercase and dedupe the lexicon in one pass (the old
    # set(list(map(...))) built a throwaway list first).
    lexicon = {word.strip().lower() for word in words_fh}
# NOTE: Could add a CLI to allow users to input this. Manual edits are the way for now
MANDATORY_LETTER = 'l'
LETTERS = set(['t', 'i', 'e', 'v', 'p', 'x'] + [MANDATORY_LETTER])
# Cheapest checks first; substring test replaces building set(word) twice.
valid_words = [
    word for word in lexicon
    if len(word) >= 4 and MANDATORY_LETTER in word and set(word) <= LETTERS
]
# Shortest words first, matching the original output order.
print(sorted(valid_words, key=len))
6,421 | 9290294b5df081ef0cae5450a9ea3baef789c041 | from .models import Owner, Vehicle
from rest_framework import viewsets, permissions
from .serializers import OwnerSerializer, VehicleSerializer
class OwnerViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for Owner records; requires an authenticated user.

    Results are ordered by primary key for stable pagination.
    """
    queryset = Owner.objects.all().order_by('id')
    serializer_class = OwnerSerializer
    permission_classes = [permissions.IsAuthenticated]
class VehicleViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for Vehicle records; requires an authenticated user.

    Results are ordered by primary key for stable pagination.
    """
    queryset = Vehicle.objects.all().order_by('id')
    serializer_class = VehicleSerializer
    permission_classes = [permissions.IsAuthenticated]
|
6,422 | c796123fbbf3adcde59779a104dcafb30a673a79 | from elements import Node, Bar, Material, Group, Load
from pprint import pprint
# query
# next((e for e in result['coordinates']['nodes'] if e.n == int(el[0])), None)
class Reader():
    """Parser for the plain-text FEM input format.

    The file is a sequence of sections, each introduced by a line starting
    with '*' (e.g. *COORDINATES); the data lines that follow belong to that
    section until the next header.  read() walks the file once, building
    nodes, element groups, bars, materials, boundary conditions and loads.
    """
    def read(self, filePath):
        """
        Reads text file with nodes and returns the result dict with all objects
        and their nested properties
        """
        result = {
            'coordinates': {
                'count': 0,
                'nodes': []
            },
            'element_groups': {
                'number_of_elements': 0,
                'count': 0,
                'groups': []
            },
            'bars': [],
            'materials': {
                'count': 0,
                'materials': []
            },
            'geometric_properties': {
                'count': 0
            },
            'bcnodes': {
                'count': 0
            },
            'loads': {
                'count': 0
            }
        }
        with open(filePath, 'r') as f:
            lines = f.readlines()
            section = None        # current section; None until the first '*' header
            groupCounter = 0      # index of the group currently being filled
            geometricCounter = 0  # distinguishes the count line of GEOMETRIC_PROPERTIES
            for line in lines:
                line = line.strip()
                el = line.split(' ')
                if len(line) == 0:
                    continue
                if line[0] == "*":
                    # Section header, e.g. "*COORDINATES" -> 'coordinates'.
                    section = line[1:].lower()
                    continue
                if section == 'coordinates':
                    # First line is the node count; the rest are "n x y".
                    if len(el) == 1:
                        result[section]['count'] = el[0]
                    else:
                        result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))
                elif section == 'element_groups':
                    # BUGFIX: this used to test `len(line) == 1`, which breaks
                    # for multi-digit group counts; test the token count like
                    # every other section does.
                    if len(el) == 1:
                        result[section]['count'] = int(el[0])
                    else:
                        result[section]['groups'].append(Group(el[0], el[1], el[2]))
                        result[section]['number_of_elements'] += int(el[1])
                elif section == 'incidences':
                    groups = result['element_groups']['groups']
                    nodes = result['coordinates']['nodes']
                    print(el)
                    # Advance to the next group once the current one is full.
                    currentGroup = groups[groupCounter]
                    if (currentGroup.amount == 0):
                        groupCounter += 1
                        currentGroup = groups[groupCounter]
                    print("Group n: {} count: {}".format(currentGroup.n, currentGroup.amount))
                    bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])
                    print(
                        """
                            Bar {} created
                            Start node: {} End Node: {} Group: {}
                        """.format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))
                    result['bars'].append(bar)
                    currentGroup.amount -= 1
                elif section == 'materials':
                    # First line is the count; then one material per line,
                    # assigned to groups in declaration order.
                    if len(el) == 1:
                        result[section]['count'] = el[0]
                        groupCounter = 0
                    else:
                        material = Material(el[0], el[1], el[2])
                        result[section]['materials'].append(material)
                        result['element_groups']['groups'][groupCounter].setMaterial(material)
                        groupCounter += 1
                elif section == 'geometric_properties':
                    # First data line holds the count; subsequent lines set
                    # each group's cross-section area in order.
                    if geometricCounter == 0:
                        result[section]['count'] = el[0]
                    else:
                        result['element_groups']['groups'][geometricCounter - 1].setSectionArea(
                            el[0]
                        )
                    geometricCounter += 1
                elif section == 'bcnodes':
                    if len(el) == 1:
                        result[section]['count'] = el[0]
                    else:
                        # Attach the restriction to the node with matching id.
                        nodeIndex = next((e for e, item in enumerate(
                            result['coordinates']['nodes']) if item.n == int(el[0])), None
                        )
                        result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))
                elif section == 'loads':
                    if len(el) == 1:
                        result[section]['count'] = el[0]
                    else:
                        load = Load(el[1], el[2])
                        nodeIndex = next((e for e, item in enumerate(
                            result['coordinates']['nodes']) if item.n == int(el[0])), None
                        )
                        result['coordinates']['nodes'][nodeIndex].addLoad(load)
            for bar in result['bars']:
                bar.createLocalArray()
        print('---------- Parsing complete! ----------')
        pprint(result)
        print('---------------------------------------')
        return result
# reader = Reader()
# reader.read("./arquivoentrada.fem")
|
6,423 | 775ac823f6784510fa919b08ee4150eb500710c4 | # coding: utf-8
"""
CityPay POS API
CityPay Point of Sale API for payment with card present devices including EMV readers and contactless POS readers. The API is available from https://github.com/citypay/citypay-pos-api The API makes it simple to add EMV and contactless card acceptance to iOS, Android, Tablet and desktop applicaitons using a HTTPS protocol. It segregates the complexity of payment processing from the sales environment and eliminates any necessity for the target system to handle card data. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: dev@citypay.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from citypay.api_client import ApiClient
class PaymentModuleApi(object):
    """Client for the CityPay POS payment-module endpoints.

    NOTE: originally auto-generated by swagger-codegen
    (https://github.com/swagger-api/swagger-codegen); rewritten to share the
    identical request-building logic of the five endpoints in one helper.

    The generated code passed a literal ``async=`` keyword argument, which is
    a SyntaxError from Python 3.7 on (``async`` is a reserved word).  The
    option is now only ever handled as the plain string key 'async', so the
    public calling convention (``kwargs['async']`` / ``async=True`` via a
    kwargs dict) is unchanged.
    """

    # String key under which callers request an asynchronous call.
    _ASYNC_PARAM = 'async'

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def _call_endpoint(self, method_name, resource_path, response_type,
                       kwargs, body=None, body_required=False):
        """Validate keyword arguments and POST a JSON body to *resource_path*.

        :param method_name: public method name, used in error messages
        :param resource_path: endpoint path, e.g. '/sale'
        :param response_type: swagger model name used for deserialisation
        :param kwargs: caller keyword arguments; recognised keys are 'body',
            'async', '_return_http_data_only', '_preload_content' and
            '_request_timeout'
        :param body: positional body argument, when the endpoint takes one
        :param body_required: raise ValueError when no body was supplied
        :return: whatever api_client.call_api returns (result or thread)
        """
        allowed = ('body', self._ASYNC_PARAM, '_return_http_data_only',
                   '_preload_content', '_request_timeout')
        params = {}
        for key, val in kwargs.items():
            if key not in allowed:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name))
            params[key] = val
        if body is not None:
            params['body'] = body
        if body_required and params.get('body') is None:
            raise ValueError(
                "Missing the required parameter `body` when calling "
                "`%s`" % method_name)
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json']),
        }
        # 'async' cannot be written as a literal keyword argument on
        # Python >= 3.7, so the pass-through options are forwarded as a dict.
        call_options = {
            self._ASYNC_PARAM: params.get(self._ASYNC_PARAM),
            '_return_http_data_only': params.get('_return_http_data_only'),
            '_preload_content': params.get('_preload_content', True),
            '_request_timeout': params.get('_request_timeout'),
        }
        return self.api_client.call_api(
            resource_path, 'POST',
            {},    # path_params
            [],    # query_params
            header_params,
            body=params.get('body'),
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=[],
            collection_formats={},
            **call_options)

    def receipt(self, **kwargs):  # noqa: E501
        """Receipt Print.

        Reprint a merchant or customer receipt for a transaction that exists
        on the device (i.e. has not been cleared by the End of Day process).
        Pass ``{'async': True}`` in kwargs for an asynchronous request; the
        returned thread's ``.get()`` yields the result.

        :param TransactionProgress body: optional transaction reference
        :return: TransactionResult (or the request thread when async)
        """
        kwargs['_return_http_data_only'] = True
        # Both sync and async paths delegate identically.
        return self.receipt_with_http_info(**kwargs)  # noqa: E501

    def receipt_with_http_info(self, **kwargs):  # noqa: E501
        """Receipt Print — full-HTTP-info variant of :meth:`receipt`."""
        return self._call_endpoint('receipt', '/receipt',
                                   'TransactionResult', kwargs)

    def refund(self, body, **kwargs):  # noqa: E501
        """Refund Transaction.

        Initiates a new refund to a device.  The action will contact the
        device and request a transaction start including the amount and a
        unique reference to track the transaction through its lifecycle.

        :param SaleRequest body: (required)
        :return: SaleResponse (or the request thread when async)
        """
        kwargs['_return_http_data_only'] = True
        return self.refund_with_http_info(body, **kwargs)  # noqa: E501

    def refund_with_http_info(self, body, **kwargs):  # noqa: E501
        """Refund Transaction — full-HTTP-info variant of :meth:`refund`."""
        return self._call_endpoint('refund', '/refund', 'SaleResponse',
                                   kwargs, body=body, body_required=True)

    def reversal(self, body, **kwargs):  # noqa: E501
        """Reversal Transaction.

        Initiates a reversal to a device.  No confirmation is made and the
        transaction reversal process is run.

        :param ReversalRequest body: (required)
        :return: SaleResponse (or the request thread when async)
        """
        kwargs['_return_http_data_only'] = True
        return self.reversal_with_http_info(body, **kwargs)  # noqa: E501

    def reversal_with_http_info(self, body, **kwargs):  # noqa: E501
        """Reversal Transaction — full-HTTP-info variant of :meth:`reversal`."""
        return self._call_endpoint('reversal', '/reversal', 'SaleResponse',
                                   kwargs, body=body, body_required=True)

    def sale(self, body, **kwargs):  # noqa: E501
        """Sale Transaction.

        Initiates a new sale to a device.  The action will contact the device
        and request a transaction start including the amount and a unique
        reference to track the transaction through its lifecycle.

        :param SaleRequest body: (required)
        :return: SaleResponse (or the request thread when async)
        """
        kwargs['_return_http_data_only'] = True
        return self.sale_with_http_info(body, **kwargs)  # noqa: E501

    def sale_with_http_info(self, body, **kwargs):  # noqa: E501
        """Sale Transaction — full-HTTP-info variant of :meth:`sale`."""
        return self._call_endpoint('sale', '/sale', 'SaleResponse',
                                   kwargs, body=body, body_required=True)

    def transaction(self, **kwargs):  # noqa: E501
        """Transaction Status.

        Request the status of a transaction in progress or a complete
        transaction using the identifier as the reference.  Depending on the
        state of the transaction, the response indicates not found, in
        progress (with the current workflow stage) or complete (with all
        transaction data).

        :param TransactionProgress body: optional transaction reference
        :return: TransactionResult (or the request thread when async)
        """
        kwargs['_return_http_data_only'] = True
        return self.transaction_with_http_info(**kwargs)  # noqa: E501

    def transaction_with_http_info(self, **kwargs):  # noqa: E501
        """Transaction Status — full-HTTP-info variant of :meth:`transaction`."""
        return self._call_endpoint('transaction', '/transaction',
                                   'TransactionResult', kwargs)
|
6,424 | a8d52d81ef6538e9cb8a0a9cab7cd0a778454c8e | import json
from constants import *
from coattention_layer import *
from prepare_generator import *
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
def coattention(num_embeddings):
    """Build the co-attention VQA model.

    :param num_embeddings: vocabulary size for the question embedding
    :return: an uncompiled Keras Model taking (question, image) inputs
    """
    # Image features: 196 spatial positions x 512 channels (VGG-style grid).
    img_features = Input(shape=(196, 512))
    question_tokens = Input(shape=(SEQ_LENGTH,))
    answer_logits = CoattentionModel(num_embeddings)(question_tokens, img_features)
    return Model(inputs=[question_tokens, img_features], outputs=answer_logits)
def scheduler(epoch):
    """Learning-rate schedule: constant 1e-4 for the first 10 epochs,
    exponential decay afterwards."""
    base_lr = 0.0001
    if epoch >= 10:
        return base_lr * tf.math.exp(0.1 * (10 - epoch))
    return base_lr
def Train(dataset=True):
    """Train the co-attention VQA model, then predict on the validation
    split and dump history/predictions to disk.

    :param dataset: dataset selector forwarded to get_generator and
        save_config (0=English, 1=Google, 2=Targoman).  NOTE(review): the
        default ``True`` compares equal to 1, so it presumably selects
        'Google' — confirm intent.
    """
    train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(
        dataset)
    save_config(dataset)
    # Checkpoint weights after every epoch.
    checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
                                 save_weights_only=True,
                                 verbose=1)
    # NOTE(review): created but never passed to fit() below — the LR schedule
    # is therefore inactive; confirm whether it should be in callbacks.
    scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
    earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
    model = coattention(VOCAB_SIZE)
    model.compile(optimizer=Adam(learning_rate=LR),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    # Save the weights using the `checkpoint_path` format
    model.save_weights(CHECKPOINT_PATH +
                       '/cp-{epoch: 04d}.ckpt'.format(epoch=0))
    history = model.fit(x=train_generator,
                        epochs=EPOCHS,
                        validation_data=val_generator,
                        callbacks=[checkpoint, earlystop_callback],
                        workers=6,
                        use_multiprocessing=True)
    # save history
    with open(HISTORY_PATH, 'w') as file:
        json.dump(history.history, file)
    # prediction
    predictions = model.predict(val_generator,
                                workers=6,
                                use_multiprocessing=True,
                                verbose=1)
    ans_vocab = load_ans_vocab()
    result = []
    # Map each argmax class index back to its answer string.
    for q in range(len(val_question_ids)):
        ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
        q_id = int(val_question_ids[q])
        result.append({u'answer': ans, u'question_id': q_id})
    with open(PRED_PATH, 'w') as file:
        json.dump(list(result), file)
    return
def save_config(dataset):
    """Write the run configuration (dataset name + hyper-parameters) to
    CONFIG_PATH as JSON.

    :param dataset: 0 -> English, 1 -> Google, 2 -> Targoman
    :raises ValueError: for any other value (the old if-chain left DATASET
        unbound and crashed with NameError instead)
    """
    dataset_names = {0: 'English', 1: 'Google', 2: 'Targoman'}
    # NB: True == 1, so dataset=True (Train's default) still maps to 'Google'.
    if dataset not in dataset_names:
        raise ValueError("Unknown dataset id: {!r}".format(dataset))
    DATASET = dataset_names[dataset]
    config = {'NAME': 'coattention',
              'EMBEDDING': 'keras',
              "DATASET": DATASET,
              "OPTIMIZER": 'Adam',
              "EARLY STOPPING": 'val_loss',
              "LOSS": 'categorical_crossentropy',
              'DROPOUT_RATE': DROPOUT_RATE,
              "EMBEDDING_DIM": EMBEDDING_DIM,
              "EPOCHS": EPOCHS,
              "BATCH_SIZE": BATCH_SIZE,
              "SEQ_LENGTH": SEQ_LENGTH,
              "NUM_CLASSES": NUM_CLASSES}
    print("save config in" + str(CONFIG_PATH))
    with open(CONFIG_PATH, 'w') as config_file:
        json.dump(config, config_file)
    return
Train(dataset=2)
|
6,425 | 015b06d7f08f9de60a46d8428820333621732c53 | import os
def log(text, level=2, outFile='log.txt'):
    """Minimal logger: print and/or append *text* to *outFile* by level.

    Levels: 0 = no-op; 1 = file only; 2 = console only (default);
    3 = file and console.  Any other level does nothing.

    :param text: message; converted with str() first
    :param level: output destination selector (see above)
    :param outFile: path appended to for levels 1 and 3
    :return: True for levels 0 and 3, None otherwise (matches the original
        contract)
    """
    text = str(text)
    if level == 0:
        return True
    # Single write path for both file-writing levels; the old code duplicated
    # the open/write block and called close() redundantly inside `with`.
    if level in (1, 3):
        with open(outFile, 'a') as logger:
            logger.write(text)
    if level in (2, 3):
        print(text)
    return True if level == 3 else None
if __name__ == '__main__':
    # Smoke test: level 2 prints to stdout only, so 'temp.txt' stays untouched.
    log('hello world', 2, 'temp.txt')
|
6,426 | 629649abe9d855122a5db6d61a20735ceb89c5cf | from pathlib import Path
import eyed3
import csv
import sys
import filetype
import os
# Export the ID3 tags of one hard-coded MP3 into a single-row CSV.
pathFile = Path('C:\\Users\\JORGE\\Music\\Vicente Garcia - Te Soñé (Lyric Video)(MP3_160K).mp3')
audiofile = eyed3.load(pathFile)
with open('loveMusic.csv', 'w', newline='') as csvFile:
    # NOTE(review): 'tittle'/'gender' look like typos for 'title'/'genre',
    # but they are the literal CSV header strings — kept as-is.
    fieldsName = ['nameFile', 'tittle', 'artist', 'gender', 'path']
    writer = csv.DictWriter(csvFile, fieldnames=fieldsName)
    writer.writeheader()
    # Fall back to '' for missing tags; encode to ASCII dropping other chars.
    tittle = audiofile.tag.title.encode('ascii', 'ignore') if audiofile.tag.title is not None else ''
    artist = audiofile.tag.artist.encode('ascii', 'ignore') if audiofile.tag.artist is not None else ''
    gender = audiofile.tag.genre.name.encode('ascii', 'ignore') if audiofile.tag.genre is not None else ''
    # 'nameFile' is written as a literal placeholder string, not the filename.
    writer.writerow({'nameFile':'nameFile','tittle':tittle,'artist':artist,'gender':gender,'path':str(pathFile).encode('ascii','ignore')})
6,427 | 935853a4afdb50a4652e14913d0cdb251a84ea14 | from typing import Sized
import pygame
import time
from pygame.locals import *
import random
SIZE = 20                    # side of one grid cell / sprite, in pixels
BACKGROUND = (45, 34, 44)    # RGB fill colour behind the board
W = 800                      # window width in pixels
H = 400                      # window height in pixels
SCREEN = (W, H)              # pygame display size tuple
class Snake:
    """Player-controlled snake: parallel x/y lists of grid-aligned segment
    coordinates plus a current movement direction string."""
    def __init__(self, parent_screen, length):
        # parent_screen: pygame surface the snake is blitted onto.
        # length: initial number of body segments.
        self.parent_screen = parent_screen
        self.length = length
        self.snake = pygame.image.load(
            "resources/snake.png").convert()  # inserting snake image
        self.snake_x = [W//2]*length  # list with 'length' number of elements
        self.snake_y = [H//2]*length
        self.direction = "left"  # default direction LEFT
    def increase_length(self):
        """Grow the snake by one segment."""
        self.length += 1
        # adds another block to snake
        # appends a random value to the list...cause it will change immidiately in 'move()' method
        self.snake_x.append(0)
        self.snake_y.append(0)
    def draw(self):
        """Blit every segment at its current position and flip the display."""
        # self.parent_screen.fill(BACKGROUND)
        for i in range(self.length):
            self.parent_screen.blit(
                self.snake, (self.snake_x[i], self.snake_y[i]))  # drawing snake
        pygame.display.flip()
    def move(self):
        """Advance one grid step: shift tail segments, then move the head."""
        # Logic gor moving the TAIL snakes [like 2nd snake will come to 1st pos, 3rd will move to 2nd pos.]
        for i in range(self.length-1, 0, -1):  # reverse for loop
            self.snake_x[i] = self.snake_x[i-1]
            self.snake_y[i] = self.snake_y[i-1]
        # Logic for moving the head snakes
        if self.direction == 'up':
            self.snake_y[0] -= SIZE
        if self.direction == 'down':
            self.snake_y[0] += SIZE
        if self.direction == 'right':
            self.snake_x[0] += SIZE
        if self.direction == 'left':
            self.snake_x[0] -= SIZE
        self.draw()
    def move_up(self):
        # Direction setters are called from the key handler in Game.
        self.direction = 'up'
    def move_down(self):
        self.direction = 'down'
    def move_right(self):
        self.direction = 'right'
    def move_left(self):
        self.direction = 'left'
# Apple class
class Food:
    """An edible item drawn at a grid-aligned position on the board."""
    def __init__(self, parent_screen):
        self.parent_screen = parent_screen
        # Two alternative sprites; draw() picks one at random each frame.
        self.food1 = pygame.image.load(
            "resources/food.png").convert()
        self.food2 = pygame.image.load(
            "resources/snake1.png").convert()
        # Starting position, grid-aligned.
        self.food_x = SIZE*3
        self.food_y = SIZE*2
    def draw(self):
        """Blit a randomly chosen sprite at the current position and flip."""
        sprite = random.choice([self.food1, self.food2])
        self.parent_screen.blit(sprite, (self.food_x, self.food_y))
        pygame.display.flip()
    def move(self):
        """Jump to a uniformly random grid cell inside the window."""
        self.food_x = random.randint(0, W//SIZE - 1) * SIZE
        self.food_y = random.randint(0, H//SIZE - 1) * SIZE
class Game:
    """Top-level controller for the snake game.

    Owns the pygame window, the Snake and Food objects, sound, score
    display and the main event loop (``run``).
    """

    def __init__(self):
        pygame.init()
        pygame.display.set_caption("Snake Game")
        self.surface = pygame.display.set_mode(
            SCREEN)  # creating game window 1000x720
        self.surface.fill(BACKGROUND)  # rgb color combination
        # snake object (surface, size_of_snake)
        self.snake = Snake(self.surface, 3)
        self.snake.draw()
        self.food = Food(self.surface)  # Food object(Surface)
        self.food.draw()
        pygame.mixer.init()  # pygame class mixer...for sound
        # start playing background b_music
        self.background_music()

    def is_collision(self, x1, y1, x2, y2):
        """Return True when point (x1, y1) lies inside the SIZE x SIZE
        cell anchored at (x2, y2), else False.

        Bug fix: the original returned ``None`` (not ``False``) when the
        x-range check failed, relying on None being falsy.
        """
        return (x2 <= x1 < x2 + SIZE) and (y2 <= y1 < y2 + SIZE)

    def play_sound(self, sound_location):
        """Play a short one-shot sound effect from *sound_location*."""
        sound = pygame.mixer.Sound(sound_location)  # sound is for short time
        pygame.mixer.Sound.play(sound)

    def background_music(self):
        """Start the looping background track."""
        pygame.mixer.music.load("resources/b_music1.mp3")
        pygame.mixer.music.play(-1)  # plays music infinitely

    def render_background(self):
        """Blit the background image over the whole window."""
        bg = pygame.image.load("resources/background.jpg")
        self.surface.blit(bg, (0, 0))

    def play(self):
        """Advance the game one frame: move, redraw, handle collisions.

        Raises RuntimeError when the snake collides with itself, which
        ``run`` catches to show the game-over screen.
        """
        self.render_background()  # render the background
        self.snake.move()
        self.food.draw()
        self.display_score()
        self.screen_msgs()
        pygame.display.flip()
        # Snake colliding with apple
        if self.is_collision(self.snake.snake_x[0], self.snake.snake_y[0], self.food.food_x, self.food.food_y):
            self.food.move()  # moves apple to random position
            self.snake.increase_length()
            # play sound when eating the food
            self.play_sound("resources/ding.mp3")
        # Snake colliding with itself -> Game Over logic
        for i in range(2, self.snake.length):
            if self.is_collision(self.snake.snake_x[0], self.snake.snake_y[0], self.snake.snake_x[i], self.snake.snake_y[i]):
                # play sound when game Over
                self.play_sound("resources/fail_buzz.mp3")
                # Bug fix: `raise "Game Over"` is illegal in Python 3 —
                # raising a plain string produced a TypeError, which only
                # worked by accident because run() catches Exception.
                raise RuntimeError("Game Over")
        self.touch_border_action()

    def pause_msg(self):
        """Overlay the '<Paused>' banner plus resume instructions."""
        font = pygame.font.SysFont('arial', 20)
        font1 = pygame.font.SysFont('Rockwell', 80)
        line1 = font1.render("<Paused>", True, (200, 200, 200))
        line2 = font.render("Press <UP, DOWN, LEFT, RIGHT> To Resume", True, (255, 255, 0))
        self.surface.blit(line1, (W//4 + 20, H//3))
        self.surface.blit(line2, (W//4 + 30, H//3 + 100))
        pygame.display.flip()

    def show_game_over(self):
        """Draw the game-over screen with final score and pause music."""
        self.render_background()
        font = pygame.font.SysFont('Cooper Black', 30)
        font1 = pygame.font.SysFont('Cooper Black', 60)
        line1 = font1.render("GAME OVER !!", True, (200, 0, 0))
        line1B = font.render(
            f"<<Score : {self.snake.length - 3}>>", True, (10, 255, 10))
        line2 = font.render(
            "Press <UP, DOWN, LEFT, RIGHT> To Play Again", True, (200, 200, 200))
        line3 = font.render("Press ESC to EXIT!", True, (255, 200, 0))
        self.surface.blit(line1, (W//4 - 25, H//3 - 45))
        self.surface.blit(line1B, (W//4 + 100, H//3 + 60))
        self.surface.blit(line2, (45, H//3 + 110))
        self.surface.blit(line3, (W//4 + 50, H//3 + 160))
        pygame.display.flip()
        # pause the background_music when game over
        pygame.mixer.music.rewind()
        pygame.mixer.music.pause()

    def touch_border_action(self):
        """Wrap the snake head around when it crosses a screen edge."""
        if self.snake.snake_x[0] == W:
            self.snake.snake_x[0] = 0
        elif self.snake.snake_x[0] < 0:
            self.snake.snake_x[0] = W
        if self.snake.snake_y[0] == H:
            self.snake.snake_y[0] = 0
        elif self.snake.snake_y[0] < 0:
            self.snake.snake_y[0] = H

    def reset_game(self):
        """Recreate snake and food for a fresh round."""
        self.snake = Snake(self.surface, 3)
        self.food = Food(self.surface)  # Food object(Surface)

    def display_score(self):
        """Render the current score (length minus starting length 3)."""
        font = pygame.font.SysFont('Algerian', 30)
        score = font.render(
            f"[Score : {self.snake.length - 3}]", True, (0, 255, 255))
        self.surface.blit(score, (W//2 - 70, 5))

    def screen_msgs(self):
        """Show the persistent pause/exit key hints."""
        font = pygame.font.SysFont('aharoni', 16)
        msgs1 = font.render("[SPACE] to Pause", True, (200, 204, 255))
        msgs2 = font.render("[ESC] to EXIT", True, (200, 204, 255))
        self.surface.blit(msgs1, (W - 100, H - 20))
        self.surface.blit(msgs2, (10, H - 20))

    def run(self):
        """Main event loop: handle input, tick the game, restart on loss."""
        clock = pygame.time.Clock()
        running = True
        pause_game = False
        while running:
            for event in pygame.event.get():
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE:  # PRESS esc to escape the screen
                        running = False
                    if event.key == K_SPACE:  # to pause the game
                        pygame.mixer.music.pause()
                        self.pause_msg()
                        pause_game = True
                    if event.key == K_UP:
                        self.snake.move_up()
                        pause_game = False
                        pygame.mixer.music.unpause()
                    if event.key == K_DOWN:
                        self.snake.move_down()
                        pause_game = False
                        pygame.mixer.music.unpause()
                    if event.key == K_LEFT:
                        self.snake.move_left()
                        pause_game = False
                        pygame.mixer.music.unpause()
                    if event.key == K_RIGHT:
                        self.snake.move_right()
                        pause_game = False
                        pygame.mixer.music.unpause()
                elif event.type == QUIT:
                    running = False
            if not pause_game:
                try:
                    self.play()
                except Exception:
                    # self-collision raised in play(): show game over and
                    # wait for a direction key to restart
                    self.show_game_over()
                    pause_game = True
                    self.reset_game()
            clock.tick(60)
# Script entry point: only start the game when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    game = Game()  # Game class object
    game.run()
# auto-py-to-exe.exe  # run this command to convert to exe
|
6,428 | 39abda1dd8b35889405db1b3971917d2a34180e3 | #Matthew Shrago
#implementation of bisection search.
# NOTE: Python 2 script (print statements, raw_input).
import math  # NOTE(review): unused import
low = 0
high = 100
ans = int((high + low)/2)  # first guess: midpoint of the range
print "Please think of a number between 0 and 100!"
# `ans` is an int and never equals 'c', so the loop only exits via break.
# Each iteration halves [low, high] around the user's secret number.
while ans != 'c':
    #print high, low
    print "Is your secret number " + str(ans) + "?",
    number = raw_input("Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter\n'c' to indicate I guessed correctly. ")
    if number == 'c':
        break
    if number == 'l':
        low = ans  # guess too low: raise the lower bound
    elif number == 'h':
        high = ans  # guess too high: lower the upper bound
    else:
        print "Sorry, I did not understand your input."
    ans = int((high + low)/2)  # next midpoint guess
print "Game over. Your secret number was: " + str(ans)
|
6,429 | 819607d89035413fc2800e9f16222619a74a5d64 | from functools import wraps
import maya.cmds as mc
import maya.mel as mel
import pymel.core as pm
from PySide2 import QtCore, QtGui, QtWidgets
import adb_core.Class__multi_skin as ms
import adbrower
from CollDict import pysideColorDic as pyQtDic
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import adb_tools.adb_pyQt.Class__rightClickCustom as adbRC
from maya_script import Adbrower
# Shared Adbrower helper instance used throughout the tool.
adb = adbrower.Adbrower()

VERSION = 1.0  # shown in the window title
# Per-OS resource locations, derived from the shared Adbrower config.
PATH_WINDOW = Adbrower.PATH_WINDOW_INIT + 'AppData/Roaming'
PATH_LINUX = Adbrower.PATH_LINUX_INIT
FOLDER_NAME = Adbrower.FOLDER_NAME_INIT
ICONS_FOLDER = Adbrower.ICONS_FOLDER_INIT
# Hex colors used to tint items in the tree view
# (shapes, highlights, skin clusters, joints).
YELLOW = '#ffe100'
ORANGE = '#fd651d'
GREEN = '#597A59'
DARKRED = '#745a54'
def undo(func):
    """Decorator: run the wrapped call inside a single Maya undo chunk.

    The chunk is opened just before calling *func* and closed in the
    ``finally`` block, so one Ctrl+Z undoes the whole operation even
    when *func* raises.  (Pattern from schworer's GitHub.)
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            # open the undo chunk
            mc.undoInfo(ock=True)
            return func(*args, **kwargs)
        finally:
            # close the chunk no matter how the call ended
            mc.undoInfo(cck=True)
    return wrapper
def flatList(ori_list=''):
    """Flatten *ori_list* by one level.

    Elements that are lists are spliced into the result; every other
    element (including tuples and strings) is kept unchanged.
    """
    flattened = []
    for element in ori_list:
        if isinstance(element, list):
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
#-----------------------------------
# CLASS
#-----------------------------------
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
    """Dockable Maya dialog showing a mesh -> shape -> skinCluster -> joint
    tree for the scene, with search, filtering and selection sync."""

    __dialog = None  # singleton instance reused by show_dialog()

    @classmethod
    def show_dialog(cls):
        """Show the singleton dialog, creating it on first use."""
        if cls.__dialog is None:
            cls.__dialog = cls()
        else:
            cls.__dialog.raise_()
        cls.__dialog.show()

    def __init__(self, parent=None):
        super(MultiSkin_UI, self).__init__(parent=parent)
        self.meshTreeWidget = QtWidgets.QTreeWidget()
        self.setObjectName('multi skin ui')
        self.starting_height = 500
        self.starting_width = 390
        self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))
        self.setWindowFlags(QtCore.Qt.Tool)
        self.setMinimumWidth(self.starting_width)
        self.resize(self.starting_width, self.starting_height)
        # -----------------------------
        # --- Create scrollArea
        self.mainBox = QtWidgets.QVBoxLayout()
        self.mainBox.setContentsMargins(0, 0, 0, 0)
        self.scroll_layout = QtWidgets.QScrollArea()
        self.mainBox.addWidget(self.scroll_layout)
        self.setLayout(self.mainBox)
        self.scroll_layout.setContentsMargins(0, 0, 0, 0)
        self.scroll_layout.setWidgetResizable(True)
        self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)
        self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)
        self.scroll_widget = QtWidgets.QWidget()
        self.scroll_layout.setWidget(self.scroll_widget)
        # -----------------------------
        # --- Main Layout
        self.main_layout = QtWidgets.QVBoxLayout()
        self.main_layout.setContentsMargins(*[5] * 4)
        self.main_layout.setSpacing(2)
        self.setLayout(self.main_layout)
        self.scroll_widget.setLayout(self.main_layout)
        self.widgetsAndLayouts()
        self.create_Button()
        self.buildMainLayout()

    def widgetsAndLayouts(self):
        """Create layouts, search bar, filters and the tree widget."""
        # --------- Predefine widgets
        # NOTE(review): addLine/addText appear to be unused helpers here —
        # confirm before removing.
        def addLine():
            # Horizontal separator line.
            line = QtWidgets. QFrame()
            line.setFrameShape(QtWidgets.QFrame.HLine)
            return line

        def addText(message, alignement=QtCore.Qt.AlignCenter, height=30, bold=False):
            # Fixed-height QLabel with optional bold font.
            myFont = QtGui.QFont()
            myFont.setBold(bold)
            text = QtWidgets.QLabel(message)
            text.setAlignment(alignement)
            text.setFixedHeight(height)
            text.setFont(myFont)
            return text
        # ------------------------------
        # --------- Layouts
        self.vLayoutAndFunctions = [
            # name, margins
            ['treeWidget', [1, 1, 1, 1]],
        ]
        self.vlayout = {}
        for layoutName, margins, in self.vLayoutAndFunctions:
            self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
            self.vlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3],)
        self.hLayoutAndFunctions = [
            # name, margins
            ['filterOptions', [1, 1, 1, 1]],
            ['buttonsOptions', [1, 1, 1, 1]],
            ['searchBarWidget', [1, 1, 1, 1]],
        ]
        self.hlayout = {}
        for layoutName, margins, in self.hLayoutAndFunctions:
            self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
            self.hlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3],)
        # ------------------------------
        # --------- QLINE EDIT WIDGET
        self.searchBar = QtWidgets.QLineEdit()
        self.searchBar.setPlaceholderText('Search...')
        self.searchBar.textEdited.connect(self.searchBarEdited)
        self.hlayout['searchBarWidget'].addWidget(self.searchBar)
        # ------------------------------
        # --------- CHECKBOX WIDGET
        self.matchCaseChx = QtWidgets.QCheckBox()
        self.matchCaseChx.setChecked(False)
        self.matchCaseChx.setText('Match Case')
        self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
        # ------------------------------
        # --------- RADIO BUTTON WIDGET
        self.allFilter = QtWidgets.QRadioButton('All', self)
        self.allFilter.setChecked(True)
        self.allFilter.toggled.connect(self.refreshQtree)
        self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
        self.skinClusterFilter.setChecked(True)
        self.skinClusterFilter.toggled.connect(self.refreshQtree)
        # ------------------------------
        # --------- TREE LIST WIDGET
        self.meshTreeWidget = QtWidgets.QTreeWidget()
        self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
        self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.ExtendedSelection)
        self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
        header = QtWidgets.QTreeWidgetItem(["Geometries"])
        self.meshTreeWidget.setHeaderItem(header)
        self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
        self.meshTreeWidget.itemSelectionChanged .connect(self.singleClickedAction)
        self.refreshQtree()

    def create_Button(self):
        """ Create the buttons """
        self.buttonAndFunctions = [
            # name, function , group number, labelColor, backgroundColor, layout, layout_coordinate width
            ['Show Selected', self.showSelected, 0, pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'], '', 30],
            ['Refresh', self.refreshQtree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
            ['Clear', self.meshTreeWidget.clear, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
            ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],
            ['Close All', self.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],
        ]
        # Build Buttons
        self.buttons = {}
        for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width, in self.buttonAndFunctions:
            self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
            self.buttons[buttonName].clicked.connect(buttonFunction)
            try:
                # grid layouts get explicit "row,col" coordinates
                layout.addWidget(self.buttons[buttonName], int(layout_coord.split(',')[0]), int(layout_coord.split(',')[1]))
            except ValueError:
                # empty coordinate string: plain box layout
                layout.addWidget(self.buttons[buttonName])
        # add Right Clicked Options
        _optionsExpandAll = self.buttons['Expand All'].addButtonActions(['Shapes', 'Skin Clusters'])
        _optionsExpandAll['Shapes'].triggered.connect(lambda: self.expandTree('shape'))
        _optionsExpandAll['Skin Clusters'].triggered.connect(lambda: self.expandTree('skin cluster'))
        _optionsCloseAll = self.buttons['Close All'].addButtonActions(['Shapes', 'Skin Clusters'])
        _optionsCloseAll['Shapes'].triggered.connect(lambda: self.closeTree('shape'))
        _optionsCloseAll['Skin Clusters'].triggered.connect(lambda: self.closeTree('skin cluster'))

    def buildMainLayout(self):
        """Assemble the filter row, search bar, buttons and tree."""
        # ------------------------------
        # --------- BUILD MAIN LAYOUT
        self.main_layout.addLayout(self.hlayout['filterOptions'])
        self.hlayout['filterOptions'].addWidget(self.allFilter)
        self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
        self.hlayout['filterOptions'].addStretch()
        self.main_layout.addLayout(self.hlayout['searchBarWidget'])
        self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
        self.main_layout.addLayout(self.hlayout['buttonsOptions'])
        self.main_layout.addLayout(self.vlayout['treeWidget'])

    # ==================================
    # SLOTS
    # ==================================

    def refreshQtree(self):
        """Rebuild the tree using the currently selected filter."""
        self.meshTreeWidget.clear()
        all_status = self.allFilter.isChecked()
        if all_status:
            _filter = 'all'
        else:
            _filter = 'skinClusters'
        self.filterList = self.filterMeshes(filter=_filter)
        self.populateQTree(self.filterList)

    def getSearchBarText(self):
        """Return the raw text currently in the search bar."""
        searchBarText = self.searchBar.text()
        return searchBarText

    def searchBarEdited(self):
        """Filter the tree to meshes matching every word of the query."""
        matchCase = bool(self.matchCaseChx.checkState())
        query = self.searchBar.text()
        if matchCase:
            query_words = str(query).split(" ")
        else:
            query_words = str(query).lower().split(" ")
        # drop empty strings from repeated spaces
        # NOTE(review): under Python 3 `filter` returns an iterator, so
        # len(query_words) below would raise — confirm this tool only runs
        # under Maya's Python 2 interpreter.
        query_words = filter(None, query_words)
        scoreList = {}
        for item in [str(x) for x in self.filterList]:
            score = 0
            for query_word in query_words:
                if matchCase:
                    if query_word in item:
                        score += 1
                else:
                    if query_word in item.lower():
                        score += 1
            scoreList[item] = score
        # If user enter more than one words, get only result with a score at least equal to the number of words in the query
        sorted_matches = [i for i in scoreList.items() if i[1] >= len(query_words)]
        # Sort matches by score
        sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
        sorted_matches_string = [name for name, index in sorted_matches]
        self.meshTreeWidget.clear()
        self.populateQTree(sorted_matches_string)

    def populateQTree(self, filterList):
        """Fill the tree: mesh roots, then shapes, clusters and joints."""
        # Meshes
        # ----------------------
        self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(item)]) for item in filterList]
        [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]
        [root.setExpanded(True) for root in self.roots]
        # Shapes
        # ----------------------
        self.QtShapes = []
        shape_dic = self.getAllShapes(self.getAllMeshes())
        QTroots_dic = {}  # Keys are Qtree object
        for root in self.roots:
            try:
                QTroots_dic.update({root: shape_dic[root.text(0)]})
            except KeyError:
                # root not in the scene-wide shape map (e.g. stale name)
                pass
        # added the shapes under there mesh
        for QTroot, shapesList in QTroots_dic.items():
            [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in shapesList]
            # changed their color
            child_count = QTroot.childCount()
            children = [QTroot.child(index) for index in range(child_count)]
            [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for child in children]
            [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in children]
            [child.setExpanded(True) for child in children]
            [self.QtShapes.append(child) for child in children]
        # skinClusters
        # ----------------------
        self.QTClusters = []
        cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))
        QTshape_dic = {}
        for shape in self.QtShapes:
            QTshape_dic.update({shape: cluster_dic[shape.text(0)]})
        # added the skinCluster under there shape
        for QTshape, clusterList in QTshape_dic.items():
            if clusterList == 'None':
                # shape has no skinCluster: nothing to attach
                pass
            else:
                QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])
            # changed their color
            child_count = QTshape.childCount()
            children = [QTshape.child(index) for index in range(child_count)]
            [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for child in children]
            [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in children]
            [self.QTClusters.append(child) for child in children]
        # Joints
        # ----------------------
        bindJoints_dic = self.getBindJointsFromCluster([x for x in cluster_dic.values() if x != 'None'])
        QTcluster_dic = {}
        for cluster in self.QTClusters:
            QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})
        for QTCluster, jointList in QTcluster_dic.items():
            [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in jointList]
            # changed their color
            child_count = QTCluster.childCount()
            children = [QTCluster.child(index) for index in range(child_count)]
            [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for child in children]
            [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in children]

    def closeTree(self, type='mesh'):
        """Collapse all items of the given level ('mesh', 'shape' or
        'skin cluster')."""
        if type == 'mesh':
            [root.setExpanded(False) for root in self.roots]
        elif type == 'shape':
            [shape.setExpanded(False) for shape in self.QtShapes]
        elif type == 'skin cluster':
            [sclus.setExpanded(False) for sclus in self.QTClusters]

    def expandTree(self, type='mesh'):
        """Expand all items of the given level ('mesh', 'shape' or
        'skin cluster')."""
        if type == 'mesh':
            [root.setExpanded(True) for root in self.roots]
        elif type == 'shape':
            [shape.setExpanded(True) for shape in self.QtShapes]
        elif type == 'skin cluster':
            [sclus.setExpanded(True) for sclus in self.QTClusters]

    def showSelected(self):
        """Rebuild the tree from the current Maya selection only."""
        selection = pm.selected()
        selection.sort()
        self.meshTreeWidget.clear()
        self.populateQTree(selection)

    def singleClickedAction(self):
        """Mirror the tree selection into the Maya scene selection."""
        mySelection = self.meshTreeWidget.selectedItems()
        str_selected = [x.text(0) for x in mySelection]
        pm.select(str_selected, r=1)

    def filterMeshes(self, filter='all'):
        """
        filter:
            all : all meshes
            skinClusters : all meshes with skinClusters
            None
        """
        if filter == 'all':
            return self.getAllMeshes()
        elif filter == "skinClusters":
            clusters = pm.ls(type='skinCluster')
            meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for c in clusters], []))
            meshes = set([x.getParent() for x in meshesShapes if pm.objectType(x) == 'mesh'])
            return meshes
        elif filter == 'None':
            return None

    # ==================================
    # STATIC METHOD
    # ==================================

    @staticmethod
    def test():
        # debug helper
        print ('test')

    @staticmethod
    def getSkinCluster(_transform):
        """
        Find a SkinCluster from a transform
        Returns the skinCluster node
        """
        result = []
        if not (pm.objExists(_transform)):
            return result
        validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
        if validList is None:
            return result
        for elem in validList:
            if pm.nodeType(elem) == 'skinCluster':
                result.append(elem)
        pm.select(result, r=True)
        result_node = pm.selected()
        if len(result_node) > 1:
            return result_node
        else:
            try:
                return result_node[0]
            except IndexError:
                # no skinCluster found on this transform
                return False

    @staticmethod
    def getBindJointsFromCluster(clusterList):
        """
        Find all joints attached to a skinCluster
        @param clusterList: List. list of skin Clusters
        return dic with key: skin Cluster. Value: list of joint
        """
        bindJoints_dic = {}
        for cluster in clusterList:
            all_binds_jnts = [x for x in pm.listConnections(str(cluster) + '.matrix[*]', s=1)]
            bindJoints_dic.update({str(cluster): all_binds_jnts})
        return bindJoints_dic

    @staticmethod
    def getAllMeshes():
        """
        return: list of all meshes / geometry
        """
        shapesList = pm.ls(type="mesh", ni=1)
        transformList = list(set(pm.listRelatives(shapesList, parent=True)))
        transformList.sort()
        return transformList

    @staticmethod
    def getAllShapes(transforms):
        """
        @param transforms: List.
        return : dictionnary with key:mesh / values: shapes
        """
        shapes_dic = {}
        for transform in transforms:
            all_shapes = pm.PyNode(transform).getShapes(ni=True)
            shapes_dic.update({str(transform): all_shapes})
        return shapes_dic

    def getSkinClusterbyShape(self, shapes):
        """
        get skinCluster attached to the shape
        @param shapes: List
        return: List
        """
        cluster_dic = {}
        for shape in shapes:
            try:
                incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
                if pm.objectType(incoming) == 'skinCluster':
                    cluster_dic.update({str(shape): incoming})
                else:
                    # not directly fed by a skinCluster: search deformers
                    skinCluster = self.getSkinCluster(shape)
                    if skinCluster:
                        if len(skinCluster) > 1:
                            cluster_dic.update({str(shape): 'None'})
                        else:
                            cluster_dic.update({str(shape): skinCluster})
                    else:
                        cluster_dic.update({str(shape): 'None'})
            except TypeError:
                # listConnections returned None (no incoming connection)
                cluster_dic.update({str(shape): 'None'})
        return cluster_dic
# ===============================
# BUILD WINDOW
# ===============================
def showUI(dialog=False):
    """Launch the Multi Skin tool.

    With ``dialog=True`` the dockable singleton dialog is shown;
    otherwise any previous window is deleted and a fresh one is opened.
    """
    if dialog:
        MultiSkin_UI.show_dialog()
        return
    # Make sure the UI is deleted before recreating
    global tools_cw_ui
    try:
        tools_cw_ui.deleteLater()
    except:
        pass
    tools_cw_ui = MultiSkin_UI()
    tools_cw_ui.show()
# showUI()
|
6,430 | 9761070a75b043f6cc9e6134e09810b215ccd0c0 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# 可写函数说明
# NOTE(review): this shadows the builtin sum() — Python 2 demo script.
def sum(arg1, arg2):
    # Return the sum of the 2 arguments.
    total = arg1 + arg2
    print "函数内 : ", total
    return total;
# Call the sum function
total = sum(10, 20);
# A function whose body is only `pass` does nothing and returns None.
def nop():
    pass
a = nop();
6,431 | 150603004a4b194a7c08f1f23e37c613aa3b883a | import Utility
import copy
class Ratio_Execution_Time:
    """Turns per-block execution-time counts into percentages and
    renders them as highlighted HTML tables (reportExecute.html)."""

    utility = None

    def __init__(self):
        self.utility = Utility.Utility()
        print("Successfully Found Ration Corrssponding to Execution Time")

    def calculatePercentage(self, B, total, strr):
        """Map each (start, end) line span in *B* to its share of *total*,
        keyed by the span's source lines rendered as <p> paragraphs.

        A zero *total* yields 0.0 for every entry instead of dividing.
        """
        percentages = {}
        for span in B:
            start, end = span[0], span[1]
            block_html = ''.join('<p>' + strr[idx] + '</p>' for idx in range(start, end + 1))
            if total == 0:
                percentages[block_html] = 0.0
            else:
                percentages[block_html] = (B[span] / total) * 100
        return percentages

    def calculateHighestPercentageBlock(self, B):
        """Return the sub-dict of *B* holding the maximum value
        (empty when *B* is empty)."""
        best = -1
        for value in B.values():
            if value > best:
                best = value
        return {key: value for key, value in B.items() if value == best}

    def tableMaking(self, B, title, clr, blr, H):
        """Render *B* as an HTML table; rows whose value appears in *H*
        (the highest-percentage blocks) use the highlight colors
        *clr*/*blr*, every other row the default grey scheme."""
        parts = ['<table style="width:70%; border: 1px solid black;border-collapse: collapse;">']
        parts.append('<caption style="font:italic;font-size:20;font-weight:bold;color:#2b20a1 ">' + title + '</caption>')
        parts.append('<tr>')
        parts.append('<th style = "font-size:18;" > Block </th>')
        parts.append('<th style = "font-size:18;"> Time Percentage </th>')
        parts.append('</tr>')
        for line in B:
            if B[line] in H.values():
                curColor, burColor = clr, blr
            else:
                curColor, burColor = 'black', '#cfd2d2'
            parts.append('<tr>')
            parts.append('<td style="border: 1px solid black;font-family:verdana;font-size:16;font-weight:bold;background-color:' + burColor + ';color:' + curColor + '" >' + line + '</td>')
            parts.append('<td style="text-align: center;border: 1px solid black;font-family:verdana;font-size:16;font-weight:bold;background-color:' + burColor + ';color:' + curColor + '">' + str(B[line]) + ' %</td>')
            parts.append('</tr>')
        parts.append('</table>')
        return ''.join(parts)

    def blockWiseExecutionTimePercentage(self, total, E1, E2, E3, strr):
        """Convert the three raw-count dicts (if/else, loop, function)
        into percentage dicts and emit the HTML report."""
        ifelse_pct = self.calculatePercentage(E1, total, strr)
        loop_pct = self.calculatePercentage(E2, total, strr)
        func_pct = self.calculatePercentage(E3, total, strr)
        self.ShowTheTableOfPercentage(ifelse_pct, loop_pct, func_pct)

    def ShowTheTableOfPercentage(self, e1, e2, e3):
        """Write reportExecute.html with one table per non-empty
        category, highlighting the highest-percentage entries."""
        highest_ifelse = self.calculateHighestPercentageBlock(e1)
        highest_loop = self.calculateHighestPercentageBlock(e2)
        highest_func = self.calculateHighestPercentageBlock(e3)
        html = "<html><body><code>\n"
        if e1:
            html += self.tableMaking(e1, 'If-ElseIf-Else Operations', "#9d0235", "#7bc8ff", highest_ifelse)
        if e2:
            html += self.tableMaking(e2, 'Loop Operations', "red", "#5cffee", highest_loop)
        if e3:
            html += self.tableMaking(e3, 'Function Operation', "blue", "#f4fc76", highest_func)
        html += "</code></body></html>\n"
        with open("reportExecute.html", "w") as report:
            report.write(html)
6,432 | aeb986360c6990f9375f2552cbdeef595af815b4 | import numpy as np
np.random.seed(1)
class MonteCarloGameDriver():
    """Plays a 2048-style game by Monte-Carlo playouts: at each step it
    runs many random games and picks the first move whose playouts had
    the best average final score."""

    def __init__(self):
        # candidate swipe directions and their (uniform) sampling weights
        self.default_moves = np.array(['w','a','s','d'])
        self.probability_distribution = np.array([.25,.25,.25,.25])

    def run_game(self, simulation_size=20):
        """Drive one full game, choosing each move from *simulation_size*
        simulated playouts; log the finished game via log_game()."""
        from game import GameLayout
        from copy import deepcopy  # NOTE(review): unused here — simulate() re-imports it
        game = GameLayout()
        while game.active:
            # simulate simulation_size games starting at this point
            game_performance = self.simulate(game, simulation_size)
            if len(game_performance)==0:
                # no playout produced a scored first move: stop the game
                game.end_game()
                print("After {} simulations, achieved max tile {} and score {}".format(simulation_size, game.final_layout.max(), game.score))
                break
            # return the first move with highest average score
            recommendation = max(game_performance, key=game_performance.get)
            game.swipe(recommendation)
        # game is over
        self.log_game(game)

    def simulate(self, game, simulation_size):
        """Run *simulation_size* random playouts and return a dict of
        first move -> average final score."""
        from collections import defaultdict
        game_performance = defaultdict(list)
        from copy import deepcopy
        for i in range(simulation_size):
            # run copy game multiple times, saving final scores and first moves each time
            game_copy = deepcopy(game)
            # NOTE(review): reset() right after deepcopy restarts each
            # playout from a fresh board rather than the current position,
            # despite the "starting at this point" comment — confirm intent.
            game_copy.reset()
            while game_copy.active:
                move_order = self.weighted_shuffle(self.default_moves, self.probability_distribution)
                for move in move_order:
                    try:
                        game_copy.swipe(move)
                        break
                    except:
                        # move didn't work, try next move
                        continue
            # log final score and first move
            try:
                game_performance[self.default_moves[(game_copy.moves[0]==1).argmax()]].append(game_copy.score)
            except AttributeError:
                # playout recorded no moves at all: nothing to log
                pass
        # get average score for each first move
        game_performance = {key: np.mean(val) for key, val in game_performance.items()}
        return game_performance

    def weighted_shuffle(self, options, weights):
        """Return *options* in a random order where each draw is weighted
        by *weights* (renormalized after every removal)."""
        lst = list(options)
        w = [None]*len(lst) # make a copy
        for i in range(len(lst)):
            win_idx = np.random.choice(range(len(lst)), p=weights)
            w[i] = lst[win_idx]
            del lst[win_idx]
            weights = np.delete(weights, win_idx)
            weights = weights/weights.sum()
        return w

    def log_game(self, game):
        """Append the finished *game*'s stats to the driver's history,
        creating the history arrays on the first call."""
        assert not game.active # must be a finished game
        try:
            self.final_scores = np.append(self.final_scores, game.score)
            self.num_moves = np.append(self.num_moves, game.num_moves)
            self.layouts.append(game.layouts)
            self.final_layouts.append(game.final_layout)
            self.moves.append(game.moves)
            self.scores.append(game.scores)
            self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())
            self.max_tile = np.append(self.max_tile, game.final_layout.max())
            self.wins = np.append(self.wins, game.won)
        except AttributeError:
            # first logged game: initialize the history containers
            self.final_scores = np.array(game.score)
            self.num_moves = np.array(game.num_moves)
            self.layouts = [game.layouts]
            self.final_layouts = [game.final_layout]
            self.moves = [game.moves]
            self.scores = [game.scores]
            self.tile_sums = np.array(game.final_layout.sum())
            self.max_tile = np.array(game.final_layout.max())
            self.wins = np.array(game.won)
6,433 | 347bfb2d8809b55046f698620a690099cc83fb56 | import sys
import vector
import matrix
def convert_arg_to_list(arg):
    """Convert an iterable of strings to a list of floats.

    Exits the program with an error message when any element cannot be
    parsed.  Bug fix: the original bare ``except:`` also swallowed
    SystemExit/KeyboardInterrupt; only conversion errors are caught now.
    """
    try:
        return [float(elem) for elem in arg]
    except (TypeError, ValueError):
        sys.exit("Invalid content inside {}".format(arg))
# Entry point: exercises the Vector and Matrix classes with the two
# space-separated argument strings given on the command line.
if __name__ == "__main__":
    try:
        vector1 = sys.argv[1].split(' ')
        vector2 = sys.argv[2].split(' ')
    except:
        sys.exit("Invalid vectors")
    # NOTE(review): this block re-parses the same argv entries — matrix
    # input is identical to vector input; confirm that is intended.
    try:
        matrix1 = sys.argv[1].split(' ')
        matrix2 = sys.argv[2].split(' ')
    except:
        sys.exit("Invalid Matricies")
    print("\nVector tests : ", end='\n\n')
    v = vector.Vector(convert_arg_to_list(vector1))
    v2 = vector.Vector(convert_arg_to_list(vector2))
    #--------------------------------------------#
    #                Vector part                 #
    v.add(v2)
    print("Add :", v)
    v.sub(v2)
    print("Sub :", v)
    v.scale(v2)
    print("Scale :", v)
    #                                            #
    #--------------------------------------------#
    print("\nMatrix tests : ", end='\n\n')
    #--------------------------------------------#
    #                Matrix part                 #
    m = matrix.Matrix(convert_arg_to_list(matrix1))
    m2 = matrix.Matrix(convert_arg_to_list(matrix2))
    m.add(m2)
    print("Add :\n", m)
    m.sub(m2)
    print("\nSub :\n", m)
    m.scale(m2)
    print("\nScale :\n", m)
    #--------------------------------------------#
|
6,434 | 7b047ba110732d1b0a749bcbbaa9b55306ca2071 | # --- Do not remove these libs ---
from freqtrade.strategy.interface import IStrategy
from typing import Dict, List
from functools import reduce
from pandas import DataFrame
# --------------------------------
import datetime
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
import numpy as np# noqa
class ema(IStrategy):
    """EMA-crossover freqtrade strategy: buys when a blended fast/slow
    EMA spread crosses above its 29-period SMA signal line, sells on
    the cross below."""
    max_open_trades = 10
    stake_amount = 50
    # Minimal ROI designed for the strategy.
    # This attribute will be overridden if the config file contains "minimal_roi"
    # Optimal stoploss designed for the strategy
    # This attribute will be overridden if the config file contains "stoploss"
    stoploss = -1  # effectively disabled (-100%)
    minimal_roi = {
        "0": 10  # effectively disabled (1000% target)
    }
    # Optimal timeframe for the strategy
    timeframe = '5m'
    # trailing stoploss
    trailing_stop = False
    trailing_stop_positive = 0.1
    trailing_stop_positive_offset = 0.2
    # run "populate_indicators" only for new candle
    process_only_new_candles = False
    # Experimental settings (configuration will overide these if set)
    use_sell_signal = True
    sell_profit_only = False
    ignore_roi_if_buy_signal = False
    # Optional order type mapping
    order_types = {
        'buy': 'limit',
        'sell': 'limit',
        'stoploss': 'market',
        'stoploss_on_exchange': False
    }

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Adds several different TA indicators to the given DataFrame
        Performance Note: For the best performance be frugal on the number of indicators
        you are using. Let uncomment only the indicator you are using in your strategies
        or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
        """
        # NOTE(review): the column names do not match the periods used —
        # 'ema6' holds a 9-period EMA, 'ema24' an 18-period, 'ema11' a
        # 32-period and 'ema25' a 64-period — confirm this is intended.
        dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)
        dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)
        dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)
        dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)
        # fast spread (9-18) and slow spread (32-64)
        dataframe['ema'] = dataframe['ema6']-dataframe['ema24']
        dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']
        # NOTE(review): the 0.6/0.5 blend weights sum to 1.1, slightly
        # amplifying the combined spread — confirm the weights are deliberate.
        dataframe['ema'] = dataframe['ema']*0.6 + dataframe['ema2']*0.5
        # 'ema2' is reused as the 29-period SMA signal line of the blend
        dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)
        return dataframe

    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Based on TA indicators, populates the buy signal for the given dataframe
        :param dataframe: DataFrame
        :return: DataFrame with buy column
        """
        dataframe.loc[
            (
                (qtpylib.crossed_above(dataframe['ema'], dataframe['ema2']))
            ), 'buy'] = 1
        return dataframe

    def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Based on TA indicators, populates the sell signal for the given dataframe
        :param dataframe: DataFrame
        :return: DataFrame with buy column
        """
        dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])), 'sell'] = 1
        return dataframe
6,435 | 326b2dcbef339aeb196bef23debad75fa079b121 | import string
import pandas as pd
import nltk
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
from nltk.tokenize import WordPunctTokenizer
import json
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
import pickle
import re
import nlpaug.augmenter.word as naw
import nlpaug.flow as naf
class Processing:
def __init__(self, stopwords_path='data/', tokenizer_path='models/', max_len=80):
# It needs a stopwords file to init
stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt', header=None)
stop_words = stop_words[0].tolist() + ['secuela']
self.stop_words = stop_words
self.n_words = 8000
self.max_len = max_len
# self.aug = naf.Sequential([
# naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action="insert", aug_p=0.1),
# naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action="substitute", aug_p=0.9),
# naw.RandomWordAug(action="delete", aug_p=0.1)
# ])
try:
self.stemmer = SnowballStemmer("spanish", ignore_stopwords=True)
except:
nltk.download("popular")
self.stemmer = SnowballStemmer("spanish", ignore_stopwords=True)
# loading
with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.__vocab_size = len(self.tokenizer.word_index) + 1
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ñ", "n")
)
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
# Stem the sentence
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(sentence)]
return " ".join(stemmed_text)
def augment(self, x):
try:
return self.aug.augment(x)
except:
return None
def clean_overview(self, df):
# Execute the full cleaning process into every overview
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))
df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))
df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))
return df
# Get staff and paste to overview
@staticmethod
def eval_cell(cell):
try:
cell_array = eval(cell)
except:
cell_array = []
return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
def get_director(self, crew):
eval_crew = self.eval_cell(crew)
directors = [member['name'] for member in eval_crew if member['job'] == 'Director']
directors = [self.normalize(director.replace(' ', '_').lower()) for director in directors]
directors = str(' '.join(directors))
return directors
def paste_cast(self, data):
    """Prepend the top cast names and director name(s) to each overview.

    Mutates and returns *data*; mirrors the pasting already done inside
    clean_overview().
    """
    data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)
    data['overview'] = data.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)
    return data
# Split train_test
def split_data(self, data):
    """Split overviews/labels into stratified train and test sets.

    15% of the rows go to the test split, stratified on the 'like'
    label with a fixed random_state for reproducibility.

    Returns:
        (overviews_train, overviews_test, y_train, y_test)
    """
    overviews = data['overview'].values
    y = data['like'].values
    overviews_train, overviews_test, y_train, y_test = train_test_split(overviews, y, test_size=0.15, stratify=y,
                                                                        random_state=9)
    return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
    """Fit the Keras tokenizer on the training texts only.

    Sets self.vocab_size to the word-index size + 1.
    """
    self.tokenizer = Tokenizer(num_words)
    self.tokenizer.fit_on_texts(overviews_train)
    # Adding 1 because index 0 is reserved (used for padding).
    self.vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
    """Convert texts to integer sequences left-padded to *max_len*."""
    sequences = self.tokenizer.texts_to_sequences(overviews)
    from keras.preprocessing.sequence import pad_sequences
    # Pad on the left ('pre') so every sequence is exactly max_len long.
    return pad_sequences(sequences, padding='pre', maxlen=max_len)
def process(self, data, train_dev):
    """Clean the data and produce tokenized model inputs.

    Args:
        data: dataframe with 'overview', 'cast', 'crew' columns (plus
            'like' when train_dev is truthy).
        train_dev: when truthy, split into train/test and fit the
            tokenizer on the training texts; otherwise tokenize every row
            with the already-fitted tokenizer.
    """
    df = self.clean_overview(data)
    # NOTE(review): clean_overview() already pastes cast/director into the
    # overview, so this pastes them a second time -- confirm intent.
    df = self.paste_cast(df)
    if train_dev:
        # NOTE(review): y_train/y_test from split_data() are discarded
        # here; verify that callers re-derive labels elsewhere.
        X_train, X_test, y_train, y_test = self.split_data(df)
        self.fit_tokenizer(X_train, self.n_words)
        X_train = self.tokenize_overview(X_train, self.max_len)
        X_test = self.tokenize_overview(X_test, self.max_len)
        return X_train, X_test
    else:
        X = df['overview'].values
        X = self.tokenize_overview(X, self.max_len)
        return X
|
6,436 | b2944a95dbe25057155aaf6198a97d85b3bb620b | from typing import Dict, Tuple
import torch
from tqdm import tqdm
import schnetpack.properties as structure
from schnetpack.data import AtomsLoader
__all__ = ["calculate_stats"]
def calculate_stats(
    dataloader: AtomsLoader,
    divide_by_atoms: Dict[str, bool],
    atomref: Dict[str, torch.Tensor] = None,
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Use the incremental Welford algorithm described in [h1]_ to accumulate
    the mean and standard deviation over a set of samples.

    References:
    -----------
    .. [h1] https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance

    Args:
        dataloader: data loader yielding property batches
        divide_by_atoms: dict from property name to bool:
            If True, divide property by number of atoms before calculating statistics.
        atomref: reference values for single atoms to be removed before calculating stats

    Returns:
        dict mapping each property name to a (mean, stddev) tensor pair
    """
    property_names = list(divide_by_atoms.keys())
    # 1.0 for per-atom properties (to be normalized by n_atoms), else 0.0.
    norm_mask = torch.tensor(
        [float(divide_by_atoms[p]) for p in property_names], dtype=torch.float64
    )
    # Welford accumulators: sample count, running mean, sum of squared deviations.
    count = 0
    mean = torch.zeros_like(norm_mask)
    M2 = torch.zeros_like(norm_mask)
    for props in tqdm(dataloader):
        sample_values = []
        for p in property_names:
            val = props[p][None, :]
            if atomref and p in atomref.keys():
                # Subtract the per-structure sum of single-atom reference values
                # (scatter-add the per-atom refs by structure index idx_m).
                ar = atomref[p]
                ar = ar[props[structure.Z]]
                idx_m = props[structure.idx_m]
                tmp = torch.zeros((idx_m[-1] + 1,), dtype=ar.dtype, device=ar.device)
                v0 = tmp.index_add(0, idx_m, ar)
                val -= v0
            sample_values.append(val)
        sample_values = torch.cat(sample_values, dim=0)
        batch_size = sample_values.shape[1]
        new_count = count + batch_size
        # Per-atom properties divided by n_atoms; the others left unchanged.
        norm = norm_mask[:, None] * props[structure.n_atoms][None, :] + (
            1 - norm_mask[:, None]
        )
        sample_values /= norm
        sample_mean = torch.mean(sample_values, dim=1)
        sample_m2 = torch.sum((sample_values - sample_mean[:, None]) ** 2, dim=1)
        # Chan et al. batched merge of (mean, M2) with the running totals.
        delta = sample_mean - mean
        mean += delta * batch_size / new_count
        corr = batch_size * count / new_count
        M2 += sample_m2 + delta**2 * corr
        count = new_count
    # Population standard deviation from the accumulated M2.
    stddev = torch.sqrt(M2 / count)
    stats = {pn: (mu, std) for pn, mu, std in zip(property_names, mean, stddev)}
    return stats
|
6,437 | b7d7b6c070f237f9ab59f3367417ecf2672fbaaf | """
Copyright (c) 2017- Sinergise and contributors
For the full list of contributors, see the CREDITS file in the root directory of this source tree.
This source code is licensed under the MIT license, see the LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from eolearn.core import FeatureType
from eolearn.mask import CloudMaskTask
from eolearn.mask.cloud_mask import _get_window_indices
@pytest.mark.parametrize(
    ("num_of_elements", "middle_idx", "window_size", "expected_indices"),
    [
        # (total length, center index, window size, expected (min, max))
        (100, 0, 10, (0, 10)),
        (100, 1, 10, (0, 10)),
        (100, 50, 10, (45, 55)),
        (271, 270, 10, (261, 271)),
        (314, 314, 10, (304, 314)),
        (100, 0, 11, (0, 11)),
        (100, 1, 11, (0, 11)),
        (100, 50, 11, (45, 56)),
        (271, 270, 11, (260, 271)),
        (314, 314, 11, (303, 314)),
        (11, 2, 11, (0, 11)),
        (11, 2, 33, (0, 11)),
    ],
    ids=str,
)
def test_window_indices_function(num_of_elements, middle_idx, window_size, expected_indices):
    """Window indices are clipped at both ends and span min(n, window) items."""
    min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx, window_size)
    assert (min_idx, max_idx) == expected_indices
    # The resulting slice always covers the requested window size (or the
    # whole sequence, when it is shorter than the window).
    test_list = list(range(num_of_elements))
    assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)
def test_mono_temporal_cloud_detection(test_eopatch):
    """Mono-temporal CloudMaskTask reproduces the reference CLM/CLP layers."""
    add_tcm = CloudMaskTask(
        data_feature=(FeatureType.DATA, "BANDS-S2-L1C"),
        all_bands=True,
        is_data_feature=(FeatureType.MASK, "IS_DATA"),
        mono_features=("CLP_TEST", "CLM_TEST"),
        mask_feature=None,  # no combined mask; only mono features are written
        average_over=4,
        dilation_size=2,
        mono_threshold=0.4,
    )
    eop_clm = add_tcm(test_eopatch)

    # Exact agreement with the stored reference mask and probability layers.
    assert_array_equal(eop_clm.mask["CLM_TEST"], test_eopatch.mask["CLM_S2C"])
    assert_array_equal(eop_clm.data["CLP_TEST"], test_eopatch.data["CLP_S2C"])
def test_multi_temporal_cloud_detection_downscaled(test_eopatch):
    """Multi-temporal detection at 120m resolution matches stored references."""
    add_tcm = CloudMaskTask(
        data_feature=(FeatureType.DATA, "BANDS-S2-L1C"),
        processing_resolution=120,
        mono_features=("CLP_TEST", "CLM_TEST"),
        multi_features=("CLP_MULTI_TEST", "CLM_MULTI_TEST"),
        mask_feature=(FeatureType.MASK, "CLM_INTERSSIM_TEST"),
        average_over=8,
        dilation_size=4,
    )
    eop_clm = add_tcm(test_eopatch)

    # Check shape and type: (time, height, width, 1) matching the input bands.
    for feature in ((FeatureType.MASK, "CLM_TEST"), (FeatureType.DATA, "CLP_TEST")):
        assert eop_clm[feature].ndim == 4
        assert eop_clm[feature].shape[:-1] == eop_clm.data["BANDS-S2-L1C"].shape[:-1]
        assert eop_clm[feature].shape[-1] == 1
    assert eop_clm.mask["CLM_TEST"].dtype == bool
    assert eop_clm.data["CLP_TEST"].dtype == np.float32

    # Compare mean cloud coverage with provided reference (downscaling makes
    # exact equality of mono features impossible, hence the tolerance).
    assert np.mean(eop_clm.mask["CLM_TEST"]) == pytest.approx(np.mean(eop_clm.mask["CLM_S2C"]), abs=0.01)
    assert np.mean(eop_clm.data["CLP_TEST"]) == pytest.approx(np.mean(eop_clm.data["CLP_S2C"]), abs=0.01)

    # Check if most of the same timestamps are flagged as cloudless.
    cloudless = np.mean(eop_clm.mask["CLM_TEST"], axis=(1, 2, 3)) == 0
    assert np.mean(cloudless == eop_clm.label["IS_CLOUDLESS"][:, 0]) > 0.94

    # Check multi-temporal results and final mask against stored references.
    assert_array_equal(eop_clm.data["CLP_MULTI_TEST"], test_eopatch.data["CLP_MULTI"])
    assert_array_equal(eop_clm.mask["CLM_MULTI_TEST"], test_eopatch.mask["CLM_MULTI"])
    assert_array_equal(eop_clm.mask["CLM_INTERSSIM_TEST"], test_eopatch.mask["CLM_INTERSSIM"])
|
6,438 | 66474b8cdca9a4aa48b8dc710d161a3a16495aed | import numpy as np
count = 0  # attempt counter (unused below: score_game tracks its own counts)
number = np.random.randint(1, 101)  # secret number in [1, 100] (unused below)
print("Загадано число от 1 до 100")
def game_core_v3(number):
    """Binary search for the secret number.

    Keeps a [lo, hi] range of still-possible values, always guesses its
    midpoint, and halves the range after each wrong guess.  The original
    slice-based version kept the already-rejected guess inside the
    candidate list on the "less than" branch; tracking plain bounds fixes
    that and is easier to reason about.

    Args:
        number: the secret number, assumed to be in [1, 100].

    Returns:
        The number of attempts taken to guess *number*.
    """
    count = 1
    lo, hi = 1, 100
    predict = (lo + hi) // 2
    while predict != number:
        count += 1
        if predict > number:
            hi = predict - 1  # secret is strictly below the guess
        else:
            lo = predict + 1  # secret is strictly above the guess
        predict = (lo + hi) // 2
    return count
def score_game(game_core):
    """Run the game 1000 times to measure how fast the algorithm guesses.

    Seeds NumPy's RNG so the experiment is reproducible, then averages the
    attempt counts over 1000 random secret numbers.
    """
    np.random.seed(1)  # fixed RANDOM SEED so the experiment is reproducible
    secrets = np.random.randint(1, 101, size=(1000))
    attempts = [game_core(secret) for secret in secrets]
    score = int(np.mean(attempts))
    print(f"Ваш алгоритм угадывает число в среднем за {score} попыток")
    return (score)
# Run the benchmark with the binary-search strategy.
score_game(game_core_v3)
6,439 | 5a9e0b220d2c94aea7e3d67338771cf48c3aec8f | import os
import io
import yaml
from collections import OrderedDict
from rich.console import Console
from malwarebazaar.platform import get_config_path, get_config_dir
class Config(OrderedDict):
    """Singleton wrapper around the on-disk YAML configuration file."""

    instance = None  # cached singleton, populated by get_instance()

    def __init__(self):
        """Load the YAML config; exits the process when it is missing/empty."""
        ec = Console(stderr=True, style="bold red")
        Config.ensure_path(ec)
        config_file = get_config_path()
        if not os.path.exists(config_file) or os.path.getsize(config_file) == 0:
            ec.print("Config does not exist, please run the init command.")
            exit(-1)
        with io.open(config_file, "r") as handle:
            # NOTE(review): yaml.Loader can instantiate arbitrary Python
            # objects; the file is local and user-owned, but SafeLoader
            # would be preferable if the format allows it -- confirm.
            config_data = yaml.load(handle.read(), Loader=yaml.Loader)
        super().__init__(**config_data)

    @staticmethod
    def get_instance():
        """Return the cached Config, creating and caching it on first use.

        The original never assigned Config.instance, so every call re-read
        the file; caching restores the intended singleton behavior.
        """
        if not Config.instance:
            Config.instance = Config()
        return Config.instance

    @staticmethod
    def ensure_path(ec: Console = None):
        """Create the config directory if missing; abort if it is a file."""
        if ec is None:
            # Build the console lazily: a Console(...) default argument
            # would be evaluated once at import time and shared forever.
            ec = Console(stderr=True, style="bold red")
        config_dir = get_config_dir()
        if not os.path.exists(config_dir):
            os.mkdir(config_dir)
        if not os.path.isdir(config_dir):
            ec.print(f"{config_dir} should be a dir, but is a file.")
            exit(-1)

    @staticmethod
    def init_config(key: str):
        """Write a fresh config with the API key and default CSV columns.

        Returns True on success; raises IOError when nothing was written.
        """
        Config.ensure_path()
        with io.open(get_config_path(), "w") as handle:
            # 'written' (not 'bytes') -- avoid shadowing the builtin.
            written = handle.write(yaml.dump(
                {
                    "api_key": key,
                    "csv_columns": {
                        "md5": "md5_hash",
                        "sha1": "sha1_hash",
                        "sha256": "sha256_hash",
                        "imphash": "imphash",
                        "signature": "signature",
                        "tags": "tags"
                    }
                },
                Dumper=yaml.Dumper
            ))
        if written <= 0:
            raise IOError("Writing to config file failed.")
        return True
|
6,440 | c955057d7f8d5289898ecb96a290f5a7d241b787 | import pandas as pd
import matplotlib.pyplot as plt
import math
import seaborn as sns
import numpy as np
# Load the SUV purchase dataset from disk.
suv_data=pd.read_csv("F:/Development/Machine Learning/suv-data/suv_data.csv")
print(suv_data.head(10))
print("the no of passengers in the list is"+str(len(suv_data.index)))
# Exploratory plots: class balance, gender split, age/salary distributions.
sns.countplot(x="Purchased",data=suv_data)
sns.countplot(x="Purchased",hue="Gender",data=suv_data)
suv_data['Age'].plot.hist()
suv_data.info()
suv_data['EstimatedSalary'].plot.hist(bins=50,figsize=(10,5))
# Missing-value inspection.
print(suv_data.isnull())
print(suv_data.isnull().sum())
sns.heatmap(suv_data.isnull(),yticklabels=False,cmap="viridis")
plt.show()
sns.boxplot(x="Gender",y="Age",data=suv_data)
plt.show()
# Drop the identifier column and one-hot encode Gender; drop_first=True
# avoids the dummy-variable trap (a single 0/1 'Male' column remains).
suv_data.drop("User ID",axis=1,inplace=True)
suv_data.columns
suv_data.head(10)
Gen=pd.get_dummies(suv_data['Gender'],drop_first=True)
print(Gen.head(5))
suv_data=pd.concat([suv_data,Gen],axis=1)
print(suv_data.head(5))
suv_data.drop("Gender",axis=1,inplace=True)
print(suv_data.head(10))
# Features: Age, EstimatedSalary, Male (cols 0, 1, 3); target: Purchased (col 2).
X=suv_data.iloc[:,[0,1,3]].values
y=suv_data.iloc[:,2].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
# Standardize features: fit the scaler on the train split only (no leakage).
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
# Fit logistic regression and report classification metrics on the test split.
from sklearn.linear_model import LogisticRegression
logmodel=LogisticRegression()
logmodel.fit(X_train, y_train)
predictions=logmodel.predict(X_test)
print(predictions)
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test,predictions))
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,predictions)*100)
6,441 | 983473129bfd56138a615e0f5bdb1353e9c6d8af | import abc
import numpy as np
import ray
from tqdm.autonotebook import tqdm
from src.algorithm.info_theory.it_estimator import (CachingEstimator,
MPCachingEstimator)
from src.algorithm.utils import differ, independent_roll, union
class FeatureSelector(metaclass=abc.ABCMeta):
    """Base class for information-theoretic feature selection on trajectories.

    Scores feature subsets by the conditional mutual information between the
    reward (at sampled timesteps) and the complement of the subset, using a
    cached estimator (optionally parallelized through ray).

    Fixes relative to the original:
      * ``np.int`` (removed in NumPy 1.24) replaced by the builtin ``int``;
      * ``seed()`` stored ``np.random.seed(...)`` (always None), which broke
        the "decaying" sampling mode -- a dedicated RandomState is used now;
      * ``raise NotImplemented`` corrected to ``raise NotImplementedError``.
    """

    def __init__(self, itEstimator, trajectories, discrete=False, nproc=None):
        self.trajectories = trajectories
        self.nproc = nproc
        self.discrete = discrete

        if nproc != 1:
            # Multiprocessing estimator (ray-backed).
            self.itEstimator = MPCachingEstimator(
                itEstimator, self._get_arrays, nproc=nproc)
            self.two = ray.put(2)
        else:
            self.itEstimator = CachingEstimator(itEstimator, self._get_arrays)

        self.seed()
        self._setup()

    def _setup(self):
        """Derive feature ids, reward column and the stacked data tensor."""
        self.n_features = self.trajectories[0].shape[1] - 1
        self.id_reward = self.n_features
        self.set_reward = frozenset({self.id_reward})
        self.id_J_k = -1
        self.set_J_k = frozenset({self.id_J_k})
        self.idSet = frozenset(range(self.n_features))
        self.idSelected = None

        # Truncate all trajectories to the shortest one and stack along axis 2.
        self.tot_t = min(len(tr) for tr in self.trajectories)
        self.data_per_traj = np.dstack(
            [tr[:self.tot_t, :] for tr in self.trajectories])

        # Per-trajectory max |reward| and its global maximum.
        self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)
        self.Rmax = self.Rts.max()

        self.on_mu = None
        # Release the raw list -- data_per_traj now owns a copy of the data.
        self.trajectories = None

    def _prep_data(self, max_t, on_mu):
        """Build the time-shifted data tensor for horizons 0..max_t."""
        # Reuse the cached tensor when it already matches the request.
        if hasattr(self, 't_step_data') and max_t + 1 == self.t_step_data.shape[2] and on_mu == self.on_mu:
            return

        self.itEstimator.cache.clear()

        assert max_t < self.tot_t, f"max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})"
        self.on_mu = on_mu

        if on_mu:
            # On-policy mu: keep only the first row / reward column.
            stop_len = 1
            stop_ids = -1
        else:
            stop_len = self.tot_t - max_t
            stop_ids = slice(None)

        # Only the reward column is rolled backwards in time.
        # (np.int was removed in NumPy 1.24; the builtin is equivalent.)
        shift = np.zeros(self.n_features + 1, dtype=int)
        shift[self.id_reward] = -1

        self.t_step_data = []
        for t in range(max_t + 1):
            t_shift = t*shift
            t_step_eps = []
            for ep in self.data_per_traj.transpose(2, 0, 1):
                t_step_eps.append(independent_roll(
                    ep, t_shift)[: stop_len, stop_ids])

            self.t_step_data.append(np.vstack(t_step_eps))

        if self.t_step_data:
            self.t_step_data = np.dstack(self.t_step_data)
        else:
            self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))

    def _get_arrays(self, ids, t):
        """Return the data columns *ids* at horizon *t* for the estimator."""
        if not isinstance(ids, list):
            ids = list(ids)

        # memory efficiency: in on-mu mode the feature columns are read
        # straight from data_per_traj and only the reward is time-shifted.
        if self.on_mu:
            if t == self.t_step_data.shape[2]:
                ft, t = t, 0
            else:
                ft = 0

            feats = self.data_per_traj[ft, :-1, :].T
            rew = self.t_step_data[:, 0, t][:, None]
            data = np.hstack([feats, rew, self.J_k])

            return data[:, ids]

        return self.t_step_data[:, ids, t]

    def _generate_steplist(self, k, sampling, freq):
        """Pick k timesteps according to the chosen sampling strategy."""
        if sampling == "frequency":
            max_t = (k-1) * freq
            return np.arange(k*freq, step=freq), max_t

        if sampling == "decaying":
            # Sample without replacement, weighting early steps exponentially.
            p = np.exp(-np.arange(self.tot_t)/freq) / freq
            p = p/p.sum()
            steplist = np.sort(self.np_random.choice(
                self.tot_t, size=k, replace=False, p=p))
            return steplist, steplist[-1]

        if sampling == "variance":
            # Keep the k timesteps with the largest reward variance.
            variances = np.var(
                self.data_per_traj[:, self.id_reward, :], axis=1)
            most_var = np.argsort(variances)[::-1][:k]
            steplist = np.sort(most_var)
            return steplist, steplist[-1]

        raise NotImplementedError(f"unknown sampling strategy: {sampling!r}")

    def _get_weights_by_steplist(self, steplist, gamma, use_Rt):
        """Discount weights for the k sampled steps plus the J_k correction."""
        k = len(steplist)

        weights = np.ones(k + 1)
        weights[:-1] = gamma ** steplist
        # Residual mass assigned to the correction term.
        weights[k] = 1 - (1 - gamma) * weights[:-1].sum()

        if use_Rt:
            # Scale by per-step reward bounds instead of the global Rmax.
            Rsteps = self.Rts[steplist]
            weights[:-1] *= Rsteps
            weights[k] *= self.max_J_k

        return weights

    def _prep_J_k(self, k, gamma):
        """Discounted return of the tail (from step k on) per trajectory."""
        self.J_k = np.polyval(
            self.data_per_traj[k:, -1, :], gamma).reshape(-1, 1)
        self.max_J_k = np.abs(self.J_k).max()

    def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):
        """Reset state and prepare steplist, shifted data, J_k and weights."""
        self.reset()
        steplist, max_t = self._generate_steplist(k, sampling, freq)
        self.steplist = steplist
        self._prep_data(max_t, on_mu)
        self._prep_J_k(k, gamma)
        self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)

        return steplist

    def scoreFeatures(self, *args, **kwargs):
        """Dispatch feature scoring to the parallel or sequential backend."""
        if self.nproc != 1:
            return self._scoreFeatureParallel(*args, **kwargs)
        else:
            return self._scoreFeatureSequential(*args, **kwargs)

    def scoreSubset(self, *args, **kwargs):
        """Dispatch subset scoring to the parallel or sequential backend."""
        if self.nproc != 1:
            return self._scoreSubsetParallel(*args, **kwargs)
        else:
            return self._scoreSubsetSequential(*args, **kwargs)

    def computeError(self, residual=None, correction=None, use_Rt=True):
        """Combine residual and correction terms into the error bound."""
        if residual is None:
            residual = self.residual_error
        if correction is None:
            correction = self.correction_term

        # When per-step bounds were folded into the weights, Rmax is already
        # accounted for.
        if use_Rt:
            Rmax = 1
        else:
            Rmax = self.Rmax

        return 2**(1/2) * (Rmax * residual + correction)

    def reset(self):
        """Clear all per-run scoring state."""
        self.residual_error = 0
        self.correction_term = 0
        self.weights = None
        self.steplist = None
        self.idSelected = None

    def seed(self, seed=None):
        """(Re)initialize the private RNG used for step sampling.

        The original assigned the result of np.random.seed() -- which is
        always None -- so "decaying" sampling crashed on np_random.choice.
        """
        self.np_random = np.random.RandomState(seed)
        return

    def _scoreSubsetSequential(self, k, gamma, S, sampling="frequency", freq=1, use_Rt=True, on_mu=True, show_progress=True):
        """Score a fixed subset S sequentially; returns the error bound."""
        steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)

        S = frozenset(S)
        no_S = self.idSet.difference(S)

        score = np.zeros(k+1)
        for j, t in enumerate(steplist):
            score[j] = self.itEstimator.estimateCMI(
                self.set_reward, no_S, S, t=t)
        score[k] = self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k)

        # CMI estimates are clipped to [0, 2] before the sqrt.
        score = np.clip(score, 0, 2)
        score = np.sqrt(score)

        self.residual_error = score[:-1] @ self.weights[:-1]
        self.correction_term = score[-1] * self.weights[-1]

        return self.computeError(use_Rt=use_Rt)

    def _scoreSubsetParallel(self, k, gamma, S, sampling="frequency", freq=1, use_Rt=True, on_mu=True, show_progress=True):
        """Score a fixed subset S through the ray-backed estimator."""
        steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)

        S = frozenset(S)
        no_S = self.idSet.difference(S)

        res = []
        for t in steplist:
            res.append(self.itEstimator.estimateCMI(
                self.set_reward, no_S, S, t=t))
        res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))

        # Collect the ray futures in submission order.
        res = map(lambda x: ray.get(x), res)
        score = np.fromiter(res, np.float64)

        score = np.clip(score, 0, 2)
        score = np.sqrt(score)

        self.residual_error = score[:-1] @ self.weights[:-1]
        self.correction_term = score[-1] * self.weights[-1]

        return self.computeError(use_Rt=use_Rt)

    def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):
        """Score each candidate add/remove of a single feature (parallel)."""
        k = len(steplist)
        S = frozenset(self.idSelected)
        no_S = self.idSet.difference(self.idSelected)

        # Forward selection grows S; backward elimination shrinks it.
        if self.forward:
            shrink_S = no_S
            op_S, op_noS = union, differ
        else:
            shrink_S = S
            op_S, op_noS = differ, union

        list_ids = np.fromiter(shrink_S, dtype=int)

        res = []
        for i, id in enumerate(list_ids):
            id = frozenset({id})
            S_next = op_S(S, id)
            no_S_next = op_noS(no_S, id)

            if sum_cmi:
                target = id
            else:
                target = no_S_next

            for j, t in enumerate(steplist):
                res.append(self.itEstimator.estimateCMI(
                    self.set_reward, target, S_next, t=t))
            res.append(self.itEstimator.estimateCMI(self.set_J_k, target,
                                                    S_next, t=k))

        res = map(lambda x: ray.get(x), tqdm(
            res, leave=False, disable=not show_progress))
        # One column per candidate, k+1 rows (k steps + correction term).
        score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')

        score_mat = np.clip(score_mat, 0, 2)
        scores = np.sqrt(score_mat)

        cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])
        new_cond_entropy = self.weights[-1] * scores[-1, :]

        sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)

        return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[sorted_idx], score_mat[:, sorted_idx]

    def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):
        """Score each candidate add/remove of a single feature (sequential)."""
        k = len(steplist)
        S = frozenset(self.idSelected)
        no_S = self.idSet.difference(self.idSelected)

        if self.forward:
            shrink_S = no_S
            op_S, op_noS = union, differ
        else:
            shrink_S = S
            op_S, op_noS = differ, union

        list_ids = np.fromiter(shrink_S, dtype=int)
        score_mat = np.zeros((k+1, len(list_ids)))

        for i, id in enumerate(tqdm(list_ids, leave=False, disable=not show_progress)):
            id = frozenset({id})
            S_next = op_S(S, id)
            no_S_next = op_noS(no_S, id)

            if sum_cmi:
                target = id
            else:
                target = no_S_next

            for j, t in enumerate(steplist):
                score_mat[j, i] = self.itEstimator.estimateCMI(
                    self.set_reward, target, S_next, t=t)
            score_mat[k, i] = self.itEstimator.estimateCMI(
                self.set_J_k, target, S_next, t=k)

        score_mat = np.clip(score_mat, 0, 2)
        scores = np.sqrt(score_mat)

        cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])
        new_cond_entropy = self.weights[-1] * scores[-1, :]

        sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)

        return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[sorted_idx], score_mat[:, sorted_idx]

    @abc.abstractmethod
    def selectOnError(self, k, gamma, max_error, sampling="frequency", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):
        pass

    @abc.abstractmethod
    def selectNfeatures(self, n, k, gamma, sampling="frequency", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):
        pass

    @abc.abstractmethod
    def try_all(self, k, gamma, all_scores=False, max_n=None, sampling="frequency", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):
        pass
class Vehicle(object):
    """Registry of every Vehicle ever constructed (class-level)."""

    count_list = []  # all instances in creation order -- kept alive forever

    def __init__(self, registration_number):
        # Remember this vehicle's plate and add it to the shared registry.
        self.registration_number = registration_number
        registry = Vehicle.count_list
        registry.append(self)
        # Mirror the registry size onto a class attribute for quick access.
        Vehicle.count = len(registry)
6,443 | 23d4619527b5fce7fed0b0a66d834e26bb984129 | import hive
from ..bind import Instantiator as _Instantiator
from ..event import bind_info as event_bind_info
bind_infos = (event_bind_info,)
def build_scene_instantiator(i, ex, args, meta_args):
    """Hive builder: extend the bind environment with every enabled bind base."""
    bind_bases = tuple((b_i.environment_hive for b_i in bind_infos if b_i.is_enabled(meta_args)))

    # Update bind environment to use new bases
    environment_class = i.bind_meta_class.start_value
    i.bind_meta_class.start_value = environment_class.extend("SceneBindEnvironment", bases=tuple(bind_bases))


# Instantiator hive specialized for scenes, composed from the bind hives.
Instantiator = _Instantiator.extend("Instantiator", build_scene_instantiator,
                                    bases=tuple(b_i.bind_hive for b_i in bind_infos))
class SceneClass:
    """Hive-side scene wrapper: an entity registry plus spatial queries."""

    def __init__(self):
        # Maps external identifiers to spawned game objects.
        self._entities = {}
        self.scene = None

    def get_entity_id(self, identifier):
        """Look up a previously spawned entity by its identifier."""
        return self._entities[identifier]

    def get_position_absolute(self, entity):
        """World-space position of *entity* as a plain tuple."""
        return tuple(entity.worldPosition)

    def get_orientation_absolute(self, entity):
        """World-space orientation of *entity* as a quaternion tuple."""
        return tuple(entity.worldOrientation.to_quaternion())

    def get_position_relative(self, entity, other):
        """World-space position delta of *entity* relative to *other*."""
        return tuple(entity.worldPosition - other.worldPosition)

    def get_orientation_relative(self, entity, other):
        # NOTE(review): the original rotates against other.worldPosition
        # (a position, not an orientation) -- looks like a latent bug,
        # preserved verbatim here.
        return tuple(entity.worldOrientation.to_quaternion().rotation_difference(other.worldPosition.to_quaternion()))

    def spawn_entity(self, class_name, identifier):
        """Spawn *class_name* at the 'Empty' anchor and register it."""
        spawned = self.scene.addObject(class_name, 'Empty')
        self._entities[identifier] = spawned
        return spawned

    def get_scene(self):
        """Return the wrapped scene object."""
        return self.scene
def build_scene(cls, i, ex, args):
    """Hive builder: expose SceneClass methods as plugins on the Scene hive."""
    i.bge_scene = hive.property(cls, "scene")

    # Entity lookup and spatial-query plugins, keyed by dotted identifiers.
    ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier="entity.get")
    ex.get_position_absolute = hive.plugin(cls.get_position_absolute, identifier="entity.position.absolute.get")
    ex.get_position_relative = hive.plugin(cls.get_position_relative, identifier="entity.position.relative.get")
    ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute, identifier="entity.orientation.absolute.get")
    ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative, identifier="entity.orientation.relative.get")
    ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier="entity.spawn")
    ex.get_scene = hive.plugin(cls.get_scene, identifier="entity.get_current")

    import dragonfly
    ex.on_tick = dragonfly.event.Tick()

    def f(self):
        # NOTE(review): debug/demo tick handler -- prints and spawns a cube
        # exactly once (guarded by self.a); presumably leftover test code,
        # confirm before shipping.
        print("I")
        if not hasattr(self, 'a'):
            self.a = 1
            self.spawn_entity.plugin()("Cube", "c1")

    i.mod_tick = hive.modifier(f)
    hive.trigger(ex.on_tick, i.mod_tick)


# The Scene hive: a SceneClass instance wired up through build_scene.
Scene = hive.hive("Scene", build_scene, builder_cls=SceneClass)
|
6,444 | 61135a10adefd6ba8ffd63e997fa91ce9c78de06 | from setuptools import setup
# Package metadata and install configuration for dragonfab.
setup(name = "dragonfab",
      version = "1.3.0",
      description = "Fabric support",
      author = "Joel Pitt",
      author_email = "joel@joelpitt.com",
      url = "https://github.com/ferrouswheel/dragonfab",
      # pip>=1.4 and wheel are required for wheel-based installs.
      install_requires = ['fabric', 'pip>=1.4', 'wheel'],
      packages = ['dragonfab'],
      )
|
6,445 | 3cca7408eb88f91f295c581c29d3d1e95298f337 | r""" 测试dispatch
>>> from url_router.map import Map
>>> from url_router.rule import Rule
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/foo', endpoint='foo'),
... Rule('/bar/', endpoint='bar'),
... Rule('/any/<name>', endpoint='any'),
... Rule('/string/<string:name>', endpoint='string'),
... Rule('/integer/<int:name>', endpoint='integer'),
... Rule('/float/<float:name>', endpoint='float')
... ])
>>> adapter = m.bind('example.org', '/')
>>> def view_func(endpoint, args):
... print(f'endpoint:{endpoint}\nargs:{args}')
... return str(endpoint)
...
>>> adapter.dispatch(view_func, '/')
endpoint:index
args:{}
'index'
>>> adapter.dispatch(view_func, '/any/value')
endpoint:any
args:{'name': 'value'}
'any'
>>> adapter.dispatch(view_func, '/missing')
Traceback (most recent call last):
...
url_router.exceptions.NotFound
"""
if __name__ == "__main__":
    # Run the module docstring's doctests when executed directly.
    import doctest
    doctest.testmod()
|
6,446 | 47025a30d79341ff0819fe87638e35960a5fc87d | from typing import Union
from django.db.models import Q, Value
from django.db.models.functions import Lower, Replace, Trim
from .normalization import (
normalize_doi,
normalize_funkcja_autora,
normalize_grupa_pracownicza,
normalize_isbn,
normalize_kod_dyscypliny,
normalize_nazwa_dyscypliny,
normalize_nazwa_jednostki,
normalize_nazwa_wydawcy,
normalize_public_uri,
normalize_tytul_naukowy,
normalize_tytul_publikacji,
normalize_tytul_zrodla,
normalize_wymiar_etatu,
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.search import TrigramSimilarity
from bpp.models import (
Autor,
Autor_Jednostka,
Dyscyplina_Naukowa,
Funkcja_Autora,
Grupa_Pracownicza,
Jednostka,
Rekord,
Tytul,
Wydawca,
Wydawnictwo_Ciagle,
Wydawnictwo_Zwarte,
Wydzial,
Wymiar_Etatu,
Zrodlo,
)
from bpp.util import fail_if_seq_scan
def matchuj_wydzial(nazwa):
    """Find a Wydzial by exact (case-insensitive) name; None when absent."""
    try:
        return Wydzial.objects.get(nazwa__iexact=nazwa.strip())
    except Wydzial.DoesNotExist:
        return None
def matchuj_tytul(tytul: str, create_if_not_exist=False) -> Tytul:
    """
    Match an academic title given either its full name or abbreviation.

    Falls back to a lookup by normalized abbreviation when the full-name
    lookup fails or is ambiguous; that fallback may still raise.

    NOTE(review): the ``create_if_not_exist`` parameter is never used in
    the body -- confirm whether creation was meant to be implemented.
    """
    try:
        return Tytul.objects.get(nazwa__iexact=tytul)
    except (Tytul.DoesNotExist, Tytul.MultipleObjectsReturned):
        return Tytul.objects.get(skrot=normalize_tytul_naukowy(tytul))
def matchuj_funkcja_autora(funkcja_autora: str) -> Funkcja_Autora:
    """Match an author-function record by full name or abbreviation."""
    needle = normalize_funkcja_autora(funkcja_autora)
    lookup = Q(nazwa__iexact=needle) | Q(skrot__iexact=needle)
    return Funkcja_Autora.objects.get(lookup)
def matchuj_grupa_pracownicza(grupa_pracownicza: str) -> Grupa_Pracownicza:
    """Match an employee-group record by normalized, case-insensitive name."""
    needle = normalize_grupa_pracownicza(grupa_pracownicza)
    return Grupa_Pracownicza.objects.get(nazwa__iexact=needle)
def matchuj_wymiar_etatu(wymiar_etatu: str) -> Wymiar_Etatu:
    """Match a working-time record by normalized, case-insensitive name."""
    needle = normalize_wymiar_etatu(wymiar_etatu)
    return Wymiar_Etatu.objects.get(nazwa__iexact=needle)
def matchuj_jednostke(nazwa, wydzial=None):
    """
    Match a Jednostka (unit) by exact name/abbreviation with fuzzy fallbacks.

    Fallback order: exact match -> (on miss) trailing-dot stripped prefix
    match -> (on ambiguity) disambiguate by *wydzial* name when given.
    May raise DoesNotExist/MultipleObjectsReturned if nothing resolves.
    """
    nazwa = normalize_nazwa_jednostki(nazwa)
    try:
        return Jednostka.objects.get(Q(nazwa__iexact=nazwa) | Q(skrot__iexact=nazwa))
    except Jednostka.DoesNotExist:
        # A trailing dot usually marks an abbreviated form: retry as prefix.
        if nazwa.endswith("."):
            nazwa = nazwa[:-1].strip()
        try:
            return Jednostka.objects.get(
                Q(nazwa__istartswith=nazwa) | Q(skrot__istartswith=nazwa)
            )
        except Jednostka.MultipleObjectsReturned as e:
            # Several prefix matches: narrow down by wydzial, if given.
            if wydzial is None:
                raise e
            return Jednostka.objects.get(
                Q(nazwa__istartswith=nazwa) | Q(skrot__istartswith=nazwa),
                Q(wydzial__nazwa__iexact=wydzial),
            )
    except Jednostka.MultipleObjectsReturned as e:
        # Several exact matches: narrow down by wydzial, if given.
        if wydzial is None:
            raise e
        return Jednostka.objects.get(
            Q(nazwa__iexact=nazwa) | Q(skrot__iexact=nazwa),
            Q(wydzial__nazwa__iexact=wydzial),
        )
def matchuj_autora(
    imiona: str,
    nazwisko: str,
    jednostka: Union[Jednostka, None] = None,
    bpp_id: Union[int, None] = None,
    pbn_uid_id: Union[str, None] = None,
    system_kadrowy_id: Union[int, None] = None,
    pbn_id: Union[int, None] = None,
    orcid: Union[str, None] = None,
    tytul_str: Union[Tytul, None] = None,
):
    """
    Match an author record by a cascade of increasingly fuzzy criteria.

    Identifier lookups come first (bpp_id, ORCID, PBN UID, HR-system id,
    PBN id); each silently falls through on a miss.  After that, name-based
    lookups try (imiona, nazwisko/poprzednie_nazwiska), optionally narrowed
    by title and then by the author's current or historical unit.

    Returns the matched Autor or None.
    """
    # 1. Direct primary-key lookup.
    if bpp_id is not None:
        try:
            return Autor.objects.get(pk=bpp_id)
        except Autor.DoesNotExist:
            pass
    # 2. ORCID (case-insensitive, trimmed).
    if orcid:
        try:
            return Autor.objects.get(orcid__iexact=orcid.strip())
        except Autor.DoesNotExist:
            pass
    if pbn_uid_id is not None and pbn_uid_id.strip() != "":
        # There may be more than one author with this pbn_uid_id.
        _qset = Autor.objects.filter(pbn_uid_id=pbn_uid_id)
        if _qset.exists():
            return _qset.first()
    # 3. HR-system id -- discard values that are not integer-like.
    if system_kadrowy_id is not None:
        try:
            int(system_kadrowy_id)
        except (TypeError, ValueError):
            system_kadrowy_id = None
    if system_kadrowy_id is not None:
        try:
            return Autor.objects.get(system_kadrowy_id=system_kadrowy_id)
        except Autor.DoesNotExist:
            pass
    # 4. PBN id -- coerce strings, discard non-numeric values.
    if pbn_id is not None:
        if isinstance(pbn_id, str):
            pbn_id = pbn_id.strip()
        try:
            pbn_id = int(pbn_id)
        except (TypeError, ValueError):
            pbn_id = None
    if pbn_id is not None:
        try:
            return Autor.objects.get(pbn_id=pbn_id)
        except Autor.DoesNotExist:
            pass
    # 5. Name-based lookup; also matches against previous surnames.
    queries = [
        Q(
            Q(nazwisko__iexact=nazwisko.strip())
            | Q(poprzednie_nazwiska__icontains=nazwisko.strip()),
            imiona__iexact=imiona.strip(),
        )
    ]
    if tytul_str:
        # A second, stricter variant also requiring the academic title.
        queries.append(queries[0] & Q(tytul__skrot=tytul_str))
    for qry in queries:
        try:
            return Autor.objects.get(qry)
        except (Autor.DoesNotExist, Autor.MultipleObjectsReturned):
            pass
    # NOTE(review): 'qry' below is the leaked loop variable (the last query
    # tried above) -- this works in Python but is fragile; confirm intent.
    try:
        return Autor.objects.get(qry & Q(aktualna_jednostka=jednostka))
    except (Autor.MultipleObjectsReturned, Autor.DoesNotExist):
        pass
    # We are here, so matching on current unit + name + title did not work
    # out.  Try another strategy: if a unit was given, search among the
    # authors ever affiliated with it -- the unit does not have to be the
    # author's *current* unit.
    if jednostka:
        queries = [
            Q(
                Q(autor__nazwisko__iexact=nazwisko.strip())
                | Q(autor__poprzednie_nazwiska__icontains=nazwisko.strip()),
                autor__imiona__iexact=imiona.strip(),
            )
        ]
        if tytul_str:
            queries.append(queries[0] & Q(autor__tytul__skrot=tytul_str))
        for qry in queries:
            try:
                return jednostka.autor_jednostka_set.get(qry).autor
            except (
                Autor_Jednostka.MultipleObjectsReturned,
                Autor_Jednostka.DoesNotExist,
            ):
                pass
    return None
def matchuj_zrodlo(
    s: Union[str, None],
    issn: Union[str, None] = None,
    e_issn: Union[str, None] = None,
    alt_nazwa=None,
) -> Union[None, Zrodlo]:
    """
    Match a Zrodlo (journal) by ISSN, e-ISSN, then name/abbreviation.

    Name matching is tried for both *s* and *alt_nazwa*; a trailing dot
    triggers a prefix-match fallback.  Returns None when nothing matches
    unambiguously.
    """
    # An empty/None name means there is nothing to match on.
    if s is None or str(s) == "":
        return
    # Identifier lookups first; ambiguity falls through to name matching.
    if issn is not None:
        try:
            return Zrodlo.objects.get(issn=issn)
        except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):
            pass
    if e_issn is not None:
        try:
            return Zrodlo.objects.get(e_issn=e_issn)
        except (Zrodlo.DoesNotExist, Zrodlo.MultipleObjectsReturned):
            pass
    for elem in s, alt_nazwa:
        if elem is None:
            continue
        elem = normalize_tytul_zrodla(elem)
        try:
            return Zrodlo.objects.get(Q(nazwa__iexact=elem) | Q(skrot__iexact=elem))
        except Zrodlo.MultipleObjectsReturned:
            pass
        except Zrodlo.DoesNotExist:
            # A trailing dot usually marks an abbreviation: retry as prefix.
            if elem.endswith("."):
                try:
                    return Zrodlo.objects.get(
                        Q(nazwa__istartswith=elem[:-1])
                        | Q(skrot__istartswith=elem[:-1])
                    )
                except Zrodlo.DoesNotExist:
                    pass
                except Zrodlo.MultipleObjectsReturned:
                    pass
def matchuj_dyscypline(kod, nazwa):
    """Find a scientific discipline by normalized name, then by code.

    Returns None when neither lookup yields exactly one record.
    """
    try:
        return Dyscyplina_Naukowa.objects.get(nazwa=normalize_nazwa_dyscypliny(nazwa))
    except (Dyscyplina_Naukowa.DoesNotExist,
            Dyscyplina_Naukowa.MultipleObjectsReturned):
        pass
    try:
        return Dyscyplina_Naukowa.objects.get(kod=normalize_kod_dyscypliny(kod))
    except (Dyscyplina_Naukowa.DoesNotExist,
            Dyscyplina_Naukowa.MultipleObjectsReturned):
        return None
def matchuj_wydawce(nazwa, pbn_uid_id=None, similarity=0.9):
    """Match a publisher by exact name, PBN UID, then trigram similarity.

    Args:
        nazwa: raw publisher name; normalized before matching.
        pbn_uid_id: optional PBN identifier used as a secondary key.
        similarity: minimum trigram similarity for the fuzzy fallback.

    Returns:
        The matched Wydawca, or None when nothing matches unambiguously.
    """
    nazwa = normalize_nazwa_wydawcy(nazwa)
    try:
        # Only canonical records (not aliases) may match by exact name.
        return Wydawca.objects.get(nazwa=nazwa, alias_dla_id=None)
    except Wydawca.DoesNotExist:
        pass

    if pbn_uid_id is not None:
        try:
            return Wydawca.objects.get(pbn_uid_id=pbn_uid_id)
        except Wydawca.DoesNotExist:
            pass

    # Fuzzy fallback: accept only an unambiguous (single) candidate above
    # the threshold.  Evaluating a 2-element slice once replaces the
    # original's two COUNT queries plus a third query for .first().
    candidates = list(
        Wydawca.objects.annotate(similarity=TrigramSimilarity("nazwa", nazwa))
        .filter(similarity__gte=similarity)
        .order_by("-similarity")[:2]
    )
    if len(candidates) == 1:
        return candidates[0]
    return None
# Minimum title lengths required before a title-based match is attempted.
TITLE_LIMIT_SINGLE_WORD = 15
TITLE_LIMIT_MANY_WORDS = 25

# Trigram-similarity thresholds used by the publication matchers.
MATCH_SIMILARITY_THRESHOLD = 0.95
MATCH_SIMILARITY_THRESHOLD_LOW = 0.90
MATCH_SIMILARITY_THRESHOLD_VERY_LOW = 0.80

# Normalized title as stored in the database -- the " [online]" marker
# removed, double spaces collapsed to single ones, the whole string trimmed.
normalized_db_title = Trim(
    Replace(
        Replace(Lower("tytul_oryginalny"), Value(" [online]"), Value("")),
        Value("  "),
        Value(" "),
    )
)

# Normalized source abbreviation -- spaces, dashes and dots removed,
# lower-cased and trimmed.
normalized_db_zrodlo_skrot = Trim(
    Replace(
        Replace(
            Replace(Lower("skrot"), Value(" "), Value("")),
            Value("-"),
            Value(""),
        ),
        Value("."),
        Value(""),
    )
)


def normalize_zrodlo_skrot_for_db_lookup(s):
    """Python-side mirror of normalized_db_zrodlo_skrot for query values."""
    return s.lower().replace(" ", "").strip().replace("-", "").replace(".", "")


# Normalized source name for lookups -- lower-cased with spaces removed.
# NOTE(review): the original comment said "everything except dots removed",
# but the expression only strips spaces -- the code is taken as the truth.
normalized_db_zrodlo_nazwa = Trim(
    Replace(Lower("nazwa"), Value(" "), Value("")),
)


def normalize_zrodlo_nazwa_for_db_lookup(s):
    """Python-side mirror of normalized_db_zrodlo_nazwa for query values."""
    return s.lower().replace(" ", "").strip()


# Normalized ISBN -- lower-cased with dashes removed, trimmed.
normalized_db_isbn = Trim(Replace(Lower("isbn"), Value("-"), Value("")))
def matchuj_publikacje(
    klass: [Wydawnictwo_Zwarte, Wydawnictwo_Ciagle, Rekord],
    title,
    year,
    doi=None,
    public_uri=None,
    isbn=None,
    zrodlo=None,
    DEBUG_MATCHOWANIE=False,
    isbn_matchuj_tylko_nadrzedne=True,
    doi_matchuj_tylko_nadrzedne=True,
):
    """Try to match an existing publication record of type *klass*.

    Strategies are attempted in decreasing order of reliability:
    1. DOI prefix + year, confirmed by trigram title similarity
       (very low threshold),
    2. exact title prefix + year + source (*zrodlo*),
    3. ISBN / e-ISBN, confirmed by title similarity (very low threshold),
    4. public URI (www / public_www; strict threshold),
    5. title prefix + year (strict threshold),
    6. title similarity within the year only (low threshold).

    Returns the matched record, or ``None`` when nothing matched.
    Title-based strategies are skipped for very short titles
    (see TITLE_LIMIT_*).  ``DEBUG_MATCHOWANIE`` is forwarded to
    fail_if_seq_scan, which guards against unindexed queries.
    """
    if doi is not None:
        doi = normalize_doi(doi)
        if doi:
            zapytanie = klass.objects.filter(doi__istartswith=doi, rok=year)
            if doi_matchuj_tylko_nadrzedne:
                # Chapters share the parent's DOI; optionally restrict the
                # search to records that are not children of another record.
                if hasattr(klass, "wydawnictwo_nadrzedne_id"):
                    zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)
            res = zapytanie.annotate(
                podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
            ).order_by("-podobienstwo")[:2]
            fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
            if res.exists():
                if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:
                    return res.first()

    title = normalize_tytul_publikacji(title)
    title_has_spaces = False
    if title is not None:
        title_has_spaces = title.find(" ") > 0

    # Title-based matching only for reasonably long titles, to avoid
    # accidental matches on short, generic ones.
    if title is not None and (
        (not title_has_spaces and len(title) >= TITLE_LIMIT_SINGLE_WORD)
        or (title_has_spaces and len(title) >= TITLE_LIMIT_MANY_WORDS)
    ):
        if zrodlo is not None and hasattr(klass, "zrodlo"):
            try:
                return klass.objects.get(
                    tytul_oryginalny__istartswith=title, rok=year, zrodlo=zrodlo
                )
            except klass.DoesNotExist:
                pass
            except klass.MultipleObjectsReturned:
                print(
                    f"PPP ZZZ MultipleObjectsReturned dla title={title} rok={year} zrodlo={zrodlo}"
                )

    if (
        isbn is not None
        and isbn != ""
        and hasattr(klass, "isbn")
        and hasattr(klass, "e_isbn")
    ):
        ni = normalize_isbn(isbn)
        zapytanie = klass.objects.exclude(isbn=None, e_isbn=None).exclude(
            isbn="", e_isbn=""
        )
        #
        # Note: in BPP the same ISBN is routinely entered both on chapters
        # and on their parent publications.  When
        # isbn_matchuj_tylko_nadrzedne is set, search only among records
        # that act as parent publications for others, so the book is
        # matched rather than one of its chapters.
        #
        if isbn_matchuj_tylko_nadrzedne:
            zapytanie = zapytanie.filter(wydawnictwo_nadrzedne_id=None)
            if klass == Rekord:
                # Rekord keys are (content_type_pk, object_pk) tuples.
                zapytanie = zapytanie.filter(
                    pk__in=[
                        (ContentType.objects.get_for_model(Wydawnictwo_Zwarte).pk, x)
                        for x in Wydawnictwo_Zwarte.objects.wydawnictwa_nadrzedne_dla_innych()
                    ]
                )
            elif klass == Wydawnictwo_Zwarte:
                zapytanie = zapytanie.filter(
                    pk__in=Wydawnictwo_Zwarte.objects.wydawnictwa_nadrzedne_dla_innych()
                )
            else:
                raise NotImplementedError(
                    "Matchowanie po ISBN dla czegoś innego niż wydawnictwo zwarte nie opracowane"
                )
        res = (
            zapytanie.filter(Q(isbn=ni) | Q(e_isbn=ni))
            .annotate(
                podobienstwo=TrigramSimilarity(
                    normalized_db_title,
                    title.lower(),
                )
            )
            .order_by("-podobienstwo")[:2]
        )
        fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
        if res.exists():
            if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_VERY_LOW:
                return res.first()

    public_uri = normalize_public_uri(public_uri)
    if public_uri:
        res = (
            klass.objects.filter(Q(www=public_uri) | Q(public_www=public_uri))
            .annotate(
                podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
            )
            .order_by("-podobienstwo")[:2]
        )
        fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
        if res.exists():
            if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:
                return res.first()

    if title is not None and (
        (not title_has_spaces and len(title) >= TITLE_LIMIT_SINGLE_WORD)
        or (title_has_spaces and len(title) >= TITLE_LIMIT_MANY_WORDS)
    ):
        res = (
            klass.objects.filter(tytul_oryginalny__istartswith=title, rok=year)
            .annotate(
                podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
            )
            .order_by("-podobienstwo")[:2]
        )
        fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
        if res.exists():
            if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD:
                return res.first()

        # Last resort: pure similarity within the same year, low threshold.
        res = (
            klass.objects.filter(rok=year)
            .annotate(
                podobienstwo=TrigramSimilarity(normalized_db_title, title.lower())
            )
            .order_by("-podobienstwo")[:2]
        )
        fail_if_seq_scan(res, DEBUG_MATCHOWANIE)
        if res.exists():
            if res.first().podobienstwo >= MATCH_SIMILARITY_THRESHOLD_LOW:
                return res.first()
|
6,447 | bd06030ace665a0686c894a863e5c779b6d0931c | # -*- coding: utf-8 -*-
"""Chatbot learning
학습시 생성된 vocab 딕셔너리 파일을 Cindy ui 실행시 경로를 동일시 해주어야 연결성 있는 문장을 생성해줍니다.
"""
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras import preprocessing
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import os
import re
from konlpy.tag import Okt
import pickle
import tensorflow as tf
tf.__version__
# 태그 단어
PAD = "<PADDING>" # 패딩
STA = "<START>" # 시작
END = "<END>" # 끝
OOV = "<OOV>" # 없는 단어(Out of Vocabulary)
# 태그 인덱스
PAD_INDEX = 0
STA_INDEX = 1
END_INDEX = 2
OOV_INDEX = 3
# 데이터 타입
ENCODER_INPUT = 0
DECODER_INPUT = 1
DECODER_TARGET = 2
# 한 문장에서 단어 시퀀스의 최대 개수
max_sequences = 30
# 임베딩 벡터 차원
embedding_dim = 100
# LSTM 히든레이어 차원
lstm_hidden_dim = 128
# 정규 표현식 필터
RE_FILTER = re.compile("[.,!?\"':;~()]")
# 챗봇 데이터 로드
chatbot_data = pd.read_csv('./seq2seq/ChatbotData_Cindy.csv', encoding='utf-8')
question, answer = list(chatbot_data['Q']), list(chatbot_data['A'])
chatbot_data.head()
len(chatbot_data['Q'].unique())
# 데이터 개수
len(question)
# 형태소분석 함수
def pos_tag(sentences):
    """Morphologically analyse each sentence with KoNLPy's Okt tagger.

    Punctuation matching RE_FILTER is stripped first; each sentence is
    returned as a single string of space-joined morphemes.
    """
    tagger = Okt()
    tagged = []
    for raw in sentences:
        # Drop punctuation, then rejoin the morpheme list with spaces.
        cleaned = re.sub(RE_FILTER, "", raw)
        tagged.append(" ".join(tagger.morphs(cleaned)))
    return tagged
# 형태소분석 수행
question = pos_tag(question)
answer = pos_tag(answer)
# 질문과 대답 문장들을 하나로 합침
sentences = []
sentences.extend(question)
sentences.extend(answer)
words = []
# 단어들의 배열 생성
for sentence in sentences:
for word in sentence.split():
words.append(word)
# 길이가 0인 단어는 삭제
words = [word for word in words if len(word) > 0]
# 중복된 단어 삭제
words = list(set(words))
# 제일 앞에 태그 단어 삽입
words[:0] = [PAD, STA, END, OOV]
# 단어 개수
len(words)
# 단어와 인덱스의 딕셔너리 생성
word_to_index = {word: index for index, word in enumerate(words)}
index_to_word = {index: word for index, word in enumerate(words)}
#word_index vocab 저장 - >
with open('./seq2seq/vocab_dict/word_to_index_final.pickle', 'wb') as f:
pickle.dump(word_to_index, f, pickle.HIGHEST_PROTOCOL)
with open('./seq2seq/vocab_dict/index_to_word_final.pickle', 'wb') as f:
pickle.dump(index_to_word, f, pickle.HIGHEST_PROTOCOL)
# 단어 -> 인덱스
# 문장을 인덱스로 변환하여 모델 입력으로 사용
print(dict(list(word_to_index.items())[:20]))
# 인덱스 -> 단어
# 모델의 예측 결과인 인덱스를 문장으로 변환시 사용
print(dict(list(index_to_word.items())[:20]))
# 문장을 인덱스로 변환
def convert_text_to_index(sentences, vocabulary, type):
    """Convert sentences to fixed-length index sequences.

    Behaviour depends on *type*: DECODER_INPUT sequences are prefixed
    with the START index; DECODER_TARGET sequences are suffixed with the
    END index (truncating if needed); all sequences are clipped/padded
    to ``max_sequences`` with the PAD index.  Unknown words map to OOV.
    Returns a 2-D numpy array of shape (len(sentences), max_sequences).
    """
    converted = []
    for sentence in sentences:
        indices = []
        # Decoder inputs start with the <START> tag.
        if type == DECODER_INPUT:
            indices.append(vocabulary[STA])
        # Look each token up, falling back to the OOV index.
        for token in sentence.split():
            indices.append(vocabulary.get(token, vocabulary[OOV]))
        if type == DECODER_TARGET:
            # Decoder targets always end with the <END> tag.
            if len(indices) >= max_sequences:
                indices = indices[:max_sequences - 1] + [vocabulary[END]]
            else:
                indices.append(vocabulary[END])
        elif len(indices) > max_sequences:
            indices = indices[:max_sequences]
        # Right-pad up to the fixed length.
        indices += [vocabulary[PAD]] * (max_sequences - len(indices))
        converted.append(indices)
    return np.asarray(converted)
# 인코더 입력 인덱스 변환
x_encoder = convert_text_to_index(question, word_to_index, ENCODER_INPUT)
# 첫 번째 인코더 입력 출력 (12시 땡)
x_encoder[0]
# 디코더 입력 인덱스 변환
x_decoder = convert_text_to_index(answer, word_to_index, DECODER_INPUT)
# 첫 번째 디코더 입력 출력 (START 하루 가 또 가네요)
x_decoder[0]
len(x_decoder[0])
# 디코더 목표 인덱스 변환
y_decoder = convert_text_to_index(answer, word_to_index, DECODER_TARGET)
# 첫 번째 디코더 목표 출력 (하루 가 또 가네요 END)
print(y_decoder[0])
# 원핫인코딩 초기화
one_hot_data = np.zeros((len(y_decoder), max_sequences, len(words)))
# 디코더 목표를 원핫인코딩으로 변환
# 학습시 입력은 인덱스이지만, 출력은 원핫인코딩 형식임
for i, sequence in enumerate(y_decoder):
for j, index in enumerate(sequence):
one_hot_data[i, j, index] = 1
# 디코더 목표 설정
y_decoder = one_hot_data
# 첫 번째 디코더 목표 출력
print(y_decoder[0])
#--------------------------------------------
# 훈련 모델 인코더 정의
#--------------------------------------------
# 입력 문장의 인덱스 시퀀스를 입력으로 받음
encoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
encoder_outputs = layers.Embedding(len(words), embedding_dim)(encoder_inputs)
# return_state가 True면 상태값 리턴
# LSTM은 state_h(hidden state)와 state_c(cell state) 2개의 상태 존재
encoder_outputs, state_h, state_c = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True)(encoder_outputs)
# 히든 상태와 셀 상태를 하나로 묶음
encoder_states = [state_h, state_c]
#--------------------------------------------
# 훈련 모델 디코더 정의
#--------------------------------------------
# 목표 문장의 인덱스 시퀀스를 입력으로 받음
decoder_inputs = layers.Input(shape=(None,))
# 임베딩 레이어
decoder_embedding = layers.Embedding(len(words), embedding_dim)
decoder_outputs = decoder_embedding(decoder_inputs)
# 인코더와 달리 return_sequences를 True로 설정하여 모든 타임 스텝 출력값 리턴
# 모든 타임 스텝의 출력값들을 다음 레이어의 Dense()로 처리하기 위함
decoder_lstm = layers.LSTM(lstm_hidden_dim,
dropout=0.1,
recurrent_dropout=0.5,
return_state=True,
return_sequences=True)
# initial_state를 인코더의 상태로 초기화
decoder_outputs, _, _ = decoder_lstm(decoder_outputs,
initial_state=encoder_states)
# 단어의 개수만큼 노드의 개수를 설정하여 원핫 형식으로 각 단어 인덱스를 출력
decoder_dense = layers.Dense(len(words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
#--------------------------------------------
# 훈련 모델 정의
#--------------------------------------------
# 입력과 출력으로 함수형 API 모델 생성
model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)
# 학습 방법 설정
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
#--------------------------------------------
# 예측 모델 인코더 정의
#--------------------------------------------
# 훈련 모델의 인코더 상태를 사용하여 예측 모델 인코더 설정
encoder_model = models.Model(encoder_inputs, encoder_states)
#--------------------------------------------
# 예측 모델 디코더 정의
#--------------------------------------------
# 예측시에는 훈련시와 달리 타임 스텝을 한 단계씩 수행
# 매번 이전 디코더 상태를 입력으로 받아서 새로 설정
decoder_state_input_h = layers.Input(shape=(lstm_hidden_dim,))
decoder_state_input_c = layers.Input(shape=(lstm_hidden_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# 임베딩 레이어
decoder_outputs = decoder_embedding(decoder_inputs)
# LSTM 레이어
decoder_outputs, state_h, state_c = decoder_lstm(decoder_outputs,
initial_state=decoder_states_inputs)
# 히든 상태와 셀 상태를 하나로 묶음
decoder_states = [state_h, state_c]
# Dense 레이어를 통해 원핫 형식으로 각 단어 인덱스를 출력
decoder_outputs = decoder_dense(decoder_outputs)
# 예측 모델 디코더 설정
decoder_model = models.Model([decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
# 인덱스를 문장으로 변환
def convert_index_to_text(indexs, vocabulary):
    """Convert a sequence of word indices back into a sentence string.

    Decoding stops at END_INDEX.  Each emitted word is followed by a
    single space, matching the space-joined tokenization used for
    training.  *vocabulary* maps index -> word.
    """
    sentence = ''
    for index in indexs:
        if index == END_INDEX:
            # Stop decoding at the end-of-sentence tag.
            break
        if vocabulary.get(index) is not None:
            sentence += vocabulary[index]
        else:
            # BUGFIX: the original called str.extend(), which does not
            # exist and raised AttributeError for any index missing from
            # the vocabulary.  Append the OOV token's text instead.
            sentence += vocabulary[OOV_INDEX]
        sentence += ' '
    return sentence
# len(x_decoder)
#
# len(y_decoder)
#model.summary()
#encoder_model.summary()
#decoder_model.summary()
from tqdm import tqdm
#에폭 반복
for epoch in range(10):
print('Total Epoch :', epoch + 1)
history = model.fit([x_encoder, x_decoder], y_decoder, epochs=100, batch_size=64, verbose=1)
model.summary()
# 정확도와 손실 출력
print('accuracy :', history.history['accuracy'][-1])
print('loss :', history.history['loss'][-1])
# 문장 예측 테스트
# (3 박 4일 놀러 가고 싶다) -> (여행 은 언제나 좋죠)
input_encoder = x_encoder[2].reshape(1, x_encoder[2].shape[0])
input_decoder = x_decoder[2].reshape(1, x_decoder[2].shape[0])
results = model.predict([input_encoder, input_decoder])
# 결과의 원핫인코딩 형식을 인덱스로 변환
# 1축을 기준으로 가장 높은 값의 위치를 구함
indexs = np.argmax(results[0], 1)
# 인덱스를 문장으로 변환
sentence = convert_index_to_text(indexs, index_to_word)
#모델 가중치 저장
model.save_weights('./seq2seq/seq2seq_model/seq2seq2_model_weights')
encoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_encoder_model_weights')
decoder_model.save_weights('./seq2seq/seq2seq_model/seq2seq2_decoder_model_weights')
# 예측을 위한 입력 생성
def make_predict_input(sentence):
    """Tokenize one raw sentence and convert it into an encoder input
    index sequence (shape (1, max_sequences))."""
    tagged = pos_tag([sentence])
    return convert_text_to_index(tagged, word_to_index, ENCODER_INPUT)
# 텍스트 생성
def generate_text(input_seq):
    """Greedy seq2seq decoding: run the encoder once, then feed the
    decoder one token at a time until END_INDEX or max_sequences tokens.

    Returns the generated sentence as a space-separated string.
    """
    # Run the input through the encoder to get the initial [h, c] states.
    states = encoder_model.predict(input_seq)

    # The target sequence starts as a single <START> token.
    target_seq = np.zeros((1, 1))
    target_seq[0, 0] = STA_INDEX

    # Collected output indices.
    indexs = []

    # One decoder time step per iteration.
    while 1:
        # First iteration uses the encoder states; later iterations reuse
        # the decoder states from the previous step.
        decoder_outputs, state_h, state_c = decoder_model.predict(
            [target_seq] + states)

        # Greedy argmax over the one-hot output distribution.
        index = np.argmax(decoder_outputs[0, 0, :])
        indexs.append(index)

        # Stop on the END tag or when the length cap is reached.
        if index == END_INDEX or len(indexs) >= max_sequences:
            break

        # Feed the predicted token back in as the next decoder input.
        target_seq = np.zeros((1, 1))
        target_seq[0, 0] = index

        # Carry the decoder state forward.
        states = [state_h, state_c]

    # Convert the collected indices back to text.
    sentence = convert_index_to_text(indexs, index_to_word)

    return sentence
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 같이 놀러가고 싶다')
input_seq
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
# 문장을 인덱스로 변환
input_seq = make_predict_input('3박4일 놀러가려고')
print(sentence)
# 예측 모델로 텍스트 생성
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('SNS 시간낭비인데')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('PPL 너무나 심하네')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가상화폐 망함')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스불')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가스비')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('가족 보고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간식 먹고 싶어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('간접흡연 싫어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('감기 기운 잇어')
sentence = generate_text(input_seq)
print(sentence)
input_seq = make_predict_input('내일 날씨 어떄?')
sentence = generate_text(input_seq)
print(sentence)
|
6,448 | 2500c3562819e4e85ce3cbc30e0ddf1b8437e0a2 | from django.contrib import admin
from lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint
# list_display - Show these fields for each model on the Admin site
# search_fields - Allow searching in these fields
# Register models for the Admin site
class ProgrammingEnvironmentAdmin(admin.ModelAdmin):
    """Admin options for ProgrammingEnvironment."""
    # Columns shown in the change list.
    list_display = ('environment_name', 'description')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()


class LanguageAdmin(admin.ModelAdmin):
    """Admin options for Language."""
    list_display = ('language_name', 'description', 'environment')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()


class LessonAdmin(admin.ModelAdmin):
    """Admin options for Lesson."""
    list_display = ('lesson_number', 'lesson_title', 'language', 'lesson_description')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()


class LessonHintAdmin(admin.ModelAdmin):
    """Admin options for LessonHint."""
    list_display = ('hint_title', 'lesson', 'hint_description')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()


# Make all four lesson models editable in the Django admin.
admin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Lesson, LessonAdmin)
admin.site.register(LessonHint, LessonHintAdmin)
TheBeatles = ['John', 'Paul', 'George', 'Ringo']
Wings = ['Paul']

# Print every Beatle who was never in Wings.
for Beatle in TheBeatles:
    if Beatle in Wings:
        continue
    # BUGFIX: Python 2 print statement -> print() function so the script
    # parses and runs on Python 3.
    print(Beatle)
|
6,450 | 8c8b5c1ff749a8563788b8d5be5332e273275be3 | # Standard library
# Third party library
# Local library
from warehouse.server import run_server
from warehouse.server.config import log
# Script entry point: configure logging before starting the server so
# startup messages are captured.
if __name__ == "__main__":
    log.initialize_logs()
    run_server()
|
6,451 | 64cf6b03fb68be8a23c6e87c8d68d0a42db0eb54 | #!/usr/bin/env python3
# coding=utf-8
# title :paramiko_sftp.py
# description :
# author :JackieTsui
# organization :pytoday.org
# date :1/16/18 9:22 PM
# email :jackietsui72@gmail.com
# notes :
# ==================================================
# Import the module needed to run the script
import paramiko
import os,sys,time
# Connection parameters for the jump host and the final target host.
jumpip = "192.168.10.1"
jumpuser = "jackie"
jumppass = "123456"
hostname = "192.168.10.2"
user = "root"
password = "654321"
tmpdir = "/tmp"
remotedir = "/data"
localpath = "/home/nginx_access.tar.gz"
tmppath = tmpdir + "/nginx_access.tar.gz"
remotepath = remotedir + "/nginx_access_hd.tar.gz"
port = 22
# Marker paramiko prints when a password prompt appears.
passinfo = "'s password: "

paramiko.util.log_to_file('syslogin.log')

# Step 1: upload the archive to the jump host over SFTP.
# NOTE(review): this uploads to *remotepath* but the scp below copies
# *tmppath* -- likely the upload should target tmppath; confirm intent.
t = paramiko.Transport((jumpip, port))
t.connect(username=jumpuser, password=jumppass)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(localpath, remotepath)
sftp.close()

# Step 2: open an interactive shell on the jump host and scp the file
# onward to the target host, answering the prompts manually.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# BUGFIX: the original never connected the SSH client, so invoke_shell()
# failed with "No existing session".
ssh.connect(jumpip, port=port, username=jumpuser, password=jumppass)
channel = ssh.invoke_shell()
channel.settimeout(10)

buff = ""
resp = ""
channel.send("scp " + tmppath + " " + user + "@" + hostname + ":" + remotepath + "\n")
# Read shell output until the password prompt shows up.
while not buff.endswith(passinfo):
    try:
        # BUGFIX: recv() returns bytes on Python 3; decode before mixing
        # with the str buffer (the original raised TypeError here).
        resp = channel.recv(9999).decode("utf-8", errors="replace")
    except Exception as e:
        print("Error info: " + str(e))
        channel.close()
        ssh.close()
        sys.exit()
    buff += resp
    # First-time host key confirmation.
    if not buff.find("yes/no") == -1:
        channel.send("yes\n")
        buff = ""
channel.send(password + "\n")

buff = ""
# Wait for the shell prompt; a second password prompt means auth failed.
while not buff.endswith("# "):
    resp = channel.recv(9999).decode("utf-8", errors="replace")
    if not resp.find(passinfo) == -1:
        print("Error info: Auth failed.")
        channel.close()
        ssh.close()
        sys.exit()
    buff += resp
print(buff)
channel.close()
ssh.close()
|
class Queue:
    """A simple FIFO queue backed by a singly linked list."""

    class Node:
        """One linked-list cell: a value plus the next pointer."""

        def __init__(self, data):
            self.data = data
            self.next = None

    def __init__(self):
        # Front and back of the list; both None when the queue is empty.
        self.head = None
        self.tail = None

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return self.head is None

    def peek(self):
        """Return the front element without removing it (None if empty)."""
        if self.head is None:
            return None
        return self.head.data

    def add(self, data):
        """Append *data* at the back of the queue."""
        cell = self.Node(data)
        if self.tail is not None:
            self.tail.next = cell
        self.tail = cell
        if self.head is None:
            self.head = cell

    def remove(self):
        """Pop and return the front element.

        Raises AttributeError when called on an empty queue, matching
        the original behaviour.
        """
        value = self.head.data
        self.head = self.head.next
        if self.head is None:
            self.tail = None
        return value
|
6,453 | 7626202d1e3ec7321addbb028be2275b882efda2 | """
Unit Tests for endpoints.py
"""
import unittest
import os # pylint: disable=unused-import
from mock import patch, call
from github_approval_checker.utils import util # pylint: disable=unused-import
from github_approval_checker.utils.github_handler import GithubHandler # pylint: disable=unused-import
from github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError # noqa pylint: disable=unused-import
from github_approval_checker.api import endpoints # pylint: disable=unused-import
class EndpointsUnitTests(unittest.TestCase):
"""
Test endpoints.py
"""
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
handler.get_statuses.return_value = [
{
"state": "error",
"context": "context2",
"target_url": "fake://status_target_2",
"description": "Status Check 2"
},
{
"state": "pending",
"context": "context3",
"target_url": "fake://status_target_3",
"description": "Status Check 3"
},
{
"state": "failure",
"context": "context1",
"target_url": "fake://status_target_1",
"description": "Status Check 1"
}
]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "approved",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
handler.post_status.side_effect = [
201,
400
]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with("repo-full-name", "review-commit-id")
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([
call(
"repo-full-name",
"review-commit-id",
"context2",
"fake://status_target_2",
"review-user-login",
"Status Check 2"
),
call(
"repo-full-name",
"review-commit-id",
"context1",
"fake://status_target_1",
"review-user-login",
"Status Check 1"
)
])
self.assertEqual(response, util.STATUS_OK)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_unapproved(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message': 'Review state is not approved'}, 200))
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
def test_post_pull_request_review_missing(
self,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError("config-error", "{'message': 'bad-config'}")
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_config(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
validate_config.side_effect = ConfigError(
'Config Validation Error',
({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)
)
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with("repo-full-name", None)
validate_config.assert_called_once_with("config-data")
self.assertEqual(
response,
(
{
'status': 'Config Validation Error',
'message': 'Bad config data'
},
500
)
)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_sign(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError("Error validating signature")
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(
response,
(
{
'status': 'Signature Validation Error',
'message': 'Error validating signature'
},
400
)
)
|
6,454 | 66fe0a3b84773ee1d4f91d8fde60f1fc5b3d7e4c | import pickle
import numpy as np
import torch
import time
import torchvision
import matplotlib
import matplotlib.pyplot as plt
def load_cifar_data(data_files):
    """Load and concatenate CIFAR batch files.

    Each file is a pickle with bytes keys b'data' (2-D array) and
    b'labels' (list of ints).  Returns (data, labels) with rows/labels
    stacked in file order.
    """
    data = []
    labels = []
    for path in data_files:
        with open(path, 'rb') as fh:
            batch = pickle.load(fh, encoding='bytes')
        batch_data = batch[b'data']
        batch_labels = batch[b'labels']
        if len(data) == 0:
            # First batch seeds the accumulators directly.
            data = batch_data
            labels = batch_labels
        else:
            data = np.vstack((data, batch_data))
            labels.extend(batch_labels)
    return data, labels
def unpickle(file):
    """Deserialize one pickle file (opened in binary mode, bytes-encoded
    strings as used by the CIFAR batch files) and return the object."""
    with open(file, 'rb') as fh:
        return pickle.load(fh, encoding='bytes')
def get_classwise_indices(labels):
    """Map each label to the list of positions where it occurs in *labels*,
    preserving first-seen label order and ascending positions."""
    by_label = {}
    for position, label in enumerate(labels):
        by_label.setdefault(label, []).append(position)
    return by_label
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
    """Gather samples per class from *data* using an index dictionary.

    For each (label, indices) pair, take the first ``count_per_class``
    indices when that argument is truthy (raising IndexError if a class
    has fewer), otherwise take all of them.  Each selected row is
    reshaped to *image_shape*.

    Returns (images, labels): images as an array stacked in iteration
    order, labels as an int32 column vector of shape (N, 1).
    """
    images = []
    labels = []
    for label, indices in indices_dict.items():
        if count_per_class:
            chosen = [indices[i] for i in range(count_per_class)]
        else:
            chosen = indices
        for idx in chosen:
            images.append(np.reshape(data[idx], image_shape))
            labels.append(label)
    return np.asarray(images), np.reshape(np.asarray(labels, dtype=np.int32), (-1, 1))
def create_data_loader(data_x, data_y, batch_size, shuffle):
    """Wrap paired samples in a TensorDataset and return a DataLoader.

    Rows of *data_x* / *data_y* are converted to float tensors and
    stacked; batching and shuffling are delegated to the DataLoader.
    """
    xs = torch.stack([torch.Tensor(row) for row in data_x])
    ys = torch.stack([torch.Tensor(row) for row in data_y])
    paired = torch.utils.data.TensorDataset(xs, ys)
    return torch.utils.data.DataLoader(dataset=paired, batch_size=batch_size, shuffle=shuffle)
def train_model(model, train_data_loader, test_data_loader, num_epochs=5, learning_rate=0.001, save_epochs=None, model_name="cnn"):
    """Train *model* with Adam + cross-entropy and track metrics per epoch.

    The loaders must yield (images, labels) batches where labels are
    rows whose argmax is the class index (one-hot style).

    Args:
        num_epochs: number of training epochs.
        learning_rate: Adam learning rate.
        save_epochs: optional collection of 1-based epoch numbers after
            which the full model is saved under ../data/models/.
        model_name: filename prefix used when saving checkpoints.

    Returns:
        (train_accuracies, test_accuracies, train_losses) -- one entry
        per epoch; each loss is the final batch loss of that epoch.
    """
    num_epochs = num_epochs
    learning_rate = learning_rate
    # Loss and optimizer.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    total_step = len(train_data_loader)
    train_times = []
    train_accuracies = []
    train_losses = []
    test_accuracies = []
    for epoch in range(num_epochs):
        start_time = time.time()
        for i, (images, labels) in enumerate(train_data_loader):
            # Forward pass; convert one-hot labels to class indices.
            outputs = model(images)
            target = torch.max(labels.long(), 1)[0]
            loss = criterion(outputs, target)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Periodic progress logging.
            if (i+1) % 200 == 0:
                print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                       .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
        end_time = time.time()
        # Checkpoint the full model at the requested epochs.
        if save_epochs and epoch + 1 in save_epochs:
            torch.save(model, "../data/models/" + model_name + "_" + str(epoch+1))
        train_times.append(end_time - start_time)
        train_losses.append(loss.item())
        print("Calculating train accuracy...")
        train_accuracies.append(get_accuracies(train_data_loader, model)[0])
        print("Calculating test accuracy...")
        test_accuracies.append(get_accuracies(test_data_loader, model)[0])

    print("Average training time per epoch:", np.mean(train_times))
    print("Total training time for all epochs:", np.sum(train_times))
    return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
    """Evaluate *model* on *data_loader*.

    Labels are one-hot style rows; predictions are the argmax of the
    model outputs.  Returns (accuracy_percent, seconds_elapsed) and
    prints the accuracy.
    """
    t0 = time.time()
    model.eval()
    hits = 0
    seen = 0
    with torch.no_grad():
        for batch_images, batch_labels in data_loader:
            targets = torch.max(batch_labels.long(), 1)[0]
            _, guesses = torch.max(model(batch_images).data, 1)
            seen += targets.size(0)
            hits += (guesses == targets).sum().item()
    accuracy = 100 * hits / seen
    elapsed = time.time() - t0
    print('Accuracy of the model: {} %'.format(accuracy))
    return accuracy, elapsed
def get_model_size(model, model_name):
    """Print and return the pickled size of *model* in megabytes.

    BUGFIX: the original pickled an undefined global ``net`` instead of
    the ``model`` argument and referenced ``sys`` without importing it,
    so it always raised NameError.
    """
    import sys  # local import: sys is not imported at module level

    payload = pickle.dumps(model)
    size_mb = sys.getsizeof(payload) / 1000000
    print('Size of ' + model_name + ' model: ', size_mb)
    return size_mb
def imshow(img, label_names, file_name="../data/sample_images"):
    """Display a torch image grid, label the y-axis with *label_names*,
    and save the figure to *file_name*."""
    npimg = img.numpy()
    npimg = npimg.astype(np.uint8)
    # Torch images are CHW; matplotlib expects HWC.
    npimg = np.transpose(npimg, (1, 2, 0))
    plt.clf()
    im = plt.imshow(npimg)
    # Spread one tick per label evenly down the grid's height.
    ylim = im.get_extent()[2]
    plt.yticks(np.arange(0, ylim + 1, ylim/len(label_names)), label_names)
    plt.savefig(file_name)
    plt.show()
def show_classwise_images(data, labels, label_names, k):
    """Show the first *k* images of every class in one grid (k per row).

    *labels* is expected to be a column vector (each entry indexed with
    [0]); *label_names* maps class index -> display name.
    """
    image_dict = {}
    # Collect up to k sample images per class label, in first-seen order.
    for idx, l in enumerate(labels):
        label = l[0]
        if label in image_dict.keys() and len(image_dict[label]) < k:
            image_dict[label].append(data[idx])
        elif label not in image_dict.keys():
            image_dict[label] = [data[idx]]
    images_to_show = []
    labels_to_show = []
    # Flatten to parallel lists: one row label per class, k images each.
    for label, image in image_dict.items():
        labels_to_show.append(label_names[label])
        for i in image:
            images_to_show.append(i)
    images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])
    imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)
def outlier_analysis(model, outliers_tensor, outlier_label_names, cifar10_label_names):
    """Classify out-of-distribution images and display them with the
    model's CIFAR-10 predictions (one image per grid row)."""
    model.eval()
    predicted_labels = []
    with torch.no_grad():
        start_time = time.time()
        outputs = model(outliers_tensor)
        end_time = time.time()
        print("Time taken for prediction:", str(end_time - start_time))
        # Greedy class choice per image.
        _, predicted = torch.max(outputs.data, 1)
        for idx, label in enumerate(predicted):
            print("Original:", outlier_label_names[idx], "Predicted:", cifar10_label_names[label])
            predicted_labels.append(cifar10_label_names[label])
    # Annotate each row with the predicted CIFAR-10 class name.
    imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1), predicted_labels)
def plot_values(x, y, xlabel, ylabel, title, legend, fig_name):
    """Plot one line per series in *y* against a shared *x* axis and
    save the figure under ../data/plots/*fig_name*."""
    plt.clf()
    for y_i in y:
        plt.plot(x, y_i)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend(legend)
    plt.savefig("../data/plots/" + fig_name)
    plt.show()
6,455 | d78fd8ebf9ef55700a25a9ce96d9094f1bfa564e | def main():
piso = largura * comprimento
volume_sala = largura * comprimento * altura
area = 2 * altura * largura + 2 * altura * comprimento
print(piso)
print(volume_sala)
print(area)
altura = float(input(""))
largura = float(input(""))
comprimento = float(input(""))
if __name__ == '__main__':
main()
|
6,456 | 42c9e5039e2d5f784bf6405ea8bcaf7d6973ddcb | from mayan.apps.testing.tests.base import BaseTestCase
from .mixins import AssetTestMixin
class AssetModelTestCase(
    AssetTestMixin, BaseTestCase
):
    """Model-level tests for the Asset model."""
    def test_asset_get_absolute_url_method(self):
        # Smoke test: creating an asset and resolving its absolute URL must not raise.
        self._create_test_asset()
        self.test_asset.get_absolute_url()
|
6,457 | 2420c835ff91c1269cb16fca2e60e191e1e8ce13 | #!/usr/bin/env python
#-*- coding : utf-8 -*-
import string
import keyword
alphas = string.letters + '_'
nums = string.digits
keywords = keyword.kwlist
checklst = alphas + nums
print 'Welcome to the Identifier Checker v1.0'
myInput = raw_input('Identifier to test? ')
if myInput in keywords:
print 'Okay as a keyword'
elif len(myInput) == 1:
if myInput in alphas:
print 'Okay as an Identifier'
else:
print 'invaild: one symbols must be alphanumeric '
elif len(myInput) > 1:
if myInput[0] not in alphas:
print '''invalid: first symbol must be alphabetic'''
else:
for otherChar in myInput[1:]:
if otherChar not in checklst:
print '''invalid:remaining symbols must be alphanumeric'''
break
else:
print '''okay as an identifier'''
|
6,458 | a245cb1f232b152edf40b6399686c6811c522d99 | # common methods to delete data from list
# Teaching demo: common ways to remove items from a Python list.
fruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']
#pop method
# fruits.pop(1)            # removes (and returns) the item at index 1
# del
# del fruits[1]            # removes by index, no return value
# remove
# fruits.remove('banana')  # removes the first matching value
# append, extend, insert
# pop, remove, del
print(fruits)
|
6,459 | 42d2be7544d2afb9580841422ae35e1a5621df52 | import abc
import math
import random
from typing import Union, Tuple
import numpy as np
from scipy import stats
from . import Rectangle, Line, Point, Shape
__all__ = ['get_critical_angle', 'Paddle', 'Ball', 'Snell', 'Canvas']
EPSILON = 1e-7
def get_critical_angle(s0: float, s1: float) -> Union[float, None]:
    """
    Critical angle for a ball crossing from a medium with speed `s0` into a
    medium with speed `s1`.

    A critical angle (total internal reflection) only exists when the ball
    would speed up (`s0 < s1`); otherwise None is returned.

    :param s0: speed of the initial medium
    :param s1: speed of the final medium
    :return: critical angle in radians, or None
    """
    if s0 >= s1:
        return None
    return math.asin(s0 / s1)
class Paddle(Rectangle):
    def __init__(self, height: float, width: float, speed: float, side: str, max_angle: float, visibility: str):
        """
        A player paddle.

        :param height: paddle height
        :param width: paddle width (only matters for rendering)
        :param speed: units the paddle moves in a single turn
        :param side: which side of the field the paddle sits on ('left' or 'right')
        :param max_angle: maximum angle at which the paddle can return the ball
        :param visibility: whether and how to render the paddle, see `Shape.visibility`
        """
        super().__init__(height=height, width=width, visibility=visibility, render_value=255)
        assert side in ['left', 'right'], f"side must be 'left' or 'right', not {side}"
        assert 0 <= max_angle <= math.pi / 2, f"max angle must be between 0 and pi/2, not {max_angle}"
        self.side = side
        self.speed = speed
        self.max_angle = max_angle
    def up(self):
        """Move the paddle one step upwards."""
        self.y += self.speed
    def down(self):
        """Move the paddle one step downwards."""
        self.y -= self.speed
    def _get_edges(self) -> Tuple[Line]:
        """
        Return only the edge facing the playing field; the other edges can
        never be hit by the ball.
        """
        if self.side == 'right':
            facing_x = self.left_bound
        elif self.side == 'left':
            facing_x = self.right_bound
        else:
            return None  # unreachable: the constructor validates `side`
        return Line((facing_x, self.bottom_bound), (facing_x, self.top_bound)),
    def get_fraction_of_paddle(self, point: Point):
        """
        Signed offset of `point` from the paddle centre as a fraction of the
        paddle height, clamped to [-0.5, 0.5].

        :param point: the point where the ball hit the paddle
        :return: fraction of the paddle
        """
        offset = (point.y - self.y) / self.height
        return min(max(offset, -0.5), 0.5)
class Ball(Rectangle):
    def __init__(self, size: float, max_initial_angle: float, visibility: str, has_volume: bool = False):
        """
        The game ball (rendered as a square of side `size`).

        :param size: rendered size of the ball
        :param max_initial_angle: largest angle (from horizontal) the ball may start with
        :param visibility: how to render the ball, see `Shape.visibility`
        :param has_volume: if True the ball collides as an area, otherwise as a point
        """
        super().__init__(width=size, height=size, visibility=visibility, render_value=255)
        self.max_initial_angle = max_initial_angle
        # First serve heads towards the left player.
        self.reset(self.pos, direction='left')
        self.has_volume = has_volume
    def reset(self, position: Union[Tuple[float, float], Point], direction: str = 'right'):
        """Re-serve the ball from `position`, heading `direction` at a random angle."""
        if direction not in ('left', 'right'):
            raise ValueError(f"direction must be 'left' or 'right', not {direction}")
        spread = (2 * random.random() - 1) * self.max_initial_angle
        self._angle = spread if direction == 'right' else math.pi - spread
        self.pos = position
    @property
    def angle(self):
        """
        Direction of travel, in radians from the rightward horizontal.
        """
        return self._angle
    @angle.setter
    def angle(self, value):
        # Keep the stored angle normalised to [0, 2*pi).
        self._angle = value % (2 * math.pi)
    @property
    def unit_velocity(self) -> Point:
        """Unit direction vector corresponding to `angle`."""
        return Point(math.cos(self.angle), math.sin(self.angle))
    @unit_velocity.setter
    def unit_velocity(self, value: Union[Tuple[float, float], Point]):
        """
        Set the direction of travel from an (x, y) vector.

        :param value: (x, y)
        """
        if isinstance(value, tuple):
            value = Point(*value)
        assert isinstance(value, Point), f"value must be a point, not {type(value)}"
        self.angle = value.angle
    def get_velocity(self, speed: Union[float, int]):
        """Velocity vector of the ball for the given scalar `speed`."""
        return self.unit_velocity * speed
class Snell(Rectangle):
    def __init__(self, width, height, speed, change_rate, visibility):
        """
        Rectangular region inside which the ball travels at a different speed.

        :param width: width of the layer
        :param height: height of the layer
        :param speed: ball speed inside the layer
        :param change_rate: standard deviation of the per-step random speed
            change; 0 disables the random walk
        :param visibility: whether and how to render the layer, see `Shape.visibility`
        """
        assert change_rate >= 0, "Snell `change_rate` must be non-negative"
        super().__init__(width=width, height=height, visibility=visibility, render_value=(235, 76, 52))
        self.speed = speed
        self._initial_speed = speed
        self.change_rate = change_rate
    def step(self):
        """
        Advance the layer speed by one step of a bounded Gaussian random
        walk: a normal step with mean 0 and standard deviation
        `self.change_rate`, clipped to [0.5, 2.0] x the initial speed.
        """
        if self.change_rate == 0:
            return
        self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()
        lower = 0.5 * self._initial_speed
        upper = 2.0 * self._initial_speed
        self.speed = min(max(self.speed, lower), upper)
class TrajectoryBase(abc.ABC):
    """
    Abstract trajectory of a shape moving with a fixed velocity for one step.

    Subclasses expose the paths swept by the shape's centre and corners as
    `Line`s; `set_intersection` records the first collision found along one
    of those corner trajectories.
    """
    def __init__(self, shape: Union[Point, Line, Rectangle], velocity: Point):
        self.shape = shape
        self.velocity = velocity
        # Reference point used by get_center_at_intersection; set by subclasses.
        self._reference = None
        # Collision bookkeeping, populated by set_intersection.
        self.intersection = None
        self.intersected_trajectory = None
        self.intersected_object = None
        self.intersected_edge = None
        self.remaining_speed = None
    def set_intersection(self, point: Point, trajectory_line: Line, obj: Shape, edge: Line):
        """
        Record a collision: where it happened, along which corner trajectory,
        with which object and edge, and how much travel distance remains.
        """
        assert isinstance(obj, Shape), f"type Shape expected, not {type(obj)}"
        assert isinstance(point, Point), f"type Point expected, not {type(point)}"
        assert isinstance(edge, Line), f"type Line expected, not {type(edge)}"
        self.intersection = point
        self.intersected_trajectory = trajectory_line
        self.remaining_speed = point.l2_distance(trajectory_line.end)
        self.intersected_object = obj
        self.intersected_edge = edge
    def get_center_at_intersection(self) -> Point:
        """
        Get the new center of `self.shape` given that it moved along `intersected_trajectory` to `intersection`

        :return: new center point
        """
        # Translate the reference point by the same offset the intersecting
        # corner travelled before it hit.
        return self._reference + (self.intersection - self.intersected_trajectory.start)
    @property
    def corners(self) -> Tuple[Line, ...]:
        # All four corner trajectories; TrajectoryPoint overrides this.
        return self.top_left, self.top_right, self.bottom_right, self.bottom_left
    @property
    @abc.abstractmethod
    def center(self) -> Line: ...
    @property
    @abc.abstractmethod
    def top_right(self) -> Line: ...
    @property
    @abc.abstractmethod
    def top_left(self) -> Line: ...
    @property
    @abc.abstractmethod
    def bottom_right(self) -> Line: ...
    @property
    @abc.abstractmethod
    def bottom_left(self) -> Line: ...
class TrajectoryRectangle(TrajectoryBase):
    """
    Trajectory of a rectangle: the paths swept by its centre and each corner.
    """
    def __init__(self, shape: Rectangle, velocity: Point):
        super(TrajectoryRectangle, self).__init__(shape, velocity)
        assert isinstance(shape, Rectangle)
        self._reference = self.shape.pos
    def _line_from(self, start: Point) -> Line:
        # Path swept by one point moving with the trajectory velocity.
        return Line(start, start + self.velocity)
    @property
    def center(self) -> Line:
        """Trajectory of the rectangle's centre point."""
        return self._line_from(self.shape.pos)
    @property
    def top_right(self) -> Line:
        """Trajectory of the rectangle's top-right corner."""
        return self._line_from(Point(self.shape.right_bound, self.shape.top_bound))
    @property
    def top_left(self) -> Line:
        """Trajectory of the rectangle's top-left corner."""
        return self._line_from(Point(self.shape.left_bound, self.shape.top_bound))
    @property
    def bottom_right(self) -> Line:
        """Trajectory of the rectangle's bottom-right corner."""
        return self._line_from(Point(self.shape.right_bound, self.shape.bottom_bound))
    @property
    def bottom_left(self) -> Line:
        """Trajectory of the rectangle's bottom-left corner."""
        return self._line_from(Point(self.shape.left_bound, self.shape.bottom_bound))
class TrajectoryLine(TrajectoryRectangle):
    """
    Trajectory of a line segment, handled by wrapping the segment in its
    axis-aligned bounding rectangle and reusing the rectangle logic.
    """
    # noinspection PyTypeChecker
    # noinspection PyUnresolvedReferences
    def __init__(self, shape: Line, velocity: Point):
        super(TrajectoryLine, self).__init__(shape, velocity)
        assert isinstance(shape, Line)
        # Keep the segment start as the reference before replacing the shape.
        self._reference = self.shape.start
        start, end = self.shape.start, self.shape.end
        bbox = Rectangle(height=abs(start.y - end.y), width=abs(start.x - end.x))
        bbox.pos = Point((start.x + end.x) / 2, (start.y + end.y) / 2)
        self.shape = bbox
class TrajectoryPoint(TrajectoryBase):
    """
    Trajectory of a single point: every corner property degenerates to the
    same swept line.
    """
    def __init__(self, shape: Point, velocity: Point):
        super(TrajectoryPoint, self).__init__(shape, velocity)
        assert isinstance(shape, Point)
        self._reference = self.shape
    @property
    def _trajectory(self) -> Line:
        # The single path a point sweeps during the step.
        return Line(self.shape, self.shape + self.velocity)
    @property
    def corners(self) -> Tuple[Line, ...]:
        return (self._trajectory,)
    @property
    def center(self) -> Line:
        return self._trajectory
    @property
    def top_right(self) -> Line:
        return self._trajectory
    @property
    def top_left(self) -> Line:
        return self._trajectory
    @property
    def bottom_right(self) -> Line:
        return self._trajectory
    @property
    def bottom_left(self) -> Line:
        return self._trajectory
class Trajectory(object):
    """
    Factory: builds the trajectory implementation matching the runtime type
    of `shape`. The check order (Point, Line, Rectangle) is preserved from
    the original so Rectangle subclasses such as Ball and Paddle dispatch to
    TrajectoryRectangle.
    """
    def __new__(cls, shape: Shape, velocity: Point):
        dispatch = ((Point, TrajectoryPoint), (Line, TrajectoryLine), (Rectangle, TrajectoryRectangle))
        for shape_type, trajectory_cls in dispatch:
            if isinstance(shape, shape_type):
                return trajectory_cls(shape, velocity)
        raise NotImplementedError(f"No implementation of Trajectory for input shape of type {type(shape)}")
class Canvas(Rectangle):
    action_meanings = {0: 'NOOP',
                       1: 'UP',
                       2: 'DOWN', }
    actions = {k: v for v, k in action_meanings.items()}
    def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball, snell: Snell, ball_speed: int, height: int,
                 width: int, their_update_probability: float, refract: bool, uniform_speed: bool):
        """
        The playing field: owns both paddles, the ball and the snell layer,
        and advances the game one step at a time.

        :param ball_speed: default ball speed outside the snell layer
        :param their_update_probability: per-step probability that the opponent paddle moves
        :param refract: whether the ball refracts at the snell boundary
        :param uniform_speed: if True, the snell layer does not change the ball speed
        """
        super().__init__(height=height, width=width, visibility='none', render_value=0)
        self.pos = self.width / 2, self.height / 2
        assert isinstance(their_update_probability, (float, int)), \
            f"their_update_probability must be numeric, not {type(their_update_probability)}"
        assert 0 <= their_update_probability <= 1, f"{their_update_probability} outside allowed bounds [0, 1]"
        self.their_update_probability = their_update_probability
        self.default_ball_speed = ball_speed
        # Initialize objects; list order defines render priority (later wins).
        self.snell = snell
        self.ball = ball
        self.paddle_l = paddle_l
        self.paddle_r = paddle_r
        self.sprites = [self, snell, paddle_l, paddle_r, ball]
        self.uniform_speed = uniform_speed
        self.refract = refract
        self.we_scored = False
        self.they_scored = False
        # score
        self.our_score = 0
        self.their_score = 0
    def register_sprite(self, sprite: Shape):
        """Add a sprite to the render/collision list, just beneath the ball."""
        assert issubclass(type(sprite), Shape), f"sprite must be subclassed from Shape"
        # noinspection PyTypeChecker
        self.sprites.insert(-1, sprite)  # insert before ball
    @property
    def left_bound(self):
        return 0
    @property
    def right_bound(self):
        return self.width
    @property
    def top_bound(self):
        return self.height
    @property
    def bottom_bound(self):
        return 0
    # noinspection PyMethodOverriding
    def to_numpy(self) -> Tuple[np.ndarray, np.ndarray]:
        """
        Performs masked rendering of objects in `self.sprites`. Priority is determined by the ordering of the list,
        earlier objects will be obscured by later ones.

        :return: (state, rendering)
        """
        state = self._zero_rgb_image(round(self.height), round(self.width))
        rendering = self._zero_rgb_image(round(self.height), round(self.width))
        for sprite in self.sprites[1:]:  # skip self
            sprite_state, sprite_rendering = sprite.to_numpy(self.height, self.width)
            state[sprite_state != 0] = sprite_state[sprite_state != 0]
            rendering[sprite_rendering != 0] = sprite_rendering[sprite_rendering != 0]
        return state, rendering
    def score(self, who):
        """
        Increment the score and reset the ball

        :param who: 'we' or 'they'
        :return: reward
        """
        if who == 'they':
            reward = -1
            self.their_score += 1
        elif who == 'we':
            reward = 1
            self.our_score += 1
        else:
            raise ValueError(f"who must be 'we' or 'they', not {who}")
        self._reset_ball()
        return reward
    def step(self, action):
        """Advance the game one frame (paddles, ball, snell layer); return the reward."""
        self._move_our_paddle(action)
        self._step_their_paddle()
        reward = self._step_ball()
        self._step_snell()
        return reward
    def get_state_size(self) -> Tuple[int, int]:
        """
        Return the tuple (height, width) of the canvas dimensions
        """
        return self.height, self.width
    def _step_snell(self) -> None:
        """
        Step the snell layer
        """
        self.snell.step()
    def _reset_ball(self):
        """Re-serve the ball from the centre of the field."""
        self.ball.reset((self.width / 2, self.height / 2))
    def _move_our_paddle(self, action) -> None:
        """
        Move our paddle according to the provided action

        :param action: the action code
        """
        if not isinstance(action, int):
            action = action.item()  # pops the item if the action is a single tensor
        assert action in [a for a in self.action_meanings.keys()], f"{action} is not a valid action"
        if action == self.actions['UP']:
            if self.paddle_r.top_bound < self.top_bound:
                self.paddle_r.up()
        elif action == self.actions['DOWN']:
            if self.paddle_r.bottom_bound > self.bottom_bound:
                self.paddle_r.down()
    def _step_ball(self, speed: Union[float, int] = None):
        """
        Move the ball to the next position according to the speed of the layer it is in.

        :param speed: used to continue the trajectory of a ball that interacted with an object
        """
        trajectory = self._get_trajectory(speed)
        self._get_first_intersection(trajectory)
        reward = 0
        if trajectory.intersection is None:  # No intersection
            self.ball.pos = trajectory.center.end
        else:
            reward = self._interaction_dispatcher(trajectory)
        return reward
    def _get_trajectory(self, speed) -> TrajectoryBase:
        """
        Get the ball's trajectory

        :param speed: The speed of the starting medium
        :return: trajectory `Line`
        """
        if speed is None:
            speed = self._get_ball_speed()
        if self.ball.has_volume:
            trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))
        else:
            trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(speed))
        return trajectory
    def _interaction_dispatcher(self, trajectory: TrajectoryBase):
        """
        Dispatch data to the appropriate method based on the interaction `obj`.

        :param trajectory: the trajectory of the ball
        :return: reward accumulated while finishing the step
        """
        reward = 0
        obj = trajectory.intersected_object
        if obj is self:  # border interaction
            reward = self._interact_border(trajectory)
        elif isinstance(obj, Paddle):  # paddle interaction
            # FIX: the reward earned while finishing the step after a paddle
            # bounce (e.g. an immediate score) was previously discarded.
            reward = self._interact_paddle(trajectory)
        elif isinstance(obj, Snell):
            # FIX: likewise, capture the reward from the refraction branch.
            reward = self._refract(trajectory)
        return reward
    def _interact_paddle(self, trajectory: TrajectoryBase) -> float:
        """Bounce the ball off the paddle it hit; finish the step and return the reward."""
        paddle = trajectory.intersected_object
        paddle_fraction = paddle.get_fraction_of_paddle(trajectory.get_center_at_intersection())
        angle = paddle_fraction * paddle.max_angle
        # Mirror the outgoing angle when the ball was travelling rightwards.
        angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle
        self.ball.angle = angle
        reward = self._finish_step_ball(trajectory)
        return reward
    def _refract(self, trajectory: TrajectoryBase):
        """Handle a snell-boundary crossing; finish the step and return the reward."""
        edge = trajectory.intersected_edge
        if self.refract:
            s0, s1 = self._get_start_and_end_speed(trajectory)
            angle = edge.angle_to_normal(trajectory.center)
            if self._exceeds_critical_angle(angle, s0, s1):
                # TODO: reflect to arbitrary angle (non-vertical interface)
                # FIX: propagate the reward from the reflected remainder of
                # the step instead of returning None.
                return self._reflect(Point(-1, 1), trajectory)
            new_angle = math.asin(s1 / s0 * math.sin(angle))
            boundary_angle, new_angle = self._adjust_refraction_to_boundary_angle(edge, new_angle)
            new_angle = self._adjust_refraction_to_direction_of_incidence(boundary_angle, new_angle, trajectory)
            self.ball.angle = new_angle
        # FIX: when refraction is disabled the original returned without
        # moving the ball, leaving it frozen at the boundary on every
        # subsequent step. The ball now passes straight through.
        return self._finish_step_ball(trajectory)
    @staticmethod
    def _exceeds_critical_angle(angle: float, s0: float, s1: float) -> bool:
        """
        Test if the angle exceeds the critical angle

        :param angle: The angle to the normal of the boundary
        :param s0: The speed of the original medium
        :param s1: The speed of the next medium
        :return: True if the angle exceeds the critical angle
        """
        if s1 > s0:  # if the second speed is faster, there is a critical angle
            critical_angle = get_critical_angle(s0, s1)
            if abs(angle) >= critical_angle:
                return True
        return False
    @staticmethod
    def _adjust_refraction_to_direction_of_incidence(boundary_angle: float, new_angle: float,
                                                     trajectory: TrajectoryBase) -> float:
        """
        If the direction of incidence was from the right of the boundary, reflect `new_angle`, otherwise, return
        `new_angle` without modification.

        :param boundary_angle: must be in the first or fourth quadrant
        :param new_angle: The angle to be reflected in the return
        :param trajectory: The angle of the incoming ball in global coordinates
        :return: The (possibly) reflected `new_angle`
        """
        angle = trajectory.center.angle
        assert -math.pi / 2 <= boundary_angle <= math.pi / 2, "boundary_angle should be in first or fourth quadrant"
        # noinspection PyChainedComparisons
        if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi) < boundary_angle + math.pi:
            new_angle = math.pi - new_angle
        elif (boundary_angle < 0 and
              boundary_angle % (2 * math.pi) + math.pi < angle % (2 * math.pi) < boundary_angle % (
                      2 * math.pi)):
            new_angle = math.pi - new_angle
        return new_angle
    @staticmethod
    def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float) -> Tuple[float, float]:
        """
        Compute the rotation of `new_angle` back to global coordinates. Assume incidence from the left side of the
        boundary.

        :param boundary: The boundary `primitives.Line` object
        :param new_angle: The refracted angle normal to the boundary
        :return: The new angle in global coordinates
        """
        # TODO: verify this works with a non-vertical interface
        boundary_angle = boundary.angle % (2 * math.pi)
        if 0 <= boundary_angle < math.pi / 2:  # in the first quadrant
            boundary_angle = boundary_angle
            new_angle = boundary_angle - math.pi / 2 + new_angle
        elif math.pi / 2 <= boundary_angle < math.pi:  # in the second quadrant
            boundary_angle = math.pi - boundary_angle
            new_angle = math.pi / 2 - boundary_angle + new_angle
        elif math.pi <= boundary_angle < 3 * math.pi / 2:  # in the third quadrant
            boundary_angle = math.pi - boundary_angle
            new_angle = boundary_angle - math.pi / 2 + new_angle
        elif 3 * math.pi / 2 <= boundary_angle < 2 * math.pi:  # in the fourth quadrant
            # FIX: this bound previously read `2 * math.pi / 3` (~120 deg);
            # it only worked because the earlier branches consumed [0, 3*pi/2).
            boundary_angle = 2 * math.pi - boundary_angle
            new_angle = math.pi / 2 - boundary_angle - new_angle
        else:
            raise ValueError(f'Unexpected angle {boundary_angle}')
        return boundary_angle, new_angle
    def _get_start_and_end_speed(self, trajectory: TrajectoryBase) -> Tuple[float, float]:
        """
        Get the speed at the start of the trajectory and the speed at the end of the trajectory.

        :param trajectory: The trajectory `primitives.Line` object
        :return: (initial speed, final speed)
        """
        snell = trajectory.intersected_object
        # todo: detect if start is in some other snell layer
        if snell.is_in(trajectory.center.start):
            s0 = snell.speed
            s1 = self.default_ball_speed
        else:
            s0 = self.default_ball_speed
            s1 = snell.speed
        return s0, s1
    def _interact_border(self, trajectory: TrajectoryBase) -> float:
        """Reflect off the top/bottom walls or score off the left/right walls."""
        reward = 0.
        edge = trajectory.intersected_edge
        if edge == self.top_edge or edge == self.bot_edge:
            reward = self._reflect(Point(1, -1), trajectory)
        elif edge == self.left_bound or edge == self.left_edge:
            reward = self.score('we')
        elif edge == self.right_edge:
            reward = self.score('they')
        else:
            raise ValueError(f'invalid edge, {edge}')
        return reward
    def _reflect(self, direction: Point, trajectory: TrajectoryBase):
        """
        Multiplies the velocity of the ball by `direction`, continues the path of the ball by calculating the remaining
        speed using trajectory and point.

        :param direction: velocity multiplier
        :param trajectory: The original trajectory of the ball
        :return: reward from the remainder of the step
        """
        self.ball.unit_velocity *= direction
        return self._finish_step_ball(trajectory)
    def _finish_step_ball(self, trajectory: TrajectoryBase):
        """
        Finish the remainder of the trajectory after any interactions.

        :param trajectory: The original trajectory
        :return: reward
        """
        point = trajectory.get_center_at_intersection()
        # Nudge the ball off the surface by EPSILON to avoid re-detecting
        # the same intersection on the recursive continuation step.
        self.ball.pos = point + self.ball.unit_velocity * EPSILON
        return self._step_ball(trajectory.remaining_speed)
    def _get_first_intersection(self, trajectory: TrajectoryBase):
        """
        Find the first point at which the trajectory interacted with an object.

        :param trajectory: the trajectory of the object
        :return: (shape object interacted with, point of interaction, line object interacted with)
        """
        for trajectory_line in trajectory.corners:
            for o in self.sprites:
                if not isinstance(o, Ball):
                    intersection_result = o.get_intersection(trajectory_line)
                    if intersection_result is not None:
                        edge, point = intersection_result
                        if trajectory.intersection is None:
                            trajectory.set_intersection(point, trajectory_line, o, edge)
                        elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:
                            raise NotImplementedError("overlapping parallel edges not implemented")
                        elif (point.l2_distance(trajectory_line.start) <
                              trajectory.intersection.l2_distance(trajectory.intersected_trajectory.start)):
                            # Keep whichever intersection happens earliest.
                            trajectory.set_intersection(point, trajectory_line, o, edge)
    def _get_ball_speed(self) -> float:
        """Current ball speed: the snell speed when inside the layer, else the default."""
        if self.uniform_speed:
            return self.default_ball_speed
        else:
            if self.ball.is_overlapping(self.snell):
                return self.snell.speed
            else:
                return self.default_ball_speed
    def _step_their_paddle(self):
        """
        Move the opponents paddle. Override this in a subclass to change the behavior.
        """
        if random.random() < self.their_update_probability:
            if self.paddle_l.y < self.ball.y:
                if self.paddle_l.top_bound < self.top_bound:
                    self.paddle_l.up()
            else:
                if self.paddle_l.bottom_bound > self.bottom_bound:
                    self.paddle_l.down()
|
6,460 | 79141679bb2839de9d4a25b6c6c285905dddbb0d | #!/usr/bin/python
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
setup(
    # Package identity
    name="isc-dhcpd-parser",
    version="0.1",
    description="Parser for isc-dhcp config files (dhcpd.conf)",
    # Author contact
    author="Pavel Podkorytov",
    author_email="pod.pavel@gmail.com",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
    ],
    # Code layout and console entry points
    packages=find_packages(),
    scripts=["bin/isc_dhcpd_leases.py"],
    # PLY supplies the lex/yacc machinery used by the parser
    install_requires=["ply"],
)
|
6,461 | 536a67935527eb99bc0424613c9b931401db0b06 | from rest_framework import serializers
from .models import Twit, Comment, Message
from django.contrib.auth.models import User
class TwitSerializer(serializers.ModelSerializer):
    """Serializes every field of the Twit model."""
    class Meta:
        model = Twit
        fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
    """Serializes every field of the Comment model."""
    class Meta:
        model = Comment
        fields = '__all__'
class MessageSerializer(serializers.ModelSerializer):
    """Serializes a Message, exposing only the listed fields."""
    class Meta:
        model = Message
        fields = ('sender', 'receiver', 'content', 'creation_date')
|
6,462 | 397686964acbf640a5463a3a7095d85832545d9e | import re
def detectPeriod(data):
    """
    Find time-duration phrases (e.g. "一個小時", "30分鐘") in `data`.

    FIX: the original wrapped `numWord` — which is already a complete regex
    character class — in an extra pair of brackets, producing a broken
    pattern, and then never used or returned the result. The pattern is now
    built from the class directly and all matches are returned.

    :param data: the input sentence
    :return: list of matched duration substrings
    """
    numWord = "[0-9,一二三四五六七八九十兩半]"  # numeral characters, as a regex class
    hourWord = "小時鐘頭"
    minWord = "分鐘"
    secWord = "秒鐘"
    timePat = (numWord + "+點?\\.?" + numWord + "*個?半?[" + hourWord + "]*半?又?"
               + numWord + "*[" + minWord + "]*又?" + numWord + "*[" + secWord + "]*")
    return re.findall(timePat, data)
def main():
    # Demo invocation: scan the sample sentence for duration phrases.
    detectPeriod("我要去游泳一個小時")
if __name__ == "__main__":
    main()
|
6,463 | 658532e1b81b025b8295bbf468dc01ecf12b922a | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem import SnowballStemmer
import pandas as pd
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import re
import numpy as np
trainname = 'train_data.csv'
testname = 'testval_data.csv'
wnl = WordNetLemmatizer()
stemmer = SnowballStemmer("english")
# stemmer = PorterStemmer()
stoplist = set(stopwords.words("english"))
# remove all the punctuation, whitespace and stop words, convert all the disparities of a word into their normalized form.
# remove all the punctuation, whitespace and stop words, convert all the disparities of a word into their normalized form.
def process_review(review):
    """
    Tokenize a review: replace non-letters with spaces, lowercase, drop
    English stop words, and lemmatize the remaining tokens.

    :param review: raw review text
    :return: list of normalized tokens
    """
    review = re.sub(r'[^a-zA-Z]', ' ', review)
    # FIX: lowercase once — the original called .lower() a second time on
    # the already-lowered string inside the comprehension.
    review = review.lower()
    return [wnl.lemmatize(word) for word in review.split() if word not in stoplist]
# Our list of functions to apply.
transform_functions = [
    lambda x: len(x),
    lambda x: x.count(" "),
    lambda x: x.count("."),
    lambda x: x.count("!"),
    lambda x: x.count("?"),
    lambda x: len(x) / (x.count(" ") + 1),
    lambda x: x.count(" ") / (x.count(".") + 1),
    lambda x: len(re.findall("\d", x)),
    lambda x: len(re.findall("[A-Z]", x)),
]
# FIX: the original applied the functions to an undefined name `reviews`
# and fit the TF-IDF vectorizer on `x1` before either was loaded/defined,
# raising NameError at import time. Load the data first.
train = pd.read_csv(trainname)
test = pd.read_csv(testname)
x1 = train.loc[:, 'text']
x2 = test.loc[:, 'text']
# Apply each function and put the results into a list.
# NOTE(review): `reviews` was undefined in the original; the training text
# column is the only "text" series loaded here — confirm that was intended.
columns = [x1.apply(func) for func in transform_functions]
# Convert the meta features to a numpy array.
meta = np.asarray(columns).T
# TfidfVectorizer
tfv = TfidfVectorizer(analyzer='word', min_df=3, ngram_range=(1, 2), smooth_idf=1, stop_words=None,
                      strip_accents=None, sublinear_tf=1, token_pattern=r'\w{1,}', use_idf=1).fit(x1)
# CountVectorizer (bag of lemmatized words via process_review)
cvt = CountVectorizer(analyzer=process_review).fit(x1)
tx1 = cvt.transform(x1)
tx2 = cvt.transform(x2)
# np.savetxt("x.txt", tx.toarray(), delimiter=",")
y1 = train.loc[:, 'stars']
|
6,464 | 501ca508df5d72b0190b933f07c4bd505d7090c0 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from contextlib import contextmanager
import yaml
from omegaconf import OmegaConf
class CrypTenConfig:
    """
    Configuration object used to store configurable parameters for CrypTen.
    This object acts as a nested dictionary, but can be queried using dot-notation(
    e.g. querying or setting `cfg.a.b` is equivalent to `cfg['a']['b']`).
    Users can load a CrypTen config from a file using `cfg.load_config(filepath)`.
    Users can temporarily override a config parameter using the contextmanager temp_override:
    .. code-block:: python
    cfg.a.b = outer # sets cfg["a"]["b"] to outer value
    with cfg.temp_override("a.b", inner):
    print(cfg.a.b) # prints inner value
    print(cfg.a.b) # prints outer value
    """
    # Fallback YAML path used when no explicit config file is supplied.
    __DEFAULT_CONFIG_PATH = os.path.normpath(
        os.path.join(__file__, "../../../configs/default.yaml")
    )
    def __init__(self, config_file=None):
        self.load_config(config_file)
    def load_config(self, config_file):
        """Loads config from a yaml file"""
        if config_file is None:
            config_file = CrypTenConfig.__DEFAULT_CONFIG_PATH
        # Use yaml to open stream for safe load
        with open(config_file) as stream:
            config_dict = yaml.safe_load(stream)
        self.config = OmegaConf.create(config_dict)
    def set_config(self, config):
        # Accept either another CrypTenConfig (unwrap it) or a raw config tree.
        if isinstance(config, CrypTenConfig):
            self.config = config.config
        else:
            self.config = config
    def __getattribute__(self, name):
        # Real attributes win; any other name is resolved as a dotted path
        # into the underlying OmegaConf tree (supports cfg["a.b"]).
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            keys = name.split(".")
            result = getattr(self.config, keys[0])
            for key in keys[1:]:
                result = getattr(result, key)
            return result
    def __getitem__(self, name):
        return self.__getattribute__(name)
    def __setattr__(self, name, value):
        # `config` itself is a real instance attribute; never merge it into the tree.
        if name == "config":
            object.__setattr__(self, name, value)
        try:
            # Can only set attribute if already exists
            object.__getattribute__(self, name)
            object.__setattr__(self, name, value)
        except AttributeError:
            # Unknown (possibly dotted) names are merged into the OmegaConf tree.
            dotlist = [f"{name}={value}"]
            update = OmegaConf.from_dotlist(dotlist)
            self.config = OmegaConf.merge(self.config, update)
    def __setitem__(self, name, value):
        self.__setattr__(name, value)
    @contextmanager
    def temp_override(self, override_dict):
        # Swap in a merged copy for the duration of the with-block; the
        # original tree is restored even if the body raises.
        old_config = self.config
        try:
            dotlist = [f"{k}={v}" for k, v in override_dict.items()]
            update = OmegaConf.from_dotlist(dotlist)
            self.config = OmegaConf.merge(self.config, update)
            yield
        finally:
            self.config = old_config
|
6,465 | 78615f6b020e2547e5d9a08d8b4c414184106bb3 | import pandas as pd
import time
from datetime import datetime
from sklearn import metrics
from sklearn import cross_validation
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB
from sklearn.cross_validation import StratifiedKFold
from sklearn.svm import SVC, LinearSVC
import gzip
def getData():
    """Load the train/test CSVs from the parent directory; returns (train, test)."""
    print "reading file ..."
    data=pd.read_csv('../train.csv')
    test=pd.read_csv('../test.csv')
    return data,test
def preprocessTest(trainDF):
print "pre processing data ..."
start=time.time()
return trainDF
def decisionTree(trainDF,y,xHat):
    # NOTE(review): despite the name and the banner below, this fits a
    # MultinomialNB classifier; the DecisionTreeClassifier line is
    # commented out. Returns (class probabilities for xHat, fitted model).
    start=time.time()
    print "#"*70
    print "Using Decision Tree Classifier"
    print "#"*70
    # `trainingVectors` is computed but never used (dead code kept as-is).
    trainingVectors=trainDF.as_matrix()
    # clf=DecisionTreeClassifier(criterion="entropy")
    clf = MultinomialNB()
    print "training classifier ..."
    clf.fit(trainDF,y)
    print "predicting classes for test data"
    # xHat.drop(['Id'])
    yHat=clf.predict_proba(xHat)
    print"yhat"
    print yHat[0]
    end=time.time()
    print "Execution time for classifier: "+str(end-start)
    print "#"*70
    return yHat,clf
def daytime(x):
    """
    One-hot encode the hour `x` into six 4-hour buckets:
    (4-7, 8-11, 12-15, 16-19, 20-23, 0-3).

    :param x: hour of the day (0-23)
    :return: six 0/1 flags; exactly one is set for valid hours
    """
    buckets = (
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18, 19],
        [20, 21, 22, 23],
        [0, 1, 2, 3],
    )
    a, b, c, d, e, f = [1 if x in bucket else 0 for bucket in buckets]
    return a, b, c, d, e, f
def splitDate(x):
    """
    Break a "YYYY-MM-DD HH:MM:SS" timestamp string into its components.

    :param x: timestamp string
    :return: (hour, day, month, year)
    """
    parsed = datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
    return parsed.hour, parsed.day, parsed.month, parsed.year
def preprocessData(trainDF):
    """Feature-engineer the SF-crime frame into a purely indicator-valued matrix.

    Works for both the train frame (label columns are dropped when present)
    and the test frame. Returns the transformed DataFrame.
    """
    print "pre processing data ..."
    start=time.time()
    cols=trainDF.columns.values.tolist()
    # Label columns exist only in the training frame.
    if ('Category'in cols):
        trainDF=trainDF.drop(['Category','Descript','Resolution'],axis=1)
    df=pd.DataFrame()
    print"seperating districts"
    # One-hot encode the police district.
    df=pd.get_dummies(trainDF['PdDistrict'],prefix='pD')
    trainDF=pd.concat([trainDF,df],axis=1)
    df=pd.DataFrame()
    print "seperating days of week"
    # One-hot encode the weekday name.
    df=pd.get_dummies(trainDF['DayOfWeek'],prefix='day')
    trainDF=pd.concat([trainDF,df],axis=1)
    print "seperating time"
    # Split the timestamp string into hour/day/month/year columns.
    trainDF["time"],trainDF["day"],trainDF["month"],trainDF["year"]=zip(*trainDF["Dates"].apply(splitDate))
    print "getting part of day"
    # Six one-hot 4-hour buckets derived from the hour (see daytime()).
    trainDF["a"],trainDF["b"],trainDF["c"],trainDF["d"],trainDF["e"],trainDF["f"]=zip(*trainDF["time"].apply(daytime))
    print"generating extra feature Awake"
    # trainDF["summer"],trainDF["fall"],trainDF["winter"],trainDF["spring"]=zip(*trainDF["month"].apply(getSeason))
    print"generating extra feature Awake"
    # 1 during typical waking hours (midnight itself, or 08:00-23:00).
    trainDF["Awake"]=trainDF["time"].apply(lambda x: 1 if (x==0 or (x>=8 and x<=23)) else 0)
    print"generating extra feature intersection"
    # Addresses containing '/' are street intersections.
    trainDF['intersection']=trainDF['Address'].apply(lambda x: 1 if "/" in x else 0)
    print "descretizing X"
    # Bin longitude into 60 intervals, map each interval to its ordinal rank,
    # then one-hot encode the bin index.
    xcol={}
    trainDF.X=pd.cut(trainDF.X,60)
    temp=sorted(trainDF.X.unique())
    for i in temp:
        xcol[i]=temp.index(i)
    trainDF.X=trainDF.X.map(xcol)
    df=pd.DataFrame()
    df=pd.get_dummies(trainDF['X'],prefix='X')
    trainDF=pd.concat([trainDF,df],axis=1)
    print "descretizing Y"
    # Same binning for latitude, using 100 intervals.
    ycol={}
    trainDF.Y=pd.cut(trainDF.Y,100)
    temp=sorted(trainDF.Y.unique())
    for i in temp:
        ycol[i]=temp.index(i)
    trainDF.Y=trainDF.Y.map(ycol)
    df=pd.DataFrame()
    df=pd.get_dummies(trainDF['Y'],prefix='Y')
    trainDF=pd.concat([trainDF,df],axis=1)
    print"dropping unnecessary values"
    # Keep only the engineered indicator columns.
    trainDF=trainDF.drop(['DayOfWeek','PdDistrict','Address','time','day','year','month','Dates','X','Y'],axis=1)
    print trainDF.head()
    end=time.time()
    return trainDF
def main():
    """End-to-end pipeline: load -> preprocess -> fit/predict -> write RF.csv."""
    trainDF,testDF=getData()
    y=trainDF.Category.values
    # Keep the test Ids aside; they are re-attached to the submission below.
    idList=testDF.Id.tolist()
    testDF=testDF.drop(['Id'],axis=1)
    trainDF=preprocessData(trainDF)
    testDF=preprocessData(testDF)
    predicted,clf=decisionTree(trainDF,y,testDF)
    # One probability column per class, in the classifier's class order.
    submission = pd.DataFrame(predicted,columns=clf.classes_)
    submission['Id']=idList
    # Move Id from the last column to the first, as the submission format expects.
    cols=submission.columns.tolist()
    cols=cols[-1:]+cols[:-1]
    submission=submission[cols]
    print submission.head()
    submission.to_csv(open('RF.csv','wt'),index=False)
    print "submission file created"
    return
if __name__=="__main__":
main() |
6,466 | 6ea651e27620d0f26f7364e6d9d57e733b158d77 | import iris
import numpy as np
import matplotlib.pyplot as plt
import glob
import iris.analysis.cartography
import iris.coord_categorisation
import iris.analysis
import time
def my_callback(cube, field, filename):
    # Strip the forecast coordinates on load: the per-file cubes previously
    # refused to merge because the time coordinate metadata differed.
    for coord_name in ('forecast_reference_time', 'forecast_period'):
        cube.remove_coord(coord_name)
# One sub-directory of PP files per model run.
directory = '/data/data1/ph290/hadgem2es_co2/n_atlantic_co2/'
output_directory = ('/home/ph290/data1/hadgem2es_co2/global_avg/')
runs = glob.glob(directory + '//?????')
run_names = []
run_global_means = []
run_date = []
for i, run in enumerate(runs):
    print(i)
    # Last path component is the run id (replaces a fragile hard-coded index 7).
    run_name = run.split('/')[-1]
    run_names.append(run_name)
    # Air-sea CO2 flux field (STASH m02s30i249); my_callback strips the
    # forecast coordinates so the per-file cubes merge.
    cube = iris.load_cube(run + '/*.pp',
                          iris.AttributeConstraint(STASH='m02s30i249'),
                          callback=my_callback)
    if not cube.coord('longitude').has_bounds():
        cube.coord('longitude').guess_bounds()
    if not cube.coord('latitude').has_bounds():
        cube.coord('latitude').guess_bounds()
    grid_areas = iris.analysis.cartography.area_weights(cube)
    # Area-weighted spatial mean -> one global value per time step.
    time_mean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN,
                               weights=grid_areas)
    run_global_means.append(time_mean.data)
    coord = cube.coord('time')
    year = np.array([coord.units.num2date(value).year for value in coord.points])
    run_date.append(year)
    np.savetxt(output_directory + run_name + '.txt',
               np.vstack((year, time_mean.data)).T, delimiter=',')
fig = plt.figure()
for i, data in enumerate(run_global_means):
    # BUG FIX: the original plt.plot(run_date[i], i, data, 'k') passed the loop
    # index as the y-series and the data array where a format string belongs.
    plt.plot(run_date[i], data, 'k')
plt.xlabel('year')
plt.ylabel('air-sea CO$_2$ flux')
plt.show()
|
6,467 | 60c862accbb9cda40ed4c45491f643f065e2868a | #!/usr/bin/env python
import os
from distutils.core import setup, Extension
import distutils.util
# Distutils packaging for the pybanery Kanbanery client: installs the module
# plus an identically named command-line script.
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating to
# setuptools is the modern path -- confirm before changing build tooling.
setup (name = 'pybanery',
       version= '1.0',
       description='Python interface for Kanbanery',
       author = 'Pablo Lluch',
       author_email = 'pablo.lluch@gmail.com',
       py_modules = ['pybanery'],  # importable module shipped by this package
       scripts=['pybanery'],       # CLI entry point installed to the bin directory
       )
|
6,468 | 807b20f4912ab89bf73966961536a4cd4367f851 | # Generated by Django 3.0.1 on 2020-03-20 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add tree bookkeeping columns to ``menu`` and drop the ``Menu1`` model.

    NOTE(review): level/lft/rght/tree_id are the field names django-mptt keeps
    on tree models -- presumably ``Menu`` became an MPTT tree; confirm against
    the model definition.
    """

    dependencies = [
        ('page', '0004_auto_20200320_1521'),
    ]

    operations = [
        # preserve_default=False: the defaults below exist only to backfill
        # existing rows during the migration, not as model defaults.
        migrations.AddField(
            model_name='menu',
            name='level',
            field=models.PositiveIntegerField(default=0, editable=False),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menu',
            name='lft',
            field=models.PositiveIntegerField(default=0, editable=False),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menu',
            name='rght',
            field=models.PositiveIntegerField(default=0, editable=False),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='menu',
            name='tree_id',
            field=models.PositiveIntegerField(db_index=True, default=1, editable=False),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Menu1',
        ),
    ]
|
6,469 | a0349abb3a56ff4bc1700dbf0fa5a1fc2e3453ce | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class FlaskConfig(object):
    """Flask configuration sourced from FLASK_* environment variables with dev defaults."""

    # NOTE(review): the hard-coded fallback secret is acceptable for local
    # development only; never ship it to production.
    SECRET_KEY = os.environ.get('FLASK_SECRET_KEY') or 'TuLAsWbcoKr5YhDE'
    # Parse the env var as a boolean. The previous `os.environ.get(...) or True`
    # treated ANY non-empty string -- including "false" and "0" -- as truthy,
    # so the flag could never be switched off from the environment.
    BOOTSTRAP_SERVE_LOCAL = (
        os.environ.get('FLASK_BOOTSTRAP_SERVE_LOCAL', 'true').strip().lower()
        not in ('0', 'false', 'no', '')
    )
    APPLICATION_ROOT = os.environ.get('FLASK_APPLICATION_ROOT') or ''
    # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
    #     'sqlite:///' + os.path.join(basedir, 'app.db')
    # SQLALCHEMY_TRACK_MODIFICATIONS = False
|
6,470 | cdaceb2d8804e08f0b35b9b65f2d06695efad002 | # Generated by Django 3.1.7 on 2021-03-28 01:03
from django.db import migrations
class Migration(migrations.Migration):
    """Order ``Detail`` rows by creation time and route ``latest()`` through it."""

    dependencies = [
        ('details', '0002_auto_20210310_1421'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='detail',
            # get_latest_by makes Detail.objects.latest() default to 'created';
            # ordering makes querysets come back oldest-first.
            options={'get_latest_by': 'created', 'ordering': ['created']},
        ),
    ]
|
6,471 | dc934f8db4e0c1113e1398b051b58369d909fff8 | from collections import deque
class Solution:
    def slidingPuzzle(self, board: List[List[int]]) -> int:
        """Fewest moves to bring a 2x3 sliding puzzle to "123450", or -1.

        Breadth-first search over board states encoded as 6-character strings;
        the adjacency of the flattened 2x3 grid is precomputed.
        """
        start = "".join(str(v) for row in board for v in row)
        goal = "123450"
        # Neighbors of each flat position in the 2x3 grid (row-major).
        moves = {0: (1, 3), 1: (0, 2, 4), 2: (1, 5),
                 3: (0, 4), 4: (1, 3, 5), 5: (2, 4)}
        seen = {start}
        queue = deque([(start, 0)])
        while queue:
            state, depth = queue.popleft()
            if state == goal:
                return depth
            zero = state.index("0")
            for nxt in moves[zero]:
                chars = list(state)
                chars[zero], chars[nxt] = chars[nxt], chars[zero]
                candidate = "".join(chars)
                if candidate not in seen:
                    seen.add(candidate)
                    queue.append((candidate, depth + 1))
        return -1
|
def foo(x, y=5):
    """Return y * 2 + 1 via the inner helper; x is accepted but unused."""
    def bar(value):
        # Successor of the doubled keyword argument.
        return value + 1
    doubled = y * 2
    return bar(doubled)
print(foo(3)) |
6,473 | 2d69a39be3931aa4c62cadff4cdfad76f6b32c59 | import face_recognition
from glob import glob
import os.path as osp
class FaceRecognitionLib(object):
    """
    Face-recognition evaluation built on the face_recognition library.
    """
    # Class-level configuration
    __data_set_dir = './../../dataset/japanese' # dataset root directory
    __known_image_idx = (1,) # image indices enrolled as "known" faces
    __unknown_image_idx = (2, 3, 4, 5) # image indices used as probe images
    __tolerance = 0.4 # face-distance threshold for declaring a match
    def __init__(self):
        # get sub directory
        sub_dirs = glob(FaceRecognitionLib.__data_set_dir + '/*/')
        # get list of name
        # Each sub-directory name is a person's name ([-2] because of the
        # trailing slash from the glob pattern).
        self.__people = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]
        # Build the file lists for known (enrolled) and probe images;
        # files are named "<person><index>.jpg" inside each person's directory.
        known_images_path = []
        unknown_images_path = []
        for img_idx in self.__known_image_idx:
            known_images_path.extend(
                [osp.join(sub_dir, sub_dir.split('/')[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])
        for img_idx in self.__unknown_image_idx:
            unknown_images_path.extend(
                [osp.join(sub_dir, sub_dir.split('/')[-2] + str(img_idx) + '.jpg') for sub_dir in sub_dirs])
        self.__unknown_images_paths = unknown_images_path
        # set face encodings for known faces
        self.__known_face_encodings = self.__make_face_encodings(images_path=known_images_path)
        print('shape of known_face_encodings = ({}, {})'.format(len(self.__known_face_encodings),
                                                                len(self.__known_face_encodings[0])))
    @staticmethod
    def __make_face_encodings(images_path):
        """
        Compute the face encoding for each image path (first face per image).
        """
        face_encodings = []
        for img_path in images_path:
            img = face_recognition.load_image_file(img_path)
            face_encodings.append(face_recognition.face_encodings(img)[0])
        return face_encodings
    def recognition(self):
        """
        Match every probe image against the known encodings and report accuracy.
        """
        unknown_face_encodings = self.__make_face_encodings(images_path=self.__unknown_images_paths)
        print('shape of unknown_face_encodings = ({}, {})'.format(len(unknown_face_encodings),
                                                                  len(unknown_face_encodings[0])))
        accuracy = 0
        wrong = 0
        for face_to_compare in self.__known_face_encodings:
            print(face_recognition.face_distance(unknown_face_encodings, face_to_compare))
        for i, unknown_face_encoding in enumerate(unknown_face_encodings):
            img_file = osp.basename(self.__unknown_images_paths[i])
            results = face_recognition.compare_faces(self.__known_face_encodings,
                                                     unknown_face_encoding,
                                                     tolerance=FaceRecognitionLib.__tolerance)
            name = "Unknown"
            # Take the first enrolled person within tolerance.
            for person in range(len(self.__people)):
                if results[person]:
                    name = self.__people[person]
                    break
            # File names embed the true person name, so substring match scores it.
            if name in img_file:
                accuracy += 1
            else:
                wrong += 1
            print("Found {} in the photo {}".format(name, img_file))
        print('accuracy = {}, wrong = {}'.format(accuracy, wrong))
if __name__ == "__main__":
face_recognition_lib = FaceRecognitionLib()
face_recognition_lib.recognition()
|
6,474 | 8f01934472805b5ad6dca328483a7ac79ae7748a | #This version assumes domains = train/test set
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
    """Row-wise softmax of a 2-D array, shifted by the row max for stability."""
    shifted = x - np.max(x, axis=1).reshape(-1, 1)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
    """Multinomial Adversarial Network (MAN) for domain adaptation.

    Components (see fit()): a shared feature extractor F_s, one private
    extractor F_d per labeled domain, a sentiment classifier C on the
    concatenated features, and a domain discriminator D trained adversarially
    on the shared features. The labeled domain is the train set and the
    unlabeled domain the test set (see the module-level note).
    """
    def __init__(self, k, m, model_params=None, log_params=None):
        super().__init__(k,m,model_params,log_params)

    def prepare_data(self,d):
        """
        Assume d is a dictionary of dataset where d[domain] = another dataset class
        Assume labeled domain = train set, unlabeled = test

        Returns (train_loaders, train_iters, unlabeled_loaders, unlabeled_iters),
        each a dict keyed by domain name.
        """
        train_loaders, train_iters = {}, {}
        unlabeled_loaders, unlabeled_iters = {}, {}
        for domain in opt.domains:
            #CONVERT TO FLOAT32
            # (see module note: the extractors require float32 inputs)
            features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
            train = data_utils.TensorDataset(features,target)
            train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
            train_iters[domain] = iter(train_loaders[domain])
        for domain in opt.unlabeled_domains:
            features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
            uset = data_utils.TensorDataset(features,target)
            unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
            unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
        return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters

    def fit(self, d, *args, **kwargs):
        """Adversarial training loop; returns per-iteration loss histories
        (loss_d_res, l_d_res, l_c_res)."""
        #minibatches = create_minibatch(X, y, z, batch_size)
        #TODO: make this able to fit consecutively
        train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
        #Training
        # Shared extractor plus one private extractor per labeled domain.
        self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
        self.F_d = {}
        for domain in opt.domains:
            self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
        # C consumes [shared ; private] features; output size must be 2 even for
        # binary classification (see module note).
        self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
        self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
        # print("try")
        # print(opt.device)
        self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
        for f_d in self.F_d.values():
            f_d = f_d.to(opt.device)
        # print("endtry")
        # # optimizers
        # One optimizer for F_s, C and all F_d; a separate one for D.
        optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
        optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
        loss_d_res = []
        l_d_res = []
        l_c_res = []
        for epoch in range(opt.max_epoch):
            self.F_s.train()
            self.C.train()
            self.D.train()
            for f in self.F_d.values():
                f.train()
            # training accuracy
            correct, total = defaultdict(int), defaultdict(int)
            # D accuracy
            d_correct, d_total = 0, 0
            # conceptually view 1 epoch as 1 epoch of the first domain
            num_iter = len(train_loaders[opt.domains[0]])
            for i in range(num_iter):
                # D iterations
                # Phase 1: train the discriminator with everything else frozen.
                utils.freeze_net(self.F_s)
                map(utils.freeze_net, self.F_d.values())
                utils.freeze_net(self.C)
                utils.unfreeze_net(self.D)
                # optional WGAN n_critic trick
                n_critic = opt.n_critic
                for _ in range(n_critic):
                    self.D.zero_grad()
                    loss_d = {}
                    # train on both labeled and unlabeled domains
                    for domain in opt.unlabeled_domains:
                        # targets not used
                        d_inputs, _ = utils.endless_get_next_batch(
                            unlabeled_loaders, unlabeled_iters, domain)
                        d_inputs = d_inputs.to(opt.device)
                        d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
                        shared_feat = self.F_s(d_inputs)
                        d_outputs = self.D(shared_feat)
                        # D accuracy
                        _, pred = torch.max(d_outputs, 1)
                        d_total += len(d_inputs)
                        # L2 loss compares against one-hot style targets;
                        # otherwise NLL against class-index targets.
                        if opt.loss.lower() == 'l2':
                            _, tgt_indices = torch.max(d_targets, 1)
                            d_correct += (pred==tgt_indices).sum().item()
                            l_d = functional.mse_loss(d_outputs, d_targets)
                            l_d.backward()
                        else:
                            d_correct += (pred==d_targets).sum().item()
                            l_d = functional.nll_loss(d_outputs, d_targets)
                            l_d.backward()
                        loss_d[domain] = l_d.item()
                    optimizerD.step()
                # F&C iteration
                # Phase 2: train extractors + classifier with D frozen.
                utils.unfreeze_net(self.F_s)
                map(utils.unfreeze_net, self.F_d.values())
                utils.unfreeze_net(self.C)
                utils.freeze_net(self.D)
                #if opt.fix_emb:
                #    utils.freeze_net(self.F_s.word_emb)
                #    map(utils.freeze_net, self.F_d.values())
                self.F_s.zero_grad()
                for f_d in self.F_d.values():
                    f_d.zero_grad()
                self.C.zero_grad()
                shared_feats, domain_feats = [], []
                for domain in opt.domains:
                    inputs, targets = utils.endless_get_next_batch(
                        train_loaders, train_iters, domain)
                    #target = torch.int64 rn
                    targets = targets.to(opt.device)
                    inputs = inputs.to(opt.device)
                    shared_feat = self.F_s(inputs)
                    shared_feats.append(shared_feat)
                    domain_feat = self.F_d[domain](inputs)
                    domain_feats.append(domain_feat)
                    # Classify on concatenated shared + private features.
                    features = torch.cat((shared_feat, domain_feat), dim=1)
                    c_outputs = self.C(features)
                    #return c_outputs, targets
                    #DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
                    l_c = functional.nll_loss(c_outputs, targets)
                    l_c.backward(retain_graph=True)
                    # training accuracy
                    _, pred = torch.max(c_outputs, 1)
                    total[domain] += targets.size(0)
                    correct[domain] += (pred == targets).sum().item()
                # update F with D gradients on all domains
                # Adversarial step: push F_s toward domain-confusing features.
                for domain in opt.unlabeled_domains:
                    d_inputs, _ = utils.endless_get_next_batch(
                        unlabeled_loaders, unlabeled_iters, domain)
                    d_inputs = d_inputs.to(opt.device)
                    shared_feat = self.F_s(d_inputs)
                    d_outputs = self.D(shared_feat)
                    if opt.loss.lower() == 'gr':
                        # Gradient-reversal style: negate the scaled NLL loss.
                        d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
                        l_d = functional.nll_loss(d_outputs, d_targets)
                        if opt.lambd > 0:
                            l_d *= -opt.lambd
                    elif opt.loss.lower() == 'l2':
                        # L2 variant: match a random (uniform) domain label.
                        d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
                        l_d = functional.mse_loss(d_outputs, d_targets)
                        if opt.lambd > 0:
                            l_d *= opt.lambd
                    l_d.backward()
                optimizer.step()
                # print(loss_d)
                # print('l_d loss: {}'.format(l_d.item()))
                # print('l_c loss: {}'.format(l_c.item()))
                # NOTE(review): loss_d is keyed by the unlabeled-domain name;
                # 'test' assumes that domain is literally called "test" -- confirm.
                loss_d_res.append(loss_d['test'])
                l_d_res.append(l_d.item())
                l_c_res.append(l_c.item())
            if (epoch + 1) % kwargs["display_step"] == 0:
                print(
                    "Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
                )
        return loss_d_res, l_d_res, l_c_res

    def transform(self, d, *args, **kwargs):
        """Evaluate on the (first) unlabeled domain; returns (accuracy, correct)."""
        F_d = self.F_d[opt.domains[0]]
        self.F_s.eval()
        F_d.eval()
        self.C.eval()
        _,_,_,it = self.prepare_data(d)
        it = it[opt.unlabeled_domains[0]]
        correct = 0
        total = 0
        confusion = ConfusionMeter(opt.num_labels)
        preds = []
        for inputs,targets in it:
            inputs = inputs.to(opt.device)
            targets = targets.to(opt.device)
            d_features = F_d(inputs)
            features = torch.cat((self.F_s(inputs), d_features), dim=1)
            outputs = self.C(features)
            _, pred = torch.max(outputs, 1)
            #preds.extend(pred.data)
            confusion.add(pred.data, targets.data)
            total += targets.size(0)
            correct += (pred == targets).sum().item()
        acc = correct / total
        #('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
        return acc, correct
        #return preds

    def get_name(self):
        # Lazily build and cache the display name.
        if self._name is None:
            self._name = "MAN({},{},{})".format(self.k,self.m,1)
        return self._name
6,475 | dab53d10958b36cf75ab53bf30f744b1ed8a09b6 | from .authenticators import CookieAuthenticator, HeaderAuthenticator
from .paginators import LimitOffsetPaginator, PageNumberPaginator
from .views import * # pylint:disable=W0401
|
6,476 | f39130099ccf467623d65ac328fd02538044d36a | import copy
import datetime
from sacred import Experiment
from tqdm import tqdm
from mms_msg.databases.classical.full_overlap import WSJ2Mix
import paderbox as pb
import padertorch as pt
ex = Experiment('mixture_generator_create_json')
@ex.config
def defaults():
    # Sacred config scope: local assignments here become experiment config
    # entries overridable from the command line.
    json_path = 'database.json'
    database = {
        'factory': WSJ2Mix,
    }
    # Normalize/validate the factory spec in place via padertorch.
    pt.Configurable.get_config(database)
@ex.automain
def main(json_path, database, _log):
    """Materialize every dataset of the configured database into one JSON file."""
    # Keep the raw config dict for the meta block before instantiating.
    database_config = database
    database = pt.configurable.config_to_instance(database)
    database_dict = {
        'datasets': {
            # Expand each dataset into a plain {example_id: example} dict.
            dataset_name: dict(tqdm(
                database.get_dataset(dataset_name).items(),
                desc=dataset_name,
            )) for dataset_name in database.dataset_names
        },
        'meta': {
            # Record how and when this JSON was produced.
            'config': pt.configurable.recursive_class_to_str(
                copy.deepcopy(database_config)
            ),
            'generated': datetime.datetime.now(),
        }
    }
    pb.io.dump(database_dict, json_path)
    _log.info(f'Wrote file: {json_path}')
|
6,477 | 32066db8b43bc70c564cce5a33f50921285b3627 | #!/usr/bin/env python3
# coding: utf-8
# Time complexity: O()
# Space complexity: O()
import math
# Greatest common divisor (Euclidean algorithm)
def get_gcd(a, b):
    """Greatest common divisor of a and b via the Euclidean algorithm.

    The debug print of each (a, b) pair on every recursive step was removed.
    """
    if b == 0:
        return a
    return get_gcd(b, a % b)
get_gcd(48, 30)
# Count the number of divisors
# Time complexity: O(n)
def divisor1(num):
    """Count the divisors of num by full trial division (O(n))."""
    return sum(1 for candidate in range(1, num + 1) if num % candidate == 0)
# count prime,
class Solution:
    def countPrimes(self, n: int) -> int:
        """Number of primes strictly below n, via a sieve of Eratosthenes."""
        if n < 3:
            return 0
        sieve = [True] * n
        sieve[0] = sieve[1] = False
        # Any composite < n has a factor <= sqrt(n), so sieving stops there.
        for p in range(2, int(n ** 0.5) + 1):
            if sieve[p]:
                # Knock out every multiple of p starting from 2p.
                sieve[p * 2::p] = [False] * len(range(p * 2, n, p))
        return sum(sieve)
# Use upper limit of (n**0.5)+1, because:
# (a) the smallest factor of a non-prime number will not be > sqrt(n).
# Ex. non-prime = 100,
# 5*20
# 10*10,
# 20*5 # !! we have seen 5 before.
# Primality test: every prime > 3 has the form 6n+1 or 6n-1, and trial division only needs to go up to sqrt(n)
def find_primer(n):
    """Primality test using the 6k±1 optimisation.

    Every prime above 3 is congruent to 1 or 5 mod 6, and trial division
    only needs candidates up to sqrt(n).
    """
    if n <= 3:
        return n > 1
    if n % 6 not in (1, 5):
        return False
    for candidate in range(5, int(n ** 0.5) + 1, 6):
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
    return True
# Count the number of divisors
# Time complexity: O(sqrt(n))
def divisor2(num):
    """Count the divisors of num in O(sqrt(n)) by pairing d with num // d.

    The per-divisor debug print was removed.
    """
    count = 0
    sqrt = int(num ** 0.5)
    for d in range(1, sqrt + 1):
        if num % d == 0:
            count += 2  # counts both d and its cofactor num // d
    # A perfect square counted its root twice; correct for it.
    return count - (sqrt ** 2 == num)
# power of 4
class Solution:
    def isPowerOfFour(self, n: int) -> bool:
        """Return True iff n is a power of four.

        A power of four is a power of two (exactly one set bit) whose value is
        1 mod 3: 4**k = (3+1)**k ≡ 1 (mod 3), while 2*4**k ≡ 2. The previous
        32-bit mask test (n & 0xAAAAAAAA == 0) wrongly accepted powers of two
        above bit 31, e.g. 2**33; the modulus check is exact for any int.
        """
        return n > 0 and n & (n - 1) == 0 and n % 3 == 1
# power of 2
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """A positive power of two has one set bit; n & (n - 1) clears it to zero."""
        if n <= 0:
            return False
        return n & (n - 1) == 0
# power of 2
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Logarithm-based power-of-two test.

        math.log2 is computed in one step and is exact for true powers of two,
        unlike the previous log10(n) / log10(2), which compounds two rounding
        errors. NOTE(review): any float-based test can still misfire for very
        large n; prefer the bitwise n & (n - 1) == 0 variant above.
        """
        return n > 0 and math.log2(n).is_integer()
# devide
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """
        a / b = c
        keep subtracting b, a faster way is to -2*b, -4*b, -1024*b
        if a > 2 * b => c should be bigger than 2 (1<<1)
        if a > 4 * b => c should be bigger than 4 (1<<2)
        if a > 1024 * b => c should be bigger than 1024 (1<<10)
        a might == 1024*b + 4*b + 2*b
        c = (1024+4+2)
        2 * b == b << 1
        1024 * b == b << 10

        Truncates toward zero; the result is clamped to the 32-bit maximum
        (handles -2**31 / -1). The per-shift debug print was removed.
        """
        sig = (dividend < 0) == (divisor < 0)
        a, b, res = abs(dividend), abs(divisor), 0
        while a >= b:
            # Find the largest shift with b << (shift + 1) still <= a.
            shift = 0
            while a >= b << (shift + 1):
                shift += 1
            res += 1 << shift
            a -= b << shift
        return min(res if sig else -res, (1 << 31) - 1)
# power
class Solution:
    def myPow(self, x: float, n: int) -> float:
        """Compute x**n by iterative binary exponentiation (squaring).

        Negative exponents invert the base; each set bit of n contributes the
        current square of x to the product.
        """
        if n < 0:
            x, n = 1 / x, -n
        result = 1
        while n:
            if n & 1:
                result *= x
            x *= x
            n >>= 1
        return result
# sqrt
class Solution:
    def mySqrt(self, x: int) -> int:
        """Integer floor of sqrt(x) via binary search on [1, x]."""
        lo, hi = 1, x
        while lo <= hi:
            mid = (lo + hi) // 2
            squared = mid * mid
            if squared == x:
                return mid
            if squared > x:
                hi = mid - 1
            else:
                lo = mid + 1
        # On exit, hi is the largest value whose square does not exceed x.
        return hi
# root of number, x is the number and n is the root
def root(x, n):
    """Approximate the n-th root of x by bisection.

    Searches [0, max(1, x)] and stops once the midpoint is within 0.001 of the
    lower bound (or hits the root exactly). Returns the int 0 for x == 0.
    """
    if x == 0:
        return 0
    lo, hi = 0, max(1, x)
    mid = (lo + hi) / 2.0
    while mid - lo >= 0.001:
        powered = mid ** n
        if powered > x:
            hi = mid
        elif powered < x:
            lo = mid
        else:
            break
        mid = (lo + hi) / 2.0
    return mid
6,478 | 9b94e8aed2b0be2771a38cf2d1cf391772f3a9f0 | class SimulatorInfo(object):
    def __init__(self, name=None, device_type=None, sdk=None, device_id=None, sim_id=None):
        """Store simulator identity fields; every field is optional (None).

        NOTE(review): from this class alone, device_id is presumably the host
        device UDID and sim_id the simulator's own identifier -- confirm with
        the code that constructs SimulatorInfo.
        """
        self.name = name
        self.device_type = device_type
        self.sdk = sdk
        self.device_id = device_id
        self.sim_id = sim_id
|
6,479 | b48bc9475a8dc593ba858af8ed4e930ae290fd69 | from discord.ext import commands
import discord
import os
import random
bot = commands.Bot(command_prefix="!")
@bot.event
async def on_ready():
    """Log the bot's username once the gateway connection is established."""
    print(f"Logged in as {bot.user.name}")
@bot.command()
async def ping(ctx):
    """Health check: reply 'pong' in the invoking channel."""
    await ctx.send("pong")
# Lucky command, it picks a number between 0-50 and spams your dm's with that number
@bot.command()
async def lucky(ctx):
    """DM the invoking user a random number (0-50) of copies of the taunt."""
    repeats = random.randint(0, 50)
    for _ in range(repeats):
        await ctx.message.author.send("ARE YOU FELLING LUCKY???")
# Basic spam command, you can provide a message and specify how many messages
@bot.command()
async def spam(ctx, spamCtx="spam", spamCount=1):
    """Post `spamCtx` to the channel `spamCount` times (args arrive as strings)."""
    text = str(spamCtx)
    for _ in range(int(spamCount)):
        await ctx.send(text)
# Lets you mention a specific user who would like to spam in their DM's, you can specify a message
@bot.command()
async def attack(ctx, user: discord.User, *, message="GET SPAMMED NERD"):
    """DM the mentioned user the given message ten times."""
    for _ in range(10):
        await user.send(message)
if __name__ == "__main__":
bot.run(os.environ['TOKEN']) |
6,480 | b99093fb13c59d4b9bb0a4f32fb62423d6752118 | # Generated by Django 3.0.8 on 2020-07-29 18:30
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the Vehicle model, dropping the Weapon FK that points at it first."""

    dependencies = [
        ('scenario', '0005_auto_20200729_1149'),
    ]

    operations = [
        # The referencing field must go before the model it targets is deleted.
        migrations.RemoveField(
            model_name='weapon',
            name='vehicle',
        ),
        migrations.DeleteModel(
            name='Vehicle',
        ),
    ]
|
6,481 | 8425ee79fcb41799e5edbbab822f93dd40e39d8e | from collections import defaultdict
class Solution:
    def multiply(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:
        """Sparse matrix product A @ B.

        Only non-zero entries are indexed: each A row and B column is stored as
        a column-sorted list of (value, index) pairs, and every output cell is
        the two-pointer merge of one row list with one column list.
        """
        result = [[0] * len(B[0]) for _ in range(len(A))]
        row_entries = defaultdict(list)  # row of A -> [(value, col), ...]
        col_entries = defaultdict(list)  # col of B -> [(value, row), ...]
        for r, row in enumerate(A):
            for c, value in enumerate(row):
                if value:
                    row_entries[r].append((value, c))
        for r, row in enumerate(B):
            for c, value in enumerate(row):
                if value:
                    col_entries[c].append((value, r))
        for r in row_entries:
            for c in col_entries:
                a_list, b_list = row_entries[r], col_entries[c]
                i = j = 0
                acc = 0
                while i < len(a_list) and j < len(b_list):
                    a_val, a_key = a_list[i]
                    b_val, b_key = b_list[j]
                    if a_key == b_key:
                        acc += a_val * b_val
                        i += 1
                        j += 1
                    elif a_key < b_key:
                        i += 1
                    else:
                        j += 1
                result[r][c] = acc
        return result
|
6,482 | e1cc4e17bffcbbae3e7785e4c55acde167a8a50a | import os
import RPi.GPIO as GPIO
from google.cloud import firestore
import time
############Explicit Credential environment
# Point the Google client library at the service-account key file.
path="/home/pi/Desktop/Parking.json"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] =path
#GPIO starts
# Two digital inputs on BCM pins 2 and 21.
s1=2
s2=21
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(s1,GPIO.IN)
GPIO.setup(s2,GPIO.IN)
#firestore initialization
db = firestore.Client()
doc_ref_s1 = db.collection(u'sensors').document(u'sensor1')
doc_ref_s2 = db.collection(u'sensors').document(u'sensor2')
#here starts main
data1=0
data2=0
counter=0
while 1:
    # A LOW reading (False) means a car is present at the sensor.
    if(GPIO.input(s1)==False): #car found in slot 1
        data1=1
        counter+=1
    else: data1=0
    print("Received from 1: %s" % data1)
    ###Now starts for sensor 2
    # NOTE(review): sensor 1 increments and sensor 2 decrements the counter --
    # presumably entry/exit gates rather than independent slots; confirm.
    if(GPIO.input(s2)==False): #car found in slot 2
        data2=1
        counter-=1
    else: data2=0
    print("Received from 2: %s" % data2)
    # Clamp the occupancy count to the range [0, 8].
    if(counter>8):
        counter=8
    elif(counter<0):
        counter=0
    print("Counter= %s" % counter)
    # Publish the current count to Firestore every loop iteration.
    doc_ref_s1.update({
        u'priority': counter
    })
|
6,483 | 3153218fe1d67fdc1c1957ffcfdb380688c159c1 | from django.apps import AppConfig
class AutomationserverConfig(AppConfig):
    """Django application configuration for the ``automationserver`` app."""
    name = 'automationserver'
|
6,484 | 70845ab4aab80d988a5c01d0b4fb76e63b800527 | import sys
# Chunked pass-through filter: read stdin in fixed-size chunks and, based on
# the `qlty` ratio, either forward the fresh chunk or repeat the last fresh
# one (a crude "quality" degradation of the stream).
byte = int(sys.argv[1])    # chunk size in characters
qlty = float(sys.argv[2])  # fraction of chunks passed through fresh
# BUG FIX: the repeat buffer was previously unassigned until the first fresh
# chunk, so taking the repeat branch first (e.g. qlty == 0) raised NameError.
oo = ""
n = 0
while True:
    o = sys.stdin.read(byte)
    # (qlty * n) % 1 cycles through [0, 1); comparing it to qlty forwards
    # roughly a qlty-sized share of chunks fresh.
    if qlty > (qlty * n) % 1:
        oo = o
        sys.stdout.write(o)
    else:
        sys.stdout.write(oo)
    if not o:
        break
    n = n + 1
6,485 | 9f8fbfb8a9c849ca0e8881c479800c8e190e4a1c | import json
from logger import logger
def parse_json(text):
    """Extract the JSON object embedded in `text` (first '{' to last '}').

    Returns the decoded object, or None (after logging) when decoding fails.
    """
    start = text.find("{")
    # rfind: the close brace matching the outermost '{' is the LAST '}' --
    # the previous find("}") truncated any object containing nested braces.
    end = text.rfind("}") + 1
    try:
        data = json.loads(text[start:end])
        return data
    except Exception:
        logger.error("json解析失败:%s" % text)
|
6,486 | 0aad96de65cc125e5c026dfd72a9cc9f4ebd3dd2 | from nose.tools import with_setup, nottest
from tests.par_test_base import ParTestBase
from ProbPy import RandVar, Factor, ParFactor
class TestFactorMult(ParTestBase):
    """Check that ParFactor.mult matches Factor.mult at every max depth 0-3.

    The original fifteen test bodies were near-identical copy-paste (some even
    calling setMaxDepth twice on the same object); the shared machinery is
    factored into two private helpers while every public ``par_test_N`` case
    keeps its name and coverage.
    """

    def __init__(self):
        super().__init__()

    @staticmethod
    def _assert_same(expected, actual):
        # Sequential and parallel products must agree on both the variable
        # list and the value table.
        assert (
            expected.rand_vars == actual.rand_vars
            and expected.values == actual.values
        )

    def _check_scalar(self, factor, par_factor):
        """factor x {raw scalar, scalar factor} in both orders, depths 0-3."""
        for depth in range(4):
            par_factor.setMaxDepth(depth)
            res = [
                factor.mult(self.scalar),
                factor.mult(self.scalarf),
                self.scalarf.mult(factor),
            ]
            par_res = [
                par_factor.mult(self.scalar),
                par_factor.mult(self.par_scalarf),
                self.par_scalarf.mult(par_factor),
            ]
            for expected, actual in zip(res, par_res):
                self._assert_same(expected, actual)

    def _check_mult(self, factor_a, par_a, factor_b, par_b):
        """factor_a x factor_b against its parallel counterpart, depths 0-3."""
        for depth in range(4):
            par_a.setMaxDepth(depth)
            par_b.setMaxDepth(depth)
            self._assert_same(factor_a.mult(factor_b), par_a.mult(par_b))

    def par_test_0(self):
        """f(X), scalar"""
        self._check_scalar(self.X_factor, self.X_par_factor)

    def par_test_1(self):
        """f(X, Y), scalar"""
        self._check_scalar(self.XY_factor, self.XY_par_factor)

    def par_test_2(self):
        """f(X, Y, Z), scalar"""
        self._check_scalar(self.XYZ_factor, self.XYZ_par_factor)

    def par_test_3(self):
        """f(X), f(X)"""
        self._check_mult(self.X_factor, self.X_par_factor,
                         self.X_factor, self.X_par_factor)

    def par_test_4(self):
        """f(X), f(Y)"""
        self._check_mult(self.X_factor, self.X_par_factor,
                         self.Y_factor, self.Y_par_factor)

    def par_test_5(self):
        """f(X, Y) f(X)"""
        self._check_mult(self.XY_factor, self.XY_par_factor,
                         self.X_factor, self.X_par_factor)

    def par_test_6(self):
        """f(X, Y) f(Y)"""
        self._check_mult(self.XY_factor, self.XY_par_factor,
                         self.Y_factor, self.Y_par_factor)

    def par_test_7(self):
        """f(X, Y) f(Z)"""
        self._check_mult(self.XY_factor, self.XY_par_factor,
                         self.Z_factor, self.Z_par_factor)

    def par_test_8(self):
        """f(X, Y) f(X, Y)"""
        self._check_mult(self.XY_factor, self.XY_par_factor,
                         self.XY_factor, self.XY_par_factor)

    def par_test_9(self):
        """f(X, Y) F(X, Z)"""
        self._check_mult(self.XY_factor, self.XY_par_factor,
                         self.XZ_factor, self.XZ_par_factor)

    def par_test_10(self):
        """f(X, Y) f(Z, W)"""
        self._check_mult(self.XY_factor, self.XY_par_factor,
                         self.ZW_factor, self.ZW_par_factor)

    def par_test_11(self):
        """f(X, Y, Z) f(X, Y, Z)"""
        self._check_mult(self.XYZ_factor, self.XYZ_par_factor,
                         self.XYZ_factor, self.XYZ_par_factor)

    def par_test_12(self):
        """f(X, Y, Z) f(X, Y, W)"""
        self._check_mult(self.XYZ_factor, self.XYZ_par_factor,
                         self.XYW_factor, self.XYW_par_factor)

    def par_test_13(self):
        """f(X, Y, Z) f(X, K, W)"""
        self._check_mult(self.XYZ_factor, self.XYZ_par_factor,
                         self.XKW_factor, self.XKW_par_factor)

    def par_test_14(self):
        """f(X, Y, Z) f(T, K, W)"""
        self._check_mult(self.XYZ_factor, self.XYZ_par_factor,
                         self.TKW_factor, self.TKW_par_factor)
|
6,487 | 22bf65a20f7398b82f528112d2ba50f1dccd465c |
class Thing3:
    """Example of instance-level state: `letters` is set per instance in __init__."""

    def __init__(self):
        self.letters = 'xyz'
# print(Thing3.letters)
th = Thing3()
print(th.letters) |
6,488 | 15514d5636471b1a311641a40b6a00b81703cd2b | # Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import Pmod_IO
from . import PMOD_GROVE_G1
from . import PMOD_GROVE_G2
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
class Grove_PIR(Pmod_IO):
    """Driver for the Grove PIR motion sensor (hardware v1.2).

    The sensor behaves as a single digital input pin wired through the
    pmod-grove adapter.

    Attributes
    ----------
    microblaze : Pmod
        Microblaze processor instance used by this module.
    index : int
        The index of the Pmod pin, from 0 to 7.
    direction : str
        Can only be 'in' for PIR sensor.

    """
    def __init__(self, mb_info, gr_pin):
        """Create a PIR object on grove group G1 or G2.

        Parameters
        ----------
        mb_info : dict
            A dictionary storing Microblaze information, such as the
            IP name and the reset name.
        gr_pin: list
            A group of pins on pmod-grove adapter.

        """
        supported_groups = (PMOD_GROVE_G1, PMOD_GROVE_G2)
        if gr_pin not in supported_groups:
            raise ValueError("Group number can only be G1 - G2.")
        # The PIR output is read on the first pin of the group, as an input.
        super().__init__(mb_info, gr_pin[0], 'in')

    def read(self):
        """Return the motion reading: 0 when no motion, 1 otherwise."""
        return super().read()
|
6,489 | 5f4d83aa2b530417ecb1598510fb4778b111700b | #%% [markdown]
# # Look at intron-less gene enrichment in cyte-biased expressed genes.
# This is a quick look at whether primary spermatocyte biased genes are enriched in intronless genes.
# Yes, this is what we see.
#%%
import os
import pickle
import numpy as np
import pandas as pd
from scipy.stats import fisher_exact, contingency
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.api import formula as smf
from tabulate import tabulate
from larval_gonad.io import feather_to_cluster_rep_matrix
from larval_gonad.stats import run_chisq
from larval_gonad.plotting import plot_statsmodels_results
# Move into docs/ when launched from the repo root; the bare except
# deliberately ignores failure (e.g. when the cwd is already docs/).
try:
    os.chdir(os.path.join(os.getcwd(), "docs"))
    print(os.getcwd())
except:
    pass
#%%
# Get list of intronless FBgns
# NOTE(review): the open() handles given to pickle.load are never closed;
# harmless in a notebook, but a `with` block would be cleaner.
fbgns_no_intron = pickle.load(open("../output/paper_submission/intron_less_genes.pkl", "rb"))
background = pickle.load(open("../output/paper_submission/background_fbgns.pkl", "rb"))
#%%
# Get list of X chromosome genes (FBgn -> chromosome-arm Series)
fbgn2chrom = (
    pd.read_feather(
        "../references/gene_annotation_dmel_r6-26.feather", columns=["FBgn", "FB_chrom"]
    )
    .set_index("FBgn")
    .squeeze()
)
chrx_fbgns = fbgn2chrom[fbgn2chrom == "X"].index
#%%
# Get gonia biased and cyte biased genes.
# Bias flags use adjusted p <= 0.01; the sign of avg_logFC decides direction.
bias = (
    pd.read_feather("../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather")
    .assign(gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0), True, False))
    .assign(pct_gonia=lambda x: x["pct.1"])
    .assign(cyte_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False))
    .assign(pct_cyte=lambda x: x["pct.2"])
    .set_index("FBgn")
    .loc[:, ["gonia_bias", "cyte_bias", "pct_gonia", "pct_cyte"]]
    .reindex(background)
    .dropna()
)
#%%
# Munge all into a dataframe: bias flags, chromosome, intronless/X indicators,
# and a single categorical `bias` column (gonia / cyte / NS).
df = bias.copy().join(fbgn2chrom)
df["intronless"] = np.where(df.index.isin(fbgns_no_intron), True, False)
df["X"] = np.where(df.index.isin(chrx_fbgns), True, False)
df["bias"] = "NS"
df.loc[df.gonia_bias, "bias"] = "gonia"
df.loc[df.cyte_bias, "bias"] = "cyte"
#%% [markdown]
# ## How are intronless genes expressed in primary spermatocytes?
#%% [markdown]
# ### Intronless genes are expressed in fewer cells than genes with introns.
#%%
# Plot percent cytes with expression by bias*chrom*intronless
g = sns.FacetGrid(
    df,
    row="bias",
    row_order=["cyte", "gonia", "NS"],
    col="FB_chrom",
    col_order=["X", "2L", "2R", "3L", "3R"],
    sharex=True,
    sharey=True,
    margin_titles=True,
)
g.map(sns.boxplot, "intronless", "pct_cyte", order=[False, True])
g.set_ylabels("% Spermatocyte Cells\nWith Expression")
g.savefig("../output/docs/x_escapers_and_intronless_genes.svg", bbox_inches="tight")
#%% [markdown]
# ### However, intronless genes are enriched in genes with primary spermatocyte biased expression.
#%%
# Cross tab of intronless * bias, then chi-square with adjusted residuals
ct = pd.crosstab(df.intronless, df.bias)
res = run_chisq(ct).loc[(slice(None), ["observed", "adj std residual", "flag_sig"]), :]
print(tabulate(res.reset_index(), headers="keys", showindex=False, tablefmt="github"))
res
#%%
# Heatmap of z-scores (cluster x replicate) restricted to intronless genes
zscores_intronless = (
    feather_to_cluster_rep_matrix("../output/paper_submission/zscore_by_cluster_rep.feather")
    .reindex(fbgns_no_intron)
    .dropna()
)
ax = sns.clustermap(
    zscores_intronless,
    col_cluster=False,
    xticklabels=True,
    yticklabels=False,
    cmap="viridis",
    vmin=-3,
    vmax=3,
    rasterized=True,
)
ax.ax_heatmap.set(xlabel="", ylabel="Intronless Genes")
plt.savefig("../output/docs/x_escapers_and_intronless_genes_heatmap.svg", bbox_inches="tight")
#%% [markdown]
# ## Are intronless genes enriched in X chromosome escapers?
#%% [markdown]
# ### Intronless genes are depleted on the X chromosome.
#%%
# intronless genes across the genome (major arms plus 4 and Y only)
intronless2chrom = fbgn2chrom.to_frame().query(
    "FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']"
)
intronless2chrom["intronless"] = np.where(intronless2chrom.index.isin(fbgns_no_intron), True, False)
ct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)
res = run_chisq(ct).loc[(slice(None), ["observed", "adj std residual", "flag_sig"]), :]
display(res)
print(tabulate(res.reset_index(), headers="keys", showindex=False, tablefmt="github"))
#%% [markdown]
# ### X chromosome escapers are not enriched for intronless genes.
#%% [markdown]
# #### Main Effects Model Logit(intronless = cyte_biased + X chromosome)
#%%
# Main effects model (booleans recoded to 0/1 for the logit)
model = smf.logit("intronless ~ cyte_bias + X", data=df.replace({True: 1, False: 0}))
results = model.fit()
plot_statsmodels_results(
    "../output/docs/x_escapers_and_intronless_genes_main_effects.png", str(results.summary2())
)
display(results.summary2())
np.exp(results.params).rename("Odds Ratio").to_frame()[results.pvalues <= 0.05]
#%% [markdown]
# #### Full Model Logit(intronless = cyte_biased + X chromosome + cyte_biased * X chromosome)
#%%
# Full model with interaction term
model = smf.logit("intronless ~ cyte_bias * X", data=df.replace({True: 1, False: 0}))
results = model.fit()
plot_statsmodels_results(
    "../output/docs/x_escapers_and_intronless_genes_full.png", str(results.summary2())
)
display(results.summary2())
np.exp(results.params).rename("Odds Ratio").to_frame()[results.pvalues <= 0.05]
#%%
|
6,490 | 9e896d935cc57e580ed46cd501b41053bbaab38f | from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# Shared declarative base for every ORM model in this module.
Base = declarative_base()
class BusLine(Base):
    """ORM model: a bus line as published by the upstream transit feed."""
    __tablename__ = "bus_lines"
    id = Column(Integer, primary_key=True)
    # Feed-assigned line identifier (distinct from the surrogate PK `id`).
    line_id = Column(Integer)
    line_description = Column(String)
class BusRoute(Base):
    """ORM model: one route variant belonging to a bus line."""
    __tablename__ = "bus_routes"
    id = Column(Integer, primary_key=True)
    # Parent line's feed identifier (no FK constraint declared here).
    bus_line_id = Column(Integer)
    route_id = Column(Integer)
    route_description = Column(String)
    def as_dict(self):
        """Return the row as a plain dict keyed by column name."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusRoutePos(Base):
    """ORM model: one (lat, lon) point along a route's path."""
    __tablename__ = "bus_route_pos"
    id = Column(Integer, primary_key=True)
    # NOTE(review): FK targets bus_routes.route_id, which is not that table's
    # primary key — confirm route_id carries a unique constraint upstream.
    route_id = Column(Integer, ForeignKey("bus_routes.route_id"), nullable=False)
    # Coordinates are stored as strings, matching the raw feed format.
    lat = Column(String)
    lon = Column(String)
    def as_dict(self):
        """Return the row as a plain dict keyed by column name."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusPos(Base):
    """ORM model: a single observed position report for a bus."""
    __tablename__ = "bus_pos"
    id = Column(Integer, primary_key=True)
    # NOTE(review): FK targets bus_lines.line_id, not the table's PK — confirm
    # line_id is unique in bus_lines.
    bus_line_id = Column(Integer, ForeignKey("bus_lines.line_id"), nullable=False)
    # Vehicle identifier internal to the feed.
    bus_internal_id = Column(Integer)
    # Coordinates stored as strings, matching the raw feed format.
    lat = Column(String)
    lon = Column(String)
    orientation = Column(Integer)
    # Feed-provided report time as an integer timestamp.
    timestamp = Column(Integer)
    def as_dict(self):
        """Return the row as a plain dict keyed by column name."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusStop(Base):
    """ORM model: a stop served by a route."""
    __tablename__ = "bus_stops"
    id = Column(Integer, primary_key=True)
    # NOTE(review): FK targets bus_routes.route_id (non-PK) — see BusRoutePos.
    route_id = Column(Integer, ForeignKey("bus_routes.route_id"), nullable=False)
    # Coordinates stored as strings, matching the raw feed format.
    lat = Column(String)
    lon = Column(String)
    stop_code = Column(String)
    def as_dict(self):
        """Return the row as a plain dict keyed by column name."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusTrip(Base):
    """ORM model: latest observed trip state for a bus on a line/route."""
    __tablename__ = "bus_trip"
    id = Column(Integer, primary_key=True)
    bus_line_id = Column(Integer)
    bus_internal_id = Column(Integer)
    route_id = Column(Integer)
    # Row update time; default is evaluated at insert time (UTC).
    last_updated = Column(DateTime, default=datetime.utcnow)
    # Timestamp of the last position folded into this trip; 0 = none yet.
    last_pos_timestamp = Column(Integer, default=0)
|
6,491 | 20d480517226cb7fbced765554a02fa5cbc29033 | import secrets
from pathlib import Path
# Per-user cache/working directory for the hashcat WPA server.
HASHCAT_WPA_CACHE_DIR = Path.home() / ".hashcat" / "wpa-server"
# Repository-private directories (relative to this file's package root).
ROOT_PRIVATE_DIR = Path(__file__).parent.parent
WORDLISTS_DIR = ROOT_PRIVATE_DIR / "wordlists"
WORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / "wordlists"  # user custom wordlists
RULES_DIR = ROOT_PRIVATE_DIR / "rules"
MASKS_DIR = ROOT_PRIVATE_DIR / "masks"
LOGS_DIR = ROOT_PRIVATE_DIR / "logs"
DATABASE_DIR = HASHCAT_WPA_CACHE_DIR / "database"
# File tracking ESSIDs already attempted.
ESSID_TRIED = DATABASE_DIR / "essid_tried"
DATABASE_PATH = DATABASE_DIR / "hashcat_wpa.db"
# Hashcat
HASHCAT_STATUS_TIMER = 20  # seconds
BENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / "benchmark.csv"
HASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / "brain" / "hashcat_brain_password"
# Create all writable directories up front so later code can assume they exist.
HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)
WORDLISTS_USER_DIR.mkdir(exist_ok=True)
LOGS_DIR.mkdir(exist_ok=True)
DATABASE_DIR.mkdir(exist_ok=True)
HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)
class Config:
    """ Flask application config """
    # NOTE(review): the key is regenerated on every process start, which
    # invalidates existing sessions/signed cookies after a restart — confirm
    # this is intended rather than loading a persisted key.
    SECRET_KEY = secrets.token_bytes(64)
    # Flask-SQLAlchemy settings
    SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(DATABASE_PATH)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Airodump capture files
    CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / "captures"
|
6,492 | ab760ec4cbb9f616f38b0f0f2221987460c6f618 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 21:03:43 2019
@author: 00124175
"""
"""
读取txt文件
该文本中的分割符既有空格又有制表符('/t'),sep参数用'/s+',可以匹配任何空格。
"""
#header=None:没有每列的column name,可以自己设定
#encoding='gb2312':其他编码中文显示错误
#sep=',':用逗号来分隔每行的数据
#index_col=0:设置第1列数据作为index
import pandas as pd
data = pd.read_table("1206sjl.txt",header=None,encoding='gb2312',sep='|',skiprows=1)
data1 = pd.read_table("1206sjl.txt",header=None,encoding='gb2312',sep='|',nrows=1)
cols_name = data1.iloc[:,0:80]
mydata = data.iloc[:,0:80]#读所有的行,0-79列
cols_name = cols_name.values.tolist()#转换为list
mydata.columns = cols_name#加上列名称
mydata.rename(columns=lambda x: x.strip(' '),inplace=True)#去掉dataframe中的前后空格
mydata[['__lat,__deg','__lon,__deg']] = mydata[['__lat,__deg','__lon,__deg']].apply(pd.to_numeric)
my_need_data = mydata[(mydata['__lat,__deg']>39.14) & (mydata['__lat,__deg']<39.17)&(mydata['__lon,__deg']>117.51)&(mydata['__lon,__deg']<117.53)]
print(my_need_data.iloc[:,0:3])
my_need_data.to_csv("result_csv.csv", index=0)
|
6,493 | 3bc9c6a66f749858ea5801202b0ac80755c1b347 | from sklearn.naive_bayes import *
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
from optparse import OptionParser
from helper import FileHelper, Word2VecHelper, GraphHelper
import helper
from helper.VectorHelper import *
import os
import sys
#log = helper.enableLog()
def trainW2v(args):
    """Train/evaluate word2vec-based classifiers over every task configuration.

    Iterates the cartesian product of class set (task1/task2/pipeline2),
    SVM kernel, ontology type, and merge flag; for each combination it
    loads or builds a word2vec model, fits a Pipeline (mean-embedding
    vectorizer + SVC or BernoulliNB), prints a classification report and
    confusion matrix, and persists predictions and the classifier.

    NOTE(review): sys.stdout is redirected to a per-task log file and never
    restored or closed; also the loop variable `model` (kernel name) is
    overwritten below by the word2vec model object — it works only because
    args.classifier is assigned first.
    """
    clazz = [["Accidents", "Arts", "Attacks", "Economy", "Miscellaneous", "Politics", "Science", "Sports","undefined"], ["Accidents", "Arts", "Attacks", "Economy", "Miscellaneous", "Politics", "Science", "Sports"], ['positive', 'negative']]
    models = ['rbf', 'poly']
    FileHelper.create("log")
    C = 0.5 # SVM regularization parameter
    gamma = 0.5
    degree = 6
    types = ['generic', 'specific']
    if args.ontology =='dbpedia':
        types.append('normal')
    for classes in clazz:
        # task name is derived from the number of classes in the set
        task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes) == 8 else 'task1'
        train_instances, train_labels, train_texts = Word2VecHelper.loadData(classes, args, 'train')
        test_instances, test_labels, test_texts = Word2VecHelper.loadData(classes, args, 'test')
        # redirect all subsequent prints into the per-task log file
        sys.stdout = open(
            "log/{}_{}.txt".format(args.ontology, task), "w")
        for model in models:
            args.classifier = model
            for _type in types:
                args.type = _type
                for merge in range(2):
                    args.merge = merge
                    # rebuild the embedding model when forced or missing on disk
                    if args.force == 1 or not os.path.exists("{}_{}_{}.bin".format(args.ontology, args.type, 'merged' if args.merge==1 else 'simple')):
                        files = ["./train/{}/{}/positive.txt".format(args.ontology, args.type),
                                 "./train/{}/{}/negative.txt".format(args.ontology, args.type)]
                        model = Word2VecHelper.createModel(files, name="{}_{}".format(args.ontology, args.type),
                                                           merge=args.merge)
                    else:
                        model = Word2VecHelper.loadModel("{}_{}".format(args.ontology, args.type), merge=args.merge)
                    # word -> vector lookup used by the mean-embedding vectorizer
                    w2v = {w: vec for w, vec in zip(model.wv.index2word, model.wv.syn0)}
                    print("========== Model", args.ontology, args.type, args.merge, task, args.classifier, "==========")
                    if args.classifier == 'ben':
                        classifier = Pipeline([("w2v vect", MeanEmbeddingVectorizer(w2v)),
                                               ("clf", BernoulliNB())])
                    else:
                        classifier = Pipeline([("w2v vect", MeanEmbeddingVectorizer(w2v)),
                                               ("clf", svm.SVC(kernel=args.classifier, degree=degree, C=C, gamma=gamma,
                                                               probability=True))])
                    y_score = classifier.fit(train_texts, train_labels).predict_proba(test_texts)
                    y_pred = classifier.predict(test_texts)
                    #f.write("========= Classification Report ==========\n")
                    print("========= Classification Report ==========")
                    print(classification_report(test_labels, y_pred))
                    #f.write(classification_report(test_labels, y_pred)+"\n")
                    print("========= Confusion Matrix ==========")
                    #f.write("========= Confusion Matrix ==========\n")
                    print(confusion_matrix(test_labels,y_pred, labels=classes))
                    #f.write(confusion_matrix(test_labels,y_pred, labels=classes)+"\n")
                    GraphHelper.savePrediction("{}_{}_{}_{}_{}".format(args.ontology,args.type,args.classifier,task, args.merge), y_pred=y_pred,y_score=y_score,classes=classes,y=test_labels )
                    GraphHelper.saveClassifier(classifier, "{}_{}_{}_{}_{}.pkl".format(args.ontology,args.type,args.classifier,task, args.merge))
    #f.close()
if __name__ == "__main__":
parser = OptionParser('''%prog -o ontology -t type -f force ''')
parser.add_option('-o', '--ontology', dest='ontology', default="dbpedia")
parser.add_option('-t', '--type', dest='type', default="generic")
parser.add_option('-f', '--force', dest='force', default=0, type=int)
parser.add_option('-c', '--classifier', dest='classifier', default='poly')
parser.add_option('-j', '--job', dest='job', type=int, default=10)
parser.add_option('-w', '--window', dest='window', type=int, default=2)
parser.add_option('-s', '--size', dest='size', type=int, default=300)
parser.add_option('-m', '--merge', dest='merge', type=int, default=0)
parser.add_option('-e', '--experiment', dest='experiment', type=int, default=1)
opts, args = parser.parse_args()
trainW2v(opts) |
6,494 | 5890525b16b42578ac06e7ab2170c5613feea0a5 | # Benthic Parameters - USEPA OPP defaults from EXAMS
# Benthic compartment defaults (USEPA OPP values taken from EXAMS).
benthic_params = {
    "depth": 0.05,  # benthic depth (m)
    "porosity": 0.65,  # benthic porosity
    "bulk_density": 1,  # bulk density, dry solid mass/total vol (g/cm3)
    "froc": 0,  # benthic organic carbon fraction
    "doc": 5,  # benthic dissolved organic carbon content (mg/L)
    "bnmas": 0,  # benthic biomass intensity (g/m2)
    "d_over_dx": 1  # mass transfer coefficient for exchange between benthic and water column (m/s)
    # (can be modified later if data exists)
}
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
    """Partition pesticide loading for a reach between water column and benthos.

    Given a reach identifier, daily runoff volumes (m3/d), and daily pesticide
    masses carried by runoff and erosion, returns concentrations scaled by 1e6
    as (runoff_conc, aqconc_avg_wb, daily_avg, daily_peak).

    BUG(review): this module-level function references `self` (self.region,
    self.i) but takes no `self` parameter — it was evidently lifted out of a
    class method.  As written `self.region...` raises NameError, which the
    `except AttributeError` below does NOT catch; it needs either the class
    context restored or the region/input objects passed in explicitly.
    """
    from .parameters import soil, stream_channel, benthic
    try:
        # look up hydrology for this reach: flow q, velocity v, length l
        reach = self.region.flow_file.fetch(reach)
        q, v, l = reach.q, reach.v, reach.l
    except AttributeError:
        return None, None, (None, None)
    mean_runoff = runoff.mean()  # m3/d
    # baseflow = flow above mean runoff; zero where q <= mean runoff
    baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates), where=(q > mean_runoff))
    total_flow = runoff + baseflow
    mixing_cell = 40.  # meters
    # channel geometry from the power-law stage relation
    cross_section = total_flow / v
    width = stream_channel.a * np.power(cross_section, stream_channel.b)
    depth = cross_section / width
    surface_area = width * l
    volume = np.array([(depth * surface_area),  # Water column
                       (benthic.depth * surface_area * benthic.porosity)])  # Benthic zone
    # Compute concentration in runoff of runoff mass and erosion mass
    runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.n_dates), where=(runoff != 0))
    daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.zeros(self.i.n_dates),
                           where=(runoff_mass + erosion_mass > 0.0) & (mixing_cell > 0.0))
    # Divide mass loading between water column and benthic zones
    # (prben is the fraction of eroded mass routed to the benthic layer)
    mass_input = np.vstack([runoff_mass + ((1. - soil.prben) * erosion_mass),  # Water Column
                            soil.prben * erosion_mass]).T  # Benthic
    # Partition concentration into benthic and water column concentrations
    # This needs to be cleaned up
    # Compute benthic solute holding capacity
    fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth, surface_area, self.i.koc)
    # first-order loss rate: advection out of the mixing cell plus degradation
    k_adj = np.array((total_flow / mixing_cell) + (self.i.deg_photolysis + self.i.deg_hydrolysis) * fw1 + \
                     (self.i.deg_wc * fw1) + self.i.deg_benthic * (1 - fw1))
    aqconc_avg_wb, daily_avg, daily_peak = \
        concentration_loop(self.i.n_dates, daily_conc, k_adj, volume,
                           mass_input, fw1, fw2, omega, theta, self.i.deg_aqueous)
    # scale to ug/L-style units
    return map(lambda x: x * 1000000., (runoff_conc, aqconc_avg_wb, daily_avg, daily_peak))
@njit
def concentration_loop(n_dates, daily_concentration, k_adj, daily_volume, mass_input, fw1, fw2, omega, theta, deg_aq):
    """Daily mass-balance loop propagating pesticide mass through time.

    Returns (aqconc_avg_wb, daily_avg, daily_peak); the 2-row arrays carry
    water-column values in row 0 and benthic values in row 1, one column per day.
    """
    # Beginning day aquatic concentrations, considered Peak Aqueous Daily Conc in Water Column
    daily_peak = np.zeros((2, n_dates))
    daily_avg = np.zeros((2, n_dates))
    aqconc_avg_wb = np.zeros(n_dates)
    # Reset starting values
    exp_k = np.exp(-k_adj)
    aqconc_wb = 0
    antecedent_mass = np.zeros(2)  # mn
    for day in range(daily_concentration.size):
        # Add mass input to antecedent mass
        daily_mass = antecedent_mass + mass_input[day]
        # Convert to aqueous concentrations (peak) at beginning of day
        # JCH - fw comes from solute_holding_capacity. Fraction going into each section. Should fw[0] + fw[1] = 1?
        daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]
        daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]
        # Compute daily average concentration in the water body - when no Benthic layer considered
        aqconc_wb += daily_concentration[day]  # initial water body concentration for current time step
        # Daily avg aq conc in water body, area under curve/t = Ci/k*(1-e^-k), NO benthic
        aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])
        # initial water body concentration for next time step
        aqconc_wb *= exp_k[day]
        # For simul diffeq soln: mn1,mn2,mavg1,mavg2 = new_aqconc1, new_aqconc2, aqconc_avg1[d], aqconc_avg2[d]
        # Note: aqconc_avg1 and aqconc_avg2 are outputted - Daily avg aq conc in WC and Benthic regions
        new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day], deg_aq, omega, theta[day], daily_peak[:, day])
        daily_avg[0, day] = wc_avg
        daily_avg[1, day] = benthic_avg
        # Masses m1 and m2 after time step, t_end
        antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]
        antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]
    return aqconc_avg_wb, daily_avg, daily_peak
@njit
def simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):
    """
    ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:
              dm1/dt = Am1 + Bm2
              dm2/dt = Em1 + Fm2
    WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2

    mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T
    mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T
    mavg1 IS AVERAGE VALUE OF m1 OVER TIME T

    Returns (mn, mavg_wc, mavg_ben): end-of-step concentrations for
    [water column, benthic] plus the time-averaged value for each region.
    """
    t_end = 86400.  # seconds, time step of ONE DAY
    m1, m2 = daily_aq_peak
    # Calculate constants for simultaneous_diffeq: A,B,E,F
    # This reduces the model equivalent parameters to the coefficients needed for solving simultaneous_diffeq
    a = -gamma1 - omega * theta
    b = omega * theta
    e = omega
    f = -gamma2 - omega
    af = a + f
    dif = 4 * ((f * a) - (b * e))
    bbb = np.sqrt(af * af - dif)
    # eigenvalues of the 2x2 system matrix
    root1 = (af + bbb) / 2.
    root2 = (af - bbb) / 2.
    # eigenvector slopes and the decomposition of the initial condition
    dd = (root1 - a) / b
    ee = (root2 - a) / b
    ff = ee - dd
    x1 = (ee * m1 - m2) / ff
    y1 = (m2 - dd * m1) / ff
    # Calculate new concentrations for next step
    rt1 = root1 * t_end
    rt2 = root2 * t_end
    exrt1 = np.exp(rt1)
    exrt2 = np.exp(rt2)
    ccc = x1 * exrt1
    ddd = y1 * exrt2
    # values for m1 and m2 after time step t_end
    mn = np.zeros(2)
    mn[0] = ccc + ddd  # Water column
    mn[1] = dd * ccc + ee * ddd  # Benthic
    # AVERAGE DAILY CONCENTRATION SOLUTION: set up for daily average, but can be changed by adjusting time step
    gx = x1 / root1
    hx = y1 / root2
    term1 = gx * exrt1  # term3 = -X1/root1*exp(root1*T1)
    term2 = hx * exrt2  # term4 = -Y1/root2*exp(root2*T1
    term3 = -gx
    term4 = -hx
    mavg_wc = (term1 + term2 + term3 + term4) / t_end  # Water column
    mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end  # Benthic
    return mn, mavg_wc, mavg_ben
def solute_holding_capacity(depth, surface_area, koc):
    """Calculates Solute Holding capacities and mass transfer between water column and benthic regions.

    Parameters: daily water-column depth array (m), surface area (m2), and the
    chemical's organic-carbon partition coefficient (koc).
    Returns (fw1, fw2, theta, sed_conv_factor, omega): the freely-dissolved
    fractions in the water column and benthic pore water, the capacity ratio,
    a pore-water-to-sediment conversion factor, and the littoral/benthic mass
    transfer coefficient.
    """
    from .parameters import benthic, water_column
    # Aqueous volumes in each region
    vol1 = depth * surface_area  # total volume in water column, approximately equal to water volume alone
    vol2a = benthic.depth * surface_area  # total benthic volume
    vol2 = vol2a * benthic.porosity  # total benthic pore water volume
    # Default EXAMS conditions for partitioning
    kow = koc / .35  # DEFAULT EXAMS CONDITION ON Kow  p.35
    kpdoc1 = kow * .074  # DEFAULT RELATION IN EXAMS (LITTORAL)
    kpdoc2 = koc  # DEFAULT RELATION IN EXAMS (BENTHIC) p.16 of EXAMS 2.98 (or is it Kow*.46 ?)
    xkpb = 0.436 * kow ** .907  # DEFAULT RELATION IN EXAMS
    # mass in littoral region
    vol1a = depth[0] * surface_area  # initial volume corresponding with suspended matter reference
    m_sed_1 = water_column.sused * vol1a * .001  # SEDIMENT MASS LITTORAL
    m_bio_1 = water_column.plmas * vol1a * .001  # BIOLOGICAL MASS LITTORAL
    m_doc_1 = water_column.doc * vol1a * .001  # DOC MASS LITTORAL
    # partitioning coefficients of individual media
    kd_sed_1 = koc * water_column.froc * .001  # Kd of sediment in littoral [m3/kg]
    kd_sed_2 = koc * benthic.froc * .001  # Kd of sediment in benthic
    kd_bio = xkpb / 1000.  # Kd of biological organisms
    kd_doc_1 = kpdoc1 / 1000.  # Kd of DOC in littoral region
    kd_doc_2 = kpdoc2 / 1000.  # Kd of DOC in benthic region
    # mass in benthic region
    m_sed_2 = benthic.bulk_density * vol2a * 1000.  # as defined by EXAMS parameters m_sed_2 = BULKD/PCTWA*VOL2*100000.
    m_bio_2 = benthic.bnmas * surface_area * .001
    m_doc_2 = benthic.doc * vol2 * .001
    # solute holding capacity in regions 1 and 2
    capacity_1 = kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 * m_doc_1 + vol1
    capacity_2 = kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 * m_doc_2 + vol2
    # Fraction going to water column and benthic
    fw1 = vol1 / capacity_1  # fw1 is daily, vol1 is daily
    fw2 = vol2 / capacity_2
    theta = capacity_2 / capacity_1
    sed_conv_factor = vol2 / fw2 / m_sed_2  # converts pore water to [Total Conc normalized to sed mass]
    # Omega mass transfer - Calculates littoral to benthic mass transfer coefficient
    omega = benthic.d_over_dx / benthic.depth  # (m3/hr)/(3600 s/hr)
    return fw1, fw2, theta, sed_conv_factor, omega
|
6,495 | f3895f38be29fb07903237d8846cc9d657b39ea9 | import numpy as np
import cv2
from pixcel import *
from scipy import ndimage
import math
from socket import *
from config import *
from time import time
def find_bounding_boxes(fimage, lables):
    """Compute one bounding box per label.

    Each label is turned into a binary mask over `fimage`, and the extent of
    that mask becomes the label's box.
    """
    return [find_bounding_box((fimage == lable) + 0) for lable in lables]
def find_margined_bounding_boxes(fimage, lables, margins):
    """Compute one bounding box per label, shifted by the given (x, y) margins.

    Same as find_bounding_boxes, but every coordinate is offset by `margins`.
    """
    return [find_bounding_box((fimage == lable) + 0, margins) for lable in lables]
def find_bounding_box(binary_matrix, margins=(0, 0)):
    """Locate the extent of the nonzero region of `binary_matrix`.

    Returns [(x_start, y_start), (x_end, y_end)], each coordinate shifted
    by the corresponding entry of `margins` = (x_offset, y_offset).
    """
    x_off, y_off = margins
    rows, cols = np.nonzero(binary_matrix + 0)[0:2]
    # vertical extent comes from the row indices, horizontal from the columns
    top, bottom = y_off + np.amin(rows), y_off + np.amax(rows)
    left, right = x_off + np.amin(cols), x_off + np.amax(cols)
    return [(left, top), (right, bottom)]
def weightFilter(image, lables, weight):
    """Keep labels whose pixel count exceeds `weight`, dropping the largest one.

    Returns (binary image of retained labels scaled to 255, their bounding
    boxes).  The heaviest label (`max`, assumed background) is subtracted out
    and excluded from the boxes.
    """
    # NOTE: `max` shadows the builtin; kept for byte-compatibility.
    max = 0
    weights = np.zeros((lables))
    fimage = np.zeros_like(image)
    retained_lables = []
    for i in range(lables):
        weights[i] = np.sum(np.sum(image == i))
        # track the heaviest label seen so far
        if weights[i] > weights[max]:
            max = i
        # retain labels heavier than the threshold
        if weights[i] > weight:
            fimage += np.uint8((image == i) + 0)
            retained_lables.append(i)
    # NOTE(review): if NO label passed the threshold, `max` was never added to
    # fimage and this uint8 subtraction underflows (wraps to 255) at the
    # background pixels — confirm callers always have at least one retained label.
    fimage -= np.uint8((image == max) + 0)
    fimage = np.uint8(fimage * 255)
    boxes = []
    if (len(retained_lables) > 0):
        # `max` is necessarily retained here: its weight is the global maximum
        retained_lables.remove(max)
        boxes = find_bounding_boxes(image.copy(), retained_lables)
    return fimage, boxes
def weightFilterMini(image, weight):
    """Drop contours whose area is below `weight`.

    Returns (255-filled mask of the retained contours, their point bounding
    boxes).
    """
    image = np.uint8(image)
    # extract contours
    # NOTE(review): the 3-value unpack matches the OpenCV 3.x findContours
    # API; OpenCV 4.x returns only (contours, hierarchy) — confirm the
    # pinned OpenCV version.
    image, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    final_contours = []
    for cnt in contours:
        if cv2.contourArea(cnt) >= weight:
            # add it to final_contours
            final_contours.append(cnt)
    fimage = np.zeros((image.shape[:2]), np.uint8)
    # draw all retained contours (-1) filled (thickness -1) with value 255
    cv2.drawContours(fimage, final_contours, -1, 255, -1)
    boxes = RBox.toPointBoundingBoxes(RBox.fromClassicalBoundingBoxes([cv2.boundingRect(cnt) for cnt in final_contours]))
    return fimage, boxes
def weightFilterMargined(image, lables, weight, margins):
    """Variant of weightFilter whose boxes are offset by (x, y) `margins`.

    Returns (binary image of retained labels scaled to 255, margined boxes);
    the heaviest label is subtracted out and excluded, as in weightFilter.
    """
    # NOTE: `max` shadows the builtin; kept for byte-compatibility.
    max = 0
    weights = np.zeros((lables))
    fimage = np.zeros_like(image)
    retained_lables = []
    for i in range(lables):
        weights[i] = np.sum(np.sum(image == i))
        # track the heaviest label seen so far
        if weights[i] > weights[max]:
            max = i
        # retain labels heavier than the threshold
        if weights[i] > weight:
            fimage += np.uint8((image == i) + 0)
            retained_lables.append(i)
    # NOTE(review): same underflow hazard as weightFilter when no label was
    # retained — the subtraction wraps to 255 at background pixels.
    fimage -= np.uint8(image == max)
    fimage = np.uint8(fimage * 255)
    boxes = []
    if (len(retained_lables) > 0):
        # `max` is necessarily retained here: its weight is the global maximum
        retained_lables.remove(max)
        boxes = find_margined_bounding_boxes(image.copy(), retained_lables, margins)
    return fimage, boxes
def calculatePossiblePadding(box, shape, default = 20):
    """Compute per-side padding for `box`, clipped to the image bounds.

    Parameters
    ----------
    box : ((x_start, y_start), (x_end, y_end)) point bounding box.
    shape : image shape tuple, (height, width, ...).
    default : fixed pad in pixels; 0 switches to dynamic padding of
        20.5% of the box's width/height.

    Returns
    -------
    (pad_x_start, pad_x_end, pad_y_start, pad_y_end) — despite the names,
    the pad_x_* values pad vertically (rows, limited by height) and the
    pad_y_* values pad horizontally (columns, limited by width).  Each is
    reduced so the padded box stays inside the image.
    """
    w_pad = default
    h_pad = default
    # dynamic padding proportional to the box size
    if default == 0:
        rbox = RBox.fromPointBoundingBox(box)
        w_pad = round(0.205 * rbox.w)
        h_pad = round(0.205 * rbox.h)
    # extract with and height from shape
    height, width = shape[0:2]
    # extract starting, ending x and y from box
    ((x_start, y_start), (x_end, y_end)) = box
    # check if is it possible to add certain padding
    # if not add possible padding for all 4 points
    pad_x_start = h_pad
    if y_start - pad_x_start < 0:
        pad_x_start = y_start
    pad_y_start = w_pad
    if x_start - pad_y_start < 0:
        pad_y_start = x_start
    # BUGFIX: the end-side pads previously used the swapped dimension
    # (w_pad for the vertical pad, h_pad for the horizontal one), which gave
    # wrong padding whenever dynamic mode made w_pad != h_pad.  The vertical
    # pad must use h_pad and the horizontal pad w_pad, mirroring the start side.
    pad_x_end = h_pad
    if y_end + pad_x_end >= height:
        pad_x_end = height - y_end - 1
    pad_y_end = w_pad
    if x_end + pad_y_end >= width:
        pad_y_end = width - x_end - 1
    # return resultant padding
    return pad_x_start, pad_x_end, pad_y_start, pad_y_end
def findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):
    """Gaussian-smooth `frame`, threshold it, and label connected components.

    Smoothing suppresses small speckles before the threshold is applied.
    Returns (labeled_image, number_of_components).
    """
    smoothed = ndimage.gaussian_filter(frame.copy(), blur_radius)
    return ndimage.label(smoothed > threshold)
def drawBoundingBox(im, start, end, color):
    """Draw a 1-pixel rectangle from `start` to `end` (x, y points) onto `im` in place."""
    cv2.rectangle(im, start, end, color, 1)
def pwpBasedTracking(image, frame_models, threshold):
    """Apply per-patch color models to `image` and merge confident detections.

    Each entry of `frame_models` is (model, patch_box, reference_count); a
    patch's cleaned-up response is added to the output only when its nonzero
    pixel count exceeds max(reference_count * threshold, 10).
    Returns a uint8 map the size of `image`.
    """
    # 7x7 elliptical kernel used by all morphology passes below
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    predicted = np.zeros((image.shape[0:2]), np.uint8)
    # FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH
    for fm in frame_models:
        patch = extractPatch(image, fm[1])
        #patch = cv2.medianBlur(patch, 5)
        mask = np.zeros(patch.shape[0:2], np.uint8)
        res = applyModel(patch, mask, fm[0])
        # two open/close rounds to remove speckles and fill small holes
        res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
        res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
        res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
        res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
        # accept the patch only if enough pixels responded
        if(len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10) ):
            predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res;
    return predicted
def extractPatch(im, box):
    """Return the sub-image of `im` delimited by box = (row_start, row_end, col_start, col_end)."""
    row_start, row_end, col_start, col_end = box
    # slice rows and columns, keeping every channel
    return im[row_start:row_end, col_start:col_end, :]
def randomColor():
return np.random.randint(0, 255, (1, 3))[0].tolist()
def performColorProcessing(image, mask, iterations = 1):
    """Iteratively refine `mask` by re-fitting a color model on `image`.

    Each pass fits posteriors on the current binary mask, re-applies the model
    to the image, and morphologically opens the result to remove speckles.

    Parameters
    ----------
    image : source image array.
    mask : initial mask; nonzero pixels are treated as foreground.
    iterations : number of refinement passes.

    Returns
    -------
    The refined mask.
    """
    # 3x3 elliptical kernel for the opening step
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    for i in range(iterations):
        model = computePosteriors(image, np.uint8(mask > 0) + 0)
        mask = applyModel(image, mask, model)
        # BUGFIX: cv2.morphologyEx returns the transformed image rather than
        # modifying its input; the original discarded the return value, so
        # the opening was a no-op.  Assign the result back.
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)
    return mask
def killDyingLables(frame, mask, threshold = 0.5):
    """Mask the labeled `frame` and zero out labels that lost too many pixels.

    A label "dies" when, after applying `mask`, it retains less than
    `threshold` of its original pixel count.  Returns the masked, pruned
    label image (a new array; `frame` is left untouched).
    """
    n_lables = np.amax(frame) + 1
    # pixel count per label before masking (epsilon guards the division)
    before = np.array([np.sum(frame == lbl) for lbl in range(n_lables)]) + 0.00001
    masked = frame * mask
    # pixel count per label after masking
    after = np.array([np.sum(masked == lbl) for lbl in range(n_lables)])
    # zero every label whose survival ratio fell below the threshold
    for lbl, dying in enumerate((after / before) < threshold):
        if dying:
            masked -= np.uint8((masked == lbl) * lbl)
    return masked
def killSmallLables(frame, threshold = 150):
    """Zero out, in place, every label whose pixel count is below `threshold`.

    Label 0 can never be removed: its mask is multiplied by 0, making the
    subtraction a no-op.  Returns the same (mutated) `frame` array.
    """
    counts = np.array([np.sum(frame == lbl) for lbl in range(np.amax(frame) + 1)])
    for lbl in np.nonzero(counts < threshold)[0]:
        # subtracting lbl at each of its pixels resets them to zero
        frame -= np.uint8(np.uint8(frame == lbl) * lbl)
    return frame
class RBox:
def __init__(self):
# initialize atributes
self.x = 0
self.y = 0
self.w = 0
self.h = 0
@staticmethod
def fromClassicalBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[1]
rbox.w = box[2]
rbox.h = box[3]
# return rbox
return rbox
@staticmethod
def fromClassicalBoundingBoxes(boxes):
return [RBox.fromClassicalBoundingBox(box) for box in boxes]
@staticmethod
def fromRoughBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[2]
rbox.h = box[1] - box[0]
rbox.w = box[3] - box[2]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0][0]
rbox.y = box[0][1]
rbox.w = box[1][0] - box[0][0]
rbox.h = box[1][1] - box[0][1]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBoxes(boxes):
return [RBox.fromPointBoundingBox(box) for box in boxes]
def classicalBoundingBox(self):
# return array like bounding box
return [self.x, self.y, self.w, self.h]
def pointBoundingBox(self):
# return tuple of end points
return ((self.x, self.y), (self.x + self.w, self.y + self.h))
def area(self):
return self.h * self.w
def __or__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = min(self.x, other_box.x)
rbox.y = min(self.y, other_box.y)
rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y
return rbox
def __and__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = max(self.x, other_box.x)
rbox.y = max(self.y, other_box.y)
rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y
if rbox.w < 0 or rbox.h < 0:
# reinitailize or make it zero
rbox = RBox()
return rbox
def similarity(self, other_box):
# (A & B)/(A | B) = (A & B).area/(A.area + B.area - (A & B).area)
#return (self & other_box).area()/(self.area() + other_box.area() - (self & other_box).area())
min_area = min(self.area(), other_box.area())
return (self & other_box).area()/min_area
def __str__(self):
return "{} {} {} {}".format(self.x, self.y, self.w, self.h)
def __mul__(self, other_box):
# calculate similarity and return
return self.similarity(other_box)
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.w == other.w and self.h == other.h
@staticmethod
def similarityStats(boxes):
# create matrix out of boxes
sim_mat = np.array(boxes).reshape((-1, 1))
sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)
# return similarity matrix
return sim_mat
@staticmethod
def similarityThreshold(boxes, threshold = 0.8):
# get similarity matrix
sim_mat = RBox.similarityStats(boxes)
# find thresholded indexes
ind = np.array(np.nonzero(sim_mat > threshold))
# return in the form of list
return list(ind.T)
@staticmethod
def reduceBoxes(boxes, threshold=0.8):
    """Greedily merge overlapping boxes in place until no pair's
    similarity exceeds *threshold*; returns the (mutated) list."""
    similar_boxes = RBox.similarityThreshold(boxes, threshold)
    while len(similar_boxes) > 0:
        # Merge the first offending pair; each pair is [row, col] with
        # row > col (taken from the strictly-lower-triangular matrix).
        union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]
        # remove similar boxes
        # (the larger index is deleted first so the smaller stays valid)
        del boxes[similar_boxes[0][0]]
        del boxes[similar_boxes[0][1]]
        boxes.append(union)
        similar_boxes = RBox.similarityThreshold(boxes, threshold)
    return boxes
@staticmethod
def toPointBoundingBoxes(boxes):
    """Map every box to its ((x0, y0), (x1, y1)) corner form."""
    return [each.pointBoundingBox() for each in boxes]
@staticmethod
def toClassicBoundingBoxes(boxes):
    """Map every box to its flat [x, y, w, h] form."""
    return [each.classicalBoundingBox() for each in boxes]
def extractPatchFromImage(self, image, square=False):
    """Crop this box's region out of *image* and return it (a numpy view).

    When *square* is True and the box is not square, the shorter axis is
    grown to match the longer one: first by moving the start edge back
    (as far as the image border allows), then by moving the end edge
    forward. If the end edge would cross the image border, the crop is
    clamped there and the *other* axis is shrunk by the overflow so the
    result stays square.

    Fixes an off-by-one in the border clamp: a slice end equal to the
    image size is valid (slice ends are exclusive), and the overflowing
    axis is now clamped explicitly instead of relying on silent numpy
    slice clamping, which previously produced a non-square patch at the
    image edge.
    """
    # get bounding box end points
    (start, end) = self.pointBoundingBox()
    start, end = list(start), list(end)
    if square:
        im_h, im_w = image.shape[0:2]
        # adjust start and end so that height and width are equal
        if self.h != self.w:
            if self.h > self.w:
                # grow along x; shift the left edge first
                diff = self.h - self.w
                if start[0] >= int(diff / 2):
                    start[0] -= math.floor(diff / 2)
                    diff -= math.floor(diff / 2)
                else:
                    diff -= start[0]
                    start[0] = 0
                end[0] += diff
                if end[0] > im_w:
                    # clamp to the right border; shrink y by the overflow
                    diff = end[0] - im_w
                    end[0] = im_w
                    end[1] -= diff
            else:
                # grow along y; shift the top edge first
                diff = self.w - self.h
                if start[1] >= int(diff / 2):
                    start[1] -= math.floor(diff / 2)
                    diff -= math.floor(diff / 2)
                else:
                    diff -= start[1]
                    start[1] = 0
                end[1] += diff
                if end[1] > im_h:
                    # clamp to the bottom border; shrink x by the overflow
                    diff = end[1] - im_h
                    end[1] = im_h
                    end[0] -= diff
    # return patch
    return image[start[1]: end[1], start[0]: end[0]]
def addPatchtoImage(self, image, patch):
    """Paste *patch* into this box's region of *image* (in place) and
    return the modified image."""
    ((x0, y0), (x1, y1)) = self.pointBoundingBox()
    image[y0: y1, x0: x1] = patch
    return image
def askForLable(patch):
    """Send an image patch to the labelling server and return its label.

    Writes *patch* to "patch.jpg", streams the file to (TCP_IP, TCP_PORT)
    in BUFFER_SIZE chunks, half-closes the socket to signal end of data,
    then reads the server's reply and decodes it as UTF-8.

    TCP_IP, TCP_PORT, BUFFER_SIZE and the socket names (socket, AF_INET,
    SOCK_STREAM, SHUT_WR) are assumed to be defined elsewhere in this
    file — TODO confirm. NOTE(review): the name keeps the original
    "Lable" spelling so existing callers keep working.
    """
    # write an image to send
    cv2.imwrite("patch.jpg", patch)
    # setup client socket
    clientSock = socket(AF_INET, SOCK_STREAM)
    try:
        clientSock.connect((TCP_IP, TCP_PORT))
        # 'with' guarantees the file is closed even if sending fails
        with open("patch.jpg", 'rb') as image:
            data = image.read(BUFFER_SIZE)
            while data:
                # sendall (not send): guarantees the whole chunk goes out
                clientSock.sendall(data)
                data = image.read(BUFFER_SIZE)
        # signal server to end data stream
        clientSock.shutdown(SHUT_WR)
        # receive the label as bytes and convert it to a string
        label = clientSock.recv(1024)
        return label.decode("utf-8")
    finally:
        # fix: previously the socket was never closed (leaked per call)
        clientSock.close()
|
6,496 | af152e0b739305866902ee141f94641b17ff03ea | """#########################################################################
Author: Yingru Liu
Institute: Stony Brook University
Descriptions: transfer the numpy files of the midi songs into midi files.
(Because the code provided by the RNN-RBM tutorial to save midi
runs in python 2.7 but my code is in python 3.6)
----2017.12.29
#########################################################################"""
import numpy as np
from midi.utils import midiread, midiwrite
#
# Output directories for the ground truth and each model's samples.
CGRNN_FOLDER = "Samples/CGRNN/"
SRNN_FOLDER = "Samples/SRNN/"
VRNN_FOLDER = "Samples/VRNN/"
ssRnnRbm_FOLDER = "Samples/ssRnnRbm/"
Ground_FOLDER = "Samples/"

# (folder, filename prefix) pairs, in the original conversion order.
_SAMPLE_SOURCES = (
    (Ground_FOLDER, 'Ground-True-'),
    (CGRNN_FOLDER, 'CGRNN-'),
    (SRNN_FOLDER, 'SRNN-'),
    (VRNN_FOLDER, 'VRNN-'),
    (ssRnnRbm_FOLDER, 'ssRnnRbm-'),
)

# Convert the 20 saved piano-rolls of every source from .npy to .mid.
# The (1, 128) / 0.25 arguments match midiwrite's (r, dt) parameters —
# presumably the pitch range and seconds per time step; TODO confirm.
# (Refactor: removed a dead trailing `pass` and deduplicated the five
# copy-pasted load/write statement pairs.)
for i in range(20):
    print('The ' + str(i) + '-th graph.')
    for folder, prefix in _SAMPLE_SOURCES:
        name = folder + prefix + str(i)
        sample = np.load(name + '.npy')
        midiwrite(name + '.mid', sample, (1, 128), 0.25)
6,497 | 69a3471ee8d2c317264b667d6ae0f9b500c6222f | from xml.dom import minidom
import simplejson as json
import re
camel_split = re.compile(r'(^[a-z]+|[A-Z][a-z]+|[a-z0-9]+)')
def get_items(xml):
for obj_node in xml.getElementsByTagName('item'):
obj = dict(obj_node.attributes.items())
if 'name' in obj:
obj['title'] = (' '.join(camel_split.findall(obj['name']))).title()
print 'processing %s' % obj['name']
else:
raise Exception('Item has no name: %s' % obj)
subs =[]
for sub_node in obj_node.getElementsByTagName('ingredient'):
sub = dict(sub_node.attributes.items())
if 'class' in sub:
subs2 = []
for sub2_node in obj_node.getElementsByTagName('ingredient'):
sub2 = dict(sub2_node.attributes.items())
subs2.append(sub2)
sub['properties'] = subs2
subs.append(sub)
obj['properties'] = subs
yield obj
# Build a name -> item mapping from the game's item definition XML.
items = {item['name']: item for item in get_items(xml=minidom.parse('/home/sdtd/engine/Data/Config/items.xml'))}
#with open('/var/www/sdtd/static/recipes.json', 'w') as of:
# of.write(json.dumps(recipes))
# NOTE(review): the disabled export above references `recipes`, which is
# not defined anywhere in view — it would fail if re-enabled as-is.
print items
|
6,498 | 0ec3ca0f952dbc09c7a7a3e746c0aeab28ee9834 | from .base import BaseEngine
import re
class YandexSearch(BaseEngine):
    """Scraper backend for yandex.com search results."""

    base_url = "https://yandex.com"
    search_url = "https://yandex.com/search/"

    def get_params(self, query, **params):
        # 'text' carries the query; 'p' (page number) starts unset.
        params["text"] = query
        params["p"] = None
        return params

    def next_url(self, soup):
        # Collect every relative next-page link from the raw markup and
        # follow the last one found, if any.
        links = re.findall(r'"(/search/\?[^>]+p=[^"]+)', str(soup))
        if links:
            return self.base_url + links[-1]

    def parse_soup(self, soup):
        # Each organic result sits in an <li class="serp-item">; yield
        # the href of its first anchor when present.
        for item in soup.find_all('li', class_="serp-item"):
            href = item.a.get("href")
            if href:
                yield href

    def captcha(self, response):
        # Yandex redirects suspected bots to a /showcaptcha page.
        return "showcaptcha" in response.url
|
6,499 | 2ecd234753fabbca2829dc86db2f740e371e4ea7 |
# coding: utf-8
# # Configuration
# In[1]:
# Connection settings for the local MongoDB holding the Open Food Facts dump.
CONNECTION_STRING = "mongodb://localhost:27017"
DATABASE_NAME = "off"
COLLECTION_NAME = "products"
# # MongoDB connection
# In[2]:
from pymongo import MongoClient
from bson.code import Code
import plotly, pymongo
plotly.offline.init_notebook_mode()
from plotly.graph_objs import Bar
# Handles to the database and the products collection used below.
client = MongoClient(CONNECTION_STRING)
db = client[DATABASE_NAME]
openfood = db[COLLECTION_NAME]
# # Nutrition grade
# In[6]:
# Map/reduce: count products per nutrition grade. Only documents with a
# non-empty nutrition_grades field emit (grade, 1).
mapper = Code("""
function () {
if (typeof this.nutrition_grades !== 'undefined' && this.nutrition_grades !== ""){
emit(this.nutrition_grades, 1);
}
}""")
# Sum the per-grade counts emitted by the mapper.
reducer = Code("""
function (key, values) {
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
}""")
# Runs server-side and returns [{_id: grade, value: count}, ...] inline
# (no output collection is created).
grades = openfood.inline_map_reduce(mapper, reducer)
print grades
# In[14]:
import numpy as np
import matplotlib.pyplot as plt
# Bar chart: one bar per grade label, height = product count.
objects = [item['_id'] for item in grades] # [a,b,c,d,e]
y_pos = np.arange(len(objects))
count = [item['value'] for item in grades]
plt.bar(y_pos, count, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Count')
plt.title('Nutrition Grades')
plt.show()
# Each food entry states the countries which the food it is sold. Below, we try to find out the list of countries which the food are sold.
# # Nutrients (100g)
# In[16]:
# Map/reduce: collect the distinct nutriment field names that report
# per-100g values (keys matching *100g). The emitted value is unused.
mapper = Code("""
function () {
if (typeof this.nutriments !== 'undefined' && this.nutriments !== "") {
for (var key in this.nutriments) {
if (key.match(/.*100g/))
emit(key, null);
}
}
}""")
# Identity reducer: only the distinct keys matter here.
reducer = Code("""
function (key, values) {
return key
}""")
nutriments_100g_fields = openfood.inline_map_reduce(mapper, reducer)
# Each result doc is {_id: field_name, value: ...}; print the raw docs...
for n in nutriments_100g_fields:
    print n
# In[17]:
# ...then just the field names.
for n in nutriments_100g_fields:
    print n['_id']
# # Additives
# In[24]:
# Map/reduce: count how often each additive code occurs. The raw
# "additives" string looks like "[ ... -> exists -- ... ] [ ... ]";
# strip the outer brackets, split per additive, and emit its code with
# count 1.
mapper = Code("""
function () {
if (typeof this.additives !== "undefined" && this.additives_n >= 0){
var add = this.additives.substring(3, this.additives.length-3); // remove "^ [ " and " ] $"
var add_str = add.split(" ] [ ");
for (var i = 0; i < add_str.length; i++){
var additive_parts = add_str[i].split(" -> exists -- ");
if (additive_parts.length == 2){
var add_code = additive_parts[0].split(" -> ")[1];
emit(add_code, 1);
}
}
}
}""")
# Sum the per-additive counts.
reducer = Code("""
function (key, values) {
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
}""")
additives_stats = openfood.inline_map_reduce(mapper, reducer)
print additives_stats
# In[29]:
# Sort additives by ascending frequency and list them as "count: code".
add_clean = [(x['value'], x['_id']) for x in additives_stats]
add_clean.sort()
print len(add_clean)
for add in add_clean:
    print "{}: {}".format(add[0], add[1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.