seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12413589703 | # -*- coding: utf-8 -*-
"""
@author: Gallien FRESNAIS
"""
import pygame
from pygame.locals import *
# - Local - #
from App.src.pygame_test.app_settings import *
# pygame.mixer.init()
"""
Handles the events from the main pygame program
"""
def event_handler():
    """Drain the pygame event queue; power off on window-close or ESC release."""
    for evt in pygame.event.get():
        quit_requested = evt.type == pygame.QUIT
        escape_released = evt.type == pygame.KEYUP and evt.key == K_ESCAPE
        if quit_requested or escape_released:
            Power_Off()
"""
Displays text
"""
def Display_Text(surface, text, size=20, color=WHITE, pos=None, centered=True):
    """Render *text* onto *surface*.

    Args:
        surface: destination pygame surface.
        text: the string to draw.
        size: font size for the default pygame font.
        color: RGB colour of the text.
        pos: [x, y] position; defaults to the top-left corner (0, 0).
        centered: when True, *pos* is interpreted as the text centre.
    """
    # if no position is given, default to the origin
    if pos is None:
        pos = [0, 0]
    # initialize the text font and render the text
    font = pygame.font.Font(None, size)
    text_obj = font.render(text, True, color).convert_alpha()
    width, height = text_obj.get_size()
    # BUGFIX: work on local coordinates instead of mutating the caller's
    # ``pos`` list in place — the old in-place shift corrupted positions
    # that callers reuse across frames.
    x, y = pos
    if centered:
        x -= width // 2
        y -= height // 2
    # add the text to the window surface
    surface.blit(text_obj, (x, y))
"""
Creates a Button
"""
def Button(surface, x, y, target=None, old_state="title", new_state=False, option=False, args=None):
    """Draw a 200x35 button centred on (x, y) and react to the mouse.

    Modes (checked in this order while hovered):
      * ``new_state``: left click returns ``new_state``, otherwise ``old_state``;
      * ``target``: left click calls ``target(args)`` (or ``target()``) and
        returns its result;
      * ``option``: returns True on left click, False otherwise.
    When not hovered, returns ``old_state`` (state mode) or False (option
    mode). Some paths return None implicitly.
    """
    # gets the mouse position
    pos = pygame.mouse.get_pos()
    # gets the pressed mouse click event
    keys = pygame.mouse.get_pressed()
    w, h = 200, 35
    x, y = x, y  # NOTE(review): no-op assignment kept as-is
    # creates the button surface
    rect = pygame.surface.Surface([w, h])
    rect.convert()
    selected = False
    # hover test against the rectangle centred on (x, y)
    if x - w // 2 < pos[0] < x - w // 2 + w and y - h // 2 < pos[1] < y - h // 2 + h:
        selected = True
        rect.fill([200, 200, 255])  # hovered colour
    else:
        selected = False
        rect.fill([170, 170, 255])  # idle colour
    surface.blit(rect, [x - w // 2, y - h // 2])
    if selected:
        if new_state:
            # if left click is pressed
            if keys[0]:
                return new_state
            else:
                return old_state
        elif target:
            if keys[0]:
                if args is not None:
                    return target(args)
                else:
                    return target()
        elif option:
            if keys[0]:
                return True
            else:
                return False
    else:
        if new_state:
            return old_state
        elif option:
            return False
"""
Creates a multi-line text
"""
def multi_line_text(surface, size=20, spacing=20, color=WHITE, pos=None, centered=True, *text):
    """Draw each string in *text* on its own line; the token "<n>" skips a line."""
    base_x, base_y = pos if pos is not None else (0, 0)
    offset = 0
    for line in text:
        # "<n>" acts as a blank-line marker; every entry advances the cursor
        if line != "<n>":
            Display_Text(surface, line, size, color, [base_x, base_y + offset], centered)
        offset += spacing
"""
Exits the application
"""
def Power_Off():
    """Release pygame resources and terminate the program via SystemExit."""
    pygame.quit()
    raise SystemExit
| gfresnais/Lucky_Number_AI | App/src/pygame_test/app_functions.py | app_functions.py | py | 3,020 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pygame.event.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYUP",
... |
28672032992 | import math
import gym
from gym import spaces, logger
from random import seed
from random import randint
from PIL import Image
from gym.utils import seeding
import numpy as np
from numpy import asarray
import cv2
def init_map(size, num_obs, border_size):
    """Build a random square world map: white free space, black border/obstacles."""
    # Single-channel black canvas with a white traversable interior
    world = np.zeros((size, size, 1), np.uint8)
    cv2.rectangle(world, (border_size, border_size), (size - border_size, size - border_size), 255,
                  -1)  # world boundary
    # Scatter a random number of obstacles, each a rectangle or a circle
    for _ in range(randint(0, num_obs) + 1):
        shape_kind = randint(0, 1)
        obs = generate_obs(shape_kind)
        if shape_kind == 0:
            cv2.rectangle(world, obs[0], obs[1], 0, -1)
        else:
            cv2.circle(world, (obs[0], obs[1]), obs[2], 0, -1)
    return world
def generate_obs(selection, max_x=600, max_y=600):
    """Return a random obstacle description.

    Args:
        selection: 0 for a 60x60 axis-aligned square (returned as its two
            corner points), 1 for a circle (returned as (cx, cy, radius)).
        max_x, max_y: inclusive upper bounds for the obstacle centre.
            Previously hard-coded to 600 even though the map is 750 wide,
            which left a band of the map obstacle-free; parameterized for
            backward-compatible generality.

    Returns:
        ((x-30, y-30), (x+30, y+30)) when selection == 0,
        (x, y, 20) when selection == 1.
    """
    center_x = randint(0, max_x)
    center_y = randint(0, max_y)
    if selection == 0:
        return ((center_x - 30, center_y - 30), (center_x + 30, center_y + 30))
    return (center_x, center_y, 20)
def select_agent_pos(env, border_size):
    """Pick a spawn point whose 7x7 pixel neighbourhood is entirely free space."""
    rows, cols, _ = env.shape
    spawn_points = []
    while not spawn_points:
        x = randint(border_size, cols - border_size)
        y = randint(border_size, rows - border_size)
        patch = asarray(env[y - 3:y + 4, x - 3:x + 4])
        # 12495 == 49 * 255: every pixel of the 7x7 patch is white (free)
        if patch.sum() == 12495:
            spawn_points.append([x, y])
    return spawn_points
class WorldEnv(gym.Env):
    """Gym-style exploration environment.

    A hidden ``global_map`` (white = free, black = obstacle) is gradually
    revealed onto a grey ``slam_map``; the agent is rewarded for newly
    uncovered area and penalised for collisions. Episodes end on collision
    or after ``maxstep`` steps.
    """
    def __init__(self):
        # RESET PARAMETERS
        self.agent_step = 0   # steps taken in the current episode
        self.maxstep = 1000   # episode step limit
        # MAP PARAMETERS
        self.GLOBAL_MAP_SIZE = 750
        self.NUM_OBS = 15       # maximum number of random obstacles
        self.BORDER_SIZE = 50   # impassable border width (pixels)
        self.global_map = np.ones((self.GLOBAL_MAP_SIZE, self.GLOBAL_MAP_SIZE, 1), np.uint8)
        self.slam_map = self.global_map.copy()
        # AGENT PARAMETERS
        self.agent_pos_x = 0
        self.agent_pos_y = 0
        self.agent_size = 5       # drawn agent radius (pixels)
        self.agent_color = 100    # grey value used to draw the agent
        self.agent_range = 25     # half-size of the square field of view
        self.agent_step_size = 5  # pixels moved per action
        # --- OBSERVATION AND ACTION SPACE ---
        # Definition of observation space. We input pixel values between 0 - 255
        # NOTE(review): this is a plain ndarray, not a gym.spaces object — confirm
        self.observation_space = np.array(self.slam_map)
        # Definition of action space: 8 compass movement directions.
        self.action_space = spaces.Discrete(8)
        # action index -> (dx, dy) pixel displacement
        self.action_dic = {0: (-self.agent_step_size, 0),
                           1: (self.agent_step_size, 0),
                           2: (0, -self.agent_step_size),
                           3: (0, self.agent_step_size),
                           4: (self.agent_step_size, self.agent_step_size),
                           5: (-self.agent_step_size, -self.agent_step_size),
                           6: (self.agent_step_size, -self.agent_step_size),
                           7: (-self.agent_step_size, self.agent_step_size)}
    def reset(self):
        """Generate a fresh map, spawn the agent, and return the initial SLAM map."""
        # We reset the step
        self.agent_step = 0
        # We collect the generated map
        self.global_map = init_map(self.GLOBAL_MAP_SIZE, self.NUM_OBS, self.BORDER_SIZE)
        # SLAM MAP creation: grey (150) marks "unexplored"
        self.slam_map = self.global_map.copy()
        self.slam_map.fill(150)
        # We get a collection of possible spawn-points
        possible_spawn_points = select_agent_pos(self.global_map, self.BORDER_SIZE)
        # We draw a random spawn-point and draw it on the map
        self.agent_pos_x, self.agent_pos_y = possible_spawn_points[0]
        pos_x = self.agent_pos_x
        pos_y = self.agent_pos_y
        # StartY:EndY, StartX:EndX. Initial visible area for the SLAM map
        crop_img = self.global_map[pos_y - self.agent_range:pos_y + self.agent_range,
                                   pos_x - self.agent_range:pos_x + self.agent_range]
        # We add the initial visible area to the slam map
        self.slam_map[pos_y - self.agent_range:pos_y + self.agent_range,
                      pos_x - self.agent_range:pos_x + self.agent_range] = crop_img
        # We add the agent to the global map
        cv2.circle(self.global_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        cv2.circle(self.slam_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        return self.slam_map
    def step(self, action):
        """Apply *action* and return (slam_map, done, reward).

        NOTE(review): the return order (obs, done, reward) differs from the
        usual gym (obs, reward, done, info) contract — confirm callers.
        """
        # --- Step related variables ---
        collision = False
        done = False
        reward = 0
        self.agent_step += 1
        if self.agent_step == self.maxstep:  # If the agent has taken a certain number of steps we reset
            done = True
        # For removal of the previous position on the global map
        pre_agent_x = self.agent_pos_x
        pre_agent_y = self.agent_pos_y
        cv2.circle(self.global_map, (pre_agent_x, pre_agent_y), self.agent_size, 255, -1)  # Remove previous global position
        cv2.circle(self.slam_map, (pre_agent_x, pre_agent_y), self.agent_size, 255, -1)  # Remove previous slam position
        old_slam_map = self.slam_map.copy()
        # --- Defining movement ---
        move_x, move_y = self.action_dic[action]
        self.agent_pos_x += move_x
        self.agent_pos_y += move_y
        # --- Updating position ---
        # Adding new area to SLAM map
        pos_x = self.agent_pos_x
        pos_y = self.agent_pos_y
        # Checking collision
        test_spot = self.global_map[pos_y - 3:pos_y + 4,
                                    pos_x - 3:pos_x + 4]  # We check a 7x7 pixel patch around the agent. MAYBE CORRECT??
        test_spot_array = asarray(test_spot)  # We convert the patch to a array
        # 12495 == 49 * 255: any smaller sum means an obstacle/border pixel nearby
        if test_spot_array.sum() != 12495:
            collision = True
            done = True
        # New visible area for the SLAM map
        crop_img = self.global_map[pos_y - self.agent_range:pos_y + self.agent_range,
                                   pos_x - self.agent_range:pos_x + self.agent_range]
        # We add the new visible area to the slam map
        self.slam_map[pos_y - self.agent_range:pos_y + self.agent_range,
                      pos_x - self.agent_range:pos_x + self.agent_range] = crop_img
        # Checking difference: pixels newly revealed compared to the previous step
        diff = cv2.absdiff(old_slam_map, self.slam_map)
        _, thresh = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
        crop_img = thresh[pos_y - self.agent_range:pos_y + self.agent_range,
                          pos_x - self.agent_range:pos_x + self.agent_range]
        diff = asarray(crop_img)
        cv2.imshow("diff", crop_img)
        # We add the new position of the agent to the global and slam map
        cv2.circle(self.global_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        cv2.circle(self.slam_map, (self.agent_pos_x, self.agent_pos_y), self.agent_size, self.agent_color, -1)
        # Defining reward: fraction of newly revealed area
        # (63750 presumably normalises the visible patch's maximum diff sum — TODO confirm)
        if collision:
            reward -= 100
        else:
            num = diff.sum()/63750
            if num <= 0:
                reward -= 1
            else:
                reward += round(num, 2)
        return self.slam_map, done, reward
    def render(self):
        """Show the global and SLAM maps in OpenCV windows."""
        cv2.imshow("Global Map", self.global_map)
        cv2.imshow("SLAM Map", self.slam_map)
    def close(self):
        """Destroy all OpenCV windows and terminate the process."""
        cv2.destroyAllWindows()
        quit()
"""
world = WorldEnv()
done = False
for i in range(100):
done = False
state = world.reset()
world.render()
while not done:
num = randint(0, 7)
_, done, reward = world.step(num)
print(reward)
world.render()
cv2.waitKey(500)
"""
| MetalEinstein/Projects | Map_Builder/custom_map_env.py | custom_map_env.py | py | 8,308 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_... |
75127646504 | import sys
from cravat import BaseAnnotator
from cravat import InvalidData
import sqlite3
import os
from functools import reduce
class CravatAnnotator(BaseAnnotator):
    """
    CravatAnnotator for the Denovo module.
    Querying attributes (input_data):
        chrom, pos, ref_base, alt_base
    Return attributes (out):
        PubmedId, PrimaryPhenotype, Validation
    Attributes:
        sql_template (str): A sql template for querying Denovo db. Use format str
        instance method to insert chrom, pos, ref_base, and alt_base.
    """
    sql_template = \
        ("SELECT "
         "PubmedId, PrimaryPhenotype, Validation "
         "FROM Denovo "
         "WHERE Chr='{}' "
         "AND Position='{}' "
         "AND Reference='{}' "
         "AND Alternate='{}' "
         ";")

    @staticmethod
    def concat(accum, x):
        """
        Concat a 2D-list into another 2D-list along the indices. Use with functools.reduce.
        Ex:
            [[1,2,3], [4,5,6], [7,8,9]] -> [[1,4,7], [2,5,8], [3,6,9]]
        Arguments:
            accum (list<list>): 2D array that accumulates consecutive concats.
            x (list): The next list to add to concatanation.
        Return:
            concated (list<list>): Concatanated 2D list.
        """
        for arr, el in zip(accum, x):
            arr.append(el)
        return accum

    def setup(self):
        pass

    def annotate(self, input_data):
        """
        Query the denovo db and return any matched variants in `out` dict,
        with multiple matching rows joined column-wise by commas.
        """
        out = None
        chrom = input_data['chrom']
        # BUGFIX: str.strip('chr') removes ANY of the characters c/h/r from
        # BOTH ends of the string, which can mangle contig names; remove only
        # a literal leading 'chr' prefix instead.
        if chrom.startswith('chr'):
            chrom = chrom[3:]
        sql = self.sql_template.format(
            chrom,
            input_data['pos'],
            input_data['ref_base'],
            input_data['alt_base']
        )
        result = self.cursor.execute(sql).fetchall()
        if not result:
            return out
        # Fold the row list into per-column lists, then join each column.
        concated = reduce(self.concat, result, [[] for i in range(len(result[0]))])
        concated = [','.join(arr) for arr in concated]
        out = {
            'PubmedId': concated[0],
            'PrimaryPhenotype': concated[1],
            'Validation': concated[2]
        }
        return out

    def cleanup(self):
        pass
if __name__ == '__main__':
annotator = CravatAnnotator(sys.argv)
annotator.run()
| KarchinLab/open-cravat-modules-karchinlab | annotators/denovo/denovo.py | denovo.py | py | 2,269 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cravat.BaseAnnotator",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 79,
"usage_type": "attribute"
}
] |
24528496813 | import torch
import random
import numpy as np
from collections import deque
from game import SnakeAI,NSWE
from model import LinearQNet,QTrainer
import matplotlib.pyplot as plt
from IPython import display
Block=20
MAX_MEM=100_000
Batch_Size=1000
Learning_Rate=0.001
class Agent:
    """Deep-Q-learning agent for the snake game (11-dim state, 3 relative actions)."""
    def __init__(self):
        self.n_games=0
        self.epsilon=0 #makes it random
        self.gamma=0.9 # rate of discount
        # replay buffer of (state, action, reward, next_state, done) tuples
        self.memory=deque(maxlen=MAX_MEM)
        self.model=LinearQNet(11,256,3)
        self.trainer=QTrainer(self.model,lr=Learning_Rate,gamma=self.gamma)
    def _calcDanger(self,game):
        """Return [ahead, side-1, side-2] collision flags relative to the heading.

        NOTE(review): indices 1/2 presumably mean right/left of the current
        direction — verify against the action encoding in game.play_step.
        """
        head = game.body[0]
        point_l = (head[0] - Block, head[1])
        point_r = (head[0] +Block, head[1])
        point_u = (head[0], head[1] - Block)
        point_d = (head[0], head[1] + Block)
        Danger=[0,0,0]
        # collision one block straight ahead
        if ((game.direction==NSWE.East and (game.isCollision(point_r))) or (game.direction==NSWE.West and (game.isCollision(point_l))) or (game.direction==NSWE.North and (game.isCollision(point_d))) or (game.direction==NSWE.South and (game.isCollision(point_u)))):
            Danger[0]=1
        # collision one block to one side of the heading
        if ((game.direction==NSWE.East and (game.isCollision(point_d))) or (game.direction==NSWE.West and (game.isCollision(point_u))) or (game.direction==NSWE.North and (game.isCollision(point_l))) or (game.direction==NSWE.South and (game.isCollision(point_r)))):
            Danger[1]=1
        # collision one block to the other side of the heading
        if ((game.direction==NSWE.East and (game.isCollision(point_u))) or (game.direction==NSWE.West and (game.isCollision(point_d))) or (game.direction==NSWE.North and (game.isCollision(point_r))) or (game.direction==NSWE.South and (game.isCollision(point_l)))):
            Danger[2]=1
        return Danger
    def _calcFood(self,foodpos,snake):
        """Return [left, right, up, down] flags locating the food relative to the head."""
        lr=foodpos[0]-snake[0]
        ud=foodpos[1]-snake[1]
        food=[0,0,0,0]
        if (lr>0):
            food[1]=1
        elif (lr<0):
            food[0]=1
        if (ud>0):
            food[3]=1
        elif (ud<0):
            food[2]=1
        return food
    def _calcDirection(self,game):
        """Return a one-hot [West, East, South, North] encoding of the heading."""
        direction=[0,0,0,0]
        if (game.direction==NSWE.West):
            direction[0]=1
        elif (game.direction==NSWE.East):
            direction[1]=1
        elif (game.direction==NSWE.South):
            direction[2]=1
        else:
            direction[3]=1
        return direction
    def getState(self,game):
        """Build the 11-element state vector: 3 danger + 4 direction + 4 food flags."""
        Danger=self._calcDanger(game)
        Direction=self._calcDirection(game)
        Food=self._calcFood(game.food,game.snake)
        return np.array(Danger+Direction+Food,dtype=int)
    def remember(self,state,action,reward,next_state,gg):
        """Append one transition to the replay buffer (oldest entries drop off)."""
        self.memory.append((state,action,reward,next_state,gg))
    def trainLongMem(self):
        """Train on a random replay batch (the whole buffer if it is small)."""
        if (len(self.memory)>Batch_Size):
            sample=random.sample(self.memory,Batch_Size)
        else:
            sample=self.memory
        states,actions,rewards,next_states,ggs= zip(*sample)
        self.trainer.train_step(states,actions,rewards,next_states,ggs)
    def trainShortMem(self,state,action,reward,next_state,gg):
        """Train on the single most recent transition."""
        self.trainer.train_step(state,action,reward,next_state,gg)
    def getAction(self,state):
        """Epsilon-greedy action selection; epsilon shrinks as games accrue.

        Random with probability epsilon/200 (40% at game 0, 0 after game 80).
        """
        self.epsilon=80 -self.n_games#Start 30%, lower 1% every 2 games, lowest=3%
        move=[0,0,0]
        r=random.randint(0,200)
        if (r<self.epsilon):
            move[random.randint(0,2)]=1
        else:
            stateTorch=torch.tensor(state,dtype=torch.float)
            prediction=self.model(stateTorch)
            x=torch.argmax(prediction).item()
            move[x]=1
        return move
    def train(self):
        """Run the training loop forever, plotting scores after every game."""
        plot_scores=[]
        plot_mean_scores=[]
        total_scores=0
        record=0
        game=SnakeAI()
        while True:
            state=self.getState(game)
            action=self.getAction(state)
            reward,gg,score=game.play_step(action)
            new_state=self.getState(game)
            self.trainShortMem(state,action,reward,new_state,gg)
            self.remember(state,action,reward,new_state,gg)
            if gg:# train long memory
                game.reset()
                self.n_games+=1
                self.trainLongMem()
                # persist the best model seen so far
                if (score>record):
                    record=score
                    self.model.save("bestModel.pth")
                print ("Game: ",self.n_games,"Score: ",score,"Record: ",record,"Reward: ",10*(score-1))
                plot_scores.append(score)
                total_scores+=score
                mean_scores=total_scores/self.n_games
                plot_mean_scores.append(mean_scores)
                plot(plot_scores,plot_mean_scores)
def plot(scores,mean_scores):
    """Live-update a matplotlib figure with per-game scores and the running mean."""
    display.clear_output(wait=True)  # refresh in place when run under IPython/Jupyter
    display.display(plt.gcf())
    plt.clf()
    plt.title("Model progress")
    plt.xlabel("Number of Games")
    plt.ylabel("Score")
    plt.plot(scores)
    plt.plot(mean_scores)
    plt.ylim(ymin=0)
    # annotate the latest value of each series
    plt.text(len(scores)-1,scores[-1],str(scores[-1]))
    plt.text(len(mean_scores)-1, mean_scores[-1],str(mean_scores[-1]))
    plt.show(block=False)
    plt.pause(.1)  # give the GUI event loop time to draw
if __name__=="__main__":
plt.ion()
Agent().train()
| MakisEu/SnakeAI | agent.py | agent.py | py | 5,158 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "model.LinearQNet",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "model.QTrainer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "game.body",
"li... |
34358988987 | import SECRETS
import os
import openai
openai.organization = "org-0iQE6DR7AuGXyEw1kD4poyIg"
# openai.api_key = os.getenv(SECRETS.open_ai_api_key)
openai.api_key = SECRETS.open_ai_api_key
# print(openai.Model.list())
print("starting test")
def get_roast_str_from_username(username):
    """Ask the OpenAI completion API for a short roast based on *username*."""
    prompt = 'Roast-bot, roast this user based on their username: "' + username + '"'
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        max_tokens=30,
    )
    first_choice = response.choices[0]
    return first_choice.text
# print(completion.choices)
# params = {
# "model": "text-davinci-003",
# "prompt": "Say this is a test",
# "max_tokens": 7,
# "temperature": 0,
# "top_p": 1,
# "n": 1,
# "stream": False,
# # "logprobs": null,
# "logprobs": None,
# "stop": "\n"
# }
# print(openai.Model(id, api_key, api_version, api_type, organization, response_ms, api_base, engine))
# print(openai.Completion.)
#
# # list engines
# engines = openai.Engine.list()
#
# # print the first engine's id
# print(engines.data[0].id)
#
# # create a completion
# # completion = openai.Completion.create(engine="ada", prompt="Hello world")
# completion = openai.Completion.create(
# model = "text-davinci-003",
# # prompt="Tell me a joke about a fish.",
# # prompt='Roast-bot, I command you to roast user: "MeatballMama55"',
# prompt='Roast-bot, roast this user based on their username: "MeatballMama55"',
# # temperature = 2,
# max_tokens=30
# )
# print the completion
if __name__ == "__main__":
roast_str = get_roast_str_from_username("MeatballMama55")
print(roast_str)
roast_str = get_roast_str_from_username("ELLOKIKO")
print(roast_str)
print('Done') | Brandon-Valley/tik_live_host | src/open_ai_api_test.py | open_ai_api_test.py | py | 1,931 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openai.organization",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "openai.api_key",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "SECRETS.open_ai_api_key",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name":... |
30181316679 | """
This code allows to compute and evaluate optimal policies for the grid environments. These optimal values are used to normalize the rewards per task.
"""
import envs, gym, argparse
from envs.water.water_world import Ball, BallAgent
if __name__ == '__main__':
    # Examples
    # >>> python3 test_optimal_policies.py --env Office-v0
    # >>> python3 test_optimal_policies.py --env Craft-M0-v0

    # Getting params
    parser = argparse.ArgumentParser(prog="test_optimal_policies", description='This code allows to evaluate optimal policies for the grid environments.')
    parser.add_argument('--env', default='Office-v0', type=str,
                        help='This parameter indicated which environment to use.')
    args = parser.parse_args()

    env = gym.make(args.env)
    # Average reward per step of an epsilon-greedy optimal policy, one entry per task
    arps = env.test_optimal_policies(num_episodes=1000000, epsilon=0.1, gamma=0.9)
    print(args.env)
    print(arps)
    # Scaled mean over tasks (used elsewhere as a per-task normalisation constant)
    print(10000*sum(arps)/len(arps))
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 19,
"usage_type": "call"
}
] |
72609543784 | import requests
from bs4 import BeautifulSoup
import mysql.connector
from mysql.connector import Error
class ScraperBol():
    """Scrapes bol.com product listings per category and stores them in MySQL."""

    lijstMetCategorien = ["video games", "nature", "photo", "sports", "tech", "beauty"]

    def __init__(self):
        # NOTE(review): credentials are hard-coded; move them to env vars/config.
        self.connection = mysql.connector.connect(host='ID362561_suggesto.db.webhosting.be',
                                                  database='ID362561_suggesto',
                                                  user='ID362561_suggesto',
                                                  password='InspirationLab2022')

    def MysqlConnection(self, name, link, prijs, fotoLink, preference):
        """Insert one gift row into the ``gifts`` table.

        SECURITY FIX: values are bound as query parameters; the original
        concatenated scraped (untrusted) text straight into the SQL string,
        which both broke on quotes and allowed SQL injection.
        """
        try:
            connection = self.connection
            if connection.is_connected():
                db_Info = connection.get_server_info()
                print("Connected", db_Info, "[SUCCES]")
                cursor = connection.cursor()
                cursor.execute(
                    "INSERT INTO gifts(name, link, prijs, fotoLink, preference) "
                    "VALUES (%s, %s, %s, %s, %s)",
                    (name, link, prijs, fotoLink, preference))
                connection.commit()
                cursor.close()
        except Error as e:
            print("Error while connecting to MySQL", e)

    def DoeRequest(self, url):
        """GET *url* and return the parsed BeautifulSoup document."""
        request = requests.get(url)
        soupBS = BeautifulSoup(request.text, "html.parser")
        return soupBS

    def MakeLink(self, product):
        """Build a bol.com search URL (list view, sorted by popularity) for *product*."""
        zoekterm = product.replace(" ", "+")
        begin = "https://www.bol.com/be/nl/s/?page=1&searchtext="
        eind = "&view=list&sort=popularity1"
        return begin + zoekterm + eind

    def GetThem(self, link, hoeveelheid, preference):
        """Scrape the first *hoeveelheid* products from *link* and store each row."""
        soup = self.DoeRequest(link)
        for i in range(hoeveelheid):
            # Product name (ASCII-fold accents, drop trademark glyphs)
            name = soup.find_all("ul", {"id": "js_items_content"})[0].find_all(
                "li", {"class": "product-item--row js_item_root"})[i].find_all(
                "a", {"class": "product-title px_list_page_product_click"})[0].get_text()
            name = (name.replace("é", "e").replace("è", "e").replace("ö", "o")
                        .replace("®", "").replace("™", ""))
            # Product page link
            anchor = soup.find_all("div", {"class": "h-o-hidden"})[i].find_all(
                "a", {"class": "product-image product-image--list px_list_page_product_click"})[0]
            prod_link = "https://www.bol.com" + anchor.get("href")
            # Image link: the first result eagerly loads via "src", the rest via "data-src"
            img = anchor.find_all("img")[0]
            foto_link = img.get("src") if i == 0 else img.get("data-src")
            # Price text, e.g. "12 34\n" -> "€12,34" -> 12.34
            raw_price = soup.find_all("ul", {"id": "js_items_content"})[0].find_all(
                "span", {"class": "promo-price"})[i].get_text()
            raw_price = "€" + raw_price.replace("\n", "").replace(" ", ",", 1).replace(" ", "")
            price = str(float(raw_price.replace("€", "").replace(",", ".").replace("-", "")))
            self.MysqlConnection(name, prod_link, price, foto_link, str(preference))

    def GetCategories(self):
        """Scrape the top 10 products for every configured category."""
        for categorie in self.lijstMetCategorien:
            link = self.MakeLink(categorie)
            self.GetThem(link, 10, categorie)

    def clearEntries(self):
        """Delete every row from the gifts table."""
        connection = self.connection
        if connection.is_connected():
            db_Info = connection.get_server_info()
            print("Connected", db_Info, "[SUCCES]")
            cursor = connection.cursor()
            cursor.execute("DELETE FROM gifts WHERE 1;")
            connection.commit()

    def closeConnection(self):
        """Close the underlying MySQL connection."""
        self.connection.close()
{
"api_name": "mysql.connector.connector.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 9,
"usage_type": "name"
},
{
"api... |
38650181804 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import pi, sin, cos, atan, degrees
from sympy import *
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
DF = True
IS_SCATTER = False
df = pd.read_csv('/Users/shetshield/Desktop/workspace/python_ws/sim_lin_bend/sim_res_1ch_bend_neo575.csv')
d = 48.735 # in mm
lb = 17
lu = 13
l0 = 171.5 - (lb + lu) # in mm
x_tick_list = [0, 20, 40, 60]
y_tick_list = [135, 150, 165]
# Variables to store result
X = list()
Y = list()
Z = list()
L1 = list()
L2 = list()
L3 = list()
p1 = [1000 * i for i in range(0, -37, -1)]
p2 = [0]
p3 = [0]
print(p1)
# A_avg = 545.575319 / 1000000
# A_avg = 530.93 / 1000000 # Force Equilibrium Area
A_avg = 494 / 1000000
# A_avg = 430 / 1000000
l1 = symbols('l1')
l2 = symbols('l2')
l3 = symbols('l3')
cdf_p2 = 0.0232/3
cdf_p1 = 1.0348/3
cdf_n2 = 0.0061/3
cdf_n1 = 0.9374/3
cfd_p2 = -0.0041
cfd_p1 = 0.729
cfd_n2 = -0.0247
cfd_n1 = 0.5549
cdp_p2 = -0.0105
cdp_p1 = 1.258
# Displacement vs Pressure
cdp_n2 = -0.0807
cdp_n1 = 1
cdp_n1_ = 0.325
cdp_n0_ = -61.975
# Chord length adjustment coeff.
'''
c_chord_1 = -0.459
c_chord_0 = -3.3349
c_chord_1_ = 0.1809
c_chord_0_ = 0
'''
c_chord_1 = -0.325
c_chord_0 = 0
c_chord_1_ = -0.325
c_chord_0_ = 0
'''
c_chord_2 = 0.0016
c_chord_1 = 0.0358
c_chord_0 = 0
c_chord_2_ = c_chord_2
c_chord_1_ = c_chord_1
c_chord_0_ = c_chord_0
'''
c_chord_const = 6
def calc_forward_kinematics(p1, p2, p3):
    """Constant-curvature forward kinematics of the 3-chamber bending actuator.

    Chamber pressures ``p1..p3`` (divided by 1000 below, so presumably Pa in
    and kPa used internally — TODO confirm units) are converted to chamber
    lengths via piecewise pressure-displacement fits (``cdp_*``) plus a
    chord-length correction (``c_chord_*``), then mapped to a tip position
    with the standard three-tendon constant-curvature equations.

    Returns:
        (x, y, z, l1, l2, l3, theta_deg, theta_deg_wo_adj): tip position (mm),
        the three chamber lengths (mm), and the bend angle in degrees with
        and without the chord-length adjustment.
    """
    # Equivalent chamber forces; only used by the disabled Newton branch below
    f_l1 = p1 * A_avg * 3
    f_l2 = p2 * A_avg * 3
    f_l3 = p3 * A_avg * 3
    # Pressure thresholds (kPa) separating the piecewise fit regions
    p_thr = -4000/1000
    p_thr2 = -24500/1000
    # Convert pressures to kPa
    p1 = p1/1000
    p2 = p2/1000
    p3 = p3/1000
    # --- Chamber length change from pressure (piecewise fit per chamber) ---
    if p1 > 0:
        dl1 = cdp_p2 * p1**2 + cdp_p1 * p1
        # dl1 = cfd_p2 * f_l1 ** 2 + cfd_p1 * f_l1
        # f1 = cp2 * l1 ** 2 + cp1 * l1 - f_l1
    elif p1 > p_thr2:
        dl1 = cdp_n2 * p1 ** 2 + cdp_n1 * p1
        # dl1 = cfd_n2 * f_l1 ** 2 + cfd_n1 * f_l1
        # f1= cn2 * l1 ** 2 + cn1 * l1 - f_l1
    else :
        dl1 = cdp_n1_ * p1 + cdp_n0_
    if p2 > 0:
        dl2 = cdp_p2 * p2 ** 2 + cdp_p1 * p2
        # dl2 = cfd_p2 * f_l2 ** 2 + cfd_p1 * f_l2
        # f2 = cp2 * l2 ** 2 + cp1 * l2 - f_l2
    elif p2 > p_thr2:
        dl2 = cdp_n2 * p2 ** 2 + cdp_n1 * p2
        # dl2 = cfd_n2 * f_l2 ** 2 + cfd_n1 * f_l2
        # f2 = cn2 * l2 ** 2 + cn1 * l2 - f_l2
    else :
        dl2 = cdp_n1_ * p2 + cdp_n0_
    if p3 > 0:
        dl3 = cdp_p2 * p3 ** 2 + cdp_p1 * p3
        # dl3 = cfd_p2 * f_l3 ** 2 + cfd_p1 * f_l3
        # f3 = cp2 * l3 ** 2 + cp1 * l3 - f_l3
    elif p3 > p_thr2 :
        dl3 = cdp_n2 * p3 ** 2 + cdp_n1 * p3
        # dl3 = cfd_n2 * f_l3 ** 2 + cfd_n1 * f_l3
        # f3 = cn2 * l3 ** 2 + cn1 * l3 - f_l3
    else :
        dl3 = cdp_n1_ * p3 + cdp_n0_
    # --- Chord-length corrections ---
    # NOTE(review): chamber 1 uses ``>=`` at the first threshold while
    # chambers 2 and 3 use ``>`` — confirm the asymmetry is intended.
    if p1 >= p_thr :
        # dm1 = c_chord_2_ * p1 ** 2 + c_chord_1_ * p1 + c_chord_0_
        dm1 = c_chord_1_ * p1 + c_chord_0_
        # dm1 = 0
    elif p_thr2 <= p1 < p_thr:
        # dm1 = c_chord_2 * p1 ** 2 + c_chord_1 * pi + c_chord_0
        dm1 = c_chord_1 * p1 + c_chord_0
    else :
        dm1 = c_chord_const
    if p2 > p_thr :
        # dm2 = c_chord_2_ * p2 ** 2 + c_chord_1_ * p2 + c_chord_0_
        dm2 = c_chord_1_ * p2 + c_chord_0_
        # dm2 = 0
    elif p_thr2 <= p2 < p_thr:
        # dm2 = c_chord_2 * p2 ** 2 + c_chord_1 * p2 + c_chord_0
        dm2 = c_chord_1 * p2 + c_chord_0
    else:
        dm2 = c_chord_const
    if p3 > p_thr :
        dm3 = c_chord_1_ * p3 + c_chord_0_
        # dm3 = c_chord_2_ * p3 ** 2 + c_chord_1_ * p3 + c_chord_0_
        # dm3 = 0
        # dm2 = 0.0005 * p_thr2 ** 3 - 0.0022 * p_thr2 ** 2 + 0.0686 * p_thr2
    elif p_thr2 <= p3 < p_thr:
        # dm3 = c_chord_2 * p3 ** 2 + c_chord_1 * p3 + c_chord_0
        dm3 = c_chord_1 * p3 + c_chord_0
    else:
        dm3 = c_chord_const
    # Disabled Newton-Raphson solution of the force-balance fits (dead code):
    """
    f1_d = f1.diff(l1)
    f2_d = f2.diff(l2)
    f3_d = f3.diff(l3)
    l1n = 0
    l2n = 0
    l3n = 0
    for i in range(20):
        l1n = l1n - np.float32(f1.evalf(subs={l1: l1n})) / np.float32(f1_d.evalf(subs={l1: l1n}))
        # print(f'The {i+1} iteration ln is {l1n:.4} and f(xln) is {np.float(f1.evalf(subs= {l1:l1n})):.3}')
    for i in range(20):
        l2n = l2n - np.float32(f2.evalf(subs={l2: l2n})) / np.float32(f2_d.evalf(subs={l2: l2n}))
        # print(f'The {i+1} iteration ln is {l1n:.4} and f(xln) is {np.float(f1.evalf(subs= {l1:l1n})):.3}')
    for i in range(20):
        l3n = l3n - np.float32(f3.evalf(subs={l3: l3n})) / np.float32(f3_d.evalf(subs={l3: l3n}))
        # print(f'The {i+1} iteration ln is {l1n:.4} and f(xln) is {np.float(f1.evalf(subs= {l1:l1n})):.3}')
    """
    # d1 = (8.25 - d1)
    # d2 = (8.25 - d2)
    # d3 = (8.25 - d3)
    # print(dl1, dl2, dl3, d1, d2, d3)
    # print(dm1, dm2, dm3)
    # print(dl1, dm1)
    # Absolute chamber lengths, with and without the chord adjustment
    l1_res = dl1 + l0 + dm1 # + d1
    l2_res = dl2 + l0 + dm2 # + d2
    l3_res = dl3 + l0 + dm3 # + d3
    l1_wo_adj = dl1 + l0
    l2_wo_adj = dl2 + l0
    l3_wo_adj = dl3 + l0
    # print(l1_res, l2_res, l3_res)
    # l1_res = dl1 + l0
    # l2_res = dl2 + l0
    # l3_res = dl3 + l0
    # l1_ = dl1 + l0
    # l2_ = dl2 + l0
    # l3_ = dl3 + l0
    # print(dl1, dl2, dl3, d1, d2, d3, l1_res, l2_res, l3_res)
    # Mean backbone length
    l_q = 1/3 * (l1_res + l2_res + l3_res)
    l_q_wo_adj = 1/3 * (l1_wo_adj + l2_wo_adj + l3_wo_adj)
    # l_q = 1/3 * (l1_ + l2_ + l3_)
    # Bending-plane angle phi (guard against division by ~0 when l2 == l3)
    if abs(l2_res - l3_res) < 0.001:
        # assume l2 = l3 then
        phi_q = 0
    else:
        phi_q = atan(sqrt(3) * (l2_res + l3_res - 2 * l1_res) / (3 * (l2_res - l3_res)))
    if abs(l2_wo_adj - l3_wo_adj) < 0.001 :
        phi_q_wo_adj = 0
    else :
        phi_q_wo_adj = atan(sqrt(3) * (l2_wo_adj + l3_wo_adj - 2 * l1_wo_adj) / (3 * (l2_wo_adj - l3_wo_adj)))
    # Curvature kappa from the three chamber lengths (d = chamber spacing)
    kappa_q = 2 * sqrt(
        l1_res ** 2 + l2_res ** 2 + l3_res ** 2 - l1_res * l2_res - l1_res * l3_res - l2_res * l3_res) / (
        d * (l1_res + l2_res + l3_res))
    kappa_q_wo_adj = 2 * sqrt(
        l1_wo_adj ** 2 + l2_wo_adj ** 2 + l3_wo_adj ** 2 - l1_wo_adj * l2_wo_adj - l1_wo_adj * l3_wo_adj - l2_wo_adj * l3_wo_adj) / (
        d * (l1_wo_adj + l2_wo_adj + l3_wo_adj))
    # Bend angle theta = arc length * curvature
    theta_q = l_q * kappa_q
    theta_q_wo_adj = l_q_wo_adj * kappa_q_wo_adj
    # Tip position; the straight configuration is handled separately to
    # avoid dividing by a near-zero curvature
    if kappa_q < 0.000001 :
        c = 0
        x = 0
        y = 0
        z = 171.5
    else :
        c = 2 * (1 / kappa_q) * sin(theta_q / 2)
        x = c * sin(theta_q / 2) * cos(phi_q) + lu * sin(theta_q) * cos(phi_q)
        y = c * sin(theta_q / 2) * sin(phi_q) + lu * sin(theta_q) * sin(phi_q)
        z = c * cos(theta_q / 2) + lb + lu * cos(theta_q)
    if kappa_q_wo_adj < 0.000001 :
        c_wo_adj = 0
        x_wo_adj = 0
        y_wo_adj = 0
        z_wo_adj = 171.5
    else :
        c_wo_adj = 2 * (1 / kappa_q_wo_adj) * sin(theta_q_wo_adj / 2)
        x_wo_adj = c_wo_adj * sin(theta_q_wo_adj / 2) * cos(phi_q_wo_adj) + lu * sin(theta_q_wo_adj) * cos(phi_q_wo_adj)
        y_wo_adj = c_wo_adj * sin(theta_q_wo_adj / 2) * sin(phi_q_wo_adj) + lu * sin(theta_q_wo_adj) * sin(phi_q_wo_adj)
        z_wo_adj = c_wo_adj * cos(theta_q_wo_adj / 2) + lb + lu * cos(theta_q_wo_adj)
    print(degrees(theta_q), ",")
    return (x, y, z, l1_res, l2_res, l3_res, degrees(theta_q), degrees(theta_q_wo_adj))
X = list()
Y = list()
Z = list()
L1 = list()
L2 = list()
L3 = list()
ang_list = list()
ang_list_ = list()
for _p in p1 :
res = calc_forward_kinematics(_p, 0, 0)
if np.isnan(float(res[0])):
X.append(0.0)
else :
X.append(res[0])
if np.isnan(float(res[2])) :
Z.append(1/3*(res[3]+res[4]+res[5]))
else :
Z.append(res[2])
Y.append(res[1])
L1.append(res[3])
L2.append(res[4])
L3.append(res[5])
ang_list.append(res[6])
ang_list_.append(res[7])
print(ang_list)
print(",")
print(ang_list_)
plt.rcParams["figure.autolayout"] = True
if IS_SCATTER :
ax = df.plot.scatter(x='y0', y='z0', c='blue', s=12)
ax.scatter(x=X, y=Z, c='black', s=12)
else :
ax = df.plot.line(x='y0', y='z0', c='blue')
ax.plot(X, Z, 'black')
# ax.scatter(x=X, y=Z, c='black', label='kinematic model')
ax.set_xticks(x_tick_list)
ax.set_yticks(y_tick_list)
ax.xaxis.set_minor_locator(plt.MultipleLocator(20/2))
ax.yaxis.set_minor_locator(plt.MultipleLocator(15/3))
ax.tick_params(which='both', width=1)
ax.tick_params(which='major', length=6)
ax.tick_params(which='minor', length=3, color='black')
ax.set_xlabel('x (mm)', fontsize=12)
ax.set_ylabel('z (mm)', fontsize=12)
ax.legend(['Simulation result', 'Kinematic model'], edgecolor='black', fontsize=12)
plt.show()
# print(X) | shetshield/src | srm/kinematic_model.py | kinematic_model.py | py | 8,580 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 2... |
22784884700 | from .common import *
import numpy
import scipy
import scipy.special
import scipy.sparse
import scipy.sparse.linalg
# Datatypes
bool = bool
float16 = numpy.float16
float32 = numpy.float32
float64 = numpy.float64
uint8 = numpy.uint8
int16 = numpy.int16
int32 = numpy.int32
int64 = numpy.int64
float_fmts.update({
'float16': float16,
'float32': float32,
'float64': float64
})
def asarray(x):
if hasattr(x, 'dtype'):
return array(x, dtype=x.dtype)
else:
return array(x)
def to_float(x):
return numpy.asanyarray(x).astype(float64)
def to_type(x, dtype):
return numpy.asanyarray(x).astype(dtype)
# Convenience Decorators
def type_reg(f):
def _wrapped(*args, **kwargs):
kwargs.setdefault("dtype", float_fmts[float_fmt()])
return f(*args, **kwargs)
_wrapped.original_function = f
return _wrapped
# Fundamental Mathematical Operators
neg = numpy.negative
pow = numpy.power
abs = numpy.abs
sqrt = numpy.sqrt
exp = numpy.exp
expm1 = numpy.expm1
log = numpy.log
log10 = numpy.log10
log1p = numpy.log1p
log2 = numpy.log2
add = numpy.add
sub = numpy.subtract
div = numpy.divide
mul = numpy.multiply
reciprocal = numpy.reciprocal
remainder = numpy.remainder
ceil = numpy.ceil
floor = numpy.floor
round = numpy.round
fmod = numpy.fmod
clip = numpy.clip
sign = numpy.sign
trunc = numpy.trunc
# Trigonometric Functions
cos = numpy.cos
sin = numpy.sin
tan = numpy.tan
cosh = numpy.cosh
sinh = numpy.sinh
tanh = numpy.tanh
acos = numpy.arccos
asin = numpy.arcsin
atan = numpy.arctan
atan2 = numpy.arctan2
# Other Functions
digamma = scipy.special.digamma
erf = scipy.special.erf
erfc = scipy.special.erfc
erfinv = scipy.special.erfinv
sigmoid = scipy.special.expit
def softplus(x, out=None):
return log(1 + exp(x), out=out)
# Additional Definitions
def rsqrt(x, out=None):
return pow(x, -0.5, out=out)
def square(x, out=None):
return pow(x, 2, out=out)
def addcdiv(x, y1=None, y2=None, value=1, out=None):
if y1 is None or y2 is None:
raise ValueError("y1 and y2 must both be specified")
if out is None:
out = value * div(y1, y2)
out = x + out
else:
div(y1, y2, out=out)
mul(value, out, out=out)
add(x, out, out=out)
return out
def addcmul(x, y1=None, y2=None, value=1, out=None):
if y1 is None or y2 is None:
raise ValueError("y1 and y2 must both be specified")
if out is None:
out = value * mul(y1, y2)
out = x + out
else:
mul(y1, y2, out=out)
mul(value, out, out=out)
add(x, out, out=out)
return out
def frac(x, out=None):
if out is None:
return x - floor(x)
floor(x, out=out)
sub(x, out, out=out)
return out
def lerp(start, end, weight, out=None):
if out is None:
return start + weight * (end - start)
sub(end, start, out=out)
mul(weight, out, out=out)
add(start, out, out=out)
return out
def mvlgamma(x, p):
return scipy.special.multigammaln(x, d=p)
# Common Array Operations
einsum = numpy.einsum
concatenate = numpy.concatenate
append = numpy.append
stack = numpy.stack
ravel = numpy.ravel
flatten = numpy.ravel
arange = type_reg(numpy.arange)
logspace = type_reg(numpy.logspace)
linspace = type_reg(numpy.linspace)
eye = type_reg(numpy.eye)
# Reduction Ops
argmax = numpy.argmax
argmin = numpy.argmin
cumprod = numpy.cumprod
cumsum = numpy.cumsum
logsumexp = scipy.special.logsumexp
mean = numpy.mean
median = numpy.median
prod = numpy.prod
std = numpy.std
var = numpy.var
sum = numpy.sum
norm = numpy.linalg.norm
def dist(x, y, ord=2):
if asarray(x).dtype == float16 or asarray(y).dtype == float16:
return numpy.linalg.norm(asarray(x).astype(float64) - asarray(y).astype(float64), ord=ord).astype(float16)
else:
return numpy.linalg.norm(x - y, ord=ord)
# Comparison Ops
allclose = numpy.allclose
argsort = numpy.argsort
eq = numpy.equal
ne = numpy.not_equal
ge = numpy.greater_equal
gt = numpy.greater
le = numpy.less_equal
lt = numpy.less
def equal(*args, **kwargs):
return numpy.all(eq(*args, **kwargs))
isfinite = numpy.isfinite
isinf = numpy.isinf
isnan = numpy.isnan
max = numpy.max
min = numpy.min
any = numpy.any
all = numpy.all
array = type_reg(numpy.array)
zeros = type_reg(numpy.zeros)
ones = type_reg(numpy.ones)
empty = type_reg(numpy.empty)
full = type_reg(numpy.full)
zeros_like = type_reg(numpy.zeros_like)
ones_like = type_reg(numpy.ones_like)
empty_like = type_reg(numpy.empty_like)
full_like = type_reg(numpy.full_like)
def to_numpy(x):
return numpy.asarray(x)
def as_bool_array(x):
return numpy.asarray(x).astype(bool)
def copy(x):
return numpy.copy(x)
def reshape(x, new_dims):
return numpy.reshape(asarray(x), new_dims)
def shape(x):
return numpy.shape(x)
def logical_not(x, out=None, where=True):
return numpy.logical_not(x, out=out, where=where)
def logical_or(a, b, out=None, where=True):
return numpy.logical_or(a, b, out=out, where=where)
def logical_and(a, b, out=None, where=True):
return numpy.logical_and(a, b, out=out, where=where)
def logical_xor(a, b, out=None, where=True):
return numpy.logical_xor(a, b, out=out, where=where)
nonzero = numpy.nonzero
argsort = numpy.argsort
gather = numpy.take
def solve_linear_system(A,b,overwrite_a=False,overwrite_b=False,check_finite=False,sparse=False):
if sparse:
return scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix(A),b)
else:
return scipy.linalg.solve(A,b,overwrite_a=overwrite_a,overwrite_b=overwrite_b,check_finite=check_finite)
matrix_inv = numpy.linalg.inv
eig = numpy.linalg.eig
diag = numpy.diag
| Microno95/desolver | desolver/backend/numpy_backend.py | numpy_backend.py | py | 5,706 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "numpy.float16",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.float64",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8"... |
71017122023 | from multiprocessing import Lock
import dquality.common.constants as const
from multiprocessing import Process
import importlib
from os import path
import sys
import dquality.realtime.pv_feedback_driver as drv
if sys.version[0] == '2':
import thread as thread
else:
import _thread as thread
class Result:
"""
This class is a container of result and parameters linked to the subject of the verification, and the
verification type.
"""
def __init__(self, res, quality_id, error):
self.res = res
self.quality_id = quality_id
self.error = error
class Results:
"""
This class is a container of results of all quality checks for a single frame, and attributes such as flag
indicating if all quality checks passed, dat type, and index.
"""
def __init__(self, type, index, failed, results):
self.type = type
self.index = index
self.failed = failed
self.results = []
for qc in results:
self.results.append(results[qc])
class Data:
"""
This class is a container of data.
"""
def __init__(self, status, slice=None, type=None, acq_time = None):
self.status = status
if status == const.DATA_STATUS_DATA:
self.slice = slice
self.type = type
if acq_time is not None:
self.acq_time = acq_time
class Feedback:
"""
This class is a container of real-time feedback related information.
"""
def __init__(self, feedback_type, ):
"""
Constructor
Parameters
----------
feedback_type : list
a list of configured feedbac types. Possible options: console, log, and pv
"""
self.feedback_type = feedback_type
def set_feedback_pv(self, feedback_pvs, detector):
"""
This function sets feedback_pvs, and detector fields.
Parameters
----------
feedback_pvs : list
a list of feedback process variables names, for each data type combination with the
applicable quality check
detector : str
a pv name of the detector
"""
self.feedback_pvs = feedback_pvs
self.detector = detector
def set_logger(self, logger):
"""
This function sets logger.
Parameters
----------
logger : Logger
an instance of Logger
"""
self.logger = logger
def set_driver(self, driver):
"""
This function sets driver.
Parameters
----------
driver : FbDriver
an instance of FbDriver
"""
self.driver = driver
def write_to_pv(self, pv, index):
"""
This function calls write method on driver field to update pv.
Parameters
----------
pv : str
a name of the pv, contains information about the data type and quality check (i.e. data_white_mean)
index : int
index of failed frame
"""
self.driver.write(pv, index)
def quality_feedback(self, feedbackq):
"""
This function provides feedback as defined by the feedback_type in a real time.
If the feedback type contains pv type, this function creates server and initiates driver handling the feedback
pvs.It dequeues results from the 'feedbackq' queue and processes all feedback types that have been configured.
It will stop processing the queue when it dequeues data indicating end status.
Parameters
----------
feedbackq : Queue
a queue that will deliver Result objects of failed quality check
Returns
-------
none
"""
if const.FEEDBACK_PV in self.feedback_type:
server = drv.FbServer()
driver = server.init_driver(self.detector, self.feedback_pvs)
thread.start_new_thread(server.activate_pv, ())
self.set_driver(driver)
evaluating = True
while evaluating:
while not feedbackq.empty():
try:
result = feedbackq.get_nowait()
if result == const.DATA_STATUS_END:
evaluating = False
else:
if const.FEEDBACK_CONSOLE in self.feedback_type:
print ('failed frame '+str(result.index)+ ' result of '+const.to_string(result.quality_id)+ ' is '+ str(result.res))
if const.FEEDBACK_LOG in self.feedback_type:
self.logger.info('failed frame '+str(result.index)+ ' result of '+const.to_string(result.quality_id)+ ' is '+ str(result.res))
if const.FEEDBACK_PV in self.feedback_type:
quality_check = const.to_string(result.quality_id)
self.write_to_pv(result.type + '_' + quality_check, result.index)
except:
pass
class Aggregate:
"""
This class is a container of results.
The results are organized in three dictionaries.
"bad_indexes": dictionary contains keys that are indexes of slices that not pass one or more quality checks.
The values are results organized in dictionaries, where the keays are quality check method index.
"good_indexes" is a similarly organized dictionary that contains indexes for which all quality checks passed.
"results": a dictionary keyed by quality check id and a value of list of all results for "good" indexes.
The class has locks, for each quality check type. The lock are used to access the results. One thread is adding
to the results, and another thread (statistical checks) are reading the stored data to do statistical calculations.
"""
def __init__(self, data_type, quality_checks, aggregate_limit, feedbackq = None):
"""
Constructor
Parameters
----------
data_type : str
data type related to the aggregate
quality_checks : list
a list of quality checks that apply for this data type
feedbackq : Queue
optional, if the real time feedback is requested, the queue will be used to pass results to the process
responsible for delivering the feedback in areal time
"""
self.data_type = data_type
self.feedbackq = feedbackq
self.aggregate_limit = aggregate_limit
self.bad_indexes = {}
self.good_indexes = {}
self.results = {}
self.lock = Lock()
for qc in quality_checks:
self.results[qc] = []
def get_results(self, check):
"""
This returns the results of a given quality check.
This operation uses lock, as other process writes to results.
Parameters
----------
check : int
a value indication quality check id
Returns
-------
res : list
a list containing results that passed the given quality check
"""
self.lock.acquire()
res = self.results[check]
self.lock.release()
return res
def add_result(self, result, check):
"""
This add a new result for a given quality check to results.
This operation uses lock, as other process reads the results.
Parameters
----------
result : Result
a result instance
check : int
a value indication quality check id
Returns
-------
none
"""
self.lock.acquire()
self.results[check].append(result)
self.lock.release()
def handle_results(self, results):
"""
This handles all results for one frame.
If the flag indicates that at least one quality check failed the index will be added into 'bad_indexes'
dictionary, otherwise into 'good_indexes'. It also delivers the failed results to the feedback process
using the feedbackq, if real time feedback was requasted.
Parameters
----------
result : Result
a result instance
check : int
a value indication quality check id
Returns
-------
none
"""
def send_feedback():
if self.feedbackq is not None:
for result in results.results:
if result.error != 0:
result.index = results.index
result.type = results.type
self.feedbackq.put(result)
if self.aggregate_limit == -1:
if results.failed:
send_feedback()
else:
if results.failed:
self.bad_indexes[results.index] = results.results
send_feedback()
else:
self.good_indexes[results.index] = results.results
for result in results.results:
self.add_result(result.res, result.quality_id)
def is_empty(self):
"""
Returns True if the fields are empty, False otherwise.
Parameters
----------
none
Returns
-------
True if empty, False otherwise
"""
return len(self.bad_indexes) == 0 and len(self.good_indexes) == 0
class Consumer_adapter():
"""
This class is an adapter starting consumer process.
"""
def __init__(self, module_path):
"""
constructor
Parameters
----------
module_path : str
a path where the consumer module is installed
"""
sys.path.append(path.abspath(module_path))
def start_process(self, q, module, args):
"""
This function starts the consumer process.
It first imports the consumer module, and starts consumer process.
Parameters
----------
q : Queue
a queue on which the frames will be delivered
module : str
the module that needs to be imported
args : list
a list of arguments required by the consumer process
"""
mod = importlib.import_module(module)
status_def = [const.DATA_STATUS_DATA, const.DATA_STATUS_MISSING, const.DATA_STATUS_END]
p = Process(target=mod.consume, args=(q, status_def, args,))
p.start()
| bfrosik/data-quality | dquality/common/containers.py | containers.py | py | 10,484 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "sys.version",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "dquality.common.constants.DATA_STATUS_DATA",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "dquality.common.constants",
"line_number": 45,
"usage_type": "name"
},
... |
25628607275 | import numpy as np
import matplotlib.pyplot as plt
def plot(data, part):
x = range(1, data[:, 0].size + 1)
x2 = np.array(range(1, data[:, 0].size + 1))
mask = (data[:, 0] < 0.9196246) & (data[:, 1] > 0.919663)
y_min = np.maximum(data[:, 1], 0.9196246)
y_max = np.minimum(data[:, 0], 0.919663)
if part == 2 and np.any(mask):
plt.vlines(x, data[:, 0], data[:, 1])
plt.vlines(x2[mask], y_min[mask], y_max[mask],
colors="g")
elif part == 3:
plt.vlines(x, data[:, 0], data[:, 1])
plt.hlines(0.919603, 0, 200, colors="g", lw=2)
else:
plt.vlines(x, data[:, 0], data[:, 1])
def interval(data, eps, part, label=""):
for i in range(len(data)):
data[i].values[:, 1] = data[i].values[:, 0] + eps
data[i].values[:, 0] -= eps
plot(data[i].values, part)
plt.xlabel('n')
plt.ylabel('mV')
plt.title(label)
plt.show() | tronyaginaa/math_statistics | lab3/interval.py | interval.py | py | 939 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 1... |
28440101289 | from tensorboardX import SummaryWriter
from multiprocessing import current_process
from .abstract_writer import AbstractWriter
from .helpers import add_value_wrapper
from ..helpers import concurrent
class TensorboardWriter(AbstractWriter):
def __init__(self, use_hdf_hook=True, **kwargs):
AbstractWriter.__init__(self, **kwargs)
self.use_hdf_hook = use_hdf_hook
if current_process().name == 'MainProcess' or self.scope != 'root':
self.summary_writer = SummaryWriter(self.output_dir)
def _run(self):
AbstractWriter._run(self)
self.write_to_writer()
@concurrent
def write_to_writer(self):
for (type, k), v in self.data.items():
for (step, value) in list(v):
if type == 'scalar':
self.summary_writer.add_scalar(k, value, step)
elif type == 'histogram':
self.summary_writer.add_histogram(k, value, step)
elif type == 'image':
self.summary_writer.add_image(k, value, step)
elif type == 'scalars':
self.summary_writer.add_scalars(k, value, step)
self.data.clear()
def add_data(self, type, key, value, step):
self.data[(type, key)].append([step, value])
@concurrent
@add_value_wrapper
def add_scalar(self, key, value, step):
self.add_data('scalar', key, value, step)
@concurrent
@add_value_wrapper
def add_histogram(self, key, value, step):
self.add_data('histogram', key, value, step)
@concurrent
@add_value_wrapper
def add_image(self, key, value, step):
self.add_data('image', key, value, step)
@concurrent
@add_value_wrapper
def add_scalars(self, key, value, step):
self.add_data('scalars', key, value, step)
@concurrent
@add_value_wrapper
def add_array(self, key, value, step):
# Skipping array data
pass
def __repr__(self):
return 'TensorboardWriter'
def close(self):
AbstractWriter.close(self)
if current_process().name == 'MainProcess' or self.scope != 'root':
self.write_to_writer()
self.summary_writer.flush()
self.summary_writer.close()
self.logger.info(f'{self} closed')
| martius-lab/cee-us | mbrl/allogger/writers/tensorboardwriter.py | tensorboardwriter.py | py | 2,331 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "abstract_writer.AbstractWriter",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "abstract_writer.AbstractWriter.__init__",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "abstract_writer.AbstractWriter",
"line_number": 11,
"usage_type": "... |
74207610663 | from turtle import title
from flask import Blueprint, render_template, request, session, redirect, flash
import mysql.connector
from webappfiles import dbconnect
from datetime import datetime
import os
views = Blueprint('views', __name__)
cur, con = dbconnect.get_connection()
#referring to the default page via the “/” route
@views.route("/")
def home():
return render_template("index.html")
@views.route("/houseowner/")
def houseowner():
return render_template("houseowner.html")
@views.route("/login/")
def login():
return render_template("login.html")
@views.route("/loginho/")
def loginho():
return render_template("loginho.html")
@views.route("/signup/")
def signup():
return render_template("signup.html")
@views.route("/viewjobs/")
def viewjobs():
cur.execute("SELECT * FROM tbljob")
rows = cur.fetchall()
return render_template("viewjobs.html", rows = rows)
@views.route("/adminhome/")
def adminhome():
return render_template("adminhome.html")
@views.route("/addjob/")
def add_job():
cur.execute("select* from tbldistrict")
rows1=cur.fetchall()
return render_template("addjob.html",rows1=rows1)
@views.route("/savedetails", methods = ["POST"])
def saveDetails():
msg = "msg"
if request.method == "POST":
try:
# add codes to retrieve the form values
title = request.form["txttitle"]
salary = request.form["txtsal"]
ref = request.form["txtref"]
desr = request.form["txtdesc"]
dt = request.form["txtcdate"]
ho= request.form["txtho"]
f = request.files["filecover"]
f.save(os.path.join("./webappfiles/static/images" , f.filename))
full_filename = os.path.join("/static/images" , f.filename)
sql = "INSERT into tbljob (JOB_TITLE, SALARY, JOB_REFERENCE, JOD_DESCRIPTION, CLOSING_DATE, HOUSEOWNER, JOB_COVER) values (%s,%s,%s,%s,%s,%s,%s)"
# add the form variables for each column
val = (title,salary,ref,desr,dt, ho,full_filename)
cur.execute(sql, val)
con.commit()
msg = str(cur.rowcount) + " job added"
except Exception as e:
con.rollback()
msg = "Job cannot be added " + str(e)
finally:
#pass the msg variable to the return statement
return render_template("addjob.html",msg=msg, title=title)
con.close()
@views.route("/joblisting/")
def job_listing():
cur, con = dbconnect.get_connection()
cur.execute("SELECT * FROM tbljob")
rows = cur.fetchall()
return render_template('joblisting.html', rows=rows)
@views.route("/searchjob/")
def search_job():
return render_template("search.html")
@views.route("/searchjb/", methods = ["GET"])
def searchjob():
#retrieve the querystring txtlang from the URL
title = request.args.get("txtjob")
try:
sql = "select * from tbljob WHERE JOB_TITLE = %s "
val = (title,)
cur.execute(sql, val)
rows = cur.fetchall()
msg = str(cur.rowcount) + " record found!"
except:
msg = "There was an issue while searching!"
finally:
return render_template("search.html", rows=rows,msg = msg, title=title)
@views.route("/updatejob/")
def update_job():
return render_template("updatejob.html")
@views.route("/updaterecord/", methods = ["POST"])
def updaterecord():
#retrieve the form values
salary = request.form["txtsalary"]
title = request.form["txttitle"]
try:
sql = "UPDATE tbljob SET SALARY = %s where JOB_TITLE = %s"
val = (salary, title)
cur.execute(sql, val)
con.commit()
msg = str(cur.rowcount) + " record successfully updated"
except:
msg = "Cannot be updated"
finally:
return render_template("updatejob.html", msg = msg)
@views.route("/profile/")
def profile():
if 'ID' in session:
cur, con = dbconnect.get_connection()
sql = "SELECT * FROM tblhousekeeper where ID = %s"
val = (session.get('ID'),)
cur.execute(sql, val)
rows = cur.fetchall()
return render_template('profile.html', rows=rows)
else:
return redirect('/')
@views.route("/profileho/")
def profileho():
if 'ID' in session:
cur, con = dbconnect.get_connection()
sql = "SELECT * FROM tblhouseowner where ID = %s"
val = (session.get('ID'),)
cur.execute(sql, val)
rows = cur.fetchall()
return render_template('profileho.html', rows=rows)
else:
return redirect('/')
@views.route("/updateprofile/", methods=["GET", "POST"])
def update_profile():
if request.method == "POST":
email = request.form['txtemail']
first_name = request.form['txtfn']
last_name = request.form['txtln']
contactnum = request.form['txtcontactnum']
try:
cur, con = dbconnect.get_connection()
sql = "UPDATE tblhousekeeper SET EMAIL = %s, FNAME = %s, LNAME = %s , CONTACT_NUMBER = %s where ID = %s"
val = (email, first_name, last_name, contactnum, session['ID'])
cur.execute(sql, val)
con.commit()
msg = str(cur.rowcount) + " record successfully updated"
flash(msg, category='success')
except:
msg = "Cannot be updated"
flash(msg, category='error')
finally:
return redirect('/profile/')
else:
return redirect('/login/')
@views.route("/updateprofileho/", methods=["GET", "POST"])
def update_profileho():
if request.method == "POST":
email = request.form['txtemail']
first_name = request.form['txtfn']
last_name = request.form['txtln']
try:
cur, con = dbconnect.get_connection()
sql = "UPDATE tblhouseowner SET EMAIL = %s, FNAME = %s, LNAME = %s where ID = %s"
val = (email, first_name, last_name, session.get('ID'),)
cur.execute(sql, val)
con.commit()
msg = str(cur.rowcount) + " record successfully updated"
flash(msg, category='success')
except:
msg = "Cannot be updated"
flash(msg, category='error')
finally:
return redirect('/profileho/')
else:
return redirect('/adminhome/')
@views.route("/housekeeper/")
def housekeeper():
cur, con = dbconnect.get_connection()
cur.execute("SELECT * FROM tblskills")
rows = cur.fetchall()
return render_template('housekeeper.html', rows=rows)
@views.route("/application/")
def application():
cur.execute("SELECT * FROM tbljob jb INNER JOIN tblhouseowner ho on (jb.HOUSEOWNER=ho.ID)")
rows1 = cur.fetchall()
return render_template("application.html", rows1 = rows1)
@views.route("/submit_app/", methods=['GET', 'POST'])
def submit_app():
if request.method == 'POST':
job = request.form['txtjob']
date = request.form['txtdate']
sql = "INSERT into tblapplication (DATE_OF_APPLICATION, JOB, HOUSEKEEPER) values (%s,%s,%s)"
val = (date, job, session.get('ID'))
cur.execute(sql, val)
con.commit()
return render_template("application.html")
@views.route("/searchskill/")
def searchskill():
return render_template("searchskill.html")
@views.route("/searchsk/", methods = ["GET"])
def searchsk():
#retrieve the querystring txtlang from the URL
title = request.args.get("txtsk")
try:
sql = "select * from tblhousekeeper hk INNER JOIN tblskills sk ON (hk.ID=sk.ID) where SKILL_TYPE = %s"
val = (title,)
cur.execute(sql, val)
rows = cur.fetchall()
msg = str(cur.rowcount) + " record found!"
except:
msg = "There was an issue while searching!"
finally:
return render_template("searchskill.html", msg = msg, rows=rows, title = title) | yelenaM4/Mohandyman | webappfiles/views.py | views.py | py | 7,945 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "webappfiles.dbconnect.get_connection",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "webappfiles.dbconnect",
"line_number": 12,
"usage_type": "name"
},
{
"api_na... |
11844140747 | import configparser
import csv
import os
# Params: abs file path, file section and its key
def get_config_value(conf_file, section, key):
config = configparser.ConfigParser()
config.read(conf_file)
return config[section][key]
def get_tickers(table, fn):
dirname = os.path.dirname(__file__)
path = dirname.replace('utils', 'data/csv/')
path += table.value + "-TICKERS.csv"
data = []
with open(path) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
data.append(row)
return fn(data)
| xka155/PFF | core/utils/config.py | config.py | py | 563 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
... |
75263417065 | from dataminer.file import File
from dataminer.processor import PROCESSORS, Processor
from dataminer.extractor import EXTRACTORS
from pathlib import Path
from fnmatch import fnmatchcase
import os
import yaml
import time
CONFIG = {}
def load_config(path: Path):
global CONFIG
with open(path, "rb") as fd:
CONFIG = yaml.safe_load(fd)
def filter_match(path_to_match, filters):
for pat in filters:
if fnmatchcase(path_to_match, pat):
return True
return False
def process_dir(input_path: Path, output_path: Path):
output_root = output_path.absolute()
if not output_root.exists():
output_root.mkdir(parents=True)
instantiated_processors: list[Processor] = []
proc_timings = {}
proc_dict = {}
for proc in PROCESSORS:
proc_dict[proc.name] = proc
for proc_config in CONFIG["processors"]:
name = proc_config["name"]
proc = proc_dict[name](output_root, proc_config)
proc.pre_process()
instantiated_processors.append(proc)
def run_processors_on_file(file_info: File):
for proc in instantiated_processors:
path_to_match = file_info.path.relative_to(file_info.input_root).as_posix()
pats = proc.config["filters"]
if filter_match(path_to_match, pats):
# print(path_to_match, pat, proc.name)
start_time = time.time()
try:
proc.run_processor(file_info)
except Exception as e:
print(
f'ERROR while running processor "{proc.name}" on file "{file_info.path}"'
)
raise e
final_time = time.time() - start_time
proc_timings[proc.name] = proc_timings.get(proc.name, 0) + final_time
for root, _, files in os.walk(input_path):
for path in files:
file_info = File(
input_root=input_path.absolute(),
path=Path(os.path.join(root, path)).absolute(),
)
run_processors_on_file(file_info)
for ex in EXTRACTORS:
path_to_match = file_info.path.relative_to(
file_info.input_root
).as_posix()
pats = CONFIG["extractors"][ex.name]["filters"]
if filter_match(path_to_match, pats):
# print(path_to_match, pat, ex.name)
for f in ex.get_files(file_info):
run_processors_on_file(f)
print("TIMINGS:")
for (name, timing) in proc_timings.items():
print(f"{name}: {timing}")
| ReplayCoding/tf2-dataminer | dataminer/build.py | build.py | py | 2,671 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fnmatch.fnmatchcase",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"l... |
8635247821 | from django.urls import path
from . import apis
urlpatterns_api_tables = [
path('table-list/', apis.TableListAPI.as_view()),
path('monthly-tables/', apis.MonthlyTableListAPI.as_view()),
path('main-page/', apis.MainPageAPI.as_view()),
path('make-log/', apis.MakeTableLogAPI.as_view()),
path('search/', apis.TableSearchAPI.as_view()),
]
| hanoul1124/healthcare2 | app/tables/urls.py | urls.py | py | 357 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
22766444587 | from flask import Blueprint, render_template, url_for, request, redirect, flash, make_response
from flask_login import login_required, current_user
from models import * # import database details
from sqlalchemy import or_
import matplotlib.pyplot as plt
import io
import base64
import sqlite3
# creating blueprint for analytics page
analytics = Blueprint("analytics", __name__)
@login_required
@analytics.route("/analysis")
def analysis():
conn = sqlite3.connect('ticket.db')
cursor = conn.cursor()
cursor.execute("SELECT name, avg_rating FROM Show")
data = cursor.fetchall()
x = [row[0] for row in data]
y = [row[1] for row in data]
print(x)
print(y)
plt.plot(x, y)
plt.xlabel('Show Name')
plt.ylabel('Avg Rating')
plt.title('Ratings graph')
buffer = io.BytesIO()
plt.savefig(buffer, format='jpeg')
buffer.seek(0)
image_jpeg = buffer.getvalue()
graph = base64.b64encode(image_jpeg).decode('utf-8')
return render_template("analysis.html", user=current_user, admin=current_user, graph=graph)
| Satyajay2020/College_Project | analytics.py | analytics.py | py | 1,109 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
28833400649 | import time
from machine import Pin
from discharge_stats import DischargeStats
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
AUTO_DISCHARGE = False
class Channel:
    def __init__(
        self,
        channel,
        discharge_pin,
        leds_object=None,
        temperature_sensors=None,
        current_sensors=None,
        low_voltage_cutoff=2.5,
        start_discharge_voltage_cutoff=3,
        temperature_cutoff=50,
    ):
        """Control an individual battery discharge channel.

        Args:
            channel (int): Physical/logical channel number 1-8.
            discharge_pin (int): Pin number of the MOSFET that controls discharge.
            leds_object (Leds): Leds object for showing output status.
            temperature_sensors: provider exposing temp_bus.convert_temp() and
                get_temperature(channel).
            current_sensors: provider exposing get_channel_current(channel) and
                get_channel_voltage_and_current(channel).
            low_voltage_cutoff (float): voltage (V) below which discharge should stop.
            start_discharge_voltage_cutoff (float): minimum voltage (V) to start a discharge.
            temperature_cutoff (float): maximum temperature (deg C).
                NOTE(review): the three cutoffs are stored but not enforced in
                this class -- presumably checked by a supervising controller.
        """
        # Channel id is kept as a string; the sensor helpers key on it.
        self.channel = str(channel)
        self.low_voltage_cutoff = low_voltage_cutoff
        self.start_discharge_voltage_cutoff = start_discharge_voltage_cutoff
        self.temperature_cutoff = temperature_cutoff
        # Output pin driving the discharge MOSFET.
        self.discharge_pin = Pin(discharge_pin, Pin.OUT)
        self.leds = leds_object
        self.temperature_sensors = temperature_sensors
        self.current_sensors = current_sensors
        # Lifecycle state: "empty" -> "idle"/"discharging" -> "complete" (or "error"/"verror").
        self.state = "empty"
        self.led_state = 0
        # DischargeStats instance while/after a discharge; None before the first one.
        self.discharge_stats = None
        self.temperature = 0
        # Prime the cached sensor readings so get_stats() has data immediately.
        self.voltage_and_current = self.request_voltage_and_current()
        self.current = self.request_current()
        # Make sure the MOSFET starts switched off.
        self.discharge_pin.off()

    def __str__(self):
        # A channel prints as its channel number string.
        return self.channel

    def cell_removed(self):
        """
        A cell was removed from the channel.
        """
        # Stop any running discharge before marking the slot empty.
        self.stop_discharge()
        self.set_empty()
        log.info("Cell removed from slot {}.".format(self.channel))

    def cell_inserted(self):
        """
        A cell was inserted into the channel.
        """
        log.info("Cell inserted on slot {}.".format(self.channel))
        # Either start discharging immediately or wait for an explicit command.
        if AUTO_DISCHARGE:
            self.start_discharge()
        else:
            self.set_idle()

    def start_discharge(self):
        """
        Starts the current discharge.
        """
        self.set_discharging()
        # Capture the starting temperature and voltage for the statistics.
        self.discharge_stats = DischargeStats(
            self.temperature, self.voltage_and_current["voltage"]
        )
        # Energize the MOSFET to begin discharging.
        self.discharge_pin.on()
        log.debug("Cell started discharging on slot {}.".format(self.channel))

    def stop_discharge(self):
        """
        Stops the current discharge.
        """
        self.discharge_pin.off()
        # self.send_stats() # TODO: implement
        # NOTE(review): discharge_stats may still be None if no discharge was
        # ever started on this slot -- the log line would then print "None".
        log.info(
            "Discharged finished at {}mAh on channel {}.".format(
                str(self.discharge_stats), self.channel
            )
        )
        self.set_complete()

    def stop_action(self):
        # Stop whatever is running; only "discharging" currently has a stop path.
        if self.state == "discharging":
            self.stop_discharge()
        else:
            log.error("Unable to stop action: " + self.state)

    def get_stats(self):
        # Snapshot of the channel for reporting; uses cached sensor readings.
        return {
            "id": self.channel,
            "state": self.state,
            "stage": None,
            "current": self.voltage_and_current["current"],
            "voltage": self.voltage_and_current["voltage"],
            "temperature": self.temperature,
            "capacity": self.discharge_stats.get_milliamp_hours()
            if self.discharge_stats
            else 0,
        }

    def request_temperatures(self):
        # Kick off a (bus-wide) temperature conversion; read later via get_temperature().
        self.temperature_sensors.temp_bus.convert_temp()

    def get_temperature(self):
        """Returns the latest temperature read for the channel.
        Returns:
            number: Temperature in degrees Celsius.
        """
        self.temperature = self.temperature_sensors.get_temperature(
            self.channel)
        return self.temperature

    def request_current(self):
        """Requests a new current reading for the channel.
        Returns:
            number: Current in milliamps.
        """
        self.current = self.current_sensors.get_channel_current(self.channel)
        return self.current

    def request_voltage_and_current(self):
        """Requests a new voltage and current reading for the channel.
        Returns:
            number: Voltage in volts and current in milliamps.
        """
        self.voltage_and_current = self.current_sensors.get_channel_voltage_and_current(self.channel)
        return self.voltage_and_current

    def get_current(self):
        """Returns the cached current for the channel (no new sensor read).
        Returns:
            number: Current in milliamps.
        """
        return self.current

    def get_voltage_and_current(self):
        """Returns the cached voltage and current for the channel (no new sensor read).
        Returns:
            number: Voltage in volts and current in milliamps.
        """
        return self.voltage_and_current

    def set_led(self, colour, write=True):
        # Forward the colour to the Leds helper; write=False batches updates.
        self.leds.set_channel(self.channel, colour, write)

    # State setters -- one per lifecycle state.
    def set_error(self):
        self.state = "error"

    def set_verror(self):
        self.state = "verror"

    def set_idle(self):
        self.state = "idle"

    def set_empty(self):
        self.state = "empty"

    def set_discharging(self):
        self.state = "discharging"

    def set_complete(self):
        self.state = "complete"
| koalacreations/kCharge-firmware | kCharge-firmware/channel.py | channel.py | py | 5,242 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "machine.Pin",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "machine.Pin.OUT",
... |
74647178343 | import logging
import os
from wafl.connectors.bridges.llm_task_extractor_bridge import LLMTaskExtractorBridge
from wafl.extractors.dataclasses import Answer
_path = os.path.dirname(__file__)
_logger = logging.getLogger(__file__)
class TaskExtractor:
    """Extract the task implied by the recent dialogue using an LLM bridge."""

    def __init__(self, config, interface, logger=None):
        """
        Args:
            config: configuration forwarded to LLMTaskExtractorBridge.
            interface: conversation interface exposing get_utterances_list().
            logger: unused; kept for interface compatibility.
        """
        self._interface = interface
        self._connector = LLMTaskExtractorBridge(config)
        # Only the most recent utterances are passed to the model as context.
        self._max_num_past_utterances = 3
        # Bare acknowledgements that carry no task to extract.
        self._to_ignore = ["yes", "no", "ok", "okay", "sure", "nope", "yep", "you"]

    async def extract(self, query: str) -> "Answer":
        """Predict which task the current dialogue implies.

        Args:
            query: the current user query; used as the dialogue when the
                interface has no utterance history yet.
        Returns:
            Answer with the predicted task text, or a neutral Answer when the
            last user utterance is just an acknowledgement.
        """
        dialogue = "\n".join(
            self._interface.get_utterances_list()[-self._max_num_past_utterances :]
        )
        if not dialogue:
            dialogue = query

        if self._ignore_utterances(dialogue):
            return Answer.create_neutral()

        prediction = await self._connector.get_answer(
            "",
            dialogue,
            query,
        )
        return Answer(text=prediction.strip())

    def _ignore_utterances(self, dialogue: str) -> bool:
        """Return True when the last user utterance is a bare acknowledgement."""
        utterance = dialogue.split("\n")[-1].split("user:")[-1].strip()
        return utterance.lower() in self._to_ignore
| fractalego/wafl | wafl/extractors/task_extractor.py | task_extractor.py | py | 1,268 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "wafl.connectors.bridges.l... |
485015950 | from PIL import Image
import time
from libsvm.svmutil import svm_predict, svm_train
import numpy as np
import sys
import pandas as pd
from libsvm.svm import *
from libsvm.svmutil import *
train_file = '/Users/aparahuja/Desktop/IITD/ML/Assignment 2/Q2/mnist/train.csv'
test_file = '/Users/aparahuja/Desktop/IITD/ML/Assignment 2/Q2/mnist/test.csv'
if len(sys.argv) > 1:
train_file = sys.argv[1]
test_file = sys.argv[2]
def F1_Confusion(y_true, y_predict, num_classes=10):
    """Print the confusion matrix and the per-class / macro F1 scores.

    Rows of the confusion matrix are the true labels, columns the predictions.

    Args:
        y_true: sequence of true class labels in 0..num_classes-1.
        y_predict: sequence of predicted class labels, same length as y_true.
        num_classes: number of classes (default 10, the MNIST digits).
    """
    confusion = np.zeros((num_classes, num_classes))
    for true_label, pred_label in zip(y_true, y_predict):
        confusion[int(true_label)][int(pred_label)] += 1
    print("Confusion Matrix: ")
    print(confusion)
    maxDiagonal = max(range(num_classes), key=lambda x: confusion[x][x])
    print("Max Diagonal Entry in confusion matrix occurs for the class - " + str(maxDiagonal))
    print("F1 scores:")
    f1_avg = 0
    for i in range(num_classes):
        tp = confusion[i][i]
        # Row i minus the diagonal -> false negatives; column i minus the
        # diagonal -> false positives. (The original code had the two names
        # swapped; F1 is symmetric in fp/fn so the scores were unaffected.)
        fn = sum(confusion[i][j] for j in range(num_classes) if j != i)
        fp = sum(confusion[j][i] for j in range(num_classes) if j != i)
        denom = tp + (fp + fn) / 2
        # Guard against 0/0 when a class appears in neither vector.
        f1_score = tp / denom if denom > 0 else 0.0
        f1_avg += f1_score / num_classes
        print("\tClass " + str(i) + " = " + "{:.5f}".format(f1_score))
    print("Macro F1 score = " + "{:.5f}".format(f1_avg))
def Binary_F1_Confusion(y_true, y_predict):
    """Print the 2x2 confusion matrix and the per-class / macro F1 scores.

    Rows of the matrix are the true labels, columns the predictions; labels
    must be 0 or 1.
    """
    cm = np.zeros((2, 2))
    for idx in range(len(y_true)):
        cm[int(y_true[idx])][int(y_predict[idx])] += 1
    print("Confusion Matrix: ")
    print(cm)
    # First class whose diagonal entry is largest (same tie-break as max()).
    diagonal = [cm[c][c] for c in range(2)]
    best_class = diagonal.index(max(diagonal))
    print("Max Diagonal Entry in confusion matrix occurs for the class - " + str(best_class))
    print("F1 scores:")
    macro = 0
    for c in range(2):
        tp = cm[c][c]
        off_col = sum(cm[r][c] if r != c else 0 for r in range(2))
        off_row = sum(cm[c][r] if r != c else 0 for r in range(2))
        score = tp / (tp + (off_col + off_row) / 2)
        macro += score / 2
        print("\tClass " + str(c) + " = " + "{:.5f}".format(score))
    print("Macro F1 score = " + "{:.5f}".format(macro))
def load_data(file):
    """Load a headerless label-last CSV into scaled features and labels.

    Args:
        file: path to a CSV file whose last column holds the class label.
    Returns:
        (x, y): x is the feature matrix scaled to [0, 1] by dividing by 255;
        y is an (n, 1) array of integer labels.
    """
    frame = pd.read_csv(file, header=None)
    label_col = frame.shape[1] - 1
    labels = np.array(frame[label_col]).reshape(-1, 1)
    features = np.array(frame.drop(label_col, axis=1)) / 255.0
    return features, labels
def load_data_binary(file, a, b):
    """Load only the rows of classes a and b from a label-last CSV.

    Labels equal to a become 1, labels equal to b become 0; every other row
    is dropped.

    Args:
        file: path to a headerless CSV whose last column is the class label.
        a: positive class digit (taken modulo 10).
        b: negative class digit (taken modulo 10).
    Returns:
        (x, y): features scaled to [0, 1] and an (n, 1) array of 0/1 labels.
    """
    a, b = a % 10, b % 10
    frame = pd.read_csv(file, header=None)
    label_col = frame.shape[1] - 1
    subset = frame[(frame[label_col] == a) | (frame[label_col] == b)]
    labels = np.array(subset[label_col]).reshape(-1, 1)
    labels[labels == a] = 1
    labels[labels == b] = 0
    features = np.array(subset.drop(label_col, axis=1)) / 255.0
    return features, labels
# --- Multi-class SVM: train an RBF-kernel model on all 10 digits ---
print("MULTI-CLASS CLASSIFICATION MODEL")
X, Y = load_data(train_file)
testX, testY = load_data(test_file)
st = time.time()
# libsvm options: C-SVC, RBF kernel, gamma=0.05, quiet mode.
prob = svm_train(Y.reshape(-1), X, "-s 0 -t 2 -g 0.05 -q")
en = time.time()
print("Training Time = " + "{:.2f}".format(en - st) + " sec")
print("Testing -")
predlabel = svm_predict(testY.reshape(-1), testX, prob)[0]
F1_Confusion(testY.reshape(-1), predlabel)
# --- Binary SVM: classes 5 vs 6 only ---
print("BINARY CLASSIFICATION MODEL")
X, Y = load_data_binary(train_file, 5, 6)
testX, testY = load_data_binary(test_file, 5, 6)
st = time.time()
prob = svm_train(Y.reshape(-1), X, "-s 0 -t 2 -g 0.05 -q")
en = time.time()
print("Training Time = " + "{:.2f}".format(en - st) + " sec")
print("Testing -")
predlabel = svm_predict(testY.reshape(-1), testX, prob)[0]
Binary_F1_Confusion(testY.reshape(-1), predlabel)
# Dump the pixel rows of misclassified binary examples, one CSV line each.
# NOTE(review): predlabel/testY index the *test* set but X here is the
# *training* matrix -- this looks like it should be testX[i]; confirm intent.
g = open("misclassified.txt", 'w')
g.close()
f = open("misclassified.txt", 'a')
for i in range(len(predlabel)):
    if predlabel[i] != testY[i][0]:
        for x in X[i][:-1]:
            f.write(str(int(255*x)) + ",")
        f.write(str(int(255*X[i][-1]))+"\n")
f.close()
# Re-read the dump and render each row as a 28x28 grayscale PNG.
f = open("misclassified.txt", 'r')
lines = [(l.strip('\n')).split(",") for l in f.readlines()]
lines = [[int(x) for x in line] for line in lines]
cnt = 0
for line in lines:
    cnt += 1
    data = np.zeros((28, 28), dtype=np.uint8)
    for i in range(28):
        for j in range(28):
            data[i][j] = line[28*i + j]
    img = Image.fromarray(data, 'L')
    img.save(str(cnt) + '.png')
    # comment the lines below to store all misclassified digits as images
    if cnt > 10:
        break
| AparAhuja/Machine_Learning | Naive Bayes and SVM/Q2/multi_c.py | multi_c.py | py | 4,313 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_num... |
74839486503 | from lxml import etree
import pandas
def getdata(name, indx):
    """Parse one saved OpenSea account HTML page and extract profile stats.

    Args:
        name: path of the saved HTML file to parse.
        indx: row index recorded as the first column of the result.
    Returns:
        A list containing a single row:
        [indx, profile name, joined date, collected, created, favorited, sns icons].
    """
    all_rows = []
    # Read the saved page; the with-block guarantees the handle is closed.
    with open(name, encoding="utf-8") as page:
        text = page.read()
    htmll = etree.HTML(text)
    row = [indx]
    # Each xpath returns a list of text fragments; join them into one cell.
    profile_name = htmll.xpath('//div[@class="Blockreact__Block-sc-1xf18x6-0 Flexreact__Flex-sc-1twd32i-0 gbiTQT jYqxGr"]/div/text()')
    joined_date = htmll.xpath(
        '//div[@class="AccountHeaderreact__MainContainer-sc-1omadrn-2 etqLOL"]/span/text()')
    collected = htmll.xpath(
        '//ul[@class="Blockreact__Block-sc-1xf18x6-0 Menureact__MenuBase-sc-1j0z9gq-1 AccountPageNavbarreact__StyledNavbar-sc-10ky4m4-0 elqhCm dlHLLo jZKuuO"]/li[1]/a/div/span/text()')
    created = htmll.xpath(
        '//ul[@class="Blockreact__Block-sc-1xf18x6-0 Menureact__MenuBase-sc-1j0z9gq-1 AccountPageNavbarreact__StyledNavbar-sc-10ky4m4-0 elqhCm dlHLLo jZKuuO"]/li[2]/a/div/span/text()')
    favorited = htmll.xpath(
        '//ul[@class="Blockreact__Block-sc-1xf18x6-0 Menureact__MenuBase-sc-1j0z9gq-1 AccountPageNavbarreact__StyledNavbar-sc-10ky4m4-0 elqhCm dlHLLo jZKuuO"]/li[3]/a/div/span/text()')
    sns_icons = htmll.xpath(
        '//*[@id="main"]/div/div/div[1]/div[2]/div[2]/div/div/button/div/i/text()')
    row.append(' '.join(profile_name))
    row.append(' '.join(joined_date))
    row.append(' '.join(collected))
    row.append(' '.join(created))
    row.append(' '.join(favorited))
    row.append(' '.join(sns_icons))
    print(row)  # progress/debug output, one row per parsed page
    all_rows.append(row)
    return all_rows
if __name__ == '__main__':
    allarr2 = []
    filename = "collect-creat-fav"
    lb = "家居"
    # Parse up to 499 saved pages; failures (missing file, bad markup) are
    # logged and skipped so one broken page does not abort the run.
    for i in range(1 , 500):
        for j in range(1 , 2):
            try:
                arr2 = getdata('./info2html/'+ "aa1info" +str(j)+ "inj" + str(i) +'.html' , i)
                allarr2.extend(arr2)
            except Exception as e :
                print("------------****" , e)
                continue
    # Dump every collected row into one CSV with fixed column names.
    name = ["index","name","time","coll","crea","favc","snss"]
    test = pandas.DataFrame(columns=name, data=allarr2)
    test.to_csv("./csv/aa0519v2" + filename + '2.csv', encoding='utf-8')
| chenqiuying1023/opensea-supergucci | handletocsv.py | handletocsv.py | py | 2,353 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "lxml.etree.HTML",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 54,
"usage_type": "call"
}
] |
33596607925 | import sys
import numpy as np
import pandas as pd
#from agents.policy_search import PolicySearch_Agent
from agents.agent import DDPG
from task import Task
# Train a DDPG agent to fly the quadcopter to the target position.
num_episodes = 1000
# Target: hover at z = 100 above the origin.
target_pos = np.array([0., 0., 100.])
task = Task(target_pos=target_pos)
agent = DDPG(task)
for i_episode in range(1, num_episodes+1):
    state = agent.reset_episode() # start a new episode
    while True:
        action = agent.act(state)
        next_state, reward, done = task.step(action)
        # Let the agent store the transition and learn from it.
        agent.step(action, reward, next_state, done)
        state = next_state
        if done:
            # "\r" overwrites the previous line for a compact progress display.
            print("\rEpisode = {:4d}, score = {:7.3f} (best = {:7.3f}) total_reword = {:7.3f}".format(
                i_episode, agent.score, agent.best_score, agent.total_reward), end="") # [debug]
            break
    sys.stdout.flush()
| AndyClouder/ML | RL-Quadcopter-2/project.py | project.py | py | 817 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "task.Task",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "agents.agent.DDPG",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "task.step",
"line_number":... |
29613368843 | import os,random,warnings,time,math
import torch
import torch.nn as nn
from dataloader.data_loader import prepare_dataset, _collate_fn
from base_builder.model_builder import build_model
from dataloader.vocabulary import KsponSpeechVocabulary
from omegaconf import OmegaConf
from tensorboardX import SummaryWriter
from metric.metric import CharacterErrorRate
from checkpoint.checkpoint import Checkpoint
from torch.utils.data import DataLoader
import pdb
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def test(config):
    """Evaluate a saved audio-visual speech model on the test transcripts.

    Loads the serialized model named by ``config.train.test_model``, runs it
    over the test set, and prints the character error rate per batch.

    Args:
        config: OmegaConf config with the train.* fields used below
            (gpu, vocab_label, test_model, transcripts_path_test,
            batch_size, num_workers).
    """
    os.environ["CUDA_VISIBLE_DEVICES"]= config.train.gpu
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    vocab = KsponSpeechVocabulary(config.train.vocab_label)

    # Load the whole serialized model onto the chosen device and freeze it.
    # (BUGFIX: removed a leftover pdb.set_trace() that blocked execution, plus
    # duplicated model.eval()/print(model) calls and a no-op "model = model".)
    test_model = config.train.test_model
    model = torch.load(test_model, map_location=lambda storage, loc: storage).to(device)
    model.eval()
    test_metric = CharacterErrorRate(vocab)
    print(model)
    print(count_parameters(model))

    testset = prepare_dataset(config, config.train.transcripts_path_test, vocab, Train=False)
    test_loader = torch.utils.data.DataLoader(dataset=testset, batch_size=config.train.batch_size,
                                              shuffle=False, collate_fn=_collate_fn,
                                              num_workers=config.train.num_workers)
    start_time = time.time()
    with torch.no_grad():
        for i, (video_inputs, audio_inputs, targets, video_input_lengths, audio_input_lengths, target_lengths) in enumerate(test_loader):
            video_inputs = video_inputs.to(device)
            audio_inputs = audio_inputs.to(device)
            targets = targets.to(device)
            video_input_lengths = video_input_lengths.to(device)
            audio_input_lengths = audio_input_lengths.to(device)
            target_lengths = torch.as_tensor(target_lengths).to(device)
            outputs = model(video_inputs,
                            video_input_lengths,
                            audio_inputs,
                            audio_input_lengths,
                            targets,
                            target_lengths,
                            )
            # Greedy decode: argmax over the vocabulary dimension.
            y_hats = outputs.max(-1)[1]
            # Drop the leading token (presumably <sos> -- TODO confirm) from
            # the references before scoring.
            cer = test_metric(targets[:, 1:], y_hats)
            print(cer)
    print("Total Time")
    print(time.time() - start_time)
if __name__ == '__main__':
    # Load the evaluation settings and run the test loop.
    config = OmegaConf.load('test.yaml')
    test(config)
| jungwook518/WOOK_Challenge | test.py | test.py | py | 2,663 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
11747758050 | import streamlit as st
import numpy as np
import pandas as pd
# Streamlit tutorial script: basic widgets, charts, map, image, progress bar.
st.title('Steamlit 基礎')
st.write('Hello World!')
st.write('I love World')
# Small hand-written table.
df = pd.DataFrame({
    '1列目': [1,2,3,4],
    '2列目': [10,20,30,40]
})
st.dataframe(df.style.highlight_between(axis=1), width=300,height=150)
# Random 10x3 frame, shown as a table and as line/bar charts.
df_1 = pd.DataFrame(
    np.random.rand(10,3),
    columns=['a','b','c'])
st.dataframe(df_1)
st.line_chart(df_1)
st.bar_chart(df_1)
df_m = pd.DataFrame(
    # random noise + latitude/longitude of Shinjuku
    np.random.rand(100,2) / [50, 50] + [35.69, 139.70],
    columns = ['lat', 'lon']
)
st.map(df_m)
from PIL import Image
# Show a local image only when the checkbox is ticked.
if st.checkbox('Show Image'):
    img = Image.open('/Users/tetsu/Desktop/streamlit/Iris.jpg')
    st.image(img, caption='Iris', use_column_width=True)
# Selectbox / text input / slider; bare expressions use Streamlit "magic" output.
option = st.selectbox('好きな数字を入れてください。', list(range(1,11)))
'好きな数字は、',option,'です。'
text = st.sidebar.text_input('好きなスポーツを入力してください。')
'好きなスポーツは ',text,'です。'
condition = st.sidebar.slider('貴方の調子は',0,100,50)
'Condition', condition
# Collapsible Q&A sections.
expander1 =st.expander('Question1')
expander1.write('Answer of Question1')
expander2 =st.expander('Question2')
expander2.write('Answer of Question2')
import time
latest_iteration = st.empty()
bar = st.progress(0)
# advance the progress bar every 0.1 s
for i in range(100):
    latest_iteration.text(f'Iteration{i + 1}')
    bar.progress(i + 1)
    time.sleep(0.1)
'Done'
{
"api_name": "streamlit.title",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"... |
7810152186 | import sys
import argparse
import random
import time
import numpy as np
import server.common.compute.diffexp_generic as diffexp_generic
from server.common.config.app_config import AppConfig
from server.data_common.matrix_loader import MatrixDataLoader
def main():
    """CLI driver: time a differential-expression computation on a dataset.

    Selects two groups of rows (by size or by an obs variable:value pair),
    runs the chosen diffexp algorithm for --trials iterations, and prints
    the elapsed time (and optionally the results) for each trial.
    """
    parser = argparse.ArgumentParser("A command to test diffexp")
    parser.add_argument("dataset", help="name of a dataset to load")
    parser.add_argument("-na", "--numA", type=int, help="number of rows in group A")
    parser.add_argument("-nb", "--numB", type=int, help="number of rows in group B")
    parser.add_argument("-va", "--varA", help="obs variable:value to use for group A")
    parser.add_argument("-vb", "--varB", help="obs variable:value to use for group B")
    parser.add_argument("-t", "--trials", default=1, type=int, help="number of trials")
    parser.add_argument("-a", "--alg", choices=("default", "generic"), default="default", help="algorithm to use")
    parser.add_argument("-s", "--show", default=False, action="store_true", help="show the results")
    parser.add_argument(
        "-n", "--new-selection", default=False, action="store_true", help="change the selection between each trial"
    )
    parser.add_argument("--seed", default=1, type=int, help="set the random seed")
    args = parser.parse_args()

    # Load the dataset through the server's matrix loader.
    app_config = AppConfig()
    app_config.update_server_config(single_dataset__datapath=args.dataset)
    app_config.update_server_config(app__verbose=True)
    app_config.complete_config()

    loader = MatrixDataLoader(args.dataset)
    adaptor = loader.open(app_config)

    # Seed both RNGs for reproducible selections. NOTE: the order of the
    # random.sample calls below is part of the reproducible sequence.
    random.seed(args.seed)
    np.random.seed(args.seed)

    rows = adaptor.get_shape()[0]

    # Group A: either a random sample of numA rows or an obs variable match.
    if args.numA:
        filterA = random.sample(range(rows), args.numA)
    elif args.varA:
        vname, vval = args.varA.split(":")
        filterA = get_filter_from_obs(adaptor, vname, vval)
    else:
        print("must supply numA or varA")
        sys.exit(1)

    # Group B: same options as group A.
    if args.numB:
        filterB = random.sample(range(rows), args.numB)
    elif args.varB:
        vname, vval = args.varB.split(":")
        filterB = get_filter_from_obs(adaptor, vname, vval)
    else:
        print("must supply numB or varB")
        sys.exit(1)

    for i in range(args.trials):
        # Optionally re-draw the random selections for every trial.
        if args.new_selection:
            if args.numA:
                filterA = random.sample(range(rows), args.numA)
            if args.numB:
                filterB = random.sample(range(rows), args.numB)

        # Convert index lists to boolean row masks.
        maskA = np.zeros(rows, dtype=bool)
        maskA[filterA] = True
        maskB = np.zeros(rows, dtype=bool)
        maskB[filterB] = True

        # Time one run of the selected algorithm.
        t1 = time.time()
        if args.alg == "default":
            results = adaptor.compute_diffexp_ttest(maskA, maskB)
        elif args.alg == "generic":
            results = diffexp_generic.diffexp_ttest(adaptor, maskA, maskB)
        t2 = time.time()
        print("TIME=", t2 - t1)

        if args.show:
            for res in results:
                print(res)
def get_filter_from_obs(adaptor, obsname, obsval):
    """Return the row indices where obs column *obsname* equals *obsval*.

    Exits the process with status 1 (after printing a message) when the
    column does not exist or no row matches the value.
    """
    columns = adaptor.get_obs_columns()
    if obsname not in columns:
        print(f"Unknown obs attr {obsname}: expected on of {columns}")
        sys.exit(1)
    values = adaptor.query_obs_array(obsname)[:]
    # Coerce the command-line string to the column's element type.
    target = type(values[0])(obsval)
    matches = np.where(values == target)[0]
    if len(matches) == 0:
        choices = np.unique(values)
        print(f"Unknown value in variable {obsname}:{obsval}: expected one of {list(choices)}")
        sys.exit(1)
    return matches
if __name__ == "__main__":
    # Script entry point.
    main()
| chanzuckerberg/cellxgene | test/performance/run_diffexp.py | run_diffexp.py | py | 3,548 | python | en | code | 528 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "server.common.config.app_config.AppConfig",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "server.data_common.matrix_loader.MatrixDataLoader",
"line_number": 35,
... |
14257608746 | import csv, os, glob, google_trans_new
from tqdm import tqdm # sweet progress bar
#==============================================================================
def get_lang():
    """
    Purpose:
        Prompt the user to pick a target language, then translate the XML file.
    Dependencies:
        google_trans_new
    Argument:
        None
    Returns:
        None
    """
    # dict of language short code (key) and display name (value)
    langs = google_trans_new.LANGUAGES
    # print a 1-based numbered menu of every supported language
    for i in range(len(langs)):
        key = list(langs.keys())[i]
        print(str(i +1) + ": " + langs[key])
    # convert the 1-based menu choice back to a 0-based index
    selection = int(input("Select target language: ")) - 1
    # look up the short code for the chosen language
    lang = list(langs.keys())[selection]
    read_file(lang)
    return None
#==============================================================================
def read_file(lang):
    """
    Purpose:
        Translate every en-US subtitle line of the local XML file into *lang*.
        The file is rewritten line by line into config.xml; after each English
        <rdf:li xml:lang="en-US"> line a translated <rdf:li> element is
        appended, and each english/translation pair is logged to text.csv.
    Dependencies:
        glob, os, tqdm
    Argument:
        lang - a language short code
    Returns:
        None
    """
    # requires exactly one xml file in the working directory
    with open(glob.glob("./*.xml")[0], 'r', encoding="utf8") as open_f:
        lines = open_f.readlines()
    # Start from clean output files. Only ignore filesystem errors
    # (e.g. file absent on the first run) -- a bare except would also
    # swallow KeyboardInterrupt/SystemExit.
    try:
        os.remove("config.xml")
    except OSError:
        pass
    try:
        os.remove("text.csv")
    except OSError:
        pass
    for line in tqdm(lines):
        write_to_xml(line)
        if "<rdf:li xml:lang=\"en-US\">" in line and "</rdf:Bag>" not in line:
            english = clean_english(line)
            translation = translate(english, lang)
            write_to_csv(english, translation)
            formatted = add_formatting(translation, lang)
            write_to_xml(formatted)
    return None
#==============================================================================
def clean_english(line):
    """
    Purpose:
        Strip the XML/RTF wrapping from a subtitle line.
    Dependencies:
        None
    Argument:
        line - a string with xml markup and english subtitle text
    Returns:
        the plain english text, with \\par breaks collapsed to spaces
    """
    after_header = line.split("rtf1")[1]
    body = after_header.split("}")[0]
    paragraphs = body.split(" \\par ")
    return " ".join(paragraphs).strip()
#==============================================================================
def translate(eng, lang):
    """
    Purpose:
        Uses the Google translation API to translate english text.
    Dependencies:
        google_trans_new (network access required)
    Argument:
        eng - a string of cleaned english text
        lang - short code for target language
    Returns:
        the translated text, stripped of surrounding whitespace
    """
    translator = google_trans_new.google_translator()
    return translator.translate(eng, lang_tgt=lang, lang_src='en').strip()
#==============================================================================
def add_formatting(text, lang):
    """
    Purpose:
        Wrap translated text in <rdf:li> RTF markup, inserting "\\par"
        paragraph breaks so lines stay near 45 characters.
    Dependencies:
        None
    Argument:
        text - a string of translated text
        lang - a language short code
    Returns:
        translated text with xml formatting
    """
    rtf_lines = []
    current_words = []
    current_length = 0
    for word in text.split():
        # Break before any word that would push the line past 45 characters.
        if current_length + len(word) > 45:
            rtf_lines.append(" ".join(current_words))
            current_words = []
            current_length = 0
        current_words.append(word)
        current_length += len(word) + 1
    rtf_lines.append(" ".join(current_words))
    body = " \\par ".join(rtf_lines).strip()
    return "<rdf:li xml:lang=\"" + lang + "\">{\\rtf1 " + body + "</rdf:li>"
#==============================================================================
def write_to_csv(eng, trns):
    """
    Purpose:
        Append one (english, translation) row to text.csv in the
        current working directory.
    Dependencies:
        csv
    Argument:
        eng - a string of english text
        trns - a string of translated text
    Returns:
        None
    """
    with open("text.csv", 'a', newline='') as csv_file:
        csv.writer(csv_file).writerow([eng, trns])
    return None
#==============================================================================
def write_to_xml(line):
    """
    Purpose:
        Append one line of xml to config.xml in the current working directory.
    Dependencies:
        None
    Argument:
        line - a string with xml mark up
    Returns:
        None
    """
    with open("config.xml", 'a', encoding="utf8") as xml_file:
        xml_file.write(line)
    return None
#==============================================================================
def main():
    """Entry point: prompt for a target language, then translate the XML file."""
    get_lang()
#==============================================================================
if __name__ == '__main__':
    main()
{
"api_name": "google_trans_new.LANGUAGES",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.remove",
"... |
20160442598 | from superlink import security
import mysql.connector as sqltor
from tabulate import tabulate
security.security_init() #Starts monitoring people by asking username/password
secure = security.isSecured() #Boolean
#_main_
if secure==True:
    # Connect to the local MySQL database used by the app.
    conn = sqltor.connect(host="localhost", user="root", passwd="root", database="sadn")
    if conn.is_connected():
        print("Successfully connected!")
    cursor = conn.cursor()
    #Creating Table
    cursor.execute("CREATE TABLE IF NOT EXISTS Collaborators(SNo integer(3),Name varchar(30),SharesGained integer(8), SharesLost integer(8))")
    count = int(input('How many collaborators? ' ))
    # Collect each collaborator interactively and insert a row per entry.
    for i in range(count):
        Name1 = input("Enter the name of the collaborator: ")
        shr1 = int(input('Enter the number of shares gained: '))
        shr2 = int(input('Enter the number of shares lost: '))
        # NOTE(review): values are interpolated with str.format -- vulnerable to
        # SQL injection via Name1; prefer cursor.execute with %s placeholders
        # and a params tuple. Also note shr2 is quoted as a string although the
        # SharesLost column is an integer.
        cursor.execute("INSERT INTO Collaborators VALUES({},'{}',{},'{}')".format(i+1,Name1,shr1,shr2))
        conn.commit()
    #Reading from Collaborators table and using tabulate
    strt = 'SELECT * FROM Collaborators'
    cursor.execute(strt)
    TotList = []
    data1 = cursor.fetchall()
    for row in data1:
        print(row)
        TotList.append(row)
    headers = ["SNo", "Collaborator", "Shares Gained", "Shares Lost"]
    print("\nCollaborators:\n")
    print(tabulate(TotList, headers, tablefmt="grid"))
    #Closing the connection
    conn.close()
    if conn.is_closed():
        print("Connection successfully closed!")
| Project-CS-2022/ProjCS-2022-02-08-Final | CS Proj/Project/collaborators.py | collaborators.py | py | 1,556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "superlink.security.security_init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "superlink.security",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "superlink.security.isSecured",
"line_number": 6,
"usage_type": "call"
},
{
"api... |
18291729121 | from PIL import Image
import numpy as np
def calculate_apl(filename):
    """Calculate the average picture level (APL) of an image.

    APL is the mean pixel value over the colour (or grayscale) channels,
    normalised to the range [0, 1].

    Keyword arguments:
    filename -- path of image
    """
    # load the image
    image = Image.open(filename)
    # convert image to numpy array
    data = np.asarray(image)
    if data.ndim == 3:
        # keep only the first three (RGB) channels so an alpha channel
        # does not skew the average
        data = data[:, :, :3]
    # grayscale images produce a 2-D array and are averaged as-is
    # (the original code crashed on them with an indexing error)
    APL = np.average(data) / 255
    return APL
if __name__ == "__main__":
    # prompt the user for an image path and report its APL
    filepath = input("Enter the file name (including extension) of the image: ")
    result = calculate_apl(filepath)
    print("The APL is ", result)
| jdbremer/scripts | APL.py | APL.py | py | 698 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_numb... |
1817846315 | #! /bin/python
# gsheet flask entry handler
#
# @file: gsheet
# @time: 2022/01/26
# @author: Mori
#
import flask
from service.gsheet import (
init_credentials,
compelete_worksheet,
reading_worksheet,
read_wks_from_google_sheet,
)
SHEET_ID = "1vXqHdA6RTD9cMMv8CvRQMW-jM9GSajAZOVcdJMwEFDM"
WKS_TITLE = "lite填色通知配置记录2"
__all__ = ["compelete_sheet", "reading_sheet"]
def compelete_sheet():
    """Fill in the pending rows of the worksheet and report status as JSON."""
    try:
        credentials = init_credentials()
        worksheet = read_wks_from_google_sheet(SHEET_ID, WKS_TITLE, credentials)
        compelete_worksheet(worksheet, credentials)
        return flask.jsonify({"status": "OK"})
    except Exception as error:
        print(error)
        return flask.jsonify({"status": "ERROR", "msg": str(error)})
def reading_sheet():
    """Read every row from the worksheet; return rows plus a count as JSON."""
    try:
        credentials = init_credentials()
        worksheet = read_wks_from_google_sheet(SHEET_ID, WKS_TITLE, credentials)
        rows = reading_worksheet(worksheet)
        return flask.jsonify({"status": "OK", "row": rows, "total": len(rows)})
    except Exception as error:
        return flask.jsonify({"status": "ERROR", "msg": str(error)})
| moriW/auto_firebase | backend/web/gsheet.py | gsheet.py | py | 1,147 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "service.gsheet.init_credentials",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "service.gsheet.read_wks_from_google_sheet",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "service.gsheet.compelete_worksheet",
"line_number": 28,
"usage_... |
20450214874 | import singleRepoStats
import datetime
import pandas as pd
import featureMakers
def getPredictionsTrace(repoString, tend=None):
    """Build a daily frame of commit counts and predicted P(alive) for a repo.

    Args:
        repoString: repository identifier passed to singleRepoStats.
        tend: last date to evaluate; defaults to today's date. (BUGFIX: the
            old default ``datetime.date.today()`` was bound once at import
            time; ``None`` now means "today at call time".)
    Returns:
        pandas.DataFrame indexed by day with columns 'commits' and 'prob'.
    """
    if tend is None:
        tend = datetime.date.today()
    weeklyData = singleRepoStats.getRepoWeeklyData(repoString)
    # Total commits per week across all authors, resampled to daily values.
    weeklyTotal = weeklyData.pivot(index='week_start', columns='author_login', values='commits_num').sum(axis=1)
    weeklyTotalResampled = featureMakers.resampleToDays(weeklyTotal)
    earliestCommit = weeklyData['week_start'].min()
    aWeekAfterFirstCommit = earliestCommit + datetime.timedelta(7)
    times = pd.date_range(start=aWeekAfterFirstCommit,
            freq='W', end=tend)
    # BUGFIX: Timestamp.to_datetime() was removed from pandas;
    # Timestamp.date() yields the same datetime.date directly.
    times = [x.date() for x in times]
    probs = [singleRepoStats.getPredictedProbAlive(weeklyData, t) for t in times]
    probTrace = pd.Series(data=probs, index=pd.to_datetime(times))
    # Align the weekly probabilities onto the daily index, then fill the gaps
    # by linear interpolation.
    probTraceResampled = probTrace.reindex(
        weeklyTotalResampled.index, method='ffill', limit=1)
    probTraceResampled = probTraceResampled.interpolate()
    commitsAndProb = pd.DataFrame(
        {'commits':weeklyTotalResampled, 'prob':probTraceResampled})
    return commitsAndProb
| gauthamnair/repopulse | predictionsVisualizer.py | predictionsVisualizer.py | py | 1,069 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "singleRepoStats.getRepoWeeklyData",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": ... |
1607792294 | from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams
from pdfminer.pdfparser import PDFParser, PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
# from pdfminer.pdfdevice import PDFDevice
# Open the PDF document to read (binary mode).
# NOTE(review): this uses the old pdfminer API (PDFDocument() with set_parser /
# initialize); recent pdfminer.six versions changed this interface -- verify
# against the installed pdfminer version.
pdfFile = open("一种折叠屏上使用的付款码使用方式.pdf", "rb")
# Create the PDF parser.
pdfParser = PDFParser(pdfFile)
# Create the PDFDocument object.
pdfDoc = PDFDocument()
# Link the parser and the PDFDocument object to each other.
pdfParser.set_document(pdfDoc)
pdfDoc.set_parser(pdfParser)
# Initialize the document; the argument is the PDF password (empty = none).
pdfDoc.initialize("")
# Create the PDF resource manager and the layout-analysis parameters.
pdfRes = PDFResourceManager()
pdfLaparams = LAParams()
# Create the page aggregator (layout device) and the page interpreter.
pdfDevice = PDFPageAggregator(pdfRes,laparams=pdfLaparams)
pdfInte = PDFPageInterpreter(pdfRes,pdfDevice)
# Walk every page and print the text of each layout object that has any.
for page in pdfDoc.get_pages():
    pdfInte.process_page(page)
    for out in pdfDevice.get_result():
        if hasattr(out,"get_text"):
            print(out.get_text())
#
# for (level,title,dest,a,se) in pdfDoc.get_outlines():
#     print(level,title,dest,a,se)
| BianDongLei/study_demo | Python/PythonDemo/PythonTest/pdfreader.py | pdfreader.py | py | 1,196 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "pdfminer.pdfparser.PDFParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pdfminer.pdfparser.PDFDocument",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pdfminer.pdfinterp.PDFResourceManager",
"line_number": 19,
"usage_type": "cal... |
2556587569 | from bs4 import BeautifulSoup
import requests
from time import sleep
import re
from collections import OrderedDict
from utils import save_csv , URL_RESOURCES
# Separar chamadas de cada página usando asyncio
def get_parsed_content(url):
    """Fetch *url* and return its body parsed as an HTML BeautifulSoup tree."""
    response_body = requests.get(url).content
    return BeautifulSoup(response_body, "html.parser")
def clean_data(data):
    """Extract the numeric value from a scraped text field.

    Every non-digit character is removed (thousands separators, units, etc.)
    and the remaining digits are returned as a float, e.g.
    "1,234,567 inhabitants" -> 1234567.0.

    Note: this intentionally also drops any decimal separator, matching the
    original behaviour; the source fields are integer counts. (The original
    ``replace(",", ".")`` after the substitution was dead code -- no commas
    survive the regex -- and has been removed.)
    """
    digits = re.sub(r"[^0-9]", "", data)
    return float(digits)
def parsed_data_country(link_country):
    """Scrape one country page and return an OrderedDict of its key figures.

    Relies on the module-level URL_BASE being assigned before this is called.
    """
    parser = get_parsed_content(URL_BASE + link_country)
    # Each field sits in a fixed-id table row; the value cell has class w2p_fw.
    name = parser.find(id="places_country__row").find(class_="w2p_fw").get_text()
    iso_code = parser.find(id="places_iso__row").find(class_="w2p_fw").get_text()
    population = parser.find(id="places_population__row").find(class_="w2p_fw").get_text()
    area = parser.find(id="places_area__row").find(class_="w2p_fw").get_text()
    population = clean_data(population)
    area = clean_data(area)
    # Density is people per unit area; countries listed with zero area get 0.
    try:
        density = round(population / area,2)
    except ZeroDivisionError:
        density = 0
    # Keys are Portuguese column names used by the CSV export.
    data = OrderedDict()
    data["nome"] = name
    data["iso_code"] = iso_code
    data["populacao"] = population
    data["area"] = area
    data["densidade"] = density
    return data
# Entry point of the crawl: walk every paginated results page, scrape each
# listed country, then (after the loop) dump everything to CSV.
URL_BASE = URL_RESOURCES.get("countries_infos")
data = []
parser = get_parsed_content(URL_BASE)
next_link = parser.find("a",string="Next >")
# NOTE(review): the loop only starts when the first page has a "Next >"
# link, so a single-page result set would never be scraped.
while next_link:
    # Collect data
    countries_data = parser.find(id="results")
    countries = countries_data.find_all("td")
    countries_links_from_page = (country.find("a").get("href") for country in countries)
    for country_page in countries_links_from_page:
        data.append(parsed_data_country(country_page))
        sleep(1)  # be polite to the server between per-country requests
    # Update page with next page...if exists
    next_link = parser.find("a",string="Next >")
    if next_link:
        next_page = URL_BASE + next_link.get("href")
        parser = get_parsed_content(next_page)
save_csv(__file__,data) | Marlysson/craw | core/resources/crawlers/countries_infos.py | countries_infos.py | py | 1,950 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"... |
34879164954 | import pyvisa as visa
import time
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.rcParams['animation.html'] = 'jshtml'
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib import style
import matplotlib.animation as animation
import tkinter as tk
from tkinter import ttk
from scipy import optimize
import serial
ser = serial.Serial()
ser.timeout = 0.3
ser.port = "com3"
ser.baudrate = 9600
ser.bytesize = serial.EIGHTBITS
ser.parity = serial.PARITY_EVEN
ser.stopbits = serial.STOPBITS_ONE
ser.open()
#ser.write(b'ST1*')
LARGE_FONT = ('Verdana', 12)
style.use('ggplot')
#Check if everything connected properly
rm = visa.ResourceManager()
rm.list_resources()
print(rm.list_resources(), '\n')
#Setting devices names
lock_in = rm.open_resource('GPIB0::1::INSTR', write_termination= '\n', read_termination='\n')
keyt2000 = rm.open_resource('GPIB0::2::INSTR', write_termination= '\n', read_termination='\n')
keyt2010 = rm.open_resource('GPIB0::3::INSTR', write_termination= '\n', read_termination='\n')
#Write command to a device and get it's output
def get(device, command):
    """Write *command* to a VISA *device* and return the line it answers with."""
    device.write(command)
    response = device.read()
    return response
def f(x, a, b):
    """Linear model ``a*x + b`` used by the wavelength calibration fit."""
    return b + a * x
def lsq():
    """Least-squares fit lambda = a*R + b from the calibration table.

    Returns the optimal [a, b] array from scipy.optimize.curve_fit.
    Reads the calibration CSV from a hard-coded absolute path.
    """
    calibration = pd.read_csv(r'C:\Users\user\Desktop\Kravtsov\spectrum\calibration.csv', sep = ' ')
    #p0=np.array([])
    # Fit the linear model f() to (resistance, wavelength) columns.
    beta_opt, beta_cov = optimize.curve_fit(f, calibration['R_keyt2000'], calibration['lambda'])
    #beta_perr = np.sqrt(np.diag(beta_cov))
    return beta_opt
a = lsq()[0]
b = lsq()[1]
#forming data into file
zero_time = time.process_time()
data = pd.DataFrame(columns = ['Time', 'lock_in', 'R_keyt2000', 'V_keyt2010'], dtype = np.float)
filename = r'C:\Users\user\Desktop\Kravtsov\spectrum\one_more_try_' + datetime.today().strftime('%H_%M_%d_%m_%Y') + '.csv'
print(filename, '\n')
fig = Figure(figsize = (5, 5), dpi = 200)
ax = fig.add_subplot(111)
def animate(i):
    """FuncAnimation callback: sample the instruments, log to CSV, and plot."""
    global data
    global a
    global b
    cur_time = time.process_time() - zero_time
    # Append one row of raw readings and rewrite the whole CSV each tick.
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0).
    data = data.append({'Time': cur_time, 'lock_in': get(lock_in, 'OUTP? 3'), 'R_keyt2000': get(keyt2000, 'FETC?'), 'V_keyt2010': get(keyt2010, 'FETC?')}, ignore_index = True)
    data.to_csv(filename, sep = ' ')
    #Plotting data
    # NOTE(review): np.float is deprecated and removed in NumPy 1.24+ (use float).
    y = np.float(get(lock_in, 'OUTP? 3'))
    x = np.float(get(keyt2000, 'FETC?'))
    # NOTE(review): y is already a float at this point, so this comparison
    # against a string can never be true -- the null-reading guard is dead code.
    if y == '0.0E+00\x00\r':
        y = 0
    else:
        y = np.float(y)
    # Convert resistance to wavelength with the calibration line lambda = a*R + b.
    ax.plot(x * a + b, y, '-o', color = 'blue')
    # moving_indicator is toggled by the Settings page while the stepper moves.
    if moving_indicator == True:
        data = data.append({'Time': cur_time, 'lock': y, 'R_keyt2000': x, 'V_keyt2010': get(keyt2010, 'FETC?')}, ignore_index = True)
        data.to_csv(filename, sep = ' ')
class spectrometer(tk.Tk):
    """Main Tk application: stacks the three pages and raises one at a time."""
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        tk.Tk.iconbitmap(self)
        tk.Tk.wm_title(self, 'spectrometer')
        # Single container; all pages are gridded into the same cell.
        container = tk.Frame(self)
        container.pack(side = 'top', fill = 'both', expand = 'True')
        container.grid_rowconfigure(0, weight = 1)
        container.grid_columnconfigure(0, weight = 1)
        # Map page class -> constructed frame instance.
        self.frames = {}
        for F in (StartPage, Settings, Graph):
            frame = F(container, self)
            self.frames[F] = frame
            frame.grid(row = 0, column = 0, sticky = 'nsew')
        self.show_frame(StartPage)
    def show_frame(self, cont):
        """Bring the page identified by its class *cont* to the front."""
        frame = self.frames[cont]
        frame.tkraise()
class StartPage(tk.Frame):
    """Landing page with navigation buttons to the Settings and Graph pages."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text = 'Start Page', font = LARGE_FONT)
        label.pack(pady = 10, padx = 10)
        # The controller (the spectrometer app) raises the requested frame.
        button = ttk.Button(self, text = "Settings", command = lambda: controller.show_frame(Settings))
        button.pack()
        button2 = ttk.Button(self, text = 'Graph', command = lambda: controller.show_frame(Graph))
        button2.pack()
class Settings(tk.Frame):
    """Stepper-motor control page: start/stop/reverse movement and set speed
    over the module-level serial port ``ser``."""
    def __init__(self, parent, controller):
        global lock_in_speed
        global moving_indicator
        global starting_time
        moving_indicator = False
        def move_tapped():
            # Start the stepper and record when the move began.
            global moving_indicator
            global starting_time
            ser.write(b'MV*')
            print(ser.read(10))
            moving_indicator = True
            starting_time = time.process_time()
        def stop_tapped():
            # Stop the stepper and clear the moving flag used by animate().
            global moving_indicator
            ser.write(b'SP*')
            print(ser.read(10))
            moving_indicator = False
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text = 'Settings', font = LARGE_FONT)
        label.pack()
        button = ttk.Button(self, text = 'Back to Home', command = lambda: controller.show_frame(StartPage))
        button.pack()
        mv_button = ttk.Button(self, text = 'Start moving', command = move_tapped)
        mv_button.place(relx = .8, rely = .4, anchor = 'c')
        rev_button = ttk.Button(self, text = 'Reverce', command = lambda: ser.write(b'RS*'))
        rev_button.place(relx = .8, rely = .43, anchor = 'c')
        stop_button = ttk.Button(self, text = 'Stop', command = stop_tapped)
        stop_button.place(relx = .8, rely = .46, anchor = 'c')
        lock_in_speed_label = tk.Label(self, text = r'Stepper speed steps/sec', font = LARGE_FONT)
        lock_in_speed_label.place(relx = .2, rely = .2, anchor = 'c')
        lock_in_speed = '200'
        # NOTE(review): textvariable normally expects a tk.StringVar, not a
        # plain str -- confirm the entry actually tracks this value.
        lock_in_speed_entry = tk.Entry(self, textvariable = lock_in_speed)
        lock_in_speed_entry.place(relx = .2, rely = .23, anchor = 'c')
        save_button = ttk.Button(self, text = 'Save', command = lambda: ser.write(str.encode('SD' + str(lock_in_speed_entry.get()) + '*')))
        # NOTE(review): this read runs once during page construction, not when
        # Save is pressed -- it may block startup for the serial timeout.
        ser.read(10)
        save_button.place(relx = .8, rely = .8, anchor = 'c')
class Graph(tk.Frame):
    """Page that embeds the module-level matplotlib figure updated by animate()."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text = 'graph', font = LARGE_FONT)
        label.pack(pady = 10, padx = 10)
        button = ttk.Button(self, text = 'Back to Home', command = lambda: controller.show_frame(StartPage))
        button.pack()
        # Embed the shared figure into this Tk frame.
        canvas = FigureCanvasTkAgg(fig, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side = tk.TOP, fill = tk.BOTH, expand = True)
        '''
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side = tk.TOP, fill = tk.BOTH, expand = True)
        '''
def main():
    """Build the GUI, start the 500 ms acquisition loop, then release the serial port."""
    app = spectrometer()
    # Keep a reference to the animation object: if it were garbage collected,
    # the periodic calls to animate() would stop.
    ani = animation.FuncAnimation(fig, animate, interval = 500)
    app.mainloop()
    ser.close()
if __name__ == '__main__':
main() | DbIXAHUEOKEAHA/LPI_RAS | GPIB_window.py | GPIB_window.py | py | 7,233 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "serial.... |
15685242927 | import pygame
import glob
class Player(pygame.sprite.Sprite):
    """Arrow-key controlled sprite with a walk animation and bounds clamping."""
    def __init__(self):
        super().__init__()
        self.player_walk_right = []
        self.player_walk_left = []
        # Active animation frame list; the player starts facing left.
        self.player_walk = self.player_walk_left
        for player_frame in glob.glob('Static/Character/walk_right/*.png'):
            self.player_walk_right.append(pygame.image.load(player_frame).convert_alpha())
        for player_frame in glob.glob('Static/Character/walk_left/*.png'):
            self.player_walk_left.append(pygame.image.load(player_frame).convert_alpha())
        # Fractional frame index; advanced by 0.2 per tick while moving.
        self.player_index = 0
        self.image = self.player_walk[self.player_index]
        self.rect = self.image.get_rect(midbottom = (300, 400))
    def player_input(self, dialogue):
        """Move 4 px per tick with the arrow keys; frozen while *dialogue* is active."""
        keys = pygame.key.get_pressed()
        if not dialogue:
            if keys[pygame.K_UP]:
                self.rect.y -= 4
            if keys[pygame.K_DOWN]:
                self.rect.y += 4
            if keys[pygame.K_LEFT]:
                self.rect.x -= 4
                self.player_walk = self.player_walk_left
            if keys[pygame.K_RIGHT]:
                self.rect.x += 4
                self.player_walk = self.player_walk_right
            # Advance the animation only while some direction key is held.
            if keys[pygame.K_UP] or keys[pygame.K_DOWN] or keys[pygame.K_LEFT] or keys[pygame.K_RIGHT]:
                self.player_index += 0.2
                if self.player_index >= len(self.player_walk): self.player_index = 0
                self.image = self.player_walk[int(self.player_index)]
    def boundaries(self):
        """Clamp the sprite inside the playable area (75..375 y, 0..800 x)."""
        if self.rect.top <= 75:
            self.rect.top = 75
        if self.rect.bottom >= 375:
            self.rect.bottom = 375
        if self.rect.left <= 0:
            self.rect.left = 0
        if self.rect.right >= 800:
            self.rect.right = 800
    def update(self, dialogue):
        """Per-frame update: process input, then keep the sprite in bounds."""
        self.player_input(dialogue)
        self.boundaries()
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"lin... |
19088343099 | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.generic.simple import direct_to_template
from django.template import RequestContext
from images.models import Image
from images.forms import ImageForm
def images_add(request):
    """Handle the add-image form: show it on GET, save the Image on valid POST.

    Requires login and the images.add_image permission; redirects to the
    markup-helper page for the new image after a successful save.
    """
    # NOTE(review): this file uses pre-Django-1.5 APIs (is_authenticated as a
    # method, render_to_response, function-path reverse()) -- fine for the
    # Django version this project pins, but incompatible with modern Django.
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/staff/login/')
    elif request.user.has_perm('images.add_image'):
        if request.method == 'POST':
            form = ImageForm(request.POST, request.FILES)
            if form.is_valid():
                # save new image
                new_image = Image(name=form.cleaned_data['name'])
                # Caption and credit are optional form fields.
                if form.cleaned_data.get('caption'):
                    new_image.caption = form.cleaned_data['caption']
                if form.cleaned_data.get('credit'):
                    new_image.credit = form.cleaned_data['credit']
                new_image.image = form.cleaned_data['image']
                new_image.save()
                return HttpResponseRedirect(reverse('images.views.images_add_to_markup', args=[new_image.id]))
            else:
                # Invalid form: re-render with validation errors.
                return render_to_response('images/widget_add.html',
                                          {'form': form},
                                          context_instance=RequestContext(request))
        else:
            # GET: render an empty form.
            form = ImageForm()
            return render_to_response('images/widget_add.html',
                                      {'form': form},
                                      context_instance=RequestContext(request))
    else:
        # Authenticated but lacking the add_image permission.
        return render_to_response('staff/access_denied.html',
                                  {'missing': 'add photos to',
                                   'staffapp': 'this entry'},
                                  context_instance=RequestContext(request))
## return create_object(request,
## model=Image,
## template_name='images/widget_addd.html',
## post_save_redirect='/images/widget/markup/%(id)i/')
def images_list(request):
    """Render the bare add-image widget template."""
    template_name = 'images/widget_add.html'
    return direct_to_template(request, template=template_name)
def images_add_to_markup(request, image_id):
    """Render the markup-insertion helper for the image with pk *image_id*."""
    context = {'image': Image.objects.get(pk=image_id)}
    return direct_to_template(request, template='images/widget_add_to_markup.html', extra_context=context)
| queensjournal/queensjournal.ca | apps/images/views.py | views.py | py | 2,225 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "images.forms.ImageForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "images.models.Image",
"line_number": 17,
"usage_type": "call"
},
{
"api_n... |
18924484365 | import pytest
from base.webdriverfactory import WebDriverFactory
from pages.home.login_page import LoginPage
@pytest.fixture()
def setUp():
    """Method-level fixture: print markers before and after each test.

    Uses the plain ``pytest.fixture`` decorator: ``pytest.yield_fixture`` is
    deprecated and was removed in pytest 6.2; generator (yield) fixtures have
    been supported by ``pytest.fixture`` itself since pytest 3.0.
    """
    print("Running method level setUp")
    yield
    print("Running method level tearDown")
@pytest.fixture(scope="class")
def oneTimeSetUp(request, browser):
    """Class-scoped fixture: start a logged-in WebDriver, quit it at teardown.

    (``pytest.yield_fixture`` is deprecated/removed; ``pytest.fixture``
    supports yield-style teardown directly.)
    """
    print("Running one time setUp")
    wdf = WebDriverFactory(browser)
    driver = wdf.getWebDriverInstance()
    lp = LoginPage(driver)
    lp.login("test@email.com", "abcabc")
    # Expose the driver as a class attribute for unittest-style test classes.
    if request.cls is not None:
        request.cls.driver = driver
    yield driver
    driver.quit()
    print("Running one time tearDown")
def pytest_addoption(parser):
    """Register the custom command-line options consumed by the session fixtures."""
    parser.addoption("--browser")
    parser.addoption("--osType", help="Type of operating system")
@pytest.fixture(scope="session")
def browser(request):
    """Session-scoped value of the --browser command-line option."""
    selected = request.config.getoption("--browser")
    return selected
@pytest.fixture(scope="session")
def osType(request):
    """Session-scoped value of the --osType command-line option."""
    selected = request.config.getoption("--osType")
    return selected
{
"api_name": "pytest.yield_fixture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "base.webdriverfactory.WebDriverFactory",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pages.home.login_page.LoginPage",
"line_number": 17,
"usage_type": "call"
},... |
17453339669 | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
def get_fpn_sf_global(num_classes, mode):
return FPN_SF(num_classes, expansion=4, mode=mode)
def get_fpn_sf_local(num_classes, mode):
return FPN_SF(num_classes, expansion=1, mode=mode)
class FPN_SF(nn.Module):
    """FPN segmentation head with Semantic-Flow (FAM) or bilinear upsampling."""
    def __init__(self, num_classes, expansion=1, mode='SemanticFlow'):
        """Meta model for FPN based on Semantic Flow
        Params:
            num_classes: number of output segmentation classes
            expansion: 1 (resnet18, resnet34), 4 (resnet50, resnet101)
            mode: upsample mode, 'SemanticFlow' or 'Bilinear'
        """
        super(FPN_SF, self).__init__()
        self.mode = mode # SemanticFlow, Bilinear
        # PPM: pyramid pooling on the deepest backbone feature (c5).
        self.toplayer = PSPModule(features=512*expansion, out_features=256)
        # lateral layers: 1x1 convs bringing c4/c3/c2 to 256 channels
        self.laterlayer1 = nn.Conv2d(256*expansion, 256, kernel_size=1, stride=1, padding=0)
        self.laterlayer2 = nn.Conv2d(128*expansion, 256, kernel_size=1, stride=1, padding=0)
        self.laterlayer3 = nn.Conv2d(64*expansion, 256, kernel_size=1, stride=1, padding=0)
        # # external lateral layers
        # self.toplayer_ext = PSPModule(features=512*6, out_features=256)
        # self.laterlayer1_ext = nn.Conv2d(256*6, 256, kernel_size=1, stride=1, padding=0)
        # self.laterlayer2_ext = nn.Conv2d(128*6, 256, kernel_size=1, stride=1, padding=0)
        # self.laterlayer3_ext = nn.Conv2d(64*6, 256, kernel_size=1, stride=1, padding=0)
        # FAM layers: flow-aligned upsampling modules (only for SemanticFlow mode)
        if self.mode == 'SemanticFlow':
            self.fam1 = FAM(features=256)
            self.fam2 = FAM(features=256)
            self.fam3 = FAM(features=256)
            self.fam1_ext = FAM(features=256)
            self.fam2_ext = FAM(features=256)
            self.fam3_ext = FAM(features=256)
        # classify layer: fuse the four pyramid levels, then predict classes
        self.smooth = nn.Sequential(nn.Conv2d(256*4, 256, kernel_size=1, stride=1, padding=0),
                                    nn.BatchNorm2d(256),
                                    nn.ReLU(True))
        self.classify = nn.Conv2d(256, num_classes, kernel_size=3, stride=1, padding=1)
        # init
        self._init_params()
    def _init_params(self):
        # Normal(0, 0.01) init for every direct child that exposes weight/bias.
        for m in self.children():
            if hasattr(m, 'weight'):
                nn.init.normal_(m.weight, mean=0, std=0.01)
            if hasattr(m, 'bias'):
                nn.init.constant_(m.bias, 0)
    def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.
        Args:
            x: (Variable) top feature map to be upsampled.
            y: (Variable) lateral feature map.
        Returns:
            (Variable) added feature map.
        NOTE(review): when self.mode != 'Bilinear' this falls through and
        implicitly returns None.
        '''
        _, _, H, W = y.size()
        if self.mode == 'Bilinear':
            return F.interpolate(x, size=(H, W), mode='bilinear') + y
    def forward(self, c2, c3, c4, c5):
        # upsample: build the top-down pyramid p5..p2
        p5 = self.toplayer(c5)
        if self.mode == 'SemanticFlow':
            p4 = self.fam1(p5, self.laterlayer1(c4))
            p3 = self.fam2(p4, self.laterlayer2(c3))
            p2 = self.fam3(p3, self.laterlayer3(c2))
            # deep supervision: re-align deeper levels to p2's resolution
            p5 = self.fam1_ext(p5, p2)
            p4 = self.fam2_ext(p4, p2)
            p3 = self.fam3_ext(p3, p2)
        else:
            # NOTE(review): laterlayer1 is applied to c3 and c2 as well here,
            # although their channel counts match laterlayer2/laterlayer3 --
            # this branch looks like a copy-paste slip; confirm before using
            # the 'Bilinear' mode.
            p4 = self._upsample_add(p5, self.laterlayer1(c4))
            p3 = self._upsample_add(p4, self.laterlayer1(c3))
            p2 = self._upsample_add(p3, self.laterlayer1(c2))
        # # deep supervision
        _, _, H, W = p2.size()
        p5 = F.interpolate(p5, size=(H, W), mode='bilinear')
        p4 = F.interpolate(p4, size=(H, W), mode='bilinear')
        p3 = F.interpolate(p3, size=(H, W), mode='bilinear')
        # Concatenate all levels, fuse, and classify per pixel.
        ensemble = self.smooth(torch.cat([p5, p4, p3, p2], dim=1))
        output = self.classify(ensemble)
        return output
    def get_fusion_feature_pyramid(self, c2, c3, c4, c5):
        """Return the lateral-projected pyramid [p2, p3, p4, p5] without fusion."""
        p5 = self.toplayer(c5)
        p4 = self.laterlayer1(c4)
        p3 = self.laterlayer2(c3)
        p2 = self.laterlayer3(c2)
        return [p2, p3, p4, p5]
    def forward_with_lateral(self, ps):
        """Run the top-down fusion on an already-projected pyramid *ps*.

        Returns (output logits, fused ensemble feature map).
        """
        p5 = ps[3]
        if self.mode == 'SemanticFlow':
            p4 = self.fam1(p5, ps[2])
            p3 = self.fam2(p4, ps[1])
            p2 = self.fam3(p3, ps[0])
            p5 = self.fam1_ext(p5, p2)
            p4 = self.fam2_ext(p4, p2)
            p3 = self.fam3_ext(p3, p2)
        else:
            p4 = self._upsample_add(p5, ps[2])
            p3 = self._upsample_add(p4, ps[1])
            p2 = self._upsample_add(p3, ps[0])
        # # deep supervision
        _, _, H, W = p2.size()
        p5 = F.interpolate(p5, size=(H, W), mode='bilinear')
        p4 = F.interpolate(p4, size=(H, W), mode='bilinear')
        p3 = F.interpolate(p3, size=(H, W), mode='bilinear')
        ensemble = self.smooth(torch.cat([p5, p4, p3, p2], dim=1))
        output = self.classify(ensemble)
        return output, ensemble
    def ensemble_classifier(self, ps):
        """Fuse a pyramid *ps* to p2's resolution and classify.

        NOTE(review): in the non-SemanticFlow branch p5/p4/p3 are read before
        being assigned (only p2 is bound), so that path raises NameError --
        this method is only usable in 'SemanticFlow' mode as written.
        """
        p2 = ps[0]
        if self.mode == 'SemanticFlow':
            p5 = self.fam1_ext(ps[3], p2)
            p4 = self.fam2_ext(ps[2], p2)
            p3 = self.fam3_ext(ps[1], p2)
        else:
            _, _, H, W = p2.size()
            p5 = F.interpolate(p5, size=(H, W), mode='bilinear')
            p4 = F.interpolate(p4, size=(H, W), mode='bilinear')
            p3 = F.interpolate(p3, size=(H, W), mode='bilinear')
        ensemble = self.smooth(torch.cat([p5, p4, p3, p2], dim=1))
        output = self.classify(ensemble)
        return output, ensemble
class FAM(nn.Module):
    """Flow Alignment Module: warps a coarse feature map onto a finer one
    using a learned 2-channel flow field, then adds them."""
    def __init__(self, features=256):
        super().__init__()
        self.smooth_up = nn.Conv2d(features, features, kernel_size=1, stride=1, padding=0)
        self.smooth_d = nn.Conv2d(features, features, kernel_size=1, stride=1, padding=0)
        # Predicts a per-pixel (dy, dx) offset from the concatenated pair.
        self.flow = nn.Conv2d(features*2, 2, kernel_size=3, stride=1, padding=1, bias=True)
        # self.flow = nn.Sequential(nn.Conv2d(features*2, features, kernel_size=1, stride=1, padding=0),
        #                           nn.BatchNorm2d(features),
        #                           nn.ReLU(True),
        #                           nn.Conv2d(features, 2, kernel_size=3, stride=1, padding=1))
    def forward(self, x, y):
        '''Upsample and add two feature maps.
        Args:
            x: (Variable) top feature map to be upsampled.
            y: (Variable) lateral feature map.
        Returns:
            (Variable) added feature map.
        '''
        _, _, H, W = y.size()
        x_smooth = self.smooth_up(x)
        y_smooth = self.smooth_d(y)
        # NOTE(review): this line overwrites the smooth_up result computed
        # just above -- the raw x is interpolated, so self.smooth_up never
        # affects the output. Confirm whether that is intended.
        x_smooth = F.interpolate(x, size=(H, W), mode='bilinear')
        flow = self.flow(torch.cat([x_smooth, y_smooth], dim=1))
        x_warp = self.stn(x, flow)
        return x_warp + y
    def stn(self, x, flow):
        """Warp *x* with the offset field *flow* via grid_sample (spatial transformer)."""
        _, _, H, W = flow.size()
        # Base identity grid in pixel coordinates, normalized to [-1, 1] below.
        grid = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)])
        grid = torch.stack(grid)
        grid = torch.unsqueeze(grid, 0).float().cuda()
        flow += grid
        flow[:, 0, ...] = 2 * (flow[:, 0, ...]/(H-1) - 0.5)
        flow[:, 1, ...] = 2 * (flow[:, 1, ...]/(W-1) - 0.5)
        # grid_sample expects (N, H, W, 2) sampling coordinates.
        flow = flow.permute(0, 2, 3, 1)
        return F.grid_sample(x, flow)
class PSPModule(nn.Module):
    """Pyramid Scene Parsing pooling head.

    Pools the input to each grid size in *sizes*, projects with a 1x1 conv,
    upsamples all pyramids back to the input resolution, concatenates them
    with the input, and fuses through a bottleneck.
    """
    def __init__(self, features, out_features=256, sizes=(1, 2, 3, 6)):
        super().__init__()
        # (The previous version first assigned self.stages = [] and then
        # immediately overwrote it -- the dead assignment is removed.)
        self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
        self.bottleneck = nn.Sequential(
            nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1),
            nn.Conv2d(out_features, out_features, kernel_size=3, stride=1, padding=1))
        self.relu = nn.ReLU()

    def _make_stage(self, features, size):
        # Pool the whole map down to (size x size), then mix channels 1x1.
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
        return nn.Sequential(prior, conv)

    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        # F.upsample is a deprecated alias of F.interpolate; behaviour is identical.
        priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear') for stage in self.stages] + [feats]
        bottle = self.bottleneck(torch.cat(priors, 1))
        return self.relu(bottle)
| yida2311/OSCC_SF | models/fpn_semantci_flow.py | fpn_semantci_flow.py | py | 8,230 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
2945906050 | import streamlit as st
import math
st.title("Caesar Cipher")
message = st.text_input("Enter the plaintext: ")
key = math.floor(st.number_input("Enter the secret key: "))
def CaesarCipher(message: str, key: int) -> str:
    """Shift each letter of *message* forward by *key* positions (wrapping
    within the alphabet); non-alphabetic characters pass through unchanged.

    An empty message yields the prompt string "Please enter a message.".
    """
    if not message:
        return "Please enter a message."
    shift = key % 26
    encoded = []
    for ch in message:
        if ch.isupper():
            encoded.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
        elif ch.islower():
            encoded.append(chr((ord(ch) - ord('a') + shift) % 26 + ord('a')))
        else:
            encoded.append(ch)
    return "".join(encoded)
if st.button("Encode"):
st.header(f"Cipher Text: {CaesarCipher(message, key)}") | GowthamNats/CaesarCipher | CaesarCipher.py | CaesarCipher.py | py | 954 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.title",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "streamlit.text_input",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.number_input",... |
4084437791 | """
Start with
streamlit run ./src/streamlit_app.py
"""
import datetime
import logging
import streamlit as st
import numpy as np
from models import train_model as tm
import visualization.visualize as vis
from preprocessing.pipeline import get_preprocessing_pipeline
from filehandler.load_input import load_file
st.set_page_config(layout="wide")
def main():
    """
    Inserts a dropdown menu to select period of interest: Tomorrow / Next week
    Shows 2 tables with products and sales per period
    Shows 2 graphs with products and sales per period
    """
    # Load raw inputs and run them through the shared preprocessing pipeline.
    sales = load_file("src/data/Bakery_Sales.csv")
    weather = load_file("src/data/Seoul_weather.csv", seperator=";")
    holidays = load_file("src/data/public_holidays.csv", seperator=";")
    x, y = get_preprocessing_pipeline().transform([sales, weather, holidays])
    # NOTE(review): "today" is hard-coded to the demo date; the cut-off dates
    # inside articles_per_timeframe are hard-coded to match.
    today = '2020-03-01'
    real_data = tm.get_data_with_predictions(x, y)
    # setup logo and header
    col1, mid, col2 = st.columns([1, 1, 20])
    with col1:
        st.image('SAveBreadLogo.png', width=100)
    with col2:
        st.header('SaveBread Planning Tool')
    st.write('The SaveBread Planning Tool shows a prediction '
             'of sales per product for tomorrow or next week. '
             'This helps the bakery manager to plan the production '
             'of the products according to the market demands. '
             'The target of this tool is to show the predicted sales per product'
             ' with the purpose to save bread from being wasted due to over-production.')
    time_window = st.selectbox('Which timeframe do you want to show', (
        "Tomorrow",
        "Next Week"
    ))
    col1, col2 = st.columns([3, 5])
    with col1:
        # Left column: totals per product over the chosen window.
        st.header("Aggregated predictions")
        df_input = articles_per_timeframe(real_data, today, time_window, True)
        st.dataframe(data=df_input, width=None)
        st.text('Visualization Descending')
        df_input_vis = articles_per_timeframe(real_data, today, time_window)
        vis.line_chart(df_input_vis)
    with col2:
        # Right column: breakdown by daytime (tomorrow) or weekday (next week).
        df_input = articles_per_timeframe(real_data, today, time_window, False)
        if time_window == "Tomorrow":
            st.header("Predictions for tomorrow")
            df_daytime = df_input.drop(['date', 'weekday', 'holiday', 'h_type',
                                        'weather', 'temp', 'Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun'], axis=1)
            df_daytime.set_index('daytime', inplace=True)
            df_daytime = df_daytime.apply(np.floor)
            df_daytime = df_daytime.transpose().reset_index(level=0)
            # daytime codes: 1 = morning, 2 = afternoon.
            df_daytime.rename(columns={'index': 'products',
                                       1: 'morning',
                                       2: 'afternoon'}, inplace=True)
            df_daytime['sum'] = df_daytime['morning'] + df_daytime['afternoon']
            st.dataframe(data=df_daytime, width=None)
            st.text('Visualization Tomorrow')
            vis.bar_chart_day(df_daytime)
        elif time_window == 'Next Week':
            st.header("Predictions for next week")
            df_week = df_input.drop(['date', 'daytime', 'holiday', 'h_type',
                                     'weather', 'temp', 'Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun'], axis=1)
            df_week = df_week.apply(np.floor)
            df_week = df_week.groupby('weekday').sum()
            df_week = df_week.transpose().reset_index(level=0)
            # weekday codes 0.0..6.0 map to Monday..Sunday.
            df_week.rename(columns={'index': 'products',
                                    0.0: 'Monday',
                                    1.0: 'Tuesday',
                                    2.0: 'Wednesday',
                                    3.0: 'Thursday',
                                    4.0: 'Friday',
                                    5.0: 'Saturday',
                                    6.0: 'Sunday'}, inplace=True)
            df_week['sum'] = df_week.iloc[:, 1:].sum(axis=1)
            st.dataframe(data=df_week, width=None)
            st.text('Visualization next week')
            vis.bar_chart_week(df_week)
def articles_per_timeframe(data, today, tw, agg=True):
    """
    Shows a df with products and sales per selected period: Tomorrow / Next Week

    With agg=True, returns per-product totals as a two-column frame
    (products, amount); with agg=False, returns the filtered rows unchanged.
    """
    # Keep only rows strictly after *today*.
    df_after_today = data[data.date > today]
    time_window = tw
    # NOTE(review): the cut-off dates are hard-coded to the demo window and
    # are not derived from *today* -- confirm before reusing with live data.
    if time_window == "Tomorrow":
        filtered_df = df_after_today[df_after_today.date == '2020-03-02']
    elif time_window == 'Next Week':
        filtered_df = df_after_today[df_after_today.date <= '2020-03-09']
    if agg:
        # Drop the non-product columns, floor the predictions, and sum each
        # product across every remaining row.
        df = filtered_df.drop(['date', 'daytime', 'weekday', 'holiday',
                               'h_type', 'weather', 'temp', 'Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun'], axis=1)
        df = df.transpose()
        df = df.apply(np.floor)
        df = df.sum(axis=1)
        df = df.reset_index(level=0)
        df.rename(columns={'index': 'products'}, inplace=True)
        df.columns = ['products', 'amount']
    else:
        df = filtered_df
    return df
@st.cache
def first_execution_date():
    """Timestamp of the first run; cached so app reruns reuse one log file."""
    started_at = datetime.datetime.now()
    return started_at.strftime("%Y-%m-%d-%H:%M:%S")
def setup_logger():
    """Attach a per-session file handler and a console handler to the root logger."""
    logger = logging.getLogger()
    fmt = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
    log_path = f'src/logs/savebread-{first_execution_date()}.log'
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(fmt)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
if __name__ == '__main__':
logger = setup_logger()
main()
| BerndSaurugger/SaveBread | src/streamlit_app.py | streamlit_app.py | py | 6,014 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "filehandler.load_input.load_file",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "filehandler.load_input.load_file",
"line_number": 27,
"usage_type": "call"
}... |
74267287785 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from fastapi import Request, FastAPI, HTTPException
from linebot.v3.webhook import WebhookParser
from linebot.v3.messaging import (
AsyncApiClient,
AsyncMessagingApi,
Configuration,
ReplyMessageRequest,
TextMessage
)
from linebot.v3.exceptions import (
InvalidSignatureError
)
from linebot.v3.webhooks import (
MessageEvent,
TextMessageContent
)
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
configuration = Configuration(
access_token=channel_access_token
)
app = FastAPI()
async_api_client = AsyncApiClient(configuration)
line_bot_api = AsyncMessagingApi(async_api_client)
parser = WebhookParser(channel_secret)
@app.post("/callback")
async def handle_callback(request: Request):
    """LINE webhook endpoint: verify the signature, then echo every text message."""
    # LINE signs the raw request body; the signature arrives in this header.
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = await request.body()
    body = body.decode()
    try:
        events = parser.parse(body, signature)
    except InvalidSignatureError:
        raise HTTPException(status_code=400, detail="Invalid signature")
    for event in events:
        # Only text-message events are handled; everything else is skipped.
        if not isinstance(event, MessageEvent):
            continue
        if not isinstance(event.message, TextMessageContent):
            continue
        # Echo the received text back using the event's reply token.
        await line_bot_api.reply_message(
            ReplyMessageRequest(
                reply_token=event.reply_token,
                messages=[TextMessage(text=event.message.text)]
            )
        )
    return 'OK'
| line/line-bot-sdk-python | examples/fastapi-echo/main.py | main.py | py | 2,424 | python | en | code | 1,739 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 45,
"u... |
14980635082 | import cv2
import os
import Predict as p
# Label for each trained subject; index 0 is unused because labels start at 1.
subjects = ["", "Ranbir", "Elvis Presley","Change it with your name"]
#load test images
# NOTE(review): cv2.imread returns None (it does not raise) when a path is
# wrong -- the predict calls below would then fail; verify the test-data paths.
test_img1 = cv2.imread("test-data/1.1.jfif")
test_img2 = cv2.imread("test-data/1.2.jfif")
test_img3 = cv2.imread("test-data/1.3.jpg")
#perform a prediction
predicted_img1 = p.predict(test_img1)
predicted_img2 = p.predict(test_img2)
predicted_img3 = p.predict(test_img3)
# Shrink the third image so it fits on screen.
predicted_img3 = cv2.resize(predicted_img3, (700, 500))
print("Prediction complete")
#display images
cv2.imshow(subjects[1], predicted_img1)
cv2.imshow(subjects[2], predicted_img2)
cv2.imshow(subjects[3], predicted_img3)
# Block until a key is pressed, then close all OpenCV windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Predict.predict",
"line_number": ... |
3195540576 | import requests
import re
#Obtendo html
# Python Challenge level 4: follow the chain of "the next nothing is N" pages.
nothing = 44827
divide = False
while nothing:
    print(f'----------------------------------------\ncurrent nothing is: {nothing}')
    url = f"http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing={nothing}"
    html = requests.get(url).text
    print(html)
    # Pull the next "nothing" out of e.g. "and the next nothing is 12345".
    numbers = re.findall(r'(?<=nothing is\s)\d+', html)
    if numbers:
        nothing = numbers[0]
    else:
        # No follow-up number: the page tells us to divide by two and keep
        # going. (The previous version had an `elif divide` branch that read
        # numbers[0] here, which always raised IndexError because `numbers`
        # is empty on this path.) Integer division keeps the URL parameter
        # free of a trailing ".0".
        nothing = int(nothing) // 2
        divide = True
| TassioS/Python-Challenge | 4.py | 4.py | py | 560 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 12,
"usage_type": "call"
}
] |
13899195903 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from math import e
import seaborn as sn
from sklearn.metrics import confusion_matrix
from pretty_confusion_matrix import pp_matrix_from_data
# Ryan Filgas
# Machine learning
def get_distance(a, b, c, d):
    """Vectorised Euclidean distance sqrt((a-b)^2 + (c-d)^2), as a DataFrame."""
    squared = (a - b) ** 2 + (c - d) ** 2
    return pd.DataFrame(squared).pow(0.5)
def one_point_distance(a, b, c, d):
    """Euclidean distance between the points (a, c) and (b, d)."""
    return np.sqrt((a - b) ** 2 + (c - d) ** 2)
# Assign a point to its nearest cluster centre and return that centre's index.
def classify(point, clusters, K):
    if K == 1:
        # Single cluster: everything belongs to centre 0.
        return 0
    distances = get_distance(clusters[0], point[0], clusters[1], point[1])
    return distances.idxmin()[0]
#within cluster sum of squares
def WCSS(cluster_buckets, solution, K):
sum = 0
for i in range(K):
bucket = cluster_buckets[i]
bucket_size = len(bucket.T)
for j in range(bucket_size):
sum += (one_point_distance(bucket[j][0], solution[i][0], bucket[j][1], solution[i][1])**2)
return sum
# Kmeans for 1 K value
def kmeans_instance(R,K,data,cluster):
    """One k-means run from the given starting centres.

    R (int): number of refinement iterations.
    K (int): number of clusters.
    data: 2-column point set (coerced to a DataFrame).
    cluster: initial K x 2 centres (DataFrame-like).
    Returns (centres, WCSS error, list of per-cluster point buckets).

    NOTE(review): `classify` is always called with the *initial* `cluster`
    centres -- `solution` is recomputed each sweep but never fed back, so
    iterations beyond the first reassign nothing.  Looks like it should be
    `cluster = solution` at the end of each sweep -- confirm with author.
    """
    data = pd.DataFrame(data)
    solution = np.array(cluster).copy()
    for i in range(R):
        #classify all points and add to temporary bucket
        temp = pd.DataFrame()
        # sharing one empty frame across slots is safe: pd.concat below
        # always builds a new frame instead of mutating in place
        cluster_buckets = [temp for i in range(K)]
        for j in range(len(data)):
            c_index = classify([data[0][j], data[1][j]], cluster, K)
            cluster_buckets[c_index] = pd.concat([cluster_buckets[c_index], data.T[j]], axis=1, ignore_index=True)
        #update mean to output solution
        for k in range(K):
            solution[k] = cluster_buckets[k].T.mean()
    #calculate error_calc
    error_calc = WCSS(cluster_buckets, solution, K)
    return solution, error_calc, cluster_buckets
def kmeans(R, K, data):
    """Run R independent k-means instances from random starting centres
    and keep the one with the lowest within-cluster sum of squares.

    R (int): number of random restarts (also passed through as the
             per-instance iteration count, matching the original design).
    K (int): number of clusters.
    data: 2-column DataFrame of points.
    Returns (best centres, best WCSS error, best per-cluster buckets).
    """
    # R random initialisations: each is K points sampled from the data
    clusters = [np.array(data.sample(K)) for _ in range(R)]
    errors = list()
    solutions = list()
    all_cluster_buckets = list()
    for i in range(R):
        solution, error_calc, cluster_buckets = kmeans_instance(R, K, data, pd.DataFrame(clusters[i]))
        errors.append(error_calc)
        solutions.append(solution)
        all_cluster_buckets.append(cluster_buckets)
        print("Instance complete.")
    # pick the restart with the smallest WCSS (the original computed the
    # argmin twice; one lookup suffices)
    best_error = min(errors)
    solution_location = errors.index(best_error)
    best_solution = solutions[solution_location]
    best_clusters = all_cluster_buckets[solution_location]
    return best_solution, best_error, best_clusters
def plot_kmeans(K, R, best_solution, best_error, cluster_buckets):
    """Scatter-plot every point coloured by its cluster, overlay the
    centres in black, and save the figure as '<K>.jpeg'."""
    filename = str(K) + ".jpeg"
    points = list()
    classes = list()
    for cluster_idx in range(K):
        bucket = cluster_buckets[cluster_idx]
        for col in range(len(bucket.T)):
            points.append(bucket[col])
            classes.append(cluster_idx)
    coords = np.array(points).T
    title = "K = " + str(K) + ", Error = " + str(best_error)
    plt.rcParams["figure.figsize"] = (10, 10)
    plt.scatter(coords[0], coords[1], c=classes, s=10, cmap='tab10')
    plt.scatter(best_solution.T[0], best_solution.T[1], c='black', s=100, alpha=1)
    plt.title(title)
    plt.savefig(filename)
    plt.clf()
# Driver: run k-means with R = 10 random restarts for each K in 1..10 on a
# two-column, space-separated dataset, reporting the best WCSS per K and
# saving one scatter plot ('<K>.jpeg') per K.
KVALS = [1,2,3,4,5,6,7,8,9,10]
R = 10
data= pd.DataFrame(pd.read_csv('cluster_dataset.txt', sep=" ", header=None))
for K in KVALS:
    best_solution, best_error, cluster_buckets = kmeans(R,K,data)
    print("Error for K = ", K, ": ", best_error)
    plot_kmeans(K, R, best_solution, best_error, cluster_buckets)
    print("K complete: ", K)
| rfilgas/ML-AI-CV | ML-K-Means-Classification-Gaussian/kmeans.py | kmeans.py | py | 3,690 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_... |
139802964 | import os
import sys
import logging
from datetime import datetime
from mongoengine.errors import ValidationError
sys.path.insert(1, os.path.join(os.getcwd(), '../', 'src'))
from flaskapp.model import User
logger = logging.getLogger()
def create_user():
    '''Persist and return a brand-new User with an empty document list.'''
    new_account = User(documents=[], lastAccess=datetime.utcnow())
    new_account.save()
    return new_account
def get_user(cookie):
    '''Fetch the User whose object id equals `cookie`; None when the
    cookie is not a valid id.'''
    try:
        return User.objects.with_id(cookie)
    except ValidationError:
        logger.warning(f'Cookie {cookie} is invalid')
        return None
def get_doc_id(cookie, policy='last'):
    '''Return one document id owned by the user identified by `cookie`.

    cookie (str): cookie associated with a user
    policy (str): 'first' for the oldest stored document, 'last' (default)
        for the most recent one.
    Returns None when the user is unknown or owns no documents.
    '''
    policies = ['first', 'last']
    assert policy in policies, f"policy should be one of: {policies}"
    user = get_user(cookie)
    if user is None:
        logger.info('User is None')
        return None
    if not user.documents:
        logger.info(f'User (cookie {cookie}) has no document')
        return None
    if policy == 'first':
        return user.documents[0]
    if policy == 'last':
        return user.documents[-1]
    # only reachable when asserts are stripped (-O) and an unknown policy
    # slips through
    raise RuntimeError('Something went wrong')
def log_access(cookie, document_id):
    '''Update the user account in the database.

    Refreshes the user's lastAccess timestamp and records `document_id`,
    evicting the oldest id once the per-user cap is reached.  No-op for
    unknown cookies.

    cookie (str): cookie associated with a user
    document_id (str): document id (usually generated with document.create_doc_id())
    '''
    user = get_user(cookie)
    if user is None:
        # plain string: the original f-string had no placeholders (F541)
        logger.info('User is None')
        return
    user.lastAccess = datetime.utcnow()
    if document_id not in user.documents:
        # keep at most maxDocuments ids; drop the oldest first
        if len(user.documents) == user.maxDocuments:
            user.documents.pop(0)
        user.documents.append(document_id)
    user.save()
def set_kakao_id(cookie, nickname: str):
    '''Attach a Kakao nickname to the user identified by `cookie`.

    cookie (str): cookie associated with a user
    nickname (str): Kakao nickname to store on the account
    '''
    user = get_user(cookie)
    if user is None:
        # plain string: the original f-string had no placeholders (F541)
        logger.info('User is None')
        return
    user.kakaoid = nickname
    user.save()
| bangjh0730/asdf | src/flaskapp/user.py | user.py | py | 2,029 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
33512502317 | from math import sqrt, fabs
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from numpy import genfromtxt
# Grid configurations
plt.grid(True, linewidth=0.2, c='k')
def get_near_psd(A_matrix):
    """Project a (possibly asymmetric) matrix onto a nearby positive
    semi-definite one: symmetrise, then clip negative eigenvalues to 0."""
    symmetric = (A_matrix + A_matrix.T) / 2
    vals, vecs = np.linalg.eigh(symmetric)
    clipped = np.clip(vals, 0, None)
    return vecs @ np.diag(clipped) @ vecs.T
def is_pos_semidef(x):
    """True when every eigenvalue of `x` is non-negative."""
    eigenvalues = np.linalg.eigvals(x)
    return np.all(eigenvalues >= 0)
data = pd.read_csv('../SyntheticData/data_mcar40_d50_1K.csv')
d = str(50)
X_test = np.loadtxt('X1K_d50_test.csv', delimiter=',')
Y_test = np.loadtxt('Y1K_d50_test.csv', delimiter=',')
Y_test = Y_test[:, np.newaxis]
X = data[data.columns[0:-1]]
Y = data[[d]]
MSK_X = X.isna()
MSK_Y = Y.isna()
MSK_X = MSK_X.values
MSK_Y = MSK_Y.values
originalX = X.copy()
originalY = Y.copy()
# X = pd.DataFrame(X, index=originalX.index, columns=originalX.columns)
# Y = pd.DataFrame(Y)
Y['target'] = Y['50']
Y = Y.drop(['50'], axis=1)
number_of_test_points = Y_test.shape[0]
original_std = np.nanstd(Y_test)
data_points = X.shape[0]
mask_X = X.isna()
mask_X = mask_X.to_numpy()
mask_X = np.ones(shape=mask_X.shape) - mask_X
train_std = sqrt(Y.var()[0])
train_mean = Y.mean()[0]
mask_Y_test = Y.isna()
mask_Y_test = mask_Y_test.to_numpy()
missing_entries = mask_Y_test.sum(axis=0)[0]
mask_Y = np.ones(shape=mask_Y_test.shape) - mask_Y_test
sc = StandardScaler()
sc_y = StandardScaler()
sc.fit(X)
sc_y.fit(Y)
cols = X.columns
inds = X.index
cols_y = Y.columns
inds_y = Y.index
X1 = sc.transform(X)
Y1 = sc_y.transform(Y)
X = sc.transform(X)
Y = sc_y.transform(Y)
X_test = sc.transform(X_test)
train_X = pd.DataFrame(X1, columns=cols, index=inds)
train_Y = pd.DataFrame(Y1, columns=cols_y, index=inds_y)
X[np.isnan(X)] = 0
Y[np.isnan(Y)] = 0
C = np.dot(X.T, X) / np.dot(mask_X.T, mask_X)
b = np.dot(X.T, Y) / np.dot(mask_X.T, mask_Y)
lam = 1
theta = np.dot(np.linalg.inv(C + lam * np.identity(X.shape[1])), b)
Y_pred = np.dot(X_test, theta)
Y_pred = train_std * Y_pred + train_mean
mse = np.linalg.norm(Y_pred - Y_test) ** 2 / number_of_test_points
print("RMSE: ", sqrt(mse))
print("Scaled: ", sqrt(mse) / original_std)
print("-----------------------------")
number_of_features = X.shape[1]
confidence_matrix = np.zeros(shape=(number_of_features, number_of_features))
features = train_X.columns
print(features)
sample_coeff = 100
sampling_number = 30
with_replacement = True
msk_gram = np.dot(mask_X.T, mask_X)
for i in range(number_of_features):
print("Feature num: ", i + 1)
for j in range(i, number_of_features):
feature_i = features[i]
feature_j = features[j]
columns = train_X[[feature_i, feature_j]]
intersections = columns[columns[[feature_i, feature_j]].notnull().all(axis=1)]
intersection_num = len(intersections)
if intersection_num != msk_gram[i][j]:
print(intersection_num)
print(msk_gram[i][j])
print(i, j)
print("Error")
exit(1)
sample_size = intersection_num // sample_coeff
if sample_size < 5:
sample_size = intersection_num
with_replacement = True
estimation_array = []
for ind in range(sampling_number):
current_sample = np.array(intersections.sample(n=sample_size, replace=with_replacement))
with_replacement = False
f1 = current_sample[:, 0]
f2 = current_sample[:, 1]
inner_prod = np.inner(f1, f2) / sample_size
estimation_array.append(inner_prod)
confidence_matrix[i][j] = np.std(estimation_array)
# print(estimation_array)
# print(i, j, C[i][j], confidence_matrix[i][j])
for j in range(number_of_features):
for i in range(j + 1, number_of_features):
confidence_matrix[i][j] = confidence_matrix[j][i]
print("------------Confidence Matrix---------------")
print(confidence_matrix)
print("---------------------------")
# target confidence:
conf_list = []
cov_msk_train = np.dot(mask_X.T, mask_Y)
for i in range(number_of_features):
feature_i = features[i]
current_feature = train_X[[feature_i]].to_numpy()
current_Y = train_Y.to_numpy()
columns = np.concatenate((current_feature, current_Y), axis=1)
columns = pd.DataFrame(columns, columns=[feature_i, 'Y'])
intersections = columns[columns[[feature_i, "Y"]].notnull().all(axis=1)]
intersections2 = columns[columns[[feature_i]].notnull().all(axis=1)]
intersection_num = len(intersections)
intersection_num2 = len(intersections2)
if intersection_num != cov_msk_train[i][0]:
print(intersection_num, intersection_num2, cov_msk_train[i][0])
exit(1)
sample_size = intersection_num // sample_coeff
estimation_array = []
for ind in range(sampling_number):
current_sample = np.array(intersections.sample(n=sample_size, replace=with_replacement))
f1 = current_sample[:, 0]
f2 = current_sample[:, 1]
inner_prod = np.inner(f1, f2) / sample_size
estimation_array.append(inner_prod)
conf_list.append(np.std(estimation_array))
print(conf_list)
np.savetxt("conf_matrix.csv", confidence_matrix, delimiter=",")
np.savetxt("conf_list.csv", conf_list, delimiter=",")
# confidence_matrix = np.loadtxt('conf_matrix.csv', delimiter=',')
# conf_list = np.loadtxt('conf_list.csv', delimiter=',')
# print(confidence_matrix.shape)
# print(conf_list)
const = 0.1
C_min = C - const * confidence_matrix
C_max = C + const * confidence_matrix
y_conf = np.asarray(conf_list)
y_conf = y_conf[:, np.newaxis]
b_min = b - const * y_conf
b_max = b + const * y_conf
sample_coeff = 0.01
sampling_number = 30
number_of_iterations = 2000
rho_list = [0.5, 1]
plots = []
print("----------------------")
checker = False
min_cost = 999999999999999999999999
optimal_theta = np.zeros(theta.shape)
rho = 0.05
b_init = np.dot(X.T, Y) / np.dot(mask_X.T, mask_Y)
C_init = np.dot(X.T, X) / np.dot(mask_X.T, mask_X)
theta_init = np.dot(np.linalg.inv(C + lam * np.identity(X.shape[1])), b)
# rho = 10
d_prime_init = np.zeros(theta.shape)
e_prime_init = np.zeros(theta.shape)
theta_prime_init = theta.copy()
mu_d_init = np.zeros(theta.shape)
mu_e_init = np.zeros(theta.shape)
mu_theta_init = np.zeros(theta.shape)
eta_init = np.zeros(theta.shape)
A_init = np.zeros(C.shape)
A_prime_init = np.zeros(C.shape)
B_init = np.zeros(C.shape)
B_prime_init = np.zeros(C.shape)
G_init = np.zeros(C.shape)
M_A_init = np.zeros(C.shape)
M_B_init = np.zeros(C.shape)
Gamma_init = np.zeros(C.shape)
rmse_list = []
for _ in range(12):
# First block (theta, d, e, A_prime, B_prime, G)
# Updating theta, d, e
c_1 = b_min + rho * d_prime_init - mu_d_init + eta_init
c_2 = -b_max + rho * e_prime_init - mu_e_init - eta_init
c_3 = rho * theta_prime_init - mu_theta_init - 2 * eta_init
theta_init = 1 / (6 * lam + 7 * rho) * (2 * c_1 - 2 * c_2 + 3 * c_3)
d_init = 1 / (6 * lam + 7 * rho) * ((6 * rho + 4 * lam) / rho * c_1 + (rho + 4 * lam) / rho * c_2 + 2 * c_3)
e_init = 1 / (6 * lam + 7 * rho) * ((rho + 2 * lam) / rho * c_1 + (6 * rho + 2 * lam) / rho * c_2 - 2 * c_3)
# Updating A_prime, B_prime, G
A_prime_init = np.maximum(A_init + M_A_init / rho, 0)
B_prime_init = np.maximum(B_init + M_B_init / rho, 0)
G_init = get_near_psd(B_init - A_init + Gamma_init / rho - np.dot(theta_prime_init, theta_prime_init.T)) + np.dot(theta_prime_init, theta_prime_init.T)
# G = B - A + Gamma / rho
# Second Block (d_prime, e_prime, theta_prime, A, B)
# Updating d_prime, e_prime
d_prime = np.maximum(d_init + mu_d_init / rho, 0)
e_prime = np.maximum(e_init + mu_e_init / rho, 0)
# theta_prime = theta + mu_theta / rho
# Updating A and B
D_1 = rho * A_prime_init - rho * G_init + Gamma_init - M_A_init + C_min
D_2 = rho * B_prime_init + rho * G_init - Gamma_init - M_B_init - C_max
A_init = 1 / (3 * rho) * (2 * D_1 + D_2)
B_init = 1 / (3 * rho) * (D_1 + 2 * D_2)
# symmetric_G = (G + G.T) / 2
# print(symmetric_G - G)
# Updating theta_prime
alpha = theta_init + mu_theta_init / rho
U, S, VT = np.linalg.svd(G_init)
gamma = np.dot(U.T, alpha)
optimal_solution = np.zeros(shape=S.shape)
mu_star_min = 0
mu_star_max = 10e5
current_mu = (mu_star_min + mu_star_max) / 2
summation = 0
for ind in range(optimal_solution.shape[0]):
optimal_solution[ind] = S[ind] * gamma[ind][0] / (S[ind] + current_mu)
summation += optimal_solution[ind] * optimal_solution[ind] / (S[ind] + 0.00001)
max_iter = 0
while (summation - 1 > 0.01 or summation - 1 < - 0.01) and max_iter < 5000:
max_iter += 1
if summation - 1 > 0.01:
mu_star_min = current_mu
else:
mu_star_max = current_mu
current_mu = (mu_star_min + mu_star_max) / 2
summation = 0
for ind in range(optimal_solution.shape[0]):
optimal_solution[ind] = S[ind] * gamma[ind][0] / (S[ind] + current_mu)
summation += optimal_solution[ind] * optimal_solution[ind] / S[ind]
optimal_solution = optimal_solution[:, np.newaxis]
theta_prime_init = np.dot(U, optimal_solution)
# Updating Multipliers
mu_d_init += rho * (d_init - d_prime_init)
mu_e_init += rho * (e_init - e_prime_init)
mu_theta_init += rho * (theta_init - theta_prime_init)
eta_init += rho * (2 * theta_init - d_init + e_init)
M_A_init += rho * (A_init - A_prime_init)
M_B_init += rho * (B_init - B_prime_init)
Gamma_init += rho * (B_init - A_init - G_init)
# Main Part
for rho in rho_list:
cost_array = []
real_cost_array = []
constraint_array = []
b = b_init.copy()
C = C_init.copy()
theta = theta_init.copy()
# rho = 10
d_prime = d_prime_init.copy()
e_prime = e_prime_init.copy()
theta_prime = theta_prime_init.copy()
mu_d = mu_d_init.copy()
mu_e = mu_e_init.copy()
mu_theta = mu_theta_init.copy()
eta = eta_init.copy()
A = A_init.copy()
A_prime = A_prime_init.copy()
B = B_init.copy()
B_prime = B_prime_init.copy()
G = G_init.copy()
M_A = M_A_init.copy()
M_B = M_B_init.copy()
Gamma = Gamma_init.copy()
for j in range(number_of_iterations):
# First block (theta, d, e, A_prime, B_prime, G)
# Updating theta, d, e
c_1 = b_min + rho * d_prime - mu_d + eta
c_2 = -b_max + rho * e_prime - mu_e - eta
c_3 = rho * theta_prime - mu_theta - 2 * eta
theta = 1 / (6 * lam + 7 * rho) * (2 * c_1 - 2 * c_2 + 3 * c_3)
d = 1 / (6 * lam + 7 * rho) * ((6 * rho + 4 * lam) / rho * c_1 + (rho + 4 * lam) / rho * c_2 + 2 * c_3)
e = 1 / (6 * lam + 7 * rho) * ((rho + 2 * lam) / rho * c_1 + (6 * rho + 2 * lam) / rho * c_2 - 2 * c_3)
# Updating A_prime, B_prime, G
A_prime = np.maximum(A + M_A / rho, 0)
B_prime = np.maximum(B + M_B / rho, 0)
G = get_near_psd(B - A + Gamma / rho - np.dot(theta_prime, theta_prime.T)) + np.dot(theta_prime, theta_prime.T)
# G = B - A + Gamma / rho
# Second Block (d_prime, e_prime, theta_prime, A, B)
# Updating d_prime, e_prime
d_prime = np.maximum(d + mu_d / rho, 0)
e_prime = np.maximum(e + mu_e / rho, 0)
# theta_prime = theta + mu_theta / rho
# Updating A and B
D_1 = rho * A_prime - rho * G + Gamma - M_A + C_min
D_2 = rho * B_prime + rho * G - Gamma - M_B - C_max
A = 1 / (3 * rho) * (2 * D_1 + D_2)
B = 1 / (3 * rho) * (D_1 + 2 * D_2)
# symmetric_G = (G + G.T) / 2
# print(symmetric_G - G)
# Updating theta_prime
alpha = theta + mu_theta / rho
U, S, VT = np.linalg.svd(G)
gamma = np.dot(U.T, alpha)
optimal_solution = np.zeros(shape=S.shape)
mu_star_min = 0
mu_star_max = 10e5
current_mu = (mu_star_min + mu_star_max) / 2
summation = 0
for ind in range(optimal_solution.shape[0]):
optimal_solution[ind] = S[ind] * gamma[ind][0] / (S[ind] + current_mu)
summation += optimal_solution[ind] * optimal_solution[ind] / (S[ind] + 0.00001)
max_iter = 0
while (summation - 1 > 0.01 or summation - 1 < - 0.01) and max_iter < 5000:
max_iter += 1
if summation - 1 > 0.01:
# current mu should be bigger
mu_star_min = current_mu
else:
mu_star_max = current_mu
current_mu = (mu_star_min + mu_star_max) / 2
summation = 0
for ind in range(optimal_solution.shape[0]):
optimal_solution[ind] = S[ind] * gamma[ind][0] / (S[ind] + current_mu)
summation += optimal_solution[ind] * optimal_solution[ind] / S[ind]
optimal_solution = optimal_solution[:, np.newaxis]
theta_prime = np.dot(U, optimal_solution)
# Updating Multipliers
mu_d += rho * (d - d_prime)
mu_e += rho * (e - e_prime)
mu_theta += rho * (theta - theta_prime)
eta += rho * (2 * theta - d + e)
M_A += rho * (A - A_prime)
M_B += rho * (B - B_prime)
Gamma += rho * (B - A - G)
# Calculating The objective function:
cost = - np.dot(b_min.T, d)[0][0] + np.dot(b_max.T, e)[0][0] - np.sum(np.multiply(C_min, A)) + \
np.sum(np.multiply(C_max, B)) + lam * np.linalg.norm(theta) ** 2 + \
np.sum(np.multiply(A - A_prime, M_A)) + \
rho / 2 * np.linalg.norm(A - A_prime, 'fro') ** 2 + np.sum(np.multiply(B - B_prime, M_B)) + \
rho / 2 * np.linalg.norm(B - B_prime, 'fro') ** 2 + np.dot(mu_d.T, d - d_prime)[0][0] + \
rho / 2 * np.linalg.norm(d - d_prime) ** 2 + np.dot(mu_e.T, e - e_prime)[0][0] + \
rho / 2 * np.linalg.norm(e - e_prime) ** 2 + np.dot(mu_theta.T, theta - theta_prime)[0][0] + \
rho / 2 * np.linalg.norm(theta - theta_prime) ** 2 + np.dot(eta.T, 2 * theta - d + e)[0][0] + \
rho / 2 * np.linalg.norm(2 * theta - d + e) ** 2 + np.sum(np.multiply(B - A - G, Gamma)) + \
rho / 2 * np.linalg.norm(B - A - G, 'fro') ** 2
real_cost = - np.dot(b_min.T, d)[0][0] + np.dot(b_max.T, e)[0][0] - np.sum(np.multiply(C_min, A)) + np.sum(
np.multiply(C_max, B)) + lam * np.linalg.norm(theta) ** 2
print(cost, real_cost)
"""
print("||d - d'||^2: ", np.linalg.norm(d - d_prime) ** 2)
print("||e - e'||^2: ", np.linalg.norm(e - e_prime) ** 2)
print("||theta - theta'||^2: ",
np.linalg.norm(theta - theta_prime) ** 2 / (np.linalg.norm(theta) ** 2 + np.linalg.norm(theta_prime) ** 2))
print("||2theta - d + e||^2: ", np.linalg.norm(2 * theta - d + e) ** 2)
print("||A - A'||^2: ", np.linalg.norm(A - A_prime, 'fro') ** 2)
print("||B - B'||^2: ", np.linalg.norm(B - B_prime, 'fro') ** 2)
print("||B - A - G||^2: ", np.linalg.norm(B - A - G, 'fro') ** 2)
print(- np.dot(b_min.T, d)[0][0])
print(np.dot(b_max.T, e)[0][0])
print(- np.sum(np.multiply(C_min, A)))
print(np.sum(np.multiply(C_max, B)))
print(lam * np.linalg.norm(theta) ** 2)
print('A norm: ', np.linalg.norm(A, 'fro') ** 2)
print('B norm: ', np.linalg.norm(B, 'fro') ** 2)
print('d norm: ', np.linalg.norm(d) ** 2)
print('e norm: ', np.linalg.norm(e) ** 2)
print('theta norm: ', np.linalg.norm(theta) ** 2)
print("#################")
"""
if not checker:
real_cost_array.append(real_cost)
cost_array.append(cost)
constraint_array.append(fabs(cost - real_cost))
# print(real_cost)
checker = False
if real_cost < min_cost and j > 100:
min_cost = real_cost
optimal_theta = theta.copy()
print("||d - d'||^2: ", np.linalg.norm(d - d_prime) ** 2)
print("||e - e'||^2: ", np.linalg.norm(e - e_prime) ** 2)
print("||theta - theta'||^2: ",
np.linalg.norm(theta - theta_prime) ** 2 / (np.linalg.norm(theta) ** 2 + np.linalg.norm(theta_prime) ** 2))
print("||2theta - d + e||^2: ", np.linalg.norm(2 * theta - d + e) ** 2)
print("||A - A'||^2: ", np.linalg.norm(A - A_prime, 'fro') ** 2)
print("||B - B'||^2: ", np.linalg.norm(B - B_prime, 'fro') ** 2)
print("||B - A - G||^2: ", np.linalg.norm(B - A - G, 'fro') ** 2)
print(- np.dot(b_min.T, d)[0][0])
print(np.dot(b_max.T, e)[0][0])
print(- np.sum(np.multiply(C_min, A)))
print(np.sum(np.multiply(C_max, B)))
print(lam * np.linalg.norm(theta) ** 2)
print('A norm: ', np.linalg.norm(A, 'fro') ** 2)
print('B norm: ', np.linalg.norm(B, 'fro') ** 2)
print('d norm: ', np.linalg.norm(d) ** 2)
print('e norm: ', np.linalg.norm(e) ** 2)
print('theta norm: ', np.linalg.norm(theta) ** 2)
print("G - theta' theta'^T", is_pos_semidef(G - np.dot(theta_prime, theta_prime.T)))
H = np.linalg.eigvals(G - np.dot(theta, theta.T))
print(H)
Y_pred = np.dot(X_test, theta)
Y_pred = train_std * Y_pred + train_mean
mse = np.linalg.norm(Y_pred - Y_test) ** 2 / number_of_test_points
print("RMSE: ", sqrt(mse))
print("Scaled: ", sqrt(mse) / original_std)
rmse_list.append(sqrt(mse) / original_std)
print("-----------------------------")
"""
print("And now optimal:")
print("Min cost: ", min_cost)
Y_pred = np.dot(X_test, optimal_theta)
Y_pred = train_std * Y_pred + train_mean
mse = np.linalg.norm(Y_pred - Y_test) ** 2 / number_of_test_points
print("RMSE: ", sqrt(mse))
print("Scaled: ", sqrt(mse) / original_std)
"""
plots.append(real_cost_array)
for item in plots:
plt.plot(item)
plt.show()
print(rmse_list)
| optimization-for-data-driven-science/RIFLE | RIFLE_via_ADMM/ADMM_Synthetic.py | ADMM_Synthetic.py | py | 17,963 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.linalg.eigh",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.lina... |
23115996703 | import torch
from NAQS import NAQS, intial_state
from lnZ_2d import TensorSampling, HonNN
import numpy as np
from scipy.linalg import sqrtm
from scipy import sparse
import csv
import time
def learning(model, model0, optimizer, loss_func, epoch_start, epoch_end, batch_size, my_device, beta=1.0, memo='init', save_path='./data/', B_anneal = 0.999, init_para=False):
    """Variational training loop for an autoregressive quantum state.

    Minimises the mean of `loss_func` over samples drawn from `model`
    using a score-function (REINFORCE-style) gradient, with the gradient
    normalised by the mean loss.  Every 1000 epochs the loss is
    re-estimated on a 100x larger batch, appended to
    '<save_path><memo>.csv', and the model is checkpointed.

    model:     network being trained (moved to `my_device`).
    model0:    source network whose weights seed `model` when `init_para`.
    loss_func: callable (samples, beta) -> per-sample loss tensor.
    beta:      target inverse temperature, annealed in as
               beta * (1 - B_anneal ** (epoch - 1)).
    Returns the trained model.
    """
    accuracy = 1
    model = model.to(my_device)
    # model_optim = model
    if init_para:
        # warm-start from model0's parameters
        model.load_state_dict(model0.state_dict())
    # if model.n == model0.n:
    #     if len(model.layers) == len(model0.layers):
    #         model.load_state_dict(model0.state_dict())
    #     else:
    #         model.layers[0].weight.data = model0.layers[0].weight.detach().clone()
    #         model.layers[0].bias.data = model0.layers[0].bias.detach().clone()
    t0 = time.time()
    beta0 = beta
    for epoch in range(epoch_start + 1, epoch_end + 1):
        # anneal beta towards beta0 as training progresses
        beta = beta0 * (1 - B_anneal ** (epoch - 1))
        optimizer.zero_grad()
        with torch.no_grad():
            samples = model.sample(batch_size)
            # print(samples.shape)
        assert not samples.requires_grad
        with torch.no_grad():
            loss = loss_func(samples, beta)
        assert not loss.requires_grad
        log_psi = model.log_psi_conj(samples)
        # score-function estimator; the baseline loss.mean() reduces variance
        loss_reinforce = - ((2 * (loss - loss.mean()) * log_psi).mean() / loss.mean()).real
        loss_reinforce.backward()
        optimizer.step()
        if epoch % 1000 == 0:
            # checkpoint: larger-batch loss estimate, CSV log, model dump
            t1 = time.time()
            with torch.no_grad():
                samples0 = model.sample(batch_size * 100)
                loss0 = loss_func(samples0, beta)
                loss0 = loss0.mean().item()
            # acc = abs(loss0 + 1)
            # if epoch > 500:
            #     if acc < accuracy:
            #         accuracy = acc
            #         model_optim = model
            with open(save_path + memo +'.csv', 'a') as f:
                writer = csv.writer(f)
                writer.writerow([epoch, loss0,
                                 ])
            torch.save(model, save_path + memo + '_model_' + str(epoch) + '.pth')
            print('epoch= {}, checkpoint_loss= {}, time= {}'.format(epoch, loss0, t1-t0))
    return model
def learn_init_state(model0, num_site, net_depth, beta, batch_size, num_epoch, l_r, my_type, my_device, save_path, memo='init', B_anneal=0.999):
    """Fit a fresh NAQS to the exact initial state psi0 of TensorSampling.

    Trains in two stages: half the epochs at learning rate l_r, then the
    remaining half at l_r / 10 with a 10x larger batch, warm-started from
    the first stage.  Saves the final model and psi0's log-norm to disk.
    Returns (trained model, log_norm).
    """
    model = NAQS(num_site=num_site, net_depth=net_depth, my_type=my_type, my_device=my_device)
    optimizer = torch.optim.Adam(model.parameters(), lr=l_r)
    TS = TensorSampling(num_site, beta, my_type, my_device)
    psi0, log_norm = TS.psi0()
    # loss_func = lambda samples, beta: - TS.phi_func(psi0, samples) / model.psi_total(samples)
    # overlap-style loss: ratio of the target amplitude to the model's
    loss_func = lambda samples, beta: - TS.psi_func(psi0, samples) / model.psi_total(samples)
    es, ee = 0, num_epoch // 2
    model_out = learning(model=model, optimizer=optimizer, loss_func=loss_func, model0=model0, beta=beta, epoch_start=es,
                         epoch_end=ee, batch_size=batch_size, my_device=my_device, memo=memo, save_path=save_path)
    es, ee = num_epoch // 2, num_epoch
    model = NAQS(num_site=num_site, net_depth=net_depth, my_type=my_type, my_device=my_device)
    # optimizer = torch.optim.Adam(model.parameters(), lr=l_r)
    # stage 2: reuse the optimizer with a 10x smaller learning rate
    optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] / 10
    model_out = learning(model=model, optimizer=optimizer, loss_func=loss_func, model0=model_out, beta=beta,
                         epoch_start=es,
                         epoch_end=ee, batch_size=batch_size * 10, my_device=my_device, memo=memo, save_path=save_path, init_para=True)
    torch.save(model_out, save_path + memo + '_model_' + '.pth')
    torch.save(log_norm, save_path + memo + '_log_norm_' + '.pth')
    return model_out, log_norm
def init_base(psi_func, s, num_site):
    '''Amplitude of the doubled-spin base state: psi_func evaluated on the
    first num_site spins, multiplied by an indicator that each of those
    spins equals its copy num_site positions later (values are +/-1, so
    (s_i * s_{i+n} + 1) / 2 is 1 on a match and 0 otherwise).'''
    amp = psi_func(s[:, :num_site])
    left_half = s[:, :num_site]
    right_half = s[:, num_site:]
    for site in range(num_site):
        amp *= (left_half[:, site] * right_half[:, site] + 1) / 2
    return amp
def init_base_state(num_site, net_depth, beta, batch_size, num_epoch, l_r, my_type, my_device, save_path, memo='init_base', B_anneal=0.999):
    """Train a doubled-register (2 * num_site) NAQS to represent psi0 on
    the first half with the second half constrained to copy the first
    (via init_base's pair-match mask).  Saves model and log-norm to disk.
    Returns (trained model, log_norm).
    """
    model = NAQS(num_site=num_site * 2, net_depth=net_depth, my_type=my_type, my_device=my_device)
    optimizer = torch.optim.Adam(model.parameters(), lr=l_r)
    TS = TensorSampling(num_site, beta, my_type, my_device)
    psi0, log_norm = TS.psi0()
    psi_func = lambda s: TS.psi_func(psi0, s)
    # target amplitude over the doubled register, normalised by the model's
    loss_func = lambda samples, beta: - init_base(psi_func, samples, num_site) / model.psi_total(samples)
    es, ee = 0, num_epoch
    model_out = learning(model=model, optimizer=optimizer, loss_func=loss_func, model0=None, beta=beta, epoch_start=es, epoch_end=ee, batch_size=batch_size, my_device=my_device, memo=memo, save_path=save_path, B_anneal=B_anneal)
    torch.save(model_out, save_path + memo + '_model_' + '.pth')
    torch.save(log_norm, save_path + memo + '_log_norm_' + '.pth')
    return model_out, log_norm
def main():
    """Sweep a small grid of hyper-parameters and fit the initial state
    for each setting (results land under ./data/init/<run-tag>/)."""
    import os
    batch_size = 10000
    # l_r = 0.01
    beta_c = 0.44068679350977147
    beta = beta_c
    num_epoch = 4000
    my_type = torch.float64
    my_device = torch.device('cuda:4')
    # num_site = 6
    # net_depth = 3
    B_anneal = 0.95
    layer_i = 0
    mm = 'init'
    for num_site in [6]:
        for l_r in [0.1]:
            for net_depth in [1]:
                for batch_size in [1000]:
                    save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
                        num_site, net_depth, batch_size, l_r, B_anneal, layer_i)
                    # model0 = torch.load(save_path + 'init_model_.pth')
                    if not os.path.exists(save_path):
                        os.makedirs(save_path)
                    model0, log_norm0 = learn_init_state(
                        num_site=num_site,
                        # NOTE(review): hard-coded 1 ignores the net_depth
                        # loop variable -- confirm this is intentional
                        net_depth=1,
                        beta=beta,
                        batch_size=batch_size,
                        num_epoch=num_epoch,
                        l_r=l_r,
                        my_type=my_type,
                        my_device=my_device,
                        save_path=save_path,
                        B_anneal=B_anneal,
                        model0=None
                    )
def test():
import os
batch_size = 1000
# l_r = 0.01
beta_c = 0.44068679350977147
beta = beta_c
num_epoch = 10000
my_type = torch.float64
my_device = torch.device('cuda:4')
num_site = 6
net_depth = 1
B_anneal = 0.95
layer_i = 0
l_r = 0.08
mm = 'init'
save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
6, 1, batch_size, 0.1, B_anneal, layer_i)
model = torch.load(save_path + 'init_model_.pth')
save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
num_site, net_depth, batch_size, l_r, B_anneal, layer_i)
if not os.path.exists(save_path):
os.makedirs(save_path)
init_base_state(num_site=num_site, net_depth=net_depth, beta=beta, batch_size=batch_size, num_epoch=num_epoch, l_r=l_r, my_type=my_type, my_device=my_device, save_path=save_path, memo='init', B_anneal=B_anneal)
def stitch_state(n, psi_func1, psi_func2, s, beta):
    """Glue two sub-chain wavefunctions through one shared bond.

    For every sample row, sums over the four (+/-1, +/-1) assignments of
    the boundary spin pair: psi1 on [i, s_left] times psi2 on [s_right, j],
    weighted by the Ising bond factor exp(beta * i * j).
    """
    left = s[:, :n - 1]
    right = s[:, n - 1:]
    beta_t = torch.tensor(beta, dtype=s.dtype, device=s.device)
    edge = torch.ones(s.shape[0], dtype=s.dtype, device=s.device).unsqueeze(1)
    total = torch.zeros([s.shape[0]], dtype=s.dtype, device=s.device)
    for i in (-1.0, 1.0):
        for j in (-1.0, 1.0):
            amp_left = psi_func1(torch.cat([i * edge, left], dim=1))
            amp_right = psi_func2(torch.cat([right, edge * j], dim=1))
            total += amp_left * amp_right * torch.exp(beta_t * i * j)
    return total
def calc_H(num_site, beta):
    """Sparse boundary operator: identity on 2**(num_site-2) states,
    kron-extended with the Boltzmann weights exp(+/-beta) of the boundary
    spin pair, stacked horizontally for the spin-up / spin-down halves."""
    eye = sparse.eye(2 ** (num_site - 2), format='coo')
    up_weights = np.array([np.exp(beta), np.exp(-beta)])
    down_weights = np.array([np.exp(-beta), np.exp(beta)])
    left = sparse.kron(eye, up_weights, format='coo')
    right = sparse.kron(eye, down_weights, format='coo')
    return sparse.hstack([left, right], format='coo')
def marginal_psi_func(n, psi_func1, psi_func2, s):
    """Product wavefunction: psi1 on the first n spins times psi2 on the
    remaining spins of each sample row."""
    return psi_func1(s[:, :n]) * psi_func2(s[:, n:])
def test_stitch():
import os
batch_size = 1000
# l_r = 0.01
beta_c = 0.44068679350977147
beta = beta_c
num_epoch = 10000
my_type = torch.float64
my_device = torch.device('cuda:4')
num_site = 6
net_depth = 1
B_anneal = 0.95
layer_i = 0
l_r = 0.08
mm = 'init'
save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
6, 1, batch_size, 0.1, B_anneal, layer_i)
model1 = torch.load(save_path + 'init_model_.pth')
save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
num_site, net_depth, batch_size, l_r, B_anneal, layer_i)
model2 = torch.load(save_path + 'init_model_.pth')
psi_func1 = model1.psi_total
psi_func2 = model2.psi_total
from lnZ_2d import TensorSampling
TS = TensorSampling(num_site=num_site, beta=beta, my_type=my_type, my_device=my_device)
s = TS.all_sample(num_site=16)
# print(s.shape)
phi = stitch_state(num_site, psi_func1, psi_func2, s, beta)
print(torch.sqrt((phi*phi.conj()).sum()))
H =calc_H(num_site=18, beta=beta)
psi_func = lambda s: marginal_psi_func(num_site, psi_func1, psi_func2, s)
from lnZ_2d import HonNN
HN = HonNN(num_site=num_site, beta=beta, my_device=my_device, my_type=my_type)
norm = HN.calc_norm(H, psi_func, n=num_site*3)
print(norm)
# norm =
def training(save_path, psi_func1, psi_func2, num_site=6, net_depth=1, my_type=torch.float64, my_device=torch.device('cuda:4'), batch_size=1000, num_epoch=10000, l_r=0.01, beta=0.44068679350977147, B_anneal=0.95, layer_i=0, memo='stitch'):
    """Train a NAQS on 3*num_site - 2 spins to represent the stitched
    state of two sub-chain wavefunctions (psi_func1, psi_func2), with the
    target normalised by its norm computed via calc_H / HonNN.
    Saves the model and log-norm to disk; returns (model, log_norm).
    """
    model = NAQS(num_site=num_site * 3 - 2, net_depth=net_depth, my_type=my_type, my_device=my_device)
    optimizer = torch.optim.Adam(model.parameters(), lr=l_r)
    H = calc_H(num_site=num_site * 3, beta=beta)
    HN = HonNN(num_site=num_site, beta=beta, my_device=my_device, my_type=my_type)
    psi_func = lambda s: marginal_psi_func(num_site, psi_func1, psi_func2, s)
    norm = HN.calc_norm(H, psi_func, n=num_site * 3)
    log_norm = torch.log(norm)
    # normalised stitched amplitude relative to the model's amplitude
    loss_func = lambda samples, beta: - stitch_state(num_site,psi_func1, psi_func2, samples, beta) / norm / model.psi_total(samples)
    es, ee = 0, num_epoch
    model_out = learning(model=model, optimizer=optimizer, loss_func=loss_func, model0=None, beta=beta, epoch_start=es,
                         epoch_end=ee, batch_size=batch_size, my_device=my_device, memo=memo, save_path=save_path,
                         B_anneal=B_anneal)
    torch.save(model_out, save_path + memo + '_model_' + '.pth')
    torch.save(log_norm, save_path + memo + '_log_norm_' + '.pth')
    return model_out, log_norm
def test_training():
import os
batch_size = 10000
# l_r = 0.01
beta_c = 0.44068679350977147
beta = beta_c
num_epoch = 20000
my_type = torch.float64
my_device = torch.device('cuda:4')
num_site = 6
net_depth = 2
B_anneal = 0.95
layer_i = 0
l_r = 0.01
mm = 'stitch'
save_path = './data/' + '%s/' % 'init' + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
6, 1, 1000, 0.1, B_anneal, layer_i)
model1 = torch.load(save_path + 'init_model_.pth')
save_path = './data/' + '%s/' % 'init' + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
num_site, 1, 1000, 0.08, B_anneal, layer_i)
model2 = torch.load(save_path + 'init_model_.pth')
psi_func1 = model1.psi_total
psi_func2 = model2.psi_total
save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
num_site, net_depth, batch_size, l_r, B_anneal, layer_i)
if not os.path.exists(save_path):
os.makedirs(save_path)
training(save_path, psi_func1, psi_func2, num_site=num_site, net_depth=net_depth, my_type=my_type, my_device=my_device, batch_size=batch_size, num_epoch=num_epoch, l_r=l_r, beta=beta, B_anneal=B_anneal, layer_i=layer_i, memo='stitch')
def _B_state(num_site, net_depth, beta, batch_size, num_epoch, l_r, model0, my_type, my_device, save_path, memo='_B_', B_anneal=0.999, i=0):
    """Apply the boundary operator B to `model0`'s state and fit a new,
    two-spin-smaller NAQS to the result.  `i` tags the checkpoint files.
    Returns (new model, log of the accumulated norm).
    """
    log_norm = 0
    model0 = model0.to(my_device)
    HN = HonNN(num_site, beta=beta, my_type=my_type, my_device=my_device)
    n = model0.n
    # bound
    H = calc_H(n, beta=beta)
    norm = HN.calc_norm(H, model0.psi_total, n=n)
    log_norm += torch.log(norm)
    # the boundary contraction removes two spins from the register
    model = NAQS(num_site=n-2, net_depth=net_depth, my_type=my_type, my_device=my_device)
    optimizer = torch.optim.Adam(model.parameters(), lr=l_r)
    loss_func = lambda samples, beta: - HN.B_on_middle(model0.psi_total, samples, beta=beta) / norm / model.psi_total(samples)
    es, ee = 0, num_epoch
    model0 = learning(model, model0, optimizer, loss_func, beta=beta, epoch_start=es,epoch_end=ee, batch_size=batch_size,
                      my_device=my_device, memo=memo + '_' + str(i), save_path=save_path,
                      B_anneal=B_anneal)
    torch.save(model0, save_path + memo + '_model_' + str(i) + '.pth')
    torch.save(torch.log(norm), save_path + memo + '_log_norm_' + str(i) + '.pth')
    return model0, log_norm
def calc_lnZ():
import os
batch_size = 10000
# l_r = 0.01
beta_c = 0.44068679350977147
beta = beta_c
num_epoch = 20000
my_type = torch.float64
my_device = torch.device('cuda:4')
num_site = 6
net_depth = 2
B_anneal = 0.95
layer_i = 0
l_r = 0.01
mm = 'test0'
save_path = './data/' + '%s/' % 'init' + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
6, 1, 1000, 0.1, B_anneal, layer_i)
model1 = torch.load(save_path + 'init_model_.pth')
save_path = './data/' + '%s/' % 'init' + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
num_site, 1, 1000, 0.08, B_anneal, layer_i)
model2 = torch.load(save_path + 'init_model_.pth')
psi_func1 = model1.psi_total
psi_func2 = model2.psi_total
log_norm = 0
model0 = model1
for j in range(num_site // 2 - 1):
layer_i += j
save_path = './data/' + '%s/' % mm + 'n%d_d%d_b%d_lr%f_a%f_ly%d/' % (
num_site, net_depth, batch_size, l_r, B_anneal, layer_i)
if not os.path.exists(save_path):
os.makedirs(save_path)
psi_func1 = model0.psi_total
model0, log_norm0 = training(save_path, psi_func1, psi_func2, num_site=num_site, net_depth=net_depth,
my_type=my_type,
my_device=my_device, batch_size=batch_size, num_epoch=num_epoch, l_r=l_r,
beta=beta, B_anneal=B_anneal,
layer_i=layer_i, memo='stitch')
log_norm += log_norm0
for i in range(1, num_site):
model0, log_norm0 = _B_state(num_site, net_depth, beta, batch_size, num_epoch, l_r, model0, my_type,
my_device, save_path, memo='_B_', B_anneal=B_anneal, i=1)
log_norm += log_norm0
torch.save(log_norm, save_path + 'log_norm_final.pth')
if __name__ == '__main__':
# main()
# test()
# test_stitch()
# test_training()
calc_lnZ()
# test_stitch_2()
| Sixuan00/Free-Energy-NN | 2D/New Method/main.py | main.py | py | 15,844 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": ... |
36505762158 | import numpy as np
import matplotlib.pyplot as plt
print('Enter n')
n = int(input())
print('Enter 0 if Left Riemann Sum')
print('Enter 1 if Midpoint Riemann Sum')
print('Enter 2 if Right Riemann Sum')
i = int(input())
f = lambda x: x ** 2
a, b = 1, 2
dx = 1 / n
x_left = np.linspace(a, b - dx, n)
x_midpoint = np.linspace(dx / 2, b - dx / 2, n)
x_right = np.linspace(dx, b, n)
left_riemann_sum = np.sum(f(x_left) * dx)
midpoint_riemann_sum = np.sum(f(x_midpoint) * dx)
right_riemann_sum = np.sum(f(x_right) * dx)
if i == 0:
print('Left Riemann Sum:', left_riemann_sum)
if i == 1:
print('Midpoint Riemann Sum:', midpoint_riemann_sum)
if i == 2:
print('Right Riemann Sum:', right_riemann_sum)
# Create matplotlib
x = np.arange(1, 2, 0.05)
y = x**2
fig, ax = plt.subplots()
ax.plot(x, y, 'r')
ax.set_ylim(bottom=0)
for j in range(n):
x = np.linspace(a, b, n + 1)
y = f(x)
X = np.linspace(a, b, n**2 + 1)
Y = f(X)
if i == 0:
plt.plot(X, Y, 'b')
x_left = x[:-1] # Left endpoints
y_left = y[:-1]
plt.plot(x_left, y_left, 'b.', markersize=10)
plt.bar(x_left, y_left, width=(1 / n), alpha=0.2, align='edge', edgecolor='r')
plt.title('Left Riemann Sum, n = {}'.format(n))
if i == 1:
plt.plot(X, Y, 'b')
x_mid = (x[:-1] + x[1:]) / 2 # Midpoints
y_mid = f(x_mid)
plt.plot(x_mid, y_mid, 'b.', markersize=10)
plt.bar(x_mid, y_mid, width=(1 / n), alpha=0.2, edgecolor='r')
plt.title('Midpoint Riemann Sum, n = {}'.format(n))
if i == 2:
plt.plot(X, Y, 'b')
x_right = x[1:] # Left endpoints
y_right = y[1:]
plt.plot(x_right, y_right, 'b.', markersize=10)
plt.bar(x_right, y_right, width=(-1 / n), alpha=0.2, align='edge', edgecolor='r')
plt.title('Right Riemann Sum, n = {}'.format(n))
plt.show()
| alekseevavlada/Laba_Math | main.py | main.py | py | 1,871 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_nu... |
25364145372 | import cv2 as cv
import numpy as np
'''
Tranformação geometrica:
Rotação:
>getRotationMatrix2D(center, angle, scale) -> Obtem uma matriz de rotação.
==>center -> Indica o centro da imagem (X, Y)
==>angle -> Define o ângulo desejado para rotacionar [0, 360]
==>scale -> Escala para imagem
>warpAffine(src, matriz, dsize) -> Gera a imagem rotacionada.
==>src -> Mesma coisa que o .this
==>matriz -> matriz de rotação.
==>dsize -> Tamanho da imagem rotacionada.
'''
img = cv.imread('1.jpg')
linhas, colunas = img.shape[:2]
matriz = cv.getRotationMatrix2D((colunas / 2, linhas / 2), 90, 1)
imgRot = cv.warpAffine(img, matriz, (colunas, linhas))
print(img.shape)
cv.imshow('Rot', imgRot)
cv.waitKey(0)
cv.destroyAllWindows()
matriz = np.float32([[1, 0, 32], [0, 1, 30]])
imgDesloc = cv.warpAffine(img, matriz, (colunas, linhas))
cv.imshow('Desloc', imgDesloc)
cv.waitKey(0)
cv.destroyAllWindows()
'''
*Ajuste Escala
resize(src, dst, fx, fy, interpolation)
'''
# img = cv.imread('5.jpg')
img = cv.resize(cv.imread('5.jpg'), None, None, .5, .5, cv.INTER_NEAREST)
cv.imshow('Resultado', img)
cv.waitKey(0)
cv.destroyAllWindows()
'''
*Ajuste perpectiva
warpPerspective()
getPerpectiveTransform()
'''
| abelsco/PyExerciciosOpencv | transformacaoGeo.py | transformacaoGeo.py | py | 1,210 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.getRotationMatrix2D",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"l... |
12233028834 | from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
import os
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
from datetime import date
from pandas_datareader import data as pdr
import yfinance as yf
yf.pdr_override()
import bs4 as bs
import requests
import random
def normalize(data):
max_val = data.max()
min_val = data.min()
data=(data-min_val)/(max_val-min_val)
return data
def create_dataset(dataset):
dataX, dataY = [], []
# for i in range(200, len(dataset)):
for i in range(60, len(dataset) - 30):
# print(i)
# print(dataset[i-60:i,:])
# print(dataset[i:i+30,1])
a = dataset[i-60:i,:]
dataX.append(a)
a = dataset[i:i+30,1]
dataY.append(a)
return np.array(dataX), np.array(dataY)
# def create_dataset_with_test(dataset):
# dataX, dataY = [], []
# # for i in range(200, len(dataset)):
# for i in range(60, len(dataset) - 28):
# a = dataset[i-60:i,:]
# dataX.append(a)
# dataY.append([dataset[i + 7,0],dataset[i + 14,0],dataset[i + 21,0],dataset[i + 28,0]])
# return np.array(dataX[0:int(len(dataX) * .75)]), np.array(dataY[0:int(len(dataY) * .75)]),np.array(dataX[int(len(dataX) * .75):1]),np.array(dataY[int(len(dataY) * .75):1])
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
while (1):
# ticker = input("Enter a ticker to predict or type 'quit' to quit: ")
ticker = "AMD"
if ticker == "quit":
break
ticker = ticker.upper()
today = date.today()
df = pdr.get_data_yahoo(ticker, start="1970-01-01" , end=today)
df.columns = df.columns.str.replace(' ', '')
data = df.iloc[:,0:5].astype('float32').values
max_val = data.max()
min_val = data.min()
data=(data-min_val)/(max_val-min_val)
# datatest = data[2000:]
# data = data[:2000]
# print(data)
# print("======")
# print(datatest)
# index = int(data.shape[0] * 0.75)
# print(index)
# print(int(len*0.75))
# datatrain = data[:index,:]
# datatest = data[index:,:]
# print(datatest)
# print("========")
# print(datatrain)
trainX, trainY = create_dataset(data)
# testX, testY = create_dataset(datatest)
# print("train")
# print(trainX)
# print("test")
# print(testX)
model = Sequential()
# model.add(LSTM(1000, return_sequences=True, input_shape=(60,5)))
# model.add(Dense(500))
# model.add(Dropout(0.2))
# model.add(LSTM(1000, return_sequences=False))
# model.add(Dropout(0.5))
# model.add(Dense(30))
model.add(LSTM(100, return_sequences=True, input_shape=(60,5)))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=30))
model.compile(loss='mse', optimizer='adam')
t0 = time.time()
# callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3, mode='min', verbose=1)
hist = model.fit(trainX, trainY, epochs=20, batch_size=64, validation_data=[testX,testY], verbose=2)
model.save(f'models_new/{ticker}.h5')
print("Training time:", time.time()-t0)
plt.plot(hist.history['loss'], label='loss')
# plt.plot(hist.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
today = date.today()
data = pdr.get_data_yahoo(ticker, start="1970-01-01" , end=today)
data = data.iloc[:,0:5].astype('float32').values
rand = random.randint(1000,(data.shape[0]-1000)) * -1
print(rand)
temp = data[rand:rand+30:,1]
max_val = data.max()
min_val = data.min()
data=(data-min_val)/(max_val-min_val)
data = data[rand-60:rand,:]
x = []
x.append(data)
dataPredict = np.array(x)
print("Predicting...")
prediction = model.predict(dataPredict)
prediction = prediction * (max_val - min_val) + min_val
data = data * (max_val - min_val) + min_val
prediction = prediction[0]
totaldiff = 0
for i in range(30):
diff = prediction[i] - temp[i]
print(f'{diff:.2f}% difference')
totaldiff += diff
totaldiff /= 30
print(f'{totaldiff:.2f}% average difference')
x = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29]
plt.plot(x,temp, color = 'black', label = 'Stock Price')
plt.plot(x,prediction, color = 'blue', label = 'Prediction Price')
plt.xlabel('Time')
plt.ylabel('Stock Price')
plt.legend()
plt.show()
break
| markb2575/AI-Stock-Predicter | predict-weekly/weekly.py | weekly.py | py | 4,674 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yfinance.pdr_override",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today... |
21150674567 | from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.http import HttpResponseRedirect, Http404
from django.urls import reverse
from django.contrib import messages
# import model from another module
# import Users model from authentication module
from authentication.models import Users, AccountType
# import model
from .models import Schools
# Create your views here.
def index(request, id):
# fetch all the schools
instance = get_object_or_404(Schools, id=id)
context = {'instance':instance}
return render(request, 'index.html', context)
# School registration page
def register(request):
if request.method == 'POST':
name = request.POST.get('name' or None)
address = request.POST.get('address' or None)
state = request.POST.get('state' or None)
lga = request.POST.get('lga' or None)
logo = request.FILES.get('logo' or None)
school_type = request.POST.get('school_type' or None)
school = Schools()
school.name = name
school.address = address
school.state = state
school.lga = lga
school.logo = logo
school.school_type = school_type
school.save()
if school is not None:
return HttpResponseRedirect(reverse('administrator:register'))
else:
return render(request, 'register.html')
# Show all School
def all_school(request):
schools = Schools.objects.all()
context = {'schools': schools, 'instance':instance}
return render(request, 'all_school.html', context)
# view a specific school
def view_school(request, id):
# fetch all the schools
instance = get_object_or_404(Schools, id=id)
# fetch all the school admin
# school_admin = get_object_or_404(Users(), id=id)
# Register School admin
if request.method == 'POST':
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
middlename = request.POST.get('middlename')
email = request.POST.get('email')
account_type = AccountType.objects.get(id=2)
gender = request.POST.get('gender')
school_id = id
s_admin = Users()
s_admin.first_name = first_name
s_admin.last_name = last_name
s_admin.middlename = middlename
s_admin.email = email
s_admin.account_type = account_type
s_admin.gender = gender
s_admin.school_id = school_id
s_admin.save()
if s_admin is not None:
return HttpResponseRedirect(reverse('administrator:view_school', args=[id]))
else:
return render(request, 'view_school.html')
context = {'instance':instance}
return render(request, 'view_school.html', context)
| Optisoftdev/S5 | administrator/views.py | views.py | py | 2,765 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Schools",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
},
{
"api... |
19036931812 | import logging
import zmq
from cifsdk.actor.manager import Manager as _Manager
from cifsdk.actor import Actor as Enricher
from .constants import TRACE, ENRICHER_ADDR, ENRICHER_SINK_ADDR, \
LOGLEVEL
logger = logging.getLogger(__name__)
logger.setLevel(LOGLEVEL)
if TRACE:
logger.setLevel(logging.DEBUG)
class Manager(_Manager):
def __init__(self, context, threads=0):
if threads > 0:
_Manager.__init__(self, Enricher, threads)
self.socket = context.socket(zmq.PUSH)
self.socket.bind(ENRICHER_ADDR)
self.socket.SNDTIMEO = 5000
self.socket.RCVTIMEO = 5000
self.socket.setsockopt(zmq.LINGER, 3)
self.sink_s = context.socket(zmq.PULL)
self.sink_s.bind(ENRICHER_SINK_ADDR)
| csirtgadgets/cif-v5 | cif/enricher/manager.py | manager.py | py | 765 | python | en | code | 61 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "constants.LOGLEVEL",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "constants.TRACE",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "logging.DEBU... |
33628626994 | from django.http.response import HttpResponse
from django.shortcuts import redirect, render
from django.contrib import messages
from .models import *
# Create your views here.
def index(request):
#put in something to see if the user is already logged in
if "user" in request.session:
context ={
'user':User.objects.get(id=request.session['user'])
}
return render(request, 'index.html', context)
# return redirect('/posts')
return render(request, 'index.html')
def events(request):
#put in something to see if the user is already logged in
if "user" in request.session:
context ={
'user':User.objects.get(id=request.session['user'])
}
return render(request, 'events.html', context)
# return redirect('/posts')
return render(request, 'events.html')
def location(request):
#put in something to see if the user is already logged in
if "user" in request.session:
context ={
'user':User.objects.get(id=request.session['user'])
}
return render(request, 'location.html', context)
# return redirect('/posts')
return render(request, 'location.html')
def about(request):
#put in something to see if the user is already logged in
if "user" in request.session:
userid = request.session["user"]
context ={
'user':User.objects.get(id=userid)
}
return render(request, 'about.html', context)
# return redirect('/posts')
return render(request, 'about.html')
def gallery(request):
#put in something to see if the user is already logged in
if "user" in request.session:
context ={
'user':User.objects.get(id=request.session['user'])
}
return render(request, 'gallery.html', context)
# return redirect('/posts')
return render(request, 'gallery.html')
def login(request):
return render(request, 'login.html')
def register(request):
return render(request, 'register.html')
def register_action(request):
if request.method == 'POST':
errors = User.objects.registration_validator(request.POST)
if len(errors)>0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/register')
else:
hashedpw =bcrypt.hashpw(request.POST['pword'].encode(), bcrypt.gensalt()).decode()
newUser = User.objects.create(
first_name=request.POST['fname'],
last_name=request.POST['lname'],
email=request.POST['email'],
password=hashedpw,
)
request.session['user'] = newUser.id
return redirect('/')
else:
return redirect("/")
def login_action(request):
if request.method == 'POST':
errors = User.objects.login_validator(request.POST)
if len(errors)>0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/login')
else:
LoggedUser = User.objects.get(email=request.POST['email'])
request.session['user'] = LoggedUser.id
return redirect('/')
else:
return redirect('/')
def logout(request):
#request.session.clear()
#request.session.flush()
#request.session = {}
try:
del request.session['user']
except KeyError:
pass
return redirect('/')
def posts(request):
userid = request.session["user"]
context ={
'allposts':Post.objects.all(),
'user':User.objects.get(id=userid)
}
return render(request, 'posts.html', context)
def update(request, userid):
loggedin_userid = request.session["user"]
context={
'username':User.objects.get(id=loggedin_userid)
}
return render(request,"myaccount.html",context)
def process_update(request, userid):
loggedin_userid = request.session["user"]
if request.method == 'POST':
errors = User.objects.update_validator(request.POST)
if len(errors)>0:
for key, value in errors.items():
messages.error(request, value)
return redirect(f'/update/{loggedin_userid}')
else:
user=User.objects.get(id=userid)
user.firstname=request.POST['fname']
user.lastname=request.POST['lname']
user.email=request.POST['email']
user.save()
fname = request.POST['fname']
return redirect(f'/update/{loggedin_userid}')
else:
return redirect('/')
def view_user(request, userid):
loggedin_userid = request.session["user"]
context ={
'allposts':Post.objects.filter(user__id=userid),
'username':User.objects.get(id=userid),
'userid':User.objects.get(id=loggedin_userid)
}
return render(request, 'user.html', context)
def addlike(request, post_id):
user = User.objects.get(id=request.session['user'])
post = Post.objects.get(id=post_id)
user.likes.add(post)
return redirect('/posts')
def addpost(request):
user = User.objects.get(id=request.session['user'])
if request.method == 'POST':
errors = Post.objects.post_validator(request.POST)
if len(errors)>0:
for key, value in errors.items():
messages.error(request, value)
return redirect('/posts')
else:
newquote = Post.objects.create(
text = request.POST['text'],
user = user,
)
return redirect('/posts')
else:
return redirect('/')
def delete(request, post_id):
Post.objects.get(id=post_id).delete()
return redirect('/posts')
| chatbot6000/Troop44 | forumapp/views.py | views.py | py | 5,835 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name"... |
36652176172 |
"""
This is meant to be executed using mpirun. It is called as a subprocess
to run an MPI test.
"""
if __name__ == '__main__':
import sys
import os
import traceback
from mpi4py import MPI
from testflo.test import Test
from testflo.cover import setup_coverage, save_coverage
from testflo.qman import get_client_queue
from testflo.options import get_options
exitcode = 0 # use 0 for exit code of all ranks != 0 because otherwise,
# MPI will terminate other processes
queue = get_client_queue()
os.environ['TESTFLO_QUEUE'] = ''
setup_coverage(get_options())
try:
try:
comm = MPI.COMM_WORLD
test = Test(sys.argv[1])
test.nocapture = True # so we don't lose stdout
test.run()
except:
print(traceback.format_exc())
test.status = 'FAIL'
test.err_msg = traceback.format_exc()
# collect results
results = comm.gather(test, root=0)
if comm.rank == 0:
total_mem_usage = sum(r.memory_usage for r in results)
test.memory_usage = total_mem_usage
# check for errors and record error message
for r in results:
if test.status != 'FAIL' and r.status in ('SKIP', 'FAIL'):
test.err_msg = r.err_msg
test.status = r.status
if r.status == 'FAIL':
break
save_coverage()
except Exception:
test.err_msg = traceback.format_exc()
test.status = 'FAIL'
finally:
sys.stdout.flush()
sys.stderr.flush()
if comm.rank == 0:
queue.put(test)
| naylor-b/testflo | testflo/mpirun.py | mpirun.py | py | 1,731 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "testflo.qman.get_client_queue",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "testflo.cover.setup_coverage",
"line_number": 25,
"usage_type": "call"
},
{
"api_na... |
4897008783 | from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import Group
from django.contrib.auth.models import PermissionsMixin
from django.core.exceptions import ValidationError
from django.core.validators import FileExtensionValidator
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from app_settings.models import SiteSettings
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin):
image_validator = FileExtensionValidator(
allowed_extensions=['png', 'jpg', 'gif', 'jpeg'],
message=_('Расширение не поддерживается. Разрешенные расширения .jpg .gif .png .jpeg')
)
def validate_image_size(fieldfile_obj):
settings: SiteSettings = SiteSettings.objects.all().first()
megabyte_limit = settings.avatar_image_max_size_mb
filesize = fieldfile_obj.file.size
if filesize > megabyte_limit * 1024 * 1024:
raise ValidationError(
_("Вы не можете загрузить аватар размером > {} MB").format(settings.avatar_image_max_size_mb)
)
email = models.EmailField(
unique=True,
verbose_name='E-mail'
)
full_name = models.CharField(
max_length=254,
verbose_name=_('Полное имя')
)
date_joined = models.DateTimeField(
auto_now_add=True,
verbose_name=_('Дата регистрации')
)
is_active = models.BooleanField(
default=True,
verbose_name=_('Активен')
)
avatar_file = models.ImageField(
verbose_name=_('Аватар'),
upload_to='images/user_avatars/',
null=True,
blank=True,
validators=[validate_image_size, image_validator]
)
phoneNumberRegex = RegexValidator(
regex=r"^\d{10}$"
)
phoneNumber = models.CharField(
validators=[phoneNumberRegex],
max_length=10,
unique=True,
verbose_name=_('Телефон')
)
is_staff = models.BooleanField(
default=False,
verbose_name=_('Сотрудник')
)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
return f'{self.email} ({self.full_name})'
class Meta:
ordering = ['id']
verbose_name = _('пользователь')
verbose_name_plural = _('пользователи')
db_table = 'users'
class ProxyGroups(Group):
class Meta:
proxy = True
verbose_name = _('группа')
verbose_name_plural = _('группы')
| veresen01/django-shop | ___shop___/app_users/models.py | models.py | py | 2,768 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.base_user.AbstractBaseUser",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.PermissionsMixin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.core.validators.FileExtensionValidator",
"l... |
13989507368 | from collections import deque
class Solution:
def averageOfLevels(self, root):
queue = deque([(root, 0)])
prev_lev = -1
sum_lev = 0
cnt_lev = 0
avg_lev = []
while queue:
node, lev = queue.popleft()
if lev != prev_lev:
if cnt_lev > 0:
avg_lev.append(sum_lev / cnt_lev)
sum_lev = 0
cnt_lev = 0
prev_lev = lev
sum_lev += node.val
cnt_lev += 1
for child in ((node.left, node.right)):
if child:
queue.append((child, lev+1))
avg_lev.append(sum_lev / cnt_lev)
return avg_lev | dariomx/topcoder-srm | leetcode/zero-pass/facebook/average-of-levels-in-binary-tree/Solution.py | Solution.py | py | 716 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
}
] |
74572677542 | # Implement user-defined window size for the play. For example, an user
# could use a keyboard to input different values of widths and heights.
# Accordingly, the screen will display in different sizes regarding the user's input.
import cv2
print(cv2.__version__)
cam = cv2.VideoCapture(0)
width = int(input('Desired width: '))
height = int(input('Desired height: '))
cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
while True:
ret,frame = cam.read()
cv2.imshow('frame', frame)
cv2.moveWindow('frame', 0,0)
if cv2.waitKey(1) == ord('q'):
break
cam.release() | Gabrielmbl/csci380 | lab3/lab3b_gl.py | lab3b_gl.py | py | 623 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.__version__",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2... |
22530241228 | from itertools import zip_longest
from typing import Optional
import numpy as np
import pytest
import gym
from gym.spaces import Box, Graph, utils
from gym.utils.env_checker import data_equivalence
from tests.spaces.utils import TESTING_SPACES, TESTING_SPACES_IDS
TESTING_SPACES_EXPECTED_FLATDIMS = [
# Discrete
3,
3,
# Box
1,
4,
2,
2,
2,
# Multi-discrete
4,
10,
# Multi-binary
8,
6,
# Text
6,
6,
6,
# Tuple
9,
7,
10,
6,
None,
# Dict
7,
8,
17,
None,
# Graph
None,
None,
None,
# Sequence
None,
None,
None,
]
@pytest.mark.parametrize(
["space", "flatdim"],
zip_longest(TESTING_SPACES, TESTING_SPACES_EXPECTED_FLATDIMS),
ids=TESTING_SPACES_IDS,
)
def test_flatdim(space: gym.spaces.Space, flatdim: Optional[int]):
"""Checks that the flattened dims of the space is equal to an expected value."""
if space.is_np_flattenable:
dim = utils.flatdim(space)
assert dim == flatdim, f"Expected {dim} to equal {flatdim}"
else:
with pytest.raises(
ValueError,
):
utils.flatdim(space)
@pytest.mark.parametrize("space", TESTING_SPACES, ids=TESTING_SPACES_IDS)
def test_flatten_space(space):
"""Test that the flattened spaces are a box and have the `flatdim` shape."""
flat_space = utils.flatten_space(space)
if space.is_np_flattenable:
assert isinstance(flat_space, Box)
(single_dim,) = flat_space.shape
flatdim = utils.flatdim(space)
assert single_dim == flatdim
elif isinstance(flat_space, Graph):
assert isinstance(space, Graph)
(node_single_dim,) = flat_space.node_space.shape
node_flatdim = utils.flatdim(space.node_space)
assert node_single_dim == node_flatdim
if flat_space.edge_space is not None:
(edge_single_dim,) = flat_space.edge_space.shape
edge_flatdim = utils.flatdim(space.edge_space)
assert edge_single_dim == edge_flatdim
else:
assert isinstance(
space, (gym.spaces.Tuple, gym.spaces.Dict, gym.spaces.Sequence)
)
@pytest.mark.parametrize("space", TESTING_SPACES, ids=TESTING_SPACES_IDS)
def test_flatten(space):
"""Test that a flattened sample have the `flatdim` shape."""
flattened_sample = utils.flatten(space, space.sample())
if space.is_np_flattenable:
assert isinstance(flattened_sample, np.ndarray)
(single_dim,) = flattened_sample.shape
flatdim = utils.flatdim(space)
assert single_dim == flatdim
else:
assert isinstance(flattened_sample, (tuple, dict, Graph))
@pytest.mark.parametrize("space", TESTING_SPACES, ids=TESTING_SPACES_IDS)
def test_flat_space_contains_flat_points(space):
"""Test that the flattened samples are contained within the flattened space."""
flattened_samples = [utils.flatten(space, space.sample()) for _ in range(10)]
flat_space = utils.flatten_space(space)
for flat_sample in flattened_samples:
assert flat_sample in flat_space
@pytest.mark.parametrize("space", TESTING_SPACES, ids=TESTING_SPACES_IDS)
def test_flatten_roundtripping(space):
"""Tests roundtripping with flattening and unflattening are equal to the original sample."""
samples = [space.sample() for _ in range(10)]
flattened_samples = [utils.flatten(space, sample) for sample in samples]
unflattened_samples = [
utils.unflatten(space, sample) for sample in flattened_samples
]
for original, roundtripped in zip(samples, unflattened_samples):
assert data_equivalence(original, roundtripped)
| openai/gym | tests/spaces/test_utils.py | test_utils.py | py | 3,718 | python | en | code | 33,110 | github-code | 36 | [
{
"api_name": "gym.spaces",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "gym.spaces.utils.flatdim",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "gym.spaces.u... |
36860255897 | __author__ = 'Jonny'
__date__ = '2018-03-07'
__location__ = '西安'
# -*- coding: utf-8 -*-
import time
import login
import check
import requests
import station
import booking
#---------------------------- 登录账户--------------------------------------------------
def logIn(request):
state = 1
while(state != 0):
print('请登录:')
state = login.login(request)
#----------------------------检测余票-------------------------------------------------
def checkStandbyTicket(request,city_from,city_to,train_date):
result= check.ticket_infor(request,1,city_from,city_to,train_date)
train_no = input('请输入需订车次:')
print('#######################################################################################')
count = 0
#循环检测余票
while True:
count += 1
print('小J玩命查询中(%s)........'%count)
if count % 100 == 0:
autoCheck = input('是否继续自动查询,或切换车次:(Y/N/Z105)')
if (autoCheck == 'N' or autoCheck == 'n' ):
print('已终止自动查询!')
return None
if (autoCheck != 'N' or autoCheck != 'n' or autoCheck != 'Y' or autoCheck != 'y'):
train_no = autoCheck
time.sleep(2) #延时2秒,避免重复查询过快
for tempData in result:
if tempData[3] == train_no and tempData[11] =='Y':
print('(注意:若无坐席类型后无余座或为‘无’均表示无剩余席位!)\n', '车次:', tempData[3], '\t始发站:', station.sel_station(tempData[6]),
'\t终点站:',station.sel_station(tempData[7]), '\n乘车日期:',tempData[13], '\t是否当日到达:', tempData[18],
'\t发车时间:', tempData[8], '\t到站时间:', tempData[9], '\t耗时:', tempData[10], '\n特等商务座:', tempData[32], '\t一等座:',
tempData[31], '\t二等座:',tempData[30], '\n高级软卧:', tempData[21], '\t动卧:', tempData[33], '\t硬卧:', tempData[28], '\t硬座:',
tempData[29], '\t无座:', tempData[36])
break
if tempData[3] == train_no and tempData[11] =='IS_TIME_NOT_BUY':
print('无该车次信息或停运!')
break
if tempData[3] == train_no and tempData[11] =='N':
result = check.ticket_infor(request,0,city_from,city_to,train_date)
break
if tempData[11] == 'Y':
return train_no
if __name__ =='__main__':
# 确保是同一个浏览器的操作
request = requests.session()
# ---------------------------输入趁车信息-------------------------------------------------
city_from = input('请输入始发城市:')
city_to = input('请输入终点站:')
train_date = input('请输入乘车时间(格式:2018-01-01):')
# ----------------------------检测余票-------------------------------------------------
while True:
train_no = checkStandbyTicket(request, city_from, city_to, train_date)
if train_no !=None:
print('是否登录购票:')
if input('是否登录购票:(Y/N)') == 'Y' or 'y':
# ---------------------------- 登录账户--------------------------------------------------
logIn(request)
# ---------------------------- 车票下单(由于某些参数问题,暂时无法实现所有车站自动预定)--------------------------------------------------
booking(request, city_from, city_to, train_no)
else:
if input('是否退出:(Y/N)') == 'Y' or 'y':
print('谢谢使用12306爬虫订票系统!')
break
| JonnyLe/Python-12306- | 12306.py | 12306.py | py | 3,866 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "login.login",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "check.ticket_infor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "station.sel_station",
"... |
3533175991 | import gym
from random import sample
from keras import optimizers, Sequential
from keras.layers import Dense
from numpy import random, argmax
import numpy as np
from gym.spaces import Box, Discrete
import logging
from keras.utils import to_categorical
import sys
import pickle
import logging
from actor_evaluator import evaluate_actor, optimal_actions
import math
from actor_evaluator import evaluate_actor
class DQNLearnerFrozenLake:
def __init__(self, env, epsilon=.3, N=100, sample_size=32, lr=.2, discount=.96, obs_preprocess=lambda obs: obs):
self.preprocess = obs_preprocess
self.env = env
self._state = None
self.reset()
self.epsilon = epsilon
self.Qnn = make_dnn_frozen(env)
self.bad_memory = []
self.good_memory = []
self.temp_memory = []
self.memory_size = N
self.sample_size = sample_size
self.lr = lr
self.discount = discount
self.silent = True
def reset(self, good=False):
self.state = self.preprocess(self.env.reset())
if good:
for trans in self.temp_memory:
self._remember(trans, bad=False, dumping=True)
self.temp_memory = []
def step(self, learning):
exploring = random.uniform() < self.epsilon
if learning and exploring:
action = self.env.action_space.sample()
else: # exploiting
action = self.optimal_action()
obs, reward, done, info = self.env.step(action)
obs = self.preprocess(obs)
prev_state = self.state
self.state = obs
if learning:
transition = (prev_state, action, reward, self.state, done)
self._remember(transition)
self._update_q()
if done:
self.reset(reward > 0)
return reward, done
def _update_q(self):
X, y = self._get_x_y(self.sample_size * 2, self.sample_size)
# ®self.Qnn.optimizer.lr.assign(self.lr)
error = self.Qnn.train_on_batch(X, y)
outprint = ''
for i in range(len(self.Qnn.metrics_names)):
outprint += f'{self.Qnn.metrics_names[i]}: {error[i]} '
logging.debug(outprint)
def _get_x_y(self, good, bad):
good_replay = sample(self.good_memory, min(good, len(self.good_memory)))
bad_replay = sample(self.bad_memory, min(bad, len(self.bad_memory)))
replay = np.stack(good_replay + bad_replay, axis=0)
states = np.array([a[0] for a in replay])
new_states = np.array([a[3] for a in replay])
Q = self.Qnn.predict(states)
Q_new = self.Qnn.predict(new_states)
replay_size = len(replay)
X = np.empty((replay_size, len(states[0])))
y = np.empty((replay_size, len(Q[0])))
for i in range(replay_size):
state_r, action_r, reward_r, new_state_r, done_r = replay[i]
target = Q[i]
target[action_r] = reward_r
# If we're done the utility is simply the reward of executing action a in
# state s, otherwise we add the expected maximum future reward as well
if not done_r:
target[action_r] += self.discount * np.amax(Q_new[i])
X[i] = state_r
y[i] = target
return X, y
def silent_level(self):
if self.silent:
return 0
else:
return 2
def optimal_action(self, state=None):
if state is None:
state = self.state
actions = self.Qnn.predict(np.expand_dims(state, axis=0))[0]
assert max(actions) != float('nan') and max(actions) != float('inf')
return argmax(actions)
def _remember(self, transition, bad=True, dumping=False):
if not dumping:
self.temp_memory.append(transition)
if not bad:
self.good_memory.append(transition)
else:
self.bad_memory.append(transition)
while len(self.good_memory) > self.memory_size * 2:
self.bad_memory.append(self.good_memory.pop(0))
while len(self.bad_memory) > self.memory_size:
self.bad_memory.pop(0)
@property
def state(self):
return self._state
@state.setter
def state(self, value):
if type(value) is np.ndarray:
self._state = value
elif type(value) is int or type(value) is np.int64:
arr = np.ndarray((1,))
assert len(arr) == 1
arr[0] = value
self._state = arr
else:
raise ValueError(f'got value {value} of type {type(value)}')
def learn(self, steps, static_epsilon=None):
i = 0
self.reset()
while i < steps:
epsilon = (steps - i) / steps
self.epsilon = epsilon
if static_epsilon is not None:
self.epsilon = static_epsilon
i += 1
self.step(True)
def preproc(obs):
return to_categorical(obs, num_classes=16)
if __name__ == '__main__':
import pickle
env_name = 'FrozenLake-v0'
env = gym.make(env_name)
actor = DQNLearnerFrozenLake(env, obs_preprocess=preproc)
file = open("pickles/dqn_f.pkl", 'wb+')
pickle.dump(actor, file)
def make_dnn_frozen(env):
adam = optimizers.Adam(learning_rate=0.0003)
input_dim = get_dimension(env.observation_space, False)
output_dim = get_dimension(env.action_space, True)
input_dim = 16
model = Sequential()
# model.add(Embedding(1000, 64, input_length=10))
model.add(Dense(units=10, activation='relu', input_dim=input_dim))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=output_dim, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer=adam,
metrics=['mse', 'accuracy'])
return model
def get_dimension(space, action):
if type(space) is Box:
return space.shape[0]
elif type(space) is Discrete:
if len(space.shape) > 0:
raise ValueError(f'unexpected val {len(space.shape)}')
elif action:
return space.n
else:
return 1
else:
raise ValueError(f'Unexpected type {type(space)}') | nsragow/RlGym | dqn_frozen_categorical.py | dqn_frozen_categorical.py | py | 6,297 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.uniform",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "random.sample",
"... |
30467024197 | from collections import deque
class SnakeGame:
def __init__(self, width: int, height: int, food: List[List[int]]):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
"""
self.snake = deque()
self.snake.append([0, 0])
self.snake_set = set()
self.snake_set.add((0, 0))
self.score = 0
self.food = deque()
for ele in food:
self.food.append(ele)
self.width = width
self.height = height
self.directions = {"U": [-1, 0], "L": [0, -1], "R": [0, 1], "D": [1, 0]}
def move(self, direction: str) -> int:
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
"""
head = self.snake[0]
dx = self.directions[direction][0]
dy = self.directions[direction][1]
new_head = [head[0] + dx, head[1] + dy]
new_head_tuple = (head[0] + dx, head[1] + dy)
# out of boundary
if new_head[0] < 0 or new_head[0] >= self.height or new_head[1] < 0 or new_head[1] >= self.width:
return -1
# eat food
if len(self.food) > 0 and self.food[0] == new_head:
self.score += 1
self.food.popleft()
else: # 如果不吃,尾巴就要删掉,如果吃,尾巴不动
tail = self.snake.pop()
self.snake_set.remove((tail[0], tail[1]))
# add head 最后加头,因为尾巴删掉后,头正好占了尾巴的位置是允许的
if new_head_tuple in self.snake_set:
return -1
self.snake.appendleft(new_head)
self.snake_set.add(new_head_tuple)
return self.score
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction)
width = 3
height = 2
food = [[1, 2], [0, 1]]
snake = SnakeGame(width, height, food)
print(snake.move("R"))
print(snake.move("D"))
print(snake.move("R"))
print(snake.move("U"))
print(snake.move("L"))
print(snake.move("U"))
| dundunmao/LeetCode2019 | 353. Design Snake Game.py | 353. Design Snake Game.py | py | 2,425 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 20,
"usage_type": "call"
}
] |
24549535365 | import logging
from pytest import raises, fixture
from kiwi_keg.image_definition import KegImageDefinition
from kiwi_keg.exceptions import KegError
class TestKegImageDefinition:
@fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def setup(self):
self.keg_definition = KegImageDefinition(
image_name='leap-jeos/15.2', recipes_roots=['../data'], image_version='1.0.0'
)
def test_setup_raises_recipes_root_not_existing(self):
with raises(KegError):
KegImageDefinition(
image_name='leap-jeos/15.2', recipes_roots=['artificial']
)
def test_setup_raises_image_not_existing(self):
with raises(KegError) as exception_info:
KegImageDefinition(
image_name='no/such/image', recipes_roots=['../data']
)
assert 'Image source path "no/such/image" does not exist' in \
str(exception_info.value)
def test_populate_raises_yaml_error(self):
with raises(KegError) as exception_info:
keg_definition = KegImageDefinition(
image_name='broken-yaml', recipes_roots=['../data/broken']
)
keg_definition.populate()
assert 'Error parsing image data' in \
str(exception_info.value)
def test_populate_raises_schema_error(self):
with raises(KegError) as exception_info:
keg_definition = KegImageDefinition(
image_name='broken-schema', recipes_roots=['../data/broken']
)
keg_definition.populate()
assert 'Image definition malformed' in \
str(exception_info.value)
def test_populate_raises_config_error(self):
with raises(KegError) as exception_info:
keg_definition = KegImageDefinition(
image_name='broken-config', recipes_roots=['../data/broken']
)
keg_definition.populate()
assert 'does not exist' in \
str(exception_info.value)
def test_populate_raises_overlay_error(self):
with raises(KegError) as exception_info:
keg_definition = KegImageDefinition(
image_name='broken-overlay', recipes_roots=['../data/broken']
)
keg_definition.populate()
assert 'No such overlay files module' in \
str(exception_info.value)
def test_include_logs_missing(self):
keg_definition = KegImageDefinition(
image_name='missing-include/15.2', recipes_roots=['../data'], image_version='1.0.0'
)
with self._caplog.at_level(logging.INFO):
keg_definition.populate()
assert 'Include "platform/notblue" does not exist' in self._caplog.text
def test_check_archive_refs(self):
with self._caplog.at_level(logging.INFO):
self.keg_definition.populate()
del self.keg_definition._data['archives']
self.keg_definition._check_archive_refs()
assert 'Referenced archive "blue.tar.gz" not defined' in self._caplog.text
| SUSE-Enceladus/keg | test/unit/image_definition_test.py | image_definition_test.py | py | 3,122 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "kiwi_keg.image_definition.KegImageDefinition",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 19,
"usage_type": "call"
},
{
"api_name... |
15204139120 | """import os
os.environ["KERAS_BACKEND"] = "theano"
import keras"""
import keras
print(keras.backend.backend())
from flask import request, render_template, redirect, url_for, Flask
import numpy as np
import time, random
from selenium import webdriver
from tensorflow.keras.models import load_model
from sklearn.preprocessing import StandardScaler
std = StandardScaler()
model = load_model('activity_model4_tens_balanced_std.h5')
options = webdriver.ChromeOptions()
options.add_argument('headless')
app = Flask(__name__)
@app.route('/')
def home():
return redirect(url_for("checker"))
map_dic = {2: 'Downstairs',
4: 'Jogging',
1: 'Sitting',
5: 'Standing',
3: 'Upstairs',
0: 'Walking'}
def send_for_prediction(data):
data = np.array(data)
data = std.fit_transform(data)
print(data.shape)
data = data.reshape(1, 20, 3, 1)
prediction = np.argmax(model.predict(data))
print(model.predict(data))
output = map_dic[prediction]
print(output)
return output
cumulated_data = []
time_frame = 20
action = 'checking...'
@app.route('/checker', methods=["POST", "GET"])
def checker():
global cumulated_data, action
if request.method == 'POST':
x = request.form["id_x"]
y = request.form["id_y"]
z = request.form["id_z"]
print(len(cumulated_data), x)
if len(cumulated_data) < time_frame:
try:
cumulated_data.append([float(x), float(y), float(z)])
except:
pass
return render_template('index.html', x=x, y=y, z=z, content=action)
else:
action = send_for_prediction(cumulated_data)
cumulated_data = []
return render_template('index.html', x=x, y=y, z=z, content=action)
else:
return render_template('index.html')
if __name__ == "__main__":
app.run(host='192.168.43.166', port=5000, debug=True)
print('here')
| siddhant230/Fun_Projects | activity_recognition/backend.py | backend.py | py | 2,059 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "keras.backend.backend",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 15,
"usage_type": "call"
},
{
"api_n... |
428329513 | from django.shortcuts import render,redirect
# importamos la libreria generic
from django.views import View
from .models import *
from .forms import *
# Create your views here.
class AlumnoView(View):
def get(self,request):
listaAlumnos = TblAlumno.objects.all()
formAlumno = AlumnoForm()
context = {
'alumnos' : listaAlumnos,
'formAlumno': formAlumno
}
return render(request,'index.html',context)
def post(self, request):
formAlumno = AlumnoForm(request.POST)
if formAlumno.is_valid():
formAlumno.save()
# <process form cleaned data>
return redirect('/')
| Angellvz/DAE-2022-02-LAVENTURA | djangoApp07/django_panel/web/views.py | views.py | py | 689 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.View",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 26,
"usage_type": "call"
}
] |
72182864424 | import Utils
from KernelNPRegression import KernelNPRegression
from RobustNPRegression import RobustNPRegression
from matplotlib import pyplot as plt
def draw_scatter(points, color):
plt.scatter([point[0] for point in points],
[point[1] for point in points],
c=color)
def draw_graphic(points, color):
plt.plot([point[0] for point in points],
[point[1] for point in points],
c=color)
def main():
data = Utils.load('data.csv')
kernel_regression_instance = KernelNPRegression(data)
robust_regression_instance = RobustNPRegression(data)
print('Running KernelNonParametristicRegression...')
kernel_parameters = kernel_regression_instance.learn(data)
print('Finished, parameters:\n' + str(kernel_parameters))
print('Running RobustNonParametristicRegression...')
robust_parameters = robust_regression_instance.learn(data)
print('Finished, parameters:\n' + str(robust_parameters))
delta = 0.05
points = []
for di in data:
points.append(list(di))
draw_scatter(points, 'green')
points = []
minx = min(map(lambda p: p[0], data))
maxx = max(map(lambda p: p[0], data))
p = minx
while p < maxx:
points.append(kernel_regression_instance.evaluate([p, 0.0], kernel_parameters))
p += delta
draw_graphic(points, 'red')
#plt.show()
#points = []
#for di in data:
# points.append(list(di))
#draw_scatter(points, 'green')
points = []
minx = min(map(lambda p: p[0], data))
maxx = max(map(lambda p: p[0], data))
p = minx
while p < maxx:
points.append(robust_regression_instance.evaluate([p, 0.0], robust_parameters))
p += delta
draw_graphic(points, 'blue')
plt.show()
if __name__ == '__main__':
main() | DimaPhil/ML-Hometasks | HW5/main.py | main.py | py | 1,815 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matp... |
8365518894 | from enum import Enum
from typing import Dict, TYPE_CHECKING, List, Union, cast
from ..types import TealType, require_type
from ..errors import TealInputError, verifyTealVersion
from ..ir import TealOp, Op, TealBlock
from .expr import Expr
from .txn import TxnField, TxnExprBuilder, TxnaExprBuilder, TxnObject
from .seq import Seq
if TYPE_CHECKING:
from ..compiler import CompileOptions
class InnerTxnAction(Enum):
Begin = Op.itxn_begin
Submit = Op.itxn_submit
Next = Op.itxn_next
InnerTxnAction.__module__ = "pyteal"
class InnerTxnActionExpr(Expr):
def __init__(self, action: InnerTxnAction) -> None:
super().__init__()
self.action = action
def __str__(self):
return "(InnerTxn{})".format(self.action.name)
def __teal__(self, options: "CompileOptions"):
op = self.action.value
verifyTealVersion(
op.min_version,
options.version,
"TEAL version too low to create inner transactions",
)
return TealBlock.FromOp(options, TealOp(self, op))
def type_of(self):
return TealType.none
def has_return(self):
return False
class InnerTxnFieldExpr(Expr):
def __init__(self, field: TxnField, value: Expr) -> None:
super().__init__()
require_type(value, field.type_of())
self.field = field
self.value = value
def __str__(self):
return "(InnerTxnSetField {} {})".format(self.field.arg_name, self.value)
def __teal__(self, options: "CompileOptions"):
verifyTealVersion(
Op.itxn_field.min_version,
options.version,
"TEAL version too low to create inner transactions",
)
return TealBlock.FromOp(
options, TealOp(self, Op.itxn_field, self.field.arg_name), self.value
)
def type_of(self):
return TealType.none
def has_return(self):
return False
class InnerTxnBuilder:
"""This class represents expressions used to create, modify, and submit inner transactions.
Inner transactions are transactions which applications can dynamically create. Each inner
transaction will appear as a transaction inside of the current transaction being executed.
As of TEAL version 5, only the transaction types :any:`TxnType.Payment`, :any:`TxnType.AssetTransfer`,
:any:`TxnType.AssetConfig`, and :any:`TxnType.AssetFreeze` are allowed. Additionally, not all
fields are allowed to be set. For example, it is not currently allowed to set the rekeyTo field
of an inner transaction.
"""
@classmethod
def Begin(cls) -> Expr:
"""Begin preparation of a new inner transaction.
This new inner transaction is initialized with its sender to the application address (:any:`Global.current_application_address`);
fee to the minimum allowable, taking into account :code:`MinTxnFee` and credit from
overpaying in earlier transactions; :code:`FirstValid`/:code:`LastValid` to the values in
the top-level transaction, and all other fields to zero values.
Requires TEAL version 5 or higher. This operation is only permitted in application mode.
"""
return InnerTxnActionExpr(InnerTxnAction.Begin)
@classmethod
def Next(cls) -> Expr:
"""Begin preparation of a new inner transaction (in the same transaction group).
This new inner transaction is initialized with its sender to the application address (:any:`Global.current_application_address`);
fee to the minimum allowable, taking into account :code:`MinTxnFee` and credit from
overpaying in earlier transactions; :code:`FirstValid`/:code:`LastValid` to the values in
the top-level transaction, and all other fields to zero values.
Requires TEAL version 6 or higher. This operation is only permitted in application mode.
"""
return InnerTxnActionExpr(InnerTxnAction.Next)
@classmethod
def Submit(cls) -> Expr:
"""Execute the current inner transaction.
:any:`InnerTxnBuilder.Begin` and :any:`InnerTxnBuilder.SetField` must be called before
submitting an inner transaction.
This will fail if 256 inner transactions have already been executed, or if the
inner transaction itself fails. Upon failure, the current program will immediately exit and
fail as well.
If the inner transaction is successful, then its effects can be immediately observed by this
program with stateful expressions such as :any:`Balance`. Additionally, the fields of the
most recently submitted inner transaction can be examined using the :any:`InnerTxn` object.
If the inner transaction creates an asset, the new asset ID can be found by looking at
:any:`InnerTxn.created_asset_id() <TxnObject.created_asset_id>`.
Requires TEAL version 5 or higher. This operation is only permitted in application mode.
"""
return InnerTxnActionExpr(InnerTxnAction.Submit)
@classmethod
def SetField(cls, field: TxnField, value: Union[Expr, List[Expr]]) -> Expr:
"""Set a field of the current inner transaction.
:any:`InnerTxnBuilder.Begin` must be called before setting any fields on an inner
transaction.
Note: For non-array field (e.g., note), setting it twice will overwrite the original value.
While for array field (e.g., accounts), setting it multiple times will append the values.
Requires TEAL version 5 or higher. This operation is only permitted in application mode.
Args:
field: The field to set on the inner transaction.
value: The value to that the field should take. This must evaluate to a type that is
compatible with the field being set.
"""
if not field.is_array:
if type(value) is list:
raise TealInputError(
"inner transaction set field {} does not support array value".format(
field
)
)
return InnerTxnFieldExpr(field, cast(Expr, value))
else:
if type(value) is not list:
raise TealInputError(
"inner transaction set array field {} with non-array value".format(
field
)
)
for valueIter in value:
if not isinstance(valueIter, Expr):
raise TealInputError(
"inner transaction set array field {} with non PyTeal expression array element {}".format(
field, valueIter
)
)
return Seq(
*[
InnerTxnFieldExpr(field, cast(Expr, valueIter))
for valueIter in value
]
)
@classmethod
def SetFields(cls, fields: Dict[TxnField, Union[Expr, List[Expr]]]) -> Expr:
"""Set multiple fields of the current inner transaction.
:any:`InnerTxnBuilder.Begin` must be called before setting any fields on an inner
transaction.
Note: For non-array field (e.g., note), setting it twice will overwrite the original value.
While for array field (e.g., accounts), setting it multiple times will append the values.
Requires TEAL version 5 or higher. This operation is only permitted in application mode.
Args:
fields: A dictionary whose keys are fields to set and whose values are the value each
field should take. Each value must evaluate to a type that is compatible with the
field being set.
"""
fieldsToSet = [cls.SetField(field, value) for field, value in fields.items()]
return Seq(fieldsToSet)
InnerTxnBuilder.__module__ = "pyteal"
InnerTxn: TxnObject = TxnObject(
TxnExprBuilder(Op.itxn, "InnerTxn"),
TxnaExprBuilder(Op.itxna, Op.itxnas, "InnerTxna"),
)
InnerTxn.__module__ = "pyteal"
| gconnect/voting-dapp-pyteal-react | venv/lib/python3.8/site-packages/pyteal/ast/itxn.py | itxn.py | py | 8,116 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "ir.Op.itxn_begin",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ir.Op",
"lin... |
4022925396 | import requests
from requests.structures import CaseInsensitiveDict
import base64
import json
from django.conf import settings
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning
)
GITHUB_TOKEN = getattr(settings, "GITHUB_TOKEN", None)
def deploy_done(
git_repository,
target_branch,
repository_path,
deployment,
bef_bluegreen,
bef_canary,
chg_bluegreen,
chg_canary,
container,
tag,
):
basic_url = "https://raw.githubusercontent.com/"
basic_api_url = "https://api.github.com/repos/"
github_token = GITHUB_TOKEN
git_repo = git_repository.split("/")[-2:]
if len(git_repo[1]) > 4 and git_repo[1][-4:] == ".git":
git_repo[1] = git_repo[1][:-4]
chg_container_url = (
basic_url
+ "/".join(git_repo)
+ "/"
+ target_branch
+ "/"
+ repository_path
+ "/kustomization.yaml"
)
chg_container_upload_url = (
basic_api_url
+ "/".join(git_repo)
+ "/contents/"
+ repository_path
+ "/kustomization.yaml"
)
bef_url = (
basic_url
+ "/".join(git_repo)
+ "/"
+ target_branch
+ "/"
+ repository_path
+ "/deploy/"
+ bef_bluegreen
+ "/"
+ bef_canary
+ "/kustomization.yaml"
)
bef_upload_url = (
basic_api_url
+ "/".join(git_repo)
+ "/contents/"
+ repository_path
+ "/deploy/"
+ bef_bluegreen
+ "/"
+ bef_canary
+ "/kustomization.yaml"
)
tar_url = (
basic_url
+ "/".join(git_repo)
+ "/"
+ target_branch
+ "/"
+ repository_path
+ "/deploy/"
+ chg_bluegreen
+ "/"
+ chg_canary
+ "/kustomization.yaml"
)
chg_upload_url = (
basic_api_url
+ "/".join(git_repo)
+ "/contents/"
+ repository_path
+ "/deploy/"
+ chg_bluegreen
+ "/"
+ chg_canary
+ "/kustomization.yaml"
)
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = f"Bearer {github_token}"
resp = requests.get(chg_container_url, headers=headers, verify=False)
target_text = []
next_change = False
for text in resp.text.split("\n"):
if ("- name:") in text and (container) in text:
target_text.append(text)
next_change = True
elif next_change:
target_text.append(f''' newTag: "{tag}"''')
next_change = False
else:
target_text.append(text)
chg_container_text = "\n".join(target_text)
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = f"Bearer {github_token}"
resp = requests.get(bef_url, headers=headers, verify=False)
target = "-".join(deployment.split("-")[1:-2])
target_text = []
target_path = ""
for text in resp.text.split("\n"):
if text.split("/")[-1] == target:
target_path = text
else:
target_text.append(text)
print("target_path :", target_path)
before_text = "\n".join(target_text)
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = f"Bearer {github_token}"
resp = requests.get(tar_url, headers=headers, verify=False)
target_text = []
count = 0
for text in resp.text.split("\n"):
if count == 2:
target_text.append(target_path)
target_text.append(text)
count += 1
chg_text = "\n".join(target_text)
# # test code
# bef_upload_url = basic_api_url + "/".join(git_repo) + "/contents/" + "test.txt"
# print(before_text)
# print(chg_text)
result_code, msg = github_edit_file(
github_url=chg_container_upload_url,
github_token=github_token,
text_content=chg_container_text,
)
if result_code == -1:
return -1, "컨테이너 이미지 변경 실패"
result_code, msg = github_edit_file(
github_url=bef_upload_url, github_token=github_token, text_content=before_text
)
if result_code == -1:
return -1, "GIT 저장소 변경 실패"
result_code, msg = github_edit_file(
github_url=chg_upload_url, github_token=github_token, text_content=chg_text
)
if result_code == -1:
return -1, "GIT 저장소 변경 실패"
return 1, "GIT 변경 성공"
def github_edit_file(github_url, github_token, text_content):
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = f"Bearer {github_token}"
resp = requests.get(github_url, headers=headers, verify=False)
if resp.status_code != 200 and resp.status_code != 201:
return -1, "깃허브 연동 실패"
bef_upload_sha = resp.json()["sha"]
headers = CaseInsensitiveDict()
headers["Accept"] = "application/json"
headers["Authorization"] = f"Bearer {github_token}"
data = {
"message": "Multi Deploy-Go auto-commit",
"content": base64.b64encode(text_content.encode("UTF-8")).decode("ascii"),
"sha": bef_upload_sha,
}
resp = requests.put(
github_url, headers=headers, data=json.dumps(data), verify=False
)
if resp.status_code != 200 and resp.status_code != 201:
return -1, "깃허브 연동 실패"
return 1, "깃허브 변경 완료"
if __name__ == "__main__":
git_repository = "https://github.com/kakaocloudschool/kustomize-test.git"
target_branch = "main"
repository_path = "overlays/prod"
deployment = "deploy-httpd-stable-s"
bef_bluegreen = "blue"
bef_canary = "stable"
chg_bluegreen = "blue"
chg_canary = "canary"
deploy_done(
git_repository=git_repository,
target_branch=target_branch,
repository_path=repository_path,
deployment=deployment,
bef_bluegreen=bef_bluegreen,
bef_canary=bef_canary,
chg_bluegreen=chg_bluegreen,
chg_canary=chg_canary,
)
| kakaocloudschool/Multi_ojigo | api_utils/github_api.py | github_api.py | py | 6,199 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "requests.packages",
"line_number": 8,
"usage_type": "attribute"
},
{
... |
22226448679 | import numpy as np
import pandas as pd
import argparse
def get_tail(hmm, ncRNA, strict=False):
expression = [int(x) for x in hmm.loc[ncRNA].score.split(',')]
if (np.mean(expression) > 1.5) and (len(expression) > 1000):
trim_length = trim_expression(expression, strict)
return trim_length
else:
return False
def trim_expression(expression_list, strict):
trim_length = 0
while True:
if strict:
if np.mean(np.array(expression_list[-100:])) < 1.1:
expression_list = expression_list[:-100]
trim_length += 5000
else:
break
else:
if all(np.array(expression_list[-100:]) == 1):
expression_list = expression_list[:-100]
trim_length += 5000
else:
break
return trim_length
def get_largest_string_non_zero(x):
cum_sum = []
suma= 0
for i in x:
if (i > 1.1):
suma+= 1
else:
suma = 0
cum_sum.append(suma)
max_cum_sum = np.max(cum_sum)
largest_string_non_zero = max_cum_sum/len(x)
return largest_string_non_zero
def is_fractured(transcripto):
x = [int(x) for x in transcripto.split(',')]
if np.max(x) >= 3.9:
return False
largest_string_non_zero = get_largest_string_non_zero(x)
mean_non_zero = np.mean(np.array(x)>1)
if (mean_non_zero > 0.4) and (largest_string_non_zero < 0.2):
return True
else:
return False
def trim_and_split(hmm, ncRNA, split_len = 50, min_len=200, min_portion = 0.2, strict=True,
strand = 'plus'):
current_pos = 0
start_list = []
end_list = []
transcript_length = hmm.loc[ncRNA].end - hmm.loc[ncRNA].start
transcripto = hmm.loc[ncRNA].score
# if strand == 'minus':
# transcripto = transcripto[::-1]
if strict:
transcripto_split = transcripto.split(',')
counts = [int(x) for x in transcripto_split]
largest_non_zero = get_largest_string_non_zero(counts)
percent_zero = np.mean(np.array(counts) == 1)
# if is_fractured(counts):#np.mean(counts) > 1.5:
# split_len = 1e100
#if (np.max(counts)==4):
if (np.max(counts)==4) or (np.quantile(counts, 0.5)>=2.9):
if np.quantile(counts, 0.75) <= 3.1:
transcripto = transcripto.replace('2,'*100, '3,'*100)
transcripto_split = transcripto.split(',')
transcripto_split = ['1' if x == '2' else x for x in transcripto_split]
if np.quantile(counts, 0.75) >= 3.9:
print('enormous!')
transcripto_split = ['1' if x == '3' else x for x in transcripto_split]
elif (largest_non_zero > 0.25) and (percent_zero > 0.5):
split_len = 5
transcripto = ','.join(transcripto_split)
if is_fractured(transcripto):#np.mean(counts) > 1.5:
print('fractured!')
end = len(transcripto.split(','))*50
return [0], [end]
split_transcripts = transcripto.split(','.join(['1']*split_len))
longest = max([len(x) for x in split_transcripts])
high_scores = []
for transcript in split_transcripts:
try:
sc = np.mean([int(x) for x in transcript.strip(',').split(',')])
high_scores.append(sc)
except:
continue
highest = np.max(high_scores)
inherited = False
print(counts)
print(split_transcripts)
len_to_add = 0
for transcript in split_transcripts:
current_longest = ((len(transcript) == longest) and (len(transcript) > 3))
if transcript == ',':
current_pos += (50*split_len)
elif transcript == '':
current_pos += (50*split_len)
else:
transcript_counts = transcript.strip(',').split(',')
if (transcript[0] == ',') and (strand == 'plus'):
current_pos += 50
# if (strand == 'minus') and inherited:
# current_pos += (50*split_len)
transcript_counts = [int(x) for x in transcript_counts]
print(transcript_counts)
current_highest = np.abs(np.mean(transcript_counts) - highest) <= 1e-100
is_max_peak = transcript_counts.count(4) >= 5
transcript_portion = (len(transcript_counts)*50)/transcript_length
if ((len(transcript_counts) > min_len) and (transcript_portion >= min_portion)) or is_max_peak or current_highest or current_longest:
start_pos = current_pos
for i in range(len(transcript_counts)):
if transcript_counts[i] == 1:
print('trimmin')
start_pos += 50
else:
break
if np.mean(transcript_counts) > 1.1:
start_list.append(start_pos)
trim = trim_expression(transcript_counts, True)
if trim:
if np.mean(transcript_counts) > 1.1:
end_list.append(current_pos + ((len(transcript_counts)*50)-trim))
else:
if np.mean(transcript_counts) > 1.1:
trimmer = 50
# if strand == 'plus':
# trimmer = 50
# elif strand == 'minus':
# trimmer = 0
# else:
# raise Exception('strand error')
end_list.append(current_pos + (len(transcript_counts)*50) - trimmer)
print('adding')
if transcript[-1] == ',':
current_pos += (50*split_len)
current_pos += (len(transcript_counts)*50)
if transcript[-1] == ',':
inherited = True
# if strand == 'minus':
# return end_list, start_list
return start_list, end_list
# def trim_and_split(hmm, ncRNA, split_len = 50, min_len=200, min_portion = 0.2, strict=True,
# strand = 'plus'):
# current_pos = 0
# start_list = []
# end_list = []
# transcript_length = hmm.loc[ncRNA].end - hmm.loc[ncRNA].start
# transcripto = hmm.loc[ncRNA].score
# if strand == 'minus':
# transcripto = transcripto[::-1]
# if strict:
# transcripto_split = transcripto.split(',')
# counts = [int(x) for x in transcripto_split]
# #if (np.max(counts)==4):
# if (np.max(counts)==4) or (np.quantile(counts, 0.75)>=3):
# transcripto_split = ['1' if x == '2' else x for x in transcripto_split]
# transcripto = ','.join(transcripto_split)
# split_transcripts = transcripto.split(','.join(['1']*split_len))
# longest = max([len(x) for x in split_transcripts])
# high_scores = []
# for transcript in split_transcripts:
# try:
# sc = np.mean([int(x) for x in transcript.strip(',').split(',')])
# high_scores.append(sc)
# except:
# continue
# highest = np.max(high_scores)
# inherited = False
# print(counts)
# print(split_transcripts)
# len_to_add = 0
# for transcript in split_transcripts:
# current_longest = ((len(transcript) == longest) and (len(transcript) > 3))
# if transcript == ',':
# current_pos += (50*split_len)
# elif transcript == '':
# current_pos += (50*split_len)
# else:
# transcript_counts = transcript.strip(',').split(',')
# if (transcript[0] == ',') and (strand == 'plus'):
# current_pos += 50
# if (strand == 'minus') and inherited:
# current_pos += (50*split_len)
# # if (transcript[-1] == ',') and (strand == '-'):
# # current_pos += (50*split_len)
# transcript_counts = [int(x) for x in transcript_counts]
# print(transcript_counts)
# current_highest = np.abs(np.mean(transcript_counts) - highest) <= 1e-100
# transcript_portion = (len(transcript_counts)*50)/transcript_length
# if ((len(transcript_counts) > min_len) and (transcript_portion >= min_portion)) or current_highest or current_longest:
# start_pos = current_pos
# for i in range(len(transcript_counts)):
# if transcript_counts[i] == 1:
# print('trimmin')
# start_pos += 50
# else:
# break
# if np.mean(transcript_counts) > 1.1:
# start_list.append(start_pos)
# trim = trim_expression(transcript_counts, True)
# if trim:
# if np.mean(transcript_counts) > 1.1:
# end_list.append(current_pos + ((len(transcript_counts)*50)-trim))
# else:
# if np.mean(transcript_counts) > 1.1:
# if strand == 'plus':
# trimmer = 50
# elif strand == 'minus':
# trimmer = 0
# else:
# raise Exception('strand error')
# end_list.append(current_pos + (len(transcript_counts)*50) - trimmer)
# print('adding')
# if transcript[-1] == ',':
# current_pos += (50*split_len)
# current_pos += (len(transcript_counts)*50)
# if transcript[-1] == ',':
# inherited = True
# return start_list, end_list
parser = argparse.ArgumentParser()
parser.add_argument('--chrom', type=str, required=True)
parser.add_argument('--strand', type=str, required=True)
if __name__ == '__main__':
args = parser.parse_args()
chrom = args.chrom
strand = args.strand
hmm = pd.read_csv('NonCodingRNA/tables/{chrom}.{strand}.hmm.sorted.bed.gz'.format(
chrom = chrom, strand = strand), sep='\t',
names = ['chrom', 'start', 'end', 'names', 'score', 'strand'] + [
'chRNA_'+str(i+1) for i in range(86)
]
).sort_values('start')
hmm.index = hmm.names
hmm['length'] = hmm.end - hmm.start
samples = ['chRNA_'+str(i+1) for i in range(86)]
RPKM = ((hmm[samples]/np.array(hmm[samples].sum(axis=0))).T/np.array(hmm.length)).T*1e9
RPKM.index = hmm.names
chrom_list = []
start_list = []
end_list = []
names_list = []
score_list = []
strand_list = []
ncRNAs = pd.Index([x for x in hmm.index if x[:5] == 'ncRNA'])
ncRNAs = ncRNAs.intersection(RPKM.loc[RPKM.median(axis=1)>0.1].index)
for n in ncRNAs:
start = hmm.loc[n].start
transcript_end = hmm.loc[n].end
scores = [int(x) for x in hmm.loc[n].score.split(',')]
transcript_ln = transcript_end - start
ln = len(scores)
if (RPKM.loc[n].median() >= 1) or (ln > 100):
# if (ln < 750):
if (ln < 650):
srt, end = trim_and_split(hmm, n, min_len=50, strict=True, split_len=10, strand=strand)
# elif (ln >= 750):
else:
srt, end = trim_and_split(hmm, n, min_len=50, strict=True, split_len=100, strand=strand)
else:
srt, end = [], []
print('trimming: ' + n)
print(srt, end)
print('')
if len(srt) > 0:
for i in range(len(srt)):
# if strand == 'plus':
start_ = start + srt[i]
end_ = start + end[i]
if strand == 'plus':
strand_list.append('+')
else:
# start_ = start + (transcript_ln - end[i])
# print(end[i])
# end_ = start + (transcript_ln - srt[i])
# print(srt[i])
strand_list.append('-')
chrom_list.append(chrom)
start_list.append(start_)
end_list.append(end_)
names_list.append(n + '_' + str(i+1))
score_list.append('.')
else:
chrom_list.append(chrom)
start_list.append(start)
end_list.append(transcript_end)
names_list.append(n)
score_list.append('.')
if strand == 'plus':
strand_list.append('+')
else:
strand_list.append('-')
# pc_names = pd.Index([x for x in hmm.index if x[:5] != 'ncRNA'])
# chrom_list += list(hmm.loc[pc_names].chrom)
# start_list += list(hmm.loc[pc_names].start)
# end_list += list(hmm.loc[pc_names].end)
# names_list += list(hmm.loc[pc_names].names)
# score_list += list(hmm.loc[pc_names].score)
# strand_list += list(hmm.loc[pc_names].strand)
df = pd.DataFrame()
df['chrom'] = chrom_list
df['start'] = start_list
df['end'] = end_list
df['names'] = names_list
df['score'] = score_list
df['strand'] = strand_list
df.to_csv('NonCodingRNA/tables/{chrom}.{strand}.hmm_trimmed.bed.gz'.format(
chrom = chrom, strand = strand), sep='\t', index=False, header=False)
| bfairkun/ChromatinSplicingQTLs | code/scripts/NonCodingRNA/trim_ncRNAs.py | trim_ncRNAs.py | py | 14,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.mean",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,... |
70183489063 | from decimal import Decimal, InvalidOperation
from typing import TYPE_CHECKING, Any, List, Optional, TypedDict
from django.db import models, transaction
if TYPE_CHECKING:
from senda.core.models.clients import ClientModel
from senda.core.models.localities import LocalityModel, StateChoices
from senda.core.models.offices import OfficeModel
from senda.core.models.order_internal import InternalOrderModel
from senda.core.models.order_supplier import SupplierOrderModel
from senda.core.models.products import (
ProductModel,
ProductTypeChoices,
ProductStockInOfficeModel,
)
from senda.core.models.purchases import PurchaseModel
from senda.core.models.rental_contracts import RentalContractModel
from senda.core.models.suppliers import SupplierModel
from senda.core.models.employees import EmployeeModel, EmployeeOfficeModel
from users.models import UserModel
class ClientModelManager(models.Manager["ClientModel"]):
"""
Custom manager for the ClientModel, providing methods to create and update client instances.
"""
def create_client(
self,
email: str,
first_name: str,
last_name: str,
locality: "LocalityModel",
house_number: str,
street_name: str,
house_unit: str,
dni: str,
phone_code: str,
phone_number: str,
) -> "ClientModel":
"""
Creates a new client instance with the given details. Validates to ensure the email and DNI are unique.
"""
if self.filter(email=email).exists():
raise ValueError("Ya existe un cliente con ese email")
if self.filter(dni=dni).exists():
raise ValueError("Ya existe un cliente con ese DNI")
return self.create(
email=email,
first_name=first_name,
last_name=last_name,
locality=locality,
house_number=house_number,
street_name=street_name,
house_unit=house_unit,
dni=dni,
phone_code=phone_code,
phone_number=phone_number,
)
def update_client(
self, client: "ClientModel", locality: "LocalityModel", **kwargs: Any
) -> "ClientModel":
"""
Updates an existing client instance with the provided details.
"""
client.locality = locality
for field, value in kwargs.items():
setattr(client, field, value)
client.save()
return client
class LocalityModelManager(models.Manager["LocalityModel"]):
"""
Custom manager for the LocalityModel, providing methods to create and retrieve locality instances.
"""
def create_locality(
self, name: str, postal_code: str, state: "StateChoices"
) -> "LocalityModel":
"""
Creates a new locality instance with the given details. Ensures the locality's uniqueness.
"""
name = name.strip().lower().title()
if self.filter(name=name, postal_code=postal_code, state=state).exists():
raise ValueError("La localidad ya existe")
return self.create(name=name, postal_code=postal_code, state=state)
def get_or_create_locality(
self, name: str, postal_code: int, state: "StateChoices"
) -> "LocalityModel":
"""
Retrieves or creates a locality instance based on the provided details.
"""
locality, _created = self.get_or_create(
name=name, postal_code=postal_code, state=state
)
return locality
InternalOrderProductsDict = TypedDict(
"InternalOrderProductsDict", {"id": str, "quantity": int}
)
class InternalOrderManager(models.Manager["InternalOrderModel"]):
"""
Custom manager for the InternalOrderModel, handling the creation of internal orders.
"""
@transaction.atomic
def create_internal_order(
self,
office_branch: "OfficeModel",
office_destination: "OfficeModel",
user: "UserModel",
products: List[InternalOrderProductsDict],
) -> "InternalOrderModel":
"""
Creates a new internal order with associated products and history. This process is atomic.
"""
from senda.core.models.order_internal import InternalOrderHistoryStatusChoices
internal_order = self.create(
office_branch=office_branch, office_destination=office_destination
)
internal_order.save()
internal_order.history.create(
status=InternalOrderHistoryStatusChoices.PENDING,
internal_order=internal_order,
user=user,
)
for product in products:
internal_order.orders.create(
product_id=product["id"],
quantity=product["quantity"],
)
return internal_order
class SupplierModelManager(models.Manager["SupplierModel"]):
@transaction.atomic
def create_supplier(
self,
cuit: str,
name: str,
email: str,
locality: "LocalityModel",
house_number: str,
street_name: str,
house_unit: str,
phone_code: str,
phone_number: str,
note: str,
) -> "SupplierModel":
if self.filter(email=email).exists():
raise ValueError("Ya existe un proveedor con ese email")
if self.filter(cuit=cuit).exists():
raise ValueError("Ya existe un proveedor con ese CUIT")
return self.create(
cuit=cuit,
name=name,
email=email,
locality=locality,
house_number=house_number,
street_name=street_name,
house_unit=house_unit,
phone_code=phone_code,
phone_number=phone_number,
note=note,
)
SupplierOrderProductsDict = TypedDict(
"SupplierOrderProductsDict", {"id": str, "quantity": int}
)
class SupplierOrderManager(models.Manager["SupplierOrderModel"]):
"""
Custom manager for the SupplierOrderModel, handling the creation of supplier orders.
"""
@transaction.atomic
def create_supplier_order(
self,
supplier: "SupplierModel",
office_destination: "OfficeModel",
user: "UserModel",
products: List[SupplierOrderProductsDict],
) -> "SupplierOrderModel":
"""
Creates a new supplier order with associated products, history, and total cost calculation. This process is atomic.
"""
from senda.core.models.order_supplier import SupplierOrderHistoryStatusChoices
supplier_order = self.create(
supplier=supplier, office_destination=office_destination
)
supplier_order.save()
supplier_order.history.create(
status=SupplierOrderHistoryStatusChoices.PENDING,
supplier_order=supplier_order,
)
for product in products:
supplier_order.orders.create(
product_id=product["id"],
quantity=product["quantity"],
)
supplier_order.total = supplier_order.calculate_total()
return supplier_order
ProductStockInOfficeModelDict = TypedDict(
"ProductStockInOfficeModelDict", {"office_id": str, "stock": int}
)
ProductSupplierDict = TypedDict(
"ProductSupplierDict", {"supplier_id": str, "price": str}
)
ProductServiceDict = TypedDict("ProductServiceDict", {"name": str, "price": str})
def parse_price(price_str: str) -> Decimal:
"""
Parses a price string into a Decimal object, handling different formatting conventions.
"""
standard_format_str = price_str.replace(".", "").replace(",", ".")
try:
return Decimal(standard_format_str)
except InvalidOperation:
raise ValueError(f"The price {price_str} is not a valid number format")
class ProductModelManager(models.Manager["ProductModel"]):
"""
Custom manager for the ProductModel, providing methods to create product instances with associated details.
"""
def create_product(
self,
sku: str,
name: str,
brand_id: str,
description: str,
type: "ProductTypeChoices",
price: str,
stock: List[ProductStockInOfficeModelDict],
services: List[ProductServiceDict],
suppliers: List[ProductSupplierDict],
) -> "ProductModel":
"""
Creates a new product instance with various associated data like stock, services, and suppliers.
"""
if self.filter(sku=sku).exists():
raise ValueError("Ya existe un producto con ese sku")
product = self.create(
sku=sku,
name=name,
brand_id=brand_id,
description=description,
type=type,
price=parse_price(price),
)
for stock_data in stock:
product.stock.create(
office_id=stock_data["office_id"],
stock=stock_data["stock"],
)
for service_data in services:
product.services.create(
name=service_data["name"],
price=parse_price(service_data["price"]),
)
for supplier_data in suppliers:
product.suppliers.create(
supplier_id=supplier_data["supplier_id"],
price=parse_price(supplier_data["price"]),
)
return product
def get_products_with_stock_in_office(self, office: "OfficeModel", **kwargs: Any):
"""
Returns all products with associated stock in the given office.
"""
return self.filter(stock__office=office, **kwargs)
class ProductStockInOfficeManager(models.Manager["ProductStockInOfficeModel"]):
"""
Custom manager for the ProductStockInOfficeModel, providing methods to create and update stock instances.
"""
def create_stock(
self, product: "ProductModel", office: "OfficeModel", stock: int
) -> "ProductStockInOfficeModel":
"""
Creates a new stock instance for the given product and office.
"""
if self.filter(product=product, office=office).exists():
raise ValueError("Ya existe un stock para ese producto en esa sucursal")
return self.create(product=product, office=office, stock=stock)
PurchaseProductsItemDict = TypedDict(
"PurchaseProductsItemDict", {"product": str, "quantity": int}
)
class PurchaseModelManager(models.Manager["PurchaseModel"]):
"""
Custom manager for the PurchaseModel, handling the creation of purchase instances.
"""
@transaction.atomic
def create_purchase(
self,
client: "ClientModel",
office: str,
products: List[PurchaseProductsItemDict],
) -> "PurchaseModel":
"""
Creates a new purchase instance with associated purchase items. This process is atomic.
"""
purchase = self.create(client=client, office_id=office)
purchase.save()
for product in products:
purchase.purchase_items.create(
quantity=product["quantity"],
product_id=product["product"],
)
purchase.recalculate_total()
return purchase
RentalContractProductsItemDict = TypedDict(
"RentalContractProductsItemDict",
{"id": str, "quantity": int, "service": Optional[str]},
)
class RentalContractManager(models.Manager["RentalContractModel"]):
"""
Custom manager for the RentalContractModel, handling the creation of rental contract instances.
"""
@transaction.atomic
def create_rental_contract(
self,
client: "ClientModel",
products: List[RentalContractProductsItemDict],
office: str,
locality: "LocalityModel",
house_number: str,
street_name: str,
house_unit: str,
contract_start_datetime: str,
contract_end_datetime: str,
) -> "RentalContractModel":
"""
Creates a new rental contract with associated items and history. This process is atomic.
"""
from senda.core.models.rental_contracts import RentalContractStatusChoices
rental_contract = self.create(
client=client,
office_id=office,
locality=locality,
house_number=house_number,
street_name=street_name,
house_unit=house_unit,
contract_start_datetime=contract_start_datetime,
contract_end_datetime=contract_end_datetime,
)
for product in products:
rental_contract.rental_contract_items.create(
product_id=product["id"],
service_id=product["service"],
quantity=product["quantity"],
)
rental_contract.rental_contract_history.create(
status=RentalContractStatusChoices.PRESUPUESTADO,
rental_contract=rental_contract,
)
return rental_contract
class EmployeeModelManager(models.Manager["EmployeeModel"]):
"""
Custom manager for the EmployeeModel, providing methods to create and update employee instances.
"""
@transaction.atomic
def create_employee(self, user: "UserModel", offices: List[str]):
from senda.core.models.offices import OfficeModel
from senda.core.models.employees import EmployeeOfficeModel
"""
Creates a new employee instance, ensuring the user does not already have an associated employee.
"""
if self.filter(user=user).exists():
raise ValueError("Ya existe ese empleado")
employee = self.create(user=user)
for office_id in offices:
office = OfficeModel.objects.get(id=office_id)
EmployeeOfficeModel.objects.create(employee=employee, office=office)
return employee
def update_employee(self, employee: "EmployeeModel", **kwargs: Any):
"""
Updates an existing employee instance with the provided details.
"""
for field, value in kwargs.items():
setattr(employee, field, value)
employee.save()
return employee
| UNPSJB/SendaAlquiler | backend/senda/core/managers.py | managers.py | py | 14,222 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.Manager",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "ty... |
8380776575 | import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (15, 5)
def plot_temperature(m, a, b, min, max):
"""
This function will plot the temperatures for a given month m, within the time range a-b and temperature range min-max.
m: The month the user chooses as a number
a: The year the user wants to read temperatures FROM
b: The year the user wants to read temperatures TO
min: Minimum temperature the user wants to read
max: Maximum temperature the user wants to read
"""
"""
For months the function takes in a number instead of the name of the month. It would be faster with typing the month, but I've decided to do it this way! :D
"""
month_list = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}
month = month_list[m];
temp_stats = pd.read_csv('temperature.csv', sep = ',')
temp_stats.index = temp_stats['Year']
del temp_stats['Year']
print(temp_stats[month].loc[a:b])
temp_stats[month].loc[a:b].plot()
plt.ylim(ymax = max, ymin = min)
plt.ylabel('Temperature')
plt.xlabel('Year')
plt.legend()
plt.show()
def plot_CO2(a, b, min, max):
"""
This function will plot the CO2 levels for a given time range a-b and CO2 range min-max.
a: The year the user wants to read CO2 levels FROM
b: The year the user wants to read CO2 levels TO
min: Minimum CO2 level the user wants to read
max: Maximum CO2 level the user wants to read
"""
"""
I've used a zero, 0, instead of O in "CO2", because apperantly CO2 is a reserved variable...?
"""
C02_stats = pd.read_csv('co2.csv', sep = ',')
C02_stats.index = C02_stats['Year']
del C02_stats['Year']
C02_stats.loc[a:b].plot()
plt.ylim(ymax = max, ymin = min)
plt.ylabel('CO2 levels')
plt.xlabel('Year')
plt.legend()
plt.show()
#Test, uncomment to see :D
plot_temperature(6, 1880, 2017, 0, 22)
plot_CO2(1774, 1842, 0, 20)
| cjiang94/INF3331-Python | assignment6/temperature_CO2_plotter.py | temperature_CO2_plotter.py | py | 2,116 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matpl... |
29247518148 | # Run 'discoronode.py' program to start processes to execute computations sent
# by this client, along with this program.
# This example is similar to 'discoro_client6.py', except it uses broadcasting
# over Channel to send messages to remote coroutines to process, and uses
# 'deque' module to implement circular buffer.
import asyncoro.disasyncoro as asyncoro
from asyncoro.discoro import *
# This generator function is sent to remote discoro to analyze data
# and generate apprporiate signals that are sent to a coroutine
# running on client. The signal in this simple case is average of
# moving window of given size is below or above a threshold.
def rcoro_avg_proc(channel, threshold, trend_coro, window_size, coro=None):
import collections
# subscribe to channel (at client)
yield channel.subscribe(coro)
# create circular buffer
data = collections.deque(maxlen=window_size)
for i in range(window_size):
data.append(0.0)
cumsum = 0.0
# first message is 'start' command; see 'client_proc'
assert (yield coro.receive()) == 'start'
while True:
i, n = yield coro.receive()
if n is None:
break
cumsum += (n - data[0])
avg = (cumsum / window_size)
if avg > threshold:
trend_coro.send((i, 'high', float(avg)))
elif avg < -threshold:
trend_coro.send((i, 'low', float(avg)))
data.append(n)
raise StopIteration(0)
# This generator function is sent to remote discoro process to save
# the received data in a file (on the remote peer).
def rcoro_save_proc(channel, coro=None):
import os
import tempfile
# subscribe to channel (at client)
yield channel.subscribe(coro)
# first message is 'start' command (to make sure all recipients started)
assert (yield coro.receive()) == 'start'
# save data in 'tickdata' where computation files are saved (this will be
# deleted when computation is done, so it must be copied elsewhere if
# necessary)
with open('tickdata', 'w') as fd:
while True:
i, n = yield coro.receive()
if n is None:
break
fd.write('%s: %s\n' % (i, n))
raise StopIteration(0)
# This coroutine runs on client. It gets trend messages from remote
# coroutine that computes moving window average.
def trend_proc(coro=None):
coro.set_daemon()
while True:
trend = yield coro.receive()
print('trend signal at % 4d: %s / %.2f' % (trend[0], trend[1], trend[2]))
# This process runs locally. It creates two remote coroutines at two discoronode
# server processes, two local coroutines, one to receive trend signal from one
# of the remote coroutines, and another to send data to two remote coroutines
def client_proc(computation, coro=None):
# schedule computation with the scheduler; scheduler accepts one computation
# at a time, so if scheduler is shared, the computation is queued until it
# is done with already scheduled computations
if (yield computation.schedule()):
raise Exception('Could not schedule computation')
# in discoro_client6.py, data is sent to each remote coroutine; here, data
# is broadcast over channel and remote coroutines subscribe to it
data_channel = asyncoro.Channel('data_channel')
# not necessary to register channel in this case, as it is sent to remote
# coroutines; if they were to 'locate' it, it should be registered
# data_channel.register()
trend_coro = asyncoro.Coro(trend_proc)
rcoro_avg = yield computation.run(rcoro_avg_proc, data_channel, 0.4, trend_coro, 10)
assert isinstance(rcoro_avg, asyncoro.Coro)
rcoro_save = yield computation.run(rcoro_save_proc, data_channel)
assert isinstance(rcoro_save, asyncoro.Coro)
# make sure both remote coroutines have subscribed to channel ('deliver'
# should return 2 if they both are)
assert (yield data_channel.deliver('start', n=2)) == 2
# if data is sent frequently (say, many times a second), enable
# streaming data to remote peer; this is more efficient as
# connections are kept open (so the cost of opening and closing
# connections is avoided), but keeping too many connections open
# consumes system resources
yield asyncoro.AsynCoro.instance().peer(rcoro_avg.location, stream_send=True)
yield asyncoro.AsynCoro.instance().peer(rcoro_save.location, stream_send=True)
# send 1000 items of random data to remote coroutines
for i in range(1000):
n = random.uniform(-1, 1)
item = (i, n)
# data can be sent to remote coroutines either with 'send' or
# 'deliver'; 'send' is more efficient but no guarantee data
# has been sent successfully whereas 'deliver' indicates
# errors right away
data_channel.send(item)
yield coro.sleep(0.02)
item = (i, None)
data_channel.send(item)
yield computation.close()
data_channel.close()
if __name__ == '__main__':
import random
# asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
# if scheduler is shared (i.e., running as program), nothing needs
# to be done (its location can optionally be given to 'schedule');
# othrwise, start private scheduler:
Scheduler()
computation = Computation([])
asyncoro.Coro(client_proc, computation)
| pgiri/asyncoro | examples/discoro_client6_channel.py | discoro_client6_channel.py | py | 5,369 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "asyncoro.disasyncoro.Channel",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "asyncoro.disasyncoro",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "a... |
11168278407 | import csv
import json
from collections import defaultdict
import itertools
import os
import sys
import argparse
## NB update relevant config file name here
#from config2019 import *
# Mr Gorbachev, tear down etc etc
# Generate booth data structure (combinations hardcoded):
NPP_FIELDS = ["ID", "Division", "Booth", "Latitude", "Longitude"]
# So we'll have 7 columns of stuff before the prefs start:
SA1s_FIELDS = ["year","state_ab", "div_nm", "SA1_id", "pp_id", "pp_nm", "votes"]
# we'll use this one later
ttyjump = '\033[F\033[2K' if (hasattr(sys.stderr, "isatty") and sys.stderr.isatty()) else ""
def main(SCENARIO, NPP_BOOTHS_PATH, STATE, YEAR, PARTIES, SA1S_BREAKDOWN_PATH, SA1S_PREFS_PATH):
combinations = ["None"] # special-casing the empty
for r in range(len(PARTIES)):
chooseN = list(itertools.combinations(sorted(PARTIES), r+1))
for i in chooseN:
combinations += ["".join(j) for j in list(itertools.permutations(i))]
total_combos = len(combinations)
### Load in booth data
booths = {}
boothsfields = NPP_FIELDS+combinations+["Total"]
with open(NPP_BOOTHS_PATH) as consolcsv:
consolreader = csv.DictReader(consolcsv) # fieldnames implicit from the first line
for row in consolreader:
booths[row["Division"]+row["Booth"]] = row # key by 'divbooth'
###
lines = []
with open(SA1S_BREAKDOWN_PATH) as sa1scsv:
sa1sreader = csv.DictReader(sa1scsv) # fieldnames implicit from the first line
progress = 0
print()
for srow in sa1sreader:
if not srow["state_ab"]==STATE:
continue # All SA1s nationwide are in the one file - so any row with the wrong state can be safely skipped.
elif not (srow["year"] == YEAR):
sys.exit("Problem in `{SA1s_BREAKDOWN_PATH}`: Unsupported election year: "+srow["year"]+". Exiting.")
# However, the wrong year is definitely cause for concern
# if progress == 0:
# print(f"Projecting Senate results for scenario {SCENARIO} from "+srow["year"]+" onto state/territory-level electoral boundaries.\n", file=sys.stderr)
# basically a big vector multiply
bob = [srow["SA1_id"]]
db = [ booths[srow["div_nm"]+srow["pp_nm"]][i] for i in boothsfields[5:] ]
## print("db:\t", db)
for i in range(len(db)):
try:
bob.append(float(srow["votes"]) * float(db[i]) / float(db[-1]))
except ZeroDivisionError:
bob.append(0.0)
lines.append(bob)
## print("bob:\t", bob)
progress+= 1
if (progress % 1000 == 0):
print(ttyjump+f"... Projection progress: {progress:n} SA1s...", file=sys.stderr)
print(ttyjump+f"... Projection complete: {progress:n} SA1s", file=sys.stderr)
# Accumulation phase.
sa1s = {}
print(f"... Progress: summing SA1s...", file=sys.stderr)
for line in lines:
sa1_id = line[0]
if sa1_id not in sa1s:
sa1s[sa1_id] = line[1:]
else:
for i in range(1, len(line)):
sa1s[sa1_id][i-1] += line[i]
outlines = []
for ids in sa1s.keys():
outlines.append([ids] + sa1s[ids])
outlines.sort()
print(ttyjump+"... Writing File...", file=sys.stderr, end='')
with open(SA1S_PREFS_PATH, 'w') as fp:
print(*(["SA1_id"] + boothsfields[5:]), sep=',', file=fp, flush=True)
for line in outlines:
print(*line, sep=',', file=fp)
print("... Done!\n", file=sys.stderr)
# end main()
def run(argp, confp):
try:
if argp.scenarios and not (set(argp.scenarios).intersection(set(confp.sections()))):
print(f"Error: no specified scenario is defined.", file=sys.stderr)
else:
scenlist = confp.sections() if argp.all else argp.scenarios
for SCENARIO in scenlist:
YEAR = confp[SCENARIO]['YEAR']
PARTIES = json.loads(confp[SCENARIO]['GROUPS'])
STATE = confp[SCENARIO]['STATE']
SA1S_DISTRICTS_PATH = confp[SCENARIO]['SA1S_DISTS_PATH']
SA1S_BREAKDOWN_PATH = confp[SCENARIO]['SA1S_BREAKDOWN_PATH']
OUTPUT_DIR = confp[SCENARIO]['OUTPUT_DIR']
NPP_BOOTHS_PATH = os.path.join(OUTPUT_DIR, SCENARIO, confp[SCENARIO]['NPP_BOOTHS_FN'])
SA1S_PREFS_PATH = os.path.join(OUTPUT_DIR, SCENARIO, confp[SCENARIO]['SA1S_PREFS_FN'])
print("*** Projecting Scenario {}: {}, in {} [{}] ***\n".format(SCENARIO, " vs ".join(PARTIES.keys()), STATE, YEAR), file=sys.stderr)
main(SCENARIO, NPP_BOOTHS_PATH, STATE, YEAR, PARTIES, SA1S_BREAKDOWN_PATH, SA1S_PREFS_PATH)
except KeyError as k:
sys.exit(f"There was an issue with the arguments or configuration file: {k}")
def parse_argv():
    """Parse command-line arguments.

    Requires a readable ``configfile`` plus exactly one of ``--all`` or one
    or more ``--scenario`` options.
    """
    # Bug fix: ArgumentParser has no `help=` keyword (it raised TypeError);
    # the program summary belongs in `description=`.
    parser = argparse.ArgumentParser(description="Project booth results down onto SA1s")
    # pretty much everything comes from the configfile, yeah?
    parser.add_argument('configfile', type=argparse.FileType('r'))
    mxg = parser.add_mutually_exclusive_group(required=True)
    mxg.add_argument("-a", "--all", action="store_true", help="run every scenario defined in `configfile`")
    mxg.add_argument("-s", "--scenario", dest='scenarios', metavar='scenario', action="append", help="run a scenario code defined in `configfile` (can be specified multiple times to run several scenarios)")
    # not going to support listing scenarios here
    return parser.parse_args()
if __name__ == "__main__":
argp = parser.parse_args()
confp = configparser.ConfigParser()
confp.read_file(argp.configfile)
run(argp, confp)
| alexjago/nPP-Senate | src/SA1s_Multiplier.py | SA1s_Multiplier.py | py | 5,812 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stderr",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.isatty",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "itertools.pe... |
12534507732 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import datetime
df = pd.read_csv("D-GROWTH.csv")
df['Date'] = pd.to_datetime(df["Date"])
all_years = df['Date'].dt.year.unique()
temp = {'Date': [], 'Growth': []}
for i in all_years:
for j in range(12, 0, -1):
data = df[(df['Date'].dt.year == i) & (df['Date'].dt.month == j)]
try:
last_date = max(data['Date'].dt.date)
temp['Date'].append(last_date)
temp['Growth'].append(data[data['Date'] == last_date].iloc[0, 2])
except:
continue
temp = pd.DataFrame(temp)
month_data = {'Date': [], 'Month Growth': []}
for i in range(len(temp)):
month_data['Date'].append(temp['Date'][i])
try:
x = ((temp['Growth'][i+1] - temp['Growth'][i]) / temp['Growth'][i+1]) * 100
month_data['Month Growth'].append(x)
except:
month_data['Month Growth'].append(temp['Growth'][i])
month_data = pd.DataFrame(month_data)
month_data.to_csv("M-Growth.csv")
sns.lineplot(x='Date', y='Month Growth', data=month_data, markers=True, dashes=False)
plt.show()
| icyboguyaman/Python-Small-Projects | hw1.py | hw1.py | py | 1,139 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
... |
30537139211 | from tkinter import *
from tkinter import messagebox
from openpyxl import *
import cv2
import pickle
import cvzone
import numpy as np
import os
# Main application window: fixed 925x500, offset (300, 200) from screen origin.
root = Tk()
root.title("ParkiN")
root.geometry('925x500+300+200')
root.configure(bg='#fff')
root.resizable(False, False)
def book():
    """Validate the booking form, then open the parking-space chooser window.

    Reads the vehicle-number, phone and gmail entries; if any of them is
    still empty an error dialog is shown and the chooser is NOT opened.
    """
    vehicleno = user.get()
    phoneno = Phone.get()
    gmailac = gmail.get()
    # Bug fix: the original used `and` (error only when ALL fields were
    # empty) and then fell through and opened the window anyway. Require
    # every field — matching the `or` check in insert() — and bail out.
    if vehicleno == '' or phoneno == '' or gmailac == '':
        messagebox.showerror("X", 'Please fill the details')
        return
    # Secondary window listing the four parking areas.
    building = Toplevel(root)
    building.title('Parkin!')
    building.geometry('925x500+300+200')
    building.configure(bg='#fff')
    building.resizable(False, False)
    Label(building, text='Please select Your Parking Space', fg='#57a1f8', bg='white',
          font=('Microsoft Yahei UI Light', 20)).grid(row=0, column=0, columnspan=2, padx=285, pady=5)
    # Parking A opens the live camera footage viewer.
    Parking_A = Button(building, text='Parking A', pady=1, width=10, fg='black', border=0, bg='#FFFFEB',
                       font=('Microsoft Yahei UI Light', 14,), command=footage)
    Parking_A.grid(row=1, column=0, padx=140, pady=10)
    # Keep a reference on the Label (park11.image) so Tk does not garbage-
    # collect the PhotoImage and blank the widget.
    park1 = PhotoImage(file='parkingb.png')
    park11 = Label(building, image=park1, border=0, bg='white')
    park11.grid(row=2, column=0, padx=40, pady=10, sticky=W)
    park11.image = park1
    Parking_B = Button(building, text='Parking B', pady=1, width=10, fg='black', border=0, bg='#FFFFEB',
                       font=('Microsoft Yahei UI Light', 14,), command=build2)
    Parking_B.grid(row=1, column=1, padx=140, pady=10)
    park2 = PhotoImage(file='parkingb.png')
    park22 = Label(building, image=park2, border=0, bg='white')
    park22.grid(row=2, column=1, padx=40, pady=10, sticky=W)
    park22.image = park2
    Parking_C = Button(building, text='Parking C', pady=1, width=10, fg='black', border=0, bg='#FFFFEB',
                       font=('Microsoft Yahei UI Light', 14,), command=build3)
    Parking_C.grid(row=3, column=0, padx=140, pady=10)
    Parking_D = Button(building, text='Parking D', pady=1, width=10, fg='black', border=0, bg='#FFFFEB',
                       font=('Microsoft Yahei UI Light', 14,), command=build4)
    Parking_D.grid(row=3, column=1, padx=140, pady=10)
# Left-hand hero image plus the white form card on the right.
img = PhotoImage(file='parking1.png')
Label(root, image=img, border=0, bg='white').place(x=-350, y=-265)
frame = Frame(root, width=400, height=390, bg='#fff')
frame.place(x=450, y=80)
heading = Label(frame, text='Welcome to Parkin!', fg="#57a1f8", bg="white", font=('Microsoft Yahei UI Light', 20,))
heading.place(x=120, y=5)
def insert():
    """Append the current form values as a new row of book1.xlsx.

    Writes nothing and prints a message if any of the three entries is
    empty; otherwise appends below the last populated row and saves.
    """
    if (Phone.get() == "" or
            user.get() == "" or
            gmail.get() == ""):
        print("Please fill all details")
    else:
        # Row index of the last populated row; the new data goes one below.
        # (The unused `current_column` lookup from the original was removed.)
        current_row = sheet.max_row
        # Column order matches the headers written by excel():
        # 1 = mobile no., 2 = vehicle no., 3 = e-mail id.
        sheet.cell(row=current_row + 1, column=1).value = Phone.get()
        sheet.cell(row=current_row + 1, column=2).value = user.get()
        sheet.cell(row=current_row + 1, column=3).value = gmail.get()
        # Persist the workbook to disk immediately.
        wb.save('book1.xlsx')
        # Return keyboard focus to the vehicle-number entry.
        user.focus_set()
def footage():
    """Loop over the car-park video, marking each slot free (green) or taken (red)."""
    cap = cv2.VideoCapture('carPark.mp4')
    # NOTE(review): os.path.join('D:', ...) yields a drive-relative path
    # ('D:Coding\\...') on Windows, not 'D:\\Coding\\...' — confirm intent.
    file_path = os.path.join('D:', 'Coding', 'C course', '.vscode', 'CarParkingPos')
    # Pickled list of (x, y) top-left corners for every parking slot.
    with open(file_path, 'rb') as f:
        posList = pickle.load(f)
    # Pixel size of one parking slot rectangle.
    width, height = 107, 46
    def checkParkingSpace(imgPro):
        # NOTE(review): spaceCounter is never incremented or displayed.
        spaceCounter = 0
        for pos in posList:
            x, y = pos
            imgCrop = imgPro[y:y + height, x:x + width]
            # cv2.imshow(str(x*y),imgCrop)
            count = cv2.countNonZero(imgCrop)  # The number in front of the boxes
            # Few white pixels after thresholding => slot is empty.
            if count < 900:
                color = (0, 255, 0)
                thickness = 5
            else:
                color = (0, 0, 255)
                thickness = 2
            # `img` is the current colour frame from the enclosing loop below.
            cv2.rectangle(img, pos, (pos[0] + width, pos[1] + height), color, 2)
            cvzone.putTextRect(img, str(count), (x, y + height - 3), scale=0.8, thickness=1, offset=0)
            # NOTE(review): duplicate call — the same label is drawn twice.
            cvzone.putTextRect(img, str(count), (x, y + height - 3), scale=0.8, thickness=1, offset=0)
    while True:
        # Rewind to the start once the video reaches its last frame.
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(cv2.CAP_PROP_FRAME_COUNT):
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        # NOTE(review): `success` is never checked before using `img`.
        success, img = cap.read()
        # Grey -> blur -> adaptive threshold -> median -> dilate: isolates
        # car pixels as white blobs for the per-slot pixel count above.
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgBlur = cv2.GaussianBlur(imgGray, (3, 3), 1)
        imgThreshold = cv2.adaptiveThreshold(imgBlur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 25,
                                             16)
        imgMedian = cv2.medianBlur(imgThreshold, 5)
        kernel = np.ones((3, 3), np.uint8)
        imgDilate = cv2.dilate(imgMedian, kernel, iterations=1)
        checkParkingSpace(imgDilate)
        cv2.imshow("Image", img)
        cv2.imshow("ImageBlur", imgBlur)
        cv2.imshow("ImageThres", imgMedian)
        cv2.waitKey(50)
# "Book" button: writes the row to Excel, then opens the chooser window.
ok = Button(frame, text='Book', pady=1, width=25, fg='white', border=0, bg='#57a1f8',
            font=('Microsoft Yahei UI Light', 14), command=lambda: [insert(), book()])
ok.place(x=115, y=250)
# Each entry below uses a pair of focus handlers to implement placeholder
# text. on_enter/on_leave are redefined three times; that is safe because
# bind() captures the function object current at bind time.
def on_enter(e):
    user.delete(0, 'end')
def on_leave(e):
    # Restore the placeholder if the user left the field empty.
    if user.get() == '':
        user.insert(0, 'Vehicle Number ')
user = Entry(frame, width=25, fg="black", border=0, bg='white', font=('Microsoft Yahei UI Light', 13))
user.place(x=110, y=67)
# This input will be given by the parking coordinator
user.insert(0, 'Vehicle Number')
user.bind("<FocusIn>", on_enter)
user.bind("<FocusOut>", on_leave)
# Thin black underline beneath the entry.
Frame(frame, width=295, height=2, bg='black').place(x=110, y=90)
def on_enter(e):
    Phone.delete(0, 'end')
def on_leave(e):
    if Phone.get() == '':
        Phone.insert(0, 'Phone No.')
Phone = Entry(frame, width=25, fg="black", border=0, bg='white', font=('Microsoft Yahei UI Light', 13))
Phone.place(x=110, y=134)
Phone.insert(0, 'Phone No.')
Phone.bind("<FocusIn>", on_enter)
Phone.bind("<FocusOut>", on_leave)
Frame(frame, width=295, height=2, bg='black').place(x=110, y=160)
def on_enter(e):
    gmail.delete(0, 'end')
def on_leave(e):
    if gmail.get() == '':
        gmail.insert(0, 'Enter your Gmail')
gmail = Entry(frame, width=25, fg="black", border=0, bg='white', font=('Microsoft Yahei UI Light', 13))
gmail.place(x=110, y=200)
gmail.insert(0, 'Enter your Gmail ')
gmail.bind("<FocusIn>", on_enter)
gmail.bind("<FocusOut>", on_leave)
Frame(frame, width=295, height=2, bg='black').place(x=110, y=230)
# For Parking A
def build1():
    """Open the (currently empty) fixed-size window for Parking A."""
    window = Toplevel(root)
    window.title('Parkin!')
    window.resizable(False, False)
    window.geometry('925x500+300+200')
    window.configure(bg='#fff')
# For Parking B
def build2():
    """Open the (currently empty) fixed-size window for Parking B."""
    win = Toplevel(root)
    win.title('Parkin!')
    win.configure(bg='#fff')
    win.geometry('925x500+300+200')
    win.resizable(False, False)
# For Parking C
def build3():
    """Open the (currently empty) fixed-size window for Parking C."""
    win = Toplevel(root)
    win.geometry('925x500+300+200')
    win.title('Parkin!')
    win.configure(bg='#fff')
    win.resizable(False, False)
# For Parking D
def build4():
    """Open the (currently empty) fixed-size window for Parking D."""
    win = Toplevel(root)
    win.geometry('925x500+300+200')
    win.configure(bg='#fff')
    win.title('Parkin!')
    win.resizable(False, False)
# create the sheet object
# NOTE(review): book1.xlsx must already exist next to the script;
# load_workbook raises FileNotFoundError otherwise.
wb = load_workbook('book1.xlsx')
sheet = wb.active
def excel():
    """Size the spreadsheet columns and write the header row."""
    # Column widths: A = phone number, B = vehicle number, C = e-mail.
    for letter, chars in (('A', 20), ('B', 10), ('C', 30)):
        sheet.column_dimensions[letter].width = chars
    # Header labels for the three data columns written by insert().
    for col, label in enumerate(("mobile no.", "Vehicle No.", "e-mail id"), start=1):
        sheet.cell(row=1, column=col).value = label
root.mainloop()
| FrostPrince003/Book2Park | parkin.py | parkin.py | py | 8,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.... |
34145540015 | msg = "good"
import traceback
import os
try:
import os
import pickle
import onnxruntime as rt
from time import time
from transformers import RobertaForSequenceClassification, RobertaTokenizer
import numpy as np
import urllib
except Exception as e:
msg = traceback.format_exc()
tmp = "/tmp/"
cold = True
model_url="https://media.githubusercontent.com/media/onnx/models/main/text/machine_comprehension/roberta/model/roberta-sequence-classification-9.onnx"
model_object_key = "roberta-sequence-classification-9.onnx"
model_path = tmp + model_object_key
def has_gpu() -> bool:
    """Report whether the NVIDIA management tool is installed on this host."""
    nvidia_smi = "/usr/bin/nvidia-smi"
    return os.path.isfile(nvidia_smi)
# Download the ONNX model into /tmp on first (cold) start; warm containers
# reuse the cached file.
if not os.path.isfile(model_path):
    # NOTE(review): only `import urllib` appears above — `urllib.request`
    # may be unbound unless another module imported it; confirm that
    # `import urllib.request` is needed.
    urllib.request.urlretrieve(model_url, model_path)
# Prefer CUDA when nvidia-smi is present, always keeping a CPU fallback.
providers=['CPUExecutionProvider']
if has_gpu():
    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
session = rt.InferenceSession(model_path, providers=providers)
# Name of the model's first graph input (the token-id tensor).
input_id = session.get_inputs()[0].name
def main(args):
    """Serverless handler: classify the sentiment of ``args['input']``.

    Returns a dict payload with latency/cold-start info plus either the
    prediction string or the captured error details.
    """
    global cold
    was_cold = cold
    cold = False
    try:
        start = time()
        input_text = args.get('input', "This film is so good")
        # NOTE(review): the tokenizer is (re)loaded on every invocation;
        # hoisting it next to `session` would cut per-request latency.
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        input_ids = [np.array(tokenizer.encode(input_text, add_special_tokens=True))]
        # for input_meta in session.get_inputs():
        #     print(input_meta)
        # print(input_ids, to_numpy(input_ids))
        results = session.run([], {input_id: input_ids})
        # Binary head: logit index 0 = negative, 1 = positive.
        pred = np.argmax(results)
        if(pred == 0):
            results = "Prediction: negative {}".format(results)
        elif(pred == 1):
            results = "Prediction: positive {}".format(results)
        end = time()
        return {"body": { "latency":end-start, "cold":was_cold, "start":start, "end":end, "output":results }}
    except Exception as e:
        # Deliberately broad: the handler must always return a JSON-able
        # payload, including any module-load failure recorded in `msg`.
        err = str(e)
        try:
            trace = traceback.format_exc()
        except Exception as fug:
            trace = str(fug)
        return {"body": { "import_error":msg, "runtime_error":err, "trace":trace, "cold":was_cold }}
| COS-IN/iluvatar-faas | src/load/functions/python3/gpu-functions/onnx-roberta/main.py | main.py | py | 2,058 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "traceback.format_exc",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
... |
43688718516 | import torch
from torch import nn
from torch.nn import Sequential as Seq, Linear as Lin, Conv2d
##############################
# Basic layers
##############################
def act_layer(act, inplace=False, neg_slope=0.2, n_prelu=1):
    """Build an activation module by (case-insensitive) name.

    :param act: one of 'relu', 'leakyrelu', 'prelu'
    :param inplace: perform the op in place where supported
    :param neg_slope: negative slope for LeakyReLU / PReLU init
    :param n_prelu: number of PReLU parameters
    :return: the constructed nn.Module
    """
    name = act.lower()
    if name == 'relu':
        return nn.ReLU(inplace)
    if name == 'leakyrelu':
        return nn.LeakyReLU(neg_slope, inplace)
    if name == 'prelu':
        return nn.PReLU(num_parameters=n_prelu, init=neg_slope)
    raise NotImplementedError('activation layer [%s] is not found' % name)
def norm_layer(norm, nc):
    """Build a 2-D normalization module by (case-insensitive) name.

    :param norm: 'batch' or 'instance'
    :param nc: number of channels to normalize
    :return: the constructed nn.Module
    """
    kind = norm.lower()
    if kind == 'batch':
        return nn.BatchNorm2d(nc, affine=True)
    if kind == 'instance':
        return nn.InstanceNorm2d(nc, affine=False)
    raise NotImplementedError('normalization layer [%s] is not found' % kind)
class MLP(Seq):
    """Sequential multi-layer perceptron.

    Builds one Linear layer per consecutive pair in ``channels``, each
    optionally followed by an activation and a normalization layer.
    """

    def __init__(self, channels, act='relu', norm=None, bias=True):
        layers = []
        for in_ch, out_ch in zip(channels[:-1], channels[1:]):
            layers.append(Lin(in_ch, out_ch, bias))
            if act:
                layers.append(act_layer(act))
            if norm:
                # Mirrors the original: normalization is sized from the
                # final channel count, not the current layer's output.
                layers.append(norm_layer(norm, channels[-1]))
        super().__init__(*layers)
class BasicConv(nn.Module):
    """Stack of 1x1 Conv2d layers with optional activation/normalization."""

    def __init__(self, channels, act='relu', norm=None, bias=True):
        super().__init__()
        layers = []
        for in_ch, out_ch in zip(channels[:-1], channels[1:]):
            layers.append(Conv2d(in_ch, out_ch, 1, bias=bias))
            if act:
                layers.append(act_layer(act))
            if norm:
                # Mirrors the original: normalization is sized from the
                # final channel count.
                layers.append(norm_layer(norm, channels[-1]))
        self.body = Seq(*layers)

    def forward(self, x, edge_index=None):
        """Apply the conv stack; ``edge_index`` is accepted but unused."""
        return self.body(x)
def batched_index_select(inputs, index):
    """
    :param inputs: torch.Size([batch_size, num_dims, num_vertices, 1])
    :param index: torch.Size([batch_size, num_vertices, k])
    :return: torch.Size([batch_size, num_dims, num_vertices, k])
    """
    batch_size, num_dims, num_vertices, _ = inputs.shape
    k = index.shape[2]
    # Offset each batch's vertex indices so one flat index_select suffices:
    # batch b's rows live at [b * num_vertices, (b + 1) * num_vertices).
    idx = torch.arange(0, batch_size) * num_vertices
    idx = idx.contiguous().view(batch_size, -1)
    # Flatten to (batch*vertices, dims) rows for the gather below.
    inputs = inputs.transpose(2, 1).contiguous().view(-1, num_dims)
    index = index.contiguous().view(batch_size, -1) + idx.type(index.dtype).to(inputs.device)
    index = index.contiguous().view(-1)
    # Gather the rows, then restore the (batch, dims, vertices, k) layout.
    return torch.index_select(inputs, 0, index).contiguous().view(batch_size, -1, num_dims).transpose(2, 1).contiguous().view(batch_size, num_dims, -1, k)
| lightaime/sgas | gcn/gcn_lib/dense/torch_nn.py | torch_nn.py | py | 2,774 | python | en | code | 157 | github-code | 36 | [
{
"api_name": "torch.nn.ReLU",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number... |
2303254132 | import os
import sys
from os.path import join, dirname
from dotenv import load_dotenv
import datetime
import time
import schedule
import logging
import iso8601
from googleapiclient.discovery import build
import functools
logger = logging.getLogger('autosnap')
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def catch_exceptions(job_func):
    """Decorator: run ``job_func`` and print (not raise) any failure.

    Used so one failing scheduled job cannot kill the scheduler loop.
    """
    @functools.wraps(job_func)
    def wrapper(*args, **kwargs):
        try:
            job_func(*args, **kwargs)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, making the process impossible to interrupt.
        except Exception:
            import traceback
            print(traceback.format_exc())
    return wrapper
def load_env():
    """Load environment variables from the `.env` file beside this script."""
    env_file = join(dirname(__file__), '.env')
    load_dotenv(env_file)
@catch_exceptions
def create_snapshot(project, disk, zone, snapshot_name):
    """Create a snapshot of *disk* named ``<snapshot_name>-<unix-epoch>``.

    Errors are printed (not raised) via the @catch_exceptions decorator so
    the scheduler keeps running.
    """
    logger.info('Creating Snapshot.')
    # Epoch suffix makes each snapshot name unique and sortable by time.
    body = {'name': snapshot_name + '-' + str(int(datetime.datetime.now().timestamp()))}
    logger.info(compute.disks().createSnapshot(project=project, disk=disk, zone=zone, body=body).execute())
@catch_exceptions
def delete_old_snapshots(project, snapshot_name):
    """Delete snapshots prefixed ``snapshot_name`` older than 7 days.

    Walks every page of the project's snapshot list; errors are printed
    (not raised) via the @catch_exceptions decorator.
    """
    logger.info('Deleting Old Snapshots.')
    # Get a list of all snapshots
    snapshots = compute.snapshots().list(project=project).execute()
    while (True):
        next_page_token = snapshots.get("nextPageToken", None)
        # NOTE(review): a page with no "items" key would raise KeyError
        # here (swallowed by the decorator) — confirm against the API.
        for snapshot in snapshots["items"]:
            snapshot_date = iso8601.parse_date(snapshot["creationTimestamp"])
            # Retention window is hard-coded at 7 days.
            delete_before_date = datetime.datetime.now(snapshot_date.tzinfo) - datetime.timedelta(days=7)
            # Check that a snapshot is for this disk, and that it was created
            # more than 7 days ago.
            if snapshot["name"].startswith(snapshot_name) and \
                    snapshot_date < delete_before_date:
                logger.info(compute.snapshots().delete(
                    project=project, snapshot=snapshot["name"]).execute())
        if next_page_token == None:
            break
        # Fetch the next page and continue.
        snapshots = compute.snapshots().list(
            project=project, pageToken=next_page_token).execute()
if __name__ == '__main__':
    logger.info('Loading Google Credentials.')
    compute = build('compute', 'v1')
    # Bug fix: `not A and B and C` parsed as `(not A) and B and C`, so the
    # .env file was only loaded when PROJECT was missing but DISK and
    # INTERVAL_MINUTES were present. The intent is: load it whenever any
    # required variable is absent.
    if not (os.environ.get('PROJECT') and os.environ.get('DISK') and os.environ.get('INTERVAL_MINUTES')):
        load_env()  # not needed if loaded via docker
    project = os.environ.get('PROJECT')
    disk = os.environ.get('DISK')
    zone = os.environ.get('ZONE')
    snapshot_name = os.environ.get('SNAPSHOT_NAME')
    interval = int(os.environ.get('INTERVAL_MINUTES'))
    # Take one snapshot immediately:
    create_snapshot(project, disk, zone, snapshot_name)
    # Prune old snapshots immediately:
    delete_old_snapshots(project, snapshot_name)
    # Then repeat both jobs every `interval` minutes:
    schedule.every(interval).minutes.do(create_snapshot, project, disk, zone, snapshot_name)
    schedule.every(interval).minutes.do(delete_old_snapshots, project, snapshot_name)
    while True:
        schedule.run_pending()
        time.sleep(1)
| rehive/autosnap-docker | app.py | app.py | py | 2,996 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
... |
20500455998 | import mailbox
from contextlib import contextmanager
import tempfile
from io import BytesIO
import gzip
import logging
import requests
from mlarchive2maildir.message import deobfuscate
class MessageIdMaildir(mailbox.Maildir):
"""
An extension of the mailbox.Maildir class that you can ask
whether a message with a specific message id already exists
"""
_message_ids = None
def __init__(self, dirname, factory=None, create=True):
super().__init__(dirname, factory, create)
self._build_msgid_cache()
def add(self, message):
super().add(message)
self._message_ids[message['message-id']] = True
def _build_msgid_cache(self):
self._message_ids = dict()
for key, message in self.items():
self._message_ids[message['message-id']] = True
def contains_msgid(self, message_id):
return message_id in self._message_ids
def import_mbox_from_url(self, url, headers=None):
"""
request a mbox by url, and pipes all messages
into the passed maildir object
"""
if url.endswith('.txt'):
suffix = '.txt'
elif url.endswith('.gz'):
suffix = '.gz'
else:
raise Exception('Invalid file suffix')
# get archive url stream
stream = BytesIO(requests.get(url).content)
if suffix == '.gz':
stream = gzip.open(stream)
# open mbox
with mbox_from_stream(stream) as tmbox:
for key, message in tmbox.items():
if message['message-id'] is None:
continue
deobfuscate(message)
logger = logging.getLogger(__name__)
if not self.contains_msgid(message['message-id']):
logger.debug('imported {}'.format(message['message-id']))
if headers:
for (header, value) in headers.items():
del message[header]
message[header] = value
self.add(message)
else:
logger.warning('maildir already contains msgid {} ({}), skipping…'.format(message['message-id'],
message['subject']))
@contextmanager
def mbox_from_stream(f):
    """
    consumes a file-like object containing a mailbox.
    will yield a mailbox.Mailbox object out of a temporary directory,
    and clean up afterwards
    :param f: the file-like object
    """
    # Spool the stream to a named temp file so mailbox.mbox can open it by
    # path; the file is removed when the context exits.
    with tempfile.NamedTemporaryFile(mode='w+b') as spool:
        spool.write(f.read())
        spool.flush()
        yield mailbox.mbox(spool.name)
@contextmanager
def locked_messageid_maildir(maildir_path):
    """
    will yield a MessageIdMaildir, which is locked
    :param maildir_path: the path to a mailbox
    """
    maildir = MessageIdMaildir(maildir_path)
    maildir.lock()
    # keep a handle to the unwrapped add method
    maildir.normal_add = maildir.add
    try:
        yield maildir
    finally:
        # Bug fix: without try/finally, an exception raised inside the
        # `with` body skipped unlock() and left the mailbox locked forever.
        maildir.unlock()
| flokli/mlarchive2maildir | mlarchive2maildir/mailbox.py | mailbox.py | py | 3,148 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "mailbox.Maildir",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_nu... |
15731209645 | from __future__ import annotations
from typing import Any
from typing import Dict
from typing import Set
from sqlalchemy import CHAR
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Numeric
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import UniqueConstraint
from ... import autogenerate
from ... import util
from ...autogenerate import api
from ...ddl.base import _fk_spec
from ...migration import MigrationContext
from ...operations import ops
from ...testing import config
from ...testing import eq_
from ...testing.env import clear_staging_env
from ...testing.env import staging_env
names_in_this_test: Set[Any] = set()
@event.listens_for(Table, "after_parent_attach")
def new_table(table, parent):
    # Fires whenever a Table is attached to a MetaData; records the name so
    # _default_include_object can restrict autogenerate to tables that were
    # actually defined while this test module ran.
    names_in_this_test.add(table.name)
def _default_include_object(obj, name, type_, reflected, compare_to):
    """Autogenerate filter: only compare tables created during this test run."""
    if type_ != "table":
        return True
    return name in names_in_this_test
_default_object_filters: Any = _default_include_object
_default_name_filters: Any = None
class ModelOne:
    """Fixture schemas: a 'database' MetaData and a deliberately diverging
    'model' MetaData used to exercise autogenerate comparisons.
    """

    __requires__ = ("unique_constraint_reflection",)

    # Optional schema name applied to both MetaData objects.
    schema: Any = None

    @classmethod
    def _get_db_schema(cls):
        """Return the MetaData representing the state of the database."""
        schema = cls.schema
        m = MetaData(schema=schema)
        Table(
            "user",
            m,
            Column("id", Integer, primary_key=True),
            Column("name", String(50)),
            Column("a1", Text),
            Column("pw", String(50)),
            Index("pw_idx", "pw"),
        )
        Table(
            "address",
            m,
            Column("id", Integer, primary_key=True),
            Column("email_address", String(100), nullable=False),
        )
        Table(
            "order",
            m,
            Column("order_id", Integer, primary_key=True),
            Column(
                "amount",
                Numeric(8, 2),
                nullable=False,
                server_default=text("0"),
            ),
            CheckConstraint("amount >= 0", name="ck_order_amount"),
        )
        Table(
            "extra",
            m,
            Column("x", CHAR),
            Column("uid", Integer, ForeignKey("user.id")),
        )
        return m

    @classmethod
    def _get_model_schema(cls):
        """Return the MetaData representing the target model state.

        Differs from _get_db_schema in columns, constraints and types so
        autogenerate has changes to detect.
        """
        schema = cls.schema
        m = MetaData(schema=schema)
        Table(
            "user",
            m,
            Column("id", Integer, primary_key=True),
            Column("name", String(50), nullable=False),
            Column("a1", Text, server_default="x"),
        )
        Table(
            "address",
            m,
            Column("id", Integer, primary_key=True),
            Column("email_address", String(100), nullable=False),
            Column("street", String(50)),
            UniqueConstraint("email_address", name="uq_email"),
        )
        Table(
            "order",
            m,
            Column("order_id", Integer, primary_key=True),
            Column(
                "amount",
                Numeric(10, 2),
                nullable=True,
                server_default=text("0"),
            ),
            Column("user_id", Integer, ForeignKey("user.id")),
            CheckConstraint("amount > -1", name="ck_order_amount"),
        )
        Table(
            "item",
            m,
            Column("id", Integer, primary_key=True),
            Column("description", String(100)),
            Column("order_id", Integer, ForeignKey("order.order_id")),
            CheckConstraint("len(description) > 5"),
        )
        return m
class _ComparesFKs:
    """Mixin providing an assertion helper for foreign-key diff entries."""

    def _assert_fk_diff(
        self,
        diff,
        type_,
        source_table,
        source_columns,
        target_table,
        target_columns,
        name=None,
        conditional_name=None,
        source_schema=None,
        onupdate=None,
        ondelete=None,
        initially=None,
        deferrable=None,
    ):
        """Assert that *diff* is an FK diff tuple matching the given specs.

        ``conditional_name="servergenerated"`` means the expected constraint
        name is whatever the database reflected for it.
        """
        # the public API for ForeignKeyConstraint was not very rich
        # in 0.7, 0.8, so here we use the well-known but slightly
        # private API to get at its elements
        (
            fk_source_schema,
            fk_source_table,
            fk_source_columns,
            fk_target_schema,
            fk_target_table,
            fk_target_columns,
            fk_onupdate,
            fk_ondelete,
            fk_deferrable,
            fk_initially,
        ) = _fk_spec(diff[1])
        eq_(diff[0], type_)
        eq_(fk_source_table, source_table)
        eq_(fk_source_columns, source_columns)
        eq_(fk_target_table, target_table)
        eq_(fk_source_schema, source_schema)
        eq_(fk_onupdate, onupdate)
        eq_(fk_ondelete, ondelete)
        eq_(fk_initially, initially)
        eq_(fk_deferrable, deferrable)
        eq_([elem.column.name for elem in diff[1].elements], target_columns)
        if conditional_name is not None:
            if conditional_name == "servergenerated":
                # Compare against the name the backend actually generated.
                fks = inspect(self.bind).get_foreign_keys(source_table)
                server_fk_name = fks[0]["name"]
                eq_(diff[1].name, server_fk_name)
            else:
                eq_(diff[1].name, conditional_name)
        else:
            eq_(diff[1].name, name)
class AutogenTest(_ComparesFKs):
    """Base class for autogenerate tests: creates the DB schema once per
    class and a fresh MigrationContext/AutogenContext per test.
    """

    def _flatten_diffs(self, diffs):
        """Yield diff entries depth-first, flattening nested lists."""
        for d in diffs:
            if isinstance(d, list):
                yield from self._flatten_diffs(d)
            else:
                yield d

    @classmethod
    def _get_bind(cls):
        return config.db

    # Extra options merged into the MigrationContext configuration.
    configure_opts: Dict[Any, Any] = {}

    @classmethod
    def setup_class(cls):
        # m1 = the "database" schema (created for real); m2 = the model.
        staging_env()
        cls.bind = cls._get_bind()
        cls.m1 = cls._get_db_schema()
        cls.m1.create_all(cls.bind)
        cls.m2 = cls._get_model_schema()

    @classmethod
    def teardown_class(cls):
        cls.m1.drop_all(cls.bind)
        clear_staging_env()

    def setUp(self):
        self.conn = conn = self.bind.connect()
        ctx_opts = {
            "compare_type": True,
            "compare_server_default": True,
            "target_metadata": self.m2,
            "upgrade_token": "upgrades",
            "downgrade_token": "downgrades",
            "alembic_module_prefix": "op.",
            "sqlalchemy_module_prefix": "sa.",
            "include_object": _default_object_filters,
            "include_name": _default_name_filters,
        }
        if self.configure_opts:
            ctx_opts.update(self.configure_opts)
        self.context = context = MigrationContext.configure(
            connection=conn, opts=ctx_opts
        )
        self.autogen_context = api.AutogenContext(context, self.m2)

    def tearDown(self):
        self.conn.close()

    def _update_context(
        self, object_filters=None, name_filters=None, include_schemas=None
    ):
        """Mutate the per-test AutogenContext's filters/options and return it."""
        if include_schemas is not None:
            self.autogen_context.opts["include_schemas"] = include_schemas
        if object_filters is not None:
            self.autogen_context._object_filters = [object_filters]
        if name_filters is not None:
            self.autogen_context._name_filters = [name_filters]
        return self.autogen_context
class AutogenFixtureTest(_ComparesFKs):
    """Base class for tests that run one autogenerate comparison between two
    ad-hoc MetaData objects and inspect the resulting diffs/ops.
    """

    def _fixture(
        self,
        m1,
        m2,
        include_schemas=False,
        opts=None,
        object_filters=_default_object_filters,
        name_filters=_default_name_filters,
        return_ops=False,
        max_identifier_length=None,
    ):
        """Create m1 in the database, diff it against model m2, and return
        either the UpgradeOps (return_ops=True) or the plain diff list.
        """
        if max_identifier_length:
            # Temporarily shrink the dialect's identifier limit; restored
            # in the finally block below.
            dialect = self.bind.dialect
            existing_length = dialect.max_identifier_length
            dialect.max_identifier_length = (
                dialect._user_defined_max_identifier_length
            ) = max_identifier_length
        try:
            self._alembic_metadata, model_metadata = m1, m2
            # m1 may be a single MetaData or a list of them.
            for m in util.to_list(self._alembic_metadata):
                m.create_all(self.bind)
            with self.bind.connect() as conn:
                ctx_opts = {
                    "compare_type": True,
                    "compare_server_default": True,
                    "target_metadata": model_metadata,
                    "upgrade_token": "upgrades",
                    "downgrade_token": "downgrades",
                    "alembic_module_prefix": "op.",
                    "sqlalchemy_module_prefix": "sa.",
                    "include_object": object_filters,
                    "include_name": name_filters,
                    "include_schemas": include_schemas,
                }
                if opts:
                    ctx_opts.update(opts)
                self.context = context = MigrationContext.configure(
                    connection=conn, opts=ctx_opts
                )
                autogen_context = api.AutogenContext(context, model_metadata)
                uo = ops.UpgradeOps(ops=[])
                autogenerate._produce_net_changes(autogen_context, uo)
                if return_ops:
                    return uo
                else:
                    return uo.as_diffs()
        finally:
            if max_identifier_length:
                dialect = self.bind.dialect
                dialect.max_identifier_length = (
                    dialect._user_defined_max_identifier_length
                ) = existing_length

    def setUp(self):
        staging_env()
        self.bind = config.db

    def tearDown(self):
        # Drop whatever _fixture created, if _fixture ran at all.
        if hasattr(self, "_alembic_metadata"):
            for m in util.to_list(self._alembic_metadata):
                m.drop_all(self.bind)
        clear_staging_env()
| sqlalchemy/alembic | alembic/testing/suite/_autogen_fixtures.py | _autogen_fixtures.py | py | 9,880 | python | en | code | 2,219 | github-code | 36 | [
{
"api_name": "typing.Set",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.event.listens_for",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",... |
13895020960 | import matplotlib.pyplot as plt
import numpy as np
x = np.array([[1,0],[0,1],[0,-1],[-1,0],[0,2],[0,-2],[-2,0]])
y = np.array([-1,-1,-1,1,1,1,1])
def z(X):
    """Feature transform: (x1, x2) -> (x2^2 - 2*x1 - 2, x1^2 - 2*x2 - 1)."""
    x1, x2 = X[0], X[1]
    first = x2 * x2 - 2 * x1 - 2
    second = x1 * x1 - 2 * x2 - 1
    return [first, second]
# Map every 2-D sample through the feature transform z.
T = np.array([z(i) for i in x])
for i in T:
    print(i)
from sklearn import svm
# Hard-margin-ish linear SVM in the transformed feature space.
clf = svm.SVC(kernel = 'linear', C = 1000)
clf.fit(T,y)
# Scatter the transformed points, red for label +1 and blue for -1.
for i, j in enumerate(T):
    t1, t2 = j[0], j[1]
    if y[i] == 1:
        plt.scatter(t1, t2, c='r', edgecolors='k')
        plt.annotate('1', (t1,t2), fontsize=16)
    elif y[i] == -1:
        plt.scatter(t1, t2, c='b', edgecolors='k')
        plt.annotate('-1', (t1,t2), fontsize=16)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
           linestyles=['--', '-', '--'])
# plot support vectors
ax.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100,
           linewidth=1, facecolors='none')
# Save the figure, then display it interactively.
plt.savefig('1.png')
plt.show()
| kevinliu726/MachineLearningTechnique | hw1/1.py | 1.py | py | 1,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number"... |
205686342 | import tensorflow as tf
import numpy as np
import gym
from gym.wrappers import Monitor
import random
import os
import time
def Policy(action_cnn, state, sess, epsilon, num_actions=4):
    """Epsilon-greedy action distribution from the Q-network's output.

    Each action gets probability epsilon/num_actions; the greedy action
    (argmax of the network's predictions for *state*) gets 1-epsilon extra.
    """
    q_values = sess.run(action_cnn.preds, {action_cnn.input: state[np.newaxis, :]})
    greedy = np.argmax(q_values)
    probs = np.ones(num_actions) * epsilon / num_actions
    probs[greedy] += (1 - epsilon)
    return probs
def DQL(env,sess,action_cnn,target_cnn,num_episodes,epsilons,discount_factor,replay_mem_size,batch_size,C,record,decay_rate,algo,make_video = True):
saver = tf.train.Saver()
# Load a previous checkpoint if we find one
ckpt = tf.train.get_checkpoint_state(os.path.dirname('Breakoutcheckpoints/'))
if ckpt and ckpt.model_checkpoint_path:
print("Loading Checkpoint")
saver.restore(sess, ckpt.model_checkpoint_path)
decay_count = tf.train.global_step(sess,action_cnn.global_step)
print(decay_count)
#Initializing Replay memory
D = []
#preprocess = Preprocess()
state = env.reset()
state = preprocess(state,sess)
state = np.stack([state] * 4, axis=2)
print("populating replay memory")
start = time.time()
for i in range(50000):
action_probs = Policy(action_cnn,state,sess,epsilons[min(decay_count,decay_rate-1)],env.action_space.n)
possible_actions = np.arange(env.action_space.n)
action = np.random.choice(possible_actions, p = action_probs)
new_state,reward,done, _ = env.step(action)
new_state = preprocess(new_state,sess)
new_state = np.append(state[:,:,1:],new_state[:,:,np.newaxis],axis = 2)
D.append((state,action,reward,new_state,done))
if done:
state = env.reset()
state = preprocess(state,sess)
state = np.stack([state] * 4, axis=2)
else:
state = new_state
print("Been There Done That")
print(time.time() - start)
if make_video:
env = Monitor(env, directory='Breakoutvideos', video_callable=lambda count: count % record == 0, resume=True)
losses = []
running_mean = 0
for i in range(num_episodes):
saver.save(sess, 'Breakoutcheckpoints/DQN')
state = env.reset()
state = preprocess(state,sess)
state = np.stack([state] * 4, axis=2)
print("episode: ",i)
ep_reward = 0
loss = None
j =0
while True:
print("\rstep {}".format(j),end = "")
sys.stdout.flush()
j+=1
if (decay_count)%C == 0:
copy_op = copy_cnn_params(action_cnn,target_cnn)
sess.run(copy_op)
print("Copied parameters to target network")
action_probs = Policy(action_cnn,state,sess,epsilons[min(decay_count,decay_rate-1)],env.action_space.n)
possible_actions = np.arange(env.action_space.n)
action = np.random.choice(possible_actions, p = action_probs)
new_state,reward,done,_ = env.step(action)
new_state = preprocess(new_state,sess)
new_state = np.append(state[:,:,1:],new_state[:,:,np.newaxis],axis = 2)
ep_reward += reward
if(len(D)==replay_mem_size):
D.pop(0)
D.append((state,action,reward,new_state,done))
batch = random.sample(D,batch_size)
states, actions, rewards, new_states, dones = zip(*batch)
states = np.array(states)
actions = np.array(actions)
rewards = np.array(rewards)
new_states = np.array(new_states)
rewards = np.array(rewards)
flags = np.invert(dones)
#Q Learning
if algo == 'DQ':
y = rewards + flags*discount_factor*np.max(sess.run(target_cnn.preds,
{target_cnn.input:new_states}),axis=1)
#Double Q Learning
elif algo == 'DDQ':
greedy_actions = np.argmax(sess.run(action_cnn.preds,{action_cnn.input:new_states}),axis = 1)
y = rewards + flags*discount_factor*((sess.run(target_cnn.preds,{target_cnn.input:new_states}))[np.arange(BATCH_SIZE),greedy_actions])
loss,_ = sess.run([action_cnn.loss,action_cnn.opti],{action_cnn.input:states,action_cnn.output:y,
action_cnn.actions:actions})
state = new_state
decay_count += 1
if done:
break
print("Reward: ",ep_reward)
running_mean = 0.9*running_mean + 0.1*ep_reward
print("Loss: ",loss) | kabirahuja2431/DeepQLearning | dql.py | dql.py | py | 3,990 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.newaxis",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
... |
33082220746 | import detectron2
from detectron2.utils.logger import setup_logger
import numpy as np
import os, json, cv2, random
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import Visualizer
setup_logger()
# if your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")
# vidor
vidor_obj_to_idx = {
'adult': 0, 'aircraft': 1, 'antelope': 2, 'baby': 3, 'baby_seat': 4,
'baby_walker': 5, 'backpack': 6, 'ball/sports_ball': 7, 'bat': 8, 'bear': 9,
'bench': 10, 'bicycle': 11, 'bird': 12, 'bottle': 13, 'bread': 14,
'bus/truck': 15, 'cake': 16, 'camel': 17, 'camera': 18, 'car': 19,
'cat': 20, 'cattle/cow': 21, 'cellphone': 22, 'chair': 23, 'chicken': 24,
'child': 25, 'crab': 26, 'crocodile': 27, 'cup': 28, 'dish': 29,
'dog': 30, 'duck': 31, 'electric_fan': 32, 'elephant': 33, 'faucet': 34,
'fish': 35, 'frisbee': 36, 'fruits': 37, 'guitar': 38, 'hamster/rat': 39,
'handbag': 40, 'horse': 41, 'kangaroo': 42, 'laptop': 43, 'leopard': 44,
'lion': 45, 'microwave': 46, 'motorcycle': 47, 'oven': 48, 'panda': 49,
'penguin': 50, 'piano': 51, 'pig': 52, 'rabbit': 53, 'racket': 54,
'refrigerator': 55, 'scooter': 56, 'screen/monitor': 57, 'sheep/goat': 58, 'sink': 59,
'skateboard': 60, 'ski': 61, 'snake': 62, 'snowboard': 63, 'sofa': 64,
'squirrel': 65, 'stingray': 66, 'stool': 67, 'stop_sign': 68, 'suitcase': 69,
'surfboard': 70, 'table': 71, 'tiger': 72, 'toilet': 73, 'toy': 74,
'traffic_light': 75, 'train': 76, 'turtle': 77, 'vegetables': 78, 'watercraft': 79
}
def vidor_to_coco_format(anno_dir, split):
dataset_dicts = []
for dirs in os.listdir(os.path.join(anno_dir, 'annotation', split)):
for files in os.listdir(os.path.join(anno_dir, 'annotation', split, dirs)):
with open(os.path.join(anno_dir, 'annotation', split, dirs, files)) as f:
anno = json.load(f)
tid_to_obj = {
obj_tid['tid']:obj_tid['category'] for obj_tid in anno['subject/objects']
}
record = {}
record['height'] = anno['height']
record['width'] = anno['width']
for bbox_idx, bboxes in enumerate(anno['trajectories']):
record['file_name'] = os.path.join(
anno_dir, 'image', anno['video_id'], '{0:0=5d}.jpg'.format(bbox_idx)
)
record['image_id'] = '{0:0=5d}'.format(bbox_idx)
objs = []
for bbox in bboxes:
obj = {
'bbox': [bbox['bbox']['xmin'],
bbox['bbox']['ymin'],
bbox['bbox']['xmax'],
bbox['bbox']['ymax']],
'bbox_mode': BoxMode.XYXY_ABS,
'category_id': vidor_obj_to_idx[tid_to_obj[bbox['tid']]]
}
objs.append(obj)
record['annotations'] = objs
dataset_dicts.append(record)
return dataset_dicts
if __name__=='__main__':
# vidor_to_coco_format
anno_dir = "/home/t2_u1/data/vidor/"
for d in ["training", "validation"]:
DatasetCatalog.register("vidor_" + d, lambda d=d:vidor_to_coco_format(anno_dir, d))
MetadataCatalog.get("vidor_" + d).set(
thing_classes=[
'adult', 'aircraft', 'antelope', 'baby', 'baby_seat',
'baby_walker', 'backpack', 'ball/sports_ball', 'bat', 'bear',
'bench', 'bicycle', 'bird', 'bottle', 'bread',
'bus/truck', 'cake', 'camel', 'camera', 'car',
'cat', 'cattle/cow', 'cellphone', 'chair', 'chicken',
'child', 'crab', 'crocodile', 'cup', 'dish',
'dog', 'duck', 'electric_fan', 'elephant', 'faucet',
'fish', 'frisbee', 'fruits', 'guitar', 'hamster/rat',
'handbag', 'horse', 'kangaroo', 'laptop', 'leopard',
'lion', 'microwave', 'motorcycle', 'oven', 'panda',
'penguin', 'piano', 'pig', 'rabbit', 'racket',
'refrigerator', 'scooter', 'screen/monitor', 'sheep/goat', 'sink',
'skateboard', 'ski', 'snake', 'snowboard', 'sofa',
'squirrel', 'stingray', 'stool', 'stop_sign', 'suitcase',
'surfboard', 'table', 'tiger', 'toilet', 'toy',
'traffic_light', 'train', 'turtle', 'vegetables', 'watercraft'
]
)
vidor_metadata = MetadataCatalog.get("vidor_training")
dataset_dicts = vidor_to_coco_format(anno_dir, "training")
with open("./vidor_coco_format.json", "w") as f:
j = json.dump(dataset_dicts, f)
# with open("./vidor_coco_format.json", "r") as f:
# dataset_dicts = json.load(f)
num_images_to_show = 3
for d in random.sample(dataset_dicts, num_images_to_show):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=vidor_metadata, scale=0.5)
out = visualizer.draw_dataset_dict(d)
cv2.imshow('sample', out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows() | sangminwoo/Temporal-Span-Proposal-Network-VidVRD | detectron/vidor_anno_to_coco_format.py | vidor_anno_to_coco_format.py | py | 4,815 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "detectron2.utils.logger.setup_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",... |
10829381285 | #!/usr/bin/env python
import os, sys
import json
import messytables
import subprocess
from dgitcore.helper import cd
from dgitcore.plugins.instrumentation import InstrumentationBase
from dgitcore.config import get_config
def run(cmd):
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True)
output = output.decode('utf-8')
output = output.strip()
return output
def repo_origin(filename, what=['Push URL']):
with cd(os.path.dirname(filename)):
cmd = "git remote show origin"
output = run(cmd)
#* remote origin
#Fetch URL: git@github.com:jaredpar/VsVim.git
#Push URL: git@github.com:jaredpar/VsVim.git
#HEAD branch: master
#Remote branches:
response = {}
output = output.split("\n")
output = output[1:]
for o in output:
for w in what:
if w in o:
response[w] = o[o.index(":")+1:]
return response
def repo_remote_url(filename):
with cd(os.path.dirname(filename)):
cmd = "git config --get remote.origin.url"
output = run(cmd)
return {'remote.origin.url': output.strip()}
def executable_commit(filename,
what=['commit', 'username', 'useremail', 'date']):
mapping = {
'commit': '%H',
'username': '%cn',
'useremail': '%ce',
'date': '%cd'
}
missing = [mapping[w] for w in what if w not in mapping]
if len(missing) > 0:
print("Cannot gather commit attributes of executable", missing)
raise Exception("Invalid specification")
codes = ",".join([mapping[w] for w in what if w in mapping])
with cd(os.path.dirname(filename)):
cmd = 'git log -n 1 --date=iso --pretty="%s" -- %s ' %(codes, filename)
output = run(cmd)
output = output.strip()
output = output.split(",")
return {what[i]: output[i] for i in range(len(what))}
return {}
def executable_repopath(filename):
with cd(os.path.dirname(filename)):
cmd = 'git rev-parse --show-prefix'
output = run(cmd)
output = output.strip()
return {
'path': os.path.join(output, os.path.basename(filename))
}
def executable_filetype(filename):
with cd(os.path.dirname(filename)):
cmd = '/usr/bin/file ' + filename
output = run(cmd)
output = output.strip()
output = output[output.index(":")+1:]
return {
'filetype': output
}
def get_metadata(args):
filename = args[0]
metadata = {'cmd': ' '.join(args) }
metadata.update(repo_remote_url(filename))
metadata.update(executable_commit(filename))
metadata.update(executable_repopath(filename))
metadata.update(executable_filetype(filename))
return metadata
class ExecutableInstrumentation(InstrumentationBase):
"""Instrumentation to extract executable related summaries such as the git commit, nature of executable, parameters etc.
"""
def __init__(self):
super(ExecutableInstrumentation, self).__init__('executable',
'v0',
"Executable analysis")
def update(self, config):
if 'executables' in config:
for i in range(len(config['executables'])):
args = config['executable'][i]['args']
metadata = get_metdata(args)
config['executable'][i].update(metadata)
return config
def setup(mgr):
obj = ExecutableInstrumentation()
mgr.register('instrumentation', obj)
if __name__ == "__main__":
viz = '/home/pingali/analytics/politics/bin/mumbai-visualize.py'
response = run_executable([viz])
print(json.dumps(response, indent=4))
| pingali/dgit | dgitcore/contrib/instrumentations/executable.py | executable.py | py | 3,917 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "subprocess.check_output",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "dgitcore.helper.cd",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.... |
21119796127 | import heapq
from math import inf
from typing import List
class Solution:
def minimumTime(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
if grid[0][1] > 1 and grid[1][0] > 1:
return -1
dis = [[inf] * n for _ in range(m)]
dis[0][0] = 0
h = [(0, 0, 0)]
while True:
d, i, j = heapq.heappop(h)
if i == m - 1 and j == n - 1:
return d
for x, y in (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1):
if 0 <= x < m and 0 <= y < n:
nd = max(d + 1, grid[x][y])
nd += (nd - x - y) % 2
if nd < dis[x][y]:
dis[x][y] = nd
heapq.heappush(h, (nd, x, y))
if __name__ == '__main__':
grid = [[0,1,3,2],[5,1,2,5],[4,3,8,6]]
grid = [[0,2,4],[3,2,1],[1,0,4]]
rtn = Solution().minimumTime(grid)
print(rtn) | plattanus/leetcodeDAY | python/2577. 在网格图中访问一个格子的最少时间.py | 2577. 在网格图中访问一个格子的最少时间.py | py | 961 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "math.inf",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "heapq.heappop",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number":... |
39853467321 | import cv2 as cv
import numpy as np
im = cv.imread('car.png', 0)
kernel = np.ones((5, 5), np.int16) * 1 / 25
out = cv.filter2D(im, -1, kernel)
print(out)
for i in range(len(out)):
for j in range(len(out[0])):
if out[i][j] < 128:
out[i][j] = 255
else:
out[i][j] = 0
cv.imshow('output', out)
cv.waitKey(0)
cv.destroyAllWindows()
| RonaldCDO/Python | python_opencv/grayscale_images.py | grayscale_images.py | py | 376 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.int16",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.filter2D",
"line_number":... |
4941442035 | import requests
import configparser
import json
cfg = configparser.ConfigParser()
cfg.read("config.ini")
_api_url = str(cfg["default"]["api_url"])
def test_post_scm_repo():
response = requests.post(f"{_api_url}/scm-repos", json={"url": "abc"})
assert response.status_code == 201
body = response.json()
assert body["url"] == "abc"
assert body["id"] > 0
| shmenkins/acceptance-tests | shmenkins/at/api_test.py | api_test.py | py | 378 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 11,
"usage_type": "call"
}
] |
16103131667 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='ISY994v5',
version='0.9.7',
description='ISY99 Controller Rest and Websocket client v5 firmware',
author='Michael Cumming',
author_email='mike@4831.com',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/mjcumming/ISY994v5',
keywords=['INSTEON', 'ISY994', 'ISY', 'Universal Devices'],
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'aiohttp',
]
)
| mjcumming/ISY994v5 | setup.py | setup.py | py | 804 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 17,
"usage_type": "call"
}
] |
10514105287 | import allure
from selenium.webdriver.common.by import By
from extensions import ui_actions
from utilities.Base import Base
@allure.step("Business Flow: Login")
def login(email, password):
ui_actions.update_text(Base.LOGIN_PAGE.textbox_user_email, email)
ui_actions.update_text(Base.LOGIN_PAGE.textbox_password, password)
ui_actions.click(Base.LOGIN_PAGE.button_login)
@allure.step("Business Flow: Search By Email")
def search_for_email(email):
ui_actions.update_text(Base.CUSTOMER.textbox_search_email, email)
ui_actions.click(Base.CUSTOMER.btn_search)
@allure.step("Business Flow: Search By Name")
def search_for_name(first_name, last_name):
ui_actions.update_text(Base.CUSTOMER.textbox_search_firstName, first_name)
ui_actions.update_text(Base.CUSTOMER.textbox_search_lastName, last_name)
ui_actions.click(Base.CUSTOMER.btn_search)
@allure.step("Business Flow: Search For Name")
def search_name_on_table(full_name):
flag = False
for r in range(1, Base.CUSTOMER.get_row_number + 1):
table = Base.CUSTOMER.table
name = ui_actions.get_element_text(
table.find_element(
By.XPATH,
"//table[@id='customers-grid']/tbody/tr[" + str(r) + "]/td[3]",
)
)
if name == full_name:
flag = True
break
return flag
@allure.step("Business Flow: Navigate To The Customer Table Page")
def goto_customer_table():
ui_actions.click(Base.MEGA_MENU.btn_customers_prm)
ui_actions.click(Base.MEGA_MENU.btn_customers_sub)
@allure.step("Business Flow: Fill In Customer Form")
def fill_customer_form(customer):
ui_actions.update_text(Base.CREATE_CUSTOMER.textbox_email, customer['email'])
ui_actions.update_text(Base.CREATE_CUSTOMER.textbox_password, customer['password'])
ui_actions.update_text(Base.CREATE_CUSTOMER.textbox_first_name, customer['first_name'])
ui_actions.update_text(Base.CREATE_CUSTOMER.textbox_last_name, customer['last_name'])
if customer['gender'] == "male":
ui_actions.click(Base.CREATE_CUSTOMER.textbox_gender_male)
elif customer['gender'] == "female":
ui_actions.click(Base.CREATE_CUSTOMER.textbox_gender_female)
ui_actions.update_text(Base.CREATE_CUSTOMER.textbox_dateOfBirth, customer['date_birth'])
ui_actions.update_text(Base.CREATE_CUSTOMER.textbox_company, customer['company'])
if customer['tax_exempt']:
ui_actions.click(Base.CREATE_CUSTOMER.checkbox_taxExempt)
ui_actions.click(Base.CREATE_CUSTOMER.div_newsletter)
if customer['newsletter'] == "Test store 2":
ui_actions.click(Base.CREATE_CUSTOMER.li_newsletter_test_store_2_opt)
ui_actions.click(Base.CREATE_CUSTOMER.div_customer_role)
if customer['role'] == "Administrators":
ui_actions.click(Base.CREATE_CUSTOMER.li_Administrators_role_opt)
ui_actions.select_element_by_text(Base.CREATE_CUSTOMER.select_vendor_id, customer['vendor_id'])
if not customer['active']:
ui_actions.click(Base.CREATE_CUSTOMER.checkbox_active)
ui_actions.update_text(Base.CREATE_CUSTOMER.textarea_admin_comment, customer['comment'])
| liorc955/Python-automation | workflows/web_flows.py | web_flows.py | py | 3,153 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "extensions.ui_actions.update_text",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "extensions.ui_actions",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "utilities.Base.Base.LOGIN_PAGE",
"line_number": 10,
"usage_type": "attribute"
}... |
41917076519 | from flask import jsonify, make_response
import src.flaskr.controllers.turn_logic as turn_logic
from src.flaskr.controllers.uuid_supplier import UuidSupplier
import src.flaskr.controllers.winner_logic as winner_logic
from src.flaskr.models.game_model import Game, GameSchema, MoveSchema
from src.flaskr.persistence.repositories.game_repository_api import GameRepositoryApi
GAME_SCHEMA = GameSchema()
MOVE_SCHEMA = MoveSchema()
def games__get__list(request):
# This is how you get the value of the query param
# with key 'username'. Returns None if no value passed
username = request.args.get('username')
repo = GameRepositoryApi()
games = repo.select_all_games()
if username == None:
return make_response([g.id for g in games], 200)
filtered_games = [g for g in games if g.player_a == username or g.player_b == username]
return make_response([g.id for g in filtered_games], 200)
def games__post__create(request):
# creates a new game object based on the data in the request
body = request.json
id = UuidSupplier().get()
new_game = Game(id, body['opponent'], body['challenger'], [])
repo = GameRepositoryApi()
repo.insert(new_game)
repo.commit()
return make_response(jsonify(id), 201)
def some_game__get__info(request, game_id):
repo = GameRepositoryApi()
game = repo.select_game(game_id)
if (game == None):
return make_response('No game with this ID was found on the server.', 404)
return make_response(GAME_SCHEMA.dump(game), 200)
def some_game__get__turn(request, game_id):
repo = GameRepositoryApi()
game = repo.select_game(game_id)
if game is None:
return make_response('No game with ID ' + game_id, 404)
turn = turn_logic.derive_turn(game)
return make_response(jsonify(turn), 200)
def some_game__get__winner(request, game_id):
repo = GameRepositoryApi()
game = repo.select_game(game_id)
if game is None:
msg = 'No game with this ID was found on the server.'
return make_response(msg, 404)
status = winner_logic.find_winner(game)
return make_response(jsonify(status), 200)
def some_game__post__move(request, game_id):
# gets the request body and deserializes it into a Move model object
body = request.json
move = MOVE_SCHEMA.load(body)
if not (move.x in [0,1,2] and move.y in [0,1,2]):
msg = 'Invalid move coordinates.'
return make_response(msg, 400)
repo = GameRepositoryApi()
game = repo.select_game(game_id)
if game is None:
msg = 'No game with this ID was found on the server.'
return make_response(msg, 404)
turn = turn_logic.derive_turn(game)
is_occupied = len([m for m in game.moves if m.x == move.x and m.y == move.y]) > 0
is_game_decided = winner_logic.find_winner(game) != 'undecided'
is_invalid_move = turn != move.occupier or is_occupied or is_game_decided
if (is_invalid_move):
msg = 'Illegal move. This player does not have the turn or this field is already occupied.'
return make_response(msg, 403)
repo.insert_move(move, game_id)
repo.commit()
updated_game = repo.select_game(game_id)
return make_response(GAME_SCHEMA.dump(updated_game), 201)
| eriklong95/tiktak | src/flaskr/controllers/games_controller.py | games_controller.py | py | 3,297 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.flaskr.models.game_model.GameSchema",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "src.flaskr.models.game_model.MoveSchema",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "src.flaskr.persistence.repositories.game_repository_api.GameReposi... |
70062828584 | from flask import Blueprint, render_template, request, current_app,redirect
from flask import url_for, flash
from jobweb.decorators import admin_required
from jobweb.models import db,User
from jobweb.forms import UserRegisterForm, CompanyRegisterForm,UserEditForm, CompanyEditForm
admin = Blueprint('admin', __name__, url_prefix='/admin')
@admin.route('/')
@admin_required
def admin_index():
return render_template('admin/index.html')
@admin.route('/users')
@admin_required
def users():
page = request.args.get('page', default=1, type=int)
pagination = User.query.paginate(
page=page,
per_page=current_app.config['ADMIN_PER_PAGE'],
error_out = False
)
return render_template('admin/users.html', pagination=pagination)
@admin.route('/users/create_user', methods=['GET', 'POST'])
@admin_required
def create_user():
form = UserRegisterForm()
if form.is_submitted():
form.create_user()
flash('create user success', 'success') #添加求职者成功
return redirect(url_for('admin.users'))
return render_template('admin/create_user.html', form=form)
@admin.route('/users/create_company', methods=["GET","POST"])
@admin_required
def create_company():
form = CompanyRegisterForm()
if form.is_submitted():
form.create_companyProfile()
flash('create company success','success') #添加企业成功
return redirect(url_for('admin.users'))
return render_template('admin/create_company.html', form=form)
@admin.route('/users/<int:user_id>/edit', methods=['GET', 'POST'])
@admin_required
def edit_user(user_id):
user = User.query.get_or_404(user_id)
if user.is_company:
form = CompanyEditForm(obj=user)
else:
form = UserEditForm(obj=user)
if form.validate_on_submit():
form.update(user)
flash('edit success', 'success')
return redirect(url_for('admin.users'))
if user.is_company:
form.website.data = user.companydetail.website
form.desc.data = user.companydetail.desc
return render_template('admin/edit_user.html', form=form, user=user)
@admin.route('/users/<int:user_id>/disable', methods=['GET','POST'])
@admin_required
def disable_user(user_id):
user = User.query.get_or_404(user_id)
if user.is_disable:
user.is_disable = False
flash('enable user', 'success')
else:
user.is_disable = True
flash('disable user', 'success')
db.session.add(user)
db.session.commit()
return redirect(url_for('admin.users'))
| LouPlus/jobplus3-9 | jobweb/handlers/admin.py | admin.py | py | 2,498 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jobweb.decorators.admin_required",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": ... |
21325799107 | from django_mongoengine.queryset import QuerySet
from .config import (
HARD_DELETE,
DELETED_INVISIBLE,
DELETED_ONLY_VISIBLE,
DELETED_VISIBLE,
DELETED_VISIBLE_BY_FIELD,
)
class SafeDeletionQuerySet(QuerySet):
"""Default queryset for the SafeDeletionQuerySetManager.
Takes care of "lazily evaluating" safedeletion QuerySets. QuerySets passed
within the ``SafeDeletionQuerySetManager`` will have all of the models available.
The deleted policy is evaluated at the very end of the chain when the
QuerySet itself is evaluated.
"""
_safedeletion_visibility = DELETED_INVISIBLE
_safedeletion_visibility_field = "pk"
_safedeletion_filter_applied = False
# pylint: disable=arguments-differ
# pylint: disable=inconsistent-return-statements
def delete(
self,
force_policy=None,
signal_kwargs=None,
write_concern=None,
_from_doc_delete=False,
cascade_refs=None,
):
doc = self._document
# pylint: disable=protected-access
current_policy = (
doc._safedeletion_policy if (force_policy is None) else force_policy
)
if current_policy == HARD_DELETE:
return super().delete(write_concern, _from_doc_delete, cascade_refs)
if write_concern is None:
write_concern = {}
for obj in self.all():
obj.delete(
force_policy=force_policy, signal_kwargs=signal_kwargs, **write_concern
)
def undelete(self, force_policy=None, **write_concern):
for obj in self.all():
obj.undelete(force_policy, **write_concern)
def __call__(self, q_obj=None, **query):
force_visibility = getattr(self, "_safedeletion_force_visibility", None)
visibility = (
force_visibility
if force_visibility is not None
else self._safedeletion_visibility
)
if not self._safedeletion_filter_applied and visibility in (
DELETED_INVISIBLE,
DELETED_VISIBLE_BY_FIELD,
DELETED_ONLY_VISIBLE,
):
visibility_query = (
{"deleted": None}
if visibility in (DELETED_INVISIBLE, DELETED_VISIBLE_BY_FIELD)
else {"deleted__ne": None}
)
query.update(visibility_query)
self._safedeletion_filter_applied = True
return super().__call__(q_obj, **query)
# pylint: disable=arguments-differ
def all(self, force_visibility=None):
"""Override so related managers can also see the deleted models.
A model's m2m field does not easily have access to `all_objects` and
so setting `force_visibility` to True is a way of getting all of the
models. It is not recommended to use `force_visibility` outside of related
models because it will create a new queryset.
Args:
force_visibility: Force a deletion visibility. (default: {None})
"""
if force_visibility is not None:
# pylint: disable=attribute-defined-outside-init
self._safedeletion_force_visibility = force_visibility
return super().all()
def _check_field_filter(self, **kwargs):
"""Check if the visibility for DELETED_VISIBLE_BY_FIELD needs t be put into effect.
DELETED_VISIBLE_BY_FIELD is a temporary visibility flag that changes
to DELETED_VISIBLE once asked for the named parameter defined in
`_safedeletion_force_visibility`. When evaluating the queryset, it will
then filter on all models.
"""
if (
self._safedeletion_visibility == DELETED_VISIBLE_BY_FIELD
and self._safedeletion_visibility_field in kwargs
):
# pylint: disable=attribute-defined-outside-init
self._safedeletion_force_visibility = DELETED_VISIBLE
def filter(self, *q_objs, **query):
queryset = self.clone()
# pylint: disable=protected-access
queryset._check_field_filter(**query)
return super(SafeDeletionQuerySet, queryset).filter(*q_objs, **query)
def get(self, *q_objs, **query):
queryset = self.clone()
# pylint: disable=protected-access
queryset._check_field_filter(**query)
return super(SafeDeletionQuerySet, queryset).get(*q_objs, **query)
def clone(self):
clone = super().clone()
# pylint: disable=protected-access
clone._safedeletion_visibility = self._safedeletion_visibility
clone._safedeletion_visibility_field = self._safedeletion_visibility_field
clone._safedeletion_filter_applied = self._safedeletion_filter_applied
if hasattr(self, "_safedeletion_force_visibility"):
# pylint: disable=protected-access
clone._safedeletion_force_visibility = self._safedeletion_force_visibility
return clone
| ngocngoan/django-safedeletion-mongoengine | safedeletion_mongoengine/queryset.py | queryset.py | py | 4,946 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "django_mongoengine.queryset.QuerySet",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "config.DELETED_INVISIBLE",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "config.HARD_DELETE",
"line_number": 41,
"usage_type": "name"
},
{
"... |
984949848 | """empty message
Revision ID: dde24bfed677
Revises: 0c89b9c0a9cd
Create Date: 2020-04-07 16:50:44.747914
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dde24bfed677'
down_revision = '0c89b9c0a9cd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('workout',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('program', sa.Integer(), nullable=True),
sa.Column('name', sa.String(length=120), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=1200), nullable=False),
sa.ForeignKeyConstraint(['program'], ['program.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('exercise',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('workout', sa.Integer(), nullable=True),
sa.Column('name', sa.String(length=40), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=300), nullable=False),
sa.Column('frequency_lower', sa.Integer(), nullable=True),
sa.Column('frequency_upper', sa.Integer(), nullable=True),
sa.Column('completetion_type', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['workout'], ['workout.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('exercise')
op.drop_table('workout')
# ### end Alembic commands ###
| torbjomg/pwa_flask_app | migrations/versions/dde24bfed677_.py | dde24bfed677_.py | py | 1,867 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
30337199579 | import Gui
import networkx as nx
import random
def ant(graph:nx.Graph, psize:int, node:tuple, pgraph, pherom, pheromcoef, update):
ants = []
for i in range(psize):
ants.append([set(),0])
findway(graph, pgraph , ants[-1], node, pheromcoef)
update()
s = node
for n in ants[-1][0]:
if n != s:
pgraph[n][s]["weight"] += float(pherom)/ants[-1][1]
s = n
ants = sorted(ants,key=lambda q: q[1])
return ants[0][0],ants[0][1]
def findway(graph:nx.Graph, pgraph:nx.Graph, ant, node, pheromcoef):
    """Let a single ant walk `graph` from `node`, trying to close a full tour.

    Roulette-wheel selection: at each step an unvisited neighbour is chosen
    with probability proportional to
    ``pheromcoef * pheromone + edge_weight``.  On success ``ant`` becomes
    ``[tour_node_list, accumulated_cost]``; on failure ``ant[0]`` is None
    (``ant[1]`` still holds the partial cost accumulated so far).

    NOTE(review): if the ant reaches a node whose neighbours are all already
    visited (and the tour is not yet closeable), nothing in the loop body
    changes state, so the ``while`` never terminates — TODO confirm the input
    graphs are always complete enough to rule this out.
    NOTE(review): the closing edge back to `node` is appended without adding
    its weight to ``ant[1]`` — presumably intentional, verify against caller.
    """
    s = [node]                    # tour built so far (ordered)
    visited = set([node])         # nodes already on the tour
    # A closed tour over N nodes has N+1 entries (start node repeated).
    while len(s) < graph.number_of_nodes()+1 and s:
        # Normalisation constant: total attractiveness of unvisited neighbours.
        sm = 0
        for nei in graph[s[-1]].items():
            if nei[0] not in visited:
                sm += float(pheromcoef)*pgraph[s[-1]][nei[0]]["weight"]+nei[1]["weight"]
        c = random.uniform(0,1)
        p = 0
        # Second pass: walk the cumulative distribution until it exceeds c.
        for nei in graph[s[-1]].items():
            if nei[0] not in visited:
                p += float(pheromcoef)*pgraph[s[-1]][nei[0]]["weight"]+nei[1]["weight"]
                if c <= p/sm:
                    s.append(nei[0])
                    visited.add(nei[0])
                    ant[1] += nei[1]["weight"]
                    break
            # All other nodes visited: close the tour back to the start node.
            if nei[0] == node and len(s) == graph.number_of_nodes():
                s.append(nei[0])
    if len(s) == graph.number_of_nodes()+1:
        ant[0] = s
    else:
        ant[0] = None
if __name__ == '__main__':
w = Gui.Window(ant, 0)
w.run() | rawr-0/ants_algorithm_lab | main.py | main.py | py | 1,524 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "networkx.Graph",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "networkx.Graph",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "random.uniform",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "Gui.Window",
... |
12607805013 | from gevent import monkey
monkey.patch_all()
from bs4 import BeautifulSoup
import requests
from fake_useragent import UserAgent
import gevent
from gevent.queue import Queue
import os
def getPictures():
    """Worker greenlet: drain the shared `urls` queue and download albums.

    Each queue item is ``[album_url, album_title]``; the album page is
    fetched, every ``<a>`` inside the entry-content div is treated as an
    image link, and images are saved under ``F://meiziba//<title>`` as
    1.jpg, 2.jpg, ...
    """
    while not urls.empty():
        album_url, title = urls.get_nowait()
        if not os.path.exists(f'F://meiziba//{title}'):
            os.mkdir(f'F://meiziba//{title}')
        page = requests.get(album_url, headers={'User-Agent': UserAgent().random})
        album_soup = BeautifulSoup(page.text, 'lxml')
        links = album_soup.find(
            'div', class_='entry-content u-clearfix text_indent_2').find_all('a')
        for index, link in enumerate(links, start=1):
            with open(f'F://meiziba//{title}//{index}.jpg', 'wb') as out:
                out.write(requests.get(link['href']).content)
if __name__ == '__main__':
    # Shared work queue consumed by the getPictures() worker greenlets.
    urls = Queue()
    URL = 'http://www.mziba.cn/page_1.html'
    headers = {'User-Agent': UserAgent().random}
    # Fetch the index page and queue every album (url, title) found on it.
    res = requests.get(URL, headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    articles = soup.find_all('article', class_='col-md-6 col-lg-4 col-xl-3 grid-item')
    for article in articles:
        t = article.find('h2', class_='entry-title').find('a')
        print(t)
        title = t.text
        url = t['href']
        urls.put_nowait([url, title])
    # Spawn 10 downloader greenlets and wait until the queue is drained.
    tasks_list = []
    for _ in range(10):
        task = gevent.spawn(getPictures)
        tasks_list.append(task)
    gevent.joinall(tasks_list)
| Kenny3Shen/CodeShen | Code/Python/Web Crawler/meiziba.py | meiziba.py | py | 1,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gevent.monkey.patch_all",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "gevent.monkey",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
21809713879 | import asyncio
import logging
from bleak import BleakClient, BleakScanner
DEVICE_NAME = "iBobber"
IBOBBER_ADDR = "34:14:B5:4B:B9:15"
ACCEL_SERVICE_UUID = "1791FFA0-3853-11E3-AA6E-0800200C9A66"
CUSTOM_SERVICE_UUID = "1791FF90-3853-11E3-AA6E-0800200C9A66"
BATT_SERVICE_UUID = "0000180F-0000-1000-8000-00805F9B34FB"
DEVICE_SERVICE_UUID = "0000180A-0000-1000-8000-00805F9B34FB"
OAD_SERVICE_UUID = "F000FFC0-0451-4000-B000-000000000000"
ALL_SERVICES = [
ACCEL_SERVICE_UUID,
CUSTOM_SERVICE_UUID,
BATT_SERVICE_UUID,
DEVICE_SERVICE_UUID,
OAD_SERVICE_UUID,
]
log = logging.getLogger("iBobber")
log.setLevel(logging.DEBUG)
async def main():
    """Scan for an iBobber sonar, connect, and dump its GATT layout.

    Discovered devices are tried strongest-signal-first; every advertiser
    whose name matches DEVICE_NAME is connected to and its services and
    characteristics are logged at DEBUG level.
    """
    logging.basicConfig(
        level=logging.INFO,
        # level=logging.DEBUG,
        format="%(asctime)s %(levelname)-8s %(name)-15s %(message)s",
        # datefmt="%Y-%m-%d %H:%M:%S",
    )
    log.debug("meow")
    # discover(return_adv=True) maps address -> (device, advertisement);
    # sort by descending RSSI so the nearest bobber is tried first.
    for device, advert in sorted(
        (await BleakScanner.discover(return_adv=True)).values(),
        key=lambda d: -d[1].rssi,
    ):
        if not (device.name == DEVICE_NAME or advert.local_name == DEVICE_NAME):
            continue
        log.debug("Got a bobber at %s", device.address)
        log.debug("services be like %s", advert.service_uuids)
        async with BleakClient(
            device.address,
            # services=ALL_SERVICES,
            timeout=9999,
        ) as client:
            log.info("Connected!")
            for service in client.services:
                log.debug("%s: %s", service.handle, service.description)
                for characteristic in service.characteristics:
                    log.debug(
                        "    %s: %s",
                        characteristic.handle,
                        characteristic.description,
                    )


# Bug fix: the scan previously ran unconditionally at import time.
# Guard the entry point so importing this module has no side effects.
if __name__ == "__main__":
    asyncio.run(main())
| limehouselabs/i.-bobba | ibobber/__main__.py | __main__.py | py | 1,810 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.INFO... |
36838033719 | import os
import sys
from typing import List
import yaml
# Permit imports from "buildscripts".
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '../../..')))
# pylint: disable=wrong-import-position
from buildscripts.idl import lib
from buildscripts.idl.idl import parser
def gen_all_feature_flags(idl_dirs: List[str] = None):
    """Generate a list of all feature flags.

    Scans every non-third-party IDL file under `idl_dirs` (defaults to
    "src" and "buildscripts"), collects the names of feature flags that
    are not enabled by default, and removes the flags listed in the
    fully-disabled YAML file.
    """
    if not idl_dirs:
        idl_dirs = ["src", "buildscripts"]

    flags_off_by_default = []
    for directory in idl_dirs:
        for idl_path in sorted(lib.list_idls(directory)):
            if lib.is_third_party_idl(idl_path):
                continue

            # Most IDL files do not contain feature flags; a cheap substring
            # scan avoids expensive YAML parsing for those.
            with open(idl_path) as idl_file:
                has_flags = 'feature_flags' in idl_file.read()
            if not has_flags:
                continue

            with open(idl_path) as idl_file:
                parsed = parser.parse_file(idl_file, idl_path)

            flags_off_by_default.extend(
                flag.name for flag in parsed.spec.feature_flags
                if flag.default.literal != "true")

    with open("buildscripts/resmokeconfig/fully_disabled_feature_flags.yml") as fully_disabled_ffs:
        force_disabled_flags = yaml.safe_load(fully_disabled_ffs)

    return list(set(flags_off_by_default) - set(force_disabled_flags))
def gen_all_feature_flags_file(filename: str = "all_feature_flags.txt"):
    """Write every collected feature flag name, one per line, to `filename`."""
    all_flags = gen_all_feature_flags()
    with open(filename, "w") as out:
        out.write("\n".join(all_flags))
        print("Generated: ", os.path.realpath(out.name))
def main():
    """Run the main function."""
    # Entry point: write the default all_feature_flags.txt file.
    gen_all_feature_flags_file()
if __name__ == '__main__':
main()
| mongodb/mongo | buildscripts/idl/gen_all_feature_flag_list.py | gen_all_feature_flag_list.py | py | 1,844 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.normpath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numbe... |
24184494226 | """
H4xton fake typing chat exploit
"""
import requests,os,sys,time,random
class Discord():
    """Wrapper around Discord's typing-indicator endpoint.

    NOTE(review): repeatedly triggering the typing indicator with a user
    token automates a user account ("self-botting"), which violates
    Discord's Terms of Service and can get the account terminated.
    """
    def TypeExploit(token,channelid):
        # Keeps the "user is typing..." indicator alive in the given channel
        # by POSTing to the typing endpoint in an endless loop.
        url = f"https://canary.discord.com/api/v9/channels/{channelid}/typing"
        headers = {
            'authority': 'canary.discord.com',
            'content-length': '0',
            'sec-ch-ua': '',
            'accept-language': 'da,en-US;q=0.9',
            'sec-ch-ua-mobile': '?1',
            'authorization': token,
            'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Mobile Safari/537.36',
            'x-discord-locale': 'da',
            'accept': '*/*',
            'origin': 'https://canary.discord.com',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-mode': 'cors',
            'sec-fetch-dest': 'empty'
        }
        while True:
            response = requests.request("POST", url, headers=headers, data=None)
            # 204 No Content is Discord's success response for this endpoint.
            if response.status_code == 204:
                print("[+] Succesfuly send a Heartbeat")
            else:
                # NOTE(review): this .format() has no {} placeholder, so the
                # response text is silently dropped from the exit message.
                exit("[-] Failed to send a Heartbeat -> ".format(response.text))
if __name__ == "__main__":
token = input("[+] Enter your token: ")
channelid = input("[+] Channel ID: ")
while True:
Discord.TypeExploit(token,channelid)
| lnfernal/Discord-Hearbeat-Exploit | main.py | main.py | py | 1,350 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.request",
"line_number": 31,
"usage_type": "call"
}
] |
26383130759 | import os
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=4"
import sys
from typing import Literal
import pickle
sys.path.append(snakemake.config['workdir'])
import jax
from jax.config import config
config.update("jax_enable_x64", True)
print(f"Jax device count: {jax.local_device_count()}")
import torch
torch.set_default_dtype(torch.float64)
from arviz.data.inference_data import InferenceDataT
from src.utils.experiment_storage import ExperimentStorage, experiment_id_ctx
from src.napsu_mq.napsu_mq import NapsuMQModel, NapsuMQResult
from src.utils.string_utils import epsilon_float_to_str
from src.utils.timer import Timer
from src.utils.data_utils import transform_for_modeling
from src.utils.job_parameters import JobParameters
from src.utils.seed_utils import set_seed, get_seed
from src.utils.data_generator import create_dummy_dataset
"""
Create model with increasing number of variables in dataset and profiling enabled. Try to estimate the exponent for the runtime
using the number of canonical queries and the tree width.
"""
if __name__ == '__main__':
parameter_combination_pickle_path = snakemake.input[0]
parameter_combination_pickle_file = open(parameter_combination_pickle_path, "rb")
parameter_combinations: JobParameters = pickle.load(parameter_combination_pickle_file)
parameter_combination_pickle_file.close()
epsilon = parameter_combinations.epsilon
epsilon_str = epsilon_float_to_str(epsilon)
experiment_id = parameter_combinations.experiment_id
experiment_id_ctx.set(experiment_id)
dataset_name = parameter_combinations.dataset
query_list = parameter_combinations.query_list
query_str = parameter_combinations.query_string
laplace_approximation = parameter_combinations.laplace_approximation
laplace_approximation_algorithm = parameter_combinations.laplace_approximation_algorithm
algo = parameter_combinations.algo
repeat_index = parameter_combinations.repeat_index
seed = snakemake.config['seed']
unique_seed = get_seed(seed, repeat_index)
rng = set_seed(unique_seed)
target_file = str(snakemake.output[0])
storage_file_path = f"logs/napsu_linear_regression_test_storage_{experiment_id}.pickle"
mode: Literal["append"] = "append"
timer_file_path = "logs/napsu_linear_regression_test_timer.csv"
storage = ExperimentStorage(file_path=storage_file_path, mode=mode)
timer = Timer(file_path=timer_file_path, mode=mode)
n_categories = int(dataset_name.split("x")[1])
dataframe = create_dummy_dataset(n_columns=5, n_rows=10000, n_categories=n_categories)
dataframe = transform_for_modeling(dataset_name, dataframe)
n, d = dataframe.shape
query = []
delta = (n ** (-2))
timer_meta = {
"experiment_id": experiment_id,
"dataset_name": dataset_name,
"query": query,
"epsilon": epsilon,
"delta": delta,
"MCMC_algo": algo,
"laplace_approximation": laplace_approximation,
"laplace_approximation_algorithm": laplace_approximation_algorithm,
"repeat_index": repeat_index,
"original_seed": unique_seed
}
pid = timer.start(f"Main run", **timer_meta)
print(
f"PARAMS: \n\tdataset name {dataset_name}\n\tcliques {query_str}\n\tMCMC algo {algo}\n\tepsilon {epsilon_str}\n\tdelta: {delta}\n\tLaplace approximation {laplace_approximation}")
print("Initializing NapsuMQModel")
model = NapsuMQModel()
result: NapsuMQResult
inf_data: InferenceDataT
result, inf_data = model.fit(
data=dataframe,
dataset_name=dataset_name,
rng=rng,
epsilon=epsilon,
delta=delta,
column_feature_set=query,
MCMC_algo=algo,
use_laplace_approximation=laplace_approximation,
return_inference_data=True,
enable_profiling=False,
laplace_approximation_algorithm=laplace_approximation_algorithm,
laplace_approximation_forward_mode=True
)
timer.stop(pid)
print("Writing model to file")
result.store(target_file)
inf_data.to_netcdf(f"logs/inf_data_linear_regression_{dataset_name}_{epsilon_str}e_{repeat_index}_repeat.nc")
# Save storage and timer results every iteration
storage.save_as_pickle(file_path=storage_file_path, experiment_id=experiment_id)
timer.save(file_path=timer_file_path, mode=mode, index=False)
| jarsba/gradu | scripts/create_models_for_linear_regression.py | create_models_for_linear_regression.py | py | 4,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "jax.config.config.update",... |
476311110 | import json
import clearskies
from clearskies.handlers.exceptions import ClientError, InputError
from clearskies.handlers.base import Base
from .exceptions import ProducerError
class NoInput(clearskies.handlers.SchemaHelper, Base):
    """Clearskies handler implementing an Akeyless custom-producer webhook.

    Routes three endpoints (create / revoke / rotate) to user-supplied
    callables.  Akeyless sends a serialized-JSON ``payload`` string (and,
    for revoke, a list of ``ids``); this handler validates the payload
    against an optional schema and dispatches to the configured callable
    via dependency injection.
    """
    _configuration_defaults = {
        'base_url': '',
        'can_rotate': True,
        'can_revoke': True,
        'create_callable': None,
        'revoke_callable': None,
        'rotate_callable': None,
        'payload_schema': None,
        'id_column_name': None,
        'create_endpoint': 'sync/create',
        'revoke_endpoint': 'sync/revoke',
        'rotate_endpoint': 'sync/rotate',
    }

    def __init__(self, di):
        """Store the dependency-injection container (via the base class)."""
        super().__init__(di)

    def configure(self, configuration):
        """Apply configuration, defaulting to public (no) authentication."""
        # we don't need authentication but clearskies requires it, so provide one if it doesn't exist
        if 'authentication' not in configuration:
            configuration['authentication'] = clearskies.authentication.public()
        super().configure(configuration)

    def _finalize_configuration(self, configuration):
        """Normalize endpoint paths and convert the payload schema to columns."""
        # add in our base url and make sure the final result doesn't start or end with a slash
        base_url = configuration['base_url'].strip('/')
        for endpoint in ['create_endpoint', 'revoke_endpoint', 'rotate_endpoint']:
            configuration[endpoint] = (base_url + '/' + configuration[endpoint].strip('/')).lstrip('/')
        if configuration.get('payload_schema'):
            configuration['payload_schema'] = self._schema_to_columns(configuration['payload_schema'])
        return super()._finalize_configuration(configuration)

    def _check_configuration(self, configuration):
        """Validate the handler configuration, raising ValueError on misuse.

        Enforces: an id column name is always required; each enabled action
        needs a matching callable; a rotate callable may not be combined
        with can_rotate=False; the payload schema (if any) must be valid.
        """
        super()._check_configuration(configuration)
        error_prefix = f"Configuration error for handler '{self.__class__.__name__}':"
        if not configuration.get('id_column_name'):
            raise ValueError(
                f"{error_prefix} you must provide 'id_column_name' - the name of a key from the response of the create callable that will be passed along to the revoke callable"
            )
        for action in ['revoke']:
            if not configuration.get(f'can_{action}'):
                continue
            if not configuration.get(f'{action}_callable'):
                raise ValueError(f"{error_prefix} you must provide '{action}_callable' or set 'can_{action}' to False")
            if not callable(configuration.get(f'{action}_callable')):
                raise ValueError(f"{error_prefix} '{action}_callable' must be a callable but was something else")
        if not configuration.get('create_callable'):
            raise ValueError(f"{error_prefix} you must provide 'create_callable'")
        if not callable(configuration.get('create_callable')):
            raise ValueError(f"{error_prefix} 'create_callable' must be a callable but was something else")
        if configuration.get('rotate_callable'):
            if 'can_rotate' in configuration and not configuration.get('can_rotate'):
                raise ValueError(
                    f"{error_prefix} 'rotate_callable' was provided, but can_rotate is set to False. To avoid undefined behavior, this is not allowed."
                )
            if not callable(configuration['rotate_callable']):
                raise ValueError(
                    f"{error_prefix} 'rotate_callable' must be a callable (or None) but was something else"
                )
        if configuration.get('payload_schema') is not None:
            self._check_schema(configuration['payload_schema'], None, error_prefix)

    def handle(self, input_output):
        """Route the request path to create/revoke/rotate, else 404.

        When revoke is disabled the revoke endpoint still responds
        (dummy_revoke), because Akeyless always calls it.
        """
        full_path = input_output.get_full_path().strip('/')
        if full_path == self.configuration('create_endpoint'):
            return self.create(input_output)
        elif full_path == self.configuration('revoke_endpoint'):
            if self.configuration('can_revoke'):
                return self.revoke(input_output)
            else:
                return self.dummy_revoke(input_output)
        elif full_path == self.configuration('rotate_endpoint') and self.configuration('can_rotate'):
            return self.rotate(input_output)
        return self.error(input_output, 'Page not found', 404)

    def _check_payload(self, payload):
        """Return a dict of validation errors for `payload` (empty if none).

        No schema configured means no validation.
        """
        if not self.configuration('payload_schema'):
            return {}
        schema = self.configuration('payload_schema')
        return {
            **self._extra_column_errors(payload, schema),
            **self._find_input_errors(payload, schema),
        }

    def _get_payload(self, input_output):
        """Extract and parse the 'payload' JSON string from the request body.

        Raises InputError if the key is missing, empty, not a string, or
        not valid JSON (Akeyless sends the payload double-encoded).
        """
        request_json = input_output.request_data(required=True)
        if 'payload' not in request_json:
            raise InputError("Missing 'payload' in JSON POST body")
        if not request_json['payload']:
            raise InputError("Provided 'payload' in JSON POST body was empty")
        if not isinstance(request_json['payload'], str):
            if isinstance(request_json['payload'], dict):
                raise InputError(
                    "'payload' in the JSON POST body was a JSON object, but it should be a serialized JSON string"
                )
            raise InputError("'payload' in JSON POST must be a string containing JSON")
        try:
            payload = json.loads(request_json['payload'])
        except json.JSONDecodeError:
            raise InputError("'payload' in JSON POST body was not a valid JSON string")
        return payload

    def _get_ids(self, input_output):
        """Extract the 'ids' list from the request body or raise InputError."""
        request_json = input_output.request_data(required=True)
        if 'ids' not in request_json:
            raise InputError("Missing 'ids' in JSON POST body")
        return request_json['ids']

    def create(self, input_output):
        """Create credentials via the configured create callable.

        Responds with {'id': ..., 'response': credentials}; when revoke is
        enabled the id is taken from the id column of the callable's
        response (Akeyless passes it back to revoke later).
        """
        try:
            payload = self._get_payload(input_output)
        except InputError as e:
            return self.error(input_output, e.errors, 400)
        errors = self._check_payload(payload)
        if errors:
            return self.input_errors(input_output, errors)
        try:
            credentials = self._di.call_function(
                self.configuration('create_callable'),
                **payload,
                payload=payload,
                for_rotate=False,
            )
        except (InputError, ClientError, ProducerError) as e:
            return self.error(input_output, str(e), 400)
        # we need to return a meaningful id if we are going to revoke at the end
        if self.configuration('can_revoke'):
            id_column_name = self.configuration('id_column_name')
            if id_column_name not in credentials:
                raise ValueError(
                    f"Response from create callable did not include the required id column: '{id_column_name}'"
                )
            # akeyless will only accept strings as the id value - no integers/etc
            credential_id = str(credentials[id_column_name])
        else:
            credential_id = 'i_dont_need_an_id'
        return input_output.respond({
            'id': credential_id,
            'response': credentials,
        }, 200)

    def dummy_revoke(self, input_output):
        """Revoke, but don't revoke.

        This is here because Akeyless always requires a revoke endpoint, but
        revocation is not always possible.  So, if revoke is disabled, we
        still validate the request and respond as if the ids were revoked.
        """
        try:
            payload = self._get_payload(input_output)
            ids = self._get_ids(input_output)
        except InputError as e:
            return self.error(input_output, e.errors, 400)
        errors = self._check_payload(payload)
        if errors:
            return self.input_errors(input_output, errors)
        return input_output.respond({
            'revoked': ids,
            'message': '',
        }, 200)

    def revoke(self, input_output):
        """Revoke each requested id via the configured revoke callable."""
        try:
            payload = self._get_payload(input_output)
            ids = self._get_ids(input_output)
        except InputError as e:
            return self.error(input_output, e.errors, 400)
        errors = self._check_payload(payload)
        if errors:
            return self.input_errors(input_output, errors)
        for id in ids:
            try:
                self._di.call_function(
                    self.configuration('revoke_callable'),
                    **payload,
                    payload=payload,
                    id_to_delete=id,
                )
            except (InputError, ClientError, ProducerError) as e:
                return self.error(input_output, str(e), 400)
        return input_output.respond({
            'revoked': ids,
            'message': '',
        }, 200)

    def rotate(self, input_output):
        """Rotate credentials and return the new serialized payload.

        Uses the rotate callable if configured; otherwise performs a
        create (for_rotate=True) followed by a revoke of the old id.
        """
        try:
            payload = self._get_payload(input_output)
        except InputError as e:
            return self.error(input_output, e.errors, 400)
        errors = self._check_payload(payload)
        if errors:
            return self.input_errors(input_output, errors)
        # The user may have provided a rotate callable, in which case just use that.
        if self.configuration('rotate_callable'):
            new_payload = self._di.call_function(
                self.configuration('rotate_callable'),
                **payload,
                payload=payload,
            )
        # otherwise, perform a standard create+revoke
        else:
            try:
                new_payload = self._di.call_function(
                    self.configuration('create_callable'),
                    **payload,
                    payload=payload,
                    for_rotate=True,
                )
                if self.configuration('can_revoke'):
                    self._di.call_function(
                        self.configuration('revoke_callable'),
                        **new_payload,
                        payload=new_payload,
                        id_to_delete=payload.get(self.configuration('id_column_name')),
                    )
            except (InputError, ClientError, ProducerError) as e:
                return self.error(input_output, str(e), 400)
        return input_output.respond({
            'payload': json.dumps(new_payload),
        }, 200)

    def documentation(self):
        """No auto-generated API documentation for this handler."""
        return []

    def documentation_security_schemes(self):
        """No documented security schemes for this handler."""
        return {}

    def documentation_models(self):
        """No documented models for this handler."""
        return {}
| cmancone/clearskies-akeyless-custom-producer | src/clearskies_akeyless_custom_producer/handlers/no_input.py | no_input.py | py | 10,414 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "clearskies.handlers",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "clearskies.handlers.base.Base",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "clearskies.authentication.public",
"line_number": 27,
"usage_type": "call"
},
{
... |
72975128425 | from django import forms
from crispy_forms.layout import Submit
from core.forms.base_crispy_form import BaseCrispyForm
from core.models.round import Round
class WinnerForm(BaseCrispyForm, forms.ModelForm):
    """Moderator-facing form for declaring the winner of a Round.

    If the round already has a winner, the winner field is removed and a
    single "Start Next Round" button is shown.  Otherwise the winner
    choices are built from `players` and "Reveal Submitter" (plus an
    optional "Shuffle") buttons are added.
    """
    SUBMIT_BUTTON_VALUE = "Declare Winner"
    SUBMIT_BUTTON_CSS_CLASSES = "btn-no-bg btn-outline-info"

    class Meta:
        model = Round
        fields = ["winner"]

    def __init__(
        self,
        *args,
        players=None,
        enable_reroll=True,
        moderator_id: int = None,
        **kwargs
    ):
        """
        :param players: iterable of dicts with 'pk' and 'player_name' keys
            (required when the round has no winner yet)
        :param enable_reroll: whether to offer the "Shuffle" action
        :param moderator_id: user id stamped on the round when saving
        """
        self.moderator_id = moderator_id
        super().__init__(*args, **kwargs)
        if self.instance.winner:
            # Winner already declared: replace the default submit button
            # with "Start Next Round" and drop the winner field entirely.
            self.helper.inputs.clear()
            self.helper.add_input(
                Submit(
                    name="action",
                    value="Start Next Round",
                    css_class="btn-no-bg btn-outline-primary",
                )
            )
            del self.fields["winner"]
        else:
            self.players = players
            # Prepend an empty "-----" choice so no winner is pre-selected.
            player_choices = [{"pk": None, "player_name": "-----"}] + list(players)
            self.fields["winner"].choices = (
                (x["pk"], x["player_name"]) for x in player_choices
            )
            self.helper.add_input(
                Submit(
                    name="action",
                    value="Reveal Submitter",
                    css_class="btn-no-bg btn-outline-danger",
                )
            )
            if enable_reroll:
                self.helper.add_input(
                    Submit(
                        name="action",
                        value="Shuffle",
                        css_class="btn-no-bg btn-outline-primary",
                    )
                )

    def save(self, commit=True):
        """Stamp the moderator on the round and save it.

        Bug fix: the original called ``super().save(self)``, which passed
        the form instance itself as the ``commit`` argument (always truthy,
        so ``commit=False`` was ignored) and discarded the saved object.
        Honor ``commit`` and return the instance, as ModelForm.save does.
        """
        self.instance.moderator_id = self.moderator_id
        return super().save(commit=commit)
| lekjos/dailygroove | src/core/forms/winner_form.py | winner_form.py | py | 1,864 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "core.forms.base_crispy_form.BaseCrispyForm",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"... |
6788861571 | import codecs
from django.conf import settings
from django.views.generic.edit import CreateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse
from django.http.response import HttpResponseRedirect
from django.contrib import messages
from achievement.models import Achievement
from ...models import (
BulkCreationConsultant, Consultant
)
from ...forms import BulkCreationConsultantForm
from .helpers import read_name_email_coins_from_csv
class BulkCreationConsultantFormView(
        PermissionRequiredMixin,
        CreateView
):
    """Admin view for bulk-inviting consultants from an uploaded CSV.

    Each CSV row (name, email, coins) becomes a BulkCreationConsultant
    record; new emails get an invitation, existing consultant accounts get
    a coin reward (unless they already have one), and invalid rows are
    flagged with an error status.
    """
    template_name = 'network/bulk_creation_form.html'
    permission_required = settings.CONSULTANT_FULL_PERMS_ADD_CONSULTANT
    model = BulkCreationConsultant
    form_class = BulkCreationConsultantForm
    raise_exception = True

    def get_success_message(self, *args, **kwargs):
        """Build the success banner from the count of successful invitations."""
        bulk_creation = kwargs.get('bulk_creation')
        return '{} invitations created successfully'.format(bulk_creation.consultants.success().count())

    def get_success_url(self, bulk_creation):
        """Redirect to the detail page of the bulk-creation batch."""
        return reverse('consultant:bulk-add-detail', kwargs={'pk': bulk_creation.pk})

    def create_bulk_creation(self, form):
        """Persist the bulk-creation batch, stamped with the current user."""
        bulk_creation = form.save()
        bulk_creation.created_by = self.request.user
        bulk_creation.save()
        return bulk_creation

    def create_bulk_user(self, new_user, bulk_creation):
        """Create the per-row BulkCreationConsultant record for `new_user`."""
        return BulkCreationConsultant.objects.create(
            name=new_user.name,
            email=new_user.email,
            coins=new_user.coins,
            bulk_creation=bulk_creation,
        )

    def invite_consultant(self, new_user, custom_message):
        """Create and invite a brand-new consultant for a CSV row.

        The short name is taken as everything before the first space of the
        full name.
        """
        short_name = new_user.name.split(' ')[0]
        consultant = Consultant.objects.create_consultant(
            short_name=short_name,
            full_name=new_user.name,
            email=new_user.email,
            invite_user=self.request.user,
            registration_process=True,
            custom_text=custom_message,
            coins=new_user.coins,
        )
        return consultant

    def update_existing_user(self, new_user, bulk_creation_consultant):
        """Handle a CSV row whose email already belongs to a user.

        Non-consultant users are rejected; consultants get a coin reward
        unless they already received one.
        NOTE(review): a consultant row without coins is flagged with the
        "email used" error rather than a coins-specific one — presumably
        intentional, verify against product requirements.
        """
        if not new_user.user.is_consultant:
            bulk_creation_consultant.set_error_email_used()
        else:
            consultant = new_user.user.consultant
            if not new_user.has_coins:
                bulk_creation_consultant.set_error_email_used()
            elif new_user.has_achievement_created():
                bulk_creation_consultant.set_error_achievement_created()
            else:
                Achievement.objects.create_reward_for_consultant(
                    consultant,
                    new_user.coins,
                )
                bulk_creation_consultant.set_consultant(consultant)

    def form_valid(self, form):
        """Process the uploaded CSV row by row, then redirect to the batch."""
        bulk_creation = self.create_bulk_creation(form)
        filecontent = form.cleaned_data.get('file_csv')
        # The upload is a binary stream; decode it lazily as UTF-8 lines.
        filecontent = codecs.iterdecode(filecontent, 'utf-8')
        users_list = read_name_email_coins_from_csv(filecontent)
        for new_user in users_list:
            bulk_creation_consultant = self.create_bulk_user(new_user, bulk_creation)
            if new_user.missing_information():
                bulk_creation_consultant.set_error_missing_information()
            elif new_user.exists:
                self.update_existing_user(new_user, bulk_creation_consultant)
            else:
                consultant = self.invite_consultant(
                    new_user, form.cleaned_data.get('custom_text'),
                )
                bulk_creation_consultant.set_consultant(consultant)
        success_message = self.get_success_message(**{'bulk_creation': bulk_creation})
        messages.success(self.request, success_message)
        return HttpResponseRedirect(self.get_success_url(bulk_creation))
| tomasgarzon/exo-services | service-exo-core/consultant/views/network/bulk.py | bulk.py | py | 3,853 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.mixins.PermissionRequiredMixin",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.views.generic.edit.CreateView",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.CONSULTANT_FULL_PERMS_ADD_CONSULTANT... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.