seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
69096792 | ##################################################
# pygame - 마우스 이벤트(MOUSEMOTION) 이해
#
# 참고 사이트
# https://pg.org
# https://pg.org/docs/
##################################################
import pygame as pg
import sys
import time
# Initialization: pygame requires these calls in this order before drawing.
pg.init()
screen = pg.display.set_mode((400, 300))
pg.display.set_caption("pygame 이해하기")
x1, y1 = 0, 0  # last known mouse position (top-left corner of the rect)
running = True
while running:
    # Drain pending events: quit on window close, track mouse motion otherwise.
    for event in pg.event.get():
        if event.type == pg.QUIT:
            running = False
        elif event.type == pg.MOUSEMOTION:
            x1, y1 = pg.mouse.get_pos()
    # Redraw each frame: clear to black, outline a 44x44 green rect at the cursor.
    screen.fill((0, 0, 0))
    pg.draw.rect(screen, (0, 255, 0), (x1, y1, 44, 44), 2)
    pg.display.update()
    time.sleep(0.1)  # ~10 FPS cap
print('메인루프 종료')
pg.quit()
| null | 1006.py | 1006.py | py | 885 | python | en | code | null | code-starcoder2 | 51 |
89133984 | #чоловіки жінки ієрархічна
import pandas as pd
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
# Hierarchical clustering of countries by two numeric (male/female) columns.
data = pd.read_csv('1.csv', sep=';')
print(data.head())
# Column 0: country name; columns 5 and 6: the two numeric features.
X = data.iloc[:, [0, 5, 6]].values
# Dendrogram over the numeric features using Ward linkage.
dendogram = sch.dendrogram(sch.linkage(X[:, [1, 2]], method='ward'))
plt.title('Dendogram')
plt.xlabel('Clusters')
plt.ylabel('Euclid dist')
plt.show()
# Split the same features into three agglomerative clusters.
hierc = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='ward')
y_hierc = hierc.fit_predict(X[:, [1, 2]])
# Scatter plot of each cluster with its own color and label.
for cluster, color, label in ((0, 'y', 'Average'), (1, 'b', 'The Best'), (2, 'c', 'Worst')):
    plt.scatter(X[y_hierc == cluster, 1], X[y_hierc == cluster, 2], s=100, c=color, label=label)
plt.title('Clusters of countries Hierarhial')
plt.xlabel('Male')
plt.ylabel('Female')
plt.legend()
plt.show()
# Print the member countries of each cluster (blank-ish separator between groups).
for title, cluster in (('The best:', 1), ('Average:', 0), ('Worst:', 2)):
    print(title)
    for country in X[y_hierc == cluster, 0]:
        print(country)
    if cluster != 2:
        print(" ")
| null | 2.py | 2.py | py | 1,307 | python | en | code | null | code-starcoder2 | 51 |
220999683 | import psutil as ps
import time
import netifaces
class NetMonitor:
    """Tracks per-interface network I/O counters and derives KB/s rates."""

    def __init__(self):
        # Snapshot of the last counters and the wall-clock time it was taken.
        self._last_stat = None
        self._last_time = None

    def _get_net_io_counters(self):
        """Return psutil per-NIC counters as plain dicts with rate fields zeroed."""
        counters = ps.net_io_counters(pernic=True)
        ret = dict()
        for k, v in counters.items():
            ret[k] = v._asdict()
            ret[k].update({
                "recv_per_sec": 0,
                "sent_per_sec": 0
            })
        return ret

    def _set_last_stat(self, counters):
        """Remember *counters* and the time they were captured."""
        self._last_stat = counters
        self._last_time = time.time()

    def update(self):
        """Refresh counters, compute KB/s rates since the last call, and return them.

        The first call has no baseline, so its rates stay 0.
        """
        counters = self._get_net_io_counters()
        for i in netifaces.interfaces():
            # 2 == netifaces.AF_INET; skip interfaces without an IPv4 address.
            # NOTE(review): assumes every netifaces interface also appears in
            # the psutil counters — a mismatch would raise KeyError; confirm.
            if 2 not in netifaces.ifaddresses(i).keys():
                continue
            counters[i].update({"ip": netifaces.ifaddresses(i)[2][0]['addr'], "interface": i})
        if not self._last_stat:
            self._set_last_stat(counters=counters)
            return counters
        delta = time.time() - self._last_time
        for k, v in counters.items():
            old = self._last_stat.get(k)
            counters[k].update({
                "recv_per_sec": round((v['bytes_recv'] - old['bytes_recv']) / delta / 1024, 2),
                "sent_per_sec": round((v['bytes_sent'] - old['bytes_sent']) / delta / 1024, 2)
            })
        self._set_last_stat(counters)
        # BUG FIX: previously only the first call returned the counters; every
        # subsequent call implicitly returned None. Now update() always
        # returns the freshly computed snapshot.
        return counters

    def get(self):
        """Return the most recent counter snapshot (None before the first update())."""
        return self._last_stat
if __name__ == '__main__':
    # Simple demo loop: print eth0 receive rate in MB/s (stored value is KB/s).
    monitor = NetMonitor()
    monitor.update()
    while True:
        print(monitor.get()['eth0']['recv_per_sec'] / 1024)
        time.sleep(1)
        monitor.update()
| null | statbrowser/monitor/network.py | network.py | py | 1,655 | python | en | code | null | code-starcoder2 | 51 |
328569008 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 16:59:20 2020
@author: Jieyun Hu
"""
# This file includes preparing the data, encoding and modeling
# To predict two type of activity. Click = 0 , Swipe = 1.
# put this file outside the filtered_traces folder
import os
import json
from matplotlib import pyplot as plt
import numpy as np
import collections
npy_file_dir = './ui_layout_vectors/ui_vectors.npy'
json_file_dir = './ui_layout_vectors/ui_names.json'
lst = os.listdir("filtered_traces") # For exploring the filtered_traces file
index_search_file = []
with open(json_file_dir) as json_file:
index_search_file = json.load(json_file)['ui_names']
vectors = np.load(npy_file_dir)
def search_for_index(ui_name):
    """Map a UI screenshot name (without extension) to its row in ui_names.json."""
    return index_search_file.index(ui_name + '.png')


# find the 64-dim vector
def search_for_vector(index):
    """Return the 64-dim layout vector stored at row *index*."""
    return vectors[index, :]


def ui_to_vector(ui_name):
    """Look up the 64-dim layout vector for a UI by its name."""
    return vectors[search_for_index(ui_name), :]
def gestures_to_vectors(gestures_dir):
    """Load a gestures.json file and return the layout vector of each UI in it."""
    with open(gestures_dir) as json_file:
        gestures = json.load(json_file)
    # The dict keys are the UI names, in trace order.
    return [ui_to_vector(ui) for ui in gestures]
# Given a list of paths of file directory.
# return[0] is 67 vectors which concatenate 64-dim vectors with 3 dim vector representation of activity
# return[1] is only 3 dim vector representing activity. I haven't used it, but it may be useful later.
def gestures_array_to_vector(gestures_dir_array):
    """For each gestures.json path, build per-UI feature vectors.

    Returns [res, res_y]: res[i] is a list of 67-dim vectors
    (64-dim layout vector + [activity_flag, x, y]) for trace i;
    res_y[i] holds just the 3-dim activity vectors.
    """
    #print(gestures_dir_array)
    res = []
    res_y = []
    for gestures_dir in gestures_dir_array:
        with open(gestures_dir) as json_file:
            gestures = json.load(json_file)
        get_ui_seq = [*gestures]  # UI names in trace order (dict insertion order)
        vecs = []
        vecs_y = []
        for ui in get_ui_seq:
            try:
                vector_64 = ui_to_vector(ui) #add 64 dim vector to activity vector
                lst_of_activity = gestures.get(ui)
                # One touch point => click (flag 0); several => swipe (flag 1).
                if len(lst_of_activity) == 1: #click
                    temp = [0]
                    temp.extend(lst_of_activity[0])
                    temp = np.asarray(temp)
                    vecs_y.append(temp) # e.g [0, coorX, coorY]
                    vector_67 = np.concatenate((vector_64,temp),axis=0)
                    #print(len(vector_67))
                    vecs.append(vector_67)# 64 dim vector add to the activity vector
                elif len(lst_of_activity) > 1: #swipe
                    # A swipe is summarized by the mean of its touch points.
                    average_of_coor = [float(sum(l))/len(l) for l in zip(*lst_of_activity)]
                    temp = [1]
                    temp.extend(average_of_coor) # e.g [1, coorX, coorY]
                    temp = np.asarray(temp)
                    vecs_y.append(temp)
                    vector_67 = np.concatenate((vector_64,temp),axis=0)
                    vecs.append(vector_67)
            except:
                # NOTE(review): bare except silently drops UIs whose vector
                # lookup fails (e.g. name missing from ui_names.json) —
                # confirm this best-effort behavior is intentional.
                pass
            #print(vecs_y)
            #print(vecs)
        res.append(vecs)
        res_y.append(vecs_y)
    return [res,res_y]
dict = collections.defaultdict(list)
def trace_length_to_dictionary():
    """Index every gestures.json path under filtered_traces by its trace length.

    Populates the module-level `dict` (a defaultdict(list); note it shadows
    the builtin name) mapping number-of-steps -> list of file paths.
    """
    for app in lst:
        for trace in os.listdir("filtered_traces/" + app):
            file_name = "filtered_traces/" + app + "/" + trace + "/gestures.json"
            with open(file_name) as json_file:
                trace_data = json.load(json_file)
            dict[len(trace_data)].append(file_name)
#trace_length_to_dictionary need to be run ahead of this
#return the list of file names with the same length of trace
def find_files_by_count(count):
    """Return all gestures.json paths whose trace has exactly *count* steps."""
    return dict[count]


#given an start and end index, find all files with in the range of trace steps
def find_all_files_in_range(start, end):
    """Return all paths whose trace length lies in [start, end)."""
    found = []
    for length in range(start, end):
        found.extend(dict[length])
    return found
trace_length_to_dictionary()
trace_dir_array = find_all_files_in_range(20,54)
vectors_array = gestures_array_to_vector(trace_dir_array)[0]
#print(vectors_array[0])
from tensorflow.keras.layers import Input, SimpleRNN, GRU, LSTM, Dense, Flatten, Dropout, GlobalMaxPool1D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#RNN model
# N = number of samples
# T = sequence length
# D = number of input features
# M = number of hidden units
# K = number of output units
N = 1
T = 1
D = 67
M = 10
K = 1
i = Input(shape = (T,D))
x = SimpleRNN(M, return_sequences=True)(i)
x = GlobalMaxPool1D()(x)  # collapse the time dimension
x = Dropout(0.5)(x)
# NOTE(review): 'relu' on the output layer combined with binary_crossentropy
# is unusual — a 'sigmoid' activation is the standard choice; confirm intent.
x = Dense(K,activation = 'relu')(x)
model = Model(i,x)
#model.compile( loss = 'mse', metrics = ['accuracy'], optimizer = Adam(lr = 0.001),)
model.compile(loss = 'binary_crossentropy', optimizer = Adam(lr=0.001), metrics = ['accuracy'],)
# split the dataset array to X and y
def split_dataset_array(dataset_array, time_step):
    """Slide a window of length *time_step* over each trace in *dataset_array*.

    Each dataset row is a 67-dim feature vector. Returns (X, y) where X has
    shape (samples, time_step, 67) and y is the click/swipe flag (feature 64)
    of the step immediately after each window.
    """
    X, y = list(), list()
    for dataset in dataset_array:
        dataset = np.array(dataset)
        len_ = len(dataset)
        x_index = 0
        # BUG FIX: was `y_index = T` (module-level global) — the time_step
        # parameter was silently ignored.
        y_index = time_step
        while y_index < len_:
            X.append(dataset[x_index:(x_index + time_step), :])
            # Feature 64 is the activity label (0 = click, 1 = swipe).
            y.append(dataset[y_index, :][64])
            x_index += 1
            y_index += 1
    # BUG FIX: `array(...)` was an undefined name (NameError at runtime);
    # numpy is imported as np in this module.
    return np.array(X), np.array(y)
X, y = split_dataset_array(vectors_array, T)
#print(X.shape)
#print(y.shape)
#print(y)
r = model.fit(X, y, epochs = 200, validation_split = 0.4)
import matplotlib.pyplot as plt
f1 = plt.figure(1)
plt.title('Loss')
plt.plot(r.history['loss'], label = 'train')
plt.plot(r.history['val_loss'], label = 'test')
plt.legend()
f1.show()
f2 = plt.figure(2)
plt.title('Accuracy')
plt.plot(r.history['acc'], label = 'train')
plt.plot(r.history['val_acc'], label = 'test')
plt.legend()
f2.show()
#prediction test
test = ['filtered_traces/com.linkedin.android/trace_0/gestures.json']
res = gestures_array_to_vector(test)
#testing for a ui
#The model just predict every next activity to 0
for i in range(len(res[0][0])):
x = res[0][0][i].reshape(1,1,67)
yhat = model.predict(x)
print (yhat.argmax(axis=-1))
| null | encoding_modeling.py | encoding_modeling.py | py | 6,613 | python | en | code | null | code-starcoder2 | 51 |
574894510 |
from xai.brain.wordbase.verbs._participate import _PARTICIPATE
# class header: past-tense inflection of the verb "participate"
class _PARTICIPATED(_PARTICIPATE, ):
    """Auto-generated word class for the surface form "participated"."""

    def __init__(self,):
        _PARTICIPATE.__init__(self)
        self.name = "PARTICIPATED"       # surface form
        self.specie = 'verbs'            # part-of-speech bucket
        self.basic = "participate"       # lemma / base form
        self.jsondata = {}               # extra metadata, empty by default
22089702 | import sys
import time
import copy
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
def scroll_bottom(browser, cycle):
    """Scroll the page down *cycle* times so more content lazy-loads."""
    script = "var q=document.documentElement.scrollTop={}"
    scroll = 10000
    for _ in range(cycle):
        browser.execute_script(script.format(scroll))
        scroll += 500
        time.sleep(0.5)  # give the page time to load new items
# Breadth-first crawl of YouTube video URLs through a local proxy.
urls = set()          # all URLs collected so far
current_urls = set()  # frontier for the current depth level
backup_urls = set()   # frontier being built for the next depth level
domain = "https://youtube.com"
option = webdriver.ChromeOptions()
option.add_argument("--proxy-server=http://127.0.0.1:8080")
browser = webdriver.Chrome("C:/Python/chromedriver75.exe", options=option)
browser.get(domain)
# find urls on main page
scroll_bottom(browser, 10)
thumbnails = browser.find_elements_by_id("thumbnail")
for element in thumbnails:
    url = element.get_attribute('href')
    if url:
        urls.add(url)
        current_urls.add(url)
time.sleep(1)
print('phase 1 finished.')
print(str(len(urls)) + " urls have been captured.")
print()
# find urls in sub-pages, up to 5 levels deep
for i in range(5):
    for page_url in current_urls:
        browser.get(page_url)
        # Pause any autoplaying videos before scrolling.
        browser.execute_script('videos = document.querySelectorAll("video"); for(video of videos) {video.pause()}')
        scroll_bottom(browser, 5)
        endpoints = browser.find_elements_by_class_name('yt-simple-endpoint')
        for ep in endpoints:
            url = ep.get_attribute('href')
            if url:
                urls.add(url)
                backup_urls.add(url)
        if len(urls) > 2000:
            browser.close()
            print(str(len(urls)) + " urls have been captured.")
            # print(urls)
            # Use a context manager so the file is flushed/closed before exit.
            with open('urls.txt', 'w') as output:
                for url in urls:
                    output.write(url + "\n")
            sys.exit(0)
        time.sleep(0.5)
    # BUG FIX: `current_urls = backup_urls` followed by `backup_urls.clear()`
    # aliased the two sets, so the clear also emptied current_urls and depth
    # levels 2+ iterated over nothing. Rebind backup_urls to a fresh set
    # instead of clearing the shared object.
    current_urls = backup_urls
    backup_urls = set()
print("phase 2 finished.")
print(str(len(urls)) + " urls have been captured.")
print()
browser.close()
| null | scrapy.py | scrapy.py | py | 1,817 | python | en | code | null | code-starcoder2 | 51 |
28042027 | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class Recipe:
    """Model/DAO for rows of the `recipes` MySQL table."""

    def __init__( self , data ):
        # *data* is a row dict returned by MySQL.
        self.id = data['id']
        self.name = data['name']
        self.description = data['description']
        self.instructions = data['instructions']
        self.under30 = data['under30']           # flag: ready in under 30 minutes
        self.author_id = data['author_id']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']

    # Now we use class methods to query our database
    @classmethod
    def get_all(cls):
        """Return every recipe as a list of Recipe instances."""
        query = "SELECT * FROM recipes;"
        # make sure to call the connectToMySQL function with the schema you are targeting.
        results = connectToMySQL('recipes').query_db(query)
        # Create an empty list to append our instances of friends
        recipes = []
        # Iterate over the db results and create instances of friends with cls.
        for recipe in results:
            recipes.append( cls(recipe) )
        return recipes

    @classmethod
    def get_by_id(cls, data):
        """Return the Recipe with data['id'], or False when not found."""
        query = "SELECT * FROM recipes WHERE id = %(id)s;"
        # make sure to call the connectToMySQL function with the schema you are targeting.
        result = connectToMySQL('recipes').query_db(query, data)
        if len(result) < 1:
            return False
        return cls(result[0])

    @classmethod
    def save(cls, data):
        """Insert a new recipe; returns the new row id from the driver."""
        query = "INSERT INTO recipes ( name, description, instructions, under30, author_id, created_at, updated_at) VALUES ( %(name)s, %(description)s, %(instructions)s, %(under30)s, %(author_id)s, %(created_at)s, NOW());"
        # data is a dictionary that will be passed into the save method from server.py
        return connectToMySQL('recipes').query_db( query, data )

    @classmethod
    def update(cls, data):
        """Update an existing recipe identified by data['id']."""
        query = "UPDATE recipes SET name = %(name)s, description = %(description)s, instructions = %(instructions)s, under30 = %(under30)s, created_at = %(created_at)s, updated_at = NOW() WHERE id = %(id)s;"
        # data is a dictionary that will be passed into the save method from server.py
        return connectToMySQL('recipes').query_db( query, data )

    @classmethod
    def delete(cls, data):
        """Delete the recipe identified by data['id']."""
        query = "DELETE FROM recipes WHERE id = %(id)s;"
        print(query)
        # data is a dictionary that will be passed into the save method from server.py
        return connectToMySQL('recipes').query_db( query, data )

    @staticmethod
    def validate(recipe):
        """Flash a message per invalid form field; return overall validity."""
        is_valid = True;
        if len(recipe['name']) < 3:
            flash("Recipe Name must be at least 3 characters")
            is_valid = False
        if len(recipe['description']) < 3:
            flash("Description must be at least 3 characters")
            is_valid = False
        if len(recipe['instructions']) < 3:
            flash("Instructions must be at least 3 characters")
            is_valid = False
        if not recipe['created_at']:
            flash("Date created is required", "register")
            is_valid = False
        return is_valid
| null | flask_mysql/validation/recipes/flask_app/models/recipe.py | recipe.py | py | 3,191 | python | en | code | null | code-starcoder2 | 50 |
359088689 | import json
def run(data, parameters):
    """Filter a JSON list of elements by their nested vehicle count.

    Keeps only elements whose properties.count value lies within
    [minVehicleCount, maxVehicleCount]; elements without a count are dropped.
    Each surviving element gets a 'vehicle count' key (stringified count).
    Returns the filtered list re-serialized as JSON.
    """
    listDictionaries = json.loads(data)
    minVehicleCount = int(parameters['minVehicleCount'])
    maxVehicleCount = int(parameters['maxVehicleCount'])
    print('minVehicleCount is: ' + str(minVehicleCount))
    print('maxVehicleCount is: ' + str(maxVehicleCount))
    # Single-pass filter. Previously the code called list.remove() inside the
    # loop, which is O(n) per removal (O(n^2) overall) and removes the first
    # *equal* element rather than the current one.
    kept = []
    for element in listDictionaries:
        vehicleCount = getVehicleCount(element)
        if vehicleCount is not None and minVehicleCount <= vehicleCount <= maxVehicleCount:
            kept.append(element)
    print('number of elements is: ' + str(len(kept)))
    for element in kept:
        vehicleCount = getVehicleCount(element)
        print('vehicleCount left is: ' + str(vehicleCount))
        # Add a vehicleCount key to the data
        element['vehicle count'] = str(vehicleCount)
    return json.dumps(kept)


def getVehicleCount(element):
    """Extract the nested vehicle count as an int; None when the path is missing."""
    try:
        return int(element['properties']['count']['java.lang.Long'])
    except KeyError:
        return None
| null | vehicleCountFilterScript.py | vehicleCountFilterScript.py | py | 1,639 | python | en | code | null | code-starcoder2 | 50 |
302851296 | """Ecolink 4655BC0-R device."""
from zigpy.profiles import zha
from zigpy.quirks import CustomDevice
from zigpy.zcl.clusters.general import Basic, Identify, Ota, PollControl
from zigpy.zcl.clusters.homeautomation import Diagnostic
from zigpy.zcl.clusters.measurement import TemperatureMeasurement
from zigpy.zcl.clusters.security import IasZone
from zhaquirks import PowerConfigurationCluster
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
class CustomPowerConfigurationCluster(PowerConfigurationCluster):
    """Custom PowerConfigurationCluster."""

    cluster_id = PowerConfigurationCluster.cluster_id
    # Battery voltage bounds used to map reported voltage to a percentage.
    MIN_VOLTS = 2.1
    MAX_VOLTS = 3.0
class Ecolink4655BC0R(CustomDevice):
    """Ecolink 4655BC0-R device."""

    # How the device announces itself over Zigbee.
    signature = {
        # <SimpleDescriptor endpoint=1 profile=260 device_type=1026
        # device_version=0
        # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821]
        # output_clusters=[25]>
        MODELS_INFO: [("Ecolink", "4655BC0-R")],
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.IAS_ZONE,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    CustomPowerConfigurationCluster.cluster_id,
                    Identify.cluster_id,
                    PollControl.cluster_id,
                    TemperatureMeasurement.cluster_id,
                    IasZone.cluster_id,
                    Diagnostic.cluster_id,
                ],
                OUTPUT_CLUSTERS: [Ota.cluster_id],
            }
        },
    }

    # Same endpoint layout, but power configuration is handled by the
    # custom cluster class (instead of only referencing its cluster id).
    replacement = {
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                INPUT_CLUSTERS: [
                    Basic.cluster_id,
                    CustomPowerConfigurationCluster,
                    Identify.cluster_id,
                    PollControl.cluster_id,
                    TemperatureMeasurement.cluster_id,
                    IasZone.cluster_id,
                    Diagnostic.cluster_id,
                ],
                OUTPUT_CLUSTERS: [Ota.cluster_id],
            }
        }
    }
| null | zhaquirks/ecolink/contact.py | contact.py | py | 2,215 | python | en | code | null | code-starcoder2 | 50 |
504414090 | import networkx as nx # Imports networkx package for implementation of graphs
import matplotlib.pyplot as plt # Imports matplotlib package for plotting and displaying the graphs
node=[]
edge=[]
# Return a list of nodes of each cycle
def cycles(G):
    """Print and return the simple cycles of directed graph G."""
    cycle_list = list(nx.simple_cycles(G))
    if cycle_list:
        print('No. of cycles in the graph are: ', len(cycle_list), '\n')
        print('The nodes of each cycle are: ', cycle_list, '\n')
    else:
        print('There are no cycles in the given graph\n')
    return cycle_list
# Returns a list of edges incoming or outgoing via a pendant node
def pendant(G):
    """Find degree-1 (pendant) nodes of G and the edges touching them.

    NOTE(review): relies on the networkx 1.x API where G.degree() behaves
    like a dict and G.edges()/G.in_edges() return lists — confirm version.
    Pendant node labels in the returned edges are suffixed with '(pen)'.
    """
    deg = G.degree()  # node label -> degree
    incident = []
    least = min(deg.values())
    if least == 1:
        # All nodes whose degree equals 1 are pendant nodes.
        pendant_nodes = [n for n in deg if deg[n] == least]
        print('No. of pendant nodes are: ', len(pendant_nodes), '\n')
        print('The pendant nodes are: ', pendant_nodes, '\n')
        incident = G.edges(pendant_nodes) + G.in_edges(pendant_nodes)
        for idx in range(len(incident)):
            incident[idx] = list(incident[idx])
            for pos in range(len(incident[idx])):
                if incident[idx][pos] in pendant_nodes:
                    incident[idx][pos] = incident[idx][pos] + '(pen)'
    else:
        print('There are no pendant nodes in the given graph\n')
    return incident
# Draws a graph G
def draw(G,pos,name):
    """Draw graph G at fixed node positions with *name* as the title."""
    nx.draw_networkx(G,pos=pos,with_labels=True, node_size = 200, node_color='orange',font_size=10)
    plt.axis('off') # Will not display axes
    plt.title(name) # Will display name on the graph
    plt.show() # Displays the drawn graph
# Draws cycles in a graph
def draw_cycles(l, pos, name):
    """Draw each cycle in *l* as its own directed graph.

    Three cases: no cycles (placeholder figure), a single self-loop,
    and general cycles (one figure per cycle).
    """
    if len(l)==0:
        X=nx.DiGraph()
        draw(X,pos,'No cycles are present the given graph')
    elif len(l)==1 and len(l[0])==1:
        # A lone self-loop: one node with an edge to itself.
        X=nx.DiGraph()
        X.add_edge(l[0][0],l[0][0])
        nx.draw_networkx(X,pos=pos,with_labels=True, node_size = 200, node_color='orange',font_size=10)
        plt.axis('off')
        plt.title(name)
        plt.show()
    else:
        for i in range(0, len(l)): # Traverses through each cycle
            X = nx.DiGraph()
            j = 0
            for j in range(0, len(l[i])-1): # Traverses through nodes of each cycle
                X.add_node(l[i][j]) # Adds each node to the cycle graph
                X.add_edge(l[i][j], l[i][j+1]) # Adds each edge to the cycle graph except the last edge
            X.add_edge(l[i][j+1], l[i][0]) # Adds the last edge, closing the cycle
            nx.draw_networkx(X,pos=pos,with_labels=True, node_size = 200, node_color='orange',font_size=10)
            plt.axis('off')
            plt.title(name)
            plt.show() # Draws each cycle as a graph
# Creates Directed Graph
G = nx.DiGraph() # Graph that will contain Main Graph input by user
G_pend = nx.DiGraph() # Graph that will contain pendant odes
if __name__ == "__main__":
    # Inputs details of graph from user
    print('Enter labels of nodes: (NOTE: Enter the nodes in a single line, separated by a single whitespace)\n')
    node = [x for x in input().split()] # Lambda Expression to convert the input string(with each label separated by a space) & splits them & stores all the labels in a list
    print('Enter number of edges:\n')
    noe = int(input())
    print("Enter each edge: (NOTE: Enter the starting node & ending node of each edge, separated by a single whitespace)\n")
    for i in range(0, noe):
        y = []
        y = [x for x in input().split()]
        for i in range(0,2): # Checks whether if user is giving valid edges or not
            if y[i] in node:
                continue
            else:
                print('Please enter edges between the entered nodes only. Try again!!!')
                exit()
        if y[0]==y[1]: # if there is a self loop on a node, then it is represented in the graph as [node]*
            for i in range(0,len(node)):
                if node[i]==y[0]:
                    node[i]=y[0]+'*'
                    y[0]=y[1]=node[i]
        edge.append(y) # Append each edge nodes to edge list
    G.add_nodes_from(node) # Adds nodes list to the main graph
    G.add_edges_from(edge) # Adds edges list to the main graph
    pos = nx.circular_layout(G) # Fixes the positions of nodes in circular format
    list_cycles = cycles(G) # Call function to find the list of nodes of each cycle in the main graph
    list_pend = pendant(G) # Call function to find the list of edges of pendant nodes in the main graph
    G_pend.add_edges_from(list_pend) # Adds edges of pendant nodes to the graph that displays pendant nodes
    pos1 = nx.circular_layout(G_pend)
    draw(G,pos,'Main Graph') # Draws the main graph
    draw_cycles(list_cycles,pos,'Cycles') # Draws each cycle of main graph, if any
    if len(list_pend)!=0:
        draw(G_pend,pos1,'Pendant nodes') # Displays pendant nodes of main graph, if any
    else:
        draw(G_pend,pos1,'There are no pendant nodes in the given graph')
| null | Standalone EXE program/graph.py | graph.py | py | 5,266 | python | en | code | null | code-starcoder2 | 50 |
def search(amount, values, maxValues, maxValueItems):
    """Unbounded-knapsack dynamic program.

    values: iterable of (id, weight, value) triples.
    Fills maxValues[c] with the best total value at capacity c and
    maxValueItems[c] with the item chosen last for that capacity
    (None when no item fits).
    """
    for capacity in range(amount + 1):
        best_value = 0
        best_item = None
        # Only items that fit into the current capacity are candidates.
        for item in [v for v in values if v[1] <= capacity]:
            candidate = item[2] + maxValues[capacity - item[1]]
            if candidate > best_value:
                best_value = candidate
                best_item = item
        maxValues[capacity] = best_value
        maxValueItems[capacity] = best_item
def printItems(items, amount):
    """Walk back through the DP choice table, printing each picked item."""
    remaining = amount
    while remaining > 0:
        print(items[remaining])
        # Step back by the weight of the item chosen at this capacity.
        remaining -= items[remaining][1]
def main():
    """Solve a small knapsack instance and print the optimum and its items."""
    amount = 20
    best_values = [0] * (amount + 1)
    best_items = [0] * (amount + 1)
    items = [
        (1, 2, 3),
        (2, 3, 4),
        (3, 4, 8),
        (4, 5, 8),
        (5, 9, 10),
    ]
    search(amount, items, best_values, best_items)
    print(best_values[amount])
    printItems(best_items, amount)


if __name__ == '__main__':
    main()
| null | base/steal_bag.py | steal_bag.py | py | 798 | python | en | code | null | code-starcoder2 | 50 |
649364548 | #!/usr/bin/python
# Deploys the master branch to the hutmap.com dreamhost account
#
# Specifically, merges master into dreamhost, builds css and js files with new
# uuid suffixes, pulls this all into dreamhost, and restarts the server.
#
# Assumptions:
# - git on the PATH (i.e. you can use git from a terminal)
# - the vagrant vm is up and running
# - ssh on the PATH (manual steps printed if ssh fails)
from os.path import join, dirname, normpath
import os
import shlex
import subprocess
import time
import urllib2
import uuid
import shutil
def shell(cmd, **kwargs):
    """Run *cmd* (a shell-style command string) without a shell; raise on non-zero exit."""
    subprocess.check_call(shlex.split(cmd), **kwargs)
# Work from the repository root (two levels above this script).
base_path = normpath(join(dirname(__file__), '..', '..'))
os.chdir(base_path)
hutmap_ssh = 'hutmap@ssh.hutmap.com'
vers = uuid.uuid1()  # unique version suffix for cache-busting the built assets
success1 = False
try:
    # Rebuild static assets on the dreamhost branch with a fresh version id.
    shutil.rmtree('public/static/css', ignore_errors=True)
    shutil.rmtree('public/static/js', ignore_errors=True)
    shell('git checkout dreamhost')
    shell('git pull origin dreamhost')
    shell('git merge -s resolve master -m"Merge master into branch dreamhost"')
    shell('git rm -r public/static/css/ public/static/js/')
    shell('python scripts/utils/shovel-server.py start')
    time.sleep(1)  # give the build server a moment to come up
    # Hitting these endpoints triggers the versioned css/js builds.
    urllib2.urlopen('http://localhost:3000/build.css?version={0}'.format(vers))
    urllib2.urlopen('http://localhost:3000/build.js?version={0}'.format(vers))
    shell('git add public/static/css/ public/static/js/')
    shell('git commit -m"Version {0}"'.format(vers))
    shell('git push origin dreamhost')
    success1 = True
finally:
    # Always return the working tree to master, even if the build failed.
    shell('git checkout master')
if success1:
    success2 = False
    try:
        # Run the remote half of the deployment over ssh, feeding the script on stdin.
        deploy_remote = open(os.path.join(base_path, 'scripts', 'utils', 'deploy-dreamhost-remote.sh'))
        subprocess.check_call(['ssh', hutmap_ssh, 'bash -s {}'.format(vers)], stdin=deploy_remote)
        success2 = True
    except Exception as e:
        print(e)
if success1 and success2:
    print('\nDeploy successful!\n')
elif success1 and not success2:
    # Local half succeeded; print manual instructions for the remote half.
    print('\n\nThere were errors but you can still complete the deployment.\n')
    print('To complete, ssh in and run the following:')
    print('  hutmap.com/scripts/utils/deploy-dreamhost-remote.sh {}\n'.format(vers))
    print('Or all in one go:')
    print('  ssh {} "bash -s {}" < scripts/utils/deploy-dreamhost-remote.sh\n'.format(hutmap_ssh, vers))
else:
    print('\n Deploy failed. Look at the stack trace printed below for more details.\n')
| null | scripts/utils/deploy-dreamhost.py | deploy-dreamhost.py | py | 2,397 | python | en | code | null | code-starcoder2 | 51 |
538532492 | # coding=utf-8
from random import Random
# json数据((filename,data),(filename,time))
ZeroJsonData = { 'data':{}, 'time':{} }
# Display width of a string: CJK characters count as 2 columns, others as 1.
def strLen(str):
    """Return the display width of *str*; None if it cannot be measured.

    NOTE: the parameter intentionally keeps the name ``str`` (shadowing the
    builtin) to preserve the existing call interface.
    """
    try:
        row_l = len(str)
        utf8_l = len(str.encode('utf-8'))
        # Each CJK char is 3 bytes in UTF-8, i.e. 2 extra bytes -> 1 extra column.
        # BUG FIX: use integer division; `/` produced a float in Python 3.
        return (utf8_l - row_l) // 2 + row_l
    except Exception:
        # Narrowed from a bare except; unreachable trailing `return None` removed.
        return None
#账号的token(name,token)
ZeroAccountToken = {'key':{}, 'time':{}, 'token':{}}
#账号超时时间
ZeroAccountTokenTimeout = 1
# Build a random alphanumeric string.
def strRandom(length=8):
    """Return a random string of *length* ASCII letters and digits."""
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    last = len(chars) - 1
    rng = Random()
    return ''.join(chars[rng.randint(0, last)] for _ in range(length))
| null | app/glb.py | glb.py | py | 760 | python | en | code | null | code-starcoder2 | 51 |
510353263 | from .attention_weight import AttentionWeight
from .weight_sum import WeightSum
class Attention:
    """Attention layer: weight computation followed by a weighted sum over hs."""

    def __init__(self):
        self.params, self.grads = [], []
        self.attention_weight = AttentionWeight()
        self.weight_sum = WeightSum()
        # Cached attention distribution from the last forward pass.
        self.weight = None

    def forward(self, hs, h):
        """Compute attention weights for (hs, h) and return the context vector."""
        attn = self.attention_weight.forward(hs, h)
        context = self.weight_sum.forward(hs, attn)
        self.weight = attn
        return context

    def backward(self, dout):
        """Backprop through both sub-layers; returns (dhs, dh)."""
        dhs_sum, dattn = self.weight_sum.backward(dout)
        dhs_weight, dh = self.attention_weight.backward(dattn)
        return dhs_sum + dhs_weight, dh
13869570 | ###
# Script for plotting and animating both PurpleAir and AQY data from a controlled experiment
# across multiple rounds. If a similar experiment is performed in the future, the filenames
# and round times can be subsituted into the global variables at the top of the script to
# produce new plots
###
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
from datetime import datetime
from datetime import timedelta
from pytz import timezone
from scipy import interpolate
S1 = "exp5_data/PA1.csv" # 0 0 0 0
S2 = "exp5_data/PA2.csv" # 2 6 10 14
S3 = "exp5_data/PA2.csv" # 4 8 12 16
S4 = "exp5_data/PA4.csv" # 30 30 30 30
AERO_EXHAUST = "exp5_data/AQY BD-1160 Data Export.csv"
AERO_DISTANT = "exp5_data/AQY BD-1161 Data Export.csv" # 30 4 8 12 16
# Start, End times for each round
ROUND_TIMES = {1: ["16:28:54", "16:55:00"],
2: ["15:07:52", "15:22:54"],
3: ["15:32:28", "15:47:05"],
4: ["15:58:46", "16:12:19"]}
GLOBAL_START = datetime.strptime("15:00:00", "%H:%M:%S").replace(tzinfo=timezone('US/Pacific'))
R1 = [datetime.strptime(ROUND_TIMES[1][0], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific')), \
datetime.strptime(ROUND_TIMES[1][1], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific'))]
R2 = [datetime.strptime(ROUND_TIMES[2][0], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific')), \
datetime.strptime(ROUND_TIMES[2][1], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific'))]
R3 = [datetime.strptime(ROUND_TIMES[3][0], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific')), \
datetime.strptime(ROUND_TIMES[3][1], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific'))]
R4 = [datetime.strptime(ROUND_TIMES[4][0], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific')), \
datetime.strptime(ROUND_TIMES[4][1], "%H:%M:%S").replace(tzinfo=timezone('US/Pacific'))]
round_list = [R1, R2, R3, R4]
R2_RANGE = [datetime.strptime("15:02:00", "%H:%M:%S").replace(tzinfo=timezone('US/Pacific')), \
datetime.strptime("15:28:00", "%H:%M:%S").replace(tzinfo=timezone('US/Pacific'))]
def dateToMinutes(start, date):
    """Minutes elapsed from *start* to *date*, as a float."""
    return (date - start).total_seconds() / 60
def purple_air_full(filename):
    """Load a PurpleAir CSV; return [pm2.5 values, minutes since GLOBAL_START]."""
    df = pd.read_csv(filename)
    pm = df["pm2_5_atm"].tolist()
    # Timestamps end with "HH:MM:SSZ"; slice out the time and drop the Z.
    raw = [stamp[-9:-1] for stamp in df["UTCDateTime"].tolist()]
    parsed = [datetime.strptime(t, "%H:%M:%S").replace(tzinfo=timezone('UTC')) for t in raw]
    # Convert UTC -> Pacific and correct the sensors' known 7-minute clock offset.
    pst = [t.astimezone(timezone('US/Pacific')) - timedelta(minutes=7) for t in parsed]
    # Times that rolled back to the previous day (day 31) are bumped forward.
    pst = [t + timedelta(days=1) if t.day == 31 else t for t in pst]
    return [pm, [dateToMinutes(GLOBAL_START, t) for t in pst]]
S1F = purple_air_full(S1)
S2F = purple_air_full(S2)
S3F = purple_air_full(S3)
S4F = purple_air_full(S4)
def purple_air(filename, timeRange = None):
    """Load PurpleAir PM2.5 data (converted to PST), split by experiment round.

    With timeRange=(start, end): returns (minutes_from_start, pm_values)
    restricted to that window. Otherwise returns a list of
    [pm_values, minutes_from_round_start] pairs, one per round
    (a 5th slot is allocated but never filled).
    """
    df = pd.read_csv(filename)
    pm = df["pm2_5_atm"].tolist()
    # Timestamps end with "HH:MM:SSZ"; slice out the time and drop the Z.
    times = [x[-9:-1] for x in df["UTCDateTime"].tolist()]
    dates = [datetime.strptime(x, "%H:%M:%S") for x in times]
    utcTimes = [x.replace(tzinfo=timezone('UTC')) for x in dates]
    # Convert to Pacific; the -7 min corrects a known sensor clock offset.
    pstTimes = [x.astimezone(timezone('US/Pacific'))-timedelta(minutes=7) for x in utcTimes]
    for i in range(len(pstTimes)):
        # Times that rolled back to the previous day (day 31) are bumped forward.
        if pstTimes[i].day == 31:
            pstTimes[i] = pstTimes[i]+timedelta(days=1)
    if timeRange != None:
        newVals = []
        newPstTimes = []
        for i in range(len(pstTimes)):
            date = pstTimes[i]
            if timeRange[0] < date and date < timeRange[1]:
                newVals.append(pm[i])
                newPstTimes.append(dateToMinutes(timeRange[0], date))
        return (newPstTimes, newVals)
    rounds = []
    for _ in range(5):
        rounds.append([[], []])
    # Bucket every sample into the round whose time window contains it.
    for i in range(len(pstTimes)):
        date = pstTimes[i]
        if R1[0] < date and date < R1[1]:
            rounds[0][0].append(pm[i])
            rounds[0][1].append(dateToMinutes(R1[0], date))
        elif R2[0] < date and date < R2[1]:
            rounds[1][0].append(pm[i])
            rounds[1][1].append(dateToMinutes(R2[0], date))
        elif R3[0] < date and date < R3[1]:
            rounds[2][0].append(pm[i])
            rounds[2][1].append(dateToMinutes(R3[0], date))
        elif R4[0] < date and date < R4[1]:
            rounds[3][0].append(pm[i])
            rounds[3][1].append(dateToMinutes(R4[0], date))
    return rounds
S1R = purple_air(S1)
S2R = purple_air(S2)
S3R = purple_air(S3)
S4R = purple_air(S4)
def subtract(xR, yR):
    """Elementwise xR minus yR, with yR linearly resampled onto xR's time base.

    Both arguments are [values, times] pairs.
    """
    resampled = interpolate.interp1d(yR[1], yR[0], fill_value='extrapolate')(xR[1])
    return [xv - yv for xv, yv in zip(xR[0], resampled)]
def subtractTest():
    """Visual sanity check: plot round-1 data, a shifted copy, and their difference."""
    plt.plot(S1R[0][1], S1R[0][0])
    shifted = [[v + 100 for v in S1R[0][0]], [t + 4 for t in S1R[0][1]]]
    plt.plot(shifted[1], shifted[0])
    plt.plot(S1R[0][1], subtract(S1R[0], shifted))
    plt.show()
def divide(xR, yR):
    """Elementwise xR divided by yR, with yR linearly resampled onto xR's time base.

    Both arguments are [values, times] pairs.
    """
    resampled = interpolate.interp1d(yR[1], yR[0], fill_value='extrapolate')(xR[1])
    return [xv / yv for xv, yv in zip(xR[0], resampled)]
def testPlot():
    """Quick look at round-3 data from sensors 1, 3 and 4."""
    for series in (S1R[2], S3R[2], S4R[2]):
        plt.plot(series[1], series[0])
    plt.legend(["1", "3", "4"])
    plt.show()
def s2_ratios():
    """Plot the background-corrected sensor-2 / sensor-1 PM2.5 ratio for
    rounds 2-4 (sensor 4 serves as the background to subtract)."""
    for rnd in (1, 2, 3):
        s2_corrected = [subtract(S2R[rnd], S4R[rnd]), S2R[rnd][1]]
        s1_corrected = [subtract(S1R[rnd], S4R[rnd]), S1R[rnd][1]]
        ratio = divide(s2_corrected, s1_corrected)
        plt.plot(S2R[rnd][1], ratio)
    plt.legend(["Round 2 (2 ft)", "Round 3 (6 ft)", "Round 4 (10 ft)"])
    plt.show()
def s3_ratios():
    """Plot the background-corrected sensor-3 / sensor-1 PM2.5 ratio for
    rounds 2-4 (sensor 4 serves as the background to subtract)."""
    for rnd in (1, 2, 3):
        s3_corrected = [subtract(S3R[rnd], S4R[rnd]), S3R[rnd][1]]
        s1_corrected = [subtract(S1R[rnd], S4R[rnd]), S1R[rnd][1]]
        ratio = divide(s3_corrected, s1_corrected)
        plt.plot(S3R[rnd][1], ratio)
    plt.legend(["Round 2 (4 ft)", "Round 3 (8 ft)", "Round 4 (12 ft)"])
    plt.show()
def aero(filename, param, plot=True, timeRange=None):
    """Load one column of an Aeroqual CSV export.

    Args:
        filename: path to the Aeroqual CSV (needs a "Time" column).
        param: column name to extract, e.g. "PM2.5 (µg/m³)".
        plot: when True, also plot the series with matplotlib.
        timeRange: optional (start, end) pair of tz-aware datetimes. When
            given, minutes are measured from timeRange[0]; otherwise from
            GLOBAL_START.

    Returns:
        (times_in_minutes, values)
    """
    df = pd.read_csv(filename)
    vals = df[param].tolist()
    # "Time" ends with HH:MM; the export carries no usable date component.
    times = [x[-5:] for x in df["Time"].tolist()]
    dates = [datetime.strptime(x, "%H:%M") for x in times]
    pstTimes = [x.replace(tzinfo=timezone('US/Pacific')) for x in dates]
    for i in range(len(pstTimes)):
        # Normalize previous-day samples onto the experiment day
        # (same correction as in purple_air()).
        if pstTimes[i].day == 31:
            pstTimes[i] = pstTimes[i] + timedelta(days=1)
    if timeRange is not None:
        newVals = []
        newPstTimes = []
        for i in range(len(pstTimes)):
            date = pstTimes[i]
            if timeRange[0] < date < timeRange[1]:
                newVals.append(vals[i])
                newPstTimes.append(dateToMinutes(timeRange[0], date))
        vals = newVals
        pstTimes = newPstTimes
    else:
        pstTimes = [dateToMinutes(GLOBAL_START, x) for x in pstTimes]
    if plot:
        plt.plot(pstTimes, vals)
    return pstTimes, vals
def fig1():
    """ Plots PM2.5 levels over the entire experiment for each PurpleAir and AQY sensor """
    # Full-experiment PurpleAir traces (pre-loaded (values, minutes) pairs).
    plt.plot(S1F[1], S1F[0])
    plt.plot(S2F[1], S2F[0])
    plt.plot(S4F[1], S4F[0])
    # Aeroqual traces plot themselves (plot=True default).
    aero(AERO_EXHAUST, "PM2.5 (µg/m³)")
    aero(AERO_DISTANT, "PM2.5 (µg/m³)")
    addTimeLines(GLOBAL_START)
    plt.legend(["PurpleAir 1", "PurpleAir 2", "PurpleAir 4", "Aeroqual Exhaust", "Aeroqual Distant"])
    plt.xlabel("Time [mins]")
    plt.ylabel("PM$_{2.5}$ Concentration [$\mu$g/m$^3$]")
    plt.title("PM$_{2.5}$ Measurements")
    plt.show()
def fig2():
    """Plot NO2 over the whole experiment for both Aeroqual sensors."""
    for source in (AERO_EXHAUST, AERO_DISTANT):
        aero(source, "NO2 (ppb)")
    addTimeLines(GLOBAL_START)
    plt.legend(["Exhaust", "Distant"])
    plt.title("Aeroqual NO2 Measurements")
    plt.xlabel("Time [mins]")
    plt.ylabel("NO2 Concentration [ppb]")
    plt.show()
def fig3():
    """Plot O3 over the whole experiment for both Aeroqual sensors."""
    for source in (AERO_EXHAUST, AERO_DISTANT):
        aero(source, "O3 (ppb)")
    addTimeLines(GLOBAL_START)
    plt.legend(["Exhaust", "Distant"])
    plt.title("Aeroqual O3 Measurements")
    plt.xlabel("Time [mins]")
    plt.ylabel("O3 Concentration [ppb]")
    plt.show()
def fig4():
    """ Plots PM2.5 measurements for sensors 1 and 4 during round 2 """
    fig, ax = plt.subplots()
    S1X, S1Y = purple_air(S1, R2_RANGE)
    S4X, S4Y = purple_air(S4, R2_RANGE)
    plt.plot(S1X, S1Y)
    plt.plot(S4X, S4Y)
    # Clamp the y-axis so transient spikes don't dominate the comparison.
    ax.set_ylim(-20, 200)
    # Shade only round 2's window.
    addGray(ax, R2_RANGE[0], [2])
    plt.legend(["1 ft from vehicle", "30 ft from vehicle"])
    plt.xlabel("Time [mins]")
    plt.ylabel("PM$_{2.5}$ Concentration [$\mu$g/m$^3$]")
    plt.title("PM$_{2.5}$ by Distance")
    plt.show()
def addTimeLines(start, rounds = [1, 2, 3, 4]):
    """Draw a red start line and a blue end line for each listed round."""
    for idx in rounds:
        begin = round_list[idx - 1][0]
        finish = round_list[idx - 1][1]
        plt.axvline(x=dateToMinutes(start, begin), color=(1, 0, 0))
        plt.axvline(x=dateToMinutes(start, finish), color=(0, 0, 1))
def addGray(ax, start, rounds = [1, 2, 3, 4]):
    """Shade each listed round's time window in translucent gray on ax."""
    for idx in rounds:
        window = round_list[idx - 1]
        left = dateToMinutes(start, window[0])
        right = dateToMinutes(start, window[1])
        ax.axvspan(left, right, alpha=0.2, color='gray')
def getInterpPoints(x1, y1, x2, y2, numPoints):
    """Linearly interpolate numPoints samples over the half-open span [x1, x2).

    Returns (Xs, Ys): evenly spaced x positions starting at x1 (x2 itself is
    excluded) and the matching y values on the line through (x1, y1)-(x2, y2).
    """
    span = x2 - x1
    Xs = [x1 + (i * span / numPoints) for i in range(numPoints)]
    line = interpolate.interp1d([x1, x2], [y1, y2], fill_value='extrapolate')
    Ys = [line(x).item() for x in Xs]
    return Xs, Ys
def animate(framerate=60, speedMultiplier=2):
    """ Generates an animation of NO2 concentration over time for both AQY sensors """
    # Hard-coded Windows ffmpeg location -- this function only runs on a
    # machine with ffmpeg installed at this path.
    plt.rcParams['animation.ffmpeg_path'] = 'C:\\ffmpeg\\bin\\ffmpeg.exe'
    fig, ax = plt.subplots()
    xdata, ydata = [], []
    ln, = plt.plot([], [], color=(0, 0, 0))
    size = 15
    # ~16:9 aspect ratio.
    fig.set_size_inches(size, size/1.777)
    plt.ylabel("NO$_2$ Concentration [ppb]")
    plt.xlabel("Time [mins]")
    plt.title("NO$_{2}$ Concentration Over Time")
    # Round-1 NO2 traces for both sensors (plot=False: data only).
    locs1, vals1 = aero(AERO_EXHAUST, "NO2 (ppb)", False, R1)
    locs2, vals2 = aero(AERO_DISTANT, "NO2 (ppb)", False, R1)
    dataPreInterp = [[locs1, vals1], [locs2, vals2]]
    data = []
    # Densify each trace so one animation frame advances one interpolated
    # sample (framerate points per original interval).
    for line in dataPreInterp:
        Xs, Ys = [], []
        for i in range(len(line[0])-1):
            interp = getInterpPoints(line[0][i], line[1][i], line[0][i+1], line[1][i+1], framerate)
            Xs.extend(interp[0])
            Ys.extend(interp[1])
        Xs.append(line[0][-1])
        Ys.append(line[1][-1])
        data.append([Xs, Ys])
    # Accumulated (x, y) pairs revealed so far, one slot per sensor.
    xyData = [[[], []], [[], []]]
    colors = [(0.796875, 0.14453125, 0.16015625), (0.22265625, 0.4140625, 0.69140625)]
    lines = []
    for index in range(len(data)):
        lobj = ax.plot([],[],lw=2,color=colors[index])[0]
        lines.append(lobj)
    # initTest/frameTest are unused single-line debug variants of init/frame.
    def initTest():
        ax.set_xlim(-20, len(locs1)+10)
        ax.set_ylim(-10, 100)
        return ln,
    def frameTest(i):
        xdata.append(data[0][0][i])
        ydata.append(data[0][1][i])
        ln.set_data(xdata, ydata)
        return ln,
    def init():
        ax.set_xlim(-2, len(locs1)+2)
        ax.set_ylim(-2, 80)
        for line in lines:
            line.set_data([],[])
        legend = plt.legend(["1 ft from vehicle", "30 ft from vehicle"], loc="upper left")
        for i in range(len(data)):
            legend.legendHandles[i].set_color(colors[i])
        return lines
    def frame(i):
        # Reveal sample i of each trace (guards handle unequal lengths).
        for n in range(len(data)):
            if i < len(data[n][0]) and i < len(data[n][1]):
                xyData[n][0].append(data[n][0][i])
                xyData[n][1].append(data[n][1][i])
        for n in range(len(lines)):
            if i < len(data[n][0]) and i < len(data[n][1]):
                lines[n].set_data(xyData[n][0], xyData[n][1])
        # NOTE(review): this loop reuses the name `i`, shadowing the frame
        # index; harmless only because `i` is not used again afterwards.
        legend = plt.legend(["1 ft from vehicle", "30 ft from vehicle"], loc="upper left")
        for i in range(len(data)):
            legend.legendHandles[i].set_color(colors[i])
        return lines + [legend]
    anim = animation.FuncAnimation(fig, frame, init_func=init,
                                frames=len(data[0][0]), interval=20, blit=True)
    FFMpegWriter = animation.writers['ffmpeg']
    # speedMultiplier > 1 plays the video faster than real time.
    writer = FFMpegWriter(fps=framerate*speedMultiplier, metadata=dict(artist='Me'), bitrate=1800)
    anim.save('basic_animation_exp4.mp4', writer=writer)
def main():
    """Script entry point: render Figure 1 (full-experiment PM2.5 plot)."""
    fig1()
if __name__ == "__main__":
    main()
from flask import Blueprint, render_template, redirect, url_for, request
import traceback
from fantom_util import mturk
from controllers import training_controller
training_app = Blueprint("training", __name__)
CONTACT_EMAIL = "<INSERT_EMAIL@EXAMPLE.COM>"
@training_app.route("/set_trainer")
def set_trainer() -> str:
    """Render a minimal form asking the worker for their MTurk worker ID.

    Submitting the form GETs the training page with ``external_worker_id``.
    """
    training_url = url_for("training.training")
    return f"""
    <form action="{training_url}" method="get">
        <input type="text" name="external_worker_id" placeholder="your worker ID">
        <input type="submit" value="submit">
    </form>
    """
@training_app.route("/done")
def done():
    """Terminal page shown once the worker has completed training."""
    message = "Congratulations! You are now qualified for the HIT!"
    return message
@training_app.route("/")
def training():
    """Serve the next training task for the identified worker.

    Redirects to the worker-ID form when no ``external_worker_id`` is given,
    to the done page when training is complete, and renders the intro video
    for task id 0 (or when no task record is returned).
    """
    external_worker_id = request.args.get("external_worker_id")
    if not external_worker_id:
        return redirect(url_for("training.set_trainer"))
    # NOTE: local name shadows this view function; kept as-is.
    training = training_controller.get_next_training_for_worker(external_worker_id)
    # "__DONE__" is the controller's sentinel for a fully trained worker.
    if training == "__DONE__":
        return redirect(url_for("training.done"))
    if not training or training["id"] == 0:
        return render_template(
            "video.html", task_id=0, external_worker_id=external_worker_id
        )
    else:
        return render_template(
            "training.html",
            history=training["history"],
            replies=training["replies"],
            description=training["description"],
            task_id=training["id"],
            external_worker_id=external_worker_id,
            submit_url=url_for("training.training_submit"),
            with_audio=False,
            used_text_input=True,
        )
@training_app.route("/training_submit", methods=["POST"])
def training_submit():
    """Record a completed training task, then advance or qualify the worker."""
    external_worker_id = request.form["external_worker_id"]
    task_id = int(request.form["task_id"])
    if task_id == 0:
        # Task 0 is the intro video; task_identifier appears to carry the
        # player's reported position, and the 3315.2 offset maps a fully
        # watched video into ~[0.9, 1.0] (magic constant -- TODO confirm).
        the_time = float(request.form.get("task_identifier", 0.0)) - 3315.2
        if the_time < 0.9 or the_time > 1.0:
            return "Please watch the whole video and try again."
    try:
        done_training = training_controller.submit(external_worker_id, task_id)
    except KeyError:
        # Unknown/stale task id: reload the worker's current training page.
        return redirect(
            url_for("training.training", external_worker_id=external_worker_id)
        )
    except Exception:
        print(traceback.format_exc())
        return "Sorry! Something went wrong. Please email {} and provide your worker id to fix this issue.".format(
            CONTACT_EMAIL
        )
    if done_training:
        try:
            # Grant the MTurk qualification that unlocks the real HIT.
            mturk.qualify_worker(external_worker_id)
            return redirect(url_for("training.done"))
        except Exception:
            print(traceback.format_exc())
            return "You are done, but something went wrong with your qualification. Please email {} and provide your worker id to fix this issue.".format(
                CONTACT_EMAIL
            )
    else:
        return redirect(
            url_for("training.training", external_worker_id=external_worker_id)
        )
| null | crowd_sourcing/pages/training.py | training.py | py | 3,018 | python | en | code | null | code-starcoder2 | 51 |
from readMetrics import readMetrics
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from scipy.interpolate import splrep, splev, interp1d
from math import log10, floor
# Sweep over maximum-radius runs and plot pressure-drop / flow metrics
# against capillary number, collapsing curves with the confinement
# parameter delta = 1 - lambda/lambda_c.
# NOTE(review): only the last MAXRAD assignment takes effect; the earlier
# ones look like leftover experiment configurations.
MAXRAD = ['90', '92', '94', '96']
MAXRAD = ['90', '99']
#MAXRAD = ['80','82','84','86','88','90', '92', '94', '96', '98']
MAXRAD = ['90','91','92','93','94','95','96','97','98','99']
#MAXRAD = ['99']
# One color per run, sampled from the brg colormap.
color = iter(cm.brg(np.linspace(0.0,0.8, len(MAXRAD))))
fs = 24
plt.figure(figsize=(14,12))
for maxrad in MAXRAD :
	c = next(color)
	(STR, CA, EPS, REDVOL, LAMBDA, AREA, VLME, LENGTH, DP, ETA) = readMetrics(maxrad)
	v = round(REDVOL[0],2)
	lam = LAMBDA[0]
	# calculate lamc from the cubic equation: 2 v x^3 - 3 x + 1 = 0
	# (roots tabulated below for the reduced volumes of interest)
	if (abs(v - 0.95) < 1e-12) :
		lamc = 1.2324
	if (abs(v - 0.90) < 1e-12) :
		lamc = 1.3712
	if (abs(v - 0.85) < 1e-12) :
		lamc = 1.5050
	if (abs(v - 0.80) < 1e-12) :
		lamc = 1.6437
	if (abs(v - 0.75) < 1e-12) :
		lamc = 1.7925
	if (abs(v - 0.70) < 1e-12) :
		lamc = 1.9562
	if (abs(v - 0.65) <= 0.01) :
		lamc = 2.1397
	lam = lam/lamc
	delta = 1 - lam
#	fc = 7.5112
#	n = 0.8776
#	n = 0.86
#	#fc = 19.041
#	#n = 0.9931
#	delta = pow(delta,n)
#	delta = 1
#	fc = 1
	lam = round(lam,2)
	v = round(v,2)
#	CE = CA
#	DPE = DP
#	for i in range(len(CE)) :
#		CE[i] = CA[i]/(EPS[i]*EPS[i])
#		DPE[i] = DP[i]*EPS[i]
#	xL = CE
#	yL = DPE
	# Rescale the x-axis by delta^2; the y-axis scaling depends on which
	# metric (STR) the data file holds.
	xL = CA/(delta*delta)
	if (STR == 'DP') :
		yL = DP*delta
#		yL = DP
#		for i in range(len(yL)) :
#			yL[i] = yL[i]/(8.0*(1.0 - EPS[i])*LENGTH[i])
	if (STR == 'EPS') :
		yL = (EPS/delta)
	## interpolate to xi using splines
	#tck = interpolate.splrep(x, y, s=0)
	#yi = interpolate.splev(xi, tck, der=0)
	xi = np.logspace(floor(log10(min(xL))), floor(log10(max(xL))), 10000)
	tck = splrep(xL, yL, s=0)
	yi = splev (xi, tck, der=0)
#	f = interp1d(xL, yL)
#	yi = f(xi)
	# Smooth curve through the raw points, plus the raw points themselves.
	plt.plot(xi,yi,'-',c=c)
	plt.plot(xL,yL,'.',c=c)
	# Label only the first and last curve with its lambda/lambda_c value.
	if (maxrad == MAXRAD[0] or maxrad == MAXRAD[-1]) :
		plt.text(1000, yi[-1], str(lam),fontsize = fs-8)
	if (maxrad == MAXRAD[0]) :
		lam0 = lam
	if (maxrad == MAXRAD[-1]) :
		lam1 = lam
# set plot preferences
plt.title('$v$ = 0.' + str(int(v*100)) + ', $\lambda/\lambda_c$ = ' + str(lam0) + ' to ' + str(lam1),fontsize=fs)
plt.xscale('log')
#plt.yscale('symlog')
plt.xticks(fontsize=fs-4)
plt.yticks(fontsize=fs-4)
plt.xlim((0.01,1000))
plt.xlabel('$\mathrm{Ca} = \mu U R^2 / (\epsilon^2 \kappa)$',fontsize=fs)
# Axis label matches whichever metric the files contained (STR from the
# last iteration).
if (STR == 'DP') :
	plt.ylabel('$\epsilon \Delta p R/ (\mu U)$',fontsize=fs)
if (STR == 'EPS') :
	plt.ylabel('$Q/(\pi \epsilon R^2 U) = 1 - V/U$',fontsize=fs)
if (STR == 'AREA') :
	plt.ylabel('$A/R^2$',fontsize=fs)
if (STR == 'VLME') :
	plt.ylabel('$\Omega/R^3$',fontsize=fs)
if (STR == 'REDVOL') :
	plt.ylabel('$v$',fontsize=fs)
if (STR == 'GAMMAX') :
	plt.ylabel('$\gamma_{\mathrm{max}}/ (\mu U)$',fontsize=fs)
if (STR == 'TAUMAX') :
	plt.ylabel(r'$\tau_{\mathrm{max}}R/ (\mu U)$',fontsize=fs)
plt.show()
| null | C++/draft01/ALT/highCa/postproc/metrics/master/metrics.py | metrics.py | py | 2,928 | python | en | code | null | code-starcoder2 | 51 |
"""
Author:
oliver
"""
import numpy as np
import pandas as pd
import scipy as sp
import datetime
import time
import subprocess
import matplotlib
# matplotlib.use("Agg") # This suppresses the annoying Python Rocket GUI indicator in the dock.
matplotlib.use('TkAgg')
import pylab
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.dates as mdates
dt = datetime.datetime
# Disabled branch kept for reference: loads the 10y government bond series.
if False:
    d_gov = pd.read_excel("key_rates.xlsx", "10y_gv", 0, range(5))
    # d_gov.head()
    d_gov.columns = ["date", "px", "yield"]
    # d_gov.date[0]
    ts_gov = d_gov[(d_gov.date >= dt(2010,1,1)) & (d_gov.date <= dt(2015,9,1))]
    # ts_gov = d_gov[(d_gov.date >= dt(2003,1,1)) & (d_gov.date <= dt(2015,9,1))]
    # d_gov.shape
    # ts_gov.shape
# loading into the console the data from cprcdralpha.py
# d is the cprcdralphda data
# Filter to high-coupon 2003-vintage pools with enough loans; negative
# rates are dropped because a log scale is used later.
ts = d[(d.coupon > 5.0) & (d.origination >= datetime.date(2003,1,1)) & (d.origination <= datetime.date(2004,1,1))]
ts = ts[ts.cnloans > 500]
ts = ts[ts.cdr1 >= 0.0] # Because I want to use a log scale and these aren't that helpful
ts = ts[ts.crr1 >= 0.0]
ts2 = ts[ts.date >= datetime.datetime(2003,1,1)]
# plt.clf()
# plt.gca().axes.twinx().set_ylabel("fkjdhs")
# plt.rc('text', usetex=True)
# plt.rcParams["figure.figsize"] = (8, 5)
# plt.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], 'size': 16})
# plt.plot(ts_gov.date, 10.0*ts_gov.px, 'k-', linewidth=2)
# plt.plot(ts2.date, ts2.crr1, 'o', markersize=5, mfc="grey", alpha=0.5)
# plt.title("Prepayment of FNMA and FHLMC Pass-Throughs")
# plt.gca().axes.twinx().set_ylabel("dsfsdf")
# plt.ylabel("Prepayment Rate")
# plt.xlabel("Date")
# plt.yticks(plt.yticks()[0], [str(int(i)) + "\\%" for i in plt.yticks()[0]])
# plt.tick_params(pad=10)
ts_fed = d_fed[d_fed.date >= dt(2003,1,1)]
plt.clf()
plt.tick_params(pad=10)
# plt.tick_params(pad=10, length=5, width=1.3, which='major')
plt.title("Prepayment of FNMA and FHLMC Pass-Throughs")
plt.rc('text', usetex=True)
plt.rcParams["figure.figsize"] = (8, 5)
plt.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], 'size': 16})
fig = plt.gcf()
# fig = plt.figure()
ax1 = fig.add_subplot(111)
# Left axis: prepayment scatter.
ax1.plot(ts2.date, ts2.crr1, 'o', markersize=5, mfc="grey", alpha=0.5)
ax1.set_xlabel('Date')
ax1.set_ylabel("Prepayment Rate")
# ax1.get_yticks()
ax1.set_yticklabels([str(int(i)) + "\\%" for i in ax1.get_yticks()])
# Right axis: overlay the effective fed funds rate (white underlay makes
# the black line readable over the scatter).
ax2 = ax1.twinx()
# ax2.plot(ts_gov.date, ts_gov.px, 'w-', linewidth=6)
# ax2.plot(ts_gov.date, ts_gov.px, 'k-', linewidth=1)
ax2.set_ylabel("US Treasury 10-Year Bond Yield")
ax2.plot(ts_fed.date, ts_fed.funds, 'w-', linewidth=5)
ax2.plot(ts_fed.date, ts_fed.funds, 'k-', linewidth=1)
ax2.set_ylabel("US Effective Federal Funds Rate")
ax2.set_yticklabels([str(i) + "\\%" for i in ax2.get_yticks()])
# Timestamped output names; the second copy embeds its own filename in the
# x-label for provenance, then opens it (macOS `open`).
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
file_name_1 = "{}{}{}".format("Plots/", ts, ".pdf")
file_name_2 = "{}{}{}{}".format("Plots/", ts, "_dated", ".pdf")
plt.savefig(file_name_1, format="pdf", bbox_inches='tight')
ax1.set_xlabel("Date \n File: {}.pdf".format(ts.replace("_", "\_")), labelpad=5)
plt.savefig(file_name_2, format="pdf", bbox_inches='tight')
subprocess.call(["open", file_name_2])
| null | Python/other_rates.py | other_rates.py | py | 3,441 | python | en | code | null | code-starcoder2 | 51 |
"""Read two positive integers and print their greatest common divisor
(highest common factor) and least common multiple.

Version: 0.1
Author: rebecca
Date: 2018-03-01
"""
import math

a, b = input("please input two positive integer: ").split()
a = int(a)
b = int(b)
# Euclid's algorithm (math.gcd) replaces the original O(min(a, b))
# downward search; the printed output is unchanged.
g = math.gcd(a, b)
print("%d is the highest common factor of %d and %d" % (g, a, b))
print("%d is the least common multiple of %d and %d" % (a * b // g, a, b))
| null | day1-3 2021-06-10/py_day4_2.py | py_day4_2.py | py | 424 | python | en | code | null | code-starcoder2 | 51 |
""" Sentiment Classification Model """
from data import IMDB_Data_Loader
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.contrib import predictor
import pandas as pd
import os
class Sentiment_Classifier:
    """IMDB sentiment classifier: a DNN over Universal Sentence Encoder
    embeddings, wrapped with train / export / predict helpers.

    On first construction (no ./model directory) the estimator is trained
    and evaluated; afterwards it is rebuilt around the existing checkpoint.
    Either way the model is exported as a SavedModel and a fast predictor
    callable is built from the export.
    """
    def __init__(self):
        self.dl = IMDB_Data_Loader()
        self.train_df, self.test_df = self.dl.download_and_load_data()
        self.num_epochs = 10
        self.num_steps = 100
        self.learning_rate = 0.001
        self.model_dir = './model'
        self.saved_model_dir = './saved_model'
        self.hub_module = "https://tfhub.dev/google/universal-sentence-encoder/2"
        if not os.path.exists(self.model_dir):
            self.estimator = self.train()
        else:
            # Checkpoint already exists: rebuild the estimator around it
            # instead of retraining.
            self.embedded_feature_column = hub.text_embedding_column(
                key="sentence",
                module_spec=self.hub_module
            )
            self.estimator = tf.estimator.DNNClassifier(
                hidden_units=[500,100],
                feature_columns=[self.embedded_feature_column],
                n_classes=2,
                optimizer=tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate),
                model_dir= self.model_dir,
            )
        self.saved_model_dir = self.estimator.export_savedmodel(export_dir_base=self.saved_model_dir, serving_input_receiver_fn=self.serving_input_receiver_fn())
        # NOTE(review): despite the name, this is a SavedModel predictor
        # callable (tf.contrib.predictor), not an estimator input_fn.
        self.predict_input_fn = predictor.from_saved_model(self.saved_model_dir)
    def train(self):
        """Train the DNNClassifier on the IMDB data, print train/test
        accuracy, and return the trained estimator."""
        train_input_fn = tf.estimator.inputs.pandas_input_fn(
            self.train_df,
            self.train_df['polarity'],
            num_epochs=self.num_epochs,
            shuffle=True
        )
        predict_train_input_fn = tf.estimator.inputs.pandas_input_fn(
            self.train_df,
            self.train_df['polarity'],
            shuffle=False
        )
        predict_test_input_fn = tf.estimator.inputs.pandas_input_fn(
            self.test_df,
            self.test_df['polarity'],
            shuffle=False
        )
        # Sentence embeddings come from the TF-Hub Universal Sentence Encoder.
        embedded_feature_column = hub.text_embedding_column(
            key="sentence",
            module_spec=self.hub_module
        )
        estimator = tf.estimator.DNNClassifier(
            hidden_units=[500,100],
            feature_columns=[embedded_feature_column],
            n_classes=2,
            optimizer=tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate),
            model_dir= self.model_dir,
        )
        estimator.train(input_fn=train_input_fn, steps=self.num_steps)
        train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
        test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn)
        print("Training set accuracy: {accuracy}".format(**train_eval_result))
        print("Test set accuracy: {accuracy}".format(**test_eval_result))
        return estimator
    def serving_input_receiver_fn(self):
        """Serving signature: a single raw string placeholder named
        'sentence'."""
        feature_placeholder = {'sentence': tf.placeholder(dtype=tf.string, shape=[1], name='sentence_placeholder')}
        return tf.estimator.export.build_raw_serving_input_receiver_fn(feature_placeholder)
    def predict(self, text):
        """ Predicts on one Example """
        # Fast path: uses the SavedModel predictor built in __init__.
        output_dict = self.predict_input_fn({'inputs': [text]})
        output_dict['scores'] = output_dict['scores'].tolist()
        output_dict['classes'] = list(map(lambda x: x.decode("utf-8"), output_dict['classes'].tolist()[0] ))
        return output_dict
    def estimator_predict(self, text):
        """ Predicts on one Example Using TF Estimator, very slow at the moment :( """
        df = pd.DataFrame({"sentence": [text]})
        predict_input_fn = tf.estimator.inputs.pandas_input_fn(
            df,
            shuffle=False
        )
        # Returns the predicted class id (0 or 1) for the single row.
        return int([x['class_ids'][0] for x in self.estimator.predict(input_fn=predict_input_fn)][0])
| null | ml_model.py | ml_model.py | py | 3,869 | python | en | code | null | code-starcoder2 | 51 |
import os
import json
import base64
from typing import (Dict, Optional, Any, List)
from collections import OrderedDict
from gtmcore.labbook.labbook import LabBook
from gtmcore.activity import ActivityStore, ActivityType, ActivityRecord, ActivityDetailType, ActivityDetailRecord, \
ActivityAction
from gtmcore.activity.utils import ImmutableList, DetailRecordList, TextData
class BundledAppManager:
    """Class to manage bundled apps within a labbook instance"""
    def __init__(self, labbook: LabBook) -> None:
        # LabBook Environment
        self.labbook = labbook
    @property
    def bundled_app_file(self):
        # Path of the JSON file that persists the bundled-app configuration.
        return os.path.join(self.labbook.root_dir, '.gigantum', 'apps.json')
    @property
    def reserved_names(self) -> list:
        """A property for all reserved application names. These are names that are currently used in Gigantum bases
        Returns:
            list
        """
        return ['jupyter', 'notebook', 'jupyterlab', 'rstudio']
    @property
    def reserved_ports(self) -> list:
        """A property for all reserved application ports. The following ports are currently reserved:
        8888 - jupyter
        8787 - rstudio
        8686 - reserved for future expansion
        8585 - reserved for future expansion
        8484 - reserved for future expansion
        8383 - reserved for future expansion
        Returns:
            list
        """
        return [8888, 8787, 8686, 8585, 8484, 8383]
    def add_bundled_app(self, port: int, name: str, description: str, command: Optional[str] = None) -> Dict[str, Any]:
        """Add a "bundled app" configuration to this labbook
        Args:
            port(int): port number to expose from the container (will be routed to the browser)
            name(str): name of the bundled app
            description(str): description of the bundled app
            command(str): command to run in the container if needed to start the app
        Returns:
            dict
        """
        # Check if a reserved application name, currently:
        if name.lower() in self.reserved_names:
            raise ValueError(f"{name} is a reserved application name. Try again.")
        # NOTE(review): the message only mentions the upper bound, but this
        # check also rejects empty names.
        if len(name) > 10 or len(name) < 1:
            raise ValueError(f"{name} must be 10 characters or less.")
        if len(description) > 240:
            raise ValueError(f"{description} must be 240 characters or less.")
        if command:
            if len(command) > 1024:
                raise ValueError(f"{command} must be 1024 characters or less.")
            # Base64 encode the command to avoid escaping issues when persisting to json file
            command = base64.b64encode(command.encode()).decode()
        # Check if a reserved port currently
        if port in self.reserved_ports:
            raise ValueError(f"Port {port} is a in reserved port. Try a different port.")
        data = self._load_bundled_app_data()
        # Check for port already in use
        for app in data:
            if data[app].get('port') == port:
                raise ValueError(f"Port {port} is already in use. Try again.")
        data[name] = {'port': port,
                      'description': description,
                      'command': command}
        with open(self.bundled_app_file, 'wt') as bf:
            json.dump(data, bf)
        # Commit the changes
        self.labbook.git.add(self.bundled_app_file)
        commit = self.labbook.git.commit(f"Committing bundled app")
        adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT,
                                   show=False,
                                   action=ActivityAction.CREATE,
                                   data=TextData('plain', f"App configuration: {json.dumps(data[name])}"))
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            message=f"Added app '{name}'",
                            show=True,
                            linked_commit=commit.hexsha,
                            detail_objects=DetailRecordList([adr]),
                            tags=ImmutableList(["environment", "docker", "bundled_app"]))
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)
        return data
    def remove_bundled_app(self, name: str) -> None:
        """Remove a bundled app from this labbook
        Args:
            name(str): name of the bundled app
        Returns:
            None
        """
        data = self._load_bundled_app_data()
        if name not in data:
            raise ValueError(f"App {name} does not exist. Cannot remove.")
        del data[name]
        with open(self.bundled_app_file, 'wt') as baf:
            json.dump(data, baf)
        # Commit the changes
        self.labbook.git.add(self.bundled_app_file)
        commit = self.labbook.git.commit(f"Committing bundled app")
        # NOTE(review): ActivityAction.CREATE on a removal looks copy-pasted
        # from add_bundled_app -- confirm whether a DELETE action exists and
        # whether the commit message should mention removal.
        adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT,
                                   show=False,
                                   action=ActivityAction.CREATE,
                                   data=TextData('plain', f"Removed bundled application: {name}"))
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            message=f"Removed bundled application: {name}",
                            show=True,
                            linked_commit=commit.hexsha,
                            detail_objects=DetailRecordList([adr]),
                            tags=ImmutableList(["environment", "docker", "bundled_app"]))
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)
    def _load_bundled_app_data(self) -> OrderedDict:
        """Load data file or return an empty OrderedDict
        Returns:
            OrderedDict
        """
        if os.path.isfile(self.bundled_app_file):
            with open(self.bundled_app_file, 'rt') as baf:
                data = json.load(baf, object_pairs_hook=OrderedDict)
        else:
            data = OrderedDict()
        return data
    def get_bundled_apps(self) -> OrderedDict:
        """Get collection of bundled apps in this labbook
        Returns:
            OrderedDict
        """
        data = self._load_bundled_app_data()
        # b64 decode the commands
        for app in data:
            if data[app]['command']:
                data[app]['command'] = base64.b64decode(data[app]['command']).decode()
        return data
    def get_docker_lines(self) -> List[str]:
        """Method to get lines to add to the dockerfile
        Returns:
            list
        """
        lines = list()
        data = self.get_bundled_apps()
        # One EXPOSE line per configured app port.
        for app in data:
            lines.append(f"EXPOSE {data[app].get('port')}")
        return lines
| null | packages/gtmcore/gtmcore/environment/bundledapp.py | bundledapp.py | py | 6,800 | python | en | code | null | code-starcoder2 | 51 |
from panther_base_helpers import box_parse_additional_details
def rule(event):
    """Fire only for Box Shield 'Anomalous Download' alerts scoring above 50."""
    if event.get('event_type') != 'SHIELD_ALERT':
        return False
    shield = box_parse_additional_details(event).get('shield_alert', {})
    is_anomalous_download = shield.get('rule_category', '') == 'Anomalous Download'
    return is_anomalous_download and shield.get('risk_score', 0) > 50
def title(event):
    """Use the Box Shield alert summary as the alert title when present,
    otherwise fall back to a generic message naming the triggering user."""
    details = box_parse_additional_details(event)
    description = details.get('shield_alert',
                              {}).get('alert_summary',
                                      {}).get('description', '')
    if description:
        return description
    # Fixed "Anamalous" -> "Anomalous" typo in the user-facing fallback title.
    return 'Anomalous download activity triggered by user [{}].'.format(
        event.get('created_by', {}).get('name', '<UNKNOWN_USER>'))
| null | box_rules/box_anomalous_download.py | box_anomalous_download.py | py | 819 | python | en | code | null | code-starcoder2 | 51 |
import time
import argparse
import pynvml
class Device(object):
    """Tracks peak memory usage for one GPU via a small state machine.

    INIT: waiting for usage to climb past the detection threshold.
    DETECTING: recording the running maximum usage.
    STOP: usage dropped back below the threshold; detection is finished.
    """

    class Status:
        INIT = "INIT"
        DETECTING = "DETECTING"
        STOP = "STOP"

    # Usage must exceed 32 MiB before detection starts (filters idle noise).
    start_detecting_mem_threshold = 32 * 1024 * 1024

    def __init__(self, handle):
        self.handle = handle
        self.status = self.Status.INIT
        self.max_mem_usage = 0

    def update(self):
        """Poll current memory usage; return False once detection stops."""
        info = pynvml.nvmlDeviceGetMemoryInfo(self.handle)
        threshold = self.start_detecting_mem_threshold
        if self.status == self.Status.INIT:
            if info.used > threshold:
                self.status = self.Status.DETECTING
            return True
        if self.status == self.Status.DETECTING:
            if info.used < threshold:
                self.status = self.Status.STOP
                return False
            self.max_mem_usage = max(self.max_mem_usage, info.used)
            return True
        if self.status == self.Status.STOP:
            raise ValueError("detecting is stop")
        raise ValueError("invalid status")
def main():
    """Poll every GPU until all devices finish detecting, then report peaks.

    Command line:
        -g  number of GPU devices to watch (default 1)
        -n  polling interval in seconds (default 1)
    """
    parser = argparse.ArgumentParser(description="collect GPU device memory usage")
    parser.add_argument("-g", type=int, default=1, help="number of gpu devices")
    parser.add_argument("-n", type=float, default=1, help="metrics rate")
    args = parser.parse_args()
    pynvml.nvmlInit()
    n_gpus = args.g
    devices = [Device(pynvml.nvmlDeviceGetHandleByIndex(i)) for i in range(n_gpus)]
    running = True
    while running:
        time.sleep(args.n)
        # Keep polling while at least one device is still detecting.
        running = False
        for device in devices:
            running |= device.update()
    pynvml.nvmlShutdown()
    for i, device in enumerate(devices):
        max_mem_usage_mbytes = device.max_mem_usage / 1024 / 1024
        # Bug fix: the label previously read "gpt{i}".
        print(f"gpu{i} max memory usage: {max_mem_usage_mbytes:.2f}M")


if __name__ == "__main__":
    main()
| null | LanguageModeling/gpt-2/tools/gpu_memory_usage.py | gpu_memory_usage.py | py | 1,861 | python | en | code | null | code-starcoder2 | 51 |
#!/usr/bin/env python
# coding:utf-8
"""
Author:
LiTeng 1471356861@qq.com
Implement TextRNN, contains LSTM,GRU,RNN
Reference: "Effective LSTMs for Target-Dependent Sentiment Classification"
"Bidirectional LSTM-CRF Models for Sequence Tagging"
"Generative and discriminative text classification
with recurrent neural networks"
"""
import tensorflow as tf
from tensorflow import keras
from model.layers.embeddings import EmbeddingsLayer
from utils.logger import Type
class RNNType(Type):
    """Enumerates the supported recurrent cell kinds."""

    RNN = 'RNN'
    LSTM = 'LSTM'
    GRU = 'GRU'

    @classmethod
    def str(cls):
        """Comma-separated list of every valid cell type name."""
        return ",".join((cls.RNN, cls.LSTM, cls.GRU))
class Model(tf.keras.Model):
    """
    One layer rnn.

    Text classifier: optional embedding (or reshape of pre-embedded input),
    a stack of (optionally bidirectional) RNN/LSTM/GRU layers, then a dense
    classification head with softmax/sigmoid selected by config.logits_type.
    """
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config
        if self.config.embedding.use_embedding:
            self.embedding = EmbeddingsLayer(config.embedding)
        else:
            # Input is already embedded; just restore the 3-D shape.
            self.reshape = keras.layers.Reshape((config.TextRNN.input_length, config.TextRNN.embedding_dimension))
        if self.config.TextRNN.rnn_type == RNNType.LSTM:
            layer_cell = keras.layers.LSTM
        elif self.config.TextRNN.rnn_type == RNNType.GRU:
            layer_cell = keras.layers.GRU
        else:
            layer_cell = keras.layers.SimpleRNN
        self.rnn_type = config.TextRNN.rnn_type
        self.num_layers = config.TextRNN.num_layers
        self.bidirectional = config.TextRNN.bidirectional
        self.layer_cells = []
        # Kernel weights get a lighter L2 penalty (x0.1) than the recurrent
        # weights -- TODO confirm this asymmetry is intentional.
        for i in range(config.TextRNN.num_layers):
            if config.TextRNN.bidirectional:
                self.layer_cells.append(keras.layers.Bidirectional(
                    layer_cell(config.TextRNN.hidden_dimension,
                               use_bias=config.TextRNN.use_bias,
                               activation=config.TextRNN.activation,
                               kernel_regularizer=keras.regularizers.l2(self.config.TextRNN.l2 * 0.1),
                               recurrent_regularizer=keras.regularizers.l2(self.config.TextRNN.l2))))
            else:
                self.layer_cells.append(layer_cell(config.TextRNN.hidden_dimension,
                                                   use_bias=config.TextRNN.use_bias,
                                                   activation=config.TextRNN.activation,
                                                   kernel_regularizer=keras.regularizers.l2(self.config.TextRNN.l2 * 0.1),
                                                   recurrent_regularizer=keras.regularizers.l2(self.config.TextRNN.l2)))
        self.fc = keras.layers.Dense(config.TextRNN.num_classes)
    def call(self, inputs, training=None, mask=None):
        # NOTE(review): the print() calls below look like leftover debugging.
        print("inputs", inputs)
        x = inputs
        if self.config.embedding.use_embedding:
            # [b, sentence len] => [b, sentence len, word embedding]
            x = self.embedding(x)
            print("embedding", x)
        else:
            x = self.reshape(x)
        for layer_cell in self.layer_cells:
            x = layer_cell(x)
            print('rnn', x)
        x = self.fc(x)
        print(x.shape)
        if self.config.logits_type == "softmax":
            x = tf.nn.softmax(x)
        elif self.config.logits_type == "sigmoid":
            x = tf.nn.sigmoid(x)
        return x
| null | model/classification/textrnn.py | textrnn.py | py | 3,277 | python | en | code | null | code-starcoder2 | 50 |
"""Common features for bignum in test generation framework."""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import typing
from abc import abstractmethod
from typing import Iterator, List, Tuple, TypeVar
T = TypeVar('T') #pylint: disable=invalid-name
def invmod(a: int, n: int) -> int:
    """Return inverse of a to modulo n.

    Equivalent to pow(a, -1, n) in Python 3.8+ up to sign (the result may be
    negative). Implementation mirrors long_invmod() in CPython.
    """
    coeff, next_coeff = 1, 0
    while n:
        quotient, remainder = divmod(a, n)
        a, coeff, next_coeff, n = n, next_coeff, coeff - quotient * next_coeff, remainder
    # a now holds the gcd of the original inputs; invertible iff it is 1.
    if a != 1:
        raise ValueError("Not invertible")
    return coeff
def hex_to_int(val: str) -> int:
    """Parse a base-16 string; an empty (or otherwise falsy) value maps to 0."""
    if not val:
        return 0
    return int(val, 16)
def quote_str(val) -> str:
    """Wrap val in double quotes, as expected by the test-data format."""
    return f'"{val}"'
def bound_mpi(val: int, bits_in_limb: int) -> int:
    """First number exceeding number of limbs needed for given input value."""
    needed_limbs = limbs_mpi(val, bits_in_limb)
    return bound_mpi_limbs(needed_limbs, bits_in_limb)
def bound_mpi_limbs(limbs: int, bits_in_limb: int) -> int:
    """First number exceeding maximum of given number of limbs."""
    return 1 << (limbs * bits_in_limb)
def limbs_mpi(val: int, bits_in_limb: int) -> int:
    """Return the number of limbs required to store value."""
    # Ceiling division of the bit length by the limb width.
    return -(-val.bit_length() // bits_in_limb)
def combination_pairs(values: List[T]) -> List[Tuple[T, T]]:
    """Return all pair combinations from input values.

    The return value is cast, as older versions of mypy are unable to derive
    the specific type returned by itertools.combinations_with_replacement.
    """
    pairs = itertools.combinations_with_replacement(values, 2)
    return typing.cast(List[Tuple[T, T]], list(pairs))
class OperationCommon:
    """Common features for bignum binary operations.
    This adds functionality common in binary operation tests.
    Attributes:
        symbol: Symbol to use for the operation in case description.
        input_values: List of values to use as test case inputs. These are
            combined to produce pairs of values.
        input_cases: List of tuples containing pairs of test case inputs. This
            can be used to implement specific pairs of inputs.
        unique_combinations_only: Boolean to select if test case combinations
            must be unique. If True, only A,B or B,A would be included as a test
            case. If False, both A,B and B,A would be included.
    """
    symbol = ""
    input_values = [] # type: List[str]
    input_cases = [] # type: List[Tuple[str, str]]
    unique_combinations_only = True
    def __init__(self, val_a: str, val_b: str) -> None:
        """Store both raw hex-string inputs and their parsed integer values."""
        # The raw strings are echoed into the test-case output; the ints are
        # used by subclasses to compute the expected result.
        self.arg_a = val_a
        self.arg_b = val_b
        self.int_a = hex_to_int(val_a)
        self.int_b = hex_to_int(val_b)
    def arguments(self) -> List[str]:
        """Return the test-case argument list: quoted inputs then the result."""
        return [
            quote_str(self.arg_a), quote_str(self.arg_b)
        ] + self.result()
    @abstractmethod
    def result(self) -> List[str]:
        """Get the result of the operation.
        This could be calculated during initialization and stored as `_result`
        and then returned, or calculated when the method is called.
        """
        raise NotImplementedError
    @classmethod
    def get_value_pairs(cls) -> Iterator[Tuple[str, str]]:
        """Generator to yield pairs of inputs.
        Combinations are first generated from all input values, and then
        specific cases provided. The yield order is part of the contract:
        generated test cases keep a stable ordering across runs.
        """
        if cls.unique_combinations_only:
            yield from combination_pairs(cls.input_values)
        else:
            # Full Cartesian product: both (a, b) and (b, a) are emitted.
            yield from (
                (a, b)
                for a in cls.input_values
                for b in cls.input_values
            )
        yield from cls.input_cases
# BEGIN MERGE SLOT 1
# END MERGE SLOT 1
# BEGIN MERGE SLOT 2
# END MERGE SLOT 2
# BEGIN MERGE SLOT 3
# END MERGE SLOT 3
# BEGIN MERGE SLOT 4
# END MERGE SLOT 4
# BEGIN MERGE SLOT 5
# END MERGE SLOT 5
# BEGIN MERGE SLOT 6
# END MERGE SLOT 6
# BEGIN MERGE SLOT 7
# END MERGE SLOT 7
# BEGIN MERGE SLOT 8
# END MERGE SLOT 8
# BEGIN MERGE SLOT 9
# END MERGE SLOT 9
# BEGIN MERGE SLOT 10
# END MERGE SLOT 10
| null | scripts/mbedtls_dev/bignum_common.py | bignum_common.py | py | 4,802 | python | en | code | null | code-starcoder2 | 50 |
307158340 | #Jose Luis Mata Lomelí
#Crear dibujos tipo espirografo
import pygame
import math
import random
#Dimensiones
Ancho = 800  # window width in pixels
Alto = 800   # window height in pixels
#Colores
BLANCO = (255, 255, 255)  # RGB white, used to clear each frame
def dibujar(r, R, l):
    """Open a pygame window and repeatedly draw four hypotrochoid
    ("spirograph") curves derived from the base parameters.

    Parameters:
        r: radius of the rolling circle.
        R: radius of the fixed circle (also used as the drawing amplitude).
        l: pen-offset ratio for the base curve.

    For scale factor s the curve uses k = (s*r)/(s*R) == r/R — i.e. the
    same k for all four curves — and pen offset s*l, exactly as the four
    hand-unrolled loops of the original did. Colours are re-randomised on
    every frame, so the drawing re-colours once per second (reloj.tick(1)).
    """
    # Inicializa el motor de pygame
    pygame.init()
    ventana = pygame.display.set_mode((Ancho, Alto))  # drawing surface
    pygame.display.set_caption("Espirografo")
    reloj = pygame.time.Clock()  # limits the frame rate

    # Loop-invariant parameters, hoisted out of the frame loop (the
    # original recomputed them every frame):
    # periodo: number of full turns needed to close the base curve.
    periodo = r // math.gcd(r, R)
    # One (k, pen-offset) pair per curve; k is identical for every scale
    # factor since (s*r)/(s*R) == r/R, only the offset l grows.
    curvas = [(r / R, l * escala) for escala in (1, 2, 3, 4)]

    termina = False
    while not termina:
        # Procesa los eventos que recibe
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                termina = True
        # Borrar pantalla
        ventana.fill(BLANCO)
        for k, offset in curvas:
            # Fresh random colour per curve per frame (as the original did).
            color = (random.randrange(255), random.randrange(255),
                     random.randrange(255))
            _trazar_hipotrocoide(ventana, color, R, k, offset, periodo)
        pygame.display.flip()  # show the finished frame
        reloj.tick(1)
    pygame.quit()


def _trazar_hipotrocoide(ventana, color, R, k, l, periodo):
    """Plot one hypotrochoid point-by-point onto *ventana*.

    Replaces the four copy-pasted drawing loops of the original; the
    parametric equations are unchanged.
    """
    for angulo in range(0, 360 * periodo, 1):
        a = math.radians(angulo)
        x = int(R * ((1 - k) * math.cos(a) + (l * k * math.cos(((1 - k) / k) * a))))
        y = int(R * ((1 - k) * math.sin(a) - (l * k * math.sin(((1 - k) / k) * a))))
        pygame.draw.circle(ventana, color, (x + Ancho // 2, Alto // 2 - y), 1, 1)
# Funcion principal
def main():
    """Prompt for the spirograph parameters and launch the drawing loop."""
    radio_menor = int(input("Valor de r: "))
    radio_mayor = int(input("Valor de R: "))
    desplazamiento = float(input("Valor de l: "))
    # Draw with the parameters the user supplied.
    dibujar(radio_menor, radio_mayor, desplazamiento)
main()
| null | misionImposible-mision6.py | misionImposible-mision6.py | py | 3,559 | python | en | code | null | code-starcoder2 | 50 |
195964046 | #import ui #used for pythonista
#import console #used for pythonista
import get_time
import get_data
import calc
import matplotlib.pyplot as plt
import pylab
import matplotlib.dates as mdates
import pylab
from io import BytesIO
from pprint import pprint
#Last week: sunday (0) + Monday (2)
#week before: Sunday (1) + Monday (3)
# All activities, already filtered by get_data; presumably keyed by
# activity timestamp (comparisons against get_time.LS/LM below) — TODO confirm.
master_dict = get_data.my_filtered_activities()
def period(dictionary,Sunday,Monday):
    """Summarise the runs inside the week bounded by *Monday*..*Sunday*.

    Sunday/Monday are offsets handed to get_time.LS()/get_time.LM()
    (0 = most recent, larger = further back). Prints the run count,
    mileage, the 10% increment target and per-run label lists.

    NOTE(review): calls remaining(), which reads the global current_miles
    set by current_period() — current_period() must have run first.
    """
    dict_1 = dictionary.copy()
    #if key is newer than last sunday, remove it
    for key in dictionary:
        if key > get_time.LS(Sunday):
            del dict_1[key]
    # drop anything older than the Monday bound as well
    for key in dictionary:
        if key < get_time.LM(Monday):
            del dict_1[key]
    # NOTE(review): past_dict_rev is sorted descending and then immediately
    # re-sorted ascending into past_dict, so the intermediate dict is redundant.
    past_dict_rev = {k: dict_1[k] for k in list(reversed(sorted(dict_1.keys())))}
    past_dict = {k: past_dict_rev[k] for k in list(sorted(past_dict_rev.keys()))}
    past_run_count = calc.activity_count(past_dict)
    past_mile_list = []
    for i in past_dict:
        past_mile_list.append(float(past_dict[i]['distance_miles']))
    past_miles = ("{0:.2f}".format(sum(past_mile_list)))
    # ten percent OF last week's mileage (the increment, not the new total)
    past_ten_percent = ("{0:.2f}".format(float(past_miles) * .1))
    # Per-run label lists, each in ascending key order.
    past_run_title_label = []
    for i in list(sorted(past_dict)):
        past_run_title_label.append(past_dict[i]['weekday_short_date'])
    past_run_mile_label = []
    for i in list(sorted(past_dict)):
        past_run_mile_label.append(past_dict[i]['distance_miles'])
    past_run_pace_label = []
    for i in list(sorted(past_dict)):
        past_run_pace_label.append(past_dict[i]['pace'])
    past_run_elapsed_label = []
    for i in list(sorted(past_dict)):
        past_run_elapsed_label.append(str(past_dict[i]['elapsed']))
    past_run_treadmill_label = []
    for i in list(sorted(past_dict)):
        past_run_treadmill_label.append(str(past_dict[i]['treadmill_flagged']))
    # Depends on the global current_miles set by current_period().
    remaining(past_ten_percent,past_miles)
    print("past run")
    print(past_run_count)
    print(past_miles)
    print(past_ten_percent)
    print(past_run_title_label)
    print(past_run_mile_label)
    print(past_run_pace_label)
    print(past_run_elapsed_label)
    print(past_run_treadmill_label)
    # Pythonista UI bindings, kept for reference (module runs headless now).
    # label1= v['label1']
    # label1.text = (get_time.convert_weekday_full(get_time.LM(2)) + " - " + get_time.convert_weekday_full(get_time.LS(0)))
    #
    # label2= v['label2']
    # label2.text = str(past_miles)
    #
    # label3= v['label3']
    # label3.text = str(past_run_count)
    #
    # label4= v['label4']
    # label4.text = str(past_ten_percent)
    #
    # label5= v['label5']
    # label5.text = ("\n".join(past_run_title_label))
    #
    # label6= v['label6']
    # label6.text = ("\n".join(past_run_mile_label))
    #
    # label7= v['label7']
    # label7.text = ("\n".join(past_run_pace_label))
    #
    # label8= v['label8']
    # label20.text = ("\n".join(past_run_elapsed_label))
    #
    # label9= v['label9']
    # label9.text = ("\n".join(past_run_partner_label))
def current_period(dictionary):
    """Summarise the runs from this week's Monday up to now.

    Side effects: sets the module globals current_miles and
    current_week_count, which period()/remaining() read afterwards.
    """
    dict_2 = dictionary.copy()
    global current_miles
    global current_week_count
    # Keep only activities from this week's Monday onwards.
    for key in dictionary:
        if key < get_time.LM(0):
            del dict_2[key]
    current_week_count = calc.activity_count(dict_2)
    mile_list = []
    for i in dict_2:
        mile_list.append(float(dict_2[i]['distance_miles']))
    current_miles = sum(mile_list)
    # Per-run label lists, each in ascending key order.
    current_run_title_label = []
    for i in list(sorted(dict_2)):
        current_run_title_label.append(dict_2[i]['weekday_short_date'])
    current_run_mile_label = []
    for i in list(sorted(dict_2)):
        current_run_mile_label.append(dict_2[i]['distance_miles'])
    current_run_pace_label = []
    for i in list(sorted(dict_2)):
        current_run_pace_label.append(dict_2[i]['pace'])
    current_run_elapsed_label = []
    for i in list(sorted(dict_2)):
        current_run_elapsed_label.append(str(dict_2[i]['elapsed']))
    current_run_treadmill_label = []
    for i in list(sorted(dict_2)):
        current_run_treadmill_label.append(str(dict_2[i]['treadmill_flagged']))
    print("current run")
    print(current_run_title_label)
    print(current_run_mile_label)
    print(current_run_pace_label)
    print(current_run_elapsed_label)
    print(current_run_treadmill_label)
    # Pythonista UI bindings, kept for reference (module runs headless now).
    # label20= v['label20']
    # label20.text = (get_time.weekday(get_time.LM(0)) + " " + str(get_time.LM(0).day) + " - " + get_time.weekday(get_time.now()) + " " + str(get_time.now().day))
    #
    # label21= v['label21']
    # label21.text = str(current_week_count)
    #
    # label22= v['label22']
    # label22.text = str(current_miles)
    #
    # label23= v['label23']
    # label23.text = ("\n".join(current_run_title_label))
    #
    # label24= v['label24']
    # label24.text = ("\n".join(current_run_mile_label))
    #
    # label25= v['label25']
    # label25.text = ("\n".join(current_run_pace_label))
    #
    # label26= v['label26']
    # label26.text = ("\n".join(current_run_elapsed_label))
    #
    # label27= v['label27']
    # label27.text = ("\n".join(current_run_partner_label))
def remaining(past_ten_percent,past_miles):
    """Print the miles still needed this week to beat last week by 10%.

    Reads the global current_miles, which current_period() must have set.
    """
    remaining_miles = ("{0:.2f}".format((float(past_ten_percent) + float(past_miles)) - float(current_miles)))
    print("REMAINING")
    print(remaining_miles)
    # Pythonista UI binding, kept for reference.
    # label40= v['label40']
    # label40.text = str(remaining_miles)
# Order matters: current_period() sets the global current_miles that
# period() -> remaining() reads.
current_period(master_dict)
period(master_dict,0,2)
| null | Winter_10.py | Winter_10.py | py | 5,363 | python | en | code | null | code-starcoder2 | 51 |
340674584 | from influxdb import InfluxDBClient
from numpy.random import default_rng
import numpy as np
import pandas as pd
import datetime
import random
import csv
import os
from optimal_downsampling_manager.resource_predictor.estimate_table import Degraded_IATable, get_context, DownTimeTable, DownRatioTable, Degraded_Q_IATable, get_month_and_day
from math import e
import sys
import yaml
import argparse
# Crawler configuration: database host/port and database names.
with open('configuration_manager/config.yaml','r') as yamlfile:
    data = yaml.load(yamlfile,Loader=yaml.FullLoader)
# Fixed seed so the generated query workload is reproducible across runs.
np.random.seed(10)
DBclient = InfluxDBClient(host=data['global']['database_ip'], port=data['global']['database_port'], database=data['global']['database_name'], username='root', password='root')
resultDBclient = InfluxDBClient(host=data['global']['database_ip'], port=data['global']['database_port'], database=data['global']['exp_database_name'], username='root', password='root')
# Per-analytic normalisation constants and per-video PCA entropy features.
result = DBclient.query('SELECT * FROM MaxAnalyticTargetNumber')
MaxTargetTable = pd.DataFrame(list(result.get_points(measurement="MaxAnalyticTargetNumber")))
result = DBclient.query('SELECT * FROM visual_features_entropies_PCA_normalized')
PCATable = pd.DataFrame(list(result.get_points(measurement="visual_features_entropies_PCA_normalized")))
# Downsampling algorithms to evaluate, and the seen/unseen analytics split.
alog_list = ['EF','EFR','FIFO','approx','heuristic','opt']
SEEN_ANALY_LIST = ["illegal_parking0", "people_counting"]
UNSEEN_ANALY_LIST = ["illegal_parking1", "car_counting"]
if __name__=='__main__':
    # For each round: draw a Poisson-distributed query workload over one
    # week of videos, then score every downsampling algorithm by the
    # information-amount error of the videos it kept in the server.
    round_ = 1
    week = "week"
    for ro in range(round_):
        print("Generate queries...Round ",ro)
        # Start the per-round CSV from scratch.
        if os.path.isfile(f'./experiments/query_ia_error_allalgo_{week}_round{ro}.csv'):
            os.remove(f'./experiments/query_ia_error_allalgo_{week}_round{ro}.csv')
        start_day = 9
        end_day = 15
        size = (end_day-start_day+1)
        query_video_list = []
        chosen_ana_list = []
        rng = default_rng()
        full_length_sample_quality_info_df = None
        full_info_df = None
        for r in range(size):
            date = str(r + start_day)
            result = DBclient.query("SELECT * FROM raw_11_"+str(date))
            per_day_video_list = list(result.get_points(measurement="raw_11_"+str(date)))
            video_num_per_day = len(per_day_video_list)
            poisson_query = np.random.poisson(lam=8/video_num_per_day, size=video_num_per_day) # 8 request / 24 hour
            # with open(f'./poisson.csv','a',newline='') as f:
            #     writer = csv.writer(f)
            #     writer.writerow([poisson_query, sum(poisson_query)])
            # Every video with a non-zero draw becomes a query; each query
            # also picks analytics at random from the "seen" set.
            for idx_q, num_q in enumerate(poisson_query):
                if num_q == 0:
                    continue
                chosen_ana_list.append(rng.choice(len(SEEN_ANALY_LIST), num_q ,replace=True))
                query_video_list.append(per_day_video_list[idx_q])
            # Accumulate the per-day ground-truth tables for the whole week.
            result = DBclient.query('SELECT * FROM sample_quality_alltarget_inshot_11_'+str(date))
            full_length_sample_quality_info_df = pd.concat([full_length_sample_quality_info_df, pd.DataFrame(list(result.get_points(measurement='sample_quality_alltarget_inshot_11_'+str(date))))])
            result = DBclient.query('SELECT * FROM analy_complete_result_inshot_11_'+str(date))
            full_info_df = pd.concat([full_info_df, pd.DataFrame(list(result.get_points(measurement='analy_complete_result_inshot_11_'+str(date))))])
        for algo in alog_list:
            result = resultDBclient.query("SELECT * FROM video_in_server_"+algo)
            video_in_server = pd.DataFrame(list(result.get_points(measurement = "video_in_server_"+algo)))
            query_result_ia = []
            for q in query_video_list:
                # information amount of original video: normalised analytic
                # target counts plus the PCA entropy feature.
                origin_video_info = (full_info_df.loc[(full_info_df['name']==q['name']) & (full_info_df['a_type']=='illegal_parking0')]['target'].iloc[0] / MaxTargetTable.loc[(MaxTargetTable['a_type']=='illegal_parking0')]['value'].iloc[0])
                origin_video_info += (full_info_df.loc[(full_info_df['name']==q['name']) & (full_info_df['a_type']=='people_counting')]['target'].iloc[0] / MaxTargetTable.loc[(MaxTargetTable['a_type']=='people_counting')]['value'].iloc[0])
                origin_video_info += PCATable.loc[PCATable['name']==q['name']].iloc[0]['value']
                target_point = video_in_server.loc[video_in_server['name']==q['name']]
                if not target_point.empty:
                    target_fps = str(target_point['fps'].iloc[0]); target_bitrate = str(target_point['bitrate'].iloc[0])
                    ### Information amount of complete videos in server
                    if target_fps =='24' and target_bitrate =='1000':
                        preserved_video_info = origin_video_info
                    else: ### Information amount of sampled videos in server
                        # A missing row in the sample-quality table counts as
                        # zero preserved information for that analytic.
                        try:
                            preserved_video_info_ill0 = full_length_sample_quality_info_df.loc[(full_length_sample_quality_info_df['name']==q['name']) & (full_length_sample_quality_info_df['a_type']=='illegal_parking0') & (full_length_sample_quality_info_df['fps']==target_fps) & (full_length_sample_quality_info_df['bitrate']==target_bitrate)]['target'].iloc[0]
                            preserved_video_info_ill0 /= MaxTargetTable.loc[(MaxTargetTable['a_type']=='illegal_parking0')]['value'].iloc[0]
                        except:
                            print(q['name'], "fps:", target_fps, "bitrate:", target_bitrate,'ill')
                            preserved_video_info_ill0 = 0
                        try:
                            preserved_video_info_peo = full_length_sample_quality_info_df.loc[(full_length_sample_quality_info_df['name']==q['name']) & (full_length_sample_quality_info_df['a_type']=='people_counting') & (full_length_sample_quality_info_df['fps']==target_fps) & (full_length_sample_quality_info_df['bitrate']==target_bitrate)]['target'].iloc[0]
                            preserved_video_info_peo /= MaxTargetTable.loc[(MaxTargetTable['a_type']=='people_counting')]['value'].iloc[0]
                        except:
                            print(q['name'], "fps:", target_fps, "bitrate:", target_bitrate,'peo')
                            preserved_video_info_peo = 0
                        try:
                            preserved_video_info_pca = PCATable.loc[PCATable['name']==q['name']].iloc[0]['value']
                        except:
                            preserved_video_info_pca = 0
                        preserved_video_info = preserved_video_info_ill0 + preserved_video_info_peo + preserved_video_info_pca
                    info_error = abs(origin_video_info-preserved_video_info)
                else:
                    # Deleted video: the whole original information is lost.
                    print("Queried video has been deleted...")
                    info_error = origin_video_info
                query_result_ia.append(info_error)
            # One CSV row per algorithm: mean and max information error.
            with open(f'./experiments/query_ia_error_allalgo_{week}_round{ro}.csv','a',newline='') as f:
                writer = csv.writer(f)
                writer.writerow([sum(query_result_ia)/len(query_result_ia), max(query_result_ia)])
| null | query_generator.py | query_generator.py | py | 7,452 | python | en | code | null | code-starcoder2 | 51 |
263821164 | import json
import logging
from typing import List, Callable, Any, Optional
from cryptoxlib.WebsocketMgr import Subscription, WebsocketMgr, WebsocketMessage, Websocket, CallbacksType
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.binance.functions import map_ws_pair
from cryptoxlib.clients.binance.enums import CandelstickInterval
LOG = logging.getLogger(__name__)
class BinanceWebsocket(WebsocketMgr):
    """Websocket manager for the public Binance combined-stream endpoint."""
    WEBSOCKET_URI = "wss://stream.binance.com:9443/"
    SUBSCRIPTION_ID = 0  # class-wide counter used to tag SUBSCRIBE requests
    def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
                 websocket_uri: str = None, ssl_context = None) -> None:
        super().__init__(websocket_uri = websocket_uri if websocket_uri is not None else BinanceWebsocket.WEBSOCKET_URI,
                         subscriptions = subscriptions,
                         builtin_ping_interval = None,
                         ssl_context = ssl_context,
                         auto_reconnect = True)
        self.api_key = api_key
        self.sec_key = sec_key
        # Needed by subscriptions that must call the REST API before
        # streaming (e.g. AccountSubscription fetching its listen key).
        self.binance_client = binance_client
    def get_websocket_uri_variable_part(self):
        # Combined-stream URI: stream?streams=<ch1>/<ch2>/...
        return "stream?streams=" + "/".join([subscription.get_channel_name() for subscription in self.subscriptions])
    async def initialize_subscriptions(self) -> None:
        """Run each subscription's async setup before connecting."""
        for subscription in self.subscriptions:
            await subscription.initialize(binance_client = self.binance_client)
    async def _subscribe(self, websocket: Websocket):
        """Send one SUBSCRIBE frame covering every configured channel."""
        BinanceWebsocket.SUBSCRIPTION_ID += 1
        subscription_message = {
            "method": "SUBSCRIBE",
            "params": [
                subscription.get_channel_name() for subscription in self.subscriptions
            ],
            "id": BinanceWebsocket.SUBSCRIPTION_ID
        }
        LOG.debug(f"> {subscription_message}")
        await websocket.send(json.dumps(subscription_message))
    @staticmethod
    def _is_subscription_confirmation(response):
        # Binance acknowledges a SUBSCRIBE with {"result": null, "id": ...}.
        if 'result' in response and response['result'] is None:
            return True
        else:
            return False
    async def _process_message(self, websocket: Websocket, message: str) -> None:
        """Decode an incoming frame and route it to the matching subscription."""
        message = json.loads(message)
        if self._is_subscription_confirmation(message):
            LOG.info(f"Subscription confirmed for id: {message['id']}")
        else:
            # regular message: the 'stream' field names the channel and is
            # used as the subscription id for dispatching callbacks.
            await self.publish_message(WebsocketMessage(subscription_id = message['stream'], message = message))
class BinanceSubscription(Subscription):
    """Base class for all Binance stream subscriptions.

    NOTE(review): get_channel_name is declared as a @staticmethod here but
    every subclass overrides it as an instance method.
    """
    def __init__(self, callbacks: CallbacksType = None):
        super().__init__(callbacks)
    @staticmethod
    def get_channel_name():
        """Raw stream/channel name; overridden by each concrete subscription."""
        pass
    def get_subscription_message(self, **kwargs) -> dict:
        # Binance subscribes via the combined-stream URI / SUBSCRIBE frame
        # built by the websocket manager, so no per-subscription message.
        pass
    def construct_subscription_id(self) -> Any:
        # The channel name is unique per subscription and doubles as its id.
        return self.get_channel_name()
class AllMarketTickersSubscription(BinanceSubscription):
    """Stream of 24h rolling-window tickers for every market.

    The base-class constructor already accepts the callbacks, so no
    __init__ override is needed.
    """
    def get_channel_name(self):
        return "!ticker@arr"
class BestOrderBookTickerSubscription(BinanceSubscription):
    """Stream of best bid/ask updates across all symbols.

    The base-class constructor already accepts the callbacks, so no
    __init__ override is needed.
    """
    def get_channel_name(self):
        return "!bookTicker"
class BestOrderBookSymbolTickerSubscription(BinanceSubscription):
    """Best bid/ask stream restricted to a single trading pair."""
    def __init__(self, pair: Pair, callbacks: CallbacksType = None):
        super().__init__(callbacks)
        self.pair = pair
    def get_channel_name(self):
        return map_ws_pair(self.pair) + "@bookTicker"
class TradeSubscription(BinanceSubscription):
    """Raw trade stream for a single trading pair."""
    def __init__(self, pair: Pair, callbacks: CallbacksType = None):
        super().__init__(callbacks)
        self.pair = pair
    def get_channel_name(self):
        return f"{map_ws_pair(self.pair)}@trade"
class AggregateTradeSubscription(BinanceSubscription):
    """Aggregated trade stream for a single trading pair."""
    def __init__(self, pair: Pair, callbacks: CallbacksType = None):
        super().__init__(callbacks)
        self.pair = pair
    def get_channel_name(self):
        return f"{map_ws_pair(self.pair)}@aggTrade"
class CandlestickSubscription(BinanceSubscription):
    """Candlestick (kline) stream for one pair at a fixed interval."""
    def __init__(self, pair: Pair, interval: CandelstickInterval, callbacks: CallbacksType = None):
        super().__init__(callbacks)
        self.interval = interval
        self.pair = pair
    def get_channel_name(self):
        return map_ws_pair(self.pair) + "@kline_" + self.interval.value
class AccountSubscription(BinanceSubscription):
    """User-data stream; its channel name is the REST-issued listen key."""
    def __init__(self, callbacks: CallbacksType = None):
        super().__init__(callbacks)
        # Filled in by initialize(); the channel name is invalid until then.
        self.listen_key = None
    async def initialize(self, **kwargs):
        """Fetch the listen key from the REST API before streaming starts."""
        binance_client = kwargs['binance_client']
        listen_key_response = await binance_client.get_listen_key()
        self.listen_key = listen_key_response["response"]["listenKey"]
        LOG.debug(f'Listen key: {self.listen_key}')
    def get_channel_name(self):
        return self.listen_key
class BinanceTestnetWebsocket(BinanceWebsocket):
    """Same behaviour as BinanceWebsocket but pointed at the testnet endpoint."""
    WEBSOCKET_URI = "wss://testnet.binance.vision/"
    def __init__(self, subscriptions: List[Subscription], binance_client, api_key: str = None, sec_key: str = None,
                 ssl_context = None) -> None:
        super().__init__(subscriptions = subscriptions, binance_client = binance_client, api_key = api_key,
                         sec_key = sec_key, websocket_uri = BinanceTestnetWebsocket.WEBSOCKET_URI,
                         ssl_context = ssl_context)
| null | cryptoxlib/clients/binance/BinanceWebsocket.py | BinanceWebsocket.py | py | 5,553 | python | en | code | null | code-starcoder2 | 51 |
551508955 | from flask_twisted import Twisted
from wordStore.app import create_app
# Build the Flask application and serve it through Twisted's reactor.
app = create_app()
twisted = Twisted(app)
if __name__ == '__main__':
    # Host/port/debug come from the app's own configuration.
    twisted.run(
        host=app.config['host'], port=app.config['port'], debug=app.config['debug'])
| null | manage.py | manage.py | py | 247 | python | en | code | null | code-starcoder2 | 51 |
566139547 | import datetime
import os
import matplotlib.pyplot as plt
import networkx as nx
class Representation:
    """Collects objective-function samples over iterations and renders
    convergence charts and the best network graph with matplotlib/networkx.
    """
    def __init__(self):
        # Parallel lists: iteration numbers and the objective value sampled
        # at each of them.
        self.iterations =[]
        self.values_fc = []
    def reset_chart(self):
        """Discard all collected samples."""
        self.iterations= []
        self.values_fc= []
    def add_point_to_chart(self,iteration, value):
        """Record one (iteration, objective value) sample."""
        self.iterations.append(iteration)
        self.values_fc.append(value)
    def save_chart(self,path,testCase,test,bests,iterations):
        """Save a cost-per-iteration chart to <path>/<testCase>_<test>_chart.png.

        NOTE(review): plots the *bests* argument, not the samples collected
        via add_point_to_chart() — the instance buffers are unused here.
        """
        figure, axes = plt.subplots()
        plt.ylabel('Uzyskany koszt sieci w danej iteracji')
        plt.xlabel('Iteracja')
        plt.plot(range(int(iterations)), bests, linewidth=2.0)
        #plt.show()
        plt.savefig(path + "/" + testCase + "_" + test + '_chart.png',
                    bbox_inches='tight', format='png')
        plt.close(figure)
    def generate_out_files(self):
        """Show (interactively) the collected objective-function curve."""
        plt.ylabel('Wartość funkcji celu')
        plt.xlabel('Iteracja')
        print(self.iterations)
        plt.plot(self.iterations, self.values_fc, linewidth=2.0)
        #plt.grid(True)
        plt.title("Funkcja celu")
        plt.show()
    def save_graph(self, Graph, path, testCase, test, config_parameters, cost, cost_cities, cost_ps, show):
        """Render the solution graph (cities + power plants) with a cost
        legend and save it to <path>/<testCase>_<test>_theBest.png.

        Node ids >= 0 are cities, negative ids are power plants; every node
        carries 'x'/'y' position attributes.
        NOTE(review): uses Graph.node[...], which was removed in
        networkx >= 2.4 (use Graph.nodes[...]); this class therefore
        requires an older networkx.
        """
        figure, axes = plt.subplots()
        cities = {}
        electricity = {}
        edges_electricity = {}
        edges_cities = {}
        keysC = set()
        keysE = set()
        # Split nodes into cities (id >= 0) and power plants (id < 0),
        # keyed by node id with (x, y) positions as values.
        for node in Graph.nodes():
            if node >= 0:
                cities.update({node: (Graph.node[node]['x'], Graph.node[node]['y'])})
            else:
                electricity.update({node: (Graph.node[node]['x'], Graph.node[node]['y'])})
        # Split edges the same way: an edge touching a negative node belongs
        # to the power grid, otherwise to the rail network. The dicts map
        # coordinates to themselves so they can serve as position lookups.
        for edge in Graph.edges():
            if edge[0] < 0 or edge[1] < 0:
                # edges_electricity.update({edge: ((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),
                #                                  (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']))})
                edges_electricity.update({(Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']):
                                              (Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),
                                          (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']):
                                              (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])})
                keysE.add(((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),
                           (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])))
            else:
                # edges_cities.update({edge[0]: ((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),
                #                                (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']))})
                edges_cities.update({(Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']):
                                         (Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),
                                     (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y']):
                                         (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])})
                keysC.add(((Graph.node[edge[0]]['x'], Graph.node[edge[0]]['y']),
                           (Graph.node[edge[1]]['x'], Graph.node[edge[1]]['y'])))
        nx.draw_networkx_nodes(Graph, cities, cities.keys(), node_color='red', node_size=150,
                               label='Miasto',
                               ax=axes)
        nx.draw_networkx_nodes(Graph, electricity, electricity.keys(), node_color='blue', node_size=150, node_shape='h',
                               label='\nElektrownia\n',
                               ax=axes)
        # nx.draw_networkx_edges(Graph, edges_cities, edge_color="black" )
        nx.draw_networkx_edges(Graph, edges_cities, keysC,
                               label="Rail network cost:" + str(format(cost_cities, '.7f')) + '\nK: ' +
                                     config_parameters[0], ax=axes)
        nx.draw_networkx_edges(Graph, edges_electricity, keysE, edge_color="red",
                               label="Power grid cost:" + str(format(cost_ps, '.7f')) + '\nKe: ' +
                                     config_parameters[1],ax=axes)
        # Invisible node used only to smuggle the summary text into the legend.
        empty = {(0, 0): (0, 0)}
        nx.draw_networkx_nodes(Graph, empty, empty.keys(), node_color='white', node_size=0,
                               label='\n\nCAPEX: ' + str(format(cost, '.7f'))
                                     + '\nPopulation: ' + str(config_parameters[2])
                                     + '\nSelection: ' + str(config_parameters[3])
                                     + '\nIterations: ' + str(config_parameters[4]),
                               ax=axes)
        # nx.draw_networkx(Graph)
        handles, labels = axes.get_legend_handles_labels()
        legend = axes.legend(handles, labels, loc='upper center', ncol=3, bbox_to_anchor=(0.5, -0.1))
        # legend.get_frame().set_alpha(0.5)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.title("Najlepsze uzyskane rozwiązanie")
        if show:
            plt_copy = plt
            plt_copy.show()
        # plt.imsave(path+ folder_out+"/"+ testCase + "_" + test + '_bestIm.png', format='png')
        plt.savefig(path + "/" + testCase + "_" + test + '_theBest.png',
                    bbox_extra_artists=(legend,), bbox_inches='tight', format='png')
        plt.close(figure)
| null | src/representation.py | representation.py | py | 5,446 | python | en | code | null | code-starcoder2 | 50 |
179960298 |
from bs4 import BeautifulSoup
from datetime import datetime,date
import requests
import os
import sys
import re
import time
from subprocess import call,Popen,check_output,PIPE
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'includes'))
from customSettings import repoDir,djangoDir,djangoSettings,startFinYear,panchayatCrawlThreshold,panchayatRetryThreshold,telanganaStateCode,panchayatAttemptRetryThreshold,apStateCode,crawlRetryThreshold,crawlProcessTimeThreshold,logDir
#from crawlFunctions import crawlPanchayat,crawlPanchayatTelangana,libtechCrawler
from libtechCrawler import libtechCrawler
sys.path.insert(0, repoDir)
fileDir=os.path.dirname(os.path.abspath(__file__))
sys.path.append(djangoDir)
from libtechInit import libtechLoggerFetch
from nregaFunctions import stripTableAttributes,htmlWrapperLocal,getCurrentFinYear,savePanchayatReport,table2csv,getFullFinYear
from wrappers.logger import loggerFetch
import django
from django.core.wsgi import get_wsgi_application
from django.core.files.base import ContentFile
from django.utils import timezone
from django.db.models import F,Q,Sum,Count
os.environ.setdefault("DJANGO_SETTINGS_MODULE", djangoSettings)
django.setup()
from nrega.models import State,District,Block,Panchayat,Muster,LibtechTag,CrawlQueue,CrawlState
def argsFetch():
    '''
    Paser for the argument list that returns the args list
    '''
    import argparse
    # NOTE(review): several help strings look copy-pasted from another
    # script ("delayed payment report ... crawld") and do not match the
    # flags' actual purposes.
    parser = argparse.ArgumentParser(description='These scripts will initialize the Database for the district and populate relevant details')
    parser.add_argument('-l', '--log-level', help='Log level defining verbosity', required=False)
    parser.add_argument('-limit', '--limit', help='Limit on the number of results', required=False)
    parser.add_argument('-downloadLimit', '--downloadLimit', help='Limit on the number of results', required=False)
    parser.add_argument('-s', '--stateCode', help='State for which the delayed payment report needs to be crawld', required=False)
    parser.add_argument('-sf', '--startFinYear', help='From which financial year data needs to be crawled default is 2015-2016', required=False)
    parser.add_argument('-step', '--step', help='Step for which the script needs to run', required=False)
    parser.add_argument('-pc', '--panchayatCode', help='Panchayat for which the delayed payment report needs to be crawld', required=False)
    parser.add_argument('-qid', '--qid', help='Queue Id for which this needs to be run', required=False)
    parser.add_argument('-bc', '--blockCode', help='Panchayat for which the delayed payment report needs to be crawld', required=False)
    # Boolean mode flags: present -> 1, absent -> None.
    parser.add_argument('-m', '--manage', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)
    parser.add_argument('-e', '--execute', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)
    parser.add_argument('-p', '--populate', help='Populate CrawlQueue', required=False,action='store_const', const=1)
    parser.add_argument('-f', '--force', help='Force Run a step', required=False,action='store_const', const=1)
    parser.add_argument('-se', '--singleExecute', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)
    parser.add_argument('-i', '--initiateCrawl', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)
    parser.add_argument('-d', '--debug', help='Debug Panchayat Crawl Queue', required=False,action='store_const', const=1)
    parser.add_argument('-t', '--test', help='Manage Panchayat Crawl Queue', required=False,action='store_const', const=1)
    # Return a plain dict so callers can use args['flag'] / args.get('flag').
    args = vars(parser.parse_args())
    return args
def main():
    """Entry point: dispatch on the parsed command-line flags.

    -i/--initiateCrawl: for each active crawl state, (re)launch a child
        crawlMain.py -e process unless one is already running; kill and
        snapshot the log of a child that has run too long.
    -p/--populate: enqueue one panchayat or every panchayat of a block.
    -e/--execute: run libtechCrawler for the named step.
    """
    args = argsFetch()
    logger = loggerFetch(args.get('log_level'))
    if args['initiateCrawl']:
        logger.debug("This script is going to initiate crawl")
        if args['step']:
            crawlStates=CrawlState.objects.filter(name=args['step'])
        else:
            #crawlStates=CrawlState.objects.all()
            crawlStates=CrawlState.objects.filter(isActive=True)
        for eachState in crawlStates:
            curStateName=eachState.name
            logger.debug("Curent state name is %s " % curStateName)
            # NIC servers are only usable 06:00-20:00 local time; states
            # flagged with nicHourRestriction must stay inside that band.
            curhour=datetime.now().hour
            nicTimeBand=False
            if (curhour >=6) and (curhour < 20):
                nicTimeBand=True
            scriptDir='%s/custom/crawlScripts/' % djangoDir
            #    scriptName='%s/custom/crawlScripts/b.sh %s ' % (djangoDir,str(eachState.id))
            logfile="/tmp/cq%s.log" % (curStateName)
            debuglogfile="/tmp/debug%s_%s.log" % (curStateName,str(int(time.time())))
            if ((eachState.nicHourRestriction==False) or ((eachState.nicHourRestriction==True) and (nicTimeBand==True))):
                cmd="python %s/crawlMain.py -e -l debug -step %s " % (scriptDir,curStateName)
                #      cmd=scriptName
                # pgrep -f matches the full command line, so an identical
                # already-running invocation is found by its own cmd string.
                p1 = Popen(['pgrep', '-f', cmd], stdout=PIPE)
                mypid = p1.communicate()[0].decode("utf-8").lstrip().rstrip()
                logger.debug("Exsiting PID for this command %s is %s " % (curStateName,str(mypid)))
                if (mypid == ""):
                    # Fire-and-forget child process; its output goes to logfile.
                    logger.debug("We are going to launch this program %s" % cmd)
                    with open(logfile,"wb") as f:
                        proc = Popen([cmd], shell=True,stdin=None, stdout=f, stderr=None, close_fds=True)
                else:
                    # A child is already running: check its elapsed seconds and
                    # kill it (keeping a log snapshot) if it exceeded the cap.
                    mycmd='ps -o etimes= -p %s ' % mypid
                    p1 = Popen([mycmd], stdout=PIPE,shell=True)
                    output = p1.communicate()[0].decode("utf-8").lstrip().rstrip()
                    logger.debug(output)
                    if int(output) > crawlProcessTimeThreshold:
                        #Before Killing we will copy the log file to check for errors
                        mycmd="cp %s %s " % (logfile,debuglogfile)
                        p1 = Popen([mycmd], stdout=PIPE,shell=True)
                        mycmd="kill -9 %s " % mypid
                        p1 = Popen([mycmd], stdout=PIPE,shell=True)
                        output = p1.communicate()[0].decode("utf-8").lstrip().rstrip()
                        logger.debug(output)
    if args['populate']:
        panchayatCode=args['panchayatCode']
        blockCode=args['blockCode']
        if panchayatCode is not None:
            eachPanchayat=Panchayat.objects.filter(code=panchayatCode).first()
            CrawlQueue.objects.create(panchayat=eachPanchayat)
        elif blockCode is not None:
            # Enqueue every panchayat belonging to the block.
            eachBlock=Block.objects.filter(code=blockCode).first()
            myPanchayats=Panchayat.objects.filter(block=eachBlock)
            for eachPanchayat in myPanchayats:
                CrawlQueue.objects.create(panchayat=eachPanchayat)
            #   CrawlQueue.objects.create(block=eachBlock,priority=500)
    if args['execute']:
        state=args['step']
        crawlState=CrawlState.objects.filter(name=state).first()
        if crawlState is not None:
            # Bug fix: logFileName was previously built from crawlState.name
            # BEFORE the None check above, raising AttributeError whenever an
            # unknown step name was supplied. It now lives inside the guard.
            logFileName="%s.log" % (crawlState.name)
            logger1 = libtechLoggerFetch('debug',filename=logFileName)
            limit=args['limit']
            if args['downloadLimit']:
                downloadLimit=int(args['downloadLimit'])
            else:
                downloadLimit=None
            qid=args['qid']
            forceRun=args['force']
            try:
                libtechCrawler(logger1,crawlState,qid=qid,forceRun=forceRun,downloadLimit=downloadLimit)
            except:
                # Log the full traceback to the per-step log, then re-raise
                # so the caller/cron still sees the failure.
                logger1.exception('Got exception on main handler')
                raise
        else:
            logger.info("Not a Valid Crawl Step")
    logger.info("...END PROCESSING")
    exit(0)
if __name__ == '__main__':
    main()
| null | django/n.libtech.info/src/custom/crawlScripts/crawlMain.py | crawlMain.py | py | 7,211 | python | en | code | null | code-starcoder2 | 50 |
523380136 | import numpy as np
import operator # k-近邻算法执行排序操作
import argparse
import matplotlib
import matplotlib.pyplot as plt
import os
def createDataSet():
    """Return the tiny demo training set: four 2-D points and their labels."""
    # Each row of the matrix is one measured point; tags[i] labels row i.
    points = np.array([[1.0, 1.1],
                       [1.0, 1.0],
                       [0, 0],
                       [0, 0.1]])
    tags = ['A', 'A', 'B', 'B']
    return points, tags
# Parameters: inX: the input vector to classify;
#             dataSet: the training sample set;
#             labels: the label vector;
#             k: how many nearest neighbours to consult
def classify0(inX, dataSet, labels, k):
    """Classify *inX* by majority vote among its k nearest training samples."""
    # Euclidean distance from inX to every training sample (broadcasting
    # replaces the original tile-based expansion; the arithmetic is identical).
    deltas = dataSet - np.asarray(inX)
    distances = (deltas ** 2).sum(axis=1) ** 0.5
    # Sample indices ordered by increasing distance.
    nearest_first = distances.argsort()
    # Tally the labels of the k closest samples.
    vote_counts = {}
    for rank in range(k):
        neighbour_label = labels[nearest_first[rank]]
        vote_counts[neighbour_label] = vote_counts.get(neighbour_label, 0) + 1
    # Highest count wins; the stable descending sort keeps the original
    # tie-breaking (first label to reach the top count stays in front).
    ranked = sorted(vote_counts.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
# Sample file: datingTestSet.txt
# Sample features: frequent-flyer miles per year; % of time spent on video
# games; litres of ice cream consumed per week.
# Converts the text records into NumPy structures.
def file2matrix(filename):
    """Parse a tab-separated sample file into a feature matrix and label list.

    Args:
        filename: path to a file whose lines hold three numeric features
            followed by an integer class label, separated by tabs.

    Returns:
        (returnMat, classLabelVector): an (n, 3) float matrix of features
        and a list of n integer labels.
    """
    # Count the number of lines in the file
    # NOTE(review): the file handle is never closed; consider a with-block.
    fr = open(filename)
    arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    # Create the NumPy matrix to return
    returnMat = np.zeros((numberOfLines, 3))  # second dimension fixed at 3 features
    classLabelVector = []
    index = 0
    # Parse the file data into the matrix / label vector
    for line in arrayOLines:
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
        index += 1
    # Return the training sample matrix and the class-label vector
    return returnMat, classLabelVector
# Rescales every feature column of the sample set into the [0, 1] range.
# Input:  the raw feature matrix.
# Output: the scaled matrix, the per-column value range, and the per-column minimum.
def autoNorm(dataSet):
    """Min-max normalise each column of *dataSet* to [0, 1]."""
    col_min = dataSet.min(0)   # axis 0 -> minimum of each column, not each row
    col_max = dataSet.max(0)
    ranges = col_max - col_min
    # Broadcasting subtracts/divides the column statistics row by row,
    # replacing the original tile() expansion; these are element-wise ops,
    # not matrix division.
    normDataSet = (dataSet - col_min) / ranges
    return normDataSet, ranges, col_min
# Evaluates the classifier against the dating-site data set.
# Prints the classifier's error rate.
def datingClassTest():
    """Hold out 10% of the dating data and report classify0's error rate."""
    hoRatio = 0.10
    # Read the data from file and normalise the feature values
    datingDataMat, datingLabels = file2matrix('./files/datingTestSet2.txt')
    normMat, ranges, minValues = autoNorm(datingDataMat)
    # The test-vector count decides which rows of normMat are used for
    # testing and which serve as the classifier's training samples.
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print('预测结果:%d , 真实结果:%d' % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]):
            errorCount += 1.0
    print('最终出错率为:%f' % (errorCount / float(numTestVecs)))
# Interactive compatibility predictor for the dating-site demo.
def classifyPerson():
    """Prompt for three features and print the predicted compatibility class."""
    resultList = ['不太喜欢', '可以做朋友', '可以交往']
    # Fixed: the first prompt ended with '/n' instead of the newline escape '\n'.
    percentTats = float(input("请问你每天的时间花在看视频和游戏上的占比是多少?\n"))
    ffMiles = float(input("请问你每年的飞行常客里程数是多少?\n"))
    iceCream = float(input("请问你周吃多公升冰淇淋?"))
    # Train on the full data set, then normalise the user's input the same way.
    datingDataMat, datingLabels = file2matrix('./files/datingTestSet2.txt')
    normMat, ranges, minValues = autoNorm(datingDataMat)
    inArr = np.array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr-minValues)/ranges, normMat, datingLabels, 3)
    # Labels are 1-based, resultList is 0-based.
    print("海伦可能会对你说:%s"%resultList[classifierResult-1])
# Converts a 32x32 text-encoded image into a vector.
# Input: the file path. Output: a NumPy array.
def img2vector(filename):
    """Flatten a 32x32 text-encoded digit image into a (1, 1024) vector.

    Args:
        filename: path to a file with 32 lines of 32 '0'/'1' characters.

    Returns:
        A (1, 1024) NumPy array holding the image digits row by row.
    """
    returnVect = np.zeros((1, 1024))
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32*i+j] = int(lineStr[j])
    return returnVect
def handwritingClassTest():
    """Train kNN on ./files/trainingDigits and report errors on ./files/testDigits."""
    hwLabels = []
    trainingFileList = os.listdir('./files/trainingDigits')
    m = len(trainingFileList)
    trainingMat = np.zeros((m, 1024))
    # Turn the training samples into a matrix; each file name encodes its
    # class, e.g. '3_12.txt' is digit 3, sample 12.
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        trainingMat[i, :] = img2vector('./files/trainingDigits/%s' % fileNameStr)
    # Vectorise the test data the same way
    testFileList = os.listdir('./files/testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('./files/testDigits/%s' % fileNameStr)
        # Classify with kNN, k=3
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        # Print prediction vs. ground truth for every test sample
        print("预测结果为:%d;实际结果为:%d" % (classifierResult, classNumStr))
        if (classifierResult != classNumStr):
            errorCount += 1
            print("预测结果为:%d;实际结果为:%d;错误: %s" % (classifierResult, classNumStr, fileNameStr))
    print("最终错误个数为:%d;错误率为:%f;" % (errorCount, errorCount / float(mTest)))
def main(args):
    """Dispatch CLI args: run a bundled demo or classify a custom vector."""
    if args.example:
        if args.example=='dating':
            datingClassTest()
            return
        elif args.example=='handwriting':
            handwritingClassTest()
            return
    # No example requested: load samples from -path and classify -feature.
    dataSet,labels=file2matrix(args.path)
    # NOTE(review): 'type' shadows the builtin of the same name; k is fixed at 2.
    type=classify0(args.feature,dataSet,labels,2)
    print(type)
if __name__ == '__main__':
    # Command-line interface: either run a bundled example (-example) or
    # classify a feature vector (-feature) against samples loaded from -path.
    parser = argparse.ArgumentParser()
    parser.add_argument("-path",help='File Path, Just Like: -path ./files/kNN_test.txt',type=str)
    parser.add_argument('-feature', nargs='+', help='Feature List, Just Like: -feature 0.2 0.1',type=float)
    parser.add_argument('-example',help="kNN Example, You Can Choose From: dating、handwriting. If you set the example argument, your needn't set other argument")
    args = parser.parse_args()
    main(args)
    # result = classify0([0, 0], datingDataMat, datingLabels, 3)
    # # print(result)
| null | MLzhoupengwu/kNN.py | kNN.py | py | 7,339 | python | en | code | null | code-starcoder2 | 50 |
3765698 | from django.urls import path
from .views import CategoryView, ProductListView, OrderCreateView
from rest_framework import routers
app_name = 'products'
# DRF router generates the list/detail routes for the product viewset.
router = routers.DefaultRouter()
router.register('products', ProductListView, basename='product')
# Explicit routes first, then the router-generated ones.
urlpatterns = [
    path('categories/', CategoryView.as_view()),
    path('purchase/', OrderCreateView.as_view()),
] + router.urls
| null | products/urls.py | urls.py | py | 384 | python | en | code | null | code-starcoder2 | 50 |
215224187 | import time
import torch
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.utils.tensorboard import SummaryWriter
from audio import Audio
from dataset import new_audio_datasets
from losses import MaskedL1
from model.io import ModelPackage
from utils.common import Averager
from utils.config import Config
from utils.decorators import ignore_exception
from utils.display import plot_mel, plot_attention, display_params, stream
from utils.paths import Paths
class Session:
    """One stage of the progressive training schedule.

    Bundles the schedule hyper-parameters (reduction factor, learning rate,
    step budget, batch size) with the data loaders built for that stage.
    """
    def __init__(self,
                 index: int,
                 r: int,
                 lr: float,
                 max_step: int,
                 bs: int,
                 train_set: DataLoader,
                 val_set: DataLoader) -> None:
        self.index = index          # 1-based position in the schedule
        self.r = r                  # reduction factor for this stage
        self.lr = lr                # learning rate (fixed: was annotated int,
                                    # but the schedule supplies floats)
        self.max_step = max_step    # train until the model reaches this step
        self.bs = bs                # batch size
        self.train_set = train_set  # training DataLoader
        self.val_set = val_set      # validation DataLoader
class Trainer:
    """Runs the multi-session Tacotron training schedule.

    Owns the checkpoint directory, TensorBoard writer and masked-L1 loss,
    and iterates over cfg.training_schedule, rebuilding the datasets for
    each session's reduction factor and batch size.
    """
    def __init__(self, cfg: Config):
        self.cfg = cfg
        self.paths = Paths()
        self.audio = Audio(cfg)
        # Checkpoints and TensorBoard logs live under ckpt/<config_id>/.
        self.ckpt_path = self.paths.ckpt/cfg.config_id
        log_dir = self.ckpt_path/'tensorboard'
        self.writer = SummaryWriter(log_dir=log_dir, comment='v1')
        self.criterion = MaskedL1()
    def train(self, model: ModelPackage):
        """Run every remaining session of cfg.training_schedule on *model*.

        Sessions whose max_step the model has already passed are skipped,
        so training can resume mid-schedule from a checkpoint.
        """
        for i, session_params in enumerate(self.cfg.training_schedule, 1):
            r, lr, max_step, bs = session_params
            if model.tacotron.step < max_step:
                train_set, val_set = new_audio_datasets(
                    paths=self.paths, batch_size=bs, r=r, cfg=self.cfg)
                session = Session(
                    index=i, r=r, lr=lr, max_step=max_step,
                    bs=bs, train_set=train_set, val_set=val_set)
                self.train_session(model, session)
    def train_session(self, model: ModelPackage, session: Session):
        """Train until session.max_step, evaluating/checkpointing periodically."""
        model.r = session.r
        cfg = self.cfg
        # NOTE(review): gan/gen_opti/disc_opti are unpacked but unused here.
        tacotron, gan = model.tacotron, model.gan
        taco_opti, gen_opti, disc_opti = \
            model.taco_opti, model.gen_opti, model.disc_opti
        device = next(tacotron.parameters()).device
        display_params([
            ('Session', session.index), ('Reduction', session.r),
            ('Max Step', session.max_step), ('Learning Rate', session.lr),
            ('Batch Size', session.bs), ('Steps per Epoch', len(session.train_set))
        ])
        # Apply the session's learning rate to the Tacotron optimiser.
        for g in taco_opti.param_groups:
            g['lr'] = session.lr
        loss_avg = Averager()
        duration_avg = Averager()
        while tacotron.get_step() <= session.max_step:
            for i, (seqs, mels, stops, ids, lens) in enumerate(session.train_set):
                seqs, mels, stops, lens = \
                    seqs.to(device), mels.to(device), stops.to(device), lens.to(device)
                t_start = time.time()
                # Step count within the current evaluation window (for display).
                block_step = tacotron.get_step() % cfg.steps_to_eval + 1
                tacotron.train()
                lin_mels, post_mels, att = tacotron(seqs, mels)
                # Masked L1 on both the linear and post-net mel outputs.
                lin_loss = self.criterion(lin_mels, mels, lens)
                post_loss = self.criterion(post_mels, mels, lens)
                loss = lin_loss + post_loss
                loss_avg.add(loss)
                taco_opti.zero_grad()
                loss.backward()
                # Clip gradients to stabilise training.
                torch.nn.utils.clip_grad_norm_(tacotron.parameters(), 1.0)
                taco_opti.step()
                duration_avg.add(time.time() - t_start)
                steps_per_s = 1. / duration_avg.get()
                self.writer.add_scalar('Loss/train', loss, tacotron.get_step())
                self.writer.add_scalar('Params/reduction_factor', session.r, tacotron.get_step())
                # NOTE(review): tag 'Params/batch_sze' is misspelled; fixing it
                # would change the TensorBoard tag name.
                self.writer.add_scalar('Params/batch_sze', session.bs, tacotron.get_step())
                self.writer.add_scalar('Params/learning_rate', session.lr, tacotron.get_step())
                msg = f'{block_step}/{cfg.steps_to_eval} | Step: {tacotron.get_step()} ' \
                      f'| {steps_per_s:#.2} steps/s | Avg. Loss: {loss_avg.get():#.4} '
                stream(msg)
                if tacotron.step % cfg.steps_to_checkpoint == 0:
                    # Periodic numbered checkpoint.
                    self.save_model(model, step=tacotron.get_step())
                if tacotron.step % self.cfg.steps_to_eval == 0:
                    # Periodic validation pass + rolling 'latest' checkpoint.
                    val_loss = self.evaluate(model, session.val_set, msg)
                    self.writer.add_scalar('Loss/val', val_loss, tacotron.step)
                    self.save_model(model)
                    stream(msg + f'| Val Loss: {float(val_loss):#0.4} \n')
                    loss_avg.reset()
                    duration_avg.reset()
                if tacotron.step > session.max_step:
                    return
    def evaluate(self, model, val_set, msg) -> float:
        """Return the mean L1 validation loss; logs samples for the first batch."""
        model.tacotron.eval()
        val_loss = 0
        device = next(model.tacotron.parameters()).device
        for i, batch in enumerate(val_set, 1):
            stream(msg + f'| Evaluating {i}/{len(val_set)}')
            seqs, mels, stops, ids, lens = batch
            seqs, mels, stops, lens = \
                seqs.to(device), mels.to(device), stops.to(device), lens.to(device)
            with torch.no_grad():
                pred = model.tacotron(seqs, mels)
                lin_mels, post_mels, att = pred
                # Unmasked L1 here, unlike training (uses the masked criterion).
                lin_loss = F.l1_loss(lin_mels, mels)
                post_loss = F.l1_loss(post_mels, mels)
                val_loss += lin_loss + post_loss
            if i == 1:
                self.generate_samples(model, batch, pred)
        val_loss /= len(val_set)
        return float(val_loss)
    def save_model(self, model: ModelPackage, step=None):
        """Overwrite the rolling checkpoint; also write a numbered one if *step* given."""
        model.save(self.ckpt_path/'latest_model.zip')
        if step is not None:
            model.save(self.ckpt_path/f'model_step_{step}.zip')
    @ignore_exception
    def generate_samples(self, model: ModelPackage,
                         batch: torch.Tensor, pred: torch.Tensor):
        """Log target/GTA/generated spectrograms, attention plots and audio.

        Decorated with @ignore_exception so a plotting/vocoding failure
        never aborts training.
        """
        seqs, mels, stops, ids, lens = batch
        lin_mels, post_mels, att = pred
        # First item of the batch only, trimmed to its true length.
        mel_sample = mels.transpose(1, 2)[0, :lens[0]].detach().cpu().numpy()
        gta_sample = post_mels.transpose(1, 2)[0, :lens[0]].detach().cpu().numpy()
        att_sample = att[0].detach().cpu().numpy()
        target_fig = plot_mel(mel_sample)
        gta_fig = plot_mel(gta_sample)
        att_fig = plot_attention(att_sample)
        self.writer.add_figure('Mel/target', target_fig, model.tacotron.step)
        self.writer.add_figure('Mel/ground_truth_aligned', gta_fig, model.tacotron.step)
        self.writer.add_figure('Attention/ground_truth_aligned', att_fig, model.tacotron.step)
        # 32 Griffin-Lim iterations to turn mels into audible waveforms.
        target_wav = self.audio.griffinlim(mel_sample, 32)
        gta_wav = self.audio.griffinlim(gta_sample, 32)
        self.writer.add_audio(
            tag='Wav/target', snd_tensor=target_wav,
            global_step=model.tacotron.step, sample_rate=self.audio.sample_rate)
        self.writer.add_audio(
            tag='Wav/ground_truth_aligned', snd_tensor=gta_wav,
            global_step=model.tacotron.step, sample_rate=self.audio.sample_rate)
        # Free-running generation from the same text sequence.
        seq = seqs[0].tolist()
        _, gen_sample, att_sample = model.tacotron.generate(seq, steps=lens[0])
        gen_fig = plot_mel(gen_sample)
        att_fig = plot_attention(att_sample)
        self.writer.add_figure('Attention/generated', att_fig, model.tacotron.step)
        self.writer.add_figure('Mel/generated', gen_fig, model.tacotron.step)
        gen_wav = self.audio.griffinlim(gen_sample, 32)
        self.writer.add_audio(
            tag='Wav/generated', snd_tensor=gen_wav,
            global_step=model.tacotron.step, sample_rate=self.audio.sample_rate)
| null | trainer.py | trainer.py | py | 7,709 | python | en | code | null | code-starcoder2 | 50 |
477060780 | from echopype.echodata import EchoData
import xarray as xr
import numpy as np
def test_harmonize_env_param_time():
    """Cover scalar, length-1 and interpolated paths of _harmonize_env_param_time."""
    # Scalar passes straight through
    p = 10.05
    assert EchoData._harmonize_env_param_time(p=p) == 10.05
    # time1 length=1, should return length=1 numpy array
    p = xr.DataArray(
        data=[1],
        coords={
            "time1": np.array(["2017-06-20T01:00:00"], dtype="datetime64[ns]")
        },
        dims=["time1"]
    )
    assert EchoData._harmonize_env_param_time(p=p) == 1
    # time1 length>1, interpolate to target ping_time
    p = xr.DataArray(
        data=np.array([0, 1]),
        coords={
            "time1": np.arange("2017-06-20T01:00:00", "2017-06-20T01:00:31", np.timedelta64(30, "s"), dtype="datetime64[ns]")
        },
        dims=["time1"]
    )
    # ping_time target is identical to time1: values must be unchanged
    ping_time_target = p["time1"].rename({"time1": "ping_time"})
    p_new = EchoData._harmonize_env_param_time(p=p, ping_time=ping_time_target)
    assert (p_new["ping_time"] == ping_time_target).all()
    assert (p_new.data == p.data).all()
    # ping_time target requires actual interpolation (midpoint -> 0.5)
    ping_time_target = xr.DataArray(
        data=[1],
        coords={
            "ping_time": np.array(["2017-06-20T01:00:15"], dtype="datetime64[ns]")
        },
        dims=["ping_time"]
    )
    p_new = EchoData._harmonize_env_param_time(p=p, ping_time=ping_time_target["ping_time"])
    assert p_new["ping_time"] == ping_time_target["ping_time"]
    assert p_new.data == 0.5
| null | echopype/tests/echodata/test_echodata_misc.py | test_echodata_misc.py | py | 1,519 | python | en | code | null | code-starcoder2 | 50 |
255206708 | """ HandleCollisionsAction module
Contains HandleCollisionsAction class and associated utilities. Used in
controlling the gameplay and managing collisions in-game.
Authors:
- Shad Christopherson
- Peter Griffin
- Christian Soldevilla
"""
import random
from game import constants
from game.action import Action
from game.point import Point
from game.score import Score
class HandleCollisionsAction(Action):
    """A code template for handling collisions. The responsibility of this class of objects is to update the game state when actors collide.

    Stereotype:
        Controller
    """

    def __init__(self):
        """The class constructor."""
        super().__init__()
        self._points = 0
        self.keep_playing = True
        self.gameWon = False

    def execute(self, cast):
        """Executes the action using the given actors.

        Args:
            cast (dict): The game actors {key: tag, value: list}.

        Side effects:
            Removes destroyed bricks from cast["brick"], updates the ball's
            velocity and the score, and sets keep_playing / gameWon.
        """
        ball = cast["ball"][0]
        paddle = cast["paddle"][0]
        bricks = cast["brick"]
        self.bricks = bricks
        score = cast["score"][0]

        # Brick collisions. The original deleted from `bricks` while
        # iterating over it and advanced the delete index every pass,
        # which skipped bricks and could delete the wrong one. Rebuild
        # the survivor list instead and splice it back in place.
        survivors = []
        for brick in bricks:
            if ball.get_position().equals(brick.get_position()):
                newDirection = ball.get_velocity().reverse_y()
                # Randomise the x value produced by the y flip.
                newDirection = newDirection.collision_randomizer()
                ball.set_velocity(newDirection)
                score.add_points(1)
            else:
                survivors.append(brick)
        bricks[:] = survivors  # in place, so cast["brick"] stays the same list
        # Check for a win after removal so destroying the last brick ends
        # the game on the same frame.
        self.checkWin()

        # Wall and ceiling/floor collisions.
        edgeCheck = ball.get_position().get_x()
        ceilingCheck = ball.get_position().get_y()
        if edgeCheck == constants.MAX_X - 1 or edgeCheck == 1:
            ball.set_velocity(ball.get_velocity().reverse_x())
        if ceilingCheck == 1:
            newDirection = ball.get_velocity().reverse_y()
            newDirection = newDirection.collision_randomizer()
            ball.set_velocity(newDirection)
        if ceilingCheck == constants.MAX_Y - 1:
            # Ball reached the floor: game over.
            self.keep_playing = False

        # Paddle collision: the paddle spans 11 cells.
        for i in range(11):
            checkPosition = paddle.get_position()
            newPositionToCheck = checkPosition.lengthen_detect(i)
            if ball.get_position().equals(newPositionToCheck):
                newDirection = ball.get_velocity().reverse_y()
                newDirection = newDirection.collision_randomizer()
                ball.set_velocity(newDirection)

    def checkGameOver(self):
        """Gets the self.keep_playing variable.

        Returns:
            Boolean: Whether play should continue.
        """
        return self.keep_playing

    def checkWin(self):
        """Flags the game as won (and over) once no bricks remain."""
        if (len(self.bricks) == 0):
            self.keep_playing = False
            self.gameWon = True

    def getWinCondition(self):
        """Gets the self.gameWon variable.

        Returns:
            Boolean: If the game is won.
        """
        return self.gameWon
9420601 | import pdfkit
def html_to_pdf(html, to_file):
    """Render an HTML file to PDF via pdfkit / wkhtmltopdf.

    Args:
        html: path of the source HTML file.
        to_file: path of the PDF file to create.
    """
    # Point the config object at the absolute path of wkhtmltopdf.exe.
    # NOTE(review): hard-coded for a default Windows install; confirm the
    # path matches the deployment machine.
    path_wkthmltopdf = r'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'
    config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
    # Generate the PDF; to_file is the output path.
    pdfkit.from_file(html, to_file, configuration=config)
    print('OK')
html_to_pdf('index.html','index.pdf')
| null | docs/html/htmltopdf.py | htmltopdf.py | py | 413 | python | en | code | null | code-starcoder2 | 50 |
453182898 | #! /usr/bin/env python
# Public domain; MZMcBride, 2011; Legoktm, 2014
from flask import Flask, request
import cgi
import urllib
import re
import oursql
import operator
import json
import os
app = Flask(__name__)
my_cnf = os.path.expanduser('~/replica.my.cnf')
def database_list():
    """Return the dbnames of all open (non-closed) wikis from meta_p.wiki."""
    conn = oursql.connect(host='enwiki.labsdb', db='meta_p', read_default_file=my_cnf)
    cursor = conn.cursor()
    cursor.execute('''
    /* checker.py database_list */
    SELECT
      dbname
    FROM wiki
    WHERE is_closed = 0;
    ''')
    databases = cursor.fetchall()
    cursor.close()
    conn.close()
    # Each row is a 1-tuple; unwrap to a flat list of names.
    return [database[0] for database in databases]
def choose_host_and_domain(db):
    """Look up the public URL for *db* and derive its labsdb replica host.

    Args:
        db: database name, e.g. 'enwikisource'.

    Returns:
        dict with 'host' (the <db>.labsdb replica host) and 'domain'
        (the wiki's URL from meta_p.wiki, or None when the db is unknown).
    """
    conn = oursql.connect(host='enwiki.labsdb',
                          db='meta_p',
                          read_default_file=my_cnf)
    cursor = conn.cursor()
    cursor.execute('''
    /* checker.py choose_host_and_domain */
    SELECT
      url
    FROM wiki
    WHERE dbname = ?;
    ''', (db,))
    # Initialise so an unknown db yields domain=None instead of a NameError
    # (the original left `domain` unbound when the query returned no rows).
    domain = None
    for row in cursor.fetchall():
        domain = '%s' % row[0]
    cursor.close()
    conn.close()
    return {'host': db + '.labsdb', 'domain': domain}
def get_extension_namespaces(domain):
    """Query the wiki's API for its ProofreadPage and site namespaces.

    Args:
        domain: base URL of the wiki (from choose_host_and_domain).

    Returns:
        dict with the 'page_namespace' and 'index_namespace' ids plus the
        full 'names' namespace map from the siteinfo query.
    """
    params = {
        'action': 'query',
        'meta': 'proofreadinfo|siteinfo',
        'piprop': 'namespaces',
        'siprop': 'namespaces',
        'format': 'json'
    }
    query_url = '%s/w/api.php?%s' % (domain, urllib.urlencode(params))
    app.logger.debug(query_url)
    # Python 2 urllib; a request failure propagates to the caller.
    url_contents = urllib.urlopen(query_url).read()
    parsed_content = json.loads(url_contents)
    page_namespace = parsed_content['query']['proofreadnamespaces']['page']['id']
    index_namespace = parsed_content['query']['proofreadnamespaces']['index']['id']
    names = parsed_content['query']['namespaces']
    return {'page_namespace': page_namespace, 'index_namespace': index_namespace, 'names': names}
def get_page_links(cursor, db, page_namespace, index_namespace, index_page):
    """Return [title, sort_key] pairs for every Page: linked from an Index: page.

    Args:
        cursor: open replica-database cursor.
        db: database name (unused in the query itself).
        page_namespace: numeric id of the Page: namespace.
        index_namespace: numeric id of the Index: namespace.
        index_page: underscored title of the Index: page.

    Returns:
        list of [pl_title, sort_key] where sort_key is the numeric subpage
        suffix after the last '/', or 1 when there is none.
    """
    page_links = []
    cursor.execute('''
    /* checker.py get_page_links */
    SELECT
      pl_title
    FROM pagelinks
    JOIN page AS p1
    ON pl_from = p1.page_id
    JOIN page AS p2
    ON p2.page_title = pl_title
    AND p2.page_namespace = pl_namespace
    WHERE pl_namespace = ?
    AND p1.page_namespace = ?
    AND p1.page_title = ?;
    ''', (page_namespace, index_namespace, index_page))
    for row in cursor.fetchall():
        pl_title = row[0]
        #app.logger.debug(row[0])
        try:
            # Titles look like 'Book.djvu/123'; sort by the page number.
            sort_key = int(unicode(row[0].rsplit('/', 1)[1].decode('utf-8')))
        except IndexError:
            sort_key = 1
        page_links.append([pl_title, sort_key])
    return page_links
def get_page_status(cursor, db, page_namespace, page):
    """Return the transclusion count and proofread category of one Page:.

    Args:
        cursor: open replica-database cursor.
        db: database name (unused in the queries themselves).
        page_namespace: numeric id of the Page: namespace.
        page: underscored page title.

    Returns:
        dict with 'transclusion_count' and, when categorised,
        'proofread_status' (lower-cased, underscores turned into spaces).
    """
    page_status = {}
    # Check if the page has transclusions first
    cursor.execute('''
    /* checker.py get_page_status */
    SELECT
      COUNT(*)
    FROM templatelinks
    WHERE tl_namespace = ?
    AND tl_title = ?;
    ''', (page_namespace, page))
    transclusion_count = cursor.fetchone()
    if transclusion_count:
        page_status['transclusion_count'] = transclusion_count[0]
    # Then check if the page has been proofread (its ProofreadPage category)
    cursor.execute('''
    /* checker.py get_page_status */
    SELECT
      cl_to
    FROM page
    JOIN categorylinks
    ON cl_from = page_id
    WHERE page_id = cl_from
    AND page_namespace = ?
    AND page_title = ?;
    ''', (page_namespace, page))
    proofread_status = cursor.fetchone()
    if proofread_status:
        page_status['proofread_status'] = proofread_status[0].lower().replace('_', ' ')
    return page_status
@app.route('/')
def main():
    """Render the checker UI: an index-title form, or the transclusion report.

    Query args:
        db: wiki database name (default 'enwikisource').
        title: an Index: page title; when present, every Page: linked from
            it is bucketed as transcluded / not transcluded.
    """
    TEXT = ''
    # Pick a db; make enwikisource the default
    if request.args.get('db') is not None:
        db = request.args.get('db').replace('_p', '')
    else:
        db = 'enwikisource'
    # All right, now let's pick a host and domain
    connection_props = choose_host_and_domain(db)
    host = connection_props['host']
    domain = connection_props['domain']
    # Run this awful function to grab the namespace names that are required.
    extension_dict = get_extension_namespaces(domain)
    page_namespace = extension_dict['page_namespace']
    index_namespace = extension_dict['index_namespace']
    page_namespace_name = extension_dict['names'][str(page_namespace)]['*']
    index_namespace_name = extension_dict['names'][str(index_namespace)]['*']
    if 'title' in request.args:
        title = request.args.get('title')
    else:
        title = ''
    # NOTE(review): yes_table/no_table templates are defined but never used.
    yes_table = '''\
<table id="ck-yes-table">
%s
</table>'''
    yes_rows = []
    no_table = '''\
<table id="ck-no-table">
%s
</table>'''
    no_rows = []
    tables = []
    # Query the replica and bucket each linked Page: by transclusion count.
    if host is not None and title:
        conn = oursql.connect(host=host, db=db+'_p', read_default_file=my_cnf)
        cursor = conn.cursor()
        # Eliminate LTR and RTL marks and strip extra whitespace.
        title = re.sub(r'(\xe2\x80\x8e|\xe2\x80\x8f)', '', title).strip(' ')
        # Prep the title for the query (replace spaces and strip namespace name if present).
        clean_title = title.replace(' ', '_').split(index_namespace_name+':', 1)[1]
        page_links = get_page_links(cursor, db, page_namespace, index_namespace, clean_title)
        if page_links:
            # Sort!
            page_links = sorted(page_links, key=operator.itemgetter(1))
            for item in page_links:
                page_link = item[0]
                sort_key = item[1]
                status = get_page_status(cursor, db, page_namespace, page_link)
                if status['transclusion_count'] > 0:
                    yes_table_row = '''\
<tr>
<td>
<a href="//%s/wiki/%s">%s</a>\
</td>
<td>
%s
</td>
</tr>''' % (domain,
urllib.quote('%s:%s' % (page_namespace_name, page_link)),
cgi.escape('%s:%s' % (page_namespace_name, page_link.replace('_', ' ')), quote=True),
status['proofread_status'])
                    yes_rows.append(yes_table_row)
                else:
                    no_table_row = '''\
<tr>
<td>
<a href="//%s/wiki/%s">%s</a>\
</td>
<td>
%s
</td>
</tr>''' % (domain,
urllib.quote('%s:%s' % (page_namespace_name, page_link)),
cgi.escape('%s:%s' % (page_namespace_name, page_link.replace('_', ' ')), quote=True),
status['proofread_status'])
                    no_rows.append(no_table_row)
            tables.append(yes_rows)
            tables.append(no_rows)
        cursor.close()
        conn.close()
    # Static page head and site header.
    TEXT += '''\
<!doctype html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<link rel="stylesheet" href="/checker/static/style-checker.css" type="text/css" />
<script type="text/javascript" src="/checker/static/jquery-1.3.2.min.js"></script>
<script type="text/javascript" src="/checker/static/jquery.tablesorter.js"></script>
<script type="text/javascript">
var extracted_data = function(node)
{
var text = node.innerText || node.textContent
return text.substring(text.lastIndexOf('/')+1);
}
$(document).ready(function()
{
$('input.focus').focus();
$('.ck-results').tablesorter(
{widgets: ['zebra'],
textExtraction: extracted_data}
);
}
);
</script>
<title>checker</title>
</head>
<body>
<div class="header" id="main-title"><a href="/checker/" title="checker">checker</a></div>\
'''
    # Page body: results tables, an error message, or the input form.
    if title:
        if db and host is not None and title:
            TEXT += '<div id="ck-tables-wrapper">'
            count = 0
            for table in tables:
                if count == 0:
                    TEXT += '<h1 class="header" id="Transcluded"> Transcluded </h1>'
                else:
                    TEXT += '<h1 class="header" id="Not transcluded"> Not transcluded </h1>'
                TEXT += '''\
<table class="ck-results inner-table">
<thead>
<tr>
<th class="header" id="ck-page-column">Page</th>
<th class="header" id="ck-status-column">Status</th>
</tr>
</thead>
<tbody>
%s
</tbody>
</table>''' % ('\n'.join(table))
                count += 1
            TEXT += '</div>'
        else:
            TEXT += '''\
<pre>
There was some sort of error. Sorry. :-(
</pre>'''
    elif host is None:
        TEXT += '''\
<pre>
You didn't specify an appropriate database name.
</pre>'''
    else:
        TEXT += '''\
<form action="/checker/" method="get">
<table id="input" class="inner-table">
<tr>
<th colspan="2" class="header">Input index title below.</th>
</tr>
<tr>
<th>Database</th>
<th>
<select id="database" name="db">'''
        for i in database_list():
            if i == '%s' % db:
                TEXT += '''\
<option value="%s" selected="selected">%s</option>''' % (i, i)
            else:
                TEXT += '''\
<option value="%s">%s</option>''' % (i, i)
        TEXT += '''\
</select>
</th>
</tr>
<tr>
<td colspan="2" id="input-cell">
<input class="focus" id="input" name="title" size="50" /><input id="go-button" type="submit" value="Go" />
</td>
</tr>
</table>
</form>'''
    # Shared footer.
    TEXT += '''\
<div id="footer">
<div id="meta-info">
<a href="https://github.com/legoktm/checker" title="source code">public domain</a> <b>·</b> \
<a href="http://en.wikipedia.org/w/index.php?title=User_talk:MZMcBride&action=edit&section=new" title="Report a bug">bugs</a>
</div>
</div>
</body>
</html>
'''
    return TEXT
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
| null | checker/checker.py | checker.py | py | 9,445 | python | en | code | null | code-starcoder2 | 51 |
575911156 | from django.contrib import messages
from django.shortcuts import render, redirect
from .forms import MicrotasksForm
from .models import Microtasks,MAL_Requirements
# Create your views here.
def microtask(request):
    """Render the microtask form; on valid POST, save it and redirect home."""
    if request.method == 'POST':
        form = MicrotasksForm(request.POST, request.FILES)
        if form.is_valid():
            job_name = form.cleaned_data['job_name']
            form.save()
            # NOTE(review): the flash message talks about account creation /
            # login, which does not match saving a microtask — confirm wording.
            messages.success(request, f'Account created for {job_name}! You have to login')
            return redirect('/')
    else:
        form = MicrotasksForm()
    return render(request, 'microtask.html', {'form': form})
def index(request):
    """Render the MAL requirements form with every microtask available."""
    microtask = Microtasks.objects.all()
    # category = MAL_Requirements.objects.get(microtask.Category_of_the_microtask)
    context = {'microtask':microtask}
    return render(request, 'MalForm.html', context)
def handleSubmit(request):
    """Create a MAL_Requirements row from the posted MAL form, then go to index.

    NOTE(review): the upload fields are read from request.POST (text), not
    request.FILES — confirm whether file uploads were intended here.
    """
    if request.method == 'POST':
        # Pull every form field by its input name.
        MAL_Job_Identification_Number = request.POST['malno']
        Assembly_line_ID = request.POST['asi']
        Name_of_the_Assembly_line = request.POST['nameassembly']
        Name_of_the_person_incharge_of_the_MAL = request.POST['personname']
        Link_of_the_output_folder = request.POST['link1']
        Name_of_the_micro_task = request.POST['microtask']
        Category_of_the_Microtask = request.POST['category']
        Target_date = request.POST['td']
        Total_budget_allocated_for_the_job = request.POST['budget']
        Job_description = request.POST['jd']
        Upload_job_sample = request.POST['jobsample']
        Upload_Job_instructions = request.POST['instruction']
        Quantity_of_the_Job = request.POST['quantity']
        Link_of_the_Input_folder = request.POST['link2']
        data = MAL_Requirements(
            MAL_Job_Identification_Number=MAL_Job_Identification_Number,
            Assembly_line_ID=Assembly_line_ID,
            Name_of_the_Assembly_line=Name_of_the_Assembly_line,
            Name_of_the_person_incharge_of_the_MAL=Name_of_the_person_incharge_of_the_MAL,
            Link_of_the_output_folder=Link_of_the_output_folder,
            microtask=Name_of_the_micro_task,
            microtask_category=Category_of_the_Microtask,
            Target_date=Target_date,
            Total_budget_allocated_for_the_job=Total_budget_allocated_for_the_job,
            Job_description=Job_description,
            Uploadjob_sample=Upload_job_sample,
            UploadJob_instructions=Upload_Job_instructions,
            Quantity_of_the_Job=Quantity_of_the_Job,
            Link_of_the_Input_folder=Link_of_the_Input_folder
        )
        data.save()
    return redirect('index')
def posting_page(request, pk=None):
    """Render the job-posting page, optionally with one microtask's details.

    Args:
        request: the incoming HttpRequest.
        pk: optional primary key of the Microtasks row to display.

    Returns:
        The rendered JobPosting_Page.html. Always returns a response; the
        original returned None for anonymous users, which Django rejects.
    """
    if request.user.is_active:
        if pk is not None:
            # .first() yields None instead of raising, replacing the original
            # bare `except` that swallowed every error type.
            data = Microtasks.objects.filter(id=pk).first()
            if data is None:
                data = "NA"
            return render(request, 'JobPosting_Page.html', {'datas': data})
    return render(request, 'JobPosting_Page.html')
262511262 | import pandas as pd
import numpy as np
import re
import os
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from six.moves import cPickle as pickle
from nltk.corpus import stopwords
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import itertools
no_alignment_file = [4764]
wrong_alignment = [3730]
from keras.layers import Activation, Input, Dense, Flatten, Dropout, Embedding
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.layers.merge import concatenate
from keras import regularizers
from keras.models import Model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from keras.optimizers import Adadelta
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau
import keras_metrics
def extract_patterns(data,extract=False):
    """Load (or build and cache) the per-utterance pattern sets.

    Args:
        data: DataFrame with an 'index' column of utterance ids and a
            'text' column of transcripts.
        extract: when True, run pattern extraction over every transcript
            and cache the result to pickles/patterns/pattern.pickle; when
            False, load the cache and return only the entries whose key
            appears in data['index'].

    Returns:
        dict mapping utterance id -> set of extracted patterns (None if
        the cached pickle cannot be loaded).
    """
    if(extract):
        patterns = {}
        for index, row in data.iterrows():
            # get_pattern is defined elsewhere in the project; item [0] maps
            # positions to patterns, so its values form the pattern set.
            patterns[row['index']] = set(get_pattern([row['text']])[0].values())
            print('Extracted pattern from '+ row['index'] + ' index:'+ str(index))
            print('Size: ', len(patterns[row['index']]), 'Patterns size', len(patterns))
        try:
            print('Saving Pickle')
            with open('pickles/patterns/pattern.pickle','wb') as f:
                save = {
                    'patterns' : patterns
                }
                pickle.dump(save,f,pickle.HIGHEST_PROTOCOL)
                print('Successfully saved in pattern.pickle')
            return patterns
        except Exception as e:
            # Extraction succeeded even if caching failed; still return it.
            print('Unable to save data to pickle', e)
            print('Patterns probably not saved.')
            return patterns
    else:
        try:
            with open('pickles/patterns/pattern.pickle','rb') as f:
                save = pickle.load(f)
                patterns = save['patterns']
                del save
                # Restrict the cache to the utterances present in `data`.
                returning = {}
                for key in list(data['index']):
                    returning[key] = patterns[key]
                return returning
        except Exception as e:
            # NOTE(review): falls through returning None on load failure.
            print('Error loading base datasets pickle: ', e)
def clean_text(text, remove_actions=True):
    """Normalise a transcript: drop bracketed actions, punctuation and case.

    Args:
        text: raw transcript string.
        remove_actions: when True, strip bracketed annotations such as
            '[LAUGHTER]' (together with a single preceding space).

    Returns:
        Lower-cased text with punctuation replaced by spaces and runs of
        spaces collapsed to one.
    """
    punct_str = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~«»“…‘”'
    if remove_actions:
        # Fixed character class: the original r" ?\[[^)]+\]" excluded ')'
        # instead of ']', so one match could greedily span from the first
        # '[' to the last ']' and delete the text in between.
        text = re.sub(r" ?\[[^\]]+\]", "", text)
    # One translate() pass instead of a replace() per punctuation character.
    text = text.translate(str.maketrans(punct_str, ' ' * len(punct_str)))
    text = re.sub(' +', ' ', text)
    return text.lower().strip()
def filter_word_count(data, n_count):
    """Keep rows whose 'text' splits into at least *n_count* space-separated tokens."""
    keep = [len(entry.split(' ')) >= n_count for entry in data['text']]
    return data[keep]
def remove_empty_patterns(data, patterns):
    """Drop utterances whose extracted pattern set is empty.

    Args:
        data: DataFrame with an 'index' column of utterance ids.
        patterns: dict mapping utterance id -> set of patterns.

    Returns:
        (data, patterns) with the empty-set entries removed from the dict
        and the corresponding rows removed from the DataFrame.
    """
    empty_patterns = [k for k, v in patterns.items() if len(v) < 1]
    patterns = {k: v for k, v in patterns.items() if len(v) >= 1}
    # Boolean filtering replaces the original pd.DataFrame.from_items()
    # round-trip: from_items was removed in pandas 1.0, and the
    # filter/transpose dance also coerced every column to object dtype.
    data = data[~data['index'].isin(empty_patterns)]
    return data, patterns
def remove_multiwildcard(patterns):
    """Keep only patterns containing exactly one '.+' wildcard token (in place)."""
    for key in patterns:
        kept = {p for p in patterns[key] if p.split(' ').count('.+') == 1}
        patterns[key] = kept
    return patterns
def load_data(word_count,emotional_mapping):
    """Load and clean the IEMOCAP sentence data plus its patterns.

    Args:
        word_count: minimum number of words a transcript must contain.
        emotional_mapping: dict mapping emotion labels to integer codes;
            only codes < 4 are kept.

    Returns:
        (data, patterns): the filtered DataFrame and the per-utterance
        pattern dict (single-wildcard, non-empty entries only).
    """
    # full = generate_IEMOCAP_df()
    data = pd.read_csv('data/IEMOCAP_sentences_votebased.csv',index_col=0)
    data['emotion_code'] = data['emotion'].map( emotional_mapping ).astype(int)
    # Take away fear, surprise,disgust, xxx and others. Not enough data
    data = data[data.emotion_code < 4]
    #Remove rows that don't have Alignment file
    try:
        data = data.drop(no_alignment_file)
    except Exception as e:
        print('Error at: ',e)
    # Remove rows that have wrong Alignment file
    try:
        data = data.drop(wrong_alignment)
    except Exception as e:
        print('Error at: ',e)
    # Clean Transcripts
    data['text'] = data['text'].apply(clean_text)
    # Filter Word Count
    data = filter_word_count(data, word_count)
    patterns = extract_patterns(data)
    data,patterns = remove_empty_patterns(data,patterns)
    patterns = remove_multiwildcard(patterns)
    return data,patterns
def load_acoustic_fullmatrices(extraction_type = 'full',extract_fd = False):
if(extraction_type in ['full','wc','cw']):
try:
if(extract_fd):
fullmfcc_matrix_fd = None
fullrmse_matrix_fd = pd.read_pickle('pickles/patterns/'+extraction_type+'_rmse_matrix_fd.pickle')
print('Successfully loaded '+extraction_type+' RMSE Matrix FULLDATA')
fullzcr_matrix_fd = pd.read_pickle('pickles/patterns/'+extraction_type+'_zcr_matrix_fd.pickle')
print('Successfully loaded '+extraction_type+' ZCR Matrix FULLDATA')
with open('pickles/patterns/'+extraction_type+'_mfcc20_matrix_fd.pickle','rb') as f:
save = pickle.load(f)
fullmfcc_matrix_fd = save['multimatrix']
del save
print('Successfully loaded '+extraction_type+' MFCC Matrices FULLDATA')
fullmfcc_matrix_fd.append(fullrmse_matrix_fd)
fullmfcc_matrix_fd.append(fullzcr_matrix_fd)
return fullmfcc_matrix_fd
else:
fullmfcc_matrix = None
fullrmse_matrix = pd.read_pickle('pickles/patterns/'+extraction_type+'_rmse_matrix.pickle')
print('Successfully loaded '+extraction_type+' RMSE Matrix')
fullzcr_matrix = pd.read_pickle('pickles/patterns/'+extraction_type+'_zcr_matrix.pickle')
print('Successfully loaded '+extraction_type+' ZCR Matrix')
with open('pickles/patterns/'+extraction_type+'_mfcc20_matrix.pickle','rb') as f:
save = pickle.load(f)
fullmfcc_matrix = save['multimatrix']
del save
print('Successfully loaded '+extraction_type+' MFCC Matrices')
fullmfcc_matrix.append(fullrmse_matrix)
fullmfcc_matrix.append(fullzcr_matrix)
return fullmfcc_matrix
except Exception as e:
print('Error loading matrix: ', e)
else:
print('Error')
return None,None
def get_frequency_vectors(data,patterns_list):
patterns = extract_patterns(data)
transcript_order = list(data['index'])
frequency_vectors = []
for index in patterns:
frequency_vectors.append(np.isin(patterns_list,np.array(list(patterns[index]))))
vectors = pd.DataFrame(frequency_vectors,columns=patterns_list,index=patterns.keys())
vectors = vectors.loc[transcript_order]
vectors = vectors * 1
return vectors
seed = 7
np.random.seed(seed)
emotional_mapping = {'ang': 0, 'sad': 1, 'hap': 2, 'neu': 3,'fru': 4,'exc': 5,'fea': 6,'sur': 7,'dis': 8, 'xxx':9,'oth':10}
data, patterns = load_data(3,emotional_mapping)
# x_train, x_test, y_train, y_test = train_test_split(data, data.emotion_code, test_size=TEST_SIZE)
try:
with open('pickles/matrix_basedata.pickle','rb') as f:
save = pickle.load(f)
X_train = save['X_train']
X_test = save['X_test']
y_train = save['y_train']
y_test = save['y_test']
del save
except Exception as e:
print('Error loading base datasets pickle: ', e)
y_train = pd.get_dummies(y_train).values
y_test = pd.get_dummies(y_test).values
full_matrices = load_acoustic_fullmatrices(extraction_type='full',extract_fd = True)
wc_matrices = load_acoustic_fullmatrices(extraction_type='wc',extract_fd = True)
cw_matrices = load_acoustic_fullmatrices(extraction_type='cw',extract_fd = True)
########################################################################################
RMSE_INDEX = 20
ZCR_INDEX = 21
###########################################################################################
em_df = pd.read_pickle('pickles/patterns/pfief_matrix.pickle')
patterns_list = np.array(list(em_df.index))
print(len(em_df),len(full_matrices),len(wc_matrices),len(cw_matrices))
vectors = get_frequency_vectors(X_train,patterns_list)
test_vectors = get_frequency_vectors(X_test,patterns_list)
###########################################################################################
####### PARAMETERS ########
# EMBEDDING
EMBEDDING_DIM = 4
MAX_SEQ_LENGTH = 170
# MODEL
FILTER_SIZES = [1,1,1]
FEATURE_MAPS = [150,150,150]
DROPOUT_RATE = 0.2
# LEARNING
BATCH_SIZE = 200
NB_EPOCHS = 50
RUNS = 1
VAL_SIZE = 0.2
LEARNING_RATE = 0.01
##############################################################################
# acoustic_matrix = full_matrices[RMSE_INDEX]
# acoustic_matrix = acoustic_matrix.fillna(np.max(acoustic_matrix))
NUM_CHANNELS = 22
acoustic_matrices = full_matrices[:20].copy()
acoustic_matrices.append(full_matrices[ZCR_INDEX])
for i,am in enumerate(acoustic_matrices):
acoustic_matrices[i] = acoustic_matrices[i].fillna(np.max(acoustic_matrices[i]))
#######################################
full_data = []
for key, row in vectors.iterrows():
final = []
row_patt = [ i for i,v in row.iteritems() if v == 1]
row_matrix = em_df.loc[row_patt,:].as_matrix()
pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix
final.append(pad)
### ACU MATRICES ###
for i,am in enumerate(acoustic_matrices):
acu_matrix = am.loc[row_patt,:].as_matrix()
acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix
final.append(acu_pad)
full_data.append(final)
test_full_data = []
for key, row in test_vectors.iterrows():
final = []
row_patt = [ i for i,v in row.iteritems() if v == 1]
row_matrix = em_df.loc[row_patt,:].as_matrix()
pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix
final.append(pad)
### ACU MATRICES ###
for i,am in enumerate(acoustic_matrices):
acu_matrix = am.loc[row_patt,:].as_matrix()
acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix
final.append(acu_pad)
test_full_data.append(final)
acoustic_matrices = cw_matrices[:20].copy()
acoustic_matrices.append(cw_matrices[ZCR_INDEX])
for i,am in enumerate(acoustic_matrices):
acoustic_matrices[i] = acoustic_matrices[i].fillna(np.max(acoustic_matrices[i]))
cw_data = []
for key, row in vectors.iterrows():
final = []
row_patt = [ i for i,v in row.iteritems() if v == 1]
row_matrix = em_df.loc[row_patt,:].as_matrix()
pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix
final.append(pad)
### ACU MATRICES ###
for i,am in enumerate(acoustic_matrices):
acu_matrix = am.loc[row_patt,:].as_matrix()
acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix
final.append(acu_pad)
cw_data.append(final)
test_cw_data = []
for key, row in test_vectors.iterrows():
final = []
row_patt = [ i for i,v in row.iteritems() if v == 1]
row_matrix = em_df.loc[row_patt,:].as_matrix()
pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix
final.append(pad)
### ACU MATRICES ###
for i,am in enumerate(acoustic_matrices):
acu_matrix = am.loc[row_patt,:].as_matrix()
acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix
final.append(acu_pad)
test_cw_data.append(final)
acoustic_matrices = wc_matrices[:20].copy()
acoustic_matrices.append(wc_matrices[ZCR_INDEX])
for i,am in enumerate(acoustic_matrices):
acoustic_matrices[i] = acoustic_matrices[i].fillna(np.max(acoustic_matrices[i]))
wc_data = []
for key, row in vectors.iterrows():
final = []
row_patt = [ i for i,v in row.iteritems() if v == 1]
row_matrix = em_df.loc[row_patt,:].as_matrix()
pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix
final.append(pad)
### ACU MATRICES ###
for i,am in enumerate(acoustic_matrices):
acu_matrix = am.loc[row_patt,:].as_matrix()
acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix
final.append(acu_pad)
wc_data.append(final)
test_wc_data = []
for key, row in test_vectors.iterrows():
final = []
row_patt = [ i for i,v in row.iteritems() if v == 1]
row_matrix = em_df.loc[row_patt,:].as_matrix()
pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
pad[:row_matrix.shape[0],:row_matrix.shape[1]] = row_matrix
final.append(pad)
### ACU MATRICES ###
for i,am in enumerate(acoustic_matrices):
acu_matrix = am.loc[row_patt,:].as_matrix()
acu_pad = np.zeros((MAX_SEQ_LENGTH,EMBEDDING_DIM))
acu_pad[:acu_matrix.shape[0],:acu_matrix.shape[1]] = acu_matrix
final.append(acu_pad)
test_wc_data.append(final)
import time
import multiap_cnn_model
# # BALANCED DATA
printing = {}
FILTER_SIZES_AR = [[1,1,1]]
filter_sizes_names = ['1_1_1']
FEATURE_MAPS_AR = [[150,150,150]]
feature_maps_names = ['150']
DROPOUT_RATE = 0.2
LEARNING_RATE = 0.01
RUNS = 1
DATA_AR = [ wc_data,cw_data]
TEST_DATA_AR = [test_wc_data,test_cw_data]
data_names = ['wc','cw']
MAX_SEQ_LENGTH = 170
for Findex,filterS in enumerate(FILTER_SIZES_AR):
for Mindex, featureM in enumerate(FEATURE_MAPS_AR):
for Dindex, dataV in enumerate(DATA_AR):
FILTER_SIZES = filterS
FEATURE_MAPS = featureM
histories = []
for i in range(RUNS):
print('Running iteration %i/%i' % (i+1, RUNS))
start_time = time.time()
emb_layer = None
model = multiap_cnn_model.build_cnn(
embedding_dim= EMBEDDING_DIM,
filter_sizes = FILTER_SIZES,
feature_maps = FEATURE_MAPS,
max_seq_length = MAX_SEQ_LENGTH,
dropout_rate=DROPOUT_RATE,
num_channels=NUM_CHANNELS
)
model.compile(
loss='binary_crossentropy',
optimizer=Adadelta(clipvalue=3,lr=LEARNING_RATE),
metrics=['accuracy',keras_metrics.precision(),keras_metrics.recall()]
)
history = model.fit(
[dataV], y_train,
epochs=NB_EPOCHS,
batch_size=BATCH_SIZE,
verbose=1,
validation_data=([TEST_DATA_AR[Dindex]], y_test),
callbacks=[ModelCheckpoint('model-%i.h5'%(i+1), monitor='val_loss',
verbose=0, save_best_only=True, mode='min'),
ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=4, min_lr=0.01)
]
)
histories.append(history.history)
print('Iteration', i+1)
print("--- %s seconds on ---" % (time.time() - start_time))
with open('history/mfcc20_zcr/_FS'+str(filter_sizes_names[Findex])+'_FM_'+str(feature_maps_names[Mindex])+'_data_'+str(data_names[Dindex])+'.pkl', 'wb') as f:
pickle.dump(histories, f)
| null | run_model.py | run_model.py | py | 15,676 | python | en | code | null | code-starcoder2 | 51 |
161296733 | ''' Gestion principale d'un jeu de Yams, module principal.
@author: Rouyan Thi
'''
from random import sample, randint
def jet_init(nb_des, mode):
'''Lance tous les dés une première fois.
:param nb_des: nombre de dés à lancer
:type nb_des: int
:param mode: jeu avec ou sans dés cassés
:type mode: int
:return: renvoie la liste des valeurs des dés
:rtype: list
'''
liste_des = []
for n in range(nb_des):
liste_des += [randint(1,6)]
if mode == 1:
k = randint(1, 100)
pos_casse = [1, 1, 1, 1, 1]
if k <= 5:
a, b, c = randint(1, 5), randint(1, 5), randint(1, 5)
des_casses = place_casses([a, b, c], pos_casse)
elif k <=15:
a, b = randint(1, 5), randint(1, 5)
des_casses = place_casses([a, b], pos_casse)
elif k <= 25:
a = randint(1, 5)
des_casses = place_casses([a], pos_casse)
else:
des_casses = [0, 0, 0, 0, 0]
return [liste_des, des_casses]
return liste_des
def relance(select, liste_des, mode):
'''Relance les dés sélectionnés.
:param select: sélection des dés à relancer
:type select: list
:param liste_des: valeurs des dés avant relance
:type liste_des: list
:return: renvoie les nouvelles valeurs des dés
:rtype: list
'''
count = 0
if mode == 0:
for elem in select:
if elem != 0 and elem[1] == liste_des[elem[0]]:
liste_des[elem[0]] = randint(1, 6)
return liste_des
else:
pos_casse = []
for elem in select:
if elem != 0 and elem[1] == liste_des[elem[0]]:
liste_des[elem[0]] = randint(1, 6)
pos_casse += [1]
count += 1
else:
pos_casse += [0]
k = randint(1, 100)
if k <= 5 and count >= 3:
a, b, c = randint(1, 5), randint(1, 5), randint(1, 5)
des_casses = place_casses([a, b, c], pos_casse)
elif k <= 15 and count >= 2:
a, b = randint(1, 5), randint(1, 5)
des_casses = place_casses([a, b], pos_casse)
elif k <= 25 and count >= 1:
a = randint(1, 5)
des_casses = place_casses([a], pos_casse)
else:
des_casses = [0, 0, 0, 0, 0]
return [liste_des, des_casses]
def place_casses(val_cassees, pos_casse):
des_casses = [0, 0, 0, 0, 0]
for elem in val_cassees:
if elem == 1 and pos_casse[0] == 1:
des_casses[0] = 1
if elem == 2 and pos_casse[1] == 1:
des_casses[1] = 1
if elem == 3 and pos_casse[2] == 1:
des_casses[2] = 1
if elem == 4 and pos_casse[3] == 1:
des_casses[3] = 1
if elem == 5 and pos_casse[4] == 1:
des_casses[4] = 1
return des_casses
| null | modules/main.py | main.py | py | 2,912 | python | en | code | null | code-starcoder2 | 51 |
445572431 | """
mapsAPI.py
Source Code for Google Maps API or interaction between genetic algorithm and google maps
Author(s): Niklaas Cotta
CIS422 FA21
Team NASAK
Creation Date: 10/9/21
Sources:
List of APIs: https://developers.google.com/maps/documentation
Distance Matrix Documentation: https://developers.google.com/maps/documentation/distance-matrix/start
Description:
This file contains two functions. The first function, get_distance() contains the distance API call to google maps.
The second function, genMatrix(), creates a complete graph containing n addresses as vertices. This graph is in matrix
form. For each vertex (address) pair, calculates the distance between the two.
"""
import requests # this library is how python makes requests to APIs
import regex # this library is for parsing strings with regular expressions
def get_distance(origin, destination):
"""
This function takes a source string and a destination string in the form of an address.
Address may be in following forms:
1710 E 15th Ave, OR
6513 Flag Way Dr, Boise, Idaho
Seattle, Washington
San Francisco, CA
Function then requests distance between source and destination from google maps API.
If successful request, gets the distance from the json, and converts the distance from a string to integer
On failure returns -1. On success returns distance between the two places.
:param origin: str
:param destination: str
:return: float (distance between the two)
"""
# get API key
fp = open("api-key.txt", "r") # open file containing api key
API_KEY = fp.read()
fp.close()
# base url, used later in request to google
url = f"https://maps.googleapis.com/maps/api/distancematrix/json?" \
f"origins={origin}&destinations={destination}" \
f"&units=imperial&key={API_KEY}"
response = requests.request("GET", url, headers={}, data={}) # query response from maps, this is the API call
if response.status_code != 200: # 200 means OK
print("Could not get distance from API")
return -1
response = response.json() # convert response into json format
distancestr = response["rows"][0]["elements"][0]["distance"]["text"] # isolate .json element
# this just turns the string into a float number
found = regex.search(r"\d*[,.]*\d*", distancestr)
distance = float(found.group().replace(",", ""))
return distance # float
########################################################################################################################
def genMatrix(addressList):
"""
This function takes a list of addresses (strings) and generates a complete graph of distances between addresses.
This graph is in the form of a matrix where each index corresponds to an address, in the order of addressList.
After initially populating the matrix with 0s, the graph then calls get_distance() between each pair of addresses.
The graph is undirected, so the matrix will have symmetry.
:param addressList: list of str
:return: tuple containing list of list of distances (matrix) and list of addresses (strings)
"""
matrix = [] # empty var to be filled
n = len(addressList) # get length
# populate initial matrix with 0's (n x n matrix)
for j in range(n):
matrix.append([]) # add a "row"
for _ in range(n):
matrix[j].append(0)
# update matrix with actual distances
for j in range(n):
for i in range(n):
if i != j: # distance from x to x is 0
distance = get_distance(addressList[i], addressList[j]) # api call
matrix[j][i] = distance # insert distance into correct coordinate
if any(-1 in row for row in matrix): # make sure there are no invalid distances
print("WARNING: Distance matrix contains invalid distance (-1). API function could not grab distance. Program will continue")
return matrix, addressList # returns tuple containing address list and corresponding matrix
########################################################################################################################
if __name__ == '__main__':
# Example
newAddresses = ["NYC, NY", "1710 E 15th Ave, Eugene,OR", "Cocoa Beach,FL", "Seattle, Washington"]
newMatrix, addresses = genMatrix(newAddresses)
for address in newMatrix:
print(address)
| null | webapp/api/mapsAPI.py | mapsAPI.py | py | 4,441 | python | en | code | null | code-starcoder2 | 51 |
194272122 |
def line_splitter(filename):
num1 = []
with open(filename, encoding="utf8") as text:
text = text.read()
text.strip("\n")
line = text.splitlines()
return line
def search_cap_letter(line):
speech_box = []
speech_box1 = []
DASH = "—"
cap_inp = input("Найти речь, начинающуюся с (заглавная буква): ")
cap_inp.upper()
if len(cap_inp) == 1:
cap_letter = cap_inp
else:
cap_letter = cap_inp[0]
for l in line:
if l.startswith(DASH)and cap_letter in l:
speech_box.append(l)
for item in speech_box:
item = item.strip("— ")
if item.startswith(cap_letter):
speech_box1.append(item)
if len(speech_box1) == 0:
print("Такой буковки нет :(")
else:
return "\n".join(speech_box1)
def main():
line = line_splitter("prilepin.txt")
print(search_cap_letter(line))
if __name__ == "__main__":
main()
| null | Preparation1.py | Preparation1.py | py | 1,089 | python | en | code | null | code-starcoder2 | 51 |
457832215 |
from PIL import Image, ImageDraw
import math
import random
SIZE=(500, 700)
START=(250, SIZE[1]-100)
def radians(degrees):
return degrees/180*math.pi
def sortAngleHelper(angle):
return getAngleDistance(angle, radians(-90))
def getAngleDistance(angleA, angleB):
distance = angleA - angleB
if distance < 0:
distance = -distance
if distance > 2*math.pi:
distance = distance % 2*math.pi
if distance > math.pi:
distance = 2*math.pi - distance
return distance
def interpolateColor(colorA, colorB, ratio):
rgba = []
for i in range(4):
color = int(colorA[i] * (1-ratio) + colorB[i] * ratio)
rgba.append(color)
return (rgba[0], rgba[1], rgba[2], rgba[3])
def getRandomColorDeviation(color, variance):
rgb = []
for i in range(3):
value = random.randrange(color[i]-variance, color[i]+variance)
if value > 255:
value = 255
if value < 0:
value = 0
rgb.append(value)
return (rgb[0], rgb[1], rgb[2], color[3])
def getDistance(p1, p2):
x = p2[0] - p1[0]
y = p2[1] - p1[1]
return math.sqrt(x*x + y*y)
def interpolateScalar(s1, s2, ratio):
return (s1*(1-ratio) + s2*ratio)
def interpolate(p1, p2, ratio):
x = p1[0] * (1-ratio) + p2[0] * ratio
y = p1[1] * (1-ratio) + p2[1] * ratio
return [x, y]
def randRangeFloat(min, max):
diff = max-min
randPart = random.random()*diff
return (min + randPart)
def randRangeFloatArray(arr):
return randRangeFloat(arr[0], arr[1])
def interpolateArc(p1, p2, arcSize, ratio):
bottomPoint = interpolate(p1, p2, ratio)
parabolicRatio = 4*(ratio - ratio*ratio)
arc = arcSize * parabolicRatio
point = (bottomPoint[0], bottomPoint[1] + arc)
return point
def interpolateArcScalar(arcSize, ratio):
parabolicRatio = 4*(ratio - ratio*ratio)
return arcSize * parabolicRatio
def getBoundariesForCircle(center, diameter):
radius = diameter/2
x1 = center[0] - radius
x2 = center[0] + radius
y1 = center[1] - radius
y2 = center[1] + radius
return (x1, y1, x2, y2)
def drawCircle(center, diameter, color, draw : ImageDraw.ImageDraw):
xy = getBoundariesForCircle(center, diameter)
draw.ellipse(xy, fill=color)
return
numToDraw = 30
OUTPUT_PATH = "sprites/objects/trees/"
STUMP_START_WIDTH_RANGE = [12, 16]
HEIGHT_RANGE = [400, 550]
STUMP_DECAY_RATIO = 0.995
RESOLUTION = 5
BRANCH_START_LENGTH_RANGE = [100, 120]
BRANCH_DECAY_RATIO = 0.995
VERT_RATIO = 1/math.sqrt(3)
STUMP_COLOR = (100, 60, 20, 255)
BRANCH_ARC_RATIO = 0.2
BRANCH_COLOR = (0, 90, 40, 255)
BRANCH_COLOR_DEVIATION = 10
BRANCH_WIDTH = 1
SHADOW_OFFSET = 2
SUB_BRANCH_RATIO = 0.5
def drawBranch(start, length, angle, draw : ImageDraw.ImageDraw):
endX = start[0] + math.cos(angle)*length
endY = start[1] + math.sin(angle)*length*VERT_RATIO
end = (endX, endY)
arcSize = BRANCH_ARC_RATIO * length
numPieces = int(length/RESOLUTION)
color = getRandomColorDeviation(BRANCH_COLOR, BRANCH_COLOR_DEVIATION)
shadowColor = interpolateColor(color, (0, 0, 0, 255), 0.5)
angleSubBranchLeft = angle - radians(45)
angleSubBranchRight = angle + radians(45)
subBranchMaxLength = length * SUB_BRANCH_RATIO
for i in range(numPieces):
ratio = i/numPieces
nextRatio = (i+1)/numPieces
segmentStart = interpolateArc(start, end, arcSize, ratio)
segmentEnd = interpolateArc(start, end, arcSize, nextRatio)
shadowStart = (segmentStart[0], segmentStart[1]+SHADOW_OFFSET)
shadowEnd = (segmentEnd[0], segmentEnd[1]+SHADOW_OFFSET)
subBranchLength = interpolateArcScalar(subBranchMaxLength, ratio)
subBranchLeftEndX = segmentEnd[0] + math.cos(angleSubBranchLeft)*subBranchLength
subBranchLeftEndY = segmentEnd[1] + math.sin(angleSubBranchLeft)*subBranchLength*VERT_RATIO
subBranchRightEndX = segmentEnd[0] + math.cos(angleSubBranchRight)*subBranchLength
subBranchRightEndY = segmentEnd[1] + math.sin(angleSubBranchRight)*subBranchLength*VERT_RATIO
subBranchLeftEnd = (subBranchLeftEndX, subBranchLeftEndY)
subBranchRightEnd = (subBranchRightEndX, subBranchRightEndY)
subBranchLeftShadowEnd = (subBranchLeftEndX, subBranchLeftEndY + SHADOW_OFFSET)
subBranchRightShadowEnd = (subBranchRightEndX, subBranchRightEndY + SHADOW_OFFSET)
draw.line((shadowStart, shadowEnd), fill=shadowColor, width=BRANCH_WIDTH)
draw.line((shadowStart, subBranchLeftShadowEnd), fill=shadowColor, width=BRANCH_WIDTH)
draw.line((shadowStart, subBranchRightShadowEnd), fill=shadowColor, width=BRANCH_WIDTH)
draw.line((segmentStart, segmentEnd), fill=color, width=BRANCH_WIDTH)
draw.line((segmentStart, subBranchLeftEnd), fill=color, width=BRANCH_WIDTH)
draw.line((segmentStart, subBranchRightEnd), fill=color, width=BRANCH_WIDTH)
return
ANGLE_SPREAD_RATIO = 2
def drawBranches(start, n, lengthRange, draw : ImageDraw.ImageDraw):
angleSeeds = []
angleSeedsSum = 0
for i in range(n):
angleSeed = randRangeFloat(1, ANGLE_SPREAD_RATIO)
angleSeeds.append(angleSeed)
angleSeedsSum += angleSeed
nextAngle = randRangeFloat(0, 2*math.pi)
angles = []
for seed in angleSeeds:
nextAngle += seed/angleSeedsSum*2*math.pi % (2*math.pi)
angles.append(nextAngle)
angles.sort(key=sortAngleHelper)
for angle in angles:
length = randRangeFloatArray(lengthRange)
drawBranch(start, length, angle, draw)
return
BRANCH_SPACE = 20
BRANCH_N = 6
def drawTree(draw : ImageDraw.ImageDraw):
stumpWidth = randRangeFloatArray(STUMP_START_WIDTH_RANGE)
height = randRangeFloatArray(HEIGHT_RANGE)
location = START
heightSoFar = 0
lastBranch = 0
branchRatio = 1
while True:
drawCircle(location, stumpWidth, STUMP_COLOR, draw)
location = (location[0], location[1] - RESOLUTION)
stumpWidth = randRangeFloat(stumpWidth*STUMP_DECAY_RATIO, stumpWidth)
if lastBranch >= BRANCH_SPACE:
nBranches = random.randrange(4, 8)
branchRange = [BRANCH_START_LENGTH_RANGE[0]*branchRatio, BRANCH_START_LENGTH_RANGE[1]*branchRatio]
drawBranches(location, nBranches, branchRange, draw)
lastBranch = 0
heightSoFar += RESOLUTION
lastBranch += RESOLUTION
branchRatio *= BRANCH_DECAY_RATIO
if heightSoFar > height:
break
OUTPUT_PATH = "sprites/objects/trees/"
NUM_TREES = 30
for i in range(NUM_TREES):
image = Image.new('RGBA', SIZE, (0, 0, 0, 0))
draw = ImageDraw.Draw(image)
drawTree(draw)
filename = OUTPUT_PATH + "evergreen_" + str(i) + ".png"
image.save(filename, "PNG")
| null | generate/objects/trees/evergreen.py | evergreen.py | py | 6,806 | python | en | code | null | code-starcoder2 | 51 |
341016782 | from Stacks import FirstStack
def is_valid(expr):
st = FirstStack.Stack() # Use the Class.method notation to access the Stack() class in FirstStack
for ch in expr:
if ch in '({[':
st.push(ch)
if ch in ')}]':
if st.is_empty():
print("Right parentheses are more than left parentheses")
return False
else:
char = st.pop()
if not match_parentheses(char, ch):
print("Mismatched parentheses are ", char, " and ", ch)
return False
if st.is_empty():
print("Balanced Parentheses")
return True
else:
print("Left parentheses are more than right parentheses")
return False
def match_parentheses(leftPar, rightPar):
if leftPar == '[' and rightPar == ']':
return True
if leftPar == '{' and rightPar == '}':
return True
if leftPar == '(' and rightPar == ')':
return True
return False
while True:
print("Enter an epression with parentheses (q to quit) : ", end=" ")
expression = input()
if expression == "q":
break
if is_valid(expression):
print("Valid expression")
else:
print("Invalid Expression")
| null | Python_Programming/PythonDataStructures/Stacks/ValidateParentheses.py | ValidateParentheses.py | py | 1,284 | python | en | code | null | code-starcoder2 | 51 |
332385312 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
clicksData = pd.read_csv('./clicksInfo.csv', '\t', header=0).ix[:,:].as_matrix()
conversionsData = pd.read_csv('./conversionsInfo.csv', '\t', header=0).ix[:,:].as_matrix()
objForCpc = {}
for row in clicksData:
if(row[0] in objForCpc):
objForCpc[row[0]]['days'].append(row[1])
objForCpc[row[0]]['cpc'].append(row[2])
else:
objForCpc[row[0]] = {}
objForCpc[row[0]]['days'] = [row[1]]
objForCpc[row[0]]['cpc'] = [row[2]]
keys = objForCpc.keys()
n_groups = len(objForCpc[keys[0]]['cpc'])
fig, ax = plt.subplots(figsize=(20,10))
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.8
colors = ['red','green','b','yellow','orange','black','pink']
colorCounter=0
for ob in keys:
plt.bar(index + bar_width*colorCounter, objForCpc[ob]['cpc'], bar_width, alpha=opacity, color=colors[colorCounter], label=ob)
colorCounter += 1
plt.xlabel('Date')
plt.ylabel('Avg cpc')
plt.title('Average CPCs')
plt.xticks(index + bar_width, objForCpc[keys[0]]['days'])
plt.legend()
#plt.tight_layout()
#plt.show()
plt.savefig('./images/AvgCpc.png')
| null | plots/basicBarForAvgCpc.py | basicBarForAvgCpc.py | py | 1,123 | python | en | code | null | code-starcoder2 | 51 |
641614323 | # coding: utf-8
from __future__ import unicode_literals
import os.path
import re
from collections import Sequence
from operator import itemgetter
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import parse_iso8601, ExtractorError, try_get, OnDemandPagedList
class ParsedURL(object):
    """
    Unified accessor for a single URL, combining the results of
    urlparse(), parse_qsl() and optional regular expression groups.
    """

    def __init__(self, url, regex=None):
        parts = compat_urllib_parse_urlparse(url)
        self._parts = parts
        self._groups = {}
        # decode the query string once; numeric values become ints
        self._query = {}
        for key, value in compat_urlparse.parse_qsl(parts.query):
            self._query[key] = int(value) if value.isdigit() else value
        self._match = re.match(regex, url) if regex else None
        if regex:
            assert self._match, "regex does not match url"

    def __getattr__(self, item):
        """
        Delegate unknown attributes to the urlparse result,
        which provides scheme, netloc, url, params, fragment.
        Note that .query is shadowed by the method below.
        """
        return getattr(self._parts, item)

    def query(self, key=None, default=None):
        """Return a copy of the full query dict, or one value by *key*."""
        if key is None:
            return dict(self._query)
        return self._query.get(key, default)

    def regex_group(self, key=None):
        """Return the regex groupdict, or one named/numbered group by *key*."""
        assert self._match, "no regex provided"
        if key is None:
            return self._match.groupdict()
        return self._match.group(key)
class YoumakerIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://(?:[a-z][a-z0-9]+\.)?youmaker\.com/
(?:v|video|embed|channel|playlist)/
(?P<id>[0-9a-zA-Z-]+)
"""
_TESTS = [
{
"url": "http://www.youmaker.com/v/71b5d2c5-31b6-43b8-8475-1dcb5e10dfb0",
"info_dict": {
"id": "71b5d2c5-31b6-43b8-8475-1dcb5e10dfb0",
"ext": "mp4",
"title": "Как сшить шапочку из трикотажа. Плоский шов двойной иглой.",
"description": r"re:(?s)^Привет друзья!\n\nВ этом видео я .* представлена www\.iksonmusic\.com$",
"thumbnail": r"re:^https?://.*\.(?:jpg|png)$",
"duration": 358,
"upload_date": "20190614",
"uploader": "user_318f21e00e1f8a6b414f20a654d0f4fc7d2053bc",
"timestamp": 1560554895,
"channel": "Sewing Ideas",
"channel_id": "40ca79f7-8b21-477f-adba-7d0f81e5b5fd",
"channel_url": r"re:https?://www.youmaker.com/channel/40ca79f7-8b21-477f-adba-7d0f81e5b5fd",
"tags": [
"как сшить детскую шапочку из трикотажа",
"как шить двойной иглой трикотаж",
],
"categories": ["Life", "How-to & DIY"],
},
"params": {
"skip_download": True,
},
},
{
# all videos from channel
"url": "http://www.youmaker.com/channel/f06b2e8d-219e-4069-9003-df343ac5fcf3",
"info_dict": {
"id": "f06b2e8d-219e-4069-9003-df343ac5fcf3",
"title": "YoYo Cello",
"description": "Connect the World Through Music. \nConnect Our Hearts with Music.",
},
"playlist_mincount": 30,
"params": {
"nocheckcertificate": True,
},
},
{
# all videos from channel playlist
"url": "https://www.youmaker.com/channel/f8d585f8-2ff7-4c3c-b1ea-a78d77640d54/"
"playlists/f99a120c-7a5e-47b2-9235-3817d1c12662",
"info_dict": {
"id": "f99a120c-7a5e-47b2-9235-3817d1c12662",
"title": "Mini Cakes",
},
"playlist_mincount": 9,
"params": {
"nocheckcertificate": True,
},
},
]
REQUEST_LIMIT = 50
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
super(YoumakerIE, self).__init__(downloader=downloader)
self._protocol = "https"
self._category_map = None
self._cache = {}
@staticmethod
def _extract_url(webpage):
match = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?youmaker\.com/embed/[0-9a-zA-Z-]+)[^"]*"',
webpage,
)
return match.group("url") if match else None
def _fix_url(self, url):
if url.startswith("//"):
return "%s:%s" % (self._protocol, url)
return url
@property
def _base_url(self):
return self._fix_url("//www.youmaker.com")
@property
def _asset_url(self):
# as this url might change in the future
# it needs to be extracted from some js magic...
return self._fix_url("//vs.youmaker.com/assets")
def _live_url(self, video_id):
return self._fix_url("//live.youmaker.com/%s/playlist.m3u8" % video_id)
def _call_api(self, uid, path, what="JSON metadata", fatal=True, **kwargs):
"""
call the YouMaker JSON API and return a valid data object
path: API endpoint
what: query description
fatal: if True might raise ExtractorError otherwise warn and return None
**kwargs: parameters passed to _download_json()
"""
url = "%s/v1/api/%s" % (self._base_url, path)
kwargs.setdefault("note", "Downloading %s" % what)
kwargs.setdefault("errnote", "Failed to download %s" % what)
info = self._download_json(url, uid, fatal=fatal, **kwargs)
# soft error already reported
if info is False:
return None
status = try_get(info, itemgetter("status"), compat_str)
data = try_get(info, itemgetter("data"), (list, dict))
if status != "ok":
msg = "%s - %s" % (what, status or "Bad JSON response")
if fatal or status is None:
raise ExtractorError(
msg, video_id=uid, expected=isinstance(status, compat_str)
)
self.report_warning(msg, video_id=uid)
return data
@property
def _categories(self):
if self._category_map is None:
category_list = (
self._call_api(
None,
"video/category/list",
what="categories",
fatal=False,
)
or ()
)
self._category_map = {item["category_id"]: item for item in category_list}
return self._category_map
def _categories_by_id(self, cid):
categories = []
if cid is None:
return categories
while True:
item = self._categories.get(cid)
if item is None or item["category_name"] in categories:
break
categories.insert(0, item["category_name"])
cid = item["parent_category_id"]
return categories
def _get_subtitles(self, system_id):
if system_id is None:
return {}
subs_list = (
self._call_api(
system_id,
"video/subtitle",
what="subtitle info",
query={"systemid": system_id},
fatal=False,
)
or ()
)
subtitles = {}
for item in subs_list:
subtitles.setdefault(item["language_code"], []).append(
{"url": "%s/%s" % (self._asset_url, item["url"])}
)
return subtitles
    def _video_entry_by_metadata(self, info):
        """Build an info dict from a raw video-metadata object.

        Raises ExtractorError when mandatory fields are missing or no
        usable video formats can be extracted.
        """
        # check some dictionary keys so it's safe to use them
        mandatory_keys = {"video_uid", "title", "data"}
        missing_keys = mandatory_keys - set(info.keys())
        if missing_keys:
            raise ExtractorError(
                "Missing video metadata: %s" % ", ".join(missing_keys),
                video_id=self.ie_key(),
            )
        video_uid = info["video_uid"]
        # "tag" is a bracketed comma-separated string; split it into a list.
        tag_str = info.get("tag")
        if tag_str:
            tags = [tag.strip() for tag in tag_str.strip("[]").split(",")]
        else:
            tags = None
        channel_url = (
            "%s/channel/%s" % (self._base_url, info["channel_uid"])
            if "channel_uid" in info
            else None
        )
        video_info = info["data"]  # asserted before
        duration = video_info.get("duration")
        formats = []
        # Running live streams use the live playlist URL; everything else
        # reads the stream playlist from the asset metadata.
        if info.get("live") and info.get("live_status") == "start":
            is_live = True
            playlist = self._live_url(video_uid)
        else:
            is_live = False
            playlist = video_info.get("videoAssets", {}).get("Stream")
        if playlist:
            playlist_name = os.path.basename(playlist)
            formats.extend(
                self._extract_m3u8_formats(
                    self._fix_url(playlist),
                    video_uid,
                    ext="mp4",
                    entry_protocol="m3u8" if is_live else "m3u8_native",
                    note="Downloading %s" % playlist_name,
                    errnote="%s (%s)" % (video_uid, playlist_name),
                    fatal=False,
                )
            )
        if not formats:
            # as there are some videos on the platform with missing playlist
            # expected is set True
            raise ExtractorError(
                "No video formats found!", video_id=video_uid, expected=True
            )
        self._sort_formats(formats)
        for item in formats:
            height = try_get(item, itemgetter("height"), int)
            if height:
                item["format_id"] = "%dp" % item["height"]
            tbr = try_get(item, itemgetter("tbr"), (int, float))
            if duration and tbr:
                # tbr is in Kbit/s, so 128 (= 1024 / 8) converts to bytes.
                item["filesize_approx"] = 128 * tbr * duration
        return {
            "id": video_uid,
            "title": info["title"],  # asserted before
            "description": info.get("description"),
            "formats": formats,
            "is_live": is_live,
            "timestamp": parse_iso8601(info.get("uploaded_at")),
            "uploader": info.get("uploaded_by"),
            "duration": duration,
            "categories": self._categories_by_id(info.get("category_id")),
            "tags": tags,
            "channel": info.get("channel_name"),
            "channel_id": info.get("channel_uid"),
            "channel_url": channel_url,
            # NOTE(review): "thumbmail_path" looks misspelled but must match
            # the API's actual key -- confirm against a live response.
            "thumbnail": info.get("thumbmail_path"),
            "view_count": info.get("click"),
            "subtitles": self.extract_subtitles(info.get("system_id")),
        }
def _video_entry_by_id(self, uid):
info = self._cache.get(uid) or self._call_api(
uid,
"video/metadata/%s" % uid,
what="video metadata",
)
return self._video_entry_by_metadata(info)
    def _paged_playlist_entries(self, uid, page_size=REQUEST_LIMIT):
        """Return a lazily evaluated OnDemandPagedList of the playlist's videos."""
        def fetch_page(page_number):
            # Download one page of entries and yield url_result dicts.
            offset = page_number * page_size
            info = self._call_api(
                uid,
                path="playlist/video",
                what="playlist entries %d-%d" % (offset + 1, offset + page_size),
                query={"playlist_uid": uid, "offset": offset, "limit": page_size},
            )
            if not isinstance(info, Sequence):
                raise ExtractorError("Unexpected playlist entries", uid, expected=False)
            for item in info:
                yield self.url_result(
                    "%s/video/%s" % (self._base_url, item["video_uid"]),
                    ie=self.ie_key(),
                    video_id=item["video_uid"],
                    video_title=item["video_title"],
                )
        _ = self._categories  # preload categories
        return OnDemandPagedList(fetch_page, page_size)
    def _paged_channel_entries(self, uid, page_size=REQUEST_LIMIT):
        """Return a lazily evaluated OnDemandPagedList of the channel's videos."""
        def fetch_page(page_number):
            offset = page_number * page_size
            info = self._call_api(
                uid,
                path="video/channel/%s" % uid,
                what="channel entries %d-%d" % (offset + 1, offset + page_size),
                query={"offset": offset, "limit": page_size},
            )
            if not isinstance(info, Sequence):
                raise ExtractorError("Unexpected channel entries", uid, expected=False)
            for item in info:
                # Channel listings already contain full video metadata, so
                # cache it to spare a second API round-trip per video.
                self._cache[item["video_uid"]] = item
                yield self.url_result(
                    "%s/video/%s" % (self._base_url, item["video_uid"]),
                    ie=self.ie_key(),
                    video_id=item["video_uid"],
                    video_title=item["title"],
                )
        _ = self._categories  # preload categories
        return OnDemandPagedList(fetch_page, page_size)
def _playlist_entries_by_id(self, uid):
_ = self._categories # preload categories
info = self._call_api(
uid,
"playlist/%s" % uid,
what="playlist metadata",
)
return self.playlist_result(
self._paged_playlist_entries(
info["playlist_uid"],
),
playlist_id=info["playlist_uid"],
playlist_title=info.get("name"),
playlist_description=None,
)
def _channel_entries_by_id(self, uid):
_ = self._categories # preload categories
info = self._call_api(
uid,
path="video/channel/metadata/%s" % uid,
what="channel metadata",
)
return self.playlist_result(
self._paged_channel_entries(
info["channel_uid"],
),
playlist_id=info["channel_uid"],
playlist_title=info.get("name"),
playlist_description=info.get("description"),
)
def _real_extract(self, url):
parsed_url = ParsedURL(url)
self._protocol = parsed_url.scheme
dispatch = (
(r"/(?:v|video|embed)/(?P<uid>[a-zA-z0-9-]+)", self._video_entry_by_id),
(
r"(/channel/[a-zA-z0-9-]+)?/playlists?/(?P<uid>[a-zA-z0-9-]+)",
self._playlist_entries_by_id,
),
(r"/channel/(?P<uid>[a-zA-z0-9-]+)/?$", self._channel_entries_by_id),
)
for regex, func in dispatch:
match = re.match(regex, parsed_url.path)
if not match:
continue
return func(**match.groupdict())
else:
raise ExtractorError("unsupported %s url" % self.ie_key(), expected=True)
| null | youtube_dl/extractor/youmaker.py | youmaker.py | py | 15,038 | python | en | code | null | code-starcoder2 | 51 |
278790596 | import os
from os import listdir
from openvino.inference_engine import IECore
import cv2
import numpy as np
from scipy.spatial.distance import cosine
import imutils
import dlib
from numpy import savez_compressed
# OpenVINO IR model files; each .xml topology has its .bin weights alongside.
det_model=r"C:\Users\LENOVO\Desktop\FaceReid\detection_model\face-detection-0202.xml"
det_weights=os.path.splitext(det_model)[0] +'.bin'
reid_model=r"C:\Users\LENOVO\Desktop\Detect&Recognize\face_net_mobile_face\model-0000.xml"
reid_weights=os.path.splitext(reid_model)[0] +'.bin'
def output_handler(frame,result,height,width):
    """Crop every detection above 0.5 confidence out of *frame*.

    *result* is the raw detector output; each box is
    [_, _, confidence, xmin, ymin, xmax, ymax] with coordinates normalised
    to [0, 1].  Every crop is resized to the 112x112 input expected by the
    re-identification network.
    """
    faces = []
    for detection in result[0][0]:
        confidence = detection[2]
        if confidence <= 0.5:
            continue
        left = int(detection[3] * width)
        top = int(detection[4] * height)
        right = int(detection[5] * width)
        bottom = int(detection[6] * height)
        crop = cv2.resize(frame[top:bottom, left:right], (112, 112))
        faces.append(crop)
    return faces
def extract_face(filename):
    """Detect and return the first face crop in the image at *filename*.

    Runs the OpenVINO face-detection network on CPU and returns the first
    112x112 crop produced by output_handler.  NOTE(review): if the inference
    request does not finish with status 0 this implicitly returns None, and
    an image with no detections raises IndexError -- callers should be
    prepared for both.
    """
    plugin=IECore()
    net=plugin.read_network(model=det_model,weights=det_weights)
    exec_net=plugin.load_network(network=net,device_name="CPU")
    input_blob=list(net.input_info.keys())[0]
    output_blob=next(iter(net.outputs))
    # Network input layout is NCHW.
    b,c,h,w=net.input_info[input_blob].input_data.shape
    image=cv2.imread(filename)
    height=image.shape[0]
    width=image.shape[1]
    # Resize to the network input size and convert HWC -> NCHW.
    p_image=cv2.resize(image,(w,h))
    p_image=p_image.transpose((2,0,1))
    p_image=p_image.reshape(1,3,h,w)
    infer_request=exec_net.start_async(request_id=0,inputs={input_blob:p_image})
    status=exec_net.requests[0].wait(-1)
    if status==0:
        result=exec_net.requests[0].outputs[output_blob]
        return output_handler(image,result,height,width)[0]
def reidentify(test_subject):
    """Compute a face embedding for a 112x112 face crop.

    Runs the MobileFaceNet re-identification network on CPU and returns the
    first batch row of its output.  NOTE(review): the BGR->RGB conversion
    result is discarded -- the following line resizes the original
    *test_subject*, so the network actually receives the unconverted image;
    confirm whether that is intended.
    """
    reid_plugin=IECore()
    reid_net=reid_plugin.read_network(model=reid_model,weights=reid_weights)
    reid_execnet=reid_plugin.load_network(network=reid_net,device_name="CPU")
    reid_inputblob=list(reid_net.input_info.keys())[0]
    reid_outputblob=next(iter(reid_net.outputs))
    b,c,h,w=reid_net.input_info[reid_inputblob].input_data.shape
    p_image=cv2.cvtColor(test_subject,cv2.COLOR_BGR2RGB)
    p_image=cv2.resize(test_subject,(w,h))
    p_image=p_image.transpose((2,0,1))
    p_image=p_image.reshape(1,3,h,w)
    infer_request=reid_execnet.start_async(request_id=0,inputs={reid_inputblob:p_image})
    status=reid_execnet.requests[0].wait(-1)
    if status==0:
        result=reid_execnet.requests[0].outputs[reid_outputblob]
        #This stores embeddings
        #print(result[0])
        #print('storing embedding')
        #savez_compressed('tariq3.npz',result[0])
        #return np.array(result).reshape((1,256))[0]
        return result[0]
def is_match(known_embedding,candidate_embedding,thresh=0.5):
#calculate the distance between embeddings
score=cosine(known_embedding,candidate_embedding)
#score= np.sqrt(np.sum(np.square(np.subtract(known_embedding, candidate_embedding))))
if score<=thresh:
print('face is a match',('Score: ',score,' Threshold: ',thresh))
else:
print('face is not a match',('Score: ',score,' Threshold: ',thresh))
# MOT: identity name -> list of cosine distances (each probe vs. the anchor).
MOT={}
# ANC: identity name -> path of the single anchor image for that identity.
# Commented-out entries are alternative evaluation subsets.
ANC={
    #'anthony-mackie':"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/anthony-mackie/Anthony Mackie28_529.jpg",
    #'daniel-kaluuya':"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/daniel-kaluuya/download (1).jpg",
    #'idris-elba':"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/idris-elba/images (2).jpg",
    #'kanye-west':"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/kanye-west/images (15).jpg",
    #'lupita':"E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeOne/lupita/download.jpg"
    'michael-blackson':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/michael-blackson/503862_v9_bb.jpg",
    'morgan-freeman':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/morgan-freeman/2402.jpg",
    'obama':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/obama/barack obama40_712.jpg",
    'olivia-pope':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/olivia-pope/download (1).jpg"
    #'rihanna':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/rihanna/images (1).jpg",
    #'thiery-henry':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/thiery-henry/images (62).jpg",
    #'viola-davis':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/viola-davis/‘HTGAWM’s-Viola-Davis-Why-Playing-Annalise-Keating-Has-‘Meant-Everything.jpg",
    #'will-smith':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/will-smith/download (3).jpg",
    #'zendaya':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/zendaya/591658_v9_bb.jpg",
    #'zoe-saldana':"E:/FINAL-YEAR-PROJECT/Dataset++/face-dataset/evaluateThis/zoe-saldana/e708c468969d68c966422f5962e7f69453-2-zoe-saldana.2x.rhorizontal.w710.jpg"
}
# Root folder holding one sub-directory of probe images per identity.
directory="E:/FINAL-YEAR-PROJECT/Dataset++/straight-face/TakeTwo"
# Score every probe image of each identity against that identity's anchor.
for name in listdir(directory):
    print('Now on: ', name)
    MOT[str(name)] = []
    path = directory + '/' + name
    # The anchor crop and its embedding only depend on the identity, so
    # compute them once per identity instead of once per probe image (the
    # original re-ran detection + embedding of the anchor inside the inner
    # loop).  The unused `count` variable was dropped as well.
    extracted_ancFace = extract_face(ANC[str(name)])
    emb1 = reidentify(extracted_ancFace)
    for file in listdir(path):
        current_location = path + '/' + file
        test_img = extract_face(current_location)
        emb2 = reidentify(test_img)
        score = cosine(emb1, emb2)
        MOT[str(name)].append(score)
    print('End of ', name)
print(MOT)
print("\n")
print("Bias Percentages")
print("\n")

def _report_accuracy(threshold_label):
    """Print each identity's match rate at the given cosine-distance cutoff."""
    threshold = float(threshold_label)
    print("At Threshold %s" % threshold_label)
    for name in MOT.keys():
        values = MOT[str(name)]
        masked = [1 if val <= threshold else 0 for val in values]
        # NOTE(review): dividing by len(masked) - 1 looks like an off-by-one
        # (a plain mean would use len(masked)); kept to preserve behaviour.
        percentage = sum(masked) / (len(masked) - 1)
        print(name, " accuracy: ", percentage)

# The original repeated the same block nine times with a hard-coded cutoff.
# The labels reproduce the original header strings exactly ("0.5", "0.70", ...),
# and two blank-line prints separate consecutive sections, with a single one
# after the last section, matching the original output byte for byte.
_threshold_labels = ["0.5", "0.55", "0.6", "0.65", "0.70", "0.75", "0.80", "0.85", "0.90"]
for _index, _label in enumerate(_threshold_labels):
    if _index:
        print("\n")
        print("\n")
    _report_accuracy(_label)
print("\n")
| null | Evaluating the Oneshot model/modelEval-Straightface.py | modelEval-Straightface.py | py | 7,910 | python | en | code | null | code-starcoder2 | 51 |
649318733 | import unittest
class TestMnvram(unittest.TestCase):
    """Unit tests for mnvram.MozaiqRouter."""

    def test_rename(self):
        """renameRouter() must rewrite hostname, router name and SSIDs."""
        from mnvram import MozaiqRouter
        router = MozaiqRouter({})
        router.renameRouter("newname")
        settings = router.nvram
        self.assertTrue(settings['wan_hostname'] == 'mozaiqnewname')
        self.assertTrue(settings['router_name'] == 'mozaiqnewname')
        # Every *ssid entry must carry the "mozaiq" prefix after a rename.
        for key, value in settings.items():
            if key.endswith("ssid"):
                self.assertTrue(value.startswith('mozaiq'))
| null | test_mnvram.py | test_mnvram.py | py | 457 | python | en | code | null | code-starcoder2 | 51 |
641535635 | # popQuiz - Nov 30, 2018
# Michael Reilly
# I pledge my honor that I have abided by the Stevens Honor System
from cs115 import *
'''
These exercises give you practice writing code with loops and practice reading code.
Read the instructions and the code carefully.
'''
'''
Here is a variation on the Car class.
'''
class Car(object):
    """A car identified by make and model, ordered lexicographically.

    The comparison operators consider only make then model; mpg and tank
    capacity are stored but never affect ordering or equality.
    """

    def __init__(self, make: str, model: str, mpg: float, tank_capacity: float):
        self.__make = make
        self.__model = model
        self.__mpg = mpg
        self.__tank_capacity = tank_capacity

    def get_make(self):
        """Return the manufacturer name."""
        return self.__make

    def get_model(self):
        """Return the model name."""
        return self.__model

    def __le__(self, other):
        """self <= other by make then model; other must also be a Car."""
        assert isinstance(other, Car)
        if self.__make != other.__make:
            return self.__make < other.__make
        return self.__model <= other.__model

    def __eq__(self, other):
        return (self.__make, self.__model) == (other.__make, other.__model)

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        if self.__make != other.__make:
            return self.__make < other.__make
        return self.__model < other.__model

    def __str__(self):
        return "Car(" + self.__make + ", " + self.__model + ")"
'''
Read this function, think about what it will print, and then run it to check your thinking.
'''
def demo1():
    """Build a few Cars and print their string forms."""
    c1 = Car('Toyota', 'Prius', 50.2, 8.8)
    c2 = Car('Honda', 'Civic', 29.5, 13)
    c3 = Car('Honda', 'Accord', 21.5, 18)
    c4 = Car('Tesla', 'Model 3', 60.0, 10.0)
    cs = [c1, c2, c3, c4]
    # NOTE(review): assumes cs115's map returns a printable list rather than
    # a lazy iterator like the builtin -- confirm against the cs115 module.
    print(map(str,cs))
'''
Here is some code similar to lab 11. Read the docstrings and comments, at least.
'''
def insertSort(L):
    '''Sort L in place (ascending) by inserting each element with insert.
    (The original docstring referred to a non-existent "insertV1".)'''
    for i in range(1,len(L)):
        insert(L,i)
def insert(L, i):
    '''Assume L[0:i] is sorted and 0 < i < len(L).
    Shift elements of the list as needed to swap L[i] into
    position so that L[0:i+1] is sorted.'''
    x = L[i]
    j = search(L, i, x) # find where to insert x
    for k in range(i, j, -1): # shift elements out of the way
        swap(L, k, k-1)
    # After the adjacent swaps x has already been rotated down into L[j];
    # this assignment is a harmless no-op kept for clarity.
    L[j] = x # insert x
def search(L, i, x):
    '''Assuming L[0:i] is sorted and 0 <= i <= len(L),
    return j such that 0 <= j <= i and L[0:j] <= x < L[j:i].'''
    # Linear scan: advance past every leading element that is <= x.
    position = 0
    while position < i:
        if L[position] > x:
            break
        position += 1
    return position
def swap(aList, i, j):
    '''Swap the values of aList[i] and aList[j] in place.'''
    # Tuple assignment swaps without an explicit temporary variable.
    aList[i], aList[j] = aList[j], aList[i]
'''
Read this function, think about what it will do, and then run it to check your thinking.
'''
def demo2():
    """Sort a list of Cars in place with insertSort and print the result."""
    c1 = Car('Toyota', 'Prius', 50.2, 8.8)
    c2 = Car('Honda', 'Civic', 29.5, 13)
    c3 = Car('Honda', 'Accord', 21.5, 18)
    c4 = Car('Tesla', 'Model 3', 60.0, 10.0)
    cs = [c1, c2, c3, c4]
    insertSort(cs)
    # NOTE(review): relies on cs115's map returning a printable list.
    print("should be sorted: ", map(str,cs))
'''
TODO: replace None at two places in this function, so that it works as specified.
Check your work using test_find().
'''
def find(L,x):
    '''Assuming L is sorted, return i such that L[i] == x, if x is in L.
    Return -1 if x does not occur in L.'''
    boundary = search(L, len(L), x)
    # search returns the index just past the last element <= x, so when x
    # occurs in L it sits immediately to the left of that boundary.
    if boundary > 0 and L[boundary - 1] == x:
        return boundary - 1
    return -1
def test_find():
    """Exercise find on a sorted list of Cars: start, end, and a missing car."""
    a = Car('Honda', 'Accord', 21.5, 18)
    b = Car('Honda', 'Civic', 29.5, 13)
    c = Car('Tesla', 'Model 3', 60.0, 10.0)
    d = Car('Toyota', 'Prius', 50.2, 8.8)
    e = Car('Maserati', 'Alfieri', 35.0, 10.0)
    cs = [a, b, c, d] # note that it's sorted
    assert find(cs, c) == 2
    assert find(cs, e) == -1
    print("test_find succeeded")
'''
Now comes the interesting part. You will implement the search specification
using the binary search algorithm instead of linear search.
Do not change the given code, except for filling the missing loop body.
Follow the hints. Use test_bfind() to check your work.
'''
#########################################################################
# The idea is to use two variables, j and hi, narrowing the search range
# with j on the low side and hi on the high side.
# Each iteration should decrease hi - j .
##########################################################################
def binsearch(L, i, x):
    '''Assuming L[0:i] is sorted and 0 <= i <= len(L),
    return j such that 0 <= j <= i and L[0:j] <= x < L[j:i].

    Binary search: the gap hi - j halves every iteration, so it runs in
    O(log i).  Fixes two defects in the original: `hi = mid - 1` could skip
    the answer and loop forever (e.g. binsearch([1, 3, 5], 3, 4)), and the
    early break on equality returned the wrong boundary with duplicates
    (e.g. binsearch([5, 5], 2, 5)).
    '''
    j = 0
    hi = i
    # Invariant: L[0:j] <= x < L[hi:i] and 0 <= j <= hi <= i.
    while j != hi:
        mid = (j + hi) // 2   # j <= mid < hi
        if L[mid] <= x:
            j = mid + 1       # everything up to mid is <= x
        else:
            hi = mid          # L[mid:i] is entirely > x
    # now j == hi and the invariant gives L[0:j] <= x < L[j:i]
    return j
def bfind(L,x):
    '''Like find but using binsearch.'''
    boundary = binsearch(L, len(L), x)
    # x, if present, is the element immediately left of the boundary.
    if boundary > 0 and L[boundary - 1] == x:
        return boundary - 1
    return -1
def test_bfind():
    """Exercise bfind at the ends and middle of odd- and even-length lists."""
    # at start
    assert bfind([3,6,20], 3) == 0
    # at end
    assert bfind([2,6,20], 20) == 2
    # in middle, odd position
    assert bfind([2,6,20,25,30], 25) == 3
    # in middle, even position
    assert bfind([2,6,20,25,30], 20) == 2
    # in middle, odd position, even list
    assert bfind([2,6,20,25], 25) == 3
    # in middle, even position, even list
    assert bfind([2,6,20,25], 20) == 2
    print("test_bfind successful")
| null | popQuiz.py | popQuiz.py | py | 5,796 | python | en | code | null | code-starcoder2 | 51 |
606580312 | import gc
import uasyncio as asyncio
gc.collect()
from base_node import BaseDriver
import lvgl as lv
async def set_valve(i2c, i2c_address, pin, state):
'''
Parameters
----------
i2c : machine.I2C
I2C driver handle.
i2c_address : int
I2C address.
pin : int
Motor output pin (e.g., ``grove_i2c_motor.IN1``).
state : bool
If ``False`` set to ``A`` branch. Otherwise, set to ``B`` branch.
'''
driver = BaseDriver(i2c, i2c_address)
await asyncio.sleep_ms(0)
try:
driver.digital_write(pin, state)
except Exception as exception:
print('Error setting valve:', exception)
del driver
gc.collect() | null | valve.py | valve.py | py | 721 | python | en | code | null | code-starcoder2 | 51 |
55454051 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A mixin for objects that can be filtered in Query API"""
class Filterable(object):
  """Mixin that lets the Query API discover a model's filterable attributes.

  Subclasses declare candidate attribute names in ``_filterable_attrs``;
  lookups walk the full MRO and keep only names the class actually defines.
  """
  _filterable_attrs = []

  @classmethod
  def get_filterable_attributes(cls):
    """Collect filterable attribute names from cls and all its bases."""
    collected = set()
    for klass in cls.__mro__:
      candidates = getattr(klass, '_filterable_attrs', [])
      collected.update(name for name in candidates if hasattr(cls, name))
    return collected

  @classmethod
  def get_filterable_attribute(cls, name):
    """Return the filterable attribute, or None when it is missing."""
    return getattr(cls, name) if cls.has_filterable_attribute(name) else None

  @classmethod
  def has_filterable_attribute(cls, name):
    """Check whether *name* is a filterable attribute of cls."""
    return name in cls.get_filterable_attributes()
| null | src/ggrc/models/mixins/filterable.py | filterable.py | py | 996 | python | en | code | null | code-starcoder2 | 51 |
114287718 | import pgl
import paddle.fluid as F
import paddle.fluid.layers as L
from models.base import BaseNet, BaseGNNModel
from models.ernie_model.ernie import ErnieModel
from models.ernie_model.ernie import ErnieGraphModel
from models.ernie_model.ernie import ErnieConfig
class ErnieSageV2(BaseNet):
    """ErnieSage v2: graph network that runs ERNIE per edge.

    Each message-passing step concatenates the source and destination token
    ids (with a CLS token and 0/1 segment ids), runs ERNIE over the pair,
    sums the pooled outputs over neighbours, and combines the result with an
    ERNIE encoding of the node's own text.
    """
    def build_inputs(self):
        # Extend the base inputs with each node's token ids.
        inputs = super(ErnieSageV2, self).build_inputs()
        term_ids = L.data(
            "term_ids", shape=[None, self.config.max_seqlen], dtype="int64", append_batch_size=False)
        return inputs + [term_ids]

    def gnn_layer(self, gw, feature, hidden_size, act, initializer, learning_rate, name):
        def ernie_send(src_feat, dst_feat, edge_feat):
            """Compute the edge message: ERNIE pooled output over the pair
            [CLS] + src tokens (segment 0) | dst tokens (segment 1)."""
            cls = L.fill_constant_batch_size_like(src_feat["term_ids"], [-1, 1, 1], "int64", 1)
            src_ids = L.concat([cls, src_feat["term_ids"]], 1)
            dst_ids = dst_feat["term_ids"]
            sent_ids = L.concat([L.zeros_like(src_ids), L.ones_like(dst_ids)], 1)
            term_ids = L.concat([src_ids, dst_ids], 1)
            term_ids.stop_gradient = True
            sent_ids.stop_gradient = True
            ernie = ErnieModel(
                term_ids, sent_ids,
                config=self.config.ernie_config)
            feature = ernie.get_pooled_output()
            return feature

        def erniesage_v2_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name):
            feature = L.unsqueeze(feature, [-1])
            # Sum the per-edge ERNIE messages over each node's neighbours.
            msg = gw.send(ernie_send, nfeat_list=[("term_ids", feature)])
            neigh_feature = gw.recv(msg, lambda feat: F.layers.sequence_pool(feat, pool_type="sum"))

            # Encode the node's own text as a single-segment ERNIE input.
            term_ids = feature
            cls = L.fill_constant_batch_size_like(term_ids, [-1, 1, 1], "int64", 1)
            term_ids = L.concat([cls, term_ids], 1)
            term_ids.stop_gradient = True
            ernie = ErnieModel(
                term_ids, L.zeros_like(term_ids),
                config=self.config.ernie_config)
            self_feature = ernie.get_pooled_output()

            # Separate projections for the self ("_l") and neighbour ("_r") halves.
            self_feature = L.fc(self_feature,
                                hidden_size,
                                act=act,
                                param_attr=F.ParamAttr(name=name + "_l",
                                                       learning_rate=learning_rate),
                                )
            neigh_feature = L.fc(neigh_feature,
                                 hidden_size,
                                 act=act,
                                 param_attr=F.ParamAttr(name=name + "_r",
                                                        learning_rate=learning_rate),
                                 )
            # Concatenate the halves and L2-normalise the layer output.
            output = L.concat([self_feature, neigh_feature], axis=1)
            output = L.l2_normalize(output, axis=1)
            return output

        return erniesage_v2_aggregator(gw, feature, hidden_size, act, initializer, learning_rate, name)

    def gnn_layers(self, graph_wrappers, feature):
        """Stack config.num_layers layers; return all intermediate features."""
        features = [feature]
        initializer = None
        fc_lr = self.config.lr / 0.001
        for i in range(self.config.num_layers):
            # The last layer is linear; earlier layers use leaky_relu.
            if i == self.config.num_layers - 1:
                act = None
            else:
                act = "leaky_relu"
            feature = self.gnn_layer(
                graph_wrappers[i],
                feature,
                self.config.hidden_size,
                act,
                initializer,
                learning_rate=fc_lr,
                name="%s_%s" % ("erniesage_v2", i))
            features.append(feature)
        return features

    def __call__(self, graph_wrappers):
        # Build inputs, run the stacked layers, then gather the final
        # features for each requested index plus the original node ids.
        inputs = self.build_inputs()
        feature = inputs[-1]
        features = self.gnn_layers(graph_wrappers, feature)
        outputs = [self.take_final_feature(features[-1], i, "final_fc") for i in inputs[:-1]]
        src_real_index = L.gather(graph_wrappers[0].node_feat['index'], inputs[0])
        outputs.append(src_real_index)
        return inputs, outputs
class ErnieSageModelV2(BaseGNNModel):
    """Model wrapper that plugs the ErnieSageV2 network into BaseGNNModel."""
    def gen_net_fn(self, config):
        # Factory hook used by BaseGNNModel to build the underlying network.
        return ErnieSageV2(config)
| null | examples/erniesage/models/erniesage_v2.py | erniesage_v2.py | py | 4,208 | python | en | code | null | code-starcoder2 | 50 |
643000706 | #Time complexity: O(n)
#Space complexity: O(1)
#Works on leetcode: yes
#Approach: The idea here is to find the middle of the linked list and then reverse the second half of the linked list.
#After that, start traversing and comparing the first half and second half of the linked list. If any value is unequal,
#the linked list is a palindrome.
class Solution(object):
    """Decide whether a singly linked list reads the same in both directions.

    Strategy: locate the middle with slow/fast pointers, reverse the second
    half in place, then walk both halves comparing values.  O(n) time,
    O(1) extra space.  Note: the list is left with its second half reversed.
    """

    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        if head is None or head.next is None:
            return True
        # Advance to the node just before the middle.
        slow = fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        # Reverse the second half, then compare it against the first half.
        second = self.rev(slow.next)
        first = head
        while second:
            if second.val != first.val:
                return False
            second = second.next
            first = first.next
        return True

    def rev(self, head):
        """Reverse the list starting at *head*; return the new head."""
        prev = None
        node = head
        while node:
            node.next, prev, node = prev, node, node.next
        return prev
216516250 | from typing import List
class Solution:
    """LeetCode 322: fewest coins summing to *amount*, or -1 if impossible."""

    def coinChange(self, coins: List[int], amount: int) -> int:
        if amount == 0:
            return 0
        if not coins:
            return -1
        coins = sorted(coins)
        if amount < coins[0]:
            return -1
        INF = 1000000
        # best[v] = fewest coins producing value v (INF means unreachable).
        best = [INF] * (amount + 1)
        best[0] = 0
        for value in range(1, amount + 1):
            best[value] = min(
                [best[value]] + [best[value - c] + 1 for c in coins if c <= value]
            )
        return best[amount] if best[amount] < INF else -1
# Ad-hoc smoke test: LeetCode 322 sample input (expected minimum: 20 coins).
coins = [186, 419, 83, 408]
amount = 6249
result = Solution().coinChange(coins, amount)
print(result)
| null | coin_change.py | coin_change.py | py | 712 | python | en | code | null | code-starcoder2 | 50 |
422560165 | from __future__ import print_function
import contextlib
import functools
import os
import sys
import shutil
import subprocess
import time
from astropy.io import fits
from .backend import CoadditionBackend
from .backend import ProjectionBackend
from .backend import BackendError
@contextlib.contextmanager
def timer(template):
    """
    Context manager that prints the elapsed wall-clock time on normal exit.
    (If the body raises, nothing is printed -- matching the original.)

    Usage:

    .. code-block:: python

        with timer("Thing done in {seconds} seconds"):
            do_thing()
    """
    started = time.time()
    yield
    elapsed = time.time() - started
    print(template.format(seconds=elapsed))
def timed(template):
    """
    Decorator that measures and logs a function's execution time. Example:

    .. code-block:: python

        @timed("Thing done in {seconds} seconds")
        def do_thing():
            print("Foobar")
    """
    def decorator(fun):
        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            with timer(template):
                result = fun(*args, **kwargs)
            return result
        return wrapper
    return decorator
class TemporaryDirectoryShim(object):
    """Minimal stand-in for tempfile.TemporaryDirectory on old interpreters.

    The directory is created eagerly in __init__ and removed when the
    context manager exits.
    """

    def __init__(self, suffix="", prefix="tmp", dir=None):
        import tempfile
        self.name = tempfile.mkdtemp(suffix, prefix, dir)

    def __repr__(self):
        return "<{} {!r}>".format(type(self).__name__, self.name)

    def __enter__(self):
        return self.name

    def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self.name)
# Prefer the stdlib implementation (Python 3.2+); fall back to the shim
# where tempfile.TemporaryDirectory does not exist.  NOTE(review): the bare
# except also swallows non-ImportError failures.
try:
    from tempfile import TemporaryDirectory
except:
    TemporaryDirectory = TemporaryDirectoryShim
class SWarpBackend(ProjectionBackend, CoadditionBackend):
    """Projection/coaddition backend that shells out to the SWarp binary.

    Images are exchanged with SWarp through a temporary directory: inputs
    are written as FITS files, SWarp is invoked via subprocess, and the
    produced FITS file is read back into an astropy HDUList.
    """
    RESAMPLE_SUFFIX = ".resamp.fits"
    PROJECT = 0
    COADD = 1
    # Fixed command-line options per mode; user parameters are prepended.
    OPTIONS = {
        PROJECT: [
            "-resample", "y", "-combine", "n", "-center_type", "manual",
            "-resample_suffix", RESAMPLE_SUFFIX
        ],
        COADD: [
            "-resample", "n", "-combine", "y", "-center_type", "manual",
        ]
    }
    def get_command(self, input_path, output_path, user_parameters, ra, dec,
                    num_rows, num_cols, mode):
        """
        Build the swarp argument vector, e.g. ["swarp", "-resample", "y", ...].

        :param mode: either SWarpBackend.PROJECT or SWarpBackend.COADD
        :param user_parameters: iterable of (option_name, value) pairs
        :return: list of command parts suitable for subprocess.Popen
        """
        command = ["swarp"]
        for name, value in user_parameters:
            command.append(name)
            command.append(str(value))
        if not mode in (self.PROJECT, self.COADD):
            # NOTE(review): the "%s" is never interpolated here -- BackendError
            # receives two positional arguments instead of a formatted string.
            raise BackendError("Wrong mode: %s", mode)
        command.extend(self.OPTIONS[mode])
        # Center coordinates
        command.append("-center")
        command.append("{ra},{dec}".format(ra=ra, dec=dec))
        if mode == self.PROJECT:
            # SWarp writes resampled files into -resample_dir; the actual
            # output file name is derived from the input name (see _project).
            output_dir = os.path.dirname(output_path)
            command.append("-resample_dir")
            command.append(output_dir)
        else:
            # Maybe we should set resample_dir here too?
            command.append("-imageout_name")
            command.append(output_path)
        # Image size
        command.append("-image_size")
        command.append("{rows},{cols}".format(rows=num_rows, cols=num_cols))
        command.append(input_path)
        return command
    def get_process_stdout_stderr_returncode(self, command):
        """Run *command*, returning (stdout, stderr, returncode)."""
        popen = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        return popen.communicate() + (popen.returncode,)
    @timed("project() done in {seconds} seconds")
    def project(self, header, image, user_parameters=None):
        """
        Reproject *image* onto the grid described by *header*.

        :param header: astropy.io.fits.Header object (CRVAL1/2, NAXIS1/2)
        :param image: astropy.io.fits.HDUList object
        :return: astropy.io.fits.HDUList object
        """
        ra = header["CRVAL1"]
        dec = header["CRVAL2"]
        num_rows = header["NAXIS1"]
        num_cols = header["NAXIS2"]
        user_parameters = user_parameters or []
        with TemporaryDirectory() as temp_dir:
            input_path = os.path.join(temp_dir, "input.fits")
            with timer("Projection input image written in {seconds} seconds"):
                image.writeto(input_path)
            output_path = os.path.join(temp_dir, "output.fits")
            # NOTE(review): unlike coadd(), the return code of _project is
            # discarded -- a failed SWarp run is only detected indirectly.
            self._project(
                input_path,
                output_path,
                user_parameters,
                ra, dec, num_rows, num_cols
            )
            with timer("Projection output image read in {seconds} seconds"):
                with open(output_path, "rb") as output_file:
                    image_data = output_file.read()
                output_image = fits.HDUList.fromstring(image_data)
        return output_image
    def _project(self, input_path, output_path, user_parameters, ra, dec,
                 num_rows, num_cols):
        """Run SWarp in resample mode, then move its output to *output_path*."""
        input_path = os.path.abspath(input_path)
        output_path = os.path.abspath(output_path)
        output_dir = os.path.dirname(output_path)
        input_name = os.path.basename(input_path)
        command = self.get_command(
            input_path, output_path, user_parameters, ra, dec, num_rows,
            num_cols, mode=self.PROJECT
        )
        with timer("SWarp projection done in {seconds} seconds"):
            stdout, stderr, returncode = self.get_process_stdout_stderr_returncode(
                command
            )
        print("Projection stdout:", stdout, file=sys.stdout)
        print("Projection stderr:", stderr, file=sys.stderr)
        # Copy actual output file to the output path: SWarp names it
        # "<input basename without extension><RESAMPLE_SUFFIX>".
        try:
            # NOTE(review): rsplit(".") splits on every dot, so a name with
            # more than one dot raises ValueError and falls back to the full
            # name -- probably rsplit(".", 1) was intended.
            input_name_without_ext, ext = input_name.rsplit(".")
        except ValueError:
            input_name_without_ext = input_name
        actual_output_path = os.path.join(
            output_dir,
            input_name_without_ext + self.RESAMPLE_SUFFIX
        )
        msg = "Moved projection output to destination in {seconds} seconds"
        with timer(msg):
            shutil.move(actual_output_path, output_path)
        return returncode
    @timed("coadd() done in {seconds} seconds")
    def coadd(self, header, images, user_parameters=None):
        """
        Combine *images* into a single image on the grid given by *header*.

        :param header: astropy.io.fits.Header object (CRVAL1/2, NAXIS1/2)
        :param images: a list of astropy.io.fits.HDUList objects
        :return: astropy.io.fits.HDUList object
        :raises BackendError: when SWarp exits with a non-zero code
        """
        ra = header["CRVAL1"]
        dec = header["CRVAL2"]
        num_rows = header["NAXIS1"]
        num_cols = header["NAXIS2"]
        user_parameters = user_parameters or []
        input_paths = []
        with TemporaryDirectory() as temp_dir:
            # SWarp reads its inputs from an "@file" list, one path per line.
            input_list_path = os.path.join(temp_dir, "input_file_list")
            with timer("Coaddition input written in {seconds} seconds"):
                for i, image in enumerate(images):
                    input_path = os.path.join(
                        temp_dir, "input_{0}.fits".format(i)
                    )
                    # Write image to input path
                    image.writeto(input_path)
                    input_paths.append(input_path)
                with open(input_list_path, "w") as input_list_file:
                    to_write = "\n".join(path for path in input_paths) + "\n"
                    input_list_file.write(to_write)
            output_path = os.path.join(temp_dir, "output.fits")
            input_list_arg = "@{path}".format(path=input_list_path)
            return_code = self._coadd(
                input_list_arg,
                output_path,
                user_parameters,
                ra, dec,
                num_rows, num_cols
            )
            # NOTE(review): the output is read before the return code check,
            # so a failed run surfaces as a file-read error, not BackendError.
            with timer("Coaddition output read in {seconds} seconds"):
                with open(output_path, "rb") as output_file:
                    image_data = output_file.read()
                output_image = fits.HDUList.fromstring(image_data)
        if return_code != 0:
            raise BackendError("SWarp returned {code}".format(code=return_code))
        return output_image
    def _coadd(self, input_path, output_path, user_parameters, ra,
               dec, num_rows, num_cols):
        """Run SWarp in combine mode; return its exit code."""
        command = self.get_command(
            input_path, output_path, user_parameters, ra, dec, num_rows,
            num_cols, mode=self.COADD
        )
        with timer("SWarp coaddition done in {seconds} seconds"):
            stdout, stderr, returncode = self.get_process_stdout_stderr_returncode(
                command
            )
        print("Coaddition stdout:", stdout, file=sys.stdout)
        print("Coaddition stderr:", stderr, file=sys.stderr)
        return returncode
| null | worker/backend/swarp.py | swarp.py | py | 8,652 | python | en | code | null | code-starcoder2 | 50 |
439059818 | import pandas as pd
import requests
import settings
import pygame.mixer
import os
import time
# AITalk text-to-speech endpoint and local asset locations.
aitalk_url = "https://webapi.aitalk.jp/webapi/v2/ttsget.php"
csv_path = "./audio/dialogue.csv"
audio_folder = "./audio/"
# Status -> dialogue/filename lookup table, indexed by the first CSV column.
df = pd.read_csv(csv_path, index_col=0)
# The mixer must be initialised before any Sound object can be created.
pygame.mixer.init()
def get_dialogue(status):
    """Return the spoken line for *status*, echoing it to stdout."""
    line = df['dialogue'][status]
    print(line)
    return line
def get_filename(status):
    """Return the audio file name for *status*, echoing it to stdout."""
    fname = df['filename'][status]
    print(fname)
    return fname
def play_audio(status):
    """Play the clip registered for *status* and block until it finishes."""
    clip_path = audio_folder + df['filename'][status]
    print(clip_path)
    clip = pygame.mixer.Sound(clip_path)
    channel = clip.play()
    # Poll the channel until playback completes.
    while channel.get_busy():
        pygame.time.delay(100)
    # Small pause after the clip ends.
    pygame.time.delay(500)
def call_my_name(display_name):
    """Synthesize *display_name* with AITalk, play it, then play "start".

    A per-name wav file is created next to the other audio assets and
    deleted again once playback is done.
    """
    text = display_name
    filename = display_name + '.wav'
    request_aitalk(text, filename)
    file_path = audio_folder + filename
    audio = pygame.mixer.Sound(file_path)
    channel = audio.play()
    # wait to finish
    while channel.get_busy():
        pygame.time.delay(100)
    pygame.time.delay(300)
    play_audio("start")
    # remove audio file
    os.remove(file_path)
def ready_to_take():
    """Play the confirmation prompt, then pause briefly."""
    play_audio("ok")
    pygame.time.delay(200)

def count_down():
    """Play the 3-2-1 countdown, then the shutter sound."""
    # The three countdown clips share the same play-then-pause pattern,
    # so loop instead of repeating the pair three times.
    for cue in ("count3", "count2", "count1"):
        play_audio(cue)
        pygame.time.delay(500)
    audio = pygame.mixer.Sound(audio_folder + "shut.wav")
    channel = audio.play()
    # wait to finish
    while channel.get_busy():
        pygame.time.delay(100)
def request_aitalk(dialogue, filename):
    """Fetch TTS audio for *dialogue* from the AITalk web API.

    On HTTP 200 the wav payload is saved as audio_folder + filename;
    otherwise the response object is printed for debugging.
    """
    params = {
        'username': settings.AITALK_USERNAME,
        'password': settings.AITALK_PASSWORD,
        'text': dialogue,
        'speaker_name': 'miyabi_west',
        'input_type': 'text',
        'volume': 1.00, # volume
        'speed': 1.10, # speaking rate
        'pitch': 1.30, # voice pitch
        'range': 1.20, # intonation (pitch range)
        'ext': 'wav'
    }
    # get an audio file from AITALK
    response = requests.get(aitalk_url, params=params)
    if response.status_code == 200:
        with open(audio_folder + filename, 'wb') as saveFile:
            saveFile.write(response.content)
    else:
        print(response)
def get_audio(status):
    """Synthesize and store the audio clip for *status*."""
    # Resolve the line text and target filename, then fetch the clip
    # from AITalk and save it (argument evaluation is left-to-right,
    # so the lookups run in the same order as before).
    request_aitalk(get_dialogue(status), get_filename(status))
def main():
    """Ad-hoc manual test of the dialogue/TTS helpers."""
    get_dialogue("smile again")
    get_filename("smile again")
    get_audio("start")
    call_my_name("ドナルド・フォントルロイ・ダック")
    count_down()
    #request_aitalk("オバチャンが撮ったるで!", 'test.mp3')

if __name__ == '__main__':
    main()
| null | dialogue.py | dialogue.py | py | 2,754 | python | en | code | null | code-starcoder2 | 50 |
558180748 | #!/usr/bin/env python3
# coding=utf-8
# Author: Junjie Wang
# Mail: dreamboy.gns@sjtu.edu.cn
# Website:http://120.79.231.160
# Blog:http://120.79.231.160/wordpress
# Created Time: 2018-12-01 21:03:11
# -------------- For more details, refer to https://wiseodd.github.io/techblog/2015/10/17/metropolis-hastings/ -------------
# Notes:
# Gibbs sampling is a special case of M-H sampling: in Gibbs the acceptance ratio is 1. Besides, Gibbs samples from the conditional distributions while M-H samples from the joint distribution. So Gibbs is faster than M-H, but we have to perform mathematical analysis beforehand (e.g. derive the conditional distributions).
# Metropolis algorithm is also a special case of the M-H algorithm, namely when the transition matrix Q become symmetric
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import seaborn as sns
# Parameters of the correlated 2-D Gaussian target: mean vector and
# covariance matrix (strong positive correlation of 0.9).
mus = np.array([5,5])
sigmas = np.array([[1,.9],[.9,1]])
def circle(x,y):
    """Implicit circle of radius 3 centred at (1, 2); zero on the boundary."""
    dx = x - 1
    dy = y - 2
    return dx ** 2 + dy ** 2 - 9
def gGaussian(x,y):
    """Density of the bivariate Gaussian target (mean mus, covariance sigmas)."""
    return st.multivariate_normal.pdf([x,y],mean=mus,cov=sigmas)
# actually we are implementing the Metropolis algorithm, as here we assume that the Q matrix is symmetric
def m_h(p, iter=1000):
    """Metropolis sampler for the (unnormalised) density *p*.

    Returns an (iter, 2) array holding the chain, started at the origin.
    """
    cur_x, cur_y = 0.0, 0.0
    chain = np.zeros((iter, 2))
    for step in range(iter):
        # Symmetric Gaussian proposal around the current state.
        prop_x, prop_y = np.array([cur_x, cur_y]) + np.random.normal(size=2)
        # Metropolis acceptance test (proposal is symmetric, so the
        # Hastings correction term is 1).
        if np.random.rand() < p(prop_x, prop_y) / p(cur_x, cur_y):
            cur_x, cur_y = prop_x, prop_y
        chain[step, :] = np.array([cur_x, cur_y])
    return chain
if __name__ == '__main__':
    # Draw 10k samples from each target and show joint scatter plots.
    samples = m_h(circle,10000)
    sns.jointplot(samples[:,0],samples[:,1])

    samples = m_h(gGaussian,10000)
    sns.jointplot(samples[:,0],samples[:,1])
    plt.show()
| null | M_H_Sampling.py | M_H_Sampling.py | py | 1,693 | python | en | code | null | code-starcoder2 | 50 |
162549020 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class StockCollection(object):
    """An ordered collection of stock objects with correlation/chart helpers.

    Each stored stock is expected to expose get_historical_close(),
    get_name(), show_sma_chart(days) and show_ichimoku_chart().
    """

    def __init__(self):
        # Private (name-mangled) list of stocks in insertion order.
        self.__stocks = []

    def add_stock(self, stock):
        """Append *stock* to the collection."""
        self.__stocks.append(stock)

    def get_stock(self, index):
        """Return the stock stored at *index*."""
        return self.__stocks[index]

    def get_stocks(self):
        """Return the internal list of stocks."""
        return self.__stocks

    def count(self):
        """Return how many stocks are held."""
        return len(self.__stocks)

    def get_correlation(self):
        """Plot a heatmap of pairwise correlations of historical closes.

        The close series are merged on their shared (date) index before
        computing the correlation matrix.
        """
        all_stocks = self.__stocks[0].get_historical_close()
        tickers = [self.__stocks[0].get_name()]
        for stock in self.__stocks[1:]:
            all_stocks = pd.merge(all_stocks,
                                  stock.get_historical_close(),
                                  left_index=True,
                                  right_index=True)
            tickers.append(stock.get_name())
        all_stocks.columns = tickers
        corr = all_stocks.corr()
        plt.imshow(corr, cmap='hot', interpolation='nearest')
        sns.heatmap(
            corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns,
            annot=True,
            center=0,
            cmap=sns.diverging_palette(150, 10, as_cmap=True)
        )
        plt.show()

    def show_stock_list(self):
        """Print a 1-based numbered menu of stock names ([0] selects all)."""
        print("[0] All")
        for i, stock in enumerate(self.__stocks, start=1):
            print("[" + str(i) + "] " + stock.get_name())

    def show_all_sma_charts(self, days):
        """Show the SMA chart for every stock, in insertion order.

        Bug fix: the loop previously indexed __stocks[i - 1] with a
        0-based counter, so the charts started from the LAST stock
        (index -1) instead of the first.
        """
        for stock in self.__stocks:
            stock.show_sma_chart(days)

    def show_all_ichimoku_charts(self):
        """Show the Ichimoku chart for every stock, in insertion order."""
        for stock in self.__stocks:
            stock.show_ichimoku_chart()
| null | project/StockCollection.py | StockCollection.py | py | 1,674 | python | en | code | null | code-starcoder2 | 50 |
231728584 | # -*- coding: utf-8 -*-
# Scrapy settings for kindle project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'kindle'

SPIDER_MODULES = ['kindle.spiders']
NEWSPIDER_MODULE = 'kindle.spiders'
# DUPEFILTER_DEBUG = True
# NOTE(review): newer Scrapy expects ITEM_PIPELINES to be a dict of
# {path: order}; the list form only works on old releases — confirm the
# Scrapy version pinned for this project.
ITEM_PIPELINES = ['kindle.pipelines.KindlePipeline']

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'kindle (+http://www.yourdomain.com)'
#USER_AGENT = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36'
USER_AGENT = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/36.0.1985.125 Chrome/36.0.1985.125 Safari/537.36'

# Local proxy (e.g. Polipo in front of Tor) used by ProxyMiddleware below.
HTTP_PROXY = 'http://127.0.0.1:8123'

#disable the default useragent, enable the new useragent
DOWNLOADER_MIDDLEWARES = {
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware' : None,
    'kindle.spiders.random_useragent.RandomUserAgentMiddleware' :400,
    'kindle.spiders.middlewares.ProxyMiddleware': 410
}

# NOTE(review): Scrapy's real setting is spelled COOKIES_ENABLED — this
# misspelled name is silently ignored, so cookies stay ENABLED. Verify
# and rename if cookie suppression is actually wanted.
COOKIES_ENABLES=False
DOWNLOAD_DELAY = 0.2
119745185 | import hashlib,json
from collections import OrderedDict
class My_MerkTree:
    """A simple Merkle tree over a list of string transactions.

    Each level's (transaction -> hash) mapping is appended to
    past_transaction; listoftransaction is repeatedly replaced by the
    concatenated pair-hashes until a single root remains.
    """

    def __init__(self,listoftransaction=None):
        # Current working level of the tree (leaf strings at first).
        self.listoftransaction = listoftransaction
        # One OrderedDict per level, recording each node's hash.
        self.past_transaction = []

    def create_tree(self):
        """Build the tree recursively, one level per call.

        Pairs adjacent entries (duplicating the last one when the level
        has odd length), double-SHA256 hashes each entry, and recurses
        until the level collapses to a single node.
        """
        listoftransaction = self.listoftransaction
        past_transaction = self.past_transaction
        temp_transaction = []
        tempDict = OrderedDict()

        for index in range(0,len(listoftransaction),2):
            current = listoftransaction[index]
            # Odd-length level: pair the last element with itself.
            if index+1 != len(listoftransaction):
                current_right = listoftransaction[index+1]
            else:
                current_right = current

            # Double SHA-256: hash the hex digest of the first hash.
            current_hash = hashlib.sha256(hashlib.sha256(current.encode('utf-8')).hexdigest().encode('utf-8'))
            current_right_hash = hashlib.sha256(hashlib.sha256(current_right.encode('utf-8')).hexdigest().encode('utf-8'))

            tempDict[listoftransaction[index]] = current_hash.hexdigest()
            if index+1 != len(listoftransaction):
                tempDict[listoftransaction[index+1]] = current_right_hash.hexdigest()

            # Parent node: concatenation of the two child hex digests.
            temp_transaction.append(current_hash.hexdigest() + current_right_hash.hexdigest())

        past_transaction.append(tempDict)
        # Recurse until only the root remains at this level.
        if len(listoftransaction) != 1:
            self.listoftransaction = temp_transaction
            self.past_transaction = past_transaction
            self.create_tree()

    def get_past_transaction(self):
        """Return the list of per-level hash dictionaries."""
        return self.past_transaction

    def get_root_leaf(self):
        """Return the root hash (last entry of the last recorded level)."""
        last_key = list(self.past_transaction[-1].keys())[-1]
        return self.past_transaction[-1][last_key]
# Declare the main part of the function to run
if __name__ == "__main__":
# a) Create the new class of My_MerkTree
My_Tree = My_MerkTree()
# b) Give list of transaction
transaction = ['a','b','c','d']
# c) pass on the transaction list
My_Tree.listoftransaction = transaction
# d) Create the Merkle Tree transaction
My_Tree.create_tree()
# e) Retrieve the transaction
past_transaction = My_Tree.get_past_transaction()
# f) Get the last transaction and print all
print ("First Example - Even number of transaction Merkel Tree")
print ('Final root of the tree : ',My_Tree.get_root_leaf())
print(json.dumps(past_transaction, indent=4))
print ("-" * 50 )
# h) Second example
print ("Second Example - Odd number of transaction Merkel Tree")
My_Tree = My_MerkTree()
transaction = ['a','b','c','d','e']
My_Tree.listoftransaction = transaction
My_Tree.create_tree()
past_transaction = My_Tree.get_past_transaction()
print ('Final root of the tree : ',My_Tree.get_root_leaf())
print(json.dumps(past_transaction, indent=4))
print ("-" * 50 )
# i) Actual Use Case
print ("Final Example - Actuall use case of the Merkle Tree")
# i-1) Declare a transaction - the ground truth
ground_truth_Tree = My_MerkTree()
ground_truth_transaction = ['a','b','c','d','e']
ground_truth_Tree.listoftransaction = ground_truth_transaction
ground_truth_Tree.create_tree()
ground_truth_past_transaction = ground_truth_Tree.get_past_transaction()
ground_truth_root = ground_truth_Tree.get_root_leaf()
# i-2) Declare a tampered transaction
tampered_Tree = My_MerkTree()
tampered_Tree_transaction = ['a','b','c','d','f']
tampered_Tree.listoftransaction = tampered_Tree_transaction
tampered_Tree.create_tree()
tampered_Tree_past_transaction = tampered_Tree.get_past_transaction()
tampered_Tree_root = tampered_Tree.get_root_leaf()
# i-3) The three company share all of the transaction
print ('Company A - my final transaction hash : ',ground_truth_root)
print ('Company B - my final transaction hash : ',ground_truth_root)
print ('Company C - my final transaction hash : ',tampered_Tree_root)
# i-4) Print out all of the past transaction
print ("\n\nGround Truth past Transaction ")
print(json.dumps(ground_truth_past_transaction, indent=4))
print ("\n\nTamper Truth past Transaction ")
print(json.dumps(tampered_Tree_past_transaction, indent=4))
# ---- END OF THE CODE ------ | null | merkle.2.py | merkle.2.py | py | 3,848 | python | en | code | null | code-starcoder2 | 51 |
434005392 | import pandas as pd
from sklearn import svm, metrics
# Training data for the OR gate: columns 0-1 are inputs, column 2 is the
# label. The last row is deliberately mislabeled noise.
or_input = [
    [1,0,1],
    [0,1,1],
    [0,0,0],
    [1,1,1],
    [1,0,1],
    [0,1,1],
    [0,0,0],
    [1,1,1],
    [0,0,0],
    [1,1,0],
]

or_df = pd.DataFrame(or_input)

# Bug fix: DataFrame.ix was deprecated and removed from pandas.
# .ix[:, 0:1] on an integer column index was label-based and INCLUSIVE,
# i.e. columns 0 and 1 — the positional equivalent is .iloc[:, 0:2].
or_data = or_df.iloc[:, 0:2]
or_label = or_df.iloc[:, 2]

clf = svm.SVC()
clf.fit(or_data, or_label)

pre = clf.predict(or_data)

ac_score = metrics.accuracy_score(or_label, pre)
print("정답률 =", ac_score)  # accuracy; < 1.0 is expected since the data contains a noisy row
281551642 | '''DAILY TICKETS SENDING TO A WHATSAPP GROUP'''
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, WebDriverException
from datetime import datetime
from time import sleep
import socket
import os
def is_connected():
    """Return True if an outbound TCP connection can be made, else False.

    Bug fixes: the original dialled the unresolvable bareword host
    "Google", swallowed every exception with a bare except, and retried
    by unbounded recursion (never returning False, eventually hitting
    RecursionError). Connect to a well-known public endpoint instead and
    fail fast so callers decide how to retry.
    """
    try:
        # Google public DNS over TCP; the socket is closed by the with-block.
        with socket.create_connection(("8.8.8.8", 53), timeout=3):
            return True
    except OSError:
        return False
def getWeather(url):
    """Return the current weather description from an OpenWeatherMap URL."""
    payload = requests.get(url).json()
    # The API nests the human-readable summary under weather[0].
    return payload["weather"][0]["description"]
def getGoldRate(driver, url):
    """Scrape the 22K jewellery gold price from the page's table cells."""
    driver.get(url)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    # The price sits in the <td> immediately after the label cell; like
    # the original, the LAST matching label wins if several exist.
    cells = soup.findAll("td")
    for idx, cell in enumerate(cells):
        if cell.text == "22K-916 Jewellery":
            gold_price = cells[idx + 1].text
    return gold_price
def getCurrencyRate(driver, url):
    """Read the SGD→INR rate from Google's currency-converter widget."""
    driver.get(url)
    # The converted amount lives in the widget's input element.
    return driver.find_element_by_xpath("//input[@class = 'a61j6 vk_gy vk_sh Hg3mWc']").text
def sendMessage(driver, group_name, d, w, c):
    """Post the daily-ticket message to the WhatsApp Web group *group_name*.

    d is a datetime, w a weather description, c a currency-rate string.
    The message uses '~' as an internal line separator; each segment is
    typed followed by Shift+Enter so WhatsApp inserts a line break
    without sending. Sleeps give WhatsApp Web time to render.
    """
    try:
        find_user = driver.find_element_by_xpath('//span[@title = "{}"]'.format(group_name))
        sleep(15)
        find_user.click()
        text_box = driver.find_element_by_xpath("//div[@class = '_3u328 copyable-text selectable-text']")
        text_box.click()
        message = ("*Daily Tickets*" +
                   "~*********************************~TODAY~" + d.strftime("%a, %b %d, %Y") +
                   "~-------------------------------------~WEATHER~" + w +
                   "~-------------------------------------~CURRENCY EXCHANGE RATE~1 Singapore Dollar = " + c + " Indian rupees" +
                   "~-------------------------------------~SPICE UP~Have a nice day!")
        sleep(3)
        # typing message into the whatsapp message box
        for line in message.split("~"):
            text_box.send_keys(line)
            # Shift+Enter = newline inside the draft instead of sending.
            ActionChains(driver).key_down(Keys.SHIFT).key_down(Keys.ENTER).key_up(Keys.ENTER).key_up(
                Keys.SHIFT).perform()
            text_box.send_keys(Keys.BACKSPACE)
        sleep(2)
        text_box.send_keys(Keys.ENTER)
        sleep(2)
    except Exception as e:
        # Any selenium failure is reported as a missing group.
        print("{} Group doesn't exist!".format(group_name))
# MAIN MODULE STARTS HERE
group_name = "Deals & Recharge Offer"
# NOTE(review): the OpenWeatherMap API key is hard-coded here — move it
# to an environment variable or settings module.
urls = ["http://api.openweathermap.org/data/2.5/weather?q={}&APPID={}".format("Singapore",
                                                                              "0822b961597ff8fe4e300e208e4aaee6"),
        "https://www.mustafa.com.sg/",
        "https://www.google.com/search?q=singapore+to+india+currency+exchange+rate&oq=singapore+to+india+currency+exchange+rate&aqs=chrome..69i57.9300j0j9&sourceid=chrome&ie=UTF-8",
        "https://web.whatsapp.com/"]

#driver = webdriver.Chrome('C:\\ChromeDriver\\chromedriver.exe')
#driver.maximize_window()
d = datetime.now()
w = getWeather(urls[0])

# Headless Chrome configured for a Heroku-style environment: binary and
# chromedriver locations come from environment variables.
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)

c = getCurrencyRate(driver, urls[2])
driver.get(urls[3])
# Allow time to load WhatsApp Web (session must already be authenticated).
sleep(10)

sendMessage(driver, group_name, d, w, c)

driver.quit()
137754772 | '''
Created on Oct 17, 2019
@author: kerrylu
'''
import math
def distance(a, b):
    """Return the Euclidean distance between 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def minDistance(points):
    """Return the distance between the two closest distinct points.

    points (list of tuples of ints) - a list of tuples where each tuple
    is a point. Pairs whose coordinates are equal are ignored, matching
    the original's `p != q` value filter; raises ValueError if no such
    pair exists.

    Bug fix: the original kept two unreachable re-implementations after
    the first return statement (one of which overwrote its running
    minimum instead of comparing); only the first O(n^2)-with-duplicates
    pass ever executed. This version compares each unordered pair once.
    """
    return min(
        math.dist(points[j], points[k])
        for j in range(len(points))
        for k in range(j + 1, len(points))
        if points[j] != points[k]
    )
print(minDistance([(-7, 2), (-6, 6), (1, 2), (4, 5), (9, -6)])) | null | Lab07/minDistance.py | minDistance.py | py | 1,063 | python | en | code | null | code-starcoder2 | 51 |
79959994 | from __future__ import print_function
import tensorflow as tf
import numpy as np
from argparse import ArgumentParser
import os
import cv2
import sys
from yolo_model import YOLOModel
from rotation_invariance_model import RotationalInvarianceModel
def main():
    """Live-camera YOLO ROI detection demo with a rotation-model hand-off.

    Builds two separate TF graphs/sessions (detector + rotation model),
    loads both checkpoints, then loops over webcam frames drawing the
    highest-scoring detection box.
    """
    parser = ArgumentParser()
    parser.add_argument('--input-width', dest='input_width',
                        default=160, type=int, help='input image width')
    parser.add_argument('--input-height', dest='input_height',
                        default=120, type=int, help='input image height')
    parser.add_argument('--output-width', dest='output_width',
                        default=16, type=int, help='output image width')
    parser.add_argument('--output-height', dest='output_height',
                        default=12, type=int, help='output image height')
    parser.add_argument('--yolo-checkpoint', dest='yolo_checkpoint',
                        default='YOLO1/YOLO-200000',
                        type=str, help='checkpoint to load')
    parser.add_argument('--rotation-checkpoint', dest='rotation_checkpoint',
                        default='RotationalInvarianceModel6/RotationalInvarianceModel-100000',
                        type=str, help='checkpoint to load')
    parser.add_argument('--threshold', dest='threshold',
                        default=0.5, type=float, help='threshold for output')
    args = parser.parse_args()
    # yolo graph: model and session are isolated in their own tf.Graph.
    yolo_graph = tf.Graph()
    with yolo_graph.as_default() as g:
        yolo_model = YOLOModel(args.input_width, args.input_height, 3,
                               args.output_width, args.output_height, 1, saving=False)
        yolo_sess = tf.Session(graph=g)
    # rotation graph: a second, independent graph/session pair.
    rotation_graph = tf.Graph()
    with rotation_graph.as_default() as g:
        rotation_model = RotationalInvarianceModel(64, 3, 10,
                                                   model_name='RotationalInvarianceModel',
                                                   saving=False)
        rotation_sess = tf.Session(graph=g)
    # Only proceed when both checkpoints (.meta + .index) are present.
    if args.yolo_checkpoint != '' and \
            os.path.isfile(args.yolo_checkpoint + '.meta') and \
            os.path.isfile(args.yolo_checkpoint + '.index') and \
            args.rotation_checkpoint != '' and \
            os.path.isfile(args.rotation_checkpoint + '.meta') and \
            os.path.isfile(args.rotation_checkpoint + '.index'):
        yolo_model.load(yolo_sess, args.yolo_checkpoint)
        camera = cv2.VideoCapture(0)
        if camera.isOpened():
            while True:
                _, img = camera.read()
                # Resize to the network input size and add a batch dim.
                inputs = cv2.resize(img, (args.input_width, args.input_height))
                inputs = np.expand_dims(inputs, 0)
                xy_output, size_output, indicator = \
                    yolo_model.predict(yolo_sess, inputs)
                roi_display = np.copy(img)
                # Keep cells whose objectness exceeds the threshold.
                valid = (indicator > args.threshold).squeeze(-1)
                scores = np.sort(indicator[valid, :]).squeeze(-1)[::-1]
                xys = xy_output[valid, :]
                sizes = size_output[valid, :]
                print(scores)
                print('\rfound: %d | max score: %f' % (len(xys), indicator.max()))
                sys.stdout.flush()
                for i in range(len(xys)):
                    # Draw only the single best-scoring detection.
                    if indicator[valid, :][i] == scores[0]:
                        # Outputs are normalised; scale to the 640x480 frame.
                        x = xys[i, 0] * 640
                        y = xys[i, 1] * 480
                        w = sizes[i, 0] * 640
                        h = sizes[i, 1] * 480
                        p1 = (int(x - w / 2), int(y - h / 2))
                        p2 = (int(x + w / 2), int(y + h / 2))
                        cv2.rectangle(roi_display, p1, p2, (0, 255, 0), 2)
                        # NOTE(review): numpy arrays have no .expand_dim
                        # method (np.expand_dims is a module function), and
                        # `inputs` already carries a batch axis, so this crop
                        # indexes the batch dimension — this line raises at
                        # runtime and likely intended to crop `img` instead.
                        rotation_model.predict(rotation_sess,
                                               inputs[p1[1]:p2[1], p1[0]:p2[0], :].expand_dim(0))
                cv2.imshow('Camera image', img)
                cv2.imshow('Indicator',
                           cv2.resize(np.squeeze(indicator, axis=0), (640, 480)))
                cv2.imshow('ROI', roi_display)
                key = cv2.waitKey(10)
                # Quit on q/Q, Enter, or Escape.
                if key in [ord('q'), ord('Q'), 10, 27]:
                    break
    yolo_sess.close()
    rotation_sess.close()

if __name__ == '__main__':
    main()
| null | rotational-invariance/yolo_inference.py | yolo_inference.py | py | 3,614 | python | en | code | null | code-starcoder2 | 51 |
452260426 |
# Import the socket library.
import socket
# Create a socket object (IPv4, TCP stream).
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Establish the TCP connection to a local server on port 8888.
s.connect(("127.0.0.1",8888))
# Receive data (a single chunk of up to 1024 bytes).
buffer = []
d = s.recv(1024)
buffer.append(d)
# Join the received byte chunks together.
data = b''.join(buffer)
print(data)
# Close the connection.
s.close()
232442902 | #! /usr/bin/env python3
import vcf
import httplib2
import json
__author__ = 'Gerhard Bilek'
##
##
## Aim of this assignment is to annotate the variants with various attributes
## We will use the API provided by "myvariant.info" - more information here: https://docs.myvariant.info
## NOTE NOTE! - check here for hg38 - https://myvariant.info/faq
## 1) Annotate the first 900 variants in the VCF file
## 2) Store the result in a data structure (not in a database)
## 3) Use the data structure to answer the questions
##
## 4) View the VCF in a browser
##
class Assignment3:
    """Annotate VCF variants via the myvariant.info API and summarise them."""

    def __init__(self):
        ## Check if pyvcf is installed
        print("PyVCF version: %s" % vcf.VERSION)
        ## Call annotate_vcf_file here
        self.vcf_path = "chr16.vcf" # TODO

    @property
    def annotate_vcf_file(self):
        '''
        - Annotate the VCF file using the following example code (for 1 variant)
        - Iterate of the variants (use first 900)
        - Store the result in a data structure
        :return: list of per-variant annotation dicts (parsed JSON)
        '''
        print("TODO")
        ##
        ## Example loop
        ##
        ## Build the connection
        h = httplib2.Http()
        headers = {'content-type': 'application/x-www-form-urlencoded'}
        params_pos = [] # List of variant positions
        with open(self.vcf_path) as my_vcf_fh:
            vcf_reader = vcf.Reader(my_vcf_fh)
            # Collect HGVS-style ids (chr:g.POSREF>ALT) for the first 900 records.
            for counter, record in enumerate(vcf_reader):
                params_pos.append(record.CHROM + ":g." + str(record.POS) + record.REF + ">" + str(record.ALT[0]))
                if counter >= 899:
                    break
        ## Build the parameters using the list we just built
        params = 'ids=' + ",".join(params_pos) + '&hg38=true'
        #print("Params_Pos: ", params_pos)
        #print("Params: ", params)
        ## Perform annotation
        res, con = h.request('http://myvariant.info/v1/variant', 'POST', params, headers=headers)
        annotation_result = con.decode('utf-8')
        ## Generate json object
        jsonobject = json.loads(annotation_result)
        return(jsonobject)

    def get_list_of_genes(self, jsonobject):
        '''
        Print the name of genes in the annotation data set
        :return:
        '''
        for object in jsonobject:
            if 'cadd' in object:
                if 'genename' in object['cadd']['gene']:
                    print(object['cadd']['gene']['genename'])
        #for object in jsonobject:
        #    if 'dbsnp' in object:
        #        if 'genename' in object['dbsnp']['gene']:
        #            print(object['dbsnp']['gene']['genename'])

    def get_num_variants_modifier(self, jsonobject):
        '''
        Print the number of variants with putative_impact "MODIFIER"
        :return: count of matching variants
        '''
        '''
        for object in jsonobject:
            if 'cadd' in object:
                if 'putative_impact' in object['ann']:
                    #if 'putative_impact' in object:
                    print("boom")
        '''
        counter = 0
        for object in jsonobject:
            if 'snpeff' in object: # (???) snpeff , cadd
                key, value = "putative_impact", "MODIFIER"
                if key in object['snpeff']['ann'] and value == object['snpeff']['ann']['putative_impact']:
                    counter += 1
        return(counter)

    def get_num_variants_with_mutationtaster_annotation(self, jsonobject):
        '''
        Print the number of variants with a 'mutationtaster' annotation
        :return: count of matching variants
        '''
        counter = 0
        for object in jsonobject:
            if 'dbnsfp' in object:
                if 'mutationtaster' in object['dbnsfp']:
                    counter+=1
        return(counter)

    def get_num_variants_non_synonymous(self, jsonobject):
        '''
        Print the number of variants with 'consequence' 'NON_SYNONYMOUS'
        :return: count of matching variants
        '''
        counter = 0
        for object in jsonobject:
            if 'cadd' in object:
                key, value = "consequence", "NON_SYNONYMOUS"
                if key in object['cadd'] and value == object['cadd']['consequence']: # the value must be checked together with the key
                    counter += 1
        return counter

    def view_vcf_in_browser(self):
        '''
        - Open a browser and go to https://vcf.iobio.io/
        - Upload the VCF file and investigate the details
        :return:
        '''
        ## Document the final URL here
        print("The vcf file has been compressed and indexed via iTabixIt.app. The two resulting files, the compressed file (gz) and the index file (gz.tbi) were uploaded to https://vcf.iobio.io/")
        print("Resutls: https://vcf.iobio.io/?species=Human&build=GRCh38")

    def print_summary(self):
        """Run all analyses once and print their results."""
        annoData = self.annotate_vcf_file # property access: no parentheses needed
        #for object in annoData: print(object) # json objects
        print()
        print("List of Genes:") # 9
        self.get_list_of_genes(annoData)
        print()
        print("Num of Modifier: ", self.get_num_variants_modifier(annoData)) # 4
        print()
        print("Num of Mutationtaster: ", self.get_num_variants_with_mutationtaster_annotation(annoData)) #5
        print()
        print("Num of nonSynonymous: ", self.get_num_variants_non_synonymous(annoData))
        print()
        print(self.view_vcf_in_browser())
        print()
def main():
    """Entry point: run the full assignment-3 summary."""
    print("Assignment 3")
    assignment3 = Assignment3()
    assignment3.print_summary()
    print("Done with assignment 3")

if __name__ == '__main__':
    main()
| null | assignment3.py | assignment3.py | py | 5,772 | python | en | code | null | code-starcoder2 | 51 |
653446250 | import subprocess
import numpy as np
import os
def SplitType(csv_flag):
    """Return the field delimiter: ',' for CSV input, ' ' otherwise."""
    return "," if csv_flag else " "
def OpenTUMPose(pose_path, csv_flag):
    """Parse a TUM-format pose file into a list of string-field rows.

    Fields are comma-separated when csv_flag is true, space-separated
    otherwise; rows whose first field starts with '#' are comments.
    """
    sep = "," if csv_flag else " "
    poses = []
    with open(pose_path, encoding="utf-8") as fh:
        for raw in fh.readlines():
            fields = raw.strip('\n').split(sep)
            if fields[0][0] == "#":
                continue
            poses.append(fields)
    return poses
def SaveTUMPose(save_path, Poses, csv_flags):
    """Write poses to *save_path*.

    With csv_flags true, the 8-column rows are cast to float and written
    as a headerless CSV; otherwise each pose is written as a
    space-separated 8-field TUM line.
    """
    if(csv_flags):
        # Bug fix: this branch used `pd` but the module never imported
        # pandas, so every CSV save raised NameError.
        import pandas as pd
        data = np.array(Poses)
        data_float = data.astype(float)
        pd.DataFrame(data_float).to_csv(save_path, index=None, header=None)
    else:
        with open(save_path, 'w', encoding='utf-8') as f:
            for pose in Poses:
                f.write("{} {} {} {} {} {} {} {}\n".format(pose[0], pose[1], pose[2], pose[3], pose[4], pose[5], pose[6], pose[7]))
def ReadSequence(list_path):
    """Read sequence names (one per line); lines starting with '#' are skipped."""
    names = []
    with open(list_path) as fh:
        for raw in fh.readlines():
            entry = raw.strip('\n')
            if entry[0] == "#":
                continue
            names.append(entry)
    print("all_seq", names)
    return names
def Evaluate(gt_path,evaluate_path,seq_list,iter_num,erase_num):
    """Run evo_ape over every sequence for iter_num runs; return RMSE table.

    For run i and sequence s, compares <gt_path>/<s>.tum against
    <evaluate_path>/<s>_<i>.txt. If erase_num != 0, the first erase_num
    poses of the estimate are dropped first (via GetPartPose). A failed
    or rmse-less run records the sentinel value 1e3. Returns a list of
    per-run lists of floats (one entry per sequence).
    """
    results = list()
    for i in range(1,iter_num + 1):
        print("for {} time".format(i))
        one_result = list()
        for j in range(len(seq_list)):
            print_flag = False
            gt_seq_path = gt_path + "/" + seq_list[j] + ".tum"
            #eva_seq_path = evaluate_path + "/" + seq_list[j] + "_{}".format(i) + ".txt"
            eva_seq_path = evaluate_path + "/" + seq_list[j] + "_{}".format(i) + ".txt"
            eva_part_path = evaluate_path + "/" + seq_list[j] + "_{}".format(i) + "part.txt"
            if(erase_num != 0):
                GetPartPose(eva_seq_path,erase_num,eva_part_path)
                cmd_eva = "evo_ape tum {} {} -a -p".format(gt_seq_path,eva_part_path)
                eva_count = len(open(eva_part_path,'r').readlines())
            else:
                cmd_eva = "evo_ape tum {} {} -a".format(gt_seq_path,eva_seq_path)
                eva_count = len(open(eva_seq_path,'r').readlines())
            # NOTE(review): shell=True with interpolated paths is shell-
            # injection-prone if paths ever contain special characters.
            res = subprocess.check_output(cmd_eva, shell=True)
            # str(bytes) keeps literal backslash escapes, hence the
            # splits on "\\n" / "\\t" below instead of real newlines/tabs.
            res = str(res)
            print(cmd_eva)
            result = res.split("\\n")
            gt_count = len(open(gt_seq_path,'r').readlines())
            for info in result:
                detail = info.split("\\t")
                if(detail[0].strip() == "rmse"):
                    print(seq_list[j],detail[1],gt_count,eva_count)
                    one_result.append(float(detail[1]))
                    print_flag = True
            if not print_flag:
                # No rmse line found: record a large sentinel value.
                print(seq_list[j],"wrong")
                one_result.append(float(1e3))
            if(erase_num != 0):
                # Clean up the temporary truncated pose file.
                cmd_erase = "rm {}".format(eva_part_path)
                os.system(cmd_erase)
        results.append(one_result)
    return results
def GetAverageResult(results):
    """Print the per-sequence mean APE across runs (column-wise mean)."""
    mean_ape = np.mean(np.array(results), axis=0)
    print(mean_ape)
def SaveResult(results, save_path):
    """Save results as CSV, transposed so each row holds one sequence."""
    np.savetxt(save_path, np.array(results).transpose(), delimiter=",")
def GetPartPose(old_path, erase_num, new_path):
    """Drop the first *erase_num* poses of *old_path*; save the rest as TUM."""
    poses = OpenTUMPose(old_path, False)
    keep = int(len(poses)) - int(erase_num)
    SaveTUMPose(new_path, poses[-keep:], False)
# Driver: evaluate every EuRoC sequence 10 times (no leading poses
# erased) and save the per-run APE table as CSV.
gt_path = "/home/zhouxin/evaluation/EUROC/gt_s"
eva_path = "/media/zhouxin/66D231E0D231B4E12/Dataset/RPVIO/EuRoc/csv_results"
seq_list_path = "/home/zhouxin/evaluation/EUROC/seq_list_all.txt"
seq_list = ReadSequence(seq_list_path)
result_save_path = "/home/zhouxin/evaluation/EUROC/result.csv"
results = Evaluate(gt_path,eva_path,seq_list,10,0)
GetAverageResult(results)
SaveResult(results,result_save_path)
| null | EUROC/evaluation.py | evaluation.py | py | 3,942 | python | en | code | null | code-starcoder2 | 51 |
187513857 | import sublime
import sublime_plugin
import os
import golangconfig
from .gotools_util import Buffers
from .gotools_util import GoBuffers
from .gotools_util import Logger
from .gotools_util import ToolRunner
class GotoolsGuruCommand(sublime_plugin.TextCommand):
    """Sublime Text command that runs Go's `guru` tool at the cursor."""

    def is_enabled(self):
        # Only offer the command in Go source buffers.
        return GoBuffers.is_go_source(self.view)

    def run(self, edit, command=None):
        """Run the given guru *command* (mode) at the current cursor position."""
        if not command:
            Logger.log("command is required")
            return

        filename, row, col, offset, offset_end = Buffers.location_at_cursor(self.view)

        # freevars takes a byte range; every other mode takes one offset.
        if command == "freevars":
            pos = filename+":#"+str(offset)+","+"#"+str(offset_end)
        else:
            pos = filename+":#"+str(offset)

        # Build up a package scope containing all packages the user might have
        # configured.
        package_scope = []
        project_package = golangconfig.setting_value("project_package", view=self.view)[0]
        if project_package:
            if not golangconfig.setting_value("build_packages")[0]:
                package_scope.append(project_package)
            else:
                for p in golangconfig.setting_value("build_packages", view=self.view)[0]:
                    package_scope.append(os.path.join(project_package, p))

        # add local package to guru scope
        if golangconfig.setting_value("guru_use_current_package")[0]:
            current_file_path = os.path.realpath(os.path.dirname(self.view.file_name()))
            toolpath, env = golangconfig.subprocess_info('guru', ['GOPATH', 'PATH'], view=self.view)
            GOPATH = os.path.realpath(env["GOPATH"])
            GOPATH = os.path.join(GOPATH,"src")
            # Package import path = file's path relative to $GOPATH/src.
            local_package = os.path.relpath(current_file_path, GOPATH)
            if sublime.platform() == 'windows':
                local_package = local_package.replace('\\', '/')
            Logger.status("GOPATH: "+GOPATH)
            Logger.status("local_package: "+local_package)
            package_scope.append(local_package)

        sublime.active_window().run_command("hide_panel", {"panel": "output.gotools_guru"})

        self.do_plain_guru(command, pos, package_scope)

    # NOTE(review): mutable default `package_scope=[]` is shared across
    # calls; it is never mutated here, but `None` would be safer.
    def do_plain_guru(self, mode, pos, package_scope=[], regex="^(.*):(\d+):(\d+):(.*)$"):
        """Invoke guru and show its output in the gotools_guru panel.

        *regex* lets Sublime turn file:line:col output lines into
        clickable navigation targets.
        """
        Logger.status("running guru "+mode+"...")

        args = []
        if len(package_scope) > 0:
            args = ["-scope", ",".join(package_scope)]
        args = args + [mode, pos]

        output, err, rc = ToolRunner.run(self.view, "guru", args, timeout=60)
        Logger.log("guru "+mode+" output: " + output.rstrip())

        if rc != 0:
            print("GoTools: Guru error:\n%s" % err)
            Logger.status("guru call failed (" + str(rc) +")")
            return

        Logger.status("guru "+mode+" finished")
        panel = self.view.window().create_output_panel('gotools_guru')
        panel.set_scratch(True)
        panel.settings().set("result_file_regex", regex)
        # Clear any previous panel contents before appending new output.
        panel.run_command("select_all")
        panel.run_command("right_delete")
        panel.run_command('append', {'characters': output})
        self.view.window().run_command("show_panel", {"panel": "output.gotools_guru"})
| null | gotools_guru.py | gotools_guru.py | py | 2,927 | python | en | code | null | code-starcoder2 | 51 |
606287825 | import RPi.GPIO as GPIO
import threading
import time
RELEASE_REVOLUTIONS = 2
class Motor(threading.Thread):
    """Drives a vending-machine spiral motor via Raspberry Pi GPIO.

    write_pin switches the motor on/off; read_pin reads a rotation
    sensor whose 0->1 transitions mark half revolutions.
    NOTE(review): this subclasses threading.Thread but defines no run()
    method, so start() would do nothing useful — confirm whether the
    thread base class is still needed.
    """

    def __init__(self, write_pin, read_pin):
        threading.Thread.__init__(self)
        self.motor_write_pin = write_pin
        self.motor_read_pin = read_pin
        self.gpio_setup()
        # Half-revolution counter accumulated while releasing an item.
        self.revolutions = 0

    def gpio_setup(self):
        """Configure the read pin as input and the write pin as output (off)."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        # BCM pin 2 has a fixed hardware pull-up, so no software pull-up there.
        if self.motor_read_pin == 2:
            GPIO.setup(self.motor_read_pin, GPIO.IN)
        else:
            GPIO.setup(self.motor_read_pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)
        GPIO.setup(self.motor_write_pin, GPIO.OUT)
        GPIO.output(self.motor_write_pin, 0)

    def release_item(self):
        """Run the motor until RELEASE_REVOLUTIONS full turns have elapsed."""
        GPIO.output(self.motor_write_pin, 1)
        item_released = 0
        last_status = 1
        while not item_released:
            status = GPIO.input(self.motor_read_pin)
            # Rising edge on the sensor = half a revolution completed.
            if status == 1 and last_status == 0:
                self.revolutions += 0.5
                if self.revolutions == RELEASE_REVOLUTIONS:
                    GPIO.output(self.motor_write_pin, 0)
                    print("Item released!")
                    self.revolutions = 0
                    item_released = 1
            last_status = status
            # Poll the sensor at ~10 Hz.
            time.sleep(0.1)
def solution(array, commands):
    """For each command [i, j, k], sort array[i-1:j] and take its k-th element.

    Returns the list of picked values, one per command, leaving *array*
    untouched. The original aliased the input and re-sliced it on every
    iteration; slicing already copies, so a comprehension with sorted()
    does the same work directly.
    """
    return [sorted(array[i - 1:j])[k - 1] for i, j, k in commands]
77599074 | import sys
import csv
import math
import os
def ReadFile (filename = "C:/Users/VictorKiraydt/Documents/git/filmopoisk/server/resources/ratings.csv"):
    """Parse up to 1000 ratings rows into {userId: {movieId: rating}}.

    The first line of the CSV is assumed to be a header and is skipped.
    """
    ratings = {}
    with open(filename) as fh:
        rows = csv.reader(fh)
        next(rows)  # skip the header row
        read = 0
        for row in rows:
            # Cap at the first 1000 data rows, as before.
            if read == 1000:
                break
            user, movie = row[0], row[1]
            ratings.setdefault(user, {})[movie] = float(row[2])
            read += 1
    return ratings
def distCosine (vecA, vecB):
    """Cosine similarity of two sparse vectors given as {dim: value} dicts."""
    def dot(u, v):
        # Sum over the dimensions of u that also appear in v, in u's order.
        return sum(u[dim] * v[dim] for dim in u if dim in v)
    return dot(vecA, vecB) / math.sqrt(dot(vecA, vecA)) / math.sqrt(dot(vecB, vecB))
def makeRecommendation(userId, userRates, nBestUsers, nBestProducts):
    """Print up to nBestProducts user-based CF recommendations for userId.

    Parameters
    ----------
    userId : key into userRates for the target user.
    userRates : dict of {user: {movieId: rating}}.
    nBestUsers : number of most-similar neighbours to use.
    nBestProducts : number of recommendations to print (as JSON-ish lines).
    """
    matches = [(u, distCosine(userRates[userId], userRates[u]))
               for u in userRates if u != userId]
    # BUG FIX: rank neighbours by similarity (x[1]); the original sorted the
    # (user, similarity) tuples lexicographically, i.e. by user id.
    bestMatches = sorted(matches, key=lambda x: x[1], reverse=True)[:nBestUsers]
    sim = dict()
    # NOTE(review): sim_all can be 0.0 if no neighbour has positive similarity,
    # which would raise ZeroDivisionError below — same as the original.
    sim_all = sum(x[1] for x in bestMatches)
    bestMatches = dict(x for x in bestMatches if x[1] > 0.0)
    for relatedUser in bestMatches:
        for product in userRates[relatedUser]:
            # Only recommend items the target user has not rated yet.
            if product not in userRates[userId]:
                sim[product] = sim.get(product, 0.0) + \
                    userRates[relatedUser][product] * bestMatches[relatedUser]
    for product in sim:
        sim[product] /= sim_all
    # BUG FIX: rank products by predicted score, not by product id.
    bestProducts = sorted(sim.items(), key=lambda x: x[1], reverse=True)[:nBestProducts]
    for prodInfo in bestProducts:
        print('{ "movieId": "%6s", "correlationCoeff": "%6.4f" }' % (prodInfo[0], prodInfo[1]))
if __name__ == "__main__":
    # CLI usage: python collaborativeFiltering.py <userId> <nBestUsers> <nBestProducts>
    # (This rewrite removes dataset-extraction residue fused onto the last line.)
    userId = str(sys.argv[1])
    nBestUsers = int(sys.argv[2])
    nBestProducts = int(sys.argv[3])
    makeRecommendation(userId, ReadFile(), nBestUsers, nBestProducts)
import numpy as np
import cv2
from matplotlib import pyplot as plt
"""
ORB是一种想要代替SIFT和SURF的算法,它更快。
1 FAST features from accelerated segment test,绘制16像素的圆
2 BRIEF Binary Robust Independt Elementary Features
3 暴力匹配
"""
img1 = cv2.imread('4.jpeg', 0)
img2 = cv2.imread('5.jpeg', 0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x:x.distance)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:100], img2, flags=2)
plt.imshow(img3)
plt.show() | null | 读书笔记《opencv3 计算机视觉 python语言实现》/第6章图像检索/6.1.4_orb.py | 6.1.4_orb.py | py | 694 | python | en | code | null | code-starcoder2 | 51 |
# A function named unique_common that accepts two lists both of which
# contain integers as parameters and returns a sorted list (ascending
# order) which contains unique common elements from both the lists. If
# there are no common elements between the two lists, then your function
# should return the keyword None.
# method 1
def unique_common1(a, b):
    """Return a sorted list of the unique elements present in both a and b.

    Returns None when the lists share no elements, as required by the
    exercise statement at the top of this file (the original returned []).
    """
    common = sorted(set(a) & set(b))
    return common if common else None
# method 2
def unique_common2(a, b):
    """Sorted list of unique elements common to a and b; None when there are none."""
    shared = []
    for item in a:
        if item in b and item not in shared:
            shared.append(item)
    shared.sort()
    # Empty list means no common elements -> the spec asks for None.
    return shared or None
def list_create():
    """Interactively build a list of ints: ask for a count, then each element."""
    count = int(input("Enter number of elements : "))
    return [int(input("enter list element : ")) for _ in range(count)]
# Driver code test
# Read two integer lists from the user, then print their sorted unique
# common elements (unique_common1 returns a list, or [] when none).
my_list1 = list_create()
my_list2 = list_create()
result = unique_common1(my_list1, my_list2)
print(result)
################### Instructor function ###################
def _unique_common_elements_sample_ed2_(a, b):
common = []
for items in a:
if items in b:
common.append(items)
if not common:
return None
unique = []
for items in common[:]:
if items not in unique:
unique.append(items)
return sorted(unique) | null | 09 - Midterm Exam/08 Midterm Exam, Part 8 (Unique Common Elements).py | 08 Midterm Exam, Part 8 (Unique Common Elements).py | py | 1,606 | python | en | code | null | code-starcoder2 | 50 |
## Tickets File
def insertTickets(filetype, json, cursor, conn, uid):
    """Insert one row into the `tickets` table from a 'tickets' payload.

    Only keys actually present in the payload become columns; appendJsonKey
    keeps the column-name dict and the value list in the same order, so the
    set-iteration order of featureAttrs is harmless.
    """
    if (filetype == 'tickets'):
        featureAttrs = {'tickets', 'timestamp', 'uid'}
        cnt = 0
        tblName = 'tickets'
        cntattr = 0
        keytypevals = {}
        values = []
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(json, tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        # The DB column for the JSON 'timestamp' field is named 'epoch'.
        renameArrayItem(keytypevals, 'timestamp', 'epoch')
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Setup File
def insertSetup(filetype, json, cursor, conn, uid):
    """Insert one row into the `setup` table from a 'setup' payload.

    Columns are built dynamically from whichever of the expected keys are
    present in the JSON payload.
    """
    if (filetype == 'setup'):
        featureAttrs = {'age', 'city', 'country', 'datatransmit_time', 'education', 'gender', 'phoneusefrequency', 'timezone', 'uid', 'webusefrequency', 'latitude', 'longitude', 'timestamp', 'datatransmit_charging', 'datatransmit_wifi'}
        cnt = 0
        tblName = 'setup'
        cntattr = 0
        keytypevals = {}
        values = []
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(json, tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Web File
def insertWeb(filetype, json, cursor, conn, uid):
    """Dispatch a 'web' HAR-like payload: pages first, then their entries
    (entries reference page rows, so the order matters)."""
    if (filetype == 'web'):
        web_page_node(json,uid,cursor,conn) # [pages] / [pageNode]
        web_entry_node(json, uid, cursor, conn) # [pages] / [entriesNode]
def web_entry_response(json_entries_node, uid, cursor, conn, parentid):
    """Insert the HTTP response part of one HAR entry into `web_entries_response`.

    Combines three column groups: top-level response fields (required),
    selected response headers (optional) and content metadata (optional),
    linked to the parent `web_entries` row via web_entries_id = parentid.
    """
    tblName = 'web_entries_response'
    # Required top-level response fields (direct indexing: raises KeyError if absent).
    featureAttrs = {'status', 'statusText', 'httpVersion', 'cookieNumber', 'redirectURL', 'headersSize', 'bodySize'}
    # Optional response headers and content fields (skipped if absent).
    featureAttrs2 = {'Date', 'Server', 'X-Powered-By', 'Content-Encoding', 'Content-Length', 'Keep-Alive', 'Connection', 'Content-Type'}
    featureAttrs3 = {'size', 'compression', 'mimeType', 'encoding'}
    vals = {}
    values = []
    cntattr = 0
    for tis in featureAttrs:
        vals[cntattr] = tis
        values.append(json_entries_node['response'][tis])
        cntattr = cntattr + 1
    # Foreign key back to the web_entries row.
    vals[cntattr] = 'web_entries_id'
    values.append(parentid)
    cntattr = cntattr + 1
    attrsInJson,typesInJson = toCommaStringDict(vals)
    #print type(attrsInJson)
    #print attrsInJson
    vals2 = {}
    values2 = []
    cntattr2 = 0
    for tis2 in featureAttrs2:
        vals2,values2 = appendJsonKey(json_entries_node['response']['headers'], tis2, vals2, values2, cntattr2)
        cntattr2 = cntattr2 + 1
    # Map HTTP header names onto legal/prefixed DB column names.
    renameArrayItem(vals2, 'Date', 'header_Date')
    renameArrayItem(vals2, 'Server', 'header_Server')
    renameArrayItem(vals2, 'X-Powered-By', 'header_XPoweredBy')
    renameArrayItem(vals2, 'Content-Encoding', 'header_ContentEncoding')
    renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
    renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
    renameArrayItem(vals2, 'Connection', 'header_Connection')
    renameArrayItem(vals2, 'Content-Type', 'header_ContentType')
    attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
    #print type(attrsInJson2)
    #print attrsInJson2
    vals3 = {}
    values3 = []
    cntattr3 = 0
    for tis3 in featureAttrs3:
        vals3,values3 = appendJsonKey(json_entries_node['response']['content'], tis3, vals3, values3, cntattr3)
        cntattr3 = cntattr3 + 1
    renameArrayItem(vals3, 'size', 'content_size')
    renameArrayItem(vals3, 'compression', 'content_compression')
    renameArrayItem(vals3, 'mimeType', 'content_mimeType')
    renameArrayItem(vals3, 'encoding', 'content_encoding')
    attrsInJson3,typesInJson3 = toCommaStringDict(vals3)
    #print type(attrsInJson3)
    #print attrsInJson3
    # Append the optional column groups only when they produced any columns,
    # keeping names/placeholders/values aligned.
    attrsInJsonCombined = attrsInJson
    typesInJsonCombined = typesInJson
    if ( attrsInJson2 != ''):
        attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
        typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
        values.extend(values2)
    if ( attrsInJson3 != ''):
        attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson3
        typesInJsonCombined = typesInJsonCombined + ',' + typesInJson3
        values.extend(values3)
    dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_request(json_entries_node, uid, cursor, conn, parentid):
    """Insert the HTTP request part of one HAR entry into `web_entries_request`.

    Required top-level request fields plus optional request headers, linked to
    the parent `web_entries` row via web_entries_id = parentid.
    """
    tblName = 'web_entries_request'
    featureAttrs = {'method', 'url', 'httpVersion', 'cookieNumber', 'headerSize', 'bodySize'}
    featureAttrs2 = {'Host', 'User-Agent', 'Accept', 'Accept-Encoding', 'Connection', 'Content-Length', 'Keep-Alive'}
    vals = {}
    values = []
    cntattr = 0
    for tis in featureAttrs:
        vals[cntattr] = tis
        values.append(json_entries_node['request'][tis])
        cntattr = cntattr + 1
    # Foreign key back to the web_entries row.
    vals[cntattr] = 'web_entries_id'
    values.append(parentid)
    cntattr = cntattr + 1
    attrsInJson,typesInJson = toCommaStringDict(vals)
    #print type(attrsInJson)
    #print attrsInJson
    vals2 = {}
    values2 = []
    cntattr2 = 0
    for tis2 in featureAttrs2:
        vals2,values2 = appendJsonKey(json_entries_node['request']['headers'], tis2, vals2, values2, cntattr2)
        cntattr2 = cntattr2 + 1
    # Map HTTP header names onto legal/prefixed DB column names.
    renameArrayItem(vals2, 'Host', 'header_Host')
    renameArrayItem(vals2, 'User-Agent', 'header_UserAgent')
    renameArrayItem(vals2, 'Accept', 'header_Accept')
    renameArrayItem(vals2, 'Accept-Encoding', 'header_AcceptEncoding')
    renameArrayItem(vals2, 'Connection', 'header_Connection')
    renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
    renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
    attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
    #print type(attrsInJson2)
    #print attrsInJson2
    # Append the optional header columns only when some were present.
    attrsInJsonCombined = attrsInJson
    typesInJsonCombined = typesInJson
    if ( attrsInJson2 != ''):
        attrsInJsonCombined = attrsInJson + ',' + attrsInJson2
        typesInJsonCombined = typesInJson + ',' + typesInJson2
        values.extend(values2)
    dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_node(json, uid, cursor, conn):
    """Insert every HAR entry of every page into `web_entries`, then insert its
    request and response child rows.

    NOTE(review): the parent id is obtained via getMaxId() immediately after
    the insert, which assumes no concurrent writer touches `web_entries`
    between the two statements — confirm this runs single-writer.
    """
    tblName = 'web_entries'
    # Column groups: entry-level fields, timing breakdown, cache statistics.
    featureAttrs = {'pageid', 'entryStartTime', 'time', 'serverIPAddress', 'connection'}
    featureAttrs2 = {'blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl'}
    featureAttrs3 = {'beforeRequestCacheEntries', 'afterRequestCacheEntries', 'hitCount'}
    for jiv in json['pages']:
        for innerjiv in jiv['entriesNode']:
            cntattr = 0
            attrsInJson = ''
            typesInJson = ''
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(innerjiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            cntattr2 = 0
            attrsInJson2 = ''
            typesInJson2 = ''
            keytypevals2 = {}
            values2 = []
            for tis2 in featureAttrs2:
                keytypevals2,values2 = appendJsonKey(innerjiv['timings'], tis2, keytypevals2, values2, cntattr2)
                cntattr2 = cntattr2 + 1
            attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
            cntattr3 = 0
            attrsInJson3 = ''
            typesInJson3 = ''
            keytypevals3 = {}
            values3 = []
            for tis3 in featureAttrs3:
                keytypevals3,values3 = appendJsonKey(innerjiv['cache'], tis3, keytypevals3, values3, cntattr3)
                cntattr3 = cntattr3 + 1
            attrsInJson3,typesInJson3 = toCommaStringDict(keytypevals3)
            ##combine
            # NOTE(review): unlike web_entry_response, empty groups are joined
            # unconditionally here; an entry with no timings/cache keys would
            # yield a stray comma in the SQL — confirm payloads always have them.
            attrsInJsonCombined = attrsInJson + ',' + attrsInJson2 + ',' + attrsInJson3
            typesInJsonCombined = typesInJson + ',' + typesInJson2 + ',' + typesInJson3
            values.extend(values2)
            values.extend(values3)
            #insert
            dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
            ##entry request
            web_entry_id = getMaxId(tblName,cursor,conn)
            web_entry_request(innerjiv, uid, cursor, conn, web_entry_id)
            web_entry_response(innerjiv, uid, cursor, conn, web_entry_id)
def web_page_node(json, uid, cursor, conn):
    """Insert one `web_pages` row per page in the payload, tagged with uid.

    The JSON 'pageid' field maps onto the table's 'id' column.
    """
    tblName = 'web_pages'
    featureAttrs = {'tabid', 'pageStartTime', 'pageid', 'pagetitle', 'pageOnContentLoad', 'pageOnLoad', 'origin'}
    # cntattr is deliberately not reset per page: the dict keys are arbitrary
    # ints and each page starts from a fresh keytypevals anyway.
    cntattr = 0
    for jiv in json['pages']:
        attrsInJson = ''
        typesInJson = ''
        keytypevals = {}
        values = []
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(jiv['pageNode'], tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        keytypevals[cntattr] = 'uid'
        cntattr = cntattr + 1
        values.append(uid)
        renameArrayItem(keytypevals, 'pageid', 'id')
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Tab File
def insertTabs(filetype, json, cursor, conn, uid):
    """Insert one `web_tabs` row per tab event in a 'tabs' payload, tagged with uid."""
    if (filetype == 'tabs'):
        featureAttrs = {'timestamp', 'tabid', 'tabstatus'}
        cnt = 0
        for jiv in json['tabs']:
            tblName = 'web_tabs'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            keytypevals[cntattr] = 'uid'
            cntattr = cntattr + 1
            values.append(uid)
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Info File
def insertInfo(filetype, json, cursor, conn, uid):
    """Insert a single `web_info` row (browser/version/timestamp) tagged with uid."""
    if (filetype == 'info'):
        featureAttrs = {'timestamp', 'version', 'browser'}
        tblName = 'web_info'
        cntattr = 0
        keytypevals = {}
        values = []
        for tis in featureAttrs:
            keytypevals,values = appendJsonKey(json, tis, keytypevals, values, cntattr)
            cntattr = cntattr + 1
        keytypevals[cntattr] = 'uid'
        cntattr = cntattr + 1
        values.append(uid)
        attrsInJson,typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## App File
def insertApps(filetype, json, cursor, conn, uid):
    """Insert one `apps` row per record in an 'apps' payload.

    Here the payload is a list of records; uid is expected inside each record
    (the uid parameter is unused, kept for signature consistency).
    """
    if (filetype == 'apps'):
        featureAttrs = {'uid', 'timestamp', 'app'}
        cnt = 0
        for jiv in json:
            tblName = 'apps'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Feature File
def insertFeatures(filetype, json, cursor, conn, uid):
    """Insert one `features` row per device record, flattening the optional
    nested 'screensize' object into screen_height/screen_width columns."""
    if (filetype == 'features'):
        featureAttrs = {'manufacturer', 'model', 'timestamp', 'uid', 'version'}
        cnt = 0
        for jiv in json:
            tblName = 'features'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in featureAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            # DB column for the JSON 'timestamp' field is named 'epoch'.
            renameArrayItem(keytypevals, 'timestamp', 'epoch')
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            if (isJsonKey(jiv, 'screensize')):
                featureAttrs2 = {'height', 'width'}
                cntattr2 = 0
                keytypevals2 = {}
                values2 = []
                for tis2 in featureAttrs2:
                    keytypevals2,values2 = appendJsonKey(jiv['screensize'], tis2, keytypevals2, values2, cntattr2)
                    cntattr2 = cntattr2 + 1
                renameArrayItem(keytypevals2, 'height', 'screen_height')
                renameArrayItem(keytypevals2, 'width', 'screen_width')
                attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
            #combine
            # NOTE(review): attrsInJson2 is only defined when 'screensize'
            # exists; the check below relies on a previous iteration's value
            # otherwise (NameError on the first record without it) — verify.
            attrsInJsonCombined = attrsInJson
            typesInJsonCombined = typesInJson
            if ( attrsInJson2 != ''):
                attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
                typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
                values.extend(values2)
            dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
## Network File
def insertNetwork(filetype, json, cursor, conn, uid):
    """Insert a 'network' payload into `network` plus its child tables.

    For each record: one `network` row, then optional child rows for
    availableNetworks, capabilities and linkProperties (which itself has
    mLinkAddresses/mRoutes grandchildren).  Child rows are linked to their
    parent via getMaxId() right after the parent insert, so this assumes a
    single writer — NOTE(review): confirm no concurrent inserts.
    """
    if (filetype == 'network'):
        networkAttrs = {'BSSID', 'IP', 'MAC', 'RSSI', 'SSID', 'detailedState', 'extraInfo', 'frequency', 'hasInternet', 'linkSpeed', 'mobileStatus', 'netID', 'signalStrength', 'timestamp', 'wiMaxStatus', 'wifiStatus'}
        cnt = 0
        for jiv in json:
            tblName = 'network'
            cntattr = 0
            keytypevals = {}
            values = []
            for tis in networkAttrs:
                keytypevals,values = appendJsonKey(jiv, tis, keytypevals, values, cntattr)
                cntattr = cntattr + 1
            keytypevals[cntattr] = 'uid'
            cntattr = cntattr + 1
            values.append(uid)
            # DB column for the JSON 'timestamp' field is named 'epoch'.
            renameArrayItem(keytypevals, 'timestamp', 'epoch')
            attrsInJson,typesInJson = toCommaStringDict(keytypevals)
            dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
            if isJsonKey(jiv, 'availableNetworks'):
                for innerjiv in jiv['availableNetworks']:
                    innernetworkAttrs = {'BSSID', 'SSID', 'capabilities'}
                    tblNameinner = 'network_availableNetwork'
                    innercount = 0
                    keytypevalsinner = {}
                    valuesinner = []
                    cntattrinner = 0
                    for tisinner in innernetworkAttrs:
                        keytypevalsinner,valuesinner = appendJsonKey(innerjiv, tisinner, keytypevalsinner, valuesinner, cntattrinner)
                        cntattrinner = cntattrinner + 1
                    # Link the child row to the network row just inserted.
                    maxID = getMaxId(tblName, cursor, conn)
                    keytypevalsinner[cntattrinner] = 'network_id'
                    cntattrinner = cntattrinner + 1
                    valuesinner.append(maxID)
                    attrsInJsoninner,typesInJsoninner = toCommaStringDict(keytypevalsinner)
                    dbinsert(tblNameinner,attrsInJsoninner,typesInJsoninner,cursor,valuesinner,conn)
            if isJsonKey(jiv, 'capabilities'):
                for innerjiv2 in jiv['capabilities']:
                    # Best-effort: a malformed capability record is skipped silently.
                    try:
                        innernetworkAttrs2 = {'mLinkDownBandwidthKbps', 'mLinkUpBandwidthKbps', 'mNetworkCapabilities', 'mSignalStrength', 'mTransportTypes'}
                        tblNameinner2 = 'network_capabilities'
                        innercount2 = 0
                        keytypevalsinner2 = {}
                        valuesinner2 = []
                        cntattrinner2 = 0
                        for tisinner2 in innernetworkAttrs2:
                            keytypevalsinner2,valuesinner2 = appendJsonKey(innerjiv2, tisinner2, keytypevalsinner2, valuesinner2, cntattrinner2)
                            cntattrinner2 = cntattrinner2 + 1
                        maxID2 = getMaxId(tblName, cursor, conn)
                        keytypevalsinner2[cntattrinner2] = 'network_id'
                        cntattrinner2 = cntattrinner2 + 1
                        valuesinner2.append(maxID2)
                        attrsInJsoninner2,typesInJsoninner2 = toCommaStringDict(keytypevalsinner2)
                        dbinsert(tblNameinner2,attrsInJsoninner2,typesInJsoninner2,cursor,valuesinner2,conn)
                    except:
                        dummy = 0
            if isJsonKey(jiv, 'linkProperties'):
                for innerjiv3 in jiv['linkProperties']:
                    # Best-effort: a malformed linkProperties record is skipped silently.
                    try:
                        innernetworkAttrs3 = {'mDomains', 'mIfaceName', 'mMtu', 'mTcpBufferSizes'}
                        # mDnses is a list and is concatenated into one string column.
                        innernetworkAttrs3b = {'mDnses'}
                        tblNameinner3 = 'network_linkProperties'
                        innercount3 = 0
                        keytypevalsinner3 = {}
                        valuesinner3 = []
                        cntattrinner3 = 0
                        for tisinner3 in innernetworkAttrs3:
                            keytypevalsinner3,valuesinner3 = appendJsonKey(innerjiv3, tisinner3, keytypevalsinner3, valuesinner3, cntattrinner3)
                            cntattrinner3 = cntattrinner3 + 1
                        keytypevalsinner3b = {}
                        valuesinner3b = []
                        for tisinner3b in innernetworkAttrs3b:
                            keytypevalsinner3b,valuesinner3b = appendJsonKeyConcat(innerjiv3, tisinner3b, keytypevalsinner3b, valuesinner3b, cntattrinner3)
                            cntattrinner3 = cntattrinner3 + 1
                        maxID3 = getMaxId(tblName, cursor, conn)
                        keytypevalsinner3[cntattrinner3] = 'network_id'
                        cntattrinner3 = cntattrinner3 + 1
                        valuesinner3.append(maxID3)
                        attrsInJsoninner3,typesInJsoninner3 = toCommaStringDict(keytypevalsinner3)
                        attrsInJsoninner3b,typesInJsoninner3b = toCommaStringDict(keytypevalsinner3b)
                        #combine
                        attrsInJsonCombined = attrsInJsoninner3
                        typesInJsonCombined = typesInJsoninner3
                        if ( attrsInJsoninner3b != ''):
                            attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJsoninner3b
                            typesInJsonCombined = typesInJsonCombined + ',' + typesInJsoninner3b
                            valuesinner3.extend(valuesinner3b)
                        dbinsert(tblNameinner3,attrsInJsonCombined,typesInJsonCombined,cursor,valuesinner3,conn)
                        if isJsonKey(innerjiv3, 'mLinkAddresses'):
                            for innerjiv5 in innerjiv3['mLinkAddresses']:
                                innernetworkAttrs5 = {'address', 'flags', 'prefixLength', 'scope'}
                                tblNameinner5 = 'network_linkProperties_mLinkAddresses'
                                keytypevalsinner5 = {}
                                valuesinner5 = []
                                cntattrinner5 = 0
                                for tisinner5 in innernetworkAttrs5:
                                    keytypevalsinner5,valuesinner5 = appendJsonKey(innerjiv5, tisinner5, keytypevalsinner5, valuesinner5, cntattrinner5)
                                    cntattrinner5 = cntattrinner5 + 1
                                # Grandchild rows link to the linkProperties row.
                                maxID5 = getMaxId(tblNameinner3, cursor, conn)
                                keytypevalsinner5[cntattrinner5] = 'network_linkProperties_id'
                                cntattrinner5 = cntattrinner5 + 1
                                valuesinner5.append(maxID5)
                                attrsInJsoninner5,typesInJsoninner5 = toCommaStringDict(keytypevalsinner5)
                                dbinsert(tblNameinner5,attrsInJsoninner5,typesInJsoninner5,cursor,valuesinner5,conn)
                        if isJsonKey(innerjiv3, 'mRoutes'):
                            for innerjiv6 in innerjiv3['mRoutes']:
                                innernetworkAttrs6 = {'mGateway', 'mHasGateway', 'mInterface', 'mIsHost', 'mType'}
                                tblNameinner6 = 'network_linkProperties_mRoutes'
                                keytypevalsinner6 = {}
                                valuesinner6 = []
                                cntattrinner6 = 0
                                for tisinner6 in innernetworkAttrs6:
                                    keytypevalsinner6,valuesinner6 = appendJsonKey(innerjiv6, tisinner6, keytypevalsinner6, valuesinner6, cntattrinner6)
                                    cntattrinner6 = cntattrinner6 + 1
                                maxID6 = getMaxId(tblNameinner3, cursor, conn)
                                keytypevalsinner6[cntattrinner6] = 'network_linkProperties_id'
                                cntattrinner6 = cntattrinner6 + 1
                                valuesinner6.append(maxID6)
                                attrsInJsoninner6,typesInJsoninner6 = toCommaStringDict(keytypevalsinner6)
                                dbinsert(tblNameinner6,attrsInJsoninner6,typesInJsoninner6,cursor,valuesinner6,conn)
                    except:
                        dummy = 0
            cnt = cnt + 1
## Sensor File (temperature and humidity not coded)
def insertSensor(filetype, json, cursor, conn, session):
    """Route a 'sensors' payload to the right store by json['sensor'] type.

    Low-rate sensors (Location, WiFi, Battery, Connection, ...) go to the
    relational DB via dbinsert(cursor/conn); high-rate axis sensors
    (MagneticField, Gyroscope, Light, Accelerometer) go to Cassandra via
    cassandraInsert(session).  'timestamp' maps to the 'epoch' column.
    """
    if (filetype == 'sensors' and json['sensor'] == 'Location'):
        # Parent row, then one child row per location fix.
        tblName = 'sensor_location'
        fields = 'uid, epoch, configAccuracy'
        fieldTypes = '%s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['configAccuracy']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
        # NOTE(review): getMaxId between inserts assumes a single writer.
        maxID = getMaxId(tblName, cursor, conn)
        cnt = 0
        for jiv in json['sensorData']['locations']:
            tblName = 'sensor_location_data'
            fields = 'sensor_location_id, latitude, longitude, accuracy, speed, bearing, provider, time, local_time'
            fieldTypes = '%s, %s, %s, %s, %s, %s, %s, %s, %s'
            values = [maxID,
            json['sensorData']['locations'][cnt]['latitude'],
            json['sensorData']['locations'][cnt]['longitude'],
            json['sensorData']['locations'][cnt]['accuracy'],
            json['sensorData']['locations'][cnt]['speed'],
            json['sensorData']['locations'][cnt]['bearing'],
            json['sensorData']['locations'][cnt]['provider'],
            json['sensorData']['locations'][cnt]['time'],
            json['sensorData']['locations'][cnt]['local_time']]
            dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
            cnt = cnt + 1
    if (filetype == 'sensors' and json['sensor'] == 'WiFi'):
        # Parent row, then one child row per scanned access point.
        tblName = 'sensor_WiFi'
        fields = 'uid, epoch, senseCycles'
        fieldTypes = '%s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['senseCycles']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
        maxID = getMaxId(tblName, cursor, conn)
        cnt = 0
        for jiv in json['sensorData']['scanResult']:
            tblName = 'sensor_WiFi_scanResult'
            fields = 'sensor_WiFi_id, ssid, bssid, capabilities, level, frequency'
            fieldTypes = '%s, %s, %s, %s, %s, %s'
            values = [maxID,
                json['sensorData']['scanResult'][cnt]['ssid'],
                json['sensorData']['scanResult'][cnt]['bssid'],
                json['sensorData']['scanResult'][cnt]['capabilities'],
                json['sensorData']['scanResult'][cnt]['level'],
                json['sensorData']['scanResult'][cnt]['frequency'] ]
            dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
            cnt = cnt + 1
    if (filetype == 'sensors' and json['sensor'] == 'MagneticField'):
        # High-rate axis samples: one Cassandra row per timestamp.
        tblName = 'betterXkeyspace.sensor_magneticfield'
        cnt = 0
        for jiv in json['sensorData']['sensorTimeStamps']:
            fields = 'uid, xAxis, yAxis, zAxis, epoch'
            fieldTypes = '%s, %s, %s, %s, %s'
            values = [json['uid'], json['sensorData']['xAxis'][cnt], json['sensorData']['yAxis'][cnt], json['sensorData']['zAxis'][cnt], json['sensorData']['sensorTimeStamps'][cnt] ]
            cassandraInsert(tblName,fields,fieldTypes,values,session)
            cnt = cnt + 1
    if (filetype == 'sensors' and json['sensor'] == 'Gyroscope'):
        tblName = 'betterXkeyspace.sensor_gyroscope'
        cnt = 0
        for jiv in json['sensorData']['sensorTimeStamps']:
            fields = 'uid, xAxis, yAxis, zAxis, epoch'
            fieldTypes = '%s, %s, %s, %s, %s'
            values = [json['uid'],
                json['sensorData']['xAxis'][cnt],
                json['sensorData']['yAxis'][cnt],
                json['sensorData']['zAxis'][cnt],
                json['sensorData']['sensorTimeStamps'][cnt] ]
            cassandraInsert(tblName,fields,fieldTypes,values,session)
            cnt = cnt + 1
    if (filetype == 'sensors' and json['sensor'] == 'Light'):
        tblName = 'betterXkeyspace.sensor_light'
        fields = 'uid, epoch, light, maxRange'
        fieldTypes = '%s, %s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['light'],json['sensorData']['maxRange'] ]
        cassandraInsert(tblName,fields,fieldTypes,values,session)
    if (filetype == 'sensors' and json['sensor'] == 'Battery'):
        tblName = 'sensor_battery'
        fields = 'uid, epoch, level, scale, temp, voltage, plugged, status, health'
        fieldTypes = '%s, %s, %s, %s, %s, %s, %s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['level'],json['sensorData']['scale'],json['sensorData']['temperature'],json['sensorData']['voltage'], json['sensorData']['plugged'],json['sensorData']['status'], json['sensorData']['health'] ]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'Connection'):
        tblName = 'sensor_connection'
        fields = 'uid, epoch, connected, connecting, available, networkType, roaming, ssid'
        fieldTypes = '%s, %s, %s, %s, %s, %s, %s, %s'
        # 'ssid' is optional in the payload; fall back to NULL when absent.
        try:
            values = [json['uid'], json['timestamp'], json['sensorData']['connected'],json['sensorData']['connecting'],json['sensorData']['available'],json['sensorData']['networkType'], json['sensorData']['roaming'],json['sensorData']['ssid'] ]
        except:
            values = [json['uid'], json['timestamp'], json['sensorData']['connected'],json['sensorData']['connecting'],json['sensorData']['available'],json['sensorData']['networkType'], json['sensorData']['roaming'], None ]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'ConnectionStrength'):
        tblName = 'sensor_connectionStrength'
        fields = 'uid, epoch, strength'
        fieldTypes = '%s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['strength']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'PassiveLocation'):
        tblName = 'sensor_passiveLocation'
        fields = 'uid, epoch, latitude, longitude, accuracy, speed, bearing, provider, time'
        fieldTypes = '%s, %s, %s, %s, %s, %s, %s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['latitude'],
            json['sensorData']['longitude'], json['sensorData']['accuracy'], json['sensorData']['speed'],
            json['sensorData']['bearing'], json['sensorData']['provider'], json['sensorData']['time']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'PhoneState'):
        tblName = 'sensor_phoneState'
        fields = 'uid, epoch, eventType, data'
        fieldTypes = '%s, %s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['eventType'],json['sensorData']['data']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'Screen'):
        tblName = 'sensor_screen'
        fields = 'uid, epoch, status'
        fieldTypes = '%s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['status']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'StepCounter'):
        tblName = 'sensor_stepCounter'
        fields = 'uid, epoch, stepCount'
        fieldTypes = '%s, %s, %s'
        values = [json['uid'], json['timestamp'], json['sensorData']['stepCount']]
        dbinsert(tblName,fields,fieldTypes,cursor,values,conn)
    if (filetype == 'sensors' and json['sensor'] == 'Accelerometer'):
        tblName = 'betterXkeyspace.sensor_accelerometer'
        cnt = 0
        for jiv in json['sensorData']['sensorTimeStamps']:
            fields = 'uid, xAxis, yAxis, zAxis, epoch'
            fieldTypes = '%s, %s, %s, %s, %s'
            values = [json['uid'],
                json['sensorData']['xAxis'][cnt],
                json['sensorData']['yAxis'][cnt],
                json['sensorData']['zAxis'][cnt],
                json['sensorData']['sensorTimeStamps'][cnt] ]
            cassandraInsert(tblName,fields,fieldTypes,values,session)
            cnt = cnt + 1
## Helper Functions
def cassandraInsert(tblName, fields, fieldTypes, valuesList, session):
    """Fire an asynchronous parameterised INSERT at a Cassandra session.

    The unused local `jiv = 0` from the original has been removed; the SQL
    string produced is byte-identical.
    """
    session.execute_async(
        "INSERT INTO " + tblName + " (" + fields + ") VALUES (" + fieldTypes + ")",
        valuesList)
def dbinsert(tblName, fields, fieldTypes, cursor, values, conn):
    """Execute a parameterised INSERT on *cursor* and commit it on *conn*."""
    statement = "insert into {} ({}) values ({})".format(tblName, fields, fieldTypes)
    cursor.execute(statement, values)
    conn.commit()
def getMaxId(tblName, cursor, conn):
    """Return MAX(id) of *tblName* as a string ('None' when the table is empty).

    *conn* is unused but kept for signature compatibility with callers.
    """
    cursor.execute("select max(id) from " + tblName)
    rows = cursor.fetchall()
    return str(rows[0][0])
def isJsonKey(json, tisKey):
    """Return True when *tisKey* is a key of the dict *json*.

    Replaces a manual items() scan that contained an unreachable `break`
    after `return` with the idiomatic membership test (same result).
    """
    return tisKey in json
def appendJsonKey(json, key, vals, values, cntattr):
    """If *key* exists in *json*, record its name at vals[cntattr] and append
    its value to *values*; otherwise leave both untouched.  Returns (vals, values)."""
    if key in json:
        vals[cntattr] = str(key)
        values.append(json[key])
    return vals, values
def toCommaStringDict(keytypevals):
    """Build the backtick-quoted column list and matching %s placeholder list.

    Returns ('`c1`,`c2`,...', '%s,%s,...'); both empty strings for an empty dict.
    """
    names = ",".join("`" + keytypevals[key] + "`" for key in keytypevals)
    placeholders = ",".join("%s" for _ in keytypevals)
    return names, placeholders
def renameArrayItem(arr, frm, to):
    """Replace every value equal to *frm* in dict *arr* with *to*, in place.

    The original wrapped the comparison in try/except, but `==` between the
    string values stored here cannot raise, so the dead handler was removed.
    Returns *arr* for convenience.
    """
    for key in arr:
        if arr[key] == frm:
            arr[key] = to
    return arr
def appendJsonKeyConcat(json, key, vals, values, cntattr):
    """Like appendJsonKey, but joins a list-valued entry into one
    space-separated, stripped string before appending it."""
    if key in json:
        joined = ""
        for item in json[key]:
            joined = (joined + " " + item).strip()
        vals[cntattr] = str(key)
        values.append(joined)
    return vals, values
# Day 5: Poisson Distribution 2 (HackerRank)
# For X ~ Poisson(lambda): E[X^2] = Var(X) + E[X]^2 = lambda + lambda^2.
# Daily cost C = c0 + 40*X^2, so E[C] = c0 + 40*(lambda + lambda^2).
# (This rewrite removes dataset seq-id residue that made the first line a
# syntax error and translates the Korean comments.)
a, b = map(float, input().split())
result1 = 160 + 40 * (a + a ** 2)
result2 = 128 + 40 * (b + b ** 2)
print(round(result1, 3))
print(round(result2, 3))
| null | hackerrank/statistics/5. poisson_distribution_2.py | 5. poisson_distribution_2.py | py | 310 | python | en | code | null | code-starcoder2 | 50 |
# Undirected road network: (city_a, city_b, distance in miles).
# (This rewrite removes dataset seq-id residue that made the first line a
# syntax error; the data itself is unchanged.)
distancias = [
    ("Alabaster", "Birmingham", 24),
    ("Alabaster", "Montgomery", 71),
    ("Birmingham", "Huntsville", 103),
    ("Birmingham", "Tuscaloosa", 59),
    ("Demopolis", "Mobile", 141),
    ("Demopolis", "Montgomery", 101),
    ("Demopolis", "Tuscaloosa", 65),
    ("Mobile", "Montgomery", 169),
    ("Montgomery", "Tuscaloosa", 134)
]
''' Funcao auxiliar para inverter cidades
como tuplas sao tipo estatico de dado preciso criar uma nova tupla
'''
def swap(l):
    """Return a new 3-tuple with the first two cities exchanged
    (tuples are immutable, hence the fresh tuple)."""
    return (l[1], l[0], l[2])
''' Inverte as cidades entre si
'''
def inverte_cidades(triplas):
    """Return a list of the triples with each pair of cities swapped."""
    return [(cidade_b, cidade_a, dist) for (cidade_a, cidade_b, dist) in triplas]
def distancia(cidade_entrada, cidade_saida):
    """Direct road distance between two cities, or -1 when not directly linked.

    Checks the global `distancias` table in both directions; forward matches
    take precedence over reversed ones, as in the original.
    """
    candidatos = [t for t in distancias
                  if t[0] == cidade_entrada and t[1] == cidade_saida]
    candidatos += [t for t in distancias
                   if t[0] == cidade_saida and t[1] == cidade_entrada]
    return candidatos[0][2] if candidatos else -1
''' Lista de conexoes eh qualquer entrada da lista distancias cujo
primeiro elemento eh a cidade de entrada e o segundo elemento eh a cidade de saida
ou o primeiro elemento eh a cidade de saida e o segundo elemento eh a cidade de entrada
'''
def conexoes(entra):
    """All edges touching *entra*, normalised so *entra* is the first element."""
    saindo = [t for t in distancias if t[0] == entra]
    chegando = inverte_cidades(t for t in distancias if t[1] == entra)
    return saindo + chegando
''' Funcao recursiva para calcular a distancia entre duas cidades entra e destino
A primeira chamada cid_visitadas deve ser [] e tot_dist deve ser 0, elas
sao utilizadas para as chamadas recursivas.
A ideia deste algoritmo eh pegar todas minhas cidades visinhas de entra e calcular a distancia
recursivamente entre esta vizinha e a cidade destino.
Quando alguma vizinha == destino, entao fim.
'''
def calc_dist(entra, destino, cid_visitadas, tot_dist):
    """Recursive path search from *entra* to *destino* over the distancias graph.

    First call: cid_visitadas=[] and tot_dist=0 (they carry recursion state).
    Returns (path_as_list_of_cities, total_distance).

    NOTE(review): when *destino* is a direct neighbour the function returns
    immediately, without comparing against longer-hop routes that might be
    shorter overall — confirm this greedy cut-off is intended.
    """
    cid_visitadas = cid_visitadas + [entra] # copy so the caller's list is not mutated
    conexoes_saida = conexoes(entra)
    cidades_vizinhas = list(map(lambda x: x[1], conexoes_saida))
    dist_vizinhas = list(map(lambda x: x[2], conexoes_saida))
    # If the destination is a direct neighbour, finish here.
    if(destino in cidades_vizinhas):
        cid_visitadas.append(destino)
        return (cid_visitadas, distancia(entra, destino) + tot_dist)
    dist_min = 9999999999999  # sentinel "infinity"
    visitadas = []
    # Otherwise recurse into each unvisited neighbour and keep the cheapest result.
    for cid_vizinha, dist_vizinha in zip(cidades_vizinhas, dist_vizinhas):
        if cid_vizinha not in cid_visitadas:
            (c, d) = calc_dist(cid_vizinha, destino, cid_visitadas, tot_dist + dist_vizinha)
            if(d <= dist_min):
                dist_min = d
                visitadas = c
    return (visitadas, dist_min)
# Interactive driver: ask for start/destination cities (names must match the
# distancias table exactly) and print the path found and its total mileage.
entra = input("Insira cidade inicial: ")
dest = input("Insira cidade destino: ")
caminho, dist = calc_dist(entra, dest, [], 0)
print("Menor caminho: {}, {} miles".format(caminho, dist))
| null | python_stuff/ex5.py | ex5.py | py | 2,812 | python | en | code | null | code-starcoder2 | 50 |
import itertools
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.externals.joblib import delayed, Parallel
from sklearn.utils import gen_even_slices
from ..base import BaseDetector
from ..utils import timeit, OneDimArray, TwoDimArray
__all__ = ['FastABOD']
def approximate_abof(
    X: TwoDimArray,
    X_train: TwoDimArray,
    neigh_ind: TwoDimArray
) -> OneDimArray:
    """Compute the approximate Angle-Based Outlier Factor (ABOF) for each
    sample.

    For every query point, take the variance of the weighted cosine terms
    over all pairs of its neighbours; `np.errstate(invalid='raise')` turns a
    degenerate (zero-length) difference vector into an exception instead of
    a silent NaN.
    """
    with np.errstate(invalid='raise'):
        factors = []
        for query_point, neighbors in zip(X, X_train[neigh_ind]):
            diffs = neighbors - query_point
            pair_terms = [
                (diff_a @ diff_b) / (diff_a @ diff_a) / (diff_b @ diff_b)
                for diff_a, diff_b in itertools.combinations(diffs, 2)
            ]
            factors.append(pair_terms)
        return np.var(factors, axis=1)
class FastABOD(BaseDetector):
    """Fast Angle-Based Outlier Detector (FastABOD).

    Parameters
    ----------
    fpr : float, default 0.01
        False positive rate. Used to compute the threshold.

    n_jobs : int, default 1
        Number of jobs to run in parallel. If -1, then the number of jobs is
        set to the number of CPU cores.

    verbose : bool, default False
        Enable verbose output.

    kwargs : dict
        Other keywords passed to sklearn.neighbors.NearestNeighbors().

    Attributes
    ----------
    threshold_ : float
        Threshold.

    X_ : array-like of shape (n_samples, n_features)
        Training data.

    References
    ----------
    H.-P. Kriegel, M. Schubert and A. Zimek,
    "Angle-based outlier detection in high-dimensional data,"
    In Proceedings of SIGKDD'08, pp. 444-452, 2008.
    """

    @property
    def X_(self) -> TwoDimArray:
        # The fitted NearestNeighbors index keeps the training data.
        return self._knn._fit_X

    def __init__(
        self,
        fpr: float = 0.01,
        n_jobs: int = 1,
        verbose: bool = False,
        **kwargs
    ) -> None:
        super().__init__(fpr=fpr, verbose=verbose)
        self.n_jobs = n_jobs
        self._knn = NearestNeighbors(**kwargs)
        self.check_params()

    def check_params(self) -> None:
        """Check validity of parameters and raise ValueError if not valid."""
        super().check_params()

    @timeit
    def fit(self, X: TwoDimArray, y: OneDimArray = None) -> 'FastABOD':
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : ignored

        Returns
        -------
        self : FastABOD
            Return self.
        """
        self._knn.fit(X)
        anomaly_score = self.anomaly_score()
        # Threshold is the (1 - fpr) percentile of the training scores.
        self.threshold_ = np.percentile(anomaly_score, 100. * (1. - self.fpr))
        return self

    def anomaly_score(self, X: TwoDimArray = None) -> OneDimArray:
        """Compute the anomaly score for each sample.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), default None
            Data. If not provided, the anomaly score for each training sample
            is returned.

        Returns
        -------
        anomaly_score : array-like of shape (n_samples,)
            Anomaly score for each sample.
        """
        neigh_ind = self._knn.kneighbors(X, return_distance=False)
        if X is None:
            X = self.X_
        n_samples, _ = X.shape
        try:
            # Split the samples evenly across workers.
            result = Parallel(n_jobs=self.n_jobs)(
                delayed(approximate_abof)(
                    X[s], self.X_, neigh_ind[s]
                ) for s in gen_even_slices(n_samples, self.n_jobs)
            )
        except FloatingPointError as e:
            # A zero difference vector (query == neighbor) divides by zero.
            raise ValueError('X must not contain training samples') from e
        # Negate so that smaller ABOF (more outlying) gives a larger score.
        return -np.concatenate(result)

    def feature_wise_anomaly_score(self, X: TwoDimArray = None) -> TwoDimArray:
        """Not supported by FastABOD."""
        raise NotImplementedError()

    def score(self, X: TwoDimArray, y: OneDimArray = None) -> float:
        """Not implemented.

        BUG FIX: the original definition omitted ``self``, so
        ``instance.score(X)`` silently bound the instance to ``X``.
        """
        raise NotImplementedError()
| null | kenchi/outlier_detection/angle_based.py | angle_based.py | py | 4,091 | python | en | code | null | code-starcoder2 | 50 |
162301565 | '''
Handlers for AJAX (Javascript) functions used in the web interface to start
experiments and train BMI decoders
'''
import json, datetime
import logging
import io, traceback
import numpy as np
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.db.models import ProtectedError
from riglib import experiment
from .json_param import Parameters
from .models import TaskEntry, Feature, Sequence, Task, Generator, Subject, Experimenter, DataFile, System, Decoder, KeyValueStore, import_by_path
from .tasktrack import Track
import logging
import io, traceback
from . import exp_tracker # Wrapper for tasktrack.Track
from . import trainbmi
http_request_queue = []  # FIFO of (request, save) pairs consumed by start_next_exp()
@csrf_exempt
def train_decoder_ajax_handler(request, idx):
    '''
    AJAX handler for creating a new decoder.

    Parameters
    ----------
    request : Django HttpRequest
        POST data describing how the decoder should be trained (type,
        units, update rate, etc.)
    idx : int
        ID of the TaskEntry record whose data is used to train the Decoder.

    Returns
    -------
    Django HttpResponse
        Indicates 'success' if all commands initiated without error.
    '''
    post = request.POST
    # Reject duplicate decoder names for this task entry.
    name_taken = Decoder.objects.filter(entry=idx, name=post['bminame'])
    if len(name_taken) > 0:
        return _respond(dict(status='error', msg='Name collision -- please choose a different name'))
    update_rate = float(post['bmiupdaterate'])
    trainbmi.cache_and_train(
        entry=idx,
        name=post['bminame'],
        clsname=post['bmiclass'],
        extractorname=post['bmiextractor'],
        cells=post['cells'],
        channels=post['channels'],
        binlen=1. / update_rate,
        tslice=list(map(float, post.getlist('tslice[]'))),
        ssm=post['ssm'],
        pos_key=post['pos_key'],
        kin_extractor=post['kin_extractor'],
        zscore=post['zscore'],
    )
    return _respond(dict(status="success"))
class encoder(json.JSONEncoder):
    '''
    JSON encoder that also serializes numpy arrays (as nested lists) and
    Parameters objects (as their underlying params dict).
    '''
    def default(self, o):
        if isinstance(o, np.ndarray):
            return o.tolist()
        if isinstance(o, Parameters):
            return o.params
        # Anything else falls through to the stock encoder (raises TypeError).
        return super(encoder, self).default(o)
def _respond(data):
    '''
    Wrap *data* in a JSON-formatted HttpResponse.

    Parameters
    ----------
    data : dict
        Keys and values can be just about anything JSON/encoder-serializable.

    Returns
    -------
    HttpResponse
        JSON-encoded version of the input dictionary
    '''
    payload = json.dumps(data, cls=encoder)
    return HttpResponse(payload, content_type="application/json")
def task_info(request, idx, dbname='default'):
    '''
    Collect parameters, generators, templates and metadata for one task.

    Parameters
    ----------
    request : Django HttpRequest
        GET flags of the form name -> "true" select the enabled features.
    idx : int
        Primary key used to look up the task from the database

    Returns
    -------
    JSON-encoded dictionary
    '''
    task = Task.objects.using(dbname).get(pk=idx)
    # Features whose checkbox is ticked in the browser
    feats = [
        Feature.objects.using(dbname).get(name=name)
        for name, isset in list(request.GET.items())
        if isset == "true"
    ]
    templates = TaskEntry.objects.using(dbname).filter(
        template=True, task__id=idx).order_by("-date")
    info = dict(
        params=task.params(feats=feats),
        generators=task.get_generators(),
        templates=[{'id': t.id, 'name': t.entry_name} for t in templates],
        metadata=TaskEntry.get_default_metadata(),
    )
    # Sequence-based tasks also report their available target sequences
    if issubclass(task.get(feats=feats), experiment.Sequence):
        info['sequence'] = task.sequences()
    info['controls'] = task.controls(feats=feats)
    return _respond(info)
def exp_info(request, idx, dbname='default'):
    '''
    Fetch the stored details (features, parameters, report) of a TaskEntry.

    Parameters
    ----------
    request : Django HttpRequest
        POST request triggered by clicking on a task entry from the left side pane
    idx : int
        Primary key used to look up the TaskEntry from the database

    Returns
    -------
    JSON-encoded dictionary
    '''
    entry = TaskEntry.objects.using(dbname).get(pk=idx)
    try:
        payload = entry.to_json()
    except Exception:
        print("##### Error trying to access task entry data: id=%s, dbname=%s" % (idx, dbname))
        import traceback
        exception = traceback.format_exc()
        exception.replace('\n', '\n    ')
        print(exception.rstrip())
        print("#####")
        return _respond_err(exception)
    return _respond(payload)
@csrf_exempt
def add_sequence(request):
    '''
    Create a Sequence record from POSTed JSON and attach it to a task.

    Returns a JSON dict with the new sequence's id and name.
    '''
    # Removed a leftover debug print(request.POST) that leaked POST data to logs.
    sequence = json.loads(request.POST['sequence'])
    task_id = json.loads(request.POST.get('task'))
    seq = Sequence.from_json(sequence)
    seq.task = Task.objects.get(pk=task_id)
    seq.save()
    return _respond(dict(id=seq.id, name=seq.name))
def hide_entry(request, idx):
    '''
    Mark TaskEntry *idx* as not visible in the browser listing.
    '''
    print("hide_entry")
    record = TaskEntry.objects.get(pk=idx)
    record.visible = False
    record.save()
    return _respond(dict())
def show_entry(request, idx):
    '''
    Mark TaskEntry *idx* as visible in the browser listing.
    '''
    print("show_entry")  # was logging "hide_entry" (copy-paste bug)
    entry = TaskEntry.objects.get(pk=idx)
    entry.visible = True
    entry.save()
    return _respond(dict())
def remove_entry(request, idx):
    '''
    Permanently delete TaskEntry *idx* with its linked data files and decoders.
    '''
    print("Remove entry %d" % idx)
    entry = TaskEntry.objects.get(pk=idx)
    # Drop dependent records first; ignore the case where none exist.
    for dependent in (DataFile, Decoder):
        try:
            dependent.objects.filter(entry=entry.id).delete()
        except dependent.DoesNotExist:
            pass
    entry.delete()
    return _respond(dict())
def template_entry(request, idx):
    '''
    Flag TaskEntry *idx* as a reusable template.
    '''
    record = TaskEntry.objects.get(pk=idx)
    record.template = True
    record.save()
    return _respond(dict())
def untemplate_entry(request, idx):
    '''
    Clear the template flag on TaskEntry *idx*.
    '''
    record = TaskEntry.objects.get(pk=idx)
    record.template = False
    record.save()
    return _respond(dict())
def backup_entry(request, idx):
    '''
    Flag TaskEntry *idx* for backup.
    '''
    record = TaskEntry.objects.get(pk=idx)
    record.backup = True
    record.save()
    return _respond(dict())
def unbackup_entry(request, idx):
    '''
    Clear the backup flag on TaskEntry *idx*.
    '''
    record = TaskEntry.objects.get(pk=idx)
    record.backup = False
    record.save()
    return _respond(dict())
def gen_info(request, idx):
    '''
    Return the JSON description of Generator *idx*.

    Previously a bare ``except:`` swallowed every error (including
    SystemExit/KeyboardInterrupt) and the view implicitly returned None,
    which is not a valid Django response; now the error is logged and an
    error JSON response is returned instead.
    '''
    try:
        gen = Generator.objects.get(pk=idx)
        return _respond(gen.to_json())
    except Exception as e:
        traceback.print_exc()
        return _respond_err(e)
def start_next_exp(request):
    '''
    Pop the oldest queued request off http_request_queue and launch it.
    '''
    if not http_request_queue:
        return _respond(dict(status="error", msg="No experiments in queue!"))
    req, save = http_request_queue.pop(0)
    return start_experiment(req, save=save)
@csrf_exempt
def start_experiment(request, save=True, execute=True):
    '''
    Handles presses of the 'Start Experiment' and 'Test' buttons in the browser
    interface

    Parameters
    ----------
    request : Django HttpRequest
        POST field 'data' is a JSON blob holding the task id, feature flags,
        metadata, params and (for Sequence tasks) the target sequence.
    save : bool, default True
        If False (the 'Test' button), the TaskEntry is discarded afterwards.
    execute : bool, default True
        If False, everything is validated/saved but the task FSM is not run.
    '''
    #make sure we don't have an already-running experiment
    tracker = Track.get_instance()
    if len(tracker.status.value) != 0:
        print("Task is running, exp_tracker.status.value:", tracker.status.value)
        return _respond(dict(status="running", msg="Already running task!"))
    # Try to start the task, and if there are any errors, send them to the browser interface
    try:
        data = json.loads(request.POST['data'])
        task = Task.objects.get(pk=data['task'])
        feature_names = list(data['feats'].keys())
        # 'subject'/'experimenter'/'project'/'session' are popped out of the
        # metadata dict so only the free-form fields remain for entry.metadata.
        subject_name = data['metadata'].pop('subject')
        subject = Subject.objects.get(name=subject_name)
        experimenter_name = data['metadata'].pop('experimenter')
        experimenter = Experimenter.objects.get(name=experimenter_name)
        project = data['metadata'].pop('project')
        session = data['metadata'].pop('session')
        entry = TaskEntry.objects.create(subject_id=subject.id, task_id=task.id, experimenter_id=experimenter.id,
            project=project, session=session)
        if 'entry_name' in data:
            entry.entry_name = data['entry_name']
        # Optional custom date in "YYYY-MM-DD" form; "Today" keeps the default.
        if 'date' in data and data['date'] != "Today" and len(data['date'].split("-")) == 3:
            datestr = data['date'].split("-")
            print("Got custom date: ", datestr)
            entry.date = datetime.datetime(int(datestr[0]), int(datestr[1]), int(datestr[2])) # this does not work: datetime.datetime.strptime("%Y-%m-%d", datetime.datetime.now().strftime("%Y-%m-%d"))
        params = Parameters.from_html(data['params'])
        entry.params = params.to_json()
        feats = Feature.getall(feature_names)
        # kwargs accumulate everything tracker.runtask() needs to build the task
        kwargs = dict(subj=entry.subject.id, subject_name=subject_name, base_class=task.get(),
                      feats=feats, params=params)
        metadata = Parameters.from_html(data['metadata'])
        entry.metadata = metadata.to_json()
        # Save the target sequence to the database and link to the task entry, if the task type uses target sequences
        if issubclass(task.get(feats=feature_names), experiment.Sequence):
            seq = Sequence.from_json(data['sequence'])
            seq.task = task
            if save:
                seq.save()
            entry.sequence = seq
            kwargs['seq'] = seq
        response = dict(status="testing", subj=entry.subject.name,
                        task=entry.task.name)
        if save:
            # tag software version using the git hash
            import git
            repo = git.repo.Repo(__file__, search_parent_directories=True)
            sw_version = repo.commit().hexsha[:8]
            repo_dirty = repo.is_dirty(index=True, working_tree=True, untracked_files=False)
            if repo_dirty:
                sw_version += '.dirty'
            entry.sw_version = sw_version
            # Save the task entry to database
            entry.save()
            # Link the features used to the task entry
            for feat_name in feature_names:
                f = Feature.objects.get(name=feat_name)
                entry.feats.add(f.pk)
            response['date'] = entry.date.strftime("%h %d, %Y %I:%M %p")
            response['status'] = "running"
            response['idx'] = entry.ui_id
            # Give the entry ID to the runtask as a kwarg so that files can be linked after the task is done
            kwargs['saveid'] = entry.id
        else:
            # 'Test' mode: discard the provisional entry
            entry.delete()
        # Start the task FSM and tracker
        if execute:
            tracker.runtask(**kwargs)
        else:
            response["status"] = "completed"
        # Return the JSON response
        return _respond(response)
    except Exception as e:
        # Generate an HTML response with the traceback of any exceptions thrown
        import io
        import traceback
        from .tasktrack import log_str
        err = io.StringIO()
        traceback.print_exc(None, err)
        traceback.print_exc()  # print again to console
        err.seek(0)
        log_str(err.read())  # log to tasktracker
        err.seek(0)
        tracker.reset()  # make sure task is stopped
        return _respond(dict(status="error", msg=err.read()))
def rpc(fn):
    '''
    Run *fn* against the live task tracker and wrap the outcome for the browser.

    Parameters
    ----------
    fn : callable
        Function which takes a single argument, the tracker object. A
        non-None return value is forwarded under the 'data' key.

    Returns
    -------
    JSON-encoded dictionary
    '''
    tracker = Track.get_instance()
    # make sure that there exists an experiment to interact with
    if tracker.status.value not in [b"running", b"testing"]:
        print("Task not running!", str(tracker.status.value))
        return _respond(dict(status="error", msg="No task running, so cannot run command!"))
    try:
        status = tracker.status.value.decode("utf-8")
        result = fn(tracker)
        reply = dict(status="pending", msg=status)
        if result is not None:
            reply['data'] = result
        return _respond(reply)
    except Exception as e:
        import traceback
        traceback.print_exc()
        return _respond_err(e)
def _respond_err(e):
    '''
    Default error response from server to webclient.

    Parameters
    ----------
    e : Exception
        Present for the caller's convenience; the message body is built
        from the currently active traceback.

    Returns
    -------
    JSON-encoded dictionary
        Sets status to "error" and provides the specific error message
    '''
    buf = io.StringIO()
    traceback.print_exc(None, buf)
    buf.seek(0)
    return _respond(dict(status="error", msg=buf.read()))
@csrf_exempt
def stop_experiment(request):
    '''Ask the tracker to halt the currently running task.'''
    def _stop(tracker):
        return tracker.stoptask()
    return rpc(_stop)
def enable_clda(request):
    '''Turn on closed-loop decoder adaptation in the running task.'''
    return rpc(lambda t: t.task_proxy.enable_clda())
def disable_clda(request):
    '''Turn off closed-loop decoder adaptation in the running task.'''
    return rpc(lambda t: t.task_proxy.disable_clda())
def set_task_attr(request, attr, value):
    '''
    Generic function to change a task attribute while the task is running.
    '''
    return rpc(lambda t: t.task_proxy.remote_set_attr(attr, value))
@csrf_exempt
def save_notes(request, idx):
    '''Persist the browser's notes field onto TaskEntry *idx*.'''
    record = TaskEntry.objects.get(pk=idx)
    record.notes = request.POST['notes']
    record.save()
    return _respond(dict(status="success"))
def reward_drain(request, onoff):
    '''
    Remotely start ('on') or stop (anything else) the solenoid reward drain.
    This function is modified to use the reward system in Orsborn lab - check reward.py for functions
    '''
    from riglib import reward
    dispenser = reward.open()
    if onoff == 'on':
        dispenser.drain(600)
        print('drain on')
    else:
        print('drain off')
        dispenser.drain_off()
    return HttpResponse('Turning reward %s' % onoff)
def populate_models(request):
    """ Database initialization code. When 'db.tracker' is imported, it goes through the database and ensures that
        1) at least one subject is present
        2) all the tasks from 'tasklist' appear in the db
        3) all the features from 'featurelist' appear in the db
        4) all the generators from all the tasks appear in the db
    """
    if len(Subject.objects.all()) == 0:
        # Guarantee at least one subject exists
        Subject(name='testing').save()
    for model in [Generator, System]:
        model.populate()
    return HttpResponse("Updated Tasks, features generators, and systems")
@csrf_exempt
def add_new_task(request):
    '''Register a new Task row from POSTed name/import_path, then refresh generators.'''
    from . import models
    name, import_path = request.POST['name'], request.POST['import_path']
    if import_path == '':
        import_path = "riglib.experiment.Experiment"
    # Make sure the dotted path resolves before touching the database
    try:
        import_by_path(import_path)
    except:
        import traceback
        traceback.print_exc()
        return _respond(dict(msg="import path invalid!", status="error"))
    new_task = Task(name=name, import_path=import_path)
    new_task.save()
    # Sync generators with the freshly added task
    Generator.remove_unused()
    Generator.populate()
    payload = dict(id=new_task.id, name=new_task.name, import_path=new_task.import_path)
    return _respond(dict(msg="Added new task: %s" % new_task.name, status="success", data=payload))
@csrf_exempt
def remove_task(request):
    '''Delete a Task unless existing TaskEntry rows still reference it.'''
    id = request.POST.get('id')
    task = Task.objects.filter(id=id)
    try:
        entry = TaskEntry.objects.filter(task=id).values_list('id', flat=True)
    except TaskEntry.DoesNotExist:
        entry = None
    if entry:
        return _respond(dict(msg="Couldn't remove task, experiments {0} use it.".format(list(entry)), status="error"))
    # No experiments reference the task: drop its sequences, then the task itself
    try:
        Sequence.objects.filter(task=id).delete()
    except Sequence.DoesNotExist:
        pass
    task.delete()
    return _respond(dict(msg="Removed task", status="success"))
@csrf_exempt
def add_new_subject(request):
    '''Create a Subject row from the POSTed name.'''
    new_subj = Subject(name=request.POST['subject_name'])
    new_subj.save()
    return _respond(dict(msg="Added new subject: %s" % new_subj.name, status="success", data=dict(id=new_subj.id, name=new_subj.name)))
@csrf_exempt
def remove_subject(request):
    '''Delete a Subject unless protected by existing experiments.'''
    id = request.POST.get('id')
    try:
        Subject.objects.filter(id=id).delete()
    except ProtectedError:
        return _respond(dict(msg="Couldn't remove subject, there must be valid experiments that use it", status="error"))
    return _respond(dict(msg="Removed subject", status="success"))
@csrf_exempt
def add_new_experimenter(request):
    '''Create an Experimenter row from the POSTed name.'''
    new_exp = Experimenter(name=request.POST['experimenter_name'])
    new_exp.save()
    return _respond(dict(msg="Added new experimenter: %s" % new_exp.name, status="success", data=dict(id=new_exp.id, name=new_exp.name)))
@csrf_exempt
def remove_experimenter(request):
    '''
    Delete an Experimenter unless protected by existing experiments.

    The ProtectedError branch used to return None, which is not a valid
    Django view response; it now returns an error JSON like
    remove_subject() does.
    '''
    id = request.POST.get('id')
    try:
        Experimenter.objects.filter(id=id).delete()
        return _respond(dict(msg="Removed experimenter", status="success"))
    except ProtectedError:
        return _respond(dict(msg="Couldn't remove experimenter, there must be valid experiments that use it", status="error"))
@csrf_exempt
def add_new_system(request):
    '''Create a System row from POSTed name/path/processor_path.'''
    new_sys = System(name=request.POST['name'], path=request.POST['path'],
                     processor_path=request.POST['processor_path'])
    new_sys.save()
    return _respond(dict(msg="Added new system: %s" % new_sys.name, status="success", data=dict(id=new_sys.id, name=new_sys.name)))
@csrf_exempt
def remove_system(request):
    '''Delete a System unless protected by existing experiments.'''
    from . import models
    id = request.POST.get('id')
    try:
        System.objects.filter(id=id).delete()
    except ProtectedError:
        return _respond(dict(msg="Couldn't remove system, there must be valid experiments that use it", status="error"))
    return _respond(dict(msg="Removed system", status="success"))
@csrf_exempt
def toggle_features(request):
    '''Enable a built-in feature if not installed, otherwise disable it.'''
    from features import built_in_features
    from . import models
    name = request.POST.get('name')
    if len(Feature.objects.filter(name=name)) > 0:
        # Already installed -> uninstall it
        Feature.objects.filter(name=name).delete()
        return _respond(dict(msg="Disabled feature: %s" % str(name), status="success"))
    if name in built_in_features:
        # Known built-in -> install under its canonical import path
        feature_cls = built_in_features[name]
        import_path = feature_cls.__module__ + '.' + feature_cls.__qualname__
        feat = Feature(name=name, import_path=import_path)
        feat.save()
        return _respond(dict(msg="Enabled built-in feature: %s" % str(feat.name), status="success", id=feat.id))
    # Neither installed nor built-in: something is wrong
    return _respond(dict(msg="feature not valid!", status="error"))
@csrf_exempt
def add_new_feature(request):
    '''Register a custom Feature after validating its import path.'''
    from . import models
    name, import_path = request.POST['name'], request.POST['import_path']
    # Make sure the dotted path resolves before saving anything
    try:
        import_by_path(import_path)
    except:
        import traceback
        traceback.print_exc()
        return _respond(dict(msg="import path invalid!", status="error"))
    feat = Feature(name=name, import_path=import_path)
    feat.save()
    payload = dict(id=feat.id, name=feat.name, import_path=feat.import_path)
    return _respond(dict(msg="Added new feature: %s" % feat.name, status="success", data=payload))
@csrf_exempt
def setup_run_upkeep(request):
    '''Refresh the Generator table from the currently known tasks.'''
    from . import models
    Generator.populate()
    return HttpResponse("Updated generators!")
@csrf_exempt
def get_report(request):
    '''
    Refresh and return the running task's report statistics for the frontend.
    '''
    def _report(tracker):
        tracker.task_proxy.update_report_stats()
        return tracker.task_proxy.reportstats
    return rpc(_report)
@csrf_exempt
def trigger_control(request):
    '''
    Trigger an action via controls on the web interface

    POST fields: 'control' names the method to call, optional 'params' is a
    JSON dict of keyword arguments, and optional 'base_class' switches to
    calling a static method on the task class instead of the live task.
    '''
    def control_fn(tracker):
        # Invoked through rpc(): looks up the named method on the live task
        # proxy and calls it, with params when provided.
        try:
            method = getattr(tracker.task_proxy, request.POST["control"])
            if "params" in request.POST:
                params = json.loads(request.POST.get("params"))
                print(method)
                return method(**params)
            else:
                return method()
        except Exception as e:
            # NOTE(review): failures here only print; control_fn then
            # implicitly returns None, which rpc() reports as "pending".
            traceback.print_exc()
    if "base_class" in request.POST:
        # If this is a static method, it will have a base_class
        task_id = request.POST["base_class"]
        feature_names = json.loads((request.POST['feats'])).keys()
        task = Task.objects.get(pk=task_id).get(feats=feature_names)
        try:
            fn = getattr(task, request.POST["control"])
            print(fn)
            if "params" in request.POST:
                params = json.loads(request.POST.get("params"))
                result = fn(**params)
            else:
                result = fn()
            return _respond(dict(status="success", value=result))
        except Exception as e:
            traceback.print_exc()
            return _respond_err(e)
    else:
        # Otherwise it is a method belonging to the active task
        return rpc(control_fn)
@csrf_exempt
def get_status(request):
    """ Send the task tracker's status back to the frontend """
    tracker = Track.get_instance()
    if tracker.task_kwargs is None:
        saveid = None
    else:
        saveid = tracker.task_kwargs["saveid"]
    print("saveid", saveid)
    return _respond(dict(status=tracker.get_status(), saveid=saveid))
@csrf_exempt
def save_entry_name(request):
    '''Rename a TaskEntry from the browser's editable name field.'''
    from . import models
    record = TaskEntry.objects.get(id=request.POST["id"])
    record.entry_name = request.POST["entry_name"]
    record.save()
    return _respond(dict(status="success", msg="Saved entry name: %s" % record.entry_name))
def update_built_in_feature_import_paths(request):
    """For built-in features, update the import path based on the features module"""
    from . import models
    for feature in Feature.objects.all():
        feature.get(update_builtin=True)
    return _respond(dict(status="success", msg="Updated built-in feature paths!"))
def update_database_storage_path(request):
    '''Store the data_path setting for the named database.'''
    from . import models
    db_name = request.POST['db_name']
    KeyValueStore.set("data_path", request.POST['db_storage_path'], dbname=db_name)
    return _respond(dict(status="success", msg="Updated storage path for %s db" % db_name))
def save_recording_sys(request):
    '''Persist the selected recording system into the key-value store.'''
    from . import models
    KeyValueStore.set('recording_sys', request.POST['selected_recording_sys'])
    print(KeyValueStore.get('recording_sys'))
    return _respond(dict(status="success", msg="Set recording_sys to %s" % KeyValueStore.get('recording_sys')))
def save_rig_name(request):
    '''Persist the rig name into the key-value store.'''
    from . import models
    KeyValueStore.set('rig_name', request.POST['rig_name'])
    print(KeyValueStore.get('rig_name'))
    return _respond(dict(status="success", msg="Set rig_name to %s" % KeyValueStore.get('rig_name')))
@csrf_exempt
def setup_handler(request):
    """One-stop handler for setup functions to avoid adding a bunch of URLs.

    Dispatches the POSTed 'action' to the matching handler; unknown actions
    get an error response (the original's else branch referenced an
    undefined name 'data_type' and raised NameError).
    """
    action = request.POST['action']
    handlers = {
        "update_database_storage_path": update_database_storage_path,
        "save_recording_sys": save_recording_sys,
        "save_rig_name": save_rig_name,
        "update_built_in_feature_paths": update_built_in_feature_import_paths,
    }
    if action in handlers:
        return handlers[action](request)
    return _respond(dict(status="error", msg="Unrecognized action: %s" % action))
| null | db/tracker/ajax.py | ajax.py | py | 24,165 | python | en | code | null | code-starcoder2 | 50 |
501437840 | # -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import( Column,
DJANGOCMS_GRID_LG_CHOICES,
DJANGOCMS_GRID_MD_CHOICES,
DJANGOCMS_GRID_SM_CHOICES,
DJANGOCMS_GRID_XS_CHOICES)
class ColumnPluginForm(forms.ModelForm):
    """Plugin form exposing one Bootstrap grid width choice per breakpoint."""

    # NOTE: declaration order fixes the rendered field order (md, lg, sm, xs).
    size_md = forms.ChoiceField(
        label=_("Medium size"),
        help_text=_('Medium devices Desktops (>=992px)'),
        choices=DJANGOCMS_GRID_MD_CHOICES, required=True)
    size_lg = forms.ChoiceField(
        label=_("Large size"),
        help_text=_('Large devices Desktops (>=1200px)'),
        choices=DJANGOCMS_GRID_LG_CHOICES, required=False)
    size_sm = forms.ChoiceField(
        label=_("Small size"),
        help_text=_('Small devices Tablets (>=768px)'),
        choices=DJANGOCMS_GRID_SM_CHOICES, required=False)
    size_xs = forms.ChoiceField(
        label=_("Extra small size"),
        help_text=_('Extra small devices Phones (<768px)'),
        choices=DJANGOCMS_GRID_XS_CHOICES, required=False)

    class Meta:
        model = Column
        exclude = ('size', 'page', 'position', 'placeholder', 'language', 'plugin_type')

    def __init__(self, *args, **kwargs):
        super(ColumnPluginForm, self).__init__(*args, **kwargs)
        if self.instance:
            # Split the stored space-separated "size" string back into
            # per-breakpoint initial values (first matching breakpoint wins,
            # checked in lg, md, sm, xs order like the original elif chain).
            breakpoints = (
                ('size_lg', DJANGOCMS_GRID_LG_CHOICES),
                ('size_md', DJANGOCMS_GRID_MD_CHOICES),
                ('size_sm', DJANGOCMS_GRID_SM_CHOICES),
                ('size_xs', DJANGOCMS_GRID_XS_CHOICES),
            )
            for size in self.instance.size.split():
                for field_name, choices in breakpoints:
                    if size in [x[0] for x in choices]:
                        self.fields[field_name].initial = size
                        break
| null | djangocms_bootstrap3/forms.py | forms.py | py | 2,012 | python | en | code | null | code-starcoder2 | 50 |
123570584 | # A function which accepts an input string consisting of alphabetic characters
# and spaces and returns the string with all the spaces removed. Do NOT use any
# string methods for this problem.
# method 1
def Remove_All_Spaces(input_str):
    """Return *input_str* with every space removed (no string methods used)."""
    kept_chars = ""
    for current in input_str:
        # Compare character codes, honouring the no-string-methods rule
        if ord(current) != ord(" "):
            kept_chars = kept_chars + current
    return kept_chars
# method 2
def Remove_All_Spaces(my_str):
    """Return *my_str* with all spaces removed, using index-based iteration.

    Bug fix: the original tested ``ord(my_str[i]) != " "``, comparing an
    int against a str -- always true, so spaces were never removed. The
    comparison now uses ``ord(" ")`` on both sides.
    """
    new_str = ""
    for i in range(len(my_str)):
        if ord(my_str[i]) != ord(" "):
            new_str = new_str + chr(ord(my_str[i]))
    return new_str
# Driver code tester
# Reads a string from stdin and strips its spaces; the result is printed below.
my_str = input("Please enter your string : ")
result = Remove_All_Spaces(my_str)
print(result) | null | 10 - Week 6/02 String Methods/10 Strings Functions Exercise 4 (Remove All Spaces).py | 10 Strings Functions Exercise 4 (Remove All Spaces).py | py | 719 | python | en | code | null | code-starcoder2 | 50 |
588713837 | from __future__ import division
from bokeh.plotting import cursession, figure, output_server, output_notebook, show
from copy import copy
from keras.callbacks import Callback
from numpy import inf, nan
from os import _exit
from Print import printflush
# The following defines a Class object that monitors and records
# certain key data from the Neural Network training process;
# it also includes a method "plot_learning_curves" that turns on a separate CPU process
# that plots the Training and Validation learning curves live
class NeuralNetworkTrainingMonitor(Callback):
    """Keras Callback that records training/validation losses and accuracies
    and streams live learning curves to a Bokeh server, while keeping a copy
    of the best model seen so far (lowest validation loss)."""
    def __init__(self, reporting_freq=False, plot_title='Neural Network Learning Curves', bokeh_output='server'):
        # NOTE(review): super(Callback, ...) skips Callback.__init__ and calls
        # its parent's instead -- presumably NeuralNetworkTrainingMonitor was
        # intended as the first argument; confirm before changing.
        super(Callback, self).__init__()
        # Progress counters; epoch/batch indices are incremented before use
        self.latest_epoch = -1
        self.latest_batch = -1
        self.batches = []
        self.train_losses = []
        self.approx_train_acc_in_latest_epoch = 0.
        self.val_losses = []
        self.latest_val_acc = None
        # Best-model bookkeeping, keyed on lowest validation loss so far
        self.min_val_loss = inf
        self.best_model = None
        self.best_model_epoch = None
        self.best_model_train_acc = None
        self.best_model_val_acc = None
        # Report every `reporting_freq` batches; falsy = once per epoch end
        self.reporting_freq = reporting_freq
        printflush('\nConnecting to Bokeh Server for live Learning Curves plotting...\n')
        try:
            output_server('')
            self.bokeh_session = cursession()
            self.fig = figure(title=plot_title,
                x_axis_label='# of Training Data Batches', y_axis_label='Loss',
                plot_height=680, plot_width=880)
            # Named glyphs so their data sources can be looked up below
            self.fig.line((), (), name='TrainLoss', legend='Training Loss')
            self.fig.circle((), (), name='ValidLoss', legend='Validation Loss', color='red')
            show(self.fig)
            self.train_losses_curve_data_source = self.fig.select(dict(name='TrainLoss'))[0].data_source
            self.valid_losses_curve_data_source = self.fig.select(dict(name='ValidLoss'))[0].data_source
            printflush('\nConnecting to Bokeh Server for live Learning Curves plotting... done!\n')
        except:
            # Could not reach the Bokeh server: explain how to start one, then bail out
            printflush('\nBokeh Server Connection *FAILED!*')
            printflush('Please make sure Bokeh package is already installed in Python, and')
            printflush('please open a new Command-Line Terminal window\n (separate from this Terminal window)')
            printflush(' and run the following command firs to launch Bokeh Server:')
            printflush(' bokeh-server --backend=memory\n')
            _exit(0)
    def on_train_begin(self, logs={}):
        """Print a header when training starts."""
        printflush('\nFFNN Training Progress')
        printflush('______________________')
    def on_epoch_begin(self, epoch, logs={}):
        """Advance the epoch counter."""
        self.latest_epoch += 1
    def on_batch_end(self, batch, logs={}):
        """Record per-batch loss/accuracy and refresh the plot periodically."""
        self.latest_batch += 1
        self.batches.append(self.latest_batch)
        self.train_losses.append(logs.get('loss'))
        # Keras versions differ on the accuracy key name ('acc' vs 'accuracy')
        train_acc = logs.get('acc')
        if not train_acc:
            train_acc = logs.get('accuracy')
        # Running mean of accuracy over the batches of the current epoch
        self.approx_train_acc_in_latest_epoch += (train_acc - self.approx_train_acc_in_latest_epoch) / (batch + 1)
        # Placeholder nan; overwritten at epoch end when val_loss is known
        self.val_losses.append(logs.get('val_loss', nan))
        if self.reporting_freq and not(self.latest_batch % self.reporting_freq):
            self.report(batch_in_epoch=batch)
    def on_epoch_end(self, epoch, logs={}):
        """Record validation metrics and snapshot the model if it improved."""
        current_val_loss = logs.get('val_loss')
        self.latest_val_acc = logs.get('val_acc')
        if not self.latest_val_acc:
            self.latest_val_acc = logs.get('val_accuracy')
        if current_val_loss is None:
            # No validation data: every epoch's model counts as "best"
            self.best_model = copy(self.model)
        else:
            self.val_losses[-1] = current_val_loss
            if current_val_loss < self.min_val_loss:
                self.min_val_loss = current_val_loss
                self.best_model = copy(self.model)
                self.best_model_epoch = epoch
                self.best_model_train_acc = self.approx_train_acc_in_latest_epoch
                self.best_model_val_acc = self.latest_val_acc
        if not self.reporting_freq:
            self.report()
    def on_train_end(self, logs={}):
        """Emit the final summary (and one last report if batch-level reporting)."""
        if self.reporting_freq:
            self.report()
        printflush('\nFFNN Training Finished! (%s Batches in total)\n'
                   % '{:,}'.format(self.latest_batch))
        if self.latest_val_acc is None:
            printflush('Training Accuracy (approx) = %s%%\n'
                       % '{:.1f}'.format(100. * self.approx_train_acc_in_latest_epoch))
        else:
            printflush('Best trained FFNN (with lowest Validation Loss) is from epoch #%s'
                       % '{:,}'.format(self.best_model_epoch))
            printflush('Training Accuracy (approx) = %s%%, Validation Accuracy = %s%%\n'
                       % ('{:.1f}'.format(100. * self.best_model_train_acc),
                          '{:.1f}'.format(100. * self.latest_val_acc)))
    def report(self, batch_in_epoch=None):
        """Print a one-line progress summary and push new data to Bokeh."""
        if batch_in_epoch:
            batch_text = ' Batch ' + '{0:03}'.format(batch_in_epoch)
        else:
            batch_text = ''
        if self.latest_val_acc is None:
            val_acc_text = ''
        else:
            val_acc_text = 'ValidAcc (prev epoch)=' + '{:.1f}'.format(100. * self.latest_val_acc) + '%'
        # end='\r' keeps the progress line updating in place on the console
        printflush('Epoch %s%s: TrainAcc (approx)=%s%%, %s'
                   % ('{:,}'.format(self.latest_epoch),
                      batch_text,
                      '{:.1f}'.format(100. * self.approx_train_acc_in_latest_epoch),
                      val_acc_text), end='\r')
        # Replace the plotted series wholesale and sync with the Bokeh server
        self.train_losses_curve_data_source.data['x'] = self.batches
        self.train_losses_curve_data_source.data['y'] = self.train_losses
        self.valid_losses_curve_data_source.data['x'] = self.batches
        self.valid_losses_curve_data_source.data['y'] = self.val_losses
        self.bokeh_session.store_objects(self.train_losses_curve_data_source, self.valid_losses_curve_data_source)
| null | Programming Scripts/zzz Utility Code/Python/KerasTrainingMonitor.py | KerasTrainingMonitor.py | py | 5,998 | python | en | code | null | code-starcoder2 | 51 |
44860443 | from __future__ import print_function
import errno
import os
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
EPS = 1e-7  # tolerance used by assert_array_eq for elementwise float comparison
def assert_eq(real, expected):
    """Fail with a descriptive message unless real == expected."""
    message = '%s (true) vs %s (expected)' % (real, expected)
    assert real == expected, message
def assert_array_eq(real, expected):
    """Elementwise-compare two arrays, allowing differences below EPS."""
    close_enough = (np.abs(real - expected) < EPS).all()
    assert close_enough, '%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
    """Return sorted paths of the files in *folder* whose names end with *suffix*."""
    return [
        os.path.join(folder, entry)
        for entry in sorted(os.listdir(folder))
        if entry.endswith(suffix)
    ]
def load_imageid(folder):
    """Collect the integer image ids of the .jpg files in *folder*.

    The id is the final underscore-separated token of the file stem
    (presumably COCO-style names like COCO_val2014_000000123456.jpg --
    confirm against the dataset in use).
    """
    return {
        int(path.split('/')[-1].split('.')[0].split('_')[-1])
        for path in load_folder(folder, 'jpg')
    }
def pil_loader(path):
    # Open via an explicit file handle and force a 3-channel RGB image.
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')
def weights_init(m):
    """Custom weight init: N(0, 0.02) for linear/conv layers, N(1, 0.02) + zero bias for BatchNorm2d."""
    layer_cls = m.__class__
    if layer_cls in (nn.Linear, nn.Conv2d, nn.ConvTranspose2d):
        m.weight.data.normal_(0.0, 0.02)
    elif layer_cls == nn.BatchNorm2d:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    else:
        print('%s is not initialized.' % layer_cls)
def init_net(net, net_file):
    """Load weights from *net_file* if given, otherwise apply the custom random init."""
    if net_file:
        net.load_state_dict(torch.load(net_file))
        return
    net.apply(weights_init)
def create_dir(path):
    """Create *path* (including parents) if it does not already exist."""
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc:
        # Tolerate a concurrent creation of the same directory; re-raise anything else.
        if exc.errno != errno.EEXIST:
            raise
class Logger(object):
    """Simple file logger that accumulates per-key values and flushes their means."""

    def __init__(self, output_name):
        # Ensure the parent directory of the log file exists (creates one level only).
        dirname = os.path.dirname(output_name)
        if not os.path.exists(dirname):
            os.mkdir(dirname)
        self.log_file = open(output_name, 'w')
        self.infos = {}  # key -> list of values appended since the last log()

    def append(self, key, val):
        """Accumulate *val* under *key* until the next call to log()."""
        vals = self.infos.setdefault(key, [])
        vals.append(val)

    def log(self, extra_msg=''):
        """Write the mean of each accumulated key (preceded by *extra_msg*), reset, and return the message."""
        msgs = [extra_msg]
        # BUG FIX: dict.iteritems() is Python 2 only (NameError on Python 3);
        # .items() behaves the same here on both versions.
        for key, vals in self.infos.items():
            msgs.append('%s %.6f' % (key, np.mean(vals)))
        msg = '\n'.join(msgs)
        self.log_file.write(msg + '\n')
        self.log_file.flush()
        self.infos = {}
        return msg

    def write(self, msg):
        """Write *msg* to the log file and echo it to stdout."""
        self.log_file.write(msg + '\n')
        self.log_file.flush()
        print(msg)
class EvalbyTypeLogger(object):
    """Accumulates accuracy broken down by answer type and question type.

    The type dicts map a type name to a 1-based integer type id.
    """

    def __init__(self, a_type_dict, q_type_dict):
        self.a_type_dict = a_type_dict
        self.q_type_dict = q_type_dict
        self.at_num = len(a_type_dict)
        self.qt_num = len(q_type_dict)
        # Running score sums and sample counts, one slot per type id.
        self.at_accu = np.zeros(self.at_num)
        self.at_count = np.zeros(self.at_num)
        self.qt_accu = np.zeros(self.qt_num)
        self.qt_count = np.zeros(self.qt_num)

    def update(self, score_tensor, a_type, q_type):
        """Accumulate per-type score sums and counts for one batch.

        score_tensor: [batch_size, num_answers]
        a_type: [batch_size] LongTensor of 1-based answer-type ids
        q_type: [batch_size] LongTensor of 1-based question-type ids
        """
        batch_scores = score_tensor.sum(1)
        a_type = a_type.view(-1)
        q_type = q_type.view(-1)
        for i in range(self.at_num):
            num_at_i = torch.nonzero(a_type == (i+1)).numel()
            self.at_count[i] += num_at_i
            score_at_i = ((a_type == (i+1)).float() * batch_scores).sum()
            self.at_accu[i] += score_at_i
        for i in range(self.qt_num):
            num_qt_i = torch.nonzero(q_type == (i+1)).numel()
            self.qt_count[i] += num_qt_i
            score_qt_i = ((q_type == (i+1)).float() * batch_scores).sum()
            self.qt_accu[i] += score_qt_i

    def printResult(self, show_q_type=False, show_a_type=True):
        """Print the accumulated per-type accuracies (score sum / sample count)."""
        if(show_a_type):
            print("========== Accuracy by Type of Answers ==========")
            for key in self.a_type_dict.keys():
                type_score = self.at_accu[self.a_type_dict[key]-1]
                # 1e-10 guards against division by zero for types never seen.
                type_num = self.at_count[self.a_type_dict[key]-1] + 1e-10
                # BUG FIX: printed label typo 'Tpye' -> 'Type'.
                print('Type: \t %s \t Accuracy: \t %.6f \t Total Type Num: \t %.1f' % (key, float(type_score)/float(type_num), float(type_num)) )
        if(show_q_type):
            print("========== Accuracy by Type of Questions ==========")
            for key in self.q_type_dict.keys():
                type_score = self.qt_accu[self.q_type_dict[key]-1]
                type_num = self.qt_count[self.q_type_dict[key]-1] + 1e-10
                print('Type: \t %s \t Accuracy: \t %.6f \t Total Type Num: \t %.1f' % (key, float(type_score)/float(type_num), float(type_num)) )
        #print("==================== End print ====================")
| null | utils.py | utils.py | py | 4,690 | python | en | code | null | code-starcoder2 | 51 |
28841343 | from typing import Dict
import numpy
from overrides import overrides
from allennlp.data.fields.field import Field
class ArrayField(Field[numpy.ndarray]):
    """
    A class representing an array, which could have arbitrary dimensions.
    A batch of these arrays are padded to the max dimension length in the batch
    for each dimension.
    """
    def __init__(self, array: numpy.ndarray, padding_value: int = 0) -> None:
        self.array = array
        self.padding_value = padding_value

    @overrides
    def get_padding_lengths(self) -> Dict[str, int]:
        # One entry per axis: "dimension_i" -> length of axis i.
        return {"dimension_" + str(i): shape
                for i, shape in enumerate(self.array.shape)}

    @overrides
    def as_array(self, padding_lengths: Dict[str, int]) -> numpy.ndarray:
        """Return this array embedded in a padding-value-filled array of shape *padding_lengths*."""
        max_shape = [padding_lengths["dimension_{}".format(i)]
                     for i in range(len(padding_lengths))]
        return_array = numpy.ones(max_shape, "float32") * self.padding_value

        # If the array has a different shape from the largest
        # array, pad dimensions with zeros to form the right
        # shaped list of slices for insertion into the final array.
        slicing_shape = list(self.array.shape)
        if len(self.array.shape) < len(max_shape):
            slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
        slices = [slice(0, x) for x in slicing_shape]
        # BUG FIX: indexing an ndarray with a *list* of slices was deprecated in
        # numpy 1.15 and later removed; a tuple is required for multi-axis indexing.
        return_array[tuple(slices)] = self.array
        return return_array

    @overrides
    def empty_field(self):  # pylint: disable=no-self-use
        return ArrayField(numpy.array([], dtype="float32"))
| null | src/allennlp/data/fields/array_field.py | array_field.py | py | 1,616 | python | en | code | null | code-starcoder2 | 51 |
223646032 | import annoy
import numpy as np
import logging
import glob
import re
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class RecommenderVectorIndex(object):
    """Database index for finding nearest neighbors. The rows are float
    vectors, such as Word2Vec vectors or some other embeddings.
    """
    def __init__(self, vector_size, n_indices=1, n_trees=16):
        """Creates an instance of recommender index which contains one or many
        AnnoyIndex instances.

        Args:
            vector_size (int): Size of vector annoy index keeps.
            n_indices (int, optional): Number of annoy indices to create.
            n_trees (int, optional): Number of trees in each annoy
                index. A larger value will give more accurate results,
                but larger indexes.
        """
        self.indices = [annoy.AnnoyIndex(vector_size) for _ in
                        range(n_indices)]
        self.vector_size = vector_size
        self.n_trees = n_trees

    @property
    def n_indices(self):
        return len(self.indices)

    def _fill_build_index(self, index, data):
        """Fills one annoy index with data and builds it.

        Args:
            index (annoy.AnnoyIndex): Index to fill and build.
            data (numpy.array): Array with vectors, one per row.
        """
        logger.info("INSERTing {0} vectors.".format(data.shape[0]))
        # BUG FIX: `xrange` is Python 2 only; this module already relies on the
        # Python-3-only re.fullmatch, so `range` is the correct spelling.
        for i in range(data.shape[0]):
            index.add_item(i, data[i])
        logger.info("Building index.")
        index.build(self.n_trees)

    def fill_build(self, data):
        """Fills annoy indices with vectors in data and builds all indices.

        Args:
            data (numpy.array, list of numpy.array): If `self.n_indices` ==
                1, then `data` is a numpy.array with number of columns ==
                `self.vector_size`. Otherwise, `data` is a list of length
                equal to `self.n_indices` of `numpy.array`'s with the shape
                above.
        """
        assert (self.n_indices == 1 and isinstance(data, np.ndarray)) or (
            self.n_indices > 1 and isinstance(data, list) and all(map(
                lambda x: isinstance(x, np.ndarray), data)))
        logger.info("Fill {0} indices.".format(self.n_indices))
        if self.n_indices == 1:
            self._fill_build_index(self.indices[0], data)
        else:
            # NOTE(review): _parallel_fill_build is not defined in this module
            # chunk — presumably a module-level helper elsewhere; confirm.
            _parallel_fill_build(self, data)

    def get_n_items(self):
        """Gets a list of sizes of each index.

        Returns:
            res (list of ints): List of sizes of each index.
        """
        return [index.get_n_items() for index in self.indices]

    def get_nns_by_vector(self, vector, n_neighbors, n_index=0, search_k=-1,
                          include_distances=True):
        """Returns `n_neighbors` closest items of `vector`
        in index with number `n_index`.

        Args:
            vector (numpy.array): Vector which neighbors you want to find.
            n_neighbors (int): How many neighbors to find.
            n_index (int): In which index to search.
            search_k: The number of nodes to inspect during searching. A larger
                value will give more accurate results,
                but will take longer time to return.
            include_distances (bool): Whether to include distances or not.
                If True, it will return a 2 element tuple with two lists in it:
                the second one containing all corresponding distances.

        Returns:
            res (list or tuple of two lists): List of neigbors ids. If
                `include_distances` is True, then tuple of two lists.
        """
        res = self.indices[n_index].get_nns_by_vector(vector, n_neighbors,
                                                      search_k=search_k,
                                                      include_distances=include_distances)
        return res

    def get_item_vector(self, i, n_index=0):
        """Returns vector with number `i` from index `n_index`.

        Args:
            i (int): Id of vector.
            n_index: Number of index for searching.
        """
        return self.indices[n_index].get_item_vector(i)

    def save(self, fname):
        """Saves each sub-index to `fname + str(i)` where i is its slot number."""
        for i in range(self.n_indices):
            index = self.indices[i]
            fname_out = fname + str(i)
            logger.info("Save index #{0} to {1}".format(i, fname_out))
            index.save(fname_out)

    @staticmethod
    def __resolve_index_number(fname):
        # A saved file name ends in its slot number (see save()).
        match = re.fullmatch(r".*?(\d+)", fname)
        return int(match.group(1))

    def load(self, fname):
        """Loads every sub-index saved by save() back into its original slot."""
        index_fnames = glob.glob(fname + '*')
        assert len(index_fnames) == self.n_indices
        for index_fname in index_fnames:
            # BUG FIX: previously the glob result was discarded (overwritten with
            # [fname + '0']) and every file was loaded into self.indices[0];
            # route each file to the slot encoded in its trailing number.
            i = self.__resolve_index_number(index_fname)
            self.indices[i] = annoy.AnnoyIndex(self.vector_size)
            self.indices[i].load(index_fname)
| null | django/mapdrive/utils/putin_face_recognition/index.py | index.py | py | 4,923 | python | en | code | null | code-starcoder2 | 51 |
216731355 | import pymysql
class SQL :
    """Thin pymysql wrapper for a fixed seat-reservation table.

    SECURITY NOTE(review): every query below is built with str.format, so all
    arguments are interpolated directly into SQL (injection-prone, and string
    values must arrive pre-quoted by the caller, e.g. "'Soongsil Univ'").
    Switching to parameterized queries (cursor.execute(sql, params)) would be
    safer but changes what callers must pass — confirm all call sites first.
    """
    def __init__(self, host='192.168.137.172', user='ssu', password='',
                 db='ssudb', charset='utf8'):
        # Opens the connection eagerly; a cursor is kept for the object's lifetime.
        self.connection = pymysql.connect(host=host,
                                          user=user,
                                          password=password,
                                          db=db,
                                          charset=charset)
        self.cursor = self.connection.cursor()
    def test(self) :
        """Smoke test: run one hard-coded UPDATE and commit."""
        try:
            query = "UPDATE data_table SET data=98 WHERE seat_number=0 AND name='Soongsil Univ';"
            print(query)
            self.cursor.execute(query)
            self.connection.commit()
        except Exception as e :
            print(str(e))
    def insert(self, table, *values):
        """INSERT a 4-column row into *table*; expects exactly four values."""
        try:
            # NOTE(review): values are interpolated unquoted — see class docstring.
            query = "INSERT INTO "+table+" VALUES ({}, {}, {},{});".format(*values)
            print(query)
            self.cursor.execute(query)
            self.connection.commit()
        except Exception as e :
            print(str(e))
    def update(self, table, value1,value2, *conds):
        """UPDATE remainedtime/cond in *table* for the row matching (seat_number, name)."""
        try:
            query = "UPDATE "+table+" SET remainedtime={},cond={}".format(value1,value2)
            query += " WHERE seat_number={} AND name={};".format(*conds)
            print(query)
            self.cursor.execute(query)
            self.connection.commit()
        except Exception as e :
            print(str(e))
    def delete(self, table, *conds):
        """DELETE the row of *table* matching (seat_number, name)."""
        try:
            query = "DELETE FROM "+table
            query += " WHERE seat_number={} AND name={};".format(*conds)
            print(query)
            self.cursor.execute(query)
            self.connection.commit()
        except Exception as e :
            print(str(e))
    def select(self, table):
        """Return the first row of *table* (fetchone), or None on error."""
        try:
            query = "SELECT * FROM "+table+";"
            self.cursor.execute(query)
            return self.cursor.fetchone()
        except Exception as e :
            print(str(e))
    def select_where(self, table, where):
        """Return the first row of *table* matching the raw SQL condition *where*."""
        try:
            query = "SELECT * FROM {} WHERE {};".format(table, where)
            self.cursor.execute(query)
            return self.cursor.fetchone()
        except Exception as e :
            print(str(e))
| null | ServerSit/odr.py | odr.py | py | 2,282 | python | en | code | null | code-starcoder2 | 51 |
238164153 | '''
Requirments:
python &
> pip install pillow
> pip install matplotlib
> pip install numpy
Run:
> python task1.py path/to/image
'''
import PIL
from PIL import Image
import numpy as np
import itertools
import sys
import os
import matplotlib.pyplot as plt
import mylib
from mylib import BOX
# open image
image = Image.open(sys.argv[1])  # input image path is the first CLI argument
image = image.convert("L")  # convert to single-channel (grayscale) image
outDir = sys.argv[2]
if not os.path.exists(outDir):
    os.makedirs(outDir)

width, height = image.size
totalPixels = width * height

# save original image and its histogram
freq = image.histogram()
a = np.array(image)
plt.hist(a.ravel(), bins=256)
plt.ylabel('Probability')
plt.xlabel('Gray Level')
image.save(outDir+'/input.jpg')
plt.savefig(outDir+'/inputhist.svg')
plt.show()

centerX,centerY = (int(width/2),int(height/2))

# HISTOGRAM EQUALIZATION: equalize each of the four image quadrants independently
editableImage = image.load()
image = mylib.equalizeHistogram(image,editableImage,BOX(0,0,centerX,centerY))
image = mylib.equalizeHistogram(image,editableImage,BOX(centerX,0,width,centerY))
image = mylib.equalizeHistogram(image,editableImage,BOX(0,centerY,centerX,height))
# BUG FIX: this result was assigned to a misspelled variable ('iamge'),
# silently discarding the bottom-right quadrant's returned image.
image = mylib.equalizeHistogram(image,editableImage,BOX(centerX,centerY,width,height))

# save resultant image and histogram
image.save(outDir+'/output.jpg')
a = np.array(image)
plt.hist(a.ravel(), bins=256)
plt.savefig(outDir+'/outputhist.svg')
plt.show()
145270068 | import os
from telebot import types
import flask
from app.bot_handlers import bot
from app.config import settings
server = flask.Flask(__name__)
@server.route('/' + settings.TOKEN, methods=['POST'])
def get_message():
    """Telegram webhook endpoint: decode the raw update payload and hand it to the bot."""
    raw_body = flask.request.stream.read().decode('utf-8')
    update = types.Update.de_json(raw_body)
    bot.process_new_updates([update])
    return '!', 200
@server.route('/', methods=['GET'])
def index():
    """(Re-)register this Heroku app's public URL as the Telegram webhook."""
    bot.remove_webhook()
    webhook_url = 'https://{}.herokuapp.com/{}'.format(settings.APP_NAME, settings.TOKEN)
    bot.set_webhook(url=webhook_url)
    return 'Hello from Heroku!', 200
if __name__ == '__main__':
    # Drop any previously registered webhook before serving; Heroku supplies PORT.
    bot.remove_webhook()
    # bot.polling(none_stop=True) use to local run
    server.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
| null | run_server.py | run_server.py | py | 754 | python | en | code | null | code-starcoder2 | 51 |
214577347 | #Find and show the sum of all multiples of 3 or 5 less than 1000 (this number is 233168)
def sum_of_multiples(limit=1000):
    """Return the sum of all natural numbers below *limit* that are multiples of 3 or 5."""
    # Rewritten from a while-loop that shadowed the builtin `sum`.
    return sum(n for n in range(limit) if n % 3 == 0 or n % 5 == 0)


# Preserve the original script behavior: print the answer (233168 for limit=1000).
print(sum_of_multiples())
| null | Lezione1/Ex5.py | Ex5.py | py | 200 | python | en | code | null | code-starcoder2 | 51 |
370998504 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 14:39:29 2019
@author: em812
"""
import pdb
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
def univariate_tests(
        X, y, control='N2', test='ANOVA',
        comparison_type='multiclass',
        multitest_correction='fdr_by', fdr=0.05,
        n_jobs=-1):
    """
    Run one univariate statistical test per feature of X and correct the
    resulting p-values for multiple comparisons.

    Each feature is tested with one of 'ANOVA', 'Kruskal-Wallis',
    'Mann-Whitney' or 't-test'. The p-values are corrected across features
    with the statsmodels multitest methods.

    Parameters
    ----------
    X : array-like or pandas.DataFrame, shape=(n_samples, n_features)
        Feature matrix. NaNs per feature are dropped before testing.
    y : array-like, shape=(n_samples,)
        Group labels; must contain `control`.
    control : label of the control group in y.
    test : one of 'ANOVA', 'Kruskal-Wallis', 'Mann-Whitney', 't-test'.
    comparison_type : 'multiclass' (one test across all groups) or
        'binary_each_group' (each group tested against the control).
    multitest_correction : statsmodels multipletests method name, or None.
    fdr : false discovery rate for the correction.
    n_jobs : number of joblib workers for the per-feature tests.

    Returns
    -------
    stats, pvals, reject : pandas.DataFrame
        Test statistics, corrected p-values, and boolean rejection flags,
        indexed by feature (one column per comparison).
    """
    from scipy.stats import kruskal, mannwhitneyu, f_oneway, ttest_ind
    from functools import partial

    if not np.isin(control, np.array(y)):
        raise ValueError('control not found in the y array.')

    # NOTE(review): 'Wilkoxon' is not among the documented test names
    # (ANOVA / Kruskal-Wallis / Mann-Whitney / t-test), so only 't-test'
    # ever takes this branch — confirm the intended spelling with callers.
    if test.startswith('Wilkoxon') or test == 't-test':
        if comparison_type=='multiclass' and np.unique(y).shape[0]>2:
            raise ValueError(
                """
            The Wilkoxon rank sum test cannot be used to compare between
            more than two groups. Use a different test or the
            binary_each_dose comparison_method instead.
                """)
        else:
            comparison_type = 'binary_each_group'

    if not isinstance(X, pd.DataFrame):
        X = pd.DataFrame(X)

    # Local function for parallel processing of univariate tests for each drug
    def stats_test(X, y, test, **kwargs):
        from joblib import Parallel, delayed

        def _one_fit(ift, samples, **kwargs):
            # Drop NaNs within each group; skip groups that are all-NaN.
            samples = [s[~np.isnan(s)] for s in samples if not all(np.isnan(s))]
            if len(samples)<2:
                return ift, (np.nan, np.nan)
            return ift, test(*samples, **kwargs)

        parallel = Parallel(n_jobs=n_jobs, verbose=True)
        func = delayed(_one_fit)

        # BUG FIX: this call used to sit in a bare `except: pdb.set_trace()`
        # block, which swallowed any error and then crashed on the undefined
        # `res` below; let exceptions propagate instead.
        res = parallel(
            func(ift, [sample[:,ift]
                       for sample in [np.array(X[y==iy]) for iy in np.unique(y)]],
                 **kwargs)
            for ift in range(X.shape[1]))

        order = [ift for ift,(r,p) in res]
        rs = np.array([r for ift,(r,p) in res])
        ps = np.array([p for ift,(r,p) in res])

        return rs[order], ps[order]

    # Create the function that will test every feature of a given drug
    if test == 'ANOVA':
        func = partial(stats_test, test=f_oneway)
    elif test.startswith('Kruskal'):
        func = partial(stats_test, test=kruskal, nan_policy='raise')
    elif test.startswith('Mann-Whitney'):
        func = partial(stats_test, test=mannwhitneyu)
    if test == 't-test':
        func = partial(stats_test, test=ttest_ind)

    # For each comparison get the statistic and p-value of every feature
    if comparison_type=='multiclass':
        stats, pvals = func(X, y)
        pvals = pd.DataFrame(pvals.T, index=X.columns, columns=[test])
        stats = pd.DataFrame(stats.T, index=X.columns, columns=[test])

    elif comparison_type=='binary_each_group':
        groups = np.unique(y[y!=control])

        pvals=[]
        stats=[]
        for igrp, grp in enumerate(groups):
            mask = np.isin(y,[control, grp])
            _stats, _pvals = func(X[mask], y[mask])
            pvals.append(_pvals)
            stats.append(_stats)
        pvals = pd.DataFrame(np.array(pvals).T, index=X.columns, columns=groups)
        stats = pd.DataFrame(np.array(stats).T, index=X.columns, columns=groups)
    else:
        raise ValueError('Comparison type not recognised.')

    reject, pvals = _multitest_correct(pvals, multitest_correction, fdr)

    return stats, pvals, reject
def get_effect_sizes(
        X, y, control='N2',
        test='ANOVA', comparison_type='multiclass',
        n_jobs=-1):
    """
    Compute per-feature effect sizes for the comparison defined by y.

    The effect size matches the statistical test: eta-squared for 'ANOVA',
    Cliff's delta for 'Mann-Whitney'/'Kruskal-Wallis', Cohen's d for 't-test'.

    Parameters
    ----------
    X : array-like or pandas.DataFrame, shape=(n_samples, n_features)
        Feature matrix. NaNs per feature are dropped before computing.
    y : array-like, shape=(n_samples,)
        Group labels; must contain `control`.
    control : label of the control group in y. The default is 'N2'.
    test : 'ANOVA', 'Kruskal-Wallis', 'Mann-Whitney' or 't-test'.
        The default is 'ANOVA'.
    comparison_type : 'multiclass' (one effect size across all groups,
        ANOVA only) or 'binary_each_group' (control vs each group).
    n_jobs : unused here; kept for signature symmetry with univariate_tests.

    Raises
    ------
    ValueError
        If `control` is missing from y, or a two-sample effect size is
        requested for more than two groups in 'multiclass' mode.

    Returns
    -------
    effect : pandas.DataFrame
        Effect sizes indexed by feature; one column per comparison.
    """
    if not np.isin(control, np.array(y)):
        raise ValueError('control not found in the comparison_variable array.')

    if comparison_type=='multiclass' and np.unique(y).shape[0]>2:
        if test.startswith('Mann') or test == 't-test':
            raise ValueError(
                """
            The Mann-Whitney test cannot be used to compare between
            more than two groups. Use a different test or the
            binary_each_dose comparison_method instead.
                """)

    if not isinstance(X, pd.DataFrame):
        X = pd.DataFrame(X)

    groups = np.unique(y[y!=control])

    # For each dose get significant features
    if test=='ANOVA' and comparison_type=='multiclass':
        effect = pd.Series(index=X.columns)
        samples = [x for ix,x in X.groupby(by=y)]
        for ft in X.columns:
            # NaNs are dropped per feature within each group.
            effect[ft] = eta_squared_ANOVA(*[s.loc[~s[ft].isna(), ft] for s in samples])
        effect = pd.DataFrame(effect, columns=['_'.join([test,'effect_size'])])
    else:
        if test=='Mann-Whitney' or test=='Kruskal-Wallis':
            func = cliffs_delta
        elif test=='ANOVA':
            func = eta_squared_ANOVA
        elif test=='t-test':
            func = cohen_d
        effect = pd.DataFrame(index=X.columns, columns=groups)
        for igrp, grp in enumerate(groups):
            mask = np.isin(y,[control, grp])
            # NOTE(review): groupby(by=y) with the full-length y on the masked
            # X[mask] only works if y is an index-aligned pandas Series;
            # for a plain array y[mask] may be intended — confirm callers.
            samples = [x for ix,x in X[mask].groupby(by=y)]
            for ft in X.columns:
                effect.loc[ft, grp] = func(*[s.loc[~s[ft].isna(), ft] for s in samples])

    return effect
#%% Effect size functions
def cohen_d(x, y):
    """Cohen's d effect size for a two-sample t-test (pooled SD, ddof=1, NaN-aware)."""
    from numpy import nanstd, nanmean, sqrt

    n_x, n_y = len(x), len(y)
    dof = n_x + n_y - 2
    pooled_var = ((n_x - 1) * nanstd(x, ddof=1) ** 2
                  + (n_y - 1) * nanstd(y, ddof=1) ** 2) / dof
    return (nanmean(x) - nanmean(y)) / sqrt(pooled_var)
def eta_squared_ANOVA(*args):
    """Eta-squared effect size for ANOVA: between-group SS divided by total SS."""
    return float(__ss_between_(*args) / __ss_total_(*args))
def cliffs_delta(lst1, lst2):
    """Cliff's delta effect size: (#pairs with lst1 > lst2 minus #pairs with
    lst1 < lst2) divided by the total number of pairs. Ranges in [-1, 1]."""
    m, n = len(lst1), len(lst2)
    ordered_one = sorted(lst1)
    ordered_two = sorted(lst2)
    j = more = less = 0
    idx = 0
    while idx < m:
        # Process each run of equal values in lst1 at once.
        x = ordered_one[idx]
        repeats = 1
        while idx + repeats < m and ordered_one[idx + repeats] == x:
            repeats += 1
        idx += repeats
        while j < n and ordered_two[j] < x:
            j += 1
        more += j * repeats
        while j < n and ordered_two[j] == x:
            j += 1
        less += (n - j) * repeats
    return (more - less) / (m * n)
def __concentrate_(*args):
    """Flatten the input list-like arrays into a single 1-D numpy vector."""
    arrays = [np.asarray(a) for a in args]
    return np.hstack(np.concatenate(arrays))
def __ss_total_(*args):
    """Total sum of squares of all samples about the grand mean."""
    vec = __concentrate_(*args)
    deviations = vec - np.mean(vec)
    return sum(deviations ** 2)
def __ss_between_(*args):
    """Between-group sum of squares about the grand mean."""
    grand_mean = np.mean(__concentrate_(*args))
    return sum(len(group) * (np.mean(group) - grand_mean) ** 2 for group in args)
def _runs(lst):
    """Yield (count, value) pairs for each run of consecutive equal values in *lst*.

    BUG FIX: the previous version raised NameError on an empty input (it
    unconditionally yielded loop variables after the loop); an empty input
    now yields nothing.
    """
    count = 0
    current = None
    for j, value in enumerate(lst):
        if j == 0:
            current = value
        elif value != current:
            yield count, current
            current = value
            count = 0
        count += 1
    if count:
        yield count, current
#%% Correct for multiple comparisons
def _multitest_correct(pvals, multitest_method, fdr):
    """
    Multiple comparisons correction of pvalues from univariate tests.
    Ignores nan values.
    Deals with two options:
        - 1D array of pvalues (one comparison per feature)
        - 2D array of pvalues (multiple comparisons per feature)

    Parameters
    ----------
    pvals : pandas series shape=(n_features,) or
            pandas dataframe shape=(n_features, n_doses)
        The p-values from all the statistical tests done for a single drug.
    multitest_method : string or None
        The method to use in statsmodels.stats.multitest.multipletests;
        None skips correction and simply thresholds at fdr.
    fdr : float
        False discovery rate.

    Returns
    -------
    c_reject : same shape as pvals
        Flags indicating rejected null hypotheses after correction.
    c_pvals : same shape as pvals
        The corrected p-values (NaNs preserved where input was NaN).
    """
    if multitest_method is None:
        return pvals<fdr, pvals

    if np.all(pd.isnull(pvals.values)):
        return pvals, pvals

    # Imported lazily so the no-correction path above works without statsmodels.
    from statsmodels.stats.multitest import multipletests

    # Mask nans in pvalues
    if len(pvals.shape) == 1:
        mask = ~pd.isnull(pvals.values)
    else:
        mask = ~pd.isnull(pvals.values.reshape(-1))

    # Initialize array with corrected pvalues and reject hypothesis flags
    c_reject = np.ones(mask.shape)*np.nan
    c_pvals = np.ones(mask.shape)*np.nan

    # BUG FIX: removed a bare `except: pdb.set_trace()` wrapper that swallowed
    # errors and then silently returned all-NaN results; let errors propagate.
    c_reject[mask], c_pvals[mask], _, _ = multipletests(
        pvals.values.reshape(-1)[mask], alpha=fdr, method=multitest_method,
        is_sorted=False, returnsorted=False
        )

    if len(pvals.shape) == 2:
        # When multiple comparisons per feature, reshape the corrected arrays
        c_reject = c_reject.reshape(pvals.shape)
        c_pvals = c_pvals.reshape(pvals.shape)
        # Convert the corrected pvals and the flags array to pandas dataframes
        # and add feature names as index
        c_pvals = pd.DataFrame(c_pvals, index=pvals.index, columns=pvals.columns)
        c_reject = pd.DataFrame(c_reject, index=pvals.index, columns=pvals.columns)
    else:
        # Convert the corrected pvals and the flags array to pandas series and
        # add feature names as index
        c_pvals = pd.Series(c_pvals, index=pvals.index)
        c_reject = pd.Series(c_reject, index=pvals.index)

    return c_reject, c_pvals
630205264 | import pickle
import numpy as np
with open('./bangalore/bangalore.pickle','rb') as f:
model = pickle.load(f)
with open('./bangalore/locations.txt','r') as file:
locs = file.read()
l = locs.split(',')
X = np.array(l)
def predict_price(location,sqft,bath,balcony,bhk):
loc_ind = np.where(X == location)[0][0]
x = np.zeros(len(X))
x[0] = sqft
x[1] = bath
x[2] = balcony
x[3] = bhk
if loc_ind>=0:
x[loc_ind] = 1
return model.predict([x])[0]
| null | bangalore/usethis.py | usethis.py | py | 503 | python | en | code | null | code-starcoder2 | 51 |
244013969 | import torch
from torch.nn import Linear
from torch_geometric.nn import TransformerConv
class GraphAttentionEmbedding(torch.nn.Module):
def __init__(self, in_channels, out_channels, msg_dim, time_enc):
super(GraphAttentionEmbedding, self).__init__()
self.time_enc = time_enc
edge_dim = msg_dim + time_enc.out_channels
self.conv = TransformerConv(in_channels, out_channels // 2, heads=2,
dropout=0.1, edge_dim=edge_dim)
def forward(self, x, last_update, edge_index, t, msg):
rel_t = last_update[edge_index[0]] - t
rel_t_enc = self.time_enc(rel_t.to(x.dtype))
edge_attr = torch.cat([rel_t_enc, msg], dim=-1)
return self.conv(x, edge_index, edge_attr)
class LinkPredictor(torch.nn.Module):
def __init__(self, in_channels):
super(LinkPredictor, self).__init__()
self.lin_src = Linear(in_channels, in_channels)
self.lin_dst = Linear(in_channels, in_channels)
self.lin_final = Linear(in_channels, 1)
def forward(self, z_src, z_dst):
h = self.lin_src(z_src) + self.lin_dst(z_dst)
h = h.relu()
return self.lin_final(h) | null | src/layers/gnn.py | gnn.py | py | 1,188 | python | en | code | null | code-starcoder2 | 51 |
397421462 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'lire' de la commande 'messages'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.communication.mudmail import RECU, ENVOYE, BROUILLON, ARCHIVE
class PrmLire(Parametre):
"""Commande 'messages lire'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "lire", "read")
self.schema = "(<flag_mail>) (<id_mail>)"
self.aide_courte = "lit un mudmail"
self.aide_longue = \
"Cette sous-commande affiche le contenu d'un message. L'id " \
"correspond à celui affiché dans la commande %messages:lister% " \
"pour le même flag de filtre."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
mails = type(self).importeur.communication.mails
masque_flag = dic_masques["flag_mail"]
if masque_flag:
flag = masque_flag.flag
if flag == "recus":
mails = mails.get_mails_pour(personnage, RECU)
elif flag == "brouillons":
mails = mails.get_mails_pour(personnage, BROUILLON)
elif flag == "archives":
mails = mails.get_mails_pour(personnage, ARCHIVE)
elif flag == "envoyes":
mails = mails.get_mails_pour(personnage, ENVOYE)
else:
mails = mails.get_mails_pour(personnage, RECU)
mails = [mail for mail in mails if not mail.lu]
if dic_masques["id_mail"] is None and mails == []:
personnage << "|att|Vous n'avez aucun nouveau message.|ff|"
else:
if dic_masques["id_mail"] is None:
num = max(mails,key=lambda mail : mail.date)
else:
num = dic_masques["id_mail"].id_mail
i = 1
r_mail = None
for mail in mails:
r_mail = mail
if i == num:
break
i += 1
if r_mail is None:
personnage << "|err|Aucun message ne correspond à ce " \
"numéro.|ff|"
else:
personnage << r_mail.afficher()
r_mail.lu = True
| null | src/primaires/communication/commandes/messages/lire.py | lire.py | py | 3,878 | python | en | code | null | code-starcoder2 | 51 |
449859332 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from pandas import ExcelWriter
from numpy import interp
import math
def Load_results1(instance,i,n,type_generator):
    '''
    Load the period-dependent results of the solved model into dataframes and
    write them all to a single excel workbook.

    :param instance: The instance of the project resolution created by PYOMO.
    :param i: First suffix of the output file name
        ('Results/Results_<i>_<n>.xls').  NOTE(review): this parameter is
        shadowed by loop variables further down; it is only used to build
        ``path``.
    :param n: Second suffix of the output file name.
    :param type_generator: 'Fix' keeps the generator integer decision as
        declared on the instance (MILP formulation only); any other value
        reads the solved values.

    :return: A list ``Data`` with, in order: NPC, Scenario_Cost,
        Project_Data, Scenarios, Generator_Data, Scenario_Information,
        Data_Renewable, Battery_Data.
    '''
    path = 'Results/Results' + '_' + str(i) + '_' + str(n) + '.xls'
    writer = ExcelWriter(path, engine='xlsxwriter')

    # ---- Scalar set sizes and renewable-source parameters ----
    Number_Scenarios = int(instance.Scenarios.extract_values()[None])
    Number_Periods = int(instance.Periods.extract_values()[None])
    Number_Renewable_Source = int(instance.Renewable_Source.extract_values()[None])
    Number_Generator = int(instance.Generator_Type.extract_values()[None])
    Renewable_Nominal_Capacity = instance.Renewable_Nominal_Capacity.extract_values()
    Inverter_Efficiency_Renewable = instance.Renewable_Inverter_Efficiency.extract_values()
    Renewable_Invesment_Cost = instance.Renewable_Invesment_Cost.extract_values()
    OyM_Renewable = instance.Maintenance_Operation_Cost_Renewable.extract_values()
    Renewable_Units = instance.Renewable_Units.get_values()
    Fix_Invesment = instance.Fix_Invesment_PV.extract_values()
    Integer_PV = instance.Integer_PV.get_values()
    # One column per renewable source; rows are the sizing/cost indicators.
    Data_Renewable = pd.DataFrame()
    for r in range(1, Number_Renewable_Source + 1):
        Name = 'Source ' + str(r)
        Data_Renewable.loc['Units', Name] = Renewable_Units[r]
        Data_Renewable.loc['Nominal Capacity (W)', Name] = Renewable_Nominal_Capacity[r]
        Data_Renewable.loc['Inverter Efficiency', Name] = Inverter_Efficiency_Renewable[r]
        Data_Renewable.loc['Investment Cost (USD/W)', Name] = Renewable_Invesment_Cost[r]
        Data_Renewable.loc['OyM', Name] = OyM_Renewable[r]
        Data_Renewable.loc['Fix invesment', Name] = Fix_Invesment[r]
        Data_Renewable.loc['Investment Decision', Name] = Integer_PV[r]
        # Investment = fixed cost (if the integer decision is on) + variable cost.
        Data_Renewable.loc['Invesment (USD)', Name] = Fix_Invesment[r]*Integer_PV[r] + Renewable_Units[r]*Renewable_Nominal_Capacity[r]*Renewable_Invesment_Cost[r]
        Data_Renewable.loc['OyM Cost (USD)', Name] = Renewable_Units[r]*Renewable_Nominal_Capacity[r]*Renewable_Invesment_Cost[r]*OyM_Renewable[r]
        Data_Renewable.loc['Total Nominal Capacity (W)', Name] = Data_Renewable.loc['Nominal Capacity (W)', Name]*Data_Renewable.loc['Units', Name]
    Data_Renewable.to_excel(writer, sheet_name='Data Renewable')

    # ---- Energy time series (one row group per scenario) ----
    columns = []
    for i in range(1, Number_Scenarios+1):  # NOTE(review): shadows parameter 'i'
        columns.append('Scenario_'+str(i))
    # Energy Time Series
    Scenarios = pd.DataFrame()
    # Number of series per scenario; +1 when lost load is tracked.
    Number = 7
    if instance.Lost_Load_Probability > 0:
        Lost_Load = instance.Lost_Load.get_values()
        Number += 1
    Renewable_Energy_1 = instance.Renewable_Energy_Production.extract_values()
    Renewable_Units = instance.Renewable_Units.get_values()
    Renewable_Energy = {}
    # Aggregate renewable production over all sources (after inverter and units).
    for s in range(1, Number_Scenarios + 1):
        for t in range(1, Number_Periods+1):
            foo = []
            for r in range(1,Number_Renewable_Source+1 ):
                foo.append((s,r,t))
            Renewable_Energy[s,t] = sum(Renewable_Energy_1[s,r,t]*Data_Renewable.loc['Inverter Efficiency', 'Source ' + str(r)]
                                        *Data_Renewable.loc['Units', 'Source ' + str(r)] for s,r,t in foo)
    Battery_Flow_Out = instance.Energy_Battery_Flow_Out.get_values()
    Battery_Flow_in = instance.Energy_Battery_Flow_In.get_values()
    Curtailment = instance.Energy_Curtailment.get_values()
    Energy_Demand = instance.Energy_Demand.extract_values()
    SOC = instance.State_Of_Charge_Battery.get_values()
    Generator_Energy = instance.Generator_Energy.get_values()
    Total_Generator_Energy = {}
    # Aggregate generator output over all generator types.
    for s in range(1, Number_Scenarios + 1):
        for t in range(1, Number_Periods+1):
            foo = []
            for g in range(1,Number_Generator+1):
                foo.append((s,g,t))
            Total_Generator_Energy[s,t] = sum(Generator_Energy[i] for i in foo)
    # (scenario, period) key lists, one list per scenario.
    Scenarios_Periods = [[] for i in range(Number_Scenarios)]
    for i in range(0,Number_Scenarios):
        for j in range(1, Number_Periods+1):
            Scenarios_Periods[i].append((i+1,j))
    foo=0
    for i in columns:
        Information = [[] for i in range(Number)]
        for j in Scenarios_Periods[foo]:
            Information[0].append(Renewable_Energy[j])
            Information[1].append(Battery_Flow_Out[j])
            Information[2].append(Battery_Flow_in[j])
            Information[3].append(Curtailment[j])
            Information[4].append(Energy_Demand[j])
            Information[5].append(SOC[j])
            Information[6].append(Total_Generator_Energy[j])
            if instance.Lost_Load_Probability > 0:
                Information[7].append(Lost_Load[j])
        Scenarios=Scenarios.append(Information)
        foo+=1
    # Row labels, in the same order the Information lists were appended.
    index=[]
    for j in range(1, Number_Scenarios+1):
        index.append('Renewable Energy '+str(j) + ' (Wh)')
        index.append('Battery Flow Out '+str(j) + ' (Wh)')
        index.append('Battery Flow in '+str(j) + ' (Wh)')
        index.append('Curtailment '+str(j) + ' (Wh)')
        index.append('Energy Demand '+str(j) + ' (Wh)')
        index.append('SOC '+str(j) + ' (Wh)')
        index.append('Gen energy '+str(j) + ' (Wh)')
        if instance.Lost_Load_Probability > 0:
            index.append('Lost Load '+str(j) + ' (Wh)')
    Scenarios.index= index
    # Creation of an index starting in the 'model.StartDate' value with a frequency step equal to 'model.Delta_Time'.
    # NOTE(review): pd.DatetimeIndex(start=...) was removed in pandas >= 1.0;
    # this code requires an old pandas (pd.date_range would be the modern call).
    if instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1.0) : # if the step is in hours and minutes
        foo = str(instance.Delta_Time()) # transform the number into a string
        hour = foo[0] # Extract the first character
        minutes = str(int(float(foo[1:3])*60)) # Extract the last two characters
        columns = pd.DatetimeIndex(start=instance.StartDate(),
                                   periods=instance.Periods(),
                                   freq=(hour + 'h'+ minutes + 'min')) # Creation of an index with a start date and a frequency
    elif instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1): # if the step is in hours
        columns = pd.DatetimeIndex(start=instance.StartDate(),
                                   periods=instance.Periods(),
                                   freq=(str(instance.Delta_Time()) + 'h')) # Creation of an index with a start date and a frequency
    else: # if the step is in minutes
        columns = pd.DatetimeIndex(start=instance.StartDate(),
                                   periods=instance.Periods(),
                                   freq=(str(int(instance.Delta_Time()*60)) + 'min'))# Creation of an index with a start date and a frequency
    Scenarios.columns = columns
    Scenarios = Scenarios.transpose()
    Scenarios.to_excel(writer, sheet_name='Time Series') # Creating an excel file with the values of the variables that are in function of the periods

    # ---- Scenario weights ----
    columns = [] # TODO(review): was '# arreglar varios columns' (Spanish: fix the repeated 'columns' reuse)
    for i in range(1, Number_Scenarios+1):
        columns.append('Scenario '+str(i))
    Scenario_information =[[] for i in range(Number_Scenarios)]
    Scenario_Weight = instance.Scenario_Weight.extract_values()
    for i in range(1, Number_Scenarios+1):
        Scenario_information[i-1].append(Scenario_Weight[i])
    Scenario_Information = pd.DataFrame(Scenario_information,index=columns)
    Scenario_Information.columns=['Scenario Weight']
    Scenario_Information = Scenario_Information.transpose()
    Scenario_Information.to_excel(writer, sheet_name='Scenario Information')

    # ---- Per-source renewable energy time series (total and per-unit) ----
    Renewable_Energy = pd.DataFrame()
    for s in range(1, Number_Scenarios + 1):
        for r in range(1, Number_Renewable_Source + 1):
            column = 'Renewable ' + str(s) + ' ' + str(r) + ' (Wh)'
            column2 = 'Renewable unit ' + str(s) + ' ' + str(r) + ' (Wh)'
            Energy = []
            Unit_Energy = []
            for t in range(1, Number_Periods + 1):
                Source = 'Source ' + str(r)
                Energy.append(Renewable_Energy_1[s,r,t]*Data_Renewable.loc['Inverter Efficiency', Source]
                              *Data_Renewable.loc['Units', Source])
                Unit_Energy.append(Renewable_Energy_1[s,r,t])
            Renewable_Energy[column] = Energy
            Renewable_Energy[column2] = Unit_Energy
    Renewable_Energy.index = Scenarios.index
    Renewable_Energy.to_excel(writer, sheet_name='Renewable Energy Time Series')

    # ---- Generator parameters and derived marginal costs ----
    Generator_Data = pd.DataFrame()
    if instance.formulation == 'LP':
        Generator_Efficiency = instance.Generator_Efficiency.extract_values()
        Low_Heating_Value = instance.Low_Heating_Value.extract_values()
        Fuel_Cost = instance.Fuel_Cost.extract_values()
        Generator_Invesment_Cost = instance.Generator_Invesment_Cost.extract_values()
        Generator_Nominal_Capacity = instance.Generator_Nominal_Capacity.get_values()
        Maintenance_Operation_Cost_Generator = instance.Maintenance_Operation_Cost_Generator.extract_values()
        for g in range(1, Number_Generator + 1):
            Name = 'Generator ' + str(g)
            Generator_Data.loc['Generator Efficiency',Name] = Generator_Efficiency[g]
            Generator_Data.loc['Low Heating Value (Wh/l)',Name] = Low_Heating_Value[g]
            Generator_Data.loc['Fuel Cost (USD/l)',Name] = Fuel_Cost[g]
            Generator_Data.loc['Generator Invesment Cost (USD/W)',Name] = Generator_Invesment_Cost[g]
            Generator_Data.loc['Generator Nominal Capacity (W)',Name] = Generator_Nominal_Capacity[g]
            Generator_Data.loc['OyM Generator', Name] = Maintenance_Operation_Cost_Generator[g]
            Generator_Data.loc['Invesment Generator (USD)', Name] = Generator_Invesment_Cost[g]*Generator_Nominal_Capacity[g]
            Generator_Data.loc['OyM Cost (USD)', Name] = Generator_Data.loc['Invesment Generator (USD)', Name]*Generator_Data.loc['OyM Generator', Name]
            # Marginal cost = fuel price / (efficiency * heating value).
            Generator_Data.loc['Marginal Cost (USD/Wh)', Name] = (Generator_Data.loc['Fuel Cost (USD/l)',Name]/
                               (Generator_Data.loc['Generator Efficiency',Name]*Generator_Data.loc['Low Heating Value (Wh/l)',Name]))
            Generator_Data.loc['Marginal Cost (USD/Wh)', Name] = round(Generator_Data.loc['Marginal Cost (USD/Wh)', Name],3)
    if instance.formulation == 'MILP':
        Generator_Min_Out_Put = instance.Generator_Min_Out_Put.extract_values()
        Generator_Efficiency = instance.Generator_Efficiency.extract_values()
        Low_Heating_Value = instance.Low_Heating_Value.extract_values()
        Fuel_Cost = instance.Fuel_Cost.extract_values()
        Generator_Invesment_Cost = instance.Generator_Invesment_Cost.extract_values()
        Cost_Increase = instance.Cost_Increase.extract_values()
        Generator_Nominal_Capacity = instance.Generator_Nominal_Capacity.extract_values()
        # 'Fix' keeps the declared integer decision; otherwise use solved values.
        if type_generator == 'Fix':
            Integer_generator = instance.Integer_generator
        else:
            Integer_generator = instance.Integer_generator.get_values()
        Maintenance_Operation_Cost_Generator = instance.Maintenance_Operation_Cost_Generator.extract_values()
        for g in range(1, Number_Generator + 1):
            Name = 'Generator ' + str(g)
            Generator_Data.loc['Generator Nominal Capacity (W)',Name] = Generator_Nominal_Capacity[g]
            Generator_Data.loc['Generator Min Out Put',Name] = Generator_Min_Out_Put[g]
            Generator_Data.loc['Generator Efficiency',Name] = Generator_Efficiency[g]
            Generator_Data.loc['Low Heating Value (Wh/l)',Name] = Low_Heating_Value[g]
            Generator_Data.loc['Fuel Cost (USD/l)',Name] = Fuel_Cost[g]
            Generator_Data.loc['Generator Invesment Cost (USD/W)',Name] = Generator_Invesment_Cost[g]
            Generator_Data.loc['Cost Increase',Name] = Cost_Increase[g]
            # M_1: full-load marginal cost; start cost and M_2 (partial-load
            # marginal cost) implement the two-segment fuel curve.
            M_1 = Fuel_Cost[g]/(Generator_Efficiency[g]*Low_Heating_Value[g])
            M_1 = round(M_1,3)
            Generator_Data.loc['Marginal cost Full load (USD/Wh)',Name] = M_1
            Generator_Data.loc['Start Cost Generator (USD)',Name] = M_1*Generator_Nominal_Capacity[g]*Cost_Increase[g]
            Generator_Data.loc['Start Cost Generator (USD)',Name] = round(Generator_Data.loc['Start Cost Generator (USD)',Name],3)
            M_2 = (M_1*Generator_Nominal_Capacity[g]-Generator_Data.loc['Start Cost Generator (USD)',Name])/ \
                                           Generator_Nominal_Capacity[g]
            Generator_Data.loc['Marginal cost Partial load (USD/Wh)',Name] = round(M_2,3)
            Generator_Data.loc['Number of Generators', Name] = Integer_generator[g]
            Generator_Data.loc['Maintenance Operation Cost Generator', Name] = Maintenance_Operation_Cost_Generator[g]
            Generator_Data.loc['Invesment Generator (USD)', Name] = (Generator_Nominal_Capacity[g]
                              *Integer_generator[g]*Generator_Invesment_Cost[g])
            Generator_Data.loc['OyM Cost (USD)', Name] = (Generator_Nominal_Capacity[g]*Integer_generator[g]
                              *Generator_Invesment_Cost[g]
                              *Maintenance_Operation_Cost_Generator[g])
    Generator_Data.to_excel(writer, sheet_name='Generator Data')

    # ---- Project-level scalars and capital recovery factor ----
    Project_Data = pd.Series()
    Project_Data['Net Present Cost (USD)'] = instance.ObjectiveFuntion.expr()
    Project_Data['Discount Rate'] = instance.Discount_Rate.value
    Project_Data['Proyect Life Time (years)'] = instance.Years.value
    Project_Data['Value of lost load (USD/Wh)'] = instance.Value_Of_Lost_Load.value
    # CRF = d(1+d)^Y / ((1+d)^Y - 1)
    a = Project_Data['Discount Rate']*((1+Project_Data['Discount Rate'])**Project_Data['Proyect Life Time (years)'])
    b = ((1 + Project_Data['Discount Rate'])**Project_Data['Proyect Life Time (years)']) - 1
    Project_Data['Capital Recovery Factor'] = round(a/b,3)
    if instance.Curtailment_Unitary_Cost > 0:
        Project_Data['Curtailment Unitary Cost (USD/Wh)'] = instance.Curtailment_Unitary_Cost
    Project_Data.to_excel(writer, sheet_name='Project Data')

    # ---- Battery parameters and wear (reposition) cost ----
    Battery_Nominal_Capacity = instance.Battery_Nominal_Capacity.get_values()[None]
    PriceBattery = instance.Battery_Invesment_Cost.value
    Battery_Electronic_Invesmente_Cost = instance.Battery_Electronic_Invesmente_Cost.value
    OM_Bat = instance.Maintenance_Operation_Cost_Battery.value
    SOC_1 = instance.Battery_Initial_SOC.value
    Ch_bat_eff = instance.Charge_Battery_Efficiency.value
    Dis_bat_eff = instance.Discharge_Battery_Efficiency.value
    Deep_of_Discharge = instance.Deep_of_Discharge.value
    Battery_Cycles = instance.Battery_Cycles.value
    Fix_Invesment_Battery = instance.Fix_Invesment_Battery.extract_values()[None]
    Integer_Battery = instance.Integer_Battery.get_values()[None]
    # Cost per Wh of throughput, excluding electronics, spread over the cycle life.
    Unitary_Battery_Cost = PriceBattery - Battery_Electronic_Invesmente_Cost
    Battery_Repostion_Cost = Unitary_Battery_Cost/(Battery_Cycles*2*(1-Deep_of_Discharge))
    Battery_Repostion_Cost = round(Battery_Repostion_Cost, 3)
    Battery_Data = pd.DataFrame()
    Battery_Data.loc['Nominal Capacity (Wh)','Battery'] = Battery_Nominal_Capacity
    Battery_Data.loc['Unitary Invesment Cost (USD/Wh)','Battery'] = PriceBattery
    Battery_Data.loc['Unitary invesment cost electronic equipment (USD/Wh)','Battery'] = Battery_Electronic_Invesmente_Cost
    Battery_Data.loc['OyM','Battery'] = OM_Bat
    Battery_Data.loc['Initial State of Charge','Battery'] = SOC_1
    Battery_Data.loc['Charge efficiency','Battery'] = Ch_bat_eff
    Battery_Data.loc['Discharge efficiency','Battery'] = Dis_bat_eff
    Battery_Data.loc['Deep of Discharge','Battery'] = Deep_of_Discharge
    Battery_Data.loc['Battery Cycles','Battery'] = Battery_Cycles
    Battery_Data.loc['Unitary Battery Reposition Cost (USD/Wh)','Battery'] = Battery_Repostion_Cost
    Battery_Data.loc['Fix invesment','Battery'] = Fix_Invesment_Battery
    Battery_Data.loc['Investment Decision','Battery'] = Integer_Battery
    Battery_Data.loc['Invesment Cost (USD)','Battery'] = Fix_Invesment_Battery*Integer_Battery + Battery_Nominal_Capacity*PriceBattery
    Battery_Data.loc['OyM Cost (USD)', 'Battery'] = Battery_Nominal_Capacity*PriceBattery*OM_Bat
    Battery_Data.to_excel(writer, sheet_name='Battery Data')

    # ---- Per-period generator energy and fuel cost ----
    Generator_Time_Series = pd.DataFrame()
    if instance.formulation == 'LP':
        for s in range(1, Number_Scenarios + 1):
            for g in range(1, Number_Generator + 1):
                column_1 = 'Energy Generator ' + str(s) + ' ' + str(g) + ' (Wh)'
                column_2 = 'Fuel Cost ' + str(s) + ' ' + str(g) + ' (USD)'
                Name = 'Generator ' + str(g)
                for t in range(1, Number_Periods + 1):
                    Generator_Time_Series.loc[t,column_1] = Generator_Energy[s,g,t]
                    Generator_Time_Series.loc[t,column_2] = (Generator_Time_Series.loc[t,column_1]
                                                             *Generator_Data.loc['Marginal Cost (USD/Wh)', Name])
    if instance.formulation == 'MILP':
        Generator_Integer = instance.Generator_Energy_Integer.get_values()
        for s in range(1, Number_Scenarios + 1):
            for g in range(1, Number_Generator + 1):
                column_1 = 'Energy Generator ' + str(s) + ' ' + str(g) + ' (Wh)'
                column_2 = 'Integer Generator ' + str(s) + ' ' + str(g)
                column_3 = 'Fuel Cost ' + str(s) + ' ' + str(g) + ' (USD)'
                Name = 'Generator ' + str(g)
                for t in range(1, Number_Periods + 1):
                    Generator_Time_Series.loc[t,column_1] = Generator_Energy[s,g,t]
                    Generator_Time_Series.loc[t,column_2] = Generator_Integer[s,g,t]
                    # Two-segment fuel curve: start cost per committed unit + partial-load marginal cost.
                    Generator_Time_Series.loc[t,column_3] = (Generator_Integer[s,g,t]*Generator_Data.loc['Start Cost Generator (USD)',Name]
                                                             + Generator_Energy[s,g,t]*Generator_Data.loc['Marginal cost Partial load (USD/Wh)',Name] )
    Generator_Time_Series.index = Scenarios.index
    Generator_Time_Series.to_excel(writer, sheet_name='Generator Time Series')

    # ---- Per-period variable costs ----
    Cost_Time_Series = pd.DataFrame()
    for s in range(1, Number_Scenarios + 1):
        if instance.Lost_Load_Probability > 0:
            name_1 = 'Lost Load ' + str(s) + ' (Wh)'
            name_1_1 = 'Lost Load ' + str(s) + ' (USD)'
        name_2 = 'Battery Flow Out ' + str(s) + ' (Wh)'
        name_2_1 = 'Battery Flow Out ' + str(s) + ' (USD)'
        name_3 = 'Battery Flow in ' + str(s) + ' (Wh)'
        name_3_1 = 'Battery Flow In ' + str(s) + ' (USD)'
        name_4_1 = 'Generator Cost ' + str(s) + ' (USD)'
        for t in Scenarios.index:
            if instance.Lost_Load_Probability > 0:
                Cost_Time_Series.loc[t,name_1_1] = Scenarios[name_1][t]*Project_Data['Value of lost load (USD/Wh)']
            # Battery wear cost is charged on both charge and discharge flows.
            Cost_Time_Series.loc[t,name_2_1] = (Scenarios[name_2][t]
                                                *Battery_Data.loc['Unitary Battery Reposition Cost (USD/Wh)','Battery'])
            Cost_Time_Series.loc[t,name_3_1] = (Scenarios[name_3][t]
                                                *Battery_Data.loc['Unitary Battery Reposition Cost (USD/Wh)','Battery'])
            Fuel_Cost = 0
            for g in range(1, Number_Generator + 1):
                name_5 = 'Fuel Cost ' + str(s) + ' ' + str(g) + ' (USD)'
                Fuel_Cost += Generator_Time_Series.loc[t,name_5]
            Cost_Time_Series.loc[t,name_4_1] = Fuel_Cost
            if instance.Curtailment_Unitary_Cost > 0:
                name_6 = 'Curtailment ' + str(s) + ' (Wh)'
                name_6_1 = 'Curtailment Cost ' + str(s) + ' (USD)'
                Cost_Time_Series.loc[t,name_6_1] = (Scenarios[name_6][t]*Project_Data['Curtailment Unitary Cost (USD/Wh)'])
    Cost_Time_Series.to_excel(writer, sheet_name='Cost Time Series')

    # ---- Yearly scenario costs and their present values ----
    Scenario_Cost = pd.DataFrame()
    for s in range(1, Number_Scenarios + 1):
        if instance.Lost_Load_Probability > 0:
            name_1_1 = 'Lost Load ' + str(s) + ' (USD)'
            name_1 ='Lost Load (USD)'
        name_2_1 = 'Battery Flow Out ' + str(s) + ' (USD)'
        name_2 = 'Battery Flow Out (USD)'
        name_3_1 = 'Battery Flow In ' + str(s) + ' (USD)'
        name_3 = 'Battery Flow In (USD)'
        name_4_1 = 'Generator Cost ' + str(s) + ' (USD)'
        name_4 = 'Generator Cost (USD)'
        if instance.Curtailment_Unitary_Cost > 0:
            name_6 = 'Curtailment ' + str(s) + ' (Wh)'
            name_6_1 = 'Curtailment Cost ' + str(s) + ' (USD)'
        name_5 = 'Scenario ' + str(s)
        if instance.Lost_Load_Probability > 0:
            Scenario_Cost.loc[name_1,name_5] = Cost_Time_Series[name_1_1].sum()
        Scenario_Cost.loc[name_2,name_5] = Cost_Time_Series[name_2_1].sum()
        Scenario_Cost.loc[name_3,name_5] = Cost_Time_Series[name_3_1].sum()
        Scenario_Cost.loc[name_4,name_5] = Cost_Time_Series[name_4_1].sum()
        if instance.Curtailment_Unitary_Cost > 0:
            Scenario_Cost.loc[name_6,name_5] = Cost_Time_Series[name_6_1].sum()
        gen_oym = 0
        for g in range(1, Number_Generator + 1):
            Name_2 = 'Generator ' + str(g)
            gen_oym += Generator_Data.loc['OyM Cost (USD)', Name_2]
        Scenario_Cost.loc['Gen OyM Cost (USD)',name_5] = gen_oym
        renewable_energy_oym = 0
        for r in range(1, Number_Renewable_Source + 1):
            Name = 'Source ' + str(r)
            renewable_energy_oym += Data_Renewable.loc['OyM Cost (USD)', Name]
        Scenario_Cost.loc['PV OyM Cost (USD)',name_5] = renewable_energy_oym
        Scenario_Cost.loc['Battery OyM Cost (USD)',name_5] = Battery_Data['Battery']['OyM Cost (USD)']
        # NOTE: sums everything accumulated in this column so far (yearly operation cost).
        Scenario_Cost.loc['Operation Cost (USD)',name_5] = Scenario_Cost[name_5].sum()
        Discount_rate = Project_Data['Discount Rate']
        Years = int(Project_Data['Proyect Life Time (years)'])
        Scenario_Cost.loc['OyM (USD)',name_5] = (Scenario_Cost.loc['Gen OyM Cost (USD)',name_5]
                                                 +Scenario_Cost.loc['PV OyM Cost (USD)',name_5]
                                                 +Scenario_Cost.loc['Battery OyM Cost (USD)',name_5])
        # Present values: yearly cost / CRF.
        Scenario_Cost.loc['Present Gen Cost (USD)',name_5] = Scenario_Cost.loc[name_4,name_5]/Project_Data['Capital Recovery Factor']
        if instance.Lost_Load_Probability > 0:
            Scenario_Cost.loc['Present Lost Load Cost (USD)',name_5] = Scenario_Cost.loc[name_1,name_5]/Project_Data['Capital Recovery Factor']
        Scenario_Cost.loc['Present Bat Out Cost (USD)',name_5] = Scenario_Cost.loc[name_2,name_5]/Project_Data['Capital Recovery Factor']
        Scenario_Cost.loc['Present Bat In Cost (USD)',name_5] = Scenario_Cost.loc[name_3,name_5]/Project_Data['Capital Recovery Factor']
        Scenario_Cost.loc['Present Bat Reposition Cost (USD)',name_5] = (Scenario_Cost.loc[name_2,name_5] + Scenario_Cost.loc[name_3,name_5])/Project_Data['Capital Recovery Factor']
        Scenario_Cost.loc['Present OyM Cost (USD)',name_5] = Scenario_Cost.loc['OyM (USD)',name_5]/Project_Data['Capital Recovery Factor']
        Scenario_Cost.loc['Present Operation Cost (USD)',name_5] = Scenario_Cost[name_5]['Operation Cost (USD)']/Project_Data['Capital Recovery Factor']
        Scenario_Cost.loc['Present Operation Cost Weighted (USD)',name_5] = (Scenario_Cost[name_5]['Present Operation Cost (USD)']
                                                                             *Scenario_Information[name_5]['Scenario Weight'])
    Scenario_Cost.to_excel(writer, sheet_name='Scenario Costs')

    # ---- NPC reconstruction and LCOE ----
    NPC = pd.DataFrame()
    NPC.loc['Battery Invesment (USD)', 'Data'] = Battery_Data['Battery']['Invesment Cost (USD)']
    gen_Invesment = 0
    for g in range(1, Number_Generator + 1):
        Name_2 = 'Generator ' + str(g)
        gen_Invesment += Generator_Data.loc['Invesment Generator (USD)', Name_2]
    NPC.loc['Generator Invesment Cost (USD)', 'Data'] = gen_Invesment
    renewable_energy_invesment = 0
    for r in range(1, Number_Renewable_Source + 1):
        Name = 'Source ' + str(r)
        renewable_energy_invesment += Data_Renewable.loc['Invesment (USD)', Name]
    NPC.loc['Renewable Investment Cost (USD)', 'Data'] = renewable_energy_invesment
    operation_cost = 0
    for s in range(1, Number_Scenarios + 1):
        name_1 = 'Scenario ' + str(s)
        operation_cost += Scenario_Cost[name_1]['Present Operation Cost Weighted (USD)']
    NPC.loc['Present Operation Cost Weighted (USD)', 'Data'] = operation_cost
    NPC.loc['NPC (USD)', 'Data'] = NPC['Data'].sum()
    # Sanity check: reconstructed NPC should match the solver objective.
    z = round(NPC.loc['NPC (USD)', 'Data'],5) == round(instance.ObjectiveFuntion.expr(), 5)
    print(z)  # debug: True when reconstruction matches the objective
    NPC.loc['NPC LP (USD)', 'Data'] = Project_Data['Net Present Cost (USD)']
    NPC.loc['Invesment (USD)', 'Data'] = (NPC.loc['Battery Invesment (USD)', 'Data']
                                          + NPC.loc['Generator Invesment Cost (USD)', 'Data']
                                          + NPC.loc['Renewable Investment Cost (USD)', 'Data'])
    # Present, weighted demand used as the LCOE denominator.
    Demand = pd.DataFrame()
    NP_Demand = 0
    for s in range(1, Number_Scenarios + 1):
        a = 'Energy Demand ' + str(s) + ' (Wh)'
        b = 'Scenario ' + str(s)
        Demand.loc[a,'Total Demand (Wh)'] = sum(Scenarios[a][i] for i in Scenarios.index)
        Demand.loc[a,'Present Demand (Wh)'] = sum((Demand.loc[a,'Total Demand (Wh)']/(1+Discount_rate)**i)
                                                  for i in range(1, Years+1))
        Demand.loc[a,'Rate'] = Scenario_Information[b]['Scenario Weight']
        Demand.loc[a,'Rated Demand (Wh)'] = Demand.loc[a,'Rate']*Demand.loc[a,'Present Demand (Wh)']
        NP_Demand += Demand.loc[a,'Rated Demand (Wh)']
    # NOTE(review): label says (USD/kWh) but both numerator and denominator are
    # in Wh-based units, so the value is USD/Wh — confirm before relying on it.
    NPC.loc['LCOE (USD/kWh)', 'Data'] = (Project_Data['Net Present Cost (USD)']/NP_Demand)
    NPC.loc['Status','Data'] = z
    NPC.to_excel(writer, sheet_name='Results')

    # Order matters: consumers index this list positionally.
    Data = []
    Data.append(NPC)
    Data.append(Scenario_Cost)
    Data.append(Project_Data)
    Data.append(Scenarios)
    Data.append(Generator_Data)
    Data.append(Scenario_Information)
    Data.append(Data_Renewable)
    Data.append(Battery_Data)
    writer.save()
    return Data
def Integer_Time_Series(instance,Scenarios, S, Data):
    '''
    Build a single-scenario time series dataframe from the multi-scenario
    ``Scenarios`` dataframe produced by ``Load_results1``.

    :param instance: The instance of the project resolution created by PYOMO.
    :param Scenarios: Dataframe with one column group per scenario
        (e.g. 'Energy Demand <s> (Wh)'), indexed by datetime.
    :param S: Scenario number to extract; 0 selects ``instance.PlotScenario``.
    :param Data: Unused; kept for interface compatibility with callers.

    :return: Dataframe with one column per variable for scenario ``S``.
    '''
    if S == 0:
        S = instance.PlotScenario.value

    # Generalized: reuse the index of Scenarios directly instead of assuming
    # exactly 8760 rows (one non-leap year of hourly data).
    Time_Series = pd.DataFrame(index=Scenarios.index)

    if instance.Lost_Load_Probability > 0:
        Time_Series['Lost Load (Wh)'] = Scenarios['Lost Load ' + str(S) + ' (Wh)']
    Time_Series['Renewable Energy (Wh)'] = Scenarios['Renewable Energy '+str(S) + ' (Wh)']
    Time_Series['Discharge energy from the Battery (Wh)'] = Scenarios['Battery Flow Out ' + str(S) + ' (Wh)']
    Time_Series['Charge energy to the Battery (Wh)'] = Scenarios['Battery Flow in '+str(S) + ' (Wh)']
    Time_Series['Curtailment (Wh)'] = Scenarios['Curtailment '+str(S) + ' (Wh)']
    Time_Series['Energy Demand (Wh)'] = Scenarios['Energy Demand '+str(S) + ' (Wh)']
    Time_Series['State Of Charge Battery (Wh)'] = Scenarios['SOC '+str(S) + ' (Wh)']
    Time_Series['Generator Energy (Wh)'] = Scenarios['Gen energy '+str(S) + ' (Wh)']

    Renewable_Source = instance.Renewable_Source.value
    if Renewable_Source > 1:
        # BUG FIX: the previous call passed 'Header=None' — pandas' keyword is
        # lowercase 'header', so the capitalized name was silently ignored on
        # old pandas and raises TypeError on current versions.  The sheet is
        # written *with* a header row, so header=0 (the default) is correct.
        # NOTE(review): Load_results1 writes 'Results/Results_<i>_<n>.xls';
        # confirm 'Results/Results.xls' is the intended file here.
        Renewable_Energy = pd.read_excel('Results/Results.xls', index_col=0, header=0,
                                         sheet_name='Renewable Energy Time Series')
        for r in range(1,Renewable_Source+1):
            name = 'Renewable ' + str(S) + ' ' + str(r) + ' (Wh)'
            name_1 = 'Renewable ' + str(r) + ' (Wh)'
            Time_Series[name_1] = Renewable_Energy[name]

    return Time_Series
def Load_results1_binary(instance):
    '''
    Load the period-dependent results of the binary (integer-generator)
    formulation into a dataframe and write them to excel files.

    :param instance: The instance of the project resolution created by PYOMO.

    :return: A dataframe called Time_Series with the values of the variables
        that depend of the periods, restricted to the scenario selected by
        ``instance.PlotScenario``.
    '''
    # Sizes of the scenario and period sets.
    Number_Scenarios = int(instance.Scenarios.extract_values()[None])
    Number_Periods = int(instance.Periods.extract_values()[None])

    columns = []
    for i in range(1, Number_Scenarios+1):
        columns.append('Scenario_'+str(i))

    # Extract the period-indexed variables from the solved model.
    Scenarios = pd.DataFrame()
    Lost_Load = instance.Lost_Load.get_values()
    PV_Energy = instance.Total_Energy_PV.get_values()
    Battery_Flow_Out = instance.Energy_Battery_Flow_Out.get_values()
    Battery_Flow_in = instance.Energy_Battery_Flow_In.get_values()
    Curtailment = instance.Energy_Curtailment.get_values()
    Energy_Demand = instance.Energy_Demand.extract_values()
    SOC = instance.State_Of_Charge_Battery.get_values()
    Gen_Energy_Integer = instance.Generator_Energy_Integer.get_values()

    # Energy corresponding to the integer number of committed generator units.
    # The nominal capacity is loop-invariant, so it is read once.
    Generator_Nominal_Capacity = instance.Generator_Nominal_Capacity.extract_values()[None]
    Gen_Energy_I = {}
    for i in range(1,Number_Scenarios+1):
        for j in range(1, Number_Periods+1):
            Gen_Energy_I[i,j] = Gen_Energy_Integer[i,j]*Generator_Nominal_Capacity

    Last_Generator_Energy = instance.Last_Energy_Generator.get_values()
    Total_Generator_Energy = instance.Generator_Total_Period_Energy.get_values()
    Gen_cost = instance.Period_Total_Cost_Generator.get_values()

    # (scenario, period) key lists, one list per scenario.
    Scenarios_Periods = [[] for i in range(Number_Scenarios)]
    for i in range(0,Number_Scenarios):
        for j in range(1, Number_Periods+1):
            Scenarios_Periods[i].append((i+1,j))

    foo=0
    for i in columns:
        Information = [[] for i in range(11)]
        for j in Scenarios_Periods[foo]:
            Information[0].append(Lost_Load[j])
            Information[1].append(PV_Energy[j])
            Information[2].append(Battery_Flow_Out[j])
            Information[3].append(Battery_Flow_in[j])
            Information[4].append(Curtailment[j])
            Information[5].append(Energy_Demand[j])
            Information[6].append(SOC[j])
            Information[7].append(Gen_Energy_I[j])
            Information[8].append(Last_Generator_Energy[j])
            Information[9].append(Total_Generator_Energy[j])
            Information[10].append(Gen_cost[j])
        Scenarios=Scenarios.append(Information)
        foo+=1

    # Row labels, in the same order the Information lists were appended.
    index=[]
    for j in range(1, Number_Scenarios+1):
        index.append('Lost_Load '+str(j))
        index.append('PV_Energy '+str(j))
        index.append('Battery_Flow_Out '+str(j))
        index.append('Battery_Flow_in '+str(j))
        index.append('Curtailment '+str(j))
        index.append('Energy_Demand '+str(j))
        index.append('SOC '+str(j))
        index.append('Gen energy Integer '+str(j))
        index.append('Last Generator Energy '+str(j))
        index.append('Total Generator Energy '+str(j))
        index.append('Total Cost Generator'+str(j))
    Scenarios.index= index

    # Creation of an index starting in the 'model.StartDate' value with a
    # frequency step equal to 'model.Delta_Time'.
    # FIX: pd.DatetimeIndex(start=..., periods=..., freq=...) was removed in
    # pandas 1.0; pd.date_range builds the same index on all pandas versions.
    if instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1.0) : # if the step is in hours and minutes
        foo = str(instance.Delta_Time()) # transform the number into a string
        hour = foo[0] # Extract the first character
        minutes = str(int(float(foo[1:3])*60)) # Extract the last two characters
        columns = pd.date_range(start=instance.StartDate(),
                                periods=instance.Periods(),
                                freq=(hour + 'h' + minutes + 'min'))
    elif instance.Delta_Time() >= 1 and type(instance.Delta_Time()) == type(1): # if the step is in hours
        columns = pd.date_range(start=instance.StartDate(),
                                periods=instance.Periods(),
                                freq=(str(instance.Delta_Time()) + 'h'))
    else: # if the step is in minutes
        columns = pd.date_range(start=instance.StartDate(),
                                periods=instance.Periods(),
                                freq=(str(int(instance.Delta_Time()*60)) + 'min'))
    Scenarios.columns = columns
    Scenarios = Scenarios.transpose()
    Scenarios.to_excel('Results/Time_Series.xls') # Creating an excel file with the values of the variables that are in function of the periods

    # ---- Scenario-level information ----
    columns = []
    for i in range(1, Number_Scenarios+1):
        columns.append('Scenario_'+str(i))
    Scenario_information =[[] for i in range(Number_Scenarios)]
    Scenario_NPC = instance.Scenario_Net_Present_Cost.get_values()
    LoL_Cost = instance.Scenario_Lost_Load_Cost.get_values()
    Scenario_Weight = instance.Scenario_Weight.extract_values()
    Diesel_Cost = instance.Sceneario_Generator_Total_Cost.get_values()
    for i in range(1, Number_Scenarios+1):
        Scenario_information[i-1].append(Scenario_NPC[i])
        Scenario_information[i-1].append(LoL_Cost[i])
        Scenario_information[i-1].append(Scenario_Weight[i])
        Scenario_information[i-1].append(Diesel_Cost[i])
    Scenario_Information = pd.DataFrame(Scenario_information,index=columns)
    Scenario_Information.columns=['Scenario NPC', 'LoL Cost','Scenario Weight', 'Diesel Cost']
    Scenario_Information = Scenario_Information.transpose()
    Scenario_Information.to_excel('Results/Scenario_Information.xls')

    # ---- Single-scenario view for plotting ----
    S = instance.PlotScenario.value
    Time_Series = pd.DataFrame(index=range(0,8760))
    Time_Series.index = Scenarios.index
    Time_Series['Lost Load'] = Scenarios['Lost_Load '+str(S)]
    Time_Series['Energy PV'] = Scenarios['PV_Energy '+str(S)]
    Time_Series['Discharge energy from the Battery'] = Scenarios['Battery_Flow_Out '+str(S)]
    Time_Series['Charge energy to the Battery'] = Scenarios['Battery_Flow_in '+str(S)]
    Time_Series['Curtailment'] = Scenarios['Curtailment '+str(S)]
    Time_Series['Energy_Demand'] = Scenarios['Energy_Demand '+str(S)]
    Time_Series['State_Of_Charge_Battery'] = Scenarios['SOC '+str(S)]
    Time_Series['Gen energy Integer'] = Scenarios['Gen energy Integer '+str(S)]
    # BUG FIX: the next two columns previously used the stale loop variable
    # 'j' (always the last scenario number) instead of the plotted scenario
    # 'S', so plots mixed data from different scenarios.
    Time_Series['Last Generator Energy'] = Scenarios['Last Generator Energy ' +str(S)]
    Time_Series['Energy Diesel'] = Scenarios['Total Generator Energy '+str(S)]

    return Time_Series
def Load_results2_binary(instance):
    '''
    Extract the unidimensional (period-independent) variables of the solved
    binary formulation and dump them to excel files.

    :param instance: The instance of the project resolution created by PYOMO.

    :return: Data frame called Size_variables with the sizing and economic
        scalar values (single unnamed column, labelled rows).
    '''
    # ---- Sizing / economic scalars -> 'Results/Size.xls' ----
    # First (and only relevant) PV units value from the indexed variable.
    pv_units = next(iter(instance.PV_Units.get_values().values()))
    gen_nominal = instance.Generator_Nominal_Capacity.value
    size_rows = [
        ('Amortization', instance.Cost_Financial.get_values()[None]),
        ('Size of the solar panels', pv_units*instance.PV_Nominal_Capacity.value),
        ('Size of the Battery', instance.Battery_Nominal_Capacity.get_values()[None]),
        ('Nominal Capacity Generator', gen_nominal),
        # Installed power = nominal capacity * integer number of units.
        ('Generator Install power', gen_nominal*instance.Integer_generator.get_values()[None]),
        ('Net Present Cost', instance.ObjectiveFuntion.expr()),
        ('Marginal cost Full load', instance.Marginal_Cost_Generator_1.value),
        ('Marginal cost Partial load', instance.Marginal_Cost_Generator.value),
        ('Start Cost', instance.Start_Cost_Generator.value),
        ('Funded Porcentage', instance.Porcentage_Funded.value),
        ('Discount Rate', instance.Discount_Rate.value),
        ('Interest Rate', instance.Interest_Rate_Loan.value),
        ('Precio PV', instance.PV_invesment_Cost.value),
        ('Precio Bateria', instance.Battery_Invesment_Cost.value),
        ('Precio GenSet', instance.Generator_Invesment_Cost.value),
        ('OyM', instance.Maintenance_Operation_Cost_PV.value),
        ('Project years', instance.Years.value),
        ('VOLL', instance.Value_Of_Lost_Load.value),
    ]
    Size_variables = pd.DataFrame([value for _, value in size_rows],
                                  index=[label for label, _ in size_rows])
    # Excel file with the values of the variables that do not depend of the periods.
    Size_variables.to_excel('Results/Size.xls')

    # ---- Cost decomposition -> 'Results/Partial Costs.xls' ----
    cost_rows = [
        ('Initial Inversion', instance.Initial_Inversion.get_values()[None]),
        ('O & M', instance.Operation_Maintenance_Cost.get_values()[None]),
        ('Financial Cost', instance.Total_Finalcial_Cost.get_values()[None]),
        ('Battery reposition', instance.Battery_Reposition_Cost.get_values()[None]),
    ]
    Value_costs = pd.DataFrame([value for _, value in cost_rows],
                               index=[label for label, _ in cost_rows])
    Value_costs.to_excel('Results/Partial Costs.xls')

    # ---- Per-scenario costs -> 'Results/Scenario Cost.xls' ----
    lost_load_cost = instance.Scenario_Lost_Load_Cost.get_values()
    generator_cost = instance.Sceneario_Generator_Total_Cost.get_values()
    scenario_npc = instance.Scenario_Net_Present_Cost.get_values()
    scenarios = range(1, instance.Scenarios.extract_values()[None]+1)
    Scenario_Costs = pd.DataFrame(columns=['VOLL', 'Scenario Generator Cost', 'NPC Scenario'],
                                  index=scenarios)
    for s in scenarios:
        Scenario_Costs.loc[s, 'VOLL'] = lost_load_cost[s]
        Scenario_Costs.loc[s, 'Scenario Generator Cost'] = generator_cost[s]
        Scenario_Costs.loc[s, 'NPC Scenario'] = scenario_npc[s]
    Scenario_Costs.to_excel('Results/Scenario Cost.xls')

    return Size_variables
def Load_results1_Dispatch(instance):
    '''
    This function loads the results that depend of the periods in to a
    dataframe and creates a excel file with it.

    :param instance: The instance of the project resolution created by PYOMO.

    :return: A dataframe called Time_Series_2 with the values of the variables
    that depend of the periods, indexed by timestamp.
    '''
    Names = ['Lost_Load', 'PV_Energy', 'Battery_Flow_Out', 'Battery_Flow_in',
             'Curtailment', 'Energy_Demand', 'SOC', 'Gen Int', 'Gen energy',
             'Total Cost Generator']
    Number_Periods = int(instance.Periods.extract_values()[None])
    Time_Series = pd.DataFrame(columns=Names, index=range(1, Number_Periods + 1))
    # Extract the period-indexed Pyomo variables/expressions as {period: value} dicts.
    Lost_Load = instance.Lost_Load.get_values()
    PV_Energy = instance.Total_Energy_PV.extract_values()
    Battery_Flow_Out = instance.Energy_Battery_Flow_Out.get_values()
    Battery_Flow_in = instance.Energy_Battery_Flow_In.get_values()
    Curtailment = instance.Energy_Curtailment.get_values()
    Energy_Demand = instance.Energy_Demand.extract_values()
    SOC = instance.State_Of_Charge_Battery.get_values()
    Gen_Energy_Integer = instance.Generator_Energy_Integer.get_values()
    Total_Generator_Energy = instance.Generator_Total_Period_Energy.get_values()
    Gen_cost = instance.Period_Total_Cost_Generator.get_values()
    for i in range(1, Number_Periods + 1):
        # .loc[row, col] writes directly into the frame; the original chained
        # indexing (df['col'][i] = ...) is unreliable under modern pandas.
        Time_Series.loc[i, 'Lost_Load'] = Lost_Load[i]
        Time_Series.loc[i, 'PV_Energy'] = PV_Energy[i]
        Time_Series.loc[i, 'Battery_Flow_Out'] = Battery_Flow_Out[i]
        Time_Series.loc[i, 'Battery_Flow_in'] = Battery_Flow_in[i]
        Time_Series.loc[i, 'Curtailment'] = Curtailment[i]
        Time_Series.loc[i, 'Energy_Demand'] = Energy_Demand[i]
        Time_Series.loc[i, 'SOC'] = SOC[i]
        Time_Series.loc[i, 'Gen Int'] = Gen_Energy_Integer[i]
        Time_Series.loc[i, 'Gen energy'] = Total_Generator_Energy[i]
        Time_Series.loc[i, 'Total Cost Generator'] = Gen_cost[i]
    # Creation of an index starting in the 'model.StartDate' value with a
    # frequency step equal to 'model.Delta_Time'.
    Delta_Time = instance.Delta_Time()
    if Delta_Time >= 1 and isinstance(Delta_Time, float):  # step in hours and minutes
        foo = str(Delta_Time)            # transform the number into a string
        # NOTE(review): assumes a single-digit number of hours (e.g. '1.5') -- confirm
        hour = foo[0]                    # extract the hour digit
        minutes = str(int(float(foo[1:3]) * 60))  # fractional part -> minutes
        freq = hour + 'h' + minutes + 'min'
    elif Delta_Time >= 1 and isinstance(Delta_Time, int):  # step in whole hours
        freq = str(Delta_Time) + 'h'
    else:                                # step shorter than one hour -> minutes
        freq = str(int(Delta_Time * 60)) + 'min'
    # pd.date_range replaces the pd.DatetimeIndex(start=...) constructor that
    # was removed in pandas 1.0.
    columns = pd.date_range(start=instance.StartDate(),
                            periods=instance.Periods(),
                            freq=freq)
    Time_Series.index = columns
    # Excel file with the values of the variables that depend of the periods.
    Time_Series.to_excel('Results/Time_Series.xls')
    # Renamed copy returned to the caller.
    Time_Series_2 = pd.DataFrame()
    Time_Series_2['Lost Load'] = Time_Series['Lost_Load']
    Time_Series_2['Renewable Energy'] = Time_Series['PV_Energy']
    Time_Series_2['Discharge energy from the Battery'] = Time_Series['Battery_Flow_Out']
    Time_Series_2['Charge energy to the Battery'] = Time_Series['Battery_Flow_in']
    Time_Series_2['Curtailment'] = Time_Series['Curtailment']
    Time_Series_2['Energy_Demand'] = Time_Series['Energy_Demand']
    Time_Series_2['State_Of_Charge_Battery'] = Time_Series['SOC']
    Time_Series_2['Energy Diesel'] = Time_Series['Gen energy']
    Time_Series_2['Total Cost Generator'] = Time_Series['Total Cost Generator']
    Time_Series_2.index = columns
    return Time_Series_2
def Load_results2_Dispatch(instance):
    '''
    This function extracts the unidimensional variables into a data frame
    and creates a excel file with this data.

    :param instance: The instance of the project resolution created by PYOMO.

    :return: A list [Generator_Data, Results] of data frames with the
    variables values that do not depend of the periods.
    '''
    # Scalar Pyomo parameters extract as {None: value} dictionaries.
    key = None
    # (row label, value) pairs in the exact order they appear in the report.
    generator_rows = [
        ('Generator Min Out Put', instance.Generator_Min_Out_Put.extract_values()[key]),
        ('Generator Efficiency', instance.Generator_Efficiency.extract_values()[key]),
        ('Low Heating Value', instance.Low_Heating_Value.extract_values()[key]),
        ('Fuel Cost', instance.Diesel_Cost.extract_values()[key]),
        ('Marginal cost Full load', instance.Marginal_Cost_Generator_1.extract_values()[key]),
        ('Marginal cost Partial load', instance.Marginal_Cost_Generator.extract_values()[key]),
        ('Cost Increase', instance.Cost_Increase.extract_values()[key]),
        ('Generator Nominal Capacity', instance.Generator_Nominal_Capacity.extract_values()[key]),
        ('Start Cost Generator', instance.Start_Cost_Generator.extract_values()[key]),
    ]
    column = 'Generator ' + str(1)
    Generator_Data = pd.DataFrame()
    for label, value in generator_rows:
        Generator_Data.loc[label, column] = value
    Data = [Generator_Data]
    Generator_Data.to_excel('Results/Generator_Data.xls')

    # Battery and system-wide scalar results.
    Size_Bat = instance.Battery_Nominal_Capacity.extract_values()[key]
    O_Cost = instance.ObjectiveFuntion.expr()
    VOLL = instance.Value_Of_Lost_Load.value
    Bat_ef_out = instance.Discharge_Battery_Efficiency.value
    Bat_ef_in = instance.Charge_Battery_Efficiency.value
    DoD = instance.Deep_of_Discharge.value
    Inv_Cost_Bat = instance.Battery_Invesment_Cost.value
    Inv_Cost_elec = instance.Battery_Electronic_Invesmente_Cost.value
    Bat_Cycles = instance.Battery_Cycles.value
    # Unitary battery cost net of the power electronics, spread over the
    # usable cycle life of the battery.
    unitary_cost = Inv_Cost_Bat - Inv_Cost_elec
    Battery_Reposition_Cost = unitary_cost / (Bat_Cycles * 2 * (1 - DoD))
    Number_Periods = int(instance.Periods.extract_values()[key])

    labels = ['Size of the Battery',
              'Operation Cost', 'VOLL',
              'Battery efficiency discharge',
              'Battery efficiency charge',
              'Deep of discharge',
              'Battery unitary invesment cost',
              'Battery electronic unitary cost',
              'Battery max cycles',
              'Battery Reposition Cost',
              'Number of periods']
    values = [Size_Bat, O_Cost, VOLL, Bat_ef_out, Bat_ef_in, DoD,
              Inv_Cost_Bat, Inv_Cost_elec, Bat_Cycles,
              Battery_Reposition_Cost, Number_Periods]
    Results = pd.DataFrame(values, index=labels)
    # Excel file with the values of the variables that do not depend of the periods.
    Results.to_excel('Results/Size.xls')
    Data.append(Results)
    return Data
def Dispatch_Economic_Analysis(Results, Time_Series):
    '''
    Compute the period-by-period operation costs of a dispatch run.

    :param Results: list [Generator_Data, Result] as returned by
        Load_results2_Dispatch (Result column 0 holds the scalar values).
    :param Time_Series: data frame returned by Load_results1_Dispatch.

    :return: list [Time_Series_Economic, Operation_Cost] where the first
        element holds the per-period costs and the second their totals
        (including a 'Total Cost' entry).
    '''
    Data = []
    Generator_Data = Results[0]
    Result = Results[1]
    # Column names and scalar cost coefficients are loop invariant:
    # hoisted out of the per-period loop (the original re-read them
    # and re-built the strings on every iteration).
    name_1 = "Fuel"
    name_2 = "Discharge energy from the Battery"
    name_3 = "Charge energy to the Battery"
    name_4 = 'Battery Reposition Cost'
    name_5 = 'Battery operation Cost'
    name_6 = 'VOLL'
    reposition_cost = Result[0][name_4]
    value_of_lost_load = Result[0][name_6]
    start_cost = Generator_Data['Generator 1']['Start Cost Generator']
    marginal_cost = Generator_Data['Generator 1']['Marginal cost Partial load']
    Time_Series_Economic = pd.DataFrame()
    for t in Time_Series.index:
        # Battery wear cost is charged on the total energy moved in or out.
        Power_Bat = Time_Series[name_2][t] + Time_Series[name_3][t]
        Time_Series_Economic.loc[t, name_5] = Power_Bat * reposition_cost
        LL = Time_Series['Lost Load'][t]
        Time_Series_Economic.loc[t, name_6] = LL * value_of_lost_load
        # The generator is considered on when it delivers more than 0.1 Wh;
        # its cost is the start cost plus the partial-load marginal cost.
        if Time_Series['Energy Diesel'][t] > 0.1:
            Time_Series_Economic.loc[t, name_1] = (start_cost
                                                   + marginal_cost * Time_Series['Energy Diesel'][t])
        else:
            Time_Series_Economic.loc[t, name_1] = 0
    Operation_Cost = Time_Series_Economic.sum()
    Operation_Cost['Total Cost'] = Operation_Cost.sum()
    Data.append(Time_Series_Economic)
    Data.append(Operation_Cost)
    return Data
def Plot_Energy_Total(instance, Time_Series, plot, Plot_Date, PlotTime):
    '''
    This function creates a plot of the dispatch of energy of a defined number of days.

    :param instance: The instance of the project resolution created by PYOMO.
    :param Time_Series: The results of the optimization model that depend of the periods.
    :param plot: 'No Average' plots the raw dispatch of the requested window;
        any other value plots the hourly average profile of the whole run.
    :param Plot_Date: start date (string) of the plotting window.
    :param PlotTime: number of days to plot.

    The figure is saved to 'Results/Energy_Dispatch.png' and shown on screen.

    NOTE(review): this routine relies on an old pandas API
    (``pd.DatetimeIndex(start=...)``, ``DataFrame.append``) and on
    ``Timestamp + 1`` style index arithmetic in the interpolation loops,
    and it uses ``math``, ``interp``, ``mpatches``, ``mlines`` and ``plt``
    that must be imported at module level -- confirm before upgrading pandas.
    '''
    if plot == 'No Average':
        Periods_Day = 24/instance.Delta_Time() # periods in a day
        foo = pd.DatetimeIndex(start=Plot_Date,periods=1,freq='1h')# Asign the start date of the graphic to a dumb variable
        for x in range(0, instance.Periods()): # Find the position form wich the plot will start in the Time_Series dataframe
            if foo == Time_Series.index[x]:
               Start_Plot = x # asign the value of x to the position where the plot will start
        End_Plot = Start_Plot + PlotTime*Periods_Day # Create the end of the plot position inside the time_series
        Time_Series.index=range(1,(len(Time_Series)+1))
        Plot_Data = Time_Series[Start_Plot:int(End_Plot)] # Extract the data between the start and end position from the Time_Series
        columns = pd.DatetimeIndex(start=Plot_Date,
                                    periods=PlotTime*Periods_Day,
                                    freq=('1H'))
        Plot_Data.index=columns
        Plot_Data = Plot_Data.astype('float64')
        Plot_Data = Plot_Data
        # Charge flow is plotted below the x axis, hence the sign flip.
        Plot_Data['Charge energy to the Battery (Wh)'] = -Plot_Data['Charge energy to the Battery (Wh)']
        Plot_Data = round(Plot_Data,2)
        # Fill holds the cumulative (stacked) series used for the area plot:
        # renewables, then generator on top, then curtailment band (c2..c).
        Fill = pd.DataFrame()
        r = 'Renewable Energy (Wh)'
        g = 'Generator Energy (Wh)'
        c = 'Curtailment (Wh)'
        c2 ='Curtailment min (Wh)'
        b = 'Discharge energy from the Battery (Wh)'
        d = 'Energy Demand (Wh)'
        ch = 'Charge energy to the Battery (Wh)'
        SOC = 'State Of Charge Battery (Wh)'
        Renewable_Source = instance. Renewable_Source.value
        # Build the stacked levels period by period, depending on which
        # sources are producing energy in that period.
        for t in Plot_Data.index:
            if (Plot_Data[r][t] > 0 and Plot_Data[g][t]>0):
                curtailment = Plot_Data[c][t]
                Fill.loc[t,r] = Plot_Data[r][t]
                Fill.loc[t,g] = Fill[r][t] + Plot_Data[g][t]-curtailment
                Fill.loc[t,c] = Fill[r][t] + Plot_Data[g][t]
                Fill.loc[t,c2] = Fill.loc[t,g]
            elif Plot_Data[r][t] > 0:
                Fill.loc[t,r] = Plot_Data[r][t]-Plot_Data[c][t]
                Fill.loc[t,g] = Fill[r][t] + Plot_Data[g][t]
                Fill.loc[t,c] = Fill[r][t] + Plot_Data[g][t]+Plot_Data[c][t]
                Fill.loc[t,c2] = Plot_Data[r][t]-Plot_Data[c][t]
            elif Plot_Data[g][t] > 0:
                Fill.loc[t,r] = 0
                Fill.loc[t,g]= (Fill[r][t] + Plot_Data[g][t] - Plot_Data[c][t] )
                Fill.loc[t,c] = Fill[r][t] + Plot_Data[g][t]
                Fill.loc[t,c2] = (Fill[r][t] + Plot_Data[g][t] - Plot_Data[c][t] )
            else:
                Fill.loc[t,r] = 0
                Fill.loc[t,g]= 0
                if Plot_Data[g][t] == 0:
                    Fill.loc[t,c] = Plot_Data[g][t]
                    Fill.loc[t,c2] = Plot_Data[g][t]
                else:
                    if Plot_Data[g][t] > 0:
                        Fill.loc[t,c] = Plot_Data[g][t]
                        Fill.loc[t,c2] = Plot_Data[d][t]
                    else:
                        Fill.loc[t,c] = Plot_Data[b][t]
                        Fill.loc[t,c2] = Plot_Data[d][t]
        # Cumulative per-source renewable series when there is more than one.
        if Renewable_Source > 1:
            for R in range(1,Renewable_Source+1):
                name = 'Renewable ' + str(R) + ' (Wh)'
                if R == 1:
                    Fill[name] = Plot_Data[name]
                else:
                    name_1 = 'Renewable ' + str(R-1) + ' (Wh)'
                    Fill[name] = Fill[name_1] + Plot_Data[name]
        Fill[b] = (Fill[g] + Plot_Data[b])
        Fill[d] = Plot_Data[d]
        Fill[ch] = Plot_Data[ch]
        Fill[SOC] = Plot_Data[SOC]
        Fill.index = columns
        # New collects interpolated crossing points (sub-hour timestamps)
        # so that the stacked areas meet the demand line exactly.
        # NOTE(review): 'x' below is minutes past t and assumes hourly steps,
        # and 't+1' relies on integer-like index arithmetic -- confirm.
        New = pd.DataFrame()
        # Case 1: the battery is discharging at t and renewables exceed
        # demand in the next step.
        for t in Fill.index[:-1]:
            if Fill[b][t] > Fill[g][t]:
                if Fill[r][t+1]>Fill[d][t+1]:
                    print(t)
                    b_d = (Fill[d][t+1] - Fill[d][t])/60
                    b_g = (Fill[g][t+1] - Fill[g][t])/60
                    a_d = Fill[d][t]
                    a_g = Fill[g][t]
                    x = (a_g - a_d)/(b_d - b_g)
                    x = round(x,4)
                    second, minute = math.modf(x)
                    minute = int(minute)
                    second = second*60
                    second = int(second)
                    if x < 60:
                        t_1 = t
                        t_1 = t_1.replace(minute=minute, second=second, microsecond=0)
                        xp = [0, 60]
                        New.loc[t_1,r] = interp(x,xp,[Fill[r][t], Fill[r][t+1]])
                        New.loc[t_1,g] = interp(x,xp,[Fill[g][t], Fill[g][t+1]])
                        New.loc[t_1,c] = interp(x,xp,[Fill[c][t], Fill[c][t+1]])
                        New.loc[t_1,c2] = interp(x,xp,[Fill[c2][t], Fill[c2][t+1]])
                        New.loc[t_1,b] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])
                        New.loc[t_1,d] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])
                        New.loc[t_1,SOC] = interp(x,xp,[Fill[SOC][t], Fill[SOC][t+1]])
                        New.loc[t_1,ch] = interp(x,xp,[Fill[ch][t], Fill[ch][t+1]])
                        if Renewable_Source > 1:
                            for R in range(1,Renewable_Source+1):
                                name = 'Renewable ' + str(R) + ' (Wh)'
                                New.loc[t_1,name] = interp(x,xp,[Fill[name][t], Fill[name][t+1]])
        # Case 2: the battery covers demand at t and the generator output
        # rises above demand in the next step.
        for t in Fill.index[:-1]:
            if (Fill[b][t] == Fill[d][t]) and (Fill[g][t+1] > Plot_Data[d][t+1]):
                if Fill[b][t] > Fill[g][t]:
                    print(t)
                    b_d = (Fill[d][t+1] - Fill[d][t])/60
                    b_g = (Fill[g][t+1] - Fill[g][t])/60
                    a_d = Fill[d][t]
                    a_g = Fill[g][t]
                    x = (a_g - a_d)/(b_d - b_g)
                    x = round(x,4)
                    second, minute = math.modf(x)
                    minute = int(minute)
                    second = second*60
                    second = int(second)
                    if x < 60:
                        t_1 = t
                        t_1 = t_1.replace(minute=minute, second=second, microsecond=0)
                        xp = [0, 60]
                        New.loc[t_1,r] = interp(x,xp,[Fill[r][t], Fill[r][t+1]])
                        New.loc[t_1,g] = interp(x,xp,[Fill[g][t], Fill[g][t+1]])
                        New.loc[t_1,c] = interp(x,xp,[Fill[c][t], Fill[c][t+1]])
                        New.loc[t_1,c2] = interp(x,xp,[Fill[c2][t], Fill[c2][t+1]])
                        New.loc[t_1,b] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])
                        New.loc[t_1,d] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])
                        New.loc[t_1,SOC] = interp(x,xp,[Fill[SOC][t], Fill[SOC][t+1]])
                        New.loc[t_1,ch] = interp(x,xp,[Fill[ch][t], Fill[ch][t+1]])
                        if Renewable_Source > 1:
                            for R in range(1,Renewable_Source+1):
                                name = 'Renewable ' + str(R) + ' (Wh)'
                                New.loc[t_1,name] = interp(x,xp,[Fill[name][t], Fill[name][t+1]])
        # fix the battery if in one step before the energy production is more than demand
        # and in time t the battery is used
        for t in Fill.index[1:-1]:
            if Fill[g][t] > Plot_Data[d][t] and Fill[b][t+1] == Plot_Data[d][t+1] and Plot_Data[b][t+1] > 0:
                print(t)
                b_d = (Fill[d][t+1] - Fill[d][t])/60
                b_g = (Fill[g][t+1] - Fill[g][t])/60
                a_d = Fill[d][t]
                a_g = Fill[g][t]
                x = (a_g - a_d)/(b_d - b_g)
                x = round(x,4)
                second, minute = math.modf(x)
                minute = int(minute)
                second = second*60
                second = int(second)
                if x < 60:
                    t_1 = t
                    t_1 = t_1.replace(minute=minute, second=second, microsecond=0)
                    xp = [0, 60]
                    New.loc[t_1,r] = interp(x,xp,[Fill[r][t], Fill[r][t+1]])
                    New.loc[t_1,g] = interp(x,xp,[Fill[g][t], Fill[g][t+1]])
                    New.loc[t_1,c] = interp(x,xp,[Fill[c][t], Fill[c][t+1]])
                    New.loc[t_1,c2] = interp(x,xp,[Fill[c2][t], Fill[c2][t+1]])
                    New.loc[t_1,b] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])
                    New.loc[t_1,d] = interp(x,xp,[Fill[d][t], Fill[d][t+1]])
                    New.loc[t_1,SOC] = interp(x,xp,[Fill[SOC][t], Fill[SOC][t+1]])
                    New.loc[t_1,ch] = interp(x,xp,[Fill[ch][t], Fill[ch][t+1]])
                    if Renewable_Source > 1:
                        for R in range(1,Renewable_Source+1):
                            name = 'Renewable ' + str(R) + ' (Wh)'
                            New.loc[t_1,name] = interp(x,xp,[Fill[name][t], Fill[name][t+1]])
        # Merge the interpolated points into the stacked series, keeping the
        # index chronologically sorted (DataFrame.append: removed in pandas 2).
        Fill = Fill.append(New)
        Fill.sort_index(inplace=True)
        size = [20,10]
        plt.figure(figsize=size)
    #    Fill[b] = ( Fill[g] + Plot_Data[b])
        # ---- drawing of the stacked dispatch areas ----
        # Renewable energy
        # For 1 energy Source
        if Renewable_Source == 1:
            c_PV = 'yellow'
            Alpha_r = 0.4
            ax1 =Fill[r].plot(style='y-', linewidth=1)
            ax1.fill_between(Fill.index, 0,Fill[r].values,
                             alpha=Alpha_r, color = c_PV)
        else:
            c_r = ['aqua', 'chocolate', 'lightcoral', 'lightgreen']
            for R in range (1, Renewable_Source+1):
                name = 'Renewable ' + str(R) + ' (Wh)'
                print(name)
                if R == 1:
                    c_PV_1 = 'yellow'
                    Alpha_r = 0.4
                    ax1 = Fill[name].plot(style='y-', linewidth=0)
                    ax1.fill_between(Fill.index, 0, Fill[name].values,
                                     alpha=Alpha_r, color = c_PV_1)
                elif R == Renewable_Source:
                    name_1 = 'Renewable ' + str(R-1) + ' (Wh)'
                    c_r_1 = c_r[R-1]
                    Alpha_r = 0.4
                    ax1 = Fill[r].plot(style='c-', linewidth=0)
                    ax1.fill_between(Fill.index, Fill[name_1].values, Fill[r].values,
                                     alpha=Alpha_r, color =c_r_1)
                else:
                    name_1 = 'Renewable ' + str(R-1) + ' (Wh)'
                    c_r_1 = c_r[R-1]
                    Alpha_r = 0.4
                    ax1 = Fill[r].plot(style='c-', linewidth=0)
                    ax1.fill_between(Fill.index, Fill[name_1].values, Fill[name].values,
                                     alpha=Alpha_r, color =c_r_1)
        # Genset Plot
        c_d = 'm'
        Alpha_g = 0.3
        hatch_g = '\\'
        ax2 = Fill[g].plot(style='c', linewidth=0)
        ax2.fill_between(Fill.index, Fill[r].values, Fill[g].values,
                         alpha=Alpha_g, color=c_d, edgecolor=c_d , hatch =hatch_g)
        # Battery discharge
        alpha_bat = 0.3
        hatch_b ='x'
        C_Bat = 'green'
        ax3 = Fill[b].plot(style='b', linewidth=0)
        ax3.fill_between(Fill.index, Fill[g].values, Fill[b].values,
                         alpha=alpha_bat, color =C_Bat,edgecolor=C_Bat, hatch =hatch_b)
        # Demand
        ax4 = Plot_Data[d].plot(style='k', linewidth=2, marker= 'o')
        # Battery Charge
        ax5= Fill[ch].plot(style='m', linewidth=0.5) # Plot the line of the energy flowing into the battery
        ax5.fill_between(Fill.index, 0,
                         Fill[ch].values
                         , alpha=alpha_bat, color=C_Bat,edgecolor= C_Bat, hatch ='x')
        # State of charge of battery (plotted on the secondary y axis)
        ax6= Fill[SOC].plot(style='k--',
                            secondary_y=True, linewidth=2, alpha=0.7 )
        # Curtailment (band between the c2 and c cumulative levels)
        alpha_cu = 0.3
        hatch_cu = '+'
        C_Cur = 'blue'
        ax7 = Fill[c].plot(style='b-', linewidth=0)
        ax7.fill_between(Fill.index, Fill[c2].values , Fill[c].values,
                         alpha=alpha_cu, color=C_Cur,edgecolor= C_Cur,
                         hatch =hatch_cu,
                         where=Fill[c].values>Fill[d])
        # Lost load
        if instance.Lost_Load_Probability > 0:
            alpha_LL = 0.3
            hatch_LL = '-'
            C_LL = 'crimson'
            ax4.fill_between(Fill.index, Fill[b].values, Fill[d].values,
                             alpha=alpha_LL, color=C_LL,edgecolor= C_LL,
                             hatch =hatch_LL)
        # Define name and units of the axis
        ax1.set_ylabel('Power (kW)',size=30)
        ax1.set_xlabel('Time',size=30)
        ax6.set_ylabel('Battery State of charge (kWh)',size=30)
        ax1.set_xlim(Fill.index[0], Fill.index[len(Fill)-1])
        tick_size = 15
        #mpl.rcParams['xtick.labelsize'] = tick_size
        ax1.tick_params(axis='x', which='major', labelsize = tick_size,pad=8 )
        ax1.tick_params(axis='y', which='major', labelsize = tick_size )
        # ax1.tick_params(axis='x', which='major', labelsize = tick_size)
        ax6.tick_params(axis='y', which='major', labelsize = tick_size )
        # Define the legends of the plot
        From_Renewable =[]
        for R in range(1, Renewable_Source + 1):
            if R == 1:
                From_Renewable.append(mpatches.Patch(color='yellow',alpha=Alpha_r, label='Renewable 1'))
            else:
                name = 'From Renewable ' +str(R)
                c_r_1 = c_r[R-1]
                foo = mpatches.Patch(color=c_r_1,alpha=Alpha_r, label=name)
                From_Renewable.append(foo)
        From_Generator = mpatches.Patch(color=c_d,alpha=Alpha_g,
                                        label='From Generator',hatch =hatch_g)
        Battery = mpatches.Patch(color=C_Bat ,alpha=alpha_bat,
                                 label='Battery Energy Flow',hatch =hatch_b)
        Curtailment = mpatches.Patch(color=C_Cur ,alpha=alpha_cu,
                                     label='Curtailment',hatch =hatch_cu)
        Energy_Demand = mlines.Line2D([], [], color='black',label='Energy Demand')
        State_Of_Charge_Battery = mlines.Line2D([], [], color='black',
                                                label='State Of Charge Battery',
                                                linestyle='--',alpha=0.7)
        Legends = []
        Legends.append(From_Generator)
        for R in range(Renewable_Source):
            Legends.append(From_Renewable[R])
        Legends.append(Battery)
        Legends.append(Curtailment)
        Legends.append(Energy_Demand)
        Legends.append(State_Of_Charge_Battery)
        if instance.Lost_Load_Probability > 0:
            Lost_Load = mpatches.Patch(color=C_LL,alpha=alpha_LL,
                                       label='Lost Laod',hatch =hatch_LL)
            Legends.append(Lost_Load)
        plt.legend(handles=Legends,
                   bbox_to_anchor=(1.025, -0.15),fontsize = 20,
                   frameon=False,  ncol=4)
        plt.savefig('Results/Energy_Dispatch.png', bbox_inches='tight')
        plt.show()
    else:
        # Average profile: group the whole run by hour of the day and plot
        # the mean dispatch, converted from Wh to kWh.
        start = Time_Series.index[0]
        end = Time_Series.index[instance.Periods()-1]
        Time_Series = Time_Series.astype('float64')
        Plot_Data_2 = Time_Series[start:end].groupby([Time_Series[start:end].index.hour]).mean()
        Plot_Data_2 = Plot_Data_2/1000
        Plot_Data_2['Charge energy to the Battery'] = -Plot_Data_2['Charge energy to the Battery']
        Plot_Data = Plot_Data_2
        # NOTE(review): Vec2 is computed but never used below -- confirm.
        Vec = Plot_Data['Renewable Energy'] + Plot_Data['Energy Diesel']
        Vec2 = (Plot_Data['Renewable Energy'] + Plot_Data['Energy Diesel'] +
                Plot_Data['Discharge energy from the Battery'])
        ax1= Vec.plot(style='b-', linewidth=0.5) # Plot the line of the diesel energy plus the PV energy
        ax1.fill_between(Plot_Data.index, Plot_Data['Energy Diesel'].values, Vec.values,
                         alpha=0.3, color = 'b')
        ax2= Plot_Data['Energy Diesel'].plot(style='r', linewidth=0.5)
        ax2.fill_between(Plot_Data.index, 0, Plot_Data['Energy Diesel'].values,
                         alpha=0.2, color='r') # Fill the area of the energy produce by the diesel generator
        ax3 = Plot_Data['Energy_Demand'].plot(style='k', linewidth=2)
        ax3.fill_between(Plot_Data.index, Vec.values ,
                         Plot_Data['Energy_Demand'].values,
                         alpha=0.3, color='g',
                         where= Plot_Data['Energy_Demand']>= Vec,interpolate=True)
        ax5= Plot_Data['Charge energy to the Battery'].plot(style='m', linewidth=0.5) # Plot the line of the energy flowing into the battery
        ax5.fill_between(Plot_Data.index, 0,
                         Plot_Data['Charge energy to the Battery'].values
                         , alpha=0.3, color='m') # Fill the area of the energy flowing into the battery
        ax6= Plot_Data['State_Of_Charge_Battery'].plot(style='k--', secondary_y=True, linewidth=2, alpha=0.7 ) # Plot the line of the State of charge of the battery
        # Define name and units of the axis
        ax1.set_ylabel('Power (kW)')
        ax1.set_xlabel('hours')
        ax6.set_ylabel('Battery State of charge (kWh)')
        # Define the legends of the plot
        From_PV = mpatches.Patch(color='blue',alpha=0.3, label='From PV')
        From_Generator = mpatches.Patch(color='red',alpha=0.3, label='From Generator')
        From_Battery = mpatches.Patch(color='green',alpha=0.5, label='From Battery')
        To_Battery = mpatches.Patch(color='magenta',alpha=0.5, label='To Battery')
        Lost_Load = mpatches.Patch(color='yellow', alpha= 0.3, label= 'Lost Load')
        Energy_Demand = mlines.Line2D([], [], color='black',label='Energy Demand')
        State_Of_Charge_Battery = mlines.Line2D([], [], color='black',
                                                label='State Of Charge Battery',
                                                linestyle='--',alpha=0.7)
        plt.legend(handles=[From_Generator, From_PV, From_Battery,
                            To_Battery, Lost_Load, Energy_Demand,
                            State_Of_Charge_Battery], bbox_to_anchor=(1.83, 1))
        plt.savefig('Results/Energy_Dispatch.png', bbox_inches='tight')
        plt.show()
def Energy_Mix(instance, Scenarios, Scenario_Probability):
    '''
    Summarise the probability-weighted energy mix over all scenarios and
    print the renewable penetration, curtailed energy and battery usage.

    :param instance: The instance of the project resolution created by PYOMO.
    :param Scenarios: data frame with the per-period energy series of every
        scenario (columns like 'Renewable Energy 1 (Wh)').
    :param Scenario_Probability: mapping 'Scenario s' -> weight.

    :return: data frame Energy_Mix with one column per scenario.
    '''
    Number_Scenarios = int(instance.Scenarios.extract_values()[None])
    totals = Scenarios.sum()
    pv_total = 0
    gen_total = 0
    curtail_total = 0
    battery_total = 0
    demand_total = 0
    lost_load_total = 0
    Energy_Mix = pd.DataFrame()
    for s in range(1, Number_Scenarios + 1):
        scenario = 'Scenario ' + str(s)
        pv = totals['Renewable Energy ' + str(s) + ' (Wh)']
        gen = totals['Gen energy ' + str(s) + ' (Wh)']
        weight = Scenario_Probability[scenario]
        curtailed = totals['Curtailment ' + str(s) + ' (Wh)']
        battery_out = totals['Battery Flow Out ' + str(s) + ' (Wh)']
        demand = totals['Energy Demand ' + str(s) + ' (Wh)']
        # Probability-weighted system totals.
        pv_total += pv * weight
        gen_total += gen * weight
        curtail_total += curtailed * weight
        battery_total += battery_out * weight
        demand_total += demand * weight
        # Per-scenario (unweighted) shares.
        Energy_Mix.loc['PV Penetration', scenario] = pv / (pv + gen)
        Energy_Mix.loc['Curtailment Percentage', scenario] = curtailed / (pv + gen)
        Energy_Mix.loc['Battery Usage', scenario] = battery_out / demand
        if instance.Lost_Load_Probability > 0:
            # NOTE(review): the scenario weight is applied twice to the
            # accumulator below, and the 'Lost Load' row is weighted while
            # the other rows are not -- behaviour kept as-is, confirm intent.
            ll = totals['Lost Load ' + str(s) + ' (Wh)'] * weight
            lost_load_total += ll * weight
            Energy_Mix.loc['Lost Load', scenario] = ll / demand
    renewable_share = pv_total / (pv_total + gen_total)
    curtailment_share = curtail_total / (pv_total + gen_total)
    battery_share = battery_total / demand_total
    print(str(round(renewable_share * 100, 1)) + ' % Renewable Penetration')
    print(str(round(curtailment_share * 100, 1)) + ' % of energy curtail')
    print(str(round(battery_share * 100, 1)) + ' % Battery usage')
    if instance.Lost_Load_Probability > 0:
        scenario_names = ['Scenario ' + str(s) for s in range(1, Number_Scenarios + 1)]
        lost_load_real = sum(Energy_Mix.loc['Lost Load', name] for name in scenario_names)
        print(str(round(lost_load_real * 100, 1)) + ' % Lost load in the system')
    return Energy_Mix
def Print_Results(instance, Generator_Data, Data_Renewable, Battery_Data, Results,
                  formulation):
    '''
    Print a human-readable summary of the sizing results: nominal capacities
    of the renewable sources, generators and battery, plus the NPC and LCOE.

    :param formulation: 'LP' or 'MILP'; selects how the generator capacity
        is reported (MILP multiplies by the integer number of generators).

    Returns None; output goes to stdout only.
    '''
    renewable_sources = int(instance.Renewable_Source.extract_values()[None])
    generator_types = int(instance.Generator_Type.extract_values()[None])
    for i in range(1, renewable_sources + 1):
        # NOTE(review): labels carry a '(W)' suffix but the printed unit is
        # kW -- presumably the model works in kW despite the label; confirm.
        rate = round(float(Data_Renewable['Source ' + str(i)]['Total Nominal Capacity (W)']), 1)
        print(f'Renewable {i} nominal capacity is {rate} kW')
    if formulation == 'LP':
        for i in range(1, generator_types + 1):
            rate = round(float(Generator_Data['Generator ' + str(i)]['Generator Nominal Capacity (W)']), 1)
            print(f'Generator {i} nominal capacity is {rate} kW')
    if formulation == 'MILP':
        for i in range(1, generator_types + 1):
            column = 'Generator ' + str(i)
            # Unit capacity (rounded first, as in the original) times the
            # integer number of installed generators.
            rate = round(float(Generator_Data[column]['Generator Nominal Capacity (W)']), 1)
            rate = rate * Generator_Data[column]['Number of Generators']
            print(f'Generator {i} nominal capacity is {rate} kW')
    battery_rate = round(Battery_Data['Battery']['Nominal Capacity (Wh)'], 1)
    print(f'Battery nominal capacity is {battery_rate} kWh')
    npc = round(Results['Data']['NPC (USD)'] / 1000, 0)
    print(f'NPC is {npc} Thousand USD')
    lcoe = round(Results['Data']['LCOE (USD/kWh)'], 3)
    print(f'The LCOE is {lcoe} USD/kWh')
def Print_Results_Dispatch(instance, Economic_Results):
    '''
    Print the operation costs of a dispatch run (fuel, lost load, battery
    and total), each rounded to two decimals.

    ``instance`` is unused but kept for interface compatibility.

    :param Economic_Results: list returned by Dispatch_Economic_Analysis;
        element [1] holds the cost totals.

    Returns None; output goes to stdout only.
    '''
    costs = Economic_Results[1]
    for label, prefix in (('Fuel', 'Diesel cost is '),
                          ('VOLL', 'Lost load cost is '),
                          ('Battery operation Cost', 'Battery operation cost is '),
                          ('Total Cost', 'Total operation cost is ')):
        print(prefix + str(round(costs[label], 2)) + ' USD')
def Energy_Mix_Dispatch(instance, Time_Series):
    '''
    Print the renewable penetration, curtailed-energy share and battery
    usage of a dispatch run, each as a percentage.

    ``instance`` is unused but kept for interface compatibility.

    :param Time_Series: data frame returned by Load_results1_Dispatch.

    Returns None; output goes to stdout only.
    '''
    totals = Time_Series.sum()
    pv_energy = totals['Renewable Energy']
    generator_energy = totals['Energy Diesel']
    produced = pv_energy + generator_energy
    penetration = round(pv_energy / produced, 4)
    curtailment_share = round(totals['Curtailment'] / produced, 4)
    battery_usage = round(totals['Discharge energy from the Battery']
                          / totals['Energy_Demand'], 4)
    print(str(penetration * 100) + ' % Renewable Penetration')
    print(str(curtailment_share * 100) + ' % of energy curtail')
    print(str(battery_usage * 100) + ' % Battery usage')
| null | Results.py | Results.py | py | 75,742 | python | en | code | null | code-starcoder2 | 51 |
50512460 | import socket
import re
def service_client(client_socket, recv_data):
    """Serve a single HTTP request and close the connection.

    :param client_socket: connected client socket (only send/close are used).
    :param recv_data: the decoded HTTP request text.
    """
    request_lines = recv_data.splitlines()
    # The request line looks like: GET /index.html HTTP/1.1
    # (the method may also be POST, PUT, DEL, ...)
    ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
    if ret:
        file_name = ret.group(1)
        if file_name == '/':
            file_name = '/index.html'
        print(file_name)
        try:
            # 'with' guarantees the file handle is released even if the
            # read fails (the original leaked it on a read error).
            with open('./html' + file_name, 'rb') as f:
                html_content = f.read()
        except OSError:
            # Narrowed from a bare 'except': only file-system errors
            # (missing file, permissions, ...) mean "not found".
            response = 'HTTP/1.1 404 NOT FOUND\r\n'
            response += 'Content-Type: text/html; charset=utf-8\r\n'
            response += '\r\n'
            response += '---没有找到文件---'
            client_socket.send(response.encode('utf-8'))
        else:
            response = 'HTTP/1.1 200 OK\r\n'
            response += 'Content-Type: text/html; charset=utf-8\r\n'
            response += '\r\n'
            client_socket.send(response.encode('utf-8'))
            client_socket.send(html_content)
    # One request per connection: closing the socket also delimits the body.
    client_socket.close()
def main():
    """Run a simple single-threaded, non-blocking HTTP server on port 7788.

    Accepts connections and polls every known client in a busy loop; each
    request is answered by service_client, which closes the socket.
    """
    http_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow the port to be rebound immediately after an unclean shutdown.
    http_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    http_server.bind(('', 7788))
    http_server.listen(128)
    # Non-blocking listening socket: accept() raises BlockingIOError when
    # no connection is pending instead of waiting.
    http_server.setblocking(False)
    client_socket_list = list()
    while True:
        try:
            client_socket, client_addr = http_server.accept()
        except BlockingIOError:
            # No new connection right now.
            pass
        else:
            client_socket.setblocking(False)
            client_socket_list.append(client_socket)
        # Poll every known client.  Iterate over a copy: removing items
        # from a list while iterating it skips elements (bug in the
        # original, which also never removed served/closed sockets and so
        # polled dead sockets forever).
        for client in list(client_socket_list):
            try:
                recv_data = client.recv(1024).decode('utf-8')
            except BlockingIOError:
                # Client connected but has not sent anything yet.
                continue
            except (OSError, UnicodeDecodeError):
                # Connection reset/already closed, or undecodable bytes:
                # drop the client.
                client_socket_list.remove(client)
                client.close()
            else:
                if recv_data:
                    # service_client closes the socket when done, so the
                    # entry must also be dropped from the polling list.
                    service_client(client, recv_data)
                    client_socket_list.remove(client)
                else:
                    # Empty read: the browser closed the connection.
                    client_socket_list.remove(client)
                    client.close()
    http_server.close()  # unreachable, kept for symmetry with the original
# Start the server only when executed as a script (not on import).
if __name__=='__main__':
    main()
| null | python-learn/http/http_server6_noblocking.py | http_server6_noblocking.py | py | 2,568 | python | en | code | null | code-starcoder2 | 51 |
338464965 | import unittest
from array_splitter import split
class TestArraySplitter(unittest.TestCase):
    """Unit tests for array_splitter.split."""

    def test_split_should_return_empty_set_if_there_is_no_valid_splitting(self):
        # given
        array = ['a', 'b', 'c']
        GROUP_COUNT_TOO_MANY = len(array) + 1
        # An empty *set*, as the test name promises -- the original used
        # ``{}``, which is an empty dict.
        expected = set()

        # when
        actual = split(array, GROUP_COUNT_TOO_MANY)

        # then
        self.assertEqual(actual, expected)

    def test_split_should_split_array_into_two_parts(self):
        # given
        array = ['a', 'a', 'b', 'b']
        group_count = 2
        # The original ``{[['a', 'a'], ['b', 'b']]}`` raises TypeError at
        # runtime: a set literal cannot hold unhashable lists.  Tuples are
        # used instead.
        # TODO(review): confirm against array_splitter.split's return type.
        expected = {(('a', 'a'), ('b', 'b'))}

        # when
        actual = split(array, group_count)

        # then
        self.assertEqual(actual, expected)
445510582 | import tensorflow as tf
from functools import partial
from sys import stdout
from sklearn.model_selection import KFold
import time
from tensorflow.python.client import timeline
from noise_models_and_integration import *
def fidelity_cost_fn(network, y_, learning_rate, params, n_ts, evo_time, dim, noise_name):
    """Build the fidelity-based loss, its Adam minimiser and an accuracy node.

    :param network: batch of control pulses produced by the network.
    :param y_: batch of target matrices.
    :param learning_rate: learning rate of the Adam optimizer.
    :param params, n_ts, evo_time, noise_name: forwarded to integrate_lind.
    :param dim: system dimension, forwarded to fidelity_err.
    :return: tuple (optimizer op, accuracy tensor).
    """
    # Integrate the Lindblad equation for every control sequence in the batch,
    # turning each pulse sequence into the matrix it generates.
    lindblad_step = partial(integrate_lind, params=params, n_ts=n_ts,
                            evo_time=evo_time, noise_name=noise_name, tf_result=True)
    complex_net = tf.cast(network, tf.complex128)
    generated_matrices = tf.map_fn(lindblad_step, complex_net)
    # Pair every target with its generated matrix: tensor of (target, generated).
    paired_batch = tf.stack([y_, generated_matrices], axis=1)
    fidelity_error_fn = partial(fidelity_err, dim=dim, tf_result=True)
    fidelity_errors = tf.map_fn(fidelity_error_fn, paired_batch, dtype=tf.float32)
    # The loss is the mean fidelity error over the batch.
    loss = tf.cast(tf.reduce_mean(fidelity_errors), tf.float32)
    tf.summary.scalar('loss_func', loss)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
    accuracy = tf.cast(tf.reduce_mean(1 - fidelity_errors), tf.float32)
    return (optimizer, accuracy)
def my_lstm(x_, controls_nb, size_of_lrs, keep_prob):
    """Stacked bidirectional LSTM mapping input sequences to control pulses.

    :param x_: input tensor of shape (batch, time, features).
    :param controls_nb: number of control signals produced per time step.
    :param size_of_lrs: list with the number of units of each stacked layer.
    :param keep_prob: dropout keep probability (scalar tensor or float).
    :return: tensor of shape (batch, time, controls_nb), tanh-bounded.
    """
    def _make_cells():
        # Build a fresh cell stack per direction.  The original passed the
        # same cell objects as both cells_fw and cells_bw, which makes the
        # forward/backward variable scopes collide in TF1 (reuse error or
        # unintended weight sharing between directions).
        cells = []
        for n_units in size_of_lrs:
            cell = tf.nn.rnn_cell.LSTMCell(num_units=n_units, use_peepholes=True)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=keep_prob)
            cells.append(cell)
        return cells

    outputs = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
        cells_fw=_make_cells(),
        cells_bw=_make_cells(),
        inputs=x_,
        dtype=tf.float32,
        parallel_iterations=32
    )
    # outputs[0] concatenates the forward and backward outputs on the last
    # axis; split them back apart to merge them by summation instead.
    output_fw, output_bw = tf.split(outputs[0], 2, axis=2)
    tf.summary.histogram("output_fw", output_fw)
    tf.summary.histogram("output_bw", output_bw)
    tf.summary.histogram("cell_fw", outputs[1][0])
    tf.summary.histogram("cell_bw", outputs[2][0])
    sum_fw_bw = tf.add(output_fw, output_bw)
    # Flatten (batch, time) so a single dense layer projects every time step
    # to the control dimension, then restore the sequence shape.
    squeezed_layer = tf.reshape(sum_fw_bw, [-1, size_of_lrs[-1]])
    dropped = tf.nn.dropout(squeezed_layer, keep_prob)
    dense = tf.contrib.layers.fully_connected(dropped, controls_nb, activation_fn=tf.nn.tanh)
    output = tf.reshape(dense, [tf.shape(x_)[0], tf.shape(x_)[1], controls_nb])
    return output
def fit(sess,
        network,
        x_,
        y_,
        keep_prob,
        train_input,
        train_target,
        test_input,
        test_target,
        nb_epochs,
        batch_size,
        train_set_size,
        learning_rate,
        model_params,
        n_ts,
        evo_time,
        dim,
        noise_name):
    """Train `network` with the fidelity-based cost and monitor progress.

    The optimizer/accuracy ops are built by `fidelity_cost_fn`; TensorBoard
    summaries are written under `tensorboard/<timestamp>`.  Mini-batches are
    drawn via a shuffled KFold split of the training set, and every 1000
    optimisation steps the current train-batch accuracy and the accuracy on
    the first 500 test samples are printed and recorded.

    Returns:
        (final_test_accuracy, train_table, test_table) where the tables hold
        the accuracies sampled every 1000 steps.
    """
    tensorboard_path = 'tensorboard/' + str(time.ctime())
    optimizer, accuracy = fidelity_cost_fn(network, y_, learning_rate, model_params, n_ts, evo_time,dim, noise_name)
    # 500 is the number of test samples used in monitoring the efficiency of the network
    test_sample_indices = np.arange(500)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
    # KFold with shuffle=True is used purely as a convenient way of drawing
    # disjoint, randomly ordered mini-batches of size `batch_size`.
    kf = KFold(n_splits=(train_set_size//batch_size), shuffle = True)
    print(np.shape(test_input))
    # LEARNING LOOP
    with sess.as_default():
        # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        # run_metadata = tf.RunMetadata()
        sess.run(tf.global_variables_initializer())
        j = -1  # global step counter across epochs
        train_table = []
        test_table = []
        # Outer loop repeats the KFold pass enough times to reach nb_epochs
        # optimisation steps in total (each KFold pass yields
        # train_set_size // batch_size batches).
        for i in range(int(np.ceil(nb_epochs / (train_set_size // batch_size)))):
            for train_index, rand in kf.split(train_input, train_target):
                j += 1
                # `rand` is the held-out fold's index set -> one mini-batch.
                batch = (train_input[rand], train_target[rand])
                # batch = (train_input[(j%train_set_size):((j+batch_size)%train_set_size)], train_target[(j%train_set_size):((j+batch_size)%train_set_size)])
                # MONITORING OF EFFICENCY
                if j % 1000 == 0:
                    # Evaluate with dropout disabled (keep_prob = 1.0).
                    summary, train_accuracy = sess.run( [merged, accuracy], feed_dict={x_: batch[0],
                                                          y_: batch[1],
                                                          keep_prob: 1.0})
                    train_table.append(train_accuracy)
                    test_accuracy = accuracy.eval(feed_dict={x_: test_input[test_sample_indices],
                                                             y_: test_target[test_sample_indices],
                                                             keep_prob: 1.0})
                    test_table.append(test_accuracy)
                    train_writer.add_summary(summary, j)
                    print("step %d, training accuracy %g" % (j, train_accuracy))
                    stdout.flush()
                    print("step %d, test accuracies %g" % (j, test_accuracy))
                    stdout.flush()
                    print (" ")
                    stdout.flush()
                # One optimisation step with dropout enabled.
                sess.run(optimizer,
                         feed_dict={x_: batch[0],
                                    y_: batch[1],
                                    keep_prob: 0.5})#,options=options, run_metadata=run_metadata)
                # fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                # chrome_trace = fetched_timeline.generate_chrome_trace_format()
                #
                # with open('timeline_02_step_{}.json'.format(i), 'w') as f:
                #     f.write(chrome_trace)
        # Final evaluation on the complete test set.
        test_accuracy = accuracy.eval(feed_dict={x_: test_input,
                                                 y_: test_target,
                                                 keep_prob: 1.})
        return (test_accuracy,train_table,test_table)
def get_prediction(sess, network, x_, keep_prob, test_input):
    """Run a forward pass of `network` on `test_input` with dropout disabled."""
    feed = {x_: test_input, keep_prob: 1.0}
    return sess.run(network, feed_dict=feed)
| null | architecture.py | architecture.py | py | 6,851 | python | en | code | null | code-starcoder2 | 51 |
331807531 | # import numpy as np
# from keras.models import Sequential
# from keras.layers import Dense, Dropout, Flatten
# from keras.layers import Convolution2D, MaxPooling2D
# np.random.seed(42) # set deterministic reproducibility
import image
from log import debug
from keras.models import model_from_json
from multiclasshelper import MultiClassHelper
from image import image2pix, get_resized_pixels
import pickle
import numpy as np
class wswtm():
    """Image tagger backed by a pre-trained Keras CNN.

    Loads a serialized model (architecture JSON + weights H5 + class-index
    pickle) and maps 96x96 single-channel images to tag probabilities.
    """

    # Determines which pixel data is used (only one channel feeds the net).
    red = True
    green = False
    blue = False

    default_model_path = 'resources/models/'
    default_model_name = 'basic_cnn'
    load_on_init = True

    # Safe fallbacks so attribute access works even before init() has run
    # (previously, load_on_init=False left these attributes undefined and
    # image2tags()/get_classes() raised AttributeError).
    model = None
    dct = None

    def __init__(self):
        if self.load_on_init is True:
            self.init()

    def init(self):
        """Load the default model and its class dictionary."""
        model, dct = self.load_model(self.default_model_path, self.default_model_name)
        self.model = model
        self.dct = dct

    def load_model(self, model_path, model_name):
        """Load a trained model.

        Reads `<path><name>.json` (architecture), `<path><name>.h5` (weights)
        and `<path><name>_dct.p` (class dictionary).  Returns (model, dct).
        """
        self.model_path = model_path
        self.model_name = model_name
        # Context managers guarantee the file handles are closed (the pickle
        # file was previously opened without ever being closed).
        with open(self.model_path + self.model_name + '.json', 'r') as json_file:
            loaded_model = json_file.read()
        model = model_from_json(loaded_model)
        model.load_weights(self.model_path + self.model_name + ".h5")
        with open(self.model_path + self.model_name + '_dct.p', 'rb') as dct_file:
            dct = pickle.load(dct_file)
        return model, dct

    def image2tags(self, path, treshold=0.75):
        """Return possible (class, probability) tags for the image at `path`.

        NOTE(review): `treshold` is currently unused -- confirm whether
        probability filtering belongs here or in the caller.
        """
        # Resize to the network's 96x96 input and keep only the first channel.
        p, _, _ = get_resized_pixels(path, 96, 96)
        pxl = [px[0] for px in p]
        vec = np.asarray(pxl)
        vec = vec.reshape(1, 96, 96, 1)
        if self.model is None:
            self.init()
        r = self.model.predict(vec)
        mch = MultiClassHelper()
        rp = mch.array_to_classes_with_prob(r[0], self.dct)
        return rp

    def get_classes(self):
        """Return the classes known to the classifier, or None if not loaded."""
        if self.dct:
            return self.dct.keys()
        return None
debug("wswtm init...")
| null | src/python/wswtm.py | wswtm.py | py | 2,215 | python | en | code | null | code-starcoder2 | 51 |
399666237 | import pygame
import board
from pygame.locals import *
# Window size in pixels -- presumably a 7x6 grid of 100 px cells, matching
# the 100-px spacing used by the drawing code below (TODO confirm against
# board.board_size).
width = 700
height = 600
def start():
    """Initialise pygame, open the main window, run the game, then shut down."""
    pygame.init()
    surface = pygame.display.set_mode((width, height))
    game_loop(surface)
    pygame.quit()
def game_loop(screen):
    """Run the event/draw loop on `screen` until the window is closed.

    A mouse click drops a piece in the clicked column; after a win, the next
    click resets the board and immediately plays that move.  The board is
    redrawn every frame and a victory message is overlaid while a winner
    exists.
    """
    game_board = board.Board()
    # Cell colours: 0 = empty slot, 1 = red player, 2 = yellow player.
    colors = {0: (255, 255, 255), 1: (200, 0, 0), 2: (255, 255, 0)}
    win_font = None  # loaded lazily, once (was reloaded from disk every frame)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == MOUSEBUTTONDOWN:
                if game_board.winner is not None:
                    game_board.reset_board()
                pos = pygame.mouse.get_pos()
                pos = pos[0] // 100  # pixel x -> column index (100 px per cell)
                game_board.move(pos)
        screen.fill((0, 0, 255))
        for i in range(board.board_size[0]):
            for j in range(board.board_size[1]):
                # Dark ring plus coloured disc; row 0 is drawn at the bottom.
                pygame.draw.circle(screen, (5, 5, 5), (50 + 100 * j, 550 - 100 * i), 45)
                pygame.draw.circle(screen, colors[game_board.board[i, j]], (50 + 100 * j, 550 - 100 * i), 43)
        if game_board.winner is not None:
            pygame.display.set_caption('Show Text')
            if win_font is None:
                # Load the font file only once instead of on every frame.
                win_font = pygame.font.Font('Inkfree.ttf', 72)
            text = win_font.render(f"player {game_board.winner} win!!!", True, colors[game_board.winner], (0, 0, 0))
            textRect = text.get_rect()
            textRect.center = (width // 2, height // 2)
            screen.blit(text, textRect)
        pygame.display.flip()
| null | UI.py | UI.py | py | 1,500 | python | en | code | null | code-starcoder2 | 51 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.