seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6655501967 | from functools import partial
from typing import Callable
import numpy as np
import rospy
from stable_baselines3.common.vec_env import VecNormalize
from supersuit.vector import ConcatVecEnv, MarkovVectorEnv
from supersuit.vector.sb3_vector_wrapper import SB3VecEnvWrapper
class MarkovVectorEnv_patched(MarkovVectorEnv):
    """Patched environment wrapper which creates the correct API for vector environments.

    Dones for dead agents are returned as True instead of False (see the
    ``dones.get(agent, True)`` default below, which is the actual patch).
    """
    def step(self, actions):
        # Map the flat action vector back onto the currently-alive agents;
        # slots belonging to agents that have already died are skipped.
        agent_set = set(self.par_env.agents)
        act_dict = {
            agent: actions[i]
            for i, agent in enumerate(self.par_env.possible_agents)
            if agent in agent_set
        }
        observations, rewards, dones, infos = self.par_env.step(act_dict)
        # adds last observation to info where user can get it
        if all(dones.values()):
            for agent, obs in observations.items():
                infos[agent]["terminal_observation"] = obs
        # Dead agents contribute a reward of 0.
        rews = np.array(
            [rewards.get(agent, 0) for agent in self.par_env.possible_agents],
            dtype=np.float32,
        )
        # we changed the default value to true instead of false
        dns = np.array(
            [dones.get(agent, True) for agent in self.par_env.possible_agents],
            dtype=np.uint8,
        )
        infs = [infos.get(agent, {}) for agent in self.par_env.possible_agents]
        # Auto-reset once the whole episode is over, mirroring the SB3 VecEnv API.
        if all(dones.values()):
            observations = self.reset()
        else:
            observations = self.concat_obs(observations)
        # NOTE(review): this assertion runs after the potential reset above, so
        # it validates the post-reset agent list, not the step that just
        # finished — confirm that is the intended order.
        assert (
            self.black_death
            or self.par_env.agents == self.par_env.possible_agents
        ), "MarkovVectorEnv does not support environments with varying numbers of active agents unless black_death is set to True"
        return observations, rews, dns, infs
def vec_env_create(
    env_fn: Callable,
    agent_list_fn: Callable,
    num_robots: int,
    num_cpus: int,
    num_vec_envs: int,
    PATHS: dict,
) -> SB3VecEnvWrapper:
    """Function which vectorizes a given environment function in multiple parallel environments.
    Args:
        env_fn (Callable): Function that initializes an environment with wrappers
        agent_list_fn (Callable): Function that creates the agent list for each environment
        num_robots (int): Number of robots in the environment
        num_cpus (int): Maximal number of CPUs to use (Currently only one process is used anyhow)
        num_vec_envs (int): Number of parallel environments to spawn
        PATHS (dict): Dictionary which holds hyperparameters for the experiment
    Returns:
        SB3VecEnvWrapper: Vectorized environments following the SB3 VecEnv API. Each robot in an environment \
            poses as an environment in the vector.
    """
    env_list_fns = [
        partial(
            env_fn,
            ns=f"sim_{i}",
            num_agents=num_robots,
            agent_list_fn=agent_list_fn,
            PATHS=PATHS,
        )
        for i in range(1, num_vec_envs + 1)
    ]
    # Instantiate one environment only to read its spaces and metadata.
    env = env_list_fns[0]()
    # Bug fix: the action space was previously copied from env.observation_space,
    # which handed ConcatVecEnv the wrong space for actions.
    action_space = env.action_space
    observation_space = env.observation_space
    metadata = env.metadata
    # Never request more CPUs than there are environments to run.
    num_cpus = min(num_cpus, num_vec_envs)
    rospy.init_node("train_env", disable_signals=False, anonymous=True)
    vec_env = ConcatVecEnv(env_list_fns, observation_space, action_space)
    return SB3VecEnvWrapper(vec_env)
| ignc-research/arena-marl | arena_navigation/arena_local_planner/learning_based/arena_local_planner_drl/rl_agent/utils/supersuit_utils.py | supersuit_utils.py | py | 3,409 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "supersuit.vector.MarkovVectorEnv",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy... |
29550581614 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 15:41:54 2018
@author: usuario
"""
import pandas as pd
import numpy as np
from keras.models import load_model
from collections import Counter
import time
from datetime import datetime
def runClassifier (current_batch, clf):
    """Classify a batch of windows and return the majority-vote class.

    Args:
        current_batch: list/array of fixed-length sensor windows fed to the model.
        clf: fitted model exposing ``predict`` (e.g. a Keras model).

    Returns:
        The class index predicted for the most windows in the batch. Ties are
        broken by first occurrence, matching the previous manual voting code.
    """
    batch = np.array(current_batch)
    # One predicted class per window in the batch.
    class_pred = np.argmax(clf.predict(batch), axis=1)
    # Counter.most_common(1) yields the highest-count class, ties broken by
    # insertion order — same result as the old keys/values index juggling,
    # without the leftover debug prints.
    return Counter(class_pred).most_common(1)[0][0]
def classify(f_name, model, time_window, stride, batch_size=6, verbose=True):
    """Slide a window over a CSV of x/y/z samples and classify each batch.

    Args:
        f_name: path to a CSV with "x", "y", "z" and "timestamp" columns.
        model: path to a saved Keras model loadable with ``load_model``.
        time_window: number of samples per window.
        stride: NOTE(review) — accepted but never used; the window advances by
            ``time_window`` (non-overlapping windows). Confirm this is intended.
        batch_size: number of windows voted on together per prediction.
        verbose: print progress messages when True.

    Returns:
        tuple of (predicted class per batch, formatted timestamp per batch),
        both as numpy arrays.
    """
    start_computing_time = time.time()
    output_sequence=[]
    time_stamp = []
    df = pd.read_csv(f_name)
    data=df [["x", "y", "z"]].values
    timestamp = df[["timestamp"]].values
    if (verbose):
        print('Data loaded.')
    clf=load_model(model)
    if (verbose):
        print('Model loaded.')
    offset=0
    current_batch=[]
    bc=0
    while (offset+time_window)<data.shape[0]:
        current_batch.append(data[offset:(offset+time_window)])
        if len(current_batch)==batch_size:
            #print (str(bc))
            # Bitwise & instead of `and`; works here only because both
            # operands are bools.
            if (verbose & (bc%500==0)):
                print('Progress (batches): '+ str(bc))
            bc+=1
            output_sequence.append(runClassifier(current_batch, clf))
            # NOTE(review): the double-bracket selection above makes
            # timestamp[...] a 1-element array, not a scalar — verify
            # datetime.fromtimestamp accepts it with the numpy version in use.
            dt_object = datetime.fromtimestamp(timestamp[offset+time_window])
            time_stamp.append(dt_object.strftime("%d-%b-%Y (%H:%M:%S.%f)"))
            current_batch=[]
        offset+=time_window
    # Flush the final partial batch, if any.
    if len(current_batch)>0:
        output_sequence.append(runClassifier(current_batch, clf))
        dt_object = datetime.fromtimestamp(timestamp[offset])
        time_stamp.append(dt_object.strftime("%d-%b-%Y (%H:%M:%S.%f)"))
    total_computing_time = time.time() - start_computing_time
    print("computing time:", str(total_computing_time))
    return np.array(output_sequence), np.array(time_stamp)
f_name = '/home/khaosdev/Documentos/Sandro/Proyecto_Spark/ML_HAR/models/cyclingmodel1.csv'
model='/home/khaosdev/AnacondaProjects/Proyecto_Pulseras/clf_11.bin'
if __name__ == '__main__':
outputs=classify(f_name, model, 1000, 1000)
print(outputs)
| palomadominguez/TFG-pulseras | src/classify.py | classify.py | py | 2,435 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_nu... |
15865559103 | import argparse
if __name__ == "__main__":
    # Average three subject marks supplied on the command line.
    parser = argparse.ArgumentParser(description="Average three subject marks")
    # type=int lets argparse validate the values, and required=True turns a
    # missing flag into a clear usage error instead of the TypeError that
    # int(None) raised before.
    parser.add_argument("--physics", type=int, required=True, help="physics marks")
    parser.add_argument("--chemistry", type=int, required=True, help="chemistry marks")
    parser.add_argument("--maths", type=int, required=True, help="maths marks")
    args = parser.parse_args()
    print(args.physics)
    print(args.chemistry)
    print(args.maths)
    print("Result:", (args.physics + args.chemistry + args.maths) / 3)

# python3 cmd.py --physics 60 --chemistry 70 --maths 90
| codebasics/py | Basics/Exercise/24_argparse/24_argparse.py | 24_argparse.py | py | 527 | python | en | code | 6,422 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
}
] |
16845647150 | from django.urls import path
from . import views
# Namespace used for reversing URLs, e.g. "main:index".
app_name = 'main'
urlpatterns = [
    # Public pages (no login required).
    path('', views.index, name="index"),
    path('search/', views.search, name="search"),
    # Pages for authenticated users.
    path('home/', views.home, name="home"),
    path('post/', views.addWord, name="post"),
    path('results/', views.results, name="results"),
    path('dashboard/', views.dashbBoard, name="dashboard"),
    # Authentication flow (signup / login / logout).
    path('signup/', views.signup, name="signup"),
    path('login/', views.loginPage, name="login"),
    path('logout/', views.logoutUser, name="logout"),
]
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
73257029225 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: williamhadnett D00223305
"""
import pymongo
import os
os.chdir('/Users/williamhadnett/Documents/Data_Science/Data_Science_CA3_William_Hadnett')
import atlasCredentials
# =============================================================================
# Connect to MongoDB
# =============================================================================
connection = "mongodb+srv://"+atlasCredentials.username+":"+atlasCredentials.password+"@cluster0.gh4kb.mongodb.net/test?retryWrites=true&w=majority"
client = pymongo.MongoClient(connection)
mydb = client['test']
shopcol = mydb['websiteshop']
# =============================================================================
# Product Association
# =============================================================================
#Find top ten products for association analysis based on quantity purchased.
unwind = {'$unwind':'$Basket'}
group = {'$group': {'_id': '$Basket.StockCode', 'count': {'$sum': 1}}}
sort={'$sort':{'count':-1}}
limit={'$limit': 10}
top10 = list(shopcol.aggregate([unwind,group,sort,limit]))
print(top10)
# [{'_id': '85123A', 'count': 320}, {'_id': '22423', 'count': 211},
# {'_id': '22469', 'count': 182}, {'_id': '22834', 'count': 162},
# {'_id': '22111', 'count': 160}, {'_id': '22961', 'count': 160},
# {'_id': '21485', 'count': 155}, {'_id': '22470', 'count': 152},
# {'_id': '22113', 'count': 146}, {'_id': '22112', 'count': 143}]
# =============================================================================
# Product Association - Product 85123A - Product 21212 - (Benchmark)
# =============================================================================
group = {'$group': {'_id': 0, 'total': {'$sum': 1}}}
totalDocs = list(shopcol.aggregate([group]))
print(totalDocs)
# Support(x) = # of transactions in which x appears/total transactions
query = {'Basket.StockCode': '85123A'}
support85123A = shopcol.count_documents(query) / totalDocs[0]['total']
print(support85123A)
# Support 85123A bought: 0.153
query = {'Basket.StockCode': {'$all': ['85123A', '21212']}}
supportBoth = shopcol.count_documents(query) / totalDocs[0]['total']
print(supportBoth)
# Support Both bought: 0.0105
#Confidence that 21212 will be bought when 85123A is bought.
#Conf(85123A -> 21212) = supp(85123A and 21212)/ supp(85123A)
conf = supportBoth / support85123A
print(conf)
# conf: 0.06862745098039216
# Lift
query = {'Basket.StockCode': '21212'}
support21212 = shopcol.count_documents(query) / totalDocs[0]['total']
print(support21212)
# 0.056
#Life(85123A -> 21212) = supp(85123A and 21212)/ supp(85123A) * supp(21212)
lift = supportBoth / (support85123A * support21212)
print(lift)
# Lift: 1.2254901960784315
# So the support for 21212 is 0.004% more likely to bough if the basket contains
# product 85123A than in general.
# =============================================================================
# Product Association - Generalize formula for top 10
# =============================================================================
# The above support, confidence and lift will act as a bench mark to ensure that the
# calculates for the top ten are carried out correctly.
# This funciton is more general and can be applied to the top 10 as well as the
# entire database. However, please note that processing the entire database
# may take some time.
def calculateAssoication(mongoResponse):
    """Compute support, confidence and lift for consecutive item pairs.

    Pairs up the aggregation results, then for each (item1, item2) pair
    computes both directions of confidence and the (symmetric) lift, and
    prints them via displayAssoication. Relies on the module-level
    ``shopcol`` collection.

    NOTE(review): supportItem1/supportItem2 reuse the per-item counts from
    the aggregation (basket-line appearances) while supportBoth uses
    count_documents (documents) — these denominators agree only if each stock
    code appears at most once per basket. Confirm against the data model.
    """
    pairs = findPairs(mongoResponse)
    group = {'$group': {'_id': 0, 'total': {'$sum': 1}}}
    totalDocs = list(shopcol.aggregate([group]))
    for i in pairs:
        # Support(x) = # of transactions in which x appears/total transactions
        # query = {'Basket.StockCode': i[0]['_id']}
        supportItem1 = i[0]['count'] / totalDocs[0]['total']
        query = {'Basket.StockCode': {'$all': [i[0]['_id'], i[1]['_id']]}}
        supportBoth = shopcol.count_documents(query) / totalDocs[0]['total']
        #Confidence that Item 1 will be bought when Item 2 is bought.
        #Conf(Item 1 -> Item 2) = supp(Item 1 and Item 2)/ supp(Item 1)
        # Lift
        query = {'Basket.StockCode': i[1]['_id']}
        supportItem2 = i[1]['count'] / totalDocs[0]['total']
        conf = supportBoth / supportItem1
        # The only metric that changes in regards to the inverse association
        # appears to be confidence, as the number of appearances remains the same
        # for both items individually and together in the same basket. This metric
        # can be gathered to display the inverse relationship to the reader.
        inverseConf = supportBoth / supportItem2
        #Lift(Item 1 -> Item 2) = supp(Item 1 and Item 2)/ supp(Item1) * supp(Item2)
        lift = supportBoth / (supportItem1 * supportItem2)
        displayAssoication(supportItem1, supportBoth, supportItem2, conf, inverseConf, lift, i)
# Converting to a list of tuples using iterator
# https://stackoverflow.com/questions/23286254/how-to-convert-a-list-to-a-list-of-tuples
def findPairs(mongoResponse):
    """Group a flat list of result documents into consecutive (a, b) pairs.

    An odd trailing element is dropped, matching zip()'s behaviour.
    """
    iterator = iter(mongoResponse)
    return list(zip(iterator, iterator))
def displayAssoication(support1, supportBoth, support2, conf, inverseConf, lift, i):
    """Print the association metrics for a pair of items in both directions.

    The first block reports item1 -> item2 (using ``conf``); the second
    reports item2 -> item1 (using ``inverseConf``). Lift is symmetric so the
    same value is printed for both directions.
    """
    print('Support for Item ',i[0]['_id'],': ',support1)
    print('Support Both: ',supportBoth)
    print('Support Item ',i[1]['_id'],': ',support2)
    print('Confidence: ',conf)
    print('Lift ',i[0]['_id'],' -> ',i[1]['_id'],': ',lift)
    print('\nSupport Item ',i[1]['_id'],': ',support2)
    print('Support Both: ',supportBoth)
    print('Support for Item ',i[0]['_id'],': ',support1)
    print('Confidence: ',inverseConf)
    print('Lift ',i[1]['_id'],' -> ',i[0]['_id'],': ',lift)
    print("\n")
calculateAssoication(top10)
# Output of Assoication Analysis of Top Ten Items
'''
Support for Item 85123A : 0.16
Support Both: 0.016
Support Item 22423 : 0.1055
Confidence: 0.1
Lift 85123A -> 22423 : 0.9478672985781991
Support Item 22423 : 0.1055
Support Both: 0.016
Support for Item 85123A : 0.16
Confidence: 0.15165876777251186
Lift 22423 -> 85123A : 0.9478672985781991
Support for Item 22469 : 0.091
Support Both: 0.0085
Support Item 22834 : 0.081
Confidence: 0.09340659340659342
Lift 22469 -> 22834 : 1.1531678198344866
Support Item 22834 : 0.081
Support Both: 0.0085
Support for Item 22469 : 0.091
Confidence: 0.10493827160493828
Lift 22834 -> 22469 : 1.1531678198344866
Support for Item 22111 : 0.08
Support Both: 0.0105
Support Item 22961 : 0.08
Confidence: 0.13125
Lift 22111 -> 22961 : 1.640625
Support Item 22961 : 0.08
Support Both: 0.0105
Support for Item 22111 : 0.08
Confidence: 0.13125
Lift 22961 -> 22111 : 1.640625
Support for Item 21485 : 0.0775
Support Both: 0.0095
Support Item 22470 : 0.076
Confidence: 0.12258064516129032
Lift 21485 -> 22470 : 1.6129032258064517
Support Item 22470 : 0.076
Support Both: 0.0095
Support for Item 21485 : 0.0775
Confidence: 0.125
Lift 22470 -> 21485 : 1.6129032258064517
Support for Item 22113 : 0.073
Support Both: 0.017
Support Item 22112 : 0.0715
Confidence: 0.23287671232876717
Lift 22113 -> 22112 : 3.257016955647093
Support Item 22112 : 0.0715
Support Both: 0.017
Support for Item 22113 : 0.073
Confidence: 0.2377622377622378
Lift 22112 -> 22113 : 3.257016955647093
'''
| hadnett/Data_Science_Ecommerce_Performance | section2_CA3_William_Hadnett.py | section2_CA3_William_Hadnett.py | py | 7,432 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "atlasCredentials.username",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "atlasCredentials.password",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name":... |
32842087226 | import cv2 as cv
import os
def YOLO():
    """Load the tiny YOLOv3 detector.

    Returns:
        tuple: (cv2 dnn network, blob pre-processing options, COCO-2014 labels).
    """
    base_dir = os.path.dirname(__file__)
    net = cv.dnn.readNetFromDarknet(base_dir + "/models/yolov3-tiny.cfg", base_dir + "/models/yolov3-tiny.weights")
    blob_options = {"scale": 1/255.0, "MeanSubtraction": (0, 0, 0)}
    # Close the label file deterministically instead of leaking the handle
    # to the garbage collector.
    with open(base_dir + "/data/coco2014.names") as names_file:
        labels = names_file.read().strip().split("\n")
    return net, blob_options, labels
def SSD():
    """Load the SSDLite MobileNet-v2 detector.

    Returns:
        tuple: (cv2 dnn network, blob pre-processing options, COCO-2017 labels
        with an extra "unknown" class prepended at index 0).
    """
    base_dir = os.path.dirname(__file__)
    net = cv.dnn.readNetFromTensorflow(base_dir + "/models/ssdlite_mobilenet_v2.pb", base_dir + "/models/ssdlite_mobilenet_v2.pbtxt")
    blob_options = {"scale": 1.0, "MeanSubtraction": (127.5, 127.5, 127.5)}
    # Close the label file deterministically instead of leaking the handle.
    with open(base_dir + "/data/coco2017.names") as names_file:
        labels = names_file.read().strip().split("\n")
    labels.insert(0, "unknown")
    return net, blob_options, labels
def FasterRCNN():
    """Load the Faster R-CNN Inception-v2 detector.

    Returns:
        tuple: (cv2 dnn network, blob pre-processing options, COCO-2017 labels).
    """
    base_dir = os.path.dirname(__file__)
    net = cv.dnn.readNetFromTensorflow(base_dir + "/models/faster_rcnn_inception_v2.pb", base_dir + "/models/faster_rcnn_inception_v2.pbtxt")
    blob_options = {"scale": 1, "MeanSubtraction": (127.5, 127.5, 127.5)}
    # Close the label file deterministically instead of leaking the handle.
    with open(base_dir + "/data/coco2017.names") as names_file:
        labels = names_file.read().strip().split("\n")
    return net, blob_options, labels
| adagun/detector | models.py | models.py | py | 1,137 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.readNetFromDarknet",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"l... |
70096896743 | # -*- coding: utf-8 -*-
"""Example script to show how to use mcetl.launch_main_gui with defined DataSource objects.
@author: Donald Erb
Created on Aug 22, 2020
"""
import itertools
import mcetl
import numpy as np
import pandas as pd
from scipy import optimize
def offset_data(df, target_indices, calc_indices, excel_columns,
                first_row, offset=None, **kwargs):
    """Example CalculationFunction with named kwargs.

    Adds a cumulative offset (offset * running entry count) to every target
    column. When writing to Excel (excel_columns is not None) the offset is
    emitted as cell formulas; otherwise it is applied numerically.
    """
    # Running count across *all* entries in all samples, so each successive
    # curve is shifted further than the previous one.
    total_count = 0
    for i, sample in enumerate(calc_indices):
        for j, calc_col in enumerate(sample):
            if excel_columns is not None:
                y = df[target_indices[0][i][j]]
                y_col = excel_columns[target_indices[0][i][j]]
                calc = [
                    f'= {y_col}{k + first_row} + {offset * total_count}' for k in range(len(y))
                ]
                # use np.where(~np.isnan(y)) so that the calculation works for unequally-sized
                # datasets
                df[calc_col] = np.where(~np.isnan(y), calc, None)
            else:
                y_col = df[df.columns[target_indices[0][i][j]]]
                df[df.columns[calc_col]] = y_col + (offset * total_count)
            total_count += 1
    return df
def offset_normalized_data(df, target_indices, calc_indices, excel_columns,
                           offset=None, **kwargs):
    """Adds an offset to normalized data.

    Unlike offset_data, the shift grows per *sample* (offset * i), so every
    entry within a sample shares the same offset. In the Excel branch the
    target column is assumed to already hold formula strings, and the offset
    is appended as text.
    """
    for i, sample in enumerate(calc_indices):
        for j, calc_col in enumerate(sample):
            y_col = df[df.columns[target_indices[0][i][j]]]
            if excel_columns is not None:
                df[calc_col] = y_col + f' + {offset * i}'
            else:
                # NOTE(review): the numeric branch indexes df[calc_col]
                # directly while offset_data uses df[df.columns[calc_col]] —
                # confirm which convention mcetl expects here.
                df[calc_col] = y_col + (offset * i)
    return df
def normalize(df, target_indices, calc_indices, excel_columns, first_row, **kwargs):
    """Performs a min-max normalization to bound values between 0 and 1.

    In the Excel branch the normalization is written as cell formulas with
    absolute MIN/MAX ranges; otherwise it is computed numerically per column.
    """
    for i, sample in enumerate(calc_indices):
        for j, calc_col in enumerate(sample):
            if excel_columns is not None:
                y = df[target_indices[0][i][j]]
                y_col = excel_columns[target_indices[0][i][j]]
                # Last data row in the sheet: count() skips NaN, +2 accounts
                # for the header rows (data starts at row 3 in the workbook).
                end = y.count() + 2
                calc = [
                    (f'=({y_col}{k + first_row} - MIN({y_col}$3:{y_col}${end})) / '
                     f'(MAX({y_col}$3:{y_col}${end}) - MIN({y_col}$3:{y_col}${end}))')
                    for k in range(len(y))
                ]
                # NaN rows stay empty so unequally-sized datasets still work.
                df[calc_col] = np.where(~np.isnan(y), calc, None)
            else:
                y_col = df.columns[target_indices[0][i][j]]
                min_y = df[y_col].min()
                max_y = df[y_col].max()
                df[calc_col] = (df[y_col] - min_y) / (max_y - min_y)
    return df
def split(df, target_indices, **kwargs):
    """Split *df* into pieces wherever the x column reverses direction.

    The first target index selects the x column; a new piece starts at each
    row where the sign of the point-to-point difference flips. Runs of
    consecutive split points (caused by zero differences, i.e. repeated x
    values) are collapsed to their first index.
    """
    x_values = df[df.columns[target_indices[0]]].to_numpy()
    step = np.diff(x_values)
    # +2 because np.diff is one element shorter than x, and comparing
    # step[1:] against step[:-1] drops one more element.
    split_points = np.where(np.sign(step[1:]) != np.sign(step[:-1]))[0] + 2
    if len(split_points) > 1:
        # Keep only the first index of every consecutive run (x[i] - x[i+1] == 0).
        keep = np.where(split_points[1:] - split_points[:-1] != 1)[0] + 1
        split_points = np.array([split_points[0], *split_points[keep]])
    return np.array_split(df, split_points)
def split_segments(df, target_indices, **kwargs):
    """
    Preprocess function that separates each entry based on the segment number.

    Splits the dataframe every time the value in the segment column changes,
    and removes the segment column from the output since it is not needed
    in the final output.

    Args:
        df: input pandas DataFrame.
        target_indices: the first element is the label of the segment column.

    Returns:
        list of DataFrames, one per contiguous segment, each without the
        segment column.
    """
    segment_index = target_indices[0]
    segment_col = df[df.columns[segment_index]].to_numpy()
    # +1 since the comparison loses one index; each mask entry is the first
    # row of a new segment.
    mask = np.where(segment_col[:-1] != segment_col[1:])[0] + 1
    # drop(columns=...) replaces the old positional-axis call
    # `drop(segment_index, 1)`, which raises a TypeError on pandas >= 2.0.
    # Returning fresh frames also avoids chained-assignment warnings from
    # mutating the views produced by np.array_split.
    return [segment.drop(columns=segment_index)
            for segment in np.array_split(df, mask)]
def derivative(df, target_indices, calc_indices, excel_columns, first_row, **kwargs):
    """Calculates the derivative.

    Backward-difference dy/dx per row: Excel formulas when excel_columns is
    given, numeric otherwise. The first row has no backward neighbour, so its
    derivative is set to 0 in both branches.
    """
    for i, sample in enumerate(calc_indices):
        for j, calc_col in enumerate(sample):
            if excel_columns is not None:
                y = df[target_indices[1][i][j]]
                x_col = excel_columns[target_indices[0][i][j]]
                y_col = excel_columns[target_indices[1][i][j]]
                calc = [
                    f'= ({y_col}{k + first_row} - {y_col}{k + first_row - 1}) / ({x_col}{k + first_row} - {x_col}{k + first_row - 1})' for k in range(len(y))
                ]
                # No backward point for the first row.
                calc[0] = 0
                df[calc_col] = np.where(~np.isnan(y), calc, None)
            else:
                x = df[target_indices[0][i][j]].to_numpy()
                y = df[target_indices[1][i][j]].to_numpy()
                # NOTE(review): local name shadows this function's own name;
                # harmless here but worth renaming in a code change.
                derivative = np.zeros(x.size)
                derivative[1:] = (y[1:] - y[0:-1]) / (x[1:] - x[0:-1])
                df[calc_col] = derivative
    return df
def pore_preprocessor(df, target_indices, **kwargs):
    """Return *df* sorted by the pore-diameter column, wrapped in a list.

    Sorting each individual data file here is easier than sorting after the
    datasets have been merged together.
    """
    sort_column = target_indices[0]
    return [df.sort_values(sort_column)]
def pore_analysis(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Creates a histogram of pore sizes weighted by the pore area for each entry.
    Also computes the average pore diameter and the standard deviation of pore size.

    Uses kwargs['bin_size'][0] as the histogram bin width and the mutable
    kwargs['processed'][0] flag to avoid processing the same data twice.
    Column layout per entry (10 calculation columns): bin edges, raw counts,
    area-weighted counts, their cumulative sum, density, normalized cumulative
    sum, normalized density, averages, standard deviations.
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df # to prevent processing twice
    elif excel_columns is not None:
        kwargs['processed'][0] = True
    # Largest diameter across every target column, used as the histogram limit.
    max_pore_size = df[itertools.chain.from_iterable(target_indices[0])].max(numeric_only=True).max()
    pore_bins = np.arange(-kwargs['bin_size'][0], max_pore_size + kwargs['bin_size'][0],
                          kwargs['bin_size'][0])
    # in case the number of measured pores is less than the number of bins
    if pore_bins[1:].size > len(df):
        df = pd.concat((df, pd.DataFrame({'temp': pore_bins})), axis=1).drop('temp', axis=1)
    for i, sample in enumerate(calc_indices):
        for j in range(len(sample) // 10): # 10 calc columns per entry in each sample
            # d designates diameters, a designates areas
            d_index = target_indices[0][i][j]
            a_index = target_indices[1][i][j]
            # Ignore rows where either the diameter or the area is NaN.
            nan_mask = (~np.isnan(df[d_index])) & (~np.isnan(df[a_index]))
            avg_pore_size = np.average(df[d_index][nan_mask], weights=df[a_index][nan_mask])
            area_histogram = np.histogram(df[d_index], pore_bins, weights=df[a_index])[0]
            # density=True integrates to 1; multiplying by the bin width turns
            # it back into per-bin fractions.
            norm_area_histogram = np.histogram(df[d_index], pore_bins,
                                               weights=df[a_index], density=True)[0] * kwargs['bin_size'][0]
            df[sample[1 + (j * 10)]] = pd.Series(pore_bins[1:])
            df[sample[2 + (j * 10)]] = pd.Series(np.histogram(df[d_index], pore_bins)[0])
            df[sample[3 + (j * 10)]] = pd.Series(area_histogram)
            df[sample[4 + (j * 10)]] = pd.Series(np.cumsum(area_histogram))
            df[sample[5 + (j * 10)]] = df[sample[3 + (j * 10)]] / kwargs['bin_size'][0]
            df[sample[6 + (j * 10)]] = pd.Series(np.cumsum(norm_area_histogram))
            df[sample[7 + (j * 10)]] = pd.Series(norm_area_histogram / kwargs['bin_size'][0])
            # Plain and area-weighted mean diameter.
            df[sample[8 + (j * 10)]] = pd.Series((
                'non-weighted', np.average(df[d_index][nan_mask]),
                'Area-weighted', avg_pore_size
            ))
            # Plain and area-weighted standard deviation of the diameter.
            df[sample[9 + (j * 10)]] = pd.Series((
                '', np.std(df[d_index][nan_mask]),
                '', np.sqrt(np.average((df[d_index][nan_mask] - avg_pore_size)**2,
                                       weights=df[a_index][nan_mask]))
            ))
    return df
def pore_sample_summary(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Creates a histogram of pore sizes weighted by the pore area for each sample.
    Also computes the average pore diameter and the standard deviation of pore size.

    Same column layout as pore_analysis (minus the leading bin-edge offset),
    but pools the diameters and areas of every entry in a sample before
    binning, so each sample gets a single combined histogram.
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df # to prevent processing twice
    max_pore_size = df[itertools.chain.from_iterable(target_indices[0])].max(numeric_only=True).max()
    pore_bins = np.arange(-kwargs['bin_size'][0], max_pore_size + kwargs['bin_size'][0],
                          kwargs['bin_size'][0])
    for i, sample in enumerate(calc_indices):
        if not sample: # skip empty lists
            continue
        # Pool all entries of the sample into flat diameter/area arrays,
        # dropping NaN rows.
        diameters = np.hstack([df[num][~np.isnan(df[num])] for num in target_indices[0][i]])
        areas = np.hstack([df[num][~np.isnan(df[num])] for num in target_indices[1][i]])
        avg_pore_size = np.average(diameters, weights=areas)
        area_histogram = np.histogram(diameters, pore_bins, weights=areas)[0]
        norm_area_histogram = np.histogram(diameters, pore_bins,
                                           weights=areas, density=True)[0] * kwargs['bin_size'][0]
        df[sample[0]] = pd.Series(pore_bins[1:])
        df[sample[1]] = pd.Series(np.histogram(diameters, pore_bins)[0])
        df[sample[2]] = pd.Series(area_histogram)
        df[sample[3]] = pd.Series(np.cumsum(area_histogram))
        df[sample[4]] = df[sample[2]] / kwargs['bin_size'][0]
        df[sample[5]] = pd.Series(np.cumsum(norm_area_histogram))
        df[sample[6]] = pd.Series(norm_area_histogram / kwargs['bin_size'][0])
        # Plain and area-weighted mean / standard deviation of the diameter.
        df[sample[7]] = pd.Series(('non-weighted', np.average(diameters),
                                   'Area-weighted', avg_pore_size))
        df[sample[8]] = pd.Series(('', np.std(diameters),
                                   '', np.sqrt(np.average((diameters - avg_pore_size)**2, weights=areas))))
    return df
def pore_dataset_summary(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Summarizes the average pore size for each sample and its standard deviation.

    Pulls the weighted/non-weighted averages (row 1) and standard deviations
    (row 3) from each sample's summary columns (the last two columns written
    by pore_sample_summary) into the final dataset-summary entry.
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df # to prevent processing twice
    # calc index is -1 since only the last dataframe is the dataset summary dataframe
    df[calc_indices[-1][0]] = pd.Series((f'Sample {num + 1}' for num in range(len(calc_indices[:-1]))))
    df[calc_indices[-1][1]] = pd.Series((df[indices[-2]][1] for indices in target_indices[0][:-1]))
    df[calc_indices[-1][2]] = pd.Series((df[indices[-1]][1] for indices in target_indices[0][:-1]))
    df[calc_indices[-1][3]] = pd.Series((df[indices[-2]][3] for indices in target_indices[0][:-1]))
    df[calc_indices[-1][4]] = pd.Series((df[indices[-1]][3] for indices in target_indices[0][:-1]))
    return df
def stress_model(strain, modulus):
    """
    Linear estimate of the stress-strain curve; used for fitting with scipy.

    Parameters
    ----------
    strain : array-like
        Experimental strain values, unitless (or with cancelled units such
        as mm/mm).
    modulus : float
        Estimated elastic modulus of the data, in GPa (Pa * 10^9).

    Returns
    -------
    array-like
        Estimated stress following the linear model, in Pa.
    """
    gigapascals_to_pascals = 1e9
    return strain * modulus * gigapascals_to_pascals
def stress_strain_analysis(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Calculates the mechanical properties from the stress-strain curve for each entry.
    Calculated properties include elastic modulus, 0.2% offset yield stress,
    ultimate tensile strength, and fracture strain.

    Expects strain in % and stress in MPa; fitting happens in unitless strain
    and Pa. kwargs['lower_limit'][0] / kwargs['upper_limit'][0] bound the
    strain window used for the linear modulus fit, and kwargs['processed'][0]
    is a mutable flag to avoid reprocessing.
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df # to prevent processing twice
    empty_filler = 'N/A' if excel_columns is not None else None
    num_columns = 7 # the number of calculation columns per entry
    for i, sample in enumerate(calc_indices):
        for j in range(len(sample) // num_columns):
            strain_index = target_indices[0][i][j]
            stress_index = target_indices[1][i][j]
            nan_mask = (~np.isnan(df[strain_index])) & (~np.isnan(df[stress_index]))
            strain = df[strain_index].to_numpy()[nan_mask] / 100 # to convert from % to unitless
            stress = df[stress_index].to_numpy()[nan_mask] * 1e6 # to convert from MPa to Pa
            # Only the (assumed linear) low-strain window is used for the fit.
            line_mask = (strain >= kwargs['lower_limit'][0]) & (strain <= kwargs['upper_limit'][0])
            modulus, covar = optimize.curve_fit(
                stress_model, strain[line_mask], stress[line_mask], p0=[80],
                method='trf', loss='soft_l1'
            )
            predicted_ultimate = np.nanmax(stress)
            uts_index = np.abs(stress - predicted_ultimate).argmin() + 1
            offset = stress - ((strain - 0.002) * modulus * 1e9) # 0.2% strain offset
            # using linear interpolation to get the exact crossing point of the offset and measured curves
            # NOTE(review): assumes the offset line actually crosses the
            # measured curve; an IndexError here would mean it never does.
            y0, y1 = (offset[offset > 0][-1], offset[offset <= 0][0])
            x0, x1 = (strain[offset > 0][-1], strain[offset <= 0][0])
            x_intercept = x0 - ((y0 * (x1 - x0)) / (y1 - y0))
            predicted_yield = float((x_intercept - 0.002) * modulus * 1e9)
            # predict fracture where stress[i] - stress[i + 1] is > 50 MPa
            try:
                predicted_fracture = 100 * strain[np.where(stress[:-1] - stress[1:] > 50e6)[0][0]]
            except IndexError: # fracture condition never reached
                predicted_fracture = 'N/A'
            # True (logarithmic) strain in % and true stress in MPa, up to UTS.
            df[sample[0 + (j * num_columns)]] = pd.Series(100 * np.log(1 + strain[:uts_index]))
            df[sample[1 + (j * num_columns)]] = pd.Series(stress[:uts_index] * (1 + strain[:uts_index]) / 1e6)
            df[sample[2 + (j * num_columns)]] = pd.Series(('Value', 'Standard Error'))
            df[sample[3 + (j * num_columns)]] = pd.Series((modulus[0], np.sqrt(np.diag(covar)[0])))
            df[sample[4 + (j * num_columns)]] = pd.Series((predicted_yield / 1e6, empty_filler))
            df[sample[5 + (j * num_columns)]] = pd.Series((predicted_ultimate / 1e6, empty_filler))
            df[sample[6 + (j * num_columns)]] = pd.Series((predicted_fracture, empty_filler))
    # prevents reprocessing the data
    kwargs['processed'][0] = True if excel_columns is not None else False
    return df
def tensile_sample_summary(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Summarizes the mechanical properties for each sample.

    Averages the four properties written by stress_strain_analysis (columns
    3-6 of each 7-column entry) across every entry in a sample, skipping
    entries whose value is 'N/A' (e.g. no fracture detected).
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df # to prevent processing twice
    num_cols = 7 # the number of calculation columns per entry from stress_strain_analysis
    for i, sample in enumerate(calc_indices):
        if not sample: # skip empty lists
            continue
        # Regroup the flat target list into 7-column chunks, one per entry.
        entries = [
            target_indices[0][i][j * num_cols:(j + 1) * num_cols] for j in range(len(target_indices[0][i]) // num_cols)
        ]
        df[sample[0]] = pd.Series(('Elastic Modulus (GPa)', 'Offset Yield Stress (MPa)',
                                   'Ultimate Tensile Strength (MPa)', 'Fracture Strain (%)'))
        # Mean and standard deviation per property across all valid entries.
        df[sample[1]] = pd.Series(
            [np.mean([df[entry[3 + j]][0] for entry in entries if df[entry[3 + j]][0] != 'N/A']) for j in range(4)]
        )
        df[sample[2]] = pd.Series(
            [np.std([df[entry[3 + j]][0] for entry in entries if df[entry[3 + j]][0] != 'N/A']) for j in range(4)]
        )
    return df
def tensile_dataset_summary(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Summarizes the mechanical properties for each dataset.

    Fills the final (dataset summary) entry with one column of sample labels
    followed by an 'Average' and a 'Standard Deviation' column for each of
    the four properties reported by tensile_sample_summary (elastic modulus,
    offset yield stress, ultimate tensile strength, fracture strain), pulled
    from each sample's summary columns.
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df  # to prevent processing twice
    # Only the last entry holds the dataset summary; all earlier target
    # entries point at the per-sample summaries.
    summary_columns = calc_indices[-1]
    sample_summaries = target_indices[0][:-1]
    df[summary_columns[0]] = pd.Series(
        [''] + [f'Sample {num + 1}' for num in range(len(calc_indices[:-1]))]
    )
    # The four properties occupy fixed rows of the per-sample summary:
    # 0 = modulus, 1 = yield stress, 2 = UTS, 3 = fracture strain.
    for prop_row in range(4):
        df[summary_columns[1 + 2 * prop_row]] = pd.Series(
            ['Average'] + [df[indices[1]][prop_row] for indices in sample_summaries]
        )
        df[summary_columns[2 + 2 * prop_row]] = pd.Series(
            ['Standard Deviation'] + [df[indices[2]][prop_row] for indices in sample_summaries]
        )
    return df
def carreau_model(shear_rate, mu_0, mu_inf, lambda_, n):
    """
    Estimates the Carreau model for viscosity; used for fitting with scipy.

    Parameters
    ----------
    shear_rate : array-like
        Experimental shear rate data, in 1/s.
    mu_0 : float
        Estimated viscosity at a shear rate of 0 1/s, in Pa*s.
    mu_inf : float
        Estimated viscosity at infinite shear rate, in Pa*s.
    lambda_ : float
        Reciprocal of the shear rate at which the material begins to flow in
        a non-Newtonian way, in s.
    n : float
        Power law index for the material (1-n sets the slope of the
        non-Newtonian section of the log(viscosity) vs log(shear rate)
        curve); unitless.

    Returns
    -------
    array-like
        Estimated viscosity following the Carreau model, in Pa*s.
    """
    scaled_rate_squared = (lambda_ * shear_rate) ** 2
    shear_thinning_exponent = (n - 1) / 2
    return mu_inf + (mu_0 - mu_inf) * (1 + scaled_rate_squared) ** shear_thinning_exponent
def rheometry_analysis(df, target_indices, calc_indices, excel_columns, **kwargs):
    """
    Fits each data entry to the Carreau model and tabulates the results.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe holding the measured shear rate and viscosity columns.
    target_indices : list
        target_indices[0] holds the shear-rate column indices, and
        target_indices[1] the viscosity column indices, per sample.
    calc_indices : list
        Per-sample column indices where the calculation results are written.
    excel_columns : list or None
        Excel column names when writing to Excel; None during
        python-only processing.
    **kwargs
        Must contain 'processed', a one-element list used as a mutable
        flag so the fitting is not performed twice.

    Returns
    -------
    pandas.DataFrame
        The input dataframe with the fit results added.
    """
    if excel_columns is None and kwargs['processed'][0]:
        return df  # to prevent processing twice
    num_columns = 5  # the number of calculation columns per entry
    for i, sample in enumerate(calc_indices):
        for j in range(len(sample) // num_columns):
            shear_index = target_indices[0][i][j]
            viscosity_index = target_indices[1][i][j]
            # keep only rows where both measurements are present
            nan_mask = (~np.isnan(df[shear_index])) & (~np.isnan(df[viscosity_index]))
            shear_rate = df[shear_index].to_numpy()[nan_mask]
            viscosity = df[viscosity_index].to_numpy()[nan_mask]
            if viscosity.size == 0:
                continue  # no valid data points for this entry; nothing to fit
            # initial guess order: mu_0, mu_inf, lambda_, n
            initial_guess = (viscosity[0], viscosity[-1], 1, 0.2)
            bounds = ((1e-10, 1e-10, 1e-5, 1e-5), (1e10, 1e10, 1e5, 5))
            try:
                params, covariance = optimize.curve_fit(
                    carreau_model, shear_rate, viscosity, p0=initial_guess,
                    bounds=bounds, method='trf', loss='soft_l1'
                )
            except (ValueError, RuntimeError) as exc:
                # ValueError: infeasible x0 (initial guess outside bounds);
                # RuntimeError: least-squares optimization failed to converge.
                # Skip this entry rather than aborting the whole analysis.
                print(f'Carreau fit failed for entry ({i}, {j}): {exc}')
                continue
            predicted_viscosity = carreau_model(shear_rate, *params)
            r_sq = mcetl.fitting.r_squared(viscosity, predicted_viscosity, 4)[1]
            df[sample[1 + (j * num_columns)]] = pd.Series(predicted_viscosity)
            df[sample[2 + (j * num_columns)]] = pd.Series(
                ('\u03bc_0 (Pa*s)', '\u03bc_inf (Pa*s)',
                 '\u03bb, relaxation time (s)', 'n, power law index (unitless)',
                 '', 'Fit R\u00b2')
            )
            df[sample[3 + (j * num_columns)]] = pd.Series(list(params) + ['', r_sq])
            df[sample[4 + (j * num_columns)]] = pd.Series(np.sqrt(np.diag(covariance)))
    # prevents reprocessing the data
    kwargs['processed'][0] = excel_columns is not None
    return df
if __name__ == '__main__':
    # The kwargs for some functions; kept in a variable so they can be shared
    # between Function objects; uses lists as the values so that they can be
    # permanently altered (the lists are mutated in place across calls).
    pore_kwargs = {'bin_size': [5], 'processed': [False]}
    tensile_kwargs = {'lower_limit': [0.0015], 'upper_limit': [0.005], 'processed': [False]}

    # Definitions for the Function objects
    offset = mcetl.CalculationFunction(
        name='offset', target_columns='y', functions=offset_data,
        added_columns=1, function_kwargs={'offset': 1000}
    )
    # NOTE(review): this rebinds the name `normalize` (a function defined
    # earlier in the file, passed here as the third argument) to a
    # CalculationFunction object; behavior is correct but the shadowing is
    # confusing — consider a distinct variable name.
    normalize = mcetl.CalculationFunction('normalize', 'y', normalize, 1)
    offset_normalized = mcetl.CalculationFunction(
        'offset_normalized', 'normalize', offset_normalized_data, 'normalize', {'offset': 1}
    )
    delta_x_separator = mcetl.PreprocessFunction('delta_x_sep', 'temperature', split)
    segment_separator = mcetl.PreprocessFunction('segment_sep', 'segment', split_segments,
                                                 deleted_columns=['segment'])
    derivative_calc = mcetl.CalculationFunction('derivative', ['time', 'mass'], derivative, 1)
    pore_preprocess = mcetl.PreprocessFunction('pore_preprocess', 'diameter', pore_preprocessor)
    pore_histogram = mcetl.CalculationFunction(
        'pore_hist', ['diameter', 'area'], pore_analysis, 10, pore_kwargs
    )
    pore_sample_summation = mcetl.SummaryFunction(
        'pore_sample_sum', ['diameter', 'area'], pore_sample_summary, 9, pore_kwargs
    )
    pore_dataset_summation = mcetl.SummaryFunction(
        'pore_dataset_sum', ['pore_sample_sum'], pore_dataset_summary, 5,
        pore_kwargs, False
    )
    stress_analysis = mcetl.CalculationFunction(
        'tensile_test', ['strain', 'stress'], stress_strain_analysis, 7, tensile_kwargs
    )
    stress_sample_summary = mcetl.SummaryFunction(
        'tensile_sample_summary', ['tensile_test'], tensile_sample_summary, 3, tensile_kwargs
    )
    stress_dataset_summary = mcetl.SummaryFunction(
        'tensile_dataset_summary', ['tensile_sample_summary'], tensile_dataset_summary, 9,
        tensile_kwargs, False
    )
    rheometry_calc = mcetl.CalculationFunction(
        'rheology', ['shear rate', 'viscosity'], rheometry_analysis, 5, {'processed': [False]}
    )
# Definitions for each data source
xrd = mcetl.DataSource(
name='XRD',
column_labels=['2\u03B8 (\u00B0)', 'Intensity (Counts)', 'Offset Intensity (a.u.)'],
functions=[offset],
column_numbers=[1, 2],
start_row=1,
end_row=0,
separator=',',
xy_plot_indices=[0, 2],
file_type='csv',
num_files=1,
unique_variables=['x', 'y'],
entry_separation=1,
sample_separation=2,
)
ftir = mcetl.DataSource(
name='FTIR',
column_labels=['Wavenumber (1/cm)', 'Absorbance (a.u.)', 'Normalized Absorbance (a.u.)'],
functions=[normalize, offset_normalized],
column_numbers=[0, 1],
start_row=1,
end_row=0,
separator=',',
xy_plot_indices=[0, 2],
file_type='csv',
num_files=1,
unique_variables=['x', 'y'],
entry_separation=1,
sample_separation=2
)
raman = mcetl.DataSource(
name='Raman',
column_labels=['Raman Shift (1/cm)', 'Intensity (a.u.)', 'Normalized Intensity (a.u.)'],
functions=[normalize, offset_normalized],
column_numbers=[0, 1],
start_row=0,
end_row=0,
separator='\t',
xy_plot_indices=[0, 2],
file_type='txt',
num_files=1,
unique_variables=['x', 'y'],
entry_separation=1,
sample_separation=2
)
tga = mcetl.DataSource(
name='TGA',
column_labels=['Temperature (\u00B0C)', 'Time (min)',
'Mass (%)', 'Mass Loss Rate (%/min)'],
functions=[delta_x_separator, derivative_calc],
column_numbers=[0, 1, 2],
start_row=34,
end_row=0,
separator=';',
xy_plot_indices=[0, 2],
file_type='txt',
num_files=1,
unique_variables=['temperature', 'time', 'mass'],
unique_variable_indices=[0, 1, 2],
entry_separation=1,
sample_separation=2
)
dsc = mcetl.DataSource(
name='DSC',
column_labels=['Temperature (\u00B0C)', 'Time (min)', 'Heat Flow, exo up (mW/mg)'],
functions=[segment_separator],
column_numbers=[0, 1, 2, 3],
start_row=34,
end_row=0,
separator=';',
xy_plot_indices=[1, 2],
file_type='txt',
num_files=1,
unique_variables=['segment'],
unique_variable_indices=[3],
entry_separation=1,
sample_separation=2
)
rheometry = mcetl.DataSource(
name='Rheometry',
column_labels=['Shear Stress (Pa)', 'Shear Rate (1/s)', 'Viscosity (Pa*s)',
'Time (s)', 'Temperature (\u00B0C)', '',
'Carreau Model Viscosity (Pa*s)', 'Carreau Model Variable',
'Value', 'Standard Error'],
functions=[rheometry_calc],
column_numbers=[0, 1, 2, 3, 4],
start_row=167,
end_row=0,
separator='\t',
xy_plot_indices=[1, 2],
file_type='txt',
num_files=1,
unique_variables=['shear rate', 'viscosity'],
unique_variable_indices=[1, 2],
entry_separation=1,
sample_separation=2
)
tensile = mcetl.DataSource(
name='Tensile Test',
column_labels=['Strain (%)', 'Stress (MPa)', 'Time (s)', 'Extension (mm)', 'Load (kN)',
'True Strain (%)', 'True Stress (MPa)',
'', 'Elastic Modulus (GPa)', 'Offset Yield Stress (MPa)',
'Ultimate Tensile Strength (MPa)', 'Fracture Strain (%)',
'Property', 'Average', 'Standard Deviation',
'Sample', 'Elastic Modulus (GPa)', '',
'Offset Yield Stress (MPa)', '',
'Ultimate Tensile Strength (MPa)', '',
'Fracture Strain (%)', ''],
functions=[stress_analysis, stress_sample_summary, stress_dataset_summary],
column_numbers=[4, 3, 0, 1, 2],
start_row=6,
end_row=0,
separator=',',
xy_plot_indices=[0, 1],
file_type='txt',
num_files=3,
unique_variables=['stress', 'strain'],
unique_variable_indices=[1, 0],
entry_separation=2,
sample_separation=3
)
pore_size = mcetl.DataSource(
name='Pore Size Analysis',
column_labels=['Measured Feret Diameters (\u03bcm)', 'Measured Areas (\u03bcm\u00b2)',
'', 'Histogram Diameter, D (\u03bcm)',
'Pore Count (#)', 'Area (\u03bcm\u00b2)',
'Cumulative Area, A (\u03bcm\u00b2)',
'Pore Size Distribution, dA/dD (\u03bcm\u00b2/\u03bcm)',
'Normalized Cumulative Area (\u03bcm\u00b2)',
'Normalized PSD, dA/dD (\u03bcm\u00b2/\u03bcm)',
'Average Diameter (\u03bcm)', 'Diameter Standard Deviation (\u03bcm)',
'Summarized Histogram Diameter, D (\u03bcm)',
'Summarized Pore Count (#)',
'Summarized Area (\u03bcm\u00b2)',
'Summarized Cumulative Area, A (\u03bcm\u00b2)',
'Summarized Pore Size Distribution, dA/dD (\u03bcm\u00b2/\u03bcm)',
'Summarized Normalized Cumulative Area (\u03bcm\u00b2)',
'Summarized Normalized PSD, dA/dD (\u03bcm\u00b2/\u03bcm)',
'Summarized Average Diameter (\u03bcm)',
'Summarized Diameter Standard Deviation (\u03bcm)',
'Sample', 'Average Diameter, non-weighted (\u03bcm)',
'Diameter Standard Deviation, non-weighted (\u03bcm)',
'Average Diameter, area-weighted (\u03bcm)',
'Diameter Standard Deviation, area-weighted (\u03bcm)'],
functions=[pore_preprocess, pore_histogram,
pore_sample_summation, pore_dataset_summation],
column_numbers=[4, 1],
start_row=1,
end_row=0,
separator=',',
xy_plot_indices=[3, 7],
file_type='csv',
num_files=3,
unique_variables=['diameter', 'area'],
unique_variable_indices=[0, 1],
entry_separation=1,
sample_separation=2
)
# For use in case you need to open arbitrary files without processing
other = mcetl.DataSource('Other')
# Put all DataSource objects in this tuple in order to use them
data_sources = (xrd, ftir, raman, tga, dsc, rheometry, tensile, pore_size, other)
#set dpi awareness so GUI is not blurry on Windows os
mcetl.set_dpi_awareness()
# Call the launch_main_gui function with data_sources as the input
output = mcetl.launch_main_gui(data_sources)
| derb12/mcetl | examples/use_main_gui.py | use_main_gui.py | py | 29,127 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.where",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": ... |
2305398126 | from sqlalchemy.orm import sessionmaker
import sqlalchemy as db
from models.model import Base
from models.team_models import NCAATeam
from models.team_models import NBATeam
from models.oddshark_models import OddSharkNCAA
from models.oddshark_models import OddSharkNBA
from models.hasla_metrics_model import HaslaMetrics
from models.curated_picks_model import TeamRankingNCAA
from models.curated_picks_model import TeamRankingNBA
from models.curated_picks_model import PicksWiseNCAA
from models.curated_picks_model import PicksWiseNBA
from models.vegas_insider_model import VegasInsider
from models.espn_model import ESPNNCAAB
from models.betql_model import BetQL_NBA
from models.betql_model import BetQL_NCAA
from models.sportsinsights_model import SportsInsightsBETSIGNALS
from models.sportsinsights_model import SportsInsightsBESTBETS
import config
# Select the database by deployment environment: remote server vs local dev.
if config.SERVER_ENVIRONMENT:
    engine = db.create_engine(config.SERVER_DATABASE_URI)
else:
    engine = db.create_engine(config.LOCAL_DATABASE_URI)

connection = engine.connect()
# NOTE(review): engine.connect() raises on failure, so `connection` is always
# truthy here and the else branch is effectively unreachable.
if connection:
    print("Database opened successfully")
else:
    print("failed")

# Module-level session shared by the helper functions below.
Session = sessionmaker(bind=engine)
session = Session()
def recreate_database():
    """Drop every table registered on Base.metadata, then recreate them all."""
    metadata = Base.metadata
    metadata.drop_all(engine)
    metadata.create_all(engine)
    print("Created All Tables")
def recreate_team_table(is_reset=0):
    """Create the team tables; drop them first when *is_reset* is truthy."""
    tables = [NCAATeam.__table__, NBATeam.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created Team Tables")
def recreate_oddshark_table(is_reset=0):
    """Create the OddShark tables; drop them first when *is_reset* is truthy."""
    tables = [OddSharkNBA.__table__, OddSharkNCAA.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created OddShark Tables")
def recreate_hasla_metrics_table(is_reset=0):
    """Create the hasla_metrics table; drop it first when *is_reset* is truthy."""
    tables = [HaslaMetrics.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created hasla_metrics Tables")
def recreate_curated_picks_table(is_reset=0):
    """Create the curated-picks tables; drop them first when *is_reset* is truthy."""
    tables = [
        TeamRankingNCAA.__table__,
        TeamRankingNBA.__table__,
        PicksWiseNCAA.__table__,
        PicksWiseNBA.__table__,
    ]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created CuratedPicks Tables")
def recreate_espn_table(is_reset=0):
    """Create the ESPN NCAAB table; drop it first when *is_reset* is truthy."""
    tables = [ESPNNCAAB.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created ESPN Tables")
def recreate_vegas_insider_table(is_reset=0):
    """Create the VegasInsider table; drop it first when *is_reset* is truthy."""
    tables = [VegasInsider.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created VegasInsider Tables")
def recreate_betql_table(is_reset=0):
    """Create the BetQL tables; drop them first when *is_reset* is truthy."""
    tables = [BetQL_NBA.__table__, BetQL_NCAA.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created BetQL Tables")
def recreate_sportsinsights_table(is_reset=0):
    """Create the SportsInsights tables; drop them first when *is_reset* is truthy."""
    tables = [SportsInsightsBETSIGNALS.__table__, SportsInsightsBESTBETS.__table__]
    if is_reset:
        Base.metadata.drop_all(engine, tables=tables)
    Base.metadata.create_all(engine, tables=tables)
    print("Created SportsInsights Tables")
def close_connection():
    """Close the module-level connection and session, if present."""
    for resource in (connection, session):
        if resource:
            resource.close()
| happy-ruby/SportsBettingAnalysis | database.py | database.py | py | 3,793 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "config.SERVER_ENVIRONMENT",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "config.SERVER_DATABASE_URI",
"line_number": 22,
"usage_type": "attribute"
},
... |
4509215591 | from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import os, sys, errno
import argparse
import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import post_process_depth, flip_lr
from networks.NewCRFDepth import NewCRFDepth
def convert_arg_line_to_args(arg_line):
    """Split one line of an argparse '@file' argument file into tokens.

    Whitespace-only tokens are skipped so blank lines contribute nothing.
    """
    yield from (token for token in arg_line.split() if token.strip())
def print_minmax(arr,desc):
    """visualize depths and uncertainty of any method

    Prints summary statistics (shape, element type, min/max/mean/median) and
    the top-left 10x10 corner of *arr*, framed by separator lines; *desc*
    labels the printout.
    """
    print("*" * 60)
    print("***{}*** :".format(desc))
    print("arr.shape = {}".format(arr.shape))
    print("type(arr[0,0] = {}".format(type(arr[0,0])))
    print("np.min = {}".format(np.min(arr)))
    print("np.max = {}".format(np.max(arr)))
    print("np.mean = {}".format(np.mean(arr)))
    print("np.median = {}".format(np.median(arr)))
    #print("arr[200:220,200:220] = \n",arr[200:220,200:220])
    print("arr[0:10,0:10] = \n",arr[0:10,0:10])
    print("*" * 60 + "\n")
# CLI definition; fromfile_prefix_chars allows passing '@args.txt'.
parser = argparse.ArgumentParser(description='NeWCRFs PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args

parser.add_argument('--model_name', type=str, help='model name', default='newcrfs')
parser.add_argument('--encoder', type=str, help='type of encoder, base07, large07', default='large07')
parser.add_argument('--data_path_eval', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path_eval',type=str, help='path to the groundtruth data for evaluation', required=False)
parser.add_argument('--filenames_file_eval', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=10)
parser.add_argument('--min_depth_eval',type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on', default='nyu')
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--save_viz', help='if set, save visulization of the outputs', action='store_true')
parser.add_argument('--gray', help='Use gray images for testing', action='store_true')

# A single extra CLI token is treated as an argument file ('@<file>').
if sys.argv.__len__() == 2:
    arg_filename_with_prefix = '@' + sys.argv[1]
    args = parser.parse_args([arg_filename_with_prefix])
else:
    args = parser.parse_args()

# Pick the dataloader implementation matching the requested dataset.
if args.dataset == 'kitti' or args.dataset == 'nyu' or args.dataset == '12scenes' or args.dataset == 'iitd':
    from dataloaders.dataloader import NewDataLoader
elif args.dataset == 'kittipred':
    from dataloaders.dataloader_kittipred import NewDataLoader

# Make modules next to the checkpoint importable.
model_dir = os.path.dirname(args.checkpoint_path)
sys.path.append(model_dir)
def get_num_lines(file_path):
    """Return the number of lines in the text file at *file_path*.

    Uses a context manager so the handle is always closed (the original
    leaked the handle if reading raised) and counts lazily instead of
    loading the whole file into memory.
    """
    with open(file_path, 'r') as f:
        return sum(1 for _ in f)
def test(params):
    """Run evaluation: predict depth for every eval sample and save PNGs.

    Loads the checkpoint at ``args.checkpoint_path``, runs flip-averaged
    inference over the eval dataloader, and writes per-sample outputs under
    ``visualisations/result_<dataset>_<split>[_gray]/``:
    raw predicted depth (uint16), ground truth (uint16), the network input
    as BGR uint8, and a copy of the original RGB files.

    Parameters
    ----------
    params : argparse.Namespace
        Unused; the module-level ``args`` is read instead (parameter kept
        for signature compatibility with existing callers).

    Notes
    -----
    Removed ~90 lines of dead commented-out saving code that was kept as an
    inert triple-quoted string literal, plus the always-True ``post_process``
    flag and unused accumulator lists.
    """
    args.distributed = False
    args.mode = 'online_eval'
    dataloader = NewDataLoader(args, args.mode)

    model = NewCRFDepth(version='large07', inv_depth=False, max_depth=args.max_depth)
    model = torch.nn.DataParallel(model)
    checkpoint = torch.load(args.checkpoint_path)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    model.cuda()

    num_params = sum([np.prod(p.size()) for p in model.parameters()])
    print("Total number of parameters: {}".format(num_params))

    num_test_samples = get_num_lines(args.filenames_file_eval)
    with open(args.filenames_file_eval) as f:
        lines = f.readlines()

    print('now testing {} files with {}'.format(num_test_samples, args.checkpoint_path))
    start_time = time.time()

    # Output folder is derived from the dataset name and the split filename.
    save_name = ('visualisations/result_' + args.dataset + "_"
                 + args.filenames_file_eval.split("/")[-1].split("_")[-1].split(".")[-2]
                 + ("_gray" if args.gray else ""))
    os.makedirs(save_name, exist_ok=True)
    try:
        os.mkdir(save_name + '/raw')
        os.mkdir(save_name + '/rgb')
        os.mkdir(save_name + '/gt')
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    with torch.no_grad():
        for i, sample in enumerate(tqdm(dataloader.data)):
            image = Variable(sample['image'].cuda())
            if not sample['has_valid_depth']:
                continue
            gt_depth = sample['depth']

            # Predict, averaging with the horizontally-flipped prediction.
            depth_est = model(image)
            image_flipped = flip_lr(image)
            depth_est_flipped = model(image_flipped)
            depth_est = post_process_depth(depth_est, depth_est_flipped)

            if args.dataset == "kitti":
                # KITTI convention: depth stored as uint16 in 1/256 m units.
                pred_depth = (depth_est.cpu().numpy().squeeze() * 256.0).astype(np.uint16)
                image = (image.cpu().numpy().squeeze() * 255).astype(np.uint8)
                gt_depth = (gt_depth.numpy().squeeze() * 256.0).astype(np.uint16)
            elif args.dataset == "iitd":
                pred_depth = depth_est.cpu().numpy().squeeze()
                # Clamp to [min_depth_eval, 6 m] and sanitize inf/nan before
                # scaling to millimetres.
                pred_depth[pred_depth < args.min_depth_eval] = args.min_depth_eval
                pred_depth[pred_depth > 6.0] = 6.0
                pred_depth[np.isinf(pred_depth)] = 6.0
                pred_depth[np.isnan(pred_depth)] = args.min_depth_eval
                pred_depth = (pred_depth * 1000.0).astype(np.uint16)
                image = (image.cpu().numpy().squeeze() * 255).astype(np.uint8)
                gt_depth = (gt_depth.numpy().squeeze() * 1000.0).astype(np.uint16)
            else:
                print("please change here for you dataset !!")
                sys.exit(0)

            # CHW -> HWC, then RGB -> BGR because cv2.imwrite expects BGR.
            image = image.transpose(1, 2, 0)
            image = image[:, :, ::-1]

            if args.do_kb_crop:
                # Paste the prediction back into a full KITTI-benchmark frame.
                height, width = 352, 1216
                top_margin = int(height - 352)
                left_margin = int((width - 1216) / 2)
                pred_depth_uncropped = np.zeros((height, width), dtype=np.uint16)
                pred_depth_uncropped[top_margin:top_margin + 352,
                                     left_margin:left_margin + 1216] = pred_depth
                pred_depth = pred_depth_uncropped

            cv2.imwrite(save_name + f'/raw/{i:06d}.png', pred_depth)
            cv2.imwrite(save_name + f'/gt/{i:06d}.png', gt_depth)
            cv2.imwrite(save_name + f'/rgb/{i:06d}.png', image)

    elapsed_time = time.time() - start_time
    print('Elapesed time: %s' % str(elapsed_time))
    print('Done.')

    # Copy the original RGB inputs alongside the results, skipping entries
    # whose ground-truth path is "None" (same filter as the loop above).
    os.makedirs(save_name + "/orig_rgb/", exist_ok=True)
    i = 0
    print(len(lines), num_test_samples)
    for s in tqdm(range(num_test_samples)):
        if lines[s].split()[1] == "None":
            print("continue")
            continue
        if args.dataset == 'kitti' or args.dataset == 'iitd':
            orig_rgb_path = os.path.join(args.data_path_eval, lines[s].split()[0])
            orig_rgb = cv2.imread(orig_rgb_path, -1)
            cv2.imwrite(save_name + f'/orig_rgb/{i:06d}.png', orig_rgb)
            i += 1
    print()
    print(f"{i} orig_rgb saved!!")
if __name__ == '__main__':
    # Entry point: run the evaluation loop using the module-level CLI args.
    test(args)
| surajiitd/jetson-documentation | model_compression/pixelformer/test.py | test.py | py | 13,368 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.min",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 36,
... |
20491810241 | import re
from django import template
register = template.Library()


@register.simple_tag(takes_context=True)
def active(context, pattern):
    """Return 'active' when the current request path matches *pattern*, else ''."""
    current_path = context['request'].path
    return 'active' if re.search(pattern, current_path) else ''
| sirodoht/avocado-jobs | main/templatetags/app_filters.py | app_filters.py | py | 248 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 10,
"usage_type": "call"
}
] |
2808266661 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.1.0"
__author__ = "Abien Fred Agarap"
import argparse
from normalize_data import list_files
import numpy as np
import os
import pandas as pd
def csv_to_npy(csv_path, npy_path, npy_filename):
    """Concatenate all CSVs under *csv_path*, drop duplicate rows, save as NPY.

    Parameters
    ----------
    csv_path : str
        Directory containing the CSV files to convert.
    npy_path : str
        Directory where the converted NPY file will be stored.
    npy_filename : str
        Filename of the NPY file to save.
    """
    files = list_files(path=csv_path)
    frames = []
    for file in files:
        # DataFrame.append was removed in pandas 2.0; collect and concat once.
        frames.append(pd.read_csv(filepath_or_buffer=file, header=None))
        print("Appending file : {}".format(file))
    df = pd.concat(frames) if frames else pd.DataFrame()
    # The original passed `subset=df`, which pandas iterates as the full set
    # of column labels — i.e. the default; spell that out explicitly.
    df = df.drop_duplicates(keep="first")
    data = np.array(df)
    np.save(file=os.path.join(npy_path, npy_filename), arr=data)
def parse_args():
    """Parse command-line arguments for the CSV-to-NPY converter."""
    parser = argparse.ArgumentParser(
        description="Module for converting CSV to NPY files"
    )
    group = parser.add_argument_group("Arguments")
    # (short flag, long flag, help text) — all three options are required strings.
    option_specs = (
        ("-c", "--csv_path", "path of the CSV files to be converted"),
        ("-n", "--npy_path", "path where converted NPY files will be stored"),
        ("-f", "--npy_filename", "filename of the NPY file to save"),
    )
    for short_flag, long_flag, help_text in option_specs:
        group.add_argument(
            short_flag,
            long_flag,
            required=True,
            type=str,
            help=help_text,
        )
    return parser.parse_args()
def main(arguments):
    """Run the CSV-to-NPY conversion with the parsed CLI arguments."""
    csv_to_npy(
        csv_path=arguments.csv_path,
        npy_path=arguments.npy_path,
        npy_filename=arguments.npy_filename,
    )
if __name__ == "__main__":
    # Entry point: parse CLI arguments and run the conversion.
    args = parse_args()
    main(args)
| AFAgarap/gru-svm | dataset/csv_to_npy.py | csv_to_npy.py | py | 1,586 | python | en | code | 136 | github-code | 36 | [
{
"api_name": "normalize_data.list_files",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.arra... |
10358623287 | import asyncio
import io
import pickle
import discord
from pgbot import common
# Store "name: pickled data" pairs as cache. Do not store unpickled data —
# keeping only pickled bytes means every get() returns a fresh object.
db_obj_cache: dict[str, bytes] = {}

# Optimisation: store per-db bool on whether it got updated or not, so only
# changed entries are rewritten to the channel on shutdown.
db_changed: dict[str, bool] = {}

# store per-resource lock (shared between DiscordDB instances with one name)
db_locks: dict[str, asyncio.Lock] = {}

# bool to indicate whether db module was init
is_init: bool = False
async def init():
    """
    Initialise local cache and db channel. Call this function when the
    bot boots up
    """
    global is_init
    # In test/generic mode there is no DB channel; just mark as initialised.
    if is_init or common.TEST_MODE or common.GENERIC:
        is_init = True
        return

    # Each DB entry is one message: the message content is the entry name and
    # the first attachment holds the pickled payload.
    async for msg in common.db_channel.history():
        if msg.attachments:
            db_obj_cache[msg.content] = await msg.attachments[0].read()
            db_changed[msg.content] = False

    is_init = True
async def quit():
    """
    Flushes local cache for storage to the DB, and cleans up

    NOTE(review): this shadows the builtin ``quit``; harmless at module scope,
    but a rename (e.g. ``shutdown``) would be clearer.
    """
    global is_init
    if not is_init or common.TEST_MODE or common.GENERIC:
        is_init = False
        return

    print("Calling cleanup functions!")
    # Delete only the messages whose cached entry actually changed; unchanged
    # entries keep their existing message.
    async for msg in common.db_channel.history():
        if msg.content in db_obj_cache and db_changed[msg.content]:
            await msg.delete()

    # Re-upload every changed entry as a fresh message with the pickled
    # payload attached.
    for name, picked in db_obj_cache.items():
        if not db_changed[name]:
            continue

        with io.BytesIO(picked) as fobj:
            await common.db_channel.send(name, file=discord.File(fobj))

    print("Successfully called cleanup functions")
    is_init = False
class DiscordDB:
    """
    DiscordDB is a class to interface with a DB like solution, that stores data
    via discord messages. Uses heavy caching, and saves to DB only on program
    exit
    """

    def __init__(self, name: str):
        """
        Initialise Discord DB Object

        Instances sharing the same *name* share one asyncio.Lock, so access
        to each named entry is serialised across instances.
        """
        self.name = name
        if name not in db_locks:
            db_locks[name] = asyncio.Lock()

        self._lock = db_locks[name]

    async def acquire(self):
        """
        Acquire internal resource lock
        """
        # wait for a maximum of 10 seconds for init to happen if it has not
        # (1000 polls at 10 ms each)
        for _ in range(1000):
            if is_init:
                break

            await asyncio.sleep(0.01)
        else:
            raise RuntimeError("pgbot.db module was not init")

        await self._lock.acquire()

    def release(self):
        """
        Release internal resource lock
        """
        self._lock.release()

    async def __aenter__(self):
        """
        Aquire lock, "with" statement support
        """
        await self.acquire()
        return self

    async def __aexit__(self, *_):
        """
        Release lock, "with" statement support
        """
        self.release()

    def _check_active(self):
        # Guard: data operations are only valid while holding the lock on an
        # initialised module.
        if not self._lock.locked() or not is_init:
            raise RuntimeError("Invalid operation on unlocked data object")

    def get(self, failobj=None):
        """
        Get object of discord DB

        Returns *failobj* when no entry with this name exists. Unpickles on
        every call so callers always receive a fresh object.
        """
        self._check_active()
        try:
            return pickle.loads(db_obj_cache[self.name])
        except KeyError:
            return failobj

    def write(self, obj):
        """
        Store object in DB

        Only marks the entry dirty when the pickled bytes actually differ,
        so unchanged writes do not trigger a re-upload on shutdown.
        """
        self._check_active()
        dumped = pickle.dumps(obj)
        if dumped != db_obj_cache.get(self.name):
            db_obj_cache[self.name] = dumped
            db_changed[self.name] = True

    def delete(self):
        """
        Delete DB, returns whether it was deleted successfully
        """
        self._check_active()
        db_changed[self.name] = True
        try:
            db_obj_cache.pop(self.name)
            return True
        except KeyError:
            return False
| gresm/PygameCommunityBot | pgbot/db.py | db.py | py | 3,779 | python | en | code | null | github-code | 36 | [
{
"api_name": "asyncio.Lock",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pgbot.common.TEST_MODE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pgbot.common",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pgbot.comm... |
6249836683 | from trytond.model import fields
from trytond.pool import PoolMeta
from trytond.i18n import gettext
from trytond.exceptions import UserError
__all__ = ['BOMInput']
class BOMInput(metaclass=PoolMeta):
    """Extension of production.bom.input adding a per-input 'Use Lot' flag."""
    __name__ = 'production.bom.input'

    # Whether this BOM input should provide the lot for the output.
    use_lot = fields.Boolean('Use Lot')

    @classmethod
    def validate(cls, boms):
        super(BOMInput, cls).validate(boms)
        for bom in boms:
            bom.check_unique_use_lot_in_bom()

    def check_unique_use_lot_in_bom(self):
        """Raise a UserError when more than one input of this BOM has use_lot set."""
        # NOTE: search() is callable on the instance (Tryton classmethod);
        # it queries ALL inputs of this input's BOM with use_lot enabled.
        inputs = self.search([
            ('bom', '=', self.bom.id),
            ('use_lot', '=', True)
        ])
        if len(inputs) > 1:
            raise UserError(
                gettext('production_output_lot.unique_use_lot_in_bom'))
| NaN-tic/trytond-production_output_lot | bom.py | bom.py | py | 736 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "trytond.pool.PoolMeta",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "trytond.model.fields.Boolean",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "trytond.model.fields",
"line_number": 11,
"usage_type": "name"
},
{
"api_name":... |
35026404794 | from google.api_core import retry
from loguru import logger
from pkg.utils.mongo_utils import get_db
from pkg.project.validate import validate
from bson.objectid import ObjectId
@logger.catch
def delete_project(request):
    """Delete one project document identified by the request JSON's ``_id``.

    Returns an empty dict on success, a 400 error payload when validation
    fails, or a 404 payload when no matching project exists.
    """
    db = get_db()
    request_data = request.json
    logger.debug(request_data["_id"])

    # validate the request payload before touching the collection
    error = validate(db, request_data, "DELETE")
    if error:
        return {"error": error}, 400

    _id = request_data["_id"]
    # count_documents replaces Cursor.count(), which was removed in PyMongo 4.
    if not db["project"].count_documents({"_id": ObjectId(_id)}):
        return {"error": "delete fail, project not found"}, 404

    # _id is unique, so delete_one suffices (delete_many was overkill).
    db["project"].delete_one({"_id": ObjectId(_id)})
    return {}
| rayjan0114/infra | gcp/main/gcpFunction/functions/pkg/project/delete_project.py | delete_project.py | py | 757 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pkg.utils.mongo_utils.get_db",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "loguru.logger.debug",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pkg.pr... |
18631976524 | import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_score, recall_score
from scipy.stats import norm
import plotly.graph_objects as go
from libs.Models.ModelParent import ModelParent
import itertools
class STD(ModelParent):
    """Threshold anomaly detector.

    A test point is flagged anomalous when it lies more than ``threshold``
    training standard deviations away from the training mean (computed after
    standardization, so mean ~= 0 and std ~= 1).
    """

    def __init__(self, trainX: np.array, testX: np.array, testy: np.array, threshold=0.999, split=5):
        """
        :param trainX: Training data
        :param testX: Test data
        :param testy: Labels of the test data
        :param threshold: Anomaly threshold in multiples of the training std
        :param split: How many predecessors the model considers for one forecast
        """
        super().__init__(trainX, testX, testy)
        self.threshold = threshold
        self.split = split
        self.precision = 0
        self.recall = 0
        self.predictions = []
        self.specificity = 0

    def create_Xy_dataset(self, sequence, steps):
        """
        Splits the whole dataset (train+test) into a 2D array X of shape
        (len(sequence), steps) and a 1D array y of len(sequence). X consists
        of lookback values for each element of y.

        :param sequence: univariate dataset
        :param steps: Number of lookback values for each element of y
        :return: X, y
        """
        X, y = [], []
        for i in range(len(sequence)):
            # find the end of this pattern
            end_ix = i + steps
            # check if we are beyond the sequence
            if end_ix > len(sequence) - 1:
                break
            # gather input and output parts of the pattern
            seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
            X.append(seq_x)
            y.append(seq_y)
        return np.asarray(X), np.asarray(y)

    def standardize_dataset(self):
        """
        Standardizes the dataset (mean=0, std=1) according to training data.
        """
        self.scaler = StandardScaler().fit(self.trainX)
        self.trainX = self.scaler.transform(self.trainX)
        self.testX = self.scaler.transform(self.testX)

    def AnomalyScore(self, rawscore):
        """Map a raw value to [0, 1): probability mass within |z| of the mean."""
        std = np.std(self.trainX)  # = 1 after standardization
        mean = np.mean(self.trainX)  # = 0 after standardization
        zscore = abs((rawscore - mean) / std)
        anomalyscore = (norm.cdf(zscore) - norm.cdf(-zscore))
        return anomalyscore

    def fit(self) -> None:
        """
        Standardizes the data, stores the training mean/std, classifies the
        test set, and computes precision, recall and specificity.
        """
        self.standardize_dataset()
        self.std = np.std(self.trainX)
        self.mean = np.mean(self.trainX)
        self.testyPredicted = self.predict(self.testX)
        # Fix: keep self.predictions populated (it was never assigned before,
        # which left showResults plotting an empty series).
        self.predictions = self.testyPredicted
        self.precision = precision_score(self.testy, self.testyPredicted)
        self.recall = recall_score(self.testy, self.testyPredicted)
        self.specificity = recall_score(self.testy, self.testyPredicted, pos_label=0)

    def getROC(self):
        """
        Calculate specificity and recall for parameter combinations.

        :return:
            Mean distance between predicted and true anomalies, plus the data
            for the ROC curve.
        """
        # TODO: choose parameter spaces
        threshold = np.arange(1, 3.2, 0.2)
        parameters = threshold
        roc = []
        distances = []
        for e in parameters:
            self.threshold = e
            self.fit()
            roc.append({"parameter": e, "value": [float(self.recall), float(1 - self.specificity)]})
            distances.append({"parameter": e, "value": float(self.getStartDeltas())})
        return roc, distances

    def predict(self, testfeatures: np.ndarray) -> np.ndarray:
        """
        Classify each value: 1 (anomaly) when it deviates from the training
        mean by more than threshold * std, else 0.

        :param testfeatures: standardized test values
        :return: list of 0/1 predictions
        """
        threshold = self.std * self.threshold
        results = [1 if np.abs(e) > np.abs(self.mean) + threshold else 0 for e in testfeatures]
        return results

    def getStartDeltas(self):
        """
        For every true anomaly, measure after how many steps a predicted
        anomaly follows (detection delay), if one was detected at all.

        :return: mean of the measured delays
        """
        result = []
        for e in enumerate(self.testy):
            if e[1] == 1:
                for el in list(enumerate(self.testyPredicted))[e[0]:]:
                    if el[1] == 1:
                        result.append(el[0] - e[0])
                        break
        return np.mean(result)

    def showResults(self):
        """
        Plots the performance of the model by displaying performance metrics
        as well as the test and prediction distributions.
        """
        fig = go.Figure()
        x0 = self.trainX.reshape(1, -1)[0]
        x1 = self.testX.reshape(1, -1)[0]
        fig.add_trace(go.Scatter(x=x0, y=[0.5 for e in range(len(self.trainX))],
                                 name="Training data", mode="markers", marker_color="blue"))
        fig.add_trace(go.Scatter(x=x1, y=self.testy,
                                 name="test labels True", mode="markers", marker_color="red"))
        fig.add_trace(go.Scatter(x=x1, y=self.predictions,
                                 name="test labels predicted", mode="markers", marker_color="yellow"))
        # Fix: the title previously read self.n_epochs (never set on STD ->
        # AttributeError) and was labelled "LSTM" — leftovers from another model.
        title = ("STD Recall: " + str(self.recall) + " Precision: " + str(self.precision)
                 + " threshold: " + str(self.threshold) + " split: " + str(self.split) + "\n")
        fig.update_layout(title=title)
        fig.show()
| xorbey/CATS_public | libs/Models/Anomaly/STD.py | STD.py | py | 5,909 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "libs.Models.ModelParent.ModelParent",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sk... |
41748486239 | from flask import Flask, request, jsonify
import producer as p
import time
import metricas as m
# Flask application instance; the routes below are registered on it.
app = Flask(__name__)
@app.route('/', methods=['POST'])
def index():
    """Receive an inscription form, record its arrival time, and enqueue it.

    The ``paid`` form field (string "true"/"false") selects the producer
    argument: 1 for paid registrations, 0 otherwise. Echoes the submitted
    form data back as JSON.
    """
    if request.method == 'POST':
        data = request.form
        arrival = time.time()  # start-of-processing timestamp for metrics
        m.tiempo_inicio_list.append(arrival)
        print(f"Nombre: {data['nombre']}")
        print(f"Usuario: {data['usuario']}")
        print(f"Correo: {data['correo']}")
        print(f"PAID: {data['paid']}")
        # 'paid' arrives as a string; normalize it to a boolean.
        is_paid = data['paid'].lower() == 'true'
        p.formulario(data, 1 if is_paid else 0)
        return jsonify(data)
@app.route('/salir', methods=['POST'])
def salir():
    """Flush the collected metrics to a JSON file when shutdown is requested."""
    if request.method == 'POST':
        print("Petición recibida.")
        m.escribir_json()
        print("Ejecución completada.")
        return ""
if __name__ == '__main__':
app.run(debug = True, host= "0.0.0.0") | cesarlmt27/CIT2011 | tarea2/inscripcion/api.py | api.py | py | 1,077 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.request.form... |
40892463582 | import io
from datetime import datetime
import cv2
from django.http import FileResponse
from pdf2image import convert_from_bytes
from PIL import Image
from rest_framework import mixins, permissions, viewsets
from rest_framework.decorators import (action, api_view,
authentication_classes,
permission_classes)
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from planning import samba
from planning.models import Commande, LigneDeCommande, Operation, Tier, WebCam
from planning.renderers import JPEGRenderer
from planning.serializers import (AnonymousCommandeSerializer,
AnonymousOrderLineSerializer,
CommandeSerializer, OperationSerializer,
OrderLineSerializer, TierSerializer,
WebCamSerializer)
# Exact ERP order status codes used to filter Commande querysets below.
EXACT_STATUS_CLOSED = 21
EXACT_STATUS_CANCELLED = 45
EXACT_STATUS_OPEN = 12
EXACT_STATUS_PARTIAL = 20
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def me(request, format=None):
    """Return the authenticated user and auth token of the caller."""
    return Response(
        {
            "user": str(request.user),  # `django.contrib.auth.User` instance.
            "auth": str(request.auth),  # None
        }
    )
class WebCamViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over WebCam records, plus snapshot actions."""

    queryset = WebCam.objects.all()
    serializer_class = WebCamSerializer

    @action(detail=True, methods=["get"], renderer_classes=[JPEGRenderer])
    def thumbnail(self, request, pk=None):
        """Grab one frame from the camera's low-res stream as a small JPEG."""
        webcam = self.get_object()
        video_capture = cv2.VideoCapture(f"{webcam.url}/profile3")
        try:
            ret, frame = video_capture.read()
        finally:
            # Fix: release the stream handle — VideoCapture leaked otherwise.
            video_capture.release()
        small_frame = cv2.resize(frame, (640, 375))
        return Response(cv2.imencode(".jpg", small_frame)[1].tobytes())

    @action(
        detail=True,
        methods=["post"],
        parser_classes=[MultiPartParser],
    )
    def capture(self, request, pk=None):
        """Store a snapshot on the SMB share.

        With a positive ``pk`` the frame is captured from that camera;
        otherwise the uploaded ``file`` is used. When ``order_id`` is given,
        the image is filed under the client's order folder (named by
        timestamp), else in the generic spool directory.
        """
        if int(pk) > 0:
            webcam = self.get_object()
            video_capture = cv2.VideoCapture(f"{webcam.url}/profile1")
            try:
                ret, frame = video_capture.read()
            finally:
                # Fix: release the stream handle — VideoCapture leaked otherwise.
                video_capture.release()
            image = io.BytesIO(cv2.imencode(".jpg", frame)[1].tobytes())
        else:
            image = request.data["file"]
        order_id = request.data["order_id"]
        smb_connection = samba.factory()
        if order_id:
            order_id = int(request.data["order_id"])
            order = Commande.objects.get(id=order_id)
            tier = order.exact_tier
            filename = datetime.now().strftime("%Y%m%d%H%M%S")
            # Fix: use the timestamp computed above as the file name; it was
            # previously discarded by the overwrite on the next line.
            filename = f"//Workspace$/documents/32-Clients/{tier.exact_name}/C{order.exact_order_number}/{filename}.jpg"
        else:
            date = datetime.now().strftime("%Y%m%d%H%M%S")
            filename = f"//Documentsv7$/OFFICE One Documents/spool/{date}.jpg"
        samba.store_file_and_create_folders(smb_connection, filename, image)
        return Response({"filename": filename})
class ClientViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list of sales clients (tiers) having at least one open order."""

    serializer_class = TierSerializer
    # distinct() is needed because the join on commande can duplicate tiers.
    queryset = (
        Tier.objects.filter(exact_is_sales=1, commande__exact_status=EXACT_STATUS_OPEN)
        .distinct()
        .order_by("exact_name")
    )
class CommandeViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only orders API with file-browsing helpers backed by an SMB share."""

    def get_serializer_class(self):
        # Authenticated users get full order data, anonymous callers a reduced view.
        if self.request.user and self.request.user.is_authenticated:
            return CommandeSerializer
        return AnonymousCommandeSerializer

    queryset = Commande.objects.all().order_by("-id")
    filterset_fields = {"exact_tier_id": ["exact"], "exact_status": ["exact", "in"]}

    @action(detail=True, url_name="files")
    def files(self, request, **kwargs):
        """List image/PDF files sitting directly in the order's SMB folder."""
        order = self.get_object()
        smb_connection = samba.factory()
        folder_content = samba.list_path(smb_connection, order.folder_path)
        result = []
        for content in folder_content:
            if not content.isDirectory:
                full_path = f"{order.folder_path}/{content.filename}"
                mimetype = samba.find_file_mime_type(smb_connection, full_path)
                if mimetype.startswith("image") or mimetype == "application/pdf":
                    result.append(
                        {
                            "filename": content.filename,
                            "last_write_time": content.last_write_time,
                            "file_size": content.file_size,
                            # Reuse the mimetype computed above instead of a
                            # second SMB round trip for the same file.
                            "mimetype": mimetype,
                        }
                    )
        return Response(data=result)

    @action(detail=True, url_name="thumbnail")
    def thumbnail(self, request, **kwargs):
        """Return a 400px PNG thumbnail of the requested file (image or PDF)."""
        THUMBNAIL_SIZE = (400, 400)
        order = self.get_object()
        smb_connection = samba.factory()
        filename = request.query_params.get("filename")
        folder_content = samba.list_path(smb_connection, order.folder_path)
        # Throw an error if the file is not a direct child of the folder.
        next(x for x in folder_content if x.filename == filename)
        # Fix: build the path from the validated filename (a literal
        # placeholder string was used here before, bypassing the check above).
        full_path = f"{order.folder_path}/{filename}"
        file_type = samba.find_file_mime_type(smb_connection, full_path)
        if file_type.startswith("image"):
            buffer_file = samba.retrieve_file(smb_connection, full_path)
            result = io.BytesIO()
            file = Image.open(buffer_file)
            file.thumbnail(THUMBNAIL_SIZE)
            file.save(result, "PNG", compress_level=9)
            result.seek(0)
            return FileResponse(result)
        elif file_type == "application/pdf":
            buffer_file = samba.retrieve_file(smb_connection, full_path)
            # Render only the first page at thumbnail width.
            images = convert_from_bytes(
                buffer_file.read(), size=THUMBNAIL_SIZE[0], fmt="png"
            )
            image = images[0]
            result = io.BytesIO()
            image.save(result, "PNG", compress_level=9)
            result.seek(0)
            return FileResponse(result)
        return

    @action(detail=True, url_name="file_download")
    def file_download(self, request, **kwargs):
        """Stream the requested file from the order's SMB folder."""
        order = self.get_object()
        smb_connection = samba.factory()
        filename = request.query_params.get("filename")
        folder_content = samba.list_path(smb_connection, order.folder_path)
        # Throw an error if the file is not a direct child of the folder.
        next(x for x in folder_content if x.filename == filename)
        # Fix: build the path from the validated filename (see thumbnail).
        full_path = f"{order.folder_path}/{filename}"
        buffer_file = samba.retrieve_file(smb_connection, full_path)
        return FileResponse(buffer_file)
class OperationViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint listing Operation records, newest first."""

    serializer_class = OperationSerializer
    queryset = Operation.objects.all().order_by("-id")
# Only update for now.
class BulkOrderLineViewSet(mixins.UpdateModelMixin, viewsets.GenericViewSet):
    """Update-only endpoint for order lines, with a bulk PUT action."""

    permission_classes = [AllowAny]
    queryset = LigneDeCommande.objects.all()

    def get_serializer_class(self):
        # Full serializer for authenticated users, reduced one otherwise.
        if self.request.user and self.request.user.is_authenticated:
            return OrderLineSerializer
        return AnonymousOrderLineSerializer

    @action(detail=False, methods=["put"], url_name="bulk_update")
    def bulk_update(self, request, **kwargs):
        """Partially update many order lines at once.

        The payload is a list of objects, each carrying an ``id`` plus the
        fields to change. Ids not present in the queryset are ignored.
        """
        # Index the incoming payload by id, keeping only the field data.
        payload = {}
        for item in request.data:
            fields = dict(item)
            line_id = fields.pop("id")
            payload[line_id] = fields
        for line in self.get_queryset().filter(id__in=payload.keys()):
            serializer = self.get_serializer(line, data=payload[line.id], partial=True)
            serializer.is_valid(raise_exception=True)
            serializer.save()
        return Response({})
class ClientCommandeViewSet(viewsets.ModelViewSet):
    """Open orders of a single client, selected by the ``exact_tier_id`` URL kwarg."""

    serializer_class = CommandeSerializer

    def get_queryset(self):
        # Fix: self.kwargs is a dict — subscript it (calling it raised
        # TypeError) — and the model manager is ``objects``, not ``object``.
        exact_tier_id = self.kwargs["exact_tier_id"]
        return Commande.objects.filter(
            exact_tier_id=exact_tier_id, exact_status=EXACT_STATUS_OPEN
        ).order_by("-exact_order_number")
| pierrotlemekcho/exaged | sifapi/planning/views.py | views.py | py | 8,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.response.Response",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.permission_classes",
"line_number": 32,
"us... |
43507385682 | # MODULE IMPORTS
# Flask modules
from flask import Flask, render_template, request, url_for, request, redirect, abort
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
from flask_talisman import Talisman
from flask_pymongo import PyMongo
from flask_bcrypt import Bcrypt
from flask_wtf.csrf import CSRFProtect
# Other modules
from urllib.parse import urlparse, urljoin
from datetime import datetime
import configparser
import json
import sys
import os
# Local imports
from user import User, Anonymous
from message import Message
from note import Note
from email_utility import send_email, send_registration_email, send_message_email
from verification import confirm_token
# Create app
app = Flask(__name__)

# Configuration
# Secret key comes from configuration.ini; MongoDB settings from environment.
config = configparser.ConfigParser()
config.read('configuration.ini')
default = config['DEFAULT']
app.secret_key = default['SECRET_KEY']
app.config['MONGO_DBNAME'] = os.environ.get('MONGO_DBNAME')
app.config['MONGO_URI'] = os.environ.get('MONGODB_URI') #default['MONGO_URI']
app.config['PREFERRED_URL_SCHEME'] = "https"

# Create Pymongo
mongo = PyMongo(app)

# Create Bcrypt (password hashing)
bc = Bcrypt(app)

# Create Talisman
# Content-Security-Policy: restrict assets to self plus the CDNs the
# templates load scripts/styles/fonts from.
csp = {
    'default-src': [
        '\'self\'',
        'https://stackpath.bootstrapcdn.com',
        'https://pro.fontawesome.com',
        'https://code.jquery.com',
        'https://cdnjs.cloudflare.com'
    ]
}
talisman = Talisman(app, content_security_policy=csp)

# Create CSRF protect
csrf = CSRFProtect()
csrf.init_app(app)

# Create login manager; unauthenticated users are the Anonymous class and
# are redirected to the 'login' view when hitting @login_required routes.
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.anonymous_user = Anonymous
login_manager.login_view = "login"
# ROUTES
# Index
@app.route('/')
def index():
    """Render the public landing page."""
    return render_template('index.html')
# Login
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    GET renders the login form (redirecting straight to the index when the
    session is already authenticated). POST checks the credentials against
    MongoDB and, on success, honours a safe ``next`` redirect target.
    """
    if request.method == 'GET':
        if current_user.is_authenticated:
            # Redirect to index if already authenticated.
            # Fix: url_for takes the endpoint name 'index'; the previous
            # url_for('/index') raised a BuildError at request time.
            return redirect(url_for('index'))
        # Render login page
        return render_template('login.html', error=request.args.get("error"))
    # Retrieve user from database
    users = mongo.db.users
    user_data = users.find_one({'email': request.form['email']}, {'_id': 0})
    if user_data:
        # Check password hash
        if bc.check_password_hash(user_data['password'], request.form['pass']):
            # Create user object to login (note password hash not stored in session)
            user = User.make_from_dict(user_data)
            login_user(user)
            # Check for next argument (direct user to protected page they wanted)
            next = request.args.get('next')
            if not is_safe_url(next):
                return abort(400)
            # Go to profile page after login
            return redirect(next or url_for('profile'))
    # Redirect to login page on error
    return redirect(url_for('login', error=1))
# Register
@app.route('/register', methods=['POST', 'GET'])
def register():
    """Create a new account.

    GET renders the registration form; POST validates the submission,
    hashes the password with bcrypt, stores the user and logs them in.
    Error codes: 1 = duplicate email, 2 = database failure.
    """
    if request.method == 'POST':
        # Trim input data
        email = request.form['email'].strip()
        title = request.form['title'].strip()
        first_name = request.form['first_name'].strip()
        last_name = request.form['last_name'].strip()
        password = request.form['pass'].strip()
        users = mongo.db.users
        # Check if email address already exists
        existing_user = users.find_one(
            {'email': email}, {'_id': 0})
        if existing_user is None:
            # Drop any current session before creating the new account.
            logout_user()
            # Hash password
            hashpass = bc.generate_password_hash(password).decode('utf-8')
            # Create user object (note password hash not stored in session)
            new_user = User(title, first_name, last_name, email)
            # Create dictionary data to save to database
            user_data_to_save = new_user.dict()
            user_data_to_save['password'] = hashpass
            # Insert user record to database
            if users.insert_one(user_data_to_save):
                login_user(new_user)
                send_registration_email(new_user)
                return redirect(url_for('profile'))
            else:
                # Handle database error
                return redirect(url_for('register', error=2))
        # Handle duplicate email
        return redirect(url_for('register', error=1))
    # Return template for registration page if GET request
    return render_template('register.html', error=request.args.get("error"))
# Confirm email
@app.route('/confirm/<token>', methods=['GET'])
def confirm_email(token):
    """Confirm an email address from a signed token link.

    Any logged-in session is cleared first; the token is decoded and, when
    valid, the matching user is flagged as verified. Every failure path
    (bad token, falsy email, DB error) renders the failure template.
    """
    logout_user()
    try:
        email = confirm_token(token)
        if email:
            if mongo.db.users.update_one({"email": email}, {"$set": {"verified": True}}):
                return render_template('confirm.html', success=True)
    # Fix: catch Exception rather than a bare except, so system exits and
    # keyboard interrupts are no longer swallowed.
    except Exception:
        return render_template('confirm.html', success=False)
    else:
        return render_template('confirm.html', success=False)
# Verification email
@app.route('/verify', methods=['POST'])
@login_required
def send_verification_email():
    """Re-send the account-verification email unless already verified."""
    if current_user.verified:
        return "Your email address is already verified"
    send_registration_email(current_user)
    return "Verification email sent"
# Profile
@app.route('/profile', methods=['GET'])
@login_required
def profile():
    """Show the current user's (non-deleted) notes, newest first."""
    user_notes = mongo.db.notes.find(
        {"user_id": current_user.id, "deleted": False}
    ).sort("timestamp", -1)
    return render_template('profile.html', notes=user_notes, title=current_user.title)
# Messages
@app.route('/messages', methods=['GET'])
@login_required
def messages():
    """Show the messaging page: possible recipients, inbox and sent messages."""
    others = mongo.db.users.find(
        {"id": {"$ne": current_user.id}}, {'_id': 0})
    inbox = mongo.db.messages.find(
        {"to_id": current_user.id, "deleted": False}
    ).sort("timestamp", -1)
    sent = mongo.db.messages.find(
        {"from_id": current_user.id, "deleted": False, "hidden_for_sender": False}
    ).sort("timestamp", -1)
    return render_template(
        'messages.html', users=others, inbox_messages=inbox, sent_messages=sent)
# Logout
@app.route('/logout', methods=['GET'])
@login_required
def logout():
    """End the session and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
# POST REQUEST ROUTES
# Add note
@app.route('/add_note', methods=['POST'])
@login_required
def add_note():
    """Create a note owned by the current user from the posted form fields."""
    title = request.form.get("title")
    body = request.form.get("body")
    note = Note(title, body, current_user.id, current_user.display_name())
    if mongo.db.notes.insert_one(note.dict()):
        return "Success! Note added: " + title
    return "Error! Could not add note"
# Delete note
@app.route('/delete_note', methods=['POST'])
@login_required
def delete_note():
    """Soft-delete a note by its id (sets the 'deleted' flag)."""
    note_id = request.form.get("note_id")
    marked = mongo.db.notes.update_one({"id": note_id}, {"$set": {"deleted": True}})
    if not marked:
        return "Error! Could not delete note"
    return "Success! Note deleted"
# Send message
@app.route('/send_message', methods=['POST'])
@login_required
def send_message():
    """Send an internal message and notify the recipient by email."""
    title = request.form.get("title")
    body = request.form.get("body")
    to_id = request.form.get("user")
    # Resolve the recipient record to get a display name for the message.
    to_user = User.make_from_dict(mongo.db.users.find_one({"id": to_id}))
    to_name = to_user.display_name()
    message = Message(
        title, body, current_user.id, current_user.display_name(), to_id, to_name)
    if mongo.db.messages.insert_one(message.dict()):
        send_message_email(from_user=current_user,
                           to_user=to_user, message=message)
        return "Success! Message sent to " + to_name + ": " + title
    return "Error! Could not send message"
# Delete message
@app.route('/delete_message', methods=['POST'])
@login_required
def delete_message():
    """Soft-delete a message by its id (sets the 'deleted' flag)."""
    message_id = request.form.get("message_id")
    marked = mongo.db.messages.update_one(
        {"id": message_id}, {"$set": {"deleted": True}})
    if not marked:
        return "Error! Could not delete message"
    return "Success! Message deleted"
# Hide sent message
@app.route('/hide_sent_message', methods=['POST'])
@login_required
def hide_sent_message():
    """Hide a sent message from the sender's view (recipient keeps it)."""
    message_id = request.form.get("message_id")
    hidden = mongo.db.messages.update_one(
        {"id": message_id}, {"$set": {"hidden_for_sender": True}})
    if not hidden:
        return "Error! Could not hide message"
    return "Success! Message hidden from sender"
# Change Name
@app.route('/change_name', methods=['POST'])
@login_required
def change_name():
    """Update the current user's title / first name / last name."""
    updated_fields = {
        "title": request.form['title'].strip(),
        "first_name": request.form['first_name'].strip(),
        "last_name": request.form['last_name'].strip(),
    }
    if mongo.db.users.update_one({"email": current_user.email}, {"$set": updated_fields}):
        return "User name updated successfully"
    return "Error! Could not update user name"
# Delete Account
@app.route('/delete_account', methods=['POST'])
@login_required
def delete_account():
    """Delete the current account plus all of its notes and messages.

    Returns a dict of per-collection deletion flags.
    """
    user_id = current_user.id
    # Delete user details; log the session out once the record is gone.
    user_deleted = bool(mongo.db.users.delete_one({"id": user_id}))
    if user_deleted:
        logout_user()
    # Delete notes owned by the user.
    notes_deleted = bool(mongo.db.notes.delete_many({"user_id": user_id}))
    # Delete messages the user sent or received.
    messages_deleted = bool(mongo.db.messages.delete_many(
        {"$or": [{"from_id": user_id}, {"to_id": user_id}]}))
    return {
        "user_deleted": user_deleted,
        "notes_deleted": notes_deleted,
        "messages_deleted": messages_deleted,
    }
# LOGIN MANAGER REQUIREMENTS
# Load user from user ID
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: fetch the User for a session id, or None."""
    record = mongo.db.users.find_one({'id': userid}, {'_id': 0})
    return User.make_from_dict(record) if record else None
# Safe URL
def is_safe_url(target):
    """Open-redirect guard: accept only http(s) targets on our own host."""
    host = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    return candidate.scheme in ('http', 'https') and host.netloc == candidate.netloc
# Heroku environment
# On Heroku, bind to the platform-assigned $PORT on all interfaces;
# otherwise run the local development server with debugging enabled.
if os.environ.get('APP_LOCATION') == 'heroku':
    port = int(os.environ.get("PORT", 5000))
    app.run(host="0.0.0.0", port=port)
else:
    app.run(host='localhost', port=8080, debug=True)
| chriswilson1982/flask-mongo-app | run.py | run.py | py | 10,514 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.environ",
... |
24108741135 |
# Generating images of handwritten digits using a Deep Convolutional Generative Adversarial Network
import numpy as np
import tensorflow as tf
from tensorflow.layers import batch_normalization
from tensorflow.keras.layers import UpSampling2D
import matplotlib.pyplot as plt
class DCGAN:
    """TF1-style Deep Convolutional GAN for 28x28 grayscale digit images.

    The discriminator is a strided-conv binary classifier; the generator
    maps a latent vector through dense + (UpSampling2D, conv) blocks to an
    image in [-1, 1] (tanh output). Both are trained jointly with Adam on
    sigmoid cross-entropy losses inside a tf.Session.
    """

    def __init__(self, z_shape=100, img_shape=(28, 28), channels=1, learning_rate=0.0001):
        # input characteristics
        self.channels = channels
        self.z_shape = z_shape
        self.img_rows, self.img_cols = img_shape
        # defining Initializing discriminator weights and network
        with tf.variable_scope('d'):
            self.disc_W1 = tf.Variable(tf.random_normal(shape=[5, 5, channels, 64]) * 2 / np.sqrt(64))
            self.disc_b1 = tf.Variable(tf.zeros([64]))
            self.disc_W2 = tf.Variable(tf.random_normal(shape=[3, 3, 64, 64]) * 2 / np.sqrt(64))
            self.disc_b2 = tf.Variable(tf.zeros([64]))
            self.disc_W3 = tf.Variable(tf.random_normal(shape=[3, 3, 64, 128]) * 2 / np.sqrt(128))
            self.disc_b3 = tf.Variable(tf.zeros([128]))
            self.disc_W4 = tf.Variable(tf.random_normal(shape=[2, 2, 128, 256]) * 2 / np.sqrt(256))
            self.disc_b4 = tf.Variable(tf.zeros([256]))
            self.disc_W5 = tf.Variable(tf.random_normal(shape=[7 * 7 * 256, 1]) * 2 / np.sqrt(1))
            self.disc_b5 = tf.Variable(tf.zeros([1]))
        # defining Initializing generator weights and network
        with tf.variable_scope('g'):
            self.gen_W1 = tf.Variable(tf.random_normal(shape=[100, 7 * 7 * 512]) * 2 / np.sqrt(7 * 7 * 512))
            self.gen_W2 = tf.Variable(tf.random_normal(shape=[3, 3, 512, 256]) * 2 / np.sqrt(256))
            self.gen_W3 = tf.Variable(tf.random_normal(shape=[3, 3, 256, 128]) * 2 / np.sqrt(128))
            self.gen_W4 = tf.Variable(tf.random_normal(shape=[3, 3, 128, 1]) * 2 / np.sqrt(1))
        # placeholder for inputs
        self.X = tf.placeholder(tf.float32, [None, self.img_rows, self.img_cols])
        self.Z = tf.placeholder(tf.float32, [None, self.z_shape])
        # generated output
        self.output_gen = self.gen_forward(self.Z)
        disc_logits_fake = self.disc_forward(self.output_gen)
        disc_logits_real = self.disc_forward(self.X)
        # defining gan costs
        # Discriminator: label fakes 0 and reals 1; generator: fool the
        # discriminator into labelling fakes 1.
        disc_fake_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(disc_logits_fake), logits=disc_logits_fake))
        disc_real_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(disc_logits_real), logits=disc_logits_real))
        self.disc_loss = tf.add(disc_fake_loss, disc_real_loss)
        self.gen_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(disc_logits_fake), logits=disc_logits_fake))
        # learned parameters
        # NOTE(review): membership is decided by the substring 'd'/'g' in the
        # variable name, relying on the 'd'/'g' scope prefixes above — confirm
        # no other name components make a variable match both lists.
        train_vars = tf.trainable_variables()
        disc_vars = [var for var in train_vars if 'd' in var.name]
        gen_vars = [var for var in train_vars if 'g' in var.name]
        # optimizing network parameters
        self.disc_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.disc_loss, var_list=disc_vars)
        self.gen_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.gen_loss, var_list=gen_vars)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    # Discriminator feed forward
    def disc_forward(self, X):
        """Return discriminator logits (pre-sigmoid) for a batch of images."""
        X = tf.reshape(X, [-1, self.img_rows, self.img_cols, self.channels])
        # layer 1: stride-2 conv, 28x28 -> 14x14
        z = tf.nn.conv2d(X, self.disc_W1, [1, 2, 2, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.disc_b1)
        z = tf.nn.leaky_relu(z)
        # layer 2
        z = tf.nn.conv2d(z, self.disc_W2, [1, 1, 1, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.disc_b2)
        z = batch_normalization(z)
        z = tf.nn.leaky_relu(z)
        # layer 3: stride-2 conv, 14x14 -> 7x7
        z = tf.nn.conv2d(z, self.disc_W3, [1, 2, 2, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.disc_b3)
        z = batch_normalization(z)
        z = tf.nn.leaky_relu(z)
        # layer 4
        z = tf.nn.conv2d(z, self.disc_W4, [1, 1, 1, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.disc_b4)
        z = batch_normalization(z)
        z = tf.nn.leaky_relu(z)
        # layer 5: flatten and project to a single logit
        z = tf.reshape(z, [-1, 7 * 7 * 256])
        logits = tf.matmul(z, self.disc_W5)
        logits = tf.nn.bias_add(logits, self.disc_b5)
        return logits

    # Generator feed forward
    def gen_forward(self, X):
        """Map latent vectors (batch, z_shape) to tanh images (batch, 28, 28, 1)."""
        # layer 1: dense projection to a 7x7x512 feature map
        z = tf.matmul(X, self.gen_W1)
        z = tf.nn.relu(z)
        z = tf.reshape(z, [-1, 7, 7, 512])
        # layer 2: upsample 7x7 -> 14x14
        z = UpSampling2D()(z)
        z = tf.nn.conv2d(z, self.gen_W2, [1, 1, 1, 1], padding="SAME")
        z = batch_normalization(z)
        z = tf.nn.leaky_relu(z)
        # layer 3: upsample 14x14 -> 28x28
        z = UpSampling2D()(z)
        z = tf.nn.conv2d(z, self.gen_W3, [1, 1, 1, 1], padding="SAME")
        z = batch_normalization(z)
        z = tf.nn.leaky_relu(z)
        z = tf.nn.conv2d(z, self.gen_W4, [1, 1, 1, 1], padding="SAME")
        return tf.nn.tanh(z)

    # generate sample from generator
    def generate_sample(self, epoch, batch_size):
        """Save a 5x5 grid of generated digits to samples/<epoch>.png."""
        z = np.random.uniform(-1, 1, (batch_size, self.z_shape))
        imgs = self.sess.run(self.output_gen, feed_dict={self.Z: z})
        # rescale from tanh range [-1, 1] to [0, 1] for display
        imgs = imgs * 0.5 + 0.5
        fig, axs = plt.subplots(5, 5)
        cnt = 0
        for i in range(5):
            for j in range(5):
                axs[i, j].imshow(imgs[cnt, :, :, 0], cmap="gray")
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("samples/%d.png" % epoch)
        plt.close()

    def train(self, X_train, batch_size=128, epoch=15):
        """Alternate one discriminator and one generator update per batch.

        NOTE(review): inside the batch loop, `i` is the *batch* index, yet it
        is passed to generate_sample as `epoch` and printed as "Epoch" —
        sample files are overwritten every epoch and the log label is
        misleading. Confirm whether `e` was intended here.
        """
        n_batches = len(X_train) // batch_size
        for e in range(epoch):
            for i in range(n_batches):
                x_batch = X_train[i * batch_size:(i + 1) * batch_size]
                Z = np.random.uniform(-1, 1, (batch_size, self.z_shape))
                _, d_loss = self.sess.run([self.disc_opt, self.disc_loss], feed_dict={self.X: x_batch, self.Z: Z})
                Z = np.random.uniform(-1, 1, (batch_size, self.z_shape))
                _, g_loss = self.sess.run([self.gen_opt, self.gen_loss], feed_dict={self.Z: Z})
                if i % 20 == 0:
                    self.generate_sample(i, batch_size)
                    print(f"Epoch: {i}. Discriminator loss: {d_loss}. Generator loss: {g_loss}")
import pandas as pd

# processing the dataset
# Each CSV row is: label, then 784 pixel values; labels are dropped since
# the GAN is unconditional.
df1 = pd.read_csv('./dataset/mnist_train.csv')
df2 = pd.read_csv('./dataset/mnist_test.csv')
X1 = df1.iloc[:, 1:].values
X2 = df2.iloc[:, 1:].values
X = np.concatenate([X1, X2])
X = X.reshape(-1, 28, 28)
# normalize between -1 and 1 (matches the generator's tanh output range)
X = X / 127.5 - 1

# creating and training the GAN
gan = DCGAN()
gan.train(X)
| ShankulShukla/Generative-Modeling | DC-GAN.py | DC-GAN.py | py | 7,046 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.variable_scope",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.random_normal",
"line_number": 22,
"usage_type": "call"
},
{
"api_name":... |
31045215488 | import os
import logging
import boto3
import json
import io
import pandas as pd
# Module-level logger and AWS clients: created once per Lambda container
# and reused across invocations.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
s3 = boto3.client("s3")
iam = boto3.client("iam")
personalizeRt = boto3.client("personalize-runtime")
# Deployment-time configuration injected through environment variables.
solution_arn = os.environ["SOLUTION_ARN"]
campaign_arn = os.environ["CAMPAIGN_ARN"]
num_results = int(os.environ["NUM_RESULTS"])
bucket = os.environ["BUCKET"]
metadata_key = os.environ["METADATA_KEY"]
def get_real_time_recommendations(
    campaign_arn, user_id, bucket, movies_key, num_results, **context
):
    """Fetch recommendations from a Personalize campaign and print their titles.

    Extra keyword arguments are forwarded as contextual metadata. Note that
    ``num_results`` is only applied on the context-free call path (matching
    the original behavior).
    """
    if context:
        response = personalizeRt.get_recommendations(
            campaignArn=campaign_arn, userId=user_id, context=context
        )
    else:
        response = personalizeRt.get_recommendations(
            campaignArn=campaign_arn, userId=user_id, numResults=num_results
        )
    logger.info("Recommended items: \n")
    for recommended in response["itemList"]:
        title, genre = get_movie_names_from_id(
            bucket, movies_key, int(recommended["itemId"])
        )
        print(f"{title} ({genre})")
    return response
def get_movie_names_from_id(bucket, key, movie_id):
    """Look up a movie's title and genres in the metadata CSV stored on S3."""
    obj = s3.get_object(Bucket=bucket, Key=key)
    catalog = pd.read_csv(io.BytesIO(obj["Body"].read()))
    match = catalog[catalog["movieId"] == movie_id]
    title = match["title"].values.flatten()[0]
    genre = match["genres"].values.flatten()[0]
    return title, genre
def lambda_handler(event, context):
    """Lambda entry point: return recommendations for event['user_id'].

    event may carry an optional 'context' field holding a JSON-encoded
    string of Personalize contextual metadata (defaults to "{}").
    The Lambda runtime `context` argument is unused.
    """
    user_id = event["user_id"]
    context_metadata = event.get("context", "{}")
    context_metadata = json.loads(context_metadata)
    if len(context_metadata) == 0:
        logger.info(
            f"Generating {num_results} recommendations for user {user_id} using campaign {campaign_arn}"
        )
    else:
        logger.info(
            f"Generating recommendations for user {user_id} using campaign {campaign_arn}, with provided context: \n\n {context_metadata}"
        )
    # An empty dict unpacks to no kwargs, selecting the numResults branch.
    return get_real_time_recommendations(
        campaign_arn, user_id, bucket, metadata_key, num_results, **context_metadata
    )
| ryankarlos/AWS-ML-services | lambdas/realtimepersonalize/lambda_function.py | lambda_function.py | py | 2,097 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "boto3.client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"l... |
44292327781 | from collections import namedtuple
import hashlib
from itertools import product
from typing import TYPE_CHECKING, Optional
import uuid
import pytest
from pynenc import Pynenc
from pynenc.broker.base_broker import BaseBroker
from pynenc.orchestrator.base_orchestrator import BaseOrchestrator
from pynenc.runner.base_runner import BaseRunner
from pynenc.serializer.base_serializer import BaseSerializer
from pynenc.state_backend.base_state_backend import BaseStateBackend
from tests.conftest import MockPynenc
if TYPE_CHECKING:
from _pytest.python import Metafunc
from _pytest.fixtures import FixtureRequest
from pynenc.task import Task
# Bundle of component *classes* (not instances) describing one Pynenc app
# configuration to exercise in the integration tests.
AppComponents = namedtuple(
    "AppComponents",
    [
        "broker",
        "orchestrator",
        "runner",
        "serializer",
        "state_backend",
    ],
)
def get_combination_id(combination: AppComponents) -> str:
    """Build a compact, readable test id such as
    'run.Thread-brk.Mem-orc.Mem-sbk.Mem-ser.Json' from the component classes."""
    parts = (
        ("run", combination.runner.__name__, "Runner"),
        ("brk", combination.broker.__name__, "Broker"),
        ("orc", combination.orchestrator.__name__, "Orchestrator"),
        ("sbk", combination.state_backend.__name__, "StateBackend"),
        ("ser", combination.serializer.__name__, "Serializer"),
    )
    return "-".join(
        f"{tag}.{cls_name.replace(suffix, '')}" for tag, cls_name, suffix in parts
    )
def pytest_generate_tests(metafunc: "Metafunc") -> None:
    """Parametrize the `app` fixture with every valid component combination.

    Memory-based runners may pair with any components; non-memory runners
    are paired only with non-memory components.
    """
    def get_subclasses(cls: type, mem_cls: Optional[bool] = None) -> list[type]:
        # Collect concrete subclasses of `cls`, skipping mocks/dummies and
        # Process* runners; when mem_cls is given, keep only classes whose
        # name does (True) / does not (False) start with "Mem".
        subclasses = []
        for c in cls.__subclasses__():
            if "mock" in c.__name__.lower() or c.__name__.startswith("Dummy"):
                continue
            if mem_cls is not None and mem_cls != c.__name__.startswith("Mem"):
                continue
            if c.__name__.startswith("Process"):
                continue
            subclasses.append(c)
        return subclasses
    if "app" in metafunc.fixturenames:
        # mem runners can run with any combination of components (including memory components)
        mem_combinations = map(
            lambda x: AppComponents(*x),
            product(
                get_subclasses(BaseBroker),
                get_subclasses(BaseOrchestrator),
                get_subclasses(BaseRunner, mem_cls=True),
                get_subclasses(BaseSerializer),
                get_subclasses(BaseStateBackend),
            ),
        )
        # If the runner is not a memory runner, it cannot be used with memory components
        not_mem_combinations = map(
            lambda x: AppComponents(*x),
            product(
                get_subclasses(BaseBroker, mem_cls=False),
                get_subclasses(BaseOrchestrator, mem_cls=False),
                get_subclasses(BaseRunner, mem_cls=False),
                get_subclasses(BaseSerializer, mem_cls=False),
                get_subclasses(BaseStateBackend, mem_cls=False),
            ),
        )
        combinations = list(mem_combinations) + list(not_mem_combinations)
        ids = list(map(get_combination_id, combinations))
        # indirect=True routes each AppComponents through the `app` fixture below.
        metafunc.parametrize("app", combinations, ids=ids, indirect=True)
def get_unique_id() -> str:
    """Return a short (8 hex chars) pseudo-unique id derived from a UUID4."""
    raw = uuid.uuid4().bytes
    return hashlib.sha256(raw).hexdigest()[:8]
@pytest.fixture
def app(request: "FixtureRequest") -> Pynenc:
    """Build a Pynenc app from the parametrized AppComponents classes.

    The app_id embeds the test module and test name so every test gets an
    isolated namespace; state is purged before and after the test.
    """
    components: AppComponents = request.param
    # Brackets come from pytest parametrized ids -- presumably invalid in
    # app ids, so swap them for parentheses (TODO confirm).
    test_name = request.node.name.replace("[", "(").replace("]", ")")
    test_module = request.node.module.__name__
    app = Pynenc(app_id=f"{test_module}.{test_name}")
    app.set_broker_cls(components.broker)
    app.set_orchestrator_cls(components.orchestrator)
    app.set_serializer_cls(components.serializer)
    app.set_state_backend_cls(components.state_backend)
    app.runner = components.runner(app)
    # purge before and after each test
    app.purge()
    request.addfinalizer(app.purge)
    return app
# Module-level mocked app used only to register the tasks below; the real
# parametrized app is swapped in by the fixtures.
mock_app = MockPynenc()
@mock_app.task
def sum(x: int, y: int) -> int:
    # NOTE(review): shadows the builtin `sum` at module scope.
    return x + y
@pytest.fixture(scope="function")
def task_sum(app: Pynenc) -> "Task":
    """Return the `sum` task rebound to the parametrized app."""
    sum.app = app
    return sum
# Two tasks that invoke each other, forming a deliberate call cycle used to
# test cycle detection.
@mock_app.task
def cycle_start() -> None:
    cycle_end().result
@mock_app.task
def cycle_end() -> None:
    cycle_start().result
@pytest.fixture(scope="function")
def task_cycle(app: Pynenc) -> "Task":
    """Return cycle_start (and rebind cycle_end) to the parametrized app."""
    # this replacing the app of the task works in multithreading
    # but not in multi processing runner,
    # the process start from scratch and reference the function
    # with the mocked decorator
    cycle_start.app = app
    cycle_end.app = app
    return cycle_start
| pynenc/pynenc | tests/integration/apps/mem_combinations/conftest.py | conftest.py | py | 4,473 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "collections.namedtuple",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "itertools... |
42504873893 | import random as rnd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
p_show = lambda x: rnd.uniform(0.907, 0.968)
def show_up(p):
    """Return True if a simulated passenger shows up, given probability p.

    Draws a uniform number in [0, 1); the passenger shows up when the draw
    does not exceed p.  (Returns the comparison directly instead of the
    original explicit True/False branches.)
    """
    return rnd.random() <= p
def flight(num_tix, tix_price, comp_cost, capacity):
    """Simulate one flight: revenue from shows minus compensation for any
    passengers denied boarding when shows exceed capacity."""
    prob = p_show(0)
    n_shows = sum(1 for _ in range(num_tix) if show_up(prob))
    revenue = tix_price * n_shows
    bumped = n_shows - capacity
    if bumped > 0:
        revenue -= comp_cost * bumped
    return revenue
def run_sim(ticket_price, capacity):
    """Sweep ticket prices x capacities and collect expected revenues.

    For each (price, capacity) pair, simulates selling capacity..capacity+35
    tickets in steps of 5 -- eight values, matching the eight result buckets
    hard-coded in sims_10000.  Compensation per denied boarding is fixed at
    4x the ticket price.

    Returns (x_vals, y_vals): each x_vals entry is the list of ticket counts
    with the ticket price appended as the last element (consumed by
    plot_results); each y_vals entry is the matching list of mean revenues.
    """
    x_vals = []
    y_vals = []
    for i in ticket_price:
        comp_cost = 4 * i
        for j in capacity:
            num_tix = [j + k * 5 for k in range(8)]
            num_tix, res = sims_10000(i, comp_cost, j, num_tix)
            x_vals.append(num_tix + [i]), y_vals.append(res)
            # print(x_vals), print(j)
    print(x_vals)
    return x_vals, y_vals
def sims_10000(ticket_price, comp_cost, capacity, num_tix):
    """Run 10,000 Monte Carlo flights for each ticket count in num_tix.

    Parameters: price per ticket, compensation per denied boarding, seat
    capacity, and the list of ticket counts to simulate.

    Returns (num_tix, averages) where averages[i] is the mean revenue over
    10,000 simulated flights selling num_tix[i] tickets.

    Fix: the result buckets are now sized to len(num_tix) instead of being
    hard-coded to eight lists, so a caller passing a different number of
    ticket counts no longer hits an IndexError (or silently shares buckets).
    """
    res = [[] for _ in num_tix]
    for i in range(len(num_tix)):
        for _ in range(10000):
            res[i].append(flight(num_tix[i], ticket_price, comp_cost, capacity))
    res = [sum(y) / len(y) for y in res]
    return num_tix, res
def plot_results(x_vals, y_vals):
    """Plot expected revenue vs. tickets sold for each simulated scenario.

    Each x_vals[i] holds the ticket counts with the ticket price appended
    as the final element (used only for the title).  Every figure is shown
    and also saved as an EPS file ('filename<i>.eps').
    """
    for i in range(len(x_vals)):
        x, y = x_vals[i], y_vals[i]
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.plot(x[:-1], y)
        # x[0] is the smallest ticket count (== capacity); x[-1] is the price.
        ax.set(title="Simulation where capacity = %d and ticket price = %d" % (x[0], x[-1]),
               xlabel = 'Number of tickets sold',
               ylabel = 'Expected revenue in $')
        ax.fill_between(x[:-1], 10000, y, alpha=0.5)
        ax.set_ylim([min(y) - 2000, max(y) + 2000])
        ax.set_xlim([min(x[:-1]), max(x[:-1])])
        plt.setp(ax.get_xticklabels(), rotation=45)
        plt.grid()
        plt.show()
        fig.savefig('filename%d.eps'%i, format='eps')
# Driver: sweep three ticket prices and two capacities, then plot.
t = [100, 500, 1000]
c = [200, 500]
x, y = run_sim(t, c)
plot_results(x, y)
| behtashgolshani/Monte-Carlo-simulation-airline-overbooking | simulation.py | simulation.py | py | 1,991 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.uniform",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
72734245545 | from neo4j import GraphDatabase
from lxml import etree, objectify
import mwparserfromhell
import spacy
import re
import sys
import math
import enchant
import argparse
"""
The interface to the Neo4J database.
"""
class Neo4JInterface:
    """Thin wrapper over the Neo4j driver: creates page nodes and
    relationships between them, printing each result."""

    def __init__(self, uri, user, password):
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        # Release the driver's connection pool.
        self.driver.close()

    def print_create_page(self, w_id, title, text, page_type):
        # Calls the private create_page method, and uses it to create a page.
        # Requires the ID of the wiki page, the title, text, and the type of page.
        with self.driver.session() as session:
            result = session.write_transaction(
                self._create_page, w_id, title, text, page_type)
            print(result)

    def print_create_relationship(self, link_from, link_to, relation):
        # Creates a relationship between two pages.
        with self.driver.session() as session:
            result = session.write_transaction(
                self._create_relationship, link_from, link_to, relation)
            if result != None: print(result, "with relation:", relation)

    @staticmethod
    def _create_page(tx, w_id, title, text, page_type):
        # The node label cannot be a Cypher parameter, so it is spliced in
        # via str.replace before the query runs.
        # NOTE(review): `ptype` is passed to tx.run but never referenced in
        # the query text -- confirm it can be dropped.
        query = "MERGE (n:$PTYPE { id: $id, name: $name })".replace("$PTYPE", page_type)
        result = tx.run(query +
            "ON CREATE SET n.text = $text "
            "ON MATCH SET n.text = $text "
            "RETURN n.name, n.id ", id=w_id, name=title, text=text, ptype=page_type)
        return result.single()[0]

    @staticmethod
    def _create_relationship(tx, link_from, link_to, relation):
        # Apparently the $RELATION won't be replaced by the tx.run variable substitution
        # mechanism, therefore I've adopted this janky approach.
        query = ("MATCH (from { name: $link_from }) "
                "MATCH (to { name: $link_to }) "
                "MERGE (from)-[rel:$RELATION]->(to)".replace("$RELATION", relation))
        query += " RETURN from.name, to.name"
        result = tx.run(query, link_from=link_from, link_to=link_to)
        result = result.single()
        return result
class Page:
    """One wiki page: parses its Wikicode and extracts links, plain text and
    link -> relation-word mappings used to build the Neo4j graph."""

    def __init__(self, w_id, title, text):
        self.w_id = w_id
        self.title = title
        self.text = text
        self.wikicode = mwparserfromhell.parse(text)
        self.full_links = self.links()
        self.partial_links = self.lookup_links()
        self.templates = self.wikicode.filter_templates()

    def links(self):
        """Return every wikilink target on the page, excluding File: links
        and dropping any '#anchor' suffix."""
        r = []
        for link in self.wikicode.filter_wikilinks():
            link = link.title
            if not link.startswith("File:"):
                # If there is an anchor in the page, we remove it.
                r.append(link.split("#", 1)[0])
        return r

    def lookup_links(self):
        """
        Returns a list with all the links split into their word components.
        This is to enable easier lookups, as NP chunks will likely split the word up.
        And there will be other components in the NP chunks, muddying lookup.
        """
        r = []
        for link in self.full_links:
            for y in link.split(" "):
                r.append(y)
        return r

    def process_text(self):
        """
        Converts the text from Wikicode into plain text.
        """
        filtered = self.wikicode.strip_code(normalize=True)
        # Ensures all the words are split.
        filtered = filtered.replace("\n", " ").split(" ")
        # The RS Wiki places an image at the start of most articles.
        # The image's text is "left" in the Wikicode, but it isn't.
        # This is our crude attempt at filtering it out.
        if filtered[0].startswith("left"):
            del filtered[0]
        # strip_code isn't fully perfect on our dataset. Occasionally, remnants of images
        # sneak through as "thumb|XXXpx|Word", so we try and catch these instances, and
        # extract the word from it, or otherwise remove the broken word entirely.
        i = 0
        while i < len(filtered):
            if "thumb|" in filtered[i]:
                filtered[i] = filtered[i].split("|")[-1]
            if "File:" in filtered[i] or filtered[i].endswith("|left"):
                del filtered[i]
                i -= 1
            i += 1
        return " ".join(map(str, filtered)).strip()

    def rel_standardise(self, rel):
        """
        Changes relationships to a standard format, and changes any spaces to underscores.
        """
        return rel.upper().strip().replace(" ", "_")

    def find_link_relation_word(self, max_deps, nlp, dictionary):
        """
        It takes the current page, filters it by links, processes with spaCy NLP,
        loops over all NP chunks, checks if it is a link, and finds the relation word
        that links the current page to the link. It restricts the amount of relation words
        per link depending on the value of max_dependencies.
        """
        link_dependency = {}
        # Parses the text with the spaCy NLP that is passed through.
        doc = nlp(self.process_text())
        for chunk in doc.noun_chunks:
            # If the dependency type ends with "obj", it finds if there are
            # any links within the NP chunk.
            # If there are any links, it ensures they are complete links (i.e. not just
            # part of a link). After that, it'll add the link and dependency to the
            # {link, set of dependencies}.
            if chunk.root.dep_.endswith("obj") and chunk.root.head.text.isalpha():
                link = []
                dependency = self.rel_standardise(chunk.root.head.text)
                for word in chunk.text.split(" "):
                    if word in self.partial_links:
                        link.append(word)
                link = " ".join(link)
                if link in self.full_links and dictionary.check(dependency) and len(dependency) > 1:
                    if link in link_dependency:
                        if len(link_dependency[link]) >= max_deps:
                            # Finds the minimum length word in the set.
                            min_word = min(link_dependency[link], key=len)
                            # If the new dependency is bigger, we'll substitute it in.
                            if len(min_word) < len(dependency):
                                link_dependency[link].remove(min_word)
                                link_dependency[link].add(dependency)
                        else:
                            link_dependency[link].add(dependency)
                    else:
                        link_dependency[link] = {dependency}
        return link_dependency

    def infobox_link_dep(self):
        """
        Finds the Infobox template within a page, and returns any links present within
        the parameter value, along with the name of the parameter it originates from.
        """
        link_dependency = {}
        for template in self.templates:
            # Not always the first link in a page.
            if str(template.name).lower().startswith("infobox"):
                for param in template.params:
                    for link in param.value.filter_wikilinks():
                        link = link.title.split("#", 1)[0]
                        # Bug fix: previously this compared against the
                        # module-level `title` global (set only in __main__);
                        # use this page's own title to skip self-links.
                        if not link.startswith("File:") and link != self.title:
                            dependency = self.rel_standardise(str(param.name))
                            link_dependency[link] = {dependency}
        return link_dependency
if __name__ == "__main__":
    # Parses the arguments for input file.
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str,
                        help='name of input filename')
    parser.add_argument('neo4j_address', type=str,
                        help='address of the neo4j server')
    parser.add_argument('neo4j_username', type=str,
                        help='username of neo4j server')
    parser.add_argument('neo4j_password', type=str,
                        help='neo4j db password')
    args = parser.parse_args()
    neoInst = Neo4JInterface(args.neo4j_address, args.neo4j_username, args.neo4j_password)
    # The file that will be processed.
    xmldoc = etree.parse(args.input)
    root = xmldoc.getroot()
    # Creates an instance of the en_US dictionary.
    dictionary = enchant.Dict("en_US")
    # Strips the tags of namespaces. Makes traversal easier.
    # We can still do this and be far more efficient than Minidom.
    for elem in root.iterdescendants():
        elem.tag = etree.QName(elem).localname
    # Creates an iterator for all the page elements, so I can iterate over them.
    itemlist = root.iterfind("page")
    # Setup and load spaCy model
    nlp = spacy.load("en_core_web_sm")
    print("Parsing file.")
    # First pass: create one node per wiki page, typed by its infobox if any.
    for item in itemlist:
        # Retrieves the page ID, title and page text.
        w_id = item.find('id').text
        title = item.find('title').text
        text = item.find('revision').find('text').text
        page_type = "Page"
        PageInst = Page(w_id, title, text)
        for template in PageInst.templates:
            # If the infobox is annotated with a type, use it as the page type
            if str(template.name).lower().startswith("infobox") and len(template.name) > 2:
                page_type = template.name.split(" ")[-1].capitalize()
        neoInst.print_create_page(w_id, title, text, page_type)
    print("Creating relationships")
    # As the iterator is reset, we'll instantiate it again.
    itemlist = root.iterfind("page")
    # Second pass: link the nodes, preferring structured infobox relations
    # over relation words mined from the prose.
    for item in itemlist:
        # Retrieves the page ID, title and page text.
        w_id = item.find('id').text
        title = item.find('title').text
        text = item.find('revision').find('text').text
        # Creates a page object
        PageInst = Page(w_id, title, text)
        # Finds links on each page, with a relation word that links them together.
        link_dependency = PageInst.find_link_relation_word(2, nlp, dictionary)
        info_link_dependency = PageInst.infobox_link_dep()
        # Overrides any links from the unstructured links with the structured links.
        for link in info_link_dependency:
            link_dependency[link] = info_link_dependency[link]
        # Writes it all into the database.
        for link in link_dependency:
            for relation in link_dependency[link]:
                neoInst.print_create_relationship(title, link, relation)
    neoInst.close()
| arrivance/wiki-to-neo4j | wiki4j.py | wiki4j.py | py | 10,651 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "neo4j.GraphDatabase.driver",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "neo4j.GraphDatabase",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "mwparserfromhell.parse",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": ... |
13938469838 | from analysers.WarningAnalyser import WarningAnalyser
from auto_editor.StructuredProjectSource_Recommendation import StructuredProjectSource_Recommendation
from enums import RecommendationItem
from typing import List
class WarningRecommendationAnalyser(WarningAnalyser):
    """
    Basically, the same as the pre-analyzer,
    plus the ability to also detect and count new CTA warnings/recommendations.
    We currently don't have any of these,
    so I recommend working with a 'dummy' string here.
    E.g. for the ticket you can assume that all CTA warnings/recommendations begin with "CTA<some-number>: "
    """

    def count_warnings_numbers(self, warning_code, cta_number, dpct_number):
        # Increment whichever counter matches the family encoded in the
        # warning code string, and return both counters as a tuple.
        # NOTE: a code containing both "CTA" and "DPCT" counts only as CTA.
        if 'CTA' in warning_code:
            cta_number += 1
        elif 'DPCT' in warning_code:
            dpct_number += 1
        return cta_number, dpct_number

    def get_all_recommendation(self) -> List[RecommendationItem]:
        """Collect every recommendation in the project as RecommendationItem objects."""
        project = StructuredProjectSource_Recommendation(self.project_root_path)
        recommendations_dict = project.recommendations_dict
        all_recommendations = []
        # Index each file's line codes and line ids by file name for lookups below.
        all_codes = {}
        all_ids = {}
        for name, line_items in project.paths_to_lines.items():
            for i in line_items:
                all_codes.setdefault(name, []).append(i.code)
                all_ids.setdefault(name, []).append(i.id)
        for k, v in recommendations_dict.items():
            # k: recommendation code; v: list of (first_line_id, last_line_id, file_path).
            for info in v:
                first_line_id = info[0]
                last_line_id = info[1]
                file_path = info[2]
                path = '/' + file_path
                if file_path in all_ids.keys():
                    codes = all_codes[file_path]
                    ids = all_ids[file_path]
                    # Resolve line ids to a line number and message via
                    # helpers inherited from WarningAnalyser.
                    first_line = self.get_first_line_num(first_line_id, codes, ids)
                    message = self.get_warning_message(first_line, last_line_id, codes, ids)
                    warning = RecommendationItem(project_name=self.project_root_path.stem,
                                                 recommendation_code=k,
                                                 file_path=path,
                                                 message=message,
                                                 line=first_line)
                    all_recommendations.append(warning)
        return all_recommendations
| UCL-oneAPI/CTA-oneAPI | analysers/WarningRecommendationAnalyser.py | WarningRecommendationAnalyser.py | py | 2,371 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "analysers.WarningAnalyser.WarningAnalyser",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "auto_editor.StructuredProjectSource_Recommendation.StructuredProjectSource_Recommendation",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "enums.Recommen... |
23363352897 | import torch
import torch.nn as nn
import math
class GlobalReinitNet(nn.Module):
    """Regress a single 2x3 affine transform (6 values) from a 3-channel image."""

    def __init__(self):
        super(GlobalReinitNet, self).__init__()
        # Convolutional localization trunk: 3 -> 8 -> 16 -> 16 -> 32 -> 32 -> 64
        # channels, downsampling with stride-2 conv and three max-pools.
        self.localization = nn.Sequential(
            nn.Conv2d(3, 8, kernel_size=5, stride=2, padding=0),
            nn.MaxPool2d(2, stride=2),
            nn.PReLU(),
            nn.Conv2d(8, 16, kernel_size=3, stride=1, padding=0),
            nn.PReLU(),
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0),
            nn.MaxPool2d(2, stride=2),
            nn.PReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=0),
            nn.PReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=0),
            nn.MaxPool2d(2, stride=2),
            nn.PReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=0),
            nn.PReLU(),
        )
        # Head mapping the flattened 256-dim feature to the 6 affine parameters.
        self.fc_loc = nn.Sequential(
            nn.Linear(256, 32),
            nn.PReLU(),
            nn.Linear(32, 3 * 2)
        )
        self._initialize_weights()

    def forward(self, x):
        features = self.localization(x)
        features = torch.flatten(features, 1)
        return self.fc_loc(features)

    def _initialize_weights(self):
        # He-style normal init for convs, small normal for linears, unit/zero for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
class LocalReinitNet(nn.Module):
    """Predict one 2x3 affine transform per facial part (eyes, nose, mouth)."""

    def __init__(self, input_dim=196):
        super(LocalReinitNet, self).__init__()
        # One independent regression head per facial region.
        self.left_eye_net = self.make_net(input_dim)
        self.right_eye_net = self.make_net(input_dim)
        self.nose_net = self.make_net(input_dim)
        self.mouth_net = self.make_net(input_dim)
        self._initialize_weights()

    def make_net(self, input_dim):
        """Build a small MLP mapping input_dim features to 6 affine parameters."""
        return nn.Sequential(
            nn.Linear(input_dim, 128),
            nn.PReLU(),
            nn.Linear(128, 64),
            nn.PReLU(),
            nn.Linear(64, 3 * 2),
        )

    def forward(self, x):
        # Output order matters to callers: left eye, right eye, nose, mouth.
        heads = (self.left_eye_net, self.right_eye_net, self.nose_net, self.mouth_net)
        return [head(x) for head in heads]

    def _initialize_weights(self):
        # Same init scheme as GlobalReinitNet; only the Linear branch fires here.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
| shaoxiaohu/Face_Alignment_DPR | networks/ReinitNet.py | ReinitNet.py | py | 3,833 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
27337201293 | from flask import Flask, render_template, redirect, request, session, url_for, send_file
import sqlite3
from datetime import datetime, timedelta, date
from werkzeug.security import check_password_hash, generate_password_hash
from io import BytesIO
import openpyxl as xl
from openpyxl.styles import Font
from os import path
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- should come from config/env in production.
app.secret_key = "hello"
app.config['SESSION_TYPE'] = 'filesystem'
# Permanent sessions expire after five hours.
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(hours=5)
# Resolve data.db relative to this file so the app works from any CWD.
root = path.dirname(path.realpath(__file__))
def drcr(amount):
    """Render a ledger balance with a Dr./Cr. suffix.

    Non-negative amounts are debits; negative amounts are shown as
    positive credits.
    """
    if amount < 0:
        return f"{-amount:,.2f} Cr."
    return f"{amount:,.2f} Dr."
def toabs(amount):
    """Format the magnitude of an amount with thousands separators, 2 decimals."""
    magnitude = abs(amount)
    return f"{magnitude:,.2f}"
def topty(amount):
    """Accounting-style number format: negatives wrapped in parentheses."""
    if amount == 0:
        return "0.00"
    formatted = f"{abs(amount):,.2f}"
    return formatted if amount > 0 else f"({formatted})"
def tomillion(amount):
    """Abbreviate a large amount as whole millions, e.g. 2500000 -> '2 m'.

    int() truncates toward zero, so 2.9 million renders as '2 m'.
    """
    millions = int(amount / 1000000)
    return f"{millions:,} m"
# Account type codes -> display names (balance-sheet / income-statement groups).
types = {
    "NCA": "Non-current asset",
    "CA": "Current asset",
    "NCL": "Non-current liability",
    "CL": "Current liability",
    "EQT": "Equity",
    "INC": "Income",
    "EXP": "Expenses"
}
# Account subtype codes -> display names shown in the UI.
subtypes = {
    "ppe": "Property, plant and equipment",
    "investments": "Investments",
    "intangible": "Intangibles",
    "inventories": "Inventories",
    "receivables": "Trade receivables",
    "cash": "Cash and cash equivalents",
    "long-borrowings": "Long-term borrowings",
    "deferred-tax": "Deferred tax",
    "payables": "Trade and other payables",
    "short-borrowings": "Short term borrowings",
    "tax-payable": "Current tax payable",
    "provisions": "Short-term provisions",
    "capital": "Capital",
    "other-equity": "Other components of equity",
    "sales": "Sales",
    "investment-income": "Investment income",
    "other-income": "Other income",
    "cost-of-sales": "Cost of sales",
    "distribution-costs": "Distribution costs",
    "admin-exp": "Administrative expenses",
    "finance-costs": "Finance costs",
    "tax-exp": "Income tax expense",
}
# Currencies a user may choose at signup.
currency_list = ["RM", "$", "€", "£", "¥"]
@app.route("/")
def index():
if "name" in session:
return redirect("/home")
else:
return render_template("index.html")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
name = request.form.get("login-name")
password = request.form.get("login-password")
if not name or not password:
return render_template("login.html", msg="Input field is empty")
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT password FROM persons WHERE name=?", (name, ))
password_list = db.fetchall()
if len(password_list) != 1:
conn.close()
return render_template("login.html", msg="Invalid username")
if check_password_hash(password_list[0][0], password):
conn.close()
session.permanent = True
session["name"] = name
return redirect("/home")
else:
conn.close()
return render_template("login.html", msg="Invalid password")
else:
return render_template("login.html")
@app.route("/signup", methods=["GET", "POST"])
def signup():
if request.method == "POST":
name = request.form.get("signup-name")
password1 = request.form.get("signup-password1")
password2 = request.form.get("signup-password2")
currency = request.form.get("currency")
print(currency)
date = datetime.now().replace(microsecond=0)
agree = request.form.get("agree")
if not name or not password1 or not password2:
return render_template("signup.html", msg="Input field is empty", currency_list=currency_list)
if password1 != password2:
return render_template("signup.html", msg="Password and confirmation password are different", currency_list=currency_list)
if agree != "agree":
return render_template("signup.html", msg="You must agree the terms of use to sign up", currency_list=currency_list)
if currency not in currency_list:
return render_template("signup.html", msg="Invalid currency", currency_list=currency_list)
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT * FROM persons WHERE name = ?", (name, ))
if len(db.fetchall()) == 1:
conn.close()
return render_template("signup.html", msg="Username has been taken. Please choose another username.", currency_list=currency_list)
else:
db.execute("INSERT INTO persons (name, password, date, currency) VALUES (?, ?, ?, ?)", (name, generate_password_hash(password1), date, currency))
db.execute("SELECT id FROM persons WHERE name = ?", (name, ))
id = db.fetchall()[0][0]
db.execute("INSERT INTO accounts (name, type, subtype, balance, persons_id, dependency, deleted) VALUES (?, ?, ?, ?, ?, ?, ?)", ("Cash", "CA", "cash", 0, id, 0, 0))
db.execute("INSERT INTO accounts (name, type, subtype, balance, persons_id, dependency, deleted) VALUES (?, ?, ?, ?, ?, ?, ?)", ("Bank", "CA", "cash", 0, id, 0, 0))
db.execute("INSERT INTO accounts (name, type, subtype, balance, persons_id, dependency, deleted) VALUES (?, ?, ?, ?, ?, ?, ?)", ("Capital", "EQT", "capital", 0, id, 0, 0))
conn.commit()
conn.close()
session.permanent = True
session["name"] = name
return redirect("/home")
else:
return render_template("signup.html", currency_list=currency_list)
@app.route("/home", methods=["GET", "POST"])
def home():
if "name" not in session:
return redirect("/")
if request.method == "POST":
person = session["name"]
debit = request.form.get("debit")
credit = request.form.get("credit")
particular = request.form.get("particular")
amount = request.form.get("amount")
date = datetime.now().replace(microsecond=0)
if not debit or not credit or not particular or not amount:
return redirect("/home")
if debit == credit:
return redirect("/home")
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT id FROM persons WHERE name = ?", (person, ))
id = db.fetchall()[0][0]
db.execute("SELECT id FROM accounts WHERE persons_id = ? AND name = ?", (id, debit))
debit_id = db.fetchall()[0][0]
db.execute("SELECT id FROM accounts WHERE persons_id = ? AND name = ?", (id, credit))
credit_id = db.fetchall()[0][0]
db.execute("INSERT INTO transactions (persons_id, debit_id, credit_id, particular, amount, date) VALUES (?, ?, ?, ?, ?, ?)", (id, debit_id, credit_id, particular, amount, date))
db.execute("SELECT balance FROM accounts WHERE id = ?", (debit_id, ))
debit_balance = db.fetchall()[0][0]
db.execute("SELECT balance FROM accounts WHERE id = ?", (credit_id, ))
credit_balance = db.fetchall()[0][0]
db.execute("UPDATE accounts SET balance = ? WHERE persons_id = ? AND id = ?", (debit_balance + float(amount), id, debit_id))
db.execute("UPDATE accounts SET balance = ? WHERE persons_id = ? AND id = ?", (credit_balance - float(amount), id, credit_id))
conn.commit()
conn.close()
return redirect("/home")
else:
person = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT name FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0", (person, ))
accounts = [item[0] for item in db.fetchall()]
db.execute('SELECT SUM(balance) FROM accounts WHERE subtype = "cash" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
balance = db.fetchall()[0][0]
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
if not balance:
balance = 0
if balance > 1000000 or balance < - 1000000:
balance = tomillion(balance)
else:
balance = topty(balance)
db.execute('SELECT SUM(balance) FROM accounts WHERE type = "INC" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
income = db.fetchall()[0][0]
db.execute('SELECT SUM(balance) FROM accounts WHERE type = "EXP" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
expenses = db.fetchall()[0][0]
if not income:
income = 0
if not expenses:
expenses = 0
pft = - (income + expenses)
if pft > 1000000 or pft < - 1000000:
profit = tomillion(pft)
else:
profit = topty(pft)
transactions = []
db.execute("SELECT * FROM transactions WHERE persons_id = (SELECT id FROM persons WHERE name = ?)", (person, ))
history = db.fetchall()
for item in history:
date = item[6]
db.execute("SELECT name FROM accounts WHERE id = ?", (item[2], ))
debit = db.fetchall()[0][0]
db.execute("SELECT name FROM accounts WHERE id = ?", (item[3], ))
credit = db.fetchall()[0][0]
particular = item[4]
amount = toabs(item[5])
transactions.append([date, debit, credit, particular, amount])
conn.close()
return render_template("home.html", accounts=accounts, balance=balance, profit=profit, pft=pft, transactions=transactions, currency=currency)
@app.route("/add-account", methods=["GET", "POST"])
def add_account():
if "name" not in session:
return redirect("/")
if request.method == "POST":
person = session["name"]
type = request.form.get("type")
subtype = request.form.get("subtype")
account_name = request.form.get("account-name")
if type not in types or not subtype or not account_name:
return render_template("add-account.html", types=list(types.items()), alert_msg="Input field is empty")
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT id FROM persons WHERE name = ?", (person, ))
id = db.fetchall()[0][0]
db.execute("SELECT id, deleted FROM accounts WHERE name = ? AND persons_id = ?", (account_name, id))
record = db.fetchall()
if len(record) != 1:
db.execute("INSERT INTO accounts (name, type, subtype, balance, persons_id, dependency, deleted) VALUES (?, ?, ?, ?, ?, ?, ?)", (account_name, type, subtype, 0, id, 0, 0))
conn.commit()
conn.close()
return render_template("add-account.html", types=list(types.items()), primary_msg="Account added")
if len(record) == 1 and record[0][1] == 1:
db.execute("UPDATE accounts SET deleted = 0 WHERE id = ?", (record[0][0], ))
conn.commit()
conn.close()
return render_template("add-account.html", types=list(types.items()), primary_msg="Archived account recovered")
else:
conn.close()
return render_template("add-account.html", types=list(types.items()), alert_msg="Account name must be unique")
else:
return render_template("add-account.html", types=list(types.items()))
@app.route("/view-account")
def view_account():
if "name" not in session:
return redirect("/")
success = request.args.get("success", None)
person = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT * FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0", (person, ))
accounts = db.fetchall()
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
balances = []
for account in accounts:
type = types[account[2]]
subtype = subtypes[account[3]]
balances.append([account[0], account[1], type, subtype, drcr(account[4])])
conn.close()
return render_template("view-account.html", balances=balances, success=success, currency=currency)
@app.route("/accounts/<int:id>")
def details(id):
if "name" not in session:
return redirect("/")
name = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT * FROM accounts WHERE id = ?", (id, ))
account = db.fetchall()[0]
db.execute("SELECT currency FROM persons WHERE name = ?", (name, ))
currency = db.fetchall()[0][0]
persons_id1 = account[5]
db.execute("SELECT id FROM persons WHERE name = ?", (name, ))
persons_id2 = db.fetchall()[0][0]
if persons_id1 != persons_id2:
conn.close()
return redirect("/")
account_details = [account[1], types[account[2]], subtypes[account[3]], drcr(account[4])]
transactions = []
db.execute("SELECT * FROM transactions WHERE debit_id = ? OR credit_id = ?", (id, id))
history = db.fetchall()
for item in history:
date = item[6]
db.execute("SELECT name FROM accounts WHERE id = ?", (item[2], ))
debit = db.fetchall()[0][0]
db.execute("SELECT name FROM accounts WHERE id = ?", (item[3], ))
credit = db.fetchall()[0][0]
particular = item[4]
amount = topty(item[5])
transactions.append([date, debit, credit, particular, amount])
conn.commit()
conn.close()
return render_template("details.html", id=id, account_details=account_details, transactions=transactions, currency=currency)
@app.route("/delete-account/<int:id>")
def delete_account(id):
if "name" not in session:
return redirect("/")
name = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT persons_id, balance FROM accounts WHERE id = ?", (id, ))
persons_id1, balance = db.fetchall()[0]
db.execute("SELECT id FROM persons WHERE name = ?", (name, ))
persons_id2 = db.fetchall()[0][0]
if persons_id1 != persons_id2:
conn.close()
return redirect("/")
if balance != 0:
conn.close()
return redirect(url_for("view_account", success="false"))
else:
db.execute("UPDATE accounts SET deleted = 1 WHERE id = ?", (id, ))
conn.commit()
conn.close()
return redirect(url_for("view_account", success="true"))
@app.route("/terms-of-use")
def terms():
return render_template("terms.html")
@app.route("/trial-balance")
def tb():
if "name" not in session:
return redirect("/")
person = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT name, balance FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0", (person, ))
balances = db.fetchall()
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
today_date = date.today().strftime('%d %B %Y')
accounts = []
for balance in balances:
if balance[1] >= 0:
accounts.append([balance[0], toabs(balance[1]), None])
else:
accounts.append([balance[0], None, toabs(balance[1])])
db.execute("SELECT SUM(balance) FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0 AND balance >= 0", (person, ))
debit_total = toabs(db.fetchall()[0][0])
db.execute("SELECT SUM(balance) FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0 AND balance >= 0", (person, ))
credit_total = toabs(db.fetchall()[0][0])
conn.close()
return render_template("trial-balance.html", accounts=accounts, debit_total=debit_total, credit_total=credit_total, today_date=today_date, currency=currency)
@app.route("/sopl")
def sopl():
if "name" not in session:
return redirect("/")
year = request.args.get("year", "all")
person = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
db.execute("SELECT id FROM persons WHERE name = ?", (person, ))
id = db.fetchall()[0][0]
db.execute("SELECT DISTINCT(strftime('%Y', date)) from transactions WHERE persons_id = ?", (id, ))
yrs = db.fetchall()
if year not in [str(yr[0]) for yr in yrs] and year != "all":
return redirect("/")
sopl_list = []
for subtype in list(subtypes.keys())[14:]:
if year == "all":
db.execute("SELECT SUM(amount) FROM transactions WHERE debit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ?", (subtype, id))
debit = db.fetchall()[0][0]
db.execute("SELECT SUM(amount) FROM transactions WHERE credit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ?", (subtype, id))
credit = db.fetchall()[0][0]
else:
db.execute("SELECT SUM(amount) FROM transactions WHERE debit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ? AND strftime('%Y', date) = ?", (subtype, id, year))
debit = db.fetchall()[0][0]
db.execute("SELECT SUM(amount) FROM transactions WHERE credit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ? AND strftime('%Y', date) = ?", (subtype, id, year))
credit = db.fetchall()[0][0]
if debit is None:
debit = 0
if credit is None:
credit = 0
amount = credit - debit
sopl_list.append([subtype, amount])
gp = sopl_list[0][1] + sopl_list[3][1]
pfo = gp + sopl_list[4][1] + sopl_list[5][1] + sopl_list[2][1]
pbt = pfo + sopl_list[6][1] + sopl_list[1][1]
pat = pbt + sopl_list[7][1]
total = [topty(gp), topty(pfo), topty(pbt), topty(pat)]
for i in range(len(sopl_list)):
sopl_list[i][1] = topty(sopl_list[i][1])
today_date = date.today().strftime('%d %B %Y')
today_year = date.today().strftime('%Y')
conn.close()
return render_template("sopl.html", today_date=today_date, sopl_list=sopl_list, total=total, subtypes=subtypes, year=year, yrs=yrs, today_year=today_year, currency=currency)
@app.route("/sofp")
def sofp():
if "name" not in session:
return redirect("/")
person = session["name"]
today_date = date.today().strftime('%d %B %Y')
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
sofp_list = []
for subtype in list(subtypes.keys())[:14]:
db.execute("SELECT SUM(balance) FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0 AND subtype = ?", (person, subtype))
balance = db.fetchall()[0][0]
if balance is None:
balance = 0.0
sofp_list.append([subtype, balance])
db.execute('SELECT SUM(balance) FROM accounts WHERE type = "INC" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
income = db.fetchall()[0][0]
db.execute('SELECT SUM(balance) FROM accounts WHERE type = "EXP" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
expenses = db.fetchall()[0][0]
if not income:
income = 0
if not expenses:
expenses = 0
profit = income + expenses
sofp_list[12][1] = sofp_list[12][1] + profit
# Handle overdraft
if sofp_list[5][1] < 0:
sofp_list[8][1] = sofp_list[8][1] - sofp_list[12][1]
sofp_list[5][1] = 0.0
nca = sofp_list[0][1] + sofp_list[1][1] + sofp_list[2][1]
ca = sofp_list[3][1] + sofp_list[4][1] + sofp_list[5][1]
ncl = sofp_list[6][1] + sofp_list[7][1]
cl = sofp_list[8][1] + sofp_list[9][1] + sofp_list[10][1] + sofp_list[11][1]
eqt = sofp_list[12][1] + sofp_list[13][1]
ast = nca + ca
liaeqt = ncl + cl + eqt
total = [topty(nca), topty(ca), topty(ast), topty(- eqt), topty(- ncl), topty(- cl), topty(- liaeqt)]
for i in range(6):
sofp_list[i][1] = topty(sofp_list[i][1])
for i in range(6, 14):
sofp_list[i][1] = topty(- sofp_list[i][1])
conn.close()
return render_template("sofp.html", sofp_list=sofp_list, subtypes=subtypes, total=total, today_date=today_date, currency=currency)
@app.route("/trial-balance/download-excel")
def tb_download_excel():
if "name" not in session:
return redirect("/")
person = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
db.execute("SELECT name, balance FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0", (person, ))
balances = db.fetchall()
output = BytesIO()
wb = xl.Workbook()
sheet = wb.active
sheet.title = "Trial balance"
boldfont = Font(bold=True)
today_date = date.today().strftime('%d %B %Y')
sheet.cell(1, 1).value = person
sheet.cell(2, 1).value = f"Trial Balance as at {today_date}"
sheet.cell(3, 1).value = "Account name"
sheet.cell(3, 2).value = f"Debit ({currency})"
sheet.cell(3, 3).value = f"Credit ({currency})"
sheet.cell(3, 1).font = boldfont
sheet.cell(3, 2).font = boldfont
sheet.cell(3, 3).font = boldfont
for index, balance in enumerate(balances):
name_cell = sheet.cell(index + 4, 1)
name_cell.value = balance[0]
if balance[1] >= 0:
sheet.cell(index + 4, 2).value = abs(balance[1])
else:
sheet.cell(index + 4, 3).value = abs(balance[1])
sheet.cell(len(balances) + 4, 2).value = f"=SUM(B3:B{len(balances) + 3})"
sheet.cell(len(balances) + 4, 3).value = f"=SUM(C3:C{len(balances) + 3})"
sheet.cell(len(balances) + 4, 2).font = boldfont
sheet.cell(len(balances) + 4, 3).font = boldfont
sheet.column_dimensions['A'].width = 30
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
wb.save(output)
output.seek(0)
conn.close()
return send_file(output, download_name="trial-balance.xlsx", as_attachment=True)
@app.route("/sopl/download-excel")
def sopl_download_excel():
if "name" not in session:
return redirect("/")
year = request.args.get("year", "all")
person = session["name"]
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT id FROM persons WHERE name = ?", (person, ))
id = db.fetchall()[0][0]
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
db.execute("SELECT DISTINCT(strftime('%Y', date)) from transactions WHERE persons_id = ?", (id, ))
yrs = db.fetchall()
if year not in [str(yr[0]) for yr in yrs] and year != "all":
return redirect("/")
sopl_list = []
for subtype in list(subtypes.keys())[14:]:
if year == "all":
db.execute("SELECT SUM(amount) FROM transactions WHERE debit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ?", (subtype, id))
debit = db.fetchall()[0][0]
db.execute("SELECT SUM(amount) FROM transactions WHERE credit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ?", (subtype, id))
credit = db.fetchall()[0][0]
else:
db.execute("SELECT SUM(amount) FROM transactions WHERE debit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ? AND strftime('%Y', date) = ?", (subtype, id, year))
debit = db.fetchall()[0][0]
db.execute("SELECT SUM(amount) FROM transactions WHERE credit_id IN (SELECT id FROM accounts WHERE subtype = ?) AND persons_id = ? AND strftime('%Y', date) = ?", (subtype, id, year))
credit = db.fetchall()[0][0]
if debit is None:
debit = 0
if credit is None:
credit = 0
amount = credit - debit
sopl_list.append([subtype, amount])
today_date = date.today().strftime('%d %B %Y')
today_year = date.today().strftime('%Y')
output = BytesIO()
wb = xl.Workbook()
sheet = wb.active
sheet.title = "SOPL"
boldfont = Font(bold=True)
sheet.cell(1, 1).value = person
if year == "all" or year == today_year:
sheet.cell(2, 1).value = f"Statement of Profit or Loss for the year ended {today_date}"
else:
sheet.cell(2, 1).value = f"Statement of Profit or Loss for the year ended 31 December {year}"
sheet.cell(3, 2).value = f"{currency}"
sheet.cell(3, 2).font = boldfont
sheet.cell(4, 1).value = subtypes[sopl_list[0][0]]
sheet.cell(4, 2).value = sopl_list[0][1]
sheet.cell(5, 1).value = subtypes[sopl_list[3][0]]
sheet.cell(5, 2).value = sopl_list[3][1]
sheet.cell(6, 1).value = "Gross profit"
sheet.cell(6, 2).value = "=SUM(B4:B5)"
sheet.cell(6, 1).font = boldfont
sheet.cell(6, 2).font = boldfont
sheet.cell(7, 1).value = subtypes[sopl_list[4][0]]
sheet.cell(7, 2).value = sopl_list[4][1]
sheet.cell(8, 1).value = subtypes[sopl_list[5][0]]
sheet.cell(8, 2).value = sopl_list[5][1]
sheet.cell(9, 1).value = subtypes[sopl_list[2][0]]
sheet.cell(9, 2).value = sopl_list[2][1]
sheet.cell(10, 1).value = "Profit from operations"
sheet.cell(10, 2).value = "=SUM(B6:B9)"
sheet.cell(10, 1).font = boldfont
sheet.cell(10, 2).font = boldfont
sheet.cell(11, 1).value = subtypes[sopl_list[6][0]]
sheet.cell(11, 2).value = sopl_list[6][1]
sheet.cell(12, 1).value = subtypes[sopl_list[1][0]]
sheet.cell(12, 2).value = sopl_list[1][1]
sheet.cell(13, 1).value = "Profit before tax"
sheet.cell(13, 2).value = "=SUM(B10:B12)"
sheet.cell(13, 1).font = boldfont
sheet.cell(13, 2).font = boldfont
sheet.cell(14, 1).value = subtypes[sopl_list[7][0]]
sheet.cell(14, 2).value = sopl_list[7][1]
sheet.cell(15, 1).value = "Profit before tax"
sheet.cell(15, 2).value = "=SUM(B13:B14)"
sheet.cell(15, 1).font = boldfont
sheet.cell(15, 2).font = boldfont
sheet.column_dimensions['A'].width = 30
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
wb.save(output)
output.seek(0)
conn.close()
return send_file(output, download_name="sopl.xlsx", as_attachment=True)
@app.route("/sofp/download-excel")
def sofp_download_excel():
if "name" not in session:
return redirect("/")
person = session["name"]
today_date = date.today().strftime('%d %B %Y')
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT currency FROM persons WHERE name = ?", (person, ))
currency = db.fetchall()[0][0]
sofp_list = []
for subtype in list(subtypes.keys())[:14]:
db.execute("SELECT SUM(balance) FROM accounts WHERE persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0 AND subtype = ?", (person, subtype))
balance = db.fetchall()[0][0]
if balance is None:
balance = 0.0
sofp_list.append([subtype, balance])
db.execute('SELECT SUM(balance) FROM accounts WHERE type = "INC" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
income = db.fetchall()[0][0]
db.execute('SELECT SUM(balance) FROM accounts WHERE type = "EXP" AND persons_id = (SELECT id FROM persons WHERE name = ?) AND deleted = 0', (person, ))
expenses = db.fetchall()[0][0]
if not income:
income = 0
if not expenses:
expenses = 0
profit = income + expenses
sofp_list[12][1] = sofp_list[12][1] + profit
# Handle overdraft
if sofp_list[5][1] < 0:
sofp_list[8][1] = sofp_list[8][1] - sofp_list[12][1]
sofp_list[5][1] = 0.0
output = BytesIO()
wb = xl.Workbook()
sheet = wb.active
sheet.title = "SOFP"
boldfont = Font(bold=True)
today_date = date.today().strftime('%d %B %Y')
sheet.cell(1, 1).value = person
sheet.cell(2, 1).value = f"Statement of Financial Position as at {today_date}"
sheet.cell(3, 2).value = f"{currency}"
sheet.cell(3, 3).value = f"{currency}"
sheet.cell(3, 2).font = boldfont
sheet.cell(3, 3).font = boldfont
sheet.cell(4, 1).value = "Assets"
sheet.cell(4, 1).font = boldfont
sheet.cell(5, 1).value = "Non-current assets"
sheet.cell(5, 1).font = boldfont
sheet.cell(6, 1).value = subtypes[sofp_list[0][0]]
sheet.cell(6, 2).value = sofp_list[0][1]
sheet.cell(7, 1).value = subtypes[sofp_list[1][0]]
sheet.cell(7, 2).value = sofp_list[1][1]
sheet.cell(8, 1).value = subtypes[sofp_list[2][0]]
sheet.cell(8, 2).value = sofp_list[2][1]
sheet.cell(9, 3).value = "=SUM(B6:B8)"
sheet.cell(11, 1).value = "Current assets"
sheet.cell(11, 1).font = boldfont
sheet.cell(12, 1).value = subtypes[sofp_list[3][0]]
sheet.cell(12, 2).value = sofp_list[3][1]
sheet.cell(13, 1).value = subtypes[sofp_list[4][0]]
sheet.cell(13, 2).value = sofp_list[4][1]
sheet.cell(14, 1).value = subtypes[sofp_list[5][0]]
sheet.cell(14, 2).value = sofp_list[5][1]
sheet.cell(15, 3).value = "=SUM(B12:B14)"
sheet.cell(16, 1).value = "Total assets"
sheet.cell(16, 3).value = "=SUM(C9:C15)"
sheet.cell(16, 1).font = boldfont
sheet.cell(16, 3).font = boldfont
sheet.cell(18, 1).value = "Equity and liabilities"
sheet.cell(18, 1).font = boldfont
sheet.cell(19, 1).value = "Capital:"
sheet.cell(19, 1).font = boldfont
sheet.cell(20, 1).value = subtypes[sofp_list[12][0]]
sheet.cell(20, 2).value = - sofp_list[12][1]
sheet.cell(21, 1).value = subtypes[sofp_list[13][0]]
sheet.cell(21, 2).value = - sofp_list[13][1]
sheet.cell(22, 3).value = "=SUM(B20:B21)"
sheet.cell(24, 1).value = "Non-current liabilities"
sheet.cell(24, 1).font = boldfont
sheet.cell(25, 1).value = subtypes[sofp_list[6][0]]
sheet.cell(25, 2).value = - sofp_list[6][1]
sheet.cell(26, 1).value = subtypes[sofp_list[7][0]]
sheet.cell(26, 2).value = - sofp_list[7][1]
sheet.cell(27, 3).value = "=SUM(B25:B26)"
sheet.cell(29, 1).value = "Current liabilities"
sheet.cell(29, 1).font = boldfont
sheet.cell(30, 1).value = subtypes[sofp_list[8][0]]
sheet.cell(30, 2).value = - sofp_list[8][1]
sheet.cell(31, 1).value = subtypes[sofp_list[9][0]]
sheet.cell(31, 2).value = - sofp_list[9][1]
sheet.cell(32, 1).value = subtypes[sofp_list[10][0]]
sheet.cell(32, 2).value = - sofp_list[10][1]
sheet.cell(33, 1).value = subtypes[sofp_list[11][0]]
sheet.cell(33, 2).value = - sofp_list[11][1]
sheet.cell(34, 3).value = "=SUM(B30:B33)"
sheet.cell(35, 1).value = "Total equity and liabilities"
sheet.cell(35, 3).value = "=SUM(C22:C34)"
sheet.cell(35, 1).font = boldfont
sheet.cell(35, 3).font = boldfont
sheet.column_dimensions['A'].width = 30
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
wb.save(output)
output.seek(0)
conn.close()
return send_file(output, download_name="sofp.xlsx", as_attachment=True)
@app.route("/profile")
def profile():
if "name" not in session:
return redirect("/")
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT date FROM persons WHERE name = ?", (session["name"], ))
date = datetime.strptime(db.fetchall()[0][0], "%Y-%m-%d %H:%M:%S").strftime("%d %B %Y")
db.execute("SELECT currency FROM persons WHERE name = ?", (session["name"], ))
currency = db.fetchall()[0][0]
conn.close()
return render_template("profile.html", date=date, currency=currency)
@app.route("/change-password", methods=["POST", "GET"])
def change_password():
if "name" not in session:
return redirect("/")
person = session["name"]
if request.method == "POST":
old_password = request.form.get("old-password")
new_password = request.form.get("new-password")
confirm_new_password = request.form.get("confirm-new-password")
if not old_password or not new_password or not confirm_new_password:
return render_template("change-password.html", alert_msg="Input field is empty")
if new_password != confirm_new_password:
return render_template("change-password.html", alert_msg="Password not matched")
conn = sqlite3.connect(path.join(root, "data.db"))
db = conn.cursor()
db.execute("SELECT password FROM persons WHERE name=?", (person, ))
password = db.fetchall()[0][0]
if not check_password_hash(password, old_password):
conn.close()
return render_template("change-password.html", alert_msg="Old password is wrong")
db.execute("UPDATE persons SET password = ? WHERE name = ?", (generate_password_hash(new_password), person))
conn.commit()
conn.close()
return render_template("change-password.html", primary_msg="Password changed successfully")
else:
return render_template("change-password.html")
@app.route("/logout")
def logout():
session.pop("name", None)
return redirect("/")
if __name__ == "__main__":
app.run()
| weien0905/drcr | app.py | app.py | py | 34,057 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
10062739523 | from django.http import HttpResponse, JsonResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser, FormParser,MultiPartParser
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import settings
from prapp.models import Userlist
@csrf_exempt
def user(request):
if request.method == "GET":
print("GET SUCCESS 查询")
return HttpResponse("GET SUCCESS")
elif request.method == "POST":
print("POST SUCCESS 添加")
return HttpResponse("POST SUCCESS")
elif request.method == "PUT":
print("PUT SUCCESS 修改")
return HttpResponse("PUT SUCCESS")
elif request.method == "DELETE":
print("DELETE SUCCESS 删除")
return HttpResponse("DELETE SUCCESS")
@method_decorator(csrf_exempt, name="dispatch")
class UserView(View):
def get(self, request, *args, **kwargs):
user_id = kwargs.get("id")
if user_id:
user_val = Userlist.objects.filter(pk=user_id).values("username", "password", "gender").first()
if user_val:
return JsonResponse({
"status": 200,
"message": "查询单个用户成功",
"results": user_val
})
else:
user_list = Userlist.objects.all().values("username", "password", "gender")
print(type(user_list))
if user_list:
return JsonResponse({
"status": 200,
"message": "查询所有用户成功",
"results": list(user_list),
})
return JsonResponse({
"status": 500,
"message": "查询失败",
})
def post(self, request, *args, **kwargs):
username = request.POST.get("username")
pwd = request.POST.get("password")
try:
user_obj = Userlist.objects.create(username=username, password=pwd)
return JsonResponse({
"status": 201,
"message": "创建用户成功",
"results": {"username": user_obj.username, "gender": user_obj.gender}
})
except:
return JsonResponse({
"status": 500,
"message": "创建用户失败",
})
class UserAPIView(APIView):
def get(self, request, *args, **kwargs):
print('123')
user_id = kwargs.get("pk")
# user_val = Userlist.objects.filter(pk=user_id)
user_val = Userlist.objects.get(pk=user_id)
print(request._request.GET)
print(request.GET)
print(request.query_params)
return Response("DRF GET SUCCESS")
def post(self, request, *args, **kwargs):
print(request._request.POST)
print(request.POST)
print(request.data)
return Response("POST GET SUCCESS")
class StudentAPIView(APIView):
# 局部使用解析器
# parser_classes = [MultiPartParser]
def post(self, request, *args, **kwargs):
print("POST方法")
print(request.data)
return Response("POST方法访问成功") | hongdy-python/03drf | prapp/views.py | views.py | py | 3,368 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 23,
"usage_type": "call"
},
{
"api_na... |
24458145308 | import time
import os
import requests
from sense_hat import SenseHat
updateInterval = 300  # update once every 5 minutes (seconds between posts)
# NOTE(review): API credentials are hard-coded in source; consider loading
# them from environment variables instead.
writeAPIkey = 'OY8DUS7XDPAU2KTT'  # write API key for the channel
readAPIkey = 'TXI2BWJFGPTIVELP'  # read API key for the channel
channelID = '2003669'  # channel ID
def sensorData():
    """Return the Sense HAT temperature compensated for CPU heat (float, °C)."""
    sense = SenseHat()
    sense.clear()
    sense_temp = sense.temp
    # command to get the CPU temperature from sysfs (millidegrees Celsius)
    cmd = 'cat /sys/class/thermal/thermal_zone0/temp'
    process = os.popen(cmd).readline().strip()
    # The split('=')/split("'") calls look like defensive parsing of an
    # older `vcgencmd`-style "temp=48.0'C" output -- for plain sysfs digits
    # they are no-ops. TODO confirm the expected raw format.
    cpu_temp = int(process.split('=')[0].split(
        "'")[0]) / 1000  # get the CPU temperature
    # Subtract the CPU's heat bleed into the HAT sensor from the reading.
    temp = sense_temp - (cpu_temp - sense_temp)
    return temp
def getData():
    """Fetch the channel feed (up to 8000 entries) from ThingSpeak and
    return the decoded JSON payload."""
    feed_url = f"https://api.thingspeak.com/channels/{channelID}/feeds.json?api_key={readAPIkey}&results=8000"
    response = requests.get(feed_url)
    # requests.Response is truthy for 2xx/3xx status codes.
    print('GET Succes!' if response else 'Error occurred!')
    return response.json()
def postData():
    """Read the current temperature, compute running stats over the channel
    history, and POST everything to ThingSpeak (fields 1-4)."""
    temp = sensorData()
    feeds = getData()['feeds']
    temps = []
    # `first` stays True only while the channel has no field1 data yet.
    first = True
    for feed in feeds:
        if feed['field1'] != 'None':  # check if the field is empty
            first = False
            temps.append(float(feed['field1']))
    if not first:
        avgTemp = sum(temps) / len(temps)  # calculate the average temperature
        minTemp = min(temps)  # calculate the minimum temperature
        maxTemp = max(temps)  # calculate the maximum temperature
    if first:
        # First ever reading: no stats to report yet.
        fields = '&field1=' + str(temp)
    else:
        stats = '&field2=' + str(avgTemp) + '&field3=' + str(minTemp) + '&field4=' + str(maxTemp)
        fields = '&field1=' + str(temp) + stats
    response = requests.post('https://api.thingspeak.com/update?api_key=' + writeAPIkey + fields)
    if response:
        print('POST Succes!')
    else:
        print('Error occurred!')
# Post a reading every updateInterval seconds. Note the first reading is
# delayed by one full interval because sleep() precedes postData().
if __name__ == '__main__':
    while True:
        time.sleep(updateInterval)
        postData()
| jycal/iot-temps-rpi | temps_monitor.py | temps_monitor.py | py | 2,180 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sense_hat.SenseHat",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_nu... |
26428963144 | import sys
from flask import Flask, render_template, request, jsonify
from clusterization import clusterize
app = Flask(__name__)
# Development settings: reload templates on change, disable static caching.
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.route("/test-action", methods=["POST", "GET"])
def test_btn_handle():
data = request.get_json()
# try:
# data = clusterize(data)
# except BaseException as e:
# print(e)
# pass
data = clusterize(data)
return jsonify(data)
return ""
# No caching at all for API endpoints.
@app.after_request
def add_header(response):
    """Disable client and proxy caching on every response so the browser
    always refetches API results."""
    no_cache_headers = {
        'Cache-Control': 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0',
        'Pragma': 'no-cache',
        'Expires': '-1',
    }
    for header_name, header_value in no_cache_headers.items():
        response.headers[header_name] = header_value
    return response
if __name__ == "__main__":
# app.run(ssl_context='adhoc')
# app.run(host='0.0.0.0', port="8880")
if (len(sys.argv) > 1):
host = sys.argv[1]
port = sys.argv[2]
print(host, port)
app.run(host=host, port=port, debug=True)
else:
app.run(host='0.0.0.0', port="8880", debug=True)
| alt2019/SRW-visualization | flask-proj/app-python-mcs.py | app-python-mcs.py | py | 1,187 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "clusterization.cluste... |
29984268730 | import cv2
import torch
from .flowers_dataset import FlowersDataset
import warnings
warnings.filterwarnings("ignore")
def prepare_data_for_model(path_to_image, transform=None, use_descriptors_as_features=False, features_type='hog'):
    """Load an image from disk and turn it into a single-sample model input.

    path_to_image -- path to the image file
    transform -- optional albumentations-style callable returning a dict
        with the transformed image under 'image' (TODO confirm contract)
    use_descriptors_as_features -- replace pixels with classical descriptors
    features_type -- one of 'hog', 'lbp', 'lbp+hog' (anything else raises
        NotImplementedError when descriptors are requested)

    Returns a torch.FloatTensor with a leading batch dimension of size 1.
    """
    image = cv2.imread(path_to_image)
    # OpenCV loads BGR; convert to the RGB order the pipeline expects.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if transform is not None:
        image = transform(image=image)['image']
    if use_descriptors_as_features:
        # The transform is assumed to have produced a CHW tensor here;
        # convert back to an HWC numpy array for the feature extractors.
        # TODO confirm -- without a transform this permute would fail.
        image = image.permute(1, 2, 0).detach().cpu().numpy()
        if features_type == 'hog':
            image = FlowersDataset._get_hog_features(image)
        elif features_type == 'lbp':
            image = FlowersDataset._get_lbp_features(image)
        elif features_type == 'lbp+hog':
            image = FlowersDataset._get_features(image)
        else:
            raise NotImplementedError()
    image = torch.FloatTensor(image)
    # Add the batch dimension expected by the model.
    return image.unsqueeze(0)
| kryvokhyzha/azure-ml-courses | flowers-azure-ml/src/datasets/__init__.py | __init__.py | py | 943 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
... |
28918133028 | #!/usr/bin/env python
import piexif # need to install by pip install piexif
import exifread # need to install by pip install exifread
from fractions import Fraction
import datetime
import time
# Class used to change image EXIF Data
def set_gps_location(file_name, lat, lng, altitude):
    """Adds GPS position as EXIF metadata

    Keyword arguments:
    file_name -- image file
    lat -- latitude (as float)
    lng -- longitude (as float)
    altitude -- altitude (as float)
    """
    # Convert decimal degrees to (deg, min, sec, hemisphere-ref) tuples.
    lat_deg = to_deg(lat, ["S", "N"])
    lng_deg = to_deg(lng, ["W", "E"])

    # EXIF stores each DMS component as a rational (numerator, denominator).
    exif_lat = (change_to_rational(lat_deg[0]), change_to_rational(lat_deg[1]),
                change_to_rational(lat_deg[2]))
    exif_lng = (change_to_rational(lng_deg[0]), change_to_rational(lng_deg[1]),
                change_to_rational(lng_deg[2]))

    # Create new EXIF GPS data
    gps_ifd = {
        piexif.GPSIFD.GPSAltitudeRef: 1,
        piexif.GPSIFD.GPSAltitude: change_to_rational(round(altitude, 2)),
        piexif.GPSIFD.GPSLatitudeRef: lat_deg[3],
        piexif.GPSIFD.GPSLatitude: exif_lat,
        piexif.GPSIFD.GPSLongitudeRef: lng_deg[3],
        piexif.GPSIFD.GPSLongitude: exif_lng,
    }

    gps_exif = {"GPS": gps_ifd}

    # get original exif data first!
    try:
        exif_data = piexif.load(file_name)
        # update original exif data to include GPS tag
        # (this replaces the whole "GPS" section with the new one)
        exif_data.update(gps_exif)
        exif_bytes = piexif.dump(exif_data)
        # Save EXIF data in image
        piexif.insert(exif_bytes, file_name)
    # NOTE(review): bare except -- presumably intended to cover images with
    # unreadable/absent EXIF; consider narrowing to Exception.
    except:
        exif_bytes = piexif.dump(gps_exif)
        # Save EXIF data in image (GPS section only)
        piexif.insert(exif_bytes, file_name)
def change_to_rational(number):
    """convert a number to rational

    Keyword arguments: number
    return: tuple like (1, 2), (numerator, denominator)
    """
    # Go through str() so floats convert by their decimal representation
    # (Fraction("0.5") == 1/2) rather than their binary expansion.
    as_fraction = Fraction(str(number))
    return as_fraction.numerator, as_fraction.denominator
def to_deg(value, loc):
    """convert decimal coordinates into degrees, minutes and seconds tuple

    Keyword arguments: value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]
    return: tuple like (25, 13, 48.343 ,'N')
    """
    # Pick the hemisphere reference letter by sign; zero gets no letter.
    if value > 0:
        ref = loc[1]
    elif value < 0:
        ref = loc[0]
    else:
        ref = ""
    magnitude = abs(value)
    degrees = int(magnitude)
    fractional_minutes = (magnitude - degrees) * 60
    minutes = int(fractional_minutes)
    seconds = round((fractional_minutes - minutes) * 60, 5)
    return degrees, minutes, seconds, ref
def get_image_timestamp(path):
    """Return the image's EXIF "DateTimeOriginal" as a Unix timestamp
    (float seconds), or 0.0 when the file is missing, unreadable, or has
    no such tag."""
    try:
        with open(path, 'rb') as image_file:  # open image
            # Get Image Datetime Original
            tags = exifread.process_file(image_file, stop_tag="EXIF DateTimeOriginal")
            date_taken = tags["EXIF DateTimeOriginal"]
            # Convert to Seconds (local time, per time.mktime)
            datetime_object = datetime.datetime.strptime(str(date_taken), '%Y:%m:%d %H:%M:%S')
            return time.mktime(datetime_object.timetuple())
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception only.
    except Exception:
        return 0.0
| ronakbhag/ids_coordinates_setter | scripts/image_editor.py | image_editor.py | py | 3,027 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "piexif.GPSIFD",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "piexif.GPSIFD",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "piexif.GPSIFD",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "piexif.GPSIF... |
23395076072 | import argparse
import datetime
import hashlib
import logging
import shutil
import os
import tempfile
import time
import requests
from stoq import Stoq, RequestMeta
from malwaretl_stoq_transformer import transformer
from malware_collector import MalwareCollector
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class URLHausSource(MalwareCollector):
    """Collector that downloads malware samples from the URLHaus feeds.

    Samples are saved under a per-day directory (named by sha256 plus a
    collision counter) and, for the recent feed, handed to stoq for
    scanning.
    """
    # TODO: make a dynamic user-agent that appears up to date. For now, the user-agent process is so messy, skipping
    ua_string = "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240"  # noqa
    # Full historical feed and 30-day recent feed, both plain-text URL lists.
    everything_url = "https://urlhaus.abuse.ch/downloads/text/"
    thirty_day_url = "https://urlhaus.abuse.ch/downloads/text_recent/"

    def __init__(self, stoq: Stoq, metadata: RequestMeta):
        super().__init__()
        # Root directory for collected samples (presumably consumed by the
        # base class's make_day_directory -- TODO confirm).
        self.path: str = os.environ.get("URLHAUS_PATH", "/RAID")
        # URLs already handled in the previous get_recent() pass.
        self.last_urls_collected = set()
        self.stoq = stoq
        self.metadata = metadata

    def get_targets(self, url):
        """Yield each non-comment, non-blank URL from the feed at *url*."""
        response = requests.get(url, timeout=60)
        if response.status_code != 200:
            raise Exception(f"Error getting target list from urlhaus {response.content}")
        for line in response.iter_lines():
            if line:
                line = line.decode("utf-8")
                line = line.strip()
                # Skip blank lines and '#' comment lines in the feed.
                if not line or line.startswith("#"):
                    continue
                yield line

    def get_everything(self):
        """Download every sample from the full historical feed (no scanning)."""
        logger.info("Get everything beginning")
        with tempfile.TemporaryDirectory() as tempdir:
            for url in self.get_targets(self.everything_url):
                try:
                    self.process_target(url, tempdir)
                except Exception:
                    logger.exception(f"error processing url {url}")

    def get_recent(self):
        """Download new samples from the 30-day feed and submit them to stoq.

        URLs seen in the previous pass are skipped via last_urls_collected.
        """
        logger.info("get recent beginning")
        urls_processed = set()
        with tempfile.TemporaryDirectory() as tempdir:
            for url in self.get_targets(self.thirty_day_url):
                if url in self.last_urls_collected:
                    urls_processed.add(url)
                    continue
                try:
                    saved_path = self.process_target(url, tempdir)
                    if saved_path:
                        self.metadata.extra_data['source_url'] = url
                        self.metadata.extra_data['collection_time'] = datetime.datetime.utcnow().isoformat()
                        # NOTE(review): this passes the module-level `stoq`
                        # name, not self.stoq -- confirm that is intended.
                        self.scan_single_file(stoq, self.metadata, saved_path)
                except Exception:
                    logger.exception(f"error processing {url}")
                urls_processed.add(url)
        self.last_urls_collected = urls_processed

    def process_target(self, url, tempdir) -> str:
        """Download *url* into a temp file, then store it under the day
        directory named by its sha256 (plus ___N collision suffix).

        Returns the final saved path, or "" on download failure.
        """
        logger.info(f"getting {url}")
        hash256 = hashlib.sha256()
        with tempfile.NamedTemporaryFile(mode="rb+", dir=tempdir) as outfile:
            headers = {"User-Agent": self.ua_string}
            try:
                response = requests.get(url, headers=headers, timeout=1)
                # Stream in chunks, hashing as we write.
                for chunk in response.iter_content(128*1024):
                    hash256.update(chunk)
                    outfile.write(chunk)
            except requests.exceptions.Timeout:
                logger.info(f"timeout pulling {url}")
                return ""
            except requests.exceptions.ConnectionError:
                logger.info(f"error connecting to {url}")
                return ""
            # NOTE(review): this branch does NOT return, so a partial
            # download may still be saved below -- confirm intended.
            except requests.exceptions.RequestException:
                logger.info(f"Other requests exception connecting to {url}")
            except Exception:
                logger.exception(f"Error raised getting malware url {url}")
                return ""
            hex_name = hash256.hexdigest()
            daydirname = self.make_day_directory()
            # Find a free ___N suffix so re-downloads never overwrite.
            done = False
            counter = 0
            while not done:
                new_full_path = os.path.join(daydirname, hex_name + f"___{counter}")
                if os.path.exists(new_full_path):
                    counter += 1
                    continue
                else:
                    done = True
            with open(new_full_path, "wb") as final_file:
                outfile.seek(0)
                final_file.write(outfile.read())
            return new_full_path

    def cleanup(self):
        # Re-pull the full feed as the cleanup action.
        self.get_everything()

    def get(self):
        """Poll the recent feed forever, pausing 30 minutes between passes."""
        while True:
            self.get_recent()
            # it's theoretically updated every 5 minutes, but that seems abusive. Let's do every 30
            time.sleep(60*30)
if __name__ == "__main__":
# Entry point: wire up the stoq transformer pipeline and start collecting.
parser = argparse.ArgumentParser()
parser.add_argument("-a","--all", help="get all files", action="store_true")
arguments = parser.parse_args()
input_mode = transformer.InputMode.manual
output_mode = transformer.OutputMode.silent
stoq, metadata = transformer.init_vxug(input_mode=input_mode, output_mode=output_mode)
urlhaus = URLHausSource(stoq, metadata)
# -a/--all (or env URLHAUS_GET_ALL) flags a full-feed pass via the
# collector's cleanup hook.
if arguments.all:
urlhaus._cleanup = True
if os.environ.get("URLHAUS_GET_ALL", "False").lower() in ("true", "1", "yes"):
urlhaus._cleanup = True
urlhaus.run()
| g-clef/malware_collector | URLHausSource.py | URLHausSource.py | py | 5,247 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "malware_collector.MalwareCollector",
"line_number": 22,
"usage_type": "name"
},
{
"api_name":... |
16140915477 | import typing
import time
import sys
import logging
import itertools
import numpy as np
from scipy.spatial import distance as sp_dist
import pyautogui as pg
import actionplanner as planner
# pylint: disable=too-few-public-methods
class MouseClicker(planner.MouseClicker):
"""Clicker that drives a virtual board detector instead of a real mouse."""
def __init__(self, bdetector):
"""
:param bdetector: board detector the clicks are delegated to
:type bdetector: virtual.vboard.BoardDetector
"""
super().__init__(None, None, bdetector)
def do_click(self, ploc: typing.Tuple[int, int], leftbutton: bool):
# Left click reveals a cell; right click flags it.
if leftbutton:
self.bd.left_click_cell(ploc)
else:
self.bd.flag_cell(ploc)
# Mirror pyautogui's pacing so timing matches real-mouse behaviour.
time.sleep(pg.PAUSE)
if sys.platform == 'darwin':
time.sleep(pg.DARWIN_CATCH_UP_TIME)
class LBMouseClicker(MouseClicker):
"""
MouseClicker that buffers left clicks till commit.

Right clicks are executed immediately; left clicks accumulate in
``left_bx``/``left_by`` until :meth:`commit` flushes them in one batch.
"""
def __init__(self, bdetector):
"""
:param bdetector: the ``BoardDetector`` to use
"""
super().__init__(bdetector)
# Buffered board coordinates of pending left clicks.
self.left_bx = np.array([], dtype=int)
self.left_by = np.array([], dtype=int)
def click(self, blocs, leftbutton):
"""Queue left clicks and fire right clicks for the given board locations.

:param blocs: pair of arrays (board x coords, board y coords)
:param leftbutton: bool or boolean array selecting left vs right click
"""
bx, by = blocs
# Broadcast a scalar button choice to every location.
if isinstance(leftbutton, bool):
leftbutton = np.array(list(itertools.repeat(leftbutton, len(bx))))
right_blocs = bx[~leftbutton], by[~leftbutton]
if np.any(~leftbutton):
self._l.info('right clicks: %s', list(zip(*right_blocs)))
# Right clicks (flags) are performed right away.
for pxy in zip(*self.bd.boardloc_as_pixelloc(right_blocs)):
self.do_click(pxy, False)
# Left clicks are only buffered here.
self.left_bx = np.append(self.left_bx, bx[leftbutton])
self.left_by = np.append(self.left_by, by[leftbutton])
def commit(self):
"""Flush all buffered left clicks and empty the buffer."""
if self.left_bx.shape[0]:
blocs = self.left_bx, self.left_by
planner.buffered_homo_clicks(self.bd, None, blocs, True,
self.do_click, self._l)
self.left_bx = np.array([], dtype=int)
self.left_by = np.array([], dtype=int)
class NatChrfBMouseClicker(MouseClicker):
"""
``NatChrfBMouseClicker`` using Christofides algorithm to reorder buffered
clicks with natural mouse movement.

Both left and right clicks are buffered; commit() reorders each batch to
shorten the cursor path and sleeps proportionally to the distance moved,
imitating a human operator.
"""
def __init__(self, bdetector):
super().__init__(bdetector)
# Last clicked pixel location; None until the first click.
self.prev_ploc = None
# Seconds of sleep per pixel of cursor travel.
self.unit_dur = 0.07
# Separate buffers for pending left and right clicks (board coords).
self.left_bx = np.array([], dtype=int)
self.left_by = np.array([], dtype=int)
self.right_bx = np.array([], dtype=int)
self.right_by = np.array([], dtype=int)
def do_click(self, ploc: typing.Tuple[int, int], leftbutton: bool):
"""Click, then pause proportionally to the distance from the previous click."""
super().do_click(ploc, leftbutton)
if self.prev_ploc is not None:
pd = sp_dist.euclidean(ploc, self.prev_ploc)
self._l.info('mouse cursor move distance: %f', pd)
dur = self.unit_dur * pd
else:
dur = 0.0
time.sleep(dur)
self.prev_ploc = ploc
def click(self, blocs, leftbutton):
"""Buffer the given board locations; nothing is clicked until commit()."""
bx, by = blocs
if isinstance(leftbutton, bool):
leftbutton = np.array(list(itertools.repeat(leftbutton, len(bx))))
self.left_bx = np.append(self.left_bx, bx[leftbutton])
self.left_by = np.append(self.left_by, by[leftbutton])
self.right_bx = np.append(self.right_bx, bx[~leftbutton])
self.right_by = np.append(self.right_by, by[~leftbutton])
def _commit_button(self, leftbutton: bool):
"""Flush one button's buffer, reordering multi-click batches."""
if leftbutton:
bx, by = self.left_bx, self.left_by
else:
bx, by = self.right_bx, self.right_by
# Reordering only pays off with more than one pending click.
if bx.shape[0] > 1:
blocs = planner.christofide_reorder(self.bd, bx, by,
self.prev_ploc)
else:
blocs = bx, by
if bx.shape[0] > 0:
planner.buffered_homo_clicks(self.bd, None, blocs, leftbutton,
self.do_click, self._l)
# Reset whichever buffer was flushed.
if leftbutton:
self.left_bx = np.array([], dtype=int)
self.left_by = np.array([], dtype=int)
else:
self.right_bx = np.array([], dtype=int)
self.right_by = np.array([], dtype=int)
def commit(self):
"""Flush right clicks first, then left clicks."""
# this order is important
self._commit_button(False)
self._commit_button(True)
| kkew3/sat-minesweeper | virtual/actionplanner.py | actionplanner.py | py | 4,350 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "actionplanner.MouseClicker",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyautogu... |
6791618821 | from rest_framework import serializers
from ...models import ServiceRequest
class ServiceRequestSerializer(serializers.ModelSerializer):
"""Serializer for ServiceRequest submissions.

``name`` and ``email`` are declared explicitly so they are required on
input regardless of how the model defines them.
"""
name = serializers.CharField(required=True)
email = serializers.EmailField(required=True)
class Meta:
model = ServiceRequest
# Fields exposed through the API payload.
fields = [
'name',
'last_name',
'email',
'company',
'position',
'country',
'comment',
'status',
'motivation',
'motivation_other',
'goal',
'employees',
'initiatives',
'book',
]
| tomasgarzon/exo-services | service-exo-core/marketplace/api/serializers/service_request.py | service_request.py | py | 646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 7,
"usage_... |
7132570944 | ### Retrieve only the pieces of document chunks that are relevant to the query because context window of LLMs is limited.
### Different ways to split the documents :
#### Characters, tokens, context aware splitting such Markdown header splitter.
### Parameter needed to be tuned : separated, chunk size, chunk overlap, length function, etc.
from langchain.text_splitter import MarkdownTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.text_splitter import CharacterTextSplitter
def markdown_text_splitter(documents):
    """Split documents along Markdown structure into chunks of at most
    100 characters with no overlap between consecutive chunks."""
    splitter = MarkdownTextSplitter(chunk_size=100, chunk_overlap=0)
    return splitter.split_documents(documents)
def character_splitter(documents):
    """Split documents on newline separators into chunks of up to 1024
    characters, with 5 characters of overlap between neighbours."""
    splitter = CharacterTextSplitter(
        chunk_size=1024,
        chunk_overlap=5,
        separator="\n",
    )
    return splitter.split_documents(documents)
def recursive_character_spliter(documents):
    """Recursively split documents into chunks of up to 512 characters,
    with 5 characters of overlap between neighbours."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=512,
        chunk_overlap=5,
    )
    return splitter.split_documents(documents)
def test_doc_splitter(document_chunks, page_index):
    """Print diagnostics for one chunk of a split document.

    :param document_chunks: sequence of chunk objects exposing
        ``page_content`` and ``metadata`` attributes.
    :param page_index: index of the chunk to display.
    """
    print("Number of document chunks created : ", len(document_chunks))
    print("\n")
    print("*** Testing document chunk at index : " + str(page_index) + " ***")
    print("\n")
    page = document_chunks[page_index]
    print("chunk content :", page.page_content)
    print("\n")
    print("chunk metadata : ", page.metadata)
    print("\n")
    # Fixed typo in the trailer: "resutls" -> "results".
    print("*** End results ****")
| kn-neeraj/NotionKnowledgeAssistant | document_chunks.py | document_chunks.py | py | 1,879 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "langchain.text_splitter.MarkdownTextSplitter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "langchain.text_splitter.CharacterTextSplitter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "langchain.text_splitter.RecursiveCharacterTextSplitter... |
40145189651 | from pyface.qt.QtGui import QLineEdit, QGroupBox, QHBoxLayout, QVBoxLayout
from pyface.qt.QtGui import QWidget
class AtomPropertiesWidget(QWidget):
"""
This widget modifies properties of a specific atom.

It shows a single "Name" line edit; edits are written straight back to
the atom object currently bound via :meth:`switch_to_atom`.
"""
def __init__(self, parent=None):
super(AtomPropertiesWidget, self).__init__(parent)
# Atom currently being edited; None until switch_to_atom() is called.
self.atom = None
self.name_editor = QLineEdit()
self.name_editor_groupbox = QGroupBox("Name")
self.name_editor_groupbox_layout = QHBoxLayout()
self.name_editor_groupbox.setLayout(self.name_editor_groupbox_layout)
self.name_editor_groupbox_layout.addWidget(self.name_editor)
self.name_editor_groupbox_layout.addStretch()
main_layout = QVBoxLayout()
main_layout.addWidget(self.name_editor_groupbox)
main_layout.addStretch()
self.setLayout(main_layout)
self.name_editor.textChanged.connect(self.name_editor_text_changed)
# Disabled until an atom is bound, so edits cannot hit a None atom.
self.setDisabled(True)
def switch_to_atom(self, atom):
"""
This method initializes widget with current state of atom provided
and keeps and eye on specific atom writing changes to atom object
as far as properties are modified in graphical interface
:param atom: an atom in concern
:type atom: engine.atom
:return: Nothing
"""
self.atom = atom
self.name_editor.setText(self.atom.name)
self.setEnabled(True)
def name_editor_text_changed(self, value):
# Propagate every keystroke to the bound atom.
self.atom.name = value
def invalidate(self):
"""Re-read the bound atom's state into the editor."""
self.switch_to_atom(self.atom)
| aloschilov/simple-game-engine | engine_configurator/atom_properties_widget.py | atom_properties_widget.py | py | 1,589 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyface.qt.QtGui.QWidget",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pyface.qt.QtGui.QLineEdit",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui.QGroupBox",
"line_number": 15,
"usage_type": "call"
},
{
"api_na... |
71488499305 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib.widgets import Slider, Button
def dft(x):
    """Discrete Fourier transform of a real sequence.

    :param x: sequence of N samples.
    :return: numpy object array of N dicts with keys 'freq' (bin index),
        'amp' (amplitude, normalised by N) and 'phase' (radians).
    """
    N = len(x)
    print(f'there will be {N} circles')
    bins = []
    for k in range(N):
        re, im = 0.0, 0.0
        for n in range(N):
            phi = (np.pi * 2 * k * n) / N
            re += x[n] * np.cos(phi)
            im -= x[n] * np.sin(phi)
        # Normalise so amplitudes do not scale with the sample count.
        re /= N
        im /= N
        bins.append({'freq': k,
                     'amp': np.sqrt(re ** 2 + im ** 2),
                     'phase': np.arctan2(im, re)})
    # Build the array once at the end: the old per-bin np.append copied the
    # whole array on every iteration (accidental O(N^2)).
    return np.array(bins)
# Build the figure, the epicycle artists and the interactive view controls.
fig = plt.figure()
# circle:
# x = 100*np.cos(np.arange(0, np.pi*2, 0.1))
# y = 100*np.sin(np.arange(0, np.pi*2, 0.1))
# custom:
# Input drawing: two rows of coordinates, shifted so it centres near 0.
data = np.loadtxt('coordinates/fourier_image_coords.txt')
x = data[0]-100
y = data[1]-100
# Sort frequency bins by descending amplitude so the largest circles come first.
amp_sort = lambda el: -el['amp']
fourierY = np.array(sorted(dft(y), key=amp_sort))
fourierX = np.array(sorted(dft(x), key=amp_sort))
num = fourierY.__len__()
# FOR NEXT LINE: lowering the amount of circles
# num = num-5
print(num)
ax = fig.add_subplot(111)
# ax = fig.add_subplot(111, xlim=[-15000, 15000], ylim=[-15000, 15000])
ax.set_aspect('equal')
plt.axis('off')
# Sliders controlling the half-width/half-height of the visible window.
axcolor = 'lightgoldenrodyellow'
axWidth = plt.axes([0.15, 0.08, 0.6, 0.03], facecolor=axcolor)
axHeight = plt.axes([0.15, 0.03, 0.6, 0.03], facecolor=axcolor)
xSlide = Slider(axWidth, 'width', 0, 5000, valinit=213)
ySlide = Slider(axHeight, 'height', 0, 5000, valinit=230)
width = xSlide.val
height = ySlide.val
ax.set_xlim([-width, width])
ax.set_ylim([-height, height])
# line1/line2: arms from the two epicycle stacks; wave: the traced drawing.
line1, = ax.plot([], [], lw=1)
line2, = ax.plot([], [], lw=1)
wave, = ax.plot([], [], lw=1)
wavex = np.array([])
wavey = np.array([])
# One circle artist per frequency bin, for each of the two axes (y-stack
# row 0, x-stack row 1).
mk_circle = lambda: plt.Circle((0, 0), 100, color=(0.1, 0.1, 0.1), fill=False,
linewidth=0.1)
circle_list = np.array([[mk_circle() for _ in range(num)],
[mk_circle() for _ in range(num)]])
for axis in range(2):
fourier = fourierY
if axis:
fourier = fourierX
for i in range(num):
circle_list[axis][i].set_radius(fourier[i]['amp'])
ax.add_artist(circle_list[axis][i])
# Animation clock: advances by dt per frame; one full drawing per 2*pi.
time = 0
dt = np.pi * 2 / fourierY.__len__()
# Anchor points of the two epicycle stacks.
x1 = np.array([-120])
y1 = np.array([-120])
moveX1 = x1[0]
moveY1 = y1[0]
x2 = np.array([120])
y2 = np.array([120])
moveX2 = x2[0]
moveY2 = y2[0]
def update(val):
# Slider callback: resize the visible window around the origin.
width = xSlide.val
height = ySlide.val
ax.set_xlim([-width, width])
ax.set_ylim([-height, height])
resetax = plt.axes([0.85, 0.025, 0.1, 0.04])
button = Button(resetax, 'Update', color=axcolor)
button.on_clicked(update)
def epiCycles(x, y, rotation, fourier, axis):
"""Advance one stack of epicycles and return the arm's joint coordinates.

:param x, y: anchor point of the stack
:param rotation: extra phase applied to every circle (pi/2 for the y stack)
:param fourier: amplitude-sorted DFT bins driving the circles
:param axis: row of circle_list to move (0 = y stack, 1 = x stack)
"""
xarr, yarr = np.array([x]), np.array([y])
for i in range(num):
circle_list[axis][i].set_center((x, y))
freq = fourier[i]['freq']
radius = fourier[i]['amp']
phase = fourier[i]['phase']
# Each circle rotates at its own frequency; its rim is the next centre.
x = (radius * np.cos(freq * time + phase + rotation)) + x
y = (radius * np.sin(freq * time + phase + rotation)) + y
xarr, yarr = np.append(xarr, x), np.append(yarr, y)
return xarr, yarr
def animate(t):
"""Per-frame callback: advance the clock, move both stacks, extend the trace."""
global wavex, wavey, time
global x1, x2, y1, y2
time += dt
# After a full revolution the drawing restarts from scratch.
if time > np.pi * 2:
wavex = np.array([])
wavey = np.array([])
time = 0
x1, y1 = epiCycles(moveX1, moveY1, (np.pi / 2), fourierY, 0)
x2, y2 = epiCycles(moveX2, moveY2, 0, fourierX, 1)
# Newest point goes to the front: x from the x stack, y from the y stack.
wavex = np.append(x2[-1], wavex)
wavey = np.append(y1[-1], wavey)
# Extend each arm so it visibly connects to the traced point.
x2 = np.append(x2, wavex[0])
y1 = np.append(y1, wavey[0])
y2 = np.append(y2, y1[-1])
x1 = np.append(x1, x2[-1])
line1.set_data(x1, y1)
line2.set_data(x2, y2)
wave.set_data(wavex, wavey)
interval = 1
anim = animation.FuncAnimation(fig, animate, interval=interval)
# Toggle saving to disk vs interactive display.
save = True
show = False
# Here: saving the results (requires ffmpeg).
if save:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save('results/letovo_medium_res.gif', writer=writer)
# Here: interactive display.
if show:
plt.show()
| chickysnail/fourier-transform-drawing | Fourier series.py | Fourier series.py | py | 4,080 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 16... |
16413121892 | import subprocess
import requests
from flask import Flask, request, json
from jproperties import Properties
# Node configuration (name, ip, port, frequencies, manager URL, ...).
configs = Properties()
with open('server.properties', 'rb') as config_file:
configs.load(config_file)
app = Flask(__name__)
@app.route("/health")
def healthCheck():
# Liveness probe used by the pager management server.
return "alive", 200
@app.route("/page", methods=['POST'])
def page():
    """Transmit a POCSAG page.

    Expects a JSON body with keys: capcode, msg, frequency, baud, type.
    Returns 200 on success, 500 when the frequency is out of range or the
    transmission fails.
    """
    body = json.loads(request.data)
    if not verifyFreqRange(body["frequency"]):
        return "Not in frequency range of server", 500
    args = ["./pocsag", "-f", "%se6" % body["frequency"], "-t", "1",
            "-r", str(body["baud"])]
    if body["type"] == "NUMERIC":
        args.append("-n")
    # Security fix: the previous implementation %-interpolated untrusted
    # request fields into a bash pipeline (shell=True), allowing command
    # injection. Feed "capcode:msg" to pocsag's stdin directly instead
    # (the old printf produced the same bytes, with no trailing newline).
    page_command = subprocess.run(
        args, input="%s:%s" % (body["capcode"], body["msg"]), text=True)
    if page_command.returncode == 0:
        return "Page sent", 200
    return "Failed to send page", 500
def verifyFreqRange(msgFreq):
    """Return True when msgFreq is a frequency this node may transmit on.

    The "frequencies" property may be "any", a single value, or a
    comma-separated list whose entries are either single values or
    inclusive "low-high" ranges.
    """
    freqs = configs.get("frequencies").data
    if freqs == "any":
        return True
    # Normalise once; JSON clients may send the frequency as a string.
    msgFreq = float(msgFreq)
    for entry in freqs.split(","):
        if "-" in entry:
            low, high = entry.split("-")
            # Bug fix: the old logic returned False as soon as the
            # frequency fell outside ANY listed range, so with several
            # ranges only their intersection was accepted. Membership in
            # a single range is sufficient.
            if float(low) <= msgFreq <= float(high):
                return True
        # Single-value entries can now be mixed with ranges; previously a
        # valueless entry in a range list crashed on split("-")[1].
        elif float(entry) == msgFreq:
            return True
    return False
def registerWithManager():
"""Register this node with the pager management server; exit on connection failure."""
try:
req = requests.post("%s/addnode" % (configs.get("manager").data), json = {"name": configs.get("name").data, "address":"http://%s:%s" % (configs.get("ip").data, configs.get("port").data), "location": configs.get("location").data, "frequencies": configs.get("frequencies").data})
if req.status_code == 200:
print ("Node has been registered with the pager management server.")
else:
# NOTE(review): this ValueError is raised inside the try but only
# RequestException is caught, so a rejected registration propagates
# and crashes startup — confirm that is intentional.
raise ValueError("Node failed to register with pager management server: %s" % req.text)
except requests.exceptions.RequestException:
print ("Node failed to connect to management server.")
exit()
return
# Register at import/startup time, before the Flask app is served.
registerWithManager()
print("Server is now running on port: %s" % configs.get("port").data)
{
"api_name": "jproperties.Properties",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.json",
"... |
11695999761 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/31 22:32
# @Author : TanLHHH
# @Site :
# @File : 前程无忧_测试.py
# @Software: PyCharm
import requests
from lxml import etree
import csv
import time
import random
import re
# CSV output for scraped job postings; header row columns are:
# title,salary,company,companyinfo,companyplace,place,exp,edu,num,time,info
fp = open('51job.csv', 'wt', newline='', encoding='utf-8', errors='ignore')
writer = csv.writer(fp)
'''title,salary,company,companyinfo,companyplace,place,exp,edu,num,time,info'''
writer.writerow(('职位', '薪资', '公司', '公司信息', '公司地址', '地区', '工作经验', '学历', '人数', '时间', '岗位信息'))
def parseInfo(url):
"""Scrape one job-detail page and append a row to the CSV."""
headers = {
'User-Agent': 'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/ADR-1301071546) Presto/2.11.355 Version/12.10'
} # Spoofed User-Agent to avoid the site's anti-scraping checks.
res = requests.get(url, headers=headers)
content = res.content.decode('utf-8')
print("当前正在爬取:",url)
print(content)
res.encoding = 'utf-8'
selector = etree.HTML(res.text)
# XPath extraction of each field from the detail page layout.
title = selector.xpath('//*[@id="pageContent"]/div[1]/div[1]/p/text()')
salary = selector.xpath('//*[@id="pageContent"]/div[1]/p/text()')
company = selector.xpath('//*[@id="pageContent"]/div[2]/a[1]/p/text()')
companyinfo = selector.xpath('//*[@id="pageContent"]/div[2]/a[1]/div/text()')
companyplace = selector.xpath('//*[@id="pageContent"]/div[2]/a[2]/span/text()')
place = selector.xpath('//*[@id="pageContent"]/div[1]/div[1]/em/text()')
exp = selector.xpath('//*[@id="pageContent"]/div[1]/div[2]/span[2]/text()')
edu = selector.xpath('//*[@id="pageContent"]/div[1]/div[2]/span[3]/text()')
num = selector.xpath('//*[@id="pageContent"]/div[1]/div[2]/span[1]/text()')
time = selector.xpath('//*[@id="pageContent"]/div[1]/div[1]/span/text()')
info = selector.xpath('string(//*[@id="pageContent"]/div[3]/div[2]/article)')
# Regex pulls the "job function" category link text out of the raw HTML.
pat = ' <p class="fp"><span class="label">职能类别:</span><a class="el tdn" href="https:.*?">(.*?)</a>'
function1 = re.compile(pat,re.S).findall(content)
print(function1)
# XPath/class selectors above resolve each field from the page's HTML.
info = str(info).strip()
print(title, salary, company, companyinfo, companyplace, place, exp, edu, num, time, info)
writer.writerow((title, salary, company, companyinfo, companyplace, place, exp, edu, num, time, info))
def getUrl(url):
"""Fetch one search-results page and scrape every job link on it."""
print('New page')
res = requests.get(url)
res.encoding = 'GBK'
# print(res.text)
if res.status_code == requests.codes.ok:
selector = etree.HTML(res.text)
urls = selector.xpath('//*[@id="resultList"]/div/p/span/a/@href')
# //*[@id="resultList"]/div/p/span/a
# id selector: collects the detail-page URL of every job <a> tag on this page.
print(urls)
for url in urls:
parseInfo(url)
# Random pause between requests so the crawler is not rate-limited/banned.
time.sleep(random.randrange(1, 4))
if __name__ == '__main__':
key = '心理学'
# The first page's URL format differs from the following pages.
# NOTE(review): "°reefrom" in the URLs below looks like "&degreefrom"
# mangled by an encoding round-trip — verify against the live site.
url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,' + key + ',2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
getUrl(url)
# Following pages [2,100).
# NOTE(review): range(2, 2) is empty, so no further pages are crawled —
# presumably left at 2 for testing; restore the intended upper bound.
urls = [
'https://search.51job.com/list/000000,000000,0000,00,9,99,' + key + ',2,{}.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='.format(
i) for i in range(2, 2)]
for url in urls:
getUrl(url)
| TanLHHHH/Spiders | 测试文件夹/前程无忧_测试.py | 前程无忧_测试.py | py | 3,832 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "csv.writer",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number... |
165914696 | import gym
import numpy as np
import random
from time import sleep
import matplotlib.pyplot as plt
from scipy.special import entr
from utils import clear,calculate_entropy,save_training_progress,early_stop
import config
import time
import cv2
class Agent():
def __init__(self):
"""Build the environment and the Q-table according to the `config` module."""
clear()
"""Setup"""
# env = gym.make("Taxi-v3", render_mode="human").env # Setup the Gym Environment
if config.train_flag:
# self.env = config.env(render_mode='rgb_array') # Setup the Gym Environment
self.env = config.env
else:
# self.env = config.env(render_mode='human') # Setup the Gym Environment
self.env = config.env
self.train_flag = config.train_flag
self.q_matrix = config.q_matrix
self.alpha_matrix = config.alpha_matrix
# env = TaxiEnvCustomized(render_mode='human')
# self.env = TaxiEnvCustomized(render_mode='rgb_array')
self.q_table = np.zeros([self.env.observation_space.n, self.env.action_space.n])
# Training: zero-init for approaches 'normal'/'two', otherwise seed the
# table from the config q_matrix. Evaluation: load a saved table.
if self.train_flag:
if config.approach == 'normal' or config.approach == 'two':
self.q_table = np.zeros([self.env.observation_space.n, self.env.action_space.n])
else:
self.q_table = self.calculate_q_table(self.q_matrix)
# self.q_table = np.random.rand(self.env.observation_space.n, self.env.action_space.n)
else:
self.q_table = np.load(config.q_table_DIR)
# For plotting metrics
self.all_epochs = []
self.all_penalties = []
def calculate_q_table(self,matrix):
""" Initialize the Q table from a per-cell value matrix.

Every (row, col, passenger, destination) state gets the matrix value of
its grid cell copied across all actions.
"""
q_table = self.q_table
no_of_states = self.env.observation_space.n
# Taxi-style layout: 5 passenger locations x 4 destinations per grid cell.
no_of_pass_locations = 5
no_of_dest_locations = 4
no_of_grids = int(no_of_states) / (no_of_pass_locations*no_of_dest_locations)
# Assumes a square grid — TODO confirm for non-square maps.
no_of_rows = no_of_cols = int(np.sqrt(no_of_grids))
no_of_actions = self.env.action_space.n
for row in range(no_of_rows):
for col in range(no_of_cols):
for pass_idx in range(no_of_pass_locations):
for dest_idx in range(no_of_dest_locations):
state = self.env.encode(row, col, pass_idx, dest_idx)
#print(self.q_table[state])
q_table[state,:] = matrix[row][col]
return q_table
def train(self):
"""Train the agent with epsilon-greedy Q-learning.

Returns (q_table, steps, mean rewards, cumulative rewards, entropies,
penalties, info gains), one entry per episode.
"""
# reward_window = []
# entropies = []
episodes_num_steps = []
epsiodes_cumulative_reward = []
epsiodes_mean_reward = []
episodes_entropy = []
episodes_penalty = []
episodes_info_gain = []
for i in range(config.training_episodes):
t0 = time.time()
# Checkpoint progress every 100 episodes.
if i%100==0:
print("episode: ",i)
save_training_progress(self.q_table,episodes_num_steps,epsiodes_mean_reward,epsiodes_cumulative_reward,episodes_entropy,episodes_penalty,episodes_info_gain)
state = self.env.reset()[0] # Reset returns observation state and other info. We only need the state.
done = False
penalties, reward = 0, 0
num_steps = 0
rewards = []
entropy_value = 0
#print(i)
while not done:
num_steps+=1
# Epsilon-greedy action selection: explore with prob. epsilon.
if random.uniform(0, 1) < config.epsilon:
action = self.env.action_space.sample() # Pick a new action for this state.
#action = self.env.action_space.sample(info["action_mask"])
else:
action = np.argmax(self.q_table[state]) # Pick the action which has previously given the highest reward.
next_state, reward, done, truncated,info = self.env.step(action)
rewards.append(reward)
old_value = self.q_table[state, action] # Retrieve old value from the q-table.
next_max = np.max(self.q_table[next_state])
# Approaches 'normal'/'one': plain Q-learning update.
if config.approach == 'normal' or config.approach == 'one':
new_value = (1 - config.alpha) * old_value + config.alpha * (reward + config.gamma * next_max)
else:
# Otherwise shape the reward with the alpha-matrix potential
# difference between the current and next grid cell.
row,col,_,_ = self.env.decode(state)
next_row,next_col,_,_ = self.env.decode(next_state)
alpha_old = self.alpha_matrix[row][col]
alpha_new = self.alpha_matrix[next_row][next_col]
alpha_difference = alpha_new - alpha_old
new_value = (1 - config.alpha) * old_value + config.alpha * ((reward+alpha_difference) + config.gamma * next_max)
#update tue alpha change
# Update q-value for current state.
#alpha_factor = np.log(np.sum(info["action_mask"])/334)
# print(alpha_factor)
#new_value = (1 - config.alpha) * old_value + config.alpha * (alpha_factor+reward + config.gamma * next_max)
# print(new_value)
self.q_table[state, action] = new_value
if reward == -10: # Checks if agent attempted to do an illegal action.
penalties += 1
state = next_state
# Per-episode metrics.
episodes_num_steps.append(num_steps)
epsiodes_cumulative_reward.append(np.sum(rewards))
epsiodes_mean_reward.append(np.average(rewards))
if i==0:
past_intropy=0
else:
past_intropy = episodes_entropy[-1]
t1 =time.time()
# entropy = calculate_entropy(self.q_table)[0]
# NOTE(review): the real entropy computation above is commented out
# (presumably for speed), so entropy and info gain are always 0.
entropy = 0
episodes_info_gain.append(entropy-past_intropy)
episodes_entropy.append(entropy)
episodes_penalty.append(penalties)
# print(time.time()-t0)
# print(time.time()-t1)
# Stop early once the cumulative-reward curve satisfies early_stop().
if early_stop(epsiodes_cumulative_reward):
print(f"early stopped training at episode: {i}")
return self.q_table,episodes_num_steps,epsiodes_mean_reward,epsiodes_cumulative_reward,episodes_entropy,episodes_penalty,episodes_info_gain
# if episodes_info_gain[-1]<0.01:
# print("early stopping")
# return self.q_table,episodes_num_steps,epsiodes_mean_reward,epsiodes_cumulative_reward,episodes_entropy,episodes_penalty,episodes_info_gain
print("Training finished.\n")
return self.q_table,episodes_num_steps,epsiodes_mean_reward,epsiodes_cumulative_reward,episodes_entropy,episodes_penalty,episodes_info_gain
"""Display and evaluate agent's performance after Q-learning."""
def display(self):
"""Run greedy episodes with rendering and print average steps/penalties."""
total_epochs, total_penalties = 0, 0
for _ in range(config.display_episodes):
state,info_ = self.env.reset()
epochs, penalties, reward = 0, 0, 0
done = False
while not done:
# Greedy policy only: always take the best-known action.
action = np.argmax(self.q_table[state])
print(self.q_table[state])
state, reward, done, truncated,info = self.env.step(action)
print(info["action_mask"])
if reward == -10:
penalties += 1
epochs += 1
# clear()
self.env.render()
print(f"Timestep: {epochs}")
print(f"State: {state}")
print(f"Action: {action}")
print(f"Reward: {reward}")
sleep(0.15) # Sleep so the user can see the rendered step.
total_penalties += penalties
total_epochs += epochs
print(f"Results after {config.display_episodes} episodes:")
print(f"Average timesteps per episode: {total_epochs / config.display_episodes}")
print(f"Average penalties per episode: {total_penalties / config.display_episodes}")
| Abdulhady-Feteiha/Information-Digital-Twin | Genesis-Taxi/Agent.py | Agent.py | py | 8,004 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "utils.clear",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.train_flag",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "config.env",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "config.env",
"... |
24846541046 | #!/usr/bin/env python3
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle
from skimage.io import imread_collection, imshow
from skimage.feature import canny
from skimage.color import rgb2gray
from skimage.transform import hough_circle, hough_circle_peaks
def make_sequence(directory, color):
"""Build a 1-D temporal signal for one colour channel of an image sequence.

Loads every frame under train/<directory>/, normalises the chosen channel
to zero mean / unit std, differentiates along the frame axis, and sums
each frame's gradient to a single value per frame.
"""
path = 'train/' + directory + '/'
names = [path + name for name in sorted(os.listdir(path))]
data = imread_collection(names).concatenate()
# Keep only the requested channel (0=R, 1=G, 2=B) as float64.
data = data[:, :, :, color].astype('float64')
data -= np.mean(data)
data /= np.std(data)
# Temporal derivative across frames (axis 0), then spatial sum per frame.
data = np.gradient(data, axis=0)
data = np.sum(data, axis=(1, 2)).ravel()
return data
def draw_images(directory, label):
"""Show three consecutive frames starting at index *label*.

Assumes files are named like train/<dir>/NNN.<dir>.png with zero-padded
3-digit indices — TODO confirm against the dataset layout.
"""
path1 = 'train/' + directory + '/'
path2 = '.' + directory + '.png'
images = imread_collection([
path1 + str(label).zfill(3) + path2,
path1 + str(label + 1).zfill(3) + path2,
path1 + str(label + 2).zfill(3) + path2
]).concatenate().astype('float64')
# Display each frame in its own window (list comp used for side effects).
[(plt.imshow(image.astype('uint8')), plt.show()) for image in images]
def draw_sequence(data, label, title):
    """Plot a 1-D signal with a 12-sample-wide marker rectangle centred on *label*.

    :param data: numpy array (min/max are used to size the rectangle).
    :param label: index of the event to highlight.
    :param title: plot title.
    """
    plt.plot(data)
    plt.title(title)
    patch = Rectangle((label - 6, data.min()), 12,
                      data.max() - data.min(), fill=False, color='black')
    # Bug fix: plt.axes() creates a brand-new Axes on top of the figure
    # instead of returning the one just plotted into; plt.gca() addresses
    # the current Axes (pyplot.axes no longer reuses existing axes).
    plt.gca().add_patch(patch)
    plt.show()
def draw_circles(directory):
"""Visualise Hough-circle detections on each frame of a sequence (from frame 115)."""
path = 'train/' + directory + '/'
names = [path + name for name in sorted(os.listdir(path))]
data = imread_collection(names).concatenate()
for j, frame in enumerate(data[115:]):
# Canny edge map feeds the circular Hough transform.
edge = canny(rgb2gray(frame), sigma=2, low_threshold=0.2)
edge = (edge*255).astype('uint8')
imshow(edge, cmap='gray'); plt.show()
imshow(frame)
# Search radii 10..29 px; keep the 40 strongest peaks.
hspace = hough_circle(edge, range(10, 30))
accums, cx, cy, radii = hough_circle_peaks(
hspace, range(10, 30), total_num_peaks=40)
for x, y, r in zip(cx, cy, radii):
patch = Circle((x, y), r, fill=True, color='black')
# NOTE(review): plt.axes() creates a new Axes per patch rather than
# drawing on the shown image — likely plt.gca() was intended.
plt.axes().add_patch(patch)
plt.show()
print(j)
if __name__=='__main__':
# Build the per-sample 3-channel signal matrix and save it for training.
# NOTE(review): `labels` is created but never filled or saved — dead code?
labels, sequences = list(), list()
count = '/' + str(len(os.listdir('train/'))) + ' samples'
for j, directory in enumerate(sorted(os.listdir('train/'))):
path = 'train/' + directory + '/'
red = make_sequence(directory, 0)
green = make_sequence(directory, 1)
blue = make_sequence(directory, 2)
# One (3, n_frames) array per sample: rows are the R/G/B signals.
sequences.append(np.vstack((red, green, blue)))
print(str(j + 1) + count)
np.save('train.npy', sequences)
np.save('train.npy', sequences) | eugenbobrov/vision-hack | vision.py | vision.py | py | 2,580 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "skimage.io.imread_collection",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"l... |
33147614582 | #!/usr/bin/env python3
# Author: Jan Demel (xdemel01@fit.vutbr.cz)
# This script was made as a part of IPK course
# Don't copy this please...
# My API key: 419db25b1d35c32d9f83525f3bc9931c
import socket
import json
import sys
# Error codes
ERROR_ARGS = -1
ERROR_SOCKET_CONNECTION = -2
ERROR_FORMAT_OUTPUT_DATA = -3
ERROR_API_CALL = -4
# ============== Variables definitions =================
# Usage: script.py <api_key> <city>
if len(sys.argv) != 3:
print("Forbidden use of arguments.\n", file=sys.stderr)
sys.exit(ERROR_ARGS)
if sys.argv[2] == "":
print("Please enter city name correctly\n", file=sys.stderr)
sys.exit(ERROR_ARGS)
api_key = sys.argv[1]
city = sys.argv[2]
host_name = "api.openweathermap.org"
port = 80
# Raw HTTP/1.1 GET request for the OpenWeatherMap current-weather endpoint.
request = "GET /data/2.5/weather?q=" + city + "&APPID=" + api_key + "&units=metric HTTP/1.1\r\nHost: " + host_name + "\r\n\r\n"
# ============== Socket connection and response parsing =================
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
connection.connect((host_name, port))
connection.send(request.encode())
except:
# NOTE(review): a bare except also swallows KeyboardInterrupt etc.;
# `except OSError:` would be the targeted form here.
print("Caught exception socket.error", file=sys.stderr)
sys.exit(ERROR_SOCKET_CONNECTION)
# Split headers from body at the first blank line.
# NOTE(review): a single recv(4096) may truncate larger responses and
# assumes the body is not chunk-encoded — confirm this suffices for the API.
(response_headers, response_json) = connection.recv(4096).decode().split("\r\n\r\n")
response_json = json.loads(response_json)
# ============== Handling errors =================
if int(response_json["cod"]) != 200:
print(response_json["message"])
sys.exit(ERROR_API_CALL)
# ============== Printing response =================
try:
print(response_json["name"])
print(response_json["weather"][0]["description"])
print("temp:" + str(response_json["main"]["temp"]) + "°C")
print("humidity:" + str(response_json["main"]["humidity"]) + "%")
print("pressure:" + str(response_json["main"]["pressure"]) + " hPa")
print("wind-speed:" + str(response_json["wind"]["speed"]) + " km/h")
print("wind-deg:" + (str(response_json["wind"]["deg"]) if ("deg" in response_json["wind"]) else "-"))
except:
print("Error with formating output data...", file=sys.stderr)
sys.exit(ERROR_FORMAT_OUTPUT_DATA)
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": ... |
33541610808 | #!/usr/bin/python
import os
import sys
import subprocess
import datetime
import math
try:
# Change the next line if your config folder is not $HOME/.config
config_directory = f"{os.environ['HOME']}/.config"
# If $HOME isn't set, os.environ['HOME'] will cause an error
except KeyError:
print("The environment variable $HOME is not set.")
print("You need to change the config_directory variable.")
print("See README.md on github (https://github.com/michaelskyba/kvrg-avg) for more information.")
sys.exit(1)
# If config_directory doesn't exist, print an error an exit
if not os.path.isdir(config_directory):
print(f"The config directory that is set ({config_directory}) does not exist.")
print("You need to change the config_directory variable.")
print("See README.md on github (https://github.com/michaelskyba/kvrg-avg) for more information.")
sys.exit(1)
# If config_director/avg/trackers does not exist, create it
# mkdir without -p will raise an error if config_directory/avg doesn't exist first
if not os.path.isdir("f{config_directory}/avg/trackers"):
subprocess.run(["mkdir", "-p", f"{config_directory}/avg/trackers"])
# config for either average (default) or ETA for date trackers in avg list
date_list_ETA_set = False
if os.path.isfile(f"{config_directory}/avg/config"):
with open(f"{config_directory}/avg/config", "r") as config_file:
if "ETA\n" in config_file.readlines():
date_list_ETA_set = True
# Starts checking for command-line arguments
# You ran "avg" without any extra arguments, or you ran "avg list"
# running something like "avg list foo bar" is the same
def _humanize_seconds(seconds):
    """Render an interval in seconds at the coarsest two-unit granularity.

    Mirrors the original cascade: bare seconds below one minute, then
    "X <unit> and Y <smaller unit>" pairs up to years (months = 30 days,
    years = 12 months).
    """
    minutes = math.floor(seconds / 60)
    if minutes == 0:
        return str(seconds)
    hours = math.floor(minutes / 60)
    if hours == 0:
        return f"{minutes} minutes and {seconds - minutes * 60} seconds"
    days = math.floor(hours / 24)
    if days == 0:
        return f"{hours} hours and {minutes - hours * 60} minutes"
    months = math.floor(days / 30)
    if months == 0:
        return f"{days} days and {hours - days * 24} hours"
    years = math.floor(months / 12)
    if years == 0:
        return f"{months} months and {days - months * 30} days"
    return f"{years} years and {months - years * 12} months"
def _stamp_to_datetime(stamp):
    """Parse a 'YYYY/MM/DD/HH/MM' stamp (extra trailing chars ignored) into a datetime."""
    return datetime.datetime(int(stamp[0:4]), int(stamp[5:7]), int(stamp[8:10]),
                             int(stamp[11:13]), int(stamp[14:16]))
if len(sys.argv) == 1 or sys.argv[1] == "list":
    # Get the tracker names by looking in config/avg/trackers
    tracker_names = os.listdir(f"{config_directory}/avg/trackers")
    # Alert the user if they have no trackers
    if not tracker_names:
        print("You have no trackers.")
        print("Use 'avg create \"<name>\" [\"<description>\"]' to create one.")
        sys.exit(1)
    # Print the tracker names and their average values
    for tracker in tracker_names:
        with open(f"{config_directory}/avg/trackers/{tracker}", "r") as tracker_file:
            tracker_lines = tracker_file.readlines()
        if len(tracker_lines) > 2 and tracker_lines[2].strip() == "date":
            if not date_list_ETA_set:
                # Default view: show the average interval in human-readable units
                print(f"{tracker} - {_humanize_seconds(int(tracker_lines[1].strip()))}")
            elif len(tracker_lines) > 4:
                # ETA view: last recorded date plus the stored average interval
                latest_date = _stamp_to_datetime(tracker_lines[-1].strip())
                average = datetime.timedelta(seconds=int(tracker_lines[1].strip()))
                print(f"{tracker} - {latest_date + average}")
            else:
                # not enough intervals for an ETA
                print(f"{tracker} - 0")
        else:
            # Normal tracker: line 2 already holds the rounded average
            print(f"{tracker} - {tracker_lines[1].strip()}")
    sys.exit(0)
# You ran "avg create ..."
# Tracker file format: line 1 = description, line 2 = running average,
# optional line 3 = the literal word "date" (marks a date tracker);
# entries follow on later lines.
if sys.argv[1] == "create":
    # If user runs "avg create"
    if len(sys.argv) == 2:
        print("You need a <name> argument.")
        sys.exit(1)
    # Check if config/avg/trackers contains a tracker called <name>
    if sys.argv[2] in os.listdir(f"{config_directory}/avg/trackers"):
        print(f"Tracker with name '{sys.argv[2]}' already exists.")
        sys.exit(1)
    # Create a file with name <name> in config/avg/trackers
    with open(f"{config_directory}/avg/trackers/{sys.argv[2]}", "w") as tracker_file:
        # Saves the description if the user provided one
        # the description is the fourth argument, so the length has to be > 3 (>=4)
        # and sys.argv[3] will get the fourth argument (3rd when not including "avg")
        if len(sys.argv) > 3 and sys.argv[3] != "date":
            description = sys.argv[3]
        # Date tracker with description
        elif len(sys.argv) > 4:
            description = sys.argv[4]
        # No description
        else:
            description = "This tracker does not have a description."
        # avg create ... date
        if len(sys.argv) > 3 and sys.argv[3] == "date":
            # sys.argv[3] is the literal "date" here; writing it as line 3
            # is what marks the file as a date tracker
            tracker_file.write(f"{description}\n0\n{sys.argv[3]}\n")
        else:
            tracker_file.write(f"{description}\n0\n")
    sys.exit(0)
# You ran "avg delete ..."
if sys.argv[1] == "delete":
    # If user runs "avg delete"
    if len(sys.argv) == 2:
        print("You need a <name> argument.")
        sys.exit(1)
    # Removes the tracker file
    try:
        os.remove(f"{config_directory}/avg/trackers/{sys.argv[2]}")
    # Tracker does not exist
    except FileNotFoundError:
        print(f"There is no such tracker '{sys.argv[2]}'.")
        sys.exit(1)
    sys.exit(0)
# You ran "avg push ..."
def _parse_date_parts(argument):
    """Split a 'YYYY/MM/DD/HH/MM' stamp into its five raw substrings."""
    return [argument[0:4], argument[5:7], argument[8:10], argument[11:13], argument[14:16]]
def _stamp_to_datetime(stamp):
    """Parse a 'YYYY/MM/DD/HH/MM' stamp (extra trailing chars ignored) into a datetime."""
    parts = [int(part) for part in _parse_date_parts(stamp)]
    return datetime.datetime(parts[0], parts[1], parts[2], parts[3], parts[4])
if sys.argv[1] == "push":
    # If user runs "avg push"
    if len(sys.argv) == 2:
        print("You need a <name> and a <one or more values> argument.")
        sys.exit(1)
    # Check if config/avg/trackers contains a tracker called <name>
    if sys.argv[2] not in os.listdir(f"{config_directory}/avg/trackers"):
        print(f"Tracker with name '{sys.argv[2]}' does not exist.")
        sys.exit(1)
    # If user runs "avg push <name>"
    if len(sys.argv) == 3:
        print("You need a <one or more values> argument.")
        sys.exit(1)
    # Check type of tracker (line 3 == "date" marks a date tracker)
    with open(f"{config_directory}/avg/trackers/{sys.argv[2]}", "r") as tracker_file:
        tracker_lines = tracker_file.readlines()
    if len(tracker_lines) > 2 and tracker_lines[2].strip() == "date":
        tracker_type = "date"
    else:
        tracker_type = "normal"
    # Validate every value BEFORE writing anything, so one bad argument
    # cannot leave the tracker file partially updated.
    if tracker_type == "normal":
        # Every value must be a number
        for argument in sys.argv[3:]:
            try:
                float(argument)
            except ValueError:
                print(f"Value '{argument}' is not a number.")
                sys.exit(1)
    else:
        # Every value must be "now" or a 'YYYY/MM/DD/HH/MM' stamp
        for argument in sys.argv[3:]:
            # Skip it if they type "now"
            if argument == "now":
                continue
            # Right length and slashes in the right places
            if len(argument) != 16 or any(argument[i] != "/" for i in [4, 7, 10, 13]):
                print(f"Value '{argument}' is invalid.")
                sys.exit(1)
            # Every date component must be an integer
            date = _parse_date_parts(argument)
            for date_index, value in enumerate(date):
                try:
                    date[date_index] = int(value)
                except ValueError:
                    print(f"Value '{value}' is not a number.")
                    sys.exit(1)
            # Test if user's date is a real date
            try:
                datetime.datetime(date[0], date[1], date[2], date[3], date[4])
            except ValueError:
                print(f"Value '{argument}' is invalid.")
                sys.exit(1)
    # Append the validated values to the tracker file (opened once)
    with open(f"{config_directory}/avg/trackers/{sys.argv[2]}", "a") as tracker_file:
        for argument in sys.argv[3:]:
            if argument == "now":
                # "now" becomes the current date, fields zero-padded to two digits
                cdate = datetime.datetime.now()
                passed_argument = f"{cdate.year}/{str(cdate.month).zfill(2)}/{str(cdate.day).zfill(2)}/{str(cdate.hour).zfill(2)}/{str(cdate.minute).zfill(2)}"
            else:
                passed_argument = argument
            tracker_file.write(f"{passed_argument}\n")
    # Recompute the stored average (line 2 of the file)
    with open(f"{config_directory}/avg/trackers/{sys.argv[2]}", "r") as tracker_file:
        new_tracker_file_lines = tracker_file.readlines()
    tracker_file_num_of_lines = len(new_tracker_file_lines)
    if tracker_type == "normal":
        # Entries start on line 3; lines 1-2 (description, average) are not entries
        value_sum = sum(float(value) for value in new_tracker_file_lines[2:])
        # Round to two decimal places, exactly as before:
        # round(sum * 100 / n_entries) / 100
        average = round(value_sum * 100 / (tracker_file_num_of_lines - 2)) / 100
        new_tracker_file_lines[1] = f"{average}\n"
    elif tracker_file_num_of_lines > 4:
        # Date tracker with at least two entries (3 header lines + entries):
        # average the gaps, in seconds, between consecutive date stamps.
        # With a single entry there is no interval, so the average is left as-is.
        entries = [_stamp_to_datetime(line) for line in new_tracker_file_lines[3:]]
        intervals = [(later - earlier).total_seconds()
                     for earlier, later in zip(entries, entries[1:])]
        average = round(sum(intervals) / len(intervals))
        new_tracker_file_lines[1] = f"{average}\n"
    # Write the updated average back
    with open(f"{config_directory}/avg/trackers/{sys.argv[2]}", "w") as tracker_file:
        tracker_file.writelines(new_tracker_file_lines)
    sys.exit(0)
# You ran "avg get ..."
if sys.argv[1] == "get":
    # "avg get" with no further arguments
    if len(sys.argv) == 2:
        print("You need an <attribute> argument and a <name> argument.")
        sys.exit(1)
    # The attribute must be one of the four supported ones
    if sys.argv[2] not in ["description", "average", "type", "ETA"]:
        print(f"No such attribute, '{sys.argv[2]}'.")
        sys.exit(1)
    # "avg get <attribute>" with no tracker name
    if len(sys.argv) == 3:
        print("You need a <name> argument.")
        sys.exit(1)
    # The tracker has to exist
    if sys.argv[3] not in os.listdir(f"{config_directory}/avg/trackers"):
        print(f"Tracker with name '{sys.argv[3]}' does not exist.")
        sys.exit(1)
    # Load the tracker file once and answer from its lines
    with open(f"{config_directory}/avg/trackers/{sys.argv[3]}", "r") as tracker_file:
        tracker_lines = tracker_file.readlines()
    attribute = sys.argv[2]
    if attribute == "description":
        # Line 1 is the description
        print(tracker_lines[0].strip())
    if attribute == "average":
        # Line 2 is the stored average
        print(tracker_lines[1].strip())
    if attribute == "type":
        is_date_tracker = len(tracker_lines) > 2 and tracker_lines[2].strip() == "date"
        print("date" if is_date_tracker else "normal")
    if attribute == "ETA":
        if len(tracker_lines) > 4:
            # ETA = last recorded date + the stored average interval
            stamp = tracker_lines[-1].strip()
            fields = [int(stamp[0:4]), int(stamp[5:7]), int(stamp[8:10]),
                      int(stamp[11:13]), int(stamp[14:16])]
            latest_date = datetime.datetime(fields[0], fields[1], fields[2], fields[3], fields[4])
            average = datetime.timedelta(seconds=int(tracker_lines[1].strip()))
            print(latest_date + average)
        else:
            # No intervals recorded yet
            print("0")
    sys.exit(0)
# You ran "avg info ..."
def _humanize_seconds(seconds):
    """Render an interval in seconds at the coarsest two-unit granularity.

    Mirrors the original print_average cascade: bare seconds below one
    minute, then "X <unit> and Y <smaller unit>" pairs up to years
    (months = 30 days, years = 12 months).
    """
    minutes = math.floor(seconds / 60)
    if minutes == 0:
        return str(seconds)
    hours = math.floor(minutes / 60)
    if hours == 0:
        return f"{minutes} minutes and {seconds - minutes * 60} seconds"
    days = math.floor(hours / 24)
    if days == 0:
        return f"{hours} hours and {minutes - hours * 60} minutes"
    months = math.floor(days / 30)
    if months == 0:
        return f"{days} days and {hours - days * 24} hours"
    years = math.floor(months / 12)
    if years == 0:
        return f"{months} months and {days - months * 30} days"
    return f"{years} years and {months - years * 12} months"
if sys.argv[1] == "info":
    # If user runs "avg info"
    if len(sys.argv) == 2:
        print("You need a <name> argument.")
        sys.exit(1)
    # Checks if user gave a valid tracker name
    if sys.argv[2] not in os.listdir(f"{config_directory}/avg/trackers"):
        print(f"Tracker with name '{sys.argv[2]}' does not exist.")
        sys.exit(1)
    # Lists attributes
    with open(f"{config_directory}/avg/trackers/{sys.argv[2]}", "r") as tracker_file:
        tracker_lines = tracker_file.readlines()
    print(f"Name: {sys.argv[2]}")
    print(f"Description: {tracker_lines[0].strip()}")
    if len(tracker_lines) > 2 and tracker_lines[2].strip() == "date":
        if len(tracker_lines) > 4:
            # Average interval, human-readable
            print(f"Average: {_humanize_seconds(int(tracker_lines[1].strip()))}")
            # ETA = last recorded date + the stored average interval
            stamp = tracker_lines[-1].strip()
            latest_date = datetime.datetime(int(stamp[0:4]), int(stamp[5:7]), int(stamp[8:10]),
                                            int(stamp[11:13]), int(stamp[14:16]))
            average = datetime.timedelta(seconds=int(tracker_lines[1].strip()))
            print(f"ETA: {latest_date + average}")
        else:
            # No intervals
            print("Average: 0")
            print("ETA: 0")
        # type
        print("This tracker is a date tracker.")
    else:
        print(f"Average: {tracker_lines[1].strip()}")
        print("This is a normal tracker.")
    sys.exit(0)
# Invalid command
print(f"'{sys.argv[1]}' is not a kvrg-avg command. See the README for a list of valid commands.")
sys.exit(1)
| michaelskyba/kvrg-avg | main.py | main.py | py | 21,826 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
15826823002 | import requests
# Fetch the WSDL description of the Caberj WS005 SOAP service.
url = "https://www.caberj.com.br/wspls/WS005.apw"
# The ?WSDL= query parameter asks the endpoint for its service definition
querystring = {"WSDL":""}
payload = ""
# NOTE(review): session cookie and Basic credentials are hard-coded here --
# they belong in environment variables or a secrets store, not in source.
headers = {
    "cookie": "SESSIONID=36c9c80f7d7d823affe2b4d5d3522477",
    "Authorization": "Basic cmVzdHVzZXI6UEBzc3cwcmQyMDIz"
}
# GET with an (empty) body, mirroring the generated client snippet
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
print(response.text) | msullivancm/ProjetosComAte10LinhasDeCodigoPython | apiRestMosiaBkp/requestWS005.py | requestWS005.py | py | 353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.request",
"line_number": 13,
"usage_type": "call"
}
] |
37508516152 | from flask import Blueprint, request
from .connection import client
import datetime
now = datetime.datetime.utcnow()
user_route = Blueprint('user_route', __name__)
# Connect to collection
db = client.swiper
collection = db.users
# Post/get route accessor
@user_route.route('/', methods=['GET', 'POST'])
def userCreate():
    """Create a user document on POST; greet on GET.

    POST expects a JSON body with at least a unique string 'username'
    and an 'id' field (copied into 'userId').
    """
    if request.method == 'POST':
        # Check the body is complete and the username is not already taken
        if 'username' in request.json and type(request.json['username']) == str and collection.find({'username': request.json['username']}).count() == 0:
            userObject = request.json
            userObject['strikes'] = 0
            # NOTE(review): 'id' is read without being validated above --
            # a POST without it raises KeyError; confirm clients always send it.
            userObject['userId'] = request.json['id']
            # BUG FIX: the timestamp used to come from a module-level `now`
            # evaluated once at import time, so every user got the server's
            # start date. Take the current time per request instead.
            userObject['timestamp'] = datetime.datetime.utcnow().strftime('%Y-%m-%d')
            collection.insert_one(userObject)
            return 'success', 201
        else:
            return 'POST BODY NOT COMPLETE', 400
    else:
        return 'Welcome to the post user'
| acedinstitute/swipingApi | routes/user.py | user.py | py | 939 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "connecti... |
35676671945 | """
*Element Shape*
"""
from dataclasses import dataclass
from strism._geoshape import Pixel
__all__ = ["ElementShape"]
@dataclass
class ElementShape:
    """A 2-D element size expressed in pixels."""
    # Horizontal extent in pixels
    width: Pixel
    # Vertical extent in pixels
    height: Pixel
    @classmethod
    def create(
        cls,
        width: int,
        height: int,
    ) -> "ElementShape":
        """Build an ElementShape from raw integer pixel counts."""
        return cls(
            Pixel(width),
            Pixel(height),
        )
| jedhsu/text | text/_shape/_shape.py | _shape.py | py | 375 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "strism._geoshape.Pixel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "strism._geoshape.Pixel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "strism._geoshape.Pixel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "... |
14566329358 | from django.db import models, migrations
import cover.models
class Migration(migrations.Migration):
    # Initial migration: no prerequisites.
    dependencies = [
    ]
    # Creates the cover.Image model backing table.
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('author', models.CharField(max_length=255, verbose_name='author')),
                ('license_name', models.CharField(max_length=255, verbose_name='license name')),
                ('license_url', models.URLField(max_length=255, verbose_name='license URL', blank=True)),
                ('source_url', models.URLField(null=True, verbose_name='source URL', blank=True)),
                ('download_url', models.URLField(unique=True, null=True, verbose_name='image download URL', blank=True)),
                # OverwriteStorage replaces an existing file of the same name on upload
                ('file', models.ImageField(upload_to=b'cover/image', storage=cover.models.OverwriteStorage(), verbose_name='file')),
            ],
            options={
                'verbose_name': 'cover image',
                'verbose_name_plural': 'cover images',
            },
            bases=(models.Model,),
        ),
    ]
| fnp/redakcja | src/cover/migrations/0001_initial.py | 0001_initial.py | py | 1,275 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 11,
"usage_type": "call"
},
... |
27976899334 | import tkinter as tk
import tkinter.ttk as ttk
from time import sleep
from PIL import ImageTk, Image
import sys
import Initialise
import Manual
import Settings
class Controls():
    """Top-level control strip: banner, four mode buttons and the default
    info panel, each button raising its matching sub-panel on click."""
    # Base directory for all UI image assets.
    # NOTE(review): absolute, user-specific path -- consider making this
    # configurable instead of hard-coding C:\Users\Finlay.
    IMAGE_DIR = "C:\\Users\\Finlay\\Documents\\Images"
    def _load_photo(self, filename):
        """Load an image asset from IMAGE_DIR and wrap it for Tk widgets."""
        return ImageTk.PhotoImage(Image.open(f"{self.IMAGE_DIR}\\{filename}"))
    def __init__(self, background, initialise_panel, manual_panel, settings_panel, tileprint_panel, state):
        self.background = background
        self.initialise_panel = initialise_panel
        self.manual_panel = manual_panel
        self.settings_panel = settings_panel
        self.tileprint_panel = tileprint_panel
        self.state = state
        # Define control flags
        self.flag_homeax1 = False
        self.flag_homeax2 = False
        self.flag_homeax3 = False
        self.flag_homeax4 = False
        self.flag_printing = False
        self.flag_magazineinitialised = False
        self.flag_paletteinitialised = False
        self.flag_fileloaded = False
        self.flag_printpause = False
        # Create blank default info panel
        # (PhotoImages are kept on self so Tk's reference isn't garbage-collected)
        self.background_image_tk = self._load_photo("480x315_BLACK.jpg")
        self.info_controls = tk.Label(background, image=self.background_image_tk, width=480, height=315)
        self.info_controls.place(x=130, y=135)
        # Load the artwork for the four main buttons and the header banner
        self.settings_image_tk = self._load_photo("SETTINGS_BUTTON.jpg")
        self.manual_image_tk = self._load_photo("MANUAL_BUTTON.jpg")
        self.print_image_tk = self._load_photo("PRINT_BUTTON.jpg")
        self.initialise_image_tk = self._load_photo("INITIALISE_BUTTON.jpg")
        self.PxlRT_image_tk = self._load_photo("BANNER.jpg")
        # Add logo and banner to background
        self.PxlRT_icon = tk.Label(self.background, image=self.PxlRT_image_tk, bd=0, highlightthickness=0,
                                   relief=tk.RAISED)
        self.PxlRT_icon.place(x=0, y=10)
        # Create 4 main buttons, each wired to a panel-raising callback
        self.button_settings = tk.Button(self.background, image=self.settings_image_tk, bd=1,
                                         command=self.callback_settings,
                                         highlightthickness=0, relief=tk.RAISED)
        self.button_settings.place(x=30, y=375)
        self.button_manual = tk.Button(self.background, image=self.manual_image_tk, bd=1, highlightthickness=0,
                                       relief=tk.RAISED,
                                       command=self.callback_manual)
        self.button_manual.place(x=30, y=295)
        self.button_print = tk.Button(self.background, image=self.print_image_tk, bd=1, highlightthickness=0,
                                      command=self.callback_print,
                                      relief=tk.RAISED)
        self.button_print.place(x=30, y=215)
        self.button_initialise = tk.Button(self.background, image=self.initialise_image_tk, bd=1,
                                           command=self.callback_initialise,
                                           highlightthickness=0, relief=tk.RAISED)
        self.button_initialise.place(x=30, y=135)
        # Bring initialisation info panel to front as default start display
        initialise_panel.info_initialise.lift(aboveThis=None)
    def callback_manual(self):
        """Raise the manual-control info panel."""
        self.manual_panel.info_manual.lift(aboveThis=None)
    def callback_print(self):
        """Raise the tile-print info panel."""
        self.tileprint_panel.info_tileprint.lift(aboveThis=None)
    def callback_initialise(self):
        """Raise the initialisation info panel."""
        self.initialise_panel.info_initialise.lift(aboveThis=None)
    def callback_settings(self):
        """Raise the settings info panel."""
        self.settings_panel.info_settings.lift(aboveThis=None)
| InfiniteAnswer/Robot_GUI_V2 | Controls.py | Controls.py | py | 4,308 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
2006853859 |
import json
import datetime
from . import db
class View(db.Model):
    """Persisted 'view' definition: selecter/devicer code plus data and settings."""
    __tablename__ = 'devicer_views'
    view_key = db.Column(db.String(20), primary_key=True)
    view_name = db.Column(db.String)
    # BUG FIX: the default used to be datetime.datetime.now() -- evaluated
    # once at import time, stamping every row with the process start time.
    # Passing the callable makes SQLAlchemy evaluate it per insert.
    view_saved = db.Column(db.DateTime, default=datetime.datetime.now)
    selecter_mode = db.Column(db.String)
    selecter_code = db.Column(db.Text())
    devicer_code = db.Column(db.Text())
    data = db.Column(db.Text())
    lock = db.Column(db.Boolean, default=False)
    lock_password = db.Column(db.String)
    settings = db.Column(db.Text())
    crontab_enabled = db.Column(db.Boolean, default=False)
    crontab = db.Column(db.String, default='0 0 * * 1')
    def __init__(self, view_key, view_name=None, data=None, settings=None, **kwargs):
        """Create a view; dict-valued data/settings are stored as JSON text.

        view_name falls back to view_key. Extra keyword arguments are
        applied only when they name an existing attribute.
        """
        self.view_key = view_key
        self.view_name = view_name
        if self.view_name is None:
            self.view_name = view_key
        if data is not None:
            self.data = json.dumps(data, ensure_ascii=False)
        if settings is not None:
            self.settings = json.dumps(settings, ensure_ascii=False)
        for attr, value in kwargs.items():
            if hasattr(self, attr):
                setattr(self, attr, value)
    def to_dict(self):
        """Serialize the row for the API, decoding the stored JSON blobs."""
        return {
            'view_key': self.view_key,
            'view_name': self.view_name,
            'view_saved': self.view_saved.strftime('%Y/%m/%d %H:%M:%S'),
            'data': json.loads(self.data) if self.data else None,
            'settings': json.loads(self.settings) if self.settings else None,
            'lock': self.lock,
            'crontab_enabled': self.crontab_enabled,
            'crontab': self.crontab
        }
    @staticmethod
    def before_update_listener(mapper, connection, target):
        """Refresh view_saved whenever a row is updated."""
        target.view_saved = datetime.datetime.now()
db.event.listen(View, 'before_update', View.before_update_listener)
| rleschuk/devicer | app/models.py | models.py | py | 1,944 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.dumps",
... |
15826823002 | import logging
import scipy.cluster.hierarchy as sch
import sklearn.cluster as sc
# to map the user labels
# - user_input_df: pass in original user input dataframe, return changed user input dataframe
# - sp2en: change Spanish to English
def map_labels_sp2en(user_input_df):
    """Translate the Spanish label strings in the dataframe to English."""
    spanish_to_english = {
        'revisado_bike': 'test ride with bike',
        'placas_de carro': 'car plates',
        'aseguranza': 'insurance',
        'iglesia': 'church',
        'curso': 'course',
        'mi_hija recién aliviada': 'my daughter just had a new baby',
        'servicio_comunitario': 'community service',
        'pago_de aseguranza': 'insurance payment',
        'grupo_comunitario': 'community group',
        'caminata_comunitaria': 'community walk',
    }
    return user_input_df.replace(spanish_to_english)
# to map purposes and replaced mode in user inputs
# - cvt_pur_mo: convert purposes and replaced mode
def map_labels_purpose(user_input_df):
    """Normalize purpose labels to their canonical names."""
    purpose_aliases = {
        'course': 'school',
        'work_- lunch break': 'lunch_break',
        'on_the way home': 'home',
        'insurance_payment': 'insurance',
    }
    return user_input_df.replace(purpose_aliases)
def map_labels_mode(user_input_df):
    """Replace 'same_mode' in replaced_mode with the trip's mode_confirm.

    BUG FIX: the original assigned through user_input_df.iloc[a]["replaced_mode"],
    i.e. chained indexing on a row copy, which pandas does not guarantee to
    write back to the dataframe (SettingWithCopy; a guaranteed no-op under
    copy-on-write). A single .loc mask assignment is correct and vectorized.
    """
    if "replaced_mode" in user_input_df.columns:
        same_mode_mask = user_input_df.replaced_mode == "same_mode"
        if same_mode_mask.any():
            logging.debug("The following rows will be changed %s" %
                          user_input_df[same_mode_mask].index)
            user_input_df.loc[same_mode_mask, "replaced_mode"] = \
                user_input_df.loc[same_mode_mask, "mode_confirm"]
            logging.debug("Finished changing all rows")
    else:
        logging.info("map_labels_mode: no replaced mode column found, early return")
    return user_input_df
# this function will change Spanish to English, convert purposes, and convert modes
def map_labels(user_input_df):
    """Run the full label-normalization pipeline on the dataframe.

    The Spanish -> English pass MUST run before the purpose and mode
    mappings, which assume English labels.
    """
    for mapping_step in (map_labels_sp2en, map_labels_purpose, map_labels_mode):
        user_input_df = mapping_step(user_input_df)
    return user_input_df
# use hierarchical clustering to get labels of the second round
def get_second_labels(x, method, low, dist_pct):
    """Assign second-round cluster labels with scipy hierarchical clustering.

    - sch.linkage builds the dendrogram; its last merge distance (last_d)
      measures how spread out the points still are.
    - If last_d is below `low`, the first-round points are already similar
      enough: everyone gets label 0 and no second round is needed
      (e.g. low=300, last_d=250 -> [0, 0, 0, 0, 0]).
    - Otherwise clusters are cut at max_d = last_d * dist_pct with
      sch.fcluster(criterion='distance') (e.g. last_d=10000, dist_pct=0.4
      -> clusters formed at distance 4000).
    """
    linkage_matrix = sch.linkage(x, method=method, metric='euclidean')
    last_merge_distance = linkage_matrix[-1][2]
    if last_merge_distance < low:
        # already similar after round one: a single shared label
        return [0] * len(x)
    cutoff = last_merge_distance * dist_pct
    return sch.fcluster(linkage_matrix, cutoff, criterion='distance')
# using kmeans to build the model
def kmeans_clusters(clusters, x):
    """Fit k-means with k = number of distinct second-round labels; return labels."""
    distinct_label_count = len(set(clusters))
    fitted = sc.KMeans(n_clusters=distinct_label_count, random_state=0).fit(x)
    return fitted.labels_
# this function includes hierarchical clustering and changing labels from the first round to get appropriate labels for
# the second round of clustering
def get_new_labels(second_labels, second_round_idx_labels, new_labels):
    """Combine first- and second-round labels into concatenated labels.

    For each position i, second_round_idx_labels[i] holds
    (original index in new_labels, first-round label); the entry at that
    original index becomes int(str(first_round) + str(second_round)),
    e.g. first-round 1 + second-round 2 -> 12.

    The original scanned new_labels linearly to find the index to update;
    a direct indexed assignment does the same in O(1) per entry.
    """
    for i, second_label in enumerate(second_labels):
        first_index = second_round_idx_labels[i][0]
        first_label = second_round_idx_labels[i][1]
        # concatenate labels from the two rounds
        new_labels[first_index] = int(str(first_label) + str(second_label))
    return new_labels
# group similar trips according to new_labels, store the original indices of the trips
def group_similar_trips(new_labels, track):
    """Group trip positions by identical label, mapped back to original indices.

    track[pos][0] holds the original index (in filter_trips) of position pos.
    Example: labels ['a','a','a','b','b','b'] produce two bins, one per
    unique label, each bin listing the original indices of its trips.
    """
    grouped_bins = []
    # one bin per unique label, in set-iteration order (as before)
    for unique_label in set(new_labels):
        positions = [pos for pos, label in enumerate(new_labels) if label == unique_label]
        # translate positions back to the original trip indices via track
        grouped_bins.append([track[pos][0] for pos in positions])
    return grouped_bins
# replace the first round labels with new labels
def change_track_labels(track, new_labels):
    """Overwrite the label slot (item[1]) of each track entry in place.

    track entries are [original_index, first_round_label]; after this call
    the label slot holds the corresponding second-round label.
    """
    for position, refreshed_label in enumerate(new_labels):
        track[position][1] = refreshed_label
    return track
| e-mission/e-mission-server | emission/analysis/modelling/tour_model/label_processing.py | label_processing.py | py | 6,913 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "logging.debug",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.link... |
30372470703 | from django.shortcuts import render,redirect
# from .models import details
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from .forms import SignUpForm, UserName
from django.contrib.auth import authenticate, login, logout
from .models import FriendDetails
import requests
from bs4 import BeautifulSoup
# Create your views here.
def del_request(request, user_name):
    """Remove the logged-in user from `user_name`'s follower set, then go home.

    BUG FIX: a Django view must return an HttpResponse on every code path.
    The redirect is now issued unconditionally, so an anonymous caller no
    longer falls through returning None (a server error).
    """
    if request.user.is_authenticated:
        user = request.user
        bro = FriendDetails.objects.get(friend_user_name=user_name)
        # NOTE(review): raises FriendDetails.DoesNotExist for an unknown
        # user_name — presumably only reachable from links the template
        # renders for existing friends; confirm.
        bro.user.remove(user)
    return redirect("/main/")
def refresh(request, user_name):
    """Re-scrape `user_name`'s CodeChef profile and update the stored record.

    Fetches https://www.codechef.com/users/<user_name>, re-reads the display
    name, rating number and star count, writes them into the existing
    FriendDetails row, then redirects to the main page.
    """
    url = f"https://www.codechef.com/users/{user_name}"
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html5lib')
    # strip('<h2/>') removes those characters from both ends of the tag's
    # string form — fragile if the display name itself starts/ends with them.
    name = str(soup.findAll('h2')[1]).strip('<h2/>')
    table = soup.find('div', attrs={'class': 'rating-number'})
    rat = soup.find('span', attrs={'class': 'rating'})
    # First character of the rating span is the star count, second is the
    # star glyph; repeat the glyph `num` times separated by spaces.
    num = int(rat.text[0])
    rat = (rat.text[1] + " ") * num
    print(rat)
    if table is not None:
        table = table.text
    # NOTE(review): when the profile has no rating, `table` stays a
    # non-text value here and is saved as-is — confirm this is intended.
    detail = FriendDetails.objects.get(friend_user_name=user_name)
    detail.friend_name = name
    detail.rating = table
    detail.stars = rat
    detail.save()
    return redirect("/main/")
def home(request):
    """Main page: list the user's tracked friends and handle add-friend POSTs.

    On POST with a valid username form: if the CodeChef user is already in
    the database, just link it to the current user; otherwise scrape the
    profile, create a FriendDetails row, and link it.  Invalid/unrated
    usernames flash an error.  Anonymous users get an empty friend list.
    """
    bro = {}
    if request.user.is_authenticated:
        user = request.user
        bro = user.frienddetails_set.all()
        if request.method == "POST":
            form = UserName(request.POST)
            if form.is_valid():
                user_name = form.cleaned_data.get('user_name')
                bro = FriendDetails.objects.filter(friend_user_name=user_name).first()
                if bro is not None:
                    # Already scraped before: just attach it to this user.
                    bro.user.add(user)
                    bro = FriendDetails.objects.filter(friend_user_name=user_name).values()
                    return(render(request, 'main/home.html', context={'bro': user.frienddetails_set.all(), 'form': form}))
                else:
                    # First time this username is requested: scrape CodeChef.
                    url = f"https://www.codechef.com/users/{user_name}"
                    r = requests.get(url)
                    soup = BeautifulSoup(r.content, 'html5lib')
                    name = str(soup.findAll('h2')[1]).strip('<h2/>')
                    table = soup.find('div', attrs={'class': 'rating-number'})
                    rat = soup.find('span', attrs={'class': 'rating'})
                    # star count glyphs, e.g. "4" + "★" -> "★ ★ ★ ★ "
                    num = int(rat.text[0])
                    rat = (rat.text[1] + " ") * num
                    if table is not None:
                        table = table.text
                        bro = FriendDetails(friend_name=name, friend_user_name=user_name, rating=table, stars=rat)
                        bro.save()
                        bro.user.add(user)
                        return(render(request, 'main/home.html', context={'bro': user.frienddetails_set.all(), 'form': form}))
                    else:
                        # No rating div -> treat the username as invalid.
                        messages.error(request, f"{user_name} is not a valid Username")
    form = UserName
    # email = details.objects.all
    # template = loader.get_template('/index.html')
    # context = {'email': email}
    return render(request, 'main/home.html', {'form': form, 'bro': bro})
def register(request):
    """Sign-up view: create the account on valid POST, else re-show the form."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save()
            user_name = form.cleaned_data.get('username')
            messages.success(request, f"User {user_name} created successfully")
            return redirect("/main/")
        else:
            # NOTE(review): form.error_messages is the form's *template*
            # message dict, not the validation errors for this submission —
            # form.errors is presumably what was meant; confirm.
            for msg in form.error_messages:
                messages.error(request, f"{msg} : {form.error_messages[msg]}")
            return render(request=request,
                          template_name="main/register.html",
                          context={"form": form})
    # GET: render a blank form (the class itself; Django instantiates-renders it).
    form = SignUpForm
    return(render(request, 'main/register.html', context={'form': form}))
def logout_request(request):
    """Log the current user out, flash a confirmation, and go home."""
    logout(request)
    messages.info(request, "Logged out successfully!")
    return redirect("/main/")
def login_request(request):
    """Login view: authenticate on POST, flash success/failure, else show form."""
    if request.method == 'POST':
        form = AuthenticationForm(request, request.POST)
        if form.is_valid():
            user_name = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            # NOTE(review): prints the plaintext password to the server
            # console — debug leftover, should be removed.
            print(user_name, password)
            user = authenticate(request, username=user_name, password=password)
            if user is not None:
                messages.success(request, "You are now logged in !")
                login(request, user)
                return redirect("/main/")
            else:
                messages.error(request, "Invalid username or password")
        else:
            messages.error(request, "Invalid username or password")
    # GET or failed POST: render a blank authentication form.
    form = AuthenticationForm
    return(render(request, "main/login.html", context={'form': form}))
| harithlaxman/CodeChef-Friends | main/views.py | views.py | py | 4,773 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.FriendDetails.objects.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.FriendDetails.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "models.FriendDetails",
"line_number": 14,
"usage_type": "name"
},
... |
6759362866 | import pandas as pd
import numpy as np
import random,math
from scipy.spatial import distance
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from collections import defaultdict
def plot_3d(res_datapoints, m, pdf):
    """Scatter-plot 3-D datapoints coloured by cluster, overlay centroids,
    show the figure and append it to the given PdfPages-like `pdf`.

    Args:
        res_datapoints: iterable of (point, cluster_label) pairs; each point
            must have at least 3 coordinates.
        m: iterable of centroid coordinate triples.
        pdf: object with a savefig(fig) method.
    """
    fig = plt.figure()
    # NOTE(review): direct Axes3D(fig) construction is the legacy API;
    # modern matplotlib prefers fig.add_subplot(projection='3d').
    ax = Axes3D(fig)
    x = []
    y = []
    z = []
    l = []
    for d, cl in res_datapoints:
        x.append(d[0])
        y.append(d[1])
        z.append(d[2])
        # +10 offsets the colour index away from the centroid colours below
        l.append(cl + 10)
        # print(cl)
    ax.scatter(x, y, z, c=l)
    xm = []
    ym = []
    zm = []
    l = []
    nm = 20
    for mm in m:
        xm.append(mm[0])
        ym.append(mm[1])
        zm.append(mm[2])
        # l/nm are built but unused: the scatter below hard-codes c="red"
        l.append(nm)
        nm += 1
    # print(xm,ym,zm)
    ax.scatter(xm, ym, zm, c="red", marker="X", s=100)
    ax.set_xlabel('X-axis')
    ax.set_ylabel('Y-axis')
    ax.set_zlabel('Z-axis')
    plt.show()
    pdf.savefig(fig)
def plot_2d(res_datapoints, m, pdf):
    """Scatter-plot 2-D datapoints coloured by cluster, overlay centroids
    as red X markers, show the figure and append it to `pdf`.

    Args:
        res_datapoints: iterable of (point, cluster_label) pairs; each point
            must have at least 2 coordinates.
        m: iterable of centroid coordinate pairs.
        pdf: object with a savefig(fig) method.
    """
    fig = plt.figure(0)
    xs, ys, colours = [], [], []
    for point, cluster in res_datapoints:
        xs.append(point[0])
        ys.append(point[1])
        # offset colour index away from the centroid colours
        colours.append(cluster + 10)
    plt.scatter(xs, ys, c=colours)
    centroid_xs = [centroid[0] for centroid in m]
    centroid_ys = [centroid[1] for centroid in m]
    plt.scatter(centroid_xs, centroid_ys, c="red", marker="X")
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.show()
    pdf.savefig(fig)
def data_gen(k, dim_data, N):
    """Generate k synthetic clusters of roughly N/k points each.

    Cluster centres are random integer vectors in [0, 10000); each point is
    a random integer vector in [-50, 50) scaled by a per-point random factor
    in [-sigma, sigma) and shifted by the cluster centre.

    Returns:
        (centroids, data, cluster_mark) where `data` is a list of
        (point, cluster_label) pairs with labels 1..k, and `cluster_mark`
        is k + 1 (one past the last label used).
    """
    centres = np.random.randint(10000, size=(k, dim_data))
    spreads = np.random.randint(1, 80, size=k)
    param_set = list(zip(centres, spreads))
    print("parameter: ", param_set)
    m = []
    data = []
    cluster_mark = 1
    for centre, spread in param_set:
        m.append(centre)
        base_points = np.random.randint(-50, 50, size=(int(N / k), dim_data))
        for base in base_points:
            data.append((np.random.randint(-spread, spread) * base + centre, cluster_mark))
        cluster_mark += 1
    print("datapoints: ", data)
    print("centroids: ", m)
    print("cluster mark: ", cluster_mark)
    return m, data, cluster_mark
def synthesize(k, dim_data, N, pdf):
    """Generate k synthetic clusters, plot them (2-D or 3-D), and return the
    datapoints reordered cluster by cluster (labels dropped).

    Args:
        k: number of clusters.
        dim_data: dimensionality of each point (only 2 and 3 are plotted).
        N: total number of points to generate.
        pdf: PdfPages-like object passed through to the plotting helpers.
    """
    # These three self-assignments are no-ops kept from the original.
    k = k
    dim_data = dim_data
    N = N
    m, data, cluster_mark = data_gen(k, dim_data, N)
    if len(m[0]) == 2:
        plot_2d(data, m, pdf)
    elif len(m[0]) == 3:
        print("3D")
        plot_3d(data, m, pdf)
    # print("mue: ",means,"\nsigma: ",std)
    # Group the points by cluster label.
    d = defaultdict(list)
    for arr, v in data:
        d[v].append(arr)
    print(d[1])
    # Flatten back into a single list ordered by cluster label 1..k.
    test_data = []
    for key in range(1, cluster_mark):
        test_data.extend(d[key])
    print(test_data[0])
    print(k)
    return test_data
| swadtasnim/My-K-Means-Clustering | synthetic_data.py | synthetic_data.py | py | 2,705 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mpl_toolkits.mplot3d.Axes3D",
"line_number": 19,
"usage_type": "call"
},
{
"api_name":... |
33621622326 | from models import QP
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
from torch import optim
from torch.autograd import Variable
import torch.nn.functional as F
from copy import copy
from random import shuffle, sample
import numpy as np
from IPython.core.debugger import set_trace
import config
import utils
import model_utils
from copy import deepcopy
np.seterr(all="raise")
class MetaQP:
    """Meta Q/policy self-play driver.

    Plays batches of games between the current network (`qp`) and the best
    checkpoint so far (`best_qp`), records per-task memories (played
    policies, improved policies and game results), and trains `qp` on them.
    """

    def __init__(self,
                 actions,
                 get_legal_actions,
                 transition_and_evaluate,
                 cuda=torch.cuda.is_available(),
                 best=False):
        """Load networks and, unless `best`, optimizers/history/memories.

        Args:
            actions: list of all action ids.
            get_legal_actions: maps a board state (first two planes) to the
                legal action ids.
            transition_and_evaluate: env step fn -> (state, reward, game_over).
            cuda: move models to GPU when True.
            best: when True only the frozen "best" copy is set up.
        """
        utils.create_folders()
        self.cuda = cuda
        self.qp = model_utils.load_model()
        if self.cuda:
            self.qp = self.qp.cuda()
        self.actions = actions
        self.get_legal_actions = get_legal_actions
        self.transition_and_evaluate = transition_and_evaluate
        if not best:
            self.q_optim, self.p_optim = model_utils.setup_optims(self.qp)
            self.best_qp = model_utils.load_model()
            if self.cuda:
                self.best_qp = self.best_qp.cuda()
            self.history = utils.load_history()
            self.memories = utils.load_memories()

    def correct_policy(self, policy, state, mask=True):
        """Zero out illegal actions (when `mask`) and renormalize to sum 1.

        NOTE(review): when every entry is masked to zero the all-zero
        policy is returned unchanged; a downstream np.random.choice on it
        would raise — confirm this case cannot occur.
        """
        if mask:
            legal_actions = self.get_legal_actions(state[:2])
            mask = np.zeros((len(self.actions),))
            mask[legal_actions] = 1
            policy = policy * mask
        pol_sum = (np.sum(policy * 1.0))
        if pol_sum == 0:
            pass  # degenerate all-zero policy passed through as-is
        else:
            policy = policy / pol_sum
        return policy

    def correct_policies(self, policies, states):
        """Apply correct_policy element-wise over a batch (in place)."""
        for i, (policy, state) in enumerate(zip(policies, states)):
            policies[i] = self.correct_policy(policy, state)
        return policies

    def wrap_to_variable(self, numpy_array, volatile=False):
        """Wrap a numpy array into a float32 Variable (on GPU when enabled)."""
        var = Variable(torch.from_numpy(
            numpy_array.astype("float32")), volatile=volatile)
        if self.cuda:
            var = var.cuda()
        return var

    def transition_and_evaluate_minibatch(self, minibatch, policies, tasks, num_done, is_done,
                                          bests_turn, best_starts, results):
        """Step every non-finished game in `minibatch` one move.

        Samples an action per game from its policy, applies the transition,
        and on game over either records the match result (competitive phase,
        `results` is not None) or writes the reward into the task memory
        (Q-estimation phase, `results` is None).

        Returns the updated (minibatch, tasks, num_done, is_done, results,
        bests_turn, non_done_view) where `non_done_view` lists the indices
        still in play.
        """
        task_idx = 0
        n_way_idx = 0
        # map non_done minibatch indices to a smaller tensor
        non_done_view = []
        for i, (state, policy) in enumerate(zip(minibatch, policies)):
            if i % config.N_WAY == 0 and i != 0:
                task_idx += 1
            if i != 0:
                n_way_idx += 1
                n_way_idx = n_way_idx % config.N_WAY
            if not is_done[i]:
                action = np.random.choice(self.actions, p=policy)
                state, reward, game_over = self.transition_and_evaluate(
                    state, action)
                bests_turn = (bests_turn + 1) % 2
                if game_over:
                    is_done[i] = True
                    num_done += 1
                    if results is not None:
                        # Competitive phase: keep one live game per task by
                        # swapping a still-running sibling into slot i.
                        for k in range(config.N_WAY - n_way_idx):
                            if not is_done[i + k] and k != 0:
                                is_done[i + k] = True
                                is_done[i] = False
                                minibatch[i] = minibatch[i + k]
                                break
                        # bests_turn was already flipped above, so comparing
                        # with best_starts attributes the win to the mover.
                        if bests_turn == best_starts:
                            results["best"] += 1
                        else:
                            results["new"] += 1
                    else:
                        # Q-estimation phase: store the reward from the
                        # perspective of the player who started the task.
                        starting_player = tasks[task_idx]["starting_player"]
                        curr_player = int(state[2][0][0])
                        if starting_player != curr_player:
                            reward *= -1
                        tasks[task_idx]["memories"][n_way_idx]["result"] = reward
                else:
                    non_done_view.extend([i])
        return minibatch, tasks, num_done, is_done, results, bests_turn, non_done_view

    def get_states_from_next_minibatch(self, next_minibatch):
        """Pick one representative state per task (every N_WAY-th entry)."""
        states = []
        for i, state in enumerate(next_minibatch):
            if i % config.N_WAY == 0:
                states.extend([state])
        return states

    def setup_tasks(self, states, starting_player_list, episode_is_done):
        """Build the task records and tile each task's state N_WAY times
        into a single minibatch tensor.  Finished tasks get a None slot."""
        tasks = []
        minibatch = np.zeros((config.EPISODE_BATCH_SIZE,
                              config.CH, config.R, config.C))
        idx = 0
        for task_idx in range(config.EPISODE_BATCH_SIZE // config.N_WAY):
            if not episode_is_done[idx]:
                task = {
                    "state": states[task_idx],
                    "starting_player": starting_player_list[task_idx],
                    "memories": []
                }
                tasks.extend([task])
            else:
                tasks.extend([None])
            for _ in range(config.N_WAY):
                minibatch[idx] = np.array(states[task_idx])
                idx += 1
        return minibatch, tasks

    def run_episode(self, orig_states):
        """Play a full batch of games to completion, persist the collected
        memories, and promote/demote the current network based on results."""
        np.set_printoptions(precision=3)
        results = {
            "new": 0, "best": 0, "draw": 0
        }
        states = np.array(orig_states)
        episode_is_done = []
        for _ in range(config.EPISODE_BATCH_SIZE):
            episode_is_done.extend([False])
        episode_num_done = 0
        best_starts = np.random.choice(2)
        starting_player_list = [np.random.choice(2) for _ in range(
            config.EPISODE_BATCH_SIZE // config.N_WAY)]
        # Stamp the starting player into plane 2 of every task state.
        if len(states) != config.CH:
            for i, state in enumerate(states):
                states[i] = np.array(state)
                states[i][2] = starting_player_list[i]
        else:
            # A single state was passed: replicate it once per task.
            new_states = []
            for starting_player in starting_player_list:
                new_state = np.array(states)
                new_state[2] = starting_player
                new_states.extend([new_state])
            states = new_states
        bests_turn = best_starts
        while episode_num_done < config.EPISODE_BATCH_SIZE:
            print("Num done {}".format(episode_num_done))
            states, episode_is_done, episode_num_done, results = self.meta_self_play(
                states=states,
                episode_is_done=episode_is_done,
                episode_num_done=episode_num_done,
                results=results,
                bests_turn=bests_turn,
                best_starts=best_starts,
                starting_player_list=starting_player_list)
            bests_turn = (bests_turn + 1) % 2
        if len(self.memories) > config.MAX_TASK_MEMORIES:
            # BUG FIX: the original evaluated the slice without assigning it,
            # so the memory buffer was never actually truncated.
            self.memories = self.memories[-config.MAX_TASK_MEMORIES:]
        utils.save_memories(self.memories)
        print("Results: ", results)
        if results["new"] > results["best"] * config.SCORING_THRESHOLD:
            # New network wins decisively: it becomes the best checkpoint.
            model_utils.save_model(self.qp)
            print("Loading new best model")
            self.best_qp = model_utils.load_model()
            if self.cuda:
                self.best_qp = self.best_qp.cuda()
        elif results["best"] > results["new"] * config.SCORING_THRESHOLD:
            # Best wins decisively: discard the current weights.
            print("Reverting to previous best")
            self.qp = model_utils.load_model()
            if self.cuda:
                self.qp = self.qp.cuda()
            self.q_optim, self.p_optim = model_utils.setup_optims(self.qp)

    def meta_self_play(self, states, episode_is_done, episode_num_done, bests_turn,
                       results, best_starts, starting_player_list):
        """One meta-move: sample N_WAY candidate policies per task, score
        them with the Q head, combine into an improved policy, play it in
        the competitive games, and roll the candidates out to estimate Qs.

        Returns (next_states, episode_is_done, episode_num_done, results).
        """
        self.qp.eval()
        self.best_qp.eval()
        minibatch, tasks = self.setup_tasks(
            states=states,
            starting_player_list=starting_player_list,
            episode_is_done=episode_is_done)
        minibatch_variable = self.wrap_to_variable(minibatch)
        if bests_turn == 1:
            qp = self.best_qp
        else:
            qp = self.qp
        # Sample N_WAY noisy candidate policies per task.
        _, policies = qp(minibatch_variable, percent_random=.2)
        # BUG FIX (here and below): route tensors through .cpu() before
        # .numpy() — a no-op on CPU, required when the model is on CUDA.
        policies = policies.detach().cpu().numpy()
        corrected_policies = self.correct_policies(policies, minibatch)
        policies_input = self.wrap_to_variable(corrected_policies)
        qs, _ = qp(minibatch_variable, policies_input)
        qs = qs.detach().cpu().numpy()
        # Record each candidate policy in its task memory.
        idx = 0
        for task_idx in range(config.EPISODE_BATCH_SIZE // config.N_WAY):
            for _ in range(config.N_WAY):
                if not episode_is_done[idx]:
                    tasks[task_idx]["memories"].extend(
                        [{"policy": corrected_policies[idx]}])
                elif tasks[task_idx] is not None:
                    tasks[task_idx]["memories"].extend([None])
                idx += 1
        # Q in [-1, 1] -> weight in [0, 1]; weight each candidate by its Q.
        scaled_qs = (qs + 1) / 2
        weighted_policies = corrected_policies * scaled_qs
        # Sum the Q-weighted candidates per task into one improved policy.
        idx = 0
        for task_idx in range(config.EPISODE_BATCH_SIZE // config.N_WAY):
            summed_policy = 0
            for _ in range(config.N_WAY):
                summed_policy += weighted_policies[idx]
                idx += 1
            idx -= config.N_WAY
            improved_policy = self.correct_policy(
                summed_policy, minibatch[idx], mask=True)
            if tasks[task_idx] is not None:
                tasks[task_idx]["improved_policy"] = improved_policy
            for _ in range(config.N_WAY):
                weighted_policies[idx] = improved_policy
                idx += 1
        # Snapshot done-flags/count: the rollout below must not advance
        # the real episode bookkeeping.
        is_done = deepcopy(episode_is_done)
        num_done = episode_num_done
        improved_policies = weighted_policies
        # Competitive step: play the improved policy once per game.
        next_minibatch, tasks, \
            episode_num_done, episode_is_done, \
            results, bests_turn, non_done_view = self.transition_and_evaluate_minibatch(
                minibatch=np.array(minibatch),
                policies=improved_policies,
                tasks=tasks,
                num_done=episode_num_done,
                is_done=episode_is_done,
                bests_turn=bests_turn,
                best_starts=best_starts,
                results=results)
        next_states = self.get_states_from_next_minibatch(next_minibatch)
        # revert back to orig turn now that we are done
        bests_turn = (bests_turn + 1) % 2
        # Rollout phase: play every candidate policy to the end of the game
        # so each memory gets a result (a Monte-Carlo Q estimate).
        policies = corrected_policies
        while True:
            minibatch, tasks, \
                num_done, is_done, \
                _, bests_turn, non_done_view = self.transition_and_evaluate_minibatch(
                    minibatch=minibatch,
                    policies=policies,
                    tasks=tasks,
                    num_done=num_done,
                    is_done=is_done,
                    bests_turn=bests_turn,
                    best_starts=best_starts,
                    results=None)
            if num_done == config.EPISODE_BATCH_SIZE:
                break
            # Only the still-running games are re-queried.
            minibatch_view = minibatch[non_done_view]
            minibatch_view_variable = self.wrap_to_variable(minibatch_view)
            if bests_turn == 1:
                qp = self.best_qp
            else:
                qp = self.qp
            # BUG FIX: the original called self.qp here, ignoring the
            # best/current selection made just above.
            _, policies_view = qp(minibatch_view_variable)
            policies_view = policies_view.detach().cpu().numpy()
            policies_view = self.correct_policies(policies_view, minibatch_view)
            policies[non_done_view] = policies_view
        # Drop the None placeholders before persisting the task memories.
        fixed_tasks = []
        for _, task in enumerate(tasks):
            if task is not None:
                new_memories = []
                for i, memory in enumerate(task["memories"]):
                    if memory is not None:
                        new_memories.extend([memory])
                task["memories"] = new_memories
                fixed_tasks.extend([task])
        self.memories.extend(fixed_tasks)
        return next_states, episode_is_done, episode_num_done, results

    def train_memories(self):
        """Run TRAINING_LOOPS rounds of task training on sampled memories."""
        self.qp.train()
        self.qp.Q.train()
        self.qp.P.train()
        self.qp.StateModule.train()
        if len(self.memories) < config.MIN_TASK_MEMORIES:
            print("Need {} tasks, have {}".format(
                config.MIN_TASK_MEMORIES, len(self.memories)))
            return
        for _ in tqdm(range(config.TRAINING_LOOPS)):
            minibatch = sample(self.memories,
                               min(config.TRAINING_BATCH_SIZE // config.N_WAY, len(self.memories)))
            self.train_tasks(minibatch)
            utils.save_history(self.history)

    def train_tasks(self, minibatch):
        """Fit the Q head on (state, policy) -> result and the policy head
        on the per-task improved policies, for EPOCHS epochs."""
        batch_task_tensor = np.zeros((config.TRAINING_BATCH_SIZE,
                                      config.CH, config.R, config.C))
        # Indices of the first (representative) entry of each task.
        policies_view = []
        for i in range(config.TRAINING_BATCH_SIZE):
            if i % config.N_WAY == 0:
                policies_view.extend([i])
        result_tensor = np.zeros((config.TRAINING_BATCH_SIZE, 1))
        policies_tensor = np.zeros((
            config.TRAINING_BATCH_SIZE, config.R * config.C))
        improved_policies_tensor = np.zeros((
            config.TRAINING_BATCH_SIZE // config.N_WAY, config.R * config.C))
        optimal_value_tensor = np.ones(
            (config.TRAINING_BATCH_SIZE // config.N_WAY, 1))
        idx = 0
        for i, task in enumerate(minibatch):
            state = task["state"]
            improved_policies_tensor[i] = task["improved_policy"]
            # NOTE(review): a task can carry fewer than N_WAY memories, so
            # the tensors are over-allocated and truncated to idx below.
            for memory in task["memories"]:
                result_tensor[idx] = memory["result"]
                policies_tensor[idx] = memory["policy"]
                batch_task_tensor[idx] = state
                idx += 1
        result_tensor = result_tensor[:idx]
        policies_tensor = policies_tensor[:idx]
        batch_task_tensor = batch_task_tensor[:idx]
        improved_policies_tensor = improved_policies_tensor[:idx // config.N_WAY]
        optimal_value_tensor = optimal_value_tensor[:idx // config.N_WAY]
        policies_view = policies_view[:idx // config.N_WAY]
        state_input = self.wrap_to_variable(batch_task_tensor)
        policies_input = self.wrap_to_variable(policies_tensor)
        improved_policies_target = self.wrap_to_variable(
            improved_policies_tensor)
        result_target = self.wrap_to_variable(result_tensor)
        optimal_value_var = self.wrap_to_variable(optimal_value_tensor)
        for e in range(config.EPOCHS):
            self.q_optim.zero_grad()
            self.p_optim.zero_grad()
            # Several Q updates per policy update.
            for _ in range(config.Q_UPDATES_PER):
                Q_loss = 0
                Qs, _ = self.qp(state_input, policies_input)
                Q_loss += F.mse_loss(Qs, result_target) * 10
                Q_loss.backward()
                self.q_optim.step()
                self.q_optim.zero_grad()
            policy_loss = 0
            Qs, policies = self.qp(state_input)
            policies_smaller = policies[policies_view]
            # Cross-entropy of the network policy against the improved
            # policy target, one representative entry per task.
            improved_policy_loss = 0
            for improved_policy, policy in zip(improved_policies_target, policies_smaller):
                improved_policy = improved_policy.unsqueeze(0)
                policy = policy.unsqueeze(-1)
                improved_policy_loss += -torch.mm(improved_policy,
                                                  torch.log(policy))
            improved_policy_loss /= len(policies_smaller)
            Qs_smaller = Qs[policies_view]
            policy_loss = improved_policy_loss * 5  # + \
            # F.mse_loss(Qs_smaller, optimal_value_var)*2
            policy_loss.backward()
            self.p_optim.step()
            p_loss = policy_loss.data.cpu().numpy()[0]
            q_loss = Q_loss.data.cpu().numpy()[0]
            self.history["q_loss"].extend([q_loss])
            self.history["p_loss"].extend([p_loss])
            if e == (config.EPOCHS - 1):
                print("Policy loss {}".format(p_loss))
                print("Q loss: {}".format(q_loss))
| jprothero/MetaQP | MetaQP.py | MetaQP.py | py | 20,649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.seterr",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "utils.create_fol... |
27235166738 | # -*- coding: utf-8 -*-
__author__ = "Michele Samorani"
import pandas as pd
import cplex
import time
import random
TIME_LIMIT_SECONDS = 60
def build_scenarios(show_probs, max_scenarios, seed):
    """Yield show/no-show scenarios as (probability, 0-1 show list) pairs.

    When the full scenario space (2**n) fits within `max_scenarios`, every
    combination is enumerated with its exact probability.  Otherwise
    `max_scenarios` scenarios are Monte-Carlo sampled (seeded by `seed`),
    each carrying probability 1/max_scenarios.

    :param show_probs: per-patient show probabilities
    :type show_probs: list[float]
    """
    random.seed(seed)
    n = len(show_probs)
    if 2 ** n <= max_scenarios:
        import itertools
        for combo in itertools.product((0, 1), repeat=n):
            scenario = list(combo)
            prob = 1
            for j, shows in enumerate(scenario):
                prob *= show_probs[j] if shows == 1 else 1 - show_probs[j]
            yield prob, scenario
    else:
        draw = show_probs.copy()
        for _ in range(max_scenarios):
            for j in range(n):
                if random.uniform(0, 1) < show_probs[j]:
                    draw[j] = 1
                else:
                    draw[j] = 0
            yield 1 / max_scenarios, draw.copy()
def optimally_schedule(show_probs, wtc, otc, nslots, seed, max_scenarios=100000, delta_sim=0):
    """Build and solve the stochastic appointment-scheduling MIP with CPLEX.

    Decision variables x{i}_{j} assign patient i to slot j; b{s}_{j} track
    the queue backlog in scenario s at slot j.  The objective charges wtc
    per expected unit of waiting and otc for overtime past the last slot.

    :param show_probs: per-patient show probabilities
    :param wtc: waiting-time cost coefficient
    :param otc: overtime cost coefficient
    :param nslots: number of appointment slots F
    :param seed: RNG seed forwarded to build_scenarios
    :param max_scenarios: cap on enumerated/sampled scenarios
    :param delta_sim: tolerance for marking two patients "similar"
    :return: (objective value, MIP gap, slot assignment per patient,
        wall-clock seconds); four NaNs when no solution is available.
    """
    print_steps = False
    # First, find the scenarios
    qs = []  # a list of sets of patients that show under a scenario
    ps = []  # a list of probabilities
    init = time.time()
    # Sort patients by show probability to find "similar" adjacent pairs.
    ser = pd.Series(data=show_probs)
    sorted_indices = list(ser.sort_values().index)
    # Similar index (for each index i, the index of the other patient for constraint 4)
    similar = {}
    for iii in range(len(sorted_indices) - 1):
        i = sorted_indices[iii]
        j = sorted_indices[iii + 1]
        # check whether i is similar to j (within delta_sim + epsilon)
        if show_probs[j] - show_probs[i] <= delta_sim + 0.00000001:
            similar[i] = j
        else:
            similar[i] = -1
    similar[sorted_indices[-1]] = -1
    if print_steps:
        print('Building scenarios')
    totp = 0
    for p, s in build_scenarios(show_probs, max_scenarios, seed):
        qs.append(set())  # set of showing indices
        ps.append(p)
        totp += p
        for i in range(len(s)):
            if s[i] == 1:
                qs[-1].add(i)
    # print(f'totp={totp}')
    # if abs(totp-1) > 0.01:
    #     input('TOT P < 1!!!!!!')
    S = len(qs)  # number of scenarios
    F = nslots  # number of slots
    N = len(show_probs)  # number of patients
    F_max = N
    if print_steps:
        print(f'Done in {time.time() - init}. Built {S} scenarios. Setting up problem...')
    c = cplex.Cplex()
    # variables: binary assignments x{i}_{j} and continuous backlogs b{s}_{j}
    c.variables.add(names=[f'x{i}_{j}' for i in range(N) for j in range(F)], types=[c.variables.type.binary for i in range(N) for j in range(F)])
    c.variables.add(names=[f'b{s}_{j}' for j in range(F_max) for s in range(S)], lb=[0 for j in range(F_max) for s in range(S)])
    # Silence CPLEX output and cap the solve time.
    c.set_log_stream(None)
    c.set_results_stream(None)
    c.set_warning_stream(None)
    c.parameters.timelimit.set(TIME_LIMIT_SECONDS)
    # objective: expected waiting cost per backlog unit, plus overtime on
    # the final slot's backlog.
    if print_steps:
        print(f'Setting up objective...')
    for s in range(S):
        tot_shows = len(qs[s])  # N^s
        # print(f'Scenario {s} with probability {ps[s]} and tot_shows = {tot_shows}:')
        # print(qs[s])
        if tot_shows == 0:
            continue
        for j in range(F_max):
            # print(f'scenario {s}, j={j}: adding b{s}_{j} * (ps_s={ps[s]}) * (wtc={wtc}) / (tot_shows={tot_shows})')
            c.objective.set_linear(f'b{s}_{j}', ps[s] * wtc)
        # Overwrites the last slot's coefficient to also charge overtime.
        c.objective.set_linear(f'b{s}_{F-1}', ps[s] * (otc + wtc))
        # print(f'scenario {s}: adding b{s}_{F-1} * (ps_s={ps[s]}) * (otc={otc})')
    # constraint set (1): each patient is assigned to exactly one slot
    if print_steps:
        print(f'Setting up constraint set 1...')
    for i in range(N):
        c.linear_constraints.add(lin_expr=[cplex.SparsePair(
            ind=[f'x{i}_{j}' for j in range(F)], val=[1.0 for j in range(F)])],
            senses=['E'],
            rhs=[1],
            names=[f'(1_{i})'])
    # constraint set (2): backlog recursion b{s}_{j} >= b{s}_{j-1}
    # + (shows scheduled in slot j) - 1 service per slot
    if print_steps:
        print(f'Setting up constraint set 2...')
    for s in range(S):
        if print_steps and s % 1000 == 0:
            print(f'Built constraints for {s} scenarios')
        for j in range(0, F_max):
            expr = []
            if j < F:
                expr = [f'x{i}_{j}' for i in qs[s]]
            expr.append(f'b{s}_{j}')
            if j >= 1:
                expr.append(f'b{s}_{j-1}')
            vals = []
            if j < F:
                vals = [-1.0 for i in qs[s]]
            vals.append(1)
            if j >= 1:
                vals.append(-1)
            c.linear_constraints.add(lin_expr=[cplex.SparsePair(expr, vals)],
                                     senses=['G'],
                                     rhs=[-1],
                                     names=[f'(2_{s}_{j})'])
    # constraint set (3): every slot receives at least one patient
    if print_steps:
        print(f'Setting up constraint set 3...')
    # original constraint 3
    if (N >= F):
        for j in range(0, F):
            c.linear_constraints.add(lin_expr=[cplex.SparsePair(
                ind=[f'x{i}_{j}' for i in range(N)], val=[1.0 for i in range(N)])],
                senses=['G'],
                rhs=[1],
                names=[f'(3_{j})'])
    # constraint set (4): symmetry breaking between "similar" patients.
    # NOTE(review): the linear_constraints.add call below is commented out,
    # so this loop only builds expr lists that are discarded — constraint
    # set 4 is effectively disabled; confirm this is intentional.
    if print_steps:
        print(f'Setting up constraint set 4...')
    for i1 in range(N):
        i2 = similar[i1]
        if i2 == -1:
            continue
        for j_prime in range(F - 1):
            expr = []
            vals = []
            # old and faster
            expr = [f'x{i1}_{j}' for j in range(j_prime + 1, F)]
            # new and slower
            # expr = [f'x{i1}_{j_prime}']
            # expr.extend([f'x{i2}_{j}' for j in range(0,j_prime+1)])
            # vals = [1 for i in range(len(expr))]
            # c.linear_constraints.add(lin_expr=[cplex.SparsePair(expr, vals)],
            #                          senses=['L'],
            #                          rhs=[1],
            #                          names=[f'(4_{i1}_{j_prime})'])
    # c.write(filename='model.txt', filetype='lp')
    if print_steps:
        print(f'Solving...')
    c.solve()
    time_taken = time.time() - init
    # c.solution.write('solution.txt')
    # print(f'Value = {c.solution.get_objective_value()}')
    # Extract the slot index chosen for each patient (x >= .9 treated as 1).
    solution = []
    try:
        for i in range(N):
            sols = c.solution.get_values([f'x{i}_{j}' for j in range(F)])
            for j in range(F):
                if sols[j] >= .9:
                    solution.append(j)
                    break
    except:
        # NOTE(review): bare except hides the failure cause (e.g. no
        # incumbent within the time limit); NaNs signal "no solution".
        import numpy as np
        return np.nan, np.nan, np.nan, np.nan
    return c.solution.get_objective_value(), c.solution.MIP.get_mip_relative_gap(), solution, time_taken
| samorani/Social-Justice-Appointment-Scheduling | src/stochastic.py | stochastic.py | py | 6,965 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_nu... |
21120299087 | from Sentence_Encoder.meta_response_encoder_fast import encode as response_encode
import Utils.functions as utils
import numpy as np
import torch as T
import copy
def random_response(candidates, conversation_history, p=None):
    """Pick a candidate reply, retrying a few times to avoid repeats.

    Draws from `candidates` (uniformly, or with probabilities `p`) and
    redraws up to 6 times while the draw already appears in
    `conversation_history`; after that the last draw is returned anyway.

    Args:
        candidates: non-empty list of candidate reply strings.
        conversation_history: replies already used in this conversation.
        p: optional probability per candidate (must sum to 1).

    Returns:
        The chosen candidate (a numpy str_ of one of `candidates`).
    """
    loop = 5

    def _draw():
        # BUG FIX: the original called random.choice on the p-is-None path,
        # but `random` is never imported in this module (NameError); draw
        # uniformly with numpy instead — same distribution.
        if p is None:
            return np.random.choice(candidates)
        return np.random.choice(candidates, p=p)

    response = _draw()
    i = 0
    while response in conversation_history:
        response = _draw()
        i += 1
        if i > loop:
            break
    return response
def top_candidates(candidates, scores, top=1):
    """Rank candidates by descending score and keep the best `top`.

    Returns:
        (top candidates, their scores, full descending-order index list).
    """
    order = np.flip(np.argsort(scores), axis=-1).tolist()
    ranked = [candidates[i] for i in order]
    ranked_scores = [scores[i] for i in order]
    return ranked[:top], ranked_scores[:top], order
def rank_and_choose(USE_QA_model, ConvRT_model,
                    tokenizer, model_reverse,
                    utterance, query_encoding,
                    candidates,
                    response_context, conversation_history,
                    bias=None, alpha=0.4, beta=0.6):
    """Rank candidate replies by semantic relevance + MMI and pick one.

    Combines (a) cosine similarity between the query encoding and each
    candidate's response encoding and (b) a reverse-model MMI score (how
    well the candidate predicts the user's utterance), weighted by
    `alpha`/`beta`, then samples from the top-3.

    Returns:
        (chosen response, its index in the original candidate list).
    """
    if bias is None:
        bias = 0.0
    # print("In Ranking")
    # print(len(candidates))
    EOS_token = tokenizer.encode("<|endoftext|>")[0]
    original_candidates = copy.deepcopy(candidates)
    response_encodings = response_encode(
        candidates, USE_QA_model, ConvRT_model, response_context * len(candidates))
    # rank_scores = np.inner(query_encoding,response_encodings)
    # rank_scores = np.reshape(rank_scores,(-1))
    rank_scores = utils.cosine_similarity_nd(query_encoding, response_encodings)
    # print(rank_scores)
    # print(rank_scores+bias)
    normed_rank_scores = utils.normalize(rank_scores + bias)
    # print(normed_rank_scores)
    # MMI Computation: score each candidate by how well the reverse model
    # reconstructs the user's last utterance from it.
    last_utterance = utterance

    def _make_feature(sents, eos):
        # Encode sentences, join with EOS, and keep the last 300 tokens.
        msg_idx = []
        for msg in sents:
            msg_idx.append(tokenizer.encode(msg))
        input_ids = [i for s in msg_idx for i in s + [eos]][:-1]
        input_ids.append(eos)
        if len(input_ids) > 300:
            input_ids = input_ids[-300:]
        return input_ids

    output_ids = _make_feature([last_utterance], EOS_token)
    with T.no_grad():
        original_output_ids = T.tensor(output_ids).to('cuda').long().unsqueeze(0)
        losses = []
        for candidate in candidates:
            input_ids = _make_feature([candidate], EOS_token)
            input_ids = T.tensor(input_ids).to('cuda').long().unsqueeze(0)
            # -1 labels mask the candidate tokens out of the LM loss, so
            # only the utterance tokens contribute.
            output_ids_part_1 = T.empty_like(input_ids).to('cuda').fill_(-1).long()
            input_ids = T.cat([input_ids, original_output_ids], dim=-1)
            output_ids = T.cat([output_ids_part_1, original_output_ids], dim=-1)
            loss, _, _ = model_reverse(input_ids, past=None, labels=output_ids)
            losses.append(loss.item())
    # Lower loss = better reconstruction; invert and normalize to [0, 1].
    losses = np.asarray(losses, np.float32)
    normed_MMI_scores = utils.normalize(1.0 - utils.normalize(losses))
    # COMBINATION
    quasi_probabilities = alpha * (normed_rank_scores + bias) + beta * normed_MMI_scores
    candidates, quasi_probabilities, _ = top_candidates(candidates, quasi_probabilities, top=3)
    probabilities = utils.normalize(quasi_probabilities)
    response = random_response(candidates, conversation_history, p=probabilities)
    # Map the choice back to its position in the caller's candidate list.
    id = original_candidates.index(response)
    return response, id
| JRC1995/Chatbot | ReRanker/rerank.py | rerank.py | py | 3,529 | python | en | code | 79 | github-code | 36 | [
{
"api_name": "numpy.random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.rando... |
72314053544 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-atlassian',
version='0.1.4',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='Django app for interact with atlassian libraries such as JIRA and Confluence.',
long_description=README,
url='https://www.fluendo.com/',
author='Fluendo',
author_email='web-dev@fluendo.com',
install_requires=[
"Django >= 1.11",
"PyJWT >= 1.6.4",
"atlassian-jwt >= 1.8.1",
"requests >= 2.18.4",
"requests-jwt==0.5.3",
"jira @ git+ssh://git@github.com/fluendo/jira"
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| fluendo/django-atlassian | setup.py | setup.py | py | 1,409 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": ... |
28440391809 | import glob
import os
import pickle
import time
from abc import ABC
from pathlib import Path
from typing import Tuple
import numpy as np
from smart_settings.param_classes import recursive_objectify
from mbrl import allogger
from mbrl.base_types import Controller, ForwardModel, Pretrainer
from mbrl.controllers import controller_from_string
from mbrl.controllers.abstract_controller import (
ModelBasedController,
NeedsPolicyController,
ParallelController,
TrainableController,
)
from mbrl.controllers.cem_memory import CEMDataProcessor
from mbrl.helpers import tqdm_context
from mbrl.rolloutbuffer import RolloutBuffer
valid_data_sources = {"env", "policy", "expert"}
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix) :]
return text
def pretrainer_from_string(trainer_name, trainer_params):
trainers_dict = {
"trajectory": TrajectoryPretrainer,
"CEMDataProcessor": CEMDataProcessor,
}
if trainer_name not in trainers_dict:
raise KeyError(f"trainer name '{trainer_name}' not in dictionary entries: {trainers_dict.keys()}")
return trainers_dict[trainer_name](**trainer_params)
def _parse_no_yes_auto(argument):
no_yes_auto = 0
if argument is not None:
if isinstance(argument, bool) and argument:
no_yes_auto = 1
elif isinstance(argument, str):
if argument == "yes":
no_yes_auto = 1
elif argument == "auto":
no_yes_auto = 2
else:
raise SyntaxError(f"unknown load argument {argument}, valid: None, True, False, 'yes', 'auto'")
return no_yes_auto
def file_name_to_absolute_path(file, path, default):
res = file
if file is None:
res = default
# if the given path is a relative path, use the default path (model_dir)
if not os.path.isabs(res):
res = os.path.join(path, res)
return res
class Initializer(ABC):
def __init__(self, pretrainer: Tuple[str, None], pretrainer_params=None, pickle_path=None):
self.pretrainer = pretrainer
self.pretrainer_params = pretrainer_params
self.pickle_path = pickle_path
class ControllerInit(Initializer):
def initialize(self, controller: Controller, env):
if self.pickle_path is not None:
if isinstance(controller, TrainableController):
controller.load(self.pickle_path)
return True
else:
raise AttributeError("attempting to load controller that cannot be loaded")
elif self.pretrainer is not None:
if isinstance(controller, TrainableController):
pretrainer = pretrainer_from_string(self.pretrainer, self.pretrainer_params)
data = pretrainer.get_data(env)
controller.train(data)
return True
else:
raise AttributeError("attempting to pretrain non-trainable controller")
else:
return False
class ModelInit(Initializer):
def initialize(self, forward_model: ForwardModel, env):
if self.pickle_path is not None:
forward_model.load(self.pickle_path)
if self.pretrainer is not None:
pretrainer = pretrainer_from_string(self.pretrainer, self.pretrainer_params)
data = pretrainer.get_data(env)
forward_model.train(data)
return True
else:
return False
class TrajectoryPretrainer(Pretrainer):
def __init__(self, *, file_name):
self.file_name = file_name
def get_data(self, env):
with open(self.file_name, "rb") as f:
rollouts = pickle.load(f)
return rollouts
class CheckpointManager:
def __init__(
self,
*,
working_dir,
path="checkpoints",
rollouts_file="rollouts",
controller_file="controller",
forward_model_file="forward_model",
reward_dict_file="reward_info.npy",
load,
save,
save_every_n_iter=1,
restart_every_n_iter=None,
keep_only_last=False,
exclude_rollouts=False,
max_runtime=1e6,
):
self.rollouts_file = rollouts_file
self.base_path = file_name_to_absolute_path(path, path=working_dir, default="checkpoints")
self.path = self.base_path
self._check_for_latest()
self.controller_file = controller_file if controller_file is not None else "controller"
self.model_file = forward_model_file if forward_model_file is not None else "forward_model"
self.reward_dict_file = reward_dict_file
self.save = save
self.load_no_yes_auto = _parse_no_yes_auto(load)
self.save_every_n_iter = save_every_n_iter
self.keep_only_last = keep_only_last
self.restart_every_n_iter = restart_every_n_iter
self.do_restarting = self.restart_every_n_iter is not None and self.restart_every_n_iter > 0
if self.do_restarting:
assert self.load_no_yes_auto > 0, "load flag needs to be 'auto' or True"
self.exclude_rollouts = exclude_rollouts
self.was_controller_loaded = False
self.was_model_loaded = False
self.were_buffers_loaded = False
self.was_reward_dict_loaded = False
self.max_runtime = max_runtime
self.main_loop_start_time = time.time()
def _check_for_latest(self):
latest = f"{self.base_path}_latest"
if os.path.exists(latest):
self.path = latest
def update_checkpoint_dir(self, step):
if self.keep_only_last:
self.path = self.base_path
else:
self.path = f"{self.base_path}_{step:03}"
Path(self.path).mkdir(parents=True, exist_ok=True)
def finalized_checkpoint(self):
# create link to latest checkpoint
latest = f"{self.base_path}_latest"
if os.path.islink(latest):
os.remove(latest)
if not os.path.exists(latest):
os.symlink(Path(self.path).name, latest)
def save_main_state(self, main_state):
f = os.path.join(self.path, "main_state.npy")
main_state.save(f)
def load_main_state(self, main_state):
f = os.path.join(self.path, "main_state.npy")
if self.load_no_yes_auto > 0:
try:
main_state.load(f)
except FileNotFoundError as e:
if self.load_no_yes_auto == 1:
raise e
else:
print(f"auto loading: Notice: could not load main state from {f}")
def store_buffer(self, rollout_buffer: RolloutBuffer, suffix=""):
if self.rollouts_file is not None and not self.exclude_rollouts:
with open(os.path.join(self.path, self.rollouts_file + suffix), "wb") as f:
pickle.dump(rollout_buffer, f)
def load_buffer(self, suffix, rollout_buffer: RolloutBuffer):
if self.rollouts_file is not None and self.load_no_yes_auto > 0 and not self.exclude_rollouts:
file_path = os.path.join(self.path, self.rollouts_file + suffix)
try:
with open(file_path, "rb") as f:
r = pickle.load(f)
rollout_buffer.__dict__ = r.__dict__
print(f"loaded rollout buffer from {file_path}, buffer size: {len(r)}")
self.were_buffers_loaded = True
except FileNotFoundError as e:
if self.load_no_yes_auto == 1: # in 'yes'/True mode it has to load it
print(f"Error: could not load rollout buffer from {file_path}")
raise e
else:
print(f"auto loading: Notice: could not load rollout buffer from {file_path}")
def load_controller(self, controller):
file = os.path.join(self.path, self.controller_file)
if isinstance(controller, TrainableController):
if self.load_no_yes_auto == 1:
controller.load(file)
self.was_controller_loaded = True
elif self.load_no_yes_auto == 2:
try:
controller.load(file)
self.was_controller_loaded = True
except FileNotFoundError:
print(f"auto loading: Notice: could not load controller from {file}")
if self.was_controller_loaded:
print(f"loaded controller from file: {file}")
def store_controller(self, controller: Controller):
if self.save and self.controller_file is not None and isinstance(controller, TrainableController):
controller.save(os.path.join(self.path, self.controller_file))
def load_forward_model(self, forward_model):
file = os.path.join(self.path, self.model_file)
if self.load_no_yes_auto == 1:
forward_model.load(file)
self.was_model_loaded = True
elif self.load_no_yes_auto == 2:
try:
forward_model.load(file)
self.was_model_loaded = True
except FileNotFoundError:
print(f"auto loading: Notice: could not load model from {file}")
if self.was_model_loaded:
print(f"loaded forward_model from file: {file}")
def store_forward_model(self, forward_model: ForwardModel, save_as_onnx=False):
if self.save and forward_model and self.model_file is not None:
forward_model.save(os.path.join(self.path, self.model_file))
if save_as_onnx:
forward_model.save_onnx(os.path.join(self.path, self.model_file + ".onnx"))
def save_reward_dict(self, reward_dict):
if self.save and reward_dict and self.reward_dict_file is not None:
np.save(os.path.join(self.path, self.reward_dict_file), reward_dict)
def load_reward_dict(self, reward_dict):
file = os.path.join(self.path, self.reward_dict_file)
if self.load_no_yes_auto == 1:
reward_dict = np.load(file).item() if os.path.exists(file) else {}
self.was_reward_dict_loaded = True
elif self.load_no_yes_auto == 2:
try:
reward_dict = np.load(file).item()
self.was_reward_dict_loaded = True
except FileNotFoundError:
print(f"auto loading: Notice: could not load reward_dict from {file}")
if self.was_reward_dict_loaded:
print(f"loaded reward_dict from file: {file}")
return reward_dict
def _runtime(self):
return (time.time() - self.main_loop_start_time) / (60 * 60) # runtime in hours
def maybe_restart_job(self):
if self._runtime() > self.max_runtime:
print(f"returning with exit code 3 for restarting (max runtime exceeded {self.max_runtime})")
return True
else:
return False
def get_controllers(params, env, forward_model, imitation):
expert_controller = None
if (
"initial_controller" not in params
or params.initial_controller is None
or params.initial_controller
in [
"none",
"null",
None,
]
):
initial_controller = None
else:
controller_class = controller_from_string(params.initial_controller)
if issubclass(controller_class, ModelBasedController):
initial_controller = controller_class(
env=env, forward_model=forward_model, **params.initial_controller_params
)
else:
initial_controller = controller_class(env=env, **params.initial_controller_params)
if "controller" not in params:
main_controller = None
else:
controller_class = controller_from_string(params.controller)
if issubclass(controller_class, ParallelController):
controller_params = recursive_objectify(params.controller_params, make_immutable=False)
else:
controller_params = params.controller_params
if issubclass(controller_class, ModelBasedController):
main_controller = controller_class(env=env, forward_model=forward_model, **controller_params)
else:
main_controller = controller_class(env=env, **controller_params)
if main_controller.needs_data:
if params.controller_data_sources is None or len(params.controller_data_sources) < 1:
raise AttributeError("controller needs data to be trained but no source given")
for s in params.controller_data_sources:
if s not in valid_data_sources:
raise KeyError(f"Invalid data source {s}, valid ones are " + ("".join(valid_data_sources)))
if imitation is not None:
expert_controller_class = controller_from_string(params.imitation.expert_controller)
if issubclass(expert_controller_class, ModelBasedController):
expert_controller = expert_controller_class(env=env, forward_model=forward_model, **imitation.expert_params)
else:
expert_controller = expert_controller_class(env=env, **imitation.expert_params)
if isinstance(expert_controller, NeedsPolicyController):
expert_controller.set_policy(main_controller)
return initial_controller, main_controller, expert_controller
def maybe_load_checkpoint(
params,
buffer,
imitation,
main_state,
main_controller,
forward_model,
reward_info_full,
):
if "checkpoints" in params: # we could check whether we want to check for rollout_length consistency?
checkpoint_manager = CheckpointManager(working_dir=params.working_dir, **params.checkpoints)
for buffer_path in glob.glob(os.path.join(checkpoint_manager.path, checkpoint_manager.rollouts_file) + "*"):
buffer_name = os.path.basename(buffer_path)
buffer_suffix = remove_prefix(buffer_name, "rollouts")
if buffer_name not in buffer:
buffer[buffer_name] = RolloutBuffer()
checkpoint_manager.load_buffer(suffix=buffer_suffix, rollout_buffer=buffer[buffer_name])
if forward_model:
checkpoint_manager.load_forward_model(forward_model)
if main_controller:
checkpoint_manager.load_controller(main_controller)
if reward_info_full is not None:
reward_info_full = checkpoint_manager.load_reward_dict(reward_info_full)
checkpoint_manager.load_main_state(main_state)
else:
checkpoint_manager = CheckpointManager(working_dir=params.working_dir, load=False, save=False)
return checkpoint_manager, reward_info_full
# function that we use for saving a checkpoint
def save_checkpoint(
cpm: CheckpointManager,
main_state,
buffer,
forward_model,
main_controller,
reward_info_full,
final=False,
):
step = main_state.iteration
if cpm is not None and cpm.save:
if final or step % cpm.save_every_n_iter == 0:
cpm.update_checkpoint_dir(step)
cpm.save_main_state(main_state)
for buffer_name, data in buffer.items():
buffer_suffix = remove_prefix(buffer_name, "rollouts")
cpm.store_buffer(rollout_buffer=data, suffix=buffer_suffix)
if forward_model is not None:
cpm.store_forward_model(forward_model)
if main_controller is not None:
cpm.store_controller(main_controller)
if reward_info_full is not None:
cpm.save_reward_dict(reward_info_full)
cpm.finalized_checkpoint()
def maybe_init_model(
params,
forward_model,
checkpoint_manager,
need_pretrained_checkpoint,
env,
):
if (
forward_model
and "forward_model_init" in params
and params.forward_model_init is not None
and not checkpoint_manager.was_model_loaded
):
model_init = ModelInit(**params.forward_model_init)
need_pretrained_checkpoint = model_init.initialize(forward_model, env) or need_pretrained_checkpoint
def maybe_init_controller(
params,
main_controller,
checkpoint_manager,
need_pretrained_checkpoint,
env,
):
if "controller_init" in params and not checkpoint_manager.was_controller_loaded:
controller_init = ControllerInit(**params.controller_init)
need_pretrained_checkpoint = controller_init.initialize(main_controller, env) or need_pretrained_checkpoint
def maybe_prefill_buffer(
params,
rollout_buffer,
):
logger = allogger.get_logger("main")
if "prefill_buffer" in params:
preloaded_rollouts = []
for buffer_path in params.prefill_buffer:
logger.info(f"Loading buffer from {buffer_path}")
with open(buffer_path, "rb") as f:
buffer = pickle.load(f)
preloaded_rollouts.extend(buffer.rollouts)
rollout_buffer.extend(preloaded_rollouts)
def maybe_do_initial_rollouts(
params,
initial_controller,
checkpoint_manager,
):
do_initial_rollouts = initial_controller is not None and params.initial_number_of_rollouts > 0
if checkpoint_manager.were_buffers_loaded:
do_initial_rollouts = False
return do_initial_rollouts
def maybe_do_restarts(checkpoint_manager, main_state, do_initial_rollouts, total_iterations):
potentially_restart = False
current_max_iterations = total_iterations
if checkpoint_manager.do_restarting:
if main_state.iteration + checkpoint_manager.restart_every_n_iter < total_iterations:
current_max_iterations = (
main_state.iteration + checkpoint_manager.restart_every_n_iter + 1 * do_initial_rollouts
)
print(f"Due to restarting we are only running {checkpoint_manager.restart_every_n_iter} iterations now")
potentially_restart = True
return potentially_restart, current_max_iterations
def main_iterator(main_state, current_max_iterations, total_iterations, postfix_dict):
t_main = tqdm_context(
range(main_state.iteration, current_max_iterations),
initial=main_state.iteration,
total=total_iterations,
desc="training_it",
postfix_dict=postfix_dict if postfix_dict is not None else {},
additional_info_flag=True,
)
gen_main = next(t_main)
return t_main, gen_main
| martius-lab/cee-us | mbrl/initialization.py | initialization.py | py | 18,460 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "mbrl.controllers.cem_memory.CEMDataProcessor",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.path.isabs",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name"... |
32542351130 | from google.cloud import storage
from configparser import ConfigParser
from google.oauth2 import service_account
from googleapiclient.discovery import build
from utils.demo_io import (
get_initial_slide_df_with_predictions_only,
get_fovs_df,
get_top_level_dirs,
populate_slide_rows,
get_histogram_df,
list_blobs_with_prefix,
)
import polars as pl
from gcsfs import GCSFileSystem
# Parse in key and bucket name from config file
cfp = ConfigParser()
cfp.read("config.ini")
service_account_key_json = cfp["GCS"]["gcs_storage_key"]
gs_url = cfp["GCS"]["bucket_url"]
bucket_name = gs_url.replace("gs://", "")
# Define GCS file system so files can be read
gcs = GCSFileSystem(token=service_account_key_json)
# Authenticate using the service account key file
credentials = service_account.Credentials.from_service_account_file(
service_account_key_json, scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
client = storage.Client.from_service_account_json(service_account_key_json)
# Create a storage client
storage_service = build("storage", "v1", credentials=credentials)
# Get an initial, mostly-unpopulated slide dataframe
slide_df = get_initial_slide_df_with_predictions_only(
client, bucket_name, gcs, cutoff=20
)
print(slide_df)
slide_files_raw = list_blobs_with_prefix(
client, bucket_name, prefix="patient_slides_analysis", cutoff=40
)["blobs"]
# select a couple of slide
slides_of_interest = [
slidefile.split("/")[-1].strip(".npy")
for slidefile in slide_files_raw
if slidefile.endswith(".npy")
]
# repopulate rows on some slides with spot counts missing, and set threshold
new_slide_df = populate_slide_rows(
client,
bucket_name,
gcs,
slide_df,
slides_of_interest[:4],
set_threshold=0.8,
)
print(new_slide_df)
# get DF for these slides' FOVs
fov_df = get_fovs_df(client, bucket_name, slides_of_interest)
print(fov_df)
| alice-gottlieb/nautilus-dashboard | examples/gcs_example.py | gcs_example.py | py | 1,919 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gcsfs.GCSFileSystem",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials.from_service_account_file",
"line_number": 30,
"u... |
18794245329 | from typing import Any
import pytest
from click.testing import CliRunner
from happi.prompt import enforce_list, read_user_dict
from happi.utils import EnforceError
def test_user_dict(runner: CliRunner):
default_dict = {'default_key': 'default_value'}
# normal operation
with runner.isolation('key1\nvalue1'):
result = read_user_dict('prompt', default=default_dict)
assert result == {'key1': 'value1'}
# read default
with runner.isolation('\n'):
result = read_user_dict('prompt', default=default_dict)
assert result == default_dict
# reject keywords
with runner.isolation('is\nnotis\n1\n'):
result = read_user_dict('prompt', default=default_dict)
assert result == {'notis': 1}
# replace values
with runner.isolation('key\n1\nkey\n2'):
result = read_user_dict('prompt', default=default_dict)
assert result == {'key': 2}
@pytest.mark.parametrize('user_in', (
['a', 'b', 2, 3],
"['a', 'b', 2, 3]"
))
def test_enforce_list(user_in: Any):
result = enforce_list(user_in)
assert result == ['a', 'b', 2, 3]
@pytest.mark.parametrize('user_in', (
'a',
"['a', 'b'=2, 2, 3]",
'[1,2,3,4.5.4]'
))
def test_enforce_list_fail(user_in: str):
with pytest.raises(EnforceError):
_ = enforce_list(user_in)
| pcdshub/happi | happi/tests/test_prompt.py | test_prompt.py | py | 1,324 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "click.testing.CliRunner",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "happi.prompt.read_user_dict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "happi.prompt.read_user_dict",
"line_number": 21,
"usage_type": "call"
},
{
"a... |
10343389752 | from PyPDF2 import PdfFileWriter, PdfFileReader,PdfFileMerger
import os
import glob
import time
def remove_blank():
files = os.listdir('temp')
print(len(files))
for i in range(len(files)):
input_pdf = PdfFileReader(open(f"temp/temp{i}.pdf", "rb"))
output_pdf = PdfFileWriter()
output_pdf.addPage(input_pdf.getPage(0))
with open(f"temp\_temp{i}.pdf", "wb") as output_file:
output_pdf.write(output_file)
def remove_all_file():
folder_path = 'temp'
file_list = glob.glob(folder_path + '/*')
for file_path in file_list:
os.remove(file_path)
def merge(name):
remove_blank()
time.sleep(2)
pdf_count = 0
files = os.listdir('temp')
pdf_count = int(len(files)/2)
merger = PdfFileMerger()
print(pdf_count)
for i in range(pdf_count):
file_name = f"temp/_temp{i}.pdf"
merger.append(open(file_name, "rb"))
with open(f"pdfs/{name}.pdf", "wb") as output_file:
merger.write(output_file)
time.sleep(2)
time.sleep(2)
# merge("new")
| neel-jotaniya/product_detail | pdf.py | pdf.py | py | 1,080 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileWriter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"... |
7089966560 | from bottle import redirect, request, post
import uuid
import jwt
import time
from check_if_logged_in import check_if_logged_in
from global_values import *
@post("/new-tweet")
def new_tweet_post():
if not check_if_logged_in():
return redirect("/login")
# title
new_tweet_title = request.forms.get("new_tweet_title")
# description
new_tweet_description = request.forms.get("new_tweet_description")
# can't post empty tweet without either title or description
if not new_tweet_title:
if not new_tweet_description:
return redirect("/new-tweet?error=empty")
# decode jwt cookie to get user information for tweet
user_information = jwt.decode(request.get_cookie("jwt", secret="secret"), JWT_KEY, algorithms=["HS256"])
user_username = user_information["username"]
user_first_name = user_information["first_name"]
user_id = user_information["id"]
# append new tweet with values
new_tweet = {
"id": str(uuid.uuid4()),
"user_id": user_id,
"first_name": user_first_name,
"username": user_username,
"title": new_tweet_title,
"description": new_tweet_description,
"time_posted": time.localtime(),
"time_edited": None,
}
TWEETS.append(new_tweet)
return redirect("/dashboard")
| sara616b/01_mandatory_web_dev | new_tweet_post.py | new_tweet_post.py | py | 1,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "check_if_logged_in.check_if_logged_in",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bottle.redirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bottle.request.forms.get",
"line_number": 15,
"usage_type": "call"
},
{
"ap... |
72871590183 | import os
import torch
from torch.utils.data import Dataset
import torchvision
import data
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
PGT_LOAD_FILE = "pseudo_gt_thesis.pth"
CLEAN_PGT_LOAD_FILE = "cleaned_pseudo_gt_thesis.pth"
# img size: (200,400)
class TabletopWorkDataset(Dataset):
OBJ_ID = 3
BB_SIZE = (560,560)
def __init__(self, config, return_pgt=False, cleaned_pgt=True, return_gt=False, start=0, end=20000, material=True):
"""
Dataloader for the RGBD dataset to work on the dataset using different modes:
Arguments:
start: Start index of the interval of images to use from the dataset
end: End index of the interval of images to use from the dataset
mode: Defines the mode of the dataset which determines the actions taken on the dataset:
0: Dataset is initialized to generate pseudo ground truths
1: Dataset is initialized to return the pseudo ground truths and images used for training
"""
super().__init__()
self.data_dir = data.id_to_path[config["obj_id"]] if material else data.id_to_path_uniform[config["obj_id"]]
self.config = config
self.meta_info = load_meta_info(self.data_dir)
self.obj_id = self.meta_info[2]['OBJECT_ID']
self.len = end-start
self.start = start
self.return_pgt = return_pgt
self.cleaned_pgt = cleaned_pgt
self.return_gt = return_gt
def __getitem__(self, idx):
# Define the frame from the given index
idx += self.start
frame_id = str(idx).zfill(6)
data_frame_dir = os.path.join(self.data_dir, frame_id)
# Load the data needed by the pose labeling scheme
try:
image = torch.load(os.path.join(data_frame_dir,"rgb_tensor.pt"))
seg_data = torch.load(os.path.join(data_frame_dir, "seg_tensor.pt"))
depth_data = torch.load(os.path.join(data_frame_dir, "depth_tensor.pt"))
loaded=True
except:
try:
meta_data = torch.load(os.path.join(data_frame_dir, "meta_data.pt"))
image = torch.from_numpy(meta_data['rgb_tensor'][...,:3])
seg_data = torch.from_numpy(meta_data['seg_tensor'].astype("int32"))
depth_data = torch.from_numpy(meta_data['depth_tensor'])
loaded=True
except:
image = -torch.eye(4)
seg_data = -torch.eye(4)
depth_data = -torch.eye(4)
print(f"Data for frame {idx} could not been loaded!")
loaded=False
if self.config["verbose"]:
torchvision.utils.save_image(image.permute(2,0,1)/255., "output/pose_labeling_scheme/org_image.png")
torchvision.utils.save_image(depth_data.unsqueeze(0), "output/pose_labeling_scheme/depth_image.png")
#seg_mask = (seg_data==self.obj_id).int()
#depth_data = depth_data * seg_mask
intrinsic = torch.tensor([2/self.meta_info[0][0,0], 2/self.meta_info[0][1,1],image.shape[1]/2, image.shape[0]/2])# (fx, fy, cx, cy)
pseudo_ground_truth = -torch.eye(4)
ground_truth = -torch.eye(4)
if self.return_pgt:
try:
if self.cleaned_pgt:
pseudo_ground_truth = torch.load(os.path.join(self.data_dir, frame_id, CLEAN_PGT_LOAD_FILE))
else:
pseudo_ground_truth = torch.load(os.path.join(self.data_dir, frame_id, PGT_LOAD_FILE))
except:
pseudo_ground_truth = -torch.eye(4)
loaded=False
if pseudo_ground_truth is None or pseudo_ground_truth.shape[0]==0:
pseudo_ground_truth = -torch.eye(4)
loaded=False
if self.return_gt:
try:
ground_truth = torch.load(os.path.join(self.data_dir, frame_id, "ground_truth.pt"))
except:
ground_truth = torch.load(os.path.join(self.data_dir, frame_id, "gt.pt"))
return {
"image": image,
"seg_image": seg_data,
"depth_image": depth_data,
"intrinsic": intrinsic,
"pseudo_gt": pseudo_ground_truth,
"ground_truth": ground_truth,
"index": idx,
"loaded": loaded
}
def __len__(self):
return self.len
def load_meta_info(data_dir):
meta_data = torch.load(os.path.join(data_dir, "000000", "meta_data.pt"))
# Assumption that the camera calibration is consistent for all frames
projection_matrix = meta_data['projection_matrix']
view_matrix = meta_data['view_matrix']
seg_id = meta_data['seg_ids']
return projection_matrix, view_matrix, seg_id
| LDenninger/se3_pseudo_ipdf | data/tabletop/pls_dataset.py | pls_dataset.py | py | 4,837 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Da... |
19988594566 | import bpy
bl_info = {
"name": "Apply Modifier",
"author": "mate.sus304",
"version": (1, 2),
"blender": (2, 80, 0),
"location": "View3D > Object > Apply",
"description": "Apply All Modifier to Mesh Object",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"website":"https://sites.google.com/site/matosus304blendernotes/home",
"category": "Object"}
######################################################
is_legacy = (bpy.app.version < (2, 80, 0))
def select_object(obj, value):
if is_legacy:
obj.select = value
else:
obj.select_set(value)
def get_active_object():
if is_legacy:
return bpy.context.scene.objects.active
else:
return bpy.context.window.view_layer.objects.active
def set_active_object(obj):
if is_legacy:
bpy.context.scene.objects.active = obj
else:
bpy.context.window.view_layer.objects.active = obj
def clear_shape_keys(Name):
obj = get_active_object()
if obj.data.shape_keys is None:
return True
obj.active_shape_key_index = len(obj.data.shape_keys.key_blocks) - 1
while len(obj.data.shape_keys.key_blocks) > 1:
if obj.data.shape_keys.key_blocks[obj.active_shape_key_index].name == Name:
obj.active_shape_key_index = 0
else:
bpy.ops.object.shape_key_remove()
bpy.ops.object.shape_key_remove()
def clone_object(Obj):
tmp_obj = Obj.copy()
tmp_obj.name = "applymodifier_tmp_%s"%(Obj.name)
tmp_obj.data = tmp_obj.data.copy()
tmp_obj.data.name = "applymodifier_tmp_%s"%(Obj.data.name)
if is_legacy:
bpy.context.scene.objects.link(tmp_obj)
else:
bpy.context.scene.collection.objects.link(tmp_obj)
return tmp_obj
def delete_object(Obj):
if Obj.data.users == 1:
Obj.data.user_clear()
for scn in bpy.data.scenes:
try:
if is_legacy:
scn.objects.unlink(Obj)
else:
scn.collection.objects.unlink(Obj)
except:
pass
######################################################
def apply_modifier(target_object=None, target_modifiers=None):
if target_object is None:
obj_src = get_active_object()
else:
obj_src = target_object
if not obj_src.modifiers:
#if object has no modifier then skip
return True
#make single user
if obj_src.data.users != 1:
obj_src.data = obj_src.data.copy()
if obj_src.data.shape_keys is None:
#if object has no shapekeys, just apply modifier
for x in obj_src.modifiers:
if target_modifiers is None or x.name in target_modifiers:
try:
bpy.ops.object.modifier_apply(modifier=x.name)
except RuntimeError:
pass
return True
obj_fin = clone_object(obj_src)
set_active_object(obj_fin)
clear_shape_keys('Basis')
if target_modifiers is None:
target_modifiers = []
for x in obj_fin.modifiers:
if x.show_viewport:
target_modifiers.append(x.name)
for x in target_modifiers:
try:
bpy.ops.object.modifier_apply(modifier=x)
except RuntimeError:
pass
flag_on_error = False
list_skipped = []
for i in range(1, len(obj_src.data.shape_keys.key_blocks)):
tmp_name = obj_src.data.shape_keys.key_blocks[i].name
obj_tmp = clone_object(obj_src)
set_active_object(obj_tmp)
clear_shape_keys(tmp_name)
for x in target_modifiers:
try:
bpy.ops.object.modifier_apply(modifier=x)
except RuntimeError:
pass
select_object(obj_tmp, True)
set_active_object(obj_fin)
try:
bpy.ops.object.join_shapes()
obj_fin.data.shape_keys.key_blocks[-1].name = tmp_name
except:
flag_on_error = True
list_skipped.append(tmp_name)
delete_object(obj_tmp)
if flag_on_error:
def draw(self, context):
self.layout.label("Vertex Count Disagreement! Some shapekeys skipped.")
for s in list_skipped:
self.layout.label(s)
bpy.context.window_manager.popup_menu(draw, title="Error", icon='INFO')
return False
tmp_name = obj_src.name
tmp_data_name = obj_src.data.name
obj_fin.name = tmp_name + '.tmp'
obj_src.data = obj_fin.data
obj_src.data.name = tmp_data_name
for x in target_modifiers:
obj_src.modifiers.remove(obj_src.modifiers[x])
delete_object(obj_fin)
set_active_object(obj_src)
class OBJECT_OT_apply_all_modifier(bpy.types.Operator):
    """Apply All Modifier to Selected Mesh Object"""
    bl_idname = "object.apply_all_modifier"
    bl_label = "Apply_All_Modifier"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Record the current selection by name before deselecting everything.
        target_names = [obj.name for obj in bpy.context.selected_objects]
        bpy.ops.object.select_all(action='DESELECT')
        for name in target_names:
            apply_modifier(target_object=bpy.data.objects[name])
        # Restore the original selection.
        for name in target_names:
            select_object(bpy.data.objects[name], True)
        return {'FINISHED'}
class OBJECT_OT_apply_selected_modifier(bpy.types.Operator):
    """Apply Selected Modifier to Active Mesh Object"""
    bl_idname = "object.apply_selected_modifier"
    bl_label = "Apply_Selected_Modifier"
    bl_options = {'REGISTER', 'UNDO'}
    # One checkbox per modifier slot shown in the dialog (max 32 entries).
    bv: bpy.props.BoolVectorProperty(name="Booleans", description="test value", size=32)
    # Number of modifiers on the active object; refreshed by draw().
    mod_count = 0

    @classmethod
    def poll(cls, context):
        # Operator is only available for an active mesh object.
        active = context.object
        return active and active.type == 'MESH'

    def execute(self, context):
        active = get_active_object()
        active_name = active.name
        bpy.ops.object.select_all(action='DESELECT')
        # Collect the names of every modifier whose checkbox was ticked.
        chosen = [
            bpy.data.objects[active_name].modifiers[i].name
            for i in range(self.mod_count)
            if self.bv[i]
        ]
        apply_modifier(target_object=bpy.data.objects[active_name], target_modifiers=chosen)
        select_object(active, True)
        return {'FINISHED'}

    def invoke(self, context, event):
        # Show the checkbox dialog instead of executing immediately.
        return context.window_manager.invoke_props_dialog(self)

    def draw(self, context):
        active = context.object
        self.mod_count = len(active.modifiers)
        column = self.layout.column()
        for i in range(self.mod_count):
            column.prop(self, "bv", text=active.modifiers[i].name, index=i)
# Registration
def apply_all_modifier_button(self, context):
    """Menu entry invoking the apply-all-modifier operator."""
    layout = self.layout
    layout.operator(OBJECT_OT_apply_all_modifier.bl_idname, text="Apply All Modifier")
def apply_selected_modifier_button(self, context):
    """Menu entry invoking the apply-selected-modifier operator."""
    layout = self.layout
    layout.operator(OBJECT_OT_apply_selected_modifier.bl_idname, text="Apply Selected Modifier")
def register():
    """Register both operators and append their entries to the Apply menu."""
    for cls in (OBJECT_OT_apply_all_modifier, OBJECT_OT_apply_selected_modifier):
        bpy.utils.register_class(cls)
    for entry in (apply_all_modifier_button, apply_selected_modifier_button):
        bpy.types.VIEW3D_MT_object_apply.append(entry)
def unregister():
    """Unregister both operators and remove their menu entries."""
    for cls in (OBJECT_OT_apply_all_modifier, OBJECT_OT_apply_selected_modifier):
        bpy.utils.unregister_class(cls)
    for entry in (apply_all_modifier_button, apply_selected_modifier_button):
        bpy.types.VIEW3D_MT_object_apply.remove(entry)
# Allow running the addon directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| Taremin/ApplyModifier | __init__.py | __init__.py | py | 7,817 | python | en | code | 29 | github-code | 36 | [
{
"api_name": "bpy.app",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"lin... |
6658450371 | from django.contrib import admin
from .models import User, Supplier
class UserAdmin(admin.ModelAdmin):
    # Admin configuration for the User model.
    # These fields cannot be edited from the change form.
    readonly_fields = ("last_login", "password", "phone_no", "email")
    list_display = (
        "email",
        "first_name",
        "last_name",
        "is_active",
        "created_at",
    )
    list_filter = ("is_active",)
    search_fields = ("email", "first_name", "last_name",)
    ordering = ("-created_at",)  # newest accounts first
class SupplierAdmin(admin.ModelAdmin):
    # Admin configuration for the Supplier model.
    # readonly_fields = ("owner", "company_name", "company_location", "rc_number", 'government_id')
    list_display = ("owner", "company_name", "company_location", "rc_number", 'government_id', 'is_verified')
    list_filter = ("is_verified",)
    search_fields = ("company_name", "company_location", "rc_number",)
# Register both models with the default admin site.
admin.site.register(User, UserAdmin)
admin.site.register(Supplier, SupplierAdmin)
| Corestreamng/adzmart-supplier | adzmart-develop/apps/authentication/admin.py | admin.py | py | 866 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 17,
"usage_type": "attribute"
... |
31033799131 | import numpy as np
import os, pickle
import random
import torch
import torch.nn as nn
from collections import deque
from torch import Tensor
from torch import tensor
from time import time, sleep
import src.model_greedy
# Pick the compute device once; every tensor/model below is moved to it.
if torch.cuda.is_available():
    cuda = torch.device('cuda')
else:
    cuda = torch.device('cpu')
# Checkpoint locations, relative to the working directory.
data_path = './outputs/'
model_path = data_path+'model.bin'
memory_path = data_path+'memory.bin'
# Debug sink; Model.forward appends layer outputs here when run as a script.
test = []
# Ensure the output/log directories exist.
if not 'outputs' in os.listdir():
    os.mkdir('outputs')
if not 'logs' in os.listdir():
    os.mkdir('logs')
# Pre-trained greedy baseline model, loaded at import time.
greedy_model = src.model_greedy.Model()
greedy_model.load_state_dict(torch.load(data_path+'model_greedy.bin'))
class Model(nn.Module):
    """Q-network: two circular-padded conv layers followed by a 3-layer MLP.

    Input:  (batch, 46, 7, 11) feature planes.
    Output: (batch, 4) Q-values, one per action.
    """
    input_shape = (46, 7, 11)
    output_shape = 4

    def __init__(self):
        super(Model, self).__init__()
        channels = 64
        self.conv1 = nn.Conv2d(self.input_shape[0], channels, (3, 3), bias=False, padding=1, padding_mode='circular')
        self.conv2 = nn.Conv2d(channels, channels, (3, 3), bias=False, padding=1, padding_mode='circular')
        self.lrelu = nn.LeakyReLU()
        self.relu = nn.ReLU()
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.flatten = nn.Flatten()
        self.ln1 = nn.Linear(channels*7*11, channels)
        self.ln2 = nn.Linear(channels, channels)
        self.ln3 = nn.Linear(channels, self.output_shape)
        # Plain list (not ModuleList); the conv/linear members above are the
        # registered submodules, the activations here hold no parameters.
        self.layers = [
            self.conv1,
            nn.LeakyReLU(),
            self.conv2,
            nn.LeakyReLU(),
            self.flatten,
            self.ln1,
            nn.LeakyReLU(),
            self.ln2,
            nn.LeakyReLU(),
            self.ln3,
            nn.LeakyReLU()
        ]
        self.criterion = nn.MSELoss()
        # BUGFIX: the optimizer used to be re-created on every call to
        # gradientDescent(), which discarded Adam's running first/second
        # moment estimates each step. Create it once so state persists.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.0001)

    def forward(self, x):
        """Run the layer stack; moves the input to the module-level device."""
        x = x.to(cuda)
        for l in self.layers:
            # Debug hook: when run as a script, dump the final layer's output.
            if __name__ == '__main__' and l == self.layers[10]:
                print("\n", l, l(x).data.cpu().detach().numpy())
                test.append(l(x).data.cpu().detach().numpy())
            try:
                x = l(x)
            except Exception as exc:
                # Was `assert 0, ...` inside a bare except; asserts vanish
                # under -O, so raise a real error with the cause chained.
                raise RuntimeError(f"something wrong with layer {l}") from exc
        return x

    def gradientDescent(self, y_pred, y_true):
        """Take one MSE gradient step with the persistent Adam optimizer."""
        self.optimizer.zero_grad()
        loss = self.criterion(y_pred, y_true)
        loss.backward()
        self.optimizer.step()
class DQN:
    """Deep Q-learning agent: replay memory, training schedule, persistence.

    gamma: discount factor; batch_size: replay sample size; freq: train
    every `freq` remembered transitions; memory_limit: max transitions kept.
    """
    def __init__(self, gamma=0.9, batch_size=256, freq=5, memory_limit=20000):
        self.model = Model()
        self.model.to(cuda)
        self.gamma = gamma
        self.memory = list()  # replay buffer of [s, a, r, done, s'] entries
        self.memory_limit = memory_limit
        self.batch_size = batch_size
        self.learn_freq = freq
        self.save_freq = 5000  # checkpoint every 5000 steps
        self.step = 0

    def remember(self, state, action, reward, done, next_state):
        """Store one transition; periodically train and checkpoint."""
        self.step += 1
        self.memory.append([state.to(cuda), action.to(cuda), reward, int(done), next_state.to(cuda)])
        if done:
            # Terminal transitions are stored twice to up-weight them.
            self.memory.append([state.to(cuda), action.to(cuda), reward, int(done), next_state.to(cuda)])
        while len(self.memory) > self.memory_limit:
            self.memory.pop(0)
        size = min(self.batch_size, len(self.memory))
        if self.step % self.learn_freq == 0:
            self.train(random.sample(self.memory, size))
        if self.step % (self.learn_freq*5) == 0:
            # Occasionally also train on the most recent transitions.
            self.train(self.memory[-size:])
        if self.step % self.save_freq == 0:
            self.save()

    def train(self, memory, epochs=1):
        """Run `epochs` Q-learning updates on a batch of transitions."""
        for _ in range(epochs):
            state = torch.cat([sars[0] for sars in memory], axis=0).to(cuda)
            action = torch.cat([sars[1] for sars in memory], axis=0).to(cuda).view(-1, 4)
            reward = torch.tensor([sars[2] for sars in memory], device=cuda).view(-1, 1)
            done = torch.tensor([sars[3] for sars in memory], device=cuda).view(-1, 1)
            next_state = torch.cat([sars[4] for sars in memory], axis=0).to(cuda)
            oldQ = self.model(state)
            # Bellman target; terminal states contribute the reward only.
            targetQ = (1-done)*torch.max(self.model(next_state).detach(), axis=1, keepdim=True).values
            # Only the taken action's Q-value is moved toward the target.
            targetQ = (1-action)*oldQ + action*(reward + self.gamma*targetQ)
            self.model.gradientDescent(oldQ, targetQ)

    def save(self):
        """Persist model weights and replay memory, retrying until success."""
        while True:
            try:
                with open(model_path, 'wb') as f:
                    torch.save(self.model.state_dict(), f)
                print("MODEL SAVED")
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the retry loop unkillable.
            except Exception:
                print("Warning: model saving failed, retring to save")
                sleep(0.1)
                continue
            break
        while True:
            try:
                with open(memory_path, 'wb') as f:
                    pickle.dump(self.memory, f)
                print("MEMORY SAVED")
            except Exception:
                print("Warning: memory saving failed, retring to save")
                sleep(0.1)
                continue
            break

    def load(self, load_memory=False):
        """Load checkpointed weights (and optionally memory) if present."""
        if 'model.bin' in os.listdir(data_path):
            while True:
                try:
                    self.model.load_state_dict(torch.load(model_path))
                    print("MODEL LOADED")
                # BUGFIX: narrowed from a bare `except:` (see save()).
                except Exception:
                    print("Warning: model loading failed, retring to load")
                    sleep(0.1)
                    continue
                break
        if 'memory.bin' in os.listdir(data_path) and load_memory:
            while True:
                try:
                    with open(memory_path, 'rb') as f:
                        self.memory = pickle.load(f)
                    print("MEMORY LOADED")
                except Exception:
                    print("Warning: memory loading failed, retring to load")
                    sleep(0.1)
                    continue
                break

    def turnoff(self):
        """Flush all state to disk before shutdown."""
        self.save()
# Module-level singleton agent. `global` at module scope is a no-op; kept
# from the original source.
global Q
Q = DQN()
# Warm-start from the last checkpoint when one exists; otherwise start fresh.
try:
    Q.model.load_state_dict(torch.load(model_path))
    print("MODEL LOADED")
except:
    print("NEW MODEL")
if __name__ == '__main__':
    # Smoke test: push a few inputs through the Q-network.
    print("model.py is working")
    inp = torch.zeros((1, 46, 7, 11))
    q = tensor(inp)
    # for i in range(41, 45):
    #     inp[0][i][:] = 1
    #     # q = torch.cat((q, inp), axis=0)
    #     p = Q.model(inp)
    #     inp[0][i][:] = 0
    #     # print(p)
    # Toggle one cell in feature plane 12 at each position and evaluate.
    for r, c in [[2, 5], [3, 6], [4, 5], [3, 4]]:
        inp[0][12][r][c] = 1
        p = Q.model(inp)
        inp[0][12][r][c] = 0
    # print(p.data)
    # print(np.mean(np.abs(test[0]-test[1])))
# print(np.mean(np.abs(test[0]-test[1])))
| atakanyasar/hungry-geese | src/model.py | model.py | py | 7,803 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.device",
... |
75127901544 | from cravat.cravat_report import CravatReport
import sys
import datetime
import re
import pandas as pd
import cravat
import json
import pyreadr
import os
class Reporter(CravatReport):
def setup (self):
self.filenames = []
self.filename = None
self.filename_prefix = None
if self.savepath == None:
self.filename_prefix = "cravat_result"
else:
self.filename_prefix = self.savepath
self.levels_to_write = self.get_standardized_module_option(
self.confs.get("pages", ["variant"])
)
self.separate_header_file = (
self.get_standardized_module_option(
self.confs.get("separate-header-file", "true")
)
== True
)
self.zip = (
self.get_standardized_module_option(self.confs.get("zip", "false")) == True
)
self.show_default_cols_only = (
self.get_standardized_module_option(
self.confs.get("show-default-columns-only", "true")
)
== True
)
self.cols_to_display = [
'base__hugo',
'base__chrom',
'base__pos',
'base__ref_base',
'base__alt_base',
]
self.colno_to_display_hugo = self.cols_to_display.index('base__hugo')
self.colno_to_display_chrom = self.cols_to_display.index('base__chrom')
self.colno_to_display_pos = self.cols_to_display.index('base__pos')
self.colno_to_display_ref = self.cols_to_display.index('base__ref_base')
self.colno_to_display_alt = self.cols_to_display.index('base__alt_base')
self.colname_display_dict = {
'base__hugo': 'group_id',
'base__chrom': 'chr',
'base__pos': 'pos',
'base__ref_base': 'ref',
'base__alt_base': 'alt',
}
self.display_select_columns = {}
self.display_select_columns['variant'] = len(self.cols_to_display) > 0
self.module_col_sep = "."
self.colnos_to_display = {}
self.colnames_to_display = {}
if self.display_select_columns['variant'] == False and self.show_default_cols_only:
db = sqlite3.connect(self.dbpath)
c = db.cursor()
q = f'select name from sqlite_master where name like "%_header"'
c.execute(q)
levels = [v[0].split("_")[0] for v in c.fetchall()]
for level in levels:
q = f"select col_name, col_def from {level}_header"
c.execute(q)
for row in c.fetchall():
(col_name, col_def) = row
col_def = json.loads(col_def)
if "hidden" not in col_def or col_def["hidden"] == False:
if col_name not in self.cols_to_display:
self.cols_to_display.append(col_name)
self.headers = {}
self.dataframe_cols = {}
self.dataframe_colnos = {}
self.dataframe_headers = {}
self.colno_to_colname = {}
self.filename_postfix = 'cfgenesis.RData'
self.data = {}
self.wgs_reader = cravat.get_wgs_reader('hg38')
self.enstnov_ensgnov = {}
data_path = os.path.dirname(os.path.abspath(__file__))
# hugo synonyms
f = open(os.path.join(data_path, 'data', 'hugo_synonyms.txt'))
line = f.readline()
toks = line.split('\t')
app_symbol_colno = toks.index('Approved symbol')
prev_symbols_colno = toks.index('Previous symbols')
#alias_symbols_colno = toks.index('Alias symbols')
self.hugo_synonyms = {}
for line in f:
toks = line.split('\t')
app_symbol = toks[app_symbol_colno]
prev_symbols = [v.strip() for v in toks[prev_symbols_colno].split(',')]
#alias_symbols = [v.strip() for v in toks[alias_symbols_colno].split(',')]
for symbol in prev_symbols:
self.hugo_synonyms[symbol] = app_symbol
#for symbol in alias_symbols:
# self.hugo_synonyms[symbol] = app_symbol
f.close()
# enst to ensg
f = open(os.path.join(data_path, 'data', 'ensg_enst.txt'))
for line in f:
[ensg, enst] = line[:-1].split('\t')
self.enstnov_ensgnov[self.remove_version(enst)] = self.remove_version(ensg)
f.close()
# canonical enst
f = open(os.path.join(data_path, 'data', 'MANE.GRCh38.v0.9.summary.txt'))
f.readline()
self.mane_ensgnv_to_enstnv = {}
self.mane_hugo_to_canonical_enst = {}
self.mane_hugos = []
self.mane_hugo_to_ensg = {}
for line in f:
toks = line[:-1].split('\t')
ensg = toks[1]
hugo = toks[3]
#if hugo in self.hugo_synonyms:
# hugo = self.hugo_synonyms[hugo]
enst = toks[7]
ensgnv = self.remove_version(ensg)
enstnv = self.remove_version(enst)
self.mane_ensgnv_to_enstnv[ensgnv] = enstnv
self.mane_hugos.append(hugo)
self.mane_hugo_to_canonical_enst[hugo] = enst
self.mane_hugo_to_ensg[hugo] = ensgnv
f.close()
# enst alen
f = open(os.path.join(data_path, 'data', 'enst_alen.txt'))
self.enstnv_to_alens = {}
for line in f:
[enst, alen] = line[:-1].split('\t')
self.enstnv_to_alens[self.remove_version(enst)] = int(alen)
f.close()
# hugo to ensg
f = open(os.path.join(data_path, 'data', 'hugo_ensg_chrom.txt'))
self.hugo_to_ensg = {}
self.hugo_to_chrom = {}
self.ensg_to_chrom = {}
for line in f:
[hugo, ensg, chrom] = line[:-1].split('\t')
ensg = ensg.split('.')[0]
#if hugo in self.hugo_synonyms:
# hugo = self.hugo_synonyms[hugo]
self.hugo_to_ensg[hugo] = ensg
if hugo not in self.hugo_to_chrom:
self.hugo_to_chrom[hugo] = []
if ensg not in self.ensg_to_chrom:
self.ensg_to_chrom[ensg] = chrom
self.hugo_to_chrom[hugo].append(chrom)
f.close()
self.csq_consequence_to_oc_so = {
'splice_acceptor_variant': 'splice_site_variant',
'splice_donor_variant': 'splice_site_variant',
'frameshift_variant': 'frameshift_elongation,frameshift_truncation'
}
self.no_mane_hugos = {}
if self.filterpath is None:
self.filter_name = None
else:
self.filter_name = os.path.basename(self.filterpath)
if self.filter_name not in ['coding1.json', 'coding2.json', 'coding3.json', 'coding_noncoding_1.json', 'conding_noncoding_2.json']:
print('\nfilter filename should be one of coding1.json, coding2.json, coding3.json, coding_noncoding_1.json, and coding_noncoding_2.json. Exiting.')
return False
def get_standardized_module_option(self, v):
tv = type(v)
if tv == str:
if "," in v:
v = [val for val in v.split(",") if val != ""]
if v == "true":
v = True
elif v == "false":
v = False
return v
def should_write_level(self, level):
if self.levels_to_write is None:
return True
elif level in self.levels_to_write:
return True
else:
return False
def write_preface (self, level):
self.level = level
if self.should_write_level(level) == False:
return
    def write_header (self, level):
        """Build the per-level header bookkeeping: which columns go to the
        output, which are packed into dataframe columns, and which column
        numbers hold the annotator values used by the filter methods.

        NOTE(review): this method mixes `self.level` (set by write_preface)
        with the `level` argument when indexing the bookkeeping dicts —
        presumably they are always equal; confirm against the caller.
        """
        if self.should_write_level(level) == False:
            return
        self.headers[self.level] = []
        self.dataframe_colnos[self.level] = []
        self.dataframe_cols[self.level] = []
        self.dataframe_headers[self.level] = {}
        self.colno_to_colname[self.level] = {}
        # table columns
        for colgroup_dict in self.colinfo[self.level]['colgroups']:
            colgroup_name = colgroup_dict['name']
            minfo = cravat.admin_util.get_local_module_info(colgroup_name)
            if minfo is None:
                continue
            conf = minfo.conf
            if 'output_columns' not in conf:
                continue
            for output_dict in conf['output_columns']:
                # Table-typed columns are tracked separately as dataframe
                # columns and removed from the plain display list.
                if output_dict.get('table', False) == True:
                    colname = colgroup_name + '__' + output_dict['name']
                    if colname in self.cols_to_display:
                        self.cols_to_display.remove(colname)
                    self.dataframe_cols[self.level].append(colname)
                    self.dataframe_headers[self.level][colname] = [v['name'] for v in output_dict['table_headers']]
        colno = 0
        columns = self.colinfo[level]["columns"]
        # Cache the column number of every annotator column the filter
        # methods index into later.
        for i in range(len(columns)):
            col = columns[i]
            colname = col['col_name']
            self.colno_to_colname[self.level][colno] = colname
            self.headers[self.level].append(colname)
            if colname in self.dataframe_cols[self.level]:
                self.dataframe_colnos[self.level].append(colno)
            if colname == 'genehancer__target_genes':
                self.colno_genehancertargetgenes = colno
            elif colname == 'base__so':
                self.colno_so = colno
            elif colname == 'base__coding':
                self.colno_coding = colno
            elif colname == 'extra_vcf_info__CSQ':
                self.colno_csq = colno
            elif colname == 'extra_vcf_info__CSQ_SYMBOL':
                self.colno_csq_symbol = colno
            elif colname == 'extra_vcf_info__CSQ_Consequence':
                self.colno_csq_consequence = colno
            elif colname == 'extra_vcf_info__CSQ_LoF':
                self.colno_csq_lofs = colno
            elif colname == 'extra_vcf_info__CSQ_Gene':
                self.colno_csq_gene = colno
            elif colname == 'extra_vcf_info__CSQ_BIOTYPE':
                self.colno_csq_biotype = colno
            elif colname == 'extra_vcf_info__CSQ_Feature':
                self.colno_csq_ensts = colno
            elif colname == 'base__transcript':
                self.colno_transcript = colno
            elif colname == 'base__all_mappings':
                self.colno_all_mappings = colno
            elif colname == 'metasvm__score':
                self.colno_metasvm_score = colno
            elif colname == 'fathmm_xf__score':
                self.colno_fathmm_xf_score = colno
            elif colname == 'sift__prediction':
                self.colno_sift_prediction = colno
            elif colname == 'lrt__lrt_pred':
                self.colno_lrt_lrt_pred = colno
            elif colname == 'polyphen2__hdiv_pred':
                self.colno_polyphen2_hdiv_pred = colno
            elif colname == 'polyphen2__hvar_pred':
                self.colno_polyphen2_hvar_pred = colno
            colno += 1
        colno = 0
        # Map each displayed column to its position and output name.
        self.colnos_to_display[level] = []
        self.colnames_to_display[level] = []
        for module_col_name in self.cols_to_display:
            [module_name, col_name] = module_col_name.split('__')
            for colno in range(len(columns)):
                if columns[colno]["col_name"] == module_col_name:
                    self.colnos_to_display[level].append(colno)
                    self.colnames_to_display[level].append(self.colname_display_dict[module_col_name])
                    break
        self.data[self.level] = []
def remove_version (self, uid):
return uid.split('.')[0]
def convert_csq_consequence (self, c):
cs = []
for tok in c.split('&'):
cs.append(self.csq_consequence_to_oc_so.get(c, c))
cs = '&'.join(cs)
return cs
def has_coding_so (self, sos):
if 'frameshift_elongation' in sos \
or 'frameshift_truncation' in sos \
or 'complex_substitution' in sos \
or 'splice_site_variant' in sos \
or 'start_lost' in sos \
or 'stop_gained' in sos \
or 'stop_lost' in sos \
or 'transcript_ablation' in sos \
or 'inframe_insertion' in sos \
or 'inframe_deletion' in sos \
or 'exon_loss_variant' in sos \
or 'missense_variant' in sos:
return True
else:
return False
def parse_mapping (self, mapping):
[enst, _, _, sos, _, _] = mapping
return enst, sos
def find_canonical_mapping (self, hugo, all_mappings, canonical_enstnv):
for mapping in all_mappings[hugo]:
enst, sos = self.parse_mapping(mapping)
if self.remove_version(enst) == canonical_enstnv:
return mapping
return None
def parse_all_mappings_str (self, all_mappings_str):
all_mappings_t = [v.strip() for v in all_mappings_str.split(';')]
all_mappings = {}
for mapping_t in all_mappings_t:
mapping = mapping_t.split(':')
try:
hugo = mapping[1]
#if hugo in self.hugo_synonyms:
# hugo = self.hugo_synonyms[hugo]
except:
print(f'#####################|\nAn exception occurred. Please contact the OpenCRAVAT team with the following information:')
print(f'#exception: getting hugo from mapping\nall_mappings_t={all_mappings_t}')
print(f'mapping={mapping}')
return {}
if hugo not in all_mappings:
all_mappings[hugo] = []
all_mappings[hugo].append(mapping)
return all_mappings
    def get_canonicals(self, row, all_mappings, chrom):
        """Choose a canonical transcript and SO term per gene for this row.

        Combines OpenCRAVAT mappings with the VEP CSQ columns: MANE
        transcripts win; otherwise the longest transcript is used.
        Side effect: fills self.ensgs (hugo -> group id) used by the
        filter methods. Returns (canonical_enstnvs, canonical_sos),
        both keyed by hugo.
        """
        # Which hugos are in MANE and which are not.
        hugos_in_mane = []
        other_hugos = []
        csq_hugos_in_mane = []
        csq_other_hugos = []
        # NOTE(review): csq_biotypes is the raw column value and is never
        # split on ';' like csq_hugos/csq_ensts below, yet it is indexed
        # per-transcript as csq_biotypes[i] — looks like it should be split
        # too; confirm against the CSQ column format.
        csq_biotypes = row[self.colno_csq_biotype]
        csq_hugos = row[self.colno_csq_symbol]
        csq_ensts = row[self.colno_csq_ensts]
        if csq_ensts is None:
            csq_ensts = []
        else:
            csq_ensts = csq_ensts.split(';')
        if csq_hugos is None:
            csq_hugos = []
        else:
            csq_hugos = csq_hugos.split(';')
        for hugo in all_mappings:
            if hugo in self.mane_hugos and hugo not in hugos_in_mane:
                hugos_in_mane.append(hugo)
            elif hugo not in other_hugos:
                other_hugos.append(hugo)
        # Only protein-coding ENST entries from CSQ are considered.
        for i in range(len(csq_hugos)):
            hugo = csq_hugos[i]
            if csq_ensts[i].startswith('ENST') == False:
                continue
            if csq_biotypes[i] != 'protein_coding':
                continue
            if hugo in self.mane_hugos and hugo not in csq_hugos_in_mane:
                csq_hugos_in_mane.append(hugo)
            elif hugo not in csq_other_hugos:
                csq_other_hugos.append(hugo)
        # ENSG and canonical ENST
        self.ensgs = {}
        canonical_ensts = {}
        canonical_enstnvs = {}
        # MANE transcript as canonical
        for hugo in hugos_in_mane:
            self.ensgs[hugo] = self.mane_hugo_to_ensg[hugo]
            enst = self.mane_hugo_to_canonical_enst[hugo]
            canonical_ensts[hugo] = enst
            canonical_enstnvs[hugo] = self.remove_version(enst)
        for hugo in csq_hugos_in_mane:
            if hugo in self.ensgs:
                continue
            self.ensgs[hugo] = self.mane_hugo_to_ensg[hugo]
            enst = self.mane_hugo_to_canonical_enst[hugo]
            canonical_ensts[hugo] = enst
            canonical_enstnvs[hugo] = self.remove_version(enst)
        for hugo in other_hugos:
            if hugo in self.hugo_to_ensg and chrom in self.hugo_to_chrom[hugo]:
                self.ensgs[hugo] = self.hugo_to_ensg[hugo]
            elif hugo in csq_hugos:
                # NOTE(review): csq_genes is not defined anywhere in this
                # method, so this branch raises NameError when reached.
                # Probably should be row[self.colno_csq_gene].split(';');
                # confirm and fix.
                self.ensgs[hugo] = csq_genes[csq_hugos.index(hugo)]
            else:
                print(f'ENSG ID for {hugo} was not found. Using {hugo} as group_id')
                self.ensgs[hugo] = hugo
        for hugo in csq_other_hugos:
            if hugo in self.ensgs:
                continue
            if hugo in self.hugo_to_ensg and chrom in self.hugo_to_chrom[hugo]:
                self.ensgs[hugo] = self.hugo_to_ensg[hugo]
            elif hugo in csq_hugos:
                # NOTE(review): same undefined csq_genes as above.
                self.ensgs[hugo] = csq_genes[csq_hugos.index(hugo)]
            else:
                print(f'ENSG ID for {hugo} was not found. Using {hugo} as group_id')
                self.ensgs[hugo] = hugo
        # Longest transcript as canonical
        for hugo in other_hugos:
            mappings = all_mappings[hugo]
            enst = mappings[0][0]
            enstnv = self.remove_version(enst)
            canonical_ensts[hugo] = enst
            canonical_enstnvs[hugo] = enstnv
            if enstnv in self.enstnv_to_alens:
                canonical_alen = self.enstnv_to_alens[enstnv]
            else:
                canonical_alen = -1
            for mapping in mappings[1:]:
                enst, sos = self.parse_mapping(mapping)
                enstnv = self.remove_version(enst)
                if enstnv in self.enstnv_to_alens:
                    alen = self.enstnv_to_alens[enstnv]
                else:
                    alen = -1
                if alen > canonical_alen:
                    canonical_alen = alen
                    canonical_ensts[hugo] = enst
                    canonical_enstnvs[hugo] = enstnv
        for hugo in csq_other_hugos:
            enst = csq_ensts[0]
            enstnv = self.remove_version(enst)
            if enst.startswith('ENST'):
                canonical_ensts[hugo] = enst
                canonical_enstnvs[hugo] = enstnv
            else:
                canonical_ensts[hugo] = None
                canonical_enstnvs[hugo] = None
            if enstnv in self.enstnv_to_alens:
                canonical_alen = self.enstnv_to_alens[enstnv]
            else:
                canonical_alen = -1
            for i in range(1, len(csq_ensts)):
                enst = csq_ensts[i]
                enstnv = self.remove_version(enst)
                if enst.startswith('ENST') == False:
                    continue
                if enstnv in self.enstnv_to_alens:
                    alen = self.enstnv_to_alens[enstnv]
                else:
                    alen = -1
                if canonical_ensts[hugo] is None or alen > canonical_alen:
                    canonical_alen = alen
                    canonical_ensts[hugo] = enst
                    canonical_enstnvs[hugo] = enstnv
        # SO for canonical transcripts
        canonical_sos = {}
        for hugo in list(set(hugos_in_mane) | set(other_hugos)):
            canonical_mapping = self.find_canonical_mapping(hugo, all_mappings, canonical_enstnvs[hugo])
            if canonical_mapping is not None:
                enst, sos = self.parse_mapping(canonical_mapping)
                canonical_sos[hugo] = sos
        # NOTE(review): csq_consequences is also the raw (unsplit) column
        # value but is indexed per-transcript below — see csq_biotypes note.
        csq_consequences = row[self.colno_csq_consequence]
        for hugo in list(set(csq_hugos_in_mane) | set(csq_other_hugos)):
            canonical_enstnv = canonical_enstnvs[hugo]
            for i in range(len(csq_ensts)):
                if csq_ensts[i].split('.')[0] == canonical_enstnv:
                    sos = self.convert_csq_consequence(csq_consequences[i])
                    if hugo not in canonical_sos:
                        canonical_sos[hugo] = sos
                    else:
                        canonical_sos[hugo] += ',' + sos
                    break
        return canonical_enstnvs, canonical_sos
def get_lof_of_enstnv(self, enstnv, csq_lofs, csq_enstnvs):
if len(csq_lofs) == 0:
return None
if enstnv in csq_enstnvs:
return csq_lofs[csq_enstnvs.index(enstnv)]
else:
return None
def get_all_mappings(self, row):
all_mappings_t = row[self.colno_all_mappings]
if all_mappings_t != '':
all_mappings = self.parse_all_mappings_str(all_mappings_t)
else:
all_mappings = {}
    def get_so_of_enstnv(self, row, hugo, enstnv, csq_enstnvs, all_mappings, csq_sos):
        # Return the sequence-ontology term for a transcript, preferring the
        # OpenCRAVAT mapping over the VEP CSQ annotation; None when neither
        # source has the transcript.
        # NOTE(review): here the transcript is read from mapping[3] and the
        # SO from mapping[2], while parse_mapping() unpacks transcript from
        # mapping[0] and SO from mapping[3]. One of the two index layouts is
        # likely wrong — confirm against the mapper's record format.
        if hugo in all_mappings:
            for mapping in all_mappings[hugo]:
                if enstnv == mapping[3].split('.')[0]:
                    return mapping[2]
        if enstnv in csq_enstnvs:
            return self.convert_csq_consequence(csq_sos[csq_enstnvs.index(enstnv)])
        else:
            return None
def get_csq_lofs(self, row):
csq_lofs = row[self.colno_csq_lofs]
if csq_lofs is None:
return []
else:
return csq_lofs.split(';')
def get_csq_enstnvs(self, row):
csq_ensts = row[self.colno_csq_ensts]
if csq_ensts is None:
return []
else:
return [v.split('.')[0] for v in csq_ensts.split(';')]
def run_coding1_filter(self, row, all_mappings, canonical_enstnvs, canonical_sos, csq_sos):
csq_lofs = self.get_csq_lofs(row)
csq_enstnvs = self.get_csq_enstnvs(row)
metasvm_score = row[self.colno_metasvm_score]
fathmm_xf_score = row[self.colno_fathmm_xf_score]
group_ids = set()
for hugo in canonical_enstnvs:
enstnv = canonical_enstnvs[hugo]
lof = self.get_lof_of_enstnv(enstnv, csq_lofs, csq_enstnvs)
so = self.get_so_of_enstnv(
row, hugo, enstnv, csq_enstnvs, all_mappings, csq_sos) # oc over vep
if lof == 'HC'\
or\
(so == 'missense_variant' and metasvm_score is not None and metasvm_score > 0)\
or\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5 and\
so in ['complex_substitution',
'exon_loss_variant',
'frameshift_variant',
'frameshift_elongation',
'frameshift_truncation',
'inframe_insertion',
'inframe_deletion'
'missense_variant',
'splice_site_variant',
'splice_acceptor_variant',
'splice_donor_variant',
'start_lost',
'stop_gained',
'stop_lost',
'transcript_ablation'])\
or\
(so == 'synonymous_variant' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5):
group_ids.add(self.ensgs[hugo])
return group_ids
def run_coding2_filter(self, row, all_mappings, canonical_enstnvs, canonical_sos, csq_sos):
csq_lofs = self.get_csq_lofs(row)
csq_enstnvs = self.get_csq_enstnvs(row)
fathmm_xf_score = row[self.colno_fathmm_xf_score]
sift_prediction = row[self.colno_sift_prediction]
lrt_pred = row[self.colno_lrt_lrt_pred]
polyphen2_hdiv_pred = row[self.colno_polyphen2_hdiv_pred]
polyphen2_hvar_pred = row[self.colno_polyphen2_hvar_pred]
group_ids = set()
for hugo in canonical_enstnvs:
enstnv = canonical_enstnvs[hugo]
lof = self.get_lof_of_enstnv(enstnv, csq_lofs, csq_enstnvs)
so = self.get_so_of_enstnv(row, hugo, enstnv, csq_enstnvs, all_mappings, csq_sos)
if (\
so == 'missense_variant' and\
sift_prediction == 'Damaging' and\
lrt_pred == 'Deleterious' and\
polyphen2_hdiv_pred is not None and 'P' in polyphen2_hdiv_pred and\
polyphen2_hvar_pred is not None and 'P' in polyphen2_hvar_pred
) or\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5 and so in [
'complex_substitution',
'exon_loss_variant',
'frameshift_variant',
'frameshift_elongation',
'frameshift_truncation',
'inframe_insertion',
'inframe_deletion'
'missense_variant',
'splice_site_variant',
'splice_acceptor_variant',
'splice_donor_variant',
'start_lost',
'stop_gained',
'stop_lost',
'transcript_ablation']\
) or\
(\
so == 'synonymous_variant' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5\
) or\
(lof == 'HC'):
group_ids.add(self.ensgs[hugo])
return group_ids
def run_coding3_filter(self, row, all_mappings, canonical_enstnvs, canonical_sos, csq_sos):
csq_lofs = self.get_csq_lofs(row)
csq_enstnvs = self.get_csq_enstnvs(row)
fathmm_xf_score = row[self.colno_fathmm_xf_score]
sift_prediction = row[self.colno_sift_prediction]
lrt_pred = row[self.colno_lrt_lrt_pred]
polyphen2_hdiv_pred = row[self.colno_polyphen2_hdiv_pred]
polyphen2_hvar_pred = row[self.colno_polyphen2_hvar_pred]
group_ids = set()
for hugo in canonical_enstnvs:
enstnv = canonical_enstnvs[hugo]
lof = self.get_lof_of_enstnv(enstnv, csq_lofs, csq_enstnvs)
so = self.get_so_of_enstnv(row, hugo, enstnv, csq_enstnvs, all_mappings, csq_sos)
if (\
so == 'missense_variant' and\
(\
sift_prediction == 'Damaging' or\
lrt_pred == 'Deleterious' or\
(polyphen2_hdiv_pred is not None and 'P' in polyphen2_hdiv_pred) or\
(polyphen2_hvar_pred is not None and 'P' in polyphen2_hvar_pred)\
)\
)\
or\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5 and so in [
'complex_substitution',
'exon_loss_variant',
'frameshift_variant',
'frameshift_elongation',
'frameshift_truncation',
'inframe_insertion',
'inframe_deletion'
'missense_variant',
'splice_site_variant',
'splice_acceptor_variant',
'splice_donor_variant',
'start_lost',
'stop_gained',
'stop_lost',
'transcript_ablation']) or\
(so == 'synonymous_variant' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5)\
or\
(lof == 'HC'):
group_ids.add(self.ensgs[hugo])
return group_ids
def run_coding_noncoding_filter_1(
self, row, all_mappings, canonical_enstnvs, canonical_sos, csq_sos):
csq_lofs = self.get_csq_lofs(row)
csq_enstnvs = self.get_csq_enstnvs(row)
fathmm_xf_score = row[self.colno_fathmm_xf_score]
metasvm_score = row[self.colno_metasvm_score]
sift_prediction = row[self.colno_sift_prediction]
lrt_pred = row[self.colno_lrt_lrt_pred]
genehancer_feature_name = row[self.colno_genehancer_feature_name]
ensembl_regulatory_build_region = row[self.colno_ensembl_regulatory_build_region]
group_ids = set()
for hugo in canonical_enstnvs:
enstnv = canonical_enstnvs[hugo]
lof = self.get_lof_of_enstnv(enstnv, csq_lofs, csq_enstnvs)
so = self.get_so_of_enstnv(row, hugo, enstnv, csq_enstnvs, all_mappings, csq_sos)
if lof == 'HC'\
or\
(so == 'missense_variant' and metasvm_score is not None and metasvm_score > 0)\
or\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5 and\
so in ['complex_substitution',
'exon_loss_variant',
'frameshift_variant',
'frameshift_elongation',
'frameshift_truncation',
'inframe_insertion',
'inframe_deletion'
'missense_variant',
'splice_site_variant',
'splice_acceptor_variant',
'splice_donor_variant',
'start_lost',
'stop_gained',
'stop_lost',
'transcript_ablation'])\
or\
(so == 'synonymous_variant' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5):
group_ids.add(self.ensgs[hugo])
elif genehancer_feature_name == 'Enhancer' and\
(\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5)\
or\
(ensembl_regulatory_build_region in [\
'CTCF_binding_site', 'TF_binding_site'\
])\
):
genehancer_target_genes = [v.split(':')[0]\
for v in row[self.colno_genehancertargetgenes].split(',')]
for target in genehancer_target_genes:
if target.startswith('ENSG'):
group_ids.add(target)
elif genehancer_feature_name == 'Promoter' and\
(\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5)\
or\
(ensembl_regulatory_build_region in [
'CTCF_binding_site', 'TF_binding_site'
])\
):
genehancer_target_genes = [v.split(':')[0]\
for v in row[self.colno_genehancertargetgenes].split(',')]
for target in genehancer_target_genes:
if target.startswith('ENSG'):
group_ids.add(target)
elif so is not None and 'upstream_gene_variant' in so and\
(\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5)\
or\
(ensembl_regulatory_build_region in [\
'CTCF_binding_site', 'TF_binding_site'\
])\
):
group_ids.add(self.ensgs[hugo])
return group_ids
def run_coding_noncoding_filter_2(
self, row, all_mappings, canonical_enstnvs, canonical_sos, csq_sos):
csq_lofs = self.get_csq_lofs(row)
csq_enstnvs = self.get_csq_enstnvs(row)
fathmm_xf_score = row[self.colno_fathmm_xf_score]
metasvm_score = row[self.colno_metasvm_score]
sift_prediction = row[self.colno_sift_prediction]
lrt_pred = row[self.colno_lrt_lrt_pred]
genehancer_feature_name = row[self.colno_genehancer_feature_name]
ensembl_regulatory_build_region = row[self.colno_ensembl_regulatory_build_region]
group_ids = set()
for hugo in canonical_enstnvs:
enstnv = canonical_enstnvs[hugo]
lof = self.get_lof_of_enstnv(enstnv, csq_lofs, csq_enstnvs)
so = self.get_so_of_enstnv(row, hugo, enstnv, csq_enstnvs, all_mappings, csq_sos)
if lof == 'HC'\
or\
(so == 'missense_variant' and metasvm_score is not None and metasvm_score > 0)\
or\
(fathmm_xf_score is not None and fathmm_xf_score > 0.5 and\
so in ['complex_substitution',
'exon_loss_variant',
'frameshift_variant',
'frameshift_elongation',
'frameshift_truncation',
'inframe_insertion',
'inframe_deletion'
'missense_variant',
'splice_site_variant',
'splice_acceptor_variant',
'splice_donor_variant',
'start_lost',
'stop_gained',
'stop_lost',
'transcript_ablation'])\
or\
(so == 'synonymous_variant' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5):
group_ids.add(self.ensgs[hugo])
elif genehancer_feature_name == 'Enhancer' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5 and\
ensembl_regulatory_build_region in [
'CTCF_binding_site',
'TF_binding_site',
'enhancer',
'open_chromatin_region',
'promoter',
'promoter_flanking_region'
]:
genehancer_target_genes = [v.split(':')[0]\
for v in row[self.colno_genehancertargetgenes].split(',')]
for target in genehancer_target_genes:
if target.startswith('ENSG'):
group_ids.add(target)
elif genehancer_feature_name == 'Promoter' and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5 and\
ensembl_regulatory_build_region in [
'CTCF_binding_site',
'TF_binding_site'
'enhancer',
'open_chromatin_region',
'promoter',
'promoter_flanking_region'
]:
genehancer_target_genes = [v.split(':')[0]\
for v in row[self.colno_genehancertargetgenes].split(',')]
for target in genehancer_target_genes:
if target.startswith('ENSG'):
group_ids.add(target)
elif so is not None and 'upstream_gene_variant' in so and\
fathmm_xf_score is not None and fathmm_xf_score > 0.5 and\
ensembl_regulatory_build_region in [
'CTCF_binding_site',
'TF_binding_site'
'enhancer',
'open_chromatin_region',
'promoter',
'promoter_flanking_region'
]:
group_ids.add(self.ensgs[hugo])
return group_ids
def write_table_row (self, row):
if self.should_write_level(self.level) == False:
return
try:
if len(self.colnos_to_display[self.level]) > 0:
filtered_row = [row[colno] for colno in self.colnos_to_display[self.level]]
else:
filtered_row = row
chrom = filtered_row[self.colno_to_display_chrom]
pos = int(filtered_row[self.colno_to_display_pos])
ref = filtered_row[self.colno_to_display_ref]
alt = filtered_row[self.colno_to_display_alt]
if ref == '-' or alt == '-': # deletion or insertion
chrom = filtered_row[self.colno_to_display_chrom]
pos = pos - 1
prev_base = self.wgs_reader.get_bases(chrom, pos).upper()
if ref != '-' and alt == '-': # deletion
ref = prev_base + ref
alt = prev_base
elif ref == '-' and alt != '-': # insertion
ref = prev_base
alt = prev_base + alt
filtered_row[self.colno_to_display_pos] = pos
filtered_row[self.colno_to_display_ref] = ref
filtered_row[self.colno_to_display_alt] = alt
all_mappings_t = row[self.colno_all_mappings]
if all_mappings_t != '':
all_mappings = self.parse_all_mappings_str(all_mappings_t)
else:
all_mappings = {}
csq_consequences = row[self.colno_csq_consequence]
if csq_consequences is None:
csq_consequences = []
else:
csq_consequences = csq_consequences.split(';')
#coding = row[self.colno_coding]
#genehancertargetgenes = row[self.colno_genehancertargetgenes]
# VEP annotations
#csq = row[self.colno_csq]
#csq_hugos = row[self.colno_csq_symbol]
#csq_genes = row[self.colno_csq_gene]
#csq_lofs = row[self.colno_csq_lofs]
#csq_biotypes = row[self.colno_csq_biotype]
#csq_ensts = row[self.colno_csq_ensts]
#if csq_hugos is None:
# csq_hugos = []
#else:
#csq_hugos = [self.hugo_synonyms[v] if v in self.hugo_synonyms else v\
#for v in csq_hugos.split(';')]
# csq_hugos = csq_hugos.split(';')
#if csq_ensts is None:
# csq_ensts = []
#else:
# csq_ensts = csq_ensts.split(';')
#if csq_genes is not None:
# csq_genes = [m for v in csq_genes.split(',') for m in v.split(';')]
#else:
# csq_toks = csq.split('|')
# for tok in csq_toks:
# if 'ENSG' in tok:
# csq_genes = [m for v in tok.split(',') for m in v.split(';')]
# break
#if csq_genes is None:
# csq_genes = []
#if csq_lofs is not None:
# csq_lofs = csq_lofs.split(';')
#else:
# csq_toks = csq.split('|')
# for tok in csq_toks:
# if 'HC' in tok:
# csq_lofs = tok.split(';')
# break
#if csq_lofs is None:
# csq_lofs = []
#if csq_biotypes is None:
# csq_biotypes = []
#else:
# csq_biotypes = csq_biotypes.split(';')
canonical_enstnvs, canonical_sos = self.get_canonicals(row, all_mappings, chrom)
# Filters
if self.filter_name.startswith('coding1.json'):
group_ids = self.run_coding1_filter(
row,
all_mappings,
canonical_enstnvs,
canonical_sos,
csq_consequences
)
elif self.filter_name.startswith('coding2.json'):
group_ids = self.run_coding2_filter(
row,
all_mappings,
canonical_enstnvs,
canonical_sos,
csq_consequences
)
elif self.filter_name.startswith('coding3.json'):
group_ids = self.run_coding3_filter(
row,
all_mappings,
canonical_enstnvs,
canonical_sos,
csq_consequences
)
elif self.filter_name.startswith('coding_noncoding_1.json'):
group_ids = self.run_coding_noncoding_filter_1(
row,
all_mappings,
canonical_enstnvs,
canonical_sos,
csq_consequences
)
elif self.filter_name.startswith('coding_noncoding_2.json'):
group_ids = self.run_coding_noncoding_filter_2(
row,
all_mappings,
canonical_enstnvs,
canonical_sos,
csq_consequences
)
'''
# GeneHancer targets
if genehancertargetgenes is not None:
genehancertargetgenes = [v.split(':')[0].strip() for v in genehancertargetgenes.split(',')]
for target in genehancertargetgenes:
if target.startswith('ENSG') and target not in group_ids:
group_ids.add(target)
genehancer_target_exists = True
elif target in self.hugo_to_ensg and target in self.hugo_to_chrom and chrom in self.hugo_to_chrom[target]:
ensg = self.hugo_to_ensg[target]
if ensg not in group_ids:
group_ids.add(ensg)
genehancer_target_exists = True
'''
wrong_chrom_ensgs = []
for ensg in group_ids:
if ensg in self.ensg_to_chrom and self.ensg_to_chrom[ensg] != chrom:
wrong_chrom_ensgs.append(ensg)
if len(wrong_chrom_ensgs) > 0:
print(f'@@@ wrong_chrom_ensgs={wrong_chrom_ensgs}')
for ensg in wrong_chrom_ensgs:
group_ids.remove(ensg)
'''
so_ignores = [
'intron_variant',
'synonymous_variant',
'3_prime_UTR_variant',
'5_prime_UTR_variant',
'downstream_gene_variant',
'intergenic_variant',
'non_coding_transcript_exon_variant',
'splice_region_variant',
'start_retained_variant',
'stop_retained_variant',
'mature_miRNA_variant',
'NMD_transcript_variant',
'non_coding_transcript_variant',
'TFBS_ablation',
'TFBS_amplification',
'TF_binding_site_variant',
'regulatory_region_ablation',
'regulatory_region_amplification',
'feature_elongation',
'regulatory_region_variant',
'feature_truncation',
'incomplete_terminal_codon_variant',
]
# Collects group_id.
group_ids = set()
## coding and splice site variant
for hugo in canonical_enstsnv:
if hugo == '': # For example, ENSTR.
continue
sos = None
if hugo not in canonical_sos:
canonical_enstnv = canonical_enstnvs[hugo]
for i in range(len(csq_ensts)):
enstnv = self.remove_version(csq_ensts[i])
if enstnv == canonical_enstnv:
csq_consq = csq_consequences[i]
if ('intron' in csq_consq and not ('splice_donor' in csq_consq or 'splice_acceptor' in csq_consq)) or 'downstream' in csq_consq or 'non_coding' in csq_consq or 'upstream' in csq_consq:
break
elif enstnv not in self.enstnv_to_alens:
print(f'{enstnv} not in oc aalen')
break
elif (self.filter_name == 'coding1' or self.filter_name == 'coding2' or self.filter_name == 'coding3'):
if csq_biotypes[i] != 'protein_coding':
break
elif self.has_coding_so(
if csq_hugos[i] in canonical_sos:
sos = canonical_sos[csq_hugos[i]]
break
for cano_hugo, cano_enstnv in canonical_enstnvs.items():
if cano_enstnv == canonical_enstnv and cano_hugo in canonical_sos:
sos = canonical_sos[cano_hugo]
break
if sos is None:
print(f'##################\nAn exception occurred. Please contact the OpenCRAVAT team with the following information:')
print(f'#exception: sos is None\n#row={row}\ncanonical_enstnvs={canonical_enstnvs}\ncanonical_sos={canonical_sos}\nin mane? {hugo in self.mane_hugos}\nall_mappings={all_mappings}\ncsq_ensts={csq_ensts}\ncsq_hugos={csq_hugos}\ncsq_consequenced={csq_consequences}\ncsq={csq}\nhugo={hugo}')
return
if sos is not None:
break
if sos is None:
continue
else:
sos = canonical_sos[hugo]
if self.has_coding_so(sos):
ensg = self.ensgs[hugo]
group_ids.add(ensg)
## HC Lof from VEP
if len(csq_ensts) == len(csq_lofs):
for hugo in canonical_enstsnv:
canonical_enstnv = canonical_enstsnv[hugo]
if canonical_enstnv is None:
continue
ensg = self.ensgs[hugo]
if ensg in group_ids:
continue
for i in range(len(csq_lofs)):
enst = csq_ensts[i]
enstnv = self.remove_version(enst)
lof = csq_lofs[i]
biotype = csq_biotypes[i]
### LoF HC and BIOTYPE relationship from chr22.sqlite:
# frameshift_variant protein_coding
# frameshift_variant&splice_region_variant protein_coding
# frameshift_variant&start_lost protein_coding
# frameshift_variant&stop_lost protein_coding
# frameshift_variant&stop_retained_variant protein_coding
# splice_acceptor_variant protein_coding
# splice_acceptor_variant&coding_sequence_variant protein_coding
# splice_acceptor_variant&coding_sequence_variant&intron_variant protein_coding
# splice_acceptor_variant&intron_variant protein_coding
# splice_donor_variant protein_coding
# splice_donor_variant&coding_sequence_variant protein_coding
# splice_donor_variant&coding_sequence_variant&intron_variant protein_coding
# splice_donor_variant&intron_variant protein_coding
# stop_gained protein_coding
# stop_gained&frameshift_variant protein_coding
# stop_gained&inframe_insertion protein_coding
# stop_gained&inframe_insertion&splice_region_variant protein_coding
# stop_gained&splice_region_variant protein_coding
### thus, no need for checking BIOTYPE "protein_coding".
if enstnv == canonical_enstnv and lof == 'HC':
group_ids.add(ensg)
break
## GeneHancer
genehancer_target_exists = False
if genehancertargetgenes is not None:
genehancertargetgenes = [v.split(':')[0].strip() for v in genehancertargetgenes.split(',')]
for target in genehancertargetgenes:
if target.startswith('ENSG') and target not in group_ids:
group_ids.add(target)
genehancer_target_exists = True
elif target in self.hugo_to_ensg and target in self.hugo_to_chrom and chrom in self.hugo_to_chrom[target]:
ensg = self.hugo_to_ensg[target]
if ensg not in group_ids:
group_ids.add(ensg)
genehancer_target_exists = True
## 5k upstream
upstream_but_no_canonical = False
if len(csq_consequences) > 0:
for hugo in canonical_enstsnv:
ensg = self.ensgs[hugo]
if ensg in group_ids:
continue
canonical_enstnv = canonical_enstnvs[hugo]
for i in range(len(csq_genes)):
hugo = csq_hugos[i]
#if hugo in self.hugo_synonyms:
# hugo = self.hugo_synonyms[hugo]
ensg = csq_genes[i]
enst = csq_ensts[i]
enstnv = self.remove_version(enst)
consequence = csq_consequences[i]
if hugo == '': # ENSR for example
continue
if 'upstream_gene_variant' in consequence:
if enstnv == canonical_enstnv:
group_ids.add(csq_genes[i])
upstream_but_no_canonical = False
break
else:
upstream_but_no_canonical = True
if len(group_ids) == 0:
errmsgs = set()
correct_so = False
for hugo in canonical_sos:
sos = canonical_sos[hugo].split(',')
if self.has_coding_so(sos):
correct_so = True
break
if correct_so == False:
errmsgs.add(f'no valid so in canonical transcript')
if genehancertargetgenes is not None \
and len(genehancertargetgenes) > 0 \
and genehancer_target_exists == False:
errmsgs.add(f'GeneHancer targets are not ENSG')
if upstream_but_no_canonical:
errmsgs.add('5k upstream on non-canonical transcript')
if len(csq_ensts) == 0:
errmsgs.add('no transcript detected')
if 'HC' in csq_lofs:
correct_lof_canonical_so = False
for lof_i in range(len(csq_lofs)):
lof = csq_lofs[lof_i]
enst = csq_ensts[lof_i]
consequence = csq_consequences[lof_i]
hugo = csq_hugos[lof_i]
if hugo in canonical_enstnvs:
canonical = canonical_enstnvs[hugo]
else:
canonical = ''
if lof == 'HC' and enst.split('.')[0] == canonical\
and consequence not in so_ignores:
correct_lof_canonical_so = True
break
if correct_lof_canonical_so == False:
errmsgs.add('no HC lof for canonical transcript with valid so')
no_canonical_enst = True
for hugo in canonical_enstnvs:
if hugo in all_mappings:
mappings = all_mappings[hugo]
for mapping in mappings:
enstnv = mapping[0].split('.')[0]
if enstnv == canonical_enstnvs[hugo]:
no_canonical_enst = False
break
if len(csq_hugos) > 0:
for enst_i in range(len(csq_ensts)):
enstnv = csq_ensts[enst_i].split('.')[0]
if enstnv.startswith('ENST') == False:
continue
hugo = csq_hugos[enst_i]
#if hugo in self.hugo_synonyms:
# hugo = self.hugo_synonyms[hugo]
try:
if enstnv == canonical_enstnvs[hugo]:
no_canonical_enst = False
break
except:
print(f'hugo={hugo} canonical_enstnvs={canonical_enstnvs}')
print(f'csq_hugos={csq_hugos}')
print(f'row={row}')
raise
if no_canonical_enst:
errmsgs.add('no canonical transcript')
if len(errmsgs) == 0:
print(f'#################\nAn exception occurred. Please contact the OpenCRAVAT team with the following information:')
print(f'#exception: No gene name for {chrom} {pos} {ref} {alt}\n#row={row}\n# csq={csq}\n# row={row}\n# csq_genes={csq_genes}\n# canonical_sos={canonical_sos}\n# coding={coding}\n# csq_lofs={csq_lofs}\n# genehancertargetgenes={genehancertargetgenes}\n# csq_ensts={csq_ensts}\n# csq_consequence={csq_consequences}\n# group_ids={group_ids}\n# canonical_ensts={canonical_ensts}\n# all_mappings={all_mappings}\n# genehancer_target_exists={genehancer_target_exists}\n# errmsgs={errmsgs}')
else:
if chrom.startswith('chr'):
chrom = chrom[3:]
filtered_row[self.colno_to_display_chrom] = chrom
group_ids = list(group_ids)
group_ids.sort()
group_ids = [v for v in group_ids if v != '']
for group_id in group_ids:
filtered_row[self.colno_to_display_hugo] = group_id
self.data[self.level].append([v for v in list(filtered_row)])
'''
if chrom.startswith('chr'):
chrom = chrom[3:]
filtered_row[self.colno_to_display_chrom] = chrom
group_ids = list(group_ids)
group_ids.sort()
group_ids = [v for v in group_ids if v != '']
for group_id in group_ids:
filtered_row[self.colno_to_display_hugo] = group_id
self.data[self.level].append([v for v in list(filtered_row)])
except Exception as e:
print(f'#################\nAn exception occurred. Please contact the OpenCRAVAT team with the following information:')
print(f'#exception: {e}')
import traceback
traceback.print_exc(file=sys.stdout)
print(f'#row={row}')
def end (self):
self.dfs = {}
for level in self.headers.keys():
level_data = pd.DataFrame(self.data[level], columns=self.colnames_to_display[level])
level_data = level_data.drop_duplicates()
self.filename = f'{self.filename_prefix}.{level}.{self.filename_postfix}'
self.filenames.append(self.filename)
if len(level_data) > 0:
pyreadr.write_rdata(self.filename, level_data, df_name=f'{self.filename_prefix}_{level}')
else:
wf = open(self.filename, 'w')
wf.close()
return self.filenames
def main():
    """Entry point: build a Reporter from the CLI arguments and run it."""
    reporter = Reporter(sys.argv)
    reporter.run()
# Allow running this reporter module directly as a script.
if __name__ == '__main__':
    main()
| KarchinLab/open-cravat-modules-karchinlab | reporters/genesis_variant_groupingsreporter/genesis_variant_groupingsreporter.py | genesis_variant_groupingsreporter.py | py | 55,376 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cravat.cravat_report.CravatReport",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cravat.get_wgs_reader",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os... |
19792929420 | from sklearn.ensemble import RandomForestClassifier
from scipy import signal
import pandas as pd
import numpy as np
import statsmodels.api as sm
from sklearn.preprocessing import MinMaxScaler
from statsmodels.tsa.stattools import adfuller
import pickle
from io import BytesIO
def classifier(data_set):
    """Choose a forecasting model for a time-series CSV given as raw bytes.

    Loads three pre-trained artifacts from models/: a frequency encoding
    map, a model-name map, and a random-forest classifier over extracted
    time-series features; returns (indexed_df, final_model).
    """
    # Pre-trained artifacts (pickled at training time).
    with open('models/frequency1.pkl', 'rb') as f:
        frequency = pickle.load(f)
    with open('models/model1.pkl', 'rb') as f:
        model = pickle.load(f)
    with open('models/rfc_tsm1.pkl', 'rb') as f:
        rfc = pickle.load(f)
    print('Model Loaded')
    # data_set: raw CSV bytes; assumes 'Unnamed: 0', 'point_timestamp',
    # 'point_value' columns -- TODO confirm against the caller.
    df = pd.read_csv(BytesIO(data_set))
    df=df.drop('Unnamed: 0',axis=1)
    df['point_timestamp'] = pd.to_datetime(df['point_timestamp'])
    df = df.set_index(['point_timestamp'])
    df = df.fillna(df.mean())
    # Keep an un-scaled copy to return alongside the chosen model name.
    indexed_df = df.copy(deep=True)
    scaler = MinMaxScaler()
    df['point_value']=scaler.fit_transform(df[['point_value']])
    # Feature extraction: ADF stationarity p-value, linear trend slope,
    # lag-1 autocorrelation, volatility, inferred sampling frequency, and
    # the dominant periodogram cycle length.
    dftest = adfuller(df['point_value'], autolag = "AIC")
    trend = np.polyfit(df.index.astype(int), df['point_value'], 1)[0]
    acf_1 = sm.tsa.stattools.acf(df['point_value'], nlags=1)[1]
    volatility = np.std(df['point_value'])
    freq = pd.infer_freq(df.index)
    frequencies, spectrum = signal.periodogram(df['point_value'])
    max_index = spectrum.argmax()
    cyclicity = 1 / frequencies[max_index]
    feature_value = {'Trend': trend,
                     'Autocorrelation at lag 1' : acf_1,
                     'Volatility' : volatility,
                     'Frequency' : freq,
                     'Stationarity': dftest[1],
                     'Cyclicity': cyclicity}
    # Fall back to the hourly encoding when pandas cannot infer a frequency.
    if not feature_value['Frequency']:
        feature_value['Frequency'] = frequency['H']
    else:
        feature_value['Frequency'] = frequency[feature_value['Frequency']]
    pred = rfc.predict(pd.DataFrame(feature_value, index=[0]).values.reshape(1, -1))
    # Map the predicted class value back to its model name.
    final_model=""
    for key, value in model.items():
        if value == pred:
            final_model = key
            break
return indexed_df,final_model | shyamsivasankar/TIME-SERIES-DATA | findClass.py | findClass.py | py | 2,042 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_numbe... |
27734664933 | import os, sys
from http import HTTPStatus
from fastapi import FastAPI
from fastapi import Response
from fastapi_sqlalchemy import DBSessionMiddleware
from dotenv import load_dotenv
from app.main.adapters import fast_api_adapter
from app.domain.usecases import CreateUserParams, CreateUserResponse
from app.main.factories import create_user_factory
from app.main.routes.helpers import HandledError
# Project root (three directory levels up from this file); .env lives there.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
load_dotenv(os.path.join(BASE_DIR, ".env"))
app = FastAPI()
# Per-request SQLAlchemy session middleware.  DATABASE_URL must be present
# in the environment (or the .env loaded above) or this raises KeyError.
app.add_middleware(DBSessionMiddleware, db_url=os.environ["DATABASE_URL"])
# OpenAPI tag group applied to the user routes below.
ROUTES_TAGS = ['User']
@app.get('/hello-world')
def hello_world():
    """Trivial liveness endpoint returning a fixed greeting."""
    payload = {'hello': 'world'}
    return payload
@app.post(
    '/user',
    responses={
        HTTPStatus.CREATED.value: {
            'model': CreateUserResponse
        },
        HTTPStatus.BAD_REQUEST.value: {
            'model': HandledError, 'description': 'Company or tenant not found'
        }
    },
    status_code=HTTPStatus.CREATED,
    tags=ROUTES_TAGS
)
def create_user(body: CreateUserParams, response: Response):
    """Create a user: adapt the FastAPI request and delegate to the use case."""
    # Only the body is consumed by this route; headers/query are
    # intentionally passed as None.
    request = {'body': body, 'headers': None, 'query': None}
    result = fast_api_adapter(request, create_user_factory())
    # Propagate the use case's status code (e.g. 400 on unknown
    # company/tenant) instead of the decorator default.
    response.status_code = result.status_code
    return result.body
| victoroliveirabarros/fastapi-sql | app/main/main.py | main.py | py | 1,309 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
... |
27239521850 | # youtube/youtube_api.py
import google.auth
from google.auth.transport.requests import AuthorizedSession
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from config.config import YOUTUBE_API_KEY
class YoutubeAPI:
    """Thin wrapper over the YouTube Data API v3 for fetching recent Shorts."""

    def __init__(self):
        # Both are populated lazily by authenticate().
        self.credentials = None
        self.client = None

    def authenticate(self):
        """Obtain application-default credentials and build the API client."""
        scopes = ['https://www.googleapis.com/auth/youtube.readonly']
        self.credentials, _ = google.auth.default(scopes=scopes)
        self.client = build('youtube', 'v3', credentials=self.credentials)

    def get_latest_short_videos(self, max_results=10):
        """Return watch URLs for the most recent videos matching '#shorts'."""
        if not self.client:
            self.authenticate()
        search_request = self.client.search().list(
            part='id,snippet',
            q='#shorts',
            type='video',
            maxResults=max_results,
            order='date'
        )
        response = search_request.execute()
        ids = [item['id']['videoId'] for item in response['items']]
        return [f'https://www.youtube.com/watch?v={video_id}' for video_id in ids]
| eddari-me/youtube_shorts_to_instagram | youtube/youtube_api.py | youtube_api.py | py | 1,136 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "google.auth.auth.default",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "google.auth.auth",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "google.auth",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "googleapic... |
36998634880 | from django.shortcuts import render
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from api.models import SunExposure
from api.serializers import SunExposureSerializer
from rest_framework.decorators import api_view
# Create your views here.
@api_view(['GET', 'POST', 'DELETE'])
def sunexposure_list(request):
    """List, create, or bulk-delete SunExposure records.

    GET and DELETE accept an optional 'title' query parameter that is
    matched as a case-insensitive substring against garden_id.
    NOTE(review): the parameter is named 'title' but filters garden_id --
    confirm with API consumers before renaming either side.
    """
    method = request.method
    if method == 'GET':
        queryset = SunExposure.objects.all()
        garden_id = request.GET.get('title', None)
        if garden_id is not None:
            queryset = queryset.filter(garden_id__icontains=garden_id)
        serializer = SunExposureSerializer(queryset, many=True)
        return JsonResponse(serializer.data, safe=False)
    if method == 'POST':
        payload = JSONParser().parse(request)
        serializer = SunExposureSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
    if method == 'DELETE':
        queryset = SunExposure.objects.all()
        garden_id = request.GET.get('title', None)
        if garden_id is not None:
            queryset = queryset.filter(garden_id__icontains=garden_id)
        count = queryset.delete()
        return JsonResponse({'message': '{} SunExposures were deleted successfully!'.format(count[0])}, status=status.HTTP_200_OK)
@api_view(['GET', 'DELETE'])
def sunexposure_detail(request, pk):
    """Retrieve or delete a single SunExposure identified by primary key."""
    try:
        sunexposure = SunExposure.objects.get(pk=pk)
    except SunExposure.DoesNotExist:
        # 404 before dispatching on the HTTP method.
        return JsonResponse({'message': 'The SunExposure does not exist'}, status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        sunexposure_serializer = SunExposureSerializer(sunexposure)
        return JsonResponse(sunexposure_serializer.data)
    elif request.method == 'DELETE':
        sunexposure.delete()
return JsonResponse({'message': 'SunExposure was deleted successfully!'}, status=status.HTTP_200_OK) | ejustis/garden-tracker-api | api/views.py | views.py | py | 2,265 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "api.models.SunExposure.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "api.models.SunExposure.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "api.models.SunExposure",
"line_number": 15,
"usage_type": "name"
... |
26454125237 | import argparse
import gc
import logging
import os
import glob
import pandas as pd
import sys
sys.path.append("../ddn/")
sys.path.append("./")
from collections import defaultdict
import torch
import warnings
warnings.filterwarnings('ignore')
import numpy as np
torch.backends.cudnn.benchmark = True
from matplotlib import pyplot as plt
import matplotlib as mpl
import matplotlib.patches as patches
from matplotlib import pyplot as plt
from scipy.linalg import block_diag
from torch.utils.data import Dataset, DataLoader
#from bernstein import bernstesin_coeff_order10_new
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
from argoverse.visualization.visualize_sequences import viz_sequence
# Script configuration.
num = 10
data_path = "/datasets/argoverse/val/data"
output_dir = "../results/"
t_obs = 20  # observation horizon; was assigned twice with the same value
dt = 0.3
pred = False
pred_array = None
batch_size = 512
dpi = 50
w, h = 200, 200
paths = glob.glob(os.path.join(data_path, "*.csv"))
# Render colors for map geometry and the three track categories.
color = {
    'polygon': '#e6cf93',
    'polygon-outline': '#e6cf93',
    'centerline': '#fceec7',
    'agent': 'blue',
    'av': 'grey',
    'other': 'grey',
    'outline': 'black'
}
# Argoverse map API.  The original instantiated ArgoverseMap() twice -- an
# expensive map load -- so the duplicate construction is removed here.
avm = ArgoverseMap()
def denoise(gt_x, gt_y, w=7):
    """Smooth a trajectory with a forward moving average of width *w*.

    Interior samples (w <= i and i + w <= n) are replaced by the mean of
    the next w points; the first and last w samples are linearly blended
    between the raw endpoint and the nearest fully-averaged value so the
    endpoints stay anchored.  Returns the smoothed (x, y) lists.
    """
    n = len(gt_x)
    smooth_x, smooth_y = [], []
    for i in range(n):
        if i >= w and i + w <= n:
            # Fully inside: plain forward moving average.
            smooth_x.append(np.mean(gt_x[i: i + w]))
            smooth_y.append(np.mean(gt_y[i: i + w]))
        elif i < w:
            # Head: blend from the raw first point toward the first
            # fully-averaged value.
            head_x = np.mean(gt_x[w: w + w])
            head_y = np.mean(gt_y[w: w + w])
            smooth_x.append(gt_x[0] + (head_x - gt_x[0]) * i / w)
            smooth_y.append(gt_y[0] + (head_y - gt_y[0]) * i / w)
        else:
            # Tail: blend from the last fully-averaged value toward the
            # raw last point.  (The y blend factor uses len(gt_y), as in
            # the original.)
            tail_x = np.mean(gt_x[n - w: n - w + w])
            tail_y = np.mean(gt_y[n - w: n - w + w])
            smooth_x.append(tail_x + (gt_x[-1] - tail_x) * (w - (n - i)) / w)
            smooth_y.append(tail_y + (gt_y[-1] - tail_y) * (w - (len(gt_y) - i)) / w)
    return smooth_x, smooth_y
for idx in range(len(paths)):
path = paths[idx]
dff = pd.read_csv(path)
city = dff['CITY_NAME'].values[0]
agent_df = dff[dff['OBJECT_TYPE'] == 'AGENT']
x_a = agent_df['X'].values
y_a = agent_df['Y'].values
x_a, y_a = denoise(x_a, y_a)
av_df = dff[dff['OBJECT_TYPE'] == 'AV']
x_av = av_df['X'].values
y_av = av_df['Y'].values
x_av, y_av = denoise(x_av, y_av)
others_df = dff[dff['OBJECT_TYPE'] == 'OTHERS']
others_dfs = np.array([v for k, v in others_df.groupby('TRACK_ID')], dtype=object)
x_o = {}
y_o = {}
for other_df in others_dfs:
x_other, y_other = other_df['X'].values, other_df['Y'].values
x_other, y_other = denoise(x_other, y_other)
x_o[other_df['TRACK_ID'].values[0]] = x_other
y_o[other_df['TRACK_ID'].values[0]] = other_df['Y'].values
# group by timestamp
dfs = [x for _, x in dff.groupby('TIMESTAMP')]
for ind, df in enumerate(dfs):
agent_df = df[df['OBJECT_TYPE'] == 'AGENT']
others_df = df[df['OBJECT_TYPE'] == 'OTHERS']
others_dfs = [x for _, x in others_df.groupby('TRACK_ID')]
# others_dfs = np.array([v for k, v in others_df.groupby('TRACK_ID')], dtype=object)
av_df = df[df['OBJECT_TYPE'] == 'AV']
# agent
x_traj = agent_df['X'].values
y_traj = agent_df['Y'].values
offsets = [x_a[0], y_a[0]] # offsets for other agents
fig = plt.figure(figsize=(200/dpi,200/dpi), dpi=dpi)
# fig = plt.figure(figsize=(10, 10), dpi=dpi)
x_off = 75
y_off = 75
points = np.array([[x_a[20] - x_off, y_a[20] + y_off],[x_a[20] + x_off, y_a[20] + y_off], [x_a[20] + x_off, y_a[20] - y_off],[x_a[20] - x_off, y_a[20] - y_off],[x_a[20] - x_off, y_a[20] + y_off]])
plt.fill(points[:, 0], points[:, 1], color=color['outline'], zorder=0)
if ind < len(dfs) - 1:
x_off = 0.75
y_off = 1.25
points = np.array([[x_traj[0] - x_off, y_traj + y_off],[x_traj[0] + x_off, y_traj + y_off], [x_traj[0] + x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj + y_off]])
theta = np.arctan2((y_a[ind + 1] - y_a[ind]) , (x_a[ind + 1] - x_a[ind])) - np.pi/2
w = np.zeros(points.shape)
A = np.matrix([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
points = points - np.array([x_traj[0], y_traj[0]])
for i,v in enumerate(points): w[i] = A @ points[i]
plt.fill(w[:, 0] + x_traj[0], w[:, 1] + y_traj[0], color=color['agent'], zorder=5)
plt.scatter(x_traj[0], y_traj[0], color=color['agent'], label='end observed', zorder=5)
# av
x_traj = av_df['X'].values
y_traj = av_df['Y'].values
x_max, y_max = np.max(x_traj), np.max(y_traj)
x_min, y_min = np.min(x_traj), np.min(y_traj)
if ind < len(dfs) - 1:
x_off = 0.75
y_off = 1.25
points = np.array([[x_traj[0] - x_off, y_traj + y_off],[x_traj[0] + x_off, y_traj + y_off], [x_traj[0] + x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj + y_off]])
theta = np.arctan2((y_av[ind + 1] - y_av[ind]) , (x_av[ind + 1] - x_av[ind])) - np.pi/2
w = np.zeros(points.shape)
A = np.matrix([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
points = points - np.array([x_traj[0], y_traj[0]])
for i,v in enumerate(points): w[i] = A @ points[i]
plt.fill(w[:, 0] + x_traj[0], w[:, 1] + y_traj[0], color=color['av'], zorder=4)
plt.scatter(x_traj[-1], y_traj[-1], color=color['av'], zorder=4)
# # others
for indoo, other in enumerate(others_dfs):
x_traj = other['X'].values
y_traj = other['Y'].values
indo = other['TRACK_ID'].values[0]
if ind < len(dfs) - 1 and ind < len(x_o[indo]) - 1 and ind < len(y_o[indo]) - 1:
x_off = 0.75
y_off = 1.25
points = np.array([[x_traj[0] - x_off, y_traj + y_off],[x_traj[0] + x_off, y_traj + y_off], [x_traj[0] + x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj - y_off],[x_traj[0] - x_off, y_traj + y_off]])
theta = np.arctan2((y_o[indo][ind + 1] - y_o[indo][ind]) , (x_o[indo][ind + 1] - x_o[indo][ind])) - np.pi/2
w = np.zeros(points.shape)
A = np.matrix([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
points = points - np.array([x_traj[0], y_traj[0]])
for i,v in enumerate(points): w[i] = A @ points[i]
plt.fill(w[:, 0] + x_traj[0], w[:, 1] + y_traj[0], color=color['other'], zorder=4)
# centerlines
lane_centerlines = []
# Get lane centerlines which lie within the range of trajectories
agent_df = df[df['OBJECT_TYPE'] == 'AGENT']
gt_x = agent_df['X'].values
gt_y = agent_df['Y'].values
x_max, y_max = np.max(x_a) + 50, np.max(y_a) + 50
x_min, y_min = np.min(x_a) - 50, np.min(y_a) - 50
# print(x_max, x_min)
# print(y_max, y_min)
for arr in avm.find_local_lane_polygons([x_min, x_max, y_min, y_max], city):
plt.fill(arr[:, 0], arr[:, 1], color=color['polygon'],zorder=0)
for arr in avm.find_local_lane_polygons([x_min, x_max, y_min, y_max], city):
plt.plot(arr[:, 0], arr[:, 1], color=color['polygon-outline'],zorder=1)
seq_lane_props = avm.city_lane_centerlines_dict[city]
for lane_id, lane_props in seq_lane_props.items():
lane_cl = lane_props.centerline
if (np.min(lane_cl[:, 0]) < x_max and np.min(lane_cl[:, 1]) < y_max and np.max(lane_cl[:, 0]) > x_min and np.max(lane_cl[:, 1]) > y_min):
lane_centerlines.append(lane_cl)
for lane_cl in lane_centerlines:
plt.plot(lane_cl[:, 0], lane_cl[:, 1], color=color['centerline'], alpha=1, linewidth=1, zorder=2)
# plt.legend()
plt.xlim([x_a[20] - 50, x_a[20] + 50])
plt.ylim([y_a[20] - 50, y_a[20] + 50])
import os
try:
os.mkdir('./results/{}'.format(idx))
except:
pass
# plt.set_facecolor('red')
plt.axis('off')
plt.savefig('./results/{}/{}.png'.format(idx,ind), dpi=dpi, bbox_inches='tight')
# fig.canvas.draw()
# data_image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
# data_image = data_image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# np.save('./results/{}/{}.npy'.format(idx,ind), data_image)
# print(data_image.shape)
plt.clf() | Vikr-182/ddn-forecasting | scripts/data_prep.py | data_prep.py | py | 9,023 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numb... |
26116785786 | from genericpath import samefile
import torch
# import mmcv
# from mmseg.apis import init_segmentor#, inference_segmentor, init_cfg
# from mmseg.models import build_segmentor
# from mmcv import ConfigDict
import torchvision
# from SETR.transformer_seg import SETRModel, Vit
import segmentation_models_pytorch as smp
def model_deeplab3(opt, singlecpop=False):
    """Build a DeepLabV3 segmentation model with a ResNet-50 backbone.

    Args:
        opt: config object; opt.n_class gives the number of output classes.
        singlecpop: if True, build a single-channel (1-class) head instead.

    Returns:
        The torchvision DeepLabV3 model.
    """
    n_out = 1 if singlecpop else opt.n_class
    return torchvision.models.segmentation.deeplabv3_resnet50(
        # pretrained=True,  # deliberately left disabled, as before
        progress=True,
        num_classes=n_out,
    )
def model_unet(opt, singlecpop=False):
    """Build an smp U-Net with a ResNet-18 encoder.

    Input channels follow len(opt.x_2D); output channels are opt.n_class,
    or 1 when singlecpop is True.
    """
    out_channels = 1 if singlecpop else opt.n_class
    return smp.Unet(
        encoder_name="resnet18",        # encoder backbone
        encoder_depth=5,
        encoder_weights="imagenet",     # ImageNet-pretrained encoder weights
        in_channels=len(opt.x_2D),      # model input channels
        classes=out_channels,           # model output channels
    )
def model_unet_(opt):
    """Build a U-Net from an mmsegmentation config file.

    NOTE(review): the `mmcv` and `init_segmentor` imports are commented out at
    the top of this file, so calling this raises NameError — confirm before use.
    """
    cfg = mmcv.Config.fromfile('model/unet_cfg.py')
    # cfg = mmcv.Config.fromfile('/home/aya43/flowMagic_data/src/method/model/unet_cfg.py')
    model = init_segmentor(cfg)
    return model
def model_setr(opt, singlecpop=False):
    """Build a SETR (transformer) segmentation model.

    NOTE(review): the `SETRModel` import is commented out at the top of this
    file, so calling this raises NameError — confirm before use.
    """
    model = SETRModel(patch_size=(16, 16),
                    in_channels=len(opt.x_2D),
                    out_channels=opt.n_class if not singlecpop else 1,
                    hidden_size=1024,
                    num_hidden_layers=8,
                    num_attention_heads=8,
                    decode_features=[256, 128, 64, 32])
    # sum(p.numel() for p in model.parameters())
    # Smoke-test the model with a dummy (1, 4, 256, 256) input and print the output shape.
    t1 = torch.rand(1, 4, 256, 256)
    # print("input: " + str(t1.shape))
    print("output: " + str(model(t1).shape))
    return model
def model_setr_(opt):
    """Build a SETR (ViT-MLA) model from an mmsegmentation config file.

    NOTE(review): the `mmcv` and `init_segmentor` imports are commented out at
    the top of this file, so calling this raises NameError — confirm before use.
    """
    cfg = mmcv.Config.fromfile('model/vit_mla_cfg.py')
    # cfg = mmcv.Config.fromfile('/home/aya43/flowMagic_data/src/method/model/vit_mla_cfg.py')
    model = init_segmentor(cfg)
    return model
# Registry mapping the model-name strings accepted in opt.model to their
# builder functions.
model_dict = {
    'setr': model_setr,
    'unet': model_unet,
    'deeplab3': model_deeplab3
}
# Names of all available models. Iterating a dict yields its keys directly,
# so the previous manual append loop is unnecessary.
model_names = list(model_dict)
def create_model(opt, singlecpop):
    """Look up the builder registered for opt.model and construct the model."""
    builder = model_dict[opt.model]
    return builder(opt, singlecpop)
def metafreeze_model(model, opt):
    """Freeze all model weights, then re-enable gradients on the head layers
    used for meta-tuning (depends on the architecture named in opt.model)."""
    # Freeze every parameter first.
    for param in model.parameters():
        param.requires_grad = False
    # Selectively unfreeze the task-specific layers.
    if opt.model == 'setr':
        trainable = list(model.encoder_2D.encoder.layer[5].parameters())
        trainable += list(model.encoder_2D.final_dense.parameters())
        for param in trainable:
            param.requires_grad = True
    if opt.model == 'unet':
        for param in model.decoder.parameters():
            param.requires_grad = True
    return model
| aya49/flowMagic_data | method/models.py | models.py | py | 2,805 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torchvision.models.segmentation.deeplabv3_resnet50",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "segmentation_models_pytorch.Unet",
"line_number": 22,
"usage_t... |
12483249842 | import cfgrib
import xarray as xr
import matplotlib.pyplot as plt
import os
import glob
from pathlib import Path
from tqdm import tqdm, tnrange
import pandas as pd
import math
import shapely.wkt
def get_csv_from_grib_files(grib_files: list) -> pd.DataFrame:
    """Read each GRIB2 file with the cfgrib engine and concatenate them.

    Args:
        grib_files (list): paths of the .grib2 files to read.

    Returns:
        pd.DataFrame: row-wise concatenation of every file's tabular form,
        with the dataset index reset into regular columns.
    """
    frames = [
        xr.open_dataset(str(path), engine="cfgrib").to_dataframe().reset_index()
        for path in tqdm(grib_files)
    ]
    return pd.concat(frames)
def getBounds(shape):
    """Return ([xmin, xmax], [ymin, ymax]) of a polygon's exterior ring."""
    xs, ys = shape.exterior.coords.xy
    return [min(xs), max(xs)], [min(ys), max(ys)]
def get_mask(df, min_lon, max_lon, min_lat, max_lat):
    """Boolean mask of rows whose lon/lat fall inside the bounding box (inclusive)."""
    in_lon = df['longitude'].between(min_lon, max_lon)
    in_lat = df['latitude'].between(min_lat, max_lat)
    return in_lon & in_lat
def prep_group_1(root, read_path, save_path, loc, loc_keys, grid_meta, var_group):
    """Aggregate GFS group-1 GRIB data onto the location's grid cells and save one CSV.

    NOTE(review): `root` is accepted but never used inside this function.
    """
    # Defining the path to raw grib files
    path = Path(read_path) # /{loc}')
    #folders = [i for i in path.iterdir() if i.is_dir()]
    #folder = list(filter(lambda x: var_group in x.name, folders))[0]
    # Getting grib file names; keep only files whose third dot-separated name
    # field ends in '00' (the 00-hour products)
    grib_files = list(path.glob('*.grib2'))
    grib_files = list(filter(lambda x: x.name.split('.')[2][-2:] == '00', grib_files))
    # Converting each file into csv and concatenating
    df = get_csv_from_grib_files(grib_files)
    #EDIT
    print(f'finished getting csv from grib files for {loc} using files at path {path} for group {var_group}')
    df.rename(columns={'unknown':'sunsd'}, inplace=True)
    # Keep only metadata rows for this location
    grid_meta = grid_meta[grid_meta['location'] == loc_keys[loc]]
    res = []
    for i in tqdm(range(grid_meta.shape[0])):
        grid_id = grid_meta.iloc[i].grid_id
        grid_shape = shapely.wkt.loads(grid_meta.iloc[i]['wkt'])
        lon, lat = getBounds(grid_shape)
        #print(lon, lat)
        lon[0] = lon[0] if lon[0] > 0 else lon[0] + 360 # converting neg lon to pos
        lon[1] = lon[1] if lon[1] > 0 else lon[1] + 360
        min_lon, max_lon = round(lon[0]*4)/4, math.ceil(lon[1]*4)/4 # Adapting coords for 0.25x0.25 grid of GFS
        min_lat, max_lat = round(lat[0]*4)/4, math.ceil(lat[1]*4)/4 # to select the cell containing the grid coords
        mask = get_mask(df, min_lon, max_lon, min_lat, max_lat)
        # If grid coords go beyond gfs coords for the city
        if df[mask].shape[0] == 0:
            if min_lon > df['longitude'].max():
                min_lon = df['longitude'].max()
            # NOTE(review): this branch tests min_lat but reassigns max_lat —
            # possibly should clamp min_lat instead; confirm against the data.
            if min_lat > df['latitude'].max():
                max_lat = df['latitude'].max()
            mask = get_mask(df, min_lon, max_lon, min_lat, max_lat)
        # Average all GFS cells inside the box per timestamp
        agg_df = df[mask].groupby('time').mean()
        agg_df['grid_id'] = grid_id
        agg_df['location'] = loc
        res.append(agg_df)
    pd.concat(res).reset_index().to_csv(f'{save_path}/{loc}_gfs_{var_group}.csv', index=None)
if __name__ == "__main__":
    # Single hard-coded run: Los Angeles ('la'), variable group 1.
    root = '../../data/gfs'
    read_path = f'{root}/downloaded_files'
    save_path = f'{root}/merged_csv'
    loc = 'la'
    loc_keys = {'la':'Los Angeles (SoCAB)', 'tp':'Taipei', 'dl':'Delhi'}
    grid_meta = pd.read_csv('../data/grid_metadata.csv')
    var_group = 'group_1'
    prep_group_1(root, read_path, save_path, loc, loc_keys, grid_meta, var_group)
| drivendataorg/nasa-airathon | pm25/3rd Place/src/preprocessing/gfs/gfs_prep_group_1.py | gfs_prep_group_1.py | py | 3,731 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "tqdm.tqdm",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "xarray.open_dataset",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"l... |
25468573866 | from mycroft import MycroftSkill, intent_file_handler
import openai
import os
class Chatgpt(MycroftSkill):
    """Mycroft skill that speaks a text completion from the OpenAI API."""

    def __init__(self):
        MycroftSkill.__init__(self)
        # Bug fix: the key was pasted unquoted as a subscript expression
        # (`os.environ[sk-...]`), which raises NameError at runtime, and the
        # literal secret was committed to source. Read the key from the
        # standard OPENAI_API_KEY environment variable instead; the old
        # leaked key should be revoked.
        openai.api_key = os.environ["OPENAI_API_KEY"]  # Set the API key

    @intent_file_handler('chatgpt.intent')
    def handle_chatgpt(self, message):
        """Handle the chatgpt intent by generating and speaking a completion."""
        # TODO(review): the prompt is hard-coded; presumably it should come
        # from the user's utterance in message.data — confirm intent design.
        prompt = "Hello, how are you?"  # Example prompt
        response = openai.Completion.create(
            engine="davinci",
            prompt=prompt,
            max_tokens=60
        )
        text = response.choices[0].text.strip()  # Get the generated text
        self.speak(text)  # Speak the generated text
def create_skill():
    """Entry point used by Mycroft's skill loader to instantiate this skill."""
    return Chatgpt()
| adamkalbouneh/chatgpt-skill | __init__.py | __init__.py | py | 723 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "mycroft.MycroftSkill",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "mycroft.MycroftSkill.__init__",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "mycroft.MycroftSkill",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "... |
73202406184 | from bokeh.models import (
HoverTool,
Range1d,
ColumnDataSource,
BBoxTileSource,
TapTool,
)
from bokeh.plotting import figure
from bokeh.layouts import row, column
import bokeh.models as bokeh_models
from bokeh.models.widgets import Div, RadioGroup, CheckboxGroup
# Background tile templates keyed by display name. The {XMIN}/{YMIN}/{XMAX}/{YMAX}
# placeholders are filled in by the Bokeh tile-source class named in "class".
BOKEH_BACKGROUNDS = {
    "luchtfoto": {
        "url": (
            "https://service.pdok.nl/hwh/luchtfotorgb/wms/v1_0?"
            "service=WMS&version=1.3.0&request=GetMap&layers=Actueel_orthoHR"
            "&width=265&height=265&styles=&crs=EPSG:28992&format=image/jpeg"
            "&bbox={XMIN},{YMIN},{XMAX},{YMAX}"
        ),
        "class": "BBoxTileSource",
    },
    "topografie": {
        "url": (
            "https://services.arcgisonline.nl/arcgis/rest/services/Basiskaarten/Topo/"
            "MapServer/export?"
            "bbox={XMIN},{YMIN},{XMAX},{YMAX}"
            "&layers=show"
            "&size=385,385"
            "&bboxSR=28892"
            "&dpi=2500"
            "&transparent=true"
            "&format=png"
            "&f=image"
        ),
        "class": "BBoxTileSource",
    },
}
# Glyph styling for the location markers drawn by make_map.
BOKEH_LOCATIONS_SETTINGS = {
    "size": 10,
    "line_color": "line_color",
    "fill_color": "fill_color",
    "selection_color": "red",
    "selection_fill_alpha": 1,
    "nonselection_fill_alpha": 0.6,
    "nonselection_line_alpha": 0.5,
    "hover_color": "red",
    "hover_alpha": 0.6,
    "line_width": 1,
    "legend_field": "label",
}
# Default figure settings used by make_map / make_options.
BOKEH_SETTINGS = {
    "background": "topografie",
    "save_tool": "save",
    "active_scroll": "wheel_zoom",
    "toolbar_location": "above",
}
def get_tilesource(layer, map_configs=BOKEH_BACKGROUNDS):
    """Build a Bokeh tile source for the named layer from its config entry."""
    config = map_configs[layer]
    kwargs = config.get("args", {})
    tile_cls = getattr(bokeh_models, config["class"])
    return tile_cls(url=config["url"], **kwargs)
def make_map(
    bounds: list,
    locations_source: ColumnDataSource,
    map_overlays: dict = {},
    settings=BOKEH_SETTINGS,
) -> row:
    """Build the Bokeh map figure: background tiles, optional overlays and
    the locations glyph.

    NOTE(review): annotated as returning `row`, but the function returns the
    figure itself — confirm the intended annotation.
    NOTE(review): `map_overlays` is a mutable default argument; it is only
    read here, but the shared-default pitfall applies if it is ever mutated.
    """
    # figure ranges
    x_range = Range1d(start=bounds[0], end=bounds[2], min_interval=100)
    y_range = Range1d(start=bounds[1], end=bounds[3], min_interval=100)
    # set tools
    map_hover = HoverTool(tooltips=[("Locatie", "@name"), ("ID", "@id")])
    map_hover.toggleable = False
    tools = [
        "tap",
        "wheel_zoom",
        "pan",
        "reset",
        "box_select",
        map_hover,
        "save",
    ]
    # initialize figure
    map_fig = figure(
        tools=tools,
        active_scroll=settings["active_scroll"],
        x_range=x_range,
        y_range=y_range,
        toolbar_location=settings["toolbar_location"],
    )
    # misc settings
    map_fig.axis.visible = False
    map_fig.toolbar.logo = None
    map_fig.toolbar.autohide = True
    map_fig.xgrid.grid_line_color = None
    map_fig.ygrid.grid_line_color = None
    map_fig.select(type=TapTool)
    # add background
    tile_source = get_tilesource(settings["background"])
    map_fig.add_tile(tile_source, name="background")
    # add custom map-layers (if any); reversed so the first key ends up on top
    if map_overlays:
        layer_names = list(map_overlays.keys())
        layer_names.reverse()
        for layer_name in layer_names:
            tile_source = get_tilesource(layer_name, map_configs=map_overlays)
            if "alpha" in map_overlays[layer_name].keys():
                alpha = map_overlays[layer_name]["alpha"]
            else:
                alpha = 1
            map_fig.add_tile(
                tile_source,
                name=layer_name,
                visible=map_overlays[layer_name]["visible"],
                alpha=alpha,
            )
    # add locations glyph
    map_fig.circle(x="x", y="y", source=locations_source, **BOKEH_LOCATIONS_SETTINGS)
    return map_fig
def make_options(
    map_overlays: dict,
    overlays_change,
    background_title: str,
    background_change,
):
    """Build the overlay checkboxes and background radio buttons for the map.

    `overlays_change` / `background_change` are Bokeh on_change callbacks.
    Returns a column layout with both control groups.
    """
    # set overlay and handlers
    overlay_options = list(map_overlays.keys())
    active_overlays = [
        idx for idx, (_, v) in enumerate(map_overlays.items()) if v["visible"]
    ]
    overlay_control = CheckboxGroup(labels=overlay_options, active=active_overlays)
    overlay_control.on_change("active", overlays_change)
    # set background and handlers; the default background is pre-selected
    background_options = list(BOKEH_BACKGROUNDS.keys())
    background_active = list(BOKEH_BACKGROUNDS.keys()).index(
        BOKEH_SETTINGS["background"]
    )
    background_control = RadioGroup(labels=background_options, active=background_active)
    background_control.on_change("active", background_change)
    map_controls = column(
        overlay_control,
        Div(text=f"<h6>{background_title}</h6>"),
        background_control,
    )
    return map_controls
| d2hydro/hydrodashboards | src/hydrodashboards/bokeh/widgets/map_figure_widget.py | map_figure_widget.py | py | 4,761 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bokeh.models",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "bokeh.models.ColumnDataSource",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "bokeh.models.Range1d",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "b... |
70943556903 | from pyspark.sql import SparkSession
from pyspark.streaming import StreamingContext
from time import sleep
# Build a Spark session and a streaming context with 1-second batches.
spark = SparkSession.builder.appName('streaming').getOrCreate()
sc = spark.sparkContext
ssc = StreamingContext(sc, 1)
ssc.checkpoint('/tmp')
# Word count over a TCP text stream.
# NOTE(review): port 301 is a privileged port (<1024) — confirm it is intended.
lines = ssc.socketTextStream('0.0.0.0', 301)
words = lines.flatMap(lambda s: s.split(' '))
pairs = words.map(lambda word: (word, 1))
counts = pairs.reduceByKey(lambda a, b: a + b)
counts.pprint()
# Run for five seconds, then stop streaming but keep the SparkContext alive.
ssc.start()
sleep(5)
ssc.stop(stopSparkContext=False, stopGraceFully=True)
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 5,
"usage_type": "... |
73388984743 | """Plot Stream PGM."""
import sys
import daft
import matplotlib.pyplot as plt
from showyourwork.paths import user as user_paths
paths = user_paths()
# Add the parent directory to the path
sys.path.append(paths.scripts.parent.as_posix())
# isort: split
# Matplotlib style
plt.style.use(paths.scripts / "paper.mplstyle")
# Colors: blue for astrometric (w) quantities, orange for photometric (m).
w_color = {"ec": "tab:blue"}
m_color = {"ec": "#f89406"}
# =============================================================================
# Stream Model
# Instantiate the PGM.
pgm = daft.PGM()
# Astrometric Nodes
pgm.add_node(
    "stream_sigma_w,obs", r"$\Sigma_n^{(w)}$", 2, 2, fixed=True, plot_params=w_color
)
pgm.add_node(
    "stream_w,obs", r"$w_n^{\rm obs}$", 3, 2, observed=True, plot_params=w_color
)
# NOTE(review): the label below has an unbalanced extra "}" ($\mu^{(w)}}$) —
# likely a LaTeX typo; confirm against the rendered figure.
pgm.add_node(
    "stream_mu_w,model", r"$\mu^{(w)}}$", 2, 3, observed=False, plot_params=w_color
)
pgm.add_node(
    "stream_sigma_w,model", r"$\Sigma^{(w)}$", 3, 3, observed=False, plot_params=w_color
)
# Add in the edges.
pgm.add_edge("stream_sigma_w,obs", "stream_w,obs")
pgm.add_edge("stream_mu_w,model", "stream_w,obs")
pgm.add_edge("stream_sigma_w,model", "stream_w,obs")
# Photometric Nodes
pgm.add_node(
    "stream_sigma_m,obs", r"$\Sigma_n^{(m)}$", 5, 2, fixed=True, plot_params=m_color
)
pgm.add_node(
    "stream_m,obs", r"$m_n^{\rm obs}$", 4, 2, observed=True, plot_params=m_color
)
pgm.add_node(
    "stream_mu_m,model", r"$\mu^{(m)}$", 4, 3, observed=False, plot_params=m_color
)
pgm.add_node(
    "stream_sigma_m,model", r"$\Sigma^{(m)}$", 5, 3, observed=False, plot_params=m_color
)
# Add in the edges.
pgm.add_edge("stream_sigma_m,obs", "stream_m,obs")
pgm.add_edge("stream_mu_m,model", "stream_m,obs")
pgm.add_edge("stream_sigma_m,model", "stream_m,obs")
# Full Data Node
pgm.add_node("stream_x,obs", r"$x_n^{obs}$", 3, 1, alternate=True)
pgm.add_edge("stream_w,obs", "stream_x,obs", directed=False)
pgm.add_edge("stream_m,obs", "stream_x,obs", directed=False)
# Mixture probability
pgm.add_node("stream_mixture_coefficient", r"$f_q$", 1, 3)
pgm.add_node("stream_mixture_index", r"$q_n$", 1, 1)
pgm.add_edge("stream_mixture_coefficient", "stream_mixture_index")
pgm.add_edge("stream_mixture_index", "stream_x,obs")
# Phi1 Node (dashed edges mark the network dependence on phi1)
_ntwk_kw = {"alpha": 0.5, "linestyle": "--", "zorder": -100}
pgm.add_node("stream_phi1", r"${\phi_1}_n$", 3, 4.1, observed=True, plot_params=w_color)
pgm.add_edge("stream_phi1", "stream_mixture_coefficient", plot_params=_ntwk_kw)
pgm.add_edge("stream_phi1", "stream_mu_w,model", plot_params=_ntwk_kw)
pgm.add_edge("stream_phi1", "stream_sigma_w,model", plot_params=_ntwk_kw)
pgm.add_edge("stream_phi1", "stream_mu_m,model", plot_params=_ntwk_kw)
pgm.add_edge("stream_phi1", "stream_sigma_m,model", plot_params=_ntwk_kw)
# And a plate.
pgm.add_plate(
    [0.5, 0.5, 5, 4],
    label=r"",
    shift=-0.1,
    rect_params={"linestyle": "--", "alpha": 0.5},
)
pgm.add_plate([0.5, 0.5, 5, 2], label=r"$n = 1, \cdots, N$", shift=-0.1)
pgm.add_plate(
    [1.5, 2.75, 4, 1], label=r"$q = 1, \cdots, Q$", shift=-0.1, position="top right"
)
# =============================================================================
# Background Model (same structure, drawn shifted right by base_shift units)
base_shift = 7
# Astrometric Nodes
pgm.add_node(
    "sigma_w,obs",
    r"$\Sigma_n^{(w)}$",
    base_shift + 2,
    2,
    fixed=True,
    plot_params=w_color,
)
pgm.add_node(
    "w,obs", r"$w_n^{\rm obs}$", base_shift + 3, 2, observed=True, plot_params=w_color
)
# NOTE(review): same unbalanced "}" typo here ($\theta^{(w)}}$) — confirm.
pgm.add_node(
    "theta_w,model",
    r"$\theta^{(w)}}$",
    base_shift + 3,
    3,
    observed=False,
    plot_params=w_color,
)
# Add in the edges.
pgm.add_edge("sigma_w,obs", "w,obs")
pgm.add_edge("theta_w,model", "w,obs")
# Photometric Nodes
pgm.add_node(
    "sigma_m,obs",
    r"$\Sigma_n^{(m)}$",
    base_shift + 5,
    2,
    fixed=True,
    plot_params=m_color,
)
pgm.add_node(
    "m,obs", r"$m_n^{\rm obs}$", base_shift + 4, 2, observed=True, plot_params=m_color
)
pgm.add_node(
    "theta_m,model",
    r"$\theta^{(m)}$",
    base_shift + 4,
    3,
    fixed=True,
    plot_params=m_color,
)
# Add in the edges.
pgm.add_edge("sigma_m,obs", "m,obs")
pgm.add_edge("theta_m,model", "m,obs")
# Full Data Node
pgm.add_node("x,obs", r"$x_n^{obs}$", base_shift + 3, 1, alternate=True)
pgm.add_edge("w,obs", "x,obs", directed=False)
pgm.add_edge("m,obs", "x,obs", directed=False)
# Mixture probability
pgm.add_node("mixture_coefficient", r"$f_q$", base_shift + 1, 3)
pgm.add_node("mixture_index", r"$q_n$", base_shift + 1, 1)
pgm.add_edge("mixture_coefficient", "mixture_index")
pgm.add_edge("mixture_index", "x,obs")
# Phi1 Node
_ntwk_kw = {"alpha": 0.5, "linestyle": "--", "zorder": -100}
pgm.add_node(
    "phi1", r"${\phi_1}_n$", base_shift + 3, 4.1, observed=True, plot_params=w_color
)
pgm.add_edge("phi1", "mixture_coefficient", plot_params=_ntwk_kw)
pgm.add_edge("phi1", "theta_w,model", plot_params=_ntwk_kw)
pgm.add_edge("phi1", "theta_m,model", plot_params=_ntwk_kw)
# And a plate.
pgm.add_plate(
    [base_shift + 0.5, 0.5, 5, 4],
    label=r"",
    shift=-0.1,
    rect_params={"linestyle": "--", "alpha": 0.5},
)
pgm.add_plate([base_shift + 0.5, 0.5, 5, 2], label=r"$n = 1, \cdots, N$", shift=-0.1)
pgm.add_plate(
    [base_shift + 2.5, 2.75, 2, 1],
    label=r"$q = 1, \cdots, Q$",
    shift=-0.1,
    position="top right",
)
ax2 = pgm.render()
# =============================================================================
ax2.figure.savefig(paths.figures / "pgm.pdf")
| nstarman/stellar_stream_density_ml_paper | src/scripts/pgm.py | pgm.py | py | 5,422 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "showyourwork.paths.user",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplo... |
25053087228 | # Std Libs:
import logging
# Django Libs:
from django.contrib.auth.models import User
# Django Rest Framework Libs:
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
# Locals:
from .models import Client
from .permissions import ClientPermissions
from .serializer import ClientSerializer
from user.models import (SalerTHROUGH)
logger = logging.getLogger(__name__)
class ClientCRUD(viewsets.ViewSet):
    """Client management
    Generic argument:
        - pk (int) : ID of the client
    Methods:
        - GET    : list
        - GET    : retrieve
        - POST   : create
        - PUT    : update
    Permissions:
        LEGEND: {
            '-': 'always permit',
            'o': 'need to be assignee',
        }
        Seller :
            - list
            - create
            o retrieve
            o update
        Support :
            - list
            o retrieve
    Generic Error:
        (HTTP status_code | detail)
        - 401 : JWT authentification failed
    """
    # Object-level checks are delegated to ClientPermissions via
    # check_object_permissions() in retrieve/update.
    permission_classes = [ClientPermissions]

    def list(self, request):
        """
        GET request
        Method list
        Show all clients linked to the authenticated user
        Validate :
            (HTTP status_code | detail)
            - 200 : clients' list
            - 204 : No client
        Errors :
            (HTTP status_code | detail)
            - 403 : Not permission to list
        """
        # Show all clients
        clients = Client.objects.all()
        serialized_clients = ClientSerializer(clients, many=True)

        if serialized_clients.data:
            content = serialized_clients.data
            return Response(data=content,
                            status=status.HTTP_200_OK)
        else:
            # Empty queryset serializes to an empty list -> 204
            content = {"detail": "No client available."}
            return Response(data=content,
                            status=status.HTTP_204_NO_CONTENT)

    def retrieve(self, request, pk):
        """
        GET request
        Method retrieve
        Get a specific client for the seller|support user.
        Validate :
            (HTTP status_code | detail)
            - 200 : retrieve client
        Errors :
            (HTTP status_code | detail)
            - 403 : Not permission to retrieve
            - 404 : Element doesn't exist
        """
        try:
            client = Client.objects.get(id=pk)
        except Client.DoesNotExist:
            content = {"detail": "Client doesn't exist."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_404_NOT_FOUND)

        serialized_client = ClientSerializer(client)
        if serialized_client.data:
            content = serialized_client.data
            # Check if user has permission to retrieve this client
            self.check_object_permissions(request, client)
            return Response(data=content,
                            status=status.HTTP_200_OK)
        else:
            content = {"detail": "Client details not available."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_404_NOT_FOUND)

    def create(self, request):
        """
        POST request
        Method create
        Create a new client. Need to be connected to create one.
        Form:
            - first_name
            - last_name
            - email
            - phone
            - mobile
            - company_name
        Validate :
            (HTTP status_code | detail)
            - 201 : created client
        Errors :
            (HTTP status_code | detail)
            - 400 : Invalid form
            - 403 : Not permission to create
            - 500 : Internal error when added saler
        """
        try:
            content = dict(request.data.items())
        except Exception:
            content = {"detail": "Form is invalid."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_400_BAD_REQUEST)

        if content:
            # The authenticated user becomes the client's sales contact.
            sale_contact = User.objects.get(id=request.user.id)
            try:
                content["sales_contact"] = sale_contact
                client = Client(**content)
            except Exception:
                content = {"detail": "Form invalid."}
                logger.error(content.values())
                return Response(data=content,
                                status=status.HTTP_400_BAD_REQUEST)
            # Saving client
            client.save()
            # Create the saler through (link table between user and client)
            try:
                saler = dict()
                saler["user"] = sale_contact
                saler["client"] = client
                contact = SalerTHROUGH(**saler)
            except Exception:
                content = {"detail": "Saler couldn't be added."}
                logger.error(content.values())
                return Response(data=content,
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            # Saving sale contact
            contact.save()
            # Return client's data
            serialized_client = ClientSerializer(client)
            return Response(data=serialized_client.data,
                            status=status.HTTP_201_CREATED)
        else:
            content = {"detail": "Form is empty."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, pk):
        """
        PUT request
        Method update
        Need to own the project to update it.
        Form:
            - first_name
            - last_name
            - email
            - phone
            - mobile
            - company_name
        Validate :
            (HTTP status_code | detail)
            - 200 : updated project
        Errors :
            (HTTP status_code | detail)
            - 400 : Invalid form
            - 403 : Not permission to update
            - 404 : Element doesn't exist
        """
        try:
            client_update = Client.objects.get(id=pk)
        except Client.DoesNotExist:
            content = {"detail": "Client doesn't exist."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_404_NOT_FOUND)

        self.check_object_permissions(request, client_update)
        # Re-query as a queryset so .update(**content) can apply partial fields.
        client = Client.objects.filter(id=pk)
        try:
            content = dict(request.data.items())
        except Exception:
            content = {"detail": "Form is invalid."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_400_BAD_REQUEST)
        if content:
            try:
                client.update(**content)
            except Exception:
                content = {"detail": "Form is invalid."}
                logger.error(content.values())
                return Response(data=content,
                                status=status.HTTP_400_BAD_REQUEST)
            serialized_client = ClientSerializer(client, many=True)
            return Response(data=serialized_client.data,
                            status=status.HTTP_200_OK)
        else:
            content = {"detail": "Empty form."}
            logger.error(content.values())
            return Response(data=content,
                            status=status.HTTP_400_BAD_REQUEST)
| Emericdefay/OCR_P12 | CRM/client/views.py | views.py | py | 7,612 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.viewsets.ViewSet",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 19,
"usage_type": "name"
},
{
"ap... |
2318482284 | import torch
import math
import numpy as np
import os
import cv2
import imutils
import random
import shutil
from slim_net import FaceQualityNet, FaceQualitySlim
from myconfig import config as testconf
from load_data import pytorch_to_dpcoreParams, save_feature_channel, get_patches, get_patches_augment
from detector.create_anchors import PriorBox
from detector.config import cfg_slimNet3 as cfg
from detector.face_net import FaceDetectSlimNet
from detector.retinaface_utils import decode, decode_landm
from detector.nms import py_cpu_nms
device = "cpu"
def expand_facebox(rect, imgw, imgh):
    """Expand a face box [x1, y1, x2, y2] by fixed ratios and clamp to the image.

    The current ratios (0 margin, 1x scale) keep the box unchanged, so the net
    effect is clamping to [0, imgw] x [0, imgh]. Returns an int32 array.
    """
    x1, y1 = rect[0], rect[1]
    w = rect[2] - rect[0]
    h = rect[3] - rect[1]
    # Expansion ratios for the face region; 0/1 leaves the box as-is.
    ex1 = x1 - 0 * w  # 0.1,0.1,1.2,1.1
    ey1 = y1 - 0 * h
    ex2 = ex1 + 1 * w
    ey2 = ey1 + 1 * h
    # neck
    # # randid = random.choice([1, 2, 3, 4, 5, 6])
    # # sx1, sy1, sx2, sy2 = rand_ratio(randid)
    # sx1 = -0.03
    # sy1 = 0.72
    # sx2 = 1.06
    # sy2 = 0.65
    #
    # ex1 = x1 + sx1 * w
    # ey1 = y1 + sy1 * h
    # ex2 = ex1 + sx2 * w
    # ey2 = ey1 + sy2 * h
    clipped = np.zeros(4, dtype=np.int32)
    clipped[0] = max(ex1, 0)
    clipped[1] = max(ey1, 0)
    clipped[2] = min(ex2, imgw)
    clipped[3] = min(ey2, imgh)
    return clipped
def img_process(img):
    """Convert an input image into the tensor the network expects.

    Args:
        img: BGR image array (as read by cv2).

    Returns:
        tensor: img(batch, channel, width, height), values scaled to [0, 1].
    """
    im = cv2.resize(img, (testconf.img_width, testconf.img_height), interpolation=cv2.INTER_LINEAR)
    im = im.astype(np.float32)
    # im = (im - testconf.bgr_mean) / testconf.bgr_std
    im = im / 255.0
    im = im.transpose(2, 0, 1)  # HWC -> CHW
    im = torch.from_numpy(im)
    im = im.unsqueeze(0)  # add batch dimension
    im = im.to(device)
    return im
def get_patches_tensor(img):
    """Crop `testconf.crop_num` fixed-size patches from img and stack them into
    a (crop_num, 3, crop_size, crop_size) float tensor scaled to [0, 1]."""
    imgpatches = get_patches(img, patch_size=testconf.crop_size, patch_num=testconf.crop_num)
    augment_patches = torch.FloatTensor(testconf.crop_num, 3, testconf.crop_size, testconf.crop_size).to(device)
    for i in range(testconf.crop_num):
        onepatch = imgpatches[i]
        onepatch = onepatch.astype(np.float32)
        onepatch = onepatch / 255.0
        onepatch = onepatch.transpose(2, 0, 1)  # HWC -> CHW
        onepatch = torch.from_numpy(onepatch).to(device)
        augment_patches[i, :, :, :] = onepatch
    return augment_patches
def get_patches_better(img):
    """Crop a variable number of augmented patches from img.

    Returns a (nump, 3, crop_size, crop_size) float tensor scaled to [0, 1],
    or an empty list when no patch could be extracted — callers must check.
    """
    imgpatches, nump = get_patches_augment(img, patch_size=testconf.crop_size, timenum=testconf.crop_scale)
    if nump == 0:
        augment_patches =[]
        return augment_patches
    augment_patches = torch.FloatTensor(nump, 3, testconf.crop_size, testconf.crop_size).to(device)
    for i in range(nump):
        onepatch = imgpatches[i]
        onepatch = onepatch.astype(np.float32)
        onepatch = onepatch / 255.0
        onepatch = onepatch.transpose(2, 0, 1)  # HWC -> CHW
        onepatch = torch.from_numpy(onepatch).to(device)
        augment_patches[i, :, :, :] = onepatch
    return augment_patches
def detect_one_img(faceNet, img_data, minface):
    """Run the RetinaFace-style detector on one BGR image.

    Resizes so the longest side maps to ~640 px (rounded up to multiples of
    64), decodes boxes/landmarks from the priors, filters by confidence and
    applies NMS. Coordinates are mapped back to the original image scale.

    NOTE(review): `minface` is only used by the commented-out scale formula
    and has no effect in the current code — confirm.

    Returns:
        dets: (N, 5) array of [x1, y1, x2, y2, score].
        landms: (N, 10) array of 5 landmark (x, y) pairs.
    """
    conf_thresh = 0.5
    nms_thresh = 0.3
    im_shape = img_data.shape
    im_size_max = np.max(im_shape[0:2])
    res_scal = 640 / im_size_max
    # res_scal = 20 / float(minface)
    # Round the working resolution up to multiples of 64.
    neww = (int(im_shape[1] * res_scal / 64) + 1) * 64
    newh = (int(im_shape[0] * res_scal / 64) + 1) * 64
    scalw = neww / im_shape[1]
    scalh = newh / im_shape[0]
    img = np.float32(img_data)
    # img = cv2.resize(img, None, None, fx=res_scal, fy=res_scal, interpolation=cv2.INTER_CUBIC)
    img = cv2.resize(img, (neww, newh), interpolation=cv2.INTER_LINEAR)
    scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
    scale = scale.to(device)
    # Normalize to [0, 1], reorder HWC -> CHW and convert to a batch tensor.
    # (original comment, translated: "subtract mean, convert to numpy")
    im_height, im_width, _ = img.shape
    img /= 255.0
    img = img.transpose(2, 0, 1)
    img = torch.from_numpy(img).unsqueeze(0)
    img = img.to(device)
    # b, c, h, w = img.shape
    # save_feature_channel("txt/imgp.txt", img, b, c, h, w)
    loc, conf, landms = faceNet(img) # forward pass
    priorbox = PriorBox(cfg, image_size=(im_height, im_width))
    priors = priorbox.forward()
    priors = priors.to(device)
    prior_data = priors.data
    boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
    # boxes = boxes * scale / res_scal
    boxes = boxes * scale
    # Map box coordinates back to original-image scale.
    boxes[:, (0, 2)] = boxes[:, (0, 2)] / scalw
    boxes[:, (1, 3)] = boxes[:, (1, 3)] / scalh
    boxes = boxes.cpu().numpy()
    scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
    landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
    scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                           img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                           img.shape[3], img.shape[2]])
    scale1 = scale1.to(device)
    # landms = landms * scale1 / res_scal
    landms = landms * scale1
    landms[:, (0, 2, 4, 6, 8)] = landms[:, (0, 2, 4, 6, 8)] / scalw
    landms[:, (1, 3, 5, 7, 9)] = landms[:, (1, 3, 5, 7, 9)] / scalh
    landms = landms.cpu().numpy()
    # ignore low scores
    inds = np.where(scores > conf_thresh)[0]
    boxes = boxes[inds]
    landms = landms[inds]
    scores = scores[inds]
    # keep top-K before NMS
    order = scores.argsort()[::-1][:5000]
    boxes = boxes[order]
    landms = landms[order]
    scores = scores[order]
    # do NMS
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = py_cpu_nms(dets, nms_thresh)
    # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
    dets = dets[keep, :]
    landms = landms[keep]
    return dets, landms
def test_one_nodet(img_path, snet, dir=False):
    """Score one image's blur without face detection.

    Score is a trimmed mean over patch sigmoids (max and min dropped).
    When `dir` is True, returns (annotated image, score); otherwise shows
    the image in a window and returns None.

    NOTE(review): `dir` shadows the builtin of the same name.
    """
    img_mat = cv2.imread(img_path, cv2.IMREAD_COLOR)
    im_h, im_w, _ = img_mat.shape
    test_patches = get_patches_tensor(img_mat)
    out = snet(test_patches)
    out = torch.sigmoid(out)
    len_out = out.shape[0]
    max_score = torch.max(out)
    min_score = torch.min(out)
    # Trimmed mean: drop the single highest and lowest patch scores.
    blur_score = (torch.sum(out) - max_score - min_score) / (len_out - 2)
    # blur_score = torch.mean(out)
    showscore = np.around(blur_score.item(), 4)
    posx = int(5)
    posy = int(5)
    cv2.putText(img_mat, str(showscore), (posx, posy), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255), 4)
    cv2.rectangle(img_mat, (0, 0), (im_w, im_h), (0, 255, 0), 4)
    if dir:
        return img_mat, showscore
    else:
        cv2.namedWindow('result', cv2.WINDOW_NORMAL)
        cv2.imshow('result', img_mat)
        cv2.waitKey(0)
def test_patch_nodet(img_path, snet, dir=False):
    """Variant of test_one_nodet using the augmented patch sampler.

    NOTE(review): get_patches_better can return an empty list; that case is
    not handled here and would make snet([]) fail — confirm inputs.
    """
    img_mat = cv2.imread(img_path, cv2.IMREAD_COLOR)
    im_h, im_w, _ = img_mat.shape
    test_patches = get_patches_better(img_mat)
    out = snet(test_patches)
    out = torch.sigmoid(out)
    len_out = out.shape[0]
    max_score = torch.max(out)
    min_score = torch.min(out)
    # Trimmed mean: drop the single highest and lowest patch scores.
    blur_score = (torch.sum(out) - max_score - min_score) / (len_out - 2)
    # blur_score = torch.mean(out)
    showscore = np.around(blur_score.item(), 4)
    posx = int(5)
    posy = int(5)
    cv2.putText(img_mat, str(showscore), (posx, posy), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255), 4)
    cv2.rectangle(img_mat, (0, 0), (im_w, im_h), (0, 255, 0), 4)
    if dir:
        return img_mat, showscore
    else:
        cv2.namedWindow('result', cv2.WINDOW_NORMAL)
        cv2.imshow('result', img_mat)
        cv2.waitKey(0)
def test_one(img_path, dnet, snet, minface, dir=False):
    """Detect faces with dnet, then score each face crop's blur with snet.

    Annotates the image with the (last) face's score and box. When `dir` is
    True, returns (annotated image, score); otherwise displays the image.

    NOTE(review): when a face yields no patches, the function returns from
    inside the loop even when dir is False — confirm intended behavior.
    """
    img_mat = cv2.imread(img_path, cv2.IMREAD_COLOR)
    im_h, im_w, _ = img_mat.shape
    face_rect, key_points = detect_one_img(dnet, img_mat, minface)
    showscore = 0.0
    for box, lands in zip(face_rect, key_points):
        new_box = expand_facebox(box, im_w, im_h)  # expand/clamp the face box
        # new_box = np.zeros(4, dtype=np.int32)
        # new_box[0] = 0
        # new_box[1] = 0
        # new_box[2] = im_w
        # new_box[3] = im_h
        face_roi = img_mat[new_box[1]:new_box[3], new_box[0]:new_box[2], :]
        # test_patches = get_patches_tensor(face_roi)
        test_patches = get_patches_better(face_roi)
        if len(test_patches) == 0:
            showscore = 0.0
            return img_mat, showscore
        # b, c, h, w = roi_process.shape
        # save_feature_channel("txt/imgp.txt", roi_process, b, c, h, w)
        out = snet(test_patches)
        out = torch.sigmoid(out)
        blur_score = torch.mean(out)
        showscore = np.around(blur_score.item(), 4)
        posx = int(new_box[0])
        posy = int(new_box[1])
        cv2.putText(img_mat, str(showscore), (posx, posy), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255), 4)
        cv2.rectangle(img_mat, (new_box[0], new_box[1]), (new_box[2], new_box[3]), (0, 255, 0), 4)
    if dir:
        return img_mat, showscore
    else:
        cv2.namedWindow('result', cv2.WINDOW_NORMAL)
        cv2.imshow('result', img_mat)
        cv2.waitKey(0)
def test_dir(imdir, savedir, net1, net2, min_face=60):
    """Score every image under imdir, show each result and log scores.

    NOTE(review): scores are appended to a hard-coded result txt path and
    `savedir`/`savepath` are computed but never written to — confirm intent.
    """
    cv2.namedWindow('result', cv2.WINDOW_NORMAL)
    filetxt = open("D:/data/imgs/facePicture/blur/test/result_2.txt", "w+")
    for root, dirs, files in os.walk(imdir):
        for file in files:
            # filetxt.write(file + ": ")
            root = root.replace('\\', '/')
            imgpath = root + "/" + file
            savepath = savedir + "/" + file
            saveimg, _score = test_one(imgpath, net1, net2, min_face, dir=True)
            # saveimg, _score = test_one_nodet(imgpath, net2, dir=True)
            # saveimg, _score = test_patch_nodet(imgpath, net2, dir=True)
            _score = str(_score)
            filetxt.write(_score + "\n")
            cv2.imshow('result', saveimg)
            cv2.waitKey(1)
    filetxt.close()
def test_rename_dir(imdir, net1, net2, min_face=60):
for root, dirs, files in os.walk(imdir):
for file in files:
mohu = 0
imgname, imghz = file.split(".")
imgpath = imdir + "/" + file
# savepath = "D:/wx/aa" + "/" + file
# shutil.move(imgpath, savepath)
saveimg, _score = test_one(imgpath, net1, net2, min_face, dir=True)
if _score > 0.5:
mohu = 1
savename = imdir + "/" + imgname + "_" + str(mohu) + "." + imghz
os.rename(imgpath, savename)
def get_face_dirs(imgdirs, savedirs, dnet):
for root, dirs, files in os.walk(imgdirs):
for file in files:
root = root.replace('\\', '/')
imgname, houzui = file.split(".")
imgpath = root + "/" + file
# savepath = savedirs + "/" + imgname + "_0." + houzui
savepath = savedirs + "/" + file
img_mat = cv2.imread(imgpath, cv2.IMREAD_COLOR)
im_h, im_w, _ = img_mat.shape
face_rect, key_points = detect_one_img(dnet, img_mat, 60)
for box, lands in zip(face_rect, key_points):
new_box = expand_facebox(box, im_w, im_h) # 人脸框四周扩充
face_roi = img_mat[new_box[1]:new_box[3], new_box[0]:new_box[2], :]
cv2.imwrite(savepath, face_roi)
def crop_FacePatches_dir(imgdir, savedir, patchSize, patchNum):
for root, dirs, files in os.walk(imgdir):
for file in files:
imgname, houzui = file.split(".")
imgpath = root + "/" + file
dirpath = savedir + "/" + imgname
if not os.path.exists(dirpath):
os.makedirs(dirpath)
img_mat = cv2.imread(imgpath, cv2.IMREAD_COLOR)
patches = get_patches(img_mat, patch_size=patchSize, patch_num=patchNum)
for i in range(patchNum):
patchone = patches[i]
savepath = dirpath + "/" + str(i) + file
cv2.imwrite(savepath, patchone)
def get_score_byname(imgdirs, txtsave):
label_classfication = open(txtsave, mode="w+")
for root, dirs, files in os.walk(imgdirs):
for file in files:
splitfile = file.split(".")[0]
namesplit = splitfile.split("_")
lab = int(namesplit[-1])
change_lab = 0.0
if lab == 0:
change_lab = 0.0
if lab == 1:
change_lab = 0.25
if lab == 2:
change_lab = 0.5
if lab == 3:
change_lab = 0.75
if lab == 4:
change_lab = 1.0
label_classfication.write(str(change_lab) + "\n")
label_classfication.close()
def get_predict_result(imdir, net1, net2, txt1, txt2, txtlab):
txt1 = open(txt1, "w+")
txt2 = open(txt2, "w+")
txtlab = open(txtlab, "w+")
for root, dirs, files in os.walk(imdir):
for file in files:
splitfile = file.split(".")[0]
namesplit = splitfile.split("_")
lab = int(namesplit[-1])
change_lab = 0.0
if lab == 0:
change_lab = 0.0
if lab == 1:
change_lab = 0.25
if lab == 2:
change_lab = 0.5
if lab == 3:
change_lab = 0.75
if lab == 4:
change_lab = 1.0
txtlab.write(str(change_lab) + "\n")
root = root.replace('\\', '/')
imgpath = root + "/" + file
saveimg1, _score1 = test_one_nodet(imgpath, net1, dir=True)
saveimg2, _score2 = test_patch_nodet(imgpath, net2, dir=True)
_score1 = str(_score1)
_score2 = str(_score2)
txt1.write(_score1 + "\n")
txt2.write(_score2 + "\n")
txt1.close()
txt2.close()
txtlab.close()
def change_score(score=0.5):
cha = np.zeros(5, dtype=np.float32)
cha[0] = abs(score - 0.0)
cha[1] = abs(score - 0.25)
cha[2] = abs(score - 0.5)
cha[3] = abs(score - 0.75)
cha[4] = abs(score - 1.0)
index = np.argmin(cha)
ret_score = 0.5
if index == 0:
ret_score = 0.0
if index == 1:
ret_score = 0.25
if index == 2:
ret_score = 0.5
if index == 3:
ret_score = 0.75
if index == 4:
ret_score = 1.0
return ret_score
def create_train_samples(imdir, savedir, net1, net2):
cv2.namedWindow('result', cv2.WINDOW_NORMAL)
for root, dirs, files in os.walk(imdir):
for file in files:
# filetxt.write(file + ": ")
root = root.replace('\\', '/')
imgpath = root + "/" + file
img_mat = cv2.imread(imgpath, cv2.IMREAD_COLOR)
saveimg, _score = test_one(imgpath, net1, net2, minface=60, dir=True)
lab = change_score(_score)
if lab == 0.0:
savepath = savedir + "/0/" + file
if lab == 0.25:
savepath = savedir + "/1/" + file
if lab == 0.5:
savepath = savedir + "/2/" + file
if lab == 0.75:
savepath = savedir + "/3/" + file
if lab == 1.0:
savepath = savedir + "/4/" + file
cv2.imwrite(savepath, img_mat)
cv2.imshow('result', saveimg)
cv2.waitKey(1)
def gaussion_blur(imdir, savedir):
for root, dirs, files in os.walk(imdir):
for file in files:
root = root.replace('\\', '/')
imgname, hz = file.split(".")
imgpath = root + "/" + file
# savep = savedir + "/" + file
# savep1 = savedir + "/" + imgname + "_1." + hz
# savep2 = savedir + "/" + imgname + "_2." + hz
# savep3 = savedir + "/" + imgname + "_3." + hz
# savep4 = savedir + "/" + imgname + "_4." + hz
img_mat = cv2.imread(imgpath, cv2.IMREAD_COLOR)
savep = savedir + "/5/" + imgname + "_4." + hz
cv2.imwrite(savep, img_mat)
# rand_type = random.choice([0, 1, 2, 3, 4])
# if rand_type == 0:
# savep = savedir + "/0/" + imgname + "_0." + hz
# cv2.imwrite(savep, img_mat)
# if rand_type == 1:
# blur = cv2.GaussianBlur(img_mat, (11, 11), 0.8)
# # blur = cv2.GaussianBlur(img_mat, (5, 5), 0.6)
# savep = savedir + "/1/" + imgname + "_1." + hz
# cv2.imwrite(savep, blur)
# if rand_type == 2:
# blur = cv2.GaussianBlur(img_mat, (13, 13), 1.3)
# # blur = cv2.GaussianBlur(img_mat, (7, 7), 1.0)
# savep = savedir + "/2/" + imgname + "_2." + hz
# cv2.imwrite(savep, blur)
# if rand_type == 3:
# blur = cv2.GaussianBlur(img_mat, (15, 15), 1.8)
# # blur = cv2.GaussianBlur(img_mat, (9, 9), 1.4)
# savep = savedir + "/3/" + imgname + "_3." + hz
# cv2.imwrite(savep, blur)
# if rand_type == 4:
# blur = cv2.GaussianBlur(img_mat, (17, 17), 2.2)
# # blur = cv2.GaussianBlur(img_mat, (11, 11), 1.7)
# savep = savedir + "/4/" + imgname + "_4." + hz
# cv2.imwrite(savep, blur)
# blur1 = cv2.GaussianBlur(img_mat, (5, 5), 0.6)
# blur2 = cv2.GaussianBlur(img_mat, (7, 7), 1.0)
# blur3 = cv2.GaussianBlur(img_mat, (9, 9), 1.4)
# blur4 = cv2.GaussianBlur(img_mat, (11, 11), 1.8)
# cv2.imwrite(savep, img_mat)
# cv2.imwrite(savep1, blur1)
# cv2.imwrite(savep2, blur2)
# cv2.imwrite(savep3, blur3)
# cv2.imwrite(savep4, blur4)
# print("done")
if __name__ == "__main__":
qnet = FaceQualityNet(channels=testconf.net_channels, lda_outc=testconf.lad_channel) # 需要修改
q_path = "weights/FaceQuality.pth" # 需要修改
# qnet = FaceQualitySlim(channels=testconf.slim_channels)
# q_path = "weights/FaceQualitySlim_500.pth"
q_dict = torch.load(q_path, map_location=lambda storage, loc: storage)
qnet.load_state_dict(q_dict)
qnet.eval()
qnet = qnet.to(device)
qnet2 = FaceQualityNet(channels=testconf.net_channels, lda_outc=testconf.lad_channel)
q_path2 = "weights/FaceQuality_20200109.pth"
q_dict2 = torch.load(q_path2, map_location=lambda storage, loc: storage)
qnet2.load_state_dict(q_dict2)
qnet2.eval()
qnet2 = qnet2.to(device)
# saveparams = pytorch_to_dpcoreParams(qnet2)
# saveparams.forward("FaceQuality_param_cfg.h", "FaceQuality_param_src.h")
dnet = FaceDetectSlimNet(cfg=cfg) # 需要修改
d_path = "weights/face_slim_0609_250.pth" # 需要修改
d_dict = torch.load(d_path, map_location=lambda storage, loc: storage)
dnet.load_state_dict(d_dict)
dnet.eval()
dnet = dnet.to(device)
imgpath = "D:/data/imgs/facePicture/blur/test/1/10078.jpg"
savepath = "result/res.jpg"
min_face = 60
# test_one(imgpath, dnet, qnet2, min_face, dir=False)
imgdir = "D:/wx/1117"
savedir = "D:/data/imgs/facePicture/blur/faces/add"
txt1 = "D:/data/imgs/facePicture/blur/test/result_1.txt"
txt2 = "D:/data/imgs/facePicture/blur/test/result_2.txt"
txtl = "D:/data/imgs/facePicture/blur/test/label.txt"
# test_dir(imgdir, savedir, dnet, qnet, min_face)
test_rename_dir(imgdir, dnet, qnet, min_face)
# get_face_dirs(imgdir, savedir, dnet)
# get_score_byname(imgdir, txt)
# get_predict_result(imgdir, qnet, qnet2, txt1, txt2, txtl)
imgd = "D:/data/imgs/facePicture/blur/select/4"
saved = "D:/data/imgs/facePicture/blur/patches"
# crop_FacePatches_dir(imgd, saved, 96, 16)
# create_train_samples(imgd, saved, dnet, qnet2)
# gaussion_blur(imgd, saved)
print("done")
| xinyunmian/Face_compliance_detection | face_quality/train/test_quality.py | test_quality.py | py | 19,759 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "myconfig.config.img_width",
... |
42873043264 | """Class definition and associated functions for requesting data from
IUCN Red List. Run as a script it downloads the latest Red List.
You need an API token to use it, available from
http://apiv3.iucnredlist.org/api/v3/token.
"""
import requests
import requests_cache
import csv
import datetime
import time
from argparse import ArgumentParser
INFO_TYPES = ["redlist", "threats", "details", "habitats", "countries",
"conservation_measures", "citations", "narratives", "growth_forms"]
def region_list(token):
"""Get the list of IUCN regions and identifiers"""
url = "http://apiv3.iucnredlist.org/api/v3/region/list"
response = requests.get(url, params={"token": token})
return {line["name"]: line["identifier"] for line in response.json()["results"]}
def make_request(url, token):
"""Utility to make a request and return JSON data"""
response = requests.get(url=url, params={"token": token})
response.raise_for_status()
json_response = response.json()
result = json_response.get("result", [])
if len(result) == 0:
return
else:
return result
def make_throttle_hook(timeout=0.1):
"""Returns a hook that sleeps for timeout seconds if response is
from cache.
"""
def hook(response, *args, **kwargs):
if not getattr(response, "from_cache", False):
time.sleep(timeout)
return response
return hook
class redListGetter(object):
"""An object that gets data from the IUCN red list
"""
def __init__(self, token=None, cache=True, cache_name=None, delay=0.5):
self.page_url = "http://apiv3.iucnredlist.org/api/v3/species/page/{}"
self.species_urls = {"details": "http://apiv3.iucnredlist.org/api/v3/species/{field}/{value}",
"threats": "http://apiv3.iucnredlist.org/api/v3/threats/species/{field}/{value}",
"habitats": "http://apiv3.iucnredlist.org/api/v3/habitats/species/{field}/{value}",
"countries": "http://apiv3.iucnredlist.org/api/v3/species/countries/{field}/{value}",
"conservation_measures": "http://apiv3.iucnredlist.org/api/v3/measures/species/{field}/{value}",
"citations": "http://apiv3.iucnredlist.org/api/v3/species/citation/{field}/{value}",
"narrative": "http://apiv3.iucnredlist.org/api/v3/species/narrative/{field}/{value}",
"growth_forms": "http://apiv3.iucnredlist.org/api/v3/growth_forms/species/{field}/{value}"
}
if token is None:
raise ValueError("You must provide a token for the IUCN API")
else:
self.token = token
if cache_name is None:
self.cache_name = "redlist_api_cache"
else:
self.cache_name = cache_name
self.regions = region_list(self.token)
if cache:
requests_cache.install_cache(self.cache_name)
self.session = requests_cache.CachedSession()
else:
self.session = requests.Session()
self.session.hooks = {"response": make_throttle_hook(delay)}
def get_page(self, page):
"""Request specific page of species data
parameters:
page - str, page number to request
"""
return make_request(self.page_url.format(page), self.token)
def get_species_info(self, info_type, value, field="id", region=None):
"""Get a given type of information (e.g. threats, habitats) for given
species name or id.
parameters:
info_type - str, the type of info to request, e.g. habitats
value - str, the species name or id to get info for
field - str, whether to query by species name or id
region - str, optional region to query within
returns:
json of response information
"""
url = self.species_urls.get(info_type)
if not url:
raise(ValueError("There is no stored url for this information"))
else:
url = url.format(field=field, value=value)
if (field == "name") & (info_type == "details"):
url = url.replace("/name", "")
if field not in ["name", "id"]:
raise ValueError("Not a recognised species search field")
if region:
if region not in self.regions.values():
raise ValueError("Not a recognised region identifier")
else:
url = url + "/region/{}".format(region)
return make_request(url, self.token)
def get_all_pages(self):
"""Run requests to get all of the species data"""
species_data = []
page_idx = 0
species_returned = None
while (page_idx == 0) | (species_returned is not None):
species_returned = self.get_page(page_idx)
if species_returned:
species_data.extend(species_returned)
page_idx = page_idx + 1
return species_data
def get_all_species_info(self, species_list, info_type, field="id", region=None):
"""Get all of a particular type of info (e.g. threats) for a list of species
names or ids.
parameters:
species_list - list of species names or ids to query
info_type - str, the type of info to request, e.g. habitats
field - str, whether to query by species name or id
region - str, optional region to query within
returns:
list of query results
"""
returned_data = []
for species in species_list:
results = self.get_species_info(info_type, species, field=field, region=region)
if results is not None:
returned_data.extend(results)
return returned_data
def get_region_identifier(self, region):
"""Utility to get a region identifier for a region"""
if region not in self.regions:
raise KeyError("Not a recognised region")
return self.regions.get(region)
def log_progress(sequence, every=None, size=None, name='Items'):
"""A html widget for logging the progess of the requests.
Copied from https://github.com/alexanderkuk/log-progress.
"""
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{name}: {index} / ?'.format(
name=name,
index=index
)
else:
progress.value = index
label.value = u'{name}: {index} / {size}'.format(
name=name,
index=index,
size=size
)
yield record
except:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = "{name}: {index}".format(
name=name,
index=str(index or '?')
)
def main():
parser = ArgumentParser(description="download information from the Red List API")
parser.add_argument("-t", "--token", help="Access token for Red List API")
parser.add_argument("-o", "--outfile", help="Name of file to save redlist to")
args = parser.parse_args()
if not args.token:
raise ValueError("No token provided for API, you must provide a token")
save_file = args.outfile
if save_file is None:
date = datetime.datetime.now()
save_file = "../output/redlist_download_{date}.csv".format(date=date.strftime("%Y%m%d"))
getter = redListGetter(token=args.token)
redlist = getter.get_all_pages()
with open(save_file, "w", newline="") as outfile:
writer = csv.DictWriter(outfile, fieldnames=redlist[0].keys())
writer.writeheader()
writer.writerows(redlist)
if __name__ == "__main__":
main()
| barnabywalker/threatened_species_classification_comparison | scripts/redlist_api.py | redlist_api.py | py | 8,897 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "requests_cache.install_cache",
... |
43508300282 | """
"""
import sys
from pathlib import Path
print(Path(__file__).resolve().parents[1])
sys.path.append(Path(__file__).resolve().parents[1])
if __name__ == '__main__' and __package__ is None:
__package__ = 'kurosc'
#
# from lib.plotformat import setup
import numpy as np
np.set_printoptions(precision=2, suppress=True)
from datetime import datetime as dt
"""
unit test dist in array
call wavelet
"""
def distance_test(m:int = 128,
n:int = 128,
):
from corticalSheet.oscillator import oscillatorArray
domain = (-np.pi,np.pi)
osc = oscillatorArray((m,n),domain)
print(dt.now(),#.strftime('%y%m%d_%H%M%S'),
'\nics\n',
osc.ic,
'\n\ndistance shape\n',
osc.distance.shape,
'\n\ndistance vector\n',
osc.distance.flatten())
return osc.ic,osc.distance.flatten()
def wavelet_test():
from spatialKernel.wavelet import kernel
_,y = distance_test(3,3)
s = kernel()
params = {'a': 10000/3*2,
'b': 0,
'c': 10,
'order': 17,
}
w = s.wavelet(s.spatial_wavelet,y,*params.values(),True)
print(dt.now(),'\nwavelet\n',w)
def decouple_test():
from secondOrderInteraction.decouple import interaction
x,_ = distance_test(3,3)
a = interaction(x.shape)
y = a.delta(x.ravel())
p = {'beta': 0.25, 'r':0.95}
g = a.gamma(y,**p)
print(dt.now(),'\ngamma\n',g,
'\n\nphase difference vector\n',
g.flatten(),
'\n\nmean difference vector\n',
np.mean(g))
return g.flatten()
def system():
#initialize an osc array
dimension = (2,2)
domain = (0,np.pi)
osc = oscillatorArray(dimension,domain)
# fixed time wavelet kernel
s = kernel()
kernel_params = {'a': 10000/3*2,
'b': 0,
'c': 10,
'order': 4,
}
interaction_params = ({'beta': 0, 'r':0},
{'beta': 0.25, 'r':0.95})
w = s.wavelet(s.spatial_wavelet,
osc.distance.flatten(),
*kernel_params.values(),True)
# print(dt.now(),'\nwavelet\n',w)
a = interaction(osc.ic.shape)
phase_difference = a.delta(osc.ic)
g = a.gamma(phase_difference,**interaction_params[0])
print(dt.now(),
'\nwavelet\n',
w,'\n',type(w),
'\n\nphase difference vector\n',
g.flatten(),'\n',
type(g.flatten()),
'\nwavelet*difference\n',
w*g.flatten()
)
def gif_test():
from lib.animate import animate
filepath = Path('/Users/Michael/Documents/GitHub/kuramoto-osc/Python/Oscillator Phase in 0_pi')
vid = animate(filepath)
vid.to_gif(filepath,0.75,True)
def normal_test():
from spatialKernel.wavelet import kernel
s = kernel()
"""construct a normal dist frequency lookup"""
distance = 3/2
resolution = 20 #mln samples
x = np.linspace(-distance,distance,resolution)
# by eye
params = {'a': 1/7,
'b': 0,
'c': 1/2,
}
g = s.wavelet(s.gaussian,x,*params.values(),False)
rng = np.random.default_rng()
p = np.array(rng.choice(g,size=np.prod((2,2))),dtype=float)
print(type(p),'\n',g)
indx = np.zeros([g.shape[0],p.shape[0]],dtype=bool)
indy = np.arange(g.shape[0])
for k,q in enumerate(p):
indx[indy[g==q],k] = 1
print(indx,indx.any(axis=1))
# return
def move_dirs():
from lib.plotformat import setup
fmt = setup('test_dir',3)
txt ='Oscillator Phase in pi'
print(txt)
print(fmt.plot_name(str(txt)))
def load_j():
import json
f = open('model_config.json')
var = json.load(f)
[print(var['test_set0'][k]) for k,v in var['test_set0'].items()]
def index_ts():
zshape = (24,24,500)
rng = np.random.default_rng()
rnd_idx = rng.choice(np.arange(zshape[0]),
size=2,
replace=False,
)
print(rnd_idx)
idx = np.array(
[[ 6, 1],
[ 6, -1],
[ 4, 1],
[ 4, -1],
[ 5, 1],
[ 5, -1],
[ 6 , 0],
[ 4, 0]]
)
idl0 = np.where(idx[:,0]<=zshape[0])[0]
idl1 = np.where(idx[:,1]<=zshape[1])[0]
idz0 = np.where(idx[:,0]>=0)[0]
idz1 = np.where(idx[:,1]>=0)[0]
print(idl0,idl1,idz0,idz1)
idu = np.intersect1d(idl0,idz0)
idv = np.intersect1d(idl1,idz1)
idw = np.intersect1d(idu,idv)
print( idu, idv, idw, idx[idw,:])
def plt_title():
interaction_params:dict = {'beta': 0.75,'r': 0.25}
kernel_params:dict = {'a': 10000/3*2,
'b': 0,
'c': 10, # breadth of wavelet
'order': 4}
title=None
domain = [0,np.pi]
kn=11.1
samples = 5
if abs(domain[0]) % np.pi == 0 and not domain[0] == 0:
ti = r'\pi'
ti = '-'+ti
else:
ti = str(domain[0])
if abs(domain[1]) % np.pi == 0 and not domain[1] == 0:
tf = r'\pi'
else:
tf = str(domain[1])
if not title:
print(interaction_params,
kernel_params,
)
title = 'Timeseries for {s} Random Neighbors R={r:.2f} $\\beta$={beta:.2f} K/N={kn:.1f} & c={c:.0f})'.format(s=samples,
**interaction_params,
**kernel_params,
kn=kn)
print(title)
def spatial_wavelet(self,
x: np.ndarray,
a: float,
b: float,
c: float,
d: int = 4, # 4th derivative
) -> np.ndarray:
"""arbitrary derivation of the gaussian to nth order and substitute params """
wavelet = derivative(d)
fn = lambdify(['x','a','b','c'], wavelet, 'numpy')
return fn(x,a,b,c)
def LSA():
from spatialKernel.symdiff import derivative
from sympy import (symbols,
sin)
x,t,b,r = symbols('x,theta,beta,r')
fn = lambda x,t,b,r: -sin(t-x+b) + r*sin(2*(t-x))
fnc = lambda x,t,b,r: (-1 if r else 1)*sin(t-x+b) + r*sin(2*(t-x))
df = derivative(fnc(x,t,b,r),1,x)
vals = {'r':0.8,'beta':0,'theta':0,'x':0}
print(df)
print(df.subs(vals))
def main():
# distance_test(3,3)
# wavelet_test()
# decouple_test()
LSA()
# gif_test()
# normal_test()
# move_dirs()
# load_j()
# index_ts()
# plt_title()
if __name__ == '__main__':
main()
# build_ics(16,16)
# spatial_kernel()
# decouple()
| chriswilly/kuramoto-osc | Python/kurosc/kurosc/unit_test.py | unit_test.py | py | 6,957 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_numb... |
150778063 | from django.urls import path
from django.contrib import admin
from django.urls import include
#Add URL maps to redirect the base URL to our application
from django.views.generic import RedirectView
from . import views
urlpatterns = [
path('start/', views.StartPlay.as_view(), name='start-play'),
path('start/<int:pk>', views.PlaySelect.as_view(), name='play-select'),
path('start/chooseplayers', views.ChoosePlayers.as_view(), name='choose-players'),
path('start/gameplay', views.GamePlay.as_view(), name = 'game-play'),
path('start/enterscore/<int:pk>', views.EnterScore.as_view(), name = 'enter-score'),
path('start/player/<int:pk>/remove/', views.RemovePlayers.as_view(), name='remove-players'),
path('start/gameplay/hand', views.HandComplete.as_view(), name = 'hand-complete'),
path('start/enterscore/<int:pk>/edit', views.EditScoresList.as_view(), name = 'edit-scores-list'),
path('start/enterscore/<int:pk>/update', views.UpdateScore.as_view(), name = 'update-score'),
path('start/complete', views.GameComplete.as_view(), name = 'game-complete')
] | edbranson/scorekeeping | scorekeeping/score/urls.py | urls.py | py | 1,095 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
29719085837 | from collections import defaultdict
def cast(val, regs):
"""if int return int else return char"""
try:
return int(val)
except ValueError as ve:
return regs[val]
def run_cmd(line, regs, idx):
"""run a command"""
try:
cmd, x, y = line.split()
except ValueError as ve:
cmd, x = line.split()
mul_flag = False
if cmd == 'set': # set reg X to val Y
regs[x] = cast(y, regs)
elif cmd == 'mul': # set reg X to X * Y
regs[x] = regs[x] * cast(y, regs)
mul_flag = True
elif cmd == 'sub': # decrease reg X by val Y
regs[x] -= cast(y, regs)
elif cmd == 'jnz': # jumps by Y if X > 0
if cast(x, regs) != 0:
idx += cast(y, regs) - 1
return regs, idx, mul_flag
def duet(lines):
"""run all the commands"""
regs = defaultdict(int)
idx = 0
mul_count = 0
while idx < len(lines):
regs, idx, mul_flag = run_cmd(lines[idx], regs, idx)
mul_count += int(mul_flag)
idx += 1
return mul_count
print(duet(open('input').read().splitlines()))
| yknot/adventOfCode | 2017/23_01.py | 23_01.py | py | 1,138 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 37,
"usage_type": "call"
}
] |
26614333632 | import enum
import jsonpatch
import jam
from jam import O
from jam import Q
from jam import exceptions
from jam.schemas import load_schema
from jam.backends.util import load_backend
class Operation(enum.IntEnum):
CREATE = 0
UPDATE = 1
REPLACE = 2
DELETE = 3
SNAPSHOT = 4
RENAME = 5
class ReadOnlyCollection:
"""A Collection interface that only allows reading of data.
Used for getting specific states in time as past data is not modifiable
"""
@classmethod
def from_document(cls, document):
return cls.from_dict(document.data)
@staticmethod
def load_schema(schema):
return load_schema(schema['type'], schema['schema'])
@classmethod
def from_dict(cls, data):
return cls(
jam.Storage(load_backend(data['storage']['backend'], **data['storage']['settings'])),
jam.Logger(load_backend(data['logger']['backend'], **data['logger']['settings'])),
jam.State(load_backend(data['state']['backend'], **data['state']['settings'])),
schema=data.get('schema'),
permissions=data.get('permissions'),
)
def __init__(self, storage, logger, state, permissions=None, schema=None):
self._state = state
self._logger = logger
self._storage = storage
self.permissions = permissions or {}
if schema:
schema = self.load_schema(schema)
self.schema = schema
# Snapshot interaction
def regenerate(self):
# Remove all data otherwise we might have some rogue keys
self._state.clear()
try:
snapshot_log = self._logger.latest_snapshot()
except exceptions.NotFound:
# Otherwise apply all logs
logs = list(self._logger.list(O('modified_on', O.ASCENDING)))
else:
# If we've found the snap shot, load it and apply all logs after it
self.load_snapshot(snapshot_log)
# Note: After sorts ascending on timestamp
logs = list(self._logger.after(snapshot_log.modified_on))
data_objects = {}
for data_object in self._storage._backend.query(Q('ref', 'in', [
log.data_ref for log in logs if log.data_ref
])):
data_objects[data_object.ref] = data_object
acc = 0
for log in logs:
acc += 1
self._state.apply(log, log.data_ref and data_objects[log.data_ref].data)
return acc # The number of logs that were not included from the snapshot
def load_snapshot(self, snapshot_log):
# Pull our data object, a list of log refs
data_object = self._storage.get(snapshot_log.data_ref)
logs, data_objects = zip(*data_object.data)
log_map = {log.ref: log for log in self._logger.bulk_read(logs)}
data_object_map = {do.ref: do for do in self._storage.bulk_read(data_objects)}
# Load and apply each log ref
for log, data_object in zip(logs, data_objects):
self._state.apply(log_map[log], data_object_map[data_object].data, safe=False)
# Data interaction
def select(self):
return self._state._backend.select()
def list(self):
return self._state.list()
def keys(self):
return self._state.keys()
def read(self, key):
try:
doc = self._state.get(key)
if doc.data is None and doc.data_ref:
doc.data = self._storage.get(doc.data_ref)
return doc
except exceptions.NotFound:
raise exceptions.NotFound(
code='D404',
title='Document not found',
detail='Document "{}" was not found'.format(key)
)
def history(self, key):
return self._logger.history(key)
def __repr__(self):
return '<{}({}, {}, {})>'.format(self.__class__.__name__, self._storage, self._logger, self._state)
class FrozenCollection(ReadOnlyCollection):
def snapshot(self):
data_object = self._storage.create([(doc.log_ref, doc.data_ref) for doc in self._state.list()])
log = self._logger.create_snapshot(data_object.ref)
return log
class BaseCollection(ReadOnlyCollection):
def snapshot(self):
data_object = self._storage.create([(doc.log_ref, doc.data_ref) for doc in self._state.list()])
log = self._logger.create(None, Operation.SNAPSHOT, data_object.ref, None)
return log
def create(self, key, data, user):
if self.schema:
self.schema.validate(data)
try:
self._state.get(key)
except exceptions.NotFound:
pass
else:
raise exceptions.KeyExists(
code='D409',
title='Document already exists',
detail='Document "{}" already exists'.format(key)
)
data_object = self._storage.create(data)
return self._state.apply(self._logger.create(
key,
Operation.CREATE,
data_object.ref,
user
), data)
def update(self, key, patch, user):
previous = self._state.get(key)
if isinstance(patch, dict):
patch = self._generate_patch(previous.data, patch)
patch = self._validate_patch(patch)
try:
data = jsonpatch.apply_patch(previous.data, patch)
except jsonpatch.JsonPatchTestFailed as e:
raise exceptions.JsonPatchTestFailed(e)
if self.schema:
self.schema.validate(data)
if data.get('schema'):
self.load_schema(data['schema'])
data_object = self._storage.create(data)
return self._state.apply(self._logger.create(
key,
Operation.UPDATE,
data_object.ref,
user,
previous=previous,
operation_parameters={'patch': list(patch)}
), data)
# TODO
def replace(self, key, data, user):
previous = self._state.get(key)
if self.schema:
self.schema.validate(data)
data_object = self._storage.create(data)
return self._state.apply(self._logger.create(
key,
Operation.UPDATE,
data_object.ref,
user,
previous=previous,
), data)
def delete(self, key, user):
# data_ref for delete logs should always be None
previous = self._state.get(key)
return self._state.apply(self._logger.create(
key,
Operation.DELETE,
None,
user,
previous=previous
), None)
def rename(self, key, new_key, user):
# Create two logs, one for the from key, effectively a delete
# and another for the to key, effectively a create
previous = self._state.get(key)
self._state.apply(self._logger.create(
key,
Operation.RENAME,
None,
user,
previous=previous,
operation_parameters={'to': new_key}
), None)
return self._state.apply(self._logger.create(
new_key,
Operation.RENAME,
previous.data_ref,
user,
previous=previous,
operation_parameters={'from': key}
), previous.data)
def at_time(self, timestamp, state, regenerate=True):
"""Given a unix timestamp and a state (Should be empty)
creates a ReadOnlyCollection for this collection at that point in time.
Note: The closer timestamp is to a saved state the faster this will be
"""
frozen = FrozenCollection(
self._storage,
self._logger.at_time(timestamp),
state,
# Note: No need to pass in schema, read-only collections have no use for it
permissions=self.permissions
)
if regenerate:
frozen.regenerate()
return frozen
def _generate_patch(self, previous, new):
return jsonpatch.JsonPatch.from_diff(previous, new)
def _validate_patch(self, patch):
return patch
| CenterForOpenScience/jamdb | jam/base.py | base.py | py | 8,174 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "enum.IntEnum",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "jam.schemas.load_schema",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "jam.Storage",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "jam.backends.ut... |
74224410343 | import datetime
import logging
import numpy as np
import os
import skimage.io as io
from skimage.exposure import equalize_adapthist
from skimage.filters import gaussian
from skimage.transform import resize
from skimage.color import rgb2lab
from src.data_loader import root_dir, FOLDER_EXPERIMENTS, references_paths, references_names, originals_paths
from src.v8_test.segmentation import segmentation
from src.v8_test.validation import validation
from src.utils import apply_on_normalized_luminance, outline_regions
from src.v8_test.fct import plot_kappa_score
from scipy.optimize import minimize
from random import randint
# Hard caps on dataset size (not referenced in this script -- TODO confirm use).
MAX_PATIENTS = 1
MAX_IMAGES_PER_PATIENT = 1
MAX_PATCHES_PER_IMAGE = 2


def test_on_all_images(initial_value):
    """Objective function for scipy.optimize.minimize (lower is better).

    Runs the segmentation over every preprocessed image with the Lab colour
    named by the global ``param_name`` replaced by ``initial_value`` (the
    other two colours come from ``dictionary_arguments``), validates against
    the reference masks, and returns the inverse of the mean kappa score so
    that minimising this function maximises kappa.

    NOTE(review): relies on module-level globals assigned in __main__
    (dictionary_arguments, param_name, nb_images, image_lab_list, ref_paths,
    k_history, history) -- not usable standalone.
    """
    brown_lab = dictionary_arguments.get('brown_lab', 0)
    blue_lab = dictionary_arguments.get('blue_lab', 0)
    white_lab = dictionary_arguments.get('white_lab', 0)
    k_list = []
    for p in range(nb_images):
        # Substitute the optimiser's candidate value only for the parameter
        # currently being tuned; the other two stay at their current best.
        if param_name == 'brown_lab':
            all_mask = segmentation(image_lab_list[p], 2, 2, 2,
                                    brown_lab=initial_value,
                                    blue_lab=blue_lab,
                                    white_lab=white_lab)
        elif param_name == 'blue_lab':
            all_mask = segmentation(image_lab_list[p], 2, 2, 2,
                                    brown_lab=brown_lab,
                                    blue_lab=initial_value,
                                    white_lab=white_lab)
        elif param_name == 'white_lab':
            all_mask = segmentation(image_lab_list[p], 2, 2, 2,
                                    brown_lab=brown_lab,
                                    blue_lab=blue_lab,
                                    white_lab=initial_value)
        # Compare the three class masks against the two reference files.
        k = validation(all_mask[0, :, :], all_mask[1, :, :], all_mask[2, :, :], ref_paths[p][0], ref_paths[p][1])
        print('k = ', k)
        k_list.append(k)
        k_history[p].append(k)
    history.append({
        'parameter': param_name,
        'value': initial_value,
    })
    print('k mean = ', np.mean(k_list))
    # Last slot of k_history tracks the per-step mean across all images.
    k_history[-1].append(np.mean(k_list))
    return 1 / np.mean(k_list)
if __name__ == "__main__":
    # One results directory per run, keyed by the start timestamp.
    execution_id = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    results_dir = root_dir(FOLDER_EXPERIMENTS(version=8), execution_id)
    os.makedirs(results_dir, exist_ok=True)
    # Mirror log output to a file inside the results directory and to stdout.
    logging.basicConfig(
        level=logging.INFO,
        handlers=[
            logging.FileHandler(os.path.join(results_dir, 'log.txt')),
            logging.StreamHandler()
        ]
    )
    # Collect reference (ground-truth) and original image paths per sample.
    names = references_names()
    ref_paths = []
    ori_paths = []
    for i in range(len(names)):
        ref_paths.append(references_paths(names[i]))
        ori_paths.append(originals_paths(names[i]))
    nb_images = len(ref_paths)
    # Preprocess every image once: smooth, contrast-enhance, convert to Lab,
    # and keep a downscaled copy of the original for later visualisation.
    image_lab_list = []
    image_original_list = []
    for i in range(nb_images):
        image = io.imread(ori_paths[i][0])
        results_p_dir = os.path.join(results_dir, "image", f'{i}')
        os.makedirs(results_p_dir, exist_ok=True)
        io.imsave(fname=os.path.join(results_p_dir, '01 00 Original.jpg'),
                  arr=image)
        logging.info('Resizing')
        resize_factor = 8
        # NOTE(review): the width below is not cast to int (unlike the height),
        # so a float dimension is passed to resize -- confirm this is intended.
        image_original_list.append(resize(image,
                                          (int(image.shape[0] / resize_factor), (image.shape[1] / resize_factor)),
                                          anti_aliasing=True))
        logging.info('Gaussian filter')
        # Denoise on the normalised luminance channel only.
        image = apply_on_normalized_luminance(
            operation=lambda img: gaussian(img, sigma=2),
            image_rgb=image)
        io.imsave(fname=os.path.join(results_p_dir, f'01 01 - Gaussian filter.jpg'),
                  arr=image)
        logging.info('CLAHE')
        # Local contrast enhancement, again on luminance only.
        image = apply_on_normalized_luminance(
            lambda img: equalize_adapthist(img, clip_limit=0.02),
            image_rgb=image)
        io.imsave(fname=os.path.join(results_p_dir, f'01 02 - CLAHE.jpg'),
                  arr=image)
        image_lab = rgb2lab(image)
        logging.info('Resizing')
        resize_factor = 8
        image_lab = resize(image_lab, (int(image.shape[0] / resize_factor), (image.shape[1] / resize_factor)),
                           anti_aliasing=True)
        image_lab_list.append(image_lab)
    # Initial Lab-space reference colours for the three segmentation classes.
    dictionary_arguments = {
        'brown_lab': np.array([29.01, 24.73, 39.85]),
        'blue_lab': np.array([36.72, 3.43, -23.77]),
        'white_lab': np.array([80.99, -1.56, -0.01])
    }
    nb_iteration = 30
    # One kappa history per image plus one extra slot for the per-step mean.
    k_history = [[] for i in range(nb_images + 1)]
    history = []
    # Three rounds of coordinate-wise optimisation: each reference colour is
    # tuned in turn with L-BFGS-B while the other two stay fixed.
    for j in range(3):
        for param_name, param_initial_value in dictionary_arguments.items():
            result = minimize(fun=test_on_all_images, x0=param_initial_value, method='L-BFGS-B',
                              bounds=((0, 100), (-128, 127), (-128, 127)),
                              options={'eps': 1,
                                       'maxfun': nb_iteration,
                                       'maxiter': nb_iteration,
                                       'maxls': nb_iteration})
            # result.x is an ndarray -- it becomes the new colour for this class.
            dictionary_arguments[param_name] = result.x
            n = randint(0, 4)  # to try the new value on one of the five images
            masks = segmentation(image_lab_list[n], 2, 2, 2,
                                 brown_lab=dictionary_arguments['brown_lab'],
                                 blue_lab=dictionary_arguments['blue_lab'],
                                 white_lab=dictionary_arguments['white_lab'])
            results_p_dir_bis = os.path.join(results_dir, "image", param_name, f'{result.x}')
            os.makedirs(results_p_dir_bis, exist_ok=True)
            # Overlay each class mask on the (border-trimmed) downscaled original.
            regions_positive = outline_regions(image_original_list[n][10:image_original_list[n].shape[0] - 10,
                                               10:image_original_list[n].shape[1] - 10], masks[0, :, :])
            io.imsave(fname=os.path.join(results_p_dir_bis, f'regions positive.jpg'),
                      arr=regions_positive)
            regions_negative = outline_regions(image_original_list[n][10:image_original_list[n].shape[0] - 10,
                                               10:image_original_list[n].shape[1] - 10], masks[1, :, :])
            io.imsave(fname=os.path.join(results_p_dir_bis, f'regions negative.jpg'),
                      arr=regions_negative)
            regions_background = outline_regions(image_original_list[n][10:image_original_list[n].shape[0] - 10,
                                                 10:image_original_list[n].shape[1] - 10], masks[2, :, :])
            io.imsave(fname=os.path.join(results_p_dir_bis, f'regions background.jpg'),
                      arr=regions_background)
        results_p_dir_plots = os.path.join(results_dir, "plot", f'iteration {j}')
        os.makedirs(results_p_dir_plots, exist_ok=True)
        plot_kappa_score(k_history, nb_images, results_p_dir_plots)
    # NOTE(review): os.path.join discards results_dir when the second argument
    # is absolute, so this always writes to the hard-coded home path; the
    # f-string below also appends a stray ')' after the history dump -- confirm.
    file = open(os.path.join(results_dir, "/home/uib/PycharmProjects/ki67/Results/history.txt"), "w")
    file.write(f'{history})')
    file.close()
    print(dictionary_arguments.values())
| AntoineRouland/ki67 | src/v8_test/optimization.py | optimization.py | py | 7,179 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "src.v8_test.segmentation.segmentation",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "src.v8_test.segmentation.segmentation",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "src.v8_test.segmentation.segmentation",
"line_number": 44,
"u... |
9788698464 |
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain import VectorDBQA, OpenAI
from langchain.chains import RetrievalQA
import pinecone
import os
# NOTE(review): this prints the Pinecone API key to stdout -- a secret leak;
# confirm whether it can be removed or replaced by a presence check.
print(os.environ['PINECONE_API_KEY'])
# Initialise the Pinecone client from environment configuration at import time.
pinecone.init(
    api_key=os.environ['PINECONE_API_KEY'],
    environment=os.environ['PINECONE_ENVIRONMENT']
)
if __name__ == "__main__":
    print("Hello VectorStore!")
    # Load a single blog post from a hard-coded local path.
    loader = TextLoader(
        "/home/ellizzabbetth/intro-to-vector-db/mediumblogs/mediumblog1.txt"
    )
    document = loader.load()
    # Chunk the document so each piece fits an embedding request.
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(document)
    print(len(texts))
    embeddings = OpenAIEmbeddings(openai_api_key=os.environ.get("OPENAI_API_KEY"))
    # Embed the chunks and upsert them into the existing Pinecone index.
    docsearch = Pinecone.from_documents(
        texts, embeddings, index_name="eli-index"
    )
    # Retrieval-augmented QA chain: "stuff" packs retrieved chunks into one prompt.
    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(), return_source_documents=True
    )
    query = "What is a vector DB? Give me a 15 word answer for a begginner"
    result = qa({"query": query})
    print(result)
| ellizzabbetth/intro-into-vector-db | main.py | main.py | py | 1,279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pinecone.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_... |
73403061863 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2023/1/13 22:03
# @Author : Rongrui Zhan
# @desc : 本代码未经授权禁止商用
import os.path
import flet
from pdf2image.exceptions import PDFPageCountError
from bonecommand import all_commands
from bonecommand.utils import user_path
from bonecommand.pdf.utils import convert_pdf, split_pdf, merge_pdfs
def main(page: flet.Page):
    """Flet entry point: builds the PDF-tool UI on ``page``."""
    page.title = "My First Flet App"
    page.vertical_alignment = flet.MainAxisAlignment.CENTER
    # Row intended to hold page thumbnails (starts empty).
    thumbnails = flet.Row(
        [],
        alignment=flet.MainAxisAlignment.CENTER,
    )
    # Ensure the default output directory exists before anything is saved.
    os.makedirs(f"{user_path}/bonecommand/generated", exist_ok=True)
    pdf_path = flet.TextField(
        label="PDF Path",
        value="",
    )
    output_folder = flet.TextField(
        label="Output folder",
        value=f"{user_path}/bonecommand/generated",
    )

    def view_pop(view):
        # Pop the current view and navigate back to the one underneath.
        page.views.pop()
        top_view = page.views[-1]
        page.go(top_view.route)

    # NOTE(review): split_pdf is wired directly as a click handler, so it will
    # be called with a flet ControlEvent rather than PDF paths -- confirm its
    # signature against bonecommand.pdf.utils.split_pdf.
    page.add(
        pdf_path,
        output_folder,
        thumbnails,
        flet.TextButton("Split PDF", on_click=split_pdf),
    )
    page.views.append(
        flet.View(
            "/store",
            [
                flet.AppBar(
                    title=flet.Text("Store"), bgcolor=flet.colors.SURFACE_VARIANT
                ),
                flet.ElevatedButton("Go Home", on_click=lambda _: page.go("/")),
            ],
        )
    )
    page.go("/store")
    print([v.route for v in page.views])
    page.on_view_pop = view_pop


# Launch the app with the package's bundled assets directory.
flet.app(target=main, assets_dir=f"{user_path}/bonecommand/assets")
| zrr1999/BoneCommand | bonecommand/gui.py | gui.py | py | 1,605 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flet.Page",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flet.MainAxisAlignment",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flet.Row",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flet.MainAxisAlig... |
17172274930 | import asyncio
import json
import discord
import requests
from discord.ext import commands
from config.functional_config import check_channels, failure, FAILURE_COLOR, HEADERS, accept, loading, \
SUCCESS_COLOR, GENERAL_COLOR
from config.online_config import server, URL_carta
async def checking(ctx, server_name):
    """Probe the monitoring endpoint for ``server_name`` and report liveness.

    Posts a progress embed to the invoking channel, checks the endpoint's HTTP
    status once, then samples its ``timestamp`` field five times (one second
    apart) to verify the monitor is actually updating, editing the embed after
    every sample.
    """
    embed = discord.Embed(title='Проверка связи...', color=GENERAL_COLOR)
    msg = await ctx.reply(embed=embed)
    text = ''
    url = URL_carta[server.index(server_name)]
    # Fetch once and reuse the response; the original issued two back-to-back
    # GETs per probe, doubling load and risking a status/body mismatch.
    # NOTE(review): requests is blocking inside a coroutine and stalls the
    # event loop -- consider aiohttp or run_in_executor.
    response = requests.get(url, headers=HEADERS, params=None)
    if response.status_code == 200:
        go_check = True
        payload = json.loads(response.text)
        text += f'Подключение: {accept}'
        # Baseline timestamp; later samples must differ for a healthy monitor.
        stamp = int(payload["timestamp"])
        clr = SUCCESS_COLOR
    else:
        go_check = False
        text += f'Подключение: {failure}'
        clr = FAILURE_COLOR
    embed = discord.Embed(title='Проверка связи...',
                          description=text, color=clr)
    await msg.edit(embed=embed)
    if go_check:
        result = ''
        for i in range(5):
            await asyncio.sleep(1)
            response = requests.get(url, headers=HEADERS, params=None)
            if response.status_code == 200:
                payload = json.loads(response.text)
                serv = payload["timestamp"]
                if int(serv) != stamp:
                    stamp = int(serv)
                    result += f'{accept}'
                else:
                    result += f'{failure}'
            embed = discord.Embed(title='Проверка связи...',
                                  description=f'{text}\n\n'
                                              f'Дополнительные тесты {i + 1}/5\n'
                                              f'{result}{loading * (5 - (i + 1))}', color=clr)
            await msg.edit(embed=embed)
        # Healthy if at least one sample showed a fresh timestamp.
        if result != failure * 5:
            result_end = 'Связь хорошая. Сервер мониторится нормально и доступен для выполнения заданий!'
        else:
            result_end = 'Связь нарушение. Сервер не мониторится. Выполнение заданий невозможно!'
        embed = discord.Embed(title='Проверка связи...',
                              description=f'{text}\n\n'
                                          f'Дополнительные тесты 5/5\n'
                                          f'{result}\n\n'
                                          f'**{result_end}**', color=clr)
        await msg.edit(embed=embed)
class CheckServer(commands.Cog):
    """Cog exposing the ``minecraft-check`` command for probing servers."""

    def __init__(self, py):
        # ``py`` is the bot instance this cog is attached to.
        self.py = py

    @commands.command(aliases=['minecraft-check'])
    async def _check_server(self, ctx, server_name):
        # Only act in whitelisted channels, and only for known server names.
        if await check_channels(ctx):
            if server_name in server:
                await checking(ctx, server_name)
            else:
                embed = discord.Embed(title=failure,
                                      description='Я не нашел такого сервера...',
                                      color=FAILURE_COLOR)
                await ctx.reply(embed=embed)
def setup(py):
    # discord.py extension entry point: register this cog on the bot.
    py.add_cog(CheckServer(py))
| YarSav1/ForMCObot | cogs/for_all/minecraft/tasks_minecraft/check/check_serv.py | check_serv.py | py | 3,423 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.Embed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "config.functional_config.GENERAL_COLOR",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "config.online_config.URL_carta",
"line_number": 17,
"usage_type": "name"
},
{
... |
34517047847 | import snap7
from snap7.util import *
from snap7.snap7types import *
# Connect to the S7 PLC at the given IP, rack 0, slot 1.
s7 = snap7.client.Client()
s7.connect('192.168.14.45', 0, 1)
# Read 4 bytes (one REAL) from DB1 at offset 0 and decode it.
data = s7.db_read(1, 0, 4)
value = get_real(data, 0)
print(value)
# Encode a REAL into the first 4 bytes of a 5-byte scratch buffer...
data = bytearray(5)
set_real(data,0, -0.0177002)
# ...then drop the unused trailing byte so exactly 4 bytes are written back.
data = data[:-1]
print(data)
s7.db_write(1, 0, data)
| AndreasScharf/IotAdapter | sonstiges/s7setvalue.py | s7setvalue.py | py | 300 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "snap7.client.Client",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "snap7.client",
"line_number": 4,
"usage_type": "attribute"
}
] |
30178594395 | from matplotlib import pyplot as plt
from PIL.Image import frombytes
import cv2
import numpy as np
from traceback import print_exc
'''
Toolkit for image process
with the method from cv2,plt,np
个人工具包,对一些需要调用多次函数的
过程打包,主要用于图像的处理和输出,
使用的库为cv2,matplotlib,PIL,numpy
'''
class count_show(object):
    """Simple incrementing counter that can echo its value in-place."""

    def __init__(self, start=0):
        # Current tally; begins at ``start``.
        self.count = start

    def add(self, add=1):
        """Increase the tally by ``add`` (default 1)."""
        self.count = self.count + add

    def add_show(self, start='\r', end=''):
        """Increment by one, then print the tally prefixed by ``start``."""
        self.add()
        print(start + str(self.count), end=end)
class errorProcess(object):
    """Accumulates categorised error records and prints/summarises them."""

    def __init__(self, debug=False):
        # Error-category table; the counter list below is sized to match it.
        self.errorType = ['NONE', 'DIR', 'NAME', 'LOAD', 'ROTATE', 'STRETCH', 'THRESH', 'WRITE', 'CLEAN']
        # recommend the name less than 8 characters
        self.errorCount = [0]*len(self.errorType)
        # Field names for each record; records are ordered [tag, file, info] lists.
        self.__errorInfoName = ['tag', 'file', 'info']
        self.__errorLastInfoValue = [self.errorType[0], '', '']
        self.errorInfo = []
        self.errorTotalCount = 0
        self.debug=debug
    def index(self, name):
        # Map a field name ('tag'/'file'/'info') to its position in a record.
        return self.__errorInfoName.index(name)
    def add(self, tagindex, file, info):
        # Record one error; out-of-range tag indices fall back to 'NONE' (0).
        if tagindex not in range(0,len(self.errorType)):
            tagindex=0
        self.__errorLastInfoValue[self.index('tag')] =self.errorType[tagindex]
        self.__errorLastInfoValue[self.index('file')]=file
        self.__errorLastInfoValue[self.index('info')]=info
        # Append a copy -- the scratch list is reused by the next add().
        self.errorInfo.append(self.__errorLastInfoValue[:])
        self.errorCount[tagindex]+=1
        self.errorTotalCount+=1
    def last_index(self):
        # Index of the most recently added record (-1 when empty).
        return self.errorTotalCount-1
    def show(self, index):
        # Print one record; with debug enabled, also dump the active traceback.
        if self.debug:
            print_exc()
        print('[ERROR][%03d:%2d:%-7s][Where]%s:[At]%s' %
              (index + 1,
               self.errorType.index(self.errorInfo[index][self.index('tag')]),
               self.errorInfo[index][self.index('tag')],
               self.errorInfo[index][self.index('file')],
               repr(self.errorInfo[index][self.index('info')])))
    def show_all(self):
        for i in range(0,self.last_index()+1):
            self.show(i)
    def show_last(self):
        self.show(self.last_index())
    def show_all_type(self):
        # Print a per-category summary of the non-zero counters.
        if not self.is_empty():
            for i in range(0, len(self.errorCount)):
                if self.errorCount[i] != 0:
                    print(self.errorType[i].ljust(8,'-') + 'error:' + str(self.errorCount[i]))
    def add_show(self, tagindex, file, info):
        # Convenience: record an error and immediately print it.
        self.add(tagindex, file, info)
        self.show_last()
    def is_empty(self):
        if self.errorTotalCount==0:
            return True
        else:
            return False
    def error_file_list(self):
        # Collect the 'file' field of every record, in insertion order.
        errorFileList=[]
        for i in range(0, self.last_index() + 1):
            errorFileList.append(self.errorInfo[i][self.index('file')])
        return errorFileList
    def show_error_file_list(self):
        for file in self.error_file_list():
            print(file)
    def error_code(self):
        # Exit code derived from the last record's category: 'NONE' maps to -1,
        # any other category to its index, and no errors at all to 0.
        if self.errorTotalCount!=0:
            errorCode=self.errorType.index(self.errorInfo[self.last_index()][self.index('tag')])
            if errorCode==0:
                errorCode=-1
            return errorCode
        return 0
    def error_exit(self):
        # Terminate the process with the computed error code.
        print('Exiting...')
        exit(self.error_code())
def is_ascii(file):
    """Return True when ``file`` survives an ASCII round-trip unchanged."""
    stripped = file.encode('ascii', 'ignore').decode('ascii')
    return stripped == file
cv_series = 0
def cv_show(*from_imgs, name="'L': next, 'A': back, 'E': exit"):
    """ Basic usage:cv_show(cv2_img),
    show a image with default name "Unnamed".
    Cycles through the given images: 'L' advances, 'A' steps back, 'E' exits.
    """
    global cv_series
    cv_series += 1
    i = 0
    while True:
        if len(from_imgs) > 1:
            cv2.imshow(name + " - " + str(i) + " - " + str(cv_series), from_imgs[i])
        else:
            cv2.imshow("Press 'E' to exit" + " - " + str(cv_series), from_imgs[i])
        # Read the key exactly once per frame. The original called
        # cv2.waitKey(0) again in every elif branch, so a single decision
        # could silently consume up to three separate keypresses.
        key = cv2.waitKey(0)
        if key == ord('l'):
            i += 1
            cv2.destroyAllWindows()
        elif key == ord('a'):
            i -= 1
            cv2.destroyAllWindows()
        elif key == ord('e'):
            cv2.destroyAllWindows()
            break
        # Wrap the index around at both ends.
        if i >= len(from_imgs):
            i = 0
        elif i < 0:
            i = len(from_imgs) - 1
def cv_resize(from_img, max=800):
    """ Basic usage:cv_resize(cv2_img),
    shrink the image so that neither side exceeds ``max`` pixels
    (default 800) and return (scale_ratio, image); images already small
    enough are returned untouched with ratio 1.
    """
    height, width = from_img.shape[0], from_img.shape[1]
    if height <= max and width <= max:
        return 1, from_img
    longest = height if height > width else width
    ratio = max / longest
    return ratio, cv2.resize(from_img, None, fx=ratio, fy=ratio)  # resize since image is huge
def cv_BoxPoints(rect):
    """Return the 4 corners of a cv2 rotated rect, shaped (4, 1, 2) like a contour."""
    #box = cv2.cv.BoxPoints(rect) # for OpenCV 2.x
    # NOTE(review): np.int0 is removed in NumPy 2.x -- may need np.intp. Confirm.
    rectPoints=np.int0(cv2.boxPoints(rect))
    # Wrap each point in its own row so the result matches cv2's contour format.
    rectPoints=np.array([[rectPoints[x]] for x in range(0,4)])
    return rectPoints
def plt_show(*from_imgs):
    """ Basic usage:plt_show(cv2_img),
    show a image with default name "Unnamed".
    Lays all images out on a near-square matplotlib subplot grid.
    """
    # Candidate grid A: rows = floor(sqrt(n)); columns = enough to fit the rest.
    row_a= int(np.sqrt(len(from_imgs)))
    col_a= int(len(from_imgs)/row_a) + len(from_imgs)%row_a
    # Candidate grid B: shrink the longer side by one and recompute the other;
    # the grid with the aspect ratio closer to 1 wins below.
    if row_a>col_a:
        ratio_a= row_a/col_a
        row_b= row_a-1
        col_b= int(len(from_imgs)/row_b) + len(from_imgs)%row_b
        ratio_b= row_b/col_b if row_b>col_b else col_b/row_b
    elif row_a<col_a:
        ratio_a= col_a/row_a
        col_b= col_a-1
        row_b= int(len(from_imgs)/col_b) + len(from_imgs)%col_b
        ratio_b= row_b/col_b if row_b>col_b else col_b/row_b
    else:
        # Already square: no alternative needed.
        row_b, col_b=row_a, col_a
        ratio_a=ratio_b=1
    row= row_a if ratio_a<ratio_b else row_b
    col= col_a if ratio_a<ratio_b else col_b
    plt_series = 0
    for from_img in from_imgs:
        plt_series+= 1
        plt.subplot(row,col,plt_series)
        plt.title(str(plt_series))
        plt.imshow(from_img)
    #plt.axis('off')
    #plt.tight_layout()
    plt.show()
def plt_dotshow(dots):
    """Plot ``dots`` against their indices with a grid and show the figure."""
    indices = list(range(len(dots)))
    plt.plot(indices, dots)
    plt.grid()
    plt.show()
def bytearray_toimg(*datas,show=1):
    """ Basic usage:bytearray_toimg(np_array),
    convert a numpy array to image and show it
    if the last argument is set to 1 by default or by user.
    This function accept multiple arrays, show
    all of them or return the first one converted.
    """
    if show==1:
        for data in datas:
            # Pack each row of 0/1 pixels into bits for PIL's 1-bit ('1') mode;
            # size is (width, height), hence the reversed shape.
            frombytes(mode='1', size=data.shape[::-1], data=np.packbits(data, axis=1)).show()
    else:
        for data in datas:
            # Returns on the first iteration: only the first array is ever
            # converted when show != 1 (as documented above).
            return frombytes(mode='1',size=data.shape[::-1],data=np.packbits(data,axis=1))
def del_isolatedot(square, nearby_ratio=1/1000, white_ratio=0.7, colour_ratio=1):
    """Fill mostly-white neighbourhoods of a grayscale image with pure white.

    Scans the image on a coarse grid; wherever the surrounding window is at
    least ``white_ratio`` white, the centre patch is painted white.  All three
    ratios should be positive and at most 1.  The input is not modified; a
    filled copy is returned.
    USELESS BY NOW, please use filter_isolated_cells(array, struct) instead.
    """
    result = np.copy(square)
    white = 255
    rows, cols = result.shape[0], result.shape[1]
    # Half-width of the inspection window (at least one pixel).
    window = int(max(min(rows * nearby_ratio, cols * nearby_ratio), 1))
    # Half-width of the patch that actually gets painted.
    patch = int(max(min(window * colour_ratio, window), 1))
    # Pixel-sum a window must reach to count as "mostly white".
    threshold = int(white * (window * 2 + 1) ** 2 * white_ratio)
    checked = painted = 0
    step = patch * 2
    for x in range(window, rows, step):
        for y in range(window, cols, step):
            checked += 1
            block = result[x - window:x + window + 1, y - window:y + window + 1]
            if np.sum(block) >= threshold:
                painted += 1
                result[x - patch:x + patch + 1, y - patch:y + patch + 1] = white
    print(painted, "/", checked)
    return result
def prints(*datas):
    """Print each argument on its own line, then a 20-character '=' separator."""
    for item in datas:
        print(item)
    print("=" * 20)
def corner_points(points):
    """
    Transform a random quadrilateral to a rectangle
    Accept a four-points array generated by cv2.approxPolyDP
    and return the arranged one with same format.
    min--> 0-a-1
    d\ \b
    3-c-2 <--max
    Returns a list of four indices into ``points`` in clockwise order.
    """
    # Distance of each corner from the image origin: the nearest corner is
    # taken as top-left ("0"), the farthest as bottom-right ("2").
    # NOTE(review): this heuristic presumes a roughly axis-aligned quad -- confirm.
    distances=[cv2.norm(points[x]) for x in range(0, 4)]
    points_index=[0, 1, 2, 3]
    arrange_points_index=[0]*4
    arrange_points_index[0]=distances.index(min(distances)) # find the "0" point
    arrange_points_index[2]=distances.index(max(distances)) # find the "2" point
    points_index.remove(arrange_points_index[0])
    points_index.remove(arrange_points_index[2])
    # Of the two remaining corners, the one whose x-coordinate differs more
    # from the top-left corner's x is taken as top-right ("1"); the other is
    # bottom-left ("3").
    if np.absolute(points[points_index[0]][0][0]-points[distances.index(min(distances))][0][0]) > \
            np.absolute(points[points_index[1]][0][0]-points[distances.index(min(distances))][0][0]):
        # find the "1" point <-- points_index[0], "3" point <-- points_index[1]
        arrange_points_index[1]=points_index[0]
        arrange_points_index[3]=points_index[1]
    else:
        arrange_points_index[3]=points_index[0]
        arrange_points_index[1]=points_index[1]
    return arrange_points_index
def rearrange_points(points):
    '''
    corner_points() yields indices in clockwise order; remap them here to the
    counter-clockwise order [0, 3, 2, 1] expected by the stretch routines.
    '''
    arrange_points_index=corner_points(points)
    return [points[arrange_points_index[x]] for x in [0,3,2,1]]
def near_line(points, baseline, deviation=0):
    """Index of the point closest to ``baseline``, optionally nudged outward.

    With a non-zero ``deviation``, the index is shifted ``deviation`` steps in
    the direction the sequence is locally increasing (or decreasing), provided
    the shifted index would stay within bounds.
    """
    gaps = [abs(value - baseline) for value in points]
    best = gaps.index(min(gaps))
    shiftable = deviation != 0 and deviation <= best <= len(gaps) - deviation - 1
    if shiftable:
        if points[best - 1] < points[best]:
            best += deviation
        else:
            best -= deviation
    return best
def is_dark_board(img, middle_area=0.6):
    """Return True when the central crop of ``img`` is dark on average.

    Trims middle_area/2 of the height/width from each side and tests whether
    the mean grayscale value of the remainder is at most 110.  ``img`` is
    assumed to be BGR (cv2 convention) -- TODO confirm against callers.
    """
    dark_line=110
    sample=img[int(middle_area/2*img.shape[0]):img.shape[0]-int(middle_area/2*img.shape[0]),
               int(middle_area/2*img.shape[1]):img.shape[1]-int(middle_area/2*img.shape[1])]
    gray = cv2.cvtColor(sample, cv2.COLOR_BGR2GRAY)
    mean=np.mean(gray)
    if mean<=dark_line:
        return True
    else:
        return False
def is_monotony_points(points, strict=False):
    """Return True when ``points`` is monotone (ignoring small wiggles).

    In the default non-strict mode, steps no larger than 1/16 of the value
    range are treated as noise and skipped; ``strict=True`` makes every
    non-zero step count.
    """
    tolerance = 0 if strict else abs((max(points) - min(points)) / 16)
    direction = None
    for prev, curr in zip(points, points[1:]):
        if abs(curr - prev) <= tolerance:
            continue
        step_up = curr > prev
        if direction is None:
            # First significant step fixes the expected direction.
            direction = step_up
        elif direction != step_up:
            return False
    return True
def stretch_points(points):
    """Build the target rectangle corners for a perspective stretch.

    Given four quadrilateral corners (cv2.approxPolyDP format), returns the
    counter-clockwise destination corners [[0,0]], [[0,y]], [[x,y]], [[x,0]]
    where x and y are the longer edge of each opposing pair, so content is
    never squashed.
    """
    arrange_points_index = corner_points(points)
    # Edge lengths around the arranged quadrilateral.
    line_length = [cv2.norm(points[arrange_points_index[0]][0], points[arrange_points_index[1]][0]),  # 0-a-1
                   cv2.norm(points[arrange_points_index[1]][0], points[arrange_points_index[2]][0]),  # 1-b-2
                   cv2.norm(points[arrange_points_index[2]][0], points[arrange_points_index[3]][0]),  # 2-c-3
                   cv2.norm(points[arrange_points_index[3]][0], points[arrange_points_index[0]][0])]  # 3-d-0
    # (Removed dead 'test' variable that recomputed the 3-d-0 norm for nothing.)
    x = int(line_length[0] if line_length[0] > line_length[2] else line_length[2])
    y = int(line_length[1] if line_length[1] > line_length[3] else line_length[3])
    # Destination corners, counter-clockwise to match rearrange_points().
    transform_distance = [[[0, 0]], [[0, y]], [[x, y]], [[x, 0]]]
    return np.array(transform_distance)
| wmillers/coursewarePhotoProcess | toolkit.py | toolkit.py | py | 12,134 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "traceback.print_exc",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_n... |
6073108961 |
from traits.api import \
HasTraits, List, Array, Property, cached_property, \
Instance, Trait, Button, on_trait_change, \
Int, Float, DelegatesTo, provides, WeakRef, Bool
from ibvpy.mesh.sdomain import \
SDomain
# from ibvpy.view.plot3d.mayavi_util.pipelines import \
# MVPolyData, MVPointLabels
import numpy as np
from .cell_array import CellView, CellArray, ICellArraySource
from .cell_grid import CellGrid
from .cell_grid_slice import CellGridSlice
#--------------------------------------------------------------------------
# DofGrid
#--------------------------------------------------------------------------
@provides(ICellArraySource)
class DofCellGrid(SDomain):
    '''
    Get an array with element Dof numbers

    Maps the nodes/cells of the underlying CellGrid to global degree of
    freedom (DOF) numbers and provides spatial queries (boundary, corner
    and mid-edge DOFs) on top of that enumeration.
    '''
    cell_grid = Instance(CellGrid)

    get_cell_point_X_arr = DelegatesTo('cell_grid')
    get_cell_mvpoints = DelegatesTo('cell_grid')
    cell_node_map = DelegatesTo('cell_grid')
    get_cell_offset = DelegatesTo('cell_grid')

    # offset of dof within domain list
    #
    dof_offset = Int(0)

    # number of degrees of freedom in a single node
    #
    n_nodal_dofs = Int(3)

    #-------------------------------------------------------------------------
    # Generation methods for geometry and index maps
    #-------------------------------------------------------------------------
    n_dofs = Property(depends_on='cell_grid.shape,n_nodal_dofs,dof_offset')

    def _get_n_dofs(self):
        '''
        Get the total number of DOFs
        '''
        unique_cell_nodes = np.unique(self.cell_node_map.flatten())
        n_unique_nodes = len(unique_cell_nodes)
        return n_unique_nodes * self.n_nodal_dofs

    dofs = Property(depends_on='cell_grid.shape,n_nodal_dofs,dof_offset')

    @cached_property
    def _get_dofs(self):
        '''
        Construct the point grid underlying the mesh grid structure.
        '''
        cell_node_map = self.cell_node_map
        unique_cell_nodes = np.unique(cell_node_map.flatten())
        n_unique_nodes = len(unique_cell_nodes)
        n_nodal_dofs = self.n_nodal_dofs
        n_nodes = self.cell_grid.point_grid_size
        # Start with -1 everywhere; only nodes used by cells receive DOFs.
        node_dof_array = np.repeat(-1, n_nodes *
                                   n_nodal_dofs).reshape(n_nodes, n_nodal_dofs)
        # Enumerate the DOFs in the mesh. The result is an array with n_nodes rows
        # and n_nodal_dofs columns
        #
        # A = array( [[ 0, 1 ],
        #             [ 2, 3 ],
        #             [ 4, 5 ]] );
        #
        node_dof_array[np.index_exp[unique_cell_nodes]] = \
            np.arange(
                n_unique_nodes * n_nodal_dofs).reshape(n_unique_nodes,
                                                       n_nodal_dofs)
        # add the dof_offset before returning the array
        #
        node_dof_array += self.dof_offset
        return node_dof_array

    dofs_Ia = Property()

    def _get_dofs_Ia(self):
        # Alias for the node -> DOF map in index notation (node I, dof a).
        return self.dofs

    def _get_doffed_nodes(self):
        '''
        Get the indices of nodes containing DOFs.
        '''
        cell_node_map = self.cell_node_map
        unique_cell_nodes = np.unique(cell_node_map.flatten())
        n_nodes = self.cell_grid.point_grid_size
        doffed_nodes = np.repeat(-1, n_nodes)
        doffed_nodes[np.index_exp[unique_cell_nodes]] = 1
        return np.where(doffed_nodes > 0)[0]

    #-----------------------------------------------------------------
    # Elementwise-representation of dofs
    #-----------------------------------------------------------------
    cell_dof_map = Property(depends_on='cell_grid.shape,n_nodal_dofs')

    def _get_cell_dof_map(self):
        # (n_cells, n_cell_nodes, n_nodal_dofs) view of the DOF enumeration.
        return self.dofs[np.index_exp[self.cell_grid.cell_node_map]]

    dof_Eid = Property
    '''Mapping of Element, Node, Dimension -> DOF
    '''

    def _get_dof_Eid(self):
        return self.cell_dof_map

    cell_grid_dof_map = Property(depends_on='cell_grid.shape,n_nodal_dofs')

    def _get_cell_grid_dof_map(self):
        return self.dofs[np.index_exp[self.cell_grid.cell_grid_node_map]]

    def get_cell_dofs(self, cell_idx):
        # DOF numbers of all nodes of the cell with index ``cell_idx``.
        return self.cell_dof_map[cell_idx]

    elem_dof_map = Property(depends_on='cell_grid.shape,n_nodal_dofs')

    @cached_property
    def _get_elem_dof_map(self):
        # Flatten (n_cells, n_nodes, n_nodal_dofs) into one row per element.
        el_dof_map = np.copy(self.cell_dof_map)
        tot_shape = el_dof_map.shape[0]
        n_entries = el_dof_map.shape[1] * el_dof_map.shape[2]
        elem_dof_map = el_dof_map.reshape(tot_shape, n_entries)
        return elem_dof_map

    def __getitem__(self, idx):
        '''High level access and slicing to the cells within the grid.
        The return value is a tuple with
        1. array of cell indices
        2. array of nodes for each element
        3. array of coordinates for each node.
        '''
        dgs = DofGridSlice(dof_grid=self, grid_slice=idx)
        return dgs

    #-----------------------------------------------------------------
    # Spatial queries for dofs
    #-----------------------------------------------------------------
    def _get_dofs_for_nodes(self, nodes):
        '''Get the dof numbers and associated coordinates
        given the array of nodes.
        '''
        doffed_nodes = self._get_doffed_nodes()
        # Restrict to nodes that actually carry DOFs.
        intersect_nodes = np.intersect1d(
            nodes, doffed_nodes, assume_unique=False)
        return (self.dofs[np.index_exp[intersect_nodes]],
                self.cell_grid.point_X_arr[np.index_exp[intersect_nodes]])

    def get_boundary_dofs(self):
        '''Get the boundary dofs and the associated coordinates
        '''
        nodes = [self.cell_grid.point_idx_grid[s]
                 for s in self.cell_grid.boundary_slices]
        dofs, coords = [], []
        for n in nodes:
            d, c = self._get_dofs_for_nodes(n)
            dofs.append(d)
            coords.append(c)
        return (np.vstack(dofs), np.vstack(coords))

    def get_all_dofs(self):
        nodes = self.cell_grid.point_idx_grid[...]
        return self._get_dofs_for_nodes(nodes)

    def get_left_dofs(self):
        nodes = self.cell_grid.point_idx_grid[0, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_right_dofs(self):
        nodes = self.cell_grid.point_idx_grid[-1, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_top_dofs(self):
        nodes = self.cell_grid.point_idx_grid[:, -1, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_bottom_dofs(self):
        nodes = self.cell_grid.point_idx_grid[:, 0, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_front_dofs(self):
        nodes = self.cell_grid.point_idx_grid[:, :, -1]
        return self._get_dofs_for_nodes(nodes)

    def get_back_dofs(self):
        nodes = self.cell_grid.point_idx_grid[:, :, 0]
        return self._get_dofs_for_nodes(nodes)

    def get_bottom_left_dofs(self):
        nodes = self.cell_grid.point_idx_grid[0, 0, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_bottom_front_dofs(self):
        nodes = self.cell_grid.point_idx_grid[:, 0, -1]
        return self._get_dofs_for_nodes(nodes)

    def get_bottom_back_dofs(self):
        nodes = self.cell_grid.point_idx_grid[:, 0, 0]
        return self._get_dofs_for_nodes(nodes)

    def get_top_left_dofs(self):
        nodes = self.cell_grid.point_idx_grid[0, -1, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_bottom_right_dofs(self):
        nodes = self.cell_grid.point_idx_grid[-1, 0, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_top_right_dofs(self):
        nodes = self.cell_grid.point_idx_grid[-1, -1, ...]
        return self._get_dofs_for_nodes(nodes)

    def get_bottom_middle_dofs(self):
        if self.cell_grid.point_idx_grid.shape[0] % 2 == 1:
            # BUGFIX: use floor division -- plain '/' yields a float on
            # Python 3, which raises TypeError when used as an index.
            slice_middle_x = self.cell_grid.point_idx_grid.shape[0] // 2
            nodes = self.cell_grid.point_idx_grid[slice_middle_x, 0, ...]
            return self._get_dofs_for_nodes(nodes)
        else:
            print('Error in get_bottom_middle_dofs:'
                  ' the method is only defined for an odd number of dofs in x-direction')

    def get_top_middle_dofs(self):
        if self.cell_grid.point_idx_grid.shape[0] % 2 == 1:
            # BUGFIX: floor division (see get_bottom_middle_dofs).
            slice_middle_x = self.cell_grid.point_idx_grid.shape[0] // 2
            nodes = self.cell_grid.point_idx_grid[slice_middle_x, -1, ...]
            return self._get_dofs_for_nodes(nodes)
        else:
            print('Error in get_top_middle_dofs:'
                  ' the method is only defined for an odd number of dofs in x-direction')

    def get_left_middle_dofs(self):
        if self.cell_grid.point_idx_grid.shape[1] % 2 == 1:
            # BUGFIX: floor division (see get_bottom_middle_dofs).
            slice_middle_y = self.cell_grid.point_idx_grid.shape[1] // 2
            nodes = self.cell_grid.point_idx_grid[0, slice_middle_y, ...]
            return self._get_dofs_for_nodes(nodes)
        else:
            print('Error in get_left_middle_dofs:'
                  ' the method is only defined for an odd number of dofs in y-direction')

    def get_right_middle_dofs(self):
        if self.cell_grid.point_idx_grid.shape[1] % 2 == 1:
            # BUGFIX: floor division (see get_bottom_middle_dofs).
            slice_middle_y = self.cell_grid.point_idx_grid.shape[1] // 2
            nodes = self.cell_grid.point_idx_grid[-1, slice_middle_y, ...]
            return self._get_dofs_for_nodes(nodes)
        else:
            print('Error in get_right_middle_dofs:'
                  ' the method is only defined for an odd number of dofs in y-direction')

    def get_left_front_bottom_dof(self):
        nodes = self.cell_grid.point_idx_grid[0, 0, -1]
        return self._get_dofs_for_nodes(nodes)

    def get_left_front_middle_dof(self):
        if self.cell_grid.point_idx_grid.shape[1] % 2 == 1:
            # BUGFIX: floor division (see get_bottom_middle_dofs).
            slice_middle_y = self.cell_grid.point_idx_grid.shape[1] // 2
            nodes = self.cell_grid.point_idx_grid[0, slice_middle_y, -1]
            return self._get_dofs_for_nodes(nodes)
        else:
            print('Error in get_left_middle_front_dof:'
                  ' the method is only defined for an odd number of dofs in y-direction')

    #-----------------------------------------------------------------
    # Visualization related methods
    #-----------------------------------------------------------------
    refresh_button = Button('Draw')

    @on_trait_change('refresh_button')
    def redraw(self):
        '''Redraw the point grid.
        '''
        self.cell_grid.redraw()

    dof_cell_array = Button

    def _dof_cell_array_fired(self):
        # Open an interactive table of cell -> node mappings with DOF views.
        cell_array = self.cell_grid.cell_node_map
        self.show_array = CellArray(data=cell_array,
                                    cell_view=DofCellView(cell_grid=self))
        self.show_array.current_row = 0
        self.show_array.configure_traits(kind='live')
class DofGridSlice(CellGridSlice):
    '''Slice view over a DofCellGrid exposing the DOFs of the selected cells.'''

    # Weak reference back to the owning grid to avoid a reference cycle.
    dof_grid = WeakRef(DofCellGrid)

    def __init__(self, dof_grid, **args):
        self.dof_grid = dof_grid
        super(DofGridSlice, self).__init__(**args)

    cell_grid = Property()

    def _get_cell_grid(self):
        # Delegate to the owning grid's underlying geometric cell grid.
        return self.dof_grid.cell_grid

    dofs = Property

    def _get_dofs(self):
        # DOFs of the sliced elements restricted to the sliced node subset.
        _, idx2 = self.idx_tuple
        return self.dof_grid.cell_dof_map[
            np.ix_(
                self.elems,
                self.cell_grid.grid_cell[idx2]
            )
        ]
#-----------------------------------------------------------------------
# View a single cell instance
#-----------------------------------------------------------------------
class DofCellView(CellView):
    '''View a single cell instance.
    '''
    # implements(ICellView)

    # DOF numbers of the currently selected cell.
    elem_dofs = Array

    def set_cell_traits(self):
        '''Set the trait values for the current cell_idx
        '''
        self.elem_dofs = self.cell_grid.get_cell_dofs(self.cell_idx)

    def _get_cell_mvpoints(self):
        # Mayavi point coordinates of the current cell's nodes.
        return self.cell_grid.get_cell_mvpoints(self.cell_idx)

    def _get_cell_labels(self):
        # Pad DOF tuples with zeros so labels always have three components.
        cell_dofs = self.cell_grid.get_cell_dofs(self.cell_idx)
        shape = cell_dofs.shape
        if shape[1] < 3:
            cd = np.zeros((shape[0], 3))
            cd[:, :shape[1]] = cell_dofs
            return cd
        else:
            return cell_dofs

    def redraw(self):
        # Refresh the DOF label glyphs when drawing is enabled.
        if self.draw_cell:
            self.mvp_elem_labels.redraw(label_mode='label_vectors')
| bmcs-group/bmcs_ibvpy | ibvpy/mesh/cell_grid/dof_grid.py | dof_grid.py | py | 12,406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ibvpy.mesh.sdomain.SDomain",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cell_grid.CellGrid",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": ... |
9826643435 | import uvicorn
from fastapi import FastAPI, Response, Depends, Query
from config import META_VERIFY_TOKEN, FASTAPI_HOST, FASTAPI_PORT
from model import Event
from example.api import get_weather_info, get_yelp_info, select_yelp_type, get_yelp_typeIdx
from utils import event_parser, verify_payload
from typing import List
from messenger import MessengerBot
# FastAPI application instance and the Messenger bot client.
# set_profile=False: presumably skips the bot profile setup at import
# time — confirm against MessengerBot.
app = FastAPI()
messageBot = MessengerBot(set_profile=False)
@app.get("/")
def verify_webhook(
hub_mode: str = Query(alias="hub.mode"),
hub_challenge: str = Query(alias="hub.challenge"),
hub_verify_token: str = Query(alias="hub.verify_token"),
):
"""
This route is only for webhook address validation.
"""
if hub_mode != "subscribe" or not hub_challenge:
return Response(content="Unrecognized params", status_code=400)
if hub_verify_token != META_VERIFY_TOKEN:
return Response(content="Verification token mismatch", status_code=403)
return Response(content=hub_challenge)
@app.post("/", dependencies=[Depends(verify_payload)])
def message_webhook(events: List[Event] = Depends(event_parser)):
"""
It receives a list of events from the webhook, and then for each event, it
checks if the event is a text message, and if so, it sends a corresponding
response back to the user
Args:
events (List[Event]): List[Event] = Depends(event_parser)
Returns:
a response object with the content "ok"
"""
if not events:
return Response(content="Unrecognized webhook", status_code=401)
for event in events:
# type, sender, text, quick_reply, payload
if event.payload == "start":
messageBot.send_home_message(
recipient_id=event.sender
)
return Response(content="ok")
if event.text == "yelp" or event.payload == "yelp":
select_yelp_type(
recipient_id=event.sender,
messageBot=messageBot
)
return Response(content="ok")
if event.quick_reply in get_yelp_typeIdx():
res = get_yelp_info(int(event.quick_reply))
messageBot.send_text_message(
recipient_id=event.sender,
message=res
)
return Response(content="ok")
if event.quick_reply == "weather":
temp, weather = get_weather_info()
messageBot.send_text_message(
recipient_id=event.sender,
message=f'The temprature is {temp}F, the weather is {weather}'
)
return Response(content="ok")
if event.payload == "quick":
messageBot.send_quickreply_message(
recipient_id=event.sender,
message="What do you want to know",
options=["weather", "yelp"]
)
return Response(content="ok")
else:
messageBot.send_home_message(user=event.sender)
return Response(content="ok")
if __name__ == "__main__":
uvicorn.run("main:app", host=FASTAPI_HOST, port=FASTAPI_PORT, reload=True)
| GaryHo34/SeattleBot | example/example.py | example.py | py | 3,107 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "messenger.MessengerBot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fastapi.Query",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fastapi.Query",
... |
34240292583 | import discord
from discord.ext import commands
import time
# Enable every gateway intent and create the command bot with '!' prefix.
intents = discord.Intents.all()
bot = commands.Bot(command_prefix='!', intents=intents)


@bot.event
async def on_ready():
    # Fired once the bot has connected; logs startup ("Başladı" = "Started").
    print("Başladı")
@bot.command()
async def send(ctx, *, args=None):
    """DM the given message to every member of the current guild.

    Usage: ``!send <message>``. Waits one second between DMs; members that
    cannot be messaged (e.g. DMs closed) are skipped.
    """
    import asyncio  # local import so the fix stays self-contained

    if args is not None:
        for member in ctx.guild.members:
            # BUG FIX: the original used time.sleep(1), which blocks the
            # entire event loop inside this coroutine; asyncio.sleep yields
            # control while waiting.
            await asyncio.sleep(1)
            try:
                await member.send(args)
                print("Gönderildi")
            except Exception:
                # e.g. discord Forbidden when the member's DMs are closed
                continue
    else:
        await ctx.send("Lütfen Bir Argüman Giriniz.")


bot.run('TOKEN')
| omergoc/DiscordReklamBotu | app.py | app.py | py | 600 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "discord.Intents.all",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "discor... |
9366051037 | from sympy import symbols, Matrix, eye, sin, cos, pi, pprint, diff
import math
import sympy as sym
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
from mpl_toolkits.mplot3d import axes3d, Axes3D
# Symbolic joint variables for the 5-DOF leg/wheel chain.
Q1, Q2, Q3, Q4, Q5 = symbols('coxa femur tibia pitch wheel')
Joint_Angles = [Q1, Q2, Q3, Q4, Q5]

# Helper for 3-decimal rounding (currently unused in this script).
round_3 = partial(round, ndigits=3)
t = 0  # placeholder, unused below

# Standard DH parameters (a, d, alpha) per joint; theta is the symbolic
# joint angle from Joint_Angles.
DH_Parameter = [
    {'a': 0, 'd': 0.091, 'alpha': 0},
    {'a': 0.191, 'd': 0, 'alpha': -pi / 2},
    {'a': 0.500, 'd': 0, 'alpha': 0},
    {'a': 0.450, 'd': 0, 'alpha': 0},
    {'a': 0.535, 'd': 0, 'alpha': 0},
]

FKine = eye(4)
Jacobian_M = eye(6)  # placeholder for a Jacobian; never filled in here
T = []    # per-joint DH transforms
Z_M = []  # translation columns of the accumulated transform — see NOTE below
O_M = []  # z-axis columns of the accumulated transform — see NOTE below
for i, (P, Q) in enumerate(zip(DH_Parameter, Joint_Angles)):
    d = P['d']
    a = P['a']
    alpha = P['alpha']
    # Standard DH homogeneous transform for joint i.
    Transform_M = Matrix([[cos(Q), -sin(Q) * cos(alpha), sin(Q) * sin(alpha), a * cos(Q)], \
                          [sin(Q), cos(Q) * cos(alpha), -cos(Q) * sin(alpha), a * sin(Q)], \
                          [0, sin(alpha), cos(alpha), d], \
                          [0, 0, 0, 1]])
    T.append(Transform_M)
    # NOTE(review): this PRE-multiplies (Transform_M @ FKine), while T05
    # below is built by POST-multiplication (T[0]*T[1]*...). Z_M/O_M thus
    # do not correspond to the T0i frames; they are unused in this file —
    # confirm the intended order before reusing them (e.g. for a Jacobian).
    FKine = Transform_M @ FKine
    Z_M.append(FKine[0:3, 3])
    O_M.append(FKine[0:3, 2])

# Cumulative base-to-joint transforms.
T01 = T[0]
T02 = T[0] * T[1]
T03 = T[0] * T[1] * T[2]
T04 = T[0] * T[1] * T[2] * T[3]
T05 = T[0] * T[1] * T[2] * T[3] * T[4]

print("End Effector(wheel) Transformation Matrix = ")
# Evaluate the end-effector pose at a concrete joint configuration.
Transform_Matrix = T05.subs({Q1: math.radians(0), Q2: math.radians(30), Q3: math.radians(30), Q4: math.radians(30), Q5: math.radians(0)}).evalf()
pprint(Transform_Matrix)
| HarshShirsath/Robot-Modelling-Project-2-NASA-Athlete-Rover- | FK_athlete.py | FK_athlete.py | py | 1,595 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sympy.symbols",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sympy.pi",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sympy.eye",
"line_number... |
5728396322 | import json
import os
import uuid
import asyncio
from typing import MutableMapping
from aio_pika import Message, connect
from aio_pika.abc import (
AbstractChannel,
AbstractConnection,
AbstractIncomingMessage,
AbstractQueue,
)
class RpcClient:
    """Async RabbitMQ RPC client (aio-pika).

    Publishes JSON requests and awaits JSON replies over an exclusive
    callback queue, correlating responses by message correlation_id.
    """

    connection: AbstractConnection
    channel: AbstractChannel
    callback_queue: AbstractQueue
    loop: asyncio.AbstractEventLoop

    def __init__(self) -> None:
        # pending reply futures keyed by correlation_id
        self.futures: MutableMapping[str, asyncio.Future] = {}
        self.loop = asyncio.get_running_loop()

    async def connect(self) -> "RpcClient":
        """Open the AMQP connection/channel and start consuming the reply queue."""
        host = os.getenv("RABBITMQ_HOST", "localhost")
        user = os.getenv("RABBITMQ_USER", "services")
        password = os.getenv("RABBITMQ_PASS", "longpassword")
        url = f"amqp://{user}:{password}@{host}/"
        # module-level aio_pika.connect, not this method
        self.connection = await connect(
            url,
            loop=self.loop,
        )
        self.channel = await self.connection.channel()
        # exclusive queue: deleted automatically when this client disconnects
        self.callback_queue = await self.channel.declare_queue(exclusive=True)
        await self.callback_queue.consume(self.on_response)
        return self

    def on_response(self, message: AbstractIncomingMessage) -> None:
        # Resolve the future registered by rpc_send for this correlation id.
        if message.correlation_id is None:
            print(f"Bad message {message!r}")
            return
        future: asyncio.Future = self.futures.pop(message.correlation_id)
        future.set_result(message.body)

    async def rpc_send(self, channel, msg) -> int:
        """Publish *msg* as JSON on queue *channel* and await the decoded reply.

        NOTE(review): the ``-> int`` annotation looks wrong — this returns
        whatever ``json.loads`` yields for the reply body.
        """
        correlation_id = str(uuid.uuid4())
        future = self.loop.create_future()
        self.futures[correlation_id] = future
        await self.channel.default_exchange.publish(
            Message(
                json.dumps(msg).encode(),
                content_type="text/plain",
                correlation_id=correlation_id,
                reply_to=self.callback_queue.name,
            ),
            routing_key=channel,
        )
        return json.loads(await future)

    async def send(self, channel, msg) -> int:
        """Fire-and-forget publish of *msg* as JSON on queue *channel*.

        NOTE(review): returns None despite the ``-> int`` annotation.
        """
        await self.channel.default_exchange.publish(
            Message(body=json.dumps(msg).encode()),
            routing_key=channel,
        )
# class RunnerTask(object):
# async def __init__(self):
# self.connection = await pika.BlockingConnection(
# pika.ConnectionParameters(
# host=os.getenv("RABBITMQ_HOST", "localhost"),
# credentials=pika.PlainCredentials(
# os.getenv("RABBITMQ_USER", "services"),
# os.getenv("RABBITMQ_PASS", "longpassword"),
# ),
# )
# )
# self.channel = await self.connection.channel()
# result = await self.channel.queue_declare(queue="", exclusive=True)
# self.callback_queue = result.method.queue
# self.channel.basic_consume(
# queue=self.callback_queue,
# on_message_callback=self.on_response,
# auto_ack=True,
# )
# self.response = None
# self.corr_id = None
# async def on_response(self, ch, method, props, body):
# if self.corr_id == props.correlation_id:
# self.response = body
# def send(self, channel, mes):
# self.channel.basic_publish(exchange="", routing_key=channel, body=mes)
# async def rpc_send(self, channel, mes):
# self.response = None
# self.corr_id = str(uuid.uuid4())
# await self.channel.basic_publish(
# exchange="",
# routing_key=channel,
# properties=pika.BasicProperties(
# reply_to=self.callback_queue,
# correlation_id=self.corr_id,
# ),
# body=json.dumps(mes),
# )
# self.connection.process_data_events(time_limit=None)
# return json.loads(self.response)
| PoteeDev/scenario-manager | manager/amqp.py | amqp.py | py | 3,831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aio_pika.abc.AbstractConnection",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "aio_pika.abc.AbstractChannel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "aio_pika.abc.AbstractQueue",
"line_number": 18,
"usage_type": "name"
},
... |
10954038644 | # -*- coding: utf-8 -*-
import wx
from controlador import control_hilo
class entrada(wx.Frame):
    """Frame asking the voter to show their ID tag ("babero") to the camera.

    Shows an image placeholder, an instruction label and an auxiliary
    button, sized from the parent's configured resolution.
    """

    def __init__(self, parent):
        # NOTE(review): the wx parent is deliberately None here; the logical
        # parent is kept in self.parent instead — confirm this is intended.
        wx.Frame.__init__(self, None, id=wx.ID_ANY, title=wx.EmptyString,
                          pos=wx.DefaultPosition,
                          size=wx.Size(int(parent.resolucion[0]), int(parent.resolucion[1])),
                          style=0 | wx.TAB_TRAVERSAL)
        self.parent = parent
        self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)

        gSizer4 = wx.GridSizer(1, 1, 0, 0)
        sbSizer1 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, u"Identificate"), wx.VERTICAL)
        # BUG FIX: the original used ``wx.wx.FlexGridSizer`` — the ``wx.wx``
        # module only existed as a deprecated alias in very old wxPython
        # releases; the class lives directly in the ``wx`` namespace.
        gSizer1 = wx.FlexGridSizer(3, 1, 0, 0)

        # placeholder image shown until a capture is available
        self.m_bitmap1 = wx.StaticBitmap(self, wx.ID_ANY,
                                         wx.Bitmap(u"imagenes/vacio.png", wx.BITMAP_TYPE_ANY),
                                         wx.DefaultPosition, wx.DefaultSize, 0)
        gSizer1.Add(self.m_bitmap1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)

        # instruction label; erroresText() appends error details to it
        self.m_staticText1 = wx.StaticText(self, wx.ID_ANY,
                                           u"Por Favor Acerque su Babero a la Cámara\r ",
                                           wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTRE)
        self.m_staticText1.Wrap(-1)
        gSizer1.Add(self.m_staticText1, 0, wx.ALL, 30)

        self.btn_alt = wx.Button(self, wx.ID_ANY, u"...", wx.DefaultPosition, wx.DefaultSize, 0)
        gSizer1.Add(self.btn_alt, 0, wx.ALIGN_CENTER_HORIZONTAL, 5)

        sbSizer1.Add(gSizer1, 0, 0, 0)
        gSizer4.Add(sbSizer1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)

        self.SetSizer(gSizer4)
        self.Layout()
        self.Centre(wx.BOTH)

    def __del__(self):
        pass

    def Cerrar(self):
        # Close the frame, forcing the close event.
        self.Close(True)

    def erroresText(self, error):
        # Update the instruction label with an error code:
        # 1 -> invalid voter, 2 -> already voted.
        if error == 1:
            self.m_staticText1.SetLabel("Por Favor Acerque su Babero a la Cámara\r Votante No válido")
        elif error == 2:
            self.m_staticText1.SetLabel("Por Favor Acerque su Babero a la Cámara\r Ya voto")
{
"api_name": "wx.Frame",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "wx.Frame.__init__",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_numb... |
22011646889 | from fgo.interops import *
import random
from functools import reduce
from copy import copy
def click():
    """Build the composite event sequence for a double left-click.

    Short random pre-delay, two Left presses separated by a brief random
    pause, then a longer settle wait.
    """
    steps = [
        Wait(Range(0.15, 0.25)),
        Left(),
        Wait(Range(0.1, 0.2)),
        Left(),
        Wait(Range(0.3, 0.5)),
    ]
    return reduce(Compose, steps)
def fix_dpi(origin: Event) -> Event:
    """Rescale pointer coordinates of *origin* events by 4/5 (100% -> 80% DPI).

    x -> 4x/5 and y -> 4(y+40)/5; the +40 offset presumably compensates
    for a title bar or screen offset — TODO confirm.
    """
    @origin.fmap
    def ret(event: Event):
        if isinstance(event, (MoveTo, Move)):
            # copy before mutating so the original event object is untouched
            event = copy(event)
            event.x = int(4 * event.x / 5)
            event.y = int(4 * (event.y + 40) / 5)
        return event
    return ret
| thautwarm/do-you-like-wan-you-si | fgo/common.py | common.py | py | 560 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 21,
"usage_type": "call"
}
] |
35815306473 | from pathlib import Path
import sys
import numpy as np
from collections import defaultdict
import torch
from torch.utils.tensorboard import SummaryWriter
from rl_envs.grid_world_env import GridWorldEnv
from ReplayMemory import *
def print_actions(agent, env, get_optimal = False):
    """Print the agent's chosen action for every grid cell as arrow glyphs.

    ``get_optimal`` is accepted for API compatibility but not used here.
    """
    action_glyphs = [" ↓ ", " ↑ ", " → ", " ← ", " ↺ "]
    with torch.no_grad():
        for row in range(env.height):
            picks = []
            for col in range(env.width):
                query = torch.tensor((row, col), dtype=torch.float).unsqueeze(0)
                picks.append(action_glyphs[agent.get_action(query).item()])
            print("[ " + " ".join(picks) + " ]")
def state_normalize(env, state):
    """Map a (row, col) grid state to zero-centered, size-scaled coordinates."""
    half_h = (env.height - 1) / 2.0
    half_w = (env.width - 1) / 2.0
    return ((state[0] - half_h) / env.height, (state[1] - half_w) / env.width)
from agents.DQN import DeepQLearningAgent

BATCHSIZE = 100     # replay-buffer samples per gradient step
LEARN_RATE = 1e-5   # Q-network learning rate

# Reference state values of the uniform-random policy on the 5x5 grid,
# used below to track value-estimation error during training
# (presumably obtained by policy evaluation — TODO confirm their source).
TRUE_RANDOM_STATE_VALUE = [
    [-3.8, -3.8, -3.6, -3.1, -3.2],
    [-3.8, -3.8, -3.8, -3.1, -2.9],
    [-3.6, -3.9, -3.4, -3.2, -2.9],
    [-3.9, -3.6, -3.4, -2.9, -3.2],
    [-4.5, -4.2, -3.4, -3.4, -3.5],
]
def calculate_state_value_error(env, agent):
    """Sum of (estimated - reference) state values over all grid states.

    NOTE(review): this is a *signed* sum, so positive and negative errors
    cancel; also the states are fed to the network unnormalized here while
    training uses state_normalize — confirm both are intended.
    """
    # offline policy have 2 policies, I am using the behavior(random) policy for calculating
    with torch.no_grad():
        state_value_error = 0
        for i in range(env.height):
            for j in range(env.width):
                state = torch.tensor((i, j), dtype=torch.float).unsqueeze(0)
                output = agent.policy_net(state)
                # value under the uniform-random policy: mean Q over actions
                state_value = output.sum() / env.action_n
                state_value_error += (state_value - TRUE_RANDOM_STATE_VALUE[i][j])
        return state_value_error
# 5x5 grid world with six forbidden cells and one target at (3,2).
env = GridWorldEnv(5, 5, forbidden_grids=[(1,1),(1,2), (2,2),(3,1),(3,3),(4,1)], target_grids=[(3,2)], forbidden_reward=-1, hit_wall_reward=-1, target_reward=10)
# state is the normalized (row, col) pair, hence state_space_n=2
agent = DeepQLearningAgent(state_space_n= 2, action_space_n=env.action_n, lr = LEARN_RATE)
writer = SummaryWriter()  # TensorBoard logging

"""
generate samples to replay buffer
"""
# Fill the buffer with 2000 transitions from a uniform-random behavior
# policy (actions 0..4).
# NOTE(review): `random` is not among this file's visible imports;
# presumably re-exported by `from ReplayMemory import *` — confirm.
replay_buffer = ReplayMemory(2000)
state = env.reset()
for _ in range(2000):
    action = random.randint(0,4)
    # action = agent.get_behavior_acion(state)
    next_state, reward = env.step(state, action)
    # states are normalized before being stored
    replay_buffer.push(torch.tensor(state_normalize(env,state), dtype=torch.float), torch.tensor(action, dtype=torch.int64).unsqueeze(0), torch.tensor(reward, dtype=torch.float).unsqueeze(0), torch.tensor(state_normalize(env,next_state), dtype=torch.float))
    state = next_state

"""
perform DQN
"""
# 200 outer rounds; each does 50 minibatch gradient steps, then syncs
# the target network.
iter_counter = 0
for _ in range(200):
    for _ in range(50):
        transitions = replay_buffer.sample(BATCHSIZE)
        batch = Transition(*zip(*transitions))  # transpose the transition list
        state = torch.stack(batch.state)
        next_state = torch.stack(batch.next_state)
        reward = torch.cat(batch.reward)
        action_indices = torch.cat(batch.action)
        loss, q_value, target_value = agent.update_Q_network(state, action_indices, reward, next_state, env.discounted_factor)
        # copy target network every C=5 iteration
        # state_value_estimated = output.sum(dim=1) / env.action_n
        writer.add_scalar('TD error', (q_value - target_value).sum(), iter_counter)
        writer.add_scalar('Loss', loss.sum(), iter_counter)
        writer.add_scalar('State value error', calculate_state_value_error(env,agent), iter_counter)
        iter_counter+=1
    # agent.target_net.load_state_dict(agent.policy_net.state_dict())
    agent.sync_target_network()
    # print(loss)
writer.flush()

print(env)
print_actions(agent, env, True)
print()
# Print the per-state value-estimation error grid.
# NOTE(review): states are unnormalized here, unlike the training inputs.
for i in range(env.height):
    print("[", end=" ")
    for j in range(env.width):
        state = torch.tensor((i,j), dtype=torch.float).unsqueeze(0)
        output = agent.policy_net(state)
        state_value = output.sum()/env.action_n
        state_value_error = (state_value - TRUE_RANDOM_STATE_VALUE[i][j])
        print(state_value_error, end=" ")
    print("]")
# print()
| zhilu1/rl_practice | perform_deep_learning.py | perform_deep_learning.py | py | 3,939 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.no_grad",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line... |
938498832 | import torch
import torch.nn as nn
import torch.nn.functional as F
class GRUEncoder(nn.Module):
    """GRU sequence encoder with max-over-time pooling.

    Reads hidden size, directionality and layer count from *config*; when
    bidirectional, the per-direction hidden size is halved so the
    concatenated output width still equals ``hidden_size``.
    """

    def __init__(self, config, gpu_list, *args, **params):
        super(GRUEncoder, self).__init__()
        self.hidden_size = config.getint("model", "hidden_size")
        self.bi = config.getboolean("model", "bi_direction")
        self.num_layers = config.getint("model", "num_layers")
        self.output_size = self.hidden_size // 2 if self.bi else self.hidden_size
        self.gru = nn.GRU(input_size=self.hidden_size,
                          hidden_size=self.output_size,
                          num_layers=self.num_layers,
                          batch_first=True,
                          bidirectional=self.bi)

    def forward(self, x):
        """Encode a (batch, seq, hidden_size) batch.

        Returns ``(pooled, outputs)``: the per-step GRU outputs and their
        elementwise maximum over the time axis.
        """
        outputs, _ = self.gru(x)
        pooled = torch.max(outputs, dim=1)[0]
        return pooled, outputs
| china-ai-law-challenge/CAIL2020 | sfks/baseline/model/encoder/GRUEncoder.py | GRUEncoder.py | py | 802 | python | en | code | 150 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.GRU",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number"... |
10697052181 | # -*- coding: utf-8 -*-
'''
This code is the implementation of two-phase level set for the following paper:
T. F. Chan and L. A. Vese, "Active contours without edges,"
in IEEE Transactions on Image Processing, vol. 10, no. 2, pp. 266-277, Feb. 2001, doi: 10.1109/83.902291.
Note: level set initialization and parameters are set empirically,
which may need to be modified for different images.
'''
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from evolution import evolution
# Load the test image as grayscale, downsample to 100x100 and stretch
# its intensities to the full [0, 255] range.
img = imread('../images/fin1.bmp', as_gray=True)
img = resize(img, (100, 100))
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# Level-set initialization: signed distance to a circle of radius 40
# centred at (50, 50) — positive inside the circle, negative outside.
# Vectorized replacement of the original per-pixel Python double loop
# (same values, O(n^2) Python iteration removed).
rows, cols = np.indices(img.shape)
phi = 40 - np.sqrt(np.square(rows - 50) + np.square(cols - 50))
# Chan-Vese parameters (set empirically — see the module docstring).
lambda_1 = 1            # weight of the inside-region fitting term
lambda_2 = 1            # weight of the outside-region fitting term
mu = 0.2 * 255 * 255    # contour-length penalty, scaled for [0,255] images
epsilon = 1             # regularization width for the smoothed Heaviside/Dirac
time_step = 0.1         # evolution step size
iters = 100             # number of evolution iterations

if __name__ == '__main__':
    phi = evolution(phi, img, lambda_1, lambda_2, mu, epsilon, time_step, iters,
                    reinit=False, display=True)
{
"api_name": "skimage.io.imread",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.min",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.