seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
856254777 | #!/usr/bin/env python
from pyhesity import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-c', '--clustername', type=str, default=None)
parser.add_argument('-mcm', '--mcm', action='store_true')
parser.add_argument('-i', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-m', '--mfacode', type=str, default=None)
parser.add_argument('-e', '--emailmfacode', action='store_true')
parser.add_argument('-ip', '--ip', action='append', type=str)
parser.add_argument('-l', '--iplist', type=str, default=None)
parser.add_argument('-a', '--addentry', action='store_true')
parser.add_argument('-r', '--removeentry', action='store_true')
parser.add_argument('-p', '--profile', type=str, choices=['Management', 'SNMP', 'S3', 'Data Protection', 'Replication', 'SSH', 'SMB', 'NFS', ''], default='')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
clustername = args.clustername
mcm = args.mcm
useApiKey = args.useApiKey
password = args.password
mfacode = args.mfacode
emailmfacode = args.emailmfacode
ip = args.ip
iplist = args.iplist
addentry = args.addentry
removeentry = args.removeentry
profile = args.profile
if profile == '':
print('no profile specified')
exit(1)
# authenticate
if mcm:
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, helios=True)
else:
if emailmfacode:
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, emailMfaCode=True)
else:
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, mfaCode=mfacode)
# if connected to helios or mcm, select to access cluster
if mcm or vip.lower() == 'helios.cohesity.com':
if clustername is not None:
heliosCluster(clustername)
else:
print('-clustername is required when connecting to Helios or MCM')
exit(1)
if apiconnected() is False:
print('authentication failed')
exit(1)
# gather list function
def gatherList(param=None, filename=None, name='items', required=True):
    """Combine values from a repeated CLI argument and/or a text file.

    param: list collected via argparse action='append' (or None)
    filename: path to a text file with one value per line (or None)
    name: label used in the error message when nothing was gathered
    required: when True, print an error and exit(1) if the result is empty
    Returns the combined list (file lines are stripped; blanks dropped).
    """
    items = []
    if param is not None:
        items.extend(param)
    if filename is not None:
        # 'with' guarantees the handle is closed even if reading raises
        with open(filename, 'r') as f:
            items += [s.strip() for s in f if s.strip() != '']
    if required is True and len(items) == 0:
        print('no %s specified' % name)
        exit(1)
    return items
# get list of ip/cidr to process
entries = gatherList(ip, iplist, name='entries', required=False)
if addentry is True:
action = 'add'
elif removeentry is True:
action = 'remove'
else:
action = 'list'
if action != 'list' and len(entries) == 0:
print('No entries specified')
exit(1)
# get existing firewall rules
rules = api('get', '/nexus/v1/firewall/list')
for cidr in entries:
if '/' not in cidr:
cidr = '%s/32' % cidr
for attachment in rules['entry']['attachments']:
if attachment['profile'] == profile:
if action != 'list':
if attachment['subnets'] is not None:
attachment['subnets'] = [s for s in attachment['subnets'] if s != cidr]
if action == 'add':
if attachment['subnets'] is None:
attachment['subnets'] = []
attachment['subnets'].append(cidr)
print(' %s: adding %s' % (profile, cidr))
else:
print(' %s: removing %s' % (profile, cidr))
rules['updateAttachment'] = True
if action != 'list':
result = api('put', '/nexus/v1/firewall/update', rules)
if 'error' in result:
exit(1)
print('\n%s allow list:' % profile)
for attachment in rules['entry']['attachments']:
if attachment['profile'] == profile:
if attachment['subnets'] is None or len(attachment['subnets']) == 0:
print(' All IP Addresses(*)')
else:
for cidr in attachment['subnets']:
print(' %s' % cidr)
print('')
| bseltz-cohesity/scripts | python/firewallTool/firewallTool.py | firewallTool.py | py | 4,351 | python | en | code | 85 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
}
] |
30067750251 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import pandas as pd
# Definición de funciones de activación y su derivada
def sigmoid(x):
    """Logistic activation: maps any real input into the open interval (0, 1)."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def sigmoid_derivative(x):
    """Sigmoid derivative, expressed in terms of the sigmoid's output x."""
    one_minus = 1 - x
    return x * one_minus
# Clase para la red neuronal
class NeuralNetwork:
    """Minimal one-hidden-layer feed-forward network with sigmoid units.

    Trains by full-batch gradient descent; activations from the last
    forward pass are cached on the instance for use in backpropagation.
    """

    def __init__(self, input_size, hidden_size, output_size):
        # Record the layer widths and draw initial weights uniformly from [0, 1).
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.weights_input_hidden = np.random.rand(input_size, hidden_size)
        self.weights_hidden_output = np.random.rand(hidden_size, output_size)

    def feedforward(self, X):
        """Propagate X through the network, caching layer activations."""
        self.hidden_input = np.dot(X, self.weights_input_hidden)
        self.hidden_output = sigmoid(self.hidden_input)
        out_pre_activation = np.dot(self.hidden_output, self.weights_hidden_output)
        self.output = sigmoid(out_pre_activation)

    def backpropagation(self, X, y, learning_rate):
        """Apply one gradient-descent update to both weight matrices."""
        residual = y - self.output
        # Output-layer gradient, then update its weights immediately
        # (the hidden-layer error is computed from the updated weights).
        grad_out = residual * sigmoid_derivative(self.output)
        self.weights_hidden_output += np.dot(self.hidden_output.T, grad_out) * learning_rate
        hidden_err = grad_out.dot(self.weights_hidden_output.T)
        grad_hidden = hidden_err * sigmoid_derivative(self.hidden_output)
        self.weights_input_hidden += X.T.dot(grad_hidden) * learning_rate

    def train(self, X, y, learning_rate, epochs):
        """Run `epochs` full-batch forward/backward passes."""
        for _ in range(epochs):
            self.feedforward(X)
            self.backpropagation(X, y, learning_rate)

    def predict(self, X):
        """Forward pass that returns the output-layer activations."""
        self.feedforward(X)
        return self.output
# Leave-k-Out helper
def leave_k_out(X, y, k):
    """Estimate classification accuracy by leave-one-out cross-validation.

    For each row i, trains a fresh NeuralNetwork on all other rows,
    predicts the held-out row, and returns 1 - (misclassified / total),
    i.e. the accuracy over all folds.

    NOTE(review): the `k` parameter is never used -- this always holds out
    exactly one sample per fold regardless of k. Confirm whether true
    leave-k-out was intended.
    Relies on module-level globals: NeuralNetwork, input_size, hidden_size,
    output_size, learning_rate, epochs.
    """
    errors = []
    for i in range(len(X)):
        # Hold out row i as the single validation sample.
        X_val = X[i]
        y_val = y[i]
        X_train = np.delete(X, i, axis=0)
        y_train = np.delete(y, i, axis=0)
        model = NeuralNetwork(input_size, hidden_size, output_size)
        model.train(X_train, y_train, learning_rate, epochs)
        y_pred = model.predict(X_val)
        # One-hot targets/predictions are compared via their argmax class.
        y_pred_class = np.argmax(y_pred)
        y_true_class = np.argmax(y_val)
        if y_pred_class != y_true_class:
            errors.append(1)
    # sum(errors) counts misclassified folds; convert to accuracy.
    return 1 - (sum(errors) / len(X))
# Cargar y preparar los datos
data = np.genfromtxt('irisbin.csv', delimiter=',')
X = data[:, :-3]
y = data[:, -3:]
# Parámetros
input_size = X.shape[1]
hidden_size = 8
output_size = 3
learning_rate = 0.01
epochs = 100
k_out = 5
k_out_accuracy = leave_k_out(X, y, k_out)
print(f'Error Leave-{k_out}-Out: {1 - k_out_accuracy:.2f}')
# Inicializa listas para almacenar los puntos correctamente clasificados y los incorrectamente clasificados
correctly_classified_points = []
incorrectly_classified_points = []
# Realiza Leave-One-Out
for i in range(len(X)):
X_val = X[i]
y_val = y[i]
X_train = np.delete(X, i, axis=0)
y_train = np.delete(y, i, axis=0)
model = NeuralNetwork(input_size, hidden_size, output_size)
model.train(X_train, y_train, learning_rate, epochs)
y_pred = model.predict(X_val)
y_pred_class = np.argmax(y_pred)
y_true_class = np.argmax(y_val)
if y_pred_class == y_true_class:
correctly_classified_points.append(X_val)
else:
incorrectly_classified_points.append(X_val)
correctly_classified_points = np.array(correctly_classified_points)
incorrectly_classified_points = np.array(incorrectly_classified_points)
# Aplica PCA para reducir a 2D
pca = PCA(n_components=2)
X_2d = pca.fit_transform(X)
# Crea un DataFrame para visualizar los resultados
df = pd.DataFrame({'X': X_2d[:, 0], 'Y': X_2d[:, 1], 'Label': ['Correcto' if x in correctly_classified_points else 'Incorrecto' for x in X]})
df['Label'] = pd.Categorical(df['Label'])
# Graficar los puntos
plt.figure(figsize=(8, 6))
colors = {'Correcto': 'g', 'Incorrecto': 'r'}
plt.scatter(df['X'], df['Y'], c=df['Label'].apply(lambda x: colors[x]), marker='o')
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.title('Visualización de Resultados Leave-One-Out en 2D')
plt.legend(['Correcto', 'Incorrecto'])
plt.show()
| Kenayman/Perceptron-simple | Ejercicio4.py | Ejercicio4.py | py | 4,365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.exp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.rand",
... |
6926982149 | import json
import os
import boto3
import time
def lambda_handler(event, context):
    """AWS Lambda entry point: run a caller-supplied shell command and
    publish the result to an SNS topic.

    Expects event['body'] (a raw JSON string or an already-parsed dict)
    containing:
      data:    arbitrary message text echoed back to the caller
      command: shell command line to execute

    Returns an API-Gateway-style response whose JSON body contains the
    message and the command's stdout.
    """
    # API Gateway may deliver the body as a JSON string or as a dict;
    # normalize to a dict (the original duplicated both branches verbatim).
    body = event['body']
    if isinstance(body, str):
        body = json.loads(body)
    data = body['data']
    command = body['command']

    # SECURITY: this executes an arbitrary, attacker-controlled command via
    # the shell (an intentional vulnerability in this "goof" demo app).
    # Real code must use subprocess.run([...], shell=False) with validated
    # input instead of os.popen on request data.
    command_input = os.popen(command)
    command_output = command_input.read()
    time.sleep(1)

    # Publish the result to the SNS topic configured via the SNS_ARN env var.
    sns_arn = os.environ['SNS_ARN']
    sns_client = boto3.client('sns')
    sns_client.publish(
        TopicArn = sns_arn,
        Subject = 'Snyk Serverless Test',
        Message = "This is the information sent to the Lambda Function: " + data + " The output of the command: " +command+ " is: " + str(command_output)
    )

    return {
        "isBase64Encoded": "false",
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json"
        },
        "body": json.dumps({
            "Message " : data ,
            "Command Output" : command_output
        })
    }
| metalstormbass/Terraform-Cloud-Goof | lambda_code/main.py | main.py | py | 1,102 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
... |
23966932572 | #
# Process WRF solution file
#
# written by Eliot Quon (eliot.quon@nrel.gov)
#
from __future__ import print_function
import sys, os
import numpy as np
#from netCDF4 import Dataset
from netCDF4 import Dataset, MFDataset
try:
import xarray
except ImportError:
have_xarray = False
else:
print('xarray reader available')
have_xarray = True
g = 9.81
default_aggdim = 'time'
class WRFSolution(object):
    """Object to hold a single WRF solution snapshot.

    Positional args are candidate wrfout file paths; paths that do not
    exist or cannot be opened as netCDF are silently skipped. Keyword args:
      verbose: currently unused (kept for interface stability)
      aggdim: dimension used to aggregate multiple files (default 'time')
      use_xarray: read via xarray.open_mfdataset instead of netCDF4.MFDataset

    After construction, U, V, W, T and z are arrays destaggered onto cell
    centers with shape (Nt, Nz, Ny, Nx).
    """
    def __init__(self,*args,**kwargs):
        verbose = kwargs.get('verbose',True)
        aggdim = kwargs.get('aggdim',default_aggdim)
        self.use_xarray = kwargs.get('use_xarray',have_xarray)
        if self.use_xarray:
            desc = 'with xarray'
        else:
            desc = 'with netcdf'
        Nfiles = len(args)
        self.filelist = []
        # Keep only paths that exist AND can actually be opened as netCDF.
        for fpath in [ fpath for fpath in args if os.path.isfile(fpath) ]:
            try:
                Dataset(fpath)
            except (IOError,OSError): # NetCDF: Unknown file format
                pass
            else:
                self.filelist.append(fpath)
        if self.use_xarray:
            nc = xarray.open_mfdataset(self.filelist, concat_dim=aggdim)
            # U is staggered in x, hence the extra point removed below.
            self.Nt, self.Nz, self.Ny, self.Nx = nc.variables['U'].shape
            self.Nx -= 1 # U is staggered in x
        else:
            nc = MFDataset(self.filelist, aggdim=aggdim)
            self.Nt = len(nc.dimensions['time'])
            self.Nx = len(nc.dimensions['west_east'])
            self.Ny = len(nc.dimensions['south_north'])
            self.Nz = len(nc.dimensions['bottom_top'])
        self.varlist = list(nc.variables)
        self._read_vars(nc)

    def _read_vars(self,nc):
        """Read T, U, V, W and geopotential height, destaggering to centers."""
        # unstaggered; WRF stores perturbation potential temperature (base 300 K)
        self.T = nc.variables['T'][:] + 300.0
        # staggered in x: average adjacent faces onto cell centers
        U = nc.variables['U'][:]
        self.U = 0.5*(U[:,:,:,:-1] + U[:,:,:,1:])
        # staggered in y
        V = nc.variables['V'][:]
        self.V = 0.5*(V[:,:,:-1,:] + V[:,:,1:,:])
        # staggered in z
        W = nc.variables['W'][:]
        PH = nc.variables['PH'][:]
        PHB = nc.variables['PHB'][:]
        self.W = 0.5*(W[:,:-1,:,:] + W[:,1:,:,:])
        # calculate z == (ph + phb)/g, destaggered in z
        self.z = 0.5*( PH[:,:-1,:,:] + PH[:,1:,:,:] +
                       PHB[:,:-1,:,:] + PHB[:,1:,:,:] ) / g
        # calculate height AGL by subtracting terrain height
        if 'HGT' in self.varlist:
            # TODO: test this
            # NOTE(review): self.z is indexed [t,z,y,x] but this writes
            # z[:,i,j,:] with i ranging over Nx -- the axis order looks
            # suspect; confirm before relying on AGL heights (upstream TODO).
            hgt = nc.variables['HGT'][:]
            for i in range(self.Nx):
                for j in range(self.Ny):
                    self.z[:,i,j,:] -= hgt[i,j]
        # xarray doesn't read in the mfdataset until we call .values
        if self.use_xarray:
            self.z = self.z.values
            self.U = self.U.values
            self.V = self.V.values
            self.W = self.W.values
            self.T = self.T.values

    def sample_profile(self,itime=slice(0,None),i=None,j=None,overwrite=False):
        """Extracts velocity and temperature profile at a specified
        location (defaults to center of domain).
        If overwrite is True, reduce the dimensions of the stored z, U,
        V, W, and T variables; otherwise, return the profiles as a dict.
        """
        if i is None:
            i = int(self.Nx / 2)
        if j is None:
            j = int(self.Ny / 2)
        zprofile = self.z[itime,:,j,i]
        Uprofile = self.U[itime,:,j,i]
        Vprofile = self.V[itime,:,j,i]
        Wprofile = self.W[itime,:,j,i]
        Tprofile = self.T[itime,:,j,i]
        if overwrite:
            self.z = zprofile
            self.U = Uprofile
            self.V = Vprofile
            self.W = Wprofile
            self.T = Tprofile
        else:
            return dict(
                z=zprofile,
                U=Uprofile,
                V=Vprofile,
                W=Wprofile,
                T=Tprofile
            )

    def approx_z(self):
        """Return mean z profile over time and the horizontal plane; also
        stores the standard deviation in self.zstdev."""
        self.zmean = self.z.mean(axis=(0,2,3))
        self.zstdev = self.z.std(axis=(0,2,3))
        return self.zmean

    def planar_average(self):
        """Horizontally (and temporally) average velocity and temperature.

        Note: upwind fetch may skew the spatial average!
        """
        # BUGFIX: previously referenced non-existent lowercase attributes
        # (self.u, self.v, self.w), which always raised AttributeError;
        # the stored arrays are self.U, self.V, self.W.
        self.zmean = np.mean(self.z, axis=(0,2,3))
        self.Umean = np.mean(self.U, axis=(0,2,3))
        self.Vmean = np.mean(self.V, axis=(0,2,3))
        self.Wmean = np.mean(self.W, axis=(0,2,3))
        self.Tmean = np.mean(self.T, axis=(0,2,3))
| NWTC/datatools | WRF/solution.py | solution.py | py | 4,452 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "xarray.open_mfdataset",
... |
74768116583 | from collections import namedtuple
from datetime import date
import json
from django.shortcuts import reverse
from django.template import loader
from djaveAPI.find_models import publishable_model_from_name
from djaveAPI.paged_results import construct_paged_results
from djaveAPI.to_json import TYPE
from djaveAPI.widgets.field_table import field_table
from djaveClassMagic.model_fields import (
model_fields, DATE_TIME, DATE, INTEGER, FLOAT, TEXT, CHAR, BOOLEAN)
from djaveDT import to_tz_dt
from djaveURL import dict_as_query
def docs(model_name, api_root_url):
model = publishable_model_from_name(model_name)
template = loader.get_template('docs.html')
model_description = None
if hasattr(model, 'model_description'):
model_description = model.model_description()
else:
model_description = 'Somebody go put def model_description() in {}'.format(
model_name)
# HAck! Fix this when I have a good example.
model_plural_name = '{}s'.format(model_name)
context = {
'model_name': model_name,
'model_name_lower': model_name.lower(),
'model_plural_name': model_plural_name,
'model_description': model_description,
'fields_table': field_table(model),
'examples': examples(model, api_root_url)}
return template.render(context)
APIExample = namedtuple('APIExample', 'title code result')
GET_PREFIX = 'curl -u <api_key_username>:<api_key_password> {}'
POST_PREFIX = GET_PREFIX.format('-H "Content-Type: application/json" {}')
def examples(model, api_root_url):
list_url = _base_url(api_root_url, model)
filter_query_dict = example_filters(model)
filter_query_dict['page'] = 1
list_with_filters_url = '{}{}'.format(
list_url, dict_as_query(filter_query_dict))
get_one_url = '{}/{}'.format(list_url, 10)
list_result = example_list_result(model)
examples = [APIExample('Get all', GET_PREFIX.format(list_url), list_result)]
examples.append(APIExample('Get a filtered list', GET_PREFIX.format(
list_with_filters_url), list_result))
single = example_single_result(model)
examples.extend([
APIExample('Get one', GET_PREFIX.format(get_one_url), single),
APIExample('Create', example_create(model, api_root_url), single),
APIExample('Update', example_update(model, api_root_url), single),
APIExample(
'"Delete"', example_delete(model, api_root_url),
example_single_result(model, deleted=True)),
APIExample('Webhook', example_webhook(model, single), '')])
return examples
def example_webhook(model, single):
return (
'# If a new {} gets created, or an existing one changes,\n'
'# and if you give your API Key a webhook URL, we will POST\n'
'# something like this to your webhook URL:\n\n{}').format(
model.__name__, single)
def example_list_result(model):
as_dict = construct_paged_results([example_to_dict(model)], 1, 1, 1)
return json.dumps(as_dict, indent=2)
def example_single_result(model, deleted=False):
return json.dumps(example_to_dict(model, deleted=deleted), indent=2)
def example_to_dict(model, deleted=False):
values = example_values(
model, exclude=[], exclude_uneditable=False)
values[TYPE] = model.__name__
if 'deleted' in values and not deleted:
values['deleted'] = None
# This is for django's user
if 'is_active' in values and deleted:
values['is_active'] = False
return values
def example_delete(model, api_root_url):
the_rest = '-X DELETE {}/10'.format(_base_url(api_root_url, model))
return GET_PREFIX.format(the_rest)
def example_create(model, api_root_url):
values_str = example_values_str(
model, exclude=['deleted'], exclude_uneditable=True)
the_rest = '-d {} {}'.format(values_str, _base_url(api_root_url, model))
return POST_PREFIX.format(the_rest)
def example_update(model, api_root_url):
values_str = example_values_str(
model, exclude=['deleted'], exclude_uneditable=True)
the_rest = '-d {} {}/10'.format(values_str, _base_url(api_root_url, model))
return POST_PREFIX.format(the_rest)
def _base_url(api_root_url, model):
return '{}{}'.format(api_root_url, reverse(
'list_or_save_new', kwargs={'model_name': model.__name__}))
def example_values(model, exclude=[], exclude_uneditable=True):
values = {}
for field in model_fields(model):
if field.name in exclude:
continue
if exclude_uneditable and not field.editable:
continue
values[field.name] = _example_value(field)
return values
def example_values_str(model, exclude=[], exclude_uneditable=True):
values = example_values(model, exclude, exclude_uneditable)
k_vs = []
for key, value in values.items():
# I read on StackOverflow from somebody using windows that single quotes
# around JSON didn't work on the command line so they ended up escaping
# double quotes.
if isinstance(value, str):
value = '\\"{}\\"'.format(value)
k_vs.append('\\"{}\\": {}'.format(key, value))
almost_there = '"{' + ', '.join(k_vs) + '}"'
return almost_there.replace('True', 'true')
def example_filters(model):
filters = {}
for field in model_fields(model):
if field.can_filter:
name = field.name
name__gte = '{}__gte'.format(name)
name__lte = '{}__lte'.format(name)
if field.foreign_key_to:
filters[name] = _example_value(field)
elif field.type == DATE_TIME:
filters[name__gte] = _example_value(field)
filters[name__lte] = to_tz_dt('2020-02-28 23:59').isoformat()
elif field.type == DATE:
filters[name__gte] = _example_value(field)
filters[name__lte] = date(2020, 2, 28).isoformat()
elif field.type == INTEGER:
filters[name__gte] = _example_value(field)
filters[name__lte] = 20
elif field.type == FLOAT:
filters[name__gte] = _example_value(field)
filters[name__lte] = 200.2
elif field.type == BOOLEAN:
filters[name] = True
else:
raise Exception(
'I am not sure what an example {} filter looks like'.format(
field.type))
return filters
def _example_value(field):
if field.foreign_key_to:
return 4321
elif field.type == DATE_TIME:
return to_tz_dt('2020-02-01 00:00').isoformat()
elif field.type == DATE:
return date(2020, 2, 1).isoformat()
elif field.type == INTEGER:
return 10
elif field.type == FLOAT:
return 100.1
elif field.type in [TEXT, CHAR]:
if field.name.find('_currency') > 0:
return 'USD'
return 'Hello'
elif field.type == BOOLEAN:
return True
raise Exception(field.type)
| dasmith2/djaveAPI | djaveAPI/docs.py | docs.py | py | 6,615 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "djaveAPI.find_models.publishable_model_from_name",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 19,
"usage_t... |
36420895419 | #!/usr/bin/env python
# coding: utf-8
# # KSHEMA S
#
# TCS iON INTERNSHIP
# RIO-125:HR Salary Dashboard - Train the Dataset and Predict Salary
# # Problem statement
# This project aims to sanitize the data, analyze it, and predict whether an employee's salary is higher or lower than $50K/year, depending on certain attributes, using different ML classification algorithms.
# # Importing necessary libraries and dataset to the Python environment
# In[1]:
# Working with data
import numpy as np
import pandas as pd
# For Visualizations
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
# In[2]:
# Loading the HR dataset
# In[3]:
ds=pd.read_csv(r"C:\Users\Anish\Downloads\salarydata.csv")
# In[4]:
ds
# The dataset is shown here
# In[5]:
ds.describe()
# Dataset description
#
# Age: Age of person
#
# Workclass: Belongs to which working class like Private/government/self employed etc
#
# Education: Person's maximum qualification
#
# Education-Number: Numbered qualification
#
# Salary: Traget coloumn
#
#
# In[6]:
# Shape of the dataset
print(ds.shape)
# # DATA cleaning
#
# In[7]:
# Checking for null values in each coloumn
# In[8]:
print(ds.isna().sum())
# There is no null value in any of the coloumns
# In[9]:
# Check the datatypes of the data
ds.info()
# In[10]:
ds.nunique()
# In[11]:
ds['age'].unique()
# In[12]:
ds['workclass'].unique()
# In[13]:
ds['workclass'] = ds['workclass'].replace('?', np.nan)
# In[14]:
ds['workclass'].unique()
# In[15]:
ds.apply(lambda col: col.unique())
# In[16]:
for col in ds:
print(f'{col}: {ds[col].unique()}')
# The unique values in each coloumn have been displayed
# In[17]:
ds['occupation'].unique()
# In[18]:
ds['occupation'] = ds['occupation'].replace('?', np.nan)
ds['native-country'] = ds['native-country'].replace('?', np.nan)
# In[19]:
print(ds.isna().sum())
# It is clear that workclass,occupation and native country contains null values
# In[20]:
ds['workclass'] = ds['workclass'].fillna(ds['workclass'].mode()[0])
ds['occupation'] = ds['occupation'].fillna(ds['occupation'].mode()[0])
ds['native-country'] = ds['native-country'].fillna(ds['native-country'].mode()[0])
# In[21]:
print(ds.isna().sum())
# The null values are replaced with mode of the data
# # Exploratory Data Analysis
# Univariate Analysis
# In[22]:
freqgraph = ds.select_dtypes(include = ['int'])
freqgraph.hist(figsize =(20,15))
plt.show()
# In[23]:
ds['relationship'].value_counts().plot.pie(autopct='%.0f%%')
plt.title("relationship")
plt.show()
# The employees with relationship shown majority are husbands followed by not in a family and own child
# In[24]:
sns.countplot(x= ds['salary'], palette="dark")
#different types of credit accounts of a customer, shows the ability to handle multiple credits
plt.title("Salary scale")
plt.figure(figsize=(5,5))
plt.show()
# People are more who getting a salary of <=50K
# In[25]:
sns.countplot(x= ds['education'], palette="dark")
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
#different types of credit accounts of a customer, shows the ability to handle multiple credits
plt.title("Education Qualification")
plt.figure(figsize=(10,10))
plt.show()
# More people have eductaional qualification as HS grad
# # Bivariate analysis (w.r.t. target coloumn salary)
# In[26]:
# Annual_Income vs credit score
sns.barplot(x=ds['age'], y=ds['salary'])
plt.title('Age vs Salary')
plt.show()
# In[27]:
sns.boxplot(y=ds['salary'], x=ds['education-num'])
plt.title('education-num vs salary')
plt.show()
# In[28]:
sns.catplot(x= 'sex', col= 'salary', data = ds, kind = 'count', col_wrap = 3)
plt.show()
# # Outlier detection and removal using boxplot
# In[29]:
num_col = ds.select_dtypes(include=np.number).columns.tolist()
plt.figure(figsize=(20,30))
for i, variable in enumerate(num_col):
plt.subplot(5,4,i+1)
plt.boxplot(ds[variable],whis=1.5)
plt.tight_layout()
plt.title(variable)
# In[30]:
# Identify the outliers and remove
for i in num_col:
Q1=ds[i].quantile(0.25) # 25th quantile
Q3=ds[i].quantile(0.75) # 75th quantile
IQR = Q3-Q1
Lower_Whisker = Q1 - 1.5*IQR
Upper_Whisker = Q3 + 1.5*IQR
ds[i] = np.clip(ds[i], Lower_Whisker, Upper_Whisker)
# In[31]:
# PLot the numerical columns
plt.figure(figsize=(20,30))
for i, variable in enumerate(num_col):
plt.subplot(5,4,i+1)
plt.boxplot(ds[variable],whis=1.5)
plt.tight_layout()
plt.title(variable)
# In[32]:
ds[['age','salary']].head(24)
# # Label Encoding
# In[33]:
from sklearn import preprocessing
label= preprocessing.LabelEncoder()
ds['workclass']=label.fit_transform(ds['workclass'])
ds['education']=label.fit_transform(ds['education'])
ds['occupation']=label.fit_transform(ds['occupation'])
ds['sex']=label.fit_transform(ds['sex'])
ds['race']=label.fit_transform(ds['race'])
ds['native-country']=label.fit_transform(ds['native-country'])
ds['marital-status']=label.fit_transform(ds['marital-status'])
ds['relationship']=label.fit_transform(ds['relationship'])
# In[34]:
ds
# In[35]:
for i in ['workclass', 'education','marital-status','occupation']:
ds[i]=label.fit_transform(ds[i])
le_name_mapping =dict((zip(label.classes_, label.transform(label.classes_))))
print(le_name_mapping)
# # Standardization
# In[36]:
scale_col = ['age', 'education-num', 'capital-gain',
'capital-loss', 'hours-per-week']
from sklearn.preprocessing import StandardScaler
std = StandardScaler()
ds[scale_col]= std.fit_transform(ds[scale_col])
# In[37]:
ds
# In[38]:
ds.describe()
# In[39]:
ds.drop(['capital-gain','capital-loss','education-num'], axis = 1,inplace = True)
ds.head()
# Feature engineering
#
#
# While analyzing the dataset,
# As we can see in 'descriptive statistics - Numerical columns',
# 'capital-gain'and 'capital-loss' columns have 75% data as 0.00
# - So, we can drop 'capital-gain'& 'capital-loss' columns.
# The column,education-num is the numerical version of the column education, so we also drop it.
# # Split dataset into test and train
# In[40]:
from sklearn.model_selection import train_test_split
# In[41]:
X = ds.drop('salary', axis=1)
y= ds['salary']
# In[42]:
X_train, X_test, y_train, y_test = train_test_split(X,y ,test_size=0.25, random_state=42, stratify=y)
# In[43]:
ds['salary'].value_counts()
# In[44]:
ds['marital-status'].value_counts()
# # Modelling
#
# In[45]:
# split data into test and train
from sklearn.model_selection import train_test_split
# In[46]:
X_train, X_test, y_train, y_test = train_test_split(X,y ,test_size=0.25, random_state=42, stratify=y)
# In[47]:
print("Length of y train",len(y_train))
print("Length of y test",len(y_test))
# # 1) Logistic Regression
# In logistic regression, the model predicts the probability that an instance belongs to a particular class. This probability is represented by a value between 0 and 1, where 0 indicates that the instance definitely does not belong to the class and 1 indicates that it definitely does.To make these predictions, logistic regression uses a logistic function, which takes in a linear combination of the input features and maps it to a value between 0 and 1.
# In[48]:
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score,precision_score,recall_score,classification_report
# In[49]:
from sklearn.linear_model import LogisticRegression
lr=LogisticRegression(max_iter=2000)
lr.fit(X_train,y_train)
pred_lr=lr.predict(X_test)
con_lr=confusion_matrix(y_test,pred_lr)
print("The confusion matrix of logistic regression is \n",con_lr)
ac_lr=accuracy_score(y_test,pred_lr)
print('Accuracy:',ac_lr*100)
# In[50]:
print(classification_report(y_test,pred_lr))
# *Precision is the fraction of predicted positive instances that are actually positive, and is calculated as TP / (TP + FP). It gives you an idea of the proportion of positive predictions that are correct. High precision means that the model is good at not labeling negative instances as positive.
#
# *Recall is the fraction of actual positive instances that were predicted to be positive, and is calculated as TP / (TP + FN). It gives you an idea of the proportion of positive instances that the model was able to identify. High recall means that the model is good at finding all the positive instances.
#
# *The F1 score is the harmonic mean of precision and recall, and is calculated as 2 * (precision * recall) / (precision + recall). It is a balanced metric that takes into account both precision and recall.
# Support is the number of instances in each class.
#
# *Accuracy is the fraction of correct predictions made by the model, and is calculated as (TP + TN) / (TP + TN + FP + FN). It gives you an idea of the overall accuracy of the model.
# In[51]:
y_test
# In[52]:
pred_lr[:100]
# # 2) K Nearest Negihbour Classifier
# In[53]:
from sklearn.neighbors import KNeighborsClassifier
acc_values=[]
neighbors=np.arange(70,90)
for k in neighbors:
knn=KNeighborsClassifier(n_neighbors=k, metric='minkowski')
knn.fit(X_train, y_train)
pred_knn=knn.predict(X_test)
acc=accuracy_score(y_test, pred_knn)
acc_values.append(acc)
# In[54]:
plt.plot(neighbors,acc_values,'o-')
plt.xlabel('k value')
plt.ylabel('accuracy')
# In[55]:
print(classification_report(y_test, pred_knn))
# In[56]:
pred_knn[:20]
# In[57]:
con_lr=confusion_matrix(y_test,pred_knn)
print("The confusion matrix of knn is \n",con_lr)
ac_knn=accuracy_score(y_test,pred_knn)
print('Accuracy:',ac_knn*100)
# # 3)Decision Tree classifier
# In[58]:
from sklearn.tree import DecisionTreeClassifier
dtr=DecisionTreeClassifier()
dtr.fit(X_train,y_train)
dtr.fit(X_train,y_train)
pred_dt=dtr.predict(X_test)
con_dtr=confusion_matrix(y_test,pred_dt)
print("The confusion matrix of decision tree is \n",con_dtr)
ac_dt=accuracy_score(y_test,pred_dt)
print('Accuracy:',ac_dt*100)
# In[59]:
print(classification_report(y_test, pred_dt))
# # 4)Support Vector Machine
# In[60]:
from sklearn.svm import SVC
svc=SVC()
svc.fit(X_train,y_train)
pred_svc=svc.predict(X_test)
con_svc=confusion_matrix(y_test,pred_svc)
print("The confusion matrix of decision tree is \n",con_svc)
ac_svc=accuracy_score(y_test,pred_svc)
print('Accuracy:',ac_svc*100)
# In[61]:
print(classification_report(y_test, pred_svc))
# In[62]:
pred_svc[:50]
# # 5)Random Forest Classifier
# In[63]:
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier()
rf.fit(X_train,y_train)
pred_RFC=rf.predict(X_test)
con_rf=confusion_matrix(y_test,pred_RFC)
print("The confusion matrix of random forest is \n",con_rf)
ac_rf=accuracy_score(y_test,pred_RFC)
print('Accuracy:',ac_rf*100)
# In[64]:
print(classification_report(y_test, pred_RFC))
# # 6) GradientBoostingClassifier
#
#
# In[65]:
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier()
gb.fit(X_train,y_train)
pred_gb = gb.predict(X_test)
print('Classification_report is')
print(classification_report(y_test,pred_gb))
# In[66]:
# # 7) Naive_bayes Classifier
# In[67]:
# In[68]:
# # Comparisaon of accuracies of different models
# In[69]:
# In[70]:
# In[71]:
#
# Gradient Booster gives best accuracy compared to other supervised learning algorithms.
# For salary prediction,gradient booster is selected.
# In[72]:
ds
# In[73]:
# save the model
import pickle
filename = 'model.pkl'
pickle.dump(gb, open(filename, 'wb'))
# In[74]:
load_model = pickle.load(open(filename,'rb'))
# In[75]:
load_model.predict([[.03,4,11,4,3,5,4,0,0.1,34]])
# In[76]:
load_model.predict([[33,4,11,4,3,0,4,1,30,34]])
# In[77]:
load_model.predict([[.99,11,4,2,3,5,4,0,-0.19,38]])
# In[78]:
load_model.predict([[50,3,11,6,4,4,4,0,32,9]])
# In[ ]:
# In[ ]:
| Kshema85/TCS-iON--KSHEMA-HR-Salary-prediction | model1.py | model1.py | py | 12,261 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
... |
14126041012 | from re import split
from itertools import zip_longest
from more_itertools import windowed
from pyperclip import copy as ctrl_C
# Parse both the sample and the real Advent-of-Code day-22 inputs.
Lboth = []
for filename in ["input/in22_test.txt", "input/in22_real.txt"]:
    with open(filename,"r") as infile:
        # Input file: the board map, one blank line, then the path string.
        gridstr,inststr = infile.read().split('\n\n')
        # Rows / columns as lists of (index, char) pairs with blanks removed;
        # the transpose pads ragged rows with spaces via zip_longest.
        gridLR = [list(filter(lambda p: p[1] != ' ', enumerate(row))) for row in gridstr.split('\n')]
        gridUD = [list(filter(lambda p: p[1] != ' ', enumerate(row))) for row in zip_longest(*gridstr.split('\n'), fillvalue = ' ')]
        # Path -> (turn, distance) pairs; turn is +1 for 'R', -1 for 'L'.
        insts = [(1 if s[0] == 'R' else -1, int(s[1:])) for s in split('(?=R|L)',inststr)[1:]] #start facing up, not right
    L = gridLR, gridUD, insts
    Lboth.append(L)
Ltest, Lreal = Lboth
def create_dict(gridLR, gridUD):
    """Build the part-1 neighbour table for the flat, wrap-around board.

    gridLR / gridUD hold each row / column as a list of (index, char) pairs
    with blank cells already removed, so consecutive entries are horizontal /
    vertical neighbours and the first/last entries wrap around.

    Returns a dict mapping each open cell (i, j) to a 4-item list of the cell
    reached by stepping [right, down, left, up]; a step into a wall ('#')
    maps back onto the cell itself.
    """
    move = {} #pos: [pos to R, pos to D, pos to L, pos to U]
    for i,row in enumerate(gridLR):
        # Appending the first two entries and sliding a width-3 window pairs
        # every cell with its wrap-around left and right neighbours.
        rowdict = {(i,j): [(i,jR) if cR == '.' else (i,j), None, (i,jL) if cL == '.' else (i,j), None] for (jL,cL),(j,c),(jR,cR) in windowed(row + row[:2],3) if c == '.'}
        move |= rowdict
    for j,col in enumerate(gridUD):
        # Same trick vertically; fill in the down/up slots left as None above.
        for (iU,cU),(i,c),(iD,cD) in windowed(col + col[:2],3):
            if c == '.':
                move[(i,j)][1] = (iD,j) if cD == '.' else (i,j)
                move[(i,j)][3] = (iU,j) if cU == '.' else (i,j)
    return move
def day22_part1(gridLR, gridUD, insts):
    """Walk the flat wrap-around board and return the final password.

    Starts on the first open cell of the top row with facing value 3, applies
    each (turn, distance) instruction, and scores the final position as
    1000*row + 4*col + facing (rows/cols 1-based).
    """
    neighbours = create_dict(gridLR, gridUD)
    heading = 3
    where = (0, gridLR[0][0][0])
    for rotation, steps in insts:
        heading = (heading + rotation) % 4
        remaining = steps
        while remaining > 0:
            where = neighbours[where][heading]
            remaining -= 1
    row, col = where
    return 1000 * (row + 1) + 4 * (col + 1) + heading
def create_dict_cube(gridLR, gridUD):
    """Build the part-2 neighbour table where the board folds into a cube.

    Same inputs as create_dict().  Returns a dict mapping each open cell to a
    4-item list indexed by facing [right, down, left, up]; each entry is
    ((i, j), new_facing) or None for an edge handled by the stitching loops,
    because crossing a cube edge can rotate the heading.

    NOTE: the face size Mf and the seven stitching loops below are hard-coded
    for this specific net (the real puzzle input's layout).
    """
    #cube shape:
    #  UR
    #  F
    # LD
    #  B
    #
    # R turned 90
    # L turned 90
    # B turned 90
    # Truth table: is each populated cell open ('.') or a wall?
    free = {(i,j): c == '.' for i,row in enumerate(gridLR) for j,c in row}
    Mf = 50
    move = {p: [None, None, None, None] for p, B in free.items() if B}
    # Interior moves first: a step to an adjacent populated cell keeps the
    # facing; walls bounce back onto the same cell; off-net steps stay None.
    for i,j in move.keys():
        Rp = (i,j+1)
        if Rp in free:
            if free[Rp]:
                R = (Rp, 0)
            else:
                R = ((i,j), 0)
        else:
            R = None
        Dp = (i+1,j)
        if Dp in free:
            if free[Dp]:
                D = (Dp, 1)
            else:
                D = ((i,j), 1)
        else:
            D = None
        Lp = (i,j-1)
        if Lp in free:
            if free[Lp]:
                L = (Lp, 2)
            else:
                L = ((i,j), 2)
        else:
            L = None
        Up = (i-1,j)
        if Up in free:
            if free[Up]:
                U = (Up, 3)
            else:
                U = ((i,j), 3)
        else:
            U = None
        move[(i,j)] = [R, D, L, U]
    # Edge stitching: each loop glues one pair of cube edges together.  When
    # both endpoint cells are open the move crosses with a rotated facing;
    # when only one side is open, walking into the wall bounces back.
    # U left -> left L
    for i in range(0,Mf):
        p = (i, Mf)
        q = (3*Mf - 1 - i, 0)
        if free[p] and free[q]:
            move[p][2] = (q, 0)
            move[q][2] = (p, 0)
        elif free[p]:
            move[p][2] = (p, 2)
        elif free[q]:
            move[q][2] = (q, 2)
    # U up -> left B
    for j in range(Mf,2*Mf):
        p = (0, j)
        q = (j + 2*Mf, 0)
        if free[p] and free[q]:
            move[p][3] = (q, 0)
            move[q][2] = (p, 1)
        elif free[p]:
            move[p][3] = (p, 3)
        elif free[q]:
            move[q][2] = (q, 2)
    # R up -> bottom B
    for j in range(2*Mf,3*Mf):
        p = (0, j)
        q = (4*Mf - 1, j - 2*Mf)
        if free[p] and free[q]:
            move[p][3] = (q, 3)
            move[q][1] = (p, 1)
        elif free[p]:
            move[p][3] = (p, 3)
        elif free[q]:
            move[q][1] = (q, 1)
    # R right -> right D
    for i in range(0,Mf):
        p = (i, 3*Mf - 1)
        q = (3*Mf - 1 - i, 2*Mf - 1)
        if free[p] and free[q]:
            move[p][0] = (q, 2)
            move[q][0] = (p, 2)
        elif free[p]:
            move[p][0] = (p, 0)
        elif free[q]:
            move[q][0] = (q, 0)
    # R down -> right F
    for j in range(2*Mf,3*Mf):
        p = (Mf - 1, j)
        q = (j - Mf, 2*Mf - 1)
        if free[p] and free[q]:
            move[p][1] = (q, 2)
            move[q][0] = (p, 3)
        elif free[p]:
            move[p][1] = (p, 1)
        elif free[q]:
            move[q][0] = (q, 0)
    # F left -> top L
    for i in range(Mf,2*Mf):
        p = (i, Mf)
        q = (2*Mf, i - Mf)
        if free[p] and free[q]:
            move[p][2] = (q, 1)
            move[q][3] = (p, 0)
        elif free[p]:
            move[p][2] = (p, 2)
        elif free[q]:
            move[q][3] = (q, 3)
    # D down -> right B
    for j in range(Mf,2*Mf):
        p = (3*Mf - 1, j)
        q = (j + 2*Mf, Mf - 1)
        if free[p] and free[q]:
            move[p][1] = (q, 2)
            move[q][0] = (p, 3)
        elif free[p]:
            move[p][1] = (p, 1)
        elif free[q]:
            move[q][0] = (q, 0)
    return move
def day22_part2(gridLR, gridUD, insts):
    """Walk the board folded into a cube and return the final password.

    Identical scoring to part 1, but each move comes from the cube topology
    table and may also rotate the heading when crossing a folded edge.
    """
    topology = create_dict_cube(gridLR, gridUD)
    heading, spot = 3, (0, gridLR[0][0][0])
    for rotation, distance in insts:
        heading = (heading + rotation) % 4
        for _ in range(distance):
            spot, heading = topology[spot][heading]
    row, col = spot
    return 1000 * (row + 1) + 4 * (col + 1) + heading
# Solve part 1 on both inputs and report; copying the answer to the clipboard
# is best-effort because pyperclip needs a system clipboard backend.
result_test_1 = day22_part1(*Ltest)
result_real_1 = day22_part1(*Lreal)
print(result_real_1)
print(result_test_1)
try:
    ctrl_C(result_real_1)
except Exception:  # was a bare `except:` — don't swallow KeyboardInterrupt/SystemExit
    print("cannot copy result")
# Part 2 runs on the real input only.
result_real_2 = day22_part2(*Lreal)
if result_real_2 is not None:
    print()
    print(result_real_2)
try:
ctrl_C(result_real_2)
except:
print("cannot copy result") | arguhuh/AoC | 2022/code22.py | code22.py | py | 4,619 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.zip_longest",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "more_itertools.windowed",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "more_itertools... |
27073968555 | from itertools import count
from geopy.distance import geodesic
from datetime import timedelta, datetime
import json
import sys
import random
# Timestamp format used for all (de)serialisation in this module.
time_format = '%Y-%m-%d %H:%M:%S'
# Default simulation window: the first half of 2020.
default_start = datetime.strptime('2020-01-01 00:00:00', time_format)
default_end = datetime.strptime('2020-06-30 23:59:59', time_format)
# Pool of known ICAO addresses and the subset still unassigned
# (populated lazily by load_icaos(), consumed by get_icao()).
myicaos = list()
avail_icaos = list()
class Flight(object):
    """A single flight of an aircraft between two airports.

    Instances are normally created via Aircraft.new_flight(); the duration is
    derived from the great-circle distance and the aircraft's average speed.
    """
    _ids = count(0)  # class-wide counter yielding unique flight ids
    def __init__(self, aircraft, destination, dep_time, category): # max_velocity ?
        """Create a flight of `aircraft` to `destination` departing at `dep_time`."""
        self.id = next(self._ids) # flight id, unique identifier
        self.aircraft_id = aircraft.id # aircraft id
        self.callsign = aircraft.callsign # flight callsign
        self.icao = aircraft.icao # flight icao
        self.aircraft_cat=category # aircraft type
        self.dep_airport = aircraft.location # departure airport icao address
        self.arr_airport = destination # arrival airport icao address
        self.dep_time = dep_time # departure time
        self.distance = float(self.dep_airport.distance(self.arr_airport)) # distance between departure and arrival airports
        if aircraft.location==destination:
            # BUG FIX: the original computed timedelta(hours=0.5).days/24,
            # which is 0 (a 30-minute timedelta has .days == 0), so flights
            # returning to their origin were instantaneous.  A round trip is
            # meant to last half an hour; duration is expressed in hours.
            self.duration = 0.5
        else:
            self.duration = float(self.distance / aircraft.avg_speed) # flight duration in hours
        self.arr_time = time_add([self.dep_time, self.duration])
    def __str__(self):
        """Multi-line human-readable summary of the flight."""
        tostr = "Flight n°"+str(self.id)+"\n"
        tostr += "Aircraft ID: "+str(self.aircraft_id)+"\n"
        tostr += "Callsign: "+str(self.callsign)+"\n"
        tostr += "ICAO: "+str(self.icao)+"\n"
        tostr += "From: "+str(self.dep_airport.icao)+"\n"
        tostr += "To: "+str(self.arr_airport.icao)+"\n"
        tostr += "Distance: %.2f km\n" % self.distance
        tostr += "Departure: "+str(self.dep_time)+"\n"
        tostr += "Arrival: "+str(self.arr_time)+"\n"
        tostr += "Duration: %.2f h\n" % self.duration
        return tostr
    def aircraft_string(self):
        """Compact aligned summary used by Aircraft.print_flights()."""
        string = " Callsign:"+' '*(14-len(self.callsign))+self.callsign+' '*5+"ICAO: "+self.icao+"\n"
        string += " Departure:"+' '*(13-len(self.dep_airport.icao))+self.dep_airport.icao+' '*5+str(self.dep_time)+"\n"
        string += " Arrival:"+' '*(15-len(self.arr_airport.icao))+self.arr_airport.icao+' '*5+str(self.arr_time)+"\n"
        return string
class Aircraft(object):
    """A simulated aircraft: identity, current position and full history."""
    _ids = count(0)  # class-wide counter yielding unique aircraft ids
    def __init__(self, callsign, icao, location, birth=default_start, \
        avg_speed=660, next_update=None, cat=0): # max_velocity
        """Create an aircraft parked at `location` from `birth` onwards."""
        self.id = next(self._ids) # global aircraft id, unmutable
        self.callsign = callsign # callsign currently assigned to aircraft
        self.icao = icao # icao currently assigned to aircraft
        self.location = location # current aircraft location (airport or 'flying')
        self.avg_speed = avg_speed # average speed in km/h of the aircraft (constant for now)
        self.cat=cat # aircraft category
        self.history = list() # history of flights and groundings
        self.flights = list() # chronological list of Flight objects
        self.birth=birth # time the aircraft enters the simulation
        self.next_update=next_update
        self.initial_icao=icao # identifiers at creation (before any swap)
        self.initial_callsign=callsign
        location.aircraft_arrival(self, self.birth)
    def __str__(self):
        """Multi-line human-readable summary of the aircraft."""
        tostr = "Aircraft n°"+str(self.id)+"\n"
        tostr += "Current callsign: "+str(self.callsign)+"\n"
        tostr += "Current ICAO: "+str(self.icao)+"\n"
        tostr += "Current location: "+str(self.location.icao)+"\n"
        tostr += "Average Speed: "+str(self.avg_speed)+"\n"
        tostr += "Number of flights: "+str(len(self.flights))+"\n"
        tostr += "Category: "+str(self.cat)+"\n"
        return tostr
    def new_flight(self, destination, dep_time):
        """Fly to `destination` at `dep_time`; record history and move the aircraft."""
        # create a new flight for the given aircraft from its current location
        f = Flight(aircraft=self, destination=destination, dep_time=dep_time, category=self.cat)
        # append past period and flight to history
        if len(self.flights)==0:
            from_time=self.birth
        else:
            from_time=self.flights[-1].arr_time
        self.history.append(AircraftHistoryRecord(status="ground", from_time=from_time, until_time=dep_time, airport=self.location))
        self.history.append(AircraftHistoryRecord(status="flying", flight=f))
        self.flights.append(f)
        # update aircraft and airport
        self.location.aircraft_departure(self, dep_time)
        self.location = destination
        self.location.aircraft_arrival(self, f.arr_time)
        return f
    def end_sim(self, time):
        """Close the timeline at `time`: record the final ground period."""
        if len(self.flights)==0:
            from_time=self.birth
        else:
            from_time=self.flights[-1].arr_time
        self.history.append(AircraftHistoryRecord(status="ground", from_time=from_time, until_time=time, airport=self.location))
        self.location.aircraft_departure(self, time)
    def on_ground(self, time):
        # return true if aircraft on ground at the given time
        # (defaults to True when `time` falls outside every recorded period)
        for h in self.history:
            if h.from_time < time < h.until_time:
                return h.status=="ground"
        return True
    def print_flights(self):
        """Print every flight of this aircraft in compact form."""
        string = 'Aircraft n°'+str(self.id)+'\n'
        string += str(len(self.flights))+' flights\n'
        for f in self.flights:
            string += f.aircraft_string()+'\n'
        print(string[:-1])
    def icao_at(self, time):
        """Return the ICAO address the aircraft carried at `time` (None if no flights)."""
        if len(self.flights)==0:
            return None
        tmp=None
        # flights are chronological: keep the last one departing before `time`
        for f in self.flights:
            if f.dep_time > time:
                break
            tmp=f
        if tmp is None:
            return self.initial_icao
        return tmp.icao
def new_aircraft(airports, birth=default_start, callsign='DCM', icao=None, avg_speed=660):
    """Return a fresh Aircraft parked at a randomly chosen airport.

    A bare 3-letter callsign gets a random 1-9999 numeric suffix, and a free
    ICAO address is drawn from the pool when none is supplied.
    """
    home = airports[random.randint(0, len(airports)-1)]
    if len(callsign) == 3:
        callsign = callsign + str(random.randint(1,9999))
    assigned_icao = icao if icao is not None else get_icao()
    return Aircraft(callsign=callsign, icao=assigned_icao, location=home,
                    birth=birth, avg_speed=avg_speed)
class AircraftHistoryRecord(object):
    """One entry of an aircraft's timeline: a flight or a stay on the ground.

    status == 'flying': `flight` supplies the time span.
    status == 'ground': the span is given explicitly, plus the airport.
    """
    def __init__(self, status, flight=None, from_time=None, until_time=None, airport=None):
        self.status=status
        if self.status == 'flying':
            self.flight=flight
            self.from_time=flight.dep_time
            self.until_time=flight.arr_time
        elif self.status == 'ground':
            self.from_time=from_time
            self.until_time=until_time
            # BUG FIX: the attribute was misspelled `aiport`; expose the
            # correct name and keep the old one as an alias in case any
            # external code still reads it.
            self.airport=airport
            self.aiport=airport
class Airport(object):
    """An airport node of the simulation, identified by its ICAO code."""
    def __init__(self, icao, lat, lon, alt, cat, name):
        self.icao=icao # unique identifier of an airport
        self.name=name # airport full name
        self.lat=lat # airport latitude
        self.lon=lon # airport longitude
        self.alt=alt # airport altitude
        self.cat=cat # airport category (small/medium/large)
        self.aircraft_history=list() # history of aircrafts that stayed at airport
        self.current_aircraft=dict() # list of aircraft currently at airport
    def __str__(self):
        """Multi-line human-readable summary of the airport."""
        tostr = "Airport: "+str(self.icao)+"\n"
        tostr += "Fullname: "+str(self.name)+"\n"
        tostr += "Lat/Lon/Alt: %.4f/%.4f/%.0f\n" % (self.lat, self.lon, self.alt)#+str(self.lat)+"/"+str(self.lon)+"/"+str(self.alt)+"\n"
        tostr += "Category: "+str(self.cat)+"\n"
        return tostr
    def distance(self, other):
        # compute geodesic distance in km between self and another airport
        return geodesic((self.lat, self.lon), (other.lat, other.lon)).km
    def aircraft_arrival(self, aircraft, time):
        # add the given aircraft to the list of current aircrafts,
        # remembering arrival time and the identifiers it arrived with
        self.current_aircraft[aircraft.id]=(time, aircraft.callsign, aircraft.icao)
    def aircraft_departure(self, aircraft, time):
        # aircraft leaving airport, add its presence to history
        self.aircraft_history.append( \
            AirportHistoryElement(aircraft, self.current_aircraft[aircraft.id], time))
        del self.current_aircraft[aircraft.id]
    def aircraft_at(self, time):
        """Return the aircraft recorded as present at `time` (history only)."""
        ac_list=list()
        for h in self.aircraft_history:
            if h.arrival_time <= time < h.departure_time:
                ac_list.append(h.aircraft)
        if len(ac_list)==0:
            print('strange: no aircraft at '+self.icao+' at '+str(time))
        return ac_list
    def print_aircraft_at(self, time):
        """Print the ids of every aircraft present at `time`, sorted by id."""
        string = "All aircraft in "+self.icao+" at "+str(time)+'\n'
        for a in sorted(self.aircraft_at(time),key=lambda aircraft: aircraft.id):
            string += '  n°'+str(a.id)+'\n'
        print(string[:-1])
class AirportHistoryElement(object):
    """Record of one aircraft's past stay at an airport.

    `record` is the (arrival_time, callsign, icao) triple stored while the
    aircraft was parked; callsign/icao are captured both on arrival and on
    departure because they may change in between.
    """
    def __init__(self, aircraft, record, departure_time):
        self.aircraft = aircraft
        self.arrival_time = record[0]
        self.departure_time = departure_time
        self.arr_callsign = record[1]
        self.arr_icao = record[2]
        self.dep_callsign = aircraft.callsign
        self.dep_icao = aircraft.icao
    def __str__(self):
        pieces = (
            str(self.aircraft.id),
            ' ', str(self.arrival_time), '-', str(self.departure_time),
            ' ', self.arr_callsign, '-', self.dep_callsign,
            ' ', self.arr_icao, '-', self.dep_icao,
        )
        return ''.join(pieces)
class Airports(object):
    """An in-memory collection of Airport objects with JSON persistence."""
    def emtpy(self):
        # kept under its historical (misspelled) name for backward compatibility
        self.elements = list()
    # correctly-spelled alias for the method above
    empty = emtpy
    def __init__(self):
        self.emtpy()
    def __str__(self):
        return "".join(str(a) for a in self.elements)
    def first(self, n):
        """Truncate the collection to its first `n` airports and return self."""
        self.elements = self.elements[:n]
        return self
    def append(self, el):
        self.elements.append(el)
    def remove(self, el):
        self.elements.remove(el)
    def random(self):
        """Return a uniformly random airport from the collection."""
        return self.elements[random.randint(0, len(self.elements)-1)]
    def get(self, icao):
        """Return the airport with the given ICAO code, or None if absent."""
        for a in self.elements:
            if a.icao == icao:
                return a
        return None
    def to_file(self, filename):
        """Serialise the collection to `filename` as JSON (values as strings).

        BUG FIXES vs. the original: the local accumulator no longer shadows
        the `json` module, the file handle is closed via `with`, and an empty
        collection now writes valid JSON ('[\\n]') instead of '\\n]'.
        """
        payload = '[\n'
        for a in self.elements:
            payload += '  {\n'
            payload += '    "icao" : "%s",\n' % a.icao
            payload += '    "name" : "%s",\n' % a.name
            payload += '    "lat" : "%s",\n' % a.lat
            payload += '    "lon" : "%s",\n' % a.lon
            payload += '    "alt" : "%s",\n' % a.alt
            payload += '    "cat" : "%s"\n' % a.cat
            payload += '  },\n'
        if self.elements:
            payload = payload[:-2] + '\n]'
        else:
            payload += ']'
        with open(filename, "w") as file:
            file.write(payload)
    def from_file(self, filename):
        """Replace the collection's contents with airports read from a JSON file."""
        with open(filename, "r") as file:
            json_data = json.loads(file.read())
        self.elements = list()
        for d in json_data:
            self.append(Airport(d['icao'], float(d['lat']), float(d['lon']),
                                float(d['alt']), d['cat'], d['name']))
# Airport size categories; 'all' is only meaningful for dataset file names.
categories = ['large', 'medium', 'small', 'all']
def from_opensky(airport):
    # from an opensky format airport, return an Airport
    # NOTE(review): the column layout ([0]=name, [2]=icao, [3]=lat, [4]=lon,
    # [6]=alt, [7]=type string) is assumed from the usage below — verify
    # against the actual OpenSky dump.
    typ = 'oops'  # sentinel category if the type string matches nothing known
    for c in categories[:-1]:
        if c in airport[7]:
            typ = c[0].upper()
            break
    return Airport(airport[2], airport[3], airport[4], airport[6], typ, airport[0])
def airports_from_file(category):
    # returns the airports from the given category stored in data/
    """Load and return the Airports collection saved under data/ for `category`."""
    collection = Airports()
    collection.from_file('data/' + category + '_airports.json')
    return collection
def time_add(times):
    """Sum a mixed list of time values into a single datetime.

    Each element may be a datetime, a time_format string, or a number of
    hours (int/float).  The first element anchors the sum; microseconds are
    stripped from the result so serialised times round-trip cleanly.

    BUG FIXES vs. the original: the caller's list is no longer mutated in
    place, and an empty list raises ValueError instead of UnboundLocalError.
    """
    new_time = None
    for item in times:
        if type(item) == str:
            item = datetime.strptime(item, time_format)
        elif type(item) in [int, float]:
            item = timedelta(hours=item)
        new_time = item if new_time is None else new_time + item
    if new_time is None:
        raise ValueError('time_add() needs at least one element')
    return new_time - timedelta(microseconds=new_time.microsecond)
def load_icaos(n=0):
    # load PIA icaos from file
    """Populate the module-level ICAO pools (no-op after the first call).

    With n == 0 the addresses come from data/icaos.txt (one per line);
    otherwise the pool is simply the integers 0..n-1.  Returns the list of
    currently available (unassigned) ICAO addresses.
    """
    if len(myicaos)==0:
        if n==0:
            # `with` guarantees the handle is closed even on a read error
            # (the original open()/close() pair leaked it on failure)
            with open('data/icaos.txt', 'r') as f:
                myicaos.extend(f.read().split('\n')[:-1])
        else:
            myicaos.extend(list(range(0,n)))
        avail_icaos.extend(myicaos)
    return avail_icaos
def get_icao(old=None):
    # returns a random unused ICAO address
    """Draw a random unused ICAO address from the pool.

    Passing `old` returns that address to the pool first (address swap).
    """
    if len(myicaos) == 0:
        load_icaos(n=100000)
    if old is not None:
        # recycle the caller's previous address
        avail_icaos.append(old)
    chosen = avail_icaos[random.randint(0, len(avail_icaos)-1)]
    avail_icaos.remove(chosen)
    return chosen
def date(string):
    """Parse a time_format ('%Y-%m-%d %H:%M:%S') string into a datetime."""
    return datetime.strptime(string, time_format)
| guillaumemichel/aircraft-privacy-simulator | structures.py | structures.py | py | 12,932 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
23214835305 | # -*- coding: utf-8 -*-
import json
import re
from datetime import date
import dryscrape
from bs4 import BeautifulSoup
# Column headers of the exchange table (kept in Russian: they are used as
# dict keys for the scraped page at runtime).
titles = [
    'Биржевой инструмент', # 0
    'Предл.', # 1
    'Спрос', # 2
    'Ср.вз. цена', # 3
    'Объем договоров', # 4
    'Кол - во дог.', # 5
    'НПЗ' # 6
]
# Character class of everything to strip from scraped text.
# NOTE(review): inside a character class the parentheses and '.' are literal
# characters, so this class is almost certainly wider than intended — verify
# before changing it, since all parsing depends on it.
remove_pattern = r"[^(a-z)(A-Z)(0-9)(р.)(т.)(+)(-)(%)]"
# accumulate a completed trade into the running totals
def add_trade_in_total_stat(data, old_data, new_trades, total_stat):
    """Fold `new_trades` new deals (the diff between snapshots) into total_stat."""
    total_stat['count'] += new_trades
    total_stat['amount'] += get_trade_amount(data=data, old_data=old_data)
    price = data[titles[3]]['price']
    if total_stat['average_price']:
        # NOTE(review): this is a pairwise running mean (later prices weigh
        # more), not a volume-weighted average — confirm this is intended.
        total_stat['average_price'] += price
        total_stat['average_price'] /= 2
    else:
        total_stat['average_price'] = price
# volume delta between two snapshots of a row
def get_trade_amount(data, old_data):
    """Return how much volume was traded between the two table snapshots."""
    latest = get_number(data[titles[4]]['amount'])
    previous = get_number(old_data[titles[4]]['amount'])
    return latest - previous
# strip unwanted characters from scraped text
def get_clear_text(dirty_text):
    """Return `dirty_text` with every character matching remove_pattern removed."""
    cleaned = re.sub(remove_pattern, '', dirty_text)
    return cleaned
# parse an integer out of arbitrary text
def get_number(string):
    """Return `string` as an int, dropping every non-digit character.

    Integers pass through unchanged.  Note that a minus sign is a non-digit
    and is dropped as well.
    """
    if isinstance(string, int):
        return string
    digits_only = re.sub(r"\D", '', string)
    return int(digits_only)
# parse one table row into a dict keyed by `titles`
def get_data(tr):
    """Parse one <tr> of the SPIMEX trades table.

    Returns (row_id, data): data maps each header in `titles` to the parsed
    cell content; cells that fail to parse fall back to zero/'-' defaults.
    """
    data = {}
    data['id'] = tr['id']
    children = tr.find_all('td', recursive=False)
    # 0: instrument name
    data[titles[0]] = children[0].find('a').get_text()
    # 1: ask (offer) price / amount
    try:
        supply = {}
        supply['price'] = children[1].find('span', class_="red").get_text()
        supply['price'] = get_clear_text(supply['price'])
        supply['amount'] = children[1].find('span', class_="gray").get_text()
        supply['amount'] = get_clear_text(supply['amount'])
        data[titles[1]] = supply
    except AttributeError:  # .find() returned None: the cell is empty
        data[titles[1]] = { 'price': 0, 'amount': 0 }
    # 2: bid price / amount
    try:
        demand = {}
        demand['price'] = children[2].find('span', class_="green").get_text()
        demand['price'] = get_clear_text(demand['price'])
        demand['amount'] = children[2].find('span', class_="gray").get_text()
        demand['amount'] = get_clear_text(demand['amount'])
        data[titles[2]] = demand
    except AttributeError:
        data[titles[2]] = { 'price': 0, 'amount': 0 }
    # 3: weighted average price and its percent change
    try:
        average = {}
        average['percent'] = children[3].find('span', class_="green").get_text()
        average['percent'] = get_clear_text(average['percent'])
        average['price'] = children[3].find(text=True)
        average['price'] = get_number(get_clear_text(average['price']))
        data[titles[3]] = average
    except (AttributeError, TypeError):
        # TypeError added: find(text=True) can return None, which re.sub rejects
        data[titles[3]] = { 'percent': 0, 'price': 0 }
    # 4: total contract volume / cost
    try:
        size = {}
        size['amount'] = children[4].find('span', class_="gray").get_text()
        size['amount'] = get_clear_text(size['amount'])
        size['cost'] = children[4].find(text=True)
        size['cost'] = get_clear_text(size['cost'])
        data[titles[4]] = size
    except (AttributeError, TypeError):
        data[titles[4]] = { 'amount': 0, 'cost': 0 }
    # 5: number of contracts
    try:
        trades_count = children[5].find(text=True)
        trades_count = get_clear_text(trades_count)
        data[titles[5]] = int(trades_count)
    except (ValueError, TypeError):
        # ValueError: int('') on a blank cell; TypeError (previously
        # uncaught): find() returned None and re.sub() rejected it
        data[titles[5]] = 0
    # 6: refinery name
    try:
        company_name = children[6].find(text=True)
        company_name = get_clear_text(company_name)
        data[titles[6]] = company_name
    except (ValueError, TypeError):
        # get_clear_text never raises ValueError here, so the original
        # handler was dead code; TypeError covers a missing cell
        data[titles[6]] = '-'
    return data['id'], data
# did the contract counter increase between snapshots?
def check_new_trades(data, old_data):
    """Return the change in contract count between two snapshots of a row."""
    latest = get_number(data[titles[5]])
    previous = get_number(old_data[titles[5]])
    return latest - previous
# build the (Russian) notification message for the bot
def generate_msg(data, old_data=None, new_trades=1):
    """Return the Telegram message for a new deal (old_data given) or a new ask/bid."""
    title = data[titles[0]]
    id = data['id']
    # with old_data: one or more deals happened since the previous snapshot
    if old_data:
        msg = 'На бирже ПРОИЗОШЛА!!! "СДЕЛКА!!!"'
        if new_trades > 1:
            msg = 'На бирже ПРОИЗОШЛИ!!! "СДЕЛКИ!!!" (%s)' % new_trades
        price = data[titles[3]]['price']
        amount = get_trade_amount(data, old_data)
        msg += ' на "{0}"(id={1}) по "ЦЕНЕ"({2}р.) в "ОБЪЁМЕ"({3}т.)' \
            .format(
                title, # instrument name
                id,
                price, # weighted average price
                amount # traded volume
            )
        return msg
    # otherwise: a new ask (supply) or, failing that, a new bid (demand)
    if data[titles[1]]['amount']:
        msg = 'На бирже появилось "ПРЕДЛОЖЕНИЕ"'
        price = data[titles[1]]['price']
        amount = get_number(data[titles[1]]['amount'])
    else:
        msg = 'На бирже появился "СПРОС"'
        price = data[titles[2]]['price']
        amount = get_number(data[titles[2]]['amount'])
    msg += ' на "{0}"(id={1}) по "ЦЕНЕ"({2}р.) в "ОБЪЁМЕ"({3}т.)' \
        .format(
            title, # instrument name
            id,
            price, # price
            amount # volume
        )
    return msg
# persist current state so a restarted script can resume mid-day
def data_to_cache(positions, total_stat):
    """Dump `positions` and `total_stat` to per-day JSON files under cache/."""
    # `with` replaces the original open()/close() pairs so the handles are
    # closed even if JSON serialisation fails
    with open('cache/positions_%s.json' % date.today(), 'w') as positions_file:
        positions_file.write(json.dumps(positions))
    with open('cache/total_stat_%s.json' % date.today(), 'w') as total_stat_file:
        total_stat_file.write(json.dumps(total_stat))
def run_bot(positions, total_stat, dispatcher, chat_id, add_link=True):
    """Scrape the SPIMEX oil-products page for gas-condensate rows, diff them
    against `positions`, send a Telegram message for every change, and persist
    the state via data_to_cache()."""
    # Some sites apply minimal protection and refuse content without a
    # user-agent header (kept here for reference):
    # headers = {'user-agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'}
    base_url = 'http://spimex.com/'
    url = base_url + 'markets/oil_products/trades/'
    # To defeat caching at any level a random query parameter could be added:
    # url = base_url + 'markets/oil_products/trades/?r=' + str(random.random())
    session = dryscrape.Session()
    session.visit(url)
    response = session.body()
    soup = BeautifulSoup(response, "lxml")
    tds = soup.find_all('td', class_='td_name')
    count = len(tds)
    # match "газовый конденсат" in either word order, case-insensitively
    search_pattern = re.compile(r"(конденсат газовый)|(газовый конденсат)", re.IGNORECASE)
    print('%s инструментов по url' % count, url)
    for td in tds:
        if not search_pattern.search(td.text):
            continue
        msg = ''
        # parent row of the cell containing the matching text
        tr = td.find_previous('tr')
        id, data = get_data(tr)
        if id in positions: # position seen before: look for new deals
            old_data = positions[id]
            new_trades = check_new_trades(data=data, old_data=old_data)
            if new_trades > 0:
                positions[id] = data
                msg = generate_msg(data=data, old_data=old_data, new_trades=new_trades)
                add_trade_in_total_stat(data=data, old_data=old_data, new_trades=new_trades, total_stat=total_stat)
        else:
            positions[id] = data
            msg = generate_msg(data=data)
        if msg:
            parse_mode = None
            disable_web_page_preview = None
            if add_link:
                # append the instrument link as raw HTML
                parse_mode = 'HTML'
                disable_web_page_preview = True
                a = tr.find('a', attrs={"title": "Информация об инструменте"})
                a['href'] = base_url + a['href']
                msg += '\r\n'
                msg += str(a)
            print(msg)
            dispatcher.bot.send_message(
                chat_id=chat_id,
                text=msg,
                parse_mode=parse_mode,
                disable_web_page_preview=disable_web_page_preview
            )
    data_to_cache(positions, total_stat)
| rbikbov/test_python_bot | bot.py | bot.py | py | 9,250 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number":... |
24438494857 | # Some functions for the Mojang API
# import requests
import requests
import datetime
import json
class MinecraftUUIDError(ValueError):
    """Raised when a UUID does not resolve to a Minecraft profile."""
    pass
class MinecraftUsernameError(ValueError):
    """Raised when a username does not resolve to a Minecraft profile."""
    pass
class Player:
    """A Minecraft player resolved through the Mojang API.

    Construct with either a username or a UUID; the missing identifier is
    looked up over the network inside __init__.
    """
    # Essential variables (class-level defaults, overwritten per instance)
    username = None
    uuid = None
    alias = None
    # Vars to be used in called methods
    names = None  # cached name history, filled lazily by name_history()
    def __init__(self, id, alias=None):
        """Resolve `id` (username or UUID) to a username+UUID pair.

        Raises MinecraftUUIDError / MinecraftUsernameError when the lookup
        fails.  Anything longer than 16 characters is treated as a UUID
        (Minecraft usernames are capped at 16 characters).
        """
        self.alias = alias
        if len(id) > 16:
            # It is a uuid, get the username as well
            self.uuid = id
            try:
                self.username = get_player_from_uuid(id)['name']
            except MinecraftUUIDError as err:
                raise err
        else:
            # It is a username, get the uuid
            self.username = id
            try:
                req = get_uuid_from_player(id)
                self.uuid = req[0]['id']
                self.username = req[0]['name']
            except IndexError:
                # the API returns an empty list for unknown usernames
                raise MinecraftUsernameError(f'{id} is not a valid username')
    def name_history(self):
        """Return [(name, changed_at_unix_or_None), ...]; cached after first call.

        The first entry is the original name (timestamp None); Mojang reports
        change times in milliseconds, converted here to whole seconds.
        """
        if self.names is None:
            req_url = f'https://api.mojang.com/user/profiles/{self.uuid}/names'
            req = requests.get(req_url)
            res = req.json()
            self.names = [(res[0]['name'], None)]
            for name in res[1:]:
                self.names.append((name['name'], name['changedToAt'] // 1000))
        return self.names
    def __str__(self):
        return f'{self.username} {self.uuid}'
def get_uuid_from_player(names):
    """Resolve Minecraft username(s) to profile dicts via the Mojang API.

    `names` may be a single username or a list of them; returns the decoded
    JSON list of {'id': ..., 'name': ...} entries (the API omits unknown names).
    """
    # normalise a single name to a list — idiomatic isinstance check instead
    # of the original `type(names) == ''.__class__`
    if isinstance(names, str):
        names = [names]
    url = 'https://api.mojang.com/profiles/minecraft'
    req = requests.post(url, json=names)
    return req.json()
def get_player_from_uuid(uuid):
    """Fetch the session-server profile for `uuid`.

    Raises MinecraftUUIDError when the response body is not valid JSON
    (which is what the endpoint returns for an unknown/invalid UUID).
    """
    endpoint = f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}'
    response = requests.get(endpoint)
    try:
        return response.json()
    except json.decoder.JSONDecodeError:
        raise MinecraftUUIDError(f'{uuid} is not a valid UUID')
if __name__ == '__main__':
    # smoke test: resolve the short username '8' and print its name history
    player = Player('8')
    names = player.name_history()
    for name in names:
        try:
            print(f'Switched to {name[0]} {datetime.datetime.fromtimestamp(name[1])}')
        except TypeError as err:
            # the first record carries a None timestamp (the original name)
            print(f'Original name: {name[0]}')
| joshuaSmith2021/chamosbot-gassistant | mojang.py | mojang.py | py | 2,392 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "json.decoder",
"line_numb... |
30079500269 | import QRTicketing
import cv2
def Decode(str):
    """Decode a Caesar-shifted (shift +5 on A-Z) string back to plain text.

    BUG FIX: the original built the shifted string in `cipherstr` but then
    returned the untouched input; it also crashed with ValueError on any
    character outside A-Z.  Non-alphabet characters now pass through.
    (The parameter keeps its historical name `str` — shadowing the builtin —
    so positional/keyword callers are unaffected.)
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    decoded = ''
    for ch in str:
        if ch in alphabet:
            decoded += alphabet[(alphabet.index(ch) - 5) % 26]
        else:
            decoded += ch
    return decoded
# Scan a QR ticket with the webcam, verify it and record the entry.
cap = cv2.VideoCapture(0)
detector = cv2.QRCodeDetector()
a = None  # decoded QR payload, expected format '<encoded name>_<ticket info>'
while True:
    _, img = cap.read()
    data, bbox, _ = detector.detectAndDecode(img)
    if data:
        a = data
        break
    cv2.imshow("image", img)
    if cv2.waitKey(1) == ord("q"):  # let the user abort without scanning
        break
# release the camera and close the preview window (the original leaked both)
cap.release()
cv2.destroyAllWindows()
# BUG FIX: the original sliced with string literals (a[:"_"] and a["_"+1:]),
# which raise TypeError; split the payload at the first underscore instead.
# The `a` guard also covers aborting with 'q' (a was previously unbound then).
if a and QRTicketing.main.verifyQR(a):
    encoded_name, _, ticket_info = a.partition("_")
    QRTicketing.db_update([Decode(encoded_name), ticket_info])
    print("ValidQr")
else:
    print("Invalid QR")
{
"api_name": "cv2.VideoCapture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.QRCodeDetector",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"lin... |
25166244861 | import re
from django import forms
from crispy_forms.helper import FormHelper
from content.src.reg_expressions import RegExpressions
from content.models import Content, Video
from product.models import Product
class StyleMixin:
    """Mixin attaching a crispy-forms FormHelper for consistent form markup."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
class ValidateMixin:
    """Mixin with validation methods shared by the content-related forms.

    Expects the form to set ``self.user`` (see the forms' __init__) and to
    expose the is_free / is_paid_subs / is_src_subs / is_purchase fields.
    Error messages are user-facing and intentionally kept in Russian.
    """
    def clean_is_paid_subs(self):
        """Paid-subscription content requires the author to have a price set."""
        cleaned_data = self.cleaned_data.get('is_paid_subs')
        user_paid = Product.objects.filter(user=self.user)
        if cleaned_data and not user_paid:
            raise forms.ValidationError('Невозможно создать видео по подписке'
                                        ' так как вы не указали цену подписки'
                                        ' на пользователя при регистрации.'
                                        'Указать цену можно на странице '
                                        'редактирования пользователя')
        return cleaned_data
    def clean(self):
        """Cross-field check: exactly-sensible availability flag combinations.

        At least one availability flag must be set, and 'free' is mutually
        exclusive with every paid option.
        """
        cleaned_data = super().clean()
        is_free = self.cleaned_data.get('is_free')
        is_paid_subs = self.cleaned_data.get('is_paid_subs')
        is_src_subs = self.cleaned_data.get('is_src_subs')
        is_purchase = self.cleaned_data.get('is_purchase')
        if True not in [is_free, is_paid_subs, is_src_subs, is_purchase]:
            raise forms.ValidationError('Укажите минимум один параметр '
                                        'доступности видео: бесплатно, по '
                                        'подписке, по подписке на сервис, '
                                        'по разовой покупке')
        if is_free and is_paid_subs:
            raise forms.ValidationError('Видео не может быть одновременно '
                                        'бесплатным и по подписке на '
                                        'пользователя')
        if is_free and is_src_subs:
            raise forms.ValidationError('Видео не может быть одновременно '
                                        'бесплатным и по подписке на сервис')
        if is_free and is_purchase:
            raise forms.ValidationError('Видео не может быть одновременно '
                                        'бесплатным и доступным к покупке в'
                                        ' коллекцию')
        return cleaned_data
class ContentForm(StyleMixin, ValidateMixin, forms.ModelForm):
    """Form for creating a new content item (labels/help texts are user-facing
    and kept in Russian)."""
    title = forms.CharField(
        label="Название",
        help_text="Введите название записи. Ограничение 150 символов.",
        widget=forms.TextInput(
            attrs={
                'placeholder': "Лучшее название на планете..."},
        ),
        max_length=100,
        required=True,
    )
    description = forms.CharField(
        label="Описание",
        help_text="Введите название записи. Ограничение 150 символов.",
        widget=forms.Textarea(
            attrs={
                'placeholder': "Лучшее Описание на планете..."},
        ),
        required=True,
    )
    image = forms.ImageField(
        label="Изображение",
        help_text="Используйте изображение с соотношением сторон 16 на 9. "
                  "Данное изображение будет использовано как заставка к "
                  "видео . Если поле оставить пустым, то будет использовано "
                  "превью видео из YouTube.",
        required=False,
    )
    is_free = forms.BooleanField(
        label="Бесплатный контент",
        help_text="Установите галочку если контент будет доступен всем "
                  "пользователям без какой-либо оплаты."
                  "Если активно, то будет игнорироваться поле 'цена'",
        required=False,
    )
    start_publish = forms.DateTimeField(
        label="Время публикации",
        widget=forms.DateTimeInput(
            attrs={'type': 'datetime-local'},
        ),
        help_text="Укажите дату и время в которое автоматически будет "
                  "опубликована запись",
        required=False
    )
    is_publish = forms.BooleanField(
        label="Опубликовать сразу",
        help_text="Если активно, то запись будет опубликована "
                  "сразу после создания",
        required=False,
    )
    is_paid_subs = forms.BooleanField(
        label="Контент в подписке на пользователя",
        help_text='Установите галочку если контент будет доступен всем '
                  'пользователям оплатившим подписку на вас',
        required=False,
    )
    is_src_subs = forms.BooleanField(
        label="Контент в подписке на сервис",
        help_text='Установите галочку если контент будет доступен всем '
                  'пользователям оплатившим подписку на сервис. Вы будете'
                  'получать ежемесячное роялти в зависимости от просмотров',
        required=False,
    )
    is_purchase = forms.BooleanField(
        label="Контент доступен для покупки в коллекцию",
        help_text='Установите галочку если контент будет доступен для '
                  'единовременной покупки. Пользователь получит доступ к '
                  'контенту навсегда, а вы разовую единовременную оплату.'
                  'Если поле активно, необходимо казать цену для разовой'
                  ' покупки',
        required=False,
    )
    def __init__(self, *args, **kwargs):
        """Pop the current user from kwargs; ValidateMixin reads self.user."""
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
    class Meta:
        model = Content
        fields = ('title', 'description', 'image', 'start_publish',
                  'is_publish', 'is_free', 'is_paid_subs', 'is_src_subs',
                  'is_purchase')
class ContentUpdateForm(StyleMixin, ValidateMixin, forms.ModelForm):
"""Класс описывающий форму для обновления экземпляра контента"""
title = forms.CharField(
label="Название",
help_text="Введите название записи. Ограничение 150 символов.",
widget=forms.TextInput(
attrs={
'placeholder': "Лучшее название на планете..."},
),
max_length=100,
required=True,
)
description = forms.CharField(
label="Описание",
help_text="Введите название записи. Ограничение 150 символов.",
widget=forms.TextInput(
attrs={
'placeholder': "Лучшее Описание на планете..."},
),
required=True,
)
image = forms.ImageField(
label="Изображение",
help_text="Используйте изображение с соотношением сторон 16 на 9. "
"Данное изображение будет использовано как заставка к "
"видео . Если поле оставить пустым, то будет использовано "
"превью видео из YouTube.",
required=False,
)
is_free = forms.BooleanField(
label="Бесплатный контент",
help_text="Установите галочку если контент будет доступен всем "
"пользователям без какой-либо оплаты."
"Если активно, то будет игнорироваться поле 'цена'",
required=False,
)
is_paid_subs = forms.BooleanField(
label="Контент в подписке на пользователя",
help_text='Установите галочку если контент будет доступен всем '
'пользователям оплатившим подписку на вас',
required=False,
)
is_src_subs = forms.BooleanField(
label="Контент в подписке на сервис",
help_text='Установите галочку если контент будет доступен всем '
'пользователям оплатившим подписку на сервис. Вы будете'
'получать ежемесячное роялти в зависимости от просмотров',
required=False,
)
is_purchase = forms.BooleanField(
label="Контент доступен для покупки в коллекцию",
help_text='Установите галочку если контент будет доступен для '
'единовременной покупки. Пользователь получит доступ к '
'контенту навсегда, а вы разовую единовременную оплату.'
'Если поле активно, необходимо казать цену для разовой'
' покупки',
required=False,
)
    class Meta:
        # Model binding; publishing fields ('start_publish', 'is_publish')
        # present on the related create form are deliberately excluded here.
        model = Content
        fields = ('title', 'description', 'image',
                  'is_free', 'is_paid_subs', 'is_src_subs', 'is_purchase')
    def __init__(self, *args, **kwargs):
        """Override to capture the requesting user for field filtering.

        Expects a mandatory 'user' keyword argument (popped before the base
        ModelForm sees kwargs); raises KeyError if missing.
        NOTE(review): the original docstring mentioned filtering a 'clients'
        field, which this form does not declare — likely a copy-paste relic.
        """
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
class VideoForm(StyleMixin, forms.ModelForm):
    """Form describing a video attached to a piece of content.

    Only YouTube links are accepted; on save the YouTube video id is
    extracted from the URL and stored on the instance.
    """

    url = forms.URLField(
        help_text="Ссылка на видео размещенное на видеохостинге YouTube."
                  "Ссылки на другой видеохостинг работать не будут. ",
        widget=forms.TextInput(
            attrs={
                'placeholder': "https://www.youtube.com/..."},
        ),
        max_length=150,
    )

    def save(self, commit=True):
        """Populate ``video_id`` from the URL while saving.

        Bug fix: the previous implementation called ``instance.save()``
        unconditionally; the ``commit`` flag is now honoured, matching
        Django's ModelForm contract so callers can post-process the
        unsaved instance (e.g. attach a FK) before persisting it.
        """
        self.instance = super().save(commit=False)
        self.instance.video_id = (
            RegExpressions.get_video_id(self.cleaned_data['url']))
        if commit:
            self.instance.save()
        return self.instance

    def clean_url(self):
        """Validate that the link points to YouTube."""
        cleaned_data = self.cleaned_data.get('url')
        # Guard clause: missing URL first, then hosting check.
        if not cleaned_data:
            raise forms.ValidationError(
                'Кажется вы забыли указать ссылку на видео')
        if 'youtu' not in cleaned_data:
            raise forms.ValidationError(
                'Допускается использование видео только'
                ' с хостинга "YouTube"')
        return cleaned_data

    class Meta:
        model = Video
        fields = 'url',
| NewterraV/content_selling_platform | content/forms.py | forms.py | py | 13,160 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "crispy_forms.helper.FormHelper",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "product.models.Product.objects.filter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "product.models.Product.objects",
"line_number": 25,
"usage_type": "a... |
31065164595 |
from ..utils import Object
class UpdateMessageEdited(Object):
    """
    A message was edited. Changes in the message content will come in a separate updateMessageContent

    Attributes:
        ID (:obj:`str`): ``UpdateMessageEdited``

    Args:
        chat_id (:obj:`int`):
            Chat identifier
        message_id (:obj:`int`):
            Message identifier
        edit_date (:obj:`int`):
            Point in time (Unix timestamp) when the message was edited
        reply_markup (:class:`telegram.api.types.ReplyMarkup`):
            New message reply markup; may be null

    Returns:
        Update

    Raises:
        :class:`telegram.Error`
    """
    ID = "updateMessageEdited"

    def __init__(self, chat_id, message_id, edit_date, reply_markup, **kwargs):
        # Raw scalar fields of the update payload.
        self.chat_id = chat_id  # int
        self.message_id = message_id  # int
        self.edit_date = edit_date  # int
        # Deserialized reply markup object (or None).
        self.reply_markup = reply_markup  # ReplyMarkup

    @staticmethod
    def read(q: dict, *args) -> "UpdateMessageEdited":
        # Deserialize the nested markup first, then build the update from
        # the remaining scalar fields.
        markup = Object.read(q.get('reply_markup'))
        return UpdateMessageEdited(
            q.get('chat_id'),
            q.get('message_id'),
            q.get('edit_date'),
            markup,
        )
| iTeam-co/pytglib | pytglib/api/types/update_message_edited.py | update_message_edited.py | py | 1,307 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 43,
"usage_type": "name"
}
] |
11576039211 | from socket import *
from datetime import datetime
serverPort = 8080
serverSocket = socket(AF_INET, SOCK_DGRAM)
#atribui a porta ao socket criado
serverSocket.bind(('', serverPort))
print("The server is ready to receive")
while True:
#recebe a mensagem do cliente em bytes
message, clientAddress = serverSocket.recvfrom(2048)
print("mensagem recebida: ", message)
now = datetime.now()
msg = message.decode()
msg.upper()
msg = str(msg) + ' : ' + str(now)
message = str.encode(msg)
#envio tbm deve ser em bytes
serverSocket.sendto(message, clientAddress) | srpantoja/Redes_trabalhos | python/UDPServer.py | UDPServer.py | py | 593 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
}
] |
34357549837 | import argparse
import modelsim_utils
import time
# Command-line interface: forwards the chosen .do script and the
# pane-shift delay to the ModelSim automation helper.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--do_file_name' , default = 'run_cmd.do')  # ModelSim .do script to execute
parser.add_argument('-r', '--run_to_pane_shift_sleep_sec', default = 4) # 7
parser.add_argument('-t','--true_or_false_flag_example', action='store_true') # true if add flag to cmd line, false if don't
args = parser.parse_args()
# Brief pause before driving the simulator UI.
time.sleep(1)
# The sleep value is parsed as a string (no type=int above), hence int() here.
modelsim_utils.auto_run(args.do_file_name, int(args.run_to_pane_shift_sleep_sec))
| Brandon-Valley/examples | python/script_arg_parse.py | script_arg_parse.py | py | 496 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "modelsim_utils.auto_run",
"line_number": 14,
"usage_type": "call"
}
] |
22108118261 | #!/usr/bin/env python
# coding: utf-8
# Loading the libraries
import requests
from bs4 import BeautifulSoup
import time
import random
from tqdm.notebook import tqdm as tqdm
# Part a
page_url = "https://www.barnesandnoble.com/b/books/_/N-1fZ29Z8q8?Nrpp=40&page=1"
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"}
search_page = requests.get(page_url, headers=headers)
page_info = BeautifulSoup(search_page.content, "html.parser")
# Part b
url_prefix = "https://www.barnesandnoble.com"
list_product_header = page_info.find_all("h3", class_ = "product-info-title")
list_product_urls = []
product_dict = dict()
for i, product in enumerate(list_product_header):
product_url = url_prefix + product.find("a")['href']
list_product_urls = list_product_urls + [product_url]
product_name = product.find("a").text
product_dict[i+1] = product_name
# Part c
base_fname = "top100_bn_"
for i, product_url in enumerate(list_product_urls):
    # Fetch each product page and persist the raw HTML for offline parsing.
    product_search = requests.get(product_url, headers=headers)
    fname = f"{base_fname}_{i+1}.html"
    # Bug fix: open in 'w' instead of 'a+' so a re-run overwrites the file
    # rather than appending a second copy of the page (which would corrupt
    # later parsing); the redundant f.close() inside the with-block is gone.
    with open(fname, "w") as f:
        f.write(str(product_search.content))
    # Randomized delay between requests to stay polite to the server.
    sleep_time = random.randint(5, 10)
    time.sleep(sleep_time)
# Part d
prod_count = len(list_product_header)
for i in range(prod_count):
fname = f"{base_fname}_{i+1}.html"
with open(fname, "r") as f:
page_content = BeautifulSoup(f, "html.parser")
f.close()
overview_box = page_content.find("div", class_ = "content overview-expandable-section")
overview_content = overview_box.find("div", class_ = "overview-cntnt")
print(f"Overview content for '{product_dict[i+1]}'")
print(overview_content.text[:100])
print("") | jeetp465/Web-Scraping | Barnes and Noble Scraping.py | Barnes and Noble Scraping.py | py | 1,841 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "random.randint",
"lin... |
8838127682 | """Models for Blogly."""
from flask_sqlalchemy import SQLAlchemy
import datetime
db = SQLAlchemy()
DEFAULT_IMAGE_URL = "https://cdn2.iconfinder.com/data/icons/avatars-99/62/avatar-370-456322-512.png"
def connect_db(app):
"""Connect to database."""
db.app = app
db.init_app(app)
class User(db.Model):
    """A blog user.

    Columns: auto-increment primary key, required first name, optional
    last name, and an avatar URL falling back to DEFAULT_IMAGE_URL.
    Deleting a user cascades to their posts via the ``posts`` relationship.
    """
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key = True, autoincrement = True)
    first_name = db.Column(db.String(50), nullable = False)
    last_name = db.Column(db.String(50), nullable = True)
    image_url = db.Column(db.String, default = DEFAULT_IMAGE_URL)
    posts = db.relationship("Post", backref="user", cascade="all, delete-orphan")
    def __repr__(self):
        return f"<User {self.first_name} {self.last_name} {self.image_url} >"
    def get_full_name(self):
        """Return "first last", or just the first name when last_name is unset."""
        # Idiom fix: identity comparison with None ('is None', not '== None').
        if self.last_name is None:
            return self.first_name
        return f"{self.first_name} {self.last_name}"
class Post(db.Model):
    """A blog post authored by a User."""
    __tablename__ = "posts"
    id = db.Column(db.Integer, primary_key = True, autoincrement = True)
    title = db.Column(db.String(50), nullable = False)
    content = db.Column(db.String, nullable = True)
    # Default is evaluated at insert time; datetime.now yields naive local time.
    created_at = db.Column(db.DateTime, nullable = False, default=datetime.datetime.now)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable = False)
    # posts_n_tags = db.relationship('PostTag', backref = 'post')
    def __repr__(self):
        return f"<Post {self.title} {self.content} {self.created_at} {self.user_id} >"
    def friendly_date(self):
        """Show date in a user friendly format.

        NOTE(review): '%-d' / '%-I' are glibc-specific strftime extensions
        and raise ValueError on Windows — confirm deployment targets.
        """
        return self.created_at.strftime("%a %b %-d %Y, %-I:%M %p")
class Tag(db.Model):
    """A tag attachable to many posts (many-to-many via the posts_tags table)."""
    __tablename__ = "tags"
    id = db.Column(db.Integer, primary_key = True, autoincrement = True)
    name = db.Column(db.String(50), nullable = False, unique = True)
    # tags_n_posts = db.relationship('PostTag', backref = 'tag')
    # NOTE(review): cascade="all, delete" on a secondary (many-to-many)
    # relationship is unusual — deleting a Tag may delete related Post rows;
    # confirm this is intended.
    posts = db.relationship(
        'Post',
        secondary = 'posts_tags',
        backref = "tags",
        cascade="all, delete")
    def __repr__(self):
        return f"<Tag {self.id} {self.name}>"
class PostTag(db.Model):
"""PostTag class."""
__tablename__ = "posts_tags"
post_id = db.Column(db.Integer, db.ForeignKey("posts.id"), primary_key = True)
tag_id = db.Column(db.Integer, db.ForeignKey("tags.id"), primary_key = True)
def __repr__(self):
return f"<PostTag {self.post_id} {self.tag_id}>" | kabdrau/Blogly-application | models.py | models.py | py | 2,622 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 45,
"usage_type": "attribute"
}
] |
15870462971 | import os
import sys
import time
import torch
import torch.nn.functional as F
from sklearn.metrics import mean_squared_error
from graphrepr.evaluate import test_model
from graphrepr.savingutils import save_configs, save_history, LoggerWrapper
from graphrepr.config import parse_model_config, parse_representation_config, parse_data_config
from graphrepr.config import utils_section, data_section, params_section, optimizer_section
from graphrepr.chemprop.args import ModelArgs
from graphrepr.chemprop.model import MoleculeModel
from graphrepr.chemprop.features import set_representation
from graphrepr.main_dmpnn_utils import load_data_chemprop, run_epoch, predict
# run: python main.py simple_nn.cfg esol.cfg simple_repr.cfg /home/abc/results
n_args = 1 + 4 # namefile, architecture_config, data_config, representation_config, main_saving_directory
NUM_WORKERS = 2
if __name__ == '__main__':
if len(sys.argv) != n_args:
print(f"Usage: {sys.argv[0]} architecture.cfg data.cfg representation.cfg main_saving_directory")
quit(1)
# set global saving subdir for this experiment and create it
# name of the experiment subdir is derived from the names of the configs
# config name should be: {unique_key}_{whatever}.{ext} ex. 2_best_model.cfg
basename = lambda x: os.path.basename(x).split('.')[0].split('_')[0]
dname = "_".join([basename(x) for x in [sys.argv[1], sys.argv[2], sys.argv[3]]])
saving_dir = os.path.join(sys.argv[4], dname)
try:
os.makedirs(saving_dir)
except FileExistsError:
pass
# setup logger (everything that goes through logger or stderr will be saved in a file and sent to stdout)
logger_wrapper = LoggerWrapper(saving_dir)
sys.stderr.write = logger_wrapper.log_errors
logger_wrapper.logger.info(f'Running {[basename(x) for x in [sys.argv[1], sys.argv[2], sys.argv[3]]]}')
# device selection
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load configs
model_config = parse_model_config(sys.argv[1])
data_config = parse_data_config(sys.argv[2])
representation_config = parse_representation_config(sys.argv[3])
save_configs(sys.argv[1], sys.argv[2], sys.argv[3], saving_dir)
################
# # # DATA # # #
################
set_representation(**representation_config[utils_section])
# define train and validation data splits
if data_config[utils_section]['cv']:
trains = []
vals = []
for val_fold in data_config[data_section].values():
trains.append([fold for fold in data_config[data_section].values() if fold != val_fold])
vals.append([val_fold, ])
splits = list(zip(trains, vals))
else:
trains = [[data_config[data_section][key],] for key in data_config[data_section] if 'train' in key.lower()]
vals = [[data_config[data_section][key],] for key in data_config[data_section] if 'valid' in key.lower()]
splits = list(zip(trains, vals))
# load test
test_smiles, test_labels, test_loader = load_data_chemprop([data_config[utils_section]["test"], ],
data_config, model_config,
shuffle=False, num_workers=NUM_WORKERS)
#####################
# # # MAIN LOOP # # #
#####################
for fold_idx, (train_paths, validation_path) in enumerate(splits):
logger_wrapper.logger.info(f'Running fold {fold_idx+1}/{len(splits)}')
# subdirectory for this fold, file names
fold_subdirectory = os.path.join(saving_dir, f"fold{fold_idx+1}")
try:
os.makedirs(fold_subdirectory)
except FileExistsError:
pass
timestamp = time.strftime('%Y-%m-%d-%H-%M')
best_model_path = os.path.join(fold_subdirectory, f"{timestamp}-best_model_weights.pt")
# loading train and validation datasets
train_dataset, train_smiles, train_loader = load_data_chemprop(train_paths,
data_config, model_config,
shuffle=True, num_workers=NUM_WORKERS)
valid_dataset, valid_smiles, valid_loader = load_data_chemprop(validation_path,
data_config, model_config,
shuffle=False, num_workers=NUM_WORKERS)
# defining model, optimizer, scheduler, and the loss function
if "mse" == data_config[utils_section]['cost_function'].lower().strip():
loss_function = F.mse_loss
loss_function_valid = mean_squared_error # workaround cause we have np.arrays not torch.Tensors // FIXME?
loss_function_model_args = 'mse'
else:
raise NotImplementedError("Unknown loss function; only MSE is currently implemented")
modelArgs = ModelArgs(**model_config[params_section], device=device, loss_function=loss_function_model_args)
model = MoleculeModel(modelArgs).to(device)
logger_wrapper.logger.info(model)
logger_wrapper.logger.info(f"Number of conv layers: {model.encoder.encoder[0].depth}")
if "adam" == model_config[optimizer_section]['optimizer'].lower().strip():
optimizer = torch.optim.Adam(model.parameters(), lr=model_config[optimizer_section]['lr'])
else:
raise NotImplementedError("Unknown optimizer; only Adam is currently implemented")
scheduler = None # for easy checkup later
if model_config[optimizer_section]['scheduler'] > 0:
assert 0 < model_config[optimizer_section]['scheduler'] < 1, "scheduler value must be -1 (no scheduler) or between 0 and 1"
step_size = int(model_config[optimizer_section]['scheduler'] * model_config[optimizer_section]["n_epochs"])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.1) # divide lr by ten after every step_size epochs
# actual training
train_loss = []
valid_loss = []
min_valid_loss = sys.maxsize
for epoch in range(model_config[optimizer_section]["n_epochs"]):
# train
cumulative_epoch_train_loss = run_epoch(model, loss_function, optimizer, train_loader, device)
# validate
true_ys, pred_ys = predict(model, valid_loader, device)
# scheduler
if scheduler is not None:
scheduler.step()
# remember stuff
epoch_valid_loss = loss_function_valid(pred_ys, true_ys)
train_loss.append(cumulative_epoch_train_loss / len(train_dataset))
valid_loss.append(epoch_valid_loss)
logger_wrapper.logger.info(f'Epoch: {epoch}, train loss: {train_loss[-1]}, valid loss: {epoch_valid_loss}')
if epoch_valid_loss < min_valid_loss:
logger_wrapper.logger.info("Saving model")
torch.save(model.state_dict(), best_model_path)
min_valid_loss = epoch_valid_loss
save_history(train_loss, valid_loss, fold_subdirectory)
# testing on the test set
# load the best version of the model, then repack data and run the test function
model.load_state_dict(torch.load(best_model_path))
model.eval() # set dropout and batch normalization layers to evaluation mode before running inference
# train gets new loader without shuffling so the order of smiles is OK # FIXME this is not ideal
train_dataset, train_smiles, train_loader = load_data_chemprop(train_paths,
data_config, model_config,
shuffle=False, num_workers=NUM_WORKERS)
data = ((train_loader, train_smiles), (valid_loader, valid_smiles), (test_loader, test_smiles))
test_model(model, data, device, fold_subdirectory,
calculate_parity=data_config[utils_section]["calculate_parity"],
calculate_rocauc=data_config[utils_section]["calculate_rocauc"],
predict_func=predict)
| gmum/graph-representations | scripts/main_dmpnn.py | main_dmpnn.py | py | 8,394 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
2457622473 |
# This Python code reproduces the upper and low panel of Fig. 6 in Clay et. al. (2008).
#Save the voltage nd time series csv files as v1.csv for the standard HH model (top panel of fig.6 and v2.csv for the revised model (bottom panel for of fig. 6)
from neuron import h
import numpy as np
import matplotlib.pyplot as plt
import timeit
import subprocess
import math
def initialize():
    """Initialize NEURON membrane state (finitialize) and currents (fcurrent)."""
    h.finitialize()
    h.fcurrent()
def integrate():
    """Step the NEURON simulation with h.fadvance() until h.t reaches the
    module-level `tstop`."""
    # g.begin()  # graphing hook, disabled
    h.finitialize()
    # NOTE(review): finitialize() runs again here, re-resetting state set by
    # initialize(); presumably intentional, but worth confirming.
    while h.t<tstop:
        h.fadvance()
cell=h.Section()
nseg=9
cell.nseg=nseg # number of segments
cell.Ra=35.4 # ohm*cm # cable resistance
cell.cm=1
l=1# length of the axon in mm
cell.L=l*1000 # length of the axon in um to be read into NEURON
cell.diam=500 # diameter of the axon in um
#insert the mechanism
#cell.insert('kext_clay') #in case the potassium accumulation is used.
cell.insert('hh') #inserting Clay revised HH model
#cell.insert('hh') # Standard Hodgkin Huxley model
cell.ek = -72
cell.ena = 55
#Stimulation current
stim1=h.IClamp(0,sec=cell)
stim1.delay=100 #ms
stim1.dur=80 #ms
stim1.amp= 250 #nA
#print stim_density * 1000 * area
vec={}
for var in 'i','t','v':
vec[var]=h.Vector()
vec['t'].record(h._ref_t)
vec['v'].record(cell(0.99)._ref_v)
vec['i'].record(stim1._ref_i)
tstop=200
h.dt=0.01
initialize()
integrate()
np.savetxt('v1.csv', vec['v'], delimiter= ',')
np.savetxt('time.csv', vec['t'], delimiter= ',') # saving the time series
cell2=h.Section()
nseg=9
cell2.nseg=nseg # number of segments
cell2.Ra=35.4 # ohm*cm # cable resistance
cell2.cm=1
l=1# length of the axon in mm
cell2.L=l*1000 # length of the axon in um to be read into NEURON
cell2.diam=500 # diameter of the axon in um
#insert the mechanism
cell2.insert('hhrx_clay_2') #inserting Clay revised HH model
cell2.ek = -72
cell2.ena = 55
vec['v2'] = h.Vector()
vec['v2'].record(cell2(0.99)._ref_v)
#Stimulation current
stim1=h.IClamp(0,sec=cell2)
stim1.delay=100 #ms
stim1.dur=80 #ms
stim1.amp= 250 #nA
tstop=200
h.dt=0.01
initialize()
integrate()
np.savetxt('v2.csv', vec['v2'], delimiter= ',')
## code for plotting the results
v1 = np.genfromtxt('v1.csv',delimiter=',')
v2 = np.genfromtxt('v2.csv',delimiter=',')
time = np.genfromtxt('time.csv', delimiter= ',')
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
plt.plot(time,v1)
plt.xlabel( 'Time (ms)')
plt.ylabel(' Voltage (mV)')
plt.text(140,20, 'HH', fontsize = 12)
plt.xlim(80,200)
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticklabels([])
ax.set_yticks([])
plt.ylim(-75,60)
plt.plot((100,180), (50,50), color = 'k', linewidth = 2)
plt.plot((185,185), (-50,0), color = 'k', linewidth = 2)
plt.text(187,-50, '-50', fontsize = 12)
plt.text(187,0, '0 mV', fontsize = 12)
ax = fig.add_subplot(2,1,2)
plt.plot(time,v2)
plt.xlim(80,200)
plt.xlabel( 'Time (ms)')
plt.ylabel(' Voltage (mV)')
plt.text(140,20, 'Clay, et. al. (2008)', fontsize = 12)
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticklabels([])
ax.set_yticks([])
plt.ylim(-75,60)
plt.plot((185,185), (-50,0), color = 'k', linewidth = 2)
plt.plot((125,175), (0,0), color = 'k', linewidth = 2)
plt.text(187,-50, '-50', fontsize = 12)
plt.text(187,0, '0 mV', fontsize = 12)
plt.text(145,-15, '50 ms', fontsize = 12)
try:
plt.savefig('clay_mohit.jpeg',dpi=600, format='jpeg', bbox_inches='tight')
except:
plt.savefig('clay_mohit.jpeg',dpi=600, format='jpeg') # incase bbox_inches failes
plt.show()
| ModelDBRepository/189922 | clay_mohit.py | clay_mohit.py | py | 3,447 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "neuron.h.finitialize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "neuron.h",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "neuron.h.fcurrent",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "neuron.h",
"line_... |
39755602271 | # @Author : tony
# @Date : 2021/5/2
# @Title : epjb2009 paper practice
# @Dec : deal with the dataset
import networkx as nx
# deal with the INT dataset
def readINT(dataUrl):
    """Read the INT GML graph and return its edges as pairs of 0-based node ids."""
    graph = nx.read_gml(dataUrl)
    # Map each (integer) node label to its position in node iteration order.
    label_to_id = {int(label): idx for idx, label in enumerate(graph.nodes())}
    edge_list = []
    for v0, v1 in graph.edges:
        pair = [label_to_id[int(v0)], label_to_id[int(v1)]]
        # Progress trace: print each remapped edge as it is collected.
        print(pair[0], pair[1])
        edge_list.append(pair)
    return edge_list
# deal with the PB dataset (comment fixed: previously said INT, a copy-paste slip)
def readPB(dataUrl):
    """Read the Political Blogs GML graph (string node labels) and return
    its edges re-indexed with 0-based node ids; prints each edge twice
    (raw labels, then mapped ids) as a progress trace."""
    G = nx.read_gml(dataUrl)
    # Maps original node label -> 0-based id (NOTE: shadows the builtin 'list').
    list = dict()
    edge_list = []
    for id, label in enumerate(G.nodes()):
        list[label] = int(id)
        print(id, label)
    for (v0, v1) in G.edges:
        print(v0, v1)
        print(list[v0], list[v1])
        edge_list.append([list[v0], list[v1]])
    return edge_list
# deal with the Grid dataset
def readGrid(dataUrl):
    """Read the Power Grid GML graph (label='id' mode) and return its edges
    re-indexed with 0-based node ids, printing a progress trace."""
    G = nx.read_gml(dataUrl, label='id')
    # Maps original node id -> 0-based index (NOTE: shadows the builtin 'list').
    list = dict()
    edge_list = []
    for id, label in enumerate(G.nodes()):
        list[int(label)] = int(id)
        print(id, label)
    for (v0, v1) in G.edges:
        print(v0, v1)
        print(list[v0], list[v1])
        edge_list.append([list[v0], list[v1]])
    return edge_list
# save the txt
def save(edgeIdList, fileName):
    """Write an edge list to a text file, one 'src dst' pair per line.

    Args:
        edgeIdList: iterable of 2-item sequences (source id, target id).
        fileName: path of the output file (overwritten if it exists).
    """
    # Use a context manager so the file is closed even if the write fails,
    # and build the payload with join instead of quadratic += concatenation.
    lines = [f"{item[0]} {item[1]}\n" for item in edgeIdList]
    with open(fileName, 'w') as f:
        f.write(''.join(lines))
if __name__ == '__main__':
# print('------------- SRART Internet-------------')
# edge_list = readINT('./data gml/INT.gml')
# save(edge_list, './data gml/Internet.txt')
# print('------------- Internet END -------------')
# print('------------- SRART Political blogs-------------')
# edge_list = readPB('./data gml/PB.gml')
# print(edge_list)
# save(edge_list, './data gml/Political blogs.txt')
# print('------------- Political blogs END -------------')
print('------------- SRART Power grid-------------')
edge_list = readGrid('./data gml/Grid.gml')
print(edge_list)
save(edge_list, 'data gml/Power grid.txt')
print('------------- Power grid END -------------') | DDMXIE/LinkPrediction | practice/dataTransform.py | dataTransform.py | py | 2,178 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "networkx.read_gml",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "networkx.read_gml",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "networkx.read_gml",
"line_number": 36,
"usage_type": "call"
}
] |
24739226033 | from fastapi import APIRouter
from app.api.segmentation import doc_parser, paddle
from app.core.config import settings
from app.schemas.doc_parser import ImageInput, ImageOutput
# All segmentation endpoints are grouped under the /segment URL prefix.
router = APIRouter(prefix="/segment", tags=["segment"])
@router.post("/detect-image")
async def doc_parser_api(*, img_in: ImageInput):
    """Delegate image segment detection to the doc_parser backend."""
    # print("requesting `doc_parser_api`")
    return await doc_parser.detect_image(img_in)
@router.post("/detect-text")
async def paddle_api(*, img_in: ImageInput):
    """Delegate text detection to the paddle backend."""
    # print("requesting `paddle_text_api`")
    return await paddle.detect_text(img_in)
@router.post("/detect-bitmap")
async def paddle_bitmap_api(*, img_in: ImageInput):
    """Delegate bitmap-based detection to paddle.db_detect_bitmap."""
    # print("requesting `paddle_bitmap_api`")
    return await paddle.db_detect_bitmap(img_in)
@router.post("/detect-text-fast")
async def paddle_math_api(*, img_in: ImageInput):
    """Delegate to the faster text-detection path, paddle.detect_text_fast."""
    # print("requesting `detect_text_fast`")
    return await paddle.detect_text_fast(img_in)
| rednam-ntn/dosa | server/app/api/__init__.py | __init__.py | py | 933 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "app.schemas.doc_parser.ImageInput",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "app.api.segmentation.doc_parser.detect_image",
"line_number": 13,
"usage_type": "call"... |
7392241864 |
import argparse
from . import board
from . import constants
from . import solver
def parse_args():
    """Parse command-line arguments.

    Returns an argparse.Namespace with:
        board  -- optional board specification string (None -> built-in test board)
        manual -- True to step through the solve manually
    """
    parser = argparse.ArgumentParser()
    # Improvement: help texts added so --help is actually informative.
    parser.add_argument("board", nargs="?",
                        help="board specification (defaults to the built-in test board)")
    parser.add_argument("-m", "--manual", default=False, action="store_true",
                        help="step through the solver manually")
    return parser.parse_args()
def main():
    """Entry point: build a board from CLI input (or the test board) and solve it."""
    args = parse_args()
    # Fall back to the bundled test board when no board string was given.
    spec = args.board if args.board else constants.TEST_BOARD
    main_board = board.Board(spec)
    solver.Solver(main_board, manual=args.manual).solve()
if __name__ == '__main__':
main()
| MattCCS/SudokuSolver | sudoku.py | sudoku.py | py | 698 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
}
] |
42300807705 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# NOTE(review): url is empty — the script fails until a real search-page URL
# (one exposing a text input named 'q') is filled in.
url = ''
driver = webdriver.Chrome()
driver.get(url)
# NOTE(review): find_element_by_name / find_elements_by_css_selector are the
# legacy Selenium 3 API, removed in Selenium 4; the modern form is
# driver.find_element(By.NAME, 'q') / driver.find_elements(By.CSS_SELECTOR, ...).
element_founder = driver.find_element_by_name('q')
element_founder.send_keys('selenium')
element_founder.send_keys(Keys.RETURN)
# Collect result headings and print their titles.
results = driver.find_elements_by_css_selector('h3 > a')
for result in results:
    print(result.text)
| seriybeliy11/parsers | sel_parser.py | sel_parser.py | py | 380 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 10,
"usage_type": "attribute"
},... |
74050008424 | import json
import uuid
import websocket
import time
import threading
from parlai.core.params import ParlaiParser
# the socket callback functions operate asynchronously.
# upon exit of a chat, we do not want the user to view any additional messages from the server.
# alas, it is necessary to send two messages ([DONE], and EXIT) in order to fully exist the world pool
# to prevent receiving a message after sending [DONE], we track the user's state with
# this global variable.
RUNNING = True
def _get_rand_id():
"""
:return: The string of a random id using uuid4
"""
return str(uuid.uuid4())
def _prBlueBG(text):
"""
Print given in text with a blue background.
:param text: The text to be printed
"""
print("\033[44m{}\033[0m".format(text), sep="")
def on_message(ws, message):
    """
    Prints the incoming message from the server.

    :param ws: a WebSocketApp
    :param message: JSON string with a 'text' field and an optional
        'quick_replies' list
    """
    # Suppress server messages arriving after the user typed [DONE];
    # RUNNING is the module-level flag described in the header comment.
    if not RUNNING:
        return
    incoming_message = json.loads(message)
    # Reset terminal colors before printing the bot's reply.
    print("\033[0m\n")
    print("Bot: " + incoming_message['text'])
    quick_replies = incoming_message.get('quick_replies')
    if quick_replies is not None and len(quick_replies) > 0:
        print(f"\nOptions: [{'|'.join(quick_replies)}]")
    # Switch back to the blue background used for user input.
    print("\033[44m\n")
def on_error(ws, error):
    """
    Prints an error, if one occurs on the websocket.

    :param ws: WebSocketApp
    :param error: The error raised by the socket layer
    """
    print(error)
def on_close(ws):
    """
    Cleanup before closing the connection.

    :param ws: WebSocketApp
    """
    # Reset color formatting if necessary
    print("\033[0m")
    print("Connection closed")
def _run(ws, id):
    """
    Takes user input and sends it to a websocket.

    Loops reading terminal input and forwarding each line as a JSON payload
    {'id': ..., 'text': ...}. Typing "[DONE]" stops message display, sends
    "[DONE]" followed by an 'EXIT' message (needed to fully leave the world
    pool — see header comment), then closes the socket.

    :param ws: websocket.WebSocketApp
    :param id: unique sender id attached to every outgoing payload
    """
    global RUNNING
    while True:
        x = input("\033[44m Me: ")
        print("\033[0m", end="")
        data = {}
        data['id'] = id
        data['text'] = x
        if x == "[DONE]":
            # Stop on_message from printing anything that arrives after exit.
            RUNNING = False
        json_data = json.dumps(data)
        ws.send(json_data)
        # Give the server a moment to process before the next send.
        time.sleep(1)
        if x == "[DONE]":
            time.sleep(1)
            # Second message fully exits the world pool.
            data['text'] = 'EXIT'
            ws.send(json.dumps(data))
            break
    ws.close()
def on_open(ws):
    """
    Starts a new thread that loops, taking user input and sending it to the websocket.

    :param ws: websocket.WebSocketApp that sends messages to a terminal_manager
    """
    id = _get_rand_id()
    # The input thread is non-daemon, so it keeps the process alive until the
    # user exits with [DONE].
    threading.Thread(target=_run, args=(ws, id)).start()
def setup_args():
    """
    Set up args, specifically for the port number.

    :return: Parsed options (port, host) from the command line.
    """
    parser = ParlaiParser(False, False)
    parser_grp = parser.add_argument_group('Terminal Chat')
    # NOTE(review): this default (35496) differs from the 34596 fallback used
    # in __main__'s opt.get('port', 34596) — one of the two looks like a typo.
    parser_grp.add_argument(
        '--port', default=35496, type=int, help='Port to run the terminal chat server'
    )
    parser_grp.add_argument(
        '--host', default='localhost', type=str, help='Host to connect to.'
    )
    return parser.parse_args()
if __name__ == "__main__":
opt = setup_args()
port = opt.get('port', 34596)
host = opt.get('host', 'localhost')
print("Connecting to port: ", port)
ws = websocket.WebSocketApp(
f"ws://{host}:{port}/websocket",
on_message=on_message,
on_error=on_error,
on_close=on_close,
)
ws.on_open = on_open
ws.run_forever()
| facebookresearch/ParlAI | parlai/chat_service/services/terminal_chat/client.py | client.py | py | 3,446 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "uuid.uuid4",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 88,
... |
5571273421 | from django.contrib import admin
from django.contrib.admin.options import TabularInline
from apps.info_section_app.models import SimilarLike, SimilarDislike, SimilarTitle, \
Favorite, RelatedTitle
class SimilarLikeAdminInLine(TabularInline):
    """Inline editor for SimilarLike rows (one blank extra row shown)."""
    extra = 1
    model = SimilarLike
class SimilarDislikeAdminInline(TabularInline):
    """Inline editor for SimilarDislike rows (one blank extra row shown)."""
    extra = 1
    model = SimilarDislike
@admin.register(SimilarTitle)
class RestaurantModelAdmin(admin.ModelAdmin):
    """Admin page for SimilarTitle, with like/dislike inlines.

    NOTE(review): the class name mentions 'Restaurant' but it administers
    SimilarTitle — looks copied from another project; consider renaming.
    """
    inlines = (SimilarDislikeAdminInline, SimilarLikeAdminInLine)
admin.site.register(Favorite)
admin.site.register(RelatedTitle)
| urmatovnaa/Manga-universe | apps/info_section_app/admin.py | admin.py | py | 592 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.options.TabularInline",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "apps.info_section_app.models.SimilarLike",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.options.TabularInline",
"line_number"... |
35918965719 | import contextlib
import hashlib
import httplib
import socket
import tempfile
import urllib2
try:
from PIL import Image as PILImage
except ImportError:
import Image as PILImage # noqa
from django.conf import settings
from django.core.files import File
from django.db import transaction
from comics.aggregator.exceptions import (
DownloaderHTTPError, ImageTypeError, ImageIsCorrupt, ImageAlreadyExists,
ImageIsBlacklisted)
from comics.core.models import Release, Image
# Image types we accept, and the file extension they are saved with
IMAGE_FORMATS = {
'GIF': '.gif',
'JPEG': '.jpg',
'PNG': '.png',
}
class ReleaseDownloader(object):
    """Downloads all images of a crawled release and persists a Release row."""
    def download(self, crawler_release):
        """Download every image of *crawler_release* and persist a new Release."""
        images = self._download_images(crawler_release)
        return self._create_new_release(
            crawler_release.comic, crawler_release.pub_date, images)
    def _download_images(self, crawler_release):
        """Download each crawled image via ImageDownloader.

        NOTE(review): this codebase targets Python 2 (urllib2/httplib in the
        imports), where map() returns a list; on Python 3 it would be lazy.
        """
        image_downloader = ImageDownloader(crawler_release)
        return map(image_downloader.download, crawler_release.images)
    @transaction.commit_on_success
    def _create_new_release(self, comic, pub_date, images):
        """Create the Release and attach its images in a single transaction."""
        release = Release(comic=comic, pub_date=pub_date)
        release.save()
        for image in images:
            release.images.add(image)
        return release
class ImageDownloader(object):
    def __init__(self, crawler_release):
        # The release whose images this downloader fetches.
        self.crawler_release = crawler_release
    def download(self, crawler_image):
        """Download one crawled image and return the stored Image row.

        Pipeline: fetch to a temp file -> checksum -> blacklist check ->
        reuse an existing Image with the same checksum when allowed ->
        validate with PIL -> persist a new Image.
        """
        # Identifier used in error reporting; refined with the checksum below.
        self.identifier = self.crawler_release.identifier
        with self._download_image(
            crawler_image.url, crawler_image.request_headers
        ) as image_file:
            checksum = self._get_sha256sum(image_file)
            self.identifier = '%s/%s' % (self.identifier, checksum[:6])
            self._check_if_blacklisted(checksum)
            existing_image = self._get_existing_image(
                comic=self.crawler_release.comic,
                has_rerun_releases=self.crawler_release.has_rerun_releases,
                checksum=checksum)
            if existing_image is not None:
                return existing_image
            image = self._validate_image(image_file)
            file_extension = self._get_file_extension(image)
            file_name = self._get_file_name(checksum, file_extension)
            return self._create_new_image(
                comic=self.crawler_release.comic,
                title=crawler_image.title,
                text=crawler_image.text,
                image_file=image_file,
                file_name=file_name,
                checksum=checksum)
def _download_image(self, url, request_headers):
try:
request = urllib2.Request(url, None, request_headers)
with contextlib.closing(urllib2.urlopen(request)) as http_file:
temp_file = tempfile.NamedTemporaryFile(suffix='comics')
temp_file.write(http_file.read())
temp_file.seek(0)
return temp_file
except urllib2.HTTPError as error:
raise DownloaderHTTPError(self.identifier, error.code)
except urllib2.URLError as error:
raise DownloaderHTTPError(self.identifier, error.reason)
except httplib.BadStatusLine:
raise DownloaderHTTPError(self.identifier, 'BadStatusLine')
except socket.error as error:
raise DownloaderHTTPError(self.identifier, error)
def _get_sha256sum(self, file_handle):
original_position = file_handle.tell()
hash = hashlib.sha256()
while True:
data = file_handle.read(8096)
if not data:
break
hash.update(data)
file_handle.seek(original_position)
return hash.hexdigest()
def _check_if_blacklisted(self, checksum):
if checksum in settings.COMICS_IMAGE_BLACKLIST:
raise ImageIsBlacklisted(self.identifier)
def _get_existing_image(self, comic, has_rerun_releases, checksum):
try:
image = Image.objects.get(comic=comic, checksum=checksum)
if image is not None and not has_rerun_releases:
raise ImageAlreadyExists(self.identifier)
return image
except Image.DoesNotExist:
return None
def _validate_image(self, image_file):
try:
image = PILImage.open(image_file)
image.load()
return image
except IndexError:
raise ImageIsCorrupt(self.identifier)
except IOError as error:
raise ImageIsCorrupt(self.identifier, error.message)
def _get_file_extension(self, image):
if image.format not in IMAGE_FORMATS:
raise ImageTypeError(self.identifier, image.format)
return IMAGE_FORMATS[image.format]
def _get_file_name(self, checksum, extension):
if checksum and extension:
return '%s%s' % (checksum, extension)
@transaction.commit_on_success
def _create_new_image(
self, comic, title, text, image_file, file_name, checksum):
image = Image(comic=comic, checksum=checksum)
image.file.save(file_name, File(image_file))
if title is not None:
image.title = title
if text is not None:
image.text = text
image.save()
return image
| macanhhuy/comics | comics/aggregator/downloader.py | downloader.py | py | 5,379 | python | en | code | null | github-code | 36 | [
{
"api_name": "comics.core.models.Release",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.commit_on_success",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "django.db.transaction",
"line_number": 41,
"usage_type": "name"
... |
6394496653 | # Copyright (c) 2023 Graphcore Ltd. All rights reserved.
# The functional definition in this file was ported to Python
# from XCFun, which is Copyright Ulf Ekström and contributors 2009-2020
# and provided under the Mozilla Public License (v2.0)
# see also:
# - https://github.com/dftlibs/xcfun
# - https://github.com/dftlibs/xcfun/blob/master/LICENSE.md
import jax.numpy as jnp
import jax
import numpy as np
def __b88(a, gaa):
    """Becke 88 exchange term plus the Slater LDA exchange contribution.

    Ported from XCFun (see the file header). Evaluates one spin channel;
    *a* is the (doubled) density argument — it is halved internally —
    and *gaa* is the gradient contraction for that channel.
    TODO confirm the exact density convention against the XCFun source.

    Removed the unused precomputed constants d2 and d12 from the
    original port; the expression below uses 6*d directly.
    """
    # precompute
    c1 = (4.0 / 3.0)
    c2 = (-8.0 / 3.0)
    c3 = (-3.0 / 4.0) * (6.0 / np.pi) ** (1.0 / 3.0) * 2  # Slater prefactor
    d = 0.0042  # Becke's empirical beta parameter
    # actual compute: powers are evaluated in log space, as in the port
    log_a = jnp.log(a/2)
    na43 = jnp.exp(log_a * c1)            # (a/2)**(4/3)
    chi2 = gaa / 4* jnp.exp(log_a * c2 )  # squared reduced gradient
    chi = jnp.exp(jnp.log( chi2 ) / 2 )   # sqrt(chi2) via exp/log
    b88 = -(d * na43 * chi2) / (1.0 + 6*d * chi * jnp.arcsinh(chi)) *2
    slaterx_a = c3 * na43
    return slaterx_a + b88
| graphcore-research/pyscf-ipu | pyscf_ipu/exchange_correlation/b88.py | b88.py | py | 999 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "numpy.pi",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "jax.numpy.log",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "jax.numpy.exp",
"line_numbe... |
11193542891 | """ клиентская часть """
import sys
import json
import time
import re
import logging
import logs.config_client_log
from lib.variables import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, RESPONSE, AUTH, ALERT, MSG, ERR200, ERR400, \
CLIENT_LISTEN, LISTEN, SENDER, MSG, MSG_TEXT, ERROR
from lib.utils import create_socket, server_settings, get_message, send_message
from lib.errors import ReqFieldMissingError, ServerError
from logs.decoration_log import log
CLIENT_LOGGER = logging.getLogger('client')
@log
def message_from_server(message):
    """Handle a chat message relayed by the server.

    Prints and logs the text when the message carries the MSG action
    with sender and text fields; otherwise logs it as malformed.
    """
    is_chat_message = (
        ACTION in message and message[ACTION] == MSG
        and SENDER in message and MSG_TEXT in message)
    if not is_chat_message:
        CLIENT_LOGGER.error(f'Получено некорректное сообщение с сервера: {message}')
        return
    print(f'Получено сообщение от пользователя {message[SENDER]}:\n{message[MSG_TEXT]}')
    CLIENT_LOGGER.info(f'Получено сообщение от пользователя {message[SENDER]}:\n{message[MSG_TEXT]}')
@log
def create_message(sock, account_name='Guest'):
    """Prompt the user for a message and build the message dict.

    Typing 'exit' closes the socket and terminates the client process.
    """
    text = input('Введите сообщение для отправки или \'exit\' для завершения работы: ')
    if text.lower() == 'exit':
        # Graceful shutdown requested by the user.
        sock.close()
        CLIENT_LOGGER.info('Завершение работы по команде пользователя.')
        sys.exit(0)
    message_dict = {
        ACTION: MSG,
        TIME: time.time(),
        ACCOUNT_NAME: account_name,
        MSG_TEXT: text,
    }
    CLIENT_LOGGER.debug(f'Сформирован словарь сообщения: {message_dict}')
    return message_dict
@log
def create_presence(account_name='Guest'):
    """Build a PRESENCE service message for *account_name*.

    Shape: {'action': 'presence', 'time': ..., 'user': {'account_name': ...}}
    """
    presence_message = {
        ACTION: PRESENCE,
        TIME: time.time(),
        USER: {ACCOUNT_NAME: account_name},
    }
    CLIENT_LOGGER.debug(f'Сформировано {PRESENCE} сообщение для пользователя {account_name}')
    return presence_message
@log
def get_user():
    """Prompt until a valid user name is entered and return it.

    A valid name is 3-25 latin letters and must not be the reserved
    name 'guest' (case-insensitive).
    """
    while True:
        account = input("введите имя пользователя >>>")
        # Bug fix: re.match(r"[A-Za-z]") only validated the FIRST character,
        # so names like "ab1c" passed despite the stated latin-letters rule.
        # fullmatch enforces letters-only and the 3-25 length in one check.
        if not re.fullmatch(r"[A-Za-z]{3,25}", account):
            CLIENT_LOGGER.error(f"недопустимое имя пользователя: {account}")
            print("Имя пользователя должно быть от 3 до 25 латинских символов")
        elif account.lower().strip() == 'guest':
            CLIENT_LOGGER.error(f"недопустимое имя пользователя: {account}")
            print("Недоспустимое имя пользователя")
        else:
            break
    return account
@log
def create_action(account_name, action, msg=None):
    """Build a generic service message of the given *action* type.

    Shape: {'action': ..., 'time': ..., 'user': {'account_name': ...},
    'msg': ...}
    """
    action_message = {
        ACTION: action,
        TIME: time.time(),
        USER: {ACCOUNT_NAME: account_name},
        MSG: msg,
    }
    # NOTE(review): the log line interpolates the ACTION key constant, not
    # the *action* argument — possibly {action} was intended; kept as-is.
    CLIENT_LOGGER.debug(f'Сформировано {ACTION} сообщение для пользователя {account_name}')
    return action_message
@log
def process_handler(message):
    """Parse the server's response message.

    Returns the MSG payload on response code 200, raises ServerError on
    an ERR400 response, and ReqFieldMissingError when the RESPONSE field
    is missing or unrecognised.
    """
    CLIENT_LOGGER.debug(f'Разбор приветственного сообщения от сервера: {message}')
    print(f'Разбор приветственного сообщения от сервера: {message}')
    if RESPONSE in message:
        code = message[RESPONSE]
        # NOTE(review): success compares against the int 200 while the
        # error branch compares against the ERR400 constant — confirm the
        # server really sends both shapes.
        if code == 200:
            CLIENT_LOGGER.debug(f"{message[RESPONSE]} содержит {ERR200}")
            return message[MSG]
        if code == ERR400:
            CLIENT_LOGGER.debug(f"{message[RESPONSE]} содержит {ERR400}")
            raise ServerError(f"{ERR400}: {message[ERROR]}")
    raise ReqFieldMissingError(RESPONSE)
@log
def start_client():
    """Connect to the server, authorise a user name, then run the
    send or listen loop depending on the configured listen mode.

    Exits the process on connection/protocol errors.
    """
    srv_settings = server_settings()
    server_address = srv_settings[0]
    server_port = srv_settings[1]
    client_listen = srv_settings[2]
    print(f"start client on: {server_address}:{server_port} | listen_mode={client_listen}")
    CLIENT_LOGGER.info(f"client started {server_address}:{server_port} | listen_mode={client_listen}")
    try:
        # Handshake: open the socket and exchange a PRESENCE message
        transport = create_socket()
        transport.connect((server_address, server_port))
        send_message(transport, create_presence())
        answer = process_handler(get_message(transport))
        CLIENT_LOGGER.info(f"соединение с сервером {server_address}:{server_port}. Ответ: {answer}")
        print(f"соединение с сервером {server_address}:{server_port}. Ответ: {answer}")
        # authorisation: register the chosen user name with the server
        account_name = get_user()
        CLIENT_LOGGER.info(f"Guest авторизовался как {account_name}")
        CLIENT_LOGGER.debug(f"отправка {AUTH} сообщения на сервер {server_address}:{server_port} от user={account_name}")
        message_to_server = create_action(account_name, action=AUTH, msg=None)
        send_message(transport, message_to_server)
        try:
            answer = process_handler(get_message(transport))
            print(answer)
        except (ValueError, json.JSONDecodeError):
            # NOTE(review): this prints the PREVIOUS answer, since the new
            # one failed to decode.
            print(answer)
            CLIENT_LOGGER.error(f"{ERR400}. Не удалось декодировать сообшение от сервера")
            print(f"{ERR400}. Не удалось декодировать сообшение от сервера")
    except json.JSONDecodeError:
        CLIENT_LOGGER.error(f"не удалось декодировать JSON-строку")
        print(f"не удалось декодировать JSON-строку")
        sys.exit(1)
    except ServerError as error:
        CLIENT_LOGGER.error(f"ошибка при установке соединения: {error.text}")
        print(f"ошибка при установке соединения: {error.text}")
        sys.exit(1)
    except ReqFieldMissingError as missing_error:
        CLIENT_LOGGER.error(f"в ответе сервера нет обязательного поля {missing_error.missing_field}")
        sys.exit(1)
    except ConnectionRefusedError:
        CLIENT_LOGGER.critical(f"Не удалось подключиться к серверу {server_address}:{server_port}")
        sys.exit(1)
    else:
        # Main loop: sender mode reads stdin, listener mode prints
        # incoming messages. Either way a lost connection exits.
        print(f"клиент - в режиме client_listen={client_listen:}")
        while True:
            if not client_listen:
                try:
                    send_message(transport, create_message(transport))
                except (ConnectionResetError, ConnectionError, ConnectionAbortedError):
                    CLIENT_LOGGER.error(f"соединение с сервером {server_address}:{server_port} потеряно")
                    print(f"соединение с сервером {server_address}:{server_port} потеряно")
                    sys.exit(1)
            if client_listen:
                try:
                    message_from_server(get_message(transport))
                except (ConnectionResetError, ConnectionError, ConnectionAbortedError):
                    CLIENT_LOGGER.error(f"соединение с сервером {server_address}:{server_port} потеряно")
                    print(f"соединение с сервером {server_address}:{server_port} потеряно")
                    sys.exit(1)
# Script entry point: run the interactive client.
if __name__ == '__main__':
    start_client()
| ESarmanov/client_server_app_Python_GeekBrains | Lesson_7_Sarmanov_EF/client.py | client.py | py | 8,345 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "lib.variables.ACTION",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "lib.variables.MSG",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "lib.variable... |
20928709222 | #!/usr/bin/env python
# coding: utf-8
# In[6]:
import time
import json
import pandas as pd
import re
import logging
from datetime import date, datetime, timedelta
from selenium import webdriver
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
# Browser setup: a Chrome driver is launched (webdriver-manager downloads
# the matching chromedriver binary); headless mode is explicitly off so
# the scrape runs in a visible window.
opts=webdriver.ChromeOptions()
opts.headless=False
driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts)
def date_extract(jobs):
    """Convert scraped day-offset lists into ISO date strings.

    Each entry of *jobs* is a list of digit strings taken from labels
    such as "2 days ago"; an empty entry means "posted today".
    """
    resolved = []
    for offsets in jobs:
        if not offsets:
            resolved.append(str(date.today()))
            continue
        for raw in offsets:
            days_ago = int(str(raw))
            resolved.append(str(date.today() - timedelta(days_ago)))
    return resolved
def get_df_from_dict(jobs):
    """Build a cleaned DataFrame from the scraped *jobs* dict.

    Every cell is stringified and lower-cased to reduce duplicates, the
    location column is split into a list of words, and rows containing
    missing values are dropped.
    """
    frame = pd.DataFrame.from_dict(jobs)
    frame = frame.apply(lambda col: col.astype(str).str.lower())
    frame.location = [loc.split(" ") for loc in frame.location]
    return frame.dropna()
def task_scrape():
    """Scrape the first three naukri.com result pages into a timestamped CSV.

    Fixes over the previous revision:
    - the relative-date conversion now runs ONCE after all pages are
      collected (running it per page re-parsed the already-converted
      date strings and raised ValueError from page 2 onwards);
    - ``driver.quit`` is actually called (it was referenced without
      parentheses, so the browser was never closed);
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by ``pd.concat``.
    """
    jobs = {"title": [],
            "company": [],
            "experience": [],
            # "skills": [],
            "date": [],
            "scraper_run_dt_time": [],
            "location": []
            # "jd_url": []
            }
    final_df = pd.DataFrame(jobs)
    # One iteration per result page to be scraped
    for n in range(1, 4):
        driver.get('https://www.naukri.com/software-engineer-software-developer-data-analyst-data-scientist-machine-learning-engineer-hr-manager-project-manager-cloud-architect-full-stack-developer-full-stack-web-developer-big-data-consultant-jobs-'+str(n)+'?k=Software%20Engineer%2C%20Software%20Developer%2C%20Data%20Analyst%2C%20Data%20Scientist%2C%20Machine%20Learning%20Engineer%2C%20HR%20Manager%2C%20Project%20Manager%2C%20Cloud%20Architect%2C%20Full%20Stack%20Developer%2C%20Full%20Stack%20Web%20Developer%2C%20Big%20Data%20Consultant')
        time.sleep(4)  # give the page time to render before querying elements
        job_container = driver.find_elements(By.CSS_SELECTOR, ".jobTuple.bgWhite.br4.mb-8")
        # scrape the details of every job card on the page
        for job in job_container:
            driver.implicitly_wait(20)
            title = job.find_element(By.CSS_SELECTOR, "a.title.fw500.ellipsis").text
            company = job.find_element(By.CSS_SELECTOR, "a.subTitle.ellipsis.fleft").text
            location = job.find_element(By.CSS_SELECTOR, ".fleft.grey-text.br2.placeHolderLi.location").text
            try:
                exp = job.find_element(By.CSS_SELECTOR, ".fleft.grey-text.br2.placeHolderLi.experience").text
            except Exception:
                exp = "0 yrs"
            # date_string contains strings like "2 days ago", "just now", "few hours ago"
            date_string = job.find_element(By.CSS_SELECTOR, "[class^='type br2 fleft']").text
            day_offsets = re.findall(r'\d+', date_string)  # numeric day offset, if any
            jobs["title"].append(title)
            jobs["company"].append(company)
            jobs["location"].append(location)
            jobs["experience"].append(exp)
            jobs["date"].append(day_offsets)
            jobs["scraper_run_dt_time"].append(datetime.today())
    # Resolve relative dates and build the final frame once, after all pages
    jobs["date"] = date_extract(jobs["date"])
    try:
        dataframe = get_df_from_dict(jobs)
        final_df = pd.concat([final_df, dataframe])
    except Exception:
        logging.error("Error in dict_to_df")
    now = datetime.today()
    dt_time = now.strftime("%H%M%S")
    dt = now.strftime("%Y%m%d")
    filename = "scraped_" + dt + "_" + dt_time
    final_df.to_csv('{}.csv'.format(filename))
    driver.quit()
# Run the scraper immediately when the script is executed.
task_scrape()
# In[ ]:
| shivanianand/NaukriDataAnalysis | Scraping_final.py | Scraping_final.py | py | 4,365 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 19,
"usage_type": "call"
},
{
"api... |
25489916263 | import pytest
import pytest_spec.basic as basic
class TestErrors:
    """Tests for the error-raising helpers in pytest_spec.basic."""

    # The first parameter was misleadingly named `parameter_list`; pytest
    # passes the test-class instance here, so it is `self` by convention.
    def test_zero(self):
        """division_by_zero(1) must raise ZeroDivisionError with the stock message."""
        with pytest.raises(ZeroDivisionError) as e:
            basic.division_by_zero(1)
        assert e.type == ZeroDivisionError
        assert e.typename == "ZeroDivisionError"
        assert str(e.value) == "division by zero"
# Addition helper exercised by the parametrized tests below.
def add(a, b):
    """Return a + b, raising TypeError unless both operands are exactly int."""
    operands_ok = type(a) is int and type(b) is int
    if not operands_ok:
        raise TypeError
    return a + b
# Each tuple is (a, b, expected); pytest runs the test once per tuple.
@pytest.mark.parametrize("a,b,expected", [(1, 2, 3), (4, 5, 9), (10, 23, 33)])
def test_add(a, b, expected):
    assert add(a, b) == expected
# Verify that add() raises TypeError for every non-int argument combination.
@pytest.mark.parametrize("a,b,expected", [("1", 2, 3), (None, 5, 9), (10, [1], 33)])
def test_add_2(a, b, expected):
    # `expected` is unused here; only the raised exception matters.
    with pytest.raises(TypeError):
        add(a, b)
| atu4403/pytest_spec | tests/basic/test_basic.py | test_basic.py | py | 865 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.raises",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest_spec.basic.division_by_zero",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pytest_spec.basic",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pytes... |
2182128872 | import logging
import threading
import numpy as np
import pandas as pd
from timeit import default_timer as timer
from datetime import datetime, timedelta, timezone
from time import sleep
import time
import matplotlib.pyplot as mpl
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from utils.history import Historical_Caching
import warnings
# Silence noisy library warnings and configure module-wide logging.
warnings.filterwarnings('ignore')
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Gives a list of timestamps from the start date to the end date
#
# startDate: The start date as a string xxxx-xx-xx
# endDate: The end date as a string year-month-day
# period: 'minute', 'daily', 'weekly', or 'monthly'
# weekends: True if weekends should be included; false otherwise
# return: A numpy array of timestamps
def DateRange(startDate, endDate, period='minute', weekends=True):
    """Return a numpy array of POSIX timestamps from startDate to endDate.

    startDate/endDate are POSIX timestamps; period is one of 'minute',
    'daily', 'weekly' or 'monthly' ('monthly' means 30 days). With
    weekends=False, Saturdays and Sundays are skipped.

    Raises ValueError when startDate is after endDate or the period is
    unknown (previously an unknown period caused a NameError on `prd`).
    """
    # The start and end date
    sd = datetime.fromtimestamp(startDate)
    ed = datetime.fromtimestamp(endDate)
    # Invalid start and end dates
    if (sd > ed):
        raise ValueError("The start date cannot be later than the end date.")
    # Map the period name to a step size, failing fast on unknown input
    if (period == 'minute'):
        prd = timedelta(minutes=1)
    elif (period == 'daily'):
        prd = timedelta(1)
    elif (period == 'weekly'):
        prd = timedelta(7)
    elif (period == 'monthly'):
        prd = timedelta(30)
    else:
        raise ValueError("Unknown period: %r" % (period,))
    # The final list of timestamp data
    dates = []
    cd = sd
    while (cd <= ed):
        # If weekends are included or it's a weekday, append the current ts
        if (weekends or (cd.date().weekday() != 5 and cd.date().weekday() != 6)):
            dates.append(cd.timestamp())
        # Onto the next period
        cd = cd + prd
    return np.array(dates)
# Given a date, returns the previous day
#
# startDate: The start date as a datetime object
# weekends: True if weekends should counted; false otherwise
def DatePrevDay(startDate):
    """Step one minute back from *startDate* (a datetime).

    NOTE(review): despite the name, the step is one *minute*, matching
    the minute-resolution data used throughout this module.
    """
    return startDate - timedelta(minutes=1)
# Load data from the CSV file. Note: Some systems are unable
# to give timestamps for dates before 1970. This function may
# fail on such systems.
#
# path: The path to the file
# return: A data frame with the parsed timestamps
def ParseData(path):
    """Read OHLC CSV data at *path* into a DataFrame.

    Renames the 'Date' column to 'Timestamp'. Retries forever (two 0.5 s
    sleeps per attempt) until the file becomes readable — NOTE(review): a
    permanently unreadable file makes this loop spin indefinitely.
    """
    # Read the csv file into a dataframe
    df = None
    while df is None:
        try:
            sleep(0.5)
            df = pd.read_csv(path)
        except Exception as e:
            sleep(0.5)
            pass
    df['Timestamp'] = df['Date']
    # Remove any unused columns (axis = 1 specifies fields are columns)
    df = df.drop('Date', axis=1)
    # df = df.iloc[::-1] # CHECK THIS
    return df
# Given dataframe from ParseData
# plot it to the screen
#
# df: Dataframe returned from
# p: The position of the predicted data points
def PlotData(df, p=None):
    """Plot the High column of *df* against time with matplotlib.

    df: dataframe from ParseData; p: index positions of predicted points,
    drawn in a second colour. Blocks until the plot window is closed.
    """
    if (p is None):
        p = np.array([])
    # Timestamp data
    ts = df.Timestamp.values
    # Number of x tick marks
    nTicks = 10
    # Left most x value
    s = np.min(ts)
    # Right most x value
    e = np.max(ts)
    # Total range of x values
    r = e - s
    # Add some buffer on both sides
    s -= r / 5
    e += r / 5
    # These will be the tick locations on the x axis
    tickMarks = np.arange(s, e, (e - s) / nTicks)
    # Convert timestamps to strings
    strTs = [datetime.fromtimestamp(i).strftime('%Y-%m-%d %H:%M:%S') for i in tickMarks]
    mpl.figure()
    # Plots of the high and low values for the day
    mpl.plot(ts, df.High.values, color='#727272', linewidth=1.618, label='Actual')
    # Predicted data was also provided
    if (len(p) > 0):
        mpl.plot(ts[p], df.High.values[p], color='#7294AA', linewidth=1.618, label='Predicted')
    # Set the tick marks
    mpl.xticks(tickMarks, strTs, rotation='vertical')
    # Set y-axis label
    mpl.ylabel('Crypto Price (USD)')
    # Add the label in the upper left
    mpl.legend(loc='upper left')
    mpl.show()
# A class that predicts stock prices based on historical stock data
class Predictor:
    """Wraps an sklearn regressor to forecast OHLC rows.

    Features are built from each row's timestamp plus the previous
    `nPastDays` rows (see _GetSample); data is scaled with the supplied
    scaler before fitting and inverse-scaled on prediction.
    """
    # Constructor
    # nPrevDays: The number of past days to include
    # in a sample.
    # rmodel: The regressor model to use (sklearn)
    # nPastDays: The number of past days in each feature
    # scaler: The scaler object used to scale the data (sklearn)
    # NOTE(review): the default StandardScaler() is created once at class
    # definition time and shared by every Predictor relying on the default —
    # pass an explicit scaler to avoid cross-instance state.
    def __init__(self, rmodel, nPastDays, scaler=StandardScaler()):
        self.npd = nPastDays
        self.R = rmodel
        self.S = scaler
        self.D = None
        self.D_orig = None
        self.DTS = None
        self.A = None
        self.y = None
        self.targCols = None
    # Extracts features from stock market data
    #
    # D: A dataframe from ParseData
    # ret: The data matrix of samples
    def _ExtractFeat(self, D):
        # One row per day of stock data
        m = D.shape[0]
        # Open, High, Low, and Close for past n days + timestamp and volume
        n = self._GetNumFeatures()
        B = np.zeros([m, n])
        # Preserve order of spreadsheet
        for i in range(m - 1, -1, -1):
            self._GetSample(B[i], i, D)
        # Return the internal numpy array
        return B
    # Extracts the target values from stock market data
    #
    # D: A dataframe from ParseData
    # ret: The data matrix of targets and the
    def _ExtractTarg(self, D):
        # Timestamp column is not predicted
        tmp = D.drop('Timestamp', axis=1)
        # Return the internal numpy array
        return tmp.values, tmp.columns
    # Get the number of features in the data matrix
    #
    # n: The number of previous days to include
    # self.npd is used if n is None
    # ret: The number of features in the data matrix
    # NOTE(review): assumes 7 data columns per past row — confirm against
    # the CSV schema produced upstream.
    def _GetNumFeatures(self, n=None):
        if (n is None):
            n = self.npd
        return n * 7 + 1
    # Get the sample for a specific row in the dataframe.
    # A sample consists of the current timestamp and the data from
    # the past n rows of the dataframe
    #
    # r: The array to fill with data
    # i: The index of the row for which to build a sample
    # df: The dataframe to use
    # return; r
    def _GetSample(self, r, i, df):
        # First value is the timestamp
        r[0] = df['Timestamp'].values[i]
        # The number of columns in df
        n = df.shape[1]
        # The last valid index
        lim = df.shape[0]
        # Each sample contains the past n days of stock data; for non-existing data
        # repeat last available sample
        # Format of row:
        # Timestamp Volume Open[i] High[i] ... Open[i-1] High[i-1]... etc
        for j in range(0, self.npd):
            # Subsequent rows contain older data in the spreadsheet
            ind = i + j + 1
            # If there is no older data, duplicate the oldest available values
            if (ind >= lim):
                ind = lim - 1
            # Add all columns from row[ind]
            for k, c in enumerate(df.columns):
                # + 1 is needed as timestamp is at index 0
                r[k + 1 + n * j] = df[c].values[ind]
        return r
    # Attempts to learn the stock market data
    # given a dataframe taken from ParseData
    #
    # D: A dataframe from ParseData
    def Learn(self, D):
        # Keep track of the currently learned data
        self.D = D.copy()
        self.D_orig = D.copy()
        # self.S = StandardScaler(with_mean=True, with_std=True)
        # Keep track of old timestamps for indexing
        self.DTS = np.asarray(self.D.Timestamp.values)
        # Scale the data
        self.S.fit(self.D)
        self.D[self.D.columns] = self.S.transform(self.D)
        # Get features from the data frame
        self.A = self._ExtractFeat(self.D)
        # Get the target values and their corresponding column names
        self.y, self.targCols = self._ExtractTarg(self.D)
        # Create the regressor model and fit it
        self.R.fit(self.A, self.y)
        return True
    # Predicts values for each row of the dataframe. Can be used to
    # estimate performance of the model
    #
    # df: The dataframe for which to make prediction
    # return: A dataframe containing the predictions
    def PredictDF(self, df):
        # Make a local copy to prevent modifying df
        D = df.copy()
        # Scale the input data like the training data
        D[D.columns] = self.S.transform(D)
        # Get features
        A = self._ExtractFeat(D)
        # Construct a dataframe to contain the predictions
        # Column order was saved earlier
        P = pd.DataFrame(index=range(A.shape[0]), columns=self.targCols)
        # Perform prediction
        P[P.columns] = self.R.predict(A)
        # Add the timestamp (already scaled from above)
        P['Timestamp'] = D['Timestamp'].values
        # Scale the data back to original range
        P[P.columns] = self.S.inverse_transform(P)
        return P
    # Predict the stock price during a specified time
    #
    # startDate: The start date as a string in yyyy-mm-dd format
    # endDate: The end date as a string yyyy-mm-dd format
    # period: 'daily', 'weekly', or 'monthly' for the time period
    # between predictions
    # return: A dataframe containing the predictions or
    # NOTE(review): despite the comments above, callers in this module pass
    # POSIX timestamps (not yyyy-mm-dd strings) — see ML_Calculus.Engine.
    def PredictDate(self, startDate, endDate, period='minute'):
        # Create the range of timestamps and reverse them
        ts = DateRange(startDate, endDate, period)[::-1]
        m = ts.shape[0]
        # Prediction is based on data prior to start date
        # Get timestamp of previous day
        prevts_ = datetime.fromtimestamp(ts[-1]) - timedelta(minutes=1)
        prevts = np.asarray(prevts_.timestamp())
        # Test if there is enough data to continue
        try:
            ind = np.where(self.DTS <= prevts)[0][0]
        except IndexError:
            logger.info('Safety ON')
            ind = 0
            pass
        # There is enough data to perform prediction; allocate new data frame
        P = pd.DataFrame(np.zeros((self.D.shape[0], self.D.shape[1])), index=range(self.D.shape[0]), columns=self.D.columns)
        # Add in the timestamp column so that it can be scaled properly
        P.loc[int(m):int(self.D.shape[0]), 'Timestamp'] = self.D.loc[0:(int(self.D.shape[0] - m)), 'Timestamp']
        P.loc[0:int(m - 1), 'Timestamp'] = ts
        for i in range(self.D.shape[0] - m):
            # If the current index does not exist, repeat the last valid data
            curInd = ind + i
            if (curInd >= self.D.shape[0]):
                curInd = curInd - 1
            # Copy over the past data (already scaled)
            P.iloc[int(m + i)] = self.D_orig.xs(int(curInd))
        # for i in range(len(P)):
        # print(datetime.datetime.fromtimestamp(P.loc[i, 'Timestamp']))
        # Scale the timestamp (other fields are 0)
        self.S.fit(P)
        P[P.columns] = self.S.transform(P)
        P = P[0:int(m * 2)]
        # B is to be the data matrix of features
        B = np.zeros((1, self._GetNumFeatures()))
        # Add extra last entries for past existing data
        # Loop until end date is reached
        # print(P)
        for i in range(m - 1, -1, -1):
            # Create one sample
            self._GetSample(B[0], i, P)
            # Predict the row of the dataframe and save it
            pred = self.R.predict(B).ravel()
            for j, k in zip(self.targCols, pred):
                P.at[i, j] = k
        # Discard extra rows needed for prediction
        # Scale the dataframe back to the original range
        P[P.columns] = self.S.inverse_transform(P)
        '''for i in range(len(P)):
            print(datetime.fromtimestamp(P.loc[i, 'Timestamp']))
        print(P)'''
        '''j = 0
        for i in P.Timestamp:
            print(dt.fromtimestamp(i))
            j += 1
            if j > 10:
                break'''
        # PlotData(P)
        P = P[0:m]
        return P
    # Test the predictors performance and
    # displays results to the screen
    #
    # D: The dataframe for which to make prediction
    def Performance(self, df=None):
        # If no dataframe is provided, use the currently learned one
        if df is None:
            D = self.D.copy()
        else:
            self.S.fit(df)
            D = self.S.transform(df)
        # Get features from the data frame
        A = self._ExtractFeat(D)
        # Get the target values and their corresponding column names
        y, _ = self._ExtractTarg(D)
        # Begin cross validation
        ss = ShuffleSplit(n_splits=1, test_size=0.1, train_size=0.9, random_state=0)
        for trn, tst in ss.split(A):
            s2 = cross_val_score(self.R, A[tst], y[tst], cv=5, scoring=make_scorer(r2_score), n_jobs=-1)
            if len(s2) > 1:
                return s2.mean()
            elif len(s2) == 1:
                logger.info(str(s2))
                return s2
            else:
                return 0
class ML_Calculus:
    """Background machine-learning worker for one instrument.

    Retrains a KNN-based Predictor on the cached minute history once per
    minute (Engine thread) and publishes an up/down/neutral verdict via
    get_p_verdict().
    """
    def __init__(self, ws_bmex, rest, instrument, history_count, per_pred, API_key, API_secret):
        self.client = rest
        self.instrument_bmex = instrument
        self.API_key_bmex = API_key
        self.API_secret_bmex = API_secret
        self.periods_pred = per_pred - 1
        self.p_verdict = 0
        self.D = None
        self.ready = False
        self.history = Historical_Caching(ws_bmex, rest, instrument, history_count)
        # Thread is created here but only started by start_ml()
        self.thread = threading.Thread(target=self.Engine)
        self.R = KNeighborsRegressor(n_neighbors=10, weights='distance', algorithm='auto', leaf_size=25, n_jobs=-1)
        self.sp_Classic = Predictor(rmodel=self.R, nPastDays=50)
        self.logger = logging.getLogger(__name__)
    # args: [csv_path, start_ts, end_ts, optional period flag].
    # Returns (iterations, prediction frame, score) or (0, 0, 0) on failure.
    def Main(self, args):
        if (len(args) != 3 and len(args) != 4):
            return
        # Test if file exists
        # NOTE(review): the handle from open() is never closed, and the
        # decoded `predPrd` below is never passed to PredictDate.
        try:
            open(args[0])
        except Exception as e:
            logger.error('Error opening args: ' + args[0])
            logger.error(str(e))
            return
        if (len(args) == 4):
            predPrd = args[3]
            if predPrd == 'm':
                predPrd = 'minute'
            if predPrd == 'D':
                predPrd = 'daily'
            if predPrd == 'W':
                predPrd = 'weekly'
            if predPrd == 'M':
                predPrd = 'monthly'
        try:
            # Everything looks okay; proceed with program
            # Grab the data frame
            # self.D = pand.DataFrame(index=range(self.hc))
            self.D = None
            self.D = ParseData(args[0])
            # The number of previous days of data used
            # when making a prediction
            # PlotData(D)
            s2_mean = 0
            P = None
            i = 0
            res = 0
            while res < 1:
                self.sp_Classic.Learn(self.D)
                res += 1
            while s2_mean < 0.70 and i < 3:
                # Learn the dataset and then display performance statistics
                # sp.TestPerformance()
                # Perform prediction for a specified date range
                P = self.sp_Classic.PredictDate(args[1], args[2])
                if P is None:
                    logger.info(self.instrument_bmex + ': TYPE 2 Reboot')
                    return 0, 0, 0
                s2_mean = self.sp_Classic.Performance()
                # Keep track of number of predicted results for plot
                # n = P.shape[0]
                # Append the predicted results to the actual results
                # D = P.append(D)
                # Predicted results are the first n rows
                # D.to_csv(r'xbt_m1_treated.csv', index=False)
                # PlotData(D, range(n + 1))'''
                i += 1
            # print(P)
            return i, P, s2_mean
        except Exception as e:
            logger.error(str(e))
            sleep(1)
            return 0, 0, 0
    # Worker loop: wait for history, then once per wall-clock minute run
    # Main() and derive the directional verdict from the predicted closes.
    def Engine(self):
        datetime_minute_cached = None
        fails = 0
        self.history.start_supplychain()
        while self.history.get_data_loaded() is False or self.history.get_run_completed() is False:
            logger.info(self.instrument_bmex + ': Waiting for historical data... ')
            sleep(5)
            continue
        logger.info(self.instrument_bmex + ': Starting machine learning computation...')
        while True:
            try:
                if datetime_minute_cached != datetime.utcnow().minute: # and self.history.get_run_completed() is True:
                    start_timer = timer()
                    timestamp_ = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:00')
                    timestamp = datetime.strptime(timestamp_, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc)
                    timestamp = timestamp.timetuple()
                    start_ts = time.mktime(timestamp)
                    timestamp = datetime.strptime(timestamp_, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc) + timedelta(minutes=(self.periods_pred))
                    timestamp = timestamp.timetuple()
                    end_ts = time.mktime(timestamp)
                    naming = str(self.instrument_bmex + "_pair_m1.csv")
                    p = None
                    counter = 0
                    while isinstance(p, pd.DataFrame) is False and counter < 5:
                        i, p, s2_mean = self.Main([naming, start_ts, end_ts, 'm'])
                        counter += 1
                    if counter >= 5:
                        logger.warning(self.instrument_bmex + ': Predictor: Error p format')
                        continue
                    elif s2_mean < 0.70:
                        self.p_verdict = 0
                        logger.info(self.instrument_bmex + ' Processing Time: ' + str(round(timer() - start_timer, 5)))
                        logger.info(self.instrument_bmex + ': Machine learning : UNCONCLUSIVE !')
                        self.ready = True
                        fails += 1
                        datetime_minute_cached = datetime.utcnow().minute
                        # print('shape: ', p.shape)
                        # print(p)
                        # print(datetime.datetime.fromtimestamp(p.loc[0, 'Timestamp']))
                        # print(datetime.datetime.fromtimestamp(p.loc[1, 'Timestamp']))
                        # print(datetime.fromtimestamp(p.loc[self.periods_pred, 'Timestamp']))
                        # print(p)
                    else:
                        if np.isnan(s2_mean):
                            self.p_verdict = 0
                        else:
                            # Count monotonic moves across the predicted
                            # closes; a fully monotonic run fixes the verdict.
                            temp = self.periods_pred - 1
                            temp_1 = p.loc[temp + 1, 'Close']
                            temp_2 = p.loc[temp + 1, 'Close']
                            j = 0
                            k = 0
                            while temp >= 0:
                                p_close_tx = p.loc[temp, 'Close']
                                if temp_1 < p_close_tx:
                                    j += 1
                                elif temp_2 > p_close_tx:
                                    k += 1
                                temp_1 = temp_2 = p_close_tx
                                temp -= 1
                            if j >= self.periods_pred:
                                self.p_verdict = -1
                            elif k >= self.periods_pred:
                                self.p_verdict = 1
                            else:
                                self.p_verdict = 0
                        logger.info(self.instrument_bmex + ' Processing Time: ' + str(round(timer() - start_timer, 5)))
                        if self.p_verdict == 0:
                            logger.info(self.instrument_bmex + ' -> Machine learning : NEUTRAL !')
                        elif self.p_verdict > 0:
                            logger.info(self.instrument_bmex + ' -> Machine learning : UP !')
                        else:
                            logger.info(self.instrument_bmex + ' -> Machine learning : DOWN !')
                        self.ready = True
                        datetime_minute_cached = datetime.utcnow().minute
                        fails = 0
                        logger.info(self.instrument_bmex + ' -> Machine learning / Non-gaussian metric : ' + str(round(s2_mean * 100, 2)) + "% (iter: " + str(i) + ")")
                sleep(0.1)
            except Exception as e:
                logger.error(str(e))
                sleep(1)
                pass
    def start_ml(self):
        # Launch the Engine loop as a daemon so it dies with the process.
        self.thread.daemon = True
        self.thread.start()
    def get_p_verdict(self):
        # Normalise the internal verdict to exactly -1, 0 or 1.
        if self.p_verdict > 0:
            verdict = 1
        elif self.p_verdict < 0:
            verdict = -1
        else:
            verdict = 0
        return verdict
| 5ymph0en1x/SyDOM | utils/predictor.py | predictor.py | py | 21,887 | python | en | code | 82 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logging... |
2285653529 | #Item 1 #############################################################################################################
import hashlib
from random import randint
def cadastrar(nome, senha):
    """Append one '<nome> <md5(senha)>' line to UsuariosCadastrados.txt.

    The password is stored only as its hex MD5 digest; mode "a" keeps the
    lines of every previous registration.
    """
    digest = hashlib.md5(senha.encode('utf-8')).hexdigest()
    with open("UsuariosCadastrados.txt", "a") as arquivo:
        arquivo.write(nome + " " + digest + "\n")
def md5(senha):
    """Return the hexadecimal MD5 digest of *senha* (UTF-8 encoded)."""
    return hashlib.md5(senha.encode('utf-8')).hexdigest()
def autenticar(nome, senha):
    """Check *nome*/*senha* against the in-memory registry and print the result.

    ``listaUsu`` is a flat [name, password, name, password, ...] list, so it is
    scanned pairwise.  The original ``listaUsu.index(nome)`` lookup could match
    a *password* stored at an odd index (when a password equals some user's
    name), validating against the wrong slot; iterating in steps of two fixes
    that.
    """
    for i in range(0, len(listaUsu) - 1, 2):
        if listaUsu[i] == nome and listaUsu[i + 1] == senha:
            print("Autentificacao efetuada com sucesso. Bem vindo ", nome)
            return
    print("Nome ou senha invalidos.")
# Registration loop: collects (name, password) pairs, keeping the plaintext
# values in listaUsu ([name, pass, name, pass, ...]) and the MD5 digests in
# listaHash, and appending each pair to UsuariosCadastrados.txt.
listaUsu = []
listaHash = []
a = 1
while a == 1:
    nomeCadast = input("Digite um nome (de 4 caracteres) para cadastro: ")
    senhaCadast = input("Digite uma senha (de 4 caracteres) para cadastro: ")
    ativa = 1
    if ativa ==1:
        # BUG FIX: the original test `(len(nome) or len(senha)) > 4` only ever
        # checked the name's length (the `or` returned the first non-zero
        # length); both fields must be checked explicitly.
        if len(nomeCadast) > 4 or len(senhaCadast) > 4:
            print("Nome ou Senha digitado esta fora dos limites.")
        else:
            listaUsu.append(nomeCadast)
            listaUsu.append(senhaCadast)
            senhaHash = md5(senhaCadast)
            listaHash.append(senhaHash)
            cadastrar(nomeCadast, senhaCadast)
    a = int(input("Digite (0) se quiser parar de fazer cadastros, ou (1) para continuar: "))
fazer = int(input("Digite (1) se deseja autentificar, ou (0) para sair: "))
if fazer == 1:
    nomeAutent = input("Digite seu nome para Login: ")
    senhaAutent = input("Digite sua senha para Login: ")
    autenticar(nomeAutent,senhaAutent)
print("\n")
##Item 2: quebra md5 ################################################################################################
#import hashlib
import string
import datetime
from datetime import timedelta
def quebraMd5(senha1, senha2, senha3, senha4):
    """Brute-force four MD5 hashes of 4-character passwords.

    Tries every combination of ``string.printable`` characters, printing each
    cracked password with the time it took, and the total time once all four
    are found.

    BUG FIX: the original ``break`` only exited the innermost loop, so after
    cracking every hash the remaining ~100**4 combinations were still
    enumerated; ``return`` stops the whole search immediately.
    """
    hashes = [senha1, senha2, senha3, senha4]
    startTime = datetime.datetime.now()
    tempo_comeco = datetime.datetime.now()
    x = 0
    for a in string.printable:
        for b in string.printable:
            for c in string.printable:
                for d in string.printable:
                    word = a + b + c + d
                    # `digest` instead of `hash`: avoid shadowing the builtin.
                    digest = hashlib.md5(word.encode("utf-8")).hexdigest()
                    if digest in hashes:
                        end_time = str(datetime.datetime.now() - startTime).split('.')[0]
                        print("Senha Hash quebrada com sucesso!!")
                        print("Senha original: ", word)
                        print("Codigo HASH: ", digest)
                        print("Tempo necessário: ", end_time)
                        print("\n")
                        startTime = datetime.datetime.now()
                        x = x + 1
                        if x >= len(hashes):
                            tempo_final = str(datetime.datetime.now() - tempo_comeco).split('.')[0]
                            print("Tempo total: ", tempo_final)
                            return
# Crack the four digests recorded during registration.
# NOTE(review): this indexes listaHash[0..3] directly, so it raises IndexError
# unless at least four users were registered above -- TODO confirm intended.
senha1 = listaHash[0]
senha2 = listaHash[1]
senha3 = listaHash[2]
senha4 = listaHash[3]
quebraMd5(senha1,senha2,senha3,senha4)
#Item 3 ##############################################################################################################
def crip(texto, chave):  # pode usar numeros tbm
    """Caesar-shift *texto* by *chave* positions.

    Lowercase letters wrap within a-z and digits wrap within 0-9; spaces are
    kept as-is and every other character is dropped from the output.
    """
    cifrado = []
    for simbolo in texto:
        if simbolo == " " or "a" <= simbolo <= "z":
            if simbolo == " ":
                cifrado.append(" ")
            else:
                codigo = ord(simbolo) + chave
                if codigo <= 122:
                    cifrado.append(chr(codigo))
                else:
                    # shifted past 'z': wrap back into the a-z range
                    cifrado.append(chr(96 + codigo - 122))
        if "0" <= simbolo <= "9":
            codigo = ord(simbolo) + chave
            if codigo <= 57:
                cifrado.append(chr(codigo))
            else:
                # shifted past '9': wrap back into the 0-9 range
                cifrado.append(chr(47 + codigo - 57))
    return "".join(cifrado)
# "Protect" each stored MD5 digest by Caesar-shifting it with a fresh random
# key in 1..9 (hex digests are lowercase [0-9a-f], so every char is shifted).
chaveRandom1 = randint(1,9)
senhaProte1 = crip(senha1,chaveRandom1)
chaveRandom2 = randint(1,9)
senhaProte2 = crip(senha2,chaveRandom2)
chaveRandom3 = randint(1,9)
senhaProte3 = crip(senha3,chaveRandom3)
chaveRandom4 = randint(1,9)
senhaProte4 = crip(senha4,chaveRandom4)
# Persist the shifted digests (keys are NOT stored, so they cannot be undone).
with open("HASHSModificados.txt", "a") as stream: # usa "w" para não armazenar mais de 1 palavra
    print(senhaProte1, senhaProte2, senhaProte3, senhaProte4, file=stream)
print("\n")
# listaUsu holds [name, pass, ...], so names sit at even indices 0, 2, 4, 6.
print(f"Novo Hash Protegido de {listaUsu[0]} - {senhaProte1}")
print(f"Novo Hash Protegido de {listaUsu[2]} - {senhaProte2}")
print(f"Novo Hash Protegido de {listaUsu[4]} - {senhaProte3}")
print(f"Novo Hash Protegido de {listaUsu[6]} - {senhaProte4}")
{
"api_name": "hashlib.md5",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
35001315098 | from ipywidgets import Box, HBox, VBox, FloatSlider, FloatProgress, Label, Layout
# One slider per class; the slider range [-5, 5] holds the raw (pre-softmax) score.
s1 = FloatSlider(description='Apple', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
s2 = FloatSlider(description='Horse', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
s3 = FloatSlider(description='Flower', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
s4 = FloatSlider(description='Car', min=-5, max=5, step=0.01, value=0, layout=Layout(width='90%'))
# Progress bars visualize each class's softmax probability in [0, 1].
p1 = FloatProgress(min=0, max=1, step=0.01, value=0)
p2 = FloatProgress(min=0, max=1, step=0.01, value=0)
p3 = FloatProgress(min=0, max=1, step=0.01, value=0)
p4 = FloatProgress(min=0, max=1, step=0.01, value=0)
# Labels mirror the same probabilities as text.
l1 = Label(value="0.00")
l2 = Label(value="0.00")
l3 = Label(value="0.00")
l4 = Label(value="0.00")
import numpy as np
# Raw class scores backing the widgets; mutated by the slider callback.
values = np.array([0.0, 0.0, 0.0, 0.0])
def softmax(i):
    """Return the softmax probability of class *i* over the global ``values``.

    The maximum score is subtracted before exponentiating: mathematically the
    result is unchanged, but it avoids ``exp`` overflow (and the resulting
    NaN) for large score values.
    """
    shifted = values - np.max(values)
    return np.exp(shifted[i]) / np.sum(np.exp(shifted))
def set_values():
    """Refresh every label and progress bar from the current softmax values."""
    labels = (l1, l2, l3, l4)
    bars = (p1, p2, p3, p4)
    for idx in range(4):
        labels[idx].value = "%.2f" % softmax(idx)
        bars[idx].value = softmax(idx)
def on_value_change(change):
    """Slider callback: record the new score at the matching index, then redraw."""
    for idx, slider in enumerate((s1, s2, s3, s4)):
        if change.owner == slider:
            values[idx] = change.new
    set_values()
# Re-render labels/bars whenever any slider's value changes.
s1.observe(on_value_change, names='value')
s2.observe(on_value_change, names='value')
s3.observe(on_value_change, names='value')
s4.observe(on_value_change, names='value')
def main():
    """Build and return the widget layout: sliders | progress bars | labels.

    Intended to be evaluated in a Jupyter notebook, where the returned HBox
    renders itself.
    """
    set_values()
    left_box = VBox([s1, s2, s3, s4], layout=Layout(width='50%'))
    middle_box = VBox([p1, p2, p3, p4])
    right_box = VBox([l1, l2, l3, l4])
    return HBox([left_box, middle_box, right_box])
if __name__ == "__main__":
    # NOTE(review): outside a notebook the return value is discarded, so
    # nothing is displayed when run as a script.
    main()
{
"api_name": "ipywidgets.FloatSlider",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Layout",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "ipywidgets.FloatSlider",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "ipywidge... |
34455554450 | # -*- coding: utf8 -*-
from django.db import models
class Timer(models.Model):
    """Timer model (Модель таймера): records when work on a user task started
    and ended, plus the resulting elapsed duration."""
    # Start timestamp; nullable/blank so a timer row can exist before it runs.
    start_time = models.DateTimeField(
        verbose_name="Время начала",
        null=True,
        blank=True
    )
    # End timestamp; set once the timer is stopped.
    end_time = models.DateTimeField(
        verbose_name="Время конца",
        null=True,
        blank=True
    )
    # Total time spent on the task -- presumably end_time - start_time, but
    # nothing in this model derives it; TODO confirm it is set by the caller.
    result_time = models.DurationField(
        verbose_name="Время затраченное на задачу",
        null=True,
        blank=True
    )
    # Owning task/user link; deleting the TaskUser cascades to its timers.
    task_user = models.ForeignKey(
        'TaskUser',
        on_delete=models.CASCADE,
        related_name='timers_task_user',
        verbose_name='Задачи пользователя'
    )
    def __str__(self):
        return f'Время начала: {self.start_time}. Время конца: {self.end_time}'
    class Meta:
        db_table = 'timers'
        app_label = 'models_app'
        verbose_name = 'Таймер'
        verbose_name_plural = 'Таймеры'
| Aplles/project_tracker | models_app/models/timer/models.py | models.py | py | 1,025 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name... |
3156449566 | #!/usr/bin/python3
# Script responsible for removing extra tags of nightly images
# QUAY_ACCESS_TOKEN is needed to set as environment variable before executing script
# The access token is used for authentication against the quay api.
import os
import json
import requests
from dateutil.relativedelta import *
from dateutil.easter import *
from dateutil.rrule import *
from dateutil.parser import *
from datetime import *
import argparse
# Fail fast if the Quay API token is missing: every request below needs it.
try:
    QUAY_ACCESS_TOKEN = os.environ['QUAY_ACCESS_TOKEN']
except KeyError as e:
    print("QUAY_ACCESS_TOKEN environment variable is not set. Please, set it before running the script.")
    exit('Script exiting....')
# Target registry coordinates and the nightly image repositories to prune.
REGISTRY = "quay.io"
NAMESPACE = "kiegroup"
IMAGES={"kogito-data-index-nightly","kogito-quarkus-ubi8-nightly",
        "kogito-quarkus-jvm-ubi8-nightly","kogito-quarkus-ubi8-s2i-nightly",
        "kogito-springboot-ubi8-nightly","kogito-springboot-ubi8-s2i-nightly",
        "kogito-jobs-service-nightly","kogito-management-console-nightly",
        "kogito-cloud-operator-nightly"
        }
def get_image_tags(image):
    '''
    Get all the available tags for the image
    :param image: image name whose tags needs to be fetched
    :return: tags: List of a struct with tagName and lastModified as fields
    '''
    tags = []
    # NOTE(review): the HTTP status is never checked; on a non-200 response
    # json.loads below fails with an opaque error -- consider r.raise_for_status().
    r = requests.get('https://{0}/api/v1/repository/{1}/{2}/tag/?onlyActiveTags=true'.format(REGISTRY,NAMESPACE,image) , headers={'content-type': 'application/json', 'Authorization': 'Bearer ' + QUAY_ACCESS_TOKEN })
    image_metadata= json.loads(r.text)
    num_tags = len(image_metadata['tags'])
    for i in range(num_tags):
        tags.append({
            "tagName" : image_metadata['tags'][i]['name'],
            # dateutil's parse() turns the API timestamp into a datetime,
            # making lastModified sortable downstream.
            "lastModified" : parse(image_metadata['tags'][i]['last_modified'])
        })
    return tags
def delete_image_tags(image, tags):
    '''
    Deletes the extra image tags from the repository
    :param image: Image whose tags needs to be deleted
    :param tags: List of struct with `tagName` and `last_modified` as fields for the image that needs to be deleted
    '''
    if len(tags) == 0:
        print("Image {} does not have extra tags that needs to be deleted".format(image))
    else:
        for tag in tags:
            # NOTE(review): the DELETE response status is ignored, so failed
            # deletions are still counted as successes in the message below.
            requests.delete('https://{0}/api/v1/repository/{1}/{2}/tag/{3}'.format(REGISTRY,NAMESPACE,image,tag['tagName']) , headers={'content-type': 'application/json', 'Authorization': 'Bearer ' + QUAY_ACCESS_TOKEN })
        print("Successfully deleted {} tags for the image {}".format(len(tags),image))
def get_and_delete_old_tags(image, max_tags):
    """Prune *image* down to its *max_tags* most recent tags.

    Fetches every tag via get_image_tags(), always preserves 'latest',
    and hands anything older than the newest *max_tags* entries to
    delete_image_tags().

    :param image: image name whose old tags needs to be deleted
    :param max_tags: number of most-recent tags to keep
    """
    tags = [tag for tag in get_image_tags(image) if tag["tagName"] != "latest"]
    # Oldest first, so the slice below selects the stale tags.
    tags.sort(key=lambda tag: tag["lastModified"])
    surplus = len(tags) - max_tags
    stale = tags[:surplus] if surplus > 0 else []
    delete_image_tags(image, stale)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Removes extra tags from the registry')
parser.add_argument('--max-tags', dest='max_tags', default=50,type=int, help='Defines the maximum number of tags for the image to be available, defaults to 10')
args = parser.parse_args()
for image in IMAGES:
get_and_delete_old_tags(image,args.max_tags)
| kiegroup/kogito-pipelines | tools/clean-nightly-tags.py | clean-nightly-tags.py | py | 3,786 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "requests.delete",
"line_n... |
25482260523 | import cv2
import time
import mediapipe as mp
cap = cv2.VideoCapture(1)  # camera index 1 -- presumably an external webcam; TODO confirm
mp_hands = mp.solutions.hands
hands = mp_hands.Hands() #hands.py ctrl+left mouse
mp_draw = mp.solutions.drawing_utils
# Frame timestamps used to derive the displayed FPS.
new_frame_time = 0
prev_frame_time = 0
while True:
    ret, frame = cap.read()
    # FPS = 1 / elapsed time since the previous frame.
    new_frame_time = time.time()
    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    cv2.putText(frame, f'FPS: {int(fps)}', (40, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
    # MediaPipe expects RGB input while OpenCV captures BGR.
    img_RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    result = hands.process(img_RGB)
    # print(result.multi_hand_landmarks)
    if result.multi_hand_landmarks: # landmark will give information about x and y coordinates
        for hand_landmark in result.multi_hand_landmarks:
            for id, landmark in enumerate(hand_landmark.landmark):
                # print(id, landmark)
                # To get the pixel
                # Landmark coordinates are normalized; scale to pixel positions.
                height, width, channel = frame.shape
                coordinates_x, coordinates_y = int(landmark.x * width), int(landmark.y * height)
                print(id, coordinates_x, coordinates_y)
            mp_draw.draw_landmarks(frame, hand_landmark, mp_hands.HAND_CONNECTIONS)
    cv2.imshow('Frame', frame)
    # Quit when the user presses 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| atuad7535/Volume_Control_Using_Hand_Gesture | Hand_Tracking.py | Hand_Tracking.py | py | 1,333 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mediapipe.solutions",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "time.... |
74353872105 | import re
import phonenumbers
from django import forms
from django.utils.translation import ugettext_lazy as _
from kavenegar import *
from sentry import http
from sentry.plugins.bases.notify import NotificationPlugin
import sentry_kavenegar
DEFAULT_REGION = 'IR'  # default country (Iran) for parsing numbers without a prefix
MAX_SMS_LENGTH = 160  # single-SMS character limit; longer bodies are truncated
def validate_phone(phone):
    """Return True when *phone* parses as a possible and valid number.

    Numbers without a country prefix are interpreted in DEFAULT_REGION.
    """
    try:
        parsed = phonenumbers.parse(phone, DEFAULT_REGION)
    except phonenumbers.NumberParseException:
        return False
    return (phonenumbers.is_possible_number(parsed)
            and phonenumbers.is_valid_number(parsed))
def clean_phone(phone):
    """Normalize *phone* to E.164 form.

    This could raise, but should have been checked with validate_phone first.
    """
    parsed = phonenumbers.parse(phone, DEFAULT_REGION)
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)
def basic_auth(user, password):
    """Return an HTTP Basic ``Authorization`` header value for user/password.

    BUG FIX: the original used the Python-2-only ``str.encode('base64')``
    codec (stripping the newlines that codec inserts); ``base64.b64encode``
    is the portable equivalent and emits no newlines.
    """
    import base64
    token = base64.b64encode('{0}:{1}'.format(user, password).encode('utf-8'))
    return 'Basic ' + token.decode('ascii')
def split_sms_to(data):
    """Split a comma- and/or whitespace-separated recipient string into a
    set of non-empty tokens."""
    return {token for token in re.split(r'\s*,\s*|\s+', data) if token}
class KavenegarConfigurationForm(forms.Form):
    """Per-project plugin settings: Kavenegar API key, sender id and up to
    ten recipient phone numbers."""
    api_key = forms.CharField(label=_('API KEY'), required=True,
                              widget=forms.TextInput(attrs={'class': 'span6'}))
    sms_from = forms.CharField(label=_('SMS FROM'), required=True,
                               widget=forms.TextInput(attrs={'class': 'span6'}))
    sms_to = forms.CharField(label=_('SMS To #s'), required=True,
                             help_text=_('Recipient(s) phone numbers separated by commas or lines'),
                             widget=forms.Textarea(attrs={'placeholder': 'e.g. +98935XXXXXXX, 0912XXXXXXXX'}))
    def clean_sms_to(self):
        # Normalize the free-form recipient list: split, cap at 10, validate
        # each number, then store them canonicalized (E.164), sorted and
        # comma-joined.
        data = self.cleaned_data['sms_to']
        phones = split_sms_to(data)
        if len(phones) > 10:
            raise forms.ValidationError('Max of 10 phone numbers, {0} were given.'.format(len(phones)))
        for phone in phones:
            if not validate_phone(phone):
                raise forms.ValidationError('{0} is not a valid phone number.'.format(phone))
        return ','.join(sorted(map(clean_phone, phones)))
    def clean(self):
        # TODO: Ping Kavenegar and check credentials (?)
        return self.cleaned_data
class KavenegarPlugin(NotificationPlugin):
    """Sentry notification plugin that sends event alerts as SMS messages
    through the Kavenegar gateway."""
    author = 'Amir Asaran'
    author_url = 'https://github.com/amirasaran/sentry-kavenegar'
    version = sentry_kavenegar.VERSION
    description = 'A plugin for Sentry, which sends SMS notifications via Kavenegar'
    resource_links = (
        ('Documentation', 'https://github.com/amirasaran/sentry-kavenegar/blob/master/README.md'),
        ('Bug Tracker', 'https://github.com/amirasaran/sentry-kavenegar/issues'),
        ('Source', 'https://github.com/amirasaran/sentry-kavenegar'),
        ('Kavenegar', 'https://www.kavenegar.com/'),
    )
    slug = 'kavenegar'
    title = _('Kavenegar (SMS)')
    conf_title = title
    conf_key = 'kavenegar'
    project_conf_form = KavenegarConfigurationForm
    def is_configured(self, project, **kwargs):
        # Configured once both the API key and at least one recipient are set.
        return all([self.get_option(o, project) for o in (
            'api_key', 'sms_to')])
    def get_send_to(self, *args, **kwargs):
        # This doesn't depend on email permission... stuff.
        return True
    def notify_users(self, group, event, **kwargs):
        """Send a truncated one-line summary of *event* to every configured
        recipient, collecting per-number failures and re-raising at the end."""
        project = group.project
        # NOTE(review): str.encode() returns bytes, and formatting bytes into
        # a str like this only behaves under Python 2 -- this code appears to
        # target Python 2; TODO confirm before running on Python 3.
        body = "Sentry [{0}] {1}: {2}".format(
            project.name.encode("utf-8"),
            event.group.get_level_display().upper().encode("utf-8"),
            event.title.encode("utf-8").splitlines()[0],
        )
        # Keep within a single SMS.
        body = body[:MAX_SMS_LENGTH]
        api_key = self.get_option('api_key', project)
        sms_to = self.get_option('sms_to', project)
        sms_from = self.get_option('sms_from', project)
        if not sms_to:
            return
        sms_to = split_sms_to(sms_to)
        instance = KavenegarAPI(api_key)
        errors = []
        for phone in sms_to:
            if not phone:
                continue
            try:
                phone = clean_phone(phone)
                params = {
                    'sender': sms_from,
                    'receptor': phone,
                    'message': body
                }
                instance.sms_send(
                    params
                )
            except Exception as e:
                # Collect failures so one bad number doesn't stop the rest.
                errors.append(e)
        if errors:
            if len(errors) == 1:
                raise errors[0]
            # TODO: multi-exception
            raise Exception(errors)
| amirasaran/sentry-kavenegar | sentry_kavenegar/models.py | models.py | py | 4,522 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "phonenumbers.parse",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "phonenumbers.NumberParseException",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "phonenumbers.is_possible_number",
"line_number": 22,
"usage_type": "call"
},
... |
6673487305 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test config.py module."""
# STDLIB
import os
# THIRD-PARTY
import numpy as np
import pytest
# SYNPHOT
from synphot.config import conf as synconf
from synphot.utils import generate_wavelengths
# LOCAL
from stsynphot import config
from stsynphot.stio import get_latest_file, irafconvert
class TestOverwriteSynphot:
    """Test if overwriting ``synphot`` defaults is successful."""
    def setup_class(self):
        # For some reason, this does not automatically execute during testing.
        config.overwrite_synphot_config(config.conf.rootdir)
        # synphot's Vega spectrum path, now expected under stsynphot's rootdir.
        self.vegafile = synconf.vega_file
    def test_dirname(self):
        assert self.vegafile.startswith(config.conf.rootdir)
    @pytest.mark.remote_data
    def test_isfile(self):
        if self.vegafile.startswith(('ftp', 'http')):
            # This is the case on Travis CI
            pytest.xfail('Cannot test this over FTP or HTTP')
        else:
            assert os.path.isfile(self.vegafile)
@pytest.mark.remote_data
class TestConfigChanges:
    """Exercise setting and resetting stsynphot configuration items."""
    def setup_class(self):
        # Snapshot of the default reference dictionary, compared after resets.
        self.def_dict = config.getref()
    @pytest.mark.parametrize(
        ('cfgname', 'new_val'),
        [('graphtable', 'mtab$n9i1408hm_tmg.fits'),
         ('comptable', 'mtab$n9i1408im_tmc.fits'),
         ('thermtable', 'mtab$n5k15531m_tmt.fits')])
    def test_tables_area(self, cfgname, new_val):
        # Same as config.conf.cfgname = new_val
        setattr(config.conf, cfgname, new_val)
        assert getattr(config.conf, cfgname) == new_val
        # Reset to default
        config.conf.reset(cfgname)
        # IRAF-style paths (mtab$...) need conversion/expansion before comparing.
        old_expanded_val = get_latest_file(irafconvert(
            getattr(config.conf, cfgname)))
        assert old_expanded_val == self.def_dict[cfgname]
    def test_area(self):
        config.conf.area = 1
        assert config.conf.area == 1
        # Reset to default
        config.conf.reset('area')
        assert config.conf.area == self.def_dict['area']
    def test_waveset(self):
        # Replace the default waveset with a small linear 3000-5000 A grid.
        w = generate_wavelengths(
            minwave=3000, maxwave=5000, num=100, log=False)
        config.conf.waveset_array = w[0].value.tolist()
        config.conf.waveset = w[1]
        np.testing.assert_allclose(
            [config.conf.waveset_array[0], config.conf.waveset_array[-1]],
            [3000, 4980])
        assert (config.conf.waveset ==
                'Min: 3000, Max: 5000, Num: 100, Delta: None, Log: False')
        # Reset to default
        config.conf.reset('waveset_array')
        config.conf.reset('waveset')
        np.testing.assert_allclose(
            [config.conf.waveset_array[0], config.conf.waveset_array[-1]],
            [500, 25989.72879567])
        assert config.conf.waveset == self.def_dict['waveset']
| spacetelescope/stsynphot_refactor | stsynphot/tests/test_config.py | test_config.py | py | 2,785 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "stsynphot.config.overwrite_synphot_config",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "stsynphot.config",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "stsynphot.config.conf",
"line_number": 24,
"usage_type": "attribute"
},
{
... |
10724474044 | import xmltodict
import json
# Loading and parsing the xml file.
# Loading and parsing the xml file.
with open (r'q1.xml', "r") as xml_file:
    xml_data = xml_file.read()
print(xml_data)
# Convert xml to json
# xmltodict.parse returns a dict mirroring the XML tree; json.dumps then
# serializes it with 4-space indentation.
json_data = json.dumps(xmltodict.parse(xml_data), indent=4)
print(json_data)
with open("output.json", "w") as json_file:
    json_file.write(json_data)
{
"api_name": "json.dumps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "xmltodict.parse",
"line_number": 10,
"usage_type": "call"
}
] |
958022852 | #!/usr/bin/env python3
"""An Implement of an autoencoder with pytorch.
This is the template code for 2020 NIAC https://naic.pcl.ac.cn/.
The code is based on the sample code with tensorflow for 2020 NIAC and it can only run with GPUS.
Note:
1.This file is used for designing the structure of encoder and decoder.
2.The neural network structure in this model file is CsiNet, more details about CsiNet can be found in [1].
[1] C. Wen, W. Shih and S. Jin, "Deep Learning for Massive MIMO CSI Feedback", in IEEE Wireless Communications Letters, vol. 7, no. 5, pp. 748-751, Oct. 2018, doi: 10.1109/LWC.2018.2818160.
3.The output of the encoder must be the bitstream.
"""
import numpy as np
import h5py
import torch
from transformer import *
from dataloader import *
from loss import *
import os
import torch.nn as nn
import pickle
import random
from copy import deepcopy
from util import *
def save_obj(obj, name):
    """Pickle *obj* to '<name>.pkl' using the highest pickle protocol."""
    with open('{0}.pkl'.format(name), 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at '<name>.pkl'."""
    with open('{0}.pkl'.format(name), 'rb') as handle:
        return pickle.load(handle)
# Parameters for training
torch.backends.cudnn.benchmark=True
np.set_printoptions(suppress=True)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
use_single_gpu = True # select whether using single gpu or multiple gpus
torch.manual_seed(1)
batch_size = 16
epochs = 8000
learning_rate = 1e-3
num_workers = 4
print_freq = 3200 # print frequency (default: 60)
# parameters for data
# feedback_bits = 48 # sim2 = 0.26814688715868884, multi2 = 2.3217122101795313, multi_div_sim_2 = 8.658359732535498
feedback_bits = 48 # length of the quantized feedback bitstream
B = 2
size_packet = 100 # samples decoded per evaluation batch
# CSI tensor dimensions: RX antennas x TX antennas x delay taps.
NUM_RX = 4
NUM_TX = 32
NUM_DELAY = 32
NUM_SAMPLE_TRAIN = 4000
def norm_data(x, num_sample, num_rx, num_tx, num_delay):
    """Scale each sample so its largest |entry| maps to 0.5, preserving shape.

    x has shape (num_sample, num_rx, num_tx, num_delay, 2); each sample is
    divided by twice its own maximum absolute value.
    """
    flat = np.reshape(x, [num_sample, num_rx * num_tx * num_delay * 2])
    peak = np.abs(flat).max(axis=1, keepdims=True)
    scaled = flat / peak / 2.0
    return np.reshape(scaled, [num_sample, num_rx, num_tx, num_delay, 2])
# Model construction
model = AutoEncoder(feedback_bits=48, dropout=0.1)
# model.encoder.load_state_dict(torch.load('submit_pt/encoder_2.pth.tar')['state_dict'])
# model.decoder.load_state_dict(torch.load('submit_pt/generator_2.pth.tar')['state_dict'])
if use_single_gpu:
    model = model.cuda()
else:
    # DataParallel will divide and allocate batch_size to all available GPUs
    model = torch.nn.DataParallel(model).cuda()
import scipy.io as scio
criterion = SmiLoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
"""
scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
                                    T_max=epochs * len(train_loader),
                                    T_warmup=epochs//20 * len(train_loader),
                                    eta_min=1e-6)
"""
# Load the channel matrices and split the complex values into a trailing
# real/imag axis -> shape (num_sample, rx, tx, delay, 2).
data_train = h5py.File('data/H2_32T4R.mat', 'r')
data_train = np.transpose(data_train['H2_32T4R'][:])
data_train = data_train[:, :, :, :, np.newaxis]
data_train = np.concatenate([data_train['real'], data_train['imag']], 4) # 500 4 32 32 2
# data_train = np.reshape(data_train, [NUM_SAMPLE_TRAIN, NUM_RX* NUM_TX, NUM_DELAY* 2, 1])
# x_train = norm_data(data_train, NUM_SAMPLE_TRAIN, NUM_RX, NUM_TX, NUM_DELAY)
data_train = data_train.astype(np.float32)
x_train = data_train
x_test = data_train
"""
x_test = x_test[1000:,:,:,:]
"""
# Unique zero/non-zero antenna activity patterns -- not referenced below;
# presumably kept for analysis, TODO confirm.
x_train_hat = np.transpose(x_train, (0,3,1,2,4)).reshape(-1, 32, 256)
x_train_paterns = np.unique((np.sum(np.abs(x_train_hat), axis=2) != 0).astype(np.float32), axis=0)
# dataLoader for training
train_dataset = DatasetFolder(x_train, data_an=True)
print(train_dataset.__len__())
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=num_workers)
def random_cutmix(x):
    """Keep the first 16 slices along dim 3, shuffle the last 16 across the batch.

    Returns a new tensor of the same shape whose second half (dim 3) comes
    from a random permutation of the batch.
    """
    keep = x[:, :, :, :16, :]
    mix = x[:, :, :, 16:, :]
    order = list(range(x.size(0)))
    random.shuffle(order)
    return torch.cat([keep, mix[order]], dim=3)
def random_mixup(x, num=3):
    """Blend *num* randomly batch-shuffled copies of x with random weights.

    The weights are drawn from a normal distribution and normalized to sum
    to 1, so they may be negative or exceed 1.
    """
    w = np.random.randn(num)
    w = w / w.sum()
    mixed = x * w[0]
    for coef in w[1:]:
        perm = list(range(x.size(0)))
        random.shuffle(perm)
        mixed = mixed + x[perm] * coef
    return mixed
best_loss = 100000
"""
scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
                                    T_max=epochs * len(train_loader),
                                    T_warmup=epochs//20 * len(train_loader),
                                    eta_min=1e-6)
"""
print('----', len(train_loader))
# Main loop: one optimizer step per batch, then a decoder-only evaluation on
# random feedback bitstreams every epoch; checkpoints on metric improvement.
for epoch in range(epochs):
    # Manual LR schedule: drop to learning_rate * 0.1 after epoch 200.
    if epoch == 200:
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate * 0.1
    # model training
    model.train()
    total_loss = []
    for i, x in enumerate(train_loader):
        input = x
        input = input.cuda()
        # compute output
        output = model(input)
        loss_list = criterion(output, input, epoch=epoch)
        # loss_list[0] is excluded from the optimized objective but still logged.
        loss = sum(loss_list[1:])
        # Weight each batch loss by its size so the epoch average below is exact.
        total_loss.append([item.detach().cpu().numpy()*input.size(0) for item in loss_list])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss:.6f}\t'.format(
                epoch, i, len(train_loader), loss=loss.item()))
    # model evaluating
    total_loss = np.sum(np.array(total_loss), axis=0) / len(train_dataset)
    print('train loss:{}, other loss:{}\t'.format(total_loss[1], total_loss[2]))
    model.eval()
    total_loss = []
    totalNMSE = 0
    y_test = []
    count = 0
    if epoch%1==0:
        with torch.no_grad():
            # Decode 4000 random bitstreams in packets of size_packet.
            for idx in range(int(4000 / size_packet)):
                x = np.random.randint(2, size=(size_packet,feedback_bits))
                x = torch.from_numpy(x)
                x = x.cuda()
                """
                B,_,_,_ = x.size()
                x_var = torch.mean((x.view(B,126,-1).detach() - 0.5)**2,dim = -1)
                x_sort = torch.sort(-x_var,dim = -1)[1] + torch.arange(B).unsqueeze(-1).to(x_var.device)*126
                x_sort = x_sort.view(-1)
                x = x.view(B*126,128,2)
                input = torch.index_select(x, 0, x_sort).view(B,2,126,128)
                """
                input = x
                output = model.decoder(input) # bx4x32x32x2
                output = output.detach().cpu().numpy()
                if idx == 0:
                    output_all = output
                else:
                    output_all = np.concatenate([output_all, output], axis=0)
    new_output_all = output_all
    # Compare generated channels against the training set in the complex domain.
    real = x_test[:,:,:,:,0] + x_test[:,:,:,:,1]*1j
    fake = new_output_all[:,:,:,:,0] + new_output_all[:,:,:,:,1]*1j
    sim_1, multi_1, multi_div_sim_1 = K_nearest(real, fake, NUM_RX, NUM_TX, NUM_DELAY, 2)
    print('sim:{}, multi:{}, multi_div_sim_1:{}'.format(sim_1, multi_1, multi_div_sim_1))
    # Checkpoint encoder/decoder whenever the metric improves (lower is better).
    if multi_div_sim_1 < best_loss:
        modelSave2 = './submit_pt/generator_2.pth.tar'
        torch.save({'state_dict': model.decoder.state_dict(), }, modelSave2)
        modelSave2 = './submit_pt/encoder_2.pth.tar'
        torch.save({'state_dict': model.encoder.state_dict(), }, modelSave2)
        print("Model saved")
        best_loss = multi_div_sim_1
| China-ChallengeHub/oppo_0.71 | trainer2.py | trainer2.py | py | 7,400 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pickle.dump",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.backends",... |
40858491511 | #!/usr/bin/env python
import sys
import fitsio
import healpy
import numpy as np
import scipy as sp
import argparse
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from picca.data import forest
from picca.data import delta
from picca import io
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--plate', type = int, default = None, required=True,
help = 'Plate of spectrum')
parser.add_argument('--mjd', type = int, default = None, required=True,
help = 'Modified Julian Date of spectrum')
parser.add_argument('--fiberid', type = int, default = None, required=True,
help = 'fiber of spectrum')
parser.add_argument('--drq', type = str, default = None, required=True,
help = 'DRQ file')
parser.add_argument('--nside', type = int, default = 16, required=False,
help = 'healpix nside')
parser.add_argument('--spectrum', type = str, default = None, required=True,
help = 'data directory for all the spectra')
parser.add_argument('--no-project', action="store_true", required=False,
help = 'do not project out continuum fitting modes')
parser.add_argument('--in-dir',type = str,default=None,required=True,
help='data directory')
parser.add_argument('--lambda-min',type = float,default=3600.,required=False,
help='lower limit on observed wavelength [Angstrom]')
parser.add_argument('--lambda-max',type = float,default=5500.,required=False,
help='upper limit on observed wavelength [Angstrom]')
parser.add_argument('--lambda-rest-min',type = float,default=1040.,required=False,
help='lower limit on rest frame wavelength [Angstrom]')
parser.add_argument('--lambda-rest-max',type = float,default=1200.,required=False,
help='upper limit on rest frame wavelength [Angstrom]')
parser.add_argument('--rebin',type = int,default=3,required=False,
help='rebin wavelength grid by combining this number of adjacent pixels (ivar weight)')
parser.add_argument('--mode',type = str,default='pix',required=False,
help='open mode: pix, spec, spcframe')
parser.add_argument('--dla-vac',type = str,default=None,required=False,
help='dla catalog file')
parser.add_argument('--dla-mask',type = float,default=0.8,required=False,
help='lower limit on the DLA transmission. Transmissions below this number are masked')
parser.add_argument('--mask-file',type = str,default=None,required=False,
help='Path to file to mask regions in lambda_OBS and lambda_RF. In file each line is: region_name region_min region_max (OBS or RF) [Angstrom]')
parser.add_argument('--flux-calib',type = str,default=None,required=False,
help='Path to file to previously produced picca_delta.py file to correct for multiplicative errors in the flux calibration')
parser.add_argument('--ivar-calib',type = str,default=None,required=False,
help='Path to previously produced picca_delta.py file to correct for multiplicative errors in the pipeline inverse variance calibration')
args = parser.parse_args()
### forest args
forest.lmin = np.log10(args.lambda_min)
forest.lmax = np.log10(args.lambda_max)
forest.lmin_rest = np.log10(args.lambda_rest_min)
forest.lmax_rest = np.log10(args.lambda_rest_max)
forest.rebin = args.rebin
forest.dll = args.rebin*1e-4
forest.dla_mask = args.dla_mask
### Get Healpy pixel of the given QSO
objs = {}
ra,dec,zqso,thid,plate,mjd,fid = io.read_drq(args.drq,0.,1000.,keep_bal=True)
cut = (plate==args.plate) & (mjd==args.mjd) & (fid==args.fiberid)
if cut.sum()==0:
print("Object not in drq")
sys.exit()
ra = ra[cut]
dec = dec[cut]
zqso = zqso[cut]
thid = thid[cut]
plate = plate[cut]
mjd = mjd[cut]
fid = fid[cut]
phi = ra
th = sp.pi/2.-dec
pix = healpy.ang2pix(args.nside,th,phi)
### Get data
data = None
if args.mode == "pix":
data = io.read_from_pix(args.in_dir,pix[0],thid, ra, dec, zqso, plate, mjd, fid, order=None, log=None)
elif args.mode in ["spec","corrected-spec"]:
data = io.read_from_spec(args.in_dir,thid, ra, dec, zqso, plate, mjd, fid, order=None, mode=args.mode,log=None)
elif args.mode =="spcframe":
data = io.read_from_spcframe(args.in_dir,thid, ra, dec, zqso, plate, mjd, fid, order=None, mode=args.mode, log=None)
if data is None:
print("Object not in in_dir")
sys.exit()
else:
data = data[0]
### Correct multiplicative flux calibration
if (args.flux_calib is not None):
try:
vac = fitsio.FITS(args.flux_calib)
head = vac[1].read_header()
ll_st = vac[1]['loglam'][:]
st = vac[1]['stack'][:]
w = (st!=0.)
forest.correc_flux = interp1d(ll_st[w],st[w],fill_value="extrapolate")
vac.close()
except:
print(" Error while reading flux_calib file {}".format(args.flux_calib))
sys.exit(1)
### Correct multiplicative pipeline inverse variance calibration
if (args.ivar_calib is not None):
try:
vac = fitsio.FITS(args.ivar_calib)
ll = vac[2]['LOGLAM'][:]
eta = vac[2]['ETA'][:]
forest.correc_ivar = interp1d(ll,eta,fill_value="extrapolate",kind="nearest")
vac.close()
except:
print(" Error while reading ivar_calib file {}".format(args.ivar_calib))
sys.exit(1)
### Get the lines to veto
usr_mask_obs = None
usr_mask_RF = None
usr_mask_RF_DLA = None
if (args.mask_file is not None):
try:
usr_mask_obs = []
usr_mask_RF = []
usr_mask_RF_DLA = []
with open(args.mask_file, 'r') as f:
loop = True
for l in f:
if (l[0]=='#'): continue
l = l.split()
if (l[3]=='OBS'):
usr_mask_obs += [ [float(l[1]),float(l[2])] ]
elif (l[3]=='RF'):
usr_mask_RF += [ [float(l[1]),float(l[2])] ]
elif (l[3]=='RF_DLA'):
usr_mask_RF_DLA += [ [float(l[1]),float(l[2])] ]
else:
raise
usr_mask_obs = np.log10(np.asarray(usr_mask_obs))
usr_mask_RF = np.log10(np.asarray(usr_mask_RF))
usr_mask_RF_DLA = np.log10(np.asarray(usr_mask_RF_DLA))
if usr_mask_RF_DLA.size==0:
usr_mask_RF_DLA = None
except:
print(" Error while reading mask_file file {}".format(args.mask_file))
sys.exit(1)
### Veto lines
if not usr_mask_obs is None:
if ( usr_mask_obs.size+usr_mask_RF.size!=0):
data.mask(mask_obs=usr_mask_obs , mask_RF=usr_mask_RF)
### Correct for DLAs
if not args.dla_vac is None:
print("adding dlas")
dlas = io.read_dlas(args.dla_vac)
for p in data:
for d in data[p]:
if d.thid in dlas:
for dla in dlas[d.thid]:
data.add_dla(dla[0],dla[1],usr_mask_RF_DLA)
### Get delta from picca_delta
done_delta = None
f = args.spectrum+"/delta-"+str(pix[0])+".fits.gz"
hdus = fitsio.FITS(f)
ds = [delta.from_fitsio(h) for h in hdus[1:]]
for d in ds:
if (d.plate==args.plate) and (d.mjd==args.mjd) and (d.fid==args.fiberid):
d.project()
done_delta = d
hdus.close()
break
if done_delta is None:
hdus.close()
print("Object not in spectrum")
sys.exit()
### Observed l
plt.errorbar(10**data.ll,data.fl,linewidth=2,color='black')
plt.errorbar(10**done_delta.ll,done_delta.co,linewidth=4,color='red')
plt.xlabel(r'$\lambda_{\mathrm{Obs.}} \, [\mathrm{\AA}]$',fontsize=30)
plt.ylabel(r'$f \, [10^{-19} \mathrm{W \, m^{-2} \, nm^{-1}}]$',fontsize=30)
plt.grid()
plt.show()
### RF l
plt.errorbar(10**data.ll/(1.+done_delta.zqso),data.fl,linewidth=4,color='black')
plt.errorbar(10**done_delta.ll/(1.+done_delta.zqso),done_delta.co,linewidth=4,color='red')
plt.xlabel(r'$\lambda_{\mathrm{R.F.}} \, [\mathrm{\AA}]$',fontsize=30)
plt.ylabel(r'$f \, [10^{-19} \mathrm{W \, m^{-2} \, nm^{-1}}]$',fontsize=30)
plt.grid()
plt.show()
| vserret/picca | tutorials/picca_plotSpec.py | picca_plotSpec.py | py | 8,731 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "picca.data.forest.lmin",
"line_number": 82,
"usage_type": "attribute"... |
70593996584 | import math
import os
from itertools import count, cycle
import json
#MUST BE INSTALLED VIA PIP
import tkinter
from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageTk
#-------------------------------------------------------------------------------
#!--- VARS FOR QUESTY STUFF
# goat gifs
goat1 = "./goats/goat1.gif"
goat2 = "./goats/goat2.gif"
goat3 = "./goats/goat3.gif"
current_goat = goat1
# Import Json file to stats
with open("stats.json") as stats_file:
stat_import = json.load(stats_file)
current_level = stat_import["current_level"]
current_xp = stat_import["current_xp"]
needed_xp = stat_import["needed_xp"] * math.ceil(current_level/2)
# Check goat clothes
if current_level <=3:
current_goat = goat1
elif current_level >=4 and current_level <=6:
current_goat = goat2
else:
current_goat = goat3
#DEBUG print(current_goat)
#-------------------------------------------------------------------------------
#!--- ALLOW ANIMATED GIFS
class ImageLabel(tkinter.Label):
def load(self, im):
if isinstance(im, str):
im = Image.open(im)
frames = []
try:
for i in count(1):
frames.append(ImageTk.PhotoImage(im.copy()))
im.seek(i)
except EOFError:
pass
self.frames = cycle(frames)
try:
self.delay = im.info['duration']
except:
self.delay = 100
if len(frames) == 1:
self.config(image=next(self.frames))
else:
self.next_frame()
def unload(self):
self.config(image=None)
self.frames = None
def next_frame(self):
if self.frames:
self.config(image=next(self.frames))
self.after(self.delay, self.next_frame)
#-------------------------------------------------------------------------------
#!--- FUNCTIONS
# Get correct list from selected dropdown option
def display_list(choice):
choice = dropdown_var.get()
print(choice)
# Grab tasks from text file
file_to_open ="lists/" + choice + ".txt"
print(file_to_open)
with open(file_to_open,"r") as tasklist_file:
current_tasklist = tasklist_file.read().splitlines()
task_list.delete(0,END)
for i in current_tasklist:
task_list.insert(END," " + i)
# Checks current experience gain against required XP to level
def check_xp():
global current_level
global current_xp
global needed_xp
global current_goat
if current_xp >= needed_xp:
current_level +=1
current_xp = 0
needed_xp = 5 + current_level * math.ceil(current_level/2)
# Check goat clothes
if current_level <=3:
current_goat = goat1
elif current_level >=4 and current_level <=6:
current_goat = goat2
else:
current_goat = goat3
# WOW THIS SUCKED TO FIX AND FIGURE OUT LMAO (kills the goat and rebirths the asshole with new clothes if needed)
goat_img.configure(image=None)
goat_img.configure(goat_img.load(current_goat))
print(current_goat)
print(current_level," ", current_xp," ",needed_xp)
#!--- BUTTON BEHAVIOURS
# Add a new task
def add_task():
task = new_entry.get()
# # WOW THIS SUCKED TO FIX AND FIGURE OUT LMAO
# goat_img.configure(image=None)
# goat_img.configure(goat_img.load(current_goat))
if task != "":
global current_xp
current_xp +=1
task_list.insert(END," " + task)
new_entry.delete(0, "end")
current_tasklist.append(task)
check_xp()
# Export stats to Json
stat_export = {"current_level":current_level,"current_xp":current_xp,"needed_xp":needed_xp}
with open("stats.json","w") as outfile:
json.dump(stat_export, outfile)
else:
messagebox.showwarning("warning", "Please enter a task!")
# Refresh stats canvas to show updated values
stat_canvas.itemconfig(lvl_info,text=current_level)
stat_canvas.itemconfig(xp_info,text=current_xp)
stat_canvas.itemconfig(needed_info,text=needed_xp - current_xp)
# Delete highlighted task
def del_task():
# Get the item the user is attempting to delete and store it in a var to remove from the initial list
return_focus = task_list.get(task_list.curselection())
task_list.delete(ANCHOR)
current_tasklist[:] = [x for x in current_tasklist if return_focus not in x] # Checks if return_focus is in the task list and removes it
# Save task list
def save_list():
#print(current_tasklist)
#list_to_save = task_list
with open(file_to_open, "w") as tasks_to_save:
for i in current_tasklist:
tasks_to_save.write(i + "\n")
#-------------------------------------------------------------------------------
#!!!!!!!!!--- TKINTER LOOP FROM UNDER HERE
# Set up our main window
task_win = Tk()
#!--- WINDOW WIDTH, HEIGHT, XPOS, YPOS, TITLE, AND DISABLE RESIZE ---
task_win.geometry('450x600+500+200')
task_win.title('Task Quest')
task_win.resizable(width = False, height = False)
#!--- BG UI GFX ---
ui_bg = PhotoImage(file="ui/ui_bg.png")
label1 = Label(task_win, image = ui_bg, borderwidth = 0)
label1.place(x = 0, y = 0)
#!--- DROPODOWN MENU ---
# Dropdown of lists
dirListing = os.listdir("lists/")
detected_files = []
for item in dirListing:
if ".txt" in item:
detected_files.append(item.replace(".txt",""))
#DEBUG print(detected_files)
# Create a new list with any underscores and file extensions stripped by this point
avail_lists = [s.replace("_"," ") for s in detected_files]
dropdown_var = StringVar(task_win)
dropdown_var.set(avail_lists[0]) # default dropdown value
dropdown_lists = OptionMenu(task_win, dropdown_var, *avail_lists,command=display_list)
# Dropdown Styling
dropdown_lists["highlightthickness"]=0
dropdown_lists["width"]=7
# Placement of element
dropdown_lists.place(x=19, y=112)
#!--- OPEN DEFAULT FILE
choice = dropdown_var.get()
file_to_open = "lists/" + choice + ".txt"
#DEBUG print(file_to_open)
with open(file_to_open,"r") as tasklist_file:
current_tasklist = tasklist_file.read().splitlines()
#!--- MAIN FRAME SET UP FOR LIST BOX AND SCROLLBAR
frame = Frame(task_win)
frame.place(x=137,y=112)
#!--- LIST BOX
# List box for our tasks to go live in
task_list = Listbox(frame,width=30,height=17,font=('Arial',12),bd=0,bg="#283189",fg="#FFFFFF",highlightthickness=0,selectbackground="#191e51",activestyle="none")
task_list.pack(side=LEFT,fill= BOTH)
# Insert tasks into list box from our task list created from the text file
for i in current_tasklist:
task_list.insert(END," " + i)
#!--- SCROLLBAR
# Vertical scrollbar for longer to-do lists
tasklist_sb = Scrollbar(frame)
tasklist_sb.pack(side=RIGHT,fill=BOTH)
# Bind the scrollbar and list box together
task_list.config(yscrollcommand=tasklist_sb.set)
tasklist_sb.config(command=task_list.yview)
#!--- NEW ENTRIES
new_entry = Entry(task_win,font=("Arial",11),width=28)
new_entry.place(x=195,y=470)
#!--- BUTTONS
y_shift = 518
butt_width = 8
# Add task button
add_task_btn= Button(task_win,text='Add',font=("Arial",11),bg="#b4ea66",padx=2,pady=0,width=butt_width,command=add_task)
add_task_btn.place(x=133,y=y_shift)
# Delete task button
del_task_btn = Button(task_win,text='Delete',font=("Arial",11),bg="#940345",fg="white",padx=2,pady=0,width=butt_width,command=del_task)
del_task_btn.place(x=240,y=y_shift)
# Save tasks button
save_task_btn= Button(task_win,text='Save',font=("Arial",11),bg="#ffc96f",padx=2,pady=0,width=butt_width,command=save_list)
save_task_btn.place(x=347,y=y_shift)
#!--- GOAT MANAGEMENT <3
goat_x = 10
goat_y = 450
goat_img = ImageLabel(task_win,bd=0)
goat_img.place(x=goat_x,y=goat_y)
# Goat stats
stat_canvas = Canvas(task_win,width=107,height=150,bd=-2)
stat_canv_image = PhotoImage(file="./ui/stats_canv.png")
stat_canvas.place(x=7,y=300)
stat_canvas.create_image(0,0,image=stat_canv_image,anchor=NW)
lvl_info = stat_canvas.create_text(53,37,font=("Arial",14),fill="White",text=current_level)
xp_info = stat_canvas.create_text(53,82,font=("Arial",14),fill="White",text=current_xp)
needed_info = stat_canvas.create_text(53,123,font=("Arial",14),fill="White",text=needed_xp)
# Refresh the goat
goat_img.load(current_goat)
# Yay it works~
task_win.mainloop() | introvertices/Task-list | main.py | main.py | py | 8,432 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_num... |
36771601568 | ## dammit-turnip
## Version: 0.1
## Author: Adam Lenart
## Main file that interacts with the user
## standard imports
import argparse
## 3rd parth imports
from PIL import Image
## own modules
from src import dialog_action
from src import processor
###################################################################################
## Options ##
###################################################################################
## ----------------------- Command line arguments ------------------------------ ##
parser = argparse.ArgumentParser(description='Create a circular image and draw ' +
'a colored circle around its edge.' )
parser.add_argument('input', help='path to input file')
parser.add_argument('output', help='path to output file. The output format must be PNG.')
parser.add_argument('-x', type=int, help='integer, x coordinate of the center of the circle. 0 is left.')
parser.add_argument('-y', type=int, help='integer, y coordinate of the center of the circle. 0 is top.')
parser.add_argument('-d', type=int, help='integer, diameter of the circle in pixels.')
parser.add_argument('-R', type=int,
help='integer, R in RGBA color specifications, e.g., 150 in "(150,0,100,255)".')
parser.add_argument('-G', type=int,
help='integer, G in RGBA color specifications, e.g., 0 in "(150,0,100,255)".')
parser.add_argument('-B', type=int,
help='integer, B in RGBA color specifications, e.g., 100 in "(150,0,100,255)".')
parser.add_argument('-A', type=int,
help='integer, A in RGBA color specifications, e.g., 255 in "(150,0,100,255)".')
parser.add_argument('-width', type=int, help='integer, width of circle line in pixels.')
parser.add_argument('--resize', action='store_true', help='Resize to 300 x 300 pixels.')
parser.add_argument('--no-resize', action='store_true', help='Do not resize the picture.')
args = parser.parse_args()
## -------------------------- resize options ------------------------------------- ##
yes = {'yes','y', 'ye'}
no = {'no','n'}
allowed_responses = yes.union(no)
## continue loop below until yes or no arrives
CONT = True
#####################################################################################
## Run ##
#####################################################################################
if __name__ == "__main__":
print("dammit-turnip 0.1.\n\n")
print("Make a circle from the input image and color the edge of it.\n")
input_image = Image.open(args.input)
print("Dimensions of the input image: {dim}".format(dim=input_image.size))
if args.x is None:
print("\nProvide input for the position of the circle on the original image.\n")
x = int(input("X coordinate of the center of the circle (0 is left): "))
else:
x = args.x
if args.y is None:
y = int(input("Y coordinate of the center of the circle (0 is top): "))
else:
y= args.y
if args.d is None:
d = int(input("Diameter of the circle in pixels: "))
else:
d = args.d
if args.R is None:
print("\nNext, provide input for the color of the circle in RGBA format.\n")
R = input('R channel color, integer between 0 and 255 (press ENTER for default): ')
if R == '':
R = 0
else:
R = int(R)
else:
R = args.R
if args.G is None:
G = input('G channel color, integer between 0 and 255 (press ENTER for default): ')
if G == '':
G = 0
else:
G = int(G)
else:
G = args.G
if args.B is None:
B = input('B channel color, integer between 0 and 255 (press ENTER for default): ')
if B == '':
B = 0
else:
B = int(B)
else:
B = args.B
if args.A is None:
A = input('A channel color, integer between 0 and 255 (press ENTER for default): ')
if A == '':
A = 255
else:
A = int(A)
else:
A = args.A
if args.width is None:
width = input("Width of the circle line in pixels (press ENTER for default): ")
if width == '':
width = int(d / 100)
else:
width = int(width)
else:
width = args.width
## Make circle
circle = processor.circle_maker(input_image, (x,y), d, width, (R,G,B,A))
## Resize?
if args.resize:
dialog_action.yes_action(circle, args.output)
elif args.no_resize:
dialog_action.no_action(circle, args.output)
else:
while CONT:
response = input("\nWe have now a circular shaped image.\n" +
"Resize it to LinkedIn size recommendation (300 x 300)? (yes/no): ").lower()
if response in allowed_responses:
dialog_action.response_action(response, yes, no, circle, args.output)
break
while response not in allowed_responses:
response = input("Please respond with 'yes' or 'no': ")
if response in allowed_responses:
dialog_action.response_action(response, yes, no, circle, args.output)
CONT = False
break
| adamlenart/dammit-turnip | make_circle.py | make_circle.py | py | 5,417 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "src.processor.circle... |
41907884718 | import time
import cv2
import mediapipe as mp
mp_face_detection = mp.solutions.face_detection
import os
os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = 'rtsp_transport;udp'
class face_detection:
def __init__(self):
self.URL = "rtsp://192.168.0.22:8554/"
self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
self.image = None
self.device = 0
self.width = 960
self.height = 540
def detect_face(self):
cap = self.cap
cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
while 1:
ret, frame = cap.read()
self.image = frame
with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=1) as face_detection:
results = face_detection.process(cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB))
if not results.detections: # face detect X -> return False
pass
else: # detected face -> return True
cv2.imwrite("face.jpg", frame)
cap.release()
return True
if __name__ == '__main__':
fd = face_detection()
if fd.detect_face():
os.system("F3.bat")
time.sleep(2);
print("2초간 사람 인식 완료")
os.system('python ./age_prediction_model/age_pred.py')
| CSID-DGU/2022-2-SCS4031-EZ_SW | FaceDetection.py | FaceDetection.py | py | 1,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mediapipe.solutions",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_DSHOW... |
34118610496 | import os
from flask import Flask
import ghhops_server as hs
import rhino3dm
import pymaxwell5 as pym
app = Flask(__name__) #flask
hops = hs.Hops(app) #flask
#hops = hs.Hops() #http
@hops.component(
"/maxwell",
name="Maxwell",
description="render",
icon="C://Users//archi//Dropbox//course//maxwell.png",
inputs=[
hs.HopsBoolean("run","run","render scene"),
hs.HopsString("width","width",""),
hs.HopsString("height","height",""),
hs.HopsString("time","time",""),
hs.HopsString("sl","sl",""),
hs.HopsString("img","img",""),
hs.HopsString("mxi","mxi",""),
hs.HopsString("mxs_file","inPath",""),
hs.HopsString("output_folder","outFolder",""),
],
outputs=[
hs.HopsString('image','image','')
]
)
def maxwell(run,width,height,time,sl,img,mxi,inPath, outFolder):
if run:
return run_maxwell_render(width,height,time,sl,img,mxi,inPath, outFolder)
else:
return outFolder + img
def run_maxwell_render(width,height,time,sl,img,mxi,inPath, outFolder):
if not os.path.exists(outFolder):
os.mkdir(outFolder)
parameters = []
parameters.append('-mxs:' + inPath)
parameters.append('-o:' + outFolder + img)
parameters.append('-mxi:' + outFolder + mxi)
parameters.append('-res:' + width + 'x' + height)
parameters.append('-time:' + time)
parameters.append('-sl:' + sl)
# parameters.append('-nowait')
parameters.append('-nomxi:off')
parameters.append('-noimage:off')
pym.runMaxwell(parameters)
return outFolder+img
if __name__ == "__main__":
app.run() #flask
#hops.start(debug=True) #http
| seghier/maxwell | venv/maxwell.py | maxwell.py | py | 1,679 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ghhops_server.Hops",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "ghhops_server.HopsBoolean",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ghhops_server... |
8446001098 | import unittest
import pytest
from cupy_backends.cuda import stream as stream_module
import cupy
from cupy import _core
from cupy import testing
# TODO(leofang): test PTDS in this file
class DummyObjectWithCudaArrayInterface(object):
def __init__(self, a, ver=3):
self.a = a
self.ver = ver
@property
def __cuda_array_interface__(self):
desc = {
'shape': self.a.shape,
'strides': self.a.strides,
'typestr': self.a.dtype.str,
'descr': self.a.dtype.descr,
'data': (self.a.data.ptr, False),
'version': self.ver,
}
if self.ver == 3:
stream = cupy.cuda.get_current_stream()
desc['stream'] = 1 if stream.ptr == 0 else stream.ptr
return desc
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestArrayUfunc(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError,
contiguous_check=False)
def check_array_scalar_op(self, op, xp, x_type, y_type, trans=False):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
if trans:
a = a.T
if xp is cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
return getattr(xp, op)(a, y_type(3))
else:
return getattr(xp, op)(a, y_type(3))
def test_add_scalar(self):
self.check_array_scalar_op('add')
def test_add_scalar_with_strides(self):
self.check_array_scalar_op('add', trans=True)
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestElementwiseKernel(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
@testing.for_all_dtypes_combination()
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError,
contiguous_check=False)
def check_array_scalar_op(self, op, xp, dtyes, trans=False):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtyes)
if trans:
a = a.T
if xp is cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
f = cupy.ElementwiseKernel('T x, T y', 'T z', 'z = x + y')
return f(a, dtyes(3))
else:
return a + dtyes(3)
def test_add_scalar(self):
self.check_array_scalar_op('add')
def test_add_scalar_with_strides(self):
self.check_array_scalar_op('add', trans=True)
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestSimpleReductionFunction(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
self.my_int8_sum = _core.create_reduction_func(
'my_sum', ('b->b',), ('in0', 'a + b', 'out0 = a', None))
@testing.numpy_cupy_allclose()
def check_int8_sum(self, shape, xp, axis=None, keepdims=False,
trans=False):
a = testing.shaped_random(shape, xp, 'b')
if trans:
a = a.T
if xp == cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
return self.my_int8_sum(
a, axis=axis, keepdims=keepdims)
else:
return a.sum(axis=axis, keepdims=keepdims, dtype='b')
def test_shape(self):
self.check_int8_sum((2 ** 10,))
def test_shape_with_strides(self):
self.check_int8_sum((2 ** 10, 16), trans=True)
@testing.parameterize(*testing.product({
'stream': ('null', 'new'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestReductionKernel(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
self.my_sum = _core.ReductionKernel(
'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')
@testing.numpy_cupy_allclose()
def check_int8_sum(self, shape, xp, axis=None, keepdims=False,
trans=False):
a = testing.shaped_random(shape, xp, 'b')
if trans:
a = a.T
if xp == cupy:
with self.stream:
a = DummyObjectWithCudaArrayInterface(a, self.ver)
return self.my_sum(
a, axis=axis, keepdims=keepdims)
else:
return a.sum(axis=axis, keepdims=keepdims, dtype='b')
def test_shape(self):
self.check_int8_sum((2 ** 10,))
def test_shape_with_strides(self):
self.check_int8_sum((2 ** 10, 16), trans=True)
@testing.parameterize(
{'shape': (10,), 'slices': (slice(0, None),)},
{'shape': (10,), 'slices': (slice(2, None),)},
{'shape': (10, 10), 'slices': (slice(0, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(0, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(4, None))},
)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestSlicingMemoryPointer(unittest.TestCase):
@testing.for_all_dtypes_combination(names=['dtype'])
@testing.for_orders('CF')
def test_shape_with_strides(self, dtype, order):
x = cupy.zeros(self.shape, dtype=dtype, order=order)
start = [s.start for s in self.slices]
itemsize = cupy.dtype(dtype).itemsize
dimsize = [s * itemsize for s in start]
if len(self.shape) == 1:
offset = start[0] * itemsize
else:
if order == 'C':
offset = self.shape[0] * dimsize[0] + dimsize[1]
else:
offset = self.shape[0] * dimsize[1] + dimsize[0]
cai_ptr, _ = x.__cuda_array_interface__['data']
slice_cai_ptr, _ = x[self.slices].__cuda_array_interface__['data']
cupy_data_ptr = x.data.ptr
sliced_cupy_data_ptr = x[self.slices].data.ptr
assert cai_ptr == cupy_data_ptr
assert slice_cai_ptr == sliced_cupy_data_ptr
assert slice_cai_ptr == cai_ptr+offset
test_cases = [
{'shape': (10,), 'slices': (slice(0, None),)},
{'shape': (10,), 'slices': (slice(2, None),)},
{'shape': (10, 10), 'slices': (slice(0, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(0, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(0, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(2, None))},
{'shape': (10, 10), 'slices': (slice(2, None), slice(4, None))},
]
test_streams = ('null', 'new')
test_cases_with_stream = [
{'stream': s, **t} for t in test_cases for s in test_streams]
@testing.parameterize(*test_cases_with_stream)
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestCUDAArrayInterfaceCompliance(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cupy.cuda.Stream.null
elif self.stream == 'new':
self.stream = cupy.cuda.Stream()
@testing.for_all_dtypes_combination(names=['dtype'])
@testing.for_orders('CF')
def test_value_type(self, dtype, order):
x = cupy.zeros(self.shape, dtype=dtype, order=order)
y = x[self.slices]
# mandatory entries
with self.stream:
CAI = y.__cuda_array_interface__
shape = CAI['shape']
typestr = CAI['typestr']
ptr, readonly = CAI['data']
version = CAI['version']
strides = CAI['strides']
# optional entries
descr = CAI['descr'] if 'descr' in CAI else None
stream = CAI['stream'] if 'stream' in CAI else None
# Don't validate correctness of data here, just their types
assert version == 3 # bump this when the protocol is updated!
assert isinstance(CAI, dict)
assert isinstance(shape, tuple)
assert isinstance(typestr, str)
assert isinstance(ptr, int)
assert isinstance(readonly, bool)
assert (strides is None) or isinstance(strides, tuple)
assert (descr is None) or isinstance(descr, list)
if isinstance(descr, list):
for item in descr:
assert isinstance(item, tuple)
assert (stream is None) or isinstance(stream, int)
@testing.parameterize(*testing.product({
    'stream': ('null', 'new', 'ptds'),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
                    reason='HIP does not support this')
class TestCUDAArrayInterfaceStream(unittest.TestCase):
    """Check which stream pointer the CAI exports in/outside a stream context."""
    def setUp(self):
        # Map the parameterized stream name to a concrete Stream object.
        if self.stream == 'null':
            self.stream = cupy.cuda.Stream.null
        elif self.stream == 'new':
            self.stream = cupy.cuda.Stream()
        elif self.stream == 'ptds':
            self.stream = cupy.cuda.Stream.ptds
    def test_stream_export(self):
        a = cupy.empty(100)
        # the stream context should export the stream
        with self.stream:
            stream_ptr = a.__cuda_array_interface__['stream']
        if self.stream is cupy.cuda.Stream.null:
            assert stream_ptr == stream_module.get_default_stream_ptr()
        elif self.stream is cupy.cuda.Stream.ptds:
            # per-thread default stream is the reserved handle value 2
            assert stream_ptr == 2
        else:
            assert stream_ptr == self.stream.ptr
        # without a stream context, it's always the default stream
        stream_ptr = a.__cuda_array_interface__['stream']
        assert stream_ptr == stream_module.get_default_stream_ptr()
| cupy/cupy | tests/cupy_tests/core_tests/test_ndarray_cuda_array_interface.py | test_ndarray_cuda_array_interface.py | py | 10,691 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy.cuda.get_current_stream",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cupy.cuda",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cu... |
74207679783 | import random
from ltl.spot2ba import Automaton
import ltl.worlds.craft_world as craft
from collections import defaultdict
# TODO: add `grass` and `toolshed` back
GRAMMAR = """
BinOp -> 'and' | 'or'
UOp -> 'do not' | 'you should not'
Not -> 'not'
Item -> 'apple' | 'orange' | 'pear'
Landmark -> 'flag' | 'house' | 'tree'
Predicate -> 'be around the' Landmark | 'be near the' Landmark | 'go to the' Landmark | 'hold the' Item | 'take the' Item | 'possess the' Item
p -> Predicate | UOp Predicate | Predicate BinOp Predicate | UOp p
S -> Safety | Guarantee | Obligation | Recurrence | Persistence | Reactivity
SPrefix -> 'always' | 'at all times,'
SSuffix -> 'forever' | 'at all times' | 'all the time'
Safety -> SPrefix p | p SSuffix | Safety BinOp Safety
GPrefix -> 'eventually' | 'at some point'
NotPredicate -> UOp Predicate
Guarantee -> GPrefix p | 'guarantee that you will' Predicate | 'guarantee that you' NotPredicate | Guarantee BinOp Guarantee
Obligation -> Safety BinOp Guarantee | Obligation BinOp Safety | Obligation BinOp Guarantee
Recurrence -> 'eventually,' p 'and do this repeatedly' | Recurrence BinOp Recurrence
Persistence -> 'at some point, start to' p 'and keep doing it' | Persistence BinOp Persistence
Reactivity -> Recurrence BinOp Persistence | Reactivity BinOp Recurrence | Reactivity BinOp Persistence
"""
#GRAMMAR = """
# BinOp -> 'and' | 'or'
# UOp -> 'do not' | 'avoid'
# Not -> 'not'
# Item -> 'apple' | 'orange' | 'pear'
# Landmark -> 'flag' | 'house' | 'tree'
# Predicate -> 'be around' Landmark | 'be near' Landmark | 'hold' Item | 'take' Item
# p -> Predicate | UOp Predicate | Predicate BinOp Predicate | UOp p
# S -> Safety | Guarantee | Obligation | Recurrence | Persistence | Reactivity
# SPrefix -> 'always'
# SSuffix -> 'forever'
# Safety -> SPrefix p | p SSuffix | Safety BinOp Safety
# GPrefix -> 'eventually' | 'at some point'
# Guarantee -> GPrefix p | 'make' Predicate 'happen' | 'make' Predicate Not 'happen' | Guarantee BinOp Guarantee
# Obligation -> Safety BinOp Guarantee | Obligation BinOp Safety | Obligation BinOp Guarantee
# Recurrence -> 'at some point,' p 'for a while' | Recurrence BinOp Recurrence
# Persistence -> 'at some point, start' p 'and keep doing it' | Persistence BinOp Persistence
# Reactivity -> Recurrence BinOp Persistence | Reactivity BinOp Recurrence | Reactivity BinOp Persistence
#"""
CLASS_LTL_PREFIX = {
'Safety': 'G ',
'Guarantee': 'F ',
'Recurrence': 'G F ',
'Persistence': 'F G '
}
class SentenceGrammar(object):
    """Samples natural-language task sentences together with matching LTL
    formulas from the (recipe-specialised) grammar defined in ``GRAMMAR``."""
    def __init__(self, recipe_path):
        """Build and parse the grammar for the recipe file at *recipe_path*."""
        # production table: LHS symbol -> list of right-hand-side alternatives
        self._prod = defaultdict(list)
        self.grammar = ''
        self.create_grammar(recipe_path)
        self.parse_grammar()
    def create_grammar(self, recipe_path):
        """Copy GRAMMAR, rewriting the Item/Landmark rules from the cookbook."""
        rules = filter(lambda x: x != '', GRAMMAR.split('\n'))
        cookbook = craft.Cookbook(recipe_path)
        for rule in rules:
            line = ''
            if (rule.split()[0] == 'Item'):
                # items come from the recipe's primitive objects
                line = '    Item -> '
                for primitive in cookbook.original_recipes['primitives']:
                    line += primitive + ' | '
                line = line[:-3]
            elif (rule.split()[0] == 'Landmark'):
                line = '    Landmark -> '
                for landmark in cookbook.original_recipes['environment']:
                    # TODO: This is a very hacky way to get the landmarks
                    if (not '_' in landmark):
                        line += landmark + ' | '
                line = line[:-3]
            else:
                line = rule
            self.grammar += line + '\n'
        # drop the trailing newline added by the loop above
        self.grammar = self.grammar[:-1]
    def parse_grammar(self):
        """Fill self._prod from the textual grammar built by create_grammar."""
        rules = filter(lambda x: x != '', self.grammar.split('\n'))
        for rule in rules:
            rule = rule.strip().split(' -> ')
            lhs = rule[0]; rhs = rule[1]
            prods = rhs.split(' | ')
            for prod in prods:
                self._prod[lhs].append(prod)
    def gen_single_prod(self, prod, cfactor=0.25, pcount=defaultdict(int), excludes=None, negate=False):
        """Expand one production string into (sentence, LTL formula).

        NOTE(review): ``pcount`` is a shared mutable default; gen_random
        increments and then decrements it (backtracking), so it nets out to
        zero across calls — confirm before refactoring.
        """
        if '\'' in prod:
            # quoted terminals present: split on quotes to separate terminals
            # from nonterminal tokens
            tmp_tokens = filter(lambda x: x != '', prod.split('\''))
            # fix for Predicate Not, TODO: find a better fix
            tokens = []
            for token in tmp_tokens:
                if 'Not' in token:
                    tokens.extend(token.strip().split(' '))
                else:
                    tokens.append(token)
        else:
            tokens = filter(lambda x: x != '', prod.split(' '))
        out = []; formula = []; need_brackets = False
        if excludes is None:
            excludes = []
        should_negate = 'UOp' in prod or 'Not' in prod
        if should_negate: # avoid double-negate if there is one negation
            excludes.extend(['UOp', 'Not'])
        for token in tokens:
            token = token.strip()
            if token in self._prod.keys():
                # nonterminal: recurse
                sentence, formula_part = self.gen_random(token, \
                    cfactor=cfactor, pcount=pcount, excludes=excludes, negate=should_negate)
                if token in ['Item', 'Landmark']:
                    # atomic proposition: spaces removed to form one symbol
                    formula.append('( ' + ''.join(sentence.split(' ')) + ' )')
                    excludes.append(sentence)
                elif sentence == 'and':
                    formula.append('&')
                    need_brackets = True
                elif sentence == 'or':
                    formula.append('|')
                    need_brackets = True
                elif token in ['UOp', 'Not']:
                    formula.append('!')
                    need_brackets = True
                    if token == 'Not': # swap predicate and not
                        formula[-1] = formula[-2]; formula[-2] = '!'
                elif len(formula_part) > 0:
                    formula.append(formula_part)
                out.append(sentence)
            else:
                # terminal word: appears in the sentence only
                out.append(token)
        # NOTE(review): dead local rebind — has no effect on the caller's list
        excludes = None
        # combine formulas
        if len(formula) > 0:
            formula = ' '.join(formula)
            if need_brackets:
                formula = '( ' + formula + ' )'
        else:
            formula = ''
        return ' '.join(out), formula
    def _weighted_choice(self, weights):
        """Return an index sampled proportionally to *weights*."""
        rnd = random.random() * sum(weights)
        for i, w in enumerate(weights):
            rnd -= w
            if rnd < 0:
                return i
    def gen_random(self, symbol, cfactor=0.25, pcount=defaultdict(int), excludes=None, negate=False):
        """Sample an expansion of *symbol*; returns (sentence, formula).

        ``cfactor`` damps productions already used (convergent generation);
        ``excludes`` down-weights productions containing listed tokens.
        NOTE(review): ``negate`` is only passed through, never read here.
        """
        sentence = ''; weights = []; formula = ''
        if excludes is None:
            excludes = []
        for i, prod in enumerate(self._prod[symbol]):
            skip = False
            for token in excludes:
                if token in prod:
                    # near-zero (not zero) weight so generation cannot dead-end
                    weights.append(0.01)
                    skip = True
                    break
            if skip:
                continue
            elif prod in pcount:
                weights.append(cfactor ** (pcount[prod]))
            else:
                weights.append(1.0)
        # sample for a production
        rand_prod = self._prod[symbol][self._weighted_choice(weights)]
        pcount[rand_prod] += 1
        if rand_prod in self._prod.keys():
            sentence, formula = self.gen_random(rand_prod, cfactor=cfactor, pcount=pcount, excludes=excludes, negate=negate)
        else:
            sentence, formula = self.gen_single_prod(rand_prod, cfactor=cfactor, pcount=pcount, excludes=excludes, negate=negate)
        if 'UOp' not in rand_prod and 'BinOp' not in rand_prod and \
            symbol in ['Safety', 'Guarantee', 'Recurrence', 'Persistence']:
            # wrap the sub-formula with its temporal operator (G/F/GF/FG)
            formula = '( ' + CLASS_LTL_PREFIX[symbol] + formula + ' )'
        # backtracking: clear the modification to pcount
        pcount[rand_prod] -= 1
        # NOTE(review): likely a typo for ``excludes``; this assignment is unused
        exclude = None
        # grammatical clean-up of awkward negation phrasings
        sentence = sentence.replace("to do not", "to not")
        sentence = sentence.replace("to you should not", "to not")
        sentence = sentence.replace("you do not be", "you not be")
        return sentence, formula
    def gen_sentence(self, n=1):
        """Sample *n* (sentence, formula) pairs from the start symbol 'S'."""
        return [self.gen_random('S') for _ in range(n)]
if __name__ == '__main__':
    import sys
    # BUG FIX: the original called SentenceGrammar() with no argument, but
    # __init__ requires a recipe path — the script crashed with a TypeError
    # before generating anything.  Read the path from the command line.
    if len(sys.argv) < 2:
        sys.exit('usage: python generator.py <recipe_path>')
    grammar = SentenceGrammar(sys.argv[1])
    for sentence, formula in grammar.gen_sentence(n=10):
        print('Sentence:', sentence)
        print('     LTL:', formula)
        # Check each sampled formula is a valid LTL formula by building its
        # Buchi automaton over the fixed alphabet.
        alphabets = ['boundary', 'C_boundary', 'tree', 'C_tree', 'house', 'C_house', 'flag', 'C_flag', 'orange', 'C_orange','apple', 'C_apple', 'pear', 'C_pear']
        Automaton(formula, alphabets, add_flexible_state=False)
| czlwang/ltl-environment-dev | ltl/language/generator.py | generator.py | py | 8,694 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "ltl.worlds.craft_world.Cookbook",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "ltl.worlds.craft_world",
"line_number": 69,
"usage_type": "name"
},
{
"ap... |
448222125 | from __future__ import print_function
from pyspark.sql import functions as F
from pyspark.sql.functions import mean, min, max, variance, lag, count, col
from pyspark import sql
from pyspark import SparkContext, SparkConf
from pyspark.sql.types import ArrayType, StringType, IntegerType, DoubleType, LongType, FloatType
from pyspark.sql.types import *
from pyspark.sql.functions import date_format
from datetime import datetime
from pyspark.sql.functions import sum,trim,udf,lit
from pyspark.sql import Row
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql import HiveContext
from pyspark.sql import SparkSession
from pyspark.sql.window import Window
if __name__ == "__main__":
import os
import sys
import shutil
import subprocess
db_certified = sys.argv[1]
db_analytical_ds = sys.argv[2]
db_analytical_temp = sys.argv[3]
username= sys.argv[4]
password= sys.argv[5]
db= sys.argv[6]
connection_str= sys.argv[7]
source= sys.argv[8]
group_id= sys.argv[9]
table_name=sys.argv[10]
cur_script= sys.argv[11]
source_log_euw = sys.argv[12]
euw_shell_script_path=sys.argv[13]
current_dt = str(datetime.now().strftime("%Y%m%d"))
log_dir = source_log_euw+"/"+current_dt+"/python_script_weather_logs"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
columns="inserted_count,total_count,table_name"
#**************************************************************************************************************************************************************
#### happusrd user development cluster
### creating spark-session
spark = SparkSession.builder.appName("EUW_CUSTOMER_LOCATION_WEATHER_DATA").enableHiveSupport().getOrCreate()
def fileLog(logMsg):
with open(log_dir+"/EUW_CUSTOMER_LOCATION_WEATHER_DATA_LOG.log", "a") as myfile:
myfile.write(str(logMsg)+ "\n")
fileLog("################################## EUW_CUSTOMER_LOCATION_WEATHER_DATA script is started ###########################################")
### reading fixed flat actual data
WEATHER_DAILY_FORECAST = spark.sql("select * from "+db_certified+".weather_zip_cd_daily_forecast")
WEATHER_DAILY_ACTUALS = spark.sql("select zip_code,weather_concepts,weather_date,gmt,avg_daily_temp,min_daily_temp,max_daily_temp,updated_on,batch_id from "+db_certified+".weather_zip_cd_daily_actuals")
Weather_all = WEATHER_DAILY_FORECAST.unionAll(WEATHER_DAILY_ACTUALS)
fileLog("weather actuals and forecast have been unioned")
### Dcasting and casting weather_date as date
df = Weather_all
df = df.withColumn('temp_set',F.concat_ws(',',F.col('avg_daily_temp'),F.col('min_daily_temp'),F.col('max_daily_temp')))
date_format_function = udf (lambda x: datetime.strptime(x, '%Y-%m-%d'), DateType())
df = df.withColumn("weather_date",date_format_function(date_format(col("weather_date"),"yyyy-MM-dd")))
# applying pivot on weather concepts
df1 = df.groupby('zip_code','weather_date').pivot('weather_concepts',['DBT','DPT','HUM']).agg(F.first('temp_set')).orderBy('zip_code','weather_date')
split_col = F.split(df1['DBT'], ',')
df1 = df1.withColumn('avg_daily_temp_DBT', split_col.getItem(0).cast(DoubleType()))
df1 = df1.withColumn('min_daily_temp_DBT', split_col.getItem(1).cast(DoubleType()))
df1 = df1.withColumn('max_daily_temp_DBT', split_col.getItem(2).cast(DoubleType()))
split_col = F.split(df1['DPT'], ',')
df1 = df1.withColumn('avg_daily_temp_DPT', split_col.getItem(0).cast(DoubleType()))
df1 = df1.withColumn('min_daily_temp_DPT', split_col.getItem(1).cast(DoubleType()))
df1 = df1.withColumn('max_daily_temp_DPT', split_col.getItem(2).cast(DoubleType()))
split_col = F.split(df1['HUM'], ',')
df1 = df1.withColumn('avg_daily_temp_HUM', split_col.getItem(0).cast(DoubleType()))
df1 = df1.withColumn('min_daily_temp_HUM', split_col.getItem(1).cast(DoubleType()))
df1 = df1.withColumn('max_daily_temp_HUM', split_col.getItem(2).cast(DoubleType()))
df1 = df1.drop('DBT').drop('DPT').drop('HUM')
fileLog("Dcasted the weather_concepts")
Wthr_Dcast = df1.persist()
Wthr_Dcast_Count=Wthr_Dcast.count()
fileLog("Final counts :")
fileLog(Wthr_Dcast_Count)
Wthr_Dcast.createOrReplaceTempView("Wthr_Dcast")
spark.sql("drop table if exists "+db_analytical_temp+".Euw_weather_data_temp")
spark.sql("create table "+db_analytical_temp+".Euw_weather_data_temp as select * from Wthr_Dcast")
column_values=[]
column_values.insert(0,str(Wthr_Dcast_Count))
column_values.append(str(Wthr_Dcast_Count))
column_values.append('Euw_weather_data_temp')
print(column_values)
column_values=','.join(column_values).rstrip(',')
print(column_values)
path='/data01/data/dev/dif/files/scripts/euw'
os.chdir(path)
subprocess.Popen(['bash','-c','. {}/process_control.sh; updateProcessControl %s %s %s %s %s %s %s %s %s %s %s'.format(euw_shell_script_path) %(username,password,db,connection_str,source,group_id,table_name,cur_script,db_analytical_temp,columns,column_values)])
fileLog("################################## EUW_CUSTOMER_LOCATION_WEATHER_DATA script is complete ###########################################")
| avilin66/Pyspark_codes | EUW_CUSTOMER_LOCATION_WEATHER_DATA.py | EUW_CUSTOMER_LOCATION_WEATHER_DATA.py | py | 5,260 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number... |
3540344939 | import requests
from requests.auth import HTTPBasicAuth
import json
from decouple import config
url = "https://climate.jira.com/rest/api/2/issue"
auth = HTTPBasicAuth(
"brandon.hoffman@climate.com",
f"{config('JIRA_API_KEY')}"
)
headers = {
"Accept": "application/json",
"Content-Type": "application/json"
}
payload = json.dumps({
"fields": {
"project": {
"key": "HELIOS"
},
"summary": "Hackathon AI DevOps Test",
"description": "This is just a test ticket",
"issuetype": {
"name": "Bug"
}
}
})
response = requests.request(
"POST",
url,
data=payload,
headers=headers,
auth=auth,
verify=False
)
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": "))) | branhoff/jira-gpt-enhancer | backend/create_issue.py | create_issue.py | py | 811 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.auth.HTTPBasicAuth",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.reques... |
22124651099 | import json
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.db import database_sync_to_async
from .models import Message
from userauth.models import User
from .models import Conversation
class ChatConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer for a 1:1 chat room.

    Relays chat messages and key-exchange material (shared / symmetric keys)
    to all members of the room's channel group, and persists chat messages.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.room_name = None
        self.room_group_name = None
        self.user = None

    async def connect(self):
        """Accept authenticated users and join them to the room group."""
        self.user = self.scope['user']
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        if self.user.is_authenticated:
            await self.channel_layer.group_add(
                self.room_group_name,
                self.channel_name
            )
            await self.accept()
        else:
            await self.close()

    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        room_name = self.scope['url_route']['kwargs']['room_name']
        print(f"WebSocket disconnected from room '{room_name}'.")
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    async def receive(self, text_data):
        """Dispatch an incoming frame by its payload key.

        Chat messages are persisted and broadcast; key material is broadcast
        only (never stored).
        """
        text_data_json = json.loads(text_data)
        if 'message' in text_data_json:
            # Message handling: persist, then fan out to the group.
            message = text_data_json['message']
            await self.save_message(message)
            await self.channel_layer.group_send(
                self.room_group_name,
                {
                    'type': 'chat_message',
                    'message': message
                }
            )
        elif 'shared_key' in text_data_json:
            # Shared key handling (key-exchange relay).
            await self.channel_layer.group_send(
                self.room_group_name,
                {
                    'type': 'send_shared_key',
                    'shared_key': text_data_json['shared_key']
                }
            )
        elif 'symmetric_key' in text_data_json:
            await self.channel_layer.group_send(
                self.room_group_name,
                {
                    'type': 'send_symmetric_key',
                    'symmetric_key': text_data_json['symmetric_key']
                }
            )

    @database_sync_to_async
    def _get_conversation(self):
        # ORM access is synchronous; it must not run directly on the event loop.
        return Conversation.objects.get(room=self.room_name)

    async def chat_message(self, event):
        """Group handler: deliver a chat message to this socket."""
        message = event['message']
        # BUG FIX: the original called Conversation.objects.get(...) directly
        # inside this async handler, which raises SynchronousOnlyOperation
        # under Django's async safety checks; wrap it instead.
        conversation = await self._get_conversation()
        await self.send(text_data=json.dumps({
            'type': 'message',
            'sender': conversation.sender.username,
            'receiver': conversation.receiver.username,
            'message': message
        }))

    async def send_shared_key(self, event):
        """Group handler: forward a shared key to this socket."""
        await self.send(text_data=json.dumps({
            'type': 'shared_key',
            'shared_key': event['shared_key']
        }))

    async def send_symmetric_key(self, event):
        """Group handler: forward a symmetric key to this socket."""
        await self.send(text_data=json.dumps({
            'type': 'symmetric_key',
            'symmetric_key': event['symmetric_key']
        }))

    @database_sync_to_async
    def save_message(self, message):
        """Persist *message* in the conversation matching this room."""
        room_name = self.scope['url_route']['kwargs']['room_name']
        conversation = Conversation.objects.get(room=room_name)
        # objects.create() already saves; the original's extra save() issued
        # a redundant second UPDATE query.
        Message.objects.create(
            conversation=conversation,
            message=message,
            is_read=False
        )
| codynego/ChaCha | chat/consumers.py | consumers.py | py | 3,576 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "channels.generic.websocket.AsyncWebsocketConsumer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.Conversation.objects.get",
"line_number": 75,
"usage_type": "call"
}... |
30330065701 | import numpy as np
import os
import argparse
import h5py
import sys
from spad_tools.listFiles import listFiles
from spad_tools.array2tiff import array2tiff, array2RGBtiff
from spad_tools.getFCSinfo import getFileInfo
from libttp import ttp
"""
This set of functions allows to read a binary file containing SPAD measurements
using only the file name. The parameters are extracted from the matrix using
the tags. The assumpstion is that the parameters are constant and that all the
frames are complete.
Author: Sebastian Acuna
"""
def file_to_count(fname, datatype=np.uint16, printInfo=False):
    """
    Read a raw SPAD .bin file and decode the per-position photon counts.

    The file holds pairs of big-endian uint64 words per position; low bits of
    the second word carry the time/line/frame tags used to recover geometry.

    Args:
        fname: name of the file containing the data
        datatype: dtype of the output count matrix
        printInfo: if True, print the decoded acquisition geometry
    Returns:
        Tuple (out, frames, y, x, time_per_pixel) where out is an N x 25
        count matrix, or None if the file cannot be read
    """
    try:
        raw = np.fromfile(fname, dtype=">u8")
    except (OSError, ValueError):
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; only I/O / dtype failures mean "unreadable file".
        if printInfo:
            print("Error reading binary file")
        return None
    elements = raw.shape[0]
    positions = int(elements/2)
    raw_pos = np.reshape(raw, (positions, 2))
    if printInfo:
        print(f"Elements: {elements}")
        print(f"Positions: {positions}")
        print(f"data table: {raw_pos.shape}")
    # bit 0 of word 1 toggles once per pixel -> dwell bins per pixel
    time_per_pixel_tag = np.bitwise_and(raw_pos[:,1], 0b1)
    idx = np.argmax(time_per_pixel_tag != time_per_pixel_tag[0]) # positions per time
    time_per_pixel = int(idx)
    if printInfo:
        print(f"time per pixel: {time_per_pixel}")
    # bit 2 toggles once per frame
    frame_tag = np.bitwise_and(np.right_shift(raw_pos[:,1], 2), 0b1)
    idx = np.argmax(frame_tag != frame_tag[0]) # positions per frame
    if idx == 0:
        if printInfo:
            print("Unique frame")
        frames = 1
    else:
        frames = int(positions/idx) # TODO: check condition with larger dataset
    # bit 1 toggles once per line
    line_tag = np.bitwise_and(np.right_shift(raw_pos[:,1], 1), 0b1)
    idx = int(np.argmax(line_tag != line_tag[0])/time_per_pixel) # positions per line
    if printInfo:
        print(f"Positions per lines: {idx}")
    x = int(idx)
    y = int(positions/x/time_per_pixel/frames)
    if printInfo:
        print(f"Dimensions: Y:{y}, X:{x}")
    out = np.zeros((positions , 25), dtype = datatype)
    matrix_to_count(raw_pos, out)
    return out, frames, y, x, time_per_pixel
def file_to_FCScount(fname, datatype=np.uint16, Npoints=-1, Noffset=0):
    """
    Read (part of) a raw SPAD .bin file and decode the photon counts.

    Args:
        fname: name of the file containing the data
        datatype: dtype of the output count matrix
        Npoints: number of positions to read (-1 reads the whole file);
            each position occupies two uint64 words
        Noffset: number of positions to skip from the start of the file
    Returns:
        N x 25 count matrix, or None on read failure
    """
    # each position is two 8-byte words; keep the -1 "read all" sentinel intact
    Nwords = Npoints if Npoints < 0 else Npoints * 2
    NbytesOffset = 16 * Noffset
    try:
        raw = np.fromfile(fname, dtype=">u8", count=Nwords, offset=NbytesOffset)
    except (OSError, ValueError):
        # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt etc.)
        print("Error reading binary file")
        return None
    elements = raw.shape[0]
    print(f"Elements: {elements}")
    positions = int(elements/2)
    print(f"Positions: {positions}")
    print("Freeing memory")
    out = np.zeros((positions , 25), dtype = datatype)
    print("Done.")
    raw_pos = np.reshape(raw, (positions, 2))
    print(f"data table: {raw_pos.shape}")
    print("Converting data to counts")
    matrix_to_count(raw_pos, out)
    print("Done.")
    return out
def matrix_to_count(values, out):
    """
    Unpack the photon counts of all 25 SPAD channels from the raw words.

    Each measurement consists of two big-endian 64-bit words and every
    channel occupies a fixed bit field in one of them; this is equivalent
    to a fully unrolled sequence of shift/mask assignments.

    Args:
        values: N x 2 unsigned int array with measurements
        out: N x 25 unsigned int array for storing results
    Returns:
        None; ``out`` is filled in place with the counts
    """
    # (channel, word index, right-shift, field width in bits)
    field_layout = (
        (0, 0, 5, 4), (1, 0, 9, 4), (2, 0, 13, 4), (3, 0, 17, 4),
        (4, 0, 21, 4), (5, 0, 25, 4),
        (6, 1, 5, 5), (7, 1, 10, 6), (8, 1, 16, 5), (9, 1, 21, 4),
        (10, 1, 25, 4), (11, 1, 29, 6), (12, 1, 35, 10), (13, 1, 45, 6),
        (14, 1, 51, 4), (15, 1, 55, 4), (16, 1, 59, 5),
        (17, 0, 29, 6), (18, 0, 35, 5), (19, 0, 40, 4), (20, 0, 44, 4),
        (21, 0, 48, 4), (22, 0, 52, 4), (23, 0, 56, 4), (24, 0, 60, 4),
    )
    for channel, word, shift, width in field_layout:
        shifted = np.right_shift(values[:, word], shift)
        out[:, channel] = np.bitwise_and(shifted, (1 << width) - 1)
def reshape_to_5d(count, frames, y, x, time_per_pixel):
    """
    Rearrange the flat count matrix into a 5-D stack.

    Args:
        count: N x 25 count matrix
        frames: number of frames contained in matrix
        y, x: image dimensions
        time_per_pixel: dwell-time bins per pixel
    Returns:
        A view with dimensions (frames, y, x, time, sensor)
    """
    target_shape = (frames, y, x, time_per_pixel, 25)
    return np.reshape(count, target_shape)
def reshape_to_6d(count, r, z, y, x, t=1, c=25):
    """
    Reshapes the data to a 6D array.

    Args:
        count: raw count data (must hold r*z*y*x*t*c elements)
        r: repetitions
        z, y, x: volume dimensions
        t: time bins per pixel (default 1)
        c: number of detector channels (default 25)
    Returns:
        A 6-D matrix with dimensions (r, z, y, x, time, sensor)
    """
    # BUG FIX: the channel count was hard-coded to 25, silently ignoring the
    # ``c`` parameter; honour it (the default keeps the old behaviour).
    return np.reshape(count, (r, z, y, x, t, c))
def image2h5(fname, sumTime=True, saveTimeInd=False):
    """
    Convert bin file to h5 file.

        fname           file name
        sumTime         True to sum over all time bins, false otherwise
        saveTimeInd     Save all time frames in separate files; may also be
                        the string "alternate" to store cumulative sums of
                        even/odd frames separately

    Returns the array that was (last) written to disk.

    TO DO:
        add metadata to file:
        data.pixelsize = 0.05
        data.pixelsizeU = 'um', etc.
    """
    print(fname)
    [out, frames, y, x, time_per_pixel] = file_to_count(fname)
    data = reshape_to_5d(out, frames, y, x, time_per_pixel)
    # NOTE(review): reshape_to_5d always returns ndim == 5; squeezing happens
    # later, so the ndim == 4 branch looks unreachable — confirm.
    if np.ndim(data) == 4 and frames == 1 and sumTime:
        # 4D data set [y, x, time, ch] --> sum over time bins
        dataOut = np.sum(data, 2)
        dataOut = np.float64(dataOut)
        # channel must be first channel
        dataOut = np.transpose(dataOut, (2, 0, 1))
    elif np.ndim(data) == 5 and sumTime:
        # 5D data set [z, y, x, time, ch] --> sum over time bins
        dataOut = np.sum(data, 3)
        dataOut = np.float64(dataOut)
        dataOut = np.transpose(dataOut, (3, 0, 1, 2))
    else:
        print('not summed over time bins')
        # channel must be first channel
        dataOut = np.squeeze(data)
        if saveTimeInd:
            dataOut = np.transpose(dataOut, (3, 0, 1, 2))
    dataOut = np.squeeze(dataOut)
    if type(saveTimeInd) == bool and saveTimeInd:
        # one .h5 file per time frame
        for i in range(np.shape(dataOut)[-1]):
            print("Saving frame " + str(i))
            h5f = h5py.File(fname[:-4] + "_frame_" + str(i) + ".h5", 'w')
            h5f.create_dataset('dataset_1', data=dataOut[:,:,:,i])
            h5f.close()
    elif saveTimeInd == "alternate":
        # channel, y, x, time
        # cumulative sums over even frames vs. odd frames, one pair of files
        # per summation length
        Nt = np.shape(dataOut)[-1]
        for sumT in range(int(Nt/2)):
            print("Summing over " + str(sumT+1) + " frames")
            dataOutSum0 = np.squeeze(np.sum(dataOut[:,:,:,0:2*sumT+1:2], 3))
            dataOutSum1 = np.squeeze(np.sum(dataOut[:,:,:,1:2*sumT+2:2], 3))
            # store frame 0
            h5f = h5py.File(fname[:-4] + "_sum_" + str(sumT+1) + "_frame_0.h5", 'w')
            h5f.create_dataset('dataset_1', data=dataOutSum0)
            h5f.close()
            # store frame 1
            h5f = h5py.File(fname[:-4] + "_sum_" + str(sumT+1) + "_frame_1.h5", 'w')
            h5f.create_dataset('dataset_1', data=dataOutSum1)
            h5f.close()
    else:
        # single output file next to the input (.bin -> .h5)
        h5f = h5py.File(fname[:-3] + "h5", 'w')
        h5f.create_dataset('dataset_1', data=dataOut)
        h5f.close()
    return dataOut
def ttr2h5(fnamettr, reorder25channels=True, CHANNELS=25, laser_MHz=80.0, dwell_time_us=10):
    """Convert a .ttr time-tag file to .h5 (same name, extension replaced).

    Parsing and conversion are delegated to ``libttp.ttp``; all channels
    ``0..CHANNELS-1`` are exported.
    """
    df = ttp.readNewProtocolFileToPandas(fnamettr, reorder25channels=reorder25channels, CHANNELS=CHANNELS)
    ttp.convertFromPandasDataFrame(df, fnamettr[:-4] + '.h5', laser_MHz=laser_MHz, dwell_time_us=dwell_time_us, list_of_channels=list(np.arange(CHANNELS)))
def allTtr2h5(folder=''):
    """Convert every .ttr file in *folder* that has no .h5 counterpart yet."""
    ttr_files = listFiles(folder, filetype='ttr')
    # names of already-converted outputs, mapped back to their .ttr source name
    already_done = {h5name[:-3] + '.ttr' for h5name in listFiles(folder, filetype='h5')}
    for ttr_file in ttr_files:
        if ttr_file in already_done:
            continue
        print("converting " + ttr_file)
        ttr2h5(ttr_file)
    print('Done')
def bin2h5(fname):
    """
    Convert bin file to h5 file with always 6D (r, z, x, y, t, c).

        fname       file name (output is written next to it as .h5)

    TO DO: add metadata to file:
        data.pixelsize = 0.05
        data.pixelsizeU = 'um', etc.
    """
    [out, frames, y, x, time_per_pixel] = file_to_count(fname)
    # r is fixed to 1; the decoded frames are stored along the z axis
    data = reshape_to_6d(out, 1, frames, y, x, time_per_pixel, 25)
    # store data
    h5f = h5py.File(fname[:-4] + ".h5", 'w')
    h5f.create_dataset('dataset_1', data=data)
    h5f.close()
    print('Done')
def allBinImages2tiff(folder):
    """
    Convert all bin files in a folder to tiff images.

        folder      path to folder (use either \\ or / to go into a folder)
    """
    for binfile in listFiles(folder):
        print("saving " + binfile)
        image2tiff(binfile)
def image2tiff(fname):
    """
    Convert bin file to tiff image file (plus an _RGB overview tiff).

        fname       file name; pixel size is read from '<fname>_info.txt'
    """
    [out, frames, y, x, time_per_pixel] = file_to_count(fname)
    data = reshape_to_5d(out, frames, y, x, time_per_pixel)
    print(np.shape(data))
    info = getFileInfo(fname[:-4] + '_info.txt')
    # NOTE(review): reshape_to_5d always yields ndim == 5, so the 4-D branch
    # below looks unreachable — confirm before relying on it.
    if np.ndim(data) == 4 and frames == 1:
        # 4D data set [y, x, time, ch] --> sum over time bins
        dataOut = np.sum(data, 2)
    elif np.ndim(data) == 5:
        # 5D data set [z, y, x, time, ch] --> sum over time bins and z
        dataOut = np.sum(data, 3)
        dataOut = np.sum(dataOut, 0)
    dataOut = np.float64(dataOut)
    dataOut = np.squeeze(dataOut)
    print(np.shape(dataOut))
    array2tiff(dataOut, fname[:-4], pxsize=info.pxsize, dim="yxz", transpose3=True)
    # channel-summed RGB overview image
    array2RGBtiff(np.sum(dataOut, 2), fname[:-4] + '_RGB')
def allBinImages2h5(folder, sumTime=True, saveTimeInd=False):
    """
    Convert all bin files in a folder to h5 files.

        folder       path to folder (use either \\ or / to go into a folder)
        sumTime      True to sum over all time bins, false otherwise
        saveTimeInd  forwarded to image2h5 (save individual time frames)
    """
    for binfile in listFiles(folder):
        print("converting " + binfile)
        image2h5(binfile, sumTime, saveTimeInd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Converter from binary file to measurement matrix"
)
parser.add_argument(
"binary",
help="binary file name")
args = parser.parse_args()
fname = args.binary
count, frames, y, x, time_per_pixel = file_to_count(fname)
if count is None:
print("Failed to process data. Closing.")
sys.exit(0)
file_name, extension = os.path.splitext(fname) # Get filename without extension
print("Saving 5D matrix...", sep="")
count5d = reshape_to_5d(count, frames, y, x, time_per_pixel)
np.save(file_name + ".npy", count5d)
print("Done.")
| VicidominiLab/libspadffs | spad_fcs/meas_to_count.py | meas_to_count.py | py | 12,651 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.uint16",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.fromfile",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.bitwise_and",
... |
13367936976 | from dotenv import load_dotenv
from os import getenv
from model import load_embeddings_model
from csv import DictReader
from ast import literal_eval
from langchain.schema.document import Document
from langchain.vectorstores import FAISS
def _load_books() -> list:
    """
    Load the books from a csv file and organize each book.

    Returns:
        list: One ``Document`` per book; the page content combines title,
        first author, subjects and synopsis when available.
    """
    # Read books from file
    # NOTE(review): Windows-style relative path kept for compatibility.
    with open('data\\books.csv', 'r', encoding='utf-8') as file:
        books = list(DictReader(file))
    return [Document(page_content=_summarize_book(book)) for book in books]


def _parse_list(raw: str) -> list:
    """Safely parse a python-literal list column; empty/malformed cells -> []."""
    # BUG FIX: the original guarded literal_eval only with len(raw) > 3, so a
    # malformed cell still raised; parse defensively instead.
    try:
        value = literal_eval(raw)
    except (ValueError, SyntaxError):
        return []
    return value if isinstance(value, list) else []


def _summarize_book(book: dict) -> str:
    """Build the one-string summary used as a Document's page content."""
    authors = _parse_list(book.get('authors', ''))
    author = authors[0] if authors else None
    subjects = _parse_list(book.get('subjects', ''))
    # missing or empty synopsis both count as "no synopsis"
    synopsis = book.get('synopsis') or None
    summary = book.get('title', "No Title")
    summary += f" by {author}\n" if author else "\n"
    if subjects:
        summary += f"Subjects: {', '.join(subjects)}\n"
    if synopsis:
        summary += f"Synopsis: {synopsis}"
    return summary
def embed_books():
    """
    Embed the books into a FAISS index and save it locally.

    Reads OPENAI_API_KEY from the environment (.env), embeds every book
    loaded by ``_load_books`` and writes the index to the ``index`` folder.
    Performs network calls to the OpenAI embeddings API.
    """
    # Load environment variables
    load_dotenv()
    api_key = getenv('OPENAI_API_KEY')
    # Load embeddings model; high max_retries because bulk embedding is
    # prone to transient rate limits
    embeddings_model = load_embeddings_model("openai", api_key, max_retries=600)
    # Retrieve the books
    books = _load_books()
    # Embed the books into a FAISS index
    index = FAISS.from_documents(books, embeddings_model)
    # Save the FAISS index locally
    index.save_local("index")
def get_relevant_books(query: str, top_k: int) -> list:
    """
    Search the embedded books for relevant books based on the provided query.

    Args:
        query (str): The search query.
        top_k (int): The number of top relevant books to retrieve.

    Returns:
        list: The page contents (str) of the top_k most similar books.
    """
    # Load environment variables
    load_dotenv()
    api_key = getenv('OPENAI_API_KEY')
    # Load embeddings model (must match the one used when the index was built)
    embeddings_model = load_embeddings_model("openai", api_key)
    # Load the FAISS index locally (created beforehand by embed_books)
    index = FAISS.load_local("index", embeddings_model)
    # Search the FAISS index for relevant books
    docs = index.similarity_search(query, top_k)
    return [doc.page_content for doc in docs]
{
"api_name": "csv.DictReader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
... |
74352476903 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
GUFY - Copyright (c) 2019, Fabian Balzer
Distributed under the terms of the GNU General Public License v3.0.
The full license is in the file LICENSE.txt, distributed with this software.
-------------------------------------------------------------------------------
@author: Fabian Balzer (fabian.balzer@studium.uni-hamburg.de)
Module containing all commands used for the creation of the CheckBoxes
"""
import PyQt5.QtGui as QG
import PyQt5.QtCore as QC
import PyQt5.QtWidgets as QW
class coolCheckBox(QW.QCheckBox):
    """Modified version of QCheckBoxes.
    Creates a QCheckBox with a given text and tooltip, styled with custom
    indicator images.
    params:
        text: Text to be shown
        tooltip: optionally create a tooltip for the edit
        checked: Bool set to false by default.
        width: Fixed pixel width of the widget; pass None to keep the
            default size policy instead of fixing the width.
    """
    def __init__(self, text=None, tooltip=None, checked=False, width=150):
        super().__init__()
        self.setText(text)
        self.setToolTip(tooltip)
        self.setChecked(checked)
        # A width of None deliberately skips the fixed width so layouts can
        # size the widget themselves.
        if width is not None:
            self.setFixedWidth(width)
        # NOTE(review): the indicator images are resolved relative to the
        # current working directory ("simgui_registry/Icons/...") —
        # presumably the app is always launched from the project root;
        # confirm.
        self.setStyleSheet("""QCheckBox {color: rgb(0, 0, 0); height: 18 px}
QCheckBox::indicator:unchecked {
image: url(simgui_registry/Icons/CheckBoxUncheckedBase.png); height: 17 px}
QCheckBox::indicator:unchecked:hover {
image: url(simgui_registry/Icons/CheckBoxUncheckedHover.png); height: 17 px;}
QCheckBox::indicator:unchecked:pressed {
image: url(simgui_registry/Icons/CheckBoxUncheckedPressed.png); height: 17 px;}
QCheckBox::indicator:unchecked:disabled {
image: url(simgui_registry/Icons/CheckBoxUncheckedDisabled.png); height: 17 px;}
QCheckBox::indicator:checked {
image: url(simgui_registry/Icons/CheckBoxCheckedBase.png); height: 17 px;}
QCheckBox::indicator:checked:hover {
image: url(simgui_registry/Icons/CheckBoxCheckedHover.png); height: 17 px}
QCheckBox::indicator:checked:pressed {
image: url(simgui_registry/Icons/CheckBoxCheckedPressed.png); height: 17 px}
QCheckBox::indicator:checked:disabled {
image: url(simgui_registry/Icons/CheckBoxCheckedDisabled.png); height: 17 px}""")
# %% Creation
def createAllCheckBoxes(Param_Dict, CheckBox_Dict):
    """Creates all necessary CheckBoxes, stores them in CheckBox_Dict and
    wires their toggled-signals to the application's SignalHandler.
    params:
        Param_Dict: For storing output; must contain "SignalHandler"
        CheckBox_Dict: Dict to contain all the checkBoxes
    """
    hand = Param_Dict["SignalHandler"]
    AnnotationBoxes = createAnnotationBoxes()
    LogBoxes = createLogBoxes()
    ProfileBoxes = createProfileBoxes()
    Boxes = AnnotationBoxes + LogBoxes + ProfileBoxes  # Merge the lists
    # keys line up positionally with the merged list above:
    # 10 annotation boxes, then 3 log boxes, then 2 profile boxes.
    keys = ["Timestamp", "Scale", "Grid", "VelVectors", "VelStreamlines",
            "MagVectors", "MagStreamlines", "Contour", "ParticleAnno",
            "LineAnno", "XLog",
            "YLog", "ZLog", "AddProfile", "TimeSeriesProf"]
    for i, key in enumerate(keys):
        CheckBox_Dict[key] = Boxes[i]
    CheckBox_Dict["DomainDiv"] = createDomainDivBox()
    CheckBox_Dict["SetAspect"] = coolCheckBox("Ignore aspect ratio",
                                              "If checked, the plot may not "
                                              "have the default aspect.",
                                              width=None)
    CheckBox_Dict["CommentsForPlot"] = coolCheckBox("Enable script comments",
                                                    "If checked, the output sc"
                                                    "ript will have comments "
                                                    "with suggestions in it",
                                                    True, width=200)
    CheckBox_Dict["ParticlePlot"] = createParticlePlotBox()
    # Each toggle routes through the SignalHandler; lambdas bind the key
    # name so the handler knows which annotation changed.
    CheckBox_Dict["ParticlePlot"].toggled.connect(lambda: hand.getParticleInput())
    CheckBox_Dict["Timestamp"].toggled.connect(lambda: hand.getAnnotationInput("Timestamp"))
    CheckBox_Dict["Scale"].toggled.connect(lambda: hand.getAnnotationInput("Scale"))
    CheckBox_Dict["Grid"].toggled.connect(lambda: hand.getAnnotationInput("Grid"))
    CheckBox_Dict["VelVectors"].toggled.connect(lambda: hand.getAnnotationInput("VelVectors"))
    CheckBox_Dict["VelStreamlines"].toggled.connect(lambda: hand.getAnnotationInput("VelStreamlines"))
    CheckBox_Dict["MagVectors"].toggled.connect(lambda: hand.getAnnotationInput("MagVectors"))
    CheckBox_Dict["MagStreamlines"].toggled.connect(lambda: hand.getAnnotationInput("MagStreamlines"))
    CheckBox_Dict["Contour"].toggled.connect(lambda: hand.getAnnotationInput("Contour"))
    CheckBox_Dict["ParticleAnno"].toggled.connect(lambda: hand.getAnnotationInput("ParticleAnno"))
    CheckBox_Dict["LineAnno"].toggled.connect(lambda: hand.getAnnotationInput("LineAnno"))
    CheckBox_Dict["XLog"].toggled.connect(lambda: hand.getAnnotationInput("XLog"))
    CheckBox_Dict["YLog"].toggled.connect(lambda: hand.getAnnotationInput("YLog"))
    CheckBox_Dict["ZLog"].toggled.connect(lambda: hand.getAnnotationInput("ZLog"))
    CheckBox_Dict["AddProfile"].toggled.connect(lambda: hand.getAddProfileInput())
    CheckBox_Dict["TimeSeriesProf"].toggled.connect(lambda: hand.getAnnotationInput("TimeSeriesProf"))
    CheckBox_Dict["DomainDiv"].toggled.connect(lambda: hand.getDomainDivInput())
    CheckBox_Dict["SetAspect"].toggled.connect(lambda: hand.getAnnotationInput("SetAspect"))
    CheckBox_Dict["CommentsForPlot"].toggled.connect(lambda: hand.getAnnotationInput("CommentsForPlot"))
    return
def createAnnotationBoxes(width=200, defaultString=" "):
    """Creates CheckBoxes where the user can toggle annotations for the plot.
    params:
        width: Fixed pixel width applied to every box.
        defaultString: Extra text spliced into each tooltip.
    returns:
        boxList: List containing the checkboxes
    """
    labels = ["Timestamp", "Scale", "Grid", "Velocity vectors",
              "Velocity Streamlines",
              "Magnetic field vectors", "Magnetic field Streamlines",
              "Contour lines", "Particles", "Start and end point"]
    boxList = [coolCheckBox(label,
                            f"Toggle {defaultString}{label.lower()} annotation",
                            width=width)
               for label in labels]
    # The last box ("Start and end point") is kept narrower than the rest.
    boxList[-1].setFixedWidth(100)
    # Timestamp annotation is on by default.
    boxList[0].setChecked(True)
    return boxList
def createLogBoxes():
    """Creates CheckBoxes where the user can select logarithmic scaling of an
    axis.
    returns:
        boxList: List containing the checkboxes
    """
    axes = ["Horizontal axis", "Vertical axis", "Color axis"]
    boxList = [coolCheckBox(axis, "Set " + axis + " logarithmic")
               for axis in axes]
    # Vertical and color axes default to logarithmic scaling.
    for box in boxList[1:]:
        box.setChecked(True)
    return boxList
def createProfileBoxes():
    """Create two CheckBoxes that make plotting multiple profiles possible:
    one to add a second profile to the current plot, and one (unlabeled)
    to plot a field for multiple times.
    """
    # Stored later under the keys "AddProfile" and "TimeSeriesProf".
    specs = [("Add a second Profile",
              "If checked, the selected field will be added to the current "
              "plot instead of overwriting it"),
             ("", "Plot the field for multiple times")]
    boxList = [coolCheckBox(text, tip) for text, tip in specs]
    boxList[1].setFixedWidth(20)
    # Only enabled once the plot window already holds a profile plot.
    boxList[0].setDisabled(True)
    # Only shown while in time series mode.
    boxList[1].setHidden(True)
    return boxList
def createDomainDivBox():
    """Return the CheckBox that toggles dividing the projection result by
    the domain height."""
    tooltip = "Divide the result of projection by the domain height"
    return coolCheckBox("Divide by height", tooltip)
def createParticlePlotBox():
    """Return the CheckBox that switches phase/projection plots from gas
    fields to the dataset's particle fields."""
    return coolCheckBox(
        "Particle plot",
        "Changes the fields to the available particle fields of the dataset.",
        width=None)
| Fabian-Balzer/GUFY | GUFY/simgui_modules/checkBoxes.py | checkBoxes.py | py | 8,443 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QCheckBox",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 22,
"usage_type": "name"
}
] |
9195714863 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt
def wn_conv1d(*args, **kwargs):
    """Build an nn.Conv1d and wrap it with weight normalization."""
    conv = nn.Conv1d(*args, **kwargs)
    return nn.utils.weight_norm(conv)
def wn_conv_transpose1d(*args, **kwargs):
    """Build an nn.ConvTranspose1d and wrap it with weight normalization."""
    deconv = nn.ConvTranspose1d(*args, **kwargs)
    return nn.utils.weight_norm(deconv)
def wn_linear(*args, **kwargs):
    # NOTE(review): despite the "wn_" prefix this does NOT apply weight
    # normalization (unlike wn_conv1d/wn_conv_transpose1d above) — it is a
    # plain nn.Linear factory. Presumably intentional; confirm before
    # renaming or adding weight_norm.
    return nn.Linear(*args, **kwargs)
class BaseQuantize(nn.Module):
    """Interface for quantization modules: subclasses must map code ids
    back to embedding vectors via embed_code()."""
    def embed_code(self, embed_id):
        # Abstract: return the embedding vectors for the given code ids.
        raise NotImplementedError
class Quantize(BaseQuantize):
    """Vector-quantization layer with EMA codebook updates (VQ-VAE style).

    The codebook is a (dim, n_embed) buffer updated with an exponential
    moving average during training instead of via gradients.

    params:
        dim: dimensionality of each embedding vector
        n_embed: number of codebook entries
        decay: EMA decay of the codebook statistics
        eps: Laplace-smoothing constant protecting rarely used codes
    """
    def __init__(self, dim, n_embed, decay=0.999, eps=1e-5):
        super().__init__()
        self.dim = dim
        self.n_embed = n_embed
        self.decay = decay
        self.eps = eps
        # Buffers (not Parameters): the codebook and its EMA statistics are
        # updated manually, no gradient flows into them.
        embed = torch.randn(dim, n_embed)
        self.register_buffer('embed', embed)
        self.register_buffer('cluster_size', torch.zeros(n_embed))
        self.register_buffer('embed_avg', embed.clone())
    def forward(self, input):
        # input: (..., dim) -> (quantized, commitment loss, code ids)
        flatten = input.reshape(-1, self.dim)
        # Squared euclidean distance to every code: |x|^2 - 2 x·e + |e|^2.
        dist = (
            flatten.pow(2).sum(1, keepdim=True)
            - 2 * flatten @ self.embed
            + self.embed.pow(2).sum(0, keepdim=True)
        )
        # Nearest codebook entry per input vector.
        _, embed_ind = (-dist).max(1)
        embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)
        embed_ind = embed_ind.view(*input.shape[:-1])
        quantize = self.embed_code(embed_ind)
        if self.training:
            # EMA update of per-code counts and code vectors.
            # NOTE(review): add_(scalar, tensor) is the deprecated overload
            # of add_(tensor, alpha=scalar); numerically equivalent.
            self.cluster_size.data.mul_(self.decay).add_(
                1 - self.decay, embed_onehot.sum(0)
            )
            embed_sum = flatten.transpose(0, 1) @ embed_onehot
            self.embed_avg.data.mul_(self.decay).add_(1 - self.decay, embed_sum)
            n = self.cluster_size.sum()
            # Laplace smoothing so codes with ~0 usage are not divided by ~0.
            cluster_size = (
                (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n
            )
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
            self.embed.data.copy_(embed_normalized)
        # Commitment loss pulls encoder outputs toward their codes.
        diff = (quantize.detach() - input).pow(2).mean()
        # Straight-through estimator: gradients bypass the quantization.
        quantize = input + (quantize - input).detach()
        return quantize, diff, embed_ind
    def embed_code(self, embed_id):
        # Codebook lookup: ids -> embedding vectors.
        return F.embedding(embed_id, self.embed.transpose(0, 1))
class DecomposedQuantize(BaseQuantize):
    """Applies an independent Quantize codebook to each position along
    dim 1 of the input (shape (batch, length, ..., dim)).

    Returns the quantized tensor, the mean commitment loss over all
    positions, and the per-position code ids.
    """
    def __init__(self, length, dim, n_embed, decay=0.999, eps=1e-5):
        super().__init__()
        self.dim = dim
        self.n_embed = n_embed
        # One codebook per position along dim 1.
        self.quantizations = nn.ModuleList([Quantize(dim, n_embed, decay, eps) for _ in range(length)])
    def forward(self, input):
        out = torch.empty_like(input)
        total_diff = None
        ids = torch.empty(*input.shape[:-1], dtype=torch.long, device=input.device)
        for i in range(input.size(1)):
            quant, diff, code = self.quantizations[i](input[:, i])
            out[:, i] = quant
            ids[:, i] = code
            # BUG FIX: the loop variable previously shadowed the accumulator
            # ("if diff is None: diff = diff else: diff += diff"), so the
            # function returned twice the *last* position's loss instead of
            # the sum. Accumulate every position's loss (out-of-place add
            # keeps the autograd graph intact).
            if total_diff is None:
                total_diff = diff
            else:
                total_diff = total_diff + diff
        return out, total_diff / len(self.quantizations), ids
    def embed_code(self, embed_id):
        # Decode each position with its own codebook.
        out = torch.empty(*embed_id.size(), self.dim, dtype=torch.float, device=embed_id.device)
        for i in range(embed_id.size(1)):
            out[:, i] = self.quantizations[i].embed_code(embed_id[:, i])
        return out
class SlicedQuantize(nn.Module):
    """Quantizes vectors by splitting the feature dimension into slices of
    size dim // d_slice and quantizing all slices with one shared Quantize
    codebook.

    NOTE(review): unlike the other quantizers this subclasses nn.Module
    directly and provides no embed_code(); confirm whether it should
    implement the BaseQuantize interface.
    """
    def __init__(self, d_slice, dim, **kwargs):
        super().__init__()
        self.dim = dim // d_slice  # per-slice embedding dimensionality
        self.quantize = Quantize(dim=self.dim, **kwargs)
        self.d_slice = d_slice
    def forward(self, input):
        shape = input.size()
        # Re-fold the trailing dims so the last axis has per-slice size;
        # each slice is then quantized independently by the shared codebook.
        input = input.reshape(*input.shape[:-2], -1, self.dim)
        z, diff, ids = self.quantize(input)
        # Restore the caller's shape; ids keep the sliced layout.
        z = z.view(shape)
        return z, diff, ids
class CategoricalNoise(nn.Module):
    """During training, replaces each element of a categorical input with a
    uniform-random class id with probability p; at eval time the input
    passes through unchanged.
    """
    def __init__(self, n_classes, p):
        super().__init__()
        self.n_classes = n_classes  # number of categories to sample noise from
        self.p = p  # per-element replacement probability
    def forward(self, input):
        if not self.training:
            return input
        keep = (torch.rand(input.shape, device=input.device) > self.p).type(input.dtype)
        noise = torch.randint_like(input, 0, self.n_classes)
        return input * keep + (1 - keep) * noise
class GeometricCategoricalDropout(nn.Module):
    """Produces a stochastic drop mask for categorical ids in [0, n]:
    during training, id k is marked (mask == 1) with probability 1 - q**k,
    so larger ids are dropped more often; at eval time the mask is
    (almost surely) all zeros.
    """
    def __init__(self, n, q, alpha):
        super().__init__()
        if not (0 < q < 1):
            raise ValueError('q must be a value 0 < ... < 1')
        # Normalization constant of the truncated geometric series;
        # currently not used by forward() but kept for compatibility.
        self.a = 1 / (((q ** (n + 1) - 1) / (q - 1)) - 1)
        self.n = n
        self.q = q
        self.alpha = alpha
    def forward(self, input):
        assert input.max() <= self.n
        if not self.training:
            drop_probs = torch.zeros_like(input)
        else:
            drop_probs = 1 - torch.pow(self.q, input.type(torch.float))
        draws = torch.rand(input.shape, device=input.device)
        return (draws <= drop_probs).type(input.dtype)
class Noise(nn.Module):
    """Adds scaled Gaussian noise to its input during training; acts as the
    identity at eval time."""
    def __init__(self, alpha=0.1):
        super().__init__()
        self.alpha = alpha  # noise magnitude
    def forward(self, input):
        if self.training:
            return input + self.alpha * torch.randn_like(input)
        return input
class ResBlock(nn.Module):
    """1-D residual block: ELU -> Conv1d -> ELU -> 1x1 Conv1d with an
    additive skip connection around the whole stack. Input and output both
    have in_channel channels; the hidden conv uses channel channels.
    """
    def __init__(self, in_channel, channel, kernel_size=3, padding=1, dilation=1):
        super().__init__()
        # Defaults give a shape-preserving 3-tap convolution.
        self.conv = nn.Sequential(
            nn.ELU(),
            nn.Conv1d(in_channel, channel, kernel_size=kernel_size,
                      padding=padding, dilation=dilation),
            nn.ELU(),
            nn.Conv1d(channel, in_channel, kernel_size=1),
        )
    def forward(self, input):
        return self.conv(input) + input
class ChannelWiseLayerNorm(nn.Module):
    """Applies LayerNorm over the channel dimension (dim 1) of an
    (N, C, ...) tensor by flattening the trailing dims, normalizing each
    position's channel vector, and restoring the original layout.
    """
    def __init__(self, channels):
        super().__init__()
        self.ln = nn.LayerNorm(channels)
    def forward(self, input):
        original_shape = input.size()
        batch, channels = original_shape[0], original_shape[1]
        # (N, C, ...) -> (N, T, C) so LayerNorm normalizes over C.
        flat = input.view(batch, channels, -1).transpose(1, 2)
        normed = self.ln(flat)
        # Back to the caller's (N, C, ...) layout.
        return normed.transpose(1, 2).view(original_shape)
class Attention(nn.Module):
    """Multi-head (cross-)attention over 1-D feature maps.

    Inputs are (batch, channels, length); queries are projected from
    ``query`` while keys and values are both projected from ``key``.
    Output is (batch, value_dim, query_length).

    params:
        in_dim: channel count of both inputs
        key_query_dim: total key/query projection size (split across heads)
        value_dim: total value projection size (split across heads)
        n_heads: number of attention heads; key_query_dim and value_dim
            must be divisible by it
        tau: softmax temperature (>1 flattens the attention weights)
    """
    def __init__(self, in_dim, key_query_dim, value_dim, n_heads=1, tau=1.0):
        super().__init__()
        # NOTE: wn_linear is a plain nn.Linear factory (no weight norm).
        self.query_w = wn_linear(in_dim, key_query_dim)
        self.key_w = wn_linear(in_dim, key_query_dim)
        self.value_w = wn_linear(in_dim, value_dim)
        self.n_heads = n_heads
        self.kq_head_dim = key_query_dim // n_heads
        self.val_head_dim = value_dim // n_heads
        self.tau = tau
    def forward(self, query, key):
        bs, _, l = query.size()
        # To (batch, length, channels) for the linear projections.
        query_ = query.transpose(1, 2)
        key_ = key.transpose(1, 2)
        def reshape(x, head_dim):
            # (batch, length, heads*dim) -> (batch, heads, length, dim)
            return x.view(bs, -1, self.n_heads, head_dim).transpose(1, 2)
        query = reshape(self.query_w(query_), self.kq_head_dim)
        key = reshape(self.key_w(key_), self.kq_head_dim).transpose(2, 3)
        value = reshape(self.value_w(key_), self.val_head_dim)
        # Scaled dot-product attention with extra temperature tau.
        attn = (query @ key) / sqrt(self.kq_head_dim)
        attn = attn / self.tau
        attn = F.softmax(attn, dim=-1)
        out = attn @ value
        # Merge heads: (batch, heads, length, dim) -> (batch, length, heads*dim).
        out = out.transpose(1, 2).reshape(
            bs, l, self.n_heads * self.val_head_dim
        )
        # Back to channels-first layout.
        out = out.permute(0, 2, 1)
        return out
class EqualizedConv1d(nn.Module):
    """Conv1d with the equalized learning-rate trick (ProGAN/StyleGAN):
    weights are drawn from N(0, 1) and rescaled by sqrt(2 / fan_in) at
    every forward pass instead of at initialization.
    """
    def __init__(self, c_in, c_out, kernel_size, stride=1, padding=0, bias=True):
        super().__init__()
        kernel = nn.modules.utils._single(kernel_size)
        # Unit-normal init; the He-style scaling happens at runtime.
        self.weight = nn.Parameter(nn.init.normal_(
            torch.empty(c_out, c_in, *kernel)
        ))
        self.use_bias = bias
        self.stride = stride
        self.pad = padding
        if self.use_bias:
            self.bias = nn.Parameter(torch.FloatTensor(c_out).fill_(0))
        fan_in = np.prod(kernel) * c_in  # inputs feeding each output unit
        self.scale = np.sqrt(2) / np.sqrt(fan_in)
    def forward(self, x):
        weight = self.weight * self.scale  # apply equalized-lr scaling
        bias = self.bias if self.use_bias else None
        return F.conv1d(input=x, weight=weight, bias=bias,
                        stride=self.stride, padding=self.pad)
class EqualizedConvTranspose1d(nn.Module):
    """ConvTranspose1d with the equalized learning-rate trick: unit-normal
    weights rescaled by sqrt(2 / fan_in) on every forward pass.
    """
    def __init__(self, c_in, c_out, kernel_size, stride=1, padding=0, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.nn.init.normal_(
            torch.empty(c_in, c_out, *nn.modules.utils._single(kernel_size))
        ))
        self.use_bias = bias
        self.stride = stride
        self.pad = padding
        if self.use_bias:
            self.bias = nn.Parameter(torch.FloatTensor(c_out).fill_(0))
        # For a transposed conv the fan-in per output unit is just c_in.
        self.scale = np.sqrt(2) / np.sqrt(c_in)
    def forward(self, x):
        weight = self.weight * self.scale  # apply equalized-lr scaling
        bias = self.bias if self.use_bias else None
        return F.conv_transpose1d(input=x, weight=weight, bias=bias,
                                  stride=self.stride, padding=self.pad)
class EqualizedLinear(nn.Module):
    """Linear layer modified for equalized learning rate: unit-normal
    weights rescaled by sqrt(2 / fan_in) at every forward pass.
    """
    def __init__(self, c_in, c_out, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.nn.init.normal_(
            torch.empty(c_out, c_in)
        ))
        self.use_bias = bias
        if self.use_bias:
            self.bias = nn.Parameter(torch.FloatTensor(c_out).fill_(0))
        self.scale = np.sqrt(2) / np.sqrt(c_in)  # fan_in == c_in
    def forward(self, x):
        return F.linear(x, self.weight * self.scale,
                        self.bias if self.use_bias else None)
# Quick empirical check of GeometricCategoricalDropout: estimate how often
# each category id gets masked and plot the curve against the id.
if __name__ == '__main__':
    n = 4096
    do = GeometricCategoricalDropout(n, 0.998, 1.0)
    i = 10000  # number of Monte-Carlo batches
    res = torch.zeros(n)
    for j in range(i):
        sample = torch.randint(0, n, [128])
        out = do(sample)  # module is in training mode -> stochastic mask
        for k, m in enumerate(out):
            if m:
                res[sample[k]] += 1
    # NOTE(review): this normalizes by the batch count, not by how often
    # each id was actually sampled, so the curve is proportionally scaled
    # rather than a true per-id probability — confirm intent.
    res /= i
    import numpy as np
    import matplotlib.pyplot as plt
    plt.plot(np.arange(n), res.numpy())
    plt.ylim(bottom=0)
    plt.show()
{
"api_name": "torch.nn.utils.weight_norm",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv... |
42154810648 | # 무인도 여행 BFS
from collections import deque
# 4-neighborhood offsets: right, left, up, down.
dirs = [(0, 1), (0, -1), (-1, 0), (1, 0)]
# Digit sum of the island currently being explored (written by bfs).
score = 0
def bfs(maps, visited, x, y):
    """Flood-fill one island starting at (x, y) via breadth-first search.

    Marks every reachable land cell as visited (visited[i][j] -> 0) and
    accumulates the cells' digit values into the module-level ``score``.
    """
    global score
    rows, cols = len(visited), len(visited[0])
    queue = deque([(x, y)])
    visited[x][y] = 0
    score = int(maps[x][y])
    while queue:
        cx, cy = queue.popleft()
        for dx, dy in dirs:
            nx, ny = cx + dx, cy + dy
            # Skip out-of-bounds cells and anything already consumed
            # (water or previously visited land).
            if not (0 <= nx < rows and 0 <= ny < cols):
                continue
            if visited[nx][ny] != 1:
                continue
            visited[nx][ny] = 0
            score += int(maps[nx][ny])
            queue.append((nx, ny))
def solution(maps):
    """Return the sorted food totals of every island in ``maps``, or [-1]
    when there is no island (Programmers 154540, BFS variant).

    A cell holding a digit is land, 'X' is water. Islands are 4-connected
    groups of land cells; each island's total is the sum of its digits,
    computed by bfs via the module-level ``score``.
    """
    global score
    answer = []
    # visited: 1 marks unexplored land, 0 marks water / consumed cells.
    visited = [[0]*len(maps[i]) for i in range(len(maps))]
    for i in range(len(maps)):
        for j in range(len(maps[i])):
            if maps[i][j] != 'X':
                visited[i][j] = 1
    # Flood-fill each still-unvisited land cell; bfs leaves the island's
    # digit sum in ``score``.
    for i in range(len(maps)):
        for j in range(len(maps[i])):
            if visited[i][j] == 1:
                bfs(maps, visited, i, j)
                answer.append(score)
                score = 0
    answer.sort()
    print(answer)  # NOTE(review): debug output left in; drop for submission?
    return [-1] if answer == [] else answer
# Sample run from the problem statement (islands sum to 1, 1 and 27).
solution(["X591X", "X1X5X", "X231X", "1XXX1"])
| FeelingXD/algorithm | programers/154540-2.py | 154540-2.py | py | 1,279 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
2876716401 | from argparse import ArgumentParser
from config_parser import get_config
import os
import yaml
import matplotlib.pyplot as plt
import time
import torch
from torch import nn, optim
import wandb
from typing import Callable, Tuple
from utils.loss import LabelSmoothingLoss
from utils.opt import get_optimizer, get_adversarial_optimizer
from utils.scheduler import WarmUpLR, get_scheduler
from utils.trainer import train, evaluate, evaluate_sliding_window, train_single_batch
from utils.dataset import get_loader, get_noisy_loader
from utils.misc import seed_everything, count_params, get_model, calc_step, log
#from data2vec.data2vec_utils.trainer import train_single_batch
from utils.misc import log, save_model
from torch.utils.data import DataLoader
from data2vec.masking import AudioMaskingGenerator
from models.Data2Vec import Data2Vec
from tqdm import tqdm
import copy
from collections import OrderedDict
import data2vec.data2vec_utils.trainer
import utils.trainer
def adv_pretrain(config_kwt, config_d2v, k, alpha):
    """Adversarial pretraining with noisy data.

    Alternates, batch by batch, between a "friendly" data2vec masked-
    prediction step on clean audio and an "adversarial" KWT classification
    step on noisy audio. Before each step the lower K transformer layers
    are copied over from the other model (via load_partial_state_dict), so
    both models keep sharing those layers.

    Args:
        config_kwt (dict): Keyword transformer config for noise type prediction
        config_d2v (dict): Data2vec config for masked prediction (regression)
        k (int): Lower K common layers that the two models share.
        alpha: Factor forwarded to get_adversarial_optimizer (its exact
            semantics are defined there).
    """
    ######################################
    # save hyperparameters for current run
    ######################################
    config_kwt["exp"]["save_dir"] = os.path.join(config_kwt["exp"]["exp_dir"], config_kwt["exp"]["exp_name"])
    os.makedirs(config_kwt["exp"]["save_dir"], exist_ok=True)
    # NOTE(review): joins only exp_name (no exp_dir) here, and is
    # overwritten with exp_dir/exp_name further below — confirm which
    # directory is intended.
    config_d2v["exp"]["save_dir"] = os.path.join(config_d2v["exp"]["exp_name"])
    os.makedirs(config_d2v["exp"]["save_dir"], exist_ok=True)
    config_str = yaml.dump(config_kwt)
    print("Using settings:\n", config_str)
    with open(os.path.join(config_kwt["exp"]["save_dir"], "settings.txt"), "w+") as f:
        f.write(config_str)
    ######################################
    # data loaders
    ######################################
    # training - adversarial (noisy)
    print("Loading adversarial training dataset...")
    with open(config_kwt["train_list_file"], "r") as f:
        train_lista = f.read().rstrip().split("\n")
    trainloadera = get_noisy_loader(train_lista, config_kwt, train=True)
    # training - friendly (clean)
    print("Loading friendly training dataset...")
    with open(config_d2v["train_list_file"], "r") as f:
        train_listf = f.read().rstrip().split("\n")
    trainloaderf = get_loader(train_listf, config_d2v, train=True)
    # validation - adversarial
    print("Loading adversarial validation dataset...")
    with open(config_kwt["val_list_file"], "r") as f:
        val_lista = f.read().rstrip().split("\n")
    valloadera = get_noisy_loader(val_lista, config_kwt, train=False)
    # validation - friendly
    print("Loading friendly validation dataset...")
    with open(config_d2v["val_list_file"], "r") as f:
        val_listf = f.read().rstrip().split("\n")
    valloaderf = get_loader(val_listf, config_d2v, train=False)
    mask_generator = AudioMaskingGenerator(mask_prob=config_d2v["hparams"]["model"]["mask_prob"],
                                           mask_length=config_d2v["hparams"]["model"]["mask_length"],
                                           attention_mask=None,
                                           min_masks=config_d2v["hparams"]["model"]["min_masks"])
    ######################################
    # models
    ######################################
    # KWT model (adversarial classifier)
    model_kwt = get_model(config_kwt["hparams"]["model"])
    model_kwt = model_kwt.to(config_kwt["hparams"]["device"])
    model_kwt_copy = copy.deepcopy(model_kwt)
    # data2vec model wrapping a copy of the KWT encoder
    model_d2v = Data2Vec(encoder=model_kwt_copy,
                         modality=config_d2v["modality"],
                         model_embed_dim=config_d2v["hparams"]["model"]["dim"],
                         ema_decay=config_d2v["hparams"]["model"]["ema_decay"],
                         ema_end_decay=config_d2v["hparams"]["model"]["ema_end_decay"],
                         ema_anneal_end_step=config_d2v["hparams"]["model"]["ema_anneal_end_step"],
                         average_top_k_layers=config_d2v["hparams"]["model"]["average_top_k_layers"],
                         normalize_targets=config_d2v["hparams"]["model"]["normalize_targets"])
    model_d2v = model_d2v.to(config_d2v["hparams"]["device"])
    criterion_kwt = nn.CrossEntropyLoss()
    criterion_d2v = nn.MSELoss(reduction="none")
    parameters_d2v = model_d2v.parameters()
    # optimizer for KWT (adversarial: scales the shared layers via k/alpha)
    optimizer_kwt = get_adversarial_optimizer(model_kwt, config_kwt["hparams"]["optimizer"], k, alpha)
    # optimizer for data2vec
    optimizer_d2v = optim.Adam(parameters_d2v, lr=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["lr"],
                               betas=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["betas"],
                               eps=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["eps"],
                               weight_decay=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["weight_decay"])
    # Learning rate scheduler for data2vec
    epochs = config_d2v["hparams"]["n_epochs"]
    steps_per_epoch = len(trainloaderf)
    lr_scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer_d2v,
        max_lr=config_d2v["hparams"]["optimizer"]["opt_kwargs"]["lr"],
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        anneal_strategy="cos")
    schedulers_d2v = {"scheduler": lr_scheduler,
                      "warmup": 0}
    # Learning rate scheduler for KWT
    schedulers_kwt = {
        "warmup": None,
        "scheduler": None
    }
    # Setting up the learning rate schedulers for data2vec and KWT.
    # NOTE(review): the d2v branch below constructs both warmup and
    # scheduler on optimizer_kwt (not optimizer_d2v), replacing the
    # OneCycleLR set up above — looks like a copy-paste slip; confirm
    # before changing training dynamics.
    if config_d2v["hparams"]["scheduler"]["n_warmup"]:
        schedulers_d2v["warmup"] = WarmUpLR(optimizer_kwt, total_iters=len(trainloaderf) * config_d2v["hparams"]["scheduler"]["n_warmup"])
    if config_d2v["hparams"]["scheduler"]["scheduler_type"] is not None:
        total_iters = len(trainloaderf) * max(1, (config_d2v["hparams"]["scheduler"]["max_epochs"] - config_d2v["hparams"]["scheduler"]["n_warmup"]))
        schedulers_d2v["scheduler"] = get_scheduler(optimizer_kwt, config_d2v["hparams"]["scheduler"]["scheduler_type"], total_iters)
    if config_kwt["hparams"]["scheduler"]["n_warmup"]:
        schedulers_kwt["warmup"] = WarmUpLR(optimizer_kwt, total_iters=len(trainloadera) * config_kwt["hparams"]["scheduler"]["n_warmup"])
    if config_kwt["hparams"]["scheduler"]["scheduler_type"] is not None:
        total_iters = len(trainloadera) * max(1, (config_kwt["hparams"]["scheduler"]["max_epochs"] - config_kwt["hparams"]["scheduler"]["n_warmup"]))
        schedulers_kwt["scheduler"] = get_scheduler(optimizer_kwt, config_kwt["hparams"]["scheduler"]["scheduler_type"], total_iters)
    # Saving directory for the data2vec model
    config_d2v["exp"]["save_dir"] = os.path.join(config_d2v["exp"]["exp_dir"], config_d2v["exp"]["exp_name"])
    os.makedirs(config_d2v["exp"]["save_dir"], exist_ok=True)
    ######################################
    # train models
    ######################################
    step = 0
    best_acc = 0.0
    device = config_d2v["hparams"]["device"]
    log_file = os.path.join(config_d2v["exp"]["exp_dir"], "training_log.txt")
    # NOTE(review): initialised to 0.0, so "avg_val_loss < best_avg_loss"
    # can only trigger after the forced save at epoch == val_freq below.
    best_avg_loss = 0.0
    n_batches = len(trainloaderf)
    for epoch in range(config_d2v["hparams"]["n_epochs"]):
        t0 = time.time()
        running_loss_d2v = 0.0
        running_target_var_d2v = 0.0
        running_prediction_var_d2v = 0.0
        running_loss_kwt = 0.0
        correct_kwt = 0
        # zip() stops at the shorter loader; each iteration performs one
        # friendly (d2v) step and one adversarial (KWT) step.
        for (dataf, targetsf), (dataa, targetsa) in zip(trainloaderf, trainloadera):
            batch_size = dataf.size(dim=0)
            audio_length = dataf.size(dim=-1)
            ######################################
            # data2vec step - friendly step
            ######################################
            # masking (extra leading zero column keeps the class token unmasked)
            mask = mask_generator(shape=(batch_size, audio_length)).to(device)
            mask = torch.cat([torch.zeros(batch_size, 1, device=mask.device), mask], dim=1).bool()
            # load the first K transformer layers of KWT into data2vec
            kwt_partial_state_dict = load_partial_state_dict(model_kwt.state_dict(), k)
            model_d2v.load_state_dict(kwt_partial_state_dict, strict=False)
            # train single batch
            loss_d2v, target_var_d2v, prediction_var_d2v = data2vec.data2vec_utils.trainer.train_single_batch(model_d2v, dataf, mask, optimizer_d2v, criterion_d2v, device)
            model_d2v.ema_step()
            running_loss_d2v += loss_d2v
            running_target_var_d2v += target_var_d2v
            running_prediction_var_d2v += prediction_var_d2v
            # learning rate scheduler
            if schedulers_d2v["warmup"] is not None and epoch < config_d2v["hparams"]["scheduler"]["n_warmup"]:
                schedulers_d2v["warmup"].step()
            elif schedulers_d2v["scheduler"] is not None:
                schedulers_d2v["scheduler"].step()
            # logging data2vec step
            if not step % config_d2v["exp"]["log_freq"]:
                log_dict = {"epoch": epoch, "loss": loss_d2v, "lr": optimizer_d2v.param_groups[0]["lr"],
                            "target_var": target_var_d2v, "prediction_var": prediction_var_d2v}
                log(log_dict, step, config_d2v)
            ######################################
            # kwt step - adversarial step
            ######################################
            # load the first K layers of the data2vec encoder into KWT
            d2v_partial_state_dict = load_partial_state_dict(model_d2v.state_dict(), k)
            model_kwt.load_state_dict(d2v_partial_state_dict, strict=False)
            # train single batch
            loss_kwt, corr_kwt = utils.trainer.train_single_batch(model_kwt, dataa, targetsa, optimizer_kwt, criterion_kwt, device)
            running_loss_kwt += loss_kwt
            correct_kwt += corr_kwt
            # learning rate scheduler
            if schedulers_kwt["warmup"] is not None and epoch < config_kwt["hparams"]["scheduler"]["n_warmup"]:
                schedulers_kwt["warmup"].step()
            elif schedulers_kwt["scheduler"] is not None:
                schedulers_kwt["scheduler"].step()
            # logging KWT step
            if not step % config_kwt["exp"]["log_freq"]:
                log_dict = {"epoch": epoch, "loss": loss_kwt, "lr": optimizer_kwt.param_groups[0]["lr"]}
                # FIX: log_dict was built but never emitted; log it the same
                # way the data2vec step above does.
                log(log_dict, step, config_kwt)
            step += 1
        #################################################
        # epoch complete - log, validation and save model
        #################################################
        # data2vec log, validation and save model
        log_dict = {"epoch": epoch, "time_per_epoch": time.time() - t0,
                    "avg_train_target_var": running_target_var_d2v / n_batches,
                    "avg_train_prediction_var": running_prediction_var_d2v / n_batches,
                    "avg_loss_per_ep": running_loss_d2v / len(trainloaderf.dataset)}
        log(log_dict, step, config_d2v)
        if not epoch % config_d2v["exp"]["val_freq"]:
            avg_val_loss, avg_val_target_var, avg_val_prediction_var = data2vec.data2vec_utils.trainer.evaluate(model_d2v, mask_generator, criterion_d2v,
                                                                                                                valloaderf, device)
            log_dict = {"epoch": epoch, "val_loss": avg_val_loss,
                        "avg_val_target_var": avg_val_target_var, "avg_val_prediction_var": avg_val_prediction_var}
            #log(log_dict, step, config_d2v)
            # save best validation checkpoint (the epoch == val_freq clause
            # forces the first save, since best_avg_loss starts at 0.0)
            if avg_val_loss < best_avg_loss or epoch == config_d2v["exp"]["val_freq"]:
                best_avg_loss = avg_val_loss
                save_path = os.path.join(config_d2v["exp"]["save_dir"], "best.pth")
                save_model(epoch, avg_val_loss, save_path, model_d2v, optimizer_d2v, log_file)
                save_path = os.path.join(config_d2v["exp"]["save_dir"], "best_encoder.pth")
                save_model(epoch, avg_val_loss, save_path, model_d2v.encoder, optimizer_d2v, log_file)
        # kwt log, validation and save model
        log_dict = {"epoch": epoch, "time_per_epoch": time.time() - t0, "train_acc": correct_kwt/(len(trainloadera.dataset)), "avg_loss_per_ep": running_loss_kwt/len(trainloaderf)}
        log(log_dict, step, config_kwt)
        if not epoch % config_kwt["exp"]["val_freq"]:
            val_acc, avg_val_loss = utils.trainer.evaluate(model_kwt, criterion_kwt, valloadera, device)
            log_dict = {"epoch": epoch, "val_loss": avg_val_loss, "val_acc": val_acc}
            log(log_dict, step, config_kwt)
            # save best val ckpt
            if val_acc > best_acc:
                best_acc = val_acc
                save_path = os.path.join(config_kwt["exp"]["exp_dir"], "best.pth")
                save_model(epoch, val_acc, save_path, model_kwt, optimizer_kwt, log_file)
    # training complete
    # data2vec final evaluation
    avg_val_loss, avg_val_target_var, avg_val_prediction_var = data2vec.data2vec_utils.trainer.evaluate(model_d2v, mask_generator, criterion_d2v, valloaderf,
                                                                                                        device)
    log_dict = {"epoch": epoch, "val_loss": avg_val_loss,
                "avg_val_target_var": avg_val_target_var, "avg_val_prediction_var": avg_val_prediction_var}
    log(log_dict, step, config_d2v)
    # data2vec save final checkpoint
    save_path = os.path.join(config_d2v["exp"]["exp_dir"], "last.pth")
    save_model(epoch, avg_val_loss, save_path, model_d2v, optimizer_d2v, log_file)
    save_path = os.path.join(config_d2v["exp"]["exp_dir"], "last_encoder.pth")
    save_model(epoch, avg_val_loss, save_path, model_d2v.encoder, optimizer_d2v, log_file)
    # kwt final evaluation
    val_acc, avg_val_loss = evaluate(model_kwt, criterion_kwt, valloadera, device)
    log_dict = {"epoch": epoch, "val_loss": avg_val_loss, "val_acc": val_acc}
    log(log_dict, step, config_kwt)
    # kwt save final checkpoint
    save_path = os.path.join(config_kwt["exp"]["exp_dir"], "last.pth")
    save_model(epoch, val_acc, save_path, model_kwt, optimizer_kwt, log_file)
def load_partial_state_dict(state_dict, K):
    """Extract the parameters of the first K transformer layers.

    Keeps only entries whose key has the form
    ``transformer.<sublist>.<layer_idx>...`` with ``layer_idx < K``; all
    other entries (patch embedding, head, ...) are dropped, matching the
    original behavior.

    Args:
        state_dict: Source state dict (ordered mapping name -> tensor).
        K: Number of leading transformer layers to keep.

    Returns:
        OrderedDict: The filtered subset, preserving the original order.
    """
    # Removed: dead "before_transformer" bookkeeping (its only consumer
    # was commented out) and leftover commented-out debug prints.
    partial = OrderedDict()
    for name, value in state_dict.items():
        parts = name.split('.')
        # Keys look like "transformer.<block list>.<layer idx>.<...>".
        if parts[0] == "transformer" and int(parts[2]) < K:
            partial[name] = value
    return partial
def main(args):
    """Load both configs, seed the RNGs, and launch adversarial pretraining."""
    config_kwt = get_config(args.confk)
    config_data2vec = get_config(args.confd)
    # Seed from the KWT config so runs are reproducible.
    seed_everything(config_kwt['hparams']['seed'])
    # NOTE(review): alpha is hard-coded rather than exposed as a CLI flag.
    alpha = 1
    adv_pretrain(config_kwt, config_data2vec, args.k, alpha)
if __name__ == "__main__":
parser = ArgumentParser("Adversarial pretraining")
parser.add_argument("--confk", type=str, required=True, help="Path to config.yaml file for KWT.")
parser.add_argument("--confd", type=str, required=True, help="Path to config.yaml file for data2vec.")
parser.add_argument("--k", type=int, required=True, help="First K transformer layers to update")
args = parser.parse_args()
main(args) | GregTheHunInDk/Robust_KWT | adv_pretrain.py | adv_pretrain.py | py | 16,336 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number... |
9791587429 | from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from task.models import Task, TaskImage
from apiApp.serializers import TaskSerializer, TaskImageSerializer
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.http import Http404
from rest_framework.views import APIView
def home(request):
    """Health-check style endpoint; always responds with plain text 'ok'."""
    return HttpResponse('ok')
class TaskList( APIView):
    """List all tasks (GET) or create a new task (POST)."""
    def get(self, request, format=None):
        # Serialize every Task row.
        task = Task.objects.all()
        serializer = TaskSerializer(task, many=True)
        return Response(serializer.data)
    def post(self, request, format=None):
        # Validate and persist a new Task; 400 with field errors on failure.
        serializer = TaskSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TaskDetail(APIView):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single Task."""
    def get_object(self, pk):
        # Resolve the Task or translate a missing row into an HTTP 404.
        try:
            return Task.objects.get(pk=pk)
        except Task.DoesNotExist:
            raise Http404
    def get(self, request, pk, format=None):
        task = self.get_object(pk)
        serializer = TaskSerializer(task)
        return Response(serializer.data)
    def put(self, request, pk, format=None):
        # Full update: replace the task's fields with the request payload.
        task = self.get_object(pk)
        serializer = TaskSerializer(task, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk, format=None):
        task = self.get_object(pk)
        task.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class TaskImageList(APIView):
def get(self, request, format=None):
images = TaskImage.objects.all()
serializer = TaskImageSerializer(images, many=True)
return Response(serializer.data)
| osmangony01/DJango_task_RestAPI | apiApp/views.py | views.py | py | 2,133 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "task.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tas... |
42683425330 | from flask import Flask, Response, render_template
import os
from record_sound import RecordingVoice
import json
import pyaudio
from array import array
import time
import shutil
import numpy as np
from datetime import datetime
WAVE_PATH = 'data/waves'
app = Flask(__name__)
if not os.path.isdir(WAVE_PATH):
os.makedirs(WAVE_PATH)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/decode')
def decode():
print("start decoding")
def process():
r = RecordingVoice()
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=r.CHUNK)
stream.start_stream()
count = 0
frames = []
date_time = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
current_path = WAVE_PATH + '/' + date_time
os.makedirs(current_path)
while True:
data = stream.read(r.CHUNK)
data_chunk = np.frombuffer(data, dtype='B')
print(data_chunk)
vol = max(data_chunk)
if(vol > 100):
frames.append(data)
r.save(frames, 'data/tmp.wav')
count += 1
r.save(frames, current_path + '/tmp'+str(count)+'.wav')
print("finished recording")
decoded_str = get_decode()
print(decoded_str, file=open(current_path + '/tmp'+str(count)+'.txt', 'w'))
with open(current_path + '/tmp'+str(count)+'.txt', encoding = "utf8") as file:
decoded_str = file.read().replace('\n', '')
data_ = json.dumps(
{
'value': decoded_str
}
)
print(data_)
yield f"data:{data_}\n\n"
return Response(process(), mimetype='text/event-stream')
def record():
r = RecordingVoice()
return r.record()
def get_decode():
command = "online-wav-gmm-decode-faster " \
"--rt-min=0.3 --rt-max=0.5 --max-active=4000 --beam=12.0 --acoustic-scale=0.0769 " \
"scp:./data/wav.scp ./model/final.mdl ./model/HCLG.fst ./model/words.txt " \
"1:2:3:4:5 ark,t:./model/trans.txt ark,t:./model/ali.txt"
dec = os.popen(command)
return dec.read()
if __name__ == '__main__':
app.run(debug=True, threaded=True)
| Maryia-M/MSI_Project | app.py | app.py | py | 2,460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number... |
70727262185 | import boto3
import logging
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def str2bool(value):
return value.lower() in ("True",True,"False",False)
def lambda_handler(event, context):
torun = event['torun']
if torun == "true":
awsregion = os.environ['AWS_REGION']
dbservice = event['dbservice']
cluster = event['cluster']
dbinstance = event['dbinstance']
application = event['application']
environment = event['environment']
dbclass = event['dbclass']
engine = event['engine']
subgrp = event['subgrp']
dbparamgrp = event['dbparamgrp']
autominor = str2bool(event['autominor'])
copytagstosnap = str2bool(event['copytagstosnap'])
enhancedmon = str2bool(event['enhancedmon'])
perfinsights = str2bool(event['perfinsights'])
cluster = event['cluster']
tempdbinstance = dbinstance + "temp"
rdsclient = boto3.client('rds', region_name=awsregion)
if dbservice == 'aurora':
if enhancedmon == "True" and perfinsights == "False":
enhancedmoninterval = event['enhancedmoninterval']
enhancedmonrolearn = event['enhancedmonrolearn']
response = rdsclient.create_db_instance(
DBInstanceIdentifier=tempdbinstance,
DBInstanceClass=dbclass,
Engine=engine,
DBSubnetGroupName=subgrp,
DBParameterGroupName=dbparamgrp,
AutoMinorVersionUpgrade=autominor,
PubliclyAccessible=False,
Tags=[
{
'Key': 'refresh-application',
'Value': application
},
{
'Key': 'refresh-environment',
'Value': environment
},
{
'Key': 'refresh-instance',
'Value': 'to_modify_after_rename'
},
{
'Key': 'refresh',
'Value': 'true'
}
],
DBClusterIdentifier=cluster,
MonitoringInterval=enhancedmoninterval,
MonitoringRoleArn=enhancedmonrolearn,
CopyTagsToSnapshot=copytagstosnap
)
elif enhancedmon == "True" and perfinsights == "True":
enhancedmoninterval = event['enhancedmoninterval']
enhancedmonrolearn = event['enhancedmonrolearn']
perfinsightsretention = event['perfinsightsretention']
perfinsightskmskeyid = event['perfinsightskmskeyid']
response = rdsclient.create_db_instance(
DBInstanceIdentifier=tempdbinstance,
DBInstanceClass=dbclass,
Engine=engine,
DBSubnetGroupName=subgrp,
DBParameterGroupName=dbparamgrp,
AutoMinorVersionUpgrade=autominor,
PubliclyAccessible=False,
Tags=[
{
'Key': 'refresh-application',
'Value': application
},
{
'Key': 'refresh-environment',
'Value': environment
},
{
'Key': 'refresh-instance',
'Value': 'to_modify_after_rename'
},
{
'Key': 'refresh',
'Value': 'true'
}
],
DBClusterIdentifier=cluster,
MonitoringInterval=enhancedmoninterval,
MonitoringRoleArn=enhancedmonrolearn,
EnablePerformanceInsights=perfinsights,
PerformanceInsightsRetentionPeriod=perfinsightsretention,
PerformanceInsightsKMSKeyId=perfinsightskmskeyid,
CopyTagsToSnapshot=copytagstosnap
)
elif enhancedmon == "False" and perfinsights == "True":
perfinsightsretention = event['perfinsightsretention']
perfinsightskmskeyid = event['perfinsightskmskeyid']
response = rdsclient.create_db_instance(
DBInstanceIdentifier=tempdbinstance,
DBInstanceClass=dbclass,
Engine=engine,
DBSubnetGroupName=subgrp,
DBParameterGroupName=dbparamgrp,
AutoMinorVersionUpgrade=autominor,
PubliclyAccessible=False,
Tags=[
{
'Key': 'refresh-application',
'Value': application
},
{
'Key': 'refresh-environment',
'Value': environment
},
{
'Key': 'refresh-instance',
'Value': 'to_modify_after_rename'
},
{
'Key': 'refresh',
'Value': 'true'
}
],
DBClusterIdentifier=cluster,
EnablePerformanceInsights=perfinsights,
PerformanceInsightsRetentionPeriod=perfinsightsretention,
PerformanceInsightsKMSKeyId=perfinsightskmskeyid,
CopyTagsToSnapshot=copytagstosnap
)
else:
response = rdsclient.create_db_instance(
DBInstanceIdentifier=tempdbinstance,
DBInstanceClass=dbclass,
Engine=engine,
DBSubnetGroupName=subgrp,
DBParameterGroupName=dbparamgrp,
AutoMinorVersionUpgrade=autominor,
PubliclyAccessible=False,
Tags=[
{
'Key': 'refresh-application',
'Value': application
},
{
'Key': 'refresh-environment',
'Value': environment
},
{
'Key': 'refresh-instance',
'Value': 'to_modify_after_rename'
},
{
'Key': 'refresh',
'Value': 'true'
}
],
DBClusterIdentifier=cluster,
CopyTagsToSnapshot=copytagstosnap
)
result = "Instance creation initiated"
else:
raise ValueError("Database service specified unknown or not supported by this function")
return {
"statusCode": 200,
"body": result
}
else:
result = "Instance creation skipped"
return {
"statusCode": 200,
"body": result
} | AlphaITSystems/dbrefresh | awssoldb-orchestrator-pkg-cloudformation/functions/awssoldb-CreateInstance.py | awssoldb-CreateInstance.py | py | 7,794 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "boto3.client",
... |
74307652262 | import numpy as np
import pandas as pd
import sklearn as skl
import matplotlib.pyplot as plt
plt.close('all')
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import r2_score
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
df = pd.read_csv('../data/raw/spi_matches.csv')
df = df[['season', 'league', 'team1', 'team2', 'spi1', 'spi2', 'score1', 'score2']]
futureGames = df
df = df.dropna()
futureGames = futureGames[~futureGames.isin(df).all(1)]
df = pd.get_dummies(df, columns=['league', 'team1', 'team2'])
df['season'] = (df['season'] - 2016) / 4
df['spi1'] = df['spi1'] / 100
df['spi2'] = df['spi2'] / 100
dfDiff = df[['spi1', 'spi2', 'score1', 'score2']]
dfDiff['spi1-spi2'] = dfDiff['spi1'] - dfDiff['spi2']
Y1 = df['score1'].values
Y2 = df['score2'].values
score1 = df.pop('score1')
score2 = df.pop('score2')
df['spidiff'] = dfDiff['spi1-spi2']
Xall = df.values
Xall_train1, Xall_validate1, Yall_train1, Yall_validate1 = train_test_split(Xall, Y1, test_size=0.2, shuffle=True)
Xall_train2, Xall_validate2, Yall_train2, Yall_validate2 = train_test_split(Xall, Y2, test_size=0.2, shuffle=True)
model1 = RandomForestRegressor(max_depth=8, n_jobs=-1).fit(Xall_train1, Yall_train1)
model2 = RandomForestRegressor(max_depth=8, n_jobs=-1).fit(Xall_train2, Yall_train2)
predictions1 = model1.predict(Xall_validate1)
predictions2 = model2.predict(Xall_validate2)
print(r2_score(Yall_validate1, predictions1))
print(r2_score(Yall_validate2, predictions2))
df = pd.DataFrame(data=Xall_validate1[:, 1:3], columns=['spi1', 'spi2'])
df['spidiff'] = df['spi1'] - df['spi2']
df['score1'] = pd.DataFrame(data=Yall_validate1)
df['score2'] = pd.DataFrame(data=Yall_validate2)
df['pred_score1'] = pd.DataFrame(data=predictions1)
df['pred_score2'] = pd.DataFrame(data=predictions2)
print(df)
| elizabethwyss/EECS731MajorLeagues | src/src.py | src.py | py | 2,127 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.close",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas... |
21365549144 | import torch
import numpy as np
from dptb.dataprocess.processor import Processor
from dptb.nnet.nntb import NNTB
from dptb.nnsktb.sknet import SKNet
from dptb.sktb.skIntegrals import SKIntegrals
from dptb.sktb.struct_skhs import SKHSLists
from dptb.hamiltonian.hamil_eig_sk_crt import HamilEig
from dptb.utils.constants import dtype_dict
from dptb.structure.structure import BaseStruct
from dptb.utils.tools import nnsk_correction
from abc import ABC, abstractmethod
from dptb.utils.index_mapping import Index_Mapings
from dptb.nnsktb.integralFunc import SKintHops
from dptb.nnsktb.onsiteFunc import onsiteFunc, loadOnsite
from dptb.nnsktb.skintTypes import all_skint_types, all_onsite_intgrl_types
class ModelAPI(ABC):
def __init__(self) -> None:
pass
@abstractmethod
def get_HR(self,**kwargs):
pass
@abstractmethod
def get_HK(self, **kwargs):
pass
@abstractmethod
def get_eigenvalues(self, **kwargs):
pass
class DeePTB(ModelAPI):
def __init__(self, dptb_checkpoint:str, proj_atom_anglr_m:dict,
sktbmode:str='nnsk', nnsk_checkpoint:str = None, sk_file_path=None):
f=torch.load(dptb_checkpoint)
model_config = f["model_config"]
self.nntb = NNTB(**model_config)
self.nntb.tb_net.load_state_dict(f['state_dict'])
self.nntb.tb_net.eval()
self.sktbmode = sktbmode
self.unitenergy = model_config.get('unit','Hartree')
if sktbmode == 'nnsk':
f = torch.load(nnsk_checkpoint)
model_config = f["model_config"]
self.sknet = SKNet(**model_config)
self.sknet.load_state_dict(f['state_dict'])
self.sknet.eval()
#for p in self.sknet.parameters():
# p.requires_grad = False
indmap = Index_Mapings(proj_atom_anglr_m)
bond_index_map, bond_num_hops = indmap.Bond_Ind_Mapings()
onsite_index_map, onsite_num = indmap.Onsite_Ind_Mapings()
self.hops_fun = SKintHops(mode=model_config.get("skformula", "powerlaw"))
self.onsite_db = loadOnsite(onsite_index_map)
all_skint_types_dict, reducted_skint_types, self.sk_bond_ind_dict = all_skint_types(bond_index_map)
else:
skint = SKIntegrals(proj_atom_anglr_m = proj_atom_anglr_m, sk_file_path=sk_file_path)
self.skhslist = SKHSLists(skint,dtype='tensor')
self.hamileig = HamilEig(dtype=dtype_dict[model_config["dtype"]])
self.if_HR_ready=False
def get_HR(self, structure, env_cutoff, device='cpu', dtype=torch.float32):
assert isinstance(structure, BaseStruct)
self.structure = structure
self.time_symm = structure.time_symm
predict_process = Processor(mode='dptb', structure_list=structure, batchsize=1, kpoint=None, eigen_list=None, env_cutoff=env_cutoff, device=device, dtype=dtype)
bond, bond_onsite = predict_process.get_bond()
env = predict_process.get_env()
batched_dcp = self.nntb.get_desciptor(env)
# get hoppings (SK type bond integrals.)
batch_bond_hoppings, batch_hoppings = self.nntb.hopping(batched_dcp=batched_dcp, batch_bond=bond)
# get onsite energies
batch_bond_onsites, batch_onsiteEs = self.nntb.onsite(batched_dcp=batched_dcp)
if self.sktbmode == 'nnsk':
coeffdict = self.sknet()
sktb_onsiteEs = onsiteFunc(batch_bond_onsites, self.onsite_db)
sktb_hoppings = self.hops_fun.get_skhops(batch_bond_hoppings, coeffdict, self.sk_bond_ind_dict)
# combine the nn and sk part for the hamiltonian.
onsiteEs, hoppings, onsiteSs, overlaps = \
nnsk_correction(nn_onsiteEs=batch_onsiteEs[0],
nn_hoppings=batch_hoppings[0],
sk_onsiteEs=sktb_onsiteEs[0],
sk_hoppings=sktb_hoppings[0])
else:
# get the sk parameters.
self.skhslist.update_struct(structure)
self.skhslist.get_HS_list(bonds_onsite=np.asarray(batch_bond_onsites[0][:,1:]),
bonds_hoppings=np.asarray(batch_bond_hoppings[0][:,1:]))
# combine the nn and sk part for the hamiltonian.
onsiteEs, hoppings, onsiteSs, overlaps = \
nnsk_correction(nn_onsiteEs=batch_onsiteEs[0],
nn_hoppings=batch_hoppings[0],
sk_onsiteEs=self.skhslist.onsiteEs,
sk_hoppings=self.skhslist.hoppings,
sk_onsiteSs=self.skhslist.onsiteSs,
sk_overlaps=self.skhslist.overlaps)
self.hamileig.update_hs_list(structure, hoppings, onsiteEs, overlaps, onsiteSs)
self.hamileig.get_hs_blocks(bonds_onsite=np.asarray(batch_bond_onsites[0][:,1:]),
bonds_hoppings=np.asarray(batch_bond_hoppings[0][:,1:]))
self.if_HR_ready=True
if not self.hamileig.use_orthogonal_basis:
return self.hamileig.all_bonds, self.hamileig.hamil_blocks, None
else:
return self.hamileig.all_bonds, self.hamileig.hamil_blocks, self.hamileig.overlap_blocks
# ToDo 现在版本的程序对于正交基和非正交基组的情况有些不兼容的地方。后续要修改!
def get_HK(self, kpoints):
assert self.if_HR_ready
if not self.hamileig.use_orthogonal_basis:
hkmat = self.hamileig.hs_block_R2k(kpoints=kpoints, HorS='H', time_symm=self.time_symm, dtype=self.hamileig.dtype)
skmat = self.hamileig.hs_block_R2k(kpoints=kpoints, HorS='S', time_symm=self.time_symm, dtype=self.hamileig.dtype)
else:
hkmat = self.hamileig.hs_block_R2k(kpoints=kpoints, HorS='H', time_symm=self.time_symm, dtype=self.hamileig.dtype)
skmat = torch.eye(hkmat.shape[1], dtype=torch.complex64).unsqueeze(0).repeat(hkmat.shape[0], 1, 1)
return hkmat, skmat
def get_eigenvalues(self,kpoints,spindeg=2):
assert self.if_HR_ready
eigenvalues,_ = self.hamileig.Eigenvalues(kpoints, time_symm=self.time_symm,dtype=self.hamileig.dtype, unit=self.unitenergy)
eigks = eigenvalues.detach().numpy()
num_el = np.sum(self.structure.proj_atom_neles_per)
nk = len(kpoints)
numek = num_el * nk // spindeg
sorteigs = np.sort(np.reshape(eigks,[-1]))
EF=(sorteigs[numek] + sorteigs[numek-1])/2
return eigks, EF
class NNSK(ModelAPI):
def __init__(self,checkpoint, proj_atom_anglr_m):
f=torch.load(checkpoint)
model_config = f["model_config"]
self.onsitemode = model_config['onsitemode']
self.onsite_cutoff = model_config.get('onsite_cutoff',0.)
self.model = SKNet(**model_config)
self.model.load_state_dict(f['state_dict'])
self.sk_options = f.get("sk_options", None)
self.model.eval()
self.unitenergy = model_config.get('unit', 'Hartree')
if self.sk_options is not None:
self.skformula = self.sk_options["skformula"]
self.sk_cutoff = self.sk_options["sk_cutoff"]
self.sk_decay_w = self.sk_options["sk_decay_w"]
else:
self.skformula = "varTang96"
self.sk_cutoff = torch.tensor(6.0)
self.sk_decay_w = torch.tensor(0.1)
self.indmap = Index_Mapings(proj_atom_anglr_m)
bond_index_map, bond_num_hops = self.indmap.Bond_Ind_Mapings()
self.onsite_strain_index_map, onsite_strain_num, onsite_index_map, onsite_num = self.indmap.Onsite_Ind_Mapings(onsitemode=self.onsitemode, atomtype=model_config.get("atom_type"))
self.hops_fun = SKintHops(mode=self.skformula)
self.onsite_db = loadOnsite(onsite_index_map)
all_skint_types_dict, reducted_skint_types, self.sk_bond_ind_dict = all_skint_types(bond_index_map)
self.hamileig = HamilEig(dtype=dtype_dict[model_config["dtype"]])
self.if_HR_ready=False
def get_HR(self, structure, device='cpu', dtype=torch.float32):
assert isinstance(structure, BaseStruct)
assert structure.onsitemode == self.onsitemode
self.structure = structure
self.time_symm = structure.time_symm
predict_process = Processor(structure_list=structure, batchsize=1, kpoint=None, eigen_list=None, device=device, dtype=dtype,
env_cutoff=self.onsite_cutoff, onsitemode=self.onsitemode, onsite_cutoff=self.onsite_cutoff, sorted_onsite="st", sorted_bond="st", sorted_env="st")
batch_bond, batch_bond_onsites = predict_process.get_bond(sorted="st")
if self.onsitemode == 'strain':
batch_envs = predict_process.get_env(sorted="st")
nn_onsiteE, onsite_coeffdict = self.model(mode='onsite')
all_onsiteint_types_dcit, reducted_onsiteint_types, self.onsite_strain_ind_dict = all_onsite_intgrl_types(self.onsite_strain_index_map)
batch_onsiteVs = self.hops_fun.get_skhops(batch_bonds=batch_envs, coeff_paras=onsite_coeffdict, sk_bond_ind=self.onsite_strain_ind_dict)
batch_onsiteEs = onsiteFunc(batch_bonds_onsite=batch_bond_onsites, onsite_db=self.onsite_db, nn_onsiteE=None)
else:
nn_onsiteE, onsite_coeffdict = self.model(mode='onsite')
batch_onsiteEs = onsiteFunc(batch_bonds_onsite=batch_bond_onsites, onsite_db=self.onsite_db, nn_onsiteE=nn_onsiteE)
coeffdict = self.model(mode='hopping')
batch_hoppings = self.hops_fun.get_skhops(batch_bonds=batch_bond, coeff_paras=coeffdict, sk_bond_ind=self.sk_bond_ind_dict, rcut=self.sk_cutoff, w=self.sk_decay_w)
onsiteEs, hoppings = batch_onsiteEs[0], batch_hoppings[0]
if self.onsitemode == 'strain':
onsiteVs = batch_onsiteVs[0]
self.hamileig.update_hs_list(struct=structure, hoppings=hoppings, onsiteEs=onsiteEs, onsiteVs=onsiteVs)
self.hamileig.get_hs_blocks(bonds_onsite=np.asarray(batch_bond_onsites[0][:,1:]),
bonds_hoppings=np.asarray(batch_bond[0][:,1:]),
onsite_envs=np.asarray(batch_envs[0][:,1:]))
else:
self.hamileig.update_hs_list(struct=structure, hoppings=hoppings, onsiteEs=onsiteEs)
self.hamileig.get_hs_blocks(bonds_onsite=np.asarray(batch_bond_onsites[0][:,1:]),
bonds_hoppings=np.asarray(batch_bond[0][:,1:]))
self.if_HR_ready=True
if not self.hamileig.use_orthogonal_basis:
return self.hamileig.all_bonds, self.hamileig.hamil_blocks, None
else:
return self.hamileig.all_bonds, self.hamileig.hamil_blocks, self.hamileig.overlap_blocks
def get_HK(self, kpoints):
assert self.if_HR_ready
if not self.hamileig.use_orthogonal_basis:
hkmat = self.hamileig.hs_block_R2k(kpoints=kpoints, HorS='H', time_symm=self.time_symm)
skmat = self.hamileig.hs_block_R2k(kpoints=kpoints, HorS='S', time_symm=self.time_symm)
else:
hkmat = self.hamileig.hs_block_R2k(kpoints=kpoints, HorS='H', time_symm=self.time_symm)
skmat = torch.eye(hkmat.shape[1], dtype=torch.complex64).unsqueeze(0).repeat(hkmat.shape[0], 1, 1)
return hkmat, skmat
def get_eigenvalues(self,kpoints,spindeg=2):
assert self.if_HR_ready
eigenvalues,_ = self.hamileig.Eigenvalues(kpoints, time_symm=self.time_symm, dtype=self.hamileig.dtype, unit=self.unitenergy)
eigks = eigenvalues.detach().numpy()
num_el = np.sum(self.structure.proj_atom_neles_per)
nk = len(kpoints)
numek = num_el * nk // spindeg
sorteigs = np.sort(np.reshape(eigks,[-1]))
EF=(sorteigs[numek] + sorteigs[numek-1])/2
return eigks, EF | deepmodeling/DeePTB | dptb/nnops/nnapi.py | nnapi.py | py | 12,168 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "abc.ABC",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
... |
20250127196 | # -*- coding: utf-8 -*-
# Не работали русские буквы
import sys
sys.path.append("/Users/a18826700/Library/Python/3.9/lib/python/site-packages")
import requests
import time
import datetime
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import math
import random
import telebot
from selenium.webdriver.support.ui import WebDriverWait
import requests
import data
bot_api = ''
chat = ''
def send(message):
bot = telebot.TeleBot(bot_api)
bot.config['api_key'] = bot_api
bot.send_message(chat, message)
x = 10
swipe_dislike = '//*[@id="c-690079234"]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[5]/div/div[2]'
close_total_tinder = '//*[@id="c1564682258"]/div/div/button[2]'
cl2 = '//*[@id="c1564682258"]/div/div/div[2]/button[2]'
swipe_mobile = '//*[@id="c-690079234"]/div/div[1]/div/div/main/div/div[1]/div[1]/div/div[4]/div/div[4]'
# driver.get('https://tinder.com')
def timeout_after_action():
time.sleep(2)
def messaging():
driver.find_element_by_xpath(xpath_messages).click()
timeout_after_action()
items = driver.find_elements_by_class_name('messageListItem')
lim = 0
while lim < len(items):
items[lim].click()
timeout_after_action()
text_blocks = driver.find_elements_by_class_name('text')
i = 0
already_wrote = False
while i < len(text_blocks):
if text_blocks[i].text == mtx_real:
already_wrote = True
i += 1
lim += 1
if (already_wrote == False):
driver.find_element_by_tag_name('textarea').send_keys(mtx)
OPTIONS = {
# Количество желаемых лайков.
'likes_limit': 100,
# Пауза между свайпами в секундах, к значению добавляется рандомное число от 0.0 до 1.0 исключительно.
'pause_between_swipes': 2,
# Набор текстов для первого сообщения, бот автоматически пишет:
# 'Привет, {имя из анкеты}. {text_message}'.
# Если text_message установить как пустую строку, то бот никому не пишет.
'text_message': 'Привет, приятные фотографии! Напиши телеграм, тут просто очень редко бываю.',
# Лимит по совершенным ошибкам. По окончанию этого лимита бот завершает работу и отправляет результаты.
'error_signals_limit': 30,
'_implicitly_wait': 3,
'_url': 'https://tinder.com',
'_start_time': datetime.datetime.today()
}
XPATH = {
'_like_first': '//*[@id="c-690079234"]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[4]/div/div[4]',
'_like_other': '//*[@id="c-690079234"]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[5]/div/div[4]',
'_dislike': '//*[@id="c-690079234"]/div/div[1]/div/main/div[1]/div/div/div[1]/div[1]/div/div[5]/div/div[2]',
'_link_to_messages': '//*[@id="c-690079234"]/div/div[1]/div/aside/nav/div/div/div/div[2]/div/div[2]',
'_link_to_matches': '//*[@id="c-690079234"]/div/div[1]/div/aside/nav/div/div/div/div[2]/div/div[1]'
}
CLASSNAME = {
'_message_item': 'messageListItem',
'_match_item': 'matchListItem',
'_match_item_text': 'text'
}
class TinderAI(object):
def __init__(self):
self.options = OPTIONS
self.driver = webdriver.Chrome(ChromeDriverManager().install())
self.driver.implicitly_wait(self.options['_implicitly_wait'])
self.likes = 0
self.dislikes = 0
self.error_signals = 0
self.error_logs = []
self.driver.get(self.options['_url'])
def init(self):
# self.start_swiping()
self.search_key_message()
self.send_results()
def search_key_message(self):
try:
self.driver.get(self.options['_url'])
open_matches = self.driver.find_elements_by_class_name(CLASSNAME['_match_item'])
lim = len(open_matches) - 1
while lim != 1:
open_matches[lim].click()
time.sleep(2)
text_items = self.driver.find_elements_by_class_name(CLASSNAME['_match_item_text'])
i = 0
already_wrote = False
while i < len(text_items):
if text_items[i].text == self.options['text_message']:
already_wrote = True
i += 1
lim -= 1
if (already_wrote == False):
self.driver.find_element_by_tag_name('textarea').send_keys(self.options['text_message'] + '\n')
time.sleep(2)
self.driver.find_element_by_xpath(XPATH['_link_to_matches']).click()
time.sleep(2)
except:
self.error_logs.append('search_key_message')
def send_results(self):
likes_rt = 'Likes: ' + str(self.likes) + '\n'
d = 'Dislikes: ' + str(self.dislikes) + '\n'
st = 'Session time: ' + str(datetime.datetime.today() - self.options['_start_time']) + '\n'
logs = 'Error logs: ' + str(self.error_logs) + '\n'
send(likes_rt + d + logs + st)
def start_swiping(self):
self.driver.get(self.options['_url'])
option_likes_limit = self.options['likes_limit']
while self.likes < option_likes_limit and option_likes_limit > 0:
try:
chance = random.random() < 0.90 + random.random()/10
if self.likes == 0:
el_first_like = self.driver.find_element_by_xpath(XPATH['_like_first'])
el_first_like.click()
self.likes += 1
print('like', self.likes)
if self.likes > 0:
if chance:
el_other_like = self.driver.find_element_by_xpath(XPATH['_like_other'])
el_other_like.click()
self.likes += 1
print('like', self.likes)
else:
el_dislike = self.driver.find_element_by_xpath(XPATH['_dislike'])
el_dislike.click()
self.dislikes += 1
print('dislike', self.dislikes)
self.error_signals = 0
time.sleep(self.options['pause_between_swipes'] + random.random())
except:
if self.error_signals == self.options['error_signals_limit']:
self.error_logs.append('start_swiping')
break
self.error_signals += 1
self.error_handler()
def error_handler(self):
print('error', self.likes)
self.driver.get(self.options['_url'])
time.sleep(10)
try:
el_first_like = self.driver.find_element_by_xpath(XPATH['_like_first'])
el_first_like.click()
self.likes += 1
except:
pass
bot = TinderAI()
| sashka0264/data-science | 00_PYTHON/pynder.py | pynder.py | py | 7,214 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "telebot.TeleBot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_nu... |
24065781906 | import pytest
from tests.utils import asyncio_patch, AsyncioMagicMock
from gns3server.controller.gns3vm import GNS3VM
from gns3server.controller.gns3vm.gns3_vm_error import GNS3VMError
@pytest.fixture
def dummy_engine():
engine = AsyncioMagicMock()
engine.running = False
engine.ip_address = "vm.local"
engine.protocol = "https"
engine.port = 8442
engine.user = "hello"
engine.password = "world"
return engine
@pytest.fixture
def dummy_gns3vm(controller, dummy_engine):
vm = GNS3VM(controller)
vm._settings["engine"] = "dummy"
vm._settings["vmname"] = "Test VM"
vm._settings["enable"] = True
vm._engines["dummy"] = dummy_engine
return vm
def test_list(async_run, controller):
vm = GNS3VM(controller)
with asyncio_patch("gns3server.controller.gns3vm.vmware_gns3_vm.VMwareGNS3VM.list", return_value=[{"vmname": "test", "vmx_path": "test"}]):
res = async_run(vm.list("vmware"))
assert res == [{"vmname": "test"}] # Informations specific to vmware are stripped
with asyncio_patch("gns3server.controller.gns3vm.virtualbox_gns3_vm.VirtualBoxGNS3VM.list", return_value=[{"vmname": "test"}]):
res = async_run(vm.list("virtualbox"))
assert res == [{"vmname": "test"}]
with pytest.raises(NotImplementedError):
async_run(vm.list("hyperv"))
def test_json(controller):
vm = GNS3VM(controller)
assert vm.__json__() == vm._settings
def test_update_settings(controller, async_run):
vm = GNS3VM(controller)
vm.settings = {
"enable": True,
"engine": "vmware",
"vmname": "GNS3 VM"
}
with asyncio_patch("gns3server.controller.gns3vm.vmware_gns3_vm.VMwareGNS3VM.start"):
async_run(vm.auto_start_vm())
assert "vm" in controller.computes
async_run(vm.update_settings({"enable": False}))
assert "vm" not in controller.computes
def test_auto_start(async_run, controller, dummy_gns3vm, dummy_engine):
"""
When start the compute should be add to the controller
"""
async_run(dummy_gns3vm.auto_start_vm())
assert dummy_engine.start.called
assert controller.computes["vm"].name == "GNS3 VM (Test VM)"
assert controller.computes["vm"].host == "vm.local"
assert controller.computes["vm"].port == 8442
assert controller.computes["vm"].protocol == "https"
assert controller.computes["vm"].user == "hello"
assert controller.computes["vm"].password == "world"
def test_auto_start_with_error(async_run, controller, dummy_gns3vm, dummy_engine):
dummy_engine.start.side_effect = GNS3VMError("Dummy error")
async_run(dummy_gns3vm.auto_start_vm())
assert dummy_engine.start.called
assert controller.computes["vm"].name == "GNS3 VM (Test VM)"
| vieyahn/docker-cisco-lab | gns3server/tests/controller/test_gns3vm.py | test_gns3vm.py | py | 2,760 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tests.utils.AsyncioMagicMock",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "gns3server.controller.gns3vm.GNS3VM",
"line_number": 22,
"usage_type": "call"
},
{
... |
42578020211 | ''' This module contains functions and classes responsible for
writing solutions into different outputs (files, screen, GUI, etc).
Warning:
if new methods for writing output are added, they MUST
follow the rule: data must be added
sequentially, row after row, column after column.
'''
import pulp
from itertools import chain
from collections import defaultdict
from pyDEA.core.utils.dea_utils import ZERO_TOLERANCE
from pyDEA.core.data_processing.targets_and_slacks import calculate_target
from pyDEA.core.data_processing.targets_and_slacks import calculate_radial_reduction
from pyDEA.core.data_processing.targets_and_slacks import calculate_non_radial_reduction
from pyDEA.core.utils.progress_recorders import NullProgress
class SheetWithParameters(object):
''' Writes parameters to a given output.
Attributes:
params (Parameters): parameters.
run_date (datetime): date and time when the problem was solved.
total_seconds (float): time (in seconds) needed to solve
the problem.
Args:
params (Parameters): parameters.
run_date (datetime): date and time when the problem was solved.
total_seconds (float): time (in seconds) needed to solve
the problem.
'''
def __init__(self, params, run_date, total_seconds):
self.params = params
self.run_date = run_date
self.total_seconds = total_seconds
def create_sheet_parameters(self, work_sheet, solution, start_row_index,
params_str):
''' Writes parameters to a given output.
Args:
work_sheet: object that has name attribute and implements
write method, it actually writes data to some output
(like file, screen, etc.).
solution (Solution): solution.
start_row_index (int): initial row index (usually used to append
data to existing output).
params_str (str): string that is usually written in the first
row.
Returns:
int: index of the last row where data were written plus 1.
'''
work_sheet.name = 'Parameters'
work_sheet.write(start_row_index, 0, 'Run date and time:')
work_sheet.write(start_row_index, 1, self.run_date.strftime('%c'))
work_sheet.write(start_row_index + 1, 0, 'Calculation time:')
work_sheet.write(start_row_index + 1, 1, '{0} seconds'.format(
self.total_seconds))
work_sheet.write(start_row_index + 2, 0, 'Parameter name')
work_sheet.write(start_row_index + 2, 1, 'Value')
row_index = start_row_index + 3
for param_name, param_value in self.params.params.items():
work_sheet.write(row_index, 0, param_name)
work_sheet.write(row_index, 1, param_value)
row_index += 1
return row_index
class SheetOnionRank(object):
    ''' Writes information about a peel the onion solution to a given output.

        Attributes:
            ranks (list of dict of str to double): list that contains
                dictionaries that map DMU code to peel the onion rank.
            count (int): index of the next rank dictionary to write.

        Args:
            ranks (list of dict of str to double): list that contains
                dictionaries that map DMU code to peel the onion rank.
    '''
    def __init__(self, ranks):
        self.ranks = ranks
        self.count = 0

    def create_sheet_onion_rank(self, work_sheet, solution, start_row_index,
                                params_str):
        ''' Writes one run of peel the onion ranks to a given output.

            Args:
                work_sheet: object that has a name attribute and implements a
                    write method (writes data to file, screen, etc.).
                solution (Solution): solution.
                start_row_index (int): initial row index (used to append
                    data to existing output).
                params_str (str): string written in the first row.

            Returns:
                int: index of the last row where data were written plus 1,
                    or -1 when all rank dictionaries were already written.
        '''
        work_sheet.name = 'OnionRank'
        # In case of max_slacks combined with peel-the-onion the same ranks
        # must not be written twice.
        if self.count >= len(self.ranks):
            return -1
        work_sheet.write(start_row_index, 0, params_str)
        work_sheet.write(
            start_row_index + 1, 0,
            'Tier / Rank is the run in which DMU became efficient')
        for column, header in enumerate(('DMU', 'Efficiency', 'Tier / Rank')):
            work_sheet.write(start_row_index + 2, column, header)
        row_index = start_row_index + 3
        for dmu_code in solution._input_data.DMU_codes_in_added_order:
            work_sheet.write(
                row_index, 0,
                solution._input_data.get_dmu_user_name(dmu_code))
            if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                work_sheet.write(row_index, 1,
                                 solution.get_efficiency_score(dmu_code))
                work_sheet.write(row_index, 2,
                                 self.ranks[self.count][dmu_code])
            else:
                # Non-optimal DMUs get the solver status instead of a score.
                work_sheet.write(
                    row_index, 1,
                    pulp.LpStatus[solution.lp_status[dmu_code]])
            row_index += 1
        self.count += 1
        return row_index
class SheetWithCategoricalVar(object):
    ''' Writes various solution information to a given output, and
    adds categorical information if necessary.
    Attributes:
        categorical (str): name of categorical category.
    Args:
        categorical (str): name of categorical category.
    '''
    def __init__(self, categorical=None):
        self.categorical = categorical
    def create_sheet_efficiency_scores(self, work_sheet, solution,
                                       start_row_index, params_str):
        ''' Writes efficiency scores to a given output.
        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.
        Returns:
            int: index of the last row where data were written plus 1.
        '''
        work_sheet.name = 'EfficiencyScores'
        work_sheet.write(start_row_index, 0, params_str)
        work_sheet.write(start_row_index + 1, 0, 'DMU')
        work_sheet.write(start_row_index + 1, 1, 'Efficiency')
        if self.categorical is not None:
            # Extra column showing each DMU's categorical value.
            work_sheet.write(start_row_index + 1, 2,
                             'Categorical: {0}'.format(self.categorical))
        ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
        row_index = 0
        for count, dmu_code in enumerate(ordered_dmu_codes):
            row_index = start_row_index + count + 2
            work_sheet.write(
                row_index, 0, solution._input_data.get_dmu_user_name(dmu_code))
            # LP status decides whether a score or the solver status is shown.
            if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                work_sheet.write(
                    row_index, 1, solution.get_efficiency_score(dmu_code))
            else:
                work_sheet.write(
                    row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
            if self.categorical is not None:
                work_sheet.write(
                    row_index, 2,
                    int(solution._input_data.coefficients[
                        dmu_code, self.categorical]))
        # NOTE(review): unlike the other sheet writers this returns the index
        # of the last written row, not last + 1 — confirm that is intended.
        return row_index
    def create_sheet_input_output_data_base(self, work_sheet, solution,
                                            get_multiplier,
                                            sheet_name, start_row_index,
                                            params_str):
        ''' Writes input and output weights or weighted data to a given output
        depending on input parameters.
        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            get_multiplier (func): function that scales weights.
            sheet_name (str): name that will be written into the name
                attribute of work_sheet.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.
        Returns:
            int: index of the last row where data were written plus 1.
        '''
        work_sheet.name = sheet_name
        work_sheet.write(start_row_index, 0, params_str)
        work_sheet.write(start_row_index + 1, 0, 'DMU')
        work_sheet.write(start_row_index + 1, 1, 'Efficiency')
        # First category column shifts right by one when a categorical
        # column is present.
        init_column_index = 2
        if self.categorical is not None:
            work_sheet.write(start_row_index + 1, 2,
                             'Categorical: {0}'.format(self.categorical))
            init_column_index = 3
        categories = []
        categories.extend(solution._input_data.input_categories)
        categories.extend(solution._input_data.output_categories)
        column_index = init_column_index
        for category in categories:
            work_sheet.write(start_row_index + 1, column_index, category)
            column_index += 1
        # Probe whether the solution carries VRS duals; only then add the
        # extra 'VRS' header column.
        try:
            solution.vrs_duals
        except AttributeError:
            pass
        else:
            work_sheet.write(start_row_index + 1,
                             init_column_index + len(categories), 'VRS')
        row_index = start_row_index + 2
        ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
        for dmu_code in ordered_dmu_codes:
            dmu_name = solution._input_data.get_dmu_user_name(dmu_code)
            work_sheet.write(row_index, 0, dmu_name)
            if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                work_sheet.write(
                    row_index, 1, solution.get_efficiency_score(dmu_code))
            # The categorical value is written for every DMU regardless of
            # LP status, hence the status is checked twice below.
            if self.categorical is not None:
                work_sheet.write(
                    row_index, 2,
                    int(solution._input_data.coefficients[
                        dmu_code, self.categorical]))
            if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                column_index = init_column_index
                for input_category in solution._input_data.input_categories:
                    work_sheet.write(
                        row_index, column_index,
                        get_multiplier(solution, dmu_code, input_category) *
                        solution.get_input_dual(dmu_code, input_category))
                    column_index += 1
                for output_category in solution._input_data.output_categories:
                    work_sheet.write(
                        row_index, column_index,
                        get_multiplier(solution, dmu_code, output_category) *
                        solution.get_output_dual(dmu_code, output_category))
                    column_index += 1
                # Solutions without VRS duals simply skip the VRS column.
                try:
                    vrs_dual = solution.get_VRS_dual(dmu_code)
                    work_sheet.write(row_index, column_index, vrs_dual)
                except AttributeError:
                    pass
            else:
                work_sheet.write(
                    row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
            row_index += 1
        return row_index
    @staticmethod
    def _get_const_multiplier(solution, dmu_code, category):
        ''' Helper method that is used for writing input and output data
        to a given output.
        Args:
            solution (Solution): solution.
            dmu_code (str): DMU code.
            category (str): category name.
        Returns:
            int: always returns 1 since we don't need to scale weights.
        '''
        return 1
    def create_sheet_input_output_data(self, work_sheet, solution,
                                       start_row_index, params_str):
        ''' Writes input and output weights to a given output.
        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.
        Returns:
            int: index of the last row where data were written plus 1.
        '''
        # Multiplier 1 -> write the raw weights.
        return self.create_sheet_input_output_data_base(
            work_sheet, solution,
            self._get_const_multiplier, 'InputOutputWeights',
            start_row_index, params_str)
    @staticmethod
    def _get_data_multiplier(solution, dmu_code, category):
        ''' Helper method that is used for writing weighted data
        to a given output.
        Args:
            solution (Solution): solution.
            dmu_code (str): DMU code.
            category (str): category name.
        Returns:
            int: a scale value to scale weights.
        '''
        return solution._input_data.coefficients[dmu_code, category]
    def create_sheet_weighted_data(self, work_sheet, solution,
                                   start_row_index, params_str):
        ''' Writes weighted data to a given output.
        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.
        Returns:
            int: index of the last row where data were written plus 1.
        '''
        # Multiplier = original coefficient -> write weight * data value.
        return self.create_sheet_input_output_data_base(
            work_sheet, solution,
            self._get_data_multiplier, 'WeightedData',
            start_row_index, params_str)
    def create_sheet_targets(self, work_sheet, solution, start_row_index,
                             params_str):
        ''' Writes targets to a given output.
        Args:
            work_sheet: object that has name attribute and implements
                write method, it actually writes data to some output
                (like file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (usually used to append
                data to existing output).
            params_str (str): string that is usually written in the first
                row.
        Returns:
            int: index of the last row where data were written plus 1.
        '''
        work_sheet.name = 'Targets'
        work_sheet.write(start_row_index, 0, params_str)
        work_sheet.write(start_row_index + 1, 0, 'DMU')
        work_sheet.write(start_row_index + 1, 1, 'Category')
        work_sheet.write(start_row_index + 1, 2, 'Original')
        work_sheet.write(start_row_index + 1, 3, 'Target')
        work_sheet.write(start_row_index + 1, 4, 'Radial')
        work_sheet.write(start_row_index + 1, 5, 'Non-radial')
        if self.categorical is not None:
            work_sheet.write(
                start_row_index + 1, 6,
                'Categorical: {0}'.format(self.categorical))
        ordered_dmu_codes = solution._input_data.DMU_codes_in_added_order
        row_index = start_row_index + 2
        for dmu_code in ordered_dmu_codes:
            # Each DMU occupies one row per category: the DMU name on the
            # first row, the efficiency score in column 0 of the second row.
            work_sheet.write(
                row_index, 0, solution._input_data.get_dmu_user_name(dmu_code))
            if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
                once = True
                all_lambda_vars = solution.get_lambda_variables(dmu_code)
                for category in chain(solution._input_data.input_categories,
                                      solution._input_data.output_categories):
                    work_sheet.write(row_index, 1, category)
                    original = solution._input_data.coefficients[
                        dmu_code, category]
                    work_sheet.write(row_index, 2, original)
                    target = calculate_target(category, all_lambda_vars,
                                              solution._input_data.coefficients)
                    radial_reduction = calculate_radial_reduction(
                        dmu_code, category, solution._input_data,
                        solution.get_efficiency_score(dmu_code),
                        solution.orientation)
                    non_radial_reduction = calculate_non_radial_reduction(
                        target, radial_reduction, original)
                    # Clamp numeric noise to an exact zero.
                    if abs(non_radial_reduction) < ZERO_TOLERANCE:
                        non_radial_reduction = 0
                    work_sheet.write(row_index, 3, target)
                    work_sheet.write(row_index, 4, radial_reduction)
                    work_sheet.write(row_index, 5, non_radial_reduction)
                    if once:
                        # Written only once per DMU (on its first category
                        # row): categorical value and efficiency score.
                        if self.categorical is not None:
                            work_sheet.write(
                                row_index, 6,
                                int(solution._input_data.coefficients[
                                    dmu_code, self.categorical]))
                        work_sheet.write(
                            row_index + 1, 0,
                            solution.get_efficiency_score(dmu_code))
                        once = False
                    row_index += 1
            else:
                work_sheet.write(
                    row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
                # Skip two rows to mirror the name/score layout above.
                row_index += 2
        return row_index
class FileWriter(object):
    ''' This class is responsible for writing solution information
        into a given output.

        Attributes:
            params (Parameters): parameters.
            writer: object that contains several objects that have a name
                attribute and implement a write method; it actually writes
                data to some output (like file, screen, etc.).
            ranks (list of dict of str to double): list that contains
                dictionaries that map DMU code to peel the onion rank.
            categorical (str): name of categorical category.
            run_date (datetime): date and time when the problem was solved.
            total_seconds (float): time (in seconds) needed to solve
                the problem.
            params_sheet (func): bound method that writes parameters to a
                given output.
            worksheets (list of func): list of functions that will be called
                to write solution information to a given output.
            start_rows (list of int): list of start row indexes for each
                element in worksheets.
            existing_sheets (list): contains None references in the
                beginning, but after at least one call to write_data
                contains the created sheets. It is necessary for appending
                data to the same output.
            print_params (bool): if set to true parameters are written to
                a given output; ensures parameters are written only once.

        Args:
            params (Parameters): parameters.
            writer: see Attributes.
            run_date (datetime): date and time when the problem was solved.
            total_seconds (float): time (in seconds) needed to solve
                the problem.
            worksheets (list of func, optional): custom sheet-writer
                functions. Defaults to None (use the default set).
            ranks (list of dict of str to double, optional): peel the onion
                ranks. Defaults to None.
            categorical (str, optional): name of categorical category.
                Defaults to None.
    '''
    def __init__(self, params, writer, run_date, total_seconds,
                 worksheets=None, ranks=None, categorical=None):
        self.params = params
        self.writer = writer
        self.ranks = ranks
        self.categorical = categorical
        self.run_date = run_date
        self.total_seconds = total_seconds
        if worksheets is not None:
            self.worksheets = worksheets
        else:
            self.worksheets = self.get_default_worksheets()
        # Bug fix: params_sheet used to be created only inside
        # get_default_worksheets(), so passing a custom worksheets list left
        # it as None and write_data() crashed when printing parameters.
        # Create it unconditionally here instead.
        self.params_sheet = SheetWithParameters(
            self.params, self.run_date,
            self.total_seconds).create_sheet_parameters
        self.start_rows = [0] * len(self.worksheets)
        self.existing_sheets = [None] * len(self.worksheets)
        self.print_params = True

    def get_default_worksheets(self):
        ''' Returns a default list of functions that will
            be called to write solution information to a given output.

            Returns:
                list of func: list of functions.
        '''
        sheet_with_categorical_var = SheetWithCategoricalVar(
            self.categorical)
        worksheets = [
            sheet_with_categorical_var.create_sheet_efficiency_scores,
            create_sheet_peers, create_sheet_peer_count,
            sheet_with_categorical_var.create_sheet_input_output_data,
            sheet_with_categorical_var.create_sheet_weighted_data,
            sheet_with_categorical_var.create_sheet_targets]
        if self.ranks:
            # Peel the onion was run -> add the extra rank sheet.
            onion_rank_sheet = SheetOnionRank(self.ranks)
            worksheets.append(onion_rank_sheet.create_sheet_onion_rank)
        return worksheets

    def write_data(self, solution, params_str='', progress_recorder=None):
        ''' Writes a given solution to a given output.

            Args:
                solution (Solution): solution.
                params_str (str, optional): string that is usually written in
                    the first row. Defaults to empty string.
                progress_recorder (optional): object that shows progress with
                    writing the solution. Defaults to None, meaning a fresh
                    NullProgress is used. (A ``NullProgress()`` default
                    argument would be evaluated once at class-definition time
                    and shared across all calls — classic mutable default.)
        '''
        if progress_recorder is None:
            progress_recorder = NullProgress()
        for count, worksheet in enumerate(self.worksheets):
            if self.existing_sheets[count] is None:
                work_sheet = self.writer.add_sheet(
                    'Sheet_{count}'.format(count=count))
                self.existing_sheets[count] = work_sheet
            else:
                # Re-use the sheet so repeated calls append to it.
                work_sheet = self.existing_sheets[count]
            self.start_rows[count] = (worksheet(work_sheet, solution,
                                                self.start_rows[count],
                                                params_str) + 1)
            progress_recorder.increment_step()
        # parameters are printed only once to file
        if self.print_params:
            # Use len(self.worksheets) instead of the loop variable so this
            # also works when the worksheets list is empty (the old code
            # raised NameError on `count` in that case).
            work_sheet = self.writer.add_sheet(
                'Sheet_{count}'.format(count=len(self.worksheets)))
            self.params_sheet(work_sheet, solution, 0, '')
            progress_recorder.increment_step()
            self.print_params = False
def _calculate_frontier_classification(sum_of_lambda_values):
''' Returns string that describes frontier classification. If
sum_of_lambda_values is > 1, then classification is DRS.
If sum_of_lambda_values is < 1, then
classification is IRS. If sum_of_lambda_values == 1,
then classification is CRS.
Args:
sum_of_lambda_values (double): sum of lambda variables
values.
Returns:
str: frontier classification.
'''
if sum_of_lambda_values > 1:
return 'DRS'
elif sum_of_lambda_values < 1:
return 'IRS'
else:
return 'CRS'
def create_sheet_peers(work_sheet, solution, start_row_index, params_str):
    ''' Writes peers (reference DMUs with non-zero lambda values) to a
        given output.

        Args:
            work_sheet: object that has a name attribute and implements a
                write method (writes data to file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (used to append data
                to existing output).
            params_str (str): string written in the first row.

        Returns:
            int: index of the last row where data were written plus 1.
    '''
    work_sheet.name = 'Peers'
    work_sheet.write(start_row_index, 0, params_str)
    work_sheet.write(start_row_index + 1, 0, 'DMU')
    work_sheet.write(start_row_index + 1, 2, 'Peer')
    work_sheet.write(start_row_index + 1, 3, 'Lambda')
    # The classification column is added only when return-to-scale
    # information is available on the solution.
    write_classification = bool(solution.return_to_scale)
    if write_classification:
        work_sheet.write(start_row_index + 1, 4, 'Classification')
    row_index = start_row_index + 2
    for dmu_code in solution._input_data.DMU_codes_in_added_order:
        work_sheet.write(row_index, 0, solution._input_data.get_dmu_user_name(
            dmu_code))
        if solution.lp_status[dmu_code] != pulp.LpStatusOptimal:
            # No peers for non-optimal DMUs; write the solver status.
            work_sheet.write(
                row_index, 2, pulp.LpStatus[solution.lp_status[dmu_code]])
            row_index += 1
            continue
        first_peer = True
        for peer_code, weight in solution.get_lambda_variables(dmu_code).items():
            if not weight:
                continue
            work_sheet.write(row_index, 2,
                             solution._input_data.get_dmu_user_name(peer_code))
            work_sheet.write(row_index, 3, weight)
            if write_classification and first_peer:
                # Classification is written once per DMU, on its first
                # peer row.
                work_sheet.write(
                    row_index, 4, solution.return_to_scale[dmu_code]
                )
                first_peer = False
            row_index += 1
    return row_index
def create_sheet_peer_count(work_sheet, solution, start_row_index, params_str):
    ''' Writes, for each DMU, the lambda weight attached to every efficient
        peer, plus a final row counting how often each peer is referenced.

        Args:
            work_sheet: object that has a name attribute and implements a
                write method (writes data to file, screen, etc.).
            solution (Solution): solution.
            start_row_index (int): initial row index (used to append data
                to existing output).
            params_str (str): string written in the first row.

        Returns:
            int: index of the last row where data were written plus 1.
    '''
    work_sheet.name = 'PeerCount'
    work_sheet.write(start_row_index, 0, params_str)
    dmu_codes = solution._input_data.DMU_codes_in_added_order
    work_sheet.write(start_row_index + 1, 0, 'Efficient Peers')
    # Header row: the names of all efficient DMUs.
    column_index = 1
    for dmu_code in dmu_codes:
        if (solution.lp_status[dmu_code] == pulp.LpStatusOptimal and
                solution.is_efficient(dmu_code)):
            work_sheet.write(start_row_index + 1, column_index,
                             solution._input_data.get_dmu_user_name(dmu_code))
            column_index += 1
    work_sheet.write(start_row_index + 2, 0, 'DMU')
    # One row per DMU with its lambda weight for each efficient peer.
    row_index = start_row_index + 3
    nb_peers = defaultdict(int)
    for dmu_code in dmu_codes:
        work_sheet.write(row_index, 0,
                         solution._input_data.get_dmu_user_name(dmu_code))
        if solution.lp_status[dmu_code] == pulp.LpStatusOptimal:
            column_index = 1
            all_lambda_vars = solution.get_lambda_variables(dmu_code)
            for peer in dmu_codes:
                if (solution.lp_status[peer] == pulp.LpStatusOptimal and
                        solution.is_efficient(peer, all_lambda_vars)):
                    # .get() is used since some lambda variables might not
                    # be present in the categorical model.
                    weight = all_lambda_vars.get(peer, 0)
                    if weight:
                        work_sheet.write(row_index, column_index, weight)
                        nb_peers[peer] += 1
                    else:
                        work_sheet.write(row_index, column_index, '-')
                    column_index += 1
        else:
            work_sheet.write(
                row_index, 1, pulp.LpStatus[solution.lp_status[dmu_code]])
        row_index += 1
    # Final row: how many times each peer was actually referenced.
    work_sheet.write(row_index, 0, 'Peer count')
    column_index = 1
    for dmu_code in dmu_codes:
        if dmu_code in nb_peers:
            work_sheet.write(row_index, column_index, nb_peers[dmu_code])
            column_index += 1
    return row_index
| araith/pyDEA | pyDEA/core/data_processing/write_data.py | write_data.py | py | 30,938 | python | en | code | 38 | github-code | 36 | [
{
"api_name": "pulp.LpStatusOptimal",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "pulp.LpStatus",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "pulp.LpStatusOptimal",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_nam... |
27022624529 | import re
from itertools import zip_longest
from parso.python import tree
from jedi import debug
from jedi.inference.utils import PushBackIterator
from jedi.inference import analysis
from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \
LazyTreeValue, get_merged_lazy_value
from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName
from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode
from jedi.inference.value import iterable
from jedi.inference.cache import inference_state_as_method_param_cache
def try_iter_content(types, depth=0):
    """Helper method for static analysis.

    Recursively calls ``py__iter__`` on every value (when present) so that
    iteration-related analysis is triggered for nested containers.  The
    recursion depth is capped because a container may reference itself.
    """
    if depth > 10:
        # It's possible that a loop has references on itself (especially with
        # CompiledValue). Therefore don't loop infinitely.
        return
    for typ in types:
        iter_func = getattr(typ, 'py__iter__', None)
        if iter_func is None:
            continue
        for lazy_value in iter_func():
            try_iter_content(lazy_value.infer(), depth + 1)
class ParamIssue(Exception):
    """Raised when arguments cannot be matched against an argument-clinic
    signature (wrong count, unexpected keyword or unresolvable value)."""
def repack_with_argument_clinic(clinic_string):
    """
    Transforms a function or method with arguments to the signature that is
    given as an argument clinic notation.

    Argument clinic is part of CPython and used for all the functions that are
    implemented in C (Python 3.7):

        str.split.__text_signature__
        # Results in: '($self, /, sep=None, maxsplit=-1)'
    """
    from functools import wraps

    def decorator(func):
        # functools.wraps preserves func's name/docstring on the wrapper,
        # which the original version lost (hurting debugging/introspection).
        @wraps(func)
        def wrapper(value, arguments):
            try:
                args = tuple(iterate_argument_clinic(
                    value.inference_state,
                    arguments,
                    clinic_string,
                ))
            except ParamIssue:
                # The arguments don't match the clinic signature; no values
                # can be inferred.
                return NO_VALUES
            else:
                return func(value, *args)

        return wrapper
    return decorator
def iterate_argument_clinic(inference_state, arguments, clinic_string):
    """Uses a list with argument clinic information (see PEP 436).

    Yields one value set per clinic parameter; raises ParamIssue when the
    given ``arguments`` cannot be matched against ``clinic_string``.
    """
    clinic_args = list(_parse_argument_clinic(clinic_string))
    iterator = PushBackIterator(arguments.unpack())
    for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args):
        if stars == 1:
            # A *args parameter consumes all remaining positional arguments
            # and packs them into a single (fake) tuple value.
            lazy_values = []
            for key, argument in iterator:
                if key is not None:
                    iterator.push_back((key, argument))
                    break
                lazy_values.append(argument)
            yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)])
            # Bug fix: a stray no-op expression statement `lazy_values`
            # (dead code) was removed here.
            continue
        elif stars == 2:
            raise NotImplementedError()
        key, argument = next(iterator, (None, None))
        if key is not None:
            debug.warning('Keyword arguments in argument clinic are currently not supported.')
            raise ParamIssue
        if argument is None and not optional:
            debug.warning('TypeError: %s expected at least %s arguments, got %s',
                          name, len(clinic_args), i)
            raise ParamIssue
        value_set = NO_VALUES if argument is None else argument.infer()
        if not value_set and not optional:
            # For the stdlib we always want values. If we don't get them,
            # that's ok, maybe something is too hard to resolve, however,
            # we will not proceed with the type inference of that function.
            debug.warning('argument_clinic "%s" not resolvable.', name)
            raise ParamIssue
        yield value_set
def _parse_argument_clinic(string):
allow_kwargs = False
optional = False
while string:
# Optional arguments have to begin with a bracket. And should always be
# at the end of the arguments. This is therefore not a proper argument
# clinic implementation. `range()` for exmple allows an optional start
# value at the beginning.
match = re.match(r'(?:(?:(\[),? ?|, ?|)(\**\w+)|, ?/)\]*', string)
string = string[len(match.group(0)):]
if not match.group(2): # A slash -> allow named arguments
allow_kwargs = True
continue
optional = optional or bool(match.group(1))
word = match.group(2)
stars = word.count('*')
word = word[stars:]
yield (word, optional, allow_kwargs, stars)
if stars:
allow_kwargs = True
class _AbstractArgumentsMixin:
def unpack(self, funcdef=None):
raise NotImplementedError
def get_calling_nodes(self):
return []
class AbstractArguments(_AbstractArgumentsMixin):
    # Default (empty) state shared by concrete argument implementations.
    context = None  # inference context the arguments live in
    argument_node = None  # parso node holding the arguments, if any
    trailer = None  # trailer node of the call, e.g. '(...)', if any
def unpack_arglist(arglist):
    """Yield ``(star_count, node)`` pairs for every argument in an arglist.

    ``star_count`` is 0 for plain arguments, 1 for ``*arg`` and 2 for
    ``**arg``.  ``None`` yields nothing; a single non-arglist node is
    yielded as one plain argument.
    """
    if arglist is None:
        return
    is_starred_argument = (arglist.type == 'argument'
                           and arglist.children[0] in ('*', '**'))
    if arglist.type != 'arglist' and not is_starred_argument:
        # A single plain argument, not a comma-separated list.
        yield 0, arglist
        return
    children = iter(arglist.children)
    for node in children:
        if node == ',':
            continue
        if node in ('*', '**'):
            # The star operator and its operand are separate siblings.
            operand = next(children, None)
            assert operand is not None
            yield len(node.value), operand
        elif node.type == 'argument' and \
                node.children[0] in ('*', '**'):
            assert len(node.children) == 2
            yield len(node.children[0].value), node.children[1]
        else:
            yield 0, node
class TreeArguments(AbstractArguments):
    # Arguments backed by a parso syntax-tree node; the main implementation
    # used for ordinary call expressions.
    def __init__(self, inference_state, context, argument_node, trailer=None):
        """
        :param argument_node: May be an argument_node or a list of nodes.
        """
        self.argument_node = argument_node
        self.context = context
        self._inference_state = inference_state
        self.trailer = trailer  # Can be None, e.g. in a class definition.
    @classmethod
    @inference_state_as_method_param_cache()
    def create_cached(cls, *args, **kwargs):
        # Cached alternate constructor; the cache is keyed per inference
        # state so the same argument node reuses one instance.
        return cls(*args, **kwargs)
    def unpack(self, funcdef=None):
        # Yields (keyword_name_or_None, lazy_value) pairs for each argument.
        named_args = []
        for star_count, el in unpack_arglist(self.argument_node):
            if star_count == 1:
                # *args: infer the starred iterable(s) and merge their
                # elements position-wise across all inferred candidates.
                arrays = self.context.infer_node(el)
                iterators = [_iterate_star_args(self.context, a, el, funcdef)
                             for a in arrays]
                for values in list(zip_longest(*iterators)):
                    yield None, get_merged_lazy_value(
                        [v for v in values if v is not None]
                    )
            elif star_count == 2:
                # **kwargs: expand inferred dicts into key/value pairs.
                arrays = self.context.infer_node(el)
                for dct in arrays:
                    yield from _star_star_dict(self.context, dct, el, funcdef)
            else:
                if el.type == 'argument':
                    c = el.children
                    if len(c) == 3:  # Keyword argument.
                        named_args.append((c[0].value, LazyTreeValue(self.context, c[2]),))
                    else:  # Generator comprehension.
                        # Include the brackets with the parent.
                        sync_comp_for = el.children[1]
                        if sync_comp_for.type == 'comp_for':
                            sync_comp_for = sync_comp_for.children[1]
                        comp = iterable.GeneratorComprehension(
                            self._inference_state,
                            defining_context=self.context,
                            sync_comp_for_node=sync_comp_for,
                            entry_node=el.children[0],
                        )
                        yield None, LazyKnownValue(comp)
                else:
                    # Plain positional argument.
                    yield None, LazyTreeValue(self.context, el)
        # Reordering arguments is necessary, because star args sometimes appear
        # after named argument, but in the actual order it's prepended.
        yield from named_args
    def _as_tree_tuple_objects(self):
        # Yields (argument_node, default_node_or_None, star_count) triples.
        for star_count, argument in unpack_arglist(self.argument_node):
            default = None
            if argument.type == 'argument':
                if len(argument.children) == 3:  # Keyword argument.
                    argument, default = argument.children[::2]
            yield argument, default, star_count
    def iter_calling_names_with_star(self):
        # Yields name definitions for simple `*name` / `**name` arguments.
        for name, default, star_count in self._as_tree_tuple_objects():
            # TODO this function is a bit strange. probably refactor?
            if not star_count or not isinstance(name, tree.Name):
                continue
            yield TreeNameDefinition(self.context, name)
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.argument_node)
    def get_calling_nodes(self):
        # Walks backwards through *args forwarding chains (a call `f(*args)`
        # where `args` itself is a parameter of an outer function) to find
        # the original call site; the visited list guards against cycles.
        old_arguments_list = []
        arguments = self
        while arguments not in old_arguments_list:
            if not isinstance(arguments, TreeArguments):
                break
            old_arguments_list.append(arguments)
            for calling_name in reversed(list(arguments.iter_calling_names_with_star())):
                names = calling_name.goto()
                if len(names) != 1:
                    break
                if isinstance(names[0], AnonymousParamName):
                    # Dynamic parameters should not have calling nodes, because
                    # they are dynamic and extremely random.
                    return []
                if not isinstance(names[0], ParamName):
                    break
                executed_param_name = names[0].get_executed_param_name()
                arguments = executed_param_name.arguments
                break
        if arguments.argument_node is not None:
            return [ContextualizedNode(arguments.context, arguments.argument_node)]
        if arguments.trailer is not None:
            return [ContextualizedNode(arguments.context, arguments.trailer)]
        return []
class ValuesArguments(AbstractArguments):
    """Arguments backed by already-inferred value sets instead of tree
    nodes; used when calling functions with synthetic values."""

    def __init__(self, values_list):
        self._values_list = values_list

    def unpack(self, funcdef=None):
        # All arguments are positional; each value set is wrapped lazily.
        for value_set in self._values_list:
            yield None, LazyKnownValues(value_set)

    def __repr__(self):
        return f'<{self.__class__.__name__}: {self._values_list}>'
class TreeArgumentsWrapper(_AbstractArgumentsMixin):
    """Delegating wrapper around another arguments object.

    Subclasses override ``unpack``; every other attribute access is
    forwarded to the wrapped instance.
    """

    def __init__(self, arguments):
        self._wrapped_arguments = arguments

    @property
    def context(self):
        return self._wrapped_arguments.context

    @property
    def argument_node(self):
        return self._wrapped_arguments.argument_node

    @property
    def trailer(self):
        return self._wrapped_arguments.trailer

    def unpack(self, func=None):
        # Concrete wrappers must provide their own unpacking behaviour.
        raise NotImplementedError

    def get_calling_nodes(self):
        return self._wrapped_arguments.get_calling_nodes()

    def __repr__(self):
        return f'<{self.__class__.__name__}: {self._wrapped_arguments}>'
def _iterate_star_args(context, array, input_node, funcdef=None):
if not array.py__getattribute__('__iter__'):
if funcdef is not None:
# TODO this funcdef should not be needed.
m = "TypeError: %s() argument after * must be a sequence, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star', input_node, message=m)
try:
iter_ = array.py__iter__
except AttributeError:
pass
else:
yield from iter_()
def _star_star_dict(context, array, input_node, funcdef):
    """Resolve a ``**kwargs`` value into a dict of its exact key items.

    Returns an empty dict when the value is not a mapping that can be
    inspected; in that case a type error is reported if ``funcdef`` is
    known.
    """
    from jedi.inference.value.instance import CompiledInstance
    if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
        # For now ignore this case. In the future add proper iterators and just
        # make one call without crazy isinstance checks.
        return {}
    if isinstance(array, iterable.Sequence) and array.array_type == 'dict':
        return array.exact_key_items()
    if funcdef is not None:
        message = "TypeError: %s argument after ** must be a mapping, not %s" \
            % (funcdef.name.value, array)
        analysis.add(context, 'type-error-star-star', input_node,
                     message=message)
    return {}
| davidhalter/jedi | jedi/inference/arguments.py | arguments.py | py | 12,218 | python | en | code | 5,554 | github-code | 36 | [
{
"api_name": "jedi.inference.base_value.NO_VALUES",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "jedi.inference.utils.PushBackIterator",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "jedi.inference.base_value.ValueSet",
"line_number": 80,
"usage_... |
74436410985 | # -*- coding: utf-8 -*-
# Data preparation at one-second level for Ph.D thesis
# @author: Andres L. Suarez-Cetrulo
import glob
import time
import logging
import yaml
import subprocess
import os
import pandas as pd
import numpy as np
import datetime
# Global attributes
SLASH = os.path.sep  # platform-specific path separator used to build paths
# Number of seconds per supported time-level unit.
EQUIVALENCE = {
    's': 1,
    'min': 60
}
# logging.getLogger().addHandler(logging.StreamHandler()) # for debugging while coding / comment out otherwise
LOGLEVEL = logging.DEBUG  # alternatives: logging.WARNING, logging.INFO
def get_datetime_format(date):
    """Parse an ISO 'YYYY-MM-DD' date string.

    Note: despite the name, this returns a ``time.struct_time`` (from
    ``time.strptime``), not a ``datetime`` object.
    """
    parsed = time.strptime(date, "%Y-%m-%d")
    return parsed
def show_missing_values(dates, df_name, df):
    """Log diagnostics about missing values and return the affected columns.

    Args:
        dates: frame/series restricted to rows with missing values, indexed
            by timestamp; only its index times are logged.
        df_name (str): label used in the log messages (e.g. the data level).
        df (pandas.DataFrame): data frame inspected for NA/null values.

    Returns:
        list of str: columns of ``df`` that contain at least one missing
            value.
    """
    logging.debug('Printing list of dates with missing values in ' + df_name + ' level (e.g. NA and nulls).')
    logging.debug(str(np.unique(dates.index.time)))
    logging.debug('Showing full DF for only rows with missing values in ' + df_name + ' level')
    logging.debug(df[df.isnull().any(axis=1)].to_string())
    logging.debug('Returning the list of columns which have missing values in ' + df_name + ' level')
    # Bug fix: this list was previously computed and then discarded, even
    # though the log message above promises to return it.
    nulls = [col for col, has_missing in df.isna().any().items() if has_missing]
    logging.debug(df.isna().any().keys())
    return nulls
def set_logging(config):
    """Configure the root logger to write to a per-run log file whose name
    encodes the ETFs, the first level and the date period of the run."""
    eft_label = str(config['efts']).replace('\'', '')
    logname = (eft_label + ' Tests across files starting with '
               + config['levels'][0] + ' level and period ('
               + config['start'] + '-' + config['end'] + ').log')
    logging.basicConfig(filename=config['path'] + logname)
    root_logger = logging.getLogger()
    root_logger.setLevel(LOGLEVEL)
    logging.info('')
def log_new_execution(config):
    """Write a banner to the log marking the start of a new execution."""
    for _ in range(5):
        logging.info('')
    logging.info('#########################')
    logging.info('')
    logging.info('')
    started_at = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logging.info('NEW EXECUTION AT: ' + str(started_at))
    logging.info('')
    logging.info('')
    logging.info('#########################')
    logging.info('Logging config settings:')
    logging.info(config)
def log_iteration(config):
    """Log a header naming the eft/level/period combination under test."""
    separator = ' -------------------------------------------------------------- '
    header = (' START OF TEST FOR ' + str(config['eft']).replace('\'', '') +
              ' ' + config['level'] + ' level on period: ' +
              config['start'] + ' to ' + config['end'])
    logging.info(separator)
    logging.info(header)
    logging.info(separator)
def load_config():
    """Load the 'merging' settings from ./config.yaml and derive dependent fields.

    Returns:
        dict: Config augmented with path, filename pattern, parsed start/end
        dates, output names and the feature set for the configured level.
    """
    # Load global parameters as paths, symbols and periods to iterate through
    with open(os.path.sep.join(['.', 'config.yaml']), 'r') as f:
        config = yaml.safe_load(f.read())['merging']
    # Path where every individual folder is located
    config['path'] = config['src_path']  # + config['level'] + '-level' + SLASH + config['eft'] + SLASH
    config['filename_pattern'] = config['eft'] + '_*_indicators.csv.gz'
    config['start_date'] = get_datetime_format(config['start'])
    # FIX: the original assigned end_date twice; once is enough.
    config['end_date'] = get_datetime_format(config['end'])
    config['name'] = '_'.join([config['eft'], config['output_subname'], config['level']])+'-level'
    config['output'] = config['path'] + config['name']
    # The feature set in 15min and 30min levels differ to avoid considering dependencies across days
    # BUG FIX: the original read `nconf['level']` here, but `nconf` is not
    # defined in this function (NameError whenever level is 15min/30min);
    # it must read from `config`.
    config['featureset'] = config['columns'+config['level']] if config['level'] in ['15min', '30min'] \
        else config['columns']
    return config
def new_config(nconf):
    """Refresh the config fields that depend on eft/level/period.

    Only use this for testing and not creating of .arff, as we add the field
    gap_t+1 to the featureset.
    """
    level, eft = nconf['level'], nconf['eft']
    # Path where every individual folder is located
    nconf['path'] = nconf['src_path'] + level + '-level' + SLASH + eft + SLASH
    nconf['filename_pattern'] = eft + '_*_indicators.csv.gz'
    nconf['start_date'] = get_datetime_format(nconf['start'])
    nconf['end_date'] = get_datetime_format(nconf['end'])
    nconf['name'] = '_'.join([eft, nconf['output_subname'], level]) + '-level'
    nconf['output'] = nconf['path'] + nconf['name']
    # The feature set in 15min and 30min levels differ to avoid considering dependencies across days
    if level in ['15min', '30min']:
        nconf['featureset'] = nconf['columns' + level]
    else:
        nconf['featureset'] = nconf['columns']
    if 'gap_t+1' not in nconf['featureset']:
        nconf['featureset'].append('gap_t+1')
    return nconf
def load_data(conf, min_time=''):
    """Load every indicators file in the configured date range into one DataFrame.

    Args:
        conf: Config dict with path, filename_pattern, parsed date range,
            level, rows_per_day_to_remove and featureset.
        min_time: Earliest intraday time to keep; derived from the first file
            when passed as ''.

    Returns:
        tuple: (DataFrame restricted to conf['featureset'], min_time used).
    """
    # Iterate through collection of processed data with indicators
    df_full = pd.DataFrame()
    for filename in sorted(glob.glob(conf['path'] + conf['filename_pattern'])):
        # Extract dates from filename
        aux_dates = filename.replace(conf['path'] + conf['eft']+'_(', '')
        aux_dates = aux_dates.replace(')_indicators.csv.gz', '')
        dates = aux_dates.split('_to_')
        # If file in date range, add it to DF
        if get_datetime_format(dates[0]) >= conf['start_date'] and \
                get_datetime_format(dates[1]) <= conf['end_date']:
            logging.info('Importing data period ' + str(dates[0]) + ' to ' + str(dates[1]) + '.')
            new_df = pd.read_csv(filename, encoding="utf-8", index_col=0, sep=";")
            new_df['datetime'] = new_df.index  # save string readable timestamp as a column
            new_df.index = pd.to_datetime(new_df.index)
            print('Preview of 5 fist rows of file being load.')
            print(new_df.head(5))
            # If required, remove times if there are dependencies from previous days,
            # or missing values in certain dates that we should avoid
            if min_time == '':
                print('Rows removed at this level: '+str(conf['rows_per_day_to_remove'][str(conf['level'])]))
                print(conf['rows_per_day_to_remove'][str(conf['level'])])
                min_time = '09:30' if int(conf['rows_per_day_to_remove'][str(conf['level'])]) == 0 \
                    else get_min_time(conf, new_df)
                print('Current minimum time being considered: '+str(min_time))
            new_df = new_df.between_time(min_time, '16:00')
            # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat is the supported, behavior-equivalent replacement.
            df_full = pd.concat([df_full, new_df])
    logging.info('Data read.')
    logging.warning('WARNING: Now make sure that the count of rows makes sense for the number of market days:')
    logging.warning(df_full.groupby([df_full.index.year, df_full.index.month, df_full.index.day]).agg({'count'}).to_string())
    return df_full[conf['featureset']], min_time
def get_min_time(conf, df):
    """Return the time component of the earliest index entry left after
    skipping the configured number of rows for this level."""
    rows_to_skip = int(conf['rows_per_day_to_remove'][str(conf['level'])])
    remaining_index = df[rows_to_skip:].index.tolist()
    return min(remaining_index).time()
def split_level(txt, seps):
    """Split a level string like '15min' into ['15', 'min'].

    seps[0] acts as the default separator; every other separator is folded
    into it so the numeric prefix can be split off. (This function would
    make sense if we had tick, hourly or daily level also.)
    """
    default_sep = seps[0]
    detected = ''
    # we skip seps[0] because that's the default separator
    for candidate in seps[1:]:
        length_before = len(txt)
        txt = txt.replace(candidate, default_sep)
        if len(txt) != length_before:
            # the text changed, so this separator was present in it
            detected = candidate
    parts = [piece.strip() for piece in txt.split(default_sep)]
    parts[1] = default_sep if detected == '' else detected
    return parts
def plot_stats(cl_df, ol_df, scl, sol):
    """Log row counts, the expected length ratio, and descriptive statistics
    for the two DataFrames being compared."""
    expected_ratio = float(scl) / float(sol)
    logging.info('Iterated level DF has a length of: ' + str(len(ol_df)) + ' versus a length of: ' + str(len(cl_df)) +
                 '. The equivalence should be of: cdf = ' + str(expected_ratio) + '*odf')
    logging.info('Current DF cdf stats: ' + cl_df.describe().to_string())
    logging.info('Compared DF odf stats: ' + ol_df.describe().to_string())
def test_subsets(df1, df2):
# checking that datetime for the current level exists in inferior levels
key_diff = set(df1.index).difference(df2.index)
where_diff = df1.index.isin(key_diff)
logging.info('Logging difference:')
logging.info(df1[where_diff])
assert len(df1[where_diff]) == 0
logging.debug('Test 2 PASSED: No missing subsets (rows based on datetime) when looking at lower levels.')
def test_number_of_rows(cl_df, ol_df, comparison_type, scl, sol):
    """Check that both DataFrames have consistent row counts.

    For comparison_type == 'eft' both frames must have exactly the same
    length (asserted); any other type runs an experimental, log-only
    level-ratio check.
    """
    if comparison_type == 'eft':
        cl_df.index = pd.to_datetime(cl_df.index)
        logging.debug(len(np.unique(cl_df.index.date)))
        len_cdf = len(cl_df)
        len_odf = len(ol_df)
        logging.debug('Size of c_eft: '+str(len_cdf))
        logging.debug(cl_df.head())
        logging.debug('Size of o_eft: '+str(len_odf))
        logging.debug(ol_df.head())
        cl_df['datetime'] = cl_df.index
        print(cl_df[cl_df['datetime'] == '2015-04-01 09:41:15'])
        test = len_cdf == len_odf
        logging.info('')
        # BUG FIX: the ternary bound looser than `+` in the original, so the
        # whole message was replaced by ' NOT PASSED' on failure. Parenthesize.
        logging.info('Test 0: Length of both EFTs have the same length for the same periods and level: ' +
                     (' PASSED' if test else ' NOT PASSED'))
        logging.info('')
        test_subsets(ol_df, cl_df)
        assert test
    else:
        # Test not ready. It only works for single dates.
        from collections import Counter
        # BUG FIX: len_cdf/len_odf were never defined on this branch, and two
        # calls below were written as the invalid `logging.infologging.info(...)`.
        len_cdf = len(cl_df)
        len_odf = len(ol_df)
        cl_df['stridx'] = cl_df.index
        ol_df['stridx'] = ol_df.index
        logging.info(pd.Series(cl_df['stridx'].str.split(' ')[0].map(Counter).sum()))
        logging.info(len(cl_df[cl_df['stridx'].str.contains('2015-01-02')]))
        logging.info(len(ol_df[ol_df['stridx'].str.contains('2015-01-02')]))
        logging.info(len(cl_df.loc['2015-01-02' in cl_df.index]))
        logging.info(len(ol_df.loc['2015-01-02' in ol_df.index]))
        logging.info('---')
        logging.info(len_cdf * (float(scl)/float(sol)) * len(np.unique(cl_df.index.date)) - (float(scl)/float(sol) * len(np.unique(cl_df.index.date))) + len(np.unique(cl_df.index.date)))
        ## for a single day
        logging.info(len_cdf * float(scl)/float(sol) - float(scl)/float(sol) + 1)
        logging.info(len_odf * sol)
        # for a single day
        #assert len_cdf * float(scl)/float(sol) - float(scl)/float(sol) + 1 == len_odf * sol
def max_and_min_dates(cl_df, ol_df):
    """Log the earliest and latest timestamps found in each DataFrame.

    Side effect: adds/overwrites a 'datetime' column on both frames.
    """
    for label, frame in (('current', cl_df), ('compared', ol_df)):
        frame['datetime'] = pd.to_datetime(frame.index)
        logging.info('Max and min dates in ' + label + ' DF are: ' +
                     str(frame['datetime'].min()) + ' - ' + str(frame['datetime'].max()))
def any_nulls(cl_df, ol_df):
    """Assert that neither DataFrame contains missing values.

    The last row of each frame is dropped (in place) before the check, as it
    may be null for lack of values after 4pm. On failure, the offending rows
    are logged via show_missing_values before the assertion fires.
    """
    # Exclude last row from comparisons as it may be null for lack of values after 4pm
    cl_df.drop(cl_df.tail(1).index, inplace=True)  # drop last n rows
    ol_df.drop(ol_df.tail(1).index, inplace=True)  # drop last n rows
    logging.info('Confirming that both currend compared models don\'t have missing values:')
    passed = (len(cl_df[cl_df.isnull().any(axis=1)]) == 0 and len(ol_df[ol_df.isnull().any(axis=1)]) == 0)
    logging.info(('PASSED' if passed else 'NOT PASSED'))
    # BUG FIX: the original used `if ~passed:`; `~` on a Python bool is a
    # bitwise NOT (~True == -2, ~False == -1, both truthy), so the diagnostic
    # branch ran even when the check passed. Use boolean `not` instead.
    if not passed:
        aux_df = cl_df[cl_df.isnull().any(axis=1)]
        aux_df.index = pd.to_datetime(aux_df.index)
        show_missing_values(dates=aux_df, df_name='current', df=cl_df)
        aux_df = ol_df[ol_df.isnull().any(axis=1)]
        aux_df.index = pd.to_datetime(aux_df.index)
        show_missing_values(dates=aux_df, df_name='compared', df=ol_df)
    assert passed
def get_percentage_of_gaps(cl_df, ol_df):
    """Log the fraction of rows flagged as gaps (gap_t+1 == 1) at each level."""
    logging.debug('--------------------------------------------')
    for name, frame in (('current', cl_df), ('compared', ol_df)):
        logging.debug('Printing percentage of gaps in ' + name + ' level')
        logging.info(len(frame[frame['gap_t+1'] == 1]) / len(frame))
    logging.debug('')
    logging.warning('Are the two values above similar enough when compared to the previous testing logs? Manual check.')
    logging.debug('--------------------------------------------')
def compare_levels(cl_df, ol_df, c_level, o_level, featureset):
    """Run the cross-level consistency tests for one EFT and period.

    Args:
        cl_df: DataFrame at the current level.
        ol_df: DataFrame at the other (compared) level.
        c_level: [amount, unit] pair for the current level (from split_level).
        o_level: [amount, unit] pair for the compared level.
        featureset: Columns to restrict the null check to.
    """
    logging.info('')
    logging.info('#################################')
    logging.info('')
    logging.info('o_level: '+str(o_level))
    logging.info('c_level: '+str(c_level))
    logging.info('')
    # second equivalence of current level and the other level to compare against in the current iteration
    scl = int(c_level[0]) * EQUIVALENCE[c_level[1]]
    sol = int(o_level[0]) * EQUIVALENCE[o_level[1]]
    # First unit test
    logging.info('Test 1: Stats and counts - Judge manually below to determine if it PASSED. Different number of ' +
                 'columns across levels it\'s ok, as it is a design-made choice.')
    logging.info('Please, check that the difference in amount of rows for the same date period makes sense.')
    logging.info('For a single date, this should be truth: ' +
                 'len_current_df * float(scl)/float(sol) - float(scl)/float(sol) + 1 == len_compared_df * sol')
    logging.info('scl and sol are the amount of seconds of the current and compared level respectively. '+
                 'If current_df level =30 min level, then scl=30*60.')
    plot_stats(cl_df, ol_df, scl, sol)
    # if the other level to compare against is lower than the current one, trigger the following tests
    # added extra checks just in case
    if sol < scl and int(c_level[0]) % int(o_level[0]) == 0 and c_level[1] == o_level[1]:
        logging.info('Test 2: Check if lower levels miss any row that they shouldn\'t')
        test_subsets(cl_df, ol_df)
        logging.info('Test 3: Max and Min dates:')
        max_and_min_dates(cl_df, ol_df)
        logging.info('Test 4: Any null values?')
        # BUG FIX: the original passed cl_df[featureset] for BOTH arguments, so
        # the compared level was never checked for nulls; pass ol_df second.
        any_nulls(cl_df[featureset], ol_df[featureset])
        logging.info('Test 5: Percentage of gaps. ' +
                     'It should map the percentages that we already have in the same files ' +
                     'regarding to the class distribution.')
        get_percentage_of_gaps(cl_df, ol_df)
def compare_efts(ce_df, oe_df, c_eft, o_eft):
    """Compare two EFTs at the same level: log a header and check row counts."""
    header_lines = ['', '#################################', '',
                    'o_eft: ' + str(o_eft), 'c_eft: ' + str(c_eft), '']
    for line in header_lines:
        logging.info(line)
    logging.info('Test 0: Stats and counts - Judge manually below to determined if it PASSED. ' +
                 'IBEX may be the most different one due to bank holidays in Spain.')
    test_number_of_rows(ce_df, oe_df, 'eft', 1, 1)
def run_tests_for_config(config):
    """Run the full comparison suite for one eft/level/period combination.

    First compares the current eft against each eft in config['compare_efts']
    at the same level, then compares the current level against each level in
    config['compare_against'] for the same eft and period.
    """
    # Load all the data for the current level and period in a Dataframe
    cl_df, min_time = load_data(config)
    logging.info('First part in testing, comparing to efts at same level:')
    for other_eft in config['compare_efts']:
        logging.info(' ///// ')
        logging.info('Comparing to efts: '+other_eft)
        # Load DF to compare against
        # NOTE(review): config.copy() is a shallow copy, so nested lists/dicts
        # are still shared with `config` — confirm this is intended.
        oe_config = config.copy()
        oe_config['eft'] = other_eft
        oe_df, _ = load_data(new_config(oe_config))  # only getting DF, as min_time doesn't need to update
        compare_efts(cl_df, oe_df, config['eft'], oe_config['eft'])
        logging.info(' ///// ')
    logging.info('Second part in testing, comparing different levels of the same EFT and period:')
    c_level = split_level(config['level'], list(EQUIVALENCE.keys()))
    for other_level in config['compare_against']:
        logging.info(' ///// ')
        logging.info('Comparing to level: '+other_level)
        # Load DF to compare against
        ol_config = config.copy()
        ol_config['level'] = other_level
        # new_config updates some config settings that are dependant on the values changed
        ol_df, _ = load_data(new_config(ol_config), min_time)  # only getting DF, as min_time doesn't need to update
        # Tests
        o_level = split_level(other_level, list(EQUIVALENCE.keys()))
        compare_levels(cl_df, ol_df, c_level, o_level, config['featureset'])
        logging.info(' ///// ')
def run_tests():
    """Entry point: iterate over every eft x period x level combination from
    config.yaml and run the comparison suite on each."""
    # Load settings and set logging config
    config = load_config()
    config['featureset'].append('gap_t+1')  # only for testing (not merging)
    set_logging(config)
    log_new_execution(config)
    for eft in config['efts']:
        print()
        print('EFT: '+str(eft))
        print('------')
        for period in config['periods'].values():
            print('')
            print('Period: '+str(period))
            print('')
            for level in config['levels']:
                print('Level: '+str(level))
                # Mutate the shared config in place for this combination;
                # new_config() then refreshes all derived fields.
                config['eft'] = eft
                config['start'] = period[0]
                config['end'] = period[1]
                config['level'] = level
                log_iteration(config)
                run_tests_for_config(new_config(config))
if __name__ == '__main__':
run_tests()
| cetrulin/Quant-Quote-Data-Preprocessing | src/2_testing.py | 2_testing.py | py | 16,897 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "time.strptime",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"li... |
75075550182 | import torch
import numpy as np
from ..utils.decode import _nms, _topk, _topk_channel, _transpose_and_gather_feat
def multi_pose_decode(heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100):
    """Decode CenterNet multi-pose head outputs into top-K detections.

    Args:
        heat: Center heatmap of shape (batch, cat, H, W).
        wh: Box width/height regression map.
        kps: Joint offset map; channel count is num_joints * 2.
        reg: Optional sub-pixel offset map for box centers.
        hm_hp: Optional per-joint keypoint heatmap.
        hp_offset: Optional sub-pixel offset map for keypoint peaks.
        K: Number of top-scoring detections to keep.

    Returns:
        Tensor of shape (batch, K, 6 + 3*num_joints): bbox (4), score (1),
        joint coordinates (2*num_joints), class (1), and — when hm_hp is
        given — per-joint heatmap scores (num_joints).
    """
    batch, cat, height, width = heat.size()
    num_joints = kps.shape[1] // 2
    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)

    # Gather joint offsets at the K center peaks and convert them to
    # absolute coordinates (offsets are relative to the center cell).
    kps = _transpose_and_gather_feat(kps, inds)
    kps = kps.view(batch, K, num_joints * 2)
    kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints)
    kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints)
    if reg is not None:
        # Refine center positions with the sub-pixel offset head.
        reg = _transpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        # No offset head: assume the center sits in the middle of its cell.
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    wh = _transpose_and_gather_feat(wh, inds)
    wh = wh.view(batch, K, 2)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)

    # Boxes as (x1, y1, x2, y2) centered on (xs, ys).
    bboxes = torch.cat(
        [
            xs - wh[..., 0:1] / 2,
            ys - wh[..., 1:2] / 2,
            xs + wh[..., 0:1] / 2,
            ys + wh[..., 1:2] / 2,
        ],
        dim=2,
    )
    if hm_hp is not None:
        # Refine joints by snapping each regressed joint to the nearest
        # confident keypoint-heatmap peak that lies inside its box.
        hm_hp = _nms(hm_hp)
        thresh = 0.1
        kps = (
            kps.view(batch, K, num_joints, 2).permute(0, 2, 1, 3).contiguous()
        )  # b x J x K x 2
        reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2)
        hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K)  # b x J x K
        if hp_offset is not None:
            hp_offset = _transpose_and_gather_feat(hp_offset, hm_inds.view(batch, -1))
            hp_offset = hp_offset.view(batch, num_joints, K, 2)
            hm_xs = hm_xs + hp_offset[:, :, :, 0]
            hm_ys = hm_ys + hp_offset[:, :, :, 1]
        else:
            hm_xs = hm_xs + 0.5
            hm_ys = hm_ys + 0.5

        # Push low-confidence peaks far away so they never win the
        # nearest-peak assignment below.
        mask = (hm_score > thresh).float()
        hm_score = (1 - mask) * -1 + mask * hm_score
        hm_ys = (1 - mask) * (-10000) + mask * hm_ys
        hm_xs = (1 - mask) * (-10000) + mask * hm_xs
        hm_kps = (
            torch.stack([hm_xs, hm_ys], dim=-1)
            .unsqueeze(2)
            .expand(batch, num_joints, K, K, 2)
        )
        # Euclidean distance between each regressed joint and each peak.
        dist = ((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5
        min_dist, min_ind = dist.min(dim=3)  # b x J x K
        hm_score = hm_score.gather(2, min_ind).unsqueeze(-1)  # b x J x K x 1
        min_dist = min_dist.unsqueeze(-1)
        min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand(
            batch, num_joints, K, 1, 2
        )
        hm_kps = hm_kps.gather(3, min_ind)
        hm_kps = hm_kps.view(batch, num_joints, K, 2)
        l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        # Reject a peak if it falls outside the box, is below threshold, or is
        # farther than 0.3 * max(box side) from the regressed joint.
        mask = (
            (hm_kps[..., 0:1] < l)
            + (hm_kps[..., 0:1] > r)
            + (hm_kps[..., 1:2] < t)
            + (hm_kps[..., 1:2] > b)
            + (hm_score < thresh)
            + (min_dist > (torch.max(b - t, r - l) * 0.3))
        )
        mask = (mask > 0).float()
        hm_score = hm_score * (1 - mask)
        hm_score = hm_score.view(batch, K, num_joints)
        mask = (mask > 0).float().expand(batch, num_joints, K, 2)
        # Keep the heatmap peak where valid, otherwise the regressed joint.
        kps = (1 - mask) * hm_kps + mask * kps
        kps = kps.permute(0, 2, 1, 3).contiguous().view(batch, K, num_joints * 2)
    detections = torch.cat([bboxes, scores, kps, clses, hm_score], dim=2)

    return detections
def multi_pose_post_process(dets, c, s, h, w):
    # dets: batch x max_dets x 40
    # return list of 39 in image coord
    # NOTE(review): the transform_preds() calls were commented out and the
    # replacement lines assign a TUPLE (array, c[i], s[i], (w, h)) to `bbox`
    # and `pts`, so the .reshape() calls below raise AttributeError. As
    # written this function is broken — restore transform_preds (or unpack
    # only the array) before relying on it.
    ret = []
    for i in range(dets.shape[0]):
        # bbox = transform_preds(dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h))
        bbox = dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h)
        # pts = transform_preds(dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h))
        pts = dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h)
        top_preds = (
            np.concatenate(
                [bbox.reshape(-1, 4), dets[i, :, 4:5], pts.reshape(-1, 34)], axis=1
            )
            .astype(np.float32)
            .tolist()
        )
        # Key is the (only) class id, 1; value is this image's predictions.
        ret.append({np.ones(1, dtype=np.int32)[0]: top_preds})
    return ret
| tteepe/CenterNet-pytorch-lightning | CenterNet/decode/multi_pose.py | multi_pose.py | py | 4,647 | python | en | code | 58 | github-code | 36 | [
{
"api_name": "utils.decode._nms",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.decode._topk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.decode._transpose_and_gather_feat",
"line_number": 15,
"usage_type": "call"
},
{
"api_... |
31324284158 | #!/usr/bin/python3
import sqlite3
from itertools import chain
# Open the target SQLite database and prepare a cursor.
conn = sqlite3.connect('vexdb.db')
curs = conn.cursor()
# Parameterised insert: one row per VEX IR type.
sql = 'INSERT INTO IRType(id, btype, nbits) VALUES (?,?,?)'
first=True
value=0;
# irtypes.lst lists VEX IR type names: "Ity_INVALID=0x1100" on the first
# line, then names like "Ity_I8", "Ity_F64", ... on subsequent lines.
with open('irtypes.lst') as f:
    for line in f:
        line = line.rstrip()
        if first:
            # Ity_INVALID=0x1100
            field=line.split('=')
            value=int(field[1],16)
            first=False
            # The INVALID sentinel gets base type 'X' and zero bits.
            curs.execute(sql, (value, 'X', 0));
        else:
            # Ids are assigned sequentially after the sentinel value.
            value += 1;
            # Name layout assumed to be "Ity_<B><bits>": char 4 is the base
            # type letter, the remainder is the bit width — TODO confirm.
            btype=line[4];
            nbits=int(line[5:])
            curs.execute(sql, (value, btype, nbits));
conn.commit()
conn.close()
| EmmetCaulfield/valgrind | arinx/hacking/insert-irtypes.py | insert-irtypes.py | py | 664 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
}
] |
42229482357 | import torch
import torchvision.ops as tv
input_tensor = torch.rand(1000, 4)
scores = torch.rand(1000, 59)
#print(input_tensor)
#print(scores[1])
def NMS_cal(input_tensor, scores):
    """Run per-class non-maximum suppression.

    Each proposal is assigned to its argmax class; NMS (IoU threshold 0.5) is
    then run per class on that class's proposals.

    Args:
        input_tensor: (N, 4) tensor of box proposals.
        scores: (N, C) tensor of per-class scores.

    Returns:
        list: For each class, the list of kept proposal indices (indices are
        relative to the subset of boxes assigned to that class).
    """
    NMS_group = []
    # The per-box confidence and argmax class do not depend on the class being
    # processed, so compute them once instead of on every loop iteration.
    confidscore, _ = torch.max(scores, dim=1)
    catargmax = torch.argmax(scores, dim=1)
    threshold = 0.5
    # Generalized: iterate over however many classes `scores` has (the
    # original hard-coded 59, which matches scores.shape[1] at the call site).
    for i in range(scores.shape[1]):
        prop = input_tensor[catargmax == i]
        confid = confidscore[catargmax == i]
        NMS = tv.nms(prop, confid, threshold)
        NMS_group.append(NMS.tolist())
    return NMS_group
NMS_g = NMS_cal(input_tensor, scores)
| RasmusNylander/guacamole | filter_for_mAP.py | filter_for_mAP.py | py | 605 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.rand",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.argmax",
"line_number": 14,
... |
11519350312 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from functools import wraps
import logging
from numbers import Number
from time import time
from types import FunctionType
from lab import B
from plum import Dispatcher, Self, Referentiable
__all__ = []
_dispatch = Dispatcher()
@_dispatch(object)
def uprank(x):
    """Ensure that the rank of `x` is 2.

    Args:
        x (tensor): Tensor to uprank.

    Returns:
        tensor: `x` with rank at least 2.
    """
    # Non-numerical inputs pass through untouched.
    if not isinstance(x, B.Numeric):
        return x

    rank = B.rank(x)
    if rank > 2:
        raise ValueError('Input must be at most rank 2.')
    if rank == 2:
        return x
    if rank == 1:
        return B.expand_dims(x, axis=1)
    # Rank is 0: add both a row axis and a column axis.
    return B.expand_dims(B.expand_dims(x, axis=0), axis=1)
@_dispatch(FunctionType)
def uprank(f):
    """A decorator to ensure that the rank of the arguments is two."""

    @wraps(f)
    def wrapped_f(*args):
        upranked = [uprank(arg) for arg in args]
        return f(*upranked)

    return wrapped_f
| pb593/stheno | stheno/util.py | util.py | py | 1,175 | python | en | code | null | github-code | 36 | [
{
"api_name": "plum.Dispatcher",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lab.B.Numeric",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "lab.B",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "lab.B.rank",
"line_numbe... |
32006328277 | import Chat
import nltk
from nltk.corpus import wordnet as wn
#nltk.download()
#!/usr/bin/python
def getSynonyms(words):
    """Return the set of WordNet lemma names across all synsets of the words."""
    synonyms = set()
    for word in words:
        for synset in wn.synsets(word):
            for lemma in synset.lemmas():
                synonyms.add(lemma.name())
    return synonyms
# findPersonKeywords is a function that takes a list of words (or user's "chat" in this context)
# returns the list of person names in the given list
def findPersonKeywords(words):
    """Extract candidate person names from a tokenised chat message.

    Looks for the possessive patterns "my <name>" and "with my <name>".

    Args:
        words: List of word tokens from the user's message.

    Returns:
        list: Person-name candidates in discovery order, without duplicates.
    """
    persons = []
    if 'my' in words:
        myIndex = words.index('my')
        # BUG FIX: guard against 'my' being the final token — the original
        # raised IndexError on input like "that is my".
        if myIndex + 1 < len(words) and \
                words[myIndex + 1] not in ("roommate", "friend", "classmate"):
            persons.append(words[myIndex + 1])
    if 'with' in words:
        withIndex = words.index('with')  # returns the index of the word 'with' in the list of words
        # BUG FIX: same bounds guard for the "with my <name>" pattern.
        if (withIndex + 2 < len(words)
                and words[withIndex + 1].lower() == 'my'
                and words[withIndex + 2].lower() not in ('him', 'her', 'them')):
            if words[withIndex + 2] not in persons:
                persons.append(words[withIndex + 2])
            if words[withIndex + 1] not in persons:
                persons.append(words[withIndex + 1])
    return persons
# findLocationKeywords is a function that takes a list of words (or user's "chat" in this context)
# returns the list of location names in the given list
def findLocationKeywords(words):
    """Extract candidate location names following a travel verb + 'to'."""
    locations = []
    # WordNet synonyms of "travel" are treated as travel verbs.
    synonyms = getSynonyms(["travel"])
    if 'to' in words:
        toIndex = words.index('to')
        # Only treat "<travel-verb> to X" as a location mention.
        if (words[toIndex - 1].lower() in synonyms or words[toIndex - 1].lower() == 'went'):
            if (words[toIndex+1] == "the"):
                locationName = words[toIndex + 2].capitalize()
                if locationName not in locations:
                    locations.append(locationName)
            elif (words[toIndex+1]=="to"):
                # NOTE(review): this branch matched words[toIndex+1] == "to"
                # but then re-tests the same token against "the", which can
                # never be true — looks like a copy/paste remnant; verify.
                if (words[toIndex + 1] == "the"):
                    locationName = words[toIndex + 2].capitalize()
                    if locationName not in locations:
                        locations.append(locationName)
            else:
                locationName = words[toIndex + 1].capitalize()
                if locationName not in locations:
                    locations.append(locationName)
    return locations
def determineBranch(words):
    """Classify a tokenised message into a conversation branch.

    Checks, in order: depressed-word synonyms, neutral-word synonyms, the
    literal token 'suicidal', and the negation pattern "not good/well/happy".
    Falls back to Chat.getBranch(), defaulting to 'general' when unset.
    """
    depressedWordsList = ['stress','stressing','depressed','lonely','sad','unhappy', 'not well', 'unwell', 'miserable', 'upset', 'discouraged','broken-hearted','down', 'glum']
    neutralWordsList = ['good','alright','okay', 'fine', 'so-so', 'happy', 'content', 'cheery', 'blessed', 'thrilled']
    # Expand each seed list via WordNet synonyms.
    depressedWords = getSynonyms(depressedWordsList)
    neutralWords = getSynonyms(neutralWordsList)
    # Depressed words are checked first, so a message containing both kinds
    # of words classifies as 'depressed'.
    if (any(string in words for string in depressedWords)):
        branch = 'depressed'
        return branch
    if (any(string in words for string in neutralWords)):
        branch = 'neutral'
        return branch
    if 'suicidal' in words:
        branch = 'suicidal'
        return branch
    if 'not' in words:
        notIndex = words.index('not')
        # NOTE(review): assumes 'not' is never the last token — IndexError otherwise.
        if (words[notIndex+1] == "good") or (words[notIndex+1] == "well") or (words[notIndex+1] == "happy"):
            branch = 'depressed'
            return branch
    branch = Chat.getBranch()
    if branch=="":
        branch = "general"
    return branch
# PersonOrLocation is a function that takes a string (or user's "chat" in this context)
# Returns a dictionary data type with the list of people's names and locations.
def PersonOrLocation(string):
    """Tokenise the chat string and return [person names, location names]."""
    tokens = string.split()  # removes unnecessary left/right spaces and returns a list of words in the string
    people = findPersonKeywords(tokens)
    places = findLocationKeywords(tokens)
    return [people, places]
| Ryanlys/310_A2 | PersonOrLocation.py | PersonOrLocation.py | py | 4,273 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "nltk.corpus.wordnet.synsets",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.wordnet",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "Chat.getBranch",
"line_number": 91,
"usage_type": "call"
}
] |
36156312492 | # coding:utf-8
"""
@author:hanmy
@file:ParaEsti_N.py
@time:2019/04/24
"""
import numpy as np
import matplotlib.pyplot as plt
from gmm import GMM
# 计算steps次参数估计的权重值的均值和方差
def pi_N(N, pi, mean_1, cov_1, mean_2, cov_2, steps):
    """Estimate the GMM weight `steps` times; return mean and variance of the estimates.

    Args:
        N: Number of sample points per generated dataset.
        pi: True mixture weight of the first component.
        mean_1, cov_1: Mean and covariance of the first Gaussian.
        mean_2, cov_2: Mean and covariance of the second Gaussian.
        steps: Number of independent estimation runs.

    Returns:
        tuple: (mean of the `steps` estimates, population variance of them).
    """
    gmm = GMM(N, pi, mean_1, cov_1, mean_2, cov_2)
    pi_steps = np.zeros(shape=steps)
    # Run EM `steps` times on freshly sampled datasets.
    for i in range(steps):
        D = gmm.dataset()
        pi_learn, _, _, _, _ = gmm.EM(D, N)
        pi_steps[i] = pi_learn
    # np.mean/np.var replace the original hand-rolled accumulation loops;
    # np.var uses ddof=0 (population variance), matching the original /steps.
    pi_mu = np.mean(pi_steps)
    pi_sigma = np.var(pi_steps)
    return pi_mu, pi_sigma
if __name__ == "__main__":
    # Sample counts Ni: number of points drawn per dataset
    N1 = 100
    N2 = 1000
    N3 = 10000
    pi = 0.8
    mean_1 = np.array([0.0, 0.0])
    # NOTE(review): np.mat is deprecated in favour of np.array — consider migrating.
    cov_1 = np.mat([[1.0, 0.0], [0.0, 1.0]])
    mean_2 = np.array([3.0, 3.0])
    cov_2 = np.mat([[1.0, 0.5], [0.5, 1.0]])
    # Number of parameter-estimation repetitions
    steps = 10

    Y_mu = []
    Y_sigma = []
    pi_mu_100, pi_sigma_100 = pi_N(N1, pi, mean_1, cov_1, mean_2, cov_2, steps)
    Y_mu.append(pi_mu_100)
    Y_sigma.append(pi_sigma_100)
    print("N=100时学习", steps, "次得到的权重值均值:", pi_mu_100, ",方差:", pi_sigma_100)
    pi_mu_1000, pi_sigma_1000 = pi_N(N2, pi, mean_1, cov_1, mean_2, cov_2, steps)
    Y_mu.append(pi_mu_1000)
    Y_sigma.append(pi_sigma_1000)
    print("N=1000时学习", steps, "次得到的权重值均值:", pi_mu_1000, ",方差:", pi_sigma_1000)
    pi_mu_10000, pi_sigma_10000 = pi_N(N3, pi, mean_1, cov_1, mean_2, cov_2, steps)
    Y_mu.append(pi_mu_10000)
    Y_sigma.append(pi_sigma_10000)
    print("N=10000时学习", steps, "次得到的权重值均值:", pi_mu_10000, ",方差:", pi_sigma_10000)
    # Append the true weight as a reference bar in the plot.
    Y_mu.append(0.8)
    X = [1, 2, 3, 4]
    plt.bar(X, Y_mu, align='center', tick_label=['N=100', 'N=1000', 'N=10000', 'standard value'], width=0.5)
    plt.ylim((0.7, 0.85))
    plt.xlabel('different N')
    plt.ylabel('mean of pi')
    plt.show()
| hanmy1021/NLP | gmm/ParaEsti_N.py | ParaEsti_N.py | py | 2,130 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "gmm.GMM",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gmm.dataset",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "gmm.EM",
"line_number": 24,
"... |
21131384578 | """Django Models for tracking the configuration compliance per feature and device."""
import json
import logging
from deepdiff import DeepDiff
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.module_loading import import_string
from hier_config import Host as HierConfigHost
from nautobot.core.models.generics import PrimaryModel
from nautobot.core.models.utils import serialize_object, serialize_object_v2
from nautobot.dcim.models import Device
from nautobot.extras.models import ObjectChange
from nautobot.extras.models.statuses import StatusField
from nautobot.extras.utils import extras_features
from netutils.config.compliance import feature_compliance
from nautobot_golden_config.choices import ComplianceRuleConfigTypeChoice, ConfigPlanTypeChoice, RemediationTypeChoice
from nautobot_golden_config.utilities.constant import ENABLE_SOTAGG, PLUGIN_CFG
LOGGER = logging.getLogger(__name__)
GRAPHQL_STR_START = "query ($device_id: ID!)"
ERROR_MSG = (
"There was an issue with the data that was returned by your get_custom_compliance function. "
"This is a local issue that requires the attention of your systems administrator and not something "
"that can be fixed within the Golden Config plugin. "
)
MISSING_MSG = (
ERROR_MSG + "Specifically the `{}` key was not found in value the get_custom_compliance function provided."
)
VALIDATION_MSG = (
ERROR_MSG + "Specifically the key {} was expected to be of type(s) {} and the value of {} was not that type(s)."
)
CUSTOM_FUNCTIONS = {
"get_custom_compliance": "custom",
"get_custom_remediation": RemediationTypeChoice.TYPE_CUSTOM,
}
def _is_jsonable(val):
"""Check is value can be converted to json."""
try:
json.dumps(val)
return True
except (TypeError, OverflowError):
return False
def _null_to_empty(val):
"""Convert to empty string if the value is currently null."""
if not val:
return ""
return val
def _get_cli_compliance(obj):
    """This function performs the actual compliance for cli configuration.

    Args:
        obj: Compliance object exposing rule, actual, intended and device.

    Returns:
        dict: compliance flag and int, ordered flag, missing and extra config.
    """
    feature = {
        "ordered": obj.rule.config_ordered,
        "name": obj.rule,
    }
    feature.update({"section": obj.rule.match_config.splitlines()})
    value = feature_compliance(
        feature, obj.actual, obj.intended, obj.device.platform.network_driver_mappings.get("netutils_parser")
    )
    compliance = value["compliant"]
    # FIX: the original if/else assigned `ordered = value["ordered_compliant"]`
    # identically on both branches; only the integer flag depends on compliance.
    compliance_int = 1 if compliance else 0
    ordered = value["ordered_compliant"]
    missing = _null_to_empty(value["missing"])
    extra = _null_to_empty(value["extra"])
    return {
        "compliance": compliance,
        "compliance_int": compliance_int,
        "ordered": ordered,
        "missing": missing,
        "extra": extra,
    }
def _get_json_compliance(obj):
    """This function performs the actual compliance for json serializable data."""

    def _normalize_diff(diff, path_to_diff):
        """Normalizes the diff to a list of keys and list indexes that have changed."""
        changed = list(diff.get(f"dictionary_item_{path_to_diff}", []))
        changed += list(diff.get(f"iterable_item_{path_to_diff}", {}).keys())
        changed += list(diff.get("values_changed", {}).keys())
        changed += list(diff.get("type_changes", {}).keys())
        return changed

    diff = DeepDiff(obj.actual, obj.intended, ignore_order=obj.ordered, report_repetition=True)
    if diff:
        return {
            "compliance": False,
            "compliance_int": 0,
            "ordered": False,
            "missing": _null_to_empty(_normalize_diff(diff, "added")),
            "extra": _null_to_empty(_normalize_diff(diff, "removed")),
        }
    return {
        "compliance": True,
        "compliance_int": 1,
        "ordered": True,
        "missing": "",
        "extra": "",
    }
def _verify_get_custom_compliance_data(compliance_details):
    """This function verifies the data is as expected when a custom function is used.

    Raises ValidationError when a required key is absent or has the wrong type.
    """
    required_keys = ["compliance", "compliance_int", "ordered", "missing", "extra"]
    for key in required_keys:
        try:
            compliance_details[key]
        except KeyError:
            raise ValidationError(MISSING_MSG.format(key)) from KeyError
    for key in ["compliance", "ordered"]:
        if compliance_details[key] not in [True, False]:
            raise ValidationError(VALIDATION_MSG.format(key, "Boolean", compliance_details[key]))
    if compliance_details["compliance_int"] not in [0, 1]:
        raise ValidationError(VALIDATION_MSG.format("compliance_int", "0 or 1", compliance_details["compliance_int"]))
    for key in ["missing", "extra"]:
        if not isinstance(compliance_details[key], str) and not _is_jsonable(compliance_details[key]):
            raise ValidationError(VALIDATION_MSG.format(key, "String or Json", compliance_details[key]))
def _get_hierconfig_remediation(obj):
    """Returns the remediating config.

    Uses hier_config to diff the device's running (actual) configuration against
    the intended configuration and produce remediation commands.

    Args:
        obj (ConfigCompliance): Instance providing device, rule, actual and intended configs.

    Returns:
        str: The remediation configuration text.

    Raises:
        ValidationError: If the platform is not supported by hier_config or has no RemediationSetting.
    """
    # Map the Nautobot platform to the OS name hier_config understands.
    hierconfig_os = obj.device.platform.network_driver_mappings["hier_config"]
    if not hierconfig_os:
        # NOTE(review): obj.network_driver is not an attribute visible on this model;
        # this message likely intends obj.device.platform.network_driver — confirm.
        raise ValidationError(f"platform {obj.network_driver} is not supported by hierconfig.")
    try:
        remediation_setting_obj = RemediationSetting.objects.get(platform=obj.rule.platform)
    except Exception as err: # pylint: disable=broad-except:
        raise ValidationError(f"Platform {obj.network_driver} has no Remediation Settings defined.") from err
    remediation_options = remediation_setting_obj.remediation_options
    try:
        # Build the HierConfig host; extra options are only passed when configured.
        hc_kwargs = {"hostname": obj.device.name, "os": hierconfig_os}
        if remediation_options:
            hc_kwargs.update(hconfig_options=remediation_options)
        host = HierConfigHost(**hc_kwargs)
    except Exception as err: # pylint: disable=broad-except:
        raise Exception( # pylint: disable=broad-exception-raised
            f"Cannot instantiate HierConfig on {obj.device.name}, check Device, Platform and Hier Options."
        ) from err
    # Load both configs, compute the remediation, then render it with no tag
    # filtering (empty include/exclude sets keep every remediation line).
    host.load_generated_config(obj.intended)
    host.load_running_config(obj.actual)
    host.remediation_config()
    remediation_config = host.remediation_config_filtered_text(include_tags={}, exclude_tags={})
    return remediation_config
# The below maps the provided compliance types to the callables that implement them.
FUNC_MAPPER = {
    ComplianceRuleConfigTypeChoice.TYPE_CLI: _get_cli_compliance,
    ComplianceRuleConfigTypeChoice.TYPE_JSON: _get_json_compliance,
    RemediationTypeChoice.TYPE_HIERCONFIG: _get_hierconfig_remediation,
}
# The below conditionally adds operator-provided custom compliance/remediation
# functions (dotted-path strings in PLUGIN_CFG) to the mapper.
for custom_function, custom_type in CUSTOM_FUNCTIONS.items():
    if PLUGIN_CFG.get(custom_function):
        try:
            FUNC_MAPPER[custom_type] = import_string(PLUGIN_CFG[custom_function])
        except Exception as error:  # pylint: disable=broad-except
            # Fixed: missing space after "of" fused the message with the dotted path.
            msg = (
                "There was an issue attempting to import the custom function of "
                f"{PLUGIN_CFG[custom_function]}, this is expected with a local configuration issue "
                "and not related to the Golden Configuration Plugin, please contact your system admin for further details"
            )
            # `from error` keeps both the original traceback and the cause chain,
            # unlike the previous `.with_traceback(...)` which dropped __cause__.
            raise Exception(msg) from error  # pylint: disable=broad-exception-raised
@extras_features(
    "custom_fields",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ComplianceFeature(PrimaryModel): # pylint: disable=too-many-ancestors
    """ComplianceFeature details.

    A named feature that ComplianceRule objects reference via their `feature` FK.
    """
    # name is the human-readable label; slug is the unique identifier used for ordering/str().
    name = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    description = models.CharField(max_length=200, blank=True)
    class Meta:
        """Meta information for ComplianceFeature model."""
        ordering = ("slug",)
    def __str__(self):
        """Return a sane string representation of the instance."""
        return self.slug
@extras_features(
    "custom_fields",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ComplianceRule(PrimaryModel): # pylint: disable=too-many-ancestors
    """ComplianceRule details.

    Binds a ComplianceFeature to a Platform and describes how compliance is
    evaluated for that pair (CLI vs JSON, ordering, remediation, custom function).
    """
    feature = models.ForeignKey(to="ComplianceFeature", on_delete=models.CASCADE, related_name="feature")
    platform = models.ForeignKey(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="compliance_rules",
    )
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    config_ordered = models.BooleanField(
        verbose_name="Configured Ordered",
        help_text="Whether or not the configuration order matters, such as in ACLs.",
        default=False,
    )
    config_remediation = models.BooleanField(
        default=False,
        verbose_name="Config Remediation",
        help_text="Whether or not the config remediation is executed for this compliance rule.",
    )
    match_config = models.TextField(
        blank=True,
        verbose_name="Config to Match",
        help_text="The config to match that is matched based on the parent most configuration. E.g.: For CLI `router bgp` or `ntp`. For JSON this is a top level key name.",
    )
    # Selects which FUNC_MAPPER entry evaluates this rule in ConfigCompliance.compliance_on_save.
    config_type = models.CharField(
        max_length=20,
        default=ComplianceRuleConfigTypeChoice.TYPE_CLI,
        choices=ComplianceRuleConfigTypeChoice,
        help_text="Whether the configuration is in CLI or JSON/structured format.",
    )
    # NOTE(review): "proceeded" in this help_text likely means "processed"; changing
    # it would trigger a Django migration, so flagged rather than fixed here.
    custom_compliance = models.BooleanField(
        default=False, help_text="Whether this Compliance Rule is proceeded as custom."
    )
    @property
    def remediation_setting(self):
        """Returns remediation settings for a particular platform.

        Returns:
            RemediationSetting or None: The first match for this rule's platform.
        """
        return RemediationSetting.objects.filter(platform=self.platform).first()
    class Meta:
        """Meta information for ComplianceRule model."""
        ordering = ("platform", "feature__name")
        unique_together = (
            "feature",
            "platform",
        )
    def __str__(self):
        """Return a sane string representation of the instance."""
        return f"{self.platform} - {self.feature.name}"
    def clean(self):
        """Verify that if cli, then match_config is set."""
        if self.config_type == ComplianceRuleConfigTypeChoice.TYPE_CLI and not self.match_config:
            raise ValidationError("CLI configuration set, but no configuration set to match.")
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ConfigCompliance(PrimaryModel): # pylint: disable=too-many-ancestors
    """Configuration compliance details.

    One record per (device, rule); compliance and remediation fields are
    recomputed automatically on every save().
    """
    device = models.ForeignKey(to="dcim.Device", on_delete=models.CASCADE, help_text="The device")
    rule = models.ForeignKey(to="ComplianceRule", on_delete=models.CASCADE, related_name="rule")
    compliance = models.BooleanField(blank=True)
    actual = models.JSONField(blank=True, help_text="Actual Configuration for feature")
    intended = models.JSONField(blank=True, help_text="Intended Configuration for feature")
    # these three are config snippets exposed for the ConfigDeployment.
    remediation = models.JSONField(blank=True, help_text="Remediation Configuration for the device")
    missing = models.JSONField(blank=True, help_text="Configuration that should be on the device.")
    extra = models.JSONField(blank=True, help_text="Configuration that should not be on the device.")
    ordered = models.BooleanField(default=False)
    # Used for django-pivot, both compliance and compliance_int should be set.
    compliance_int = models.IntegerField(blank=True)
    def to_objectchange(
        self, action, *, related_object=None, object_data_extra=None, object_data_exclude=None
    ): # pylint: disable=arguments-differ
        """Remove actual and intended configuration from changelog."""
        # Exclude the potentially large config payloads from the serialized change record.
        if not object_data_exclude:
            object_data_exclude = ["actual", "intended"]
        return ObjectChange(
            changed_object=self,
            object_repr=str(self),
            action=action,
            object_data=serialize_object(self, extra=object_data_extra, exclude=object_data_exclude),
            object_data_v2=serialize_object_v2(self),
            related_object=related_object,
        )
    class Meta:
        """Set unique together fields for model."""
        ordering = ["device", "rule"]
        unique_together = ("device", "rule")
    def __str__(self):
        """String representation of a the compliance."""
        return f"{self.device} -> {self.rule} -> {self.compliance}"
    def compliance_on_save(self):
        """The actual configuration compliance happens here, but the details for actual compliance job would be found in FUNC_MAPPER."""
        if self.rule.custom_compliance:
            # Custom compliance requires an operator-supplied function registered
            # under the "custom" key in FUNC_MAPPER.
            if not FUNC_MAPPER.get("custom"):
                raise ValidationError(
                    "Custom type provided, but no `get_custom_compliance` config set, please contact system admin."
                )
            compliance_details = FUNC_MAPPER["custom"](obj=self)
            # Validate the shape of the custom function's return payload.
            _verify_get_custom_compliance_data(compliance_details)
        else:
            compliance_details = FUNC_MAPPER[self.rule.config_type](obj=self)
        self.compliance = compliance_details["compliance"]
        self.compliance_int = compliance_details["compliance_int"]
        self.ordered = compliance_details["ordered"]
        self.missing = compliance_details["missing"]
        self.extra = compliance_details["extra"]
    def remediation_on_save(self):
        """The actual remediation happens here, before saving the object."""
        # Remediation is cleared when already compliant, when the rule opts out,
        # or when the platform has no RemediationSetting.
        if self.compliance:
            self.remediation = ""
            return
        if not self.rule.config_remediation:
            self.remediation = ""
            return
        if not self.rule.remediation_setting:
            self.remediation = ""
            return
        remediation_config = FUNC_MAPPER[self.rule.remediation_setting.remediation_type](obj=self)
        self.remediation = remediation_config
    def save(self, *args, **kwargs):
        """The actual configuration compliance happens here, but the details for actual compliance job would be found in FUNC_MAPPER."""
        # Recompute compliance, then remediation, then validate before persisting.
        self.compliance_on_save()
        self.remediation_on_save()
        self.full_clean()
        super().save(*args, **kwargs)
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class GoldenConfig(PrimaryModel): # pylint: disable=too-many-ancestors
    """Configuration Management Model.

    Per-device record of backup, intended, and compliance configs plus
    last-attempt/last-success timestamps for each job type.
    """
    device = models.OneToOneField(
        to="dcim.Device",
        on_delete=models.CASCADE,
        help_text="device",
        blank=False,
    )
    backup_config = models.TextField(blank=True, help_text="Full backup config for device.")
    backup_last_attempt_date = models.DateTimeField(null=True, blank=True)
    backup_last_success_date = models.DateTimeField(null=True, blank=True)
    intended_config = models.TextField(blank=True, help_text="Intended config for the device.")
    intended_last_attempt_date = models.DateTimeField(null=True, blank=True)
    intended_last_success_date = models.DateTimeField(null=True, blank=True)
    compliance_config = models.TextField(blank=True, help_text="Full config diff for device.")
    compliance_last_attempt_date = models.DateTimeField(null=True, blank=True)
    compliance_last_success_date = models.DateTimeField(null=True, blank=True)
    def to_objectchange(
        self, action, *, related_object=None, object_data_extra=None, object_data_exclude=None
    ): # pylint: disable=arguments-differ
        """Remove actual and intended configuration from changelog."""
        # Exclude the large config text fields from the serialized change record.
        if not object_data_exclude:
            object_data_exclude = ["backup_config", "intended_config", "compliance_config"]
        return ObjectChange(
            changed_object=self,
            object_repr=str(self),
            action=action,
            object_data=serialize_object(self, extra=object_data_extra, exclude=object_data_exclude),
            object_data_v2=serialize_object_v2(self),
            related_object=related_object,
        )
    @staticmethod
    def get_dynamic_group_device_pks():
        """Get all Device PKs associated with GoldenConfigSetting DynamicGroups."""
        gc_dynamic_group_device_queryset = Device.objects.none()
        for setting in GoldenConfigSetting.objects.all():
            # using "|" should not require calling distinct afterwards
            gc_dynamic_group_device_queryset = gc_dynamic_group_device_queryset | setting.dynamic_group.members
        return set(gc_dynamic_group_device_queryset.values_list("pk", flat=True))
    @classmethod
    def get_golden_config_device_ids(cls):
        """Get all Device PKs associated with GoldenConfig entries."""
        return set(cls.objects.values_list("device__pk", flat=True))
    class Meta:
        """Set unique together fields for model."""
        ordering = ["device"]
    def __str__(self):
        """String representation of a the compliance."""
        return f"{self.device}"
@extras_features(
    "graphql",
)
class GoldenConfigSetting(PrimaryModel): # pylint: disable=too-many-ancestors
    """GoldenConfigSetting Model definition. This provides global configs instead of via configs.py."""
    name = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    # Higher weight wins when multiple settings match the same device (see Meta.ordering).
    weight = models.PositiveSmallIntegerField(default=1000)
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    # Git repository holding device configuration backups.
    backup_repository = models.ForeignKey(
        to="extras.GitRepository",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="backup_repository",
        limit_choices_to={"provided_contents__contains": "nautobot_golden_config.backupconfigs"},
    )
    backup_path_template = models.CharField(
        max_length=255,
        blank=True,
        verbose_name="Backup Path in Jinja Template Form",
        help_text="The Jinja path representation of where the backup file will be found. The variable `obj` is available as the device instance object of a given device, as is the case for all Jinja templates. e.g. `{{obj.location.name|slugify}}/{{obj.name}}.cfg`",
    )
    # Git repository holding generated (intended) configurations.
    intended_repository = models.ForeignKey(
        to="extras.GitRepository",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="intended_repository",
        limit_choices_to={"provided_contents__contains": "nautobot_golden_config.intendedconfigs"},
    )
    intended_path_template = models.CharField(
        max_length=255,
        blank=True,
        verbose_name="Intended Path in Jinja Template Form",
        help_text="The Jinja path representation of where the generated file will be places. e.g. `{{obj.location.name|slugify}}/{{obj.name}}.cfg`",
    )
    # Git repository holding the Jinja templates used to render intended configs.
    jinja_repository = models.ForeignKey(
        to="extras.GitRepository",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="jinja_template",
        limit_choices_to={"provided_contents__contains": "nautobot_golden_config.jinjatemplate"},
    )
    jinja_path_template = models.CharField(
        max_length=255,
        blank=True,
        verbose_name="Template Path in Jinja Template Form",
        help_text="The Jinja path representation of where the Jinja template can be found. e.g. `{{obj.platform.network_driver}}.j2`",
    )
    backup_test_connectivity = models.BooleanField(
        default=True,
        verbose_name="Backup Test",
        help_text="Whether or not to pretest the connectivity of the device by verifying there is a resolvable IP that can connect to port 22.",
    )
    # GraphQL query used for the source-of-truth aggregation feature (validated in clean()).
    sot_agg_query = models.ForeignKey(
        to="extras.GraphQLQuery",
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name="sot_aggregation",
    )
    # DynamicGroup whose members define the devices in scope for this setting.
    dynamic_group = models.OneToOneField(
        to="extras.DynamicGroup",
        on_delete=models.PROTECT,
        related_name="golden_config_setting",
    )
    def __str__(self):
        """Return a simple string if model is called."""
        return f"Golden Config Setting - {self.name}"
    class Meta:
        """Set unique fields for model.
        Provide ordering used in tables and get_device_to_settings_map.
        Sorting on weight is performed from the highest weight value to the lowest weight value.
        This is to ensure only one plugin settings could be applied per single device based on priority and name.
        """
        verbose_name = "Golden Config Setting"
        ordering = ["-weight", "name"] # Refer to weight comment in class docstring.
    def clean(self):
        """Validate the scope and GraphQL query."""
        super().clean()
        if ENABLE_SOTAGG and not self.sot_agg_query:
            raise ValidationError("A GraphQL query must be defined when `ENABLE_SOTAGG` is True")
        if self.sot_agg_query:
            LOGGER.debug("GraphQL - test query start with: `%s`", GRAPHQL_STR_START)
            # The aggregation query must begin with the fixed prefix verbatim.
            if not str(self.sot_agg_query.query.lstrip()).startswith(GRAPHQL_STR_START):
                raise ValidationError(f"The GraphQL query must start with exactly `{GRAPHQL_STR_START}`")
    def get_queryset(self):
        """Generate a Device QuerySet from the filter."""
        return self.dynamic_group.members
    def device_count(self):
        """Return the number of devices in the group."""
        return self.dynamic_group.count
    def get_url_to_filtered_device_list(self):
        """Get url to all devices that are matching the filter."""
        return self.dynamic_group.get_group_members_url()
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ConfigRemove(PrimaryModel): # pylint: disable=too-many-ancestors
    """ConfigRemove for Regex Line Removals from Backup Configuration Model definition.

    A per-platform regex; lines matching it are removed from backed-up configs.
    """
    name = models.CharField(max_length=255)
    platform = models.ForeignKey(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="backup_line_remove",
    )
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    regex = models.CharField(
        max_length=200,
        verbose_name="Regex Pattern",
        help_text="Regex pattern used to remove a line from the backup configuration.",
    )
    # Fields copied when the object is cloned in the UI.
    clone_fields = ["platform", "description", "regex"]
    class Meta:
        """Meta information for ConfigRemove model."""
        ordering = ("platform", "name")
        unique_together = ("name", "platform")
    def __str__(self):
        """Return a simple string if model is called."""
        return self.name
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class ConfigReplace(PrimaryModel): # pylint: disable=too-many-ancestors
    """ConfigReplace for Regex Line Replacements from Backup Configuration Model definition.

    A per-platform regex substitution applied to backed-up configs (e.g. to mask secrets).
    """
    name = models.CharField(max_length=255)
    platform = models.ForeignKey(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="backup_line_replace",
    )
    description = models.CharField(
        max_length=200,
        blank=True,
    )
    regex = models.CharField(
        max_length=200,
        verbose_name="Regex Pattern to Substitute",
        help_text="Regex pattern that will be found and replaced with 'replaced text'.",
    )
    replace = models.CharField(
        max_length=200,
        verbose_name="Replaced Text",
        help_text="Text that will be inserted in place of Regex pattern match.",
    )
    # Fields copied when the object is cloned in the UI.
    clone_fields = ["platform", "description", "regex", "replace"]
    class Meta:
        """Meta information for ConfigReplace model."""
        ordering = ("platform", "name")
        unique_together = ("name", "platform")
    def __str__(self):
        """Return a simple string if model is called."""
        return self.name
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
)
class RemediationSetting(PrimaryModel): # pylint: disable=too-many-ancestors
    """RemediationSetting details.

    Per-platform choice of remediation engine and its options; consumed by
    ConfigCompliance.remediation_on_save via FUNC_MAPPER.
    """
    # Remediation points to the platform
    platform = models.OneToOneField(
        to="dcim.Platform",
        on_delete=models.CASCADE,
        related_name="remediation_settings",
    )
    # Selects the FUNC_MAPPER entry used to generate remediation configs.
    remediation_type = models.CharField(
        max_length=50,
        default=RemediationTypeChoice.TYPE_HIERCONFIG,
        choices=RemediationTypeChoice,
        help_text="Whether the remediation setting is type HierConfig or custom.",
    )
    # takes options.json.
    remediation_options = models.JSONField(
        blank=True,
        default=dict,
        help_text="Remediation Configuration for the device",
    )
    # Columns emitted by to_csv().
    csv_headers = [
        "platform",
        "remediation_type",
    ]
    class Meta:
        """Meta information for RemediationSettings model."""
        ordering = ("platform", "remediation_type")
    def to_csv(self):
        """Indicates model fields to return as csv."""
        return (
            self.platform,
            self.remediation_type,
        )
    def __str__(self):
        """Return a sane string representation of the instance."""
        return str(self.platform)
@extras_features(
    "custom_fields",
    "custom_links",
    "custom_validators",
    "export_templates",
    "graphql",
    "relationships",
    "webhooks",
    "statuses",
)
class ConfigPlan(PrimaryModel): # pylint: disable=too-many-ancestors
    """ConfigPlan for Golden Configuration Plan Model definition.

    A planned configuration change (config_set) for one device, tracking the
    job that generated it (plan_result) and the job that deployed it (deploy_result).
    """
    plan_type = models.CharField(max_length=20, choices=ConfigPlanTypeChoice, verbose_name="Plan Type")
    device = models.ForeignKey(
        to="dcim.Device",
        on_delete=models.CASCADE,
        related_name="config_plan",
    )
    config_set = models.TextField(help_text="Configuration set to be applied to device.")
    feature = models.ManyToManyField(
        to=ComplianceFeature,
        related_name="config_plan",
        blank=True,
    )
    plan_result = models.ForeignKey(
        to="extras.JobResult",
        on_delete=models.CASCADE,
        related_name="config_plan",
        verbose_name="Plan Result",
    )
    # PROTECT: a deploy JobResult cannot be deleted while a plan references it.
    deploy_result = models.ForeignKey(
        to="extras.JobResult",
        on_delete=models.PROTECT,
        related_name="config_plan_deploy_result",
        verbose_name="Deploy Result",
        blank=True,
        null=True,
    )
    change_control_id = models.CharField(
        max_length=50,
        blank=True,
        verbose_name="Change Control ID",
        help_text="Change Control ID for this configuration plan.",
    )
    change_control_url = models.URLField(blank=True, verbose_name="Change Control URL")
    status = StatusField(blank=True, null=True, on_delete=models.PROTECT)
    class Meta:
        """Meta information for ConfigPlan model."""
        ordering = ("-created", "device")
        unique_together = (
            "plan_type",
            "device",
            "created",
        )
    def __str__(self):
        """Return a simple string if model is called."""
        return f"{self.device.name}-{self.plan_type}-{self.created}"
| nautobot/nautobot-plugin-golden-config | nautobot_golden_config/models.py | models.py | py | 27,885 | python | en | code | 91 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nautobot_golden_config.choices.RemediationTypeChoice.TYPE_CUSTOM",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "nautobot_golden_config.choices.RemediationTypeChoice",
... |
75186130022 | import argparse
import logging
import multiprocessing as mp
import os
import pytz
import shutil
from datetime import datetime
from lego_prover.env.chromas import ChromaBridge
from lego_prover.evolver import Evolver
from lego_prover.prover import Prover
import lego_prover.utils as U
from openai_key import *
# Command-line interface: every runtime knob for a prover/evolver run.
parser = argparse.ArgumentParser(description='LEGO-Prover')
parser.add_argument('--resume', action='store_true',
                    help='whether to resume from the checkpoint')
parser.add_argument('--data_split', type=str, choices=['valid', 'test'],
                    default='valid', help='data split to use in the miniF2F dataset')
parser.add_argument('--ckpt_dir', type=str, default='checkpoints/lego_prover_valid_2023_10_27',
                    help='path to the checkpoint directory')
parser.add_argument('--isabelle_path', type=str, default='/data2/wanghaiming/Isabelle2022/',
                    help='path to the Isabelle2022 directory')
parser.add_argument('--model_name', type=str, choices=["gpt-3.5-turbo", "gpt-4"],
                    default='gpt-3.5-turbo', help='OpenAI model name')
parser.add_argument('--temperature', type=float, default=0.7,
                    help='temperature for sampling the LLM')
parser.add_argument('--num_prover', type=int, default=3,
                    help='number of prover processes')
parser.add_argument('--num_evolver', type=int, default=8,
                    help='number of evolver processes')
parser.add_argument('--num_attempts', type=int, default=100,
                    help='number of proving attempts for each problem in the dataset')
args = parser.parse_args()
# Unpack parsed arguments into module-level globals read by run_prover/run_evolver below.
resume = args.resume
data_split = args.data_split
ckpt_dir = args.ckpt_dir
isabelle_path = args.isabelle_path
model_name = args.model_name
temperature = args.temperature
number_of_prover_processes = args.num_prover
number_of_evolver_processes = args.num_evolver
number_of_prover_attempts = args.num_attempts
# If the checkpoint directory exists but --resume was not given, ask whether to
# wipe it; any answer containing "y" deletes it, otherwise switch to resuming.
if os.path.exists(ckpt_dir) and not resume:
    # Fixed: the two concatenated strings were missing a separating space
    # ("...and" + "you are..." rendered as "andyou are"), and "is already exist".
    text = input(f"the checkpoint directory {ckpt_dir} already exists, and "
                 f"you are not resuming from it, do you want to delete it? (y/n)")
    if "y" in text.lower():
        shutil.rmtree(ckpt_dir, ignore_errors=True)
        resume = False
    else:
        resume = True
# load miniF2F tasks and resume from the checkpoint
miniF2F_tasks = mp.Queue()
problem_names = []
# Fixed: initialize progress lists unconditionally. Previously they were only
# assigned inside the os.path.exists guards (resume) or the else branch, so a
# resume with missing checkpoint JSON files raised NameError below.
completed_tasks = []
failed_tasks = []
if resume:
    if os.path.exists(f"{ckpt_dir}/curriculum/completed_tasks.json"):
        completed_tasks = U.load_json(
            f"{ckpt_dir}/curriculum/completed_tasks.json")
    if os.path.exists(f"{ckpt_dir}/curriculum/failed_tasks.json"):
        failed_tasks = U.load_json(f"{ckpt_dir}/curriculum/failed_tasks.json")
    print("Current progress: ", len(completed_tasks) + len(set(failed_tasks)))
for name in os.listdir(f"data/full_data/{data_split}"):
    path = os.path.join(f"data/full_data/{data_split}", name)
    context = U.load_json(path)
    problem_names.append((path, len(context["informal_proof"])))
# Sort problems by informal-proof length so shorter proofs are attempted first.
problem_names = sorted(problem_names, key=lambda x: x[1])
problem_names = [pn[0] for pn in problem_names]
problem_names = problem_names * number_of_prover_attempts  # schedule each problem num_attempts times
for pn in problem_names:
    if pn in completed_tasks:
        continue
    if pn in failed_tasks:
        # Each recorded failure consumes one of the scheduled attempts.
        failed_tasks.remove(pn)
        continue
    miniF2F_tasks.put(pn)
print(f"Sketch to finish: {miniF2F_tasks.qsize()}")
# setup multiprocessing logger
# Timestamp (Asia/Shanghai) names this run's log directories.
start_time = datetime.now(pytz.timezone(
    'Asia/Shanghai')).strftime("%Y%m%d_%H%M%S")
os.makedirs(f'logs/prover/{start_time}_logs', exist_ok=True)
# One file-backed logger per prover rank: logs/prover/<ts>_logs/rank_<rank>.log
for rank in range(number_of_prover_processes):
    logger = logging.getLogger(f'prover-{rank}')
    handler = logging.FileHandler(
        f"logs/prover/{start_time}_logs/rank_{rank}.log")
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
os.makedirs(f'logs/evolver/{start_time}_logs', exist_ok=True)
# Evolver loggers continue the rank numbering after the prover ranks.
for evolver_rank in range(number_of_evolver_processes):
    evolver_rank += number_of_prover_processes
    logger = logging.getLogger(f'evolver-{evolver_rank}')
    handler = logging.FileHandler(
        f"logs/evolver/{start_time}_logs/rank_{evolver_rank}.log")
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
# define the function to run the prover and evolver
def run_prover(rank, tasks, skill_manager_lock, curriculum_agent_lock, chroma_bridge):
    """Process entry point: build a Prover for this rank and run its learning loop.

    Each rank gets its own Isabelle server port (8051 + rank). Reads the
    module-level globals set from the CLI (isabelle_path, model_name,
    temperature, resume, ckpt_dir).
    """
    server_port = 8051 + rank
    prover = Prover(
        rank=rank,
        isabelle_path=isabelle_path,
        server_port=server_port,
        model_name=model_name,
        skill_manager_lock=skill_manager_lock,
        action_agent_task_max_retries=1,
        curriculum_task_type="queue_curriculum",
        curriculum_agent_lock=curriculum_agent_lock,
        resume=resume,
        temperature=temperature,
        miniF2F_tasks=tasks,
        ckpt_dir=ckpt_dir,
        chroma_bridge=chroma_bridge,
    )
    prover.learn()
def run_evolver(rank, skill_manager_lock, chroma_bridge):
    """Process entry point: build an Evolver for this rank and run its evolve loop.

    Evolver ranks start after the prover ranks; each uses its own Isabelle
    server port (8011 + rank). Reads the module-level CLI globals.
    """
    server_port = 8011 + rank
    evolver = Evolver(
        rank=rank,
        isabelle_path=isabelle_path,
        ckpt_dir=ckpt_dir,
        server_port=server_port,
        data_split=data_split,
        skill_manager_lock=skill_manager_lock,
        model_name=model_name,
        temperature=temperature,
        chroma_bridge=chroma_bridge
    )
    evolver.evolve()
processes = []
# Locks serialize access to the shared skill library and curriculum state.
skill_manager_lock = mp.Lock()
curriculum_agent_lock = mp.Lock()
chroma_bridge = ChromaBridge(ckpt_path=ckpt_dir, resume=resume)
# creating processes
for rank in range(number_of_prover_processes):
    p = mp.Process(target=run_prover, args=(rank, miniF2F_tasks,
                   skill_manager_lock, curriculum_agent_lock, chroma_bridge))
    processes.append(p)
    p.start()
# Evolver ranks continue after prover ranks so log files do not collide.
for rank in range(number_of_evolver_processes):
    rank += number_of_prover_processes
    p = mp.Process(target=run_evolver, args=(
        rank, skill_manager_lock, chroma_bridge))
    processes.append(p)
    p.start()
# completing process
for p in processes:
    p.join()
| wiio12/LEGO-Prover | run_multiprocess.py | run_multiprocess.py | py | 6,373 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
... |
12626750581 | # Python standard library
import sys
sys.path.append('./../pyqtgraph')
# Scipy
import matplotlib.backends.backend_qt4agg
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
import numpy as np
# PyQt
import PyQt4.QtCore
import PyQt4.QtGui
import pyqtgraph as pg
class GlobePlot(matplotlib.backends.backend_qt4agg.FigureCanvas):
    """Qt-embeddable matplotlib canvas: north-polar stereographic map with ROI overlays."""
    def __init__(self, parent=None):
        self.fig = plt.figure(figsize=(3,3), dpi = 10)
        self.axes = self.fig.add_subplot(111)
        # One Polygon patch per region of interest; None until first drawn.
        self.patches = {'west_greenland': None,\
                'bering': None,\
                'severny': None,\
                'hudson': None,\
                'custom': None}
        matplotlib.backends.backend_qt4agg.FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        matplotlib.backends.backend_qt4agg.FigureCanvas.setSizePolicy(self,
                PyQt4.QtGui.QSizePolicy.Expanding,
                PyQt4.QtGui.QSizePolicy.Expanding)
        matplotlib.backends.backend_qt4agg.FigureCanvas.updateGeometry(self)
        self.setup_map()
        # Canvas pixel size; matches the 565x565 geometry set by the parent View.
        self.width, self.height = 565, 565
        self.plot_all_roi_patches()
    def setup_map(self):
        """Create the Basemap (north polar stereographic, >=50N) and draw static layers."""
        self.map = Basemap(projection='npstere',boundinglat=50,lon_0=270,resolution='l')
        # Hidden toolbar is kept only to enable pan/zoom interaction on the canvas.
        self.toolbar = matplotlib.backends.backend_qt4agg.NavigationToolbar2QT(self, self)
        self.toolbar.hide()
        self.toolbar.pan()
        # Data mesh handle; created lazily by set_data().
        self.im1 = None
        self.map.drawparallels(np.arange(50.,90.,10.), labels=[0,0,0,0],zorder=50, color='grey', linewidth=5)
        self.map.drawmeridians(np.arange(-180.,181.,20.),latmax=90, labels=[0,0,0,0],zorder=50,color='grey', linewidth=5)
        self.map.drawcoastlines(linewidth=1)
        self.map.fillcontinents(color='chocolate',lake_color='lightblue', alpha=1, zorder=20)
        self.map.drawmapboundary(fill_color='brown', linewidth=1)
    def get_figure_coordinates(self, coords):
        """Convert (lon, lat) to canvas pixel coordinates with a top-left origin."""
        xpt, ypt = self.map(coords[0], coords[1])
        xy_pixels = self.axes.transData.transform([xpt,ypt])
        #xy_pixels = self.fig.transFigure.transform()
        #xy_pixels = self.fig.transFigure.inverted().transform(xy_pixels)
        fig_x = xy_pixels[0]
        # Flip y: matplotlib's origin is bottom-left, Qt's is top-left.
        fig_y = self.height - xy_pixels[1]
        return fig_x, fig_y
    def set_data(self, lats, longs, data):
        """Replace the plotted data field with a new lat/long pcolormesh and redraw."""
        if self.im1:
            self.im1.remove()
        self.im1 = self.map.pcolormesh(longs, lats, data,
                shading='flat',
                cmap=cm.seismic,
                latlon=True,
                zorder=0)
        # NOTE(review): a new colorbar is added on every call; repeated calls may
        # stack colorbars — confirm intended behaviour.
        self.colorbar = self.map.colorbar(self.im1, "bottom", size="5%", pad="2%")
        self.fig.tight_layout()
        self.draw()
    def plot_roi_patch(self, roi_name, latmin, latmax, lonmin, lonmax):
        """Draw (or redraw) a lat/long rectangle as a projection-following polygon.

        Intermediate points (3 per degree of longitude) are inserted along the
        top and bottom edges so the rectangle curves with the stereographic
        projection instead of drawing straight chords between corners.
        """
        x1,y1 = self.map(lonmax, latmax) # lon_max, lat_max
        x2,y2 = self.map(lonmax, latmin) # lon_max, lat_min
        xmid1, ymid1 = self.map( (lonmax+lonmin)/2, latmin )
        x3,y3 = self.map(lonmin, latmin) # lon_min, lat_min
        x4,y4 = self.map(lonmin, latmax) # lon_min, lat_max
        xmid2, ymid2 = self.map( (lonmax+lonmin)/2, latmax )
        boundary=[]
        boundary.append([x1,y1])
        boundary.append([x2,y2])
        for n in range((lonmax-lonmin)*3):
            xx,yy = self.map(lonmax-(n+1)/3., latmin)
            boundary.append([xx,yy])
        boundary.append([x3,y3])
        boundary.append([x4,y4])
        for n in range((lonmax-lonmin)*3):
            xx,yy = self.map(lonmin+(n+1)/3., latmax)
            boundary.append([xx,yy])
        # Remove any previous patch for this ROI before adding the new one.
        if self.patches[roi_name]:
            self.patches[roi_name].remove()
        self.patches[roi_name] = Polygon(boundary,
            facecolor='yellow', edgecolor='yellow',linewidth = 6, zorder = 1000, alpha = 0.35, ls = '-')
        plt.gca().add_patch(self.patches[roi_name])
    def plot_all_roi_patches(self):
        """Draw the four predefined regions of interest."""
        # West greenland
        latmin, latmax= 66,75
        lonmin, lonmax= 290,310 #220,240
        self.plot_roi_patch('west_greenland', latmin,latmax,lonmin,lonmax)
        # Hudson Bay
        latmin, latmax= 55,65
        lonmin, lonmax= 265,285
        self.plot_roi_patch('hudson', latmin,latmax,lonmin,lonmax)
        # Bering Strait
        latmin, latmax= 63,70
        lonmin, lonmax= 180,199
        self.plot_roi_patch('bering', latmin,latmax,lonmin,lonmax)
        # Severny
        latmin, latmax = 70, 80
        lonmin, lonmax = 50, 75
        self.plot_roi_patch('severny', latmin, latmax, lonmin, lonmax)
class View(PyQt4.QtGui.QMainWindow):
    def __init__(self, parent = None):
        """Build the full-screen main window and all child widgets (absolute geometry)."""
        super(View, self).__init__(None)
        # Target screen resolution; the setup_* methods use absolute pixel geometry.
        self.res = [1280,800]
        self.showFullScreen()
        #self.setGeometry(0,0,800,600)
        self.setup_layout()
        self.setup_time_widgets()
        self.setup_stats_roi_sliders()
        self.setup_globe_plot()
        self.setup_stats_plot()
        self.setup_loc_buttons()
        self.setup_roi_sliders()
        self.setup_conc_labels()
        self.globe_plot.get_figure_coordinates([0,0])
        # NOTE(review): setup_roi_sliders() is called twice — confirm intentional.
        self.setup_roi_sliders()
    def setup_conc_labels(self):
        """Create the 0/50/100 concentration tick labels under the globe plot.

        Transparent QLineEdits are used as positioned text labels.
        """
        self.conc_0_lineedit = PyQt4.QtGui.QLineEdit('0', parent = self)
        self.conc_0_lineedit.setGeometry(50,800,50,55)
        self.conc_0_lineedit.setStyleSheet("""
            .QLineEdit {
                border: 0px solid black;
                border-radius: 10px;
                background-color: rgba(0, 0, 255, 0);
                }
            """)
        self.conc_0_lineedit.show()
        self.conc_50_lineedit = PyQt4.QtGui.QLineEdit('50', parent = self)
        self.conc_50_lineedit.setGeometry(50+565/2,800,50,55)
        self.conc_50_lineedit.setStyleSheet("""
            .QLineEdit {
                border: 0px solid black;
                border-radius: 10px;
                background-color: rgba(255, 255, 255, 0);
                }
            """)
        self.conc_50_lineedit.show()
        self.conc_100_lineedit = PyQt4.QtGui.QLineEdit('100', parent = self)
        self.conc_100_lineedit.setGeometry(50+565,800,50,55)
        self.conc_100_lineedit.setStyleSheet("""
            .QLineEdit {
                border: 0px solid black;
                border-radius: 10px;
                background-color: rgba(255, 0, 0, 0);
                }
            """)
        self.conc_100_lineedit.show()
    def setup_roi_sliders(self):
        """Create lat/long sliders for the two corners of the custom ROI plus labels."""
        # Corner 0: latitude 40-90, longitude 0-360.
        self.roi_lat0_slider = PyQt4.QtGui.QSlider(parent = self, orientation = PyQt4.QtCore.Qt.Horizontal)
        self.roi_lat0_slider.setGeometry(715,150,250,50)
        self.roi_lat0_slider.setMinimum(40)
        self.roi_lat0_slider.setMaximum(90)
        self.roi_lat0_slider.show()
        self.roi_long0_slider = PyQt4.QtGui.QSlider(parent = self, orientation = PyQt4.QtCore.Qt.Horizontal)
        self.roi_long0_slider.setGeometry(715,100,250,50)
        self.roi_long0_slider.setMinimum(0)
        self.roi_long0_slider.setMaximum(360) # Total data points
        self.roi_long0_slider.show()
        # Corner 1: same ranges, placed to the right of corner 0.
        self.roi_lat1_slider = PyQt4.QtGui.QSlider(parent = self, orientation = PyQt4.QtCore.Qt.Horizontal)
        self.roi_lat1_slider.setGeometry(980,150,250,50)
        self.roi_lat1_slider.setMinimum(40)
        self.roi_lat1_slider.setMaximum(90)
        self.roi_lat1_slider.show()
        self.roi_long1_slider = PyQt4.QtGui.QSlider(parent = self, orientation = PyQt4.QtCore.Qt.Horizontal)
        self.roi_long1_slider.setGeometry(980,100,250,50)
        self.roi_long1_slider.setMinimum(0)
        self.roi_long1_slider.setMaximum(360) # Total data points
        self.roi_long1_slider.show()
        # Transparent QLineEdits used as static labels for the slider rows.
        self.roi_lat_lineedit = PyQt4.QtGui.QLineEdit('LAT', parent = self)
        self.roi_lat_lineedit.setGeometry(665,150,50,65)
        self.roi_lat_lineedit.show()
        self.roi_long_lineedit = PyQt4.QtGui.QLineEdit('LONG', parent = self)
        self.roi_long_lineedit.setGeometry(665,100,50,65)
        self.roi_long_lineedit.show()
        self.active_roi_lineedit = PyQt4.QtGui.QLineEdit('', parent = self)
        self.active_roi_lineedit.setGeometry(665,50,200,65)
        self.active_roi_lineedit.show()
        for le in [self.roi_long_lineedit, self.roi_lat_lineedit, self.active_roi_lineedit]:
            le.setStyleSheet("""
                .QLineEdit {
                    border: 0px solid black;
                    border-radius: 10px;
                    background-color: rgba(255, 255, 255, 0);
                    }
                """)
def set_defaults(self):
# Time
self.severny_loc_button.clicked.emit(True)
self.time_month_slider.setSliderPosition(2)
self.time_month_slider.sliderReleased.emit()
self.time_year_slider.setSliderPosition(1950)
self.time_year_slider.sliderReleased.emit()
def setup_layout(self):
return
def setup_time_widgets(self):
# slider
self.time_year_slider = PyQt4.QtGui.QSlider(parent = self, orientation = PyQt4.QtCore.Qt.Horizontal)
self.time_year_slider.setGeometry(50,150,500,50)
self.time_year_slider.setMinimum(1850)
self.time_year_slider.setMaximum(2013)
self.time_year_slider.show()
self.time_month_slider = PyQt4.QtGui.QSlider(parent = self, orientation = PyQt4.QtCore.Qt.Horizontal)
self.time_month_slider.setGeometry(50,100,500,50)
self.time_month_slider.setMinimum(0)
self.time_month_slider.setMaximum(11) # Total data points
self.time_month_slider.show()
# lineedit
self.time_year_lineedit = PyQt4.QtGui.QLineEdit(parent = self)
self.time_year_lineedit.setGeometry(550,150,100,65)
self.time_year_lineedit.setAlignment(PyQt4.QtCore.Qt.AlignHCenter)
self.time_year_lineedit.setStyleSheet("""
.QLineEdit {
border: 0px solid black;
border-radius: 10px;
background-color: rgba(255, 255, 255, 0);
}
""")
self.time_year_lineedit.show()
self.time_month_lineedit = PyQt4.QtGui.QLineEdit(parent = self)
self.time_month_lineedit.setGeometry(550,100,100,65)
self.time_month_lineedit.setAlignment(PyQt4.QtCore.Qt.AlignHCenter)
self.time_month_lineedit.setStyleSheet("""
.QLineEdit {
border: 0px solid black;
border-radius: 10px;
background-color: rgba(255, 255, 255, 0);
}
""")
self.time_month_lineedit.show()
def setup_stats_roi_sliders(self):
pass
def setup_globe_plot(self):
self.globe_plot = GlobePlot(self)
self.globe_plot.setGeometry(50,200,565,565)
self.globe_plot.show()
def setup_stats_plot(self):
self.stats_plot = pg.PlotWidget(parent = self)
self.stats_plot.setLabel('left', text = 'Mean concentration (units)')
self.stats_plot.setLabel('bottom', text = 'Time (s)')
self.stats_plot.showGrid(x = True, y = True, alpha = 0.2)
self.stats_plot.setGeometry(665,200,565,565)
pen_0 = PyQt4.QtGui.QPen(PyQt4.QtGui.QColor(238,50,50))
pen_1 = PyQt4.QtGui.QPen(PyQt4.QtGui.QColor(50,175,238))
self.stats_plot_item_roi_mar = pg.PlotDataItem(name = 'Mar')
self.stats_plot_item_roi_mar.setPen(pen_1)
self.stats_plot.addItem(self.stats_plot_item_roi_mar)
self.stats_plot_item_roi_sep = pg.PlotDataItem(name = 'Sep')
self.stats_plot_item_roi_sep.setPen(pen_0)
self.stats_plot.addItem(self.stats_plot_item_roi_sep)
self.stats_plot.addLegend()
#self.stats_plot_item_full = pg.PlotDataItem()
#self.stats_plot_item_full.setPen(pen_1)
#self.stats_plot.addItem(self.stats_plot_item_full)
self.stats_plot.show()
def setup_loc_buttons(self):
# West greenland
self.west_greenland_loc_button = PyQt4.QtGui.QPushButton('X', self)
self.west_greenland_loc_button.setStyleSheet('''
background-color: rgba(255,255,255,0);
color: green
''' )
self.west_greenland_loc_button.setFont(PyQt4.QtGui.QFont("Arial", 20, PyQt4.QtGui.QFont.Bold))
self.west_greenland_loc_button.setMaximumWidth(30)
self.west_greenland_loc_button.setMaximumHeight(30)
self.west_greenland_loc_button.show()
# Bering
self.bering_loc_button = PyQt4.QtGui.QPushButton('X', self)
self.bering_loc_button.setStyleSheet('''
background-color: rgba(255,255,255,0);
color: green
''' )
self.bering_loc_button.setFont(PyQt4.QtGui.QFont("Arial", 20, PyQt4.QtGui.QFont.Bold))
self.bering_loc_button.setMaximumWidth(30)
self.bering_loc_button.setMaximumHeight(30)
self.bering_loc_button.show()
# Hudson
self.hudson_loc_button = PyQt4.QtGui.QPushButton('X', self)
self.hudson_loc_button.setStyleSheet('''
background-color: rgba(255,255,255,0);
color: green
''' )
self.hudson_loc_button.setFont(PyQt4.QtGui.QFont("Arial", 20, PyQt4.QtGui.QFont.Bold))
self.hudson_loc_button.setMaximumWidth(30)
self.hudson_loc_button.setMaximumHeight(30)
self.hudson_loc_button.show()
# Severny
self.severny_loc_button = PyQt4.QtGui.QPushButton('X', self)
self.severny_loc_button.setStyleSheet('''
background-color: rgba(255,255,255,0);
color: green
''' )
self.severny_loc_button.setFont(PyQt4.QtGui.QFont("Arial", 20, PyQt4.QtGui.QFont.Bold))
self.severny_loc_button.setMaximumWidth(30)
self.severny_loc_button.setMaximumHeight(30)
self.severny_loc_button.show()
| tphinkle/sea_ice | qt_app/view/view.py | view.py | py | 14,067 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.backends.backend_qt4agg.backends",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_... |
14312689350 | from django.urls import path
from .views import *
# URL routes for the view-test app. Grouped by the view style being
# demonstrated (plain HttpResponse, class-based View, TemplateView,
# RedirectView).
urlpatterns = [
    path('test/', test, name="test"),
    # HttpResponse--------------------------------------------------------------------
    path('test-1/', TestViewClass_1.as_view(), name="TestViewClass_1"),
    path('test-2/', TestViewClass_2.as_view(), name="TestViewClass_2"),
    path('test-3/', TestViewClass_3.as_view(subject="Chemistry"), name="TestViewClass_3"),
    path('sub-t-2/', SubClass.as_view(), name="SubClass"),
    path('test-3-1/', TestViewClass_3_1.as_view(), name="TestViewClass_3_1"),
    # Class base View --------------------------------------------------------------------
    path('test-4/', TestViewClass_4.as_view(), name="TestViewClass_4"),
    path('test-5/', TestViewClass_5.as_view(), name="TestViewClass_5"),
    path('test-6/', TestViewClass_6.as_view(), name="TestViewClass_6"),
    path('test-7/', TestViewClass_7.as_view(), name="TestViewClass_7"),
    path('test-8/', MyView.as_view(), name="MyView"),
    #----------------------------------------------------Template View--------------------------------------------------------
    path('tv-1/', templateview_1.as_view( template_name="viewtest/TemplateView_1.html" ,extra_context ={'course': 'Django'}), name="templateview_1"),
    path('tv-2/', templateview_2.as_view(extra_context ={'course': 'Django', 'fruit': 'Mango'}), name="templateview_2"),
    path('tv-3/', templateview_3.as_view(), name="templateview_3"),
    path('tv-4/<int:id>/', templateview_4.as_view(), name="templateview_4"),
    #----------------------------------------------------Redirect View--------------------------------------------------------
    path('rd-1/', RedirectView_1.as_view(url="http://studentportal.diu.edu.bd/#/dashboard1"), name="RedirectView_1"),
    path('rd-2/', RedirectView_2.as_view(), name="RedirectView_2"),
    path('rd-3/', RedirectView_3.as_view(), name="RedirectView_3"),
    path('rd-4/', RedirectView_4.as_view(), name="RedirectView_4"),
    path('rd-5/', RedirectView_5.as_view(pattern_name = 'gen_1'), name="RedirectView_5"),
    path('rd-6/<int:id>/', RedirectView_6.as_view(), name="RedirectView_6"),
    path('rv-result/<int:id>/', R_V.as_view(extra_context ={'address': 'Saver'}), name="R_V"),
    path('rd-7/<slug:product>/', RedirectView_7.as_view(), name="RedirectView_7"),
    path('rv-result-2/<slug:product>/', R_V_2.as_view(extra_context ={'address': 'Saver'}), name="R_V_2"),
    # Fix: a second, byte-identical registration of 'rv-result/<int:id>/'
    # (name="R_V") was removed -- duplicate patterns are never matched and
    # only clutter reverse() resolution.
    #path('rv-1/<int:id>/', RedirectView_1.as_view(), name="RedirectView_1"),
    #path('rv-5/', RedirectView.as_view(), name="RedirectView"),
    #path('rv-1/', RedirectView_1.as_view(pattern_name ="TestViewClass_1"), name="RedirectView_1"),
    # path('<slug:frute>/', templateview_1.as_view( template_name="test/TemplateView_1.html" ,extra_context ={'course': 'Django'}), name="templateview_rv"),
    #path('rv-result/', R_V.as_view(extra_context ={'address': 'Saver'}), name="R_V"),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
15476948155 | from __future__ import division, print_function, absolute_import
import pytest
from faker.providers import BaseProvider
from hypothesis import given
from hypothesis.strategytests import strategy_test_suite
from hypothesis.internal.debug import minimal
from hypothesis.extra.fakefactory import fake_factory
class KittenProvider(BaseProvider):
    """Minimal custom Faker provider used to exercise `providers=` plumbing."""
    def kittens(self):
        # Returns u'meow ' followed by a random 10-digit number.
        return u'meow %d' % (self.random_number(digits=10),)
@given(fake_factory(u'kittens', providers=[KittenProvider]))
def test_kittens_meow(kitten):
    # Every value from the custom provider must carry the 'meow' marker.
    assert u'meow' in kitten
@given(fake_factory(u'email'))
def test_email(email):
    # Every generated email address must contain an '@'.
    assert u'@' in email
@given(fake_factory(u'name', locale=u'en_US'))
def test_english_names_are_ascii(name):
    # Raises UnicodeEncodeError (failing the test) on any non-ASCII name.
    name.encode(u'ascii')
def test_french_names_may_have_an_accent():
    # Shrink-search for an accent-free French name; succeeding shows accents
    # are possible but not mandatory in the fr_FR name corpus.
    minimal(
        fake_factory(u'name', locale=u'fr_FR'),
        lambda x: u'é' not in x
    )
def test_fake_factory_errors_with_both_locale_and_locales():
    # `locale` and `locales` are mutually exclusive arguments.
    with pytest.raises(ValueError):
        fake_factory(
            u'name', locale=u'fr_FR', locales=[u'fr_FR', u'en_US']
        )
def test_fake_factory_errors_with_unsupported_locale():
    # A locale Faker does not know about must be rejected eagerly.
    with pytest.raises(ValueError):
        fake_factory(
            u'name', locale=u'badger_BADGER'
        )
def test_factory_errors_with_source_for_unsupported_locale():
    # 'state' exists for some locales but not ja_JP; must be rejected.
    with pytest.raises(ValueError):
        fake_factory(u'state', locale=u'ja_JP')
def test_fake_factory_errors_if_any_locale_is_unsupported():
    # One bad entry in `locales` poisons the whole list.
    with pytest.raises(ValueError):
        fake_factory(
            u'name', locales=[u'fr_FR', u'en_US', u'mushroom_MUSHROOM']
        )
def test_fake_factory_errors_if_unsupported_method():
    # An unknown faker method name must be rejected.
    with pytest.raises(ValueError):
        fake_factory(u'spoon')
def test_fake_factory_errors_if_private_ish_method():
    # Private/name-mangled attributes of the generator are not valid sources.
    with pytest.raises(ValueError):
        fake_factory(u'_Generator__config')
# Generic strategy contract suites: each TestFake* name is collected by the
# test runner and exercises the standard strategy invariants for one faker.
TestFakeEmail = strategy_test_suite(
    fake_factory(u'email')
)
TestFakeNames = strategy_test_suite(
    fake_factory(u'name')
)
TestFakeEnglishNames = strategy_test_suite(
    fake_factory(u'name', locale=u'en_US')
)
TestStates = strategy_test_suite(
    fake_factory(u'state')
)
| LyleH/hypothesis-python_1 | tests/fakefactory/test_fake_factory.py | test_fake_factory.py | py | 2,163 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "faker.providers.BaseProvider",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "hypothesis.given",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "hypothesis.extra.fakefactory.fake_factory",
"line_number": 18,
"usage_type": "call"
},
... |
72692460583 | #!/usr/bin/env python3
# pyre-strict
import asyncio
import logging
import sys
from typing import Any, Union
import click
__version__ = "0.6.9"
LOG: logging.Logger = logging.getLogger(__name__)
def _handle_debug(
    ctx: Union[click.core.Context, None],
    param: Union[click.core.Option, click.core.Parameter, None],
    debug: Union[bool, int, str],
) -> Union[bool, int, str]:
    """Click option callback: configure root logging.

    DEBUG level when --debug was passed, INFO otherwise; returns the flag
    unchanged so click stores it.
    """
    if debug:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(
        format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
        level=level,
    )
    return debug
async def async_main(debug: bool) -> int:
    """Async entry point; currently a stub that always reports success (0)."""
    exit_code = 0
    return exit_code
# Some typing stub issue I can't work out - https://github.com/pallets/click/issues/2558
# Fix for 8.1.6 dosen't seem to fix me - To look into
# Some typing stub issue I can't work out - https://github.com/pallets/click/issues/2558
# Fix for 8.1.6 doesn't seem to fix me - To look into
@click.command(context_settings={"help_option_names": ["-h", "--help"]}) # type: ignore
@click.option(
    "--debug",
    is_flag=True,
    callback=_handle_debug,
    show_default=True,
    help="Turn on debug logging",
)
@click.pass_context
def main(ctx: click.core.Context, **kwargs: Any) -> None:
    """CLI entry point: run async_main and exit with its return code."""
    LOG.debug(f"Starting {sys.argv[0]}")
    # Propagate the coroutine's exit status through click's ctx.exit().
    ctx.exit(asyncio.run(async_main(**kwargs)))
# Script entry point.
if __name__ == "__main__":  # pragma: no cover
    main()
| cooperlees/base_clis | py/base_cli.py | base_cli.py | py | 1,315 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.Logger",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "click.core",
"... |
27551438109 | #!/usr/bin/env python3
import argparse
import dgl
import numpy as np
import pandas as pd
import pickle as pkl
import torch
import torch.nn.functional as F
import ipdb
from gnn.link_prediction import LinkPredictor, compute_lp_loss
from gnn.node_classification import NodeClassifier, NodeClassifierConv
# if torch.cuda.is_available():
# DEVICE = 'cuda:0'
# else:
# DEVICE = 'cpu'
DEVICE='cpu'
def preprocess_edges(graph):
    """Build per-node-type input features and the size maps the models need.

    Chemicals get their MACCS fingerprint bits (read from
    ./data/chemicals.csv); assay and gene nodes get a constant scalar
    feature of 1. Returns (node_features, node_sizes, edge_input_sizes),
    where edge_input_sizes maps each edge type to its source node type's
    feature width.
    """
    fingerprints = pd.read_csv("./data/chemicals.csv").maccs
    maccs = torch.tensor([[int(bit) for bit in row] for row in fingerprints]).float().to(DEVICE)

    def constant_feature(ntype):
        # One scalar "1" per node of the given type.
        return torch.ones((graph.number_of_nodes(ntype=ntype))).unsqueeze(1).to(DEVICE)

    node_features = {
        'chemical': maccs,
        'assay': constant_feature('assay'),
        'gene': constant_feature('gene'),
    }
    # Map each edge type name to its source node type.
    input_type_map = dict((etype, src) for src, etype, _dst in graph.canonical_etypes)
    node_sizes = {ntype: feats.shape[1] for ntype, feats in node_features.items()}
    edge_input_sizes = {etype: node_features[src].shape[1] for etype, src in input_type_map.items()}
    return node_features, node_sizes, edge_input_sizes
def drop_node_return_binary_labels(graph, ntype, node_index, pos_etype, neg_etype):
    """Remove one node and return its former neighbors as binary labels.

    The sources of incoming ``pos_etype`` edges become the positive (1)
    examples and the sources of incoming ``neg_etype`` edges the negative
    (0) examples for supervised learning (especially node classification).

    Parameters
    ----------
    graph : dgl.DGLGraph
        Graph on which to run the operation. Not modified in place; a
        transformed copy is returned, so reassign if you want to overwrite.
    ntype : str
        Node type of the node to drop.
    node_index : int
        Integer index of the (typed) node to drop.
    pos_etype : str
        Incoming edge type whose source nodes are labeled 1.
    neg_etype : str
        Incoming edge type whose source nodes are labeled 0.

    Returns
    -------
    tuple
        ``(new_graph, pos_nodes, neg_nodes)``.
    """
    # Capture the source node ids of each incoming edge type before removal.
    positives = graph.in_edges(node_index, form='uv', etype=pos_etype)[0]
    negatives = graph.in_edges(node_index, form='uv', etype=neg_etype)[0]
    pruned = dgl.remove_nodes(graph, node_index, ntype=ntype)
    return pruned, positives, negatives
def collate_labels(graph, ntype, pos_idxs, neg_idxs, ratio=(0.8, 0.2)):
    """Shuffle labeled nodes and split them into train/test index-label pairs.

    Fix: the original fetched ``graph.nodes(ntype)`` into an unused local;
    that dead lookup is removed. ``graph``/``ntype`` are kept in the
    signature for backward compatibility with existing callers.

    Parameters
    ----------
    pos_idxs, neg_idxs : torch.Tensor
        1-D tensors of node indices labeled 1 and 0 respectively.
    ratio : tuple of float
        (train, test) fractions; must sum to 1.0.

    Returns
    -------
    tuple of torch.Tensor
        ``(train, test)``, each of shape (2, n): row 0 holds node indices,
        row 1 the binary labels.
    """
    assert sum(ratio) == 1.0
    # Stack each index tensor with its constant label row: shape (2, n).
    pos_tensor = torch.cat((pos_idxs.unsqueeze(0), torch.ones_like(pos_idxs).unsqueeze(0)))
    neg_tensor = torch.cat((neg_idxs.unsqueeze(0), torch.zeros_like(neg_idxs).unsqueeze(0)))
    labeled_nodes = torch.cat((pos_tensor, neg_tensor), dim=1)
    # Shuffle columns so positives and negatives are interleaved at random.
    idxs = torch.randperm(labeled_nodes.shape[1])
    shuffled = labeled_nodes[:, idxs]
    split = round(shuffled.shape[1] * ratio[0])
    return shuffled[:, :split], shuffled[:, split:]
def construct_negative_graph(graph, k, etype):
    """Build a graph of k random negative edges per observed edge of `etype`.

    For every existing (u, v) edge of the given canonical edge type, k
    corrupted edges (u, random_v) are generated; these serve as the
    negative samples for link-prediction training.

    Parameters
    ----------
    graph : dgl.heterograph.DGLHeterograph
        Graph on which the sampling is performed.
    k : int
        Number of negative examples per positive edge.
    etype : tuple
        Canonical edge type (subj_node_type, edge_label, obj_node_type).
    """
    src_ntype, _, dst_ntype = etype
    src, _dst = graph.edges(etype=etype)
    corrupted_src = src.repeat_interleave(k)
    corrupted_dst = torch.randint(0, graph.num_nodes(dst_ntype), (len(src) * k,))
    node_counts = {ntype: graph.num_nodes(ntype) for ntype in graph.ntypes}
    return dgl.heterograph({etype: (corrupted_src, corrupted_dst)},
                           num_nodes_dict=node_counts)
def link_prediction(args):
    """Train a link predictor on ('chemical', 'chemicalhasactiveassay', 'assay') edges.

    Known edges are trained against k randomly corrupted edges per positive
    edge (negative sampling) with a margin-style loss.

    Bug fix: the original body referenced ``EPModel`` and ``compute_ep_loss``,
    neither of which is defined or imported anywhere in this module; the
    names actually imported at the top of the file are ``LinkPredictor`` and
    ``compute_lp_loss``, so those are used here (same argument lists kept).

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments; ``args.graph_file`` locates the saved graph.
    """
    G = dgl.load_graphs(args.graph_file)[0][0]
    k = 5  # negative samples per positive edge
    node_features, node_sizes, edge_input_sizes = preprocess_edges(G)
    lp_model = LinkPredictor(edge_input_sizes, 20, 5, G.etypes)
    opt = torch.optim.Adam(lp_model.parameters())
    target_etype = ('chemical', 'chemicalhasactiveassay', 'assay')
    for epoch in range(100):
        # Fresh corrupted edges every epoch.
        neg_G = construct_negative_graph(G, k, target_etype)
        pos_score, neg_score = lp_model(G.to(DEVICE), neg_G.to(DEVICE), node_features, target_etype)
        # margin loss
        loss = compute_lp_loss(pos_score, neg_score)
        opt.zero_grad()
        loss.backward()
        opt.step()
        print("epoch: %3d; margin loss: %.5f" % (epoch, loss.item()))
    # Leftover interactive hook: drop into ipdb so the trained model can be
    # inspected. TODO: replace with model saving / evaluation.
    ipdb.set_trace()
def node_classification(args, label_assay_idx):
    """Train a classifier to predict chemicals' activity for one assay.

    The target assay node is removed from the graph; chemicals formerly
    connected via 'chemicalhasactiveassay' are labeled 1 and via
    'chemicalhasinactiveassay' are labeled 0. Chemicals with no known
    annotation for that assay receive no label.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments; ``args.graph_file`` locates the saved graph.
    label_assay_idx : int
        Index (within the 'assay' node type) of the assay node to remove
        and predict.

    Returns
    -------
    tuple
        (best test accuracy, best test F1, DataFrame of test-set
        probabilities vs. true labels, fraction of positive labels).
    """
    G = dgl.load_graphs(args.graph_file)[0][0]
    # Remove the prediction task node and get labels before doing anything else
    G, pos_nodes, neg_nodes = drop_node_return_binary_labels(G, 'assay', label_assay_idx, 'chemicalhasactiveassay', 'chemicalhasinactiveassay')
    try:
        ratio_pos = len(pos_nodes) / (len(pos_nodes) + len(neg_nodes))
    except ZeroDivisionError:
        # No labeled chemicals at all for this assay.
        ratio_pos = 0
    train, test = collate_labels(G, 'chemical', pos_nodes, neg_nodes)
    # Row 0 holds node indices, row 1 the 0/1 labels (see collate_labels).
    train_idx = train[0,:]
    train_labels = train[1,:]
    test_idx = test[0,:]
    test_labels = test[1,:]
    # Remove nodes for ablation analysis
    G = dgl.edge_type_subgraph(G,
        [
            ('chemical', 'chemicalhasinactiveassay', 'assay'),
            ('assay', 'assayinactiveforchemical', 'chemical'),
            ('chemical', 'chemicalhasactiveassay', 'assay'),
            ('assay', 'assayactiveforchemical', 'chemical'),
            ('chemical', 'chemicalbindsgene', 'gene'),
            ('gene', 'genebindedbychemical', 'chemical'),
            ('chemical', 'chemicaldecreasesexpression', 'gene'),
            ('gene', 'expressiondecreasedbychemical', 'chemical'),
            ('chemical', 'chemicalincreasesexpression', 'gene'),
            ('gene', 'expressionincreasedbychemical', 'chemical'),
            ('gene', 'geneinteractswithgene', 'gene'),
            ('gene', 'geneinverseinteractswithgene', 'gene')
        ]
    )
    # Note: We don't do anything with the node features (yet)
    node_features, node_sizes, edge_input_sizes = preprocess_edges(G)
    model = NodeClassifier(G, node_sizes, edge_input_sizes, 2, 2) # (self, G, node_sizes, edge_input_sizes, hidden_size, out_size)
    # model = NodeClassifierConv(G, node_sizes, edge_input_sizes, 2, 2) # (self, G, node_sizes, edge_input_sizes, hidden_size, out_size)
    opt = torch.optim.Adam(model.parameters(), lr=0.02, weight_decay=5e-4)
    # compute class weights
    # NOTE(review): `weights` is computed but never passed to the loss below.
    pos_weight = 1 - (len(pos_nodes) / (len(pos_nodes)+len(neg_nodes)))
    neg_weight = len(pos_nodes) / (len(pos_nodes)+len(neg_nodes))
    weights = torch.tensor([neg_weight, pos_weight])
    best_test_acc = 0
    best_f1 = 0
    for epoch in range(1000):
        logits = model(G)
        # logits = model(G, node_features)
        p = F.softmax(logits, dim=1)
        # NOTE(review): cross_entropy applies log_softmax internally, so
        # feeding it log-probabilities normalizes twice; training still runs,
        # but F.nll_loss(torch.log(p[...]), ...) may be what was intended.
        loss = F.cross_entropy(torch.log(p[train_idx]), train_labels)
        pred = logits.argmax(1)
        train_acc = (pred[train_idx] == train_labels).float().mean()
        test_acc = (pred[test_idx] == test_labels).float().mean()
        # Test-set confusion counts for F1 (positive class = 1).
        tp = sum(torch.logical_and(pred[test_idx], test_labels)).item()
        fp = sum(torch.logical_and(pred[test_idx], torch.logical_not(test_labels))).item()
        fn = sum(torch.logical_and(torch.logical_not(pred[test_idx]), test_labels)).item()
        f1 = (tp)/(tp + (0.5*(fp+fn)))
        if best_test_acc < test_acc:
            best_test_acc = test_acc
            # best_f1 = f1
            # best_probs = p
        # Track the probabilities from the best-F1 epoch.
        if best_f1 < f1:
            best_f1 = f1
            best_probs = p
        opt.zero_grad()
        loss.backward()
        opt.step()
        if epoch % 5 == 0:
            try:
                print('Epoch %4d: Loss %.4f, Train Acc %.4f, Test Acc %.4f (Best %.4f), Test F1 %.4f (Best %.4f)' % (
                    epoch,
                    loss.item(),
                    train_acc.item(),
                    test_acc.item(),
                    best_test_acc.item(),
                    f1,
                    best_f1
                ))
            except AttributeError:
                # best_test_acc is still the int 0 (no .item()) if no epoch has
                # improved accuracy yet; drops into the debugger when hit.
                ipdb.set_trace()
    print()
    # NOTE(review): reports the FINAL epoch's probabilities (p), not
    # best_probs from the best-F1 epoch -- confirm which is intended.
    preds = pd.DataFrame({'proba': p[test_idx][:,1].detach().numpy(), 'true_label': test_labels.detach().numpy()})
    ipdb.set_trace()
    return (best_test_acc.item(), best_f1, preds, ratio_pos)
def main(args):
    """Dispatch to the requested task.

    - task "lp": train the link predictor.
    - task "nc": run node classification for one assay (``--assay <idx>``)
      or sweep every assay (``--assay all``), writing per-assay predictions
      and a summary TSV under ./gnn_nc_results/.

    Fixes vs. the original:
    * The assay index/id lists were built twice (once unconditionally and
      again inside the "all" branch); the duplicate build is removed.
    * The bare ``except:`` now catches ``Exception`` only, so Ctrl-C still
      interrupts a sweep.
    * The summary DataFrame is built from the assays that actually
      completed; the original zipped the full assay list with the shorter
      metric lists, which raised ValueError every run (index 47 is always
      skipped).
    * The bad ``--assay`` error raises ``argparse.ArgumentTypeError``;
      ``ArgumentError`` needs an argument object as its first parameter and
      would itself raise TypeError when constructed with just a message.
    """
    if args.task == "lp":
        link_prediction(args)
    elif args.task == "nc":
        # Map each assay's positional index to its node-id row, once.
        assay_index = []
        assay_node_ids = []
        assays = pd.read_csv("./data/assays.csv")
        for ind, a in assays.iterrows():
            assay_index.append(ind)
            assay_node_ids.append(a)
        if args.assay == "all":
            test_accs = []
            f1s = []
            ratio_positive_nodes = []
            completed_idx = []  # assays that actually produced metrics
            with open("./gnn_nc_results/nc_results.tsv", 'w') as fp:
                fp.write("assay_index\tassay_node_id\tacc\tf1\tpos_ratio\n")
            for i in assay_index:
                if i == 47:
                    continue # need to debug this one...
                print("================================")
                print("        ASSAY {0} OF {1}".format(i, len(assay_index)))
                print("================================")
                try:
                    test_acc, f1, preds, rp = node_classification(args, i)
                except Exception:
                    # Log the failure and keep sweeping the remaining assays.
                    with open("./gnn_nc_results/nc_results_pd.tsv", 'a') as fp:
                        fp.write("ERROR ON ASSAY WITH INDEX: {0}\n".format(i))
                    print("UH OH SOMETHING WENT WRONG")
                    continue
                print()
                completed_idx.append(i)
                test_accs.append(test_acc)
                f1s.append(f1)
                ratio_positive_nodes.append(rp)
                preds.to_csv('gnn_nc_results/{0}.tsv'.format(i), sep='\t', index=False)
                with open("./gnn_nc_results/nc_results.tsv", 'a') as fp:
                    fp.write(f"{i}\t{int(assay_node_ids[i])}\t{test_acc:.4f}\t{f1:.4f}\t{rp:.3f}\n")
            all_assay_results = pd.DataFrame({
                'assay_index': completed_idx,
                'assay_node_id': [assay_node_ids[i] for i in completed_idx],
                'acc': test_accs,
                'f1': f1s,
            })
            all_assay_results.to_csv('gnn_nc_results/nc_results_pd.tsv', sep='\t', index=False)
            ipdb.set_trace()  # deliberate: inspect sweep results interactively
            print()
        elif args.assay.isdigit():
            assay_idx = int(args.assay)
            test_acc, f1, preds, rp = node_classification(args, assay_idx)
            print(f"{assay_idx}\t{int(assay_node_ids[assay_idx])}\t{test_acc:.4f}\t{f1:.4f}\t{rp:.3f}\n")
            ipdb.set_trace()  # deliberate: inspect single-assay results
        else:
            raise argparse.ArgumentTypeError("Error - `--assay` must be an integer or 'all'.")
# CLI: choose the task ("nc" node classification / "lp" link prediction),
# the serialized DGL graph file, the learning rate, and which assay(s) to run.
if __name__=="__main__":
    tasks = ['nc', 'lp']
    parser = argparse.ArgumentParser(description="Train a heterogeneous RGCN on a prediction task.")
    parser.add_argument("--task", type=str, default="nc",
        help="Type of prediction task to perform.",
        choices=tasks)
    parser.add_argument("--graph-file", type=str, default="./data/graph.bin",
        help="File location where the DGL heterograph is stored.")
    parser.add_argument("--lr", type=float, default=0.01,
        help="Learning rate for the NN optimizer.")
    # NOTE(review): --assay has no type/help; main() treats it as a string
    # that must be either "all" or a decimal assay index.
    parser.add_argument("--assay", )
    parser.set_defaults(validation=True)
    args = parser.parse_args()
    main(args)
| EpistasisLab/qsar-gnn | main.py | main.py | py | 13,805 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number... |
74579468262 | from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import landing_page.routing
# Channels protocol router: plain HTTP falls through to Django's standard
# views; websocket connections pass through Channels' auth middleware and
# are dispatched to landing_page's websocket URL patterns.
application = ProtocolTypeRouter({
    # (http->django views is added by default)
    'websocket': AuthMiddlewareStack(
        URLRouter(
            landing_page.routing.websocket_urlpatterns
        )
    ),
})

# NOTE(review): settings must point ASGI_APPLICATION at this router, e.g.
# ASGI_APPLICATION = 'sitotest.routing.application'
{
"api_name": "channels.routing.ProtocolTypeRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "channels.auth.AuthMiddlewareStack",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "channels.routing.URLRouter",
"line_number": 8,
"usage_type": "call"
... |
29466618583 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pron91pkg import httputil
from bs4 import BeautifulSoup
from pron91pkg import disk
import requests
import shutil
import os
import time
from pron91pkg.FakeHeader import FakeHeader
# 1.get title
#2.get html5 m3u8
#3.download ts files from m3u8
#4.merge m3u8 to one file(ts file)
Sleep_Per_File = 0
Sleep_Per_TioutOut = 10
chunk_size = 512
basePath = "Spider/yezmw/"
def handleVideoContent(url):
    """Scrape a video page and return {'title': ..., 'hlsViedoUrl': ...}.

    Parses the page at `url` for a span.title (video title, escaped for use
    as a filename) and a div.dz > p (the m3u8 playlist URL).

    Fix: the original stored results in module-level globals and, when
    either element was missing, could raise NameError or silently return
    stale data from a previous call. This version uses locals only and
    returns None when the page cannot be parsed.
    """
    disk.mkdir(basePath)
    content = httputil.fetchContent(url)
    soup = BeautifulSoup(content, "html.parser")
    title_spans = soup.find_all("span", class_="title")
    if not title_spans:
        return None
    # NOTE(review): calls a double-underscore-prefixed "private" helper of
    # httputil; kept to preserve behavior, but it should be made public.
    title = httputil.__escape_file_name_str(str(title_spans[0].text))
    dz_divs = soup.find_all("div", class_="dz")
    if not dz_divs:
        return None
    hlsViedoUrl = str(dz_divs[0].find("p").text)
    return {
        "title": title,
        "hlsViedoUrl": hlsViedoUrl
    }
# url = "http://yezmw.com/video/show/id/4239"
#
# handleVideoContent(url)
def decodeM3u8File(title, hlsVideoUrl):
    """Download an m3u8 playlist and rewrite its entries to absolute URLs.

    The playlist is saved under basePath/<title>/ and a rewritten copy
    ("convert.m3u8") is written beside it in which every non-comment line
    is prefixed with the playlist's base URL. Returns the number of lines
    written to the converted file (comment lines included).

    Improvements over the original: file handles are managed with context
    managers so they are closed even on error, and the
    open/close/os.remove dance used to delete stale files is replaced with
    an os.path.exists check.
    """
    folderPath = basePath + title + "/"
    disk.mkdir(folderPath)
    # Split the playlist URL into directory ("base") and file name parts.
    slash = hlsVideoUrl.rfind("/")
    targetPath = folderPath + hlsVideoUrl[slash + 1:]
    baseURL = hlsVideoUrl[:slash + 1]
    convertPath = folderPath + "convert.m3u8"
    # Drop stale copies from a previous run.
    for stale in (targetPath, convertPath):
        if os.path.exists(stale):
            os.remove(stale)
    # Download the playlist.
    response = requests.get(hlsVideoUrl, stream=True)
    with open(targetPath, 'wb') as out_file:
        shutil.copyfileobj(response.raw, out_file)
    del response
    # Rewrite relative segment entries to absolute URLs; count every line.
    lineCount = 0
    with open(targetPath, "r+") as hlsFile, open(convertPath, "w+") as outFile:
        for line in hlsFile:
            if "#" not in line:
                line = baseURL + line
            outFile.write(line)
            lineCount = lineCount + 1
    print("decodeM3u8File end")
    return lineCount
def startdownloadVideo(referUrl, name, linecount):
    """Download every segment URL listed in <name>/convert.m3u8 and append
    them all into one <name>.<ext> file under <name>/parts/.

    referUrl is sent as the Referer header; linecount (from decodeM3u8File)
    is only used to print a progress percentage.

    NOTE(review): on ReadTimeout/ConnectionError the same line is retried
    after a sleep, but chunks already written before the failure are not
    rolled back, so a partially-downloaded segment can be duplicated in the
    output -- confirm whether that is acceptable for these streams.
    """
    fakeHeader = FakeHeader()
    name = name.replace("\n","")
    folderPath = basePath + name +"/"
    downloadPath = folderPath + "parts/"
    disk.mkdir(downloadPath)
    downloadFile = open(folderPath+"convert.m3u8","r+")
    # Take the extension (e.g. ".ts") from the first segment URL.
    line = downloadFile.readline()
    line = line.replace("\n","")
    index = line.rfind(".")
    type = line[index:]
    type = type.replace("\n","")
    # xxx.ts
    downloadPath = downloadPath + name + type
    # remove xxx.ts file left over from a previous run
    try:
        file = open(downloadPath, 'r')
        file.close()
        os.remove(downloadPath)
    except FileNotFoundError:
        pass
    outFile = open(downloadPath,"wb+")
    recordNum = 0;
    i = 0
    while(line!= ''):
        try:
            line = line.replace("\n","")
            partUrl = line
            recordNum = recordNum + 1
            print(partUrl)
            # Progress line (Chinese: "downloading segment N  P%").
            print("正在下载片段 " + str(recordNum) + " "+str(format(recordNum/linecount*100,".2f")) + "%")
            # Fresh fake headers per request, with the page as Referer.
            request_headers = fakeHeader.buildFakeHeader(referer=referUrl)
            response = requests.get(partUrl, stream=True,timeout=5,headers = request_headers)
            # print(request_headers)
            # print(response.status_code)
            for chunk in response.iter_content(chunk_size):
                outFile.write(chunk)
            del response
        except requests.exceptions.ReadTimeout:
            # Back off, then retry the same segment (the readline below is
            # skipped by `continue`).
            time.sleep(Sleep_Per_TioutOut)
            continue
        except requests.exceptions.ConnectionError:
            time.sleep(Sleep_Per_TioutOut)
            continue
        time.sleep(Sleep_Per_File)
        line = downloadFile.readline()
    downloadFile.close()
    outFile.close()
    pass
# hlsVideoUrl ="http://video1.feimanzb.com:8091/20171215/RKI-413-C/550kb/hls/index.m3u8"
# index = hlsVideoUrl.rfind("/")
# targetPath=hlsVideoUrl[index+1:]
# baseURL = hlsVideoUrl[:index+1]
#
# print(targetPath)
# print(baseURL)
# # response = requests.get(hlsVideoUrl, stream=True)
# # with open(targetPath, 'wb') as out_file:
# # shutil.copyfileobj(response.raw, out_file)
# # del response
# hlsFile = open(targetPath,"r+")
# outFile = open("convert.m3u8","w+")
# print(hlsFile.name)
#
# line = hlsFile.readline()
# while(line!= ''):
# if "#" in line:
# pass
# else:
# line = baseURL + line
# outFile.write(line)
# line = hlsFile.readline()
#
#
#
# print("end")
# hlsFile.close()
# outFile.close()
# url = "http://video1.feimanzb.com:8091/20171215/RKI-413-C/550kb/hls/gUYZa0Kw2426000.ts"
# index = url.rfind(".")
# print(url[index:])
# Ad-hoc manual test: fetch a page and dump its HTML to stdout.
# NOTE(review): the first assignment is dead -- `url` is immediately
# overwritten, so only the allitebooks URL is actually fetched.
url="https://www.avbyebye.com"
url="http://www.allitebooks.com/powershell-for-sql-server-essentials/"
content = httputil.fetchContent(url)
print(content)
{
"api_name": "pron91pkg.disk.mkdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pron91pkg.disk",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pron91pkg.httputil.fetchContent",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "p... |
34036888232 |
# Task #7 (was: "Задание №7"): read firm records from lesson_5_hw_7.txt
# (fields: name, quarter, revenue, costs per line), compute each firm's
# profit, and write JSON: [ {firm: profit, ...},
#                           {"avegare": mean profit over profitable firms} ].
import json

with open("lesson_5_hw_77.json", "w") as j_file:
    with open("lesson_5_hw_7.txt", "r") as f_o:
        subjects = {}   # firm name -> profit (revenue - costs)
        middle = {}     # holds the average over profitable firms
        total_profit, profitable_count = 0, 0
        for record in f_o.read().split("\n"):
            fields = record.split()
            profit = int(fields[2]) - int(fields[3])
            subjects[fields[0]] = profit
            if profit > 0:
                total_profit += profit
                profitable_count += 1
        # Key kept as "avegare" (sic) for output compatibility.
        # Fix: guard against no profitable firms -- the original raised
        # ZeroDivisionError in that case.
        middle["avegare"] = total_profit / profitable_count if profitable_count else 0
        json.dump([subjects, middle], j_file)
| TBidnik/python | practicum.py | practicum.py | py | 556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
}
] |
33902538934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" @brief Precision and Recall
@ref https://en.wikipedia.org/wiki/Precision_and_recall
Modified from https://github.com/lyst/lightfm and https://github.com/jfkirk/tensorrec
@author <ariel kalingking> akalingking@gmail.com """
import numpy as np
import pandas as pd
from scipy import sparse
__all__ = ["precision_at_k", "recall_at_k", "rank_matrix"]
def rank_matrix(mat):
    """Rank each row of `mat` descending: the largest entry gets rank 1.0.

    Ties receive the average of their ranks (pandas' default method).
    Returns a numpy array of the same shape.
    """
    assert isinstance(mat, (np.ndarray,))
    frame = pd.DataFrame(data=mat)
    ranked = frame.rank(axis=1, ascending=False)
    return ranked.values
def precision_at_k(predicted_ranks, interactions, k=10, preserve_rows=False):
    """Mean precision@k: fraction of each user's top-k ranks that are relevant.

    An interaction value > 0 marks an item as relevant. Rows (users) with
    no relevant item are dropped before averaging unless `preserve_rows`
    is True.
    """
    assert isinstance(interactions, (sparse.coo_matrix, sparse.csr_matrix))
    assert isinstance(predicted_ranks, (np.ndarray,))
    relevant = interactions > 0
    # Keep the ranks of relevant items only, then flag those inside the top k.
    ranks = sparse.csr_matrix(predicted_ranks * relevant.A)
    ranks.data = np.less(ranks.data, (k + 1), ranks.data)
    hits_per_user = np.squeeze(np.array(ranks.sum(axis=1))).astype(float)
    precision = hits_per_user / k
    if not preserve_rows:
        precision = precision[relevant.getnnz(axis=1) > 0]
    return precision.mean()
def recall_at_k(predicted_ranks, interactions, k=10, preserve_rows=False):
    """Mean recall@k: fraction of each user's relevant items ranked in the top k.

    An interaction value > 0 marks an item as relevant. Rows (users) with
    no relevant item are dropped before averaging unless `preserve_rows`
    is True.
    """
    assert isinstance(interactions, (sparse.coo_matrix, sparse.csr_matrix))
    assert isinstance(predicted_ranks, (np.ndarray,))
    relevant = interactions > 0
    # Keep the ranks of relevant items only, then flag those inside the top k.
    ranks = sparse.csr_matrix(predicted_ranks * relevant.A)
    ranks.data = np.less(ranks.data, (k + 1), ranks.data)
    n_relevant = np.squeeze(relevant.getnnz(axis=1))
    n_hit = np.squeeze(np.array(ranks.sum(axis=1)))
    if not preserve_rows:
        keep = relevant.getnnz(axis=1) > 0
        n_hit = n_hit[keep]
        n_relevant = n_relevant[keep]
    return (n_hit.astype(float) / n_relevant.astype(float)).mean()
| akalingking/RecSys | metrics.py | metrics.py | py | 1,793 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.coo_matrix",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "scip... |
36992033529 | import os
import sys
from PIL import Image
from scene.cameras import Camera
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from scene.hyper_loader import Load_hyper_data, format_hyper_data
import torchvision.transforms as transforms
import copy
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
import numpy as np
import torch
import json
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from utils.general_utils import PILtoTorch
from tqdm import tqdm
class CameraInfo(NamedTuple):
    """Immutable description of one camera/frame used to build a scene.

    R/T follow the COLMAP-derived convention used throughout this module.
    Annotations use np.ndarray (the actual array type) instead of np.array,
    which is a function, not a type.
    """
    uid: int                # unique camera id
    R: np.ndarray           # 3x3 rotation matrix
    T: np.ndarray           # 3-vector translation
    FovY: np.ndarray        # vertical field of view (radians)
    FovX: np.ndarray        # horizontal field of view (radians)
    image: np.ndarray       # image tensor/array produced by PILtoTorch
    image_path: str
    image_name: str
    width: int
    height: int
    time: float             # normalised timestamp of this frame
    mask: np.ndarray
class SceneInfo(NamedTuple):
    """Aggregated scene description returned by every loader in this module."""
    point_cloud: BasicPointCloud  # seed points for gaussian initialisation
    train_cameras: list
    test_cameras: list
    video_cameras: list           # cameras used for rendering a fly-through video
    nerf_normalization: dict      # {"translate": ..., "radius": ...} from getNerfppNorm
    ply_path: str                 # on-disk location of the point cloud
    maxtime: int                  # sequence length / maximum timestamp
def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalisation from camera centres.

    Returns a dict with "translate" (negated mean camera centre) and
    "radius" (1.1 x the largest centre-to-mean distance).
    """
    centers = []
    for cam in cam_info:
        w2c = getWorld2View2(cam.R, cam.T)
        c2w = np.linalg.inv(w2c)
        centers.append(c2w[:3, 3:4])
    stacked = np.hstack(centers)
    mean_center = np.mean(stacked, axis=1, keepdims=True)
    distances = np.linalg.norm(stacked - mean_center, axis=0, keepdims=True)
    diagonal = np.max(distances)
    return {"translate": -mean_center.flatten(), "radius": diagonal * 1.1}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    """Build one CameraInfo per COLMAP image record.

    :param cam_extrinsics: dict of COLMAP image records (qvec/tvec per image).
    :param cam_intrinsics: dict of COLMAP camera records keyed by camera_id.
    :param images_folder: directory containing the actual image files.
    :return: list of CameraInfo (time fixed to 0, mask None).
    """
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        # single-line progress indicator; '\r' rewinds to overwrite it
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()
        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id
        # COLMAP stores world-to-camera rotation; transpose to the convention
        # the rest of this module expects
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)
        if intr.model in ["SIMPLE_PINHOLE", "SIMPLE_RADIAL"]:
            # single focal length shared by both axes
            # NOTE(review): SIMPLE_RADIAL also carries a distortion
            # coefficient that is ignored here — assumes undistorted images
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model == "OPENCV":
            # OPENCV distortion parameters are likewise ignored
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)
        # PILtoTorch with None keeps the original resolution
        image = PILtoTorch(image,None)
        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                              image_path=image_path, image_name=image_name, width=width, height=height,
                              time = 0, mask=None)
        cam_infos.append(cam_info)
    sys.stdout.write('\n')
    return cam_infos
def fetchPly(path):
    """Load a PLY file into a BasicPointCloud.

    Colours are rescaled from 0..255 bytes to floats in [0, 1].
    """
    plydata = PlyData.read(path)
    vert = plydata['vertex']
    xyz = np.vstack([vert['x'], vert['y'], vert['z']]).T
    rgb = np.vstack([vert['red'], vert['green'], vert['blue']]).T / 255.0
    nrm = np.vstack([vert['nx'], vert['ny'], vert['nz']]).T
    return BasicPointCloud(points=xyz, colors=rgb, normals=nrm)
def storePly(path, xyz, rgb):
    """Write *xyz* points with *rgb* colours (and zero normals) to a PLY file."""
    # structured dtype expected by plyfile for a 'vertex' element
    vertex_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                    ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
                    ('red', 'f4'), ('green', 'f4'), ('blue', 'f4')]
    normals = np.zeros_like(xyz)
    rows = np.concatenate((xyz, normals, rgb), axis=1)
    vertices = np.empty(xyz.shape[0], dtype=vertex_dtype)
    vertices[:] = [tuple(row) for row in rows]
    # assemble the element and write it out
    PlyData([PlyElement.describe(vertices, 'vertex')]).write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
    """Load a COLMAP reconstruction into a SceneInfo.

    :param path: scene root containing ``sparse/0`` and the image folder.
    :param images: image sub-folder name; defaults to "images" when None.
    :param eval: when True, hold out every ``llffhold``-th camera for testing.
    :param llffhold: test-set stride used when ``eval`` is set.
    :return: SceneInfo with maxtime=0 (static scene).
    """
    # Prefer the binary COLMAP output; fall back to the text export.
    # (narrowed from bare except: so KeyboardInterrupt/SystemExit still escape)
    try:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
        cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
    except Exception:
        cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
        cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
        cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
        cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
    reading_dir = "images" if images is None else images
    cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
    # deterministic order so the eval split is reproducible
    cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name)
    if eval:
        train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
        test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
    else:
        train_cam_infos = cam_infos
        test_cam_infos = []
    nerf_normalization = getNerfppNorm(train_cam_infos)
    ply_path = os.path.join(path, "sparse/0/points3D.ply")
    bin_path = os.path.join(path, "sparse/0/points3D.bin")
    txt_path = os.path.join(path, "sparse/0/points3D.txt")
    if not os.path.exists(ply_path):
        # one-time conversion of the COLMAP point cloud to .ply
        print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
        try:
            xyz, rgb, _ = read_points3D_binary(bin_path)
        except Exception:
            xyz, rgb, _ = read_points3D_text(txt_path)
        storePly(ply_path, xyz, rgb)
    try:
        pcd = fetchPly(ply_path)
    except Exception:
        # missing/corrupt ply: callers handle a None point cloud
        pcd = None
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=train_cam_infos,
                           maxtime=0,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path)
    return scene_info
def generateCamerasFromTransforms(path, template_transformsfile, extension, maxtime):
    """Synthesise a circular fly-around of render cameras for video output.

    Poses lie on a sphere (radius 4, elevation -30 deg, 160 azimuth steps);
    times are spread linearly over [0, maxtime] and then normalised to [0, 1].
    Intrinsics and image size come from *template_transformsfile*.
    """
    # elementary 4x4 pose transforms (translation along z, rotations)
    trans_t = lambda t : torch.Tensor([
        [1,0,0,0],
        [0,1,0,0],
        [0,0,1,t],
        [0,0,0,1]]).float()
    rot_phi = lambda phi : torch.Tensor([
        [1,0,0,0],
        [0,np.cos(phi),-np.sin(phi),0],
        [0,np.sin(phi), np.cos(phi),0],
        [0,0,0,1]]).float()
    rot_theta = lambda th : torch.Tensor([
        [np.cos(th),0,-np.sin(th),0],
        [0,1,0,0],
        [np.sin(th),0, np.cos(th),0],
        [0,0,0,1]]).float()
    def pose_spherical(theta, phi, radius):
        # camera-to-world pose on a sphere; final matrix flips axes into the
        # coordinate convention used by the rest of the pipeline
        c2w = trans_t(radius)
        c2w = rot_phi(phi/180.*np.pi) @ c2w
        c2w = rot_theta(theta/180.*np.pi) @ c2w
        c2w = torch.Tensor(np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])) @ c2w
        return c2w
    cam_infos = []
    # generate render poses and times
    render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,160+1)[:-1]], 0)
    render_times = torch.linspace(0,maxtime,render_poses.shape[0])
    with open(os.path.join(path, template_transformsfile)) as json_file:
        template_json = json.load(json_file)
    # some datasets store a global FOV, others per-file focal length
    try:
        fovx = template_json["camera_angle_x"]
    except:
        fovx = focal2fov(template_json["fl_x"], template_json['w'])
    print("hello!!!!")
    # breakpoint()
    # load a single image to get image info.
    for idx, frame in enumerate(template_json["frames"]):
        cam_name = os.path.join(path, frame["file_path"] + extension)
        image_path = os.path.join(path, cam_name)
        image_name = Path(cam_name).stem
        image = Image.open(image_path)
        im_data = np.array(image.convert("RGBA"))
        # NOTE(review): resolution is hard-coded to 800x800 here — confirm
        image = PILtoTorch(image,(800,800))
        break
    # format information
    for idx, (time, poses) in enumerate(zip(render_times,render_poses)):
        time = time/maxtime
        matrix = np.linalg.inv(np.array(poses))
        # sign flips convert the inverted pose into this module's R/T convention
        R = -np.transpose(matrix[:3,:3])
        R[:,0] = -R[:,0]
        T = -matrix[:3, 3]
        fovy = focal2fov(fov2focal(fovx, image.shape[1]), image.shape[2])
        FovY = fovy
        FovX = fovx
        cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path=None, image_name=None, width=image.shape[1], height=image.shape[2],
                            time = time, mask=None))
    return cam_infos
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png", mapper = {}):
    """Read a Blender/D-NeRF-style transforms JSON into a list of CameraInfo.

    :param path: scene root directory.
    :param transformsfile: e.g. "transforms_train.json".
    :param white_background: composite RGBA frames over white instead of black.
    :param extension: image file extension appended to each frame's file_path.
    :param mapper: dict mapping raw frame timestamps to normalised times
        (as produced by read_timeline).
    """
    cam_infos = []
    with open(os.path.join(path, transformsfile)) as json_file:
        contents = json.load(json_file)
        # global FOV if present, otherwise derive it from the focal length
        try:
            fovx = contents["camera_angle_x"]
        except:
            fovx = focal2fov(contents['fl_x'],contents['w'])
        frames = contents["frames"]
        for idx, frame in enumerate(frames):
            cam_name = os.path.join(path, frame["file_path"] + extension)
            time = mapper[frame["time"]]
            # invert camera-to-world, then flip signs into this module's R/T convention
            matrix = np.linalg.inv(np.array(frame["transform_matrix"]))
            R = -np.transpose(matrix[:3,:3])
            R[:,0] = -R[:,0]
            T = -matrix[:3, 3]
            image_path = os.path.join(path, cam_name)
            image_name = Path(cam_name).stem
            image = Image.open(image_path)
            im_data = np.array(image.convert("RGBA"))
            # alpha-composite onto the chosen background colour
            bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
            norm_data = im_data / 255.0
            arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
            image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
            # NOTE(review): resolution is hard-coded to 800x800 — confirm
            image = PILtoTorch(image,(800,800))
            fovy = focal2fov(fov2focal(fovx, image.shape[1]), image.shape[2])
            FovY = fovy
            FovX = fovx
            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                                image_path=image_path, image_name=image_name, width=image.shape[1], height=image.shape[2],
                                time = time, mask=None))
    return cam_infos
def read_timeline(path):
    """Collect every timestamp appearing in the train and test splits.

    :param path: directory holding transforms_train.json / transforms_test.json.
    :return: (mapper, max_time) where mapper maps each raw timestamp to its
        value normalised by the maximum timestamp.
    """
    with open(os.path.join(path, "transforms_train.json")) as json_file:
        train_json = json.load(json_file)
    with open(os.path.join(path, "transforms_test.json")) as json_file:
        test_json = json.load(json_file)
    all_times = [frame["time"] for frame in train_json["frames"]]
    all_times += [frame["time"] for frame in test_json["frames"]]
    unique_times = sorted(set(all_times))
    max_time_float = max(unique_times)
    timestamp_mapper = {t: t / max_time_float for t in unique_times}
    return timestamp_mapper, max_time_float
def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
    """Load a synthetic (Blender/D-NeRF style) scene into a SceneInfo.

    Reads transforms_train.json / transforms_test.json, builds a spiral of
    video cameras, and seeds the point cloud from fused.ply or random points.
    """
    timestamp_mapper, max_time = read_timeline(path)
    print("Reading Training Transforms")
    train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension, timestamp_mapper)
    print("Reading Test Transforms")
    test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension, timestamp_mapper)
    print("Generating Video Transforms")
    video_cam_infos = generateCamerasFromTransforms(path, "transforms_train.json", extension, max_time)
    if not eval:
        # no held-out split: fold the test cameras into training
        train_cam_infos.extend(test_cam_infos)
        test_cam_infos = []
    nerf_normalization = getNerfppNorm(train_cam_infos)
    ply_path = os.path.join(path, "fused.ply")
    if not os.path.exists(ply_path):
        # Since this data set has no colmap data, we start with random points
        num_pts = 2000
        print(f"Generating random point cloud ({num_pts})...")
        # We create random points inside the bounds of the synthetic Blender scenes
        xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
        shs = np.random.random((num_pts, 3)) / 255.0
        pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
        # storePly(ply_path, xyz, SH2RGB(shs) * 255)
    else:
        pcd = fetchPly(ply_path)
        # xyz = -np.array(pcd.points)
        # pcd = pcd._replace(points=xyz)
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=video_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=max_time
                           )
    return scene_info
def format_infos(dataset,split):
    """Wrap a Neural3D_NDC_Dataset split into a list of CameraInfo.

    Only the "train" split is materialised; any other split value returns an
    empty list. All entries share the first sample's image.
    """
    # loading
    cameras = []
    image = dataset[0][0]
    if split == "train":
        for idx in tqdm(range(len(dataset))):
            image_path = None
            image_name = f"{idx}"
            time = dataset.image_times[idx]
            # matrix = np.linalg.inv(np.array(pose))
            R,T = dataset.load_pose(idx)
            # NOTE(review): FovX uses image.shape[1] while width below uses
            # image.shape[2] — verify the intended (C, H, W) axis order here.
            FovX = focal2fov(dataset.focal[0], image.shape[1])
            FovY = focal2fov(dataset.focal[0], image.shape[2])
            cameras.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                                image_path=image_path, image_name=image_name, width=image.shape[2], height=image.shape[1],
                                time = time, mask=None))
    return cameras
def readHyperDataInfos(datadir,use_bg_points,eval):
    """Load a NeRFies/HyperNeRF-format scene into a SceneInfo.

    Video cameras are a deep copy of the test split relabelled as "video".
    Side effect: writes a debug camera/point plot to output.png via
    plot_camera_orientations. The 0.5 ratio passed to Load_hyper_data is
    hard-coded. The ``eval`` argument is currently unused.
    """
    train_cam_infos = Load_hyper_data(datadir,0.5,use_bg_points,split ="train")
    test_cam_infos = Load_hyper_data(datadir,0.5,use_bg_points,split="test")
    print("load finished")
    train_cam = format_hyper_data(train_cam_infos,"train")
    print("format finished")
    max_time = train_cam_infos.max_time
    video_cam_infos = copy.deepcopy(test_cam_infos)
    video_cam_infos.split="video"
    ply_path = os.path.join(datadir, "points3D_downsample.ply")
    pcd = fetchPly(ply_path)
    xyz = np.array(pcd.points)
    pcd = pcd._replace(points=xyz)
    nerf_normalization = getNerfppNorm(train_cam)
    # debug visualisation (saves output.png in the working directory)
    plot_camera_orientations(train_cam_infos, pcd.points)
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=video_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=max_time
                           )
    return scene_info
def format_render_poses(poses,data_infos):
    """Convert raw 3x4/4x4 render poses into CameraInfo entries.

    Times are spread uniformly over [0, 1); every entry reuses the dataset's
    first image as a placeholder.
    """
    cameras = []
    # NOTE(review): tensor_to_pil is created but never used below
    tensor_to_pil = transforms.ToPILImage()
    len_poses = len(poses)
    times = [i/len_poses for i in range(len_poses)]
    image = data_infos[0][0]
    for idx, p in tqdm(enumerate(poses)):
        # image = None
        image_path = None
        image_name = f"{idx}"
        time = times[idx]
        pose = np.eye(4)
        pose[:3,:] = p[:3,:]
        # matrix = np.linalg.inv(np.array(pose))
        # NOTE(review): the sign flips and T = -t.dot(R) below encode a
        # dataset-specific pose convention — confirm against the renderer.
        R = pose[:3,:3]
        R = - R
        R[:,0] = -R[:,0]
        T = -pose[:3,3].dot(R)
        FovX = focal2fov(data_infos.focal[0], image.shape[2])
        FovY = focal2fov(data_infos.focal[0], image.shape[1])
        cameras.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
                            image_path=image_path, image_name=image_name, width=image.shape[2], height=image.shape[1],
                            time = time, mask=None))
    return cameras
def add_points(pointsclouds, xyz_min, xyz_max, num_points=100000):
    """Return a copy of *pointsclouds* padded with random extra points.

    New positions are drawn uniformly inside the axis-aligned box
    [xyz_min, xyz_max]; colours and normals are uniform random in [0, 1).

    :param pointsclouds: BasicPointCloud-like namedtuple with points/colors/normals.
    :param xyz_min: lower corner of the sampling box (3-vector).
    :param xyz_max: upper corner of the sampling box (3-vector).
    :param num_points: how many points to add; the default keeps the
        previously hard-coded 100000.
    """
    # renamed locals: the original shadowed the function's own name
    extra_points = (np.random.random((num_points, 3))) * (xyz_max - xyz_min) + xyz_min
    extra_points = extra_points.astype(np.float32)
    extra_colors = np.random.random((num_points, 3)).astype(np.float32)
    extra_normals = np.random.random((num_points, 3)).astype(np.float32)
    # _replace returns a new namedtuple; one call updates all three fields
    return pointsclouds._replace(
        points=np.vstack([pointsclouds.points, extra_points]),
        colors=np.vstack([pointsclouds.colors, extra_colors]),
        normals=np.vstack([pointsclouds.normals, extra_normals]),
    )
def readdynerfInfo(datadir,use_bg_points,eval):
    """Load a DyNeRF / Neural 3D Video scene (hexplane layout) into SceneInfo.

    maxtime is hard-coded to 300 frames. ``use_bg_points`` and ``eval`` are
    currently unused. Note that train/test_cameras hold the datasets
    themselves rather than CameraInfo lists.
    """
    # loading all the data follow hexplane format
    # ply_path = os.path.join(datadir, "points3D_dense.ply")
    ply_path = os.path.join(datadir, "points3D_downsample2.ply")
    # imported lazily to avoid pulling the dataset module at import time
    from scene.neural_3D_dataset_NDC import Neural3D_NDC_Dataset
    train_dataset = Neural3D_NDC_Dataset(
        datadir,
        "train",
        1.0,
        time_scale=1,
        scene_bbox_min=[-2.5, -2.0, -1.0],
        scene_bbox_max=[2.5, 2.0, 1.0],
        eval_index=0,
    )
    test_dataset = Neural3D_NDC_Dataset(
        datadir,
        "test",
        1.0,
        time_scale=1,
        scene_bbox_min=[-2.5, -2.0, -1.0],
        scene_bbox_max=[2.5, 2.0, 1.0],
        eval_index=0,
    )
    train_cam_infos = format_infos(train_dataset,"train")
    val_cam_infos = format_render_poses(test_dataset.val_poses,test_dataset)
    nerf_normalization = getNerfppNorm(train_cam_infos)
    # xyz = np.load
    pcd = fetchPly(ply_path)
    # NOTE(review): the two prints below report the same value; the "after"
    # line presumably belonged to a removed filtering step.
    print("origin points,",pcd.points.shape[0])
    print("after points,",pcd.points.shape[0])
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_dataset,
                           test_cameras=test_dataset,
                           video_cameras=val_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=300
                           )
    return scene_info
def setup_camera(w, h, k, w2c, near=0.01, far=100):
    """Build a GaussianRasterizationSettings object for one camera.

    :param w: image width in pixels.
    :param h: image height in pixels.
    :param k: 3x3 intrinsics matrix (fx, fy, cx, cy are read from it).
    :param w2c: 4x4 world-to-camera matrix.
    :param near: near clipping plane distance.
    :param far: far clipping plane distance.
    Requires a CUDA device (tensors are moved to cuda).
    """
    # local alias deliberately shadows the module-level `Camera` import
    # within this function's scope only
    from diff_gaussian_rasterization import GaussianRasterizationSettings as Camera
    fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]
    w2c = torch.tensor(w2c).cuda().float()
    cam_center = torch.inverse(w2c)[:3, 3]
    w2c = w2c.unsqueeze(0).transpose(1, 2)
    # OpenGL-style projection matrix built from the intrinsics and clip planes
    opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],
                                [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],
                                [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],
                                [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)
    full_proj = w2c.bmm(opengl_proj)
    cam = Camera(
        image_height=h,
        image_width=w,
        tanfovx=w / (2 * fx),
        tanfovy=h / (2 * fy),
        bg=torch.tensor([0, 0, 0], dtype=torch.float32, device="cuda"),
        scale_modifier=1.0,
        viewmatrix=w2c,
        projmatrix=full_proj,
        sh_degree=0,
        campos=cam_center,
        prefiltered=False,
        debug=True
    )
    return cam
def plot_camera_orientations(cam_list, xyz):
    """Debug helper: scatter the point cloud and draw each camera's viewing
    direction as an arrow, saving the figure to output.png."""
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # ax2 = fig.add_subplot(122, projection='3d')
    # keep only points inside a +/-2 cube so the plot stays readable
    limit = 2
    inside = ((xyz[:, 0] >= -limit) & (xyz[:, 0] <= limit) &
              (xyz[:, 1] >= -limit) & (xyz[:, 1] <= limit) &
              (xyz[:, 2] >= -limit) & (xyz[:, 2] <= limit))
    pts = xyz[inside]
    ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], c='r', s=0.1)
    for cam in tqdm(cam_list):
        # extract rotation and translation; view direction is R applied to +z
        direction = cam.R @ np.array([0, 0, 1])
        origin = cam.T
        ax.quiver(origin[0], origin[1], origin[2],
                  direction[0], direction[1], direction[2], length=1)
    ax.set_xlabel('X Axis')
    ax.set_ylabel('Y Axis')
    ax.set_zlabel('Z Axis')
    plt.savefig("output.png")
def readPanopticmeta(datadir, json_path):
    """Read a PanopticSports meta JSON and build per-frame camera dicts.

    :param datadir: dataset root containing the "ims" folder and meta files.
    :param json_path: meta file name, e.g. "train_meta.json".
    :return: (cam_infos, max_time, scene_radius) where each cam_info is a
        dict with "camera" (rasterizer settings), "time" in [0, 1) and
        "image" (3-channel tensor).
    """
    with open(os.path.join(datadir,json_path)) as f:
        test_meta = json.load(f)
    w = test_meta['w']
    h = test_meta['h']
    max_time = len(test_meta['fn'])
    cam_infos = []
    for index in range(len(test_meta['fn'])):
        # per-timestep lists: one entry per physical camera
        focals = test_meta['k'][index]
        w2cs = test_meta['w2c'][index]
        fns = test_meta['fn'][index]
        cam_ids = test_meta['cam_id'][index]
        time = index / len(test_meta['fn'])
        # breakpoint()
        for focal, w2c, fn, cam in zip(focals, w2cs, fns, cam_ids):
            image_path = os.path.join(datadir,"ims")
            image_name=fn
            # breakpoint()
            image = Image.open(os.path.join(datadir,"ims",fn))
            im_data = np.array(image.convert("RGBA"))
            # breakpoint()
            # drop the alpha channel after conversion to tensor layout
            im_data = PILtoTorch(im_data,None)[:3,:,:]
            # breakpoint()
            # print(w2c,focal,image_name)
            camera = setup_camera(w, h, focal, w2c)
            cam_infos.append({
                "camera":camera,
                "time":time,
                "image":im_data})
    cam_centers = np.linalg.inv(test_meta['w2c'][0])[:, :3, 3]  # Get scene radius
    scene_radius = 1.1 * np.max(np.linalg.norm(cam_centers - np.mean(cam_centers, 0)[None], axis=-1))
    # breakpoint()
    return cam_infos, max_time, scene_radius
def readPanopticSportsinfos(datadir):
    """Load a PanopticSports scene into a SceneInfo.

    The point cloud is seeded from init_pt_cld.npz (columns: xyz, rgb) and
    also written to disk; the test cameras double as video cameras.
    """
    train_cam_infos, max_time, scene_radius = readPanopticmeta(datadir, "train_meta.json")
    test_cam_infos,_, _ = readPanopticmeta(datadir, "test_meta.json")
    nerf_normalization = {
        "radius":scene_radius,
        "translate":torch.tensor([0,0,0])
    }
    ply_path = os.path.join(datadir, "pointd3D.ply")
    # Since this data set has no colmap data, we start with random points
    plz_path = os.path.join(datadir, "init_pt_cld.npz")
    data = np.load(plz_path)["data"]
    xyz = data[:,:3]
    rgb = data[:,3:6]
    num_pts = xyz.shape[0]
    pcd = BasicPointCloud(points=xyz, colors=rgb, normals=np.ones((num_pts, 3)))
    storePly(ply_path, xyz, rgb)
    # pcd = fetchPly(ply_path)
    # breakpoint()
    scene_info = SceneInfo(point_cloud=pcd,
                           train_cameras=train_cam_infos,
                           test_cameras=test_cam_infos,
                           video_cameras=test_cam_infos,
                           nerf_normalization=nerf_normalization,
                           ply_path=ply_path,
                           maxtime=max_time,
                           )
    return scene_info
# Registry: dataset-format name -> SceneInfo loader function.
sceneLoadTypeCallbacks = {
    "Colmap": readColmapSceneInfo,
    "Blender" : readNerfSyntheticInfo,  # transforms_*.json scenes with per-frame times
    "dynerf" : readdynerfInfo,
    "nerfies": readHyperDataInfos,  # NeRFies & HyperNeRF dataset proposed by [https://github.com/google/hypernerf/releases/tag/v0.1]
    "PanopticSports" : readPanopticSportsinfos
}
| hustvl/4DGaussians | scene/dataset_readers.py | dataset_readers.py | py | 23,816 | python | en | code | 995 | github-code | 36 | [
{
"api_name": "typing.NamedTuple",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
... |
13448911430 | import logging
import cv2
import numpy
from inquire.detection.ButtonState import ButtonState
from inquire.detection.RecognizedElement import RecognizedElement
try:
from robot.api.logger import info
robot_logger = True
except ImportError:
robot_logger = False
logging.basicConfig(level=logging.INFO)
class TextElement(RecognizedElement):
    """
    TextElement Class. It's used to store the information about the found Text, like it's state,...
    """
    def __init__(self, text: str | None,
                 image: numpy.ndarray,
                 rectangle: tuple = (0, 0, 0, 0),
                 confidence: float | None = None,
                 colours: list | None = None,
                 state: ButtonState | None = None,
                 binary_image: numpy.ndarray | None = None,
                 render_bin: int = 0):
        # Generic element bookkeeping lives in RecognizedElement; only the
        # recognised text string is stored here.
        super().__init__(image=image,
                         rectangle=rectangle,
                         confidence=confidence,
                         colours=colours,
                         state=state,
                         binary_image=binary_image,
                         render_bin=render_bin)
        self.text = text

    def __str__(self):
        # Include the bounding rectangle only when it carries information
        # (i.e. differs from the all-zero default).
        if self.rectangle != (0, 0, 0, 0):
            return f'({self.text}, conf:{self.confidence}, rec:{self.rectangle})'
        else:
            return f'({self.text}, conf:{self.confidence})'

    def __repr__(self):
        return self.__str__()

    @staticmethod
    def concatenate_horizontal(img_list: list[numpy.ndarray]) -> numpy.ndarray:
        """Resize all images to the smallest height and concatenate them side by side.

        Empty images are skipped (logged through Robot Framework when available).
        """
        h_min = min(img.shape[0] for img in img_list)
        im_list_resize = []
        for img in img_list:
            if img.size == 0:
                if robot_logger:
                    info(f'textelement merging encountered empty fields', html=True)
                else:
                    logging.info(f'textelement merging encountered empty fields')
                continue
            # scale width to preserve the aspect ratio at the common height
            width = int(img.shape[1] * h_min / img.shape[0])
            resized_img = cv2.resize(img, (width, h_min), interpolation=cv2.INTER_CUBIC)
            im_list_resize.append(resized_img)
        # im_list_resize = [cv2.resize(img, (int(img.shape[1] * h_min / img.shape[0]), h_min),
        #                             interpolation=cv2.INTER_CUBIC)
        #                  for img in img_list]
        concat = cv2.hconcat(im_list_resize)
        return concat

    def do_they_align_horizontally(self, element: 'TextElement') -> bool:
        """Return True when *element*'s vertical extent overlaps this one's."""
        # rectangle layout is (x, y, w, h); compare y ranges
        if (element.rectangle[1] + element.rectangle[3] < self.rectangle[1]) or (
                element.rectangle[1] > self.rectangle[1] + self.rectangle[3]):
            return False
        else:
            return True

    def do_they_align_vertically(self, element: 'TextElement') -> bool:
        """Return True when *element*'s horizontal extent overlaps this one's."""
        # rectangle layout is (x, y, w, h); compare x ranges
        if (element.rectangle[0] + element.rectangle[2] < self.rectangle[0]) or (
                element.rectangle[0] > self.rectangle[0] + self.rectangle[2]):
            return False
        else:
            return True

    def append_text_element(self, element: 'TextElement') -> None:
        """
        TODO img, rectangle
        Append the text and confidence level of the given text element to the current one. (Merges)
        :param element: Another TextElement to append
        :type element: TextElement
        :return:
        :rtype: None
        """
        # NOTE(review): assumes self.text / self.confidence are not None when
        # the incoming element's are — a None on self would raise TypeError.
        if element.text is not None:
            self.text += " " + element.text
        if element.confidence is not None:
            self.confidence = (self.confidence + element.confidence) / 2
        # Merged box keeps the top-left-most origin and sums the extents.
        # NOTE(review): summing widths/heights overestimates the union unless
        # the boxes are adjacent — confirm this is intended.
        self.rectangle = (min(self.rectangle[0], element.rectangle[0]),
                          min(self.rectangle[1], element.rectangle[1]),
                          self.rectangle[2] + element.rectangle[2],
                          self.rectangle[3] + element.rectangle[3])
        self.image = self.concatenate_horizontal([self.image, element.image])
| Diyomee/Inquire | src/inquire/detection/TextElement.py | TextElement.py | py | 3,930 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "inquire.detection.RecognizedElement.RecognizedElement",
"line_number": 18,
"usage_type": "name"
}... |
18665423985 | """
Calculate L and T velocities from LL and LT backwalls.
Raises
------
IndefiniteVelocityError
Output
------
conf.d/30_block_velocities.yaml
velocity_L.png
velocity_T.png
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
import arim
import arim.ray
from arim.im.das import lanczos_interpolation
from tqdm import tqdm
import pandas as pd
import yaml
import arim.models.block_in_immersion as bim
import numba
from . import common
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.getLogger("arim").setLevel(logging.WARNING)
class IndefiniteVelocityError(RuntimeError):
    # Raised when the intensity maximum sits on a boundary of the searched
    # velocity range, i.e. the optimum could not be bracketed.
    pass
def _time_of_flights_backwall_LL(conf):
    """Ray-trace the LL backwall path and return its times of flight.

    :param conf: arim configuration describing the probe and examination object.
    :return: array of times of flight indexed by (tx element, rx element).
    """
    probe = common.load_probe(conf)
    examination_object = arim.io.examination_object_from_conf(conf)
    # (removed unused tx_list/rx_list from arim.ut.fmc — times come from
    # the traced rays, not from an FMC index list)
    backwall_paths = bim.backwall_paths(
        examination_object.couplant_material,
        examination_object.block_material,
        probe.to_oriented_points(),
        examination_object.frontwall,
        examination_object.backwall,
    )
    path = backwall_paths["LL"]
    arim.ray.ray_tracing_for_paths([path])
    return path.rays.times
def _time_of_flights_backwall_LT(conf):
    """Ray-trace the LT backwall path and return its times of flight.

    :param conf: arim configuration describing the probe and examination object.
    :return: array of times of flight indexed by (tx element, rx element).
    """
    probe = common.load_probe(conf)
    examination_object = arim.io.examination_object_from_conf(conf)
    # (removed unused tx_list/rx_list from arim.ut.fmc — times come from
    # the traced rays, not from an FMC index list)
    backwall_paths = bim.backwall_paths(
        examination_object.couplant_material,
        examination_object.block_material,
        probe.to_oriented_points(),
        examination_object.frontwall,
        examination_object.backwall,
    )
    path = backwall_paths["LT"]
    arim.ray.ray_tracing_for_paths([path])
    return path.rays.times
@numba.njit(parallel=True)
def _wall_intensities_lanczos(scanlines, tof_arr, tx, rx, t0, invdt, a):
    # Sum, over all scanlines, the Lanczos-interpolated amplitude sampled at
    # each tx/rx pair's backwall time of flight.
    # NOTE(review): parallel=True but the loop uses range, not numba.prange —
    # confirm whether parallel execution is actually intended/achieved here.
    res = 0.0
    for scan in range(scanlines.shape[0]):
        tof = tof_arr[tx[scan], rx[scan]]
        # convert the time of flight to a fractional sample index
        tof_idx = (tof - t0) * invdt
        res += lanczos_interpolation(tof_idx, scanlines[scan], a)
    return res
def _wall_intensities(frame, tof_arr):
    """Total scanline amplitude of *frame* sampled at the given times of flight."""
    sampling_rate = 1 / frame.time.step
    return _wall_intensities_lanczos(
        frame.scanlines,
        tof_arr,
        frame.tx,
        frame.rx,
        frame.time.start,
        sampling_rate,
        a=3,
    )
def _measure_l_vel(conf, frame, l_vel_range):
    """Backwall-LL intensity for each candidate L velocity, as a pd.Series.

    Mutates conf["block_material"]["longitudinal_vel"] while scanning.
    """
    def intensity_for(candidate):
        conf["block_material"]["longitudinal_vel"] = candidate
        return _wall_intensities(frame, _time_of_flights_backwall_LL(conf))

    values = [intensity_for(v) for v in tqdm(l_vel_range, desc="L velocity")]
    return pd.Series(values, index=l_vel_range)
def _measure_t_vel(conf, frame, t_vel_range):
    """Backwall-LT intensity for each candidate T velocity, as a pd.Series.

    Mutates conf["block_material"]["transverse_vel"] while scanning.
    """
    def intensity_for(candidate):
        conf["block_material"]["transverse_vel"] = candidate
        return _wall_intensities(frame, _time_of_flights_backwall_LT(conf))

    values = [intensity_for(v) for v in tqdm(t_vel_range, desc="T velocity")]
    return pd.Series(values, index=t_vel_range)
def measure_velocities_from_timetraces(dataset_name, save, noshow=False):
    """
    maximise Sum_i(Envelope(TimeTrace[tof_backwall_i]))

    Coarse-to-fine 1D search: L velocity first (backwall LL echo), then T
    velocity (backwall LT echo). Each search does a 10 m/s coarse pass over
    +/-100 m/s around the configured value, then a 1 m/s fine pass around
    the coarse optimum.

    :param dataset_name: dataset whose arim conf is loaded.
    :param save: write plots and conf.d/30_block_velocities.yaml when True.
    :param noshow: close figures instead of showing them interactively.
    :raises IndefiniteVelocityError: if a coarse optimum sits on a range bound.
    :return: (l_vel_opt, t_vel_opt) in m/s.
    """
    conf = arim.io.load_conf(dataset_name)
    # conf["frontwall"]["numpoints"] = 1000
    # conf["backwall"]["numpoints"] = 1000
    root_dir = conf["root_dir"]
    result_dir = conf["result_dir"]
    frame = common.load_frame(
        conf, apply_filter=True, expand=True, warn_if_fallback_vel=False
    )
    # rectify: the search maximises positive echo amplitudes
    frame.scanlines = np.abs(frame.scanlines)
    # === L velocity ===
    # First pass
    base_l_vel = (
        conf["block_material"]["longitudinal_vel"] // 10
    ) * 10  # make round numbers
    l_vel_range_1 = np.arange(base_l_vel - 100, base_l_vel + 100.1, 10.0)
    intensities_1 = _measure_l_vel(conf, frame, l_vel_range_1)
    l_vel_1_idx = intensities_1.values.argmax()
    if l_vel_1_idx == 0 or l_vel_1_idx == (len(l_vel_range_1) - 1):
        # we're on a bound, that's bad
        raise IndefiniteVelocityError
    # Second pass
    l_vel_range_2 = np.arange(
        l_vel_range_1[l_vel_1_idx - 1] + 1, l_vel_range_1[l_vel_1_idx + 1], 1.0
    )
    intensities_2 = _measure_l_vel(conf, frame, l_vel_range_2)
    # agregate results
    intensities = pd.concat([intensities_1, intensities_2]).sort_index()
    l_vel_opt = intensities.idxmax()
    logger.info(f"Optimal L velocitiy: {l_vel_opt} m/s")
    # the T search below ray-traces with the newly found L velocity
    conf["block_material"]["longitudinal_vel"] = l_vel_opt
    # plot
    plt.figure()
    plt.plot(intensities.index, intensities, ".-")
    plt.xlabel("L velocitiy (m/s)")
    plt.ylabel("Backwall LL intensity")
    plt.title(f"Optimum: {l_vel_opt}")
    if save:
        plt.savefig(result_dir / "velocity_L")
    # === T velocity ===
    # First pass
    base_t_vel = (
        conf["block_material"]["transverse_vel"] // 10
    ) * 10  # make round numbers
    t_vel_range_1 = np.arange(base_t_vel - 100, base_t_vel + 100.1, 10.0)
    intensities_1 = _measure_t_vel(conf, frame, t_vel_range_1)
    t_vel_1_idx = intensities_1.values.argmax()
    if t_vel_1_idx == 0 or t_vel_1_idx == (len(t_vel_range_1) - 1):
        # we're on a bound, that's bad
        raise IndefiniteVelocityError
    # Second pass
    t_vel_range_2 = np.arange(
        t_vel_range_1[t_vel_1_idx - 1] + 1, t_vel_range_1[t_vel_1_idx + 1], 1.0
    )
    intensities_2 = _measure_t_vel(conf, frame, t_vel_range_2)
    # agregate results
    intensities = pd.concat([intensities_1, intensities_2]).sort_index()
    t_vel_opt = intensities.idxmax()
    logger.info(f"Optimal T velocitiy: {t_vel_opt} m/s")
    conf["block_material"]["transverse_vel"] = t_vel_opt
    # plot
    plt.figure()
    plt.plot(intensities.index, intensities, ".-")
    plt.xlabel("T velocitiy (m/s)")
    plt.ylabel("Backwall LT intensity")
    plt.title(f"Optimum: {t_vel_opt}")
    if save:
        plt.savefig(result_dir / "velocity_T")
    if save:
        # Save velocities as conf file
        block_conf = dict(
            longitudinal_vel=float(l_vel_opt),
            transverse_vel=float(t_vel_opt),
            metadata=dict(source="Velocities measured from TFM", is_fallback=False),
        )
        block_conf2 = dict(block_material=block_conf)
        with (root_dir / "conf.d/30_block_velocities.yaml").open("w") as f:
            f.write("# generated by measure_velocities_from_timetraces.py\n")
            yaml.dump(block_conf2, f, default_flow_style=False)
    if noshow:
        plt.close("all")
    else:
        plt.show()
    return l_vel_opt, t_vel_opt
if __name__ == "__main__":
    # CLI entry point: dataset name plus --save/--noshow flags shared with
    # the other scripts in this package (see common.argparser).
    args = common.argparser(__doc__).parse_args()
    dataset_name = args.dataset_name
    save = args.save
    l_vel_opt, t_vel_opt = measure_velocities_from_timetraces(
        dataset_name, save, noshow=args.noshow
    )
| nbud/arimtoolkit | arimtoolkit/measure_velocities_from_timetraces.py | measure_velocities_from_timetraces.py | py | 6,801 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "logging.getLo... |
# Demo: obtain a grayscale version of an RGB image three ways and compare.
import cv2
import numpy as np

# NOTE(review): img1 (cameraman) is loaded but never used below.
img1 = cv2.imread('pratica02/cameraman.tif', cv2.IMREAD_UNCHANGED)
img2 = cv2.imread('pratica02/morangos.tif', cv2.IMREAD_UNCHANGED)
# OpenCV's built-in conversion (weighted BT.601 luma, not an arithmetic mean)
imgCinza = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# OpenCV loads images in BGR channel order => B=0 G=1 R=2
red2 = (img2[:, :, 2])
blue2 = (img2[:, :, 0])
green2 = (img2[:, :, 1])
stacked = np.dstack((blue2, green2, red2))
print(red2.dtype, blue2.dtype, green2.dtype)
x, y, channels = img2.shape
average = np.zeros((x, y))
# pixel-by-pixel arithmetic mean of the three channels; int() avoids uint8
# overflow when summing. The mean of the channels is one grayscale definition.
for i in range(x):
    for j in range(y):
        average[i, j] = (int(red2[i, j]) +
                         int(blue2[i, j]) + int(green2[i, j]))/3
# same approach but way simpler using numpy.mean
imgCinzaMean = np.uint8(np.mean(img2, axis=2))
average = np.uint8(average)
# the two mean-based images are identical; the cvtColor one can differ
# slightly because it weights the channels perceptually
cv2.imshow('original grayscale img', imgCinza)
cv2.imshow('gray image using for loop', average)
cv2.imshow('gray image using np.mean', imgCinzaMean)
cv2.waitKey(0)
| Vicinius/digital-image-processing | pratica02/rgb-to-grayscale.py | rgb-to-grayscale.py | py | 1,072 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
... |
74050045544 | from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
from parlai.utils.conversations import Conversations
from parlai.utils.misc import TimeLogger
import random
import tempfile
def setup_args():
    """
    Set up conversion args.

    Bug fix: --speaker-1-id previously reused the short flag '-s1id', which
    clashed with --speaker-0-id's short flag; it is now '-s2id'.
    """
    parser = ParlaiParser()
    parser.add_argument(
        '-n',
        '--num-episodes',
        default=-1,
        type=int,
        help='Total number of episodes to convert, -1 to convert all examples',
    )
    parser.add_argument(
        '-of',
        '--outfile',
        default=None,
        type=str,
        help='Output file where to save, by default will be created in /tmp',
    )
    parser.add_argument(
        '-s1id', '--speaker-0-id', type=str, help='Speaker id of agent who speaks first'
    )
    parser.add_argument(
        '-s2id',
        '--speaker-1-id',
        type=str,
        help='Speaker id of agent who speaks second',
    )
    parser.add_argument(
        '--prepended-context',
        type='bool',
        default=False,
        help='specify if the context is prepended to the first act',
    )
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
    # iterate the dataset in order so episodes are dumped deterministically
    parser.set_defaults(datatype='train:ordered')
    return parser
def dump_data(opt):
    """
    Dump task data to ACUTE-Eval.

    Iterates episodes of the configured task via a RepeatLabelAgent world and
    collects turns as pairs of {text, episode_done, id} dicts, then saves them
    with Conversations.save_conversations.
    """
    # create repeat label agent and assign it to the specified task
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)
    task = opt.get('task')
    # Default speaker ids are derived from the task name when not given on CLI.
    speaker_0_id = opt.get('speaker_0_id') or f'{task}_as_human'
    speaker_1_id = opt.get('speaker_1_id') or f'{task}_as_model'
    if opt['outfile'] is None:
        # mkstemp returns (fd, path); only the path is used here.
        outfile = tempfile.mkstemp(
            prefix='{}_{}_'.format(opt['task'], opt['datatype']), suffix='.txt'
        )[1]
    else:
        outfile = opt['outfile']
    # -1 means "all episodes"; otherwise cap at what the world actually has.
    num_episodes = (
        world.num_episodes()
        if opt['num_episodes'] == -1
        else min(opt['num_episodes'], world.num_episodes())
    )
    log_timer = TimeLogger()
    print(f'[ starting to convert, saving output to {outfile} ]')
    dialogues = []
    for _ in range(num_episodes):
        episode = []
        episode_done = False
        while not episode_done:
            world.parley()
            acts = world.get_acts()
            text = acts[0].get('text')
            split_text = text.split('\n')
            # NOTE(review): .get evaluates its default eagerly, so 'eval_labels'
            # is popped from the act even when 'labels' exists — confirm intended.
            label = random.choice(
                acts[0].get('labels', acts[0].pop('eval_labels', None))
            )
            if not episode and opt.get('prepended_context'):
                # first turn: everything except the last line is context,
                # duplicated once per speaker.
                context = split_text[:-1]
                text = split_text[-1]
                context_turn = [
                    {'text': context, 'episode_done': False, 'id': 'context'}
                    for _ in range(2)
                ]
                episode.append(context_turn)
            turn = [
                {'text': text, 'episode_done': False, 'id': speaker_0_id},
                {'text': label, 'episode_done': False, 'id': speaker_1_id},
            ]
            episode.append(turn)
            if acts[0].get('episode_done', False):
                # Mark only the final utterance of the episode as done.
                episode[-1][-1]['episode_done'] = True
                episode_done = True
        dialogues.append(episode)
        # Periodic progress logging (reuses `text` as the log line).
        if log_timer.time() > opt['log_every_n_secs']:
            text, _log = log_timer.log(world.total_parleys, world.num_examples())
            print(text)
        if world.epoch_done():
            break
    Conversations.save_conversations(dialogues, outfile, opt)
def main():
    """Entry point: parse CLI options and convert the task for ACUTE-Eval."""
    # Fixed seed so random.choice over labels is reproducible across runs.
    random.seed(42)
    opt = setup_args().parse_args()
    dump_data(opt)
if __name__ == '__main__':
    main()
| facebookresearch/ParlAI | parlai/crowdsourcing/tasks/acute_eval/dump_task_to_acute_format.py | dump_task_to_acute_format.py | py | 3,809 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "parlai.core.params.ParlaiParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "parlai.agents.repeat_label.repeat_label.RepeatLabelAgent",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "parlai.core.worlds.create_task",
"line_number": 56,
... |
13495728190 | from flask import Flask
from flask_restful import Api
from helpers.crossdomain import *
from messenger_webhook import MessengerWebhook
# Flask app: templates live in ./template, static assets served under /static.
application = Flask(__name__, template_folder='template', static_url_path='/static')
application.config.from_object(__name__)
application.config['SECRET_KEY'] = '1ex13eu103me91i-sdf'
# Treat /route and /route/ as the same endpoint.
application.url_map.strict_slashes = False
# Every REST resource answers cross-origin requests.
api = Api(application, decorators=[crossdomain(origin="*")])
api.add_resource(
    MessengerWebhook,
    # FIX: dropped the pointless f-prefix — the literals contain no placeholders.
    '/messenger-webhook',
    endpoint='/messenger-webhook'
)
def main():
    """Startup hook for the webhook service; intentionally a no-op."""
main()
if __name__ == "__main__":
    print("------------------- Local Application Start ---------------------")
    # Development server only: binds all interfaces with the debugger enabled.
    application.run(host='0.0.0.0', debug=True)
| joshwolff1/cs238-final-project | application.py | application.py | py | 717 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "messenger_webhook.MessengerWebhook",
"line_number": 13,
"usage_type": "argument"
}
] |
40086649970 |
import os
import bpy
from mathutils import Vector
import numpy as np
import sys
sys.path.append('.')
from blender import BlenderWrapper
class BlenderHelper(BlenderWrapper):
    """Scene helper on top of BlenderWrapper: camera aiming, UV utilities,
    checkerboard textures and turntable-style rendering."""
    def __init__(self):
        super().__init__()
        self.set_transparent_background()
        # Fixed output resolution (width, height) in pixels.
        self.set_image_size(1800, 1090)
    ########################################################################
    ##                              Camera                                ##
    ########################################################################
    def point_camera_to_origin(self):
        """Aim the camera at the world origin, honoring fixed roll/pitch/yaw
        overrides when they are set (None means "use the computed angle")."""
        direction = (Vector((0.0, 0.0, 0.0)) - self.cam.location).normalized()
        euler_direction = direction.to_track_quat('-Z', 'Y').to_euler()
        if self.camera_roll is not None:
            euler_direction[0] = self.camera_roll
        if self.camera_pitch is not None:
            euler_direction[1] = self.camera_pitch
        if self.camera_yaw is not None:
            euler_direction[2] = self.camera_yaw
        self.set_cam_rotation(euler_direction)
    ########################################################################
    ##                              Image                                 ##
    ########################################################################
    def set_transparent_background(self):
        """Render with an alpha channel and a fully transparent world background."""
        self.scene.render.image_settings.color_mode = 'RGBA'
        self.scene.render.film_transparent = True
        world = self.scene.world
        world_tree = bpy.data.worlds[world.name].node_tree
        background_node = world_tree.nodes.new("ShaderNodeBackground")
        # RGBA white with alpha 0 -> transparent background.
        background_node.inputs[0].default_value = (1, 1, 1, 0)
    ########################################################################
    ##                                UV                                  ##
    ########################################################################
    def fit_uv_to_bounds(self, active_ob=None):
        """Pack the UV islands of active_ob (default: last object) into [0,1]."""
        if active_ob is None:
            active_ob = self.obj_list[-1]
        self.select_object(active_ob)
        self.toggle_object_edit_mode()
        self.select_all_uv()
        bpy.ops.uv.pack_islands()
        self.deselect_all_uv()
        self.deselect_all_objects()
    def save_uv_layout(self, image_name, active_ob=None):
        """Export the UV layout of active_ob to image_name (fully transparent fill)."""
        if active_ob is None:
            active_ob = self.obj_list[-1]
        self.select_object(active_ob)
        self.toggle_object_edit_mode()
        bpy.ops.uv.export_layout(filepath=image_name, export_all=True, opacity=0.0)
        self.deselect_all_objects()
    def scale_uv_coords(self, scale, axis, mapping_node_name='mapping'):
        """Scale UV coordinates along `axis` via the named mapping node."""
        mapping_node = self.nodes[mapping_node_name]
        mapping_node.inputs['Scale'].default_value[axis] = scale
    def rotate_uv_coords(self, rotation, axis, mapping_node_name='mapping'):
        """Rotate UV coordinates around `axis` via the named mapping node."""
        mapping_node = self.nodes[mapping_node_name]
        mapping_node.inputs['Rotation'].default_value[axis] = rotation
    def flip_uv(self, axis='x', active_ob=None):
        """Mirror the UVs of active_ob along 'x' or 'y'.
        NOTE(review): any other `axis` value leaves axis_rotation unbound and
        raises UnboundLocalError — confirm callers only pass 'x'/'y'."""
        if active_ob is None:
            active_ob = self.obj_list[-1]
        self.select_object(active_ob)
        self.toggle_object_edit_mode()
        # self.select_all_uv()
        if axis == 'x':
            axis_rotation = (True, False, False)
        elif axis == 'y':
            axis_rotation = (False, True, False)
        bpy.ops.transform.mirror(constraint_axis=axis_rotation)
        # self.deselect_all_uv()
        self.deselect_all_objects()
    ########################################################################
    ##                          Checkerboard                              ##
    ########################################################################
    def attach_checkerboard_texture(self, checkerboard_type='color'):
        """Build a checkerboard material (COLOR_GRID or UV_GRID) wired through
        texcoord -> mapping -> image texture, and attach it to the last object."""
        if checkerboard_type == 'color':
            image_type = 'COLOR_GRID'
        else:
            image_type = 'UV_GRID'
        self.set_color_by_texture()
        material = self.create_new_material('checkerboard_mat')
        texture_node = self.create_node(name='texture', type='ShaderNodeTexImage', material=material)
        mapping_node = self.create_node(name='mapping', type='ShaderNodeMapping', material=material)
        text_coord_node = self.create_node(name='texcoord', type='ShaderNodeTexCoord', material=material)
        shader_inputs = self.get_shader_inputs(material=material, shader_name='Principled BSDF')
        self.link_nodes(material, shader_inputs['Base Color'], texture_node.outputs["Color"])
        self.link_nodes(material, text_coord_node.outputs['UV'], mapping_node.inputs['Location'])
        self.link_nodes(material, mapping_node.outputs['Vector'], texture_node.inputs['Vector'])
        mapping_node.vector_type = 'TEXTURE'
        image = self.create_new_image(name='uv_texture', type=image_type)
        texture_node.image = image
        self.attach_material_to_object(material, self.obj_list[-1])
    def save_texture_image(self, filename, image_name='uv_texture'):
        """Write the named generated image (default the checkerboard) to disk."""
        image = self.images[image_name]
        image.save_render(filename)
    ########################################################################
    ##                            Rendering                               ##
    ########################################################################
    def render_views_rotating(self, file_prefix, num_view=10, up_axis='Y'):
        """Render num_view frames while rotating the selection a full turn
        around up_axis; files are named <prefix>_<axis>_<index>.png."""
        rotation_step = 2.0 * np.pi /num_view
        for i in range(num_view):
            bpy.ops.transform.rotate(value=rotation_step, orient_axis=up_axis)
            filename = '{}_{}_{:02}.png'.format(file_prefix, up_axis, i)
            self.render(filename)
| luca-morreale/blender_rendering3d | source/blender_helper.py | blender_helper.py | py | 5,728 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "blender.BlenderWrapper",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "mathutils.Vector",... |
17093369252 | import itertools
# Read N boxes with values a[i], b[i] and M undirected connections.
N, M = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
# FIX: the original merged groups with repeated list scans (quadratic and
# convoluted); a union-find gives the same connected components in ~O(N + M).
parent = list(range(N + 1))
def _find(x):
    """Root of x's component, with path halving."""
    while parent[x] != x:
        parent[x] = parent[parent[x]]
        x = parent[x]
    return x
def _union(x, y):
    """Merge the components containing x and y."""
    rx, ry = _find(x), _find(y)
    if rx != ry:
        parent[ry] = rx
for _ in range(M):
    c, d = map(int, input().split())
    _union(c, d)
# Per-component sums of a and b; the answer is Yes iff they match everywhere,
# because operations can only move value within a connected component.
sums = {}
for i in range(1, N + 1):
    root = _find(i)
    sa, sb = sums.get(root, (0, 0))
    sums[root] = (sa + a[i - 1], sb + b[i - 1])
if all(sa == sb for sa, sb in sums.values()):
    print('Yes')
else:
    print('No')
| kmdkuk/myAtCoder | acr106/b/main.py | main.py | py | 1,414 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.chain.from_iterable",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 16,
"usage_type": "attribute"
}
] |
31705158569 | from hx711 import HX711
from time import sleep, strftime
from datetime import datetime, timedelta
from datetime import timezone
from output import Output
import numpy
import os
import logging
fileRW = Output()  # shared CSV reader/writer helper
logger = logging.getLogger(__name__)
class sensor:
    """HX711 load-cell interface: tare handling, timestamped reads, CSV
    logging and IQR-filtered averaging of recorded weights."""
    def __init__(self):
        # Sets up scales; the HX711 handle is module-global so every method
        # (and every instance) shares the single physical device.
        global hx
        self.No_Tare = False
        hx = HX711(5, 6)
        hx.set_reading_format("LSB", "MSB")
        hx.set_reference_unit(451)  # calibrated on 1000g
        hx.reset()
        hx.tare()
        # Ensure the output CSV exists. FIX: the original left the file handle
        # from open(..., "x") unclosed (fd leak) and kept a dead isfile() check.
        if not os.path.isfile("/home/pi/weight.csv"):
            open("/home/pi/weight.csv", "x").close()
        # tare_weight.csv is created on demand by store_offset (w+ mode).
    def tare_weight(self):
        """Persist the current scale offset so later reads can reuse it."""
        hx.store_offset()  # store the offset in a file
        return
    def get_time(self):
        """Return the current UTC time formatted as 'YYYY MM DD HH MM SS'."""
        d = datetime.now(timezone.utc)
        x = d.strftime("%Y %m %d %H %M %S")
        logger.debug("Raw time: %s", d)
        logger.debug("Refined time: %s", x)
        return x
    def read(self):
        """Take one weight sample; returns (timestamp, grams), clamped >= 0."""
        val = hx.get_weight_no_tare(5)  # use stored tare data
        # Clamp sensor noise: negative readings become zero.
        if val < 0:
            val = 0
        t = self.get_time()
        tup_weight = (t, val)
        # Power-cycle the ADC between samples.
        hx.power_down()
        hx.power_up()
        logger.debug("Data: %s", val)
        logger.debug("Combined: %s", tup_weight)
        sleep(0.5)
        return tup_weight
    def write(self, filename, iterations=10):
        """Append samples to /home/pi/<filename>.
        NOTE: the loop bound is inclusive, so iterations+1 rows are written;
        kept as-is for backward compatibility.
        """
        i = 0
        while i <= iterations:
            data = self.read()
            logger.debug("Data to write: %s", data)
            fileRW.write("/home/pi/" + filename, data)  # append
            i += 1
            logger.debug("this is the iteration: %s", i)
    def avrg(self, readfile, writefile):
        """Average the readings in readfile using only values strictly inside
        the inter-quartile range, write ('Average Weight', start, mean) to
        writefile, then delete readfile. Returns the written tuple."""
        sum_count = 0
        valid_number = 0  # declared before use to avoid zero division below
        count = True
        starttime = ""  # declared before use to avoid NameError
        times = fileRW.read("/home/pi/" + readfile, 0)
        data = fileRW.read("/home/pi/" + readfile, 1)
        start = times[0]  # fallback timestamp if no reading qualifies
        # FIX: numpy.float was removed in NumPy >= 1.24; builtin float is
        # the documented equivalent.
        data_array = numpy.array(data).astype(float)
        numpy_average = numpy.median(data_array)  # median of all readings (debug only)
        # FIX: the `interpolation=` keyword was removed in NumPy 2.0;
        # `method=` is its replacement (requires NumPy >= 1.22).
        Q1 = numpy.percentile(data_array, 25, method='midpoint')  # 25th percentile
        Q3 = numpy.percentile(data_array, 75, method='midpoint')
        logger.debug("Median is... %s", numpy_average)
        logger.debug("Q1 value is .... %s", Q1)
        logger.debug("Q3 value is .... %s", Q3)
        j = 0
        for i in numpy.nditer(data_array):
            if i > Q1 and i < Q3:  # only use values inside the IQR
                sum_count = sum_count + i
                valid_number = valid_number + 1
                if count == True:
                    # Remember the timestamp of the first accepted reading.
                    start = times[j]
                    count = False
                logger.debug(i)
            else:
                logger.debug("Do not use")
            j += 1
        if valid_number == 0:
            # check for zero division
            valid_number = 1
        starttime = datetime.strptime(
            start, "%Y %m %d %H %M %S"
        )  # 1st accepted (or first overall) timestamp
        # calculate average weight over the accepted readings
        sp_average = sum_count / valid_number
        tup_weight_refined = ("Average Weight", start, "%.2f" % sp_average)
        fileRW.write("/home/pi/" + writefile, tup_weight_refined)
        # delete file after use to give clean start for next average
        os.remove("/home/pi/" + readfile)
        logger.debug(sum_count)
        logger.debug(valid_number)
        logger.debug("The real average is: %s", sp_average)
        logger.debug("Start time is: %s", starttime)
        logger.debug("The combined data is: %s", tup_weight_refined)
        return tup_weight_refined  # http post this value
| tech4nature/HogPi | app/weight.py | weight.py | py | 4,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "output.Output",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "hx711.HX711",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"lin... |
11543344946 | from flask import Flask, render_template, request,jsonify
from flask_cors import CORS,cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen
import logging
import pymongo
# Log scraping progress/errors to a local file.
logging.basicConfig(filename="scrapper.log" , level=logging.INFO)
app = Flask(__name__)
@app.route("/", methods = ['GET'])
def homepage():
    """Serve the search landing page."""
    page = render_template("index.html")
    return page
@app.route("/review" , methods = ['POST' , 'GET'])
def index():
    """Scrape Flipkart reviews for the POSTed search term, store them in
    MongoDB, and render them; on GET just show the search page."""
    if request.method == 'POST':
        try:
            searchString = request.form['content'].replace(" ","")
            flip_url = "https://www.flipkart.com/search?q=" + searchString
            url_opened = urlopen(flip_url).read()
            code_beautify = bs(url_opened,'html.parser')
            bigbox = code_beautify.find_all("div",{"class":"_13oc-S"})
            filename = searchString + ".csv"
            headers = "Product, Customer Name, Rating, Heading, Comment \n"
            # FIX: the CSV handle was opened and never closed (fd leak).
            with open(filename, "w") as fw:
                fw.write(headers)
            # Collect product-page URLs from the search-result grid.
            allurl=[]
            for i in bigbox:
                a=("https://www.flipkart.com" + i.div.div.a["href"])
                allurl.append(a)
            rating = []
            short_com = []
            main_com = []
            name = []
            reviews=[]
            for j in allurl:
                opener = requests.get(j)
                opener.encoding='utf-8'
                product_link_code_bs = bs(opener.text,'html.parser')
                rating1 = product_link_code_bs.find_all("div",{"class":["col _2wzgFH","t-ZTKy _1QgsS5"]})
                shortlong_comm = product_link_code_bs.find_all("div",{"class":"_6K-7Co"})
                short = product_link_code_bs.find_all("p",{"class":"_2-N8zT"})
                comment = product_link_code_bs.find_all("div",{"class":"t-ZTKy"})
                name_tag = product_link_code_bs.find_all("div",{"class":"row _3n8db9"})
                try:
                    for i in rating1:
                        b = (i.div.div.text)
                        rating.append(b)
                except Exception:
                    b = "No rating"
                    # FIX: was logging.info("b") — it logged the literal letter.
                    logging.info(b)
                try:
                    for s in name_tag:
                        u = (s.div.p.text)
                        name.append(u)
                except Exception:
                    u = "No Name"
                    logging.info(u)
                try:
                    for y in short:
                        c = (y.text)
                        short_com.append(c)
                    # Pad with the combined nodes when counts diverge.
                    if len(short_com) != len(rating):
                        for k in shortlong_comm:
                            c = (k.text)
                            short_com.append(c)
                except Exception:
                    c = "No Short Comment"
                    logging.info(c)
                try:
                    for l in shortlong_comm:
                        d = (l.text)
                        main_com.append(d)
                    if len(main_com) != len(rating):
                        for l in comment:
                            d =(l.div.div.text)
                            main_com.append(d)
                except Exception:
                    d = "No Long Comment"
                    logging.info(d)
            for i in range (len(rating)):
                mydict = {"Search Term": searchString ,"Name" : name[i], "Rating": rating[i], "CommentHead": short_com[i],"Comment": main_com[i]}
                reviews.append(mydict)
            logging.info("log my final result {}".format(reviews))
            # SECURITY: database credentials are hard-coded in source; move
            # them to an environment variable or config file.
            client = pymongo.MongoClient("mongodb+srv://naman7374:naman7374@cluster0.ehmrlj9.mongodb.net/?retryWrites=true&w=majority")
            db =client['scrapper_eng_pwskills']
            coll_pw_eng = db['scraper_pwskills_eng']
            coll_pw_eng.insert_many(reviews)
            return render_template('result.html', reviews=reviews[0:(len(reviews)-1)])
        except Exception as e:
            # FIX: was logging.info("e") — log the actual traceback instead.
            logging.exception(e)
            return 'something is wrong 6'
    else:
        return render_template('index.html')
if __name__=="__main__":
app.run(host="0.0.0.0") | nnamanagarwal/data_science_project | new_pw_eng_scrap/app.py | app.py | py | 4,203 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.render_template... |
468491372 | import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from ...base import device
class RandomDataset(Dataset):
    """Dataset of `length` random Gaussian vectors, each of dimension `size`."""
    def __init__(self, size, length):
        # Samples are generated once up front.
        self.len = length
        self.data = torch.randn(length, size)
    def __getitem__(self, index):
        # Hand back the pre-generated sample at `index`.
        return self.data[index]
    def __len__(self):
        return self.len
def get_rand_dataloader(dim_input, n_sample, batch_size):
    """Build a shuffled DataLoader over a fresh RandomDataset.

    Returns (dataloader, dataset)."""
    dataset = RandomDataset(dim_input, n_sample)
    loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
    return loader, dataset
class Model(nn.Module):
    """Single linear layer that logs tensor shapes on every forward pass."""
    def __init__(self, dim_input, dim_output):
        super(Model, self).__init__()
        self.fc = nn.Linear(dim_input, dim_output)
    def forward(self, input):
        output = self.fc(input)
        # Same message as the multi-argument print (space-joined).
        print(f"\tIn Model: input shape {input.size()} output shape {output.size()}")
        return output
class Hook:
    """Forward hook recording the first positional input of every module call."""
    def __init__(self) -> None:
        self.input = []
    def __call__(self, module, args, out):
        assert isinstance(args, tuple)
        first_arg = args[0]
        self.input.append(first_arg)
        # Running count of recorded calls.
        print(len(self.input))
def prepare(
    dim_input=5,
    dim_output=2,
    batch_size=30,
    n_sample=100,
    hooked=False
):
    """Build a toy Model plus a random dataloader; optionally attach a Hook
    to every Linear layer. Returns (model, dataloader, dataset)."""
    model = Model(dim_input=dim_input, dim_output=dim_output)
    if hooked:
        linear_layers = (m for m in model.modules() if isinstance(m, nn.Linear))
        for layer in linear_layers:
            layer.register_forward_hook(Hook())
    dataloader, dataset = get_rand_dataloader(dim_input=dim_input, n_sample=n_sample, batch_size=batch_size)
    return model, dataloader, dataset
{
"api_name": "torch.utils.data.Dataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.randn",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch... |
32468298278 | from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from rest_framework.routers import DefaultRouter
from users import views
from .views import UserViewSet, custom_auth_token
# DRF router provides the standard CRUD routes for the user viewset.
router = DefaultRouter()
router.register(r'users', UserViewSet)
urlpatterns = router.urls
# Function-based endpoints appended after the router-generated ones.
urlpatterns += [
    # csrf_exempt: token endpoint is hit by non-browser clients without a CSRF cookie.
    url(r'^custom-auth-token/$', csrf_exempt(custom_auth_token)),
    url(r'^create-user$', views.create_user),
    url(r'^update-user-profile$', views.update_user_profile),
    url(r'^create-event-card$', views.create_event_card),
    url(r'^get-event-cards$', views.get_event_cards),
    url(r'^get-doctors$', views.get_doctors),
    url(r'^get-profile-data$', views.get_profile_data),
    url(r'^create-notification$', views.create_notification),
    url(r'^get-notifications$', views.get_notifications),
    url(r'^approve-notification$', views.approve_notification),
    url(r'^decline-notification$', views.decline_notification),
    url(r'^get-appointments$', views.get_appointments),
    url(r'^get-current-appointment$', views.get_current_appointment),
    url(r'^get-specializations$', views.get_specializations),
    url(r'^get-hospitals$', views.get_hospitals),
    url(r'^submit-feedback$', views.submit_feedback),
    url(r'^get-feedbacks$', views.get_feedbacks),
    url(r'^get-permission-for-feedback$', views.get_permission_for_feedback),
    url(r'^save-blank$', views.save_blank),
    url(r'^complete-appointment$', views.complete_appointment),
    url(r'^get-medical-records$', views.get_medical_records)
]
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.UserViewSet",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api... |
30981798805 | #!/usr/bin/env python
# <bitbar.title>Countdown</bitbar.title>
# <bitbar.version>v2.0</bitbar.version>
# <bitbar.author>Pere Albujer</bitbar.author>
# <bitbar.author.github>P4R</bitbar.author.github>
# <bitbar.desc>Shows countdown of established date.</bitbar.desc>
# <bitbar.image>https://cloud.githubusercontent.com/assets/7404532/12356787/ae62636c-bba4-11e5-8ff8-6a1eaffcbfc2.png</bitbar.image>
# <bitbar.dependencies>python</bitbar.dependencies>
from datetime import datetime
import sys
# NOTE(review): `exceptions` is a Python 2 standard module; this import fails
# on Python 3, where ValueError is a builtin — confirm the target interpreter.
from exceptions import ValueError
def dateDiffInSeconds(date1, date2):
    """Whole seconds from date1 to date2 (sub-second parts are dropped)."""
    delta = date2 - date1
    return delta.days * 24 * 3600 + delta.seconds
def daysHoursMinutesSecondsFromSeconds(seconds):
    """Split a second count into (days, hours, minutes); leftover seconds
    are intentionally discarded."""
    total_minutes = seconds // 60
    hours, minutes = divmod(total_minutes, 60)
    days, hours = divmod(hours, 24)
    return (days, hours, minutes)
def main():
    """Parse BitBar-style CLI arguments and print one countdown line per
    (label, date) pair found in sys.argv."""
    if "--help" in sys.argv:
        print(
            """
    To pass arguments to this script, you can create a separate sh file and execute the main script with it.
    Available Args:
    --bar-title: This will appear as the first line in the output. The default is 'Countdown Timer'.
    --date-format: You can provide a custom date format. The default is '%d-%m-%Y %H:%M'
    --no-cycle: If this is present in the arguments, the times will not cycle.
    --help: Prints this message and exits.
    Example:
    countdown.py "--bar-title" "Custom Bar Title" "--no-cycle" "--date-format" "%d-%m-%Y" "Time #1" "17-07-2017" "Time #2" "15-08-2017"
    Script Example:
    chmod +x /Path/to/countdown.py && /Path/to/countdown.py "--bar-title" "Custom Bar Title" "--no-cycle" "--date-format" "%d-%m-%Y" "Time #1" "17-07-2017" "Time #2" "15-08-2017"
    """
        )
        return
    arg_count = len(sys.argv)
    now = datetime.now()
    date_format = '%d-%m-%Y %H:%M'
    bar_title = "Countdown Timer"
    label = ""
    time = None
    # Optional flags each consume the value that follows them, if any.
    if "--bar-title" in sys.argv:
        found_index = sys.argv.index("--bar-title")
        if len(sys.argv) > found_index + 1:
            bar_title = sys.argv[found_index + 1]
    if "--date-format" in sys.argv:
        found_index = sys.argv.index("--date-format")
        if len(sys.argv) > found_index + 1:
            date_format = sys.argv[found_index + 1]
    print(bar_title + " | font=\'Monospace\'")
    if "--no-cycle" in sys.argv:
        # BitBar separator: entries after "---" appear in the dropdown only.
        print("---")
    if arg_count == 1:
        print("""
        Please pass the correct arguments for this plugin to work.
        You can create an sh file that executes the main Python
        script file with the appropriate arguments.
        For examples, see the script file.
        """)
    # Remaining args alternate label / date; an unparseable arg becomes the
    # label for the next date.
    for index in range(1, arg_count):
        arg = sys.argv[index].strip()
        if arg == "--no-cycle":
            continue
        if arg == "--bar-title":
            continue
        # NOTE(review): the value following --bar-title is NOT skipped here,
        # so it is tried as a date and may silently become a label — confirm.
        if index > 0 and sys.argv[index - 1] == "--date-format":
            continue
        try:
            time = datetime.strptime(arg, date_format)
            print(label + ": %d d, %d h, %d m | font=\'Monospace\'" % daysHoursMinutesSecondsFromSeconds(dateDiffInSeconds(now, time)))
        except ValueError:
            label = arg
# Entry point when run by BitBar or directly from a shell.
if __name__ == "__main__":
    main()
| damncabbage/dotfiles | macOS/BitBar/Plugins/Time/countdown.1s.py | countdown.1s.py | py | 3,243 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",... |
22041191064 | from matplotlib import pyplot as plt
import scipy.stats as spstats
from loomio import *
from socialchoice import *
from timeseries import *
# Shared plot styling; index 0 = control series, index 1 = random-pod series.
legend_fontsize = 7
formats = ['.-', 's-']
markersizes = [9, 5]
class NetDelib(object):
    """Base plotting helper for deliberation metrics.

    Subclasses populate paired TimeSeriesResult attributes
    (<metric>_control / <metric>_random) and set self.title; the plot_*
    convenience methods below render them over the four rounds.
    """
    def __init__(self):
        # When True, plot the per-round mean instead of the raw y value.
        self.plot_mean = False
    def plot_errorbar(self, data, data_labels, ylabel, title):
        """Plot each series with 95% error bars against round number."""
        for i, d in enumerate(data):
            if self.plot_mean:
                y = d.mean()
            else:
                y = d.y()
            plt.errorbar(range(4), y, yerr=d.yerr95(), label=data_labels[i], capsize=6)
        plt.xticks(range(4))
        plt.xlabel('Round')
        plt.ylabel(ylabel)
        plt.grid()
        plt.legend(fontsize=legend_fontsize)
        plt.title(title)
    def plot(self, data, data_labels, ylabel, title, ylim=None):
        """Plot each series as a line (module-level formats/markersizes)."""
        for i, d in enumerate(data):
            if self.plot_mean:
                y = d.mean()
            else:
                y = d.y()
            plt.plot(range(4), y, formats[i], label=data_labels[i], markersize=markersizes[i])
        plt.xticks(range(4))
        plt.xlabel('Round')
        plt.ylabel(ylabel)
        if ylim:
            plt.ylim(ylim)
        plt.grid()
        plt.legend(fontsize=legend_fontsize)
        plt.title(title)
    def plot_kendall(self, ylim=None):
        # Requires kendall_control / kendall_random set by a subclass.
        self.plot(
            [self.kendall_control, self.kendall_random],
            ['Control', 'Random-Pod'],
            'Kendall Correlation',
            self.title + ' (Kendall)', ylim=ylim)
    def plot_spearman(self, ylim=None):
        self.plot(
            [self.spearman_control, self.spearman_random],
            ['Control', 'Random-Pod'],
            'Spearman Correlation',
            self.title + ' (Spearman)', ylim=ylim)
    def plot_ballot(self, ylim=None):
        self.plot(
            [self.ballot_control, self.ballot_random],
            ['Control', 'Random-Pod'],
            'Ballot Correlation',
            self.title + ' (Ballot)', ylim=ylim)
    def plot_crossing(self, ylim=None):
        self.plot(
            [self.crossing_control, self.crossing_random],
            ['Control', 'Random-Pod'],
            'Crossing Correlation',
            self.title + ' (Crossing)', ylim=ylim)
    def plot_tideman(self, ylim=None):
        self.plot(
            [self.tideman_control, self.tideman_random],
            ['Control', 'Random-Pod'],
            'Tideman Fraction',
            self.title + ' (Tideman)', ylim=ylim)
class NetDelibAgreement(NetDelib):
    """Within-round agreement across all participants, for each treatment."""
    def __init__(self, df_score):
        super().__init__()
        self.title = 'Agreement'
        # 0 disables bootstrap resampling (no sampled error bars).
        self.bootstrap_runs = 0
        # One Profile per round (stage), split by treatment (1=control, 2=random).
        self.control_profiles = [
            Profile.from_score(df_score[(df_score.stage == stage) & (df_score.treatment == 1)])
            for stage in sorted(set(df_score.stage))]
        self.random_profiles = [
            Profile.from_score(df_score[(df_score.stage == stage) & (df_score.treatment == 2)])
            for stage in sorted(set(df_score.stage))]
        self.kendall_control = TimeSeriesResult()
        self.spearman_control = TimeSeriesResult()
        self.tideman_control = TimeSeriesResult()
        self.ballot_control = TimeSeriesResult()
        self.crossing_control = TimeSeriesResult()
        print('Single Group')
        print('round\tkendall\tspearman\ttideman\tballot\tcrossing')
        for stage, profile in enumerate(self.control_profiles):
            # Point estimates for each agreement metric at this round.
            self.kendall_control.add_y(profile.agreement_kendall())
            self.spearman_control.add_y(profile.agreement_spearman())
            self.tideman_control.add_y(Tideman(profile).agreement_tideman())
            self.ballot_control.add_y(profile.agreement_ballot())
            self.crossing_control.add_y(profile.agreement_crossing())
            # Optional bootstrap resamples for confidence intervals.
            for run in range(self.bootstrap_runs):
                p = profile.sample_bootstrap()
                self.kendall_control.add_sample(stage, p.agreement_kendall())
                self.spearman_control.add_sample(stage, p.agreement_spearman())
                self.tideman_control.add_sample(stage, Tideman(p).agreement_tideman())
                self.ballot_control.add_sample(stage, p.agreement_ballot())
                self.crossing_control.add_sample(stage, p.agreement_crossing())
            print("{}\t{}\t{}\t{}\t{}\t{}".format(
                stage,
                self.kendall_control.y()[stage],
                self.spearman_control.y()[stage],
                self.tideman_control.y()[stage],
                self.ballot_control.y()[stage],
                self.crossing_control.y()[stage]))
        self.kendall_random = TimeSeriesResult()
        self.spearman_random = TimeSeriesResult()
        self.tideman_random = TimeSeriesResult()
        self.ballot_random = TimeSeriesResult()
        self.crossing_random = TimeSeriesResult()
        print('\nRandom Pod')
        print('round\tkendall\tspearman\ttideman\tballot\tcrossing')
        for stage, profile in enumerate(self.random_profiles):
            self.kendall_random.add_y(profile.agreement_kendall())
            self.spearman_random.add_y(profile.agreement_spearman())
            self.tideman_random.add_y(Tideman(profile).agreement_tideman())
            self.ballot_random.add_y(profile.agreement_ballot())
            self.crossing_random.add_y(profile.agreement_crossing())
            for run in range(self.bootstrap_runs):
                p = profile.sample_bootstrap()
                self.kendall_random.add_sample(stage, p.agreement_kendall())
                self.spearman_random.add_sample(stage, p.agreement_spearman())
                self.tideman_random.add_sample(stage, Tideman(p).agreement_tideman())
                self.ballot_random.add_sample(stage, p.agreement_ballot())
                self.crossing_random.add_sample(stage, p.agreement_crossing())
            print("{}\t{}\t{}\t{}\t{}\t{}".format(
                stage,
                self.kendall_random.y()[stage],
                self.spearman_random.y()[stage],
                self.tideman_random.y()[stage],
                self.ballot_random.y()[stage],
                self.crossing_random.y()[stage]))
class NetDelibInitEvolution(NetDelib):
    """Per-participant agreement between each round's preference and that
    participant's initial (round-0) preference, for each treatment."""
    def __init__(self, df_score):
        super().__init__()
        self.title = 'Agreement w/ Initial'
        self.bootstrap_runs = 10
        self.plot_mean = True
        df_control = df_score[df_score.treatment == 1]
        df_random = df_score[df_score.treatment == 2]
        self.control_collection = make_preference_sequence_collection(df_control)
        self.random_collection = make_preference_sequence_collection(df_random)
        # --- Control treatment ---
        participant_ids = self.control_collection.participant_ids()
        self.kendall_control = TimeSeriesResult()
        self.spearman_control = TimeSeriesResult()
        self.ballot_control = TimeSeriesResult()
        self.crossover_control = TimeSeriesResult()
        for stage in range(0, 4):
            for participant_id in participant_ids:
                initial_preference = self.control_collection[participant_id][0]
                try:
                    stage_preference = self.control_collection[participant_id][stage]
                    r, p = spstats.kendalltau(initial_preference.ranks(), stage_preference.ranks())
                    self.kendall_control.add_sample(stage, r)
                    r, p = spstats.spearmanr(initial_preference.ranks(), stage_preference.ranks())
                    self.spearman_control.add_sample(stage, r)
                    r = 1 - 2 * initial_preference.ballot_dissimilarity(stage_preference)
                    self.ballot_control.add_sample(stage, r)
                    r = 1 - 2 * initial_preference.crossover_dissimilarity(stage_preference)
                    self.crossover_control.add_sample(stage, r)
                except Exception:
                    # Participant missing from this round: skip (was a bare except).
                    continue
        # --- Random-pod treatment ---
        participant_ids = self.random_collection.participant_ids()
        self.kendall_random = TimeSeriesResult()
        self.spearman_random = TimeSeriesResult()
        self.ballot_random = TimeSeriesResult()
        self.crossover_random = TimeSeriesResult()
        for stage in range(0, 4):
            for participant_id in participant_ids:
                initial_preference = self.random_collection[participant_id][0]
                try:
                    stage_preference = self.random_collection[participant_id][stage]
                    r, p = spstats.kendalltau(initial_preference.ranks(), stage_preference.ranks())
                    self.kendall_random.add_sample(stage, r)
                    r, p = spstats.spearmanr(initial_preference.ranks(), stage_preference.ranks())
                    self.spearman_random.add_sample(stage, r)
                    r = 1 - 2 * initial_preference.ballot_dissimilarity(stage_preference)
                    self.ballot_random.add_sample(stage, r)
                    r = 1 - 2 * initial_preference.crossover_dissimilarity(stage_preference)
                    # FIX: this sample was computed but never recorded (the
                    # control branch records it); crossover_random stayed empty.
                    self.crossover_random.add_sample(stage, r)
                except Exception:
                    continue
{
"api_name": "matplotlib.pyplot.errorbar",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
26661454366 | import datetime
import http
import json
import sys
import time
from http.client import HTTPSConnection
import discord
from discord.ext import commands, tasks
import settings
def current_time_string():
    """Current UTC wall-clock time as 'HH:MM:SS' (used to stamp log lines)."""
    # FIX: datetime.utcfromtimestamp(time.time()) is deprecated since
    # Python 3.12; an aware UTC datetime yields the identical string.
    return datetime.datetime.now(datetime.timezone.utc).strftime('%H:%M:%S')
class TwitchCog(commands.Cog, name="Twitch"):
    def __init__(self, bot):
        # Discord bot instance this cog is attached to.
        self.bot = bot
        # Stream-state memory so only online/offline transitions are announced.
        self.was_previously_online = False
        self.nextUpdateAllowedAt = 0
        # poll_thread is presumably a discord.ext.tasks loop defined later in
        # this class (not visible here) — TODO confirm.
        if not self.poll_thread.is_running():
            self.poll_thread.start()
    def cog_unload(self):
        # Stop the background polling loop when the cog is removed.
        self.poll_thread.cancel()
def get_twitch_user_by_name(self, usernames):
try:
if isinstance(usernames, list):
usernames = ['login={0}'.format(i) for i in usernames]
req = '/helix/users?' + '&'.join(usernames)
else:
req = '/helix/users?login=' + usernames
print(req)
connection = http.client.HTTPSConnection('api.twitch.tv', timeout=10)
connection.request('GET', req, None, headers={
'Authorization': "Bearer " + settings.read_option(settings.KEY_TWITCH_ACCESS_TOKEN, ""),
'client-id': settings.TWITCH_CLIENT_ID})
response = connection.getresponse()
print("[{}] Twitch: {}: {} {}".format(current_time_string(), req, response.status, response.reason))
if response.status == 401:
self.get_access_token()
return self.get_twitch_user_by_name(usernames)
re = response.read().decode()
j = json.loads(re)
return j
except Exception as e:
print(e, file=sys.stderr)
return e
def get_access_token(self):
try:
print("Twitch: Attempting to get access token")
connect_string = "/oauth2/token?client_id={client_id}" \
"&client_secret={client_secret}" \
"&grant_type=client_credentials".format(client_id=settings.TWITCH_CLIENT_ID,
client_secret=settings.TWITCH_CLIENT_SECRET)
auth_connection = http.client.HTTPSConnection('id.twitch.tv', timeout=10)
auth_connection.request('POST', connect_string, None)
response = auth_connection.getresponse()
print("Twitch: {}: {} {}".format(connect_string, response.status, response.reason))
re = response.read().decode()
j = json.loads(re)
print(j)
settings.write_option(settings.KEY_TWITCH_ACCESS_TOKEN, j["access_token"])
return j
except Exception as e:
print(e, file=sys.stderr)
return e
def get_streams(self, usernames):
try:
if isinstance(usernames, list):
usernames = ['user_login={0}'.format(i) for i in usernames]
req = '/helix/streams?' + '&'.join(usernames)
else:
req = '/helix/streams?user_login=' + usernames
connection = http.client.HTTPSConnection('api.twitch.tv', timeout=10)
connection.request('GET', req, None, headers={
'Authorization': "Bearer " + settings.read_option(settings.KEY_TWITCH_ACCESS_TOKEN, ""),
'client-id': settings.TWITCH_CLIENT_ID
})
response = connection.getresponse()
print("Twitch: {}: {} {}".format(req, response.status, response.reason))
if response.status == 401:
self.get_access_token()
return self.get_streams(usernames)
re = response.read().decode()
j = json.loads(re)
return j
except Exception as e:
print(e, file=sys.stderr)
return e
@tasks.loop(seconds=settings.TWITCH_POLL_RATE)
async def poll_thread(self):
if settings.read_option(settings.KEY_TWITCH_INTEGRATION, "False") == "True" \
and time.time() > self.nextUpdateAllowedAt:
try:
result_json = self.get_streams(settings.read_option(settings.KEY_TWITCH_CHANNEL, ""))
is_online = False
for stream in result_json["data"]:
if stream["user_name"] == settings.read_option(settings.KEY_TWITCH_CHANNEL, ""):
is_online = True
if not self.was_previously_online:
await self.send_message_to_channel(
settings.TWITCH_ANNOUNCEMENT_MESSAGE.format(
streamer=stream['user_name'],
stream_link="https://twitch.tv/" + stream['user_name'],
stream_description=stream['title']),
int(settings.read_option(settings.KEY_ANNOUNCEMENT_CHANNEL_TWITCH, 0)))
self.nextUpdateAllowedAt = time.time() + settings.TWITCH_POLL_COOLDOWN_MINUTES * 60
break
print("[{}] Twitch: isOnline: {}, wasPreviouslyOnline: {}".format(current_time_string(), is_online,
self.was_previously_online))
self.was_previously_online = is_online
except Exception as e:
print(e)
else:
print("[{}] Waiting to be allowed to check again".format(current_time_string()))
async def send_message_to_channel(self, string, channel_id: int):
print("Sending announcement to channel {}".format(channel_id))
channel = self.bot.get_channel(channel_id)
await channel.send(string)
@commands.command()
@commands.has_any_role("Mods", "Admin")
async def disabletwitch(self, ctx):
settings.write_option(settings.KEY_TWITCH_INTEGRATION, "False")
await ctx.send("Twitch integration disabled")
@commands.command()
@commands.has_any_role("Mods", "Admin")
async def enabletwitch(self, ctx, twitch_username):
"""Send twitch updates to this channel"""
print(str(ctx.message.channel.id))
if isinstance(ctx.message.channel, discord.TextChannel):
user_json = self.get_twitch_user_by_name(twitch_username)
if isinstance(user_json, Exception):
await ctx.send("*Error: {}*".format(str(user_json)))
return
print(user_json)
try:
print("Found userid: {}".format(user_json["data"][0]["id"]))
settings.write_option(settings.KEY_TWITCH_CHANNEL, user_json["data"][0]["display_name"])
settings.write_option(settings.KEY_ANNOUNCEMENT_CHANNEL_TWITCH, str(ctx.message.channel.id))
settings.write_option(settings.KEY_TWITCH_INTEGRATION, "True")
await ctx.send(
"Successfully set the announcement channel to: {}, I will post here when {} comes online.".format(
ctx.message.channel.name, twitch_username))
except IndexError:
await ctx.send("Could not find user {}".format(twitch_username))
except Exception as e:
await ctx.send(str(e))
else:
await ctx.send("Needs to be done in a regular channel")
return
@enabletwitch.error
async def enabletwitch_error(self, ctx, error):
if isinstance(error, commands.UserInputError):
await ctx.send('Usage: `{}enabletwitch <twitch_channel_name>` '
'\nIt must be used in a regular channel so it knows where to post announcements.'
.format(settings.CALL_CHARACTER))
| gr3ger/pyGaBot | twitch_cog.py | twitch_cog.py | py | 7,835 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "d... |
37360523175 | import glob
import re
import traceback
from pathlib import Path
from typing import List, Union
import fitz
import utils
from constants import cmd_output_path
from loguru import logger
def convert_to_image_pdf(doc_path: Union[str, List[str]], dpi: int = 300, page_range: str = "all", output_path: str = None):
    """Rasterize selected pages of one or more PDFs into image-based PDFs.

    Args:
        doc_path: a path, a glob pattern, or a list of either.
        dpi: render resolution for rasterized pages.
        page_range: page-selection string for ``utils.parse_range``
            (e.g. ``"all"``).
        output_path: explicit output file; when ``None`` each input gets
            ``<stem>-图片型.pdf`` next to it.

    Writes a success/error status JSON to ``cmd_output_path``.
    """
    try:
        # Expand glob patterns into a concrete list of input files.
        path_list = []
        if isinstance(doc_path, str):
            if "*" in doc_path:
                path_list = glob.glob(doc_path)
            else:
                path_list = [doc_path]
        elif isinstance(doc_path, list):
            for p in doc_path:
                if "*" in p:
                    path_list.extend(glob.glob(p))
                else:
                    path_list.append(p)
        for path in path_list:
            doc: fitz.Document = fitz.open(path)
            writer: fitz.Document = fitz.open()
            roi_indices = utils.parse_range(page_range, doc.page_count)
            toc = doc.get_toc(simple=True)
            logger.debug(toc)
            for page_index in range(doc.page_count):
                page = doc[page_index]
                if page_index in roi_indices:
                    # Rasterize the page and paste the bitmap onto a fresh page.
                    # The page is created only in this branch: creating it
                    # before insert_pdf() (as before) left a blank page in
                    # front of every non-rasterized page.
                    new_page = writer.new_page(width=page.rect.width, height=page.rect.height)
                    pix = page.get_pixmap(matrix=fitz.Matrix(dpi/72, dpi/72))
                    pix.set_dpi(dpi, dpi)
                    new_page.insert_image(new_page.rect, pixmap=pix)
                else:
                    # Copy the original page untouched.
                    writer.insert_pdf(doc, from_page=page_index, to_page=page_index)
            # Compute the target per input file. The previous code assigned
            # the caller's ``output_path`` inside the loop, so every later
            # input overwrote the first one's output.
            if output_path is None:
                p = Path(path)
                target = str(p.parent / f"{p.stem}-图片型.pdf")
            else:
                target = output_path
            writer.set_toc(toc)
            writer.ez_save(target)
        utils.dump_json(cmd_output_path, {"status": "success", "message": ""})
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        logger.error(traceback.format_exc())
        utils.dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
def convert_pdf2png(doc_path: str, dpi: int = 300, page_range: str = "all", output_path: str = None):
    """Export selected PDF pages as PNG images.

    Args:
        doc_path: a path, a glob pattern, or a list of either.
        dpi: render resolution.
        page_range: page-selection string for ``utils.parse_range``.
        output_path: output directory; defaults to a ``<stem>-png``
            directory next to each input file.

    Writes a success/error status JSON to ``cmd_output_path``.
    """
    try:
        # Expand glob patterns into a concrete list of input files.
        path_list = []
        if isinstance(doc_path, str):
            if "*" in doc_path:
                path_list = glob.glob(doc_path)
            else:
                path_list = [doc_path]
        elif isinstance(doc_path, list):
            for p in doc_path:
                if "*" in p:
                    path_list.extend(glob.glob(p))
                else:
                    path_list.append(p)
        for path in path_list:
            doc: fitz.Document = fitz.open(path)
            roi_indices = utils.parse_range(page_range, doc.page_count)
            p = Path(path)
            if output_path is None:
                output_dir = p.parent / f"{p.stem}-png"
                output_dir.mkdir(exist_ok=True, parents=True)
            else:
                output_dir = Path(output_path)
                output_dir.mkdir(exist_ok=True, parents=True)
            for i in roi_indices:
                page = doc[i]
                pix = page.get_pixmap(matrix=fitz.Matrix(dpi/72, dpi/72))
                pix.set_dpi(dpi, dpi)
                # 1-based page numbers in the file name for human readability.
                pix.save(str(output_dir / f"{p.stem}-page-{i+1}.png"))
        utils.dump_json(cmd_output_path, {"status": "success", "message": ""})
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        logger.error(traceback.format_exc())
        utils.dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
def convert_pdf2svg(doc_path: str, dpi: int = 300, page_range: str = "all", output_path: str = None):
    """Export selected PDF pages as SVG files.

    Args:
        doc_path: a path, a glob pattern, or a list of either.
        dpi: render resolution applied via the transform matrix.
        page_range: page-selection string for ``utils.parse_range``.
        output_path: output directory; defaults to a ``<stem>-svg``
            directory next to each input file.

    Writes a success/error status JSON to ``cmd_output_path``.
    """
    try:
        # Expand glob patterns into a concrete list of input files.
        path_list = []
        if isinstance(doc_path, str):
            if "*" in doc_path:
                path_list = glob.glob(doc_path)
            else:
                path_list = [doc_path]
        elif isinstance(doc_path, list):
            for p in doc_path:
                if "*" in p:
                    path_list.extend(glob.glob(p))
                else:
                    path_list.append(p)
        for path in path_list:
            doc: fitz.Document = fitz.open(path)
            roi_indices = utils.parse_range(page_range, doc.page_count)
            p = Path(path)
            if output_path is None:
                output_dir = p.parent / f"{p.stem}-svg"
                output_dir.mkdir(exist_ok=True, parents=True)
            else:
                output_dir = Path(output_path)
                output_dir.mkdir(exist_ok=True, parents=True)
            for i in roi_indices:
                page = doc[i]
                out = page.get_svg_image(matrix=fitz.Matrix(dpi/72, dpi/72))
                # SVG is text, so a text-mode write is correct here.
                with open(str(output_dir / f"{p.stem}-page-{i+1}.svg"), "w") as f:
                    f.write(out)
        utils.dump_json(cmd_output_path, {"status": "success", "message": ""})
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        logger.error(traceback.format_exc())
        utils.dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
def convert_svg2pdf(
    input_path: Union[str, List[str]],
    is_merge: bool = True,
    sort_method: str = 'name',
    sort_direction: str = 'asc',
    paper_size: str = "a4",
    orientation: str = "portrait",
    output_path: str = None):
    """Convert SVG (or other fitz-readable image) files to PDF.

    Args:
        input_path: a path, a glob pattern, or a list of either.
        is_merge: when True, all inputs become pages of a single PDF;
            otherwise each input becomes its own PDF.
        sort_method: page order when merging — 'custom' (given order),
            'name', 'name_digit' (numeric suffix of the stem), 'ctime',
            or 'mtime'.
        sort_direction: 'asc' or anything else for descending.
        paper_size: a fitz paper-size name, or 'same' to keep each
            image's own dimensions.
        orientation: 'portrait' or 'landscape' (ignored for 'same').
        output_path: merged output file, or output directory when not
            merging; sensible defaults are derived from the inputs.

    Writes a success/error status JSON to ``cmd_output_path``.
    """
    try:
        # Expand glob patterns into a concrete list of input files.
        path_list = []
        if isinstance(input_path, str):
            if "*" in input_path:
                path_list = glob.glob(input_path)
            else:
                path_list = [input_path]
        elif isinstance(input_path, list):
            for p in input_path:
                if "*" in p:
                    path_list.extend(glob.glob(p))
                else:
                    path_list.append(p)
        if is_merge:
            if output_path is None:
                p = Path(path_list[0])
                output_path = str(p.parent / f"{p.stem}(等)-合并.pdf")
            writer: fitz.Document = fitz.open()
            new_path_list = path_list
            if sort_method == "custom":
                if sort_direction == "asc":
                    pass
                else:
                    new_path_list = new_path_list[::-1]
            elif sort_method == "name":
                if sort_direction == "asc":
                    new_path_list.sort()
                else:
                    new_path_list.sort(reverse=True)
            elif sort_method == "name_digit":
                # Order by the trailing number in the file stem, e.g. "page-12".
                new_path_list = sorted(new_path_list, key=lambda x: int(re.search(r"\d+$", Path(x).stem).group()))
                if sort_direction == "asc":
                    pass
                else:
                    new_path_list = new_path_list[::-1]
            # create time
            elif sort_method == "ctime":
                if sort_direction == "asc":
                    new_path_list.sort(key=lambda x: Path(x).stat().st_ctime)
                else:
                    new_path_list.sort(key=lambda x: Path(x).stat().st_ctime, reverse=True)
            # modify time
            elif sort_method == "mtime":
                if sort_direction == "asc":
                    new_path_list.sort(key=lambda x: Path(x).stat().st_mtime)
                else:
                    new_path_list.sort(key=lambda x: Path(x).stat().st_mtime, reverse=True)
            for path in new_path_list:
                # Previously this was wrapped in a text-mode ``open()`` whose
                # handle was never used — fitz reads the file by path itself.
                img = fitz.open(path)
                if paper_size == "same":
                    w, h = img[0].rect.width, img[0].rect.height
                else:
                    fmt = fitz.paper_rect(f"{paper_size}-l") if orientation == "landscape" else fitz.paper_rect(paper_size)
                    w, h = fmt.width, fmt.height
                pdfbytes = img.convert_to_pdf()
                pdf = fitz.open('pdf', pdfbytes)
                page = writer.new_page(width=w, height=h)
                page.show_pdf_page(img[0].rect, pdf, 0)
            writer.save(output_path, garbage=3, deflate=True)
        else:
            if output_path is None:
                p = Path(path_list[0])
                output_dir = p.parent
            else:
                output_dir = Path(output_path)
                output_dir.mkdir(exist_ok=True, parents=True)
            for path in path_list:
                img = fitz.open(path)
                pdfbytes = img.convert_to_pdf()
                pdf = fitz.open('pdf', pdfbytes)
                savepath = str(output_dir / f"{Path(path).stem}.pdf")
                pdf.save(savepath, garbage=3, deflate=True)
        utils.dump_json(cmd_output_path, {"status": "success", "message": ""})
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        logger.error(traceback.format_exc())
        utils.dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
def convert_png2pdf(**kwargs):
    """Convert PNG images to PDF — delegates to the SVG pipeline, which
    handles any image format fitz can open. Accepts the same keyword
    arguments as :func:`convert_svg2pdf`."""
    convert_svg2pdf(**kwargs)
def convert_anydoc2pdf(input_path: str, output_path: str = None):
    """Convert any MuPDF-readable document to PDF, preserving TOC,
    metadata and (non-named) links.

    supported document types: PDF, XPS, EPUB, MOBI, FB2, CBZ, SVG

    Args:
        input_path: a path, a glob pattern, or a list of either.
        output_path: explicit output file; when ``None`` each input gets
            ``<stem>.pdf`` next to it.

    Writes a success/error status JSON to ``cmd_output_path``.
    """
    try:
        # Expand glob patterns into a concrete list of input files.
        path_list = []
        if isinstance(input_path, str):
            if "*" in input_path:
                path_list = glob.glob(input_path)
            else:
                path_list = [input_path]
        elif isinstance(input_path, list):
            for p in input_path:
                if "*" in p:
                    path_list.extend(glob.glob(p))
                else:
                    path_list.append(p)
        for path in path_list:
            doc = fitz.open(path)
            b = doc.convert_to_pdf()  # convert to pdf
            pdf = fitz.open("pdf", b)  # open as pdf
            toc = doc.get_toc()  # table of contents of input
            pdf.set_toc(toc)  # simply set it for output
            meta = doc.metadata  # read and set metadata
            if not meta["producer"]:
                meta["producer"] = "PyMuPDF" + fitz.VersionBind
            if not meta["creator"]:
                meta["creator"] = "PyMuPDF PDF converter"
            meta["modDate"] = fitz.get_pdf_now()
            meta["creationDate"] = meta["modDate"]
            pdf.set_metadata(meta)
            # now process the links
            link_cnti = 0
            link_skip = 0
            for pinput in doc:  # iterate through input pages
                links = pinput.get_links()  # get list of links
                link_cnti += len(links)  # count how many
                pout = pdf[pinput.number]  # read corresp. output page
                for l in links:  # iterate though the links
                    if l["kind"] == fitz.LINK_NAMED:  # we do not handle named links
                        # loguru formats positional args via str.format; the
                        # original call had no ``{}`` placeholders, so the
                        # page number and link were silently dropped.
                        logger.info("named link page {}: {}", pinput.number, l)
                        link_skip += 1  # count them
                        continue
                    pout.insert_link(l)  # simply output the others
            # Per-file output target. The previous code assigned the
            # caller's ``output_path`` inside the loop, so every later
            # input overwrote the first one's output.
            if output_path is None:
                p = Path(path)
                target = str(p.parent / f"{p.stem}.pdf")
            else:
                target = output_path
            pdf.save(target, garbage=4, deflate=True)
        utils.dump_json(cmd_output_path, {"status": "success", "message": ""})
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        logger.error(traceback.format_exc())
        utils.dump_json(cmd_output_path, {"status": "error", "message": traceback.format_exc()})
| kevin2li/PDF-Guru | thirdparty/convert.py | convert.py | py | 11,219 | python | en | code | 941 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 23,... |
11671695671 | import argparse
class CommonArgParser(argparse.ArgumentParser):
    """Argument parser shared by DGL-KE train/eval entry points.

    Declares dataset, training, model, and reproducibility options with
    DGL-KE's documented defaults. Fixes two issues of the original:
    Python-2-style ``super(...)`` call, and ``--batch_norm`` using
    ``type=bool`` (argparse would turn *any* non-empty string, including
    ``'False'``, into ``True``).
    """

    @staticmethod
    def _str2bool(value):
        """Parse a textual boolean for argparse.

        Raises ``argparse.ArgumentTypeError`` for unrecognized spellings
        so bad input fails loudly instead of silently becoming True.
        """
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got {!r}'.format(value))

    def __init__(self):
        super().__init__()
        # --- model & data ---
        self.add_argument('--model_name', default='TransE',
                          choices=['TransE', 'TransE_l1', 'TransE_l2', 'TransR',
                                   'RESCAL', 'DistMult', 'ComplEx', 'RotatE',
                                   'SimplE', 'ConvE', 'AttH'],
                          help='The models provided by DGL-KE.')
        self.add_argument('--data_path', type=str, default='data',
                          help='The path of the directory where DGL-KE loads knowledge graph data.')
        self.add_argument('--dataset', type=str, default='FB15k',
                          help='The name of the builtin knowledge graph. Currently, the builtin knowledge ' \
                               'graphs include FB15k, FB15k-237, wn18, wn18rr and Freebase. ' \
                               'DGL-KE automatically downloads the knowledge graph and keep it under data_path.')
        self.add_argument('--format', type=str, default='built_in',
                          help='The format of the dataset. For builtin knowledge graphs,' \
                               'the foramt should be built_in. For users own knowledge graphs,' \
                               'it needs to be raw_udd_{htr} or udd_{htr}.')
        self.add_argument('--data_files', type=str, default=None, nargs='+',
                          help='A list of data file names. This is used if users want to train KGE' \
                               'on their own datasets. If the format is raw_udd_{htr},' \
                               'users need to provide train_file [valid_file] [test_file].' \
                               'If the format is udd_{htr}, users need to provide' \
                               'entity_file relation_file train_file [valid_file] [test_file].' \
                               'In both cases, valid_file and test_file are optional.')
        self.add_argument('--delimiter', type=str, default='\t',
                          help='Delimiter used in data files. Note all files should use the same delimiter.')
        self.add_argument('--save_path', type=str, default='ckpts',
                          help='the path of the directory where models and logs are saved.')
        self.add_argument('--no_save_emb', action='store_true',
                          help='Disable saving the embeddings under save_path.')
        # --- training schedule & batching ---
        self.add_argument('--max_step', type=int, default=80000,
                          help='The maximal number of steps to train the model.' \
                               'A step trains the model with a batch of data.')
        self.add_argument('--batch_size', type=int, default=1024,
                          help='The batch size for training.')
        self.add_argument('--batch_size_eval', type=int, default=8,
                          help='The batch size used for validation and test.')
        # --- negative sampling ---
        self.add_argument('--neg_sample_size', type=int, default=256,
                          help='The number of negative samples we use for each positive sample in the training.')
        self.add_argument('--neg_deg_sample', action='store_true',
                          help='Construct negative samples proportional to vertex degree in the training.' \
                               'When this option is turned on, the number of negative samples per positive edge' \
                               'will be doubled. Half of the negative samples are generated uniformly while' \
                               'the other half are generated proportional to vertex degree.')
        self.add_argument('--neg_deg_sample_eval', action='store_true',
                          help='Construct negative samples proportional to vertex degree in the evaluation.')
        self.add_argument('--neg_sample_size_eval', type=int, default=-1,
                          help='The number of negative samples we use to evaluate a positive sample.')
        # --- evaluation ---
        self.add_argument('--eval_percent', type=float, default=1,
                          help='Randomly sample some percentage of edges for evaluation.')
        self.add_argument('--no_eval_filter', action='store_false', dest='eval_filter',
                          help='Disable filter positive edges from randomly constructed negative edges for evaluation')
        self.add_argument('--self_loop_filter', action='store_true', dest='self_loop_filter',
                          help='Disable filter triple like (head - relation - head) score for evaluation')
        self.add_argument('-log', '--log_interval', type=int, default=1000,
                          help='Print runtime of different components every x steps.')
        self.add_argument('--eval_interval', type=int, default=1,
                          help='Print evaluation results on the validation dataset every x steps' \
                               'if validation is turned on')
        self.add_argument('--test', action='store_true',
                          help='Evaluate the model on the test set after the model is trained.')
        # --- parallelism ---
        self.add_argument('--num_proc', type=int, default=1,
                          help='The number of processes to train the model in parallel.' \
                               'In multi-GPU training, the number of processes by default is set to match the number of GPUs.' \
                               'If set explicitly, the number of processes needs to be divisible by the number of GPUs.')
        self.add_argument('--num_thread', type=int, default=1,
                          help='The number of CPU threads to train the model in each process.' \
                               'This argument is used for multiprocessing training.')
        self.add_argument('--force_sync_interval', type=int, default=-1,
                          help='We force a synchronization between processes every x steps for' \
                               'multiprocessing training. This potentially stablizes the training process'
                               'to get a better performance. For multiprocessing training, it is set to 1000 by default.')
        # --- model hyper-parameters ---
        self.add_argument('--hidden_dim', type=int, default=400,
                          help='The embedding size of relation and entity')
        self.add_argument('--lr', type=float, default=0.01,
                          help='The learning rate. DGL-KE uses Adagrad to optimize the model parameters.')
        self.add_argument('-g', '--gamma', type=float, default=12.0,
                          help='The margin value in the score function. It is used by TransX and RotatE.')
        self.add_argument('-de', '--double_ent', action='store_true',
                          help='Double entitiy dim for complex number or canonical polyadic. It is used by RotatE and SimplE.')
        self.add_argument('-dr', '--double_rel', action='store_true',
                          help='Double relation dim for complex number or canonical polyadic. It is used by RotatE and SimplE')
        self.add_argument('-adv', '--neg_adversarial_sampling', action='store_true',
                          help='Indicate whether to use negative adversarial sampling.' \
                               'It will weight negative samples with higher scores more.')
        self.add_argument('-a', '--adversarial_temperature', default=1.0, type=float,
                          help='The temperature used for negative adversarial sampling.')
        self.add_argument('-rc', '--regularization_coef', type=float, default=0.000002,
                          help='The coefficient for regularization.')
        self.add_argument('-rn', '--regularization_norm', type=int, default=3,
                          help='norm used in regularization.')
        self.add_argument('-pw', '--pairwise', action='store_true',
                          help='Indicate whether to use pairwise loss function. '
                               'It compares the scores of a positive triple and a negative triple')
        self.add_argument('--loss_genre', default='Logsigmoid',
                          choices=['Hinge', 'Logistic', 'Logsigmoid', 'BCE'],
                          help='The loss function used to train KGEM.')
        self.add_argument('-m', '--margin', type=float, default=1.0,
                          help='hyper-parameter for hinge loss.')
        # args for ConvE
        self.add_argument('--tensor_height', type=int, default=10,
                          help='Tensor height for ConvE. Note hidden_dim must be divisible by it')
        # NOTE: default is the scalar 0, but values supplied on the command
        # line parse to a list because of nargs='+'; downstream code checks
        # for the literal 0.
        self.add_argument('--dropout_ratio', type=float, nargs='+', default=0,
                          help='Dropout ratio for input, conv, linear respectively. If 0 is specified, ConvE will not use dropout for that layer')
        # ``type=bool`` replaced with a real parser: bool('False') is True,
        # so '--batch_norm False' used to be silently ignored.
        self.add_argument('--batch_norm', '-bn', type=self._str2bool, default=True,
                          help='Whether use batch normalization in ConvE or not')
        self.add_argument('--label_smooth', type=float, default=.0,
                          help='use label smoothing for training.')
        # args for reproducibility
        self.add_argument('--seed', type=int, default=0,
                          help='Random seed for reproducibility')
        self.add_argument('--num_node', type=int, default=1,
                          help='Number of node used for distributed training')
        # this is used for distributed training. not implemented yet
        self.add_argument('--node_rank', type=int, default=0,
                          help='The rank of node, ranged from [0, num_node - 1]')
        # self.add_argument('--eval_chunk', type=int, default=8,
        #                   help='Number of chunk to corrupt for the whole graph to pervent OOM for evaluation. The smaller the more RAM it consumed.')
        self.add_argument('--mode', type=str, default='fit',
                          choices=['fit', 'eval'],
                          help='Whether to train the model or to evaluate.')
        # TODO: lingfei - use function to substitute brute force sampling
        self.add_argument('--init_strat', type=str, default='uniform',
                          choices=['uniform', 'xavier', 'constant'],
                          help='Initial strategy for embeddings.')
        self.add_argument('--num_workers', type=int, default=8,
                          help='Number of process to fetch data for training/validation dataset.')
        # hyper-parameter for hyperbolic embeddings
        self.add_argument('--init_scale', type=float, default=0.001,
                          help='Initialization scale for entity embedding, relation embedding, curvature, attention in hyperbolic embeddings')
        self.add_argument('--optimizer', type=str, default='Adagrad',
                          choices=['Adagrad', 'Adam'],
                          help='Optimizer for kg embeddings')
        self.add_argument('--no_save_log', action='store_false', dest='save_log',
                          help='If specified, dglke will not save log and result file to save path.')
        self.add_argument('--tqdm', action='store_true', dest='tqdm',
                          help='Use tqdm to visualize training and evaluation process. Note this might drag speed of process 0 for multi-GPU training.')
| menjarleev/dgl-ke | python/dglke/util/argparser/common_argparser.py | common_argparser.py | py | 11,343 | python | en | code | null | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 2,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.