id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4990973 | <reponame>tombenke/py-msgp
"""Test the mpa module"""
import unittest
import asyncio
from loguru import logger
from nats_messenger import Messenger
from mpa import MessageProcessorActor
from mpa.tests.config_test import (
URL,
CREDENTIALS,
CLUSTER_ID,
PRODUCER_CLIENT_ID,
CONSUMER_CLIENT_ID,
PROCESSOR_MPA_CLIENT_ID,
INBOUND_TOPIC,
OUTBOUND_TOPIC,
TEST_PAYLOAD,
)
class ProcessorMPATestCase(unittest.TestCase):
    """Integration tests for MessageProcessorActor.

    NOTE(review): these tests require a reachable NATS(-streaming) server at
    the configured URL/credentials — confirm before running in CI.
    Each test wires up three messengers: a producer publishing to
    INBOUND_TOPIC, the actor under test (which transforms inbound payloads via
    its actor function and republishes to OUTBOUND_TOPIC), and a consumer
    asserting on what arrives at OUTBOUND_TOPIC.
    """

    def test_processor_actor(self) -> None:
        """Test the MessageProcessorActor with non-durable pub/sub."""

        async def run():
            logger.debug("Setup the test consumer")
            total_consumer_messages = 0
            # Resolved once the consumer has seen both published messages.
            consumer_callback_called = asyncio.Future()

            async def consumer_callback(msg: bytes):
                nonlocal total_consumer_messages
                nonlocal consumer_callback_called
                logger.debug(f"Received a message: '{msg}'")
                # The actor must forward exactly what actor_function returned.
                self.assertEqual(actor_function_response, msg)
                total_consumer_messages += 1
                if total_consumer_messages >= 2:
                    consumer_callback_called.set_result(None)

            consumer = Messenger(
                URL, CREDENTIALS, CLUSTER_ID, CONSUMER_CLIENT_ID, logger
            )
            await consumer.open()
            await consumer.subscribe(OUTBOUND_TOPIC, callback=consumer_callback)
            logger.debug("Setup the processor actor")
            total_messages = 0
            # Resolved once the actor function has processed both messages.
            actor_function_called = asyncio.Future()
            actor_function_response = b"actor function response..."

            async def actor_function(payload: bytes) -> bytes:
                nonlocal total_messages
                nonlocal actor_function_called
                logger.debug(
                    f"Processor actor_function is called with message: '{payload}'"
                )
                self.assertEqual(TEST_PAYLOAD, payload)
                total_messages += 1
                if total_messages >= 2:
                    actor_function_called.set_result(None)
                return actor_function_response

            processor_actor = MessageProcessorActor(
                Messenger(
                    URL, CREDENTIALS, CLUSTER_ID, PROCESSOR_MPA_CLIENT_ID, logger
                ),
                INBOUND_TOPIC,
                OUTBOUND_TOPIC,
                actor_function,
                durable_in=False,
                durable_out=False,
            )
            await processor_actor.open()
            logger.debug("Send something to consume")
            producer = Messenger(
                URL, CREDENTIALS, CLUSTER_ID, PRODUCER_CLIENT_ID, logger
            )
            await producer.open()
            await producer.publish(INBOUND_TOPIC, TEST_PAYLOAD)
            await producer.publish(INBOUND_TOPIC, TEST_PAYLOAD)
            logger.debug("Wait for actor function callback and consumer callback")
            # 1-second timeout: fail fast if either side never fires twice.
            await asyncio.wait_for(actor_function_called, 1)
            await asyncio.wait_for(consumer_callback_called, 1)
            # Shut down the processor actor, the consumer and the producer
            await processor_actor.close()
            await consumer.close()
            await producer.close()

        asyncio.run(run())

    def test_processor_actor_durable(self) -> None:
        """Test the MessageProcessorActor with durable pub/sub.

        Mirrors test_processor_actor but uses subscribe_durable /
        publish_durable and durable_in/durable_out=True on the actor.
        """

        async def run():
            logger.debug("Setup the test consumer")
            total_consumer_messages = 0
            consumer_callback_called = asyncio.Future()

            async def consumer_callback(msg: bytes):
                nonlocal total_consumer_messages
                nonlocal consumer_callback_called
                logger.debug(f"Received a message: '{msg}'")
                self.assertEqual(actor_function_response, msg)
                total_consumer_messages += 1
                if total_consumer_messages >= 2:
                    consumer_callback_called.set_result(None)

            consumer = Messenger(
                URL, CREDENTIALS, CLUSTER_ID, CONSUMER_CLIENT_ID, logger
            )
            await consumer.open()
            await consumer.subscribe_durable(OUTBOUND_TOPIC, callback=consumer_callback)
            logger.debug("Setup the processor actor")
            total_messages = 0
            actor_function_called = asyncio.Future()
            actor_function_response = b"actor function response..."

            async def actor_function(payload: bytes) -> bytes:
                nonlocal total_messages
                nonlocal actor_function_called
                logger.debug(
                    f"Processor actor_function is called with message: '{payload}'"
                )
                self.assertEqual(TEST_PAYLOAD, payload)
                total_messages += 1
                if total_messages >= 2:
                    actor_function_called.set_result(None)
                return actor_function_response

            processor_actor = MessageProcessorActor(
                Messenger(
                    URL, CREDENTIALS, CLUSTER_ID, PROCESSOR_MPA_CLIENT_ID, logger
                ),
                INBOUND_TOPIC,
                OUTBOUND_TOPIC,
                actor_function,
                durable_in=True,
                durable_out=True,
            )
            await processor_actor.open()
            logger.debug("Send something to consume")
            producer = Messenger(
                URL, CREDENTIALS, CLUSTER_ID, PRODUCER_CLIENT_ID, logger
            )
            await producer.open()
            await producer.publish_durable(INBOUND_TOPIC, TEST_PAYLOAD)
            await producer.publish_durable(INBOUND_TOPIC, TEST_PAYLOAD)
            logger.debug("Wait for actor function callback and consumer callback")
            await asyncio.wait_for(actor_function_called, 1)
            await asyncio.wait_for(consumer_callback_called, 1)
            # Shut down the processor actor, the consumer and the producer
            await processor_actor.close()
            await consumer.close()
            await producer.close()

        asyncio.run(run())
| StarcoderdataPython |
8098739 | <filename>backend/api/models/user.py<gh_stars>0
from datetime import datetime
from core.db import Base
from sqlalchemy import Column, DateTime, Integer, String
class User(Base):
    """SQLAlchemy ORM model for an application user.

    Columns: integer primary key, username/password strings, and
    created_at/updated_at timestamps.
    """

    __tablename__ = "user"

    id = Column(Integer, primary_key=True, index=True)
    username = Column(String(250), nullable=False)
    # NOTE(review): looks like the raw password string is stored here —
    # confirm it is hashed before persistence.
    password = Column(String(250), nullable=False)
    # Pass the *callable* (not its result): ``default=datetime.now()`` was
    # evaluated once at import time, so every row got the process start time.
    # With the callable, SQLAlchemy evaluates it per INSERT.
    created_at = Column(DateTime(timezone=True), default=datetime.now)
    # NOTE(review): consider ``onupdate=datetime.now`` so this column is
    # refreshed on UPDATE; left out here to avoid changing write behavior.
    updated_at = Column(DateTime(timezone=True), default=datetime.now)
| StarcoderdataPython |
9778324 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 13:05:11 2020
@author: francesco
"""
import numpy as np
import sys
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rc
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def contourplot(xvector, yvector, xlabel, ylabel, toplot, labels, save, numberoflines, fmt, cmap, levels, lines='on'):
    """Draw one pseudocolor panel (with optional contour lines) per array in ``toplot``.

    Args:
        xvector, yvector: 1-D coordinate vectors for the meshgrid.
        xlabel, ylabel: axis labels (LaTeX strings).
        toplot: sequence of flat arrays, one per panel; all panels share the
            same color scale (global min/max over ``toplot``).
        labels: per-panel corner annotations, e.g. (a)/(b)/(c).
        save: per-panel output file names (without extension).
        numberoflines: number of auto-generated contour levels when
            ``lines == 'off'``.
        fmt: mapping from contour level to its label string.
        cmap: matplotlib colormap name.
        levels: explicit contour levels used when ``lines != 'off'``.
        lines: 'on' to draw labelled contours at ``levels``; anything else
            draws unlabelled auto-spaced contours.
    """
    X, Y = np.meshgrid(xvector, yvector)
    index = 0
    for Rinfty in toplot:
        # Bug fix: the figure/axes creation had been commented out, so the
        # uses of ``ax`` below raised NameError. Restored from the
        # previously commented-out code; the last panel is wider to leave
        # room for the colorbar.
        if index < 2:
            fig = plt.figure(figsize=(2, 2))
            plt.subplots_adjust(left=0.25, bottom=0.18, right=0.94, top=0.94, wspace=0, hspace=0)
        else:
            fig = plt.figure(figsize=(2.4, 2))
            plt.subplots_adjust(left=0.25, bottom=0.18, right=0.9, top=0.94, wspace=0, hspace=0)
        ax = fig.add_subplot(111)
        print(np.min(Rinfty), np.max(Rinfty))
        # NOTE(review): the transpose only matches the meshgrid shape when
        # len(xvector) == len(yvector) — confirm for non-square sweeps.
        Z = np.reshape(Rinfty, X.shape).T
        # Shared color scale across all panels.
        c = ax.pcolor(X, Y, Z, cmap=cmap, vmin=np.min(toplot), vmax=np.max(toplot))
        ax.tick_params(axis='both', labelsize=10)
        if index == 2:
            fig.colorbar(c, ax=ax)
        ax.text(0.05, 0.05, labels[index], transform=plt.gca().transAxes, size=10, color='k')
        ax.tick_params(axis='both', labelsize=12)
        ax.set_xlabel(xlabel, fontsize=11, labelpad=-1)
        ax.set_ylabel(ylabel, fontsize=11, labelpad=-3)
        if lines != 'off':
            # Labelled dashed contours at the caller-supplied levels.
            cset2 = ax.contour(X, Y, Z, levels, colors='k', linestyles='--')
            ax.clabel(cset2, levels, fmt=fmt, fontsize=10, colors='k')
        else:
            # Auto-spaced, unlabelled contours over a padded data range.
            levels = np.arange(np.min(Rinfty) - 0.2 * np.min(Rinfty),
                               np.max(Rinfty) + 0.2 * np.min(Rinfty),
                               (np.max(Rinfty) - np.min(Rinfty)) / numberoflines)
            cset2 = ax.contour(X, Y, Z, levels, colors='k', linestyles='--')
        # NOTE(review): save[] entries already carry a 'contourplots/' prefix,
        # so output lands in figures/contourplots/ — confirm this is intended.
        plt.savefig("figures/%s.eps" % save[index], format='eps')
        index += 1
if __name__=="__main__":
    '''
    Generates the contour plots (see figures in the paper) from the results
    of a metapop_strats run stored in contourplots/<file_name>.csv and saves
    them under figures/.
    The output name is: fixed_var_9_string1_string2
    where fixed_var is either c or duration,
    string1 is either R_infty, I_max, t_avg, and string2 is either
    local/local_to_global/global.
    '''
    # NOTE(review): argv parsing is disabled; inputs are hard-coded below.
    #file_name = sys.argv[1]
    file_name = "average_100x100_c"
    # Which variable was held fixed in the run being plotted.
    fixed_var = "c"
    ngroups = 9
    gamma = [1.0]*ngroups
    size = 100
    # Load the sweep results produced by metapop_strats.
    df = pd.read_csv('contourplots/%s.csv'%file_name)
    R_sub = np.array(df['R_sub'].tolist())
    I_sub = np.array(df['I_sub'].tolist())
    t_sub = np.array(df['t_sub'].tolist())
    R_subpop = np.array(df['R_subpop'].tolist())
    I_subpop = np.array(df['I_subpop'].tolist())
    t_subpop = np.array(df['t_subpop'].tolist())
    R_glob = np.array(df['R_glob'].tolist())
    I_glob = np.array(df['I_glob'].tolist())
    t_glob = np.array(df['t_glob'].tolist())
    # Strength of the intervention (i.e. during the intervention,
    # new R_0 = c * old R_0).
    c = [0.8]*ngroups
    tauf = 100
    # The first branch fixes c and varies the threshold and the duration.
    if fixed_var=='c':
        interventionthreshold = np.linspace(0.025,1, size)
        interventionduration = np.linspace(0.5, 10, size)
        labels = (r'$\mathbf{(a)}$',r'$\mathbf{(b)}$', r'$\mathbf{(c)}$')
        xlabel = r"$Threshold$"
        ylabel = r"$Duration$"
        levels = [0.6,0.7,0.8]
        fmt = {
            0.6: r"$0.6$",
            0.7: r"$0.7$",
            0.8: r"$0.8$",}
        save=['contourplots/%s_9_R_infty_local'%file_name, 'contourplots/%s_9_R_infty_localtoglobal'%file_name, 'contourplots/%s_9_R_infty_global'%file_name]
        toplot=(R_sub, R_subpop, R_glob)
        contourplot(interventionthreshold,interventionduration, xlabel, ylabel, toplot, labels, save, numberoflines=4, fmt = fmt,cmap = 'coolwarm',levels=levels,lines='on')
        save=['contourplots/%s_9_I_max_local'%file_name, 'contourplots/%s_9_I_max_localtoglobal'%file_name, 'contourplots/%s_9_I_max_global'%file_name]
        toplot = (I_sub, I_subpop, I_glob)
        levels=[0.04,0.06,0.08,0.1]
        fmt = {
            0.04: r"$0.04$",
            0.06: r"$0.06$",  # bug fix: level 0.06 was labelled "$0.07$"
            0.08: r"$0.08$",
            0.1 : r"$0.1$"}
        contourplot(interventionthreshold,interventionduration, xlabel, ylabel, toplot, labels, save, numberoflines=4, fmt = fmt,cmap='coolwarm',levels=levels,lines='on')
        levels=[10,15,20]
        fmt = {
            10: r"$10$",
            15: r"$15$",
            20: r"$20$"}
        save=['contourplots/%s_9_t_avg_local'%file_name, 'contourplots/%s_9_t_avg_localtoglobal'%file_name, 'contourplots/%s_9_t_avg_global'%file_name]
        toplot = (t_sub, t_subpop, t_glob)
        contourplot(interventionthreshold,interventionduration, xlabel, ylabel, toplot, labels, save, numberoflines=9, fmt = fmt,levels=levels,cmap='coolwarm')
    # The second branch fixes the duration and varies the threshold and c.
    elif fixed_var =='duration':
        # Simulations:
        interventionduration = [2]*ngroups
        cvector = np.linspace(0.1,0.9,size)
        interventionthreshold = np.linspace(0.025,1, size)
        levels = [0.75,0.8,0.85]
        fmt = {
            0.75: r"$0.75$",
            0.8: r"$0.80$",
            0.85: r"$0.85$",}
        labels = (r'$\mathbf{(d)}$',r'$\mathbf{(e)}$', r'$\mathbf{(f)}$')
        xlabel = r"$Threshold$"
        ylabel = r"$c$"
        save=['contourplots/%s_9_R_infty_local'%file_name, 'contourplots/%s_9_R_infty_localtoglobal'%file_name, 'contourplots/%s_9_R_infty_global'%file_name]
        toplot=(R_sub, R_subpop, R_glob)
        contourplot(interventionthreshold,cvector, xlabel, ylabel, toplot, labels, save,numberoflines=4, fmt = fmt,cmap='PiYG',levels=levels)
        levels=[0.07,0.08,0.09]
        fmt = {
            0.07: r"$0.07$",
            0.08: r"$0.08$",
            0.09: r"$0.09$",}
        save=['contourplots/%s_9_I_max_local'%file_name, 'contourplots/%s_9_I_max_localtoglobal'%file_name, 'contourplots/%s_9_I_max_global'%file_name]
        toplot = (I_sub, I_subpop, I_glob)
        contourplot(interventionthreshold,cvector, xlabel, ylabel, toplot, labels, save,numberoflines=3, fmt = fmt,cmap='PiYG', levels=levels)
        levels=[9,10,11,12]
        fmt = {
            9: r"$9$",
            10: r"$10$",  # bug fix: raw string r"$10" was missing the closing $
            11: r"$11$",
            12: r"$12$"}
        save=['contourplots/%s_9_t_avg_local'%file_name, 'contourplots/%s_9_t_avg_localtoglobal'%file_name, 'contourplots/%s_9_t_avg_global'%file_name]
        toplot = (t_sub, t_subpop, t_glob)
        contourplot(interventionthreshold,cvector, xlabel, ylabel, toplot, labels, save,numberoflines=7, fmt = fmt,cmap='PiYG',levels=levels)
    else:
        print("Sry cannot fix this var")
| StarcoderdataPython |
3468686 | <reponame>thorben-flapo/pandera
"""Utility functions for validation."""
from typing import Optional, Tuple, Union
import pandas as pd
def prepare_series_check_output(
    check_obj: Union[pd.Series, pd.DataFrame],
    check_output: pd.Series,
    ignore_na: bool = True,
    n_failure_cases: Optional[int] = None,
) -> Tuple[pd.Series, pd.Series]:
    """Prepare the boolean output and failure cases of a Series-level check.

    ``check_obj`` may also be a dataframe: a check function can return a
    Series computed over dataframe rows.

    :param check_obj: the validated object (Series or DataFrame).
    :param check_output: boolean Series, True where the check passed.
    :param ignore_na: treat rows containing nulls as passing.
    :param n_failure_cases: cap on reported failure cases per output group.
    :return: tuple of (possibly NA-masked) check output and failure cases.
    """
    if ignore_na:
        # Rows with any null are excluded from failure reporting.
        if isinstance(check_obj, pd.DataFrame):
            na_mask = check_obj.isna().any(axis="columns")
        else:
            na_mask = check_obj.isna()
        check_output = check_output | na_mask

    failure_cases = check_obj[~check_output]
    if n_failure_cases is not None and not failure_cases.empty:
        # Limit the number of reported cases within each output group.
        failure_cases = failure_cases.groupby(check_output).head(
            n_failure_cases
        )
    return check_output, failure_cases
def prepare_dataframe_check_output(
    check_obj: pd.DataFrame,
    check_output: pd.DataFrame,
    df_orig: Optional[pd.DataFrame] = None,
    ignore_na: bool = True,
    n_failure_cases: Optional[int] = None,
) -> Tuple[pd.Series, pd.Series]:
    """Unstack a dataframe of boolean check results.

    Boolean dataframe results are reported at the most granular level: one
    entry per (column, index) cell.

    :param check_obj: the validated dataframe.
    :param check_output: boolean dataframe, same shape as the reference.
    :param df_orig: optional original dataframe used for the NA mask.
    :param ignore_na: treat null cells as passing.
    :param n_failure_cases: cap on reported (deduplicated) failure cases.
    :return: tuple of flattened check output and failure-case records.
    """
    if df_orig is not None:
        assert df_orig.shape == check_output.shape

    # Fall back to the checked object when no original frame is supplied.
    reference = check_obj if df_orig is None else df_orig
    flat_output = check_output.unstack()
    if ignore_na:
        flat_output = flat_output | reference.unstack().isna()

    # One failure record per failing (column, index) cell.
    failure_cases = (
        check_obj.unstack()[~flat_output]
        .rename("failure_case")
        .rename_axis(["column", "index"])
        .reset_index()
    )
    if n_failure_cases is not None and not failure_cases.empty:
        failure_cases = failure_cases.drop_duplicates().head(n_failure_cases)
    return flat_output, failure_cases
| StarcoderdataPython |
4966264 | from functools import reduce
# Time: O(m * n)
# Space: O(1)
class Solution(object):
    """LeetCode 73 'Set Matrix Zeroes' in O(1) extra space."""

    # @param matrix, a list of lists of integers
    # RETURN NOTHING, MODIFY matrix IN PLACE.
    def setZeroes(self, matrix):
        """Zero out every row and column that contains a zero, in place.

        Uses the first row and first column of the matrix itself as marker
        storage, so only O(1) extra space is needed.

        Bug fix: the original used Python-2-only ``xrange`` (NameError on
        Python 3); replaced with ``range``, and the ``reduce``-based scans
        with the clearer, short-circuiting ``any``.
        """
        if not matrix or not matrix[0]:
            return
        # Remember whether the first column / first row themselves contain
        # a zero before they are repurposed as marker storage.
        first_col = any(row[0] == 0 for row in matrix)
        first_row = any(val == 0 for val in matrix[0])
        # Mark each zero cell's row/column in the first column/row.
        for i in range(1, len(matrix)):
            for j in range(1, len(matrix[0])):
                if matrix[i][j] == 0:
                    matrix[i][0], matrix[0][j] = 0, 0
        # Zero out the interior cells according to the markers.
        for i in range(1, len(matrix)):
            for j in range(1, len(matrix[0])):
                if matrix[i][0] == 0 or matrix[0][j] == 0:
                    matrix[i][j] = 0
        # Finally handle the first column and row themselves.
        if first_col:
            for i in range(len(matrix)):
                matrix[i][0] = 0
        if first_row:
            for j in range(len(matrix[0])):
                matrix[0][j] = 0
| StarcoderdataPython |
38766 | import numpy as np
import networkx as nx
import argparse
import random
from models.distance import get_dist_func
def get_fitness(solution, initial_node, node_list):
    """
    Get fitness of solution encoded by permutation.

    Args:
        solution (numpy.ndarray): Solution encoded as a permutation
        initial_node (int): Initial node in the permutation (equal to the first element - redundant)
        node_list (list): List of node IDs in network

    Returns:
        (float): Fitness of specified solution (total tour length, using the
        module-level ``dist_func``)
    """
    # Close the tour: append the return leg to the initial node.
    tour = np.hstack((solution, initial_node))
    # Sum the distance of every consecutive leg of the closed tour.
    legs = [
        dist_func(node_list[tour[idx]], node_list[tour[idx + 1]])
        for idx in range(len(tour) - 1)
    ]
    return np.sum(legs)
def get_inv_dist_mat(node_list):
    """
    Get pairwise inverse-distance matrix for specified nodes in node list.

    Args:
        node_list (list): Nodes for which to compute the pairwise inverse distances

    Returns:
        (numpy.ndarray): Symmetric matrix of pairwise inverse distances
        (zeros on the diagonal), computed with the module-level ``dist_func``
    """
    n = len(node_list)
    inv_dist = np.zeros((n, n), dtype=float)
    # Fill the upper triangle and mirror it: the metric is symmetric.
    for i in range(n - 1):
        for j in range(i + 1, n):
            value = 1 / dist_func(node_list[i], node_list[j])
            inv_dist[i, j] = value
            inv_dist[j, i] = value
    return inv_dist
def aco(network, n_ants=100, max_it=500, rho=0.1, alpha=1.0, beta=1.0, q=1.0,
        aug='relinking', p_mut=0.08, p_accept_worse=0.1, breeding_coeff=0.5):
    """
    Perform ant colony optimization to estimate solution for travelling salesman problem.

    Relies on the module-level ``dist_func`` (set in ``__main__``) through
    get_fitness and get_inv_dist_mat.

    Args:
        network (object): Networkx representation of the graph
        n_ants (int): Number of ants to use
        max_it (int): Maximum number of iterations to perform
        rho (float): Evaporation rate
        alpha (float): Pheromone matrix power in transition probability matrix construction
        beta (float): Inverse distance matrix power in transition probability matrix construction
        q (float): Pheromone trail coefficient
        aug (str): Algorithm augmentation to use. If None, use no augmentation. If equal to 'relinking' use path
        relinking method. If equal to 'genetic' use replacement of worst ants with crossovers of best ants.
        p_mut (float): Mutation probability
        p_accept_worse (float): Probability of accepting a relinked solution that is worse than original.
        breeding_coeff (float): Fraction of best ants to use in crossover and fraction of worst ants to
        replace with offspring (genetic augmentation)

    Returns:
        (tuple): Best found solution, fitness of best solution, edgelists corresponding to solutions representing
        the new global best solution.
    """
    # Check aug parameter.
    if aug is not None:
        if aug not in {'relinking', 'genetic'}:
            raise(ValueError('unknown value specified for aug parameter'))
    # Initialize list for storing edge lists (for animating). Only appended
    # to when a new global best is found.
    edgelists = []
    # Initialize list of nodes (for converting enumerations to actual node IDs).
    node_list = list(network.nodes())
    # Set initial node (all tours start/end at node index 0).
    initial_node = 0
    # Initilize best found solution.
    best_solution = {
        'fitness' : np.inf,
        'solution' : None
    }
    # Compute inverse distance matrix for locations (heuristic desirability).
    inv_dist_mat = get_inv_dist_mat(node_list)
    # Initialize pheromone matrix with a small uniform level.
    pher_mat = 0.01*np.ones_like(inv_dist_mat, dtype=float)
    # Initialize iteration index.
    it_idx = 0
    # Main iteration loop.
    while it_idx < max_it:
        # Increment iteration counter.
        it_idx += 1
        # Print iteration index and best fitness.
        print('iteration: {0}'.format(it_idx))
        print('best fitness: {0}'.format(best_solution['fitness']))
        # Initialize array for storing ant solutions.
        ant_solutions = np.empty((n_ants, len(node_list)), dtype=int)
        # Initialize array for storing ant fitness values.
        ant_fitness_vals = np.empty(n_ants, dtype=float)
        # Build transition probability matrix (unnormalized weights).
        p_mat = (pher_mat**alpha) * (inv_dist_mat**beta)
        # Run ACO step: each ant constructs a complete tour.
        for ant_idx in range(n_ants):
            # Set initial node.
            current_node = initial_node
            # Get set of unvisited nodes.
            unvisited = set(range(len(node_list)))
            unvisited.remove(initial_node)
            # Build ant's solution.
            solution_nxt = np.empty(len(node_list), dtype=int)
            solution_nxt[0] = initial_node
            for step_idx in range(len(node_list) - 1):
                unvisited_list = list(unvisited)
                # Normalize transition weights over the unvisited nodes only.
                probs = p_mat[current_node, unvisited_list] / np.sum(p_mat[current_node, unvisited_list])
                node_nxt = np.random.choice(unvisited_list, size=1, p=probs)[0]
                unvisited.remove(node_nxt)
                solution_nxt[step_idx+1] = node_nxt
                current_node = node_nxt
            # Compute fitness of solution and compare to global best.
            fitness_solution = get_fitness(solution_nxt, initial_node, node_list)
            ant_fitness_vals[ant_idx] = fitness_solution
            if fitness_solution < best_solution['fitness']:
                best_solution['fitness'] = fitness_solution
                best_solution['solution'] = solution_nxt
                solution_nxt_aug = np.hstack((solution_nxt, initial_node))
                # Store edge list (for animating).
                edgelists.append([(node_list[solution_nxt_aug[idx]], node_list[solution_nxt_aug[idx+1]])
                    for idx in range(len(solution_nxt_aug) - 1)])
            # Store ant's solution.
            ant_solutions[ant_idx, :] = solution_nxt
        # Initialize matrix for accumulating pheromones (for pheromone update).
        pher_add_mat = np.zeros_like(pher_mat, dtype=float)
        if aug == 'relinking':
            # If using relinking augmentation.
            # Go over solutions.
            for idx_solution in range(ant_solutions.shape[0]):
                # Split solution at random point.
                sec1, sec2 = np.split(ant_solutions[idx_solution], \
                        indices_or_sections=[np.random.randint(1, len(ant_solutions[idx_solution]))])
                # Relink: reverse the second section and reattach.
                solution_mod = np.hstack((sec1, list(reversed(sec2))))
                # Apply mutation (random position swap) with probability.
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(solution_mod))
                    p2 = np.random.randint(0, len(solution_mod))
                    solution_mod[[p1, p2]] = solution_mod[[p2, p1]]
                # Compute fitness value of relinked solution.
                fitness_mod = get_fitness(solution_mod, initial_node, node_list)
                # If fitness better accept. Also accept with specified probability.
                if (fitness_mod < ant_fitness_vals[idx_solution]) or (np.random.rand() < p_accept_worse):
                    ant_solutions[idx_solution, :] = solution_mod
                    ant_fitness_vals[idx_solution] = fitness_mod
        if aug == 'genetic':
            # If using genetic augmentation.
            # Sort ants ant fitness values from best to worst.
            p = ant_fitness_vals.argsort()
            ant_fitness_vals = ant_fitness_vals[p]
            ant_solutions = ant_solutions[p, :]
            # Get number of new ants and initialize array for crossovers.
            n_new_ants = int(np.ceil(breeding_coeff*ant_solutions.shape[0]))
            ant_solutions_new = np.empty((n_new_ants, ant_solutions.shape[1]), dtype=int)
            ant_fitness_vals_new = np.empty(ant_solutions_new.shape[0], dtype=float)
            # Go over solutions for which to perform crossover (pairs of
            # best ants; NOTE(review): assumes n_new_ants is even — an odd
            # value would index one past the selected range. Confirm.)
            for idx in range(0, ant_solutions_new.shape[0], 2):
                # Get solutions and cut at random point.
                ant_sol_1 = ant_solutions[idx, :]
                ant_sol_2 = ant_solutions[idx+1, :]
                c1 = ant_sol_1[:np.random.randint(1, len(ant_sol_1))]
                c2 = ant_sol_2[:np.random.randint(1, len(ant_sol_2))]
                # Append elements in second solution in order found
                # (ordered crossover keeps each offspring a valid permutation).
                offspring1 = np.hstack((c1, ant_sol_2[~np.in1d(ant_sol_2, c1)]))
                offspring2 = np.hstack((c2, ant_sol_1[~np.in1d(ant_sol_1, c2)]))
                # Apply mutations with specified probability.
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(offspring1))
                    p2 = np.random.randint(0, len(offspring1))
                    offspring1[[p1, p2]] = offspring1[[p2, p1]]
                if np.random.rand() < p_mut:
                    p1 = np.random.randint(0, len(offspring2))
                    p2 = np.random.randint(0, len(offspring2))
                    offspring2[[p1, p2]] = offspring2[[p2, p1]]
                # Set offspring and fitness values.
                ant_solutions_new[idx, :] = offspring1
                ant_solutions_new[idx+1, :] = offspring2
                ant_fitness_vals_new[idx] = get_fitness(offspring1, initial_node, node_list)
                ant_fitness_vals_new[idx+1] = get_fitness(offspring2, initial_node, node_list)
            # Replace worst ants with offspring of best.
            ant_solutions[-ant_solutions_new.shape[0]:] = ant_solutions_new
            ant_fitness_vals[-len(ant_fitness_vals_new):] = ant_fitness_vals_new
        # Compute and print diversity of solutions (normalized gap between
        # mean and best fitness; 0 means all ants found the iteration best).
        diversity = (np.mean(ant_fitness_vals) - np.min(ant_fitness_vals))/(np.max(ant_fitness_vals) - np.min(ant_fitness_vals))
        print(diversity)
        # Add pheromones to pheromone accumulation matrix (for next iteration).
        # Deposit is proportional to q / tour length, on both edge directions.
        for idx_sol, solution in enumerate(ant_solutions):
            for idx in range(len(solution)-1):
                pher_add_mat[solution[idx], solution[idx+1]] += q*(1/ant_fitness_vals[idx_sol])
                pher_add_mat[solution[idx+1], solution[idx]] += q*(1/ant_fitness_vals[idx_sol])
        # Update pheromone matrix: evaporate, then deposit.
        pher_mat = (1-rho)*pher_mat + pher_add_mat
    # Return best found solution, fitness value of best found solution and edgelist of network states
    # corresponding to global best position updates.
    return best_solution['solution'], best_solution['fitness'], edgelists
if __name__ == '__main__':
    ### PARSE ARGUMENTS ###
    parser = argparse.ArgumentParser(description='Approximate solution to TSP using ant colony optimization.')
    parser.add_argument('--num-nodes', type=int, default=50, help='Number of nodes to use')
    parser.add_argument('--dist-func', type=str, default='geodesic', choices=['geodesic', 'learned'],
            help='Distance function to use')
    parser.add_argument('--prediction-model', type=str, default='gboosting', choices=['gboosting', 'rf'],
            help='Prediction model to use for learned distance function')
    parser.add_argument('--max-it', type=int, default=100, help='Maximum iterations to perform')
    parser.add_argument('--n-ants', type=int, default=100, help='Number of ants to use')
    parser.add_argument('--rho', type=float, default=0.1, help='Evaporation rate parameter')
    parser.add_argument('--alpha', type=float, default=1.0, help='Alpha parameter in transition probability matrix update')
    parser.add_argument('--beta', type=float, default=1.0, help='Beta parameter in transition probability matrix update')
    parser.add_argument('--q', type=float, default=1.0, help='Pheromone update coefficient')
    parser.add_argument('--aug', type=str, default=None, choices=['relinking', 'genetic'], help='Augmentation to use')
    parser.add_argument('--p-mut', type=float, default=0.08, help='Mutation rate (augmentation)')
    parser.add_argument('--p-accept-worse', type=float, default=0.08,
            help='Probability of accepting a worse result of relinking (relinking augmentation)')
    parser.add_argument('--breeding-coeff', type=float, default=0.5,
            help='Fraction of best solution for which to perform crossover and fraction of worst solution to replace by offspring (genetic augmentation)')
    args = parser.parse_args()
    #######################
    # Parse problem network.
    network = nx.read_gpickle('./data/grid_data/grid_network.gpickle')
    # Number of nodes to remove from network.
    # NOTE(review): random.sample raises ValueError if --num-nodes exceeds
    # the pickled network's size (negative sample count) — confirm inputs.
    to_remove = network.number_of_nodes() - args.num_nodes
    # Remove randomly sampled nodes to get specified number of nodes.
    network.remove_nodes_from(random.sample(list(network.nodes), to_remove))
    # Get distance function. This module-level name is read by get_fitness
    # and get_inv_dist_mat, so aco() only works when run as a script.
    dist_func = get_dist_func(network, which=args.dist_func, prediction_model=args.prediction_model)
    # Get solution using ant colony optimization.
    solution_position, solution_fitness, edgelists = aco(network, n_ants=args.n_ants, max_it=args.max_it, rho=args.rho,
            alpha=args.alpha, beta=args.beta, q=args.q, aug=args.aug, p_mut=args.p_mut,
            p_accept_worse=args.p_accept_worse, breeding_coeff=args.breeding_coeff)
    # Save list of edge lists for animation.
    np.save('./results/edgelists/edgelist_tsp_ac.npy', list(map(np.vstack, edgelists)))
    nx.write_gpickle(network, './results/networks/network_tsp_ac.gpickle')
    # Print best solution fitness.
    print('Fitness of best found solution: {0:.3f}'.format(solution_fitness))
| StarcoderdataPython |
1679456 | <reponame>cfculhane/autorest.python<gh_stars>10-100
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class BaseProduct(msrest.serialization.Model):
    """The product documentation.

    NOTE: AutoRest-generated model — regeneration overwrites hand edits.

    All required parameters must be populated in order to send to Azure.

    :ivar product_id: Required. Unique identifier representing a specific product for a given
     latitude & longitude. For example, uberX in San Francisco will have a different product_id than
     uberX in Los Angeles.
    :vartype product_id: str
    :ivar description: Description of product.
    :vartype description: str
    """

    # Validation rules consumed by msrest during serialization.
    _validation = {
        "product_id": {"required": True},
    }

    # Maps Python attribute names to wire-format keys (the wire names carry
    # a flattened "base_product_" prefix).
    _attribute_map = {
        "product_id": {"key": "base_product_id", "type": "str"},
        "description": {"key": "base_product_description", "type": "str"},
    }

    def __init__(self, *, product_id: str, description: Optional[str] = None, **kwargs):
        """
        :keyword product_id: Required. Unique identifier representing a specific product for a given
         latitude & longitude. For example, uberX in San Francisco will have a different product_id than
         uberX in Los Angeles.
        :paramtype product_id: str
        :keyword description: Description of product.
        :paramtype description: str
        """
        super(BaseProduct, self).__init__(**kwargs)
        self.product_id = product_id
        self.description = description
class Error(msrest.serialization.Model):
    """Error payload model (AutoRest-generated; may self-nest via parent_error).

    :ivar status: Numeric status code of the error.
    :vartype status: int
    :ivar message: Human-readable error message.
    :vartype message: str
    :ivar parent_error: Optional enclosing error, forming an error chain.
    :vartype parent_error: ~modelflattening.models.Error
    """

    # Maps Python attribute names to wire-format keys for msrest.
    _attribute_map = {
        "status": {"key": "status", "type": "int"},
        "message": {"key": "message", "type": "str"},
        "parent_error": {"key": "parentError", "type": "Error"},
    }

    def __init__(
        self,
        *,
        status: Optional[int] = None,
        message: Optional[str] = None,
        parent_error: Optional["Error"] = None,
        **kwargs
    ):
        """
        :keyword status: Numeric status code of the error.
        :paramtype status: int
        :keyword message: Human-readable error message.
        :paramtype message: str
        :keyword parent_error: Optional enclosing error, forming an error chain.
        :paramtype parent_error: ~modelflattening.models.Error
        """
        super(Error, self).__init__(**kwargs)
        self.status = status
        self.message = message
        self.parent_error = parent_error
class Resource(msrest.serialization.Model):
    """Base ARM resource model (AutoRest-generated).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar type: Resource Type.
    :vartype type: str
    :ivar tags: A set of tags. Dictionary of :code:`<string>`.
    :vartype tags: dict[str, str]
    :ivar location: Resource Location.
    :vartype location: str
    :ivar name: Resource Name.
    :vartype name: str
    """

    # readonly fields are server-populated; msrest drops them on send.
    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "name": {"readonly": True},
    }

    # Maps Python attribute names to wire-format keys for msrest.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "location": {"key": "location", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(self, *, tags: Optional[Dict[str, str]] = None, location: Optional[str] = None, **kwargs):
        """
        :keyword tags: A set of tags. Dictionary of :code:`<string>`.
        :paramtype tags: dict[str, str]
        :keyword location: Resource Location.
        :paramtype location: str
        """
        super(Resource, self).__init__(**kwargs)
        # Server-populated (readonly) attributes start as None.
        self.id = None
        self.type = None
        self.tags = tags
        self.location = location
        self.name = None
class FlattenedProduct(Resource):
    """Flattened product (AutoRest-generated): nested 'properties.*' wire
    fields are exposed as top-level attributes.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar type: Resource Type.
    :vartype type: str
    :ivar tags: A set of tags. Dictionary of :code:`<string>`.
    :vartype tags: dict[str, str]
    :ivar location: Resource Location.
    :vartype location: str
    :ivar name: Resource Name.
    :vartype name: str
    :ivar p_name:
    :vartype p_name: str
    :ivar type_properties_type:
    :vartype type_properties_type: str
    :ivar provisioning_state_values: Possible values include: "Succeeded", "Failed", "canceled",
     "Accepted", "Creating", "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
    :vartype provisioning_state_values: str or
     ~modelflattening.models.FlattenedProductPropertiesProvisioningStateValues
    :ivar provisioning_state:
    :vartype provisioning_state: str
    """

    # readonly fields are server-populated; msrest drops them on send.
    _validation = {
        "id": {"readonly": True},
        "type": {"readonly": True},
        "name": {"readonly": True},
        "provisioning_state_values": {"readonly": True},
    }

    # Wire keys under "properties." are flattened onto this model; the
    # escaped "p\\.name" key contains a literal dot in the wire name.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "location": {"key": "location", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "p_name": {"key": "properties.p\\.name", "type": "str"},
        "type_properties_type": {"key": "properties.type", "type": "str"},
        "provisioning_state_values": {"key": "properties.provisioningStateValues", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        location: Optional[str] = None,
        p_name: Optional[str] = None,
        type_properties_type: Optional[str] = None,
        provisioning_state: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Dictionary of :code:`<string>`.
        :paramtype tags: dict[str, str]
        :keyword location: Resource Location.
        :paramtype location: str
        :keyword p_name:
        :paramtype p_name: str
        :keyword type_properties_type:
        :paramtype type_properties_type: str
        :keyword provisioning_state:
        :paramtype provisioning_state: str
        """
        super(FlattenedProduct, self).__init__(tags=tags, location=location, **kwargs)
        self.p_name = p_name
        self.type_properties_type = type_properties_type
        # Server-populated (readonly) attribute.
        self.provisioning_state_values = None
        self.provisioning_state = provisioning_state
class FlattenParameterGroup(msrest.serialization.Model):
    """Parameter group.
    All required parameters must be populated in order to send to Azure.
    :ivar name: Required. Product name with value 'groupproduct'.
    :vartype name: str
    :ivar simple_body_product: Simple body product to put.
    :vartype simple_body_product: ~modelflattening.models.SimpleProduct
    :ivar product_id: Required. Unique identifier representing a specific product for a given
     latitude & longitude. For example, uberX in San Francisco will have a different product_id than
     uberX in Los Angeles.
    :vartype product_id: str
    :ivar description: Description of product.
    :vartype description: str
    :ivar max_product_display_name: Display name of product.
    :vartype max_product_display_name: str
    :ivar capacity: Capacity of product. For example, 4 people. The only acceptable values to pass
     in are None and "Large". The default value is None.
    :vartype capacity: str
    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    :ivar odata_value: URL value.
    :vartype odata_value: str
    """
    _validation = {
        "name": {"required": True},
        "product_id": {"required": True},
    }
    # Attribute -> wire-key map for msrest (de)serialization.  The "\\."
    # in "@odata\\.value" escapes the dot so it is part of the key itself,
    # not a nested-property separator -- TODO confirm against msrest docs.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "simple_body_product": {"key": "SimpleBodyProduct", "type": "SimpleProduct"},
        "product_id": {"key": "productId", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "max_product_display_name": {"key": "max_product_display_name", "type": "str"},
        "capacity": {"key": "capacity", "type": "str"},
        "generic_value": {"key": "generic_value", "type": "str"},
        "odata_value": {"key": "@odata\\.value", "type": "str"},
    }
    def __init__(
        self,
        *,
        name: str,
        product_id: str,
        simple_body_product: Optional["SimpleProduct"] = None,
        description: Optional[str] = None,
        max_product_display_name: Optional[str] = None,
        capacity: Optional[str] = None,
        generic_value: Optional[str] = None,
        odata_value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword name: Required. Product name with value 'groupproduct'.
        :paramtype name: str
        :keyword simple_body_product: Simple body product to put.
        :paramtype simple_body_product: ~modelflattening.models.SimpleProduct
        :keyword product_id: Required. Unique identifier representing a specific product for a given
         latitude & longitude. For example, uberX in San Francisco will have a different product_id than
         uberX in Los Angeles.
        :paramtype product_id: str
        :keyword description: Description of product.
        :paramtype description: str
        :keyword max_product_display_name: Display name of product.
        :paramtype max_product_display_name: str
        :keyword capacity: Capacity of product. For example, 4 people. The only acceptable values to
         pass in are None and "Large". The default value is None.
        :paramtype capacity: str
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        :keyword odata_value: URL value.
        :paramtype odata_value: str
        """
        super(FlattenParameterGroup, self).__init__(**kwargs)
        self.name = name
        self.simple_body_product = simple_body_product
        self.product_id = product_id
        self.description = description
        self.max_product_display_name = max_product_display_name
        self.capacity = capacity
        self.generic_value = generic_value
        self.odata_value = odata_value
class GenericUrl(msrest.serialization.Model):
    """The Generic URL.
    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    """
    # Attribute -> wire-key map for msrest (de)serialization.
    _attribute_map = {
        "generic_value": {"key": "generic_value", "type": "str"},
    }
    def __init__(self, *, generic_value: Optional[str] = None, **kwargs):
        """
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        """
        super(GenericUrl, self).__init__(**kwargs)
        self.generic_value = generic_value
class ProductUrl(GenericUrl):
    """The product URL.
    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    :ivar odata_value: URL value.
    :vartype odata_value: str
    """
    # "\\." in "@odata\\.value" escapes the literal dot inside the wire key
    # -- TODO confirm against msrest serialization docs.
    _attribute_map = {
        "generic_value": {"key": "generic_value", "type": "str"},
        "odata_value": {"key": "@odata\\.value", "type": "str"},
    }
    def __init__(self, *, generic_value: Optional[str] = None, odata_value: Optional[str] = None, **kwargs):
        """
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        :keyword odata_value: URL value.
        :paramtype odata_value: str
        """
        super(ProductUrl, self).__init__(generic_value=generic_value, **kwargs)
        self.odata_value = odata_value
class ProductWrapper(msrest.serialization.Model):
    """The wrapped product.
    :ivar value: the product value.
    :vartype value: str
    """
    # "property.value" (a dotted key) maps to a nested JSON object on the wire.
    _attribute_map = {
        "value": {"key": "property.value", "type": "str"},
    }
    def __init__(self, *, value: Optional[str] = None, **kwargs):
        """
        :keyword value: the product value.
        :paramtype value: str
        """
        super(ProductWrapper, self).__init__(**kwargs)
        self.value = value
class ResourceCollection(msrest.serialization.Model):
    """ResourceCollection.
    :ivar productresource: Flattened product.
    :vartype productresource: ~modelflattening.models.FlattenedProduct
    :ivar arrayofresources:
    :vartype arrayofresources: list[~modelflattening.models.FlattenedProduct]
    :ivar dictionaryofresources: Dictionary of :code:`<FlattenedProduct>`.
    :vartype dictionaryofresources: dict[str, ~modelflattening.models.FlattenedProduct]
    """
    # msrest type syntax: "[T]" = list of T, "{T}" = str-keyed dict of T.
    _attribute_map = {
        "productresource": {"key": "productresource", "type": "FlattenedProduct"},
        "arrayofresources": {"key": "arrayofresources", "type": "[FlattenedProduct]"},
        "dictionaryofresources": {"key": "dictionaryofresources", "type": "{FlattenedProduct}"},
    }
    def __init__(
        self,
        *,
        productresource: Optional["FlattenedProduct"] = None,
        arrayofresources: Optional[List["FlattenedProduct"]] = None,
        dictionaryofresources: Optional[Dict[str, "FlattenedProduct"]] = None,
        **kwargs
    ):
        """
        :keyword productresource: Flattened product.
        :paramtype productresource: ~modelflattening.models.FlattenedProduct
        :keyword arrayofresources:
        :paramtype arrayofresources: list[~modelflattening.models.FlattenedProduct]
        :keyword dictionaryofresources: Dictionary of :code:`<FlattenedProduct>`.
        :paramtype dictionaryofresources: dict[str, ~modelflattening.models.FlattenedProduct]
        """
        super(ResourceCollection, self).__init__(**kwargs)
        self.productresource = productresource
        self.arrayofresources = arrayofresources
        self.dictionaryofresources = dictionaryofresources
class SimpleProduct(BaseProduct):
    """The product documentation.
    All required parameters must be populated in order to send to Azure.
    :ivar product_id: Required. Unique identifier representing a specific product for a given
     latitude & longitude. For example, uberX in San Francisco will have a different product_id than
     uberX in Los Angeles.
    :vartype product_id: str
    :ivar description: Description of product.
    :vartype description: str
    :ivar max_product_display_name: Display name of product.
    :vartype max_product_display_name: str
    :ivar capacity: Capacity of product. For example, 4 people. The only acceptable values to pass
     in are None and "Large". The default value is None.
    :vartype capacity: str
    :ivar generic_value: Generic URL value.
    :vartype generic_value: str
    :ivar odata_value: URL value.
    :vartype odata_value: str
    """
    _validation = {
        "product_id": {"required": True},
    }
    # Dotted keys ("details.x", "details.max_product_image.y") map flat Python
    # attributes to a nested JSON payload; "\\." escapes a literal dot inside
    # a key -- TODO confirm against msrest flattening docs.
    _attribute_map = {
        "product_id": {"key": "base_product_id", "type": "str"},
        "description": {"key": "base_product_description", "type": "str"},
        "max_product_display_name": {"key": "details.max_product_display_name", "type": "str"},
        "capacity": {"key": "details.max_product_capacity", "type": "str"},
        "generic_value": {"key": "details.max_product_image.generic_value", "type": "str"},
        "odata_value": {"key": "details.max_product_image.@odata\\.value", "type": "str"},
    }
    def __init__(
        self,
        *,
        product_id: str,
        description: Optional[str] = None,
        max_product_display_name: Optional[str] = None,
        capacity: Optional[str] = None,
        generic_value: Optional[str] = None,
        odata_value: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword product_id: Required. Unique identifier representing a specific product for a given
         latitude & longitude. For example, uberX in San Francisco will have a different product_id than
         uberX in Los Angeles.
        :paramtype product_id: str
        :keyword description: Description of product.
        :paramtype description: str
        :keyword max_product_display_name: Display name of product.
        :paramtype max_product_display_name: str
        :keyword capacity: Capacity of product. For example, 4 people. The only acceptable values to
         pass in are None and "Large". The default value is None.
        :paramtype capacity: str
        :keyword generic_value: Generic URL value.
        :paramtype generic_value: str
        :keyword odata_value: URL value.
        :paramtype odata_value: str
        """
        super(SimpleProduct, self).__init__(product_id=product_id, description=description, **kwargs)
        self.max_product_display_name = max_product_display_name
        self.capacity = capacity
        self.generic_value = generic_value
        self.odata_value = odata_value
class WrappedProduct(msrest.serialization.Model):
    """The wrapped product.
    :ivar value: the product value.
    :vartype value: str
    """
    # Attribute -> wire-key map for msrest (de)serialization.
    _attribute_map = {
        "value": {"key": "value", "type": "str"},
    }
    def __init__(self, *, value: Optional[str] = None, **kwargs):
        """
        :keyword value: the product value.
        :paramtype value: str
        """
        super(WrappedProduct, self).__init__(**kwargs)
        self.value = value
| StarcoderdataPython |
5132818 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 19 17:04:08 2021
@author: wanjinyu
"""
import numpy as np
import sklearn.svm as svm
from sklearn.model_selection import train_test_split,cross_val_score
import scipy.io as sio
import time
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from keras.layers import Dense,LSTM,Dropout
from keras.models import Sequential
from keras import optimizers
from sklearn.gaussian_process import GaussianProcessClassifier
class MLmodels(object):
    """Bundle of classifiers (SVM, transductive SVM, random forest, Gaussian
    process, feed-forward NN) behind a uniform initial*/train*/predict API.

    Each ``initial*`` method builds an estimator and stores it as an
    attribute; the matching ``train*`` method fits it.
    """

    def __init__(self):
        # Estimators are created lazily by the initial* methods.
        pass

    def initialTSVM(self, C=1.5, kernel='linear', Cl=1.5, Cu=0.0001):
        '''
        Initialize a transductive SVM.

        C: penalty coefficient of the underlying SVC
        kernel: kernel of svm
        Cl: penalty weight applied to labeled samples
        Cu: starting penalty weight for unlabeled samples; annealed upward
            toward Cl during trainTSVM
        '''
        self.C = C
        self.Cl, self.Cu = Cl, Cu
        self.kernel = kernel
        self.TSVM = svm.SVC(C=self.C, kernel=self.kernel)

    def initialSVM(self, C=1.5, kernel='linear'):
        '''
        Initialize a plain (inductive) SVM.

        C: penalty coefficient
        kernel: kernel of svm
        '''
        self.C = C
        self.kernel = kernel
        # BUG FIX: C was previously hard-coded to 1.5 here, silently
        # ignoring the caller-supplied penalty coefficient.
        self.SVM = svm.SVC(C=self.C, kernel=self.kernel)

    def initialRF(self, initial_state=0):
        '''Initialize a random forest classifier with a fixed random seed.'''
        self.rfc = RandomForestClassifier(random_state=initial_state)

    def initialGP(self, kernel, initial_state=0):
        '''Initialize a Gaussian process classifier with the given kernel.'''
        self.gpc = GaussianProcessClassifier(kernel=kernel, random_state=initial_state)

    def initialNN(self, struct, Afuc='tanh'):
        '''
        Build a fully-connected Keras network with a 2-way softmax head.

        struct = [L1, L2, L3, ...]: number of neurons in each layer; L1 is
            also used as the input dimension
        Afuc: activation function of hidden neurons
        '''
        NL = len(struct)
        self.NN = Sequential()
        for i in range(NL):
            if i == 0:
                self.NN.add(Dense(struct[0], input_dim=struct[0], activation=Afuc))
            else:
                self.NN.add(Dense(struct[i], activation=Afuc))
        self.NN.add(Dense(2, activation='softmax'))
        # NOTE(review): lowercase optimizers.adam(lr=...) is the pre-2.3
        # Keras API; newer Keras spells it optimizers.Adam(learning_rate=...).
        self.NN.compile(optimizer=optimizers.adam(lr=0.001), loss='mse', metrics=['mse'])

    def trainTSVM(self, X1, Y1, X2):
        '''
        Train the TSVM by self-training with pairwise label switching.

        X1: Labeled training data
        Y1: Labels of X1 (expected in {-1, +1}; the margin test relies on it)
        X2: Unlabeled training data
        '''
        max_step = 50  # hard cap on both the annealing and label-swap loops
        N = len(X1) + len(X2)
        # Unlabeled samples start with the (small) weight Cu.
        sample_weight = np.ones(N)
        sample_weight[len(X1):] = self.Cu
        # Bootstrap: fit on labeled data, then pseudo-label the unlabeled set.
        self.TSVM.fit(X1, np.ravel(Y1))
        Y2 = self.TSVM.predict(X2)
        Y2 = np.expand_dims(Y2, 1)
        X2_id = np.arange(len(X2))
        X3 = np.vstack([X1, X2])
        Y3 = np.vstack([Y1, Y2])
        step_1 = 1
        # Outer loop: anneal Cu upward until it reaches Cl.
        while self.Cu < self.Cl:
            step_1 = step_1 + 1
            step_2 = 1
            self.TSVM.fit(X3, np.ravel(Y3), sample_weight=sample_weight)
            if step_1 > max_step:
                break
            # Inner loop: swap the worst-margin positive/negative pair of
            # pseudo-labels while doing so can reduce the objective.
            while True:
                Y2_d = self.TSVM.decision_function(X2)  # linear: w^Tx + b
                Y2 = Y2.reshape(-1)
                epsilon = 1 - Y2 * Y2_d  # hinge slack (functional margin)
                positive_set, positive_id = epsilon[Y2 > 0], X2_id[Y2 > 0]
                negative_set, negative_id = epsilon[Y2 < 0], X2_id[Y2 < 0]
                if len(positive_set) > 0 and len(negative_set) > 0:
                    positive_max_id = positive_id[np.argmax(positive_set)]
                    a = epsilon[positive_max_id]
                    negative_max_id = negative_id[np.argmax(negative_set)]
                    b = epsilon[negative_max_id]
                    # Standard TSVM switching criterion.
                    if a > 0 and b > 0 and a + b > 2.0:
                        Y2[positive_max_id] = Y2[positive_max_id] * -1
                        Y2[negative_max_id] = Y2[negative_max_id] * -1
                        Y2 = np.expand_dims(Y2, 1)
                        Y3 = np.vstack([Y1, Y2])
                        self.TSVM.fit(X3, np.ravel(Y3), sample_weight=sample_weight)
                        step_2 = step_2 + 1
                        if step_2 > max_step:
                            break
                    else:
                        break
                else:
                    break
            # Double the unlabeled weight, clamped at Cl, and refit.
            self.Cu = min(2 * self.Cu, self.Cl)
            sample_weight[len(X1):] = self.Cu

    def trainSVM(self, X1, Y1):
        '''
        X1: Labeled training data
        Y1: Labels of X1
        '''
        self.SVM.fit(X1, np.ravel(Y1))

    def trainRF(self, X1, Y1):
        '''
        X1: Labeled training data
        Y1: Labels of X1
        '''
        self.rfc.fit(X1, np.ravel(Y1))

    def trainGP(self, X1, Y1):
        '''
        X1: Labeled training data
        Y1: Labels of X1
        '''
        self.gpc.fit(X1, np.ravel(Y1))

    def trainNN(self, X1, Y1, ephochs=3000):
        '''
        X1: Labeled training data
        Y1: Labels of X1
        ephochs: number of training epochs (misspelled name kept for
            backward compatibility with existing callers)
        '''
        self.NNhistory = self.NN.fit(X1, Y1, epochs=ephochs)

    def predict(self, model, X):
        '''
        Feed X into any of the trained estimators and return its predictions.
        '''
        return model.predict(X)
| StarcoderdataPython |
3214347 | import json
from nltk.tokenize import RegexpTokenizer
def read_raw_data(filename):
    """Read a JSON-lines file and return its records as a list of objects."""
    with open(filename, "r", encoding="utf8") as data_file:
        return [json.loads(line) for line in data_file]
def write_json_data(json_data, filename):
    """Serialize *json_data* to *filename* as indented UTF-8 JSON."""
    with open(filename, "w", encoding="utf8") as out:
        json.dump(json_data, out, indent=4)
def feature_selection(recipe_list):
    """Project raw recipe dicts down to the fields the app needs.

    For every recipe, keep the first entry of each detailed ingredient's
    "ingredients" list; malformed entries (missing key, empty list, or a
    non-indexable value) contribute nothing.  Returns a new list of dicts
    with keys: id, title, ingredients, instructions, url, photo.
    """
    recipes_list_feature_selected = []
    for i, recipe in enumerate(recipe_list):
        ingredients = []
        for ingredient_detailed in recipe["ingredient"]:
            try:
                ingredients.append(ingredient_detailed["ingredients"][0])
            except (KeyError, IndexError, TypeError):
                # Malformed ingredient entry; previously a bare except that
                # also swallowed KeyboardInterrupt etc.
                pass
        # Drop empty strings / falsy entries, as the original did.
        ingredients = list(filter(None, ingredients))
        recipes_list_feature_selected.append(
            {
                "id": i,
                "title": recipe["title"],
                "ingredients": ingredients,
                "instructions": recipe["instructions"],
                "url": recipe["url"],
                "photo": recipe["photo_url"],
            }
        )
    return recipes_list_feature_selected
def tokenization(recipe_list):
    """Normalize each recipe's ingredients to lowercase hyphen-joined tokens.

    NOTE(review): ``recipe_list_tokenized`` aliases ``recipe_list`` (no
    copy), so the input dicts are modified in place and the return value is
    the same list object -- confirm callers expect that.
    """
    tokenizer = RegexpTokenizer(r"\\w+")
    recipe_list_tokenized = recipe_list
    for recipe, recipe_tokenized in zip(recipe_list, recipe_list_tokenized):
        # Deduplicate before tokenizing; order is not preserved by set().
        ingredient_list = list(set(recipe["ingredients"]))
        ingredient_list_tokenized = []
        for ingredient in ingredient_list:
            # e.g. "Olive Oil" -> "olive-oil"
            ingredient_tokenized = "-".join(
                tokenizer.tokenize(ingredient.lower()))
            ingredient_list_tokenized.append(ingredient_tokenized)
        # Deduplicate again: distinct raw strings can tokenize identically.
        ingredient_list_tokenized = list(set(ingredient_list_tokenized))
        recipe_tokenized["ingredients"] = ingredient_list_tokenized
    return recipe_list_tokenized
def load_into_corpus_list(recipe_list):
    """Build a corpus of unique ingredient tokens across all recipes.

    Returns a list of ``{"id": int, "text": str}`` dicts, with the
    ingredient strings sorted in reverse lexicographic order and ids
    assigned in that order.
    """
    corpus_set = set()
    for recipe in recipe_list:
        corpus_set.update(recipe["ingredients"])
    # enumerate replaces the original zip(range(len(...)), ...) pairing.
    corpus_list = []
    for idx, text in enumerate(sorted(corpus_set, reverse=True)):
        corpus_list.append({"id": idx, "text": text})
    return corpus_list
def main():
    """End-to-end preprocessing: raw JSON-lines -> cleaned data + corpus files."""
    recipe_list = read_raw_data("data/raw.json")
    recipe_list = feature_selection(recipe_list)
    recipe_list = tokenization(recipe_list)
    write_json_data(recipe_list, "data/data.json")
    # The corpus is derived from the already-tokenized ingredient strings.
    corpus_list = load_into_corpus_list(recipe_list)
    write_json_data(corpus_list, "data/corpus.json")
if __name__ == "__main__":
main()
| StarcoderdataPython |
1690899 | import sys
import json
import datetime
import time
import os
from azure.eventhub import EventHubClient, Sender, EventData
# Address can be in either of these formats:
# "amqps://<URL-encoded-SAS-policy>:<URL-encoded-SAS-key>@<mynamespace>.servicebus.windows.net/myeventhub"
# "amqps://<mynamespace>.servicebus.windows.net/myeventhub"
# Event Hub connection settings, all taken from the environment.
namespace = os.environ.get('AZURE_EVENTHUB_NAMESPACE')
hubname = os.environ.get('AZURE_EVENTHUB_HUBNAME')
address = 'amqps://{}.servicebus.windows.net/{}'.format(namespace, hubname)
# SAS policy and key are not required if they are encoded in the URL
user = os.environ.get('AZURE_EVENTHUB_POLICY')
key = os.environ.get('AZURE_EVENTHUB_KEY')
print('namespace: {}'.format(namespace))
print('hubname: {}'.format(hubname))
print('address: {}'.format(address))
print('user: {}'.format(user))
# NOTE(review): this prints the SAS key (a secret) to stdout, where it can
# end up in logs -- consider removing or masking it.
print('key: {}'.format(key))
def read_json(infile):
    """Load and return the JSON document stored at path *infile*."""
    with open(infile, 'rt') as fp:
        contents = fp.read()
    return json.loads(contents)
if __name__ == "__main__":
    # Number of city events to send, taken from the first CLI argument.
    count = int(sys.argv[1])
    try:
        cities = read_json('data/city_list.json')
        print('cities: {}'.format(len(cities)))
        client = EventHubClient(address, debug=False, username=user, password=key)
        sender = client.add_sender(partition="0")
        client.run()
        try:
            start_time = time.time()
            for i in range(count):
                # Annotate each city record with a partition key and timestamp.
                msg = cities[i]
                msg['pk'] = msg['name']
                msg['time'] = str(datetime.datetime.today())
                print("Sending message: {}".format(msg))
                # NOTE(review): str(msg) produces Python repr (single quotes),
                # not JSON -- confirm consumers parse this format.
                sender.send(EventData(str(msg)))
                time.sleep(1)
        except:
            raise
        finally:
            # Always stop the client and report the elapsed wall time.
            end_time = time.time()
            client.stop()
            run_time = end_time - start_time
            print("Runtime: {} seconds".format(run_time))
    except KeyboardInterrupt:
        pass
| StarcoderdataPython |
9673079 | <filename>python/ml4ir/applications/classification/tests/test_classification_serving.py
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import models as kmodels
from ml4ir.applications.classification.pipeline import ClassificationPipeline
from ml4ir.applications.classification.tests.test_base import ClassificationTestBase
from ml4ir.base.config.keys import ServingSignatureKey
from ml4ir.base.data.relevance_dataset import RelevanceDataset
from ml4ir.base.features.preprocessing import split_and_pad_string
from ml4ir.base.model.relevance_model import RelevanceModel
class ClassificationServingTest(ClassificationTestBase):
    """Assess model serving."""
    def test_serve_default_pipeline(self):
        """
        Train a simple model and test serving flow by loading the SavedModel

        Flow: train for one epoch, save with both serving signatures,
        reload the "default" and "tfrecord" SavedModels, then check all
        three prediction paths agree on one test batch.
        """
        # Test model training on TFRecord Example data
        self.set_seeds()
        classification_pipeline: ClassificationPipeline = ClassificationPipeline(
            args=self.get_overridden_args()
        )
        # Two views of the same dataset: parsed tensors for training and
        # raw serialized protos for the tfrecord serving signature.
        parsed_relevance_dataset: RelevanceDataset = classification_pipeline.get_relevance_dataset()
        raw_relevance_dataset: RelevanceDataset = classification_pipeline.get_relevance_dataset(
            parse_tfrecord=False
        )
        classification_model: RelevanceModel = classification_pipeline.get_relevance_model()
        classification_model.fit(
            dataset=parsed_relevance_dataset, num_epochs=1, models_dir=self.output_dir
        )
        # Preprocessing fns must be re-registered by name when saving.
        preprocessing_keys_to_fns = {"split_and_pad_string": split_and_pad_string}
        classification_model.save(
            models_dir=self.args.models_dir,
            preprocessing_keys_to_fns=preprocessing_keys_to_fns,
            required_fields_only=True,
        )
        default_model = kmodels.load_model(
            os.path.join(self.output_dir, "final", "default"), compile=False
        )
        assert ServingSignatureKey.DEFAULT in default_model.signatures
        default_signature = default_model.signatures[ServingSignatureKey.DEFAULT]
        tfrecord_model = kmodels.load_model(
            os.path.join(self.output_dir, "final", "tfrecord"), compile=False
        )
        assert ServingSignatureKey.TFRECORD in tfrecord_model.signatures
        tfrecord_signature = tfrecord_model.signatures[ServingSignatureKey.TFRECORD]
        # Fetch a single batch for testing
        sequence_example_protos = next(iter(raw_relevance_dataset.test))
        parsed_sequence_examples = next(iter(parsed_relevance_dataset.test))[0]
        parsed_dataset_batch = parsed_relevance_dataset.test.take(1)
        # Use the loaded serving signatures for inference
        model_predictions = classification_model.predict(parsed_dataset_batch)[
            self.args.output_name
        ].values
        default_signature_predictions = default_signature(**parsed_sequence_examples)[
            self.args.output_name
        ]
        # Since we do not pad dummy records in tfrecord serving signature,
        # we can only predict on a single record at a time
        tfrecord_signature_predictions = [
            tfrecord_signature(protos=tf.gather(sequence_example_protos, [i]))[
                self.args.output_name
            ]
            for i in range(self.args.batch_size)
        ]
        # Compare the scores from the different versions of the model
        assert np.isclose(model_predictions[0], default_signature_predictions[0], rtol=0.01,).all()
        assert np.isclose(
            model_predictions[0], tfrecord_signature_predictions[0], rtol=0.01,
        ).all()
| StarcoderdataPython |
126653 | <filename>tests/pytest/test_nj_all_noise.py
# standard libraries
import json
# third party libraries
import pytest
import tqdm
# project libraries
from speech import dataset_info
from speech.utils.wave import array_from_wave
from speech.utils.signal_augment import audio_with_sox
import utils
def test_main():
    """Entry point: exercise the noise-injection utilities end to end."""
    # runs all the noise files against a set of audio files
    check_all_noise()
    # tests a set of noise and audio files against a range of noise_levels
    check_noise_level()
    # tests audio_with_sox utility that creates a range of segments for all noise files
    # (disabled: very slow, walks every noise file with a sliding window)
    #check_audio_with_sox()
def check_all_noise():
    """Run utils.check_length for every noise file against two fixed audio
    clips (one ~2 s, one ~17 s), re-raising with file names on failure.

    NOTE(review): paths are hard-coded to one machine's home directory.
    """
    noise_dataset = dataset_info.NoiseDataset()
    noise_files = noise_dataset.files_from_pattern()
    audio_17s ="/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/19/198/19-198-0034.wav"
    audio_2s = "/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/19/198/19-198-0000.wav"
    test_audio = [audio_2s, audio_17s]
    print(f"\n Test All Noise: testing {len(noise_files)} noise files")
    for audio_file in test_audio:
        for noise_file in noise_files:
            try:
                utils.check_length(audio_file, noise_file)
            # Re-raise with the offending pair of files in the message.
            except AssertionError:
                raise AssertionError(f"audio: {audio_file} and noise: {noise_file}")
            except FileNotFoundError:
                raise FileNotFoundError(f"audio: {audio_file} and noise: {noise_file}")
            #except:
            #raise Exception(f"audio: {audio_file}, noise: {noise_file}")
def check_noise_level():
    """
    this test aims to test noise inject using a variety of noise levels
    across a selection of noise files and test audio files
    """
    noise_files = [
        "/home/dzubke/awni_speech/data/background_noise/100263_43834-lq.wav",
        "/home/dzubke/awni_speech/data/background_noise/101281_1148115-lq.wav",
        "/home/dzubke/awni_speech/data/background_noise/102547_1163166-lq.wav",
        "/home/dzubke/awni_speech/data/background_noise/elaborate_thunder-Mike_Koenig-1877244752.wav",
        "/home/dzubke/awni_speech/data/background_noise/violet_noise_2.wav",
        "/home/dzubke/awni_speech/data/background_noise/115418_8043-lq.wav"
    ]
    # first test audio is 17 s, second is 2 s, third is from separate dataset
    test_audio = [
        "/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/19/198/19-198-0034.wav",
        "/home/dzubke/awni_speech/data/LibriSpeech/train-clean-100/19/198/19-198-0000.wav",
        "/home/dzubke/awni_speech/data/tedlium/TEDLIUM_release-3/data/converted/wav/EmmanuelJal_2009G_104.wav.wav"
    ]
    # making a list of noise_levels from 0 to 1.15 in increments of 0.05
    noise_levels = [x/100 for x in range(0,120, 5)]
    print(f"\n Noise Level Test: testing {len(noise_files)} noise files")
    for audio_file in test_audio:
        for noise_file in noise_files:
            for noise_level in noise_levels:
                try:
                    utils.check_length(audio_file, noise_file, noise_level=noise_level)
                # Re-raise with the failing combination in the message.
                except AssertionError:
                    raise AssertionError(f"audio:{audio_file}, noise:{noise_file}, noise_level:{noise_level}")
                except FileNotFoundError:
                    raise FileNotFoundError(f"audio:{audio_file}, noise:{noise_file}, noise_level:{noise_level}")
                except:
                    raise Exception(f"audio:{audio_file}, noise:{noise_file}, noise_level:{noise_level}")
def check_audio_with_sox():
    """
    this test aims to find files where audio_with_sox raises a
    FileNotFoundError by running audio_with_sox over the entire
    noise file using different window sizes defined in data_lens
    """
    noise_dataset = dataset_info.NoiseDataset()
    noise_files = noise_dataset.files_from_pattern()
    data_lens = [0.5, 5, 50] # in secs
    step_size = 0.05
    print(f"\n Test Full Noise File: testing {len(noise_files)} noise files...")
    file_count = 0
    for noise_file in noise_files:
        print(f"Processing file {file_count}: {noise_file}")
        file_count += 1
        # Duration in seconds = number of samples / sample rate.
        audio, samp_rate = array_from_wave(noise_file)
        noise_len = audio.shape[0] / samp_rate
        for data_len in data_lens:
            # Slide a data_len-second window across the file in 50 ms steps.
            start_end_tups = calc_start_end(noise_len, data_len, step_size)
            for noise_start, noise_end in start_end_tups:
                try:
                    noise_dst = audio_with_sox(noise_file, samp_rate, noise_start, noise_end)
                except AssertionError:
                    raise AssertionError(f"noise:{noise_file}, data_len: {data_len}")
                except FileNotFoundError:
                    raise FileNotFoundError(f"noise:{noise_file}, data_len: {data_len}")
                except:
                    raise Exception(f"noise:{noise_file}, data_len: {data_len}")
def calc_start_end(noise_len:float, data_len:float, step_size:float)->list:
    """
    Build the (start, end) times of a data_len-second window slid across
    a noise_len-second file in step_size increments.  Windows whose end
    would reach or pass noise_len are excluded.
    """
    windows = []
    start = 0.0
    while True:
        end = start + data_len
        if end >= noise_len:
            break
        windows.append((start, end))
        # Repeated addition (not start = k * step) to match accumulation
        # behavior exactly.
        start += step_size
    return windows
| StarcoderdataPython |
9606855 | import time
import network
# the class starts a WiFi access point
class AccessPoint:
    """
    Starts a WiFi access point via the MicroPython ``network`` API.

    Notes:
        Make sure that the password is not too short. Otherwise, an
        OSError may occur while starting the access point.
    """
    def __init__(self, access_point_ssid, access_point_password):
        self.access_point_ssid = access_point_ssid  # SSID broadcast by the AP
        self.access_point_password = access_point_password  # WPA2 passphrase
        # Set by start(); None means the AP has not been started yet.
        self.access_point = None
    def start(self):
        """
        Start the access point with WPA/WPA2-PSK authentication.
        """
        self.access_point = network.WLAN(network.AP_IF)
        self.access_point.active(True)
        # BUG FIX: the password keyword previously referenced an undefined
        # <PASSWORD> placeholder (a syntax error); it must pass the stored
        # passphrase.
        self.access_point.config(essid=self.access_point_ssid,
                                 password=self.access_point_password,
                                 authmode=network.AUTH_WPA_WPA2_PSK)
    def ip(self):
        """
        returns an IP address of the access point

        Raises if start() has not been called.
        """
        if self.access_point is None:
            raise Exception('Access point has not started!')
        return self.access_point.ifconfig()[0]
class Connection:
    """
    Manages a station-mode WiFi connection (MicroPython ``network`` API).

    ..code-block::

        from cct.wifi import Connection
        wifi = Connection("ssid", "password")
        wifi.connect()
    """
    def __init__(self, ssid, password=None):
        # An empty/None SSID can never connect, so fail fast.
        if not ssid:
            raise Exception('ssid not set')
        self.ssid = ssid          # SSID for connection
        self.password = password  # Password (None for an open network)
        self.nic = network.WLAN(network.STA_IF)  # Connection NIC
    def connect(self):
        """
        Connect to the previously specified wi-fi network, polling for up
        to 30 seconds before giving up.
        """
        print('connecting to network: %s' % self.ssid)
        self.nic.active(True)
        if self.password is None:
            self.nic.connect(self.ssid)
        else:
            self.nic.connect(self.ssid, self.password)
        for _ in range(30):
            if self.nic.isconnected():
                break
            time.sleep(1)
            print('still connecting ...')
        print('connected' if self.nic.isconnected() else 'could not connect to WiFi')
    def is_connected(self):
        """
        Check if the connection is active

        Returns:
            bool: `True` if connection active, otherwise `False`
        """
        nic = self.nic
        return nic is not None and nic.active() and nic.isconnected()
    def reconnect_if_necessary(self):
        """
        Keep retrying connect() until the link is up.
        """
        while not self.is_connected():
            self.connect()
    def disconnect(self):
        """
        Tear the connection down and deactivate the interface.
        """
        print('disconnecting ...')
        self.nic.disconnect()
        self.nic.active(False)
    def reconnect(self):
        """
        Disconnect, then connect again.
        """
        self.disconnect()
        self.connect()
11235592 | from itertools import combinations
import re
class Order:
    # Derives a total order (and 0..1 scores) for a set of names from a
    # list of pairwise relation strings like "E<C=B".
    #
    # NOTE(review): this class uses Python 2 constructs -- sorted(..., cmp=...)
    # and popping from a dict while iterating .items() -- and will not run
    # unmodified on Python 3 (use functools.cmp_to_key and iterate a copy).
    ########################
    #from order import Order
    ##names_list = ['A','B','C','D', 'E'] # Names of files
    ##relations = ["E<C=B", "E<C<A", "E<C<D", "E<B<A", "E<B<D", "E<A<D"]
    #ord = Order(names_list, relations)
    #[order, ineq, scores] = ord.get_all()
    #order = ord.get_order()
    #ineq = ord.get_ineq()
    #scores = ord.get_scores()
    #print ord.glob_order
    #print ord.glob_ineq
    #print ord.glob_scores
    ########################
    names_list = [] #['A','B','C','D', 'E'] # Names of files
    relations = [] #["E<C=B", "E<C<A", "E<C<D", "E<B<A", "E<B<D", "E<A<D"] # Full list of comparisons
    combos = [] # All possible 2-combinations of names_list
    glob_order = [] # Final order of files
    glob_ineq = [] # Final inequalities in the final order
    glob_scores = [] # Scores attached to each file at the end
    def __init__(self, names_list, relations):
        self.names_list = names_list
        self.relations = relations
        # combos maps each unordered pair to a string of accumulated votes
        # ('<', '=', '>') collected from the relations.
        self.combos = list(combinations(names_list, 2))
        self.combos = dict( zip(self.combos, ['' for i in range(len(self.combos))]))
        self.order()
    def comparator(self, x, y):
        # cmp-style comparator driven by the resolved combos table.
        # NOTE(review): returns None (falls through) for an unknown pair
        # after printing an error -- confirm that can never happen for
        # pairs produced from names_list.
        if (x,y) in self.combos.keys():
            if self.combos[(x,y)] == '<':
                return -1
            elif self.combos[(x,y)] == '=':
                return 0
            elif self.combos[(x,y)] == '>':
                return 1
            else:
                print('Error in comparator - 1')
        elif (y,x) in self.combos.keys():
            # Reversed pair: invert the stored relation.
            if self.combos[(y,x)] == '<':
                return 1
            elif self.combos[(y,x)] == '=':
                return 0
            elif self.combos[(y,x)] == '>':
                return -1
            else:
                print('Error in comparator - 2')
        else:
            print (x,y)
            print('Error in comparator - 3')
    def get_all(self):
        # Returns [order, inequalities, scores] computed by order().
        return [self.glob_order, self.glob_ineq, self.glob_scores]
    def get_order(self):
        return self.glob_order
    def get_ineq(self):
        return self.glob_ineq
    def get_scores(self):
        return self.glob_scores
    def order(self):
        # Phase 1: parse each 3-name relation string (e.g. "E<C=B") and
        # record one vote per name pair in self.combos.
        for rel in self.relations:
            rel = rel.replace(' ', '')
            rel_parts = re.split('<|=', rel)
            # The operator preceding each of the 2nd and 3rd names.
            # NOTE(review): rel.index(...) finds the first occurrence, so a
            # name that is a substring of another would mis-locate -- confirm
            # names are single characters / non-overlapping.
            ineq = [rel[rel.index(rel_parts[1])-1], rel[rel.index(rel_parts[2])-1]]
            # Infer the implied relation between the 1st and 3rd names.
            if ineq[0] == '<' or ineq[1] == '<':
                ineq.insert(1,'<')
            else:
                ineq.insert(1,'=')
            temp = list(combinations(rel_parts, 2))
            for ww in range(len(temp)):
                if temp[ww] in self.combos.keys():
                    self.combos[temp[ww]] = ''.join([self.combos[temp[ww]], ineq[ww]])
                elif (temp[ww][1],temp[ww][0]) in self.combos.keys():
                    # Stored pair is reversed: flip '<' votes to '>'.
                    if ineq[ww] == '=':
                        self.combos[(temp[ww][1],temp[ww][0])] = ''.join([self.combos[(temp[ww][1],temp[ww][0])], '='])
                    else:
                        self.combos[(temp[ww][1],temp[ww][0])] = ''.join([self.combos[(temp[ww][1],temp[ww][0])], '>'])
        # Phase 2: collapse each pair's vote string to a single relation by
        # majority; ties between '<' and '>' become '='.
        # NOTE(review): pops/re-inserts keys while iterating .items() --
        # legal only on Python 2 where items() returns a list.
        for key, value in self.combos.items():
            xx, yy, zz = value.count('<'), value.count('='), value.count('>')
            temp = [xx, yy, zz]
            maxi = temp.index(max(temp))
            if maxi == 0 and xx != zz:
                self.combos[key] = '<'
            elif maxi == 1 or xx == zz:
                self.combos[key] = '='
            elif maxi == 2 and xx != zz:
                # Normalize '>' pairs by swapping the key to keep only '<'.
                self.combos[(key[1], key[0])] = self.combos.pop(key)
                self.combos[(key[1], key[0])] = '<'
        # Phase 3: sort names with the pairwise comparator (Python 2 cmp=).
        self.glob_order = sorted(self.names_list, cmp = self.comparator)
        self.glob_ineq = []
        for idx in range(len(self.glob_order)-1):
            self.glob_ineq.append(self.combos[(self.glob_order[idx],self.glob_order[idx+1])])
        # Phase 4: assign scores 0..1, stepping up by 1/#strict-inequalities.
        if self.glob_ineq.count('<') == 0:
            self.glob_scores = [1.0 for xx in range(len(self.glob_order))]
        else:
            incr = 1.0/self.glob_ineq.count('<')
            self.glob_scores = [0]
            for idx in range(len(self.glob_ineq)):
                if self.glob_ineq[idx] == '=':
                    self.glob_scores.append(self.glob_scores[idx])
                elif self.glob_ineq[idx] == '<':
                    self.glob_scores.append(self.glob_scores[idx]+incr)
            self.glob_scores = [float('%.2f'% item) for item in self.glob_scores]
        #return [self.glob_order, self.glob_ineq, self.glob_scores]
def get_critical_score(scores):
    """Return the score object closest to either extreme (0 or 1).

    For items with ``score <= 0.5`` the distance is the score itself (how
    close to 0); for items with ``score > 0.5`` it is ``1 - score`` (how
    close to 1).  High-side ties (<=) win over an equal low-side distance.
    Returns None for an empty input.

    scores: iterable of objects with a numeric ``score`` attribute.
    """
    min_distance = 1
    score_model = None
    # BUG FIX: the selection check was previously outside the boundary
    # condition, so a stale (initially 0) distance could wrongly select an
    # item from the other half of the range.
    for score in scores:
        if score.score <= 0.5:
            distance = score.score
            if distance < min_distance:
                score_model = score
                min_distance = distance
    for score in scores:
        if score.score > 0.5:
            distance = 1.0 - float(score.score)
            if distance <= min_distance:
                score_model = score
                min_distance = distance
    return score_model
| StarcoderdataPython |
4985780 | # -*- coding: utf-8 -*-
"""
This software is licensed under the License (MIT) located at
https://github.com/ephreal/rollbot/Licence
Please see the license for any restrictions or rights granted to you by the
License.
"""
import re
from subprocess import Popen, PIPE
async def is_url(url):
    """Return True when *url* contains an http/https URL, otherwise False."""
    pattern = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+] | [!*\(\), ]'
               '|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    return bool(re.findall(pattern, url))
async def process_host_commands(command):
    """
    Run a host command and return its output in a fenced code block.

    Only commands whose first token is on a small whitelist may run, so
    this cannot be used to execute arbitrary (or sensitive) host commands.

    command: list[str] -- argv-style command; command[0] is checked
        against the whitelist.
    -> command_output (str)
    """
    whitelist = ("uptime", "free", "df", "w")
    # Also guards against an empty command list (previously an IndexError).
    if not command or command[0] not in whitelist:
        return "```\nThat command is not available.```"
    data = Popen(command, stdout=PIPE)
    data = data.communicate()[0].decode()
    # Truncate long output -- presumably to stay under a chat-message size
    # limit (the bot context suggests Discord's 2000-character cap).
    if len(data) > 1950:
        data = data[0:1950]
        data += "\n\n...\nOutput Truncated\n\n"
    return f"```\n{data}```"
| StarcoderdataPython |
5150196 | from django.contrib import admin
from django.urls import path, include
from home import views
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
# URL routing table for the "home" app: every page is a flat, top-level
# path served by the view of the same name in home/views.py.
urlpatterns = [
    path("", views.index, name="home"),  # site root
    path("home", views.index, name="home"),  # explicit /home alias of the root
    path("about", views.about, name="about"),
    path("blog", views.blog, name="blog"),
    path("contact", views.contact, name="contact"),
    # Serve /favicon.ico by redirecting to the copy in static files.
    path('favicon.ico', RedirectView.as_view(url=staticfiles_storage.url('img/favicon.ico')))
]
| StarcoderdataPython |
8112018 | import ctypes
# FILE_ATTRIBUTE_HIDDEN from the Windows API (winnt.h).
attribute_hide = 0x02
# SetFileAttributesW returns non-zero on success, 0 on failure.
# NOTE(review): Windows-only; ctypes.windll does not exist on other platforms.
retorno = ctypes.windll.kernel32.SetFileAttributesW('concealer.txt', attribute_hide)
if retorno:
    print('File has been hidden')
else:
    print('File was not hidden')  # bug fix: message previously read "ile was not hidden"
1701783 | # Generated by Django 3.0.3 on 2020-03-05 19:42
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the unused ``status`` field from the ``rooms`` model."""
    # Must run after the migration that added ``booked_on`` to Booking.
    dependencies = [
        ('booking', '0006_booking_booked_on'),
    ]
    operations = [
        # Django emits the matching DDL (DROP COLUMN) for this operation.
        migrations.RemoveField(
            model_name='rooms',
            name='status',
        ),
    ]
| StarcoderdataPython |
3418976 | import sys
from datetime import datetime
from walt.server.threads.main.images.image import NodeImage, format_image_fullname
from walt.server.threads.main.network import nfs
# About terminology: See comment about it in image.py.
# Message templates used by NodeImageStore below; all are %-format strings.
MSG_IMAGE_IS_USED_BUT_NOT_FOUND=\
"WARNING: image %s is not found. Cannot attach it to related nodes.\n"
# Configuration key naming the image a node boots by default.
CONFIG_ITEM_DEFAULT_IMAGE='default_image'
# %s receives the optional reboot warning built from the next template.
MSG_WOULD_OVERWRITE_IMAGE="""\
An image has the same name in your working set.
This operation would overwrite it%s.
"""
MSG_WOULD_OVERWRITE_IMAGE_REBOOTED_NODES='\
(and reboot %d node(s))'
# Printed at startup when the db references an image docker no longer has.
MSG_REMOVING_FROM_DB = """\
WARNING: Removing image %s from db because docker does not list it."""
# Assertion message for a db/docker inconsistency outside of startup.
MSG_IMAGE_READY_BUT_MISSING = """\
Image %s is marked ready in db, but docker does not list it! Aborting."""
class NodeImageStore(object):
    """Registry of WalT node images.

    Keeps three views of the image set in sync: the 'images' table in
    the database, the images known to the docker daemon, and the
    in-memory NodeImage cache (self.images, keyed by image fullname).
    Also manages mounting images and exporting them to nodes over NFS.

    Idiom fixes over the previous revision: ``== None`` comparisons
    replaced by ``is None`` (PEP 8), a leftover 2to3 double-parenthesized
    print removed, and a list-inside-set replaced by a set comprehension.
    """
    def __init__(self, docker, db):
        self.docker = docker
        self.db = db
        # fullname (str) -> NodeImage
        self.images = {}
    def refresh(self, startup = False):
        """Re-synchronize self.images with the database and docker.

        New docker images carrying the 'walt.node.models' label are
        inserted into the db; db rows that docker no longer lists are
        purged when *startup* is True, and are otherwise only tolerated
        while marked not-ready (image still being pulled).
        """
        db_images = { db_img.fullname: db_img.ready \
                        for db_img in self.db.select('images') }
        docker_images = {}
        # gather local images
        for image in self.docker.local.get_images():
            # restrict to images with a label 'walt.node.models'
            if image['Labels'] is None:
                continue
            if 'walt.node.models' not in image['Labels']:
                continue
            if image['RepoTags'] is None: # dangling image
                continue
            for fullname in image['RepoTags']:
                # discard dangling images and tags temporarily added
                # for a 'clone' operation
                if '/' in fullname and 'clone-temp/walt-image:' not in fullname:
                    docker_images[fullname] = image
        # import new images from docker into the database
        for fullname in docker_images:
            if fullname not in db_images:
                self.db.insert('images', fullname=fullname, ready=True)
                db_images[fullname] = True
        # add missing images
        for db_fullname in db_images:
            db_ready = db_images[db_fullname]
            if db_fullname not in self.images:
                if db_fullname in docker_images:
                    created_at = datetime.fromtimestamp(
                            docker_images[db_fullname]['Created'])
                    self.images[db_fullname] = NodeImage(self.db,
                                    self.docker, db_fullname, created_at)
                else:
                    # if the daemon is starting, remove images from db not listed
                    # by docker.
                    if startup:
                        print(MSG_REMOVING_FROM_DB % db_fullname)
                        self.db.delete('images', fullname = db_fullname)
                    else:
                        assert db_ready == False, \
                            MSG_IMAGE_READY_BUT_MISSING % db_fullname
                        # image is not ready yet (probably being pulled)
                        self.images[db_fullname] = NodeImage(self.db,
                                    self.docker, db_fullname)
        # remove deleted images
        for fullname in list(self.images.keys()):
            if fullname not in db_images:
                del self.images[fullname]
    def register_image(self, image_fullname, is_ready):
        """Record a (possibly not yet pulled) image in the database."""
        self.db.insert('images', fullname=image_fullname, ready=is_ready)
        self.db.commit()
        self.refresh()
    # Make sure to rename the image in docker *before* calling this.
    def rename(self, old_fullname, new_fullname):
        """Propagate a docker-side rename to the database."""
        self.db.execute('update images set fullname = %(new_fullname)s where fullname = %(old_fullname)s',
                            dict(old_fullname = old_fullname, new_fullname = new_fullname))
        self.db.commit()
        self.refresh()
    # Make sure to remove the image from docker *before* calling this.
    def remove(self, image_fullname):
        """Propagate a docker-side removal to the database."""
        self.db.delete('images', fullname=image_fullname)
        self.db.commit()
        self.refresh()
    def set_image_ready(self, image_fullname):
        """Mark an image as fully pulled and usable."""
        self.db.update('images', 'fullname', fullname=image_fullname, ready=True)
        self.db.commit()
    def __getitem__(self, image_fullname):
        if image_fullname not in self.images:
            # image was probably downloaded using docker commands,
            # walt does not know it yet
            self.refresh()
        return self.images[image_fullname]
    def __iter__(self):
        return iter(self.images.keys())
    def __len__(self):
        return len(self.images)
    def keys(self):
        return self.images.keys()
    def iteritems(self):
        # legacy (python2-style) alias kept for existing callers
        return self.images.items()
    def itervalues(self):
        # legacy (python2-style) alias kept for existing callers
        return self.images.values()
    def values(self):
        return self.images.values()
    # look for an image belonging to the requester.
    # The 'expected' parameter allows to specify if we expect a matching
    # result (expected = True), no matching result (expected = False),
    # or if both options are ok (expected = None).
    # If expected is True or False and the result does not match expectation,
    # an error message will be printed.
    def get_user_image_from_name(self, requester, image_name, expected = True, ready_only = True):
        """Return the requester's image called *image_name*, or None."""
        username = requester.get_username()
        if not username:
            return None # client already disconnected, give up
        found = None
        fullname = format_image_fullname(username, image_name)
        for image in self.images.values():
            if image.fullname == fullname:
                found = image
        if expected is True and found is None:
            requester.stderr.write(
                "Error: No such image '%s'. (tip: walt image show)\n" % image_name)
        if expected is False and found is not None:
            requester.stderr.write(
                "Error: Image '%s' already exists.\n" % image_name)
        if expected is True and found is not None:
            if ready_only and found.ready == False:
                requester.stderr.write(
                    "Error: Image '%s' is not ready.\n" % image_name)
                found = None
        return found
    def get_user_unmounted_image_from_name(self, requester, image_name):
        """Same as get_user_image_from_name(), but also require unmounted."""
        image = self.get_user_image_from_name(requester, image_name)
        if image:   # otherwise issue is already reported
            if image.mounted:
                requester.stderr.write('Sorry, cannot proceed because the image is mounted.\n')
                return None
        return image
    def update_image_mounts(self, images_in_use = None, requester = None):
        """Mount every image in use, refresh NFS exports, unmount the rest."""
        if images_in_use is None:
            images_in_use = self.get_images_in_use()
        images_found = []
        nodes_found = self.db.select("nodes")
        # ensure all needed images are mounted
        for fullname in images_in_use:
            if fullname in self.images:
                img = self.images[fullname]
                if not img.mounted:
                    img.mount(requester = requester)
                images_found.append(img)
            else:
                sys.stderr.write(MSG_IMAGE_IS_USED_BUT_NOT_FOUND % fullname)
        # update nfs configuration
        nfs.update_exported_filesystems(images_found, nodes_found)
        # unmount images that are not needed anymore
        for fullname in self.images:
            if fullname not in images_in_use:
                img = self.images[fullname]
                if img.mounted:
                    img.unmount()
    def cleanup(self):
        """Stop exporting and unmount everything (daemon shutdown)."""
        # release nfs mounts
        nfs.update_exported_filesystems([], [])
        # unmount images
        for fullname in self.images:
            img = self.images[fullname]
            if img.mounted:
                img.unmount()
    def get_images_in_use(self):
        """Return the set of image fullnames referenced by at least one node."""
        res = { item.image for item in \
                self.db.execute("""
                SELECT DISTINCT image FROM nodes""").fetchall() }
        return res
    def get_default_image_fullname(self, node_model):
        """Return the default image fullname for a given node model."""
        return 'waltplatform/%s-default:latest' % node_model
    def umount_used_image(self, image):
        """Temporarily unmount *image* even though nodes reference it."""
        images = self.get_images_in_use()
        images.remove(image.fullname)
        self.update_image_mounts(images_in_use = images)
    def num_nodes_using_image(self, image_fullname):
        """Return how many nodes are currently attached to *image_fullname*."""
        return len(self.db.select("nodes", image=image_fullname))
    def warn_overwrite_image(self, requester, image_name):
        """Warn the requester that an operation would overwrite an image
        (and reboot the nodes using it, if any)."""
        image_fullname = format_image_fullname(requester.get_username(), image_name)
        num_nodes = self.num_nodes_using_image(image_fullname)
        if num_nodes == 0:
            reboot_message = ''
        else:
            reboot_message = MSG_WOULD_OVERWRITE_IMAGE_REBOOTED_NODES % num_nodes
        requester.stderr.write(MSG_WOULD_OVERWRITE_IMAGE % reboot_message)
| StarcoderdataPython |
9680885 | <gh_stars>0
# TODO : Decorate all print thing
import os
import time
import datetime
import mysql.connector
# Modularized parts
import fileIO
import config
import compileScript
import gradingScript
import abb
import cmdMode
import importlib
import sys
from kbhit import KBHit
from colorama import Style, Fore, init # Terminal decoration
# Program name "OGOGI" rendered bold yellow, used as a console log prefix.
ogogi_bare = abb.bold + Fore.YELLOW + "OGOGI" + Style.RESET_ALL
# Same, wrapped in brackets for the start of log lines: "[ OGOGI ] ".
ogogi = "[ " + ogogi_bare + " ] "
def onRecieved(submission, probInfo, mydb):
    """Grade one submission and return the values for the Result UPDATE.

    submission: a row of the Result table (id, upload time, user id,
                problem id, ..., in-contest flag, language).
    probInfo:   the matching row of the Problem table.
    mydb:       open MySQL connection, passed through to the grading script.

    Returns (resultStr, percentage, timeuse, errmsg, resultID), matching
    the parameter order of the UPDATE statement executed by main().

    Bug fix: on a grading-script error the status used to be *compared*
    against "INTERERR" (``==``, a no-op statement) instead of assigned,
    so the error was then reported as a normal result.  It is now
    assigned, which skips the score computation as intended.
    """
    # Reassign for better readability
    resultID = submission[0]
    uploadTime = str(submission[1])
    userID = str(submission[2])
    probID = str(submission[3])
    inContest = submission[9]
    language = submission[10]
    print("Result ID :\t" + str(resultID))
    print(abb.bold + "Subject :\t" + userID)
    print("Sub Time :\t" + uploadTime)
    print(abb.bold + "Prob ID :\t" + probID + Style.RESET_ALL)
    probName = str(probInfo[2])
    resultStr = ""
    sumTime = 0
    nCase = 0
    # None means "no problem so far"; otherwise a short error code string.
    compile_result = None
    # Interprete subject source file name
    subjectFileName = config.subjectFileName
    _replaces = [("[probID]", probID), ("[uploadTime]", uploadTime)]
    for ph, rep in _replaces:
        subjectFileName = subjectFileName.replace(ph, rep)
    scriptPath = config.scriptPath.replace("[probName]", probName)
    if os.path.exists(scriptPath):
        # The number of test cases is embedded in the problem's script
        # between the caseKey and caseKeyEnd markers.
        case = fileIO.read(scriptPath)
        nBegin = case.find(config.caseKey) + len(config.caseKey)
        nEnd = case.find(config.caseKeyEnd)
        nCase = int(case[nBegin:nEnd])
        print("nCase :\t" + str(nCase), end="\n\n")
    else:
        compile_result = "NOCONFIG"
    # Compile subject's source file
    if compile_result is None:
        compile_result = compileScript.compile(subjectFileName, userID, language)
    # If there is no problem compiling, grade the subject.
    errmsg = ""
    if compile_result is None:
        print(abb.ok + "Subject's file successfully compiled.")
        if probInfo[8] and inContest:
            subtask = probInfo[8].split(" ")
        else:
            subtask = [nCase]
        resultStr, sumTime = gradingScript.run(submission, probInfo, subtask, mydb)
        # If grading script error (interactive)
        if sumTime == -1:
            compile_result = "INTERERR"  # was a no-op ``==`` comparison
            errmsg = "Grading script error, contact admin."
            sumTime = 0
    # Compile error
    elif compile_result == "NOCMP":
        resultStr = "Compilation Error"
        print(abb.error + "Failed to compile subject's file.")
        # Try to read error message
        try:
            errmsg = fileIO.read("env/error.txt")
        except Exception:
            print(abb.error + "Cannot read error log. Please check env/error.txt")
            errmsg = "Cannot read error log. Unknown problem occured."
    # File extension not supported
    elif compile_result == "NOLANG":
        resultStr = "Compilation Error"
        errmsg = "Language not supported. Please check file extension."
        print(abb.error + "Language not supported.")
    # Missing config file (config.cfg or script.php)
    elif compile_result == "NOCONFIG":
        resultStr = "Compilation Error"
        errmsg = "Cannot read config file. Please contact admins."
        print(abb.error + "script.php is missing.")
    # Calculate score
    percentage = 0
    if compile_result is None:
        print(abb.bold + "\nResult :\t[" + abb.bold, end="")
        for e in resultStr:
            if e == "P":
                print(Fore.GREEN, end="")
            else:
                print(Fore.RED, end="")
            print(e, end="")
        print(Style.RESET_ALL + abb.bold + "]")
        # Count correct answer by counting 'P'
        nCorrect = resultStr.count("P")
        print("Time :\t" + str(round(sumTime, 2)) + " s" + Style.RESET_ALL)
        # Guard against a malformed problem config declaring 0 cases,
        # which previously crashed the grader with ZeroDivisionError.
        if nCase > 0:
            percentage = 100 * (nCorrect / nCase)
    return (resultStr, percentage, round(sumTime, 2), errmsg, resultID)
def main():
    """Grader main loop: poll the Result table and grade new submissions.

    Also watches the keyboard; typing ':' enters an interactive command
    mode (shutdown, module reload, exit back to polling).
    """
    # Decorative purpose.
    init()
    # Nope, this is not the real otog.cf password XD.
    mydb = mysql.connector.connect(
        host="localhost",
        user="root",
        passwd="<PASSWORD>",
        # Original for otog.cf was :
        # passwd='<PASSWORD>',
        database="OTOG",
    )
    myCursor = mydb.cursor(buffered=True)
    # for keybord interupt.
    print(ogogi + "Grader started. Waiting for submission...")
    kb = KBHit()
    while True:
        # Looking for keyboard interupt.
        if kb.kbhit():
            if kb.getch() == ":":
                # Do functions.
                print(ogogi + "Keyboard interupted. Entering command mode.")
                kb.set_normal_term()
                # Command mode
                while True:
                    cmd, args = cmdMode.run()
                    # Shutdown signal
                    if cmd == abb.INCMD["SHUTDOWN"]:
                        # Good-bye message
                        print(ogogi + "Bye")
                        exit(0)
                    elif cmd == abb.INCMD["RELOAD"]:
                        # Reload modules in args
                        for e in args:
                            if e == "grader":
                                print(
                                    abb.error
                                    + "'grader' itself cannot be reloaded. Please restart the program manually."
                                )
                            try:
                                importlib.reload(importlib.import_module(e))
                            except:
                                print(abb.error + "'" + e + "' cannot be reloaded.")
                    elif cmd == abb.INCMD["EXIT"]:
                        break
                kb.set_kbhit_term()
                print(ogogi + "Command mode exited. Continue waiting for submission.")
        # Fetch the oldest ungraded submission (status = 0), if any.
        myCursor.execute("SELECT * FROM Result WHERE status = 0 ORDER BY time")
        submission = myCursor.fetchone()
        if submission != None:
            print(abb.bold + Fore.GREEN + "\t--> recieved.\n" + Style.RESET_ALL)
            print(
                str(datetime.datetime.now().strftime("[ %d/%m/%y %H:%M:%S ]"))
                + " -----------------------------"
            )
            # Load the problem definition matching this submission.
            myCursor.execute(
                "SELECT * FROM Problem WHERE id_Prob = " + str(submission[3])
            )
            probInfo = myCursor.fetchone()
            # Submit result
            sql = "UPDATE Result SET result = %s, score = %s, timeuse = %s, status = 1, errmsg = %s WHERE idResult = %s"
            val = onRecieved(submission, probInfo, mydb)
            myCursor.execute(sql, val)
            print("---------------------------------------------------")
            print("\n" + ogogi + "Finished grading session. Waiting for the next one.")
            mydb.commit()
        # Avoid hammering the database between polls.
        time.sleep(config.gradingInterval)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
347134 | <gh_stars>0
import unittest
from flask_restbolt.utils.crypto import encrypt, decrypt
class CryptoTestCase(unittest.TestCase):
    """Round-trip tests for the crypto helpers."""

    def test_encrypt_decrypt(self):
        """Decrypting an encrypted message must give back the original."""
        key = '<KEY>'
        seed = 'deadbeefcafebabe'
        message = 'It should go through'
        ciphertext = encrypt(message, key, seed)
        self.assertEqual(decrypt(ciphertext, key, seed), message)
150863 | from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted
from joblib import Parallel, delayed
from sklearn.ensemble._base import _partition_estimators
import threading
import numpy as np
class RandomForestRegressor2(RandomForestRegressor):
    """RandomForestRegressor that can also report per-sample uncertainty.

    Identical to sklearn's RandomForestRegressor, except that predict()
    accepts return_std=True, in which case it returns the mean and the
    standard deviation of the individual trees' predictions.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # NOTE(review): the parent constructor is called with positional
        # arguments, so this parameter order must match the exact sklearn
        # version in use -- newer sklearn releases changed the signature
        # (and made most parameters keyword-only); confirm compatibility.
        super(RandomForestRegressor2, self).__init__(
            n_estimators,
            criterion,
            max_depth,
            min_samples_split,
            min_samples_leaf,
            min_weight_fraction_leaf,
            max_features,
            max_leaf_nodes,
            min_impurity_decrease,
            min_impurity_split,
            bootstrap,
            oob_score,
            n_jobs,
            random_state,
            verbose,
            warm_start)
    def predict(self, X, return_std=False):
        """Predict targets for X.

        With return_std=False, behaves exactly like the parent class.
        With return_std=True, returns (mean, std): mean and standard
        deviation over the per-tree predictions; for multi-output
        forests the std is reduced to the max over outputs.
        """
        if return_std:
            check_is_fitted(self, 'estimators_')
            # Check data
            X = self._validate_X_predict(X)
            # Assign chunk of trees to jobs
            n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
            # if self.n_outputs_ > 1:
            #     y_pred = np.zeros((self.n_estimators, X.shape[0], self.n_outputs_), dtype=np.float64)
            # else:
            #     y_pred = np.zeros((self.n_estimators, X.shape[0]), dtype=np.float64)
            # Parallel loop
            # lock = self.threading.Lock()
            # Collect each tree's prediction (threads suffice: tree predict
            # releases the GIL inside sklearn's C code).
            y_pred = np.array(Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
                delayed(e.predict)(X) for e in self.estimators_))
            # y_hat /= len(self.estimators_)
            ypred_mean = np.mean(y_pred, axis=0)
            ypred_std = np.std(y_pred, axis=0)
            # Multi-output case: reduce per-output stds to their maximum.
            if len(ypred_std.shape) > 1:
                ypred_std = np.max(ypred_std, 1)
            return ypred_mean, ypred_std
        else:
            return super(RandomForestRegressor2, self).predict(X)
| StarcoderdataPython |
9623715 | <reponame>ziegltob/tool-competition-av
from keras.preprocessing.image import load_img, img_to_array
import pandas as pd
import numpy as np
from skimage.exposure import rescale_intensity
from matplotlib.colors import rgb_to_hsv
from sklearn.model_selection import train_test_split
import os
from models.rambo.config import DataConfig
def make_hsv_data(path):
    """Load every frame listed in the csv at *path* as an HSV image.

    Returns (X, angles): X is a uint8 array of shape
    (num_rows, row, col, 3); angles is the steering-angle column.
    Relies on module globals data_path, row and col.
    """
    frame_table = pd.read_csv(path)
    num_rows = frame_table.shape[0]
    X = np.zeros((num_rows, row, col, 3), dtype=np.uint8)
    for idx in range(num_rows):
        if idx % 1000 == 0:
            print("Processed " + str(idx) + " images...")
        rel_path = frame_table['fullpath'].iloc[idx]
        frame = img_to_array(load_img(data_path + rel_path, target_size=(row, col)))
        X[idx] = np.array(rgb_to_hsv(frame), dtype=np.uint8)
    return X, np.array(frame_table["angle"])
def make_color_data(path):
    """Load every frame listed in the csv at *path* as a plain RGB image.

    Returns (X, angles): X is a uint8 array of shape
    (num_rows, row, col, 3); angles is the steering-angle column.
    Relies on module globals data_path, row and col.
    """
    frame_table = pd.read_csv(path)
    num_rows = frame_table.shape[0]
    X = np.zeros((num_rows, row, col, 3), dtype=np.uint8)
    for idx in range(num_rows):
        if idx % 1000 == 0:
            print("Processed " + str(idx) + " images...")
        rel_path = frame_table['fullpath'].iloc[idx]
        frame = img_to_array(load_img(data_path + rel_path, target_size=(row, col)))
        X[idx] = np.array(frame, dtype=np.uint8)
    return X, np.array(frame_table["angle"])
def make_grayscale_diff_data(path, num_channels=2):
    """Build frame-difference features from consecutive grayscale frames.

    For sample i, channel j holds the intensity-rescaled difference of
    grayscale frames i-j and i-j-1.  Returns (X, angles) where X has
    shape (num_rows - num_channels, row, col, num_channels).
    """
    frame_table = pd.read_csv(path)
    num_rows = frame_table.shape[0]
    X = np.zeros((num_rows - num_channels, row, col, num_channels), dtype=np.uint8)
    for idx in range(num_channels, num_rows):
        if idx % 1000 == 0:
            print("Processed " + str(idx) + " images...")
        for ch in range(num_channels):
            prev_path = frame_table['fullpath'].iloc[idx - ch - 1]
            cur_path = frame_table['fullpath'].iloc[idx - ch]
            prev = img_to_array(load_img(data_path + prev_path, grayscale=True, target_size=(row, col)))
            cur = img_to_array(load_img(data_path + cur_path, grayscale=True, target_size=(row, col)))
            # Map the signed difference [-255, 255] onto [0, 255].
            diff = rescale_intensity(cur - prev, in_range=(-255, 255), out_range=(0, 255))
            X[idx - num_channels, :, :, ch] = np.array(diff, dtype=np.uint8)[:, :, 0]
    return X, np.array(frame_table["angle"].iloc[num_channels:])
def make_grayscale_diff_tx_data(path, num_channels=2):
    """Build frame-difference features against a single reference frame.

    Unlike make_grayscale_diff_data, frame i is loaded once per sample
    and channel j-1 holds the rescaled difference between frame i and
    frame i-j.  Returns (X, angles) with X of shape
    (num_rows - num_channels, row, col, num_channels).
    """
    frame_table = pd.read_csv(path)
    num_rows = frame_table.shape[0]
    X = np.zeros((num_rows - num_channels, row, col, num_channels), dtype=np.uint8)
    for idx in range(num_channels, num_rows):
        if idx % 1000 == 0:
            print("Processed " + str(idx) + " images...")
        ref_path = frame_table['fullpath'].iloc[idx]
        ref = img_to_array(load_img(data_path + ref_path, grayscale=True, target_size=(row, col)))
        for back in range(1, num_channels + 1):
            older_path = frame_table['fullpath'].iloc[idx - back]
            older = img_to_array(load_img(data_path + older_path, grayscale=True, target_size=(row, col)))
            # Map the signed difference [-255, 255] onto [0, 255].
            diff = rescale_intensity(ref - older, in_range=(-255, 255), out_range=(0, 255))
            X[idx - num_channels, :, :, back - 1] = np.array(diff, dtype=np.uint8)[:, :, 0]
    return X, np.array(frame_table["angle"].iloc[num_channels:])
def make_hsv_grayscale_diff_data(path, image_paths, angles, num_channels=2):
    """Build HSV value-channel difference features for *image_paths*.

    For sample i, channel j holds the rescaled difference of the HSV
    V-channels of frames i-j-1 and i-j-2.  Returns
    (X, angles[num_channels:]) with X of shape
    (num_rows - num_channels, row, col, num_channels).

    Fixes over the previous revision:
    * the loop again covers every row instead of a leftover debugging cap
      of 500 iterations;
    * the stray img0.show() debug call (which opened an image viewer on
      every iteration) is removed.
    """
    num_rows = min(len(image_paths), len(angles))
    X = np.zeros((num_rows - num_channels, row, col, num_channels), dtype=np.uint8)
    for i in range(num_channels, num_rows):
        if i % 1000 == 0:
            print("Processed " + str(i) + " images...")
        for j in range(num_channels):
            # NOTE(review): the -2/-1 offsets differ from the csv-based
            # variants above; assumed intentional -- confirm with callers.
            path0 = image_paths[i - j - 2][0]
            path1 = image_paths[i - j - 1][0]
            img0 = img_to_array(load_img(path0, target_size=(row, col)))
            img1 = img_to_array(load_img(path1, target_size=(row, col)))
            img0 = rgb_to_hsv(img0)
            img1 = rgb_to_hsv(img1)
            # Difference of the V (brightness) channels only.
            img = img1[:, :, 2] - img0[:, :, 2]
            img = rescale_intensity(img, in_range=(-255, 255), out_range=(0, 255))
            img = np.array(img, dtype=np.uint8)
            X[i - num_channels, :, :, j] = img
    return X, np.array(angles[num_channels:])
def load_data():
    """Load every recorded drive and split it into train/validation sets.

    Scans each drive-style folder under DATA_DIR for a driving_log.csv,
    resolves the image columns to paths inside that folder, concatenates
    all logs and returns an 80/20 train_test_split (fixed random_state
    for reproducibility):
    (X_train, X_valid, y_train, y_valid).
    """
    recording_dirs = [DATA_DIR]
    x = np.empty([0, 3])
    y = np.array([])
    for recording in recording_dirs:
        for style in os.listdir(recording):
            try:
                csv_name = 'driving_log.csv'
                log_dir = os.path.join(recording, style)
                csv_path = os.path.join(log_dir, csv_name)

                def resolve(serie):
                    # Prefix each relative image path with its log folder.
                    return serie.apply(lambda d: os.path.join(log_dir, d))

                log = pd.read_csv(csv_path)
                resolved_pictures = log[['center', 'left', 'right']].apply(resolve)
                x = np.concatenate((x, resolved_pictures.values), axis=0)
                y = np.concatenate((y, log['steering'].values), axis=0)
            except FileNotFoundError:
                print("Unable to read file %s" % csv_path)
                exit()
    try:
        X_train, X_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2, random_state=0)
    except TypeError:
        print("Missing header to csv files")
        exit()
    return X_train, X_valid, y_train, y_valid
# Root folder containing one sub-folder per recorded driving session.
DATA_DIR = './training_recordings'
if __name__ == "__main__":
    # Pre-process all recorded data into .npy feature/label files.
    config = DataConfig()
    data_path = config.data_path
    row, col = config.img_height, config.img_width
    X_train, X_valid, y_train, y_valid = load_data()
    print("Pre-processing phase 1 data...")
    X_train_gray_diff, y_train_gray_diff = make_hsv_grayscale_diff_data("data/train_round1.txt", X_train, y_train, 2)
    np.save(data_path + "/X_train_round1_hsv_gray_diff_ch4", X_train_gray_diff)
    np.save(data_path + "/y_train_round1_hsv_gray_diff_ch4", y_train_gray_diff)
    X_val_gray_diff, y_val_gray_diff = make_hsv_grayscale_diff_data("data/val_round1.txt", X_valid, y_valid, 2)
    # Bug fix: the validation arrays were previously written to the
    # *_train_* filenames, silently overwriting the training data saved
    # just above.
    np.save("{}/X_val_round1_hsv_gray_diff_ch4".format(data_path), X_val_gray_diff)
    np.save("{}/y_val_round1_hsv_gray_diff_ch4".format(data_path), y_val_gray_diff)
    print("Pre-processing phase 2 data...")
    for i in range(1, 6):
        # NOTE(review): the per-part computation is commented out, so every
        # round-2 part currently re-saves the round-1 training arrays --
        # confirm whether the call below should be restored.
        # X_train, y_train = make_hsv_grayscale_diff_data("data/train_round2_part" + str(i) + ".txt", X_train, y_train,
        #                                                 2)
        np.save("{}/X_train_round2_hsv_gray_diff_ch4_part{}".format(data_path, i), X_train_gray_diff)
        np.save("{}/y_train_round2_hsv_gray_diff_ch4_part{}".format(data_path, i), y_train_gray_diff)
5060711 | import os
import torch
import numpy as np
from torch_geometric.data import InMemoryDataset, download_url, Data
from torch_geometric.utils import from_scipy_sparse_matrix
class AirUSA(InMemoryDataset):
    r"""This dataset is the airport traffic network in the USA from the
    `"Data Augmentation for Graph Neural Networks"
    <https://arxiv.org/pdf/2006.06830.pdf>`_ paper.
    Each node represents an airport and edge indicates the existence of
    commercial flights between the airports. The node labels are generated
    based on the label of activity measured by people and flights passed
    the airports. The original graph does not have any features, one-hot degree
    vectors are used as node features.
    Args:
        root (string): Root directory where the dataset should be saved.
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
    """
    # Upstream location of the pickled graph files.
    url = 'https://github.com/GAugAuthors/GAug/raw/master/data/graphs'
    def __init__(self, root, transform=None, pre_transform=None):
        # The parent constructor triggers download()/process() as needed.
        super().__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # Files expected in raw_dir; their absence triggers download().
        return ['airport_adj.pkl', 'airport_features.pkl', 'airport_labels.pkl', 'airport_tvt_nids.pkl']
    @property
    def processed_file_names(self):
        # A single collated file; its absence triggers process().
        return 'data.pt'
    def download(self):
        for file in self.raw_file_names:
            download_url(f'{self.url}/{file}', self.raw_dir)
    # noinspection PyTypeChecker
    def process(self):
        # NOTE(review): allow_pickle=True deserializes arbitrary objects;
        # safe only because the files come from the trusted upstream repo.
        x = np.load(os.path.join(self.raw_dir, 'airport_features.pkl'), allow_pickle=True)
        y = np.load(os.path.join(self.raw_dir, 'airport_labels.pkl'), allow_pickle=True)
        adj = np.load(os.path.join(self.raw_dir, 'airport_adj.pkl'), allow_pickle=True)
        # Convert the scipy sparse adjacency to a COO edge_index tensor.
        edge_index, _ = from_scipy_sparse_matrix(adj)
        # Pre-computed train/validation/test node id splits.
        train, val, test = np.load(os.path.join(self.raw_dir, 'airport_tvt_nids.pkl'), allow_pickle=True)
        train_mask = torch.zeros_like(y, dtype=torch.bool)
        val_mask = torch.zeros_like(y, dtype=torch.bool)
        test_mask = torch.zeros_like(y, dtype=torch.bool)
        train_mask[train] = True
        val_mask[val] = True
        test_mask[test] = True
        data = Data(
            x=x, edge_index=edge_index, y=y, num_nodes=len(y),
            train_mask=train_mask, val_mask=val_mask, test_mask=test_mask
        )
        if self.pre_transform is not None:
            data = self.pre_transform(data)
        torch.save(self.collate([data]), self.processed_paths[0])
    def __repr__(self):
        return 'AirUSA()'
| StarcoderdataPython |
3288586 | import typing as types
def Main():
    """Create an (unused) empty, explicitly-typed list; returns None."""
    items: types.List[types.Any] = []
3359439 | from django.shortcuts import render
from django.http import HttpResponse,Http404
from .models import Image
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def start(request):
    """Landing page: render every stored image."""
    gallery = Image.objects.all()
    context = {"pictures": gallery}
    return render(request, 'start.html', context)
def search_results(request):
    """Search images by category via the ``image`` GET parameter.

    Renders search.html with the matching images, or with only a prompt
    message when no search term was supplied.
    """
    term = request.GET.get("image")
    if not term:
        return render(request, 'search.html', {"message": "You haven't searched for any term"})
    matches = Image.search_by_category(term)
    return render(request, 'search.html', {"message": f"{term}", "images": matches})
def search_location(request):
    """Search images by location via the ``image`` GET parameter.

    Renders search.html with the matches; with no term it renders
    location.html (note: deliberately a different template from the
    no-term branch of search_results).
    """
    term = request.GET.get("image")
    if not term:
        return render(request, 'location.html', {"message": "You haven't searched for any term"})
    matches = Image.search_by_location(term)
    return render(request, 'search.html', {"message": f"{term}", "images": matches})
def image_details(request, image_id):
    """Detail page for a single image; raises Http404 for unknown ids."""
    try:
        picture = Image.objects.get(id=image_id)
    except ObjectDoesNotExist:
        raise Http404()
    return render(request, "image_deets.html", {"imagey": picture})
| StarcoderdataPython |
3234599 | <reponame>cydenix/OpenGLCffi<gh_stars>0
DEF = '''
typedef int32_t khronos_int32_t;
typedef uint32_t khronos_uint32_t;
typedef int64_t khronos_int64_t;
typedef uint64_t khronos_uint64_t;
typedef signed char khronos_int8_t;
typedef unsigned char khronos_uint8_t;
typedef signed short int khronos_int16_t;
typedef unsigned short int khronos_uint16_t;
typedef signed long int khronos_intptr_t;
typedef unsigned long int khronos_uintptr_t;
typedef signed long int khronos_ssize_t;
typedef unsigned long int khronos_usize_t;
typedef float khronos_float_t;
typedef khronos_uint64_t khronos_utime_nanoseconds_t;
typedef khronos_int64_t khronos_stime_nanoseconds_t;
typedef khronos_int8_t GLbyte;
typedef khronos_uint8_t GLubyte;
typedef khronos_float_t GLfloat;
typedef khronos_float_t GLclampf;
typedef khronos_int32_t GLfixed;
typedef khronos_int64_t GLint64;
typedef khronos_uint64_t GLuint64;
typedef khronos_int64_t GLint64EXT;
typedef khronos_uint64_t GLuint64EXT;
typedef khronos_intptr_t GLintptr;
typedef khronos_ssize_t GLsizeiptr;
typedef unsigned int GLenum;
typedef unsigned char GLboolean;
typedef unsigned int GLbitfield;
typedef void GLvoid;
typedef short GLshort;
typedef int GLint;
typedef int GLclampx;
typedef unsigned short GLushort;
typedef unsigned int GLuint;
typedef int GLsizei;
typedef double GLdouble;
typedef double GLclampd;
typedef void *GLeglImageOES;
typedef char GLchar;
typedef char GLcharARB;
typedef unsigned short GLhalfARB;
typedef unsigned short GLhalf;
typedef ptrdiff_t GLintptrARB;
typedef ptrdiff_t GLsizeiptrARB;
typedef struct __GLsync *GLsync;
struct _cl_context;
struct _cl_event;
typedef void (GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
typedef void (GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
typedef void (GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
typedef void (GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam);
typedef unsigned short GLhalfNV;
typedef GLintptr GLvdpauSurfaceNV;
typedef unsigned int GLhandleARB;
void glStencilMaskSeparate(GLenum face, GLuint mask);
void glCompressedTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
void glTextureStorage3DEXT(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
void glGetProgramPipelineivEXT(GLuint pipeline, GLenum pname, GLint *params);
void glPathGlyphsNV(GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
void glEndPerfMonitorAMD(GLuint monitor);
void glCoverStrokePathInstancedNV(GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
void glUniform2ui64NV(GLint location, GLuint64EXT x, GLuint64EXT y);
void glColorMaskiEXT(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
GLboolean glIsBuffer(GLuint buffer);
void glGetMultisamplefv(GLenum pname, GLuint index, GLfloat *val);
void glProgramUniformMatrix4fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glProgramUniform4fEXT(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
void glCoverStrokePathNV(GLuint path, GLenum coverMode);
void glDebugMessageControl(GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
void glProgramUniform4iEXT(GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
void glRenderbufferStorage(GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
void glGetInternalformatSampleivNV(GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
void glProgramUniform2fEXT(GLuint program, GLint location, GLfloat v0, GLfloat v1);
void glProgramUniform3f(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
/*
 * OpenGL ES entry-point prototype list (core + extensions).
 *
 * Each line declares one GL command. Unsuffixed names (e.g. glDrawArrays,
 * glTexImage2D) are core OpenGL ES; a trailing vendor tag marks the
 * extension that defines the command: KHR, EXT, OES, NV, AMD, IMG, QCOM,
 * INTEL, ANGLE, OVR, APPLE.
 *
 * NOTE(review): the file's metadata labels this as Python, but the content
 * is plainly C prototypes — presumably a wrong metadata join; confirm the
 * real filename/extension.
 *
 * NOTE(review): this looks like a generated listing (flat, unordered,
 * one prototype per line) — TODO confirm the generator/source (e.g. the
 * Khronos GL registry) before hand-editing any signature here, since every
 * declaration must match the registry exactly for callers and loaders to
 * link correctly.
 */
void glObjectPtrLabelKHR(const void *ptr, GLsizei length, const GLchar *label);
void glFramebufferTextureLayerDownsampleIMG(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
void glResumeTransformFeedback();
void glPathCommandsNV(GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
void glInsertEventMarkerEXT(GLsizei length, const GLchar *marker);
void glCopyImageSubDataEXT(GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
void glDepthRangeArrayfvNV(GLuint first, GLsizei count, const GLfloat *v);
void glProgramUniformMatrix3x4fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GLenum glGetGraphicsResetStatus();
void glVertexAttrib1fv(GLuint index, const GLfloat *v);
GLboolean glIsEnabled(GLenum cap);
void glStencilOp(GLenum fail, GLenum zfail, GLenum zpass);
void glProgramUniform2i64vNV(GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
void glGenFramebuffers(GLsizei n, GLuint *framebuffers);
void glClearTexSubImageEXT(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data);
void glGetAttachedShaders(GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
void glDeleteVertexArrays(GLsizei n, const GLuint *arrays);
void glGetPathColorGenfvNV(GLenum color, GLenum pname, GLfloat *value);
void glGetPointerv(GLenum pname, void **params);
void glGetUniformfv(GLuint program, GLint location, GLfloat *params);
void glGetUniformuiv(GLuint program, GLint location, GLuint *params);
void glProgramUniformMatrix3fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glDrawElementsInstanced(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount);
void glSelectPerfMonitorCountersAMD(GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList);
void glGetRenderbufferParameteriv(GLenum target, GLenum pname, GLint *params);
void glProgramPathFragmentInputGenNV(GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs);
GLsync glFenceSync(GLenum condition, GLbitfield flags);
GLboolean glUnmapBufferOES(GLenum target);
void glGetQueryObjecti64vEXT(GLuint id, GLenum pname, GLint64 *params);
void glProgramUniform4uiEXT(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
void glValidateProgramPipeline(GLuint pipeline);
void glStencilStrokePathInstancedNV(GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues);
void glProgramUniform2i64NV(GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
void glGenSamplers(GLsizei count, GLuint *samplers);
void glStencilThenCoverStrokePathNV(GLuint path, GLint reference, GLuint mask, GLenum coverMode);
void glGetTexParameterIuiv(GLenum target, GLenum pname, GLuint *params);
GLboolean glIsSync(GLsync sync);
void glGetObjectPtrLabel(const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label);
void glMatrixLoadTranspose3x3fNV(GLenum matrixMode, const GLfloat *m);
void glCopyTexImage2D(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
void glProgramUniformMatrix2x4fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glDrawElementsBaseVertexEXT(GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
void glPathParameterivNV(GLuint path, GLenum pname, const GLint *value);
void glUniform4uiv(GLint location, GLsizei count, const GLuint *value);
void glGetIntegeri_vEXT(GLenum target, GLuint index, GLint *data);
void glPathStencilFuncNV(GLenum func, GLint ref, GLuint mask);
void glTextureViewOES(GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
void glInvalidateSubFramebuffer(GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
void glGetQueryObjectivEXT(GLuint id, GLenum pname, GLint *params);
void glDeleteSync(GLsync sync);
GLboolean glIsImageHandleResidentNV(GLuint64 handle);
void glUniform3iv(GLint location, GLsizei count, const GLint *value);
void glUseProgram(GLuint program);
void glGetProgramInfoLog(GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
void glProgramUniformMatrix3x2fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glBindFragDataLocationEXT(GLuint program, GLuint color, const GLchar *name);
void glGetBooleanv(GLenum pname, GLboolean *data);
void glDeleteShader(GLuint shader);
void glEnableiOES(GLenum target, GLuint index);
void glVertexAttribFormat(GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
void glTexParameterf(GLenum target, GLenum pname, GLfloat param);
void glVertexAttribBinding(GLuint attribindex, GLuint bindingindex);
void glTexParameteri(GLenum target, GLenum pname, GLint param);
void glGetShaderSource(GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
void glFramebufferSampleLocationsfvNV(GLenum target, GLuint start, GLsizei count, const GLfloat *v);
void glGetNextPerfQueryIdINTEL(GLuint queryId, GLuint *nextQueryId);
void glGetPathCoordsNV(GLuint path, GLfloat *coords);
void glGenProgramPipelines(GLsizei n, GLuint *pipelines);
void glVertexAttrib3fv(GLuint index, const GLfloat *v);
void glLinkProgram(GLuint program);
void glGetObjectLabel(GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label);
const GLubyte * glGetString(GLenum name);
void glGetPathParameterfvNV(GLuint path, GLenum pname, GLfloat *value);
void glExtGetShadersQCOM(GLuint *shaders, GLint maxShaders, GLint *numShaders);
void glEndQuery(GLenum target);
GLboolean glIsQueryEXT(GLuint id);
void glFramebufferParameteri(GLenum target, GLenum pname, GLint param);
void glDeleteTextures(GLsizei n, const GLuint *textures);
void glVertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
void glSamplerParameteri(GLuint sampler, GLenum pname, GLint param);
void glViewportArrayvNV(GLuint first, GLsizei count, const GLfloat *v);
void glGetActiveUniformBlockiv(GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params);
void glUniform1i(GLint location, GLint v0);
void glCullFace(GLenum mode);
void glProgramUniform4i(GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
void glProgramUniform4f(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
GLboolean glPointAlongPathNV(GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY);
void glAttachShader(GLuint program, GLuint shader);
void glDisableiEXT(GLenum target, GLuint index);
void glGetBufferParameteriv(GLenum target, GLenum pname, GLint *params);
void glTexParameterIuiv(GLenum target, GLenum pname, const GLuint *params);
void glGetQueryObjectuivEXT(GLuint id, GLenum pname, GLuint *params);
void glDeletePathsNV(GLuint path, GLsizei range);
void glGetnUniformfvKHR(GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
void glProgramUniform1iEXT(GLuint program, GLint location, GLint v0);
void glTexBufferRangeOES(GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
GLboolean glIsTransformFeedback(GLuint id);
void glGetObjectLabelEXT(GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label);
void glUniformHandleui64vNV(GLint location, GLsizei count, const GLuint64 *value);
GLboolean glIsProgramPipeline(GLuint pipeline);
void glUniformMatrix3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetnUniformfv(GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
void glFramebufferPixelLocalStorageSizeEXT(GLuint target, GLsizei size);
void glStencilFunc(GLenum func, GLint ref, GLuint mask);
void glUniformMatrix4x2fvNV(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetProgramPipelineiv(GLuint pipeline, GLenum pname, GLint *params);
void glDispatchComputeIndirect(GLintptr indirect);
void glGetShaderInfoLog(GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
void glVertexAttribI4i(GLuint index, GLint x, GLint y, GLint z, GLint w);
void glRenderbufferStorageMultisampleNV(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
void glUniformHandleui64NV(GLint location, GLuint64 value);
void glBlendEquationSeparate(GLenum modeRGB, GLenum modeAlpha);
void glEGLImageTargetRenderbufferStorageOES(GLenum target, GLeglImageOES image);
void glCompressedTexSubImage3DOES(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
void glBlitFramebufferNV(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
void glBeginPerfQueryINTEL(GLuint queryHandle);
void glProgramUniform1uivEXT(GLuint program, GLint location, GLsizei count, const GLuint *value);
void glDeleteBuffers(GLsizei n, const GLuint *buffers);
void glBindProgramPipeline(GLuint pipeline);
void glScissor(GLint x, GLint y, GLsizei width, GLsizei height);
void glGetSamplerParameterIuivEXT(GLuint sampler, GLenum pname, GLuint *params);
void glProgramUniform4i64NV(GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
void glProgramUniform4uivEXT(GLuint program, GLint location, GLsizei count, const GLuint *value);
const GLubyte * glGetStringi(GLenum name, GLuint index);
void glGetTexParameterIivOES(GLenum target, GLenum pname, GLint *params);
void glUniform2fv(GLint location, GLsizei count, const GLfloat *value);
void glProgramUniform1uiEXT(GLuint program, GLint location, GLuint v0);
void glBindBufferRange(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
void glGetUniformi64vNV(GLuint program, GLint location, GLint64EXT *params);
void glProgramUniform3fvEXT(GLuint program, GLint location, GLsizei count, const GLfloat *value);
GLenum glClientWaitSync(GLsync sync, GLbitfield flags, GLuint64 timeout);
void glExtGetTexSubImageQCOM(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels);
void glReadnPixels(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
GLenum glPathGlyphIndexArrayNV(GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
void glCopyImageSubData(GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
void glBindVertexBuffer(GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
void glDebugMessageInsert(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
void glGetPerfMonitorCounterDataAMD(GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten);
GLboolean glIsSampler(GLuint sampler);
GLboolean glIsVertexArrayOES(GLuint array);
GLenum glCheckFramebufferStatus(GLenum target);
void glBindImageTexture(GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format);
void glDrawElementsInstancedBaseVertexEXT(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
void glTransformFeedbackVaryings(GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
void glProgramBinaryOES(GLuint program, GLenum binaryFormat, const void *binary, GLint length);
void glDrawRangeElements(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
void glBindBufferBase(GLenum target, GLuint index, GLuint buffer);
void glTexImage3DOES(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
void glUniformMatrix4x3fvNV(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glVertexAttribIFormat(GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
void glUniform3i64vNV(GLint location, GLsizei count, const GLint64EXT *value);
void glPathTexGenNV(GLenum texCoordSet, GLenum genMode, GLint components, const GLfloat *coeffs);
void glUniformMatrix3x2fvNV(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glBeginQuery(GLenum target, GLuint id);
void glGetProgramPipelineInfoLogEXT(GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
void glMakeImageHandleResidentNV(GLuint64 handle, GLenum access);
void glUniformMatrix2x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GLenum glGetError();
void glDeletePerfQueryINTEL(GLuint queryHandle);
void glTexStorage2DEXT(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
void glGetTexLevelParameterfv(GLenum target, GLint level, GLenum pname, GLfloat *params);
void glTextureStorage2DEXT(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
void glGetDriverControlStringQCOM(GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString);
void glProgramUniform2ui(GLuint program, GLint location, GLuint v0, GLuint v1);
void glProgramUniform4ui(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
void glExtGetFramebuffersQCOM(GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers);
void glTextureViewEXT(GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
void glProgramUniform1i64NV(GLuint program, GLint location, GLint64EXT x);
void glPixelStorei(GLenum pname, GLint param);
void glDrawRangeElementsBaseVertexEXT(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
void glDepthMask(GLboolean flag);
void glTexBufferRange(GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
void glUniform2i64NV(GLint location, GLint64EXT x, GLint64EXT y);
void glGetPerfCounterInfoINTEL(GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
void glUniform1ui64NV(GLint location, GLuint64EXT x);
void glProgramParameteri(GLuint program, GLenum pname, GLint value);
void glVertexAttribDivisorNV(GLuint index, GLuint divisor);
void glPathStencilDepthOffsetNV(GLfloat factor, GLfloat units);
void glPatchParameteriOES(GLenum pname, GLint value);
void glDebugMessageCallbackKHR(GLDEBUGPROCKHR callback, const void *userParam);
void glProgramUniformHandleui64vIMG(GLuint program, GLint location, GLsizei count, const GLuint64 *values);
void glFinishFenceNV(GLuint fence);
void glMemoryBarrierByRegion(GLbitfield barriers);
void glProgramUniform2uivEXT(GLuint program, GLint location, GLsizei count, const GLuint *value);
GLuint glCreateShader(GLenum type);
GLuint glGenPathsNV(GLsizei range);
void glGenRenderbuffers(GLsizei n, GLuint *renderbuffers);
void glCopyTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void glBlendFuncSeparate(GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
void glReadnPixelsEXT(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
GLboolean glExtIsProgramBinaryQCOM(GLuint program);
void glGetProgramPipelineInfoLog(GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
void glWaitSync(GLsync sync, GLbitfield flags, GLuint64 timeout);
void glFramebufferTextureMultisampleMultiviewOVR(GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews);
void glBlendEquationSeparatei(GLuint buf, GLenum modeRGB, GLenum modeAlpha);
void glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
void glActiveProgramEXT(GLuint program);
void glProgramUniform3uiv(GLuint program, GLint location, GLsizei count, const GLuint *value);
void glValidateProgramPipelineEXT(GLuint pipeline);
void glWeightPathsNV(GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
void glGetPerfMonitorCounterStringAMD(GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString);
void glDeleteFramebuffers(GLsizei n, const GLuint *framebuffers);
void glDrawArrays(GLenum mode, GLint first, GLsizei count);
void glClear(GLbitfield mask);
void glGetSamplerParameterfv(GLuint sampler, GLenum pname, GLfloat *params);
void glFlushMappedBufferRangeEXT(GLenum target, GLintptr offset, GLsizeiptr length);
void glSamplerParameterIiv(GLuint sampler, GLenum pname, const GLint *param);
void glMultiDrawElementsBaseVertexOES(GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex);
void glDrawElementsIndirect(GLenum mode, GLenum type, const void *indirect);
void glDeletePerfMonitorsAMD(GLsizei n, GLuint *monitors);
void glMatrixMult3x3fNV(GLenum matrixMode, const GLfloat *m);
void glPathSubCommandsNV(GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
void glGetUniformIndices(GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices);
void glScissorArrayvOES(GLuint first, GLsizei count, const GLint *v);
GLboolean glIsVertexArray(GLuint array);
void glDisableVertexAttribArray(GLuint index);
void glBeginQueryEXT(GLenum target, GLuint id);
void glDeleteVertexArraysOES(GLsizei n, const GLuint *arrays);
void glExtGetTexLevelParameterivQCOM(GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params);
void glGetProgramInterfaceiv(GLuint program, GLenum programInterface, GLenum pname, GLint *params);
void glScissorIndexedNV(GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
void glGetVertexAttribIiv(GLuint index, GLenum pname, GLint *params);
void glDeleteQueriesEXT(GLsizei n, const GLuint *ids);
void glPatchParameteri(GLenum pname, GLint value);
void glCopyTexSubImage3DOES(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void glUniformMatrix3x4fvNV(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GLenum glGetGraphicsResetStatusKHR();
void glGetTexParameterIuivOES(GLenum target, GLenum pname, GLuint *params);
void glProgramUniform2iEXT(GLuint program, GLint location, GLint v0, GLint v1);
void glSampleCoverage(GLfloat value, GLboolean invert);
void glGetSyncivAPPLE(GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
void glUniform2i(GLint location, GLint v0, GLint v1);
void glFramebufferTextureLayer(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
void glProgramUniform2fv(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glInterpolatePathsNV(GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight);
void glPrimitiveBoundingBox(GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
GLenum glPathMemoryGlyphIndexArrayNV(GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
void glBlendEquationEXT(GLenum mode);
void glGenPerfMonitorsAMD(GLsizei n, GLuint *monitors);
void glGetInteger64i_v(GLenum target, GLuint index, GLint64 *data);
void glBlitFramebuffer(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
GLboolean glIsEnabledi(GLenum target, GLuint index);
void glViewportIndexedfNV(GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
void glSamplerParameterIivEXT(GLuint sampler, GLenum pname, const GLint *param);
void glCopyPathNV(GLuint resultPath, GLuint srcPath);
void glObjectPtrLabel(const void *ptr, GLsizei length, const GLchar *label);
GLuint glGetDebugMessageLog(GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
void glMakeTextureHandleNonResidentNV(GLuint64 handle);
void glUniform2i64vNV(GLint location, GLsizei count, const GLint64EXT *value);
void glTexParameterIivOES(GLenum target, GLenum pname, const GLint *params);
void glExtGetRenderbuffersQCOM(GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers);
void glTransformPathNV(GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues);
void glGetPointervKHR(GLenum pname, void **params);
void glGetTranslatedShaderSourceANGLE(GLuint shader, GLsizei bufsize, GLsizei *length, GLchar *source);
GLenum glClientWaitSyncAPPLE(GLsync sync, GLbitfield flags, GLuint64 timeout);
void glPushDebugGroup(GLenum source, GLuint id, GLsizei length, const GLchar *message);
void glCopyTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void glGetActiveUniformBlockName(GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName);
void glProgramUniform4i64vNV(GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
void glPopDebugGroupKHR();
void glGetPerfMonitorCounterInfoAMD(GLuint group, GLuint counter, GLenum pname, void *data);
void glBlendEquationSeparateiOES(GLuint buf, GLenum modeRGB, GLenum modeAlpha);
void glProgramUniform2f(GLuint program, GLint location, GLfloat v0, GLfloat v1);
void glBindVertexArray(GLuint array);
void glBlendFuncSeparateiOES(GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
void glBindFramebuffer(GLenum target, GLuint framebuffer);
void glProgramUniform4ivEXT(GLuint program, GLint location, GLsizei count, const GLint *value);
void glProgramUniform3ui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
GLsync glFenceSyncAPPLE(GLenum condition, GLbitfield flags);
void glViewport(GLint x, GLint y, GLsizei width, GLsizei height);
GLboolean glIsRenderbuffer(GLuint renderbuffer);
void glTexSubImage3DOES(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
void glBlendFunciEXT(GLuint buf, GLenum src, GLenum dst);
void glTexStorage2DMultisample(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
GLboolean glIsProgramPipelineEXT(GLuint pipeline);
void glTexBuffer(GLenum target, GLenum internalformat, GLuint buffer);
void glValidateProgram(GLuint program);
void glActiveShaderProgram(GLuint pipeline, GLuint program);
void glPathCoverDepthFuncNV(GLenum func);
void glConservativeRasterParameteriNV(GLenum pname, GLint param);
void glBindTexture(GLenum target, GLuint texture);
void glDetachShader(GLuint program, GLuint shader);
void glGetPathColorGenivNV(GLenum color, GLenum pname, GLint *value);
GLint glGetProgramResourceLocation(GLuint program, GLenum programInterface, const GLchar *name);
void glDrawElementsInstancedNV(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
void glDrawElementsBaseVertex(GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
void glTexStorage1DEXT(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
void glExtGetBuffersQCOM(GLuint *buffers, GLint maxBuffers, GLint *numBuffers);
void glGetUniformiv(GLuint program, GLint location, GLint *params);
void glGetDriverControlsQCOM(GLint *num, GLsizei size, GLuint *driverControls);
void glBindBuffer(GLenum target, GLuint buffer);
void glUniform4ui(GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
void glDeleteProgramPipelinesEXT(GLsizei n, const GLuint *pipelines);
void glProgramUniformHandleui64IMG(GLuint program, GLint location, GLuint64 value);
void glQueryCounterEXT(GLuint id, GLenum target);
void glBlendEquationSeparateiEXT(GLuint buf, GLenum modeRGB, GLenum modeAlpha);
GLuint glCreateShaderProgramv(GLenum type, GLsizei count, const GLchar *const*strings);
void glGenerateMipmap(GLenum target);
void glPolygonOffsetClampEXT(GLfloat factor, GLfloat units, GLfloat clamp);
void glSamplerParameterIivOES(GLuint sampler, GLenum pname, const GLint *param);
void glBlendEquationiOES(GLuint buf, GLenum mode);
GLboolean glUnmapBuffer(GLenum target);
void glProgramUniform2fvEXT(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glProgramUniform2ui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
void glReleaseShaderCompiler();
void glReadPixels(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels);
void glDepthRangeArrayfvOES(GLuint first, GLsizei count, const GLfloat *v);
GLboolean glIsEnablediNV(GLenum target, GLuint index);
void glGetSamplerParameterIivEXT(GLuint sampler, GLenum pname, GLint *params);
void glUseProgramStages(GLuint pipeline, GLbitfield stages, GLuint program);
void glReadBuffer(GLenum src);
void glGetnUniformuiv(GLuint program, GLint location, GLsizei bufSize, GLuint *params);
void glTextureStorage1DEXT(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
void glGenBuffers(GLsizei n, GLuint *buffers);
void glGetnUniformuivKHR(GLuint program, GLint location, GLsizei bufSize, GLuint *params);
void glFramebufferTextureOES(GLenum target, GLenum attachment, GLuint texture, GLint level);
GLboolean glIsFramebuffer(GLuint framebuffer);
void glPolygonModeNV(GLenum face, GLenum mode);
void glProgramUniformMatrix4fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glUniform3i64NV(GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
void glActiveShaderProgramEXT(GLuint pipeline, GLuint program);
void glNamedFramebufferSampleLocationsfvNV(GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
void glFragmentCoverageColorNV(GLuint color);
void glGetPerfMonitorGroupsAMD(GLint *numGroups, GLsizei groupsSize, GLuint *groups);
void glGetBufferParameteri64v(GLenum target, GLenum pname, GLint64 *params);
void glGetInteger64vAPPLE(GLenum pname, GLint64 *params);
void glTexPageCommitmentEXT(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
GLuint glGetDebugMessageLogKHR(GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
void glGetFloati_vNV(GLenum target, GLuint index, GLfloat *data);
void glProgramUniformMatrix4x3fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glTexStorage3DMultisampleOES(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
void glDepthFunc(GLenum func);
void glSamplerParameterf(GLuint sampler, GLenum pname, GLfloat param);
void glBlendFunci(GLuint buf, GLenum src, GLenum dst);
GLuint glCreateShaderProgramEXT(GLenum type, const GLchar *string);
void glColorMaskiOES(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
GLint glGetUniformLocation(GLuint program, const GLchar *name);
void glUniform2ui64vNV(GLint location, GLsizei count, const GLuint64EXT *value);
void glUniform4fv(GLint location, GLsizei count, const GLfloat *value);
void glDrawArraysInstancedBaseInstanceEXT(GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance);
void glEndPerfQueryINTEL(GLuint queryHandle);
void glRenderbufferStorageMultisampleIMG(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
void glProgramUniform2uiv(GLuint program, GLint location, GLsizei count, const GLuint *value);
void glGetQueryObjectuiv(GLuint id, GLenum pname, GLuint *params);
void glLabelObjectEXT(GLenum type, GLuint object, GLsizei length, const GLchar *label);
void glProgramUniform1uiv(GLuint program, GLint location, GLsizei count, const GLuint *value);
void glFramebufferTexture(GLenum target, GLenum attachment, GLuint texture, GLint level);
void glProgramUniformMatrix2x4fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
GLuint64 glGetImageHandleNV(GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
void glBlendParameteriNV(GLenum pname, GLint value);
void glMinSampleShadingOES(GLfloat value);
void glGetPathMetricsNV(GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics);
void glGetFloatv(GLenum pname, GLfloat *data);
void glMatrixLoad3x3fNV(GLenum matrixMode, const GLfloat *m);
void glGetIntegerv(GLenum pname, GLint *data);
GLfloat glGetPathLengthNV(GLuint path, GLsizei startSegment, GLsizei numSegments);
void glUniform4i64NV(GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
GLboolean glIsQuery(GLuint id);
void glTexImage2D(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
GLboolean glIsEnablediEXT(GLenum target, GLuint index);
void glGetFramebufferParameteriv(GLenum target, GLenum pname, GLint *params);
void glGetSamplerParameteriv(GLuint sampler, GLenum pname, GLint *params);
void glCopyBufferSubData(GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
void glGetActiveUniform(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
GLenum glPathGlyphIndexRangeNV(GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]);
void glCreatePerfQueryINTEL(GLuint queryId, GLuint *queryHandle);
void glTexParameterIuivEXT(GLenum target, GLenum pname, const GLuint *params);
void glMinSampleShading(GLfloat value);
void glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
void glDepthRangeIndexedfNV(GLuint index, GLfloat n, GLfloat f);
void glPathParameterfvNV(GLuint path, GLenum pname, const GLfloat *value);
void glBindSampler(GLuint unit, GLuint sampler);
void glLineWidth(GLfloat width);
void glGetIntegeri_v(GLenum target, GLuint index, GLint *data);
void glDisableiOES(GLenum target, GLuint index);
void glGetTransformFeedbackVarying(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
GLuint64 glGetTextureHandleNV(GLuint texture);
void glDepthRangef(GLfloat n, GLfloat f);
void glEnablei(GLenum target, GLuint index);
void glReadBufferIndexedEXT(GLenum src, GLint index);
void glGetBufferPointervOES(GLenum target, GLenum pname, void **params);
void glSampleMaski(GLuint maskNumber, GLbitfield mask);
void glUniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetInternalformativ(GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
void glProgramUniformMatrix3fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glProgramUniform1ui(GLuint program, GLint location, GLuint v0);
void * glMapBufferRangeEXT(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
void glColorMaski(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
void glRenderbufferStorageMultisampleANGLE(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
void glStartTilingQCOM(GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask);
void glDepthRangeIndexedfOES(GLuint index, GLfloat n, GLfloat f);
void glTexStorage3D(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
void glUniformMatrix3x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glSubpixelPrecisionBiasNV(GLuint xbits, GLuint ybits);
void glProgramUniformMatrix4x3fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glUniform4ui64vNV(GLint location, GLsizei count, const GLuint64EXT *value);
void glGetFramebufferAttachmentParameteriv(GLenum target, GLenum attachment, GLenum pname, GLint *params);
void glTexParameteriv(GLenum target, GLenum pname, const GLint *params);
void glProgramUniform4ui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
void glProgramUniform2iv(GLuint program, GLint location, GLsizei count, const GLint *value);
void glGetQueryiv(GLenum target, GLenum pname, GLint *params);
void glObjectLabel(GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
void glProgramUniform3i64NV(GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
void glDebugMessageControlKHR(GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
void glCoverageModulationTableNV(GLsizei n, const GLfloat *v);
void glViewportIndexedfvNV(GLuint index, const GLfloat *v);
void * glMapBufferOES(GLenum target, GLenum access);
void glGetFirstPerfQueryIdINTEL(GLuint *queryId);
void glProgramUniform1ui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
GLuint64 glGetTextureHandleIMG(GLuint texture);
void glDrawArraysInstancedEXT(GLenum mode, GLint start, GLsizei count, GLsizei primcount);
void glClearBufferuiv(GLenum buffer, GLint drawbuffer, const GLuint *value);
void glReadnPixelsKHR(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
void glVertexAttribIPointer(GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
void glFlush();
void glUniformMatrix2x4fvNV(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glDrawTransformFeedbackEXT(GLenum mode, GLuint id);
void glGetTexLevelParameteriv(GLenum target, GLint level, GLenum pname, GLint *params);
void glStencilFillPathNV(GLuint path, GLenum fillMode, GLuint mask);
void glGetQueryivEXT(GLenum target, GLenum pname, GLint *params);
void glTexStorage3DMultisample(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
void glPrimitiveBoundingBoxEXT(GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
void glProgramUniform3uivEXT(GLuint program, GLint location, GLsizei count, const GLuint *value);
void glClearStencil(GLint s);
void glMakeTextureHandleResidentNV(GLuint64 handle);
void glViewportIndexedfOES(GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
void glBlendEquationiEXT(GLuint buf, GLenum mode);
GLboolean glIsTexture(GLuint texture);
void glPolygonOffset(GLfloat factor, GLfloat units);
void glTexImage3D(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
void glGetProgramiv(GLuint program, GLenum pname, GLint *params);
void glProgramUniform4fv(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glBlendBarrier();
void glFlushMappedBufferRange(GLenum target, GLintptr offset, GLsizeiptr length);
void glTexStorage2D(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
void glProgramUniformMatrix2fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGenQueries(GLsizei n, GLuint *ids);
void glTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
void glDeleteSamplers(GLsizei count, const GLuint *samplers);
void glGetPathCommandsNV(GLuint path, GLubyte *commands);
void glProgramUniformMatrix4x2fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetPerfQueryInfoINTEL(GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
void glGetPathSpacingNV(GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing);
void glGetSamplerParameterIuivOES(GLuint sampler, GLenum pname, GLuint *params);
void glMatrixLoad3x2fNV(GLenum matrixMode, const GLfloat *m);
void glUniform3fv(GLint location, GLsizei count, const GLfloat *value);
void glMakeImageHandleNonResidentNV(GLuint64 handle);
void glDrawBuffers(GLsizei n, const GLenum *bufs);
void glWindowRectanglesEXT(GLenum mode, GLsizei count, const GLint *box);
void glBindTransformFeedback(GLenum target, GLuint id);
void glViewportIndexedfvOES(GLuint index, const GLfloat *v);
void glUseProgramStagesEXT(GLuint pipeline, GLbitfield stages, GLuint program);
void glGenFencesNV(GLsizei n, GLuint *fences);
void glUniform2uiv(GLint location, GLsizei count, const GLuint *value);
void glFinish();
void glBindFragDataLocationIndexedEXT(GLuint program, GLuint colorNumber, GLuint index, const GLchar *name);
void glUniform1uiv(GLint location, GLsizei count, const GLuint *value);
void glScissorArrayvNV(GLuint first, GLsizei count, const GLint *v);
void glCopyTextureLevelsAPPLE(GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount);
void glFramebufferTexture2DDownsampleIMG(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
void glTexStorage3DEXT(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
void glDrawBuffersNV(GLsizei n, const GLenum *bufs);
void glBlendBarrierNV();
void glUniform4i64vNV(GLint location, GLsizei count, const GLint64EXT *value);
void glGetPathTexGenivNV(GLenum texCoordSet, GLenum pname, GLint *value);
void glVertexAttribDivisorANGLE(GLuint index, GLuint divisor);
void glDrawElementsInstancedANGLE(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
void glDeleteQueries(GLsizei n, const GLuint *ids);
void glScissorIndexedvOES(GLuint index, const GLint *v);
void glPrimitiveBoundingBoxOES(GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
void glPopGroupMarkerEXT();
void glProgramUniform1ui64NV(GLuint program, GLint location, GLuint64EXT x);
void glGetVertexAttribfv(GLuint index, GLenum pname, GLfloat *params);
void glDispatchCompute(GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z);
void glBufferStorageEXT(GLenum target, GLsizeiptr size, const void *data, GLbitfield flags);
void glGetActiveAttrib(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
void glUniform3i(GLint location, GLint v0, GLint v1, GLint v2);
void glTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
void glProgramUniformMatrix3x4fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glBindVertexArrayOES(GLuint array);
void glStencilStrokePathNV(GLuint path, GLint reference, GLuint mask);
void glUniform3ui(GLint location, GLuint v0, GLuint v1, GLuint v2);
void glShaderBinary(GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length);
void glDrawElements(GLenum mode, GLsizei count, GLenum type, const void *indices);
void glProgramUniform4iv(GLuint program, GLint location, GLsizei count, const GLint *value);
void glProgramUniform3ui64NV(GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
void glUniform1iv(GLint location, GLsizei count, const GLint *value);
void glDrawArraysInstanced(GLenum mode, GLint first, GLsizei count, GLsizei instancecount);
void glTexBufferEXT(GLenum target, GLenum internalformat, GLuint buffer);
void glProgramUniform1iv(GLuint program, GLint location, GLsizei count, const GLint *value);
void glBindRenderbuffer(GLenum target, GLuint renderbuffer);
void glProgramUniformHandleui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64 *values);
GLboolean glIsProgram(GLuint program);
void glVertexAttrib4fv(GLuint index, const GLfloat *v);
void glCopyImageSubDataOES(GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
GLboolean glTestFenceNV(GLuint fence);
void glVertexAttrib2fv(GLuint index, const GLfloat *v);
void glUniform3ui64NV(GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
void glBeginConditionalRenderNV(GLuint id, GLenum mode);
void glPathFogGenNV(GLenum genMode);
void glStencilThenCoverFillPathNV(GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode);
GLboolean glIsSyncAPPLE(GLsync sync);
void glProgramUniform3i(GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
GLint glGetFragDataIndexEXT(GLuint program, const GLchar *name);
void glStencilThenCoverFillPathInstancedNV(GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
GLuint64 glGetTextureSamplerHandleIMG(GLuint texture, GLuint sampler);
void glExtGetTexturesQCOM(GLuint *textures, GLint maxTextures, GLint *numTextures);
void glEndConditionalRenderNV();
void glProgramUniform2uiEXT(GLuint program, GLint location, GLuint v0, GLuint v1);
void glRenderbufferStorageMultisampleAPPLE(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
void glFramebufferTexture2DMultisampleIMG(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
GLboolean glIsTextureHandleResidentNV(GLuint64 handle);
void glGetPerfQueryIdByNameINTEL(GLchar *queryName, GLuint *queryId);
void glGetInteger64v(GLenum pname, GLint64 *data);
void glClearBufferiv(GLenum buffer, GLint drawbuffer, const GLint *value);
void glDrawElementsInstancedBaseVertexOES(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
void glMatrixMult3x2fNV(GLenum matrixMode, const GLfloat *m);
void glGetSamplerParameterIuiv(GLuint sampler, GLenum pname, GLuint *params);
void glExtGetProgramsQCOM(GLuint *programs, GLint maxPrograms, GLint *numPrograms);
void glUniform2f(GLint location, GLfloat v0, GLfloat v1);
void glClearDepthf(GLfloat d);
GLboolean glIsFenceNV(GLuint fence);
void glBeginPerfMonitorAMD(GLuint monitor);
void glExtGetBufferPointervQCOM(GLenum target, void **params);
void glProgramUniform4fvEXT(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glTexParameterIuivOES(GLenum target, GLenum pname, const GLuint *params);
void glVertexAttribI4ui(GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
void glGetPathDashArrayNV(GLuint path, GLfloat *dashArray);
void glColorMask(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
void glBlendEquation(GLenum mode);
void glGetQueryObjectui64vEXT(GLuint id, GLenum pname, GLuint64 *params);
void glUniform1i64vNV(GLint location, GLsizei count, const GLint64EXT *value);
void glGetProgramResourceiv(GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params);
void glBeginTransformFeedback(GLenum primitiveMode);
void glProgramUniform3iEXT(GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
void glDeleteTransformFeedbacks(GLsizei n, const GLuint *ids);
void glDrawRangeElementsBaseVertex(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
void glBindAttribLocation(GLuint program, GLuint index, const GLchar *name);
void glProgramUniform2ivEXT(GLuint program, GLint location, GLsizei count, const GLint *value);
void glPushGroupMarkerEXT(GLsizei length, const GLchar *marker);
void glResolveDepthValuesNV();
void glBlendFuncSeparatei(GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
void glUniform2ui(GLint location, GLuint v0, GLuint v1);
void glApplyFramebufferAttachmentCMAAINTEL();
void glDisablei(GLenum target, GLuint index);
void glGetSynciv(GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
void glExtGetProgramBinarySourceQCOM(GLuint program, GLenum shadertype, GLchar *source, GLint *length);
void glRenderbufferStorageMultisampleEXT(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
void glProgramUniform2i(GLuint program, GLint location, GLint v0, GLint v1);
void glGetProgramBinary(GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
void glExtTexObjectStateOverrideiQCOM(GLenum target, GLenum pname, GLint param);
void glDisableDriverControlQCOM(GLuint driverControl);
void glPauseTransformFeedback();
void glVertexAttribI4iv(GLuint index, const GLint *v);
void glDrawTransformFeedbackInstancedEXT(GLenum mode, GLuint id, GLsizei instancecount);
void glProgramUniform3uiEXT(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
void glTexParameterfv(GLenum target, GLenum pname, const GLfloat *params);
void glScissorIndexedOES(GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
void glStencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask);
void glProgramUniform3fv(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glProgramUniform1fvEXT(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glWaitSyncAPPLE(GLsync sync, GLbitfield flags, GLuint64 timeout);
void glGetProgramBinaryOES(GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
void glProgramUniform1fv(GLuint program, GLint location, GLsizei count, const GLfloat *value);
void glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetnUniformivEXT(GLuint program, GLint location, GLsizei bufSize, GLint *params);
void glMultiDrawArraysEXT(GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
void glClearPixelLocalStorageuiEXT(GLsizei offset, GLsizei n, const GLuint *values);
void glPathParameteriNV(GLuint path, GLenum pname, GLint value);
void glDeleteProgramPipelines(GLsizei n, const GLuint *pipelines);
void glSamplerParameterIuivEXT(GLuint sampler, GLenum pname, const GLuint *param);
void glTexBufferOES(GLenum target, GLenum internalformat, GLuint buffer);
void glProgramUniform1i64vNV(GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
void glCompileShader(GLuint shader);
void glFramebufferTextureEXT(GLenum target, GLenum attachment, GLuint texture, GLint level);
void glGenVertexArraysOES(GLsizei n, GLuint *arrays);
void glInvalidateFramebuffer(GLenum target, GLsizei numAttachments, const GLenum *attachments);
void glCompressedTexImage2D(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
void glVertexAttrib1f(GLuint index, GLfloat x);
void glUniform1ui64vNV(GLint location, GLsizei count, const GLuint64EXT *value);
void glProgramUniform3ivEXT(GLuint program, GLint location, GLsizei count, const GLint *value);
void glDeleteProgram(GLuint program);
void glUniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glClearBufferfv(GLenum buffer, GLint drawbuffer, const GLfloat *value);
void glEnableiNV(GLenum target, GLuint index);
void glDebugMessageInsertKHR(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
void glClearBufferfi(GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
void glDrawArraysIndirect(GLenum mode, const void *indirect);
void glGenVertexArrays(GLsizei n, GLuint *arrays);
void glUniformHandleui64IMG(GLint location, GLuint64 value);
void glVertexBindingDivisor(GLuint bindingindex, GLuint divisor);
void glGetSamplerParameterIiv(GLuint sampler, GLenum pname, GLint *params);
void glGetCoverageModulationTableNV(GLsizei bufsize, GLfloat *v);
void glUniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glVertexAttrib3f(GLuint index, GLfloat x, GLfloat y, GLfloat z);
void glGetTexParameterIivEXT(GLenum target, GLenum pname, GLint *params);
void glBlendBarrierKHR();
void glUniform1ui(GLint location, GLuint v0);
void glMemoryBarrier(GLbitfield barriers);
GLint glGetFragDataLocation(GLuint program, const GLchar *name);
void glFramebufferTexture3DOES(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
void glSamplerParameterIuivOES(GLuint sampler, GLenum pname, const GLuint *param);
GLboolean glIsShader(GLuint shader);
void glVertexAttribDivisorEXT(GLuint index, GLuint divisor);
void glEnable(GLenum cap);
void glGetActiveUniformsiv(GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params);
void glBlendEquationi(GLuint buf, GLenum mode);
GLint glGetAttribLocation(GLuint program, const GLchar *name);
void glPathSubCoordsNV(GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
void glProgramUniform3ui(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
void glViewportArrayvOES(GLuint first, GLsizei count, const GLfloat *v);
void glGenProgramPipelinesEXT(GLsizei n, GLuint *pipelines);
void glEnableDriverControlQCOM(GLuint driverControl);
void glProgramUniformMatrix2x3fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glPathCoordsNV(GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
void glProgramUniform1i(GLuint program, GLint location, GLint v0);
void glProgramUniform1f(GLuint program, GLint location, GLfloat v0);
void glProgramParameteriEXT(GLuint program, GLenum pname, GLint value);
void glProgramUniform3iv(GLuint program, GLint location, GLsizei count, const GLint *value);
void glEGLImageTargetTexture2DOES(GLenum target, GLeglImageOES image);
void glCompressedTexImage3DOES(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
void glEndQueryEXT(GLenum target);
void glGetPathParameterivNV(GLuint path, GLenum pname, GLint *value);
void glUniform4iv(GLint location, GLsizei count, const GLint *value);
void glGenTextures(GLsizei n, GLuint *textures);
void glUniform3ui64vNV(GLint location, GLsizei count, const GLuint64EXT *value);
void glTexParameterIivEXT(GLenum target, GLenum pname, const GLint *params);
void glGetPerfQueryDataINTEL(GLuint queryHandle, GLuint flags, GLsizei dataSize, GLvoid *data, GLuint *bytesWritten);
void glPathParameterfNV(GLuint path, GLenum pname, GLfloat value);
void glVertexAttribPointer(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
void glGenQueriesEXT(GLsizei n, GLuint *ids);
void glUniform1f(GLint location, GLfloat v0);
void glUniformHandleui64vIMG(GLint location, GLsizei count, const GLuint64 *value);
void glDeleteFencesNV(GLsizei n, const GLuint *fences);
void glProgramUniform1ivEXT(GLuint program, GLint location, GLsizei count, const GLint *value);
void glProgramUniform4ui64NV(GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
void glUniform2iv(GLint location, GLsizei count, const GLint *value);
void glFramebufferTexture2DMultisampleEXT(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
void glGetShaderiv(GLuint shader, GLenum pname, GLint *params);
void glAlphaFuncQCOM(GLenum func, GLclampf ref);
void glGetPerfMonitorCountersAMD(GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters);
void glGetnUniformfvEXT(GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
void glProgramUniform2ui64NV(GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
void glDrawRangeElementsBaseVertexOES(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
void glGetFloati_vOES(GLenum target, GLuint index, GLfloat *data);
void glMultiDrawElementsBaseVertexEXT(GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex);
void glCoverageModulationNV(GLenum components);
void glCompressedTexImage3D(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
void glGetVertexAttribiv(GLuint index, GLenum pname, GLint *params);
void glDrawArraysInstancedANGLE(GLenum mode, GLint first, GLsizei count, GLsizei primcount);
void glGetFenceivNV(GLuint fence, GLenum pname, GLint *params);
void glUniformMatrix2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glBlendFunciOES(GLuint buf, GLenum src, GLenum dst);
void glCoverFillPathNV(GLuint path, GLenum coverMode);
void glObjectLabelKHR(GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
void glSetFenceNV(GLuint fence, GLenum condition);
void glGetTexParameterIiv(GLenum target, GLenum pname, GLint *params);
GLboolean glIsPathNV(GLuint path);
void glFramebufferTextureMultiviewOVR(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
void glDebugMessageCallback(GLDEBUGPROC callback, const void *userParam);
void glStencilThenCoverStrokePathInstancedNV(GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
void glDrawElementsInstancedBaseInstanceEXT(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance);
GLuint glCreateShaderProgramvEXT(GLenum type, GLsizei count, const GLchar **strings);
void glGetBooleani_v(GLenum target, GLuint index, GLboolean *data);
void glCoverageOperationNV(GLenum operation);
void glHint(GLenum target, GLenum mode);
void glGetProgramResourceName(GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name);
void glCoverFillPathInstancedNV(GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
void glDrawBuffersIndexedEXT(GLint n, const GLenum *location, const GLint *indices);
void glStencilOpSeparate(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
void glGetTexParameteriv(GLenum target, GLenum pname, GLint *params);
void glGetVertexAttribPointerv(GLuint index, GLenum pname, void **pointer);
void glProgramUniform3i64vNV(GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
void glDrawArraysInstancedNV(GLenum mode, GLint first, GLsizei count, GLsizei primcount);
void glScissorIndexedvNV(GLuint index, const GLint *v);
void glGetPathMetricRangeNV(GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics);
void glDisable(GLenum cap);
void glDrawElementsBaseVertexOES(GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
GLboolean glIsPointInFillPathNV(GLuint path, GLuint mask, GLfloat x, GLfloat y);
void glProgramUniform4uiv(GLuint program, GLint location, GLsizei count, const GLuint *value);
void glGetProgramResourcefvNV(GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params);
void glGetSamplerParameterIivOES(GLuint sampler, GLenum pname, GLint *params);
GLuint64 glGetTextureSamplerHandleNV(GLuint texture, GLuint sampler);
GLenum glGetGraphicsResetStatusEXT();
GLboolean glIsEnablediOES(GLenum target, GLuint index);
void glProgramUniform1fEXT(GLuint program, GLint location, GLfloat v0);
void glGetnUniformivKHR(GLuint program, GLint location, GLsizei bufSize, GLint *params);
void glUniform3uiv(GLint location, GLsizei count, const GLuint *value);
void glMultiDrawArraysIndirectEXT(GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride);
void glStencilFillPathInstancedNV(GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues);
GLsizei glGetFramebufferPixelLocalStorageSizeEXT(GLuint target);
void glEnableiEXT(GLenum target, GLuint index);
void glBlendColor(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void glSamplerParameterIuiv(GLuint sampler, GLenum pname, const GLuint *param);
void glDrawElementsInstancedEXT(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
void glBlitFramebufferANGLE(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
void glEndTilingQCOM(GLbitfield preserveMask);
void glUniform4i(GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
void glActiveTexture(GLenum texture);
void glEnableVertexAttribArray(GLuint index);
void glProgramUniformHandleui64NV(GLuint program, GLint location, GLuint64 value);
void glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
void glRenderbufferStorageMultisample(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
void glDrawElementsInstancedBaseVertex(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
void glBlendFuncSeparateiEXT(GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
void glTexBufferRangeEXT(GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
void glPopDebugGroup();
void glUniformBlockBinding(GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
void glBufferSubData(GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
void glPathGlyphRangeNV(GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
void glProgramUniformMatrix3x2fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glResolveMultisampleFramebufferAPPLE();
void glMultiDrawElementsIndirectEXT(GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride);
void glProgramUniform3fEXT(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
void glDeleteSyncAPPLE(GLsync sync);
void glBlendFunc(GLenum sfactor, GLenum dfactor);
GLuint glCreateProgram();
void glGetObjectLabelKHR(GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label);
void glCoverageMaskNV(GLboolean mask);
void glProgramUniformMatrix2fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glSamplerParameteriv(GLuint sampler, GLenum pname, const GLint *param);
void glUniform1i64NV(GLint location, GLint64EXT x);
void glPathStringNV(GLuint path, GLenum format, GLsizei length, const void *pathString);
void glPathDashArrayNV(GLuint path, GLsizei dashCount, const GLfloat *dashArray);
void glClearColor(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
void glUseShaderProgramEXT(GLenum type, GLuint program);
void glGetnUniformiv(GLuint program, GLint location, GLsizei bufSize, GLint *params);
void glGetPerfMonitorGroupStringAMD(GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString);
void glStencilMask(GLuint mask);
void glVertexAttribI4uiv(GLuint index, const GLuint *v);
GLuint glGetProgramResourceIndex(GLuint program, GLenum programInterface, const GLchar *name);
void glClearTexImageEXT(GLuint texture, GLint level, GLenum format, GLenum type, const void *data);
void glProgramUniformMatrix2x3fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glPathColorGenNV(GLenum color, GLenum genMode, GLenum colorFormat, const GLfloat *coeffs);
void glUniformMatrix2x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGenTransformFeedbacks(GLsizei n, GLuint *ids);
void glGetVertexAttribIuiv(GLuint index, GLenum pname, GLuint *params);
void glGetTexParameterIuivEXT(GLenum target, GLenum pname, GLuint *params);
void glCompressedTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
void glDiscardFramebufferEXT(GLenum target, GLsizei numAttachments, const GLenum *attachments);
void glProgramBinary(GLuint program, GLenum binaryFormat, const void *binary, GLsizei length);
void glGetTexParameterfv(GLenum target, GLenum pname, GLfloat *params);
void glCopyBufferSubDataNV(GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
void glGetObjectPtrLabelKHR(const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label);
void glPatchParameteriEXT(GLenum pname, GLint value);
void glMatrixMultTranspose3x3fNV(GLenum matrixMode, const GLfloat *m);
void glTexParameterIiv(GLenum target, GLenum pname, const GLint *params);
void glEndTransformFeedback();
void glUniform4ui64NV(GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
void glVertexAttribDivisor(GLuint index, GLuint divisor);
GLint glGetProgramResourceLocationIndexEXT(GLuint program, GLenum programInterface, const GLchar *name);
void * glMapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
void glProgramUniformMatrix4x2fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetShaderPrecisionFormat(GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
void glRasterSamplesEXT(GLuint samples, GLboolean fixedsamplelocations);
void glShaderSource(GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
void glDeleteRenderbuffers(GLsizei n, const GLuint *renderbuffers);
void glDisableiNV(GLenum target, GLuint index);
void glReadBufferNV(GLenum mode);
void glViewportSwizzleNV(GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew);
void glBufferData(GLenum target, GLsizeiptr size, const void *data, GLenum usage);
void glUniformMatrix2x3fvNV(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetPathTexGenfvNV(GLenum texCoordSet, GLenum pname, GLfloat *value);
void glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
void glGetBufferPointerv(GLenum target, GLenum pname, void **params);
void glDrawBuffersEXT(GLsizei n, const GLenum *bufs);
void glDrawElementsInstancedBaseVertexBaseInstanceEXT(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance);
void glSamplerParameterfv(GLuint sampler, GLenum pname, const GLfloat *param);
void glUniform1fv(GLint location, GLsizei count, const GLfloat *value);
void glBindProgramPipelineEXT(GLuint pipeline);
void glVertexAttrib2f(GLuint index, GLfloat x, GLfloat y);
void glMultiDrawElementsEXT(GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount);
void glPushDebugGroupKHR(GLenum source, GLuint id, GLsizei length, const GLchar *message);
GLuint glGetUniformBlockIndex(GLuint program, const GLchar *uniformBlockName);
void glFrontFace(GLenum mode);
GLboolean glIsPointInStrokePathNV(GLuint path, GLfloat x, GLfloat y);
'''
| StarcoderdataPython |
8032393 | <filename>newrelic_api/users.py
from .base import Resource
class Users(Resource):
    """
    An interface for interacting with the NewRelic user API.
    """
    def list(self, filter_email=None, filter_ids=None, page=None):
        """
        This API endpoint returns a paginated list of the Users
        associated with your New Relic account. Users can be filtered
        by their email or by a list of user IDs.
        :type filter_email: str
        :param filter_email: Filter by user email
        :type filter_ids: list of ints
        :param filter_ids: Filter by user ids
        :type page: int
        :param page: Pagination index
        :rtype: dict
        :return: The JSON response of the API, with an additional 'pages' key
            if there are paginated results
        ::
            {
                "users": [
                    {
                        "id": "integer",
                        "first_name": "string",
                        "last_name": "string",
                        "email": "string",
                        "role": "string"
                    }
                ],
                "pages": {
                    "last": {
                        "url": "https://api.newrelic.com/v2/users.json?page=2",
                        "rel": "last"
                    },
                    "next": {
                        "url": "https://api.newrelic.com/v2/users.json?page=2",
                        "rel": "next"
                    }
                }
            }
        """
        # Pre-render the comma-separated id list so the filter spec below
        # stays readable; None entries are placeholders for unset filters.
        ids_csv = None
        if filter_ids:
            ids_csv = ','.join(str(user_id) for user_id in filter_ids)
        filter_specs = [
            'filter[email]={0}'.format(filter_email) if filter_email else None,
            'filter[ids]={0}'.format(ids_csv) if filter_ids else None,
            'page={0}'.format(page) if page else None,
        ]
        list_url = '{0}users.json'.format(self.URL)
        return self._get(
            url=list_url,
            headers=self.headers,
            params=self.build_param_string(filter_specs)
        )
    def show(self, id):
        """
        This API endpoint returns a single User, identified its ID.
        :type id: int
        :param id: User ID
        :rtype: dict
        :return: The JSON response of the API
        ::
            {
                "user": {
                    "id": "integer",
                    "first_name": "string",
                    "last_name": "string",
                    "email": "string",
                    "role": "string"
                }
            }
        """
        # Build the per-user resource URL, then delegate the HTTP GET to the
        # shared Resource helper.
        show_url = '{0}users/{1}.json'.format(self.URL, id)
        return self._get(
            url=show_url,
            headers=self.headers,
        )
| StarcoderdataPython |
9655606 | import json
from flask import Flask, request, render_template, url_for
from interface import Interface
app = Flask(__name__)
# Warm up the search interface at import time so the first real request does
# not pay the cost of an initial query (assumes Interface caches state from
# this call -- TODO confirm).
_interface = Interface()
_interface.query('machine learning')
@app.route("/")
def welcome():
return render_template('index.html')
@app.route("/query", methods=['POST'])
def query():
assert request.method == 'POST'
query_str = request.form['inputValue']
return json.dumps(_interface.query(query_str))
if __name__ == "__main__":
app.run() | StarcoderdataPython |
9629664 | <filename>seed_message_sender/settings.py
"""
Django settings for seed_message_sender project.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
from kombu import Exchange, Queue
from getenv import env
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY", "REPLACEME")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DEBUG", False)
# NOTE(review): "*" accepts any Host header; acceptable only behind a
# trusted reverse proxy that validates the host.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = (
    # admin
    "django.contrib.admin",
    # core
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    # 3rd party
    "raven.contrib.django.raven_compat",
    "rest_framework",
    "rest_framework.authtoken",
    "django_filters",
    "rest_hooks",
    "storages",
    "django_prometheus",
    # us
    "message_sender",
)
# django.contrib.sites: this deployment always uses the first Site record.
SITE_ID = 1
USE_SSL = os.environ.get("USE_SSL", "false").lower() == "true"
USE_SSL = env("USE_SSL", False)
# django-prometheus requires its Before/After middleware to be the first and
# last entries so the full request pipeline is measured.
MIDDLEWARE = (
    "django_prometheus.middleware.PrometheusBeforeMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django_prometheus.middleware.PrometheusAfterMiddleware",
)
ROOT_URLCONF = "seed_message_sender.urls"
WSGI_APPLICATION = "seed_message_sender.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# The connection string comes from the MESSAGE_SENDER_DATABASE env var;
# the django-prometheus engine wraps the postgres backend to export metrics.
DATABASES = {
    "default": dj_database_url.config(
        default=env(
            "MESSAGE_SENDER_DATABASE",
            "postgres://postgres:@localhost/seed_message_sender",
        ),
        engine="django_prometheus.db.backends.postgresql",
    )
}
PROMETHEUS_EXPORT_MIGRATIONS = False
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = "en-gb"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django.contrib.staticfiles.finders.FileSystemFinder",
)
STATIC_ROOT = "static"
STATIC_URL = "/static/"
MEDIA_ROOT = "media"
MEDIA_URL = "/media/"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
# Sentry configuration
RAVEN_CONFIG = {
# DevOps will supply you with this.
"dsn": env("MESSAGE_SENDER_SENTRY_DSN", None)
}
# REST Framework conf defaults
REST_FRAMEWORK = {
"PAGE_SIZE": 1000,
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.CursorPagination",
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"seed_message_sender.auth.CachedTokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
}
# Webhook event definition
HOOK_EVENTS = {
# 'any.event.name': 'App.Model.Action' (created/updated/deleted)
# 'dummymodel.added': 'message_sender.DummyModel.created+'
"outbound.delivery_report": None,
"whatsapp.failed_contact_check": None,
"identity.no_address": None,
}
HOOK_DELIVERER = "message_sender.tasks.deliver_hook_wrapper"
HOOK_AUTH_TOKEN = env("HOOK_AUTH_TOKEN", "<PASSWORD>")
CELERY_BROKER_URL = env("BROKER_URL", "redis://localhost:6379/0")
CELERY_TASK_DEFAULT_QUEUE = "seed_message_sender"
CELERY_TASK_QUEUES = (
Queue(
"seed_message_sender",
Exchange("seed_message_sender"),
routing_key="seed_message_sender",
),
)
CELERY_TASK_ALWAYS_EAGER = False
# Tell Celery where to find the tasks
CELERY_IMPORTS = ("message_sender.tasks",)
CELERY_TASK_CREATE_MISSING_QUEUES = True
CELERY_TASK_ROUTES = {
"celery.backend_cleanup": {"queue": "mediumpriority"},
"message_sender.tasks.deliver_hook_wrapper": {"queue": "priority"},
"message_sender.tasks.send_message": {"queue": "lowpriority"},
"message_sender.tasks.fire_metric": {"queue": "metrics"},
"message_sender.tasks.requeue_failed_tasks": {"queue": "mediumpriority"},
}
METRICS_REALTIME = [
"vumimessage.tries.sum",
"vumimessage.maxretries.sum",
"vumimessage.obd.tries.sum",
"message.failures.sum",
"message.sent.sum",
"sender.send_message.connection_error.sum",
"sender.send_message.http_error.400.sum",
"sender.send_message.http_error.401.sum",
"sender.send_message.http_error.403.sum",
"sender.send_message.http_error.404.sum",
"sender.send_message.http_error.500.sum",
"sender.send_message.timeout.sum",
]
METRICS_SCHEDULED = []
METRICS_SCHEDULED_TASKS = []
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_IGNORE_RESULT = True
CELERY_WORKER_MAX_TASKS_PER_CHILD = 50
MESSAGE_BACKEND_VOICE = env("MESSAGE_SENDER_MESSAGE_BACKEND_VOICE", "vumi")
MESSAGE_BACKEND_TEXT = env("MESSAGE_SENDER_MESSAGE_BACKEND_TEXT", "vumi")
VUMI_API_URL_VOICE = env(
"MESSAGE_SENDER_VUMI_API_URL_VOICE",
"http://example.com/api/v1/go/http_api_nostream",
)
VUMI_ACCOUNT_KEY_VOICE = env("MESSAGE_SENDER_VUMI_ACCOUNT_KEY_VOICE", "acc-key")
VUMI_CONVERSATION_KEY_VOICE = env(
"MESSAGE_SENDER_VUMI_CONVERSATION_KEY_VOICE", "conv-key"
)
VUMI_ACCOUNT_TOKEN_VOICE = env("MESSAGE_SENDER_VUMI_ACCOUNT_TOKEN_VOICE", "conv-token")
VOICE_TO_ADDR_FORMATTER = env(
"VOICE_TO_ADDR_FORMATTER", "message_sender.formatters.noop"
)
TEXT_TO_ADDR_FORMATTER = env("TEXT_TO_ADDR_FORMATTER", "message_sender.formatters.noop")
VUMI_API_URL_TEXT = env(
"MESSAGE_SENDER_VUMI_API_URL_TEXT", "http://example.com/api/v1/go/http_api_nostream"
)
VUMI_ACCOUNT_KEY_TEXT = env("MESSAGE_SENDER_VUMI_ACCOUNT_KEY_TEXT", "acc-key")
VUMI_CONVERSATION_KEY_TEXT = env(
"MESSAGE_SENDER_VUMI_CONVERSATION_KEY_TEXT", "conv-key"
)
VUMI_ACCOUNT_TOKEN_TEXT = env("MESSAGE_SENDER_VUMI_ACCOUNT_TOKEN_TEXT", "conv-token")
JUNEBUG_API_URL_VOICE = env(
"MESSAGE_SENDER_JUNEBUG_API_URL_VOICE",
"http://example.com/jb/channels/abc-def/messages",
)
JUNEBUG_API_AUTH_VOICE = env("MESSAGE_SENDER_JUNEBUG_API_AUTH_VOICE", None)
JUNEBUG_API_FROM_VOICE = env("MESSAGE_SENDER_JUNEBUG_API_FROM_VOICE", None)
JUNEBUG_API_URL_TEXT = env(
"MESSAGE_SENDER_JUNEBUG_API_URL_TEXT",
"http://example.com/jb/channels/def-abc/messages",
)
JUNEBUG_API_AUTH_TEXT = env("MESSAGE_SENDER_JUNEBUG_API_AUTH_TEXT", None)
JUNEBUG_API_FROM_TEXT = env("MESSAGE_SENDER_JUNEBUG_API_FROM_TEXT", None)
MESSAGE_SENDER_MAX_RETRIES = env("MESSAGE_SENDER_MAX_RETRIES", 3)
MESSAGE_SENDER_MAX_FAILURES = env("MESSAGE_SENDER_MAX_FAILURES", 5)
METRICS_URL = env("METRICS_URL", None)
METRICS_AUTH = (
env("METRICS_AUTH_USER", "REPLACEME"),
env("METRICS_AUTH_PASSWORD", "<PASSWORD>"),
)
REDIS_HOST = env("REDIS_HOST", "localhost")
REDIS_PORT = env("REDIS_PORT", 6379)
REDIS_DB = env("REDIS_DB", 0)
# A value of 0 disables the concurrency limiter
CONCURRENT_VOICE_LIMIT = env("CONCURRENT_VOICE_LIMIT", 0)
# Seconds to wait before retrying a waiting message
VOICE_MESSAGE_DELAY = env("VOICE_MESSAGE_DELAY", 0)
# Seconds until we assume a message has finished
VOICE_MESSAGE_TIMEOUT = env("VOICE_MESSAGE_TIMEOUT", 0)
# A value of 0 disables the concurrency limiter
CONCURRENT_TEXT_LIMIT = env("CONCURRENT_TEXT_LIMIT", 0)
# Seconds to wait before retrying a waiting message
TEXT_MESSAGE_DELAY = env("TEXT_MESSAGE_DELAY", 0)
# Seconds until we assume a message has finished
TEXT_MESSAGE_TIMEOUT = env("TEXT_MESSAGE_TIMEOUT", 0)
# Default cache is Redis (instrumented by django-prometheus); "locmem" is a
# secondary in-process cache for values that need not be shared.
CACHES = {
    "default": {
        "BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
        "LOCATION": ["%s:%s" % (REDIS_HOST, REDIS_PORT)],
        "OPTIONS": {"DB": REDIS_DB},
    },
    "locmem": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
}
# Only inject the password when one is configured.
REDIS_PASSWORD = env("REDIS_PASSWORD", None)
if REDIS_PASSWORD:
    CACHES["default"]["OPTIONS"]["PASSWORD"] = REDIS_PASSWORD
DEFAULT_REQUEST_TIMEOUT = env("DEFAULT_REQUEST_TIMEOUT", 30)
IDENTITY_STORE_URL = env("IDENTITY_STORE_URL", "http://is/api/v1")
IDENTITY_STORE_TOKEN = env("IDENTITY_STORE_TOKEN", "<PASSWORD>")
AGGREGATE_OUTBOUND_BACKTRACK = env("AGGREGATE_OUTBOUND_BACKTRACK", 30)
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", None)
AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME", None)
AWS_S3_ENCRYPTION = True
if AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_STORAGE_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
SAFE_TIME_INTERVAL = env("SAFE_TIME_INTERVAL", None)
| StarcoderdataPython |
9665558 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.errorreporting_v1beta1.types import common
__protobuf__ = proto.module(
package='google.devtools.clouderrorreporting.v1beta1',
manifest={
'GetGroupRequest',
'UpdateGroupRequest',
},
)
class GetGroupRequest(proto.Message):
    r"""A request to return an individual group.
    Attributes:
        group_name (str):
            Required. The group resource name. Written as
            ``projects/{projectID}/groups/{group_name}``. Call
            ``groupStats.list`` <https://cloud.google.com/error-reporting/reference/rest/v1beta1/projects.groupStats/list>`__
            to return a list of groups belonging to this project.
            Example: ``projects/my-project-123/groups/my-group``
    """
    # Proto field 1: fully-qualified group resource name.
    group_name = proto.Field(
        proto.STRING,
        number=1,
    )
class UpdateGroupRequest(proto.Message):
    r"""A request to replace the existing data for the given group.
    Attributes:
        group (google.cloud.errorreporting_v1beta1.types.ErrorGroup):
            Required. The group which replaces the
            resource on the server.
    """
    # Proto field 1: the replacement ErrorGroup message.
    group = proto.Field(
        proto.MESSAGE,
        number=1,
        message=common.ErrorGroup,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| StarcoderdataPython |
4944024 | <filename>cm_custom/doc_events/customer.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
def before_insert(doc, method):
    """Doc-event hook run before a Customer document is inserted.

    Falls back to the custom ``cm_mobile_no`` field when no ``mobile_no``
    was provided on the document. ``method`` is the hook name passed by
    frappe and is unused here.
    """
    if doc.mobile_no:
        return
    doc.mobile_no = doc.cm_mobile_no
| StarcoderdataPython |
5136900 | <reponame>shongololo/aiovectortiler
from aiovectortiler.config_handler import Recipe, Layer, Query
def test_basic_recipe():
    """A minimal recipe dict yields Recipe/Layer/Query wrapper objects."""
    recipe = Recipe({
        "name": "myrecipe",
        "layers": [{
            "name": "mylayer",
            "queries": [{
                "sql": "SELECT * FROM table"
            }]
        }]
    })
    assert isinstance(recipe, Recipe)
    assert isinstance(recipe.layers['mylayer'], Layer)
    assert isinstance(recipe.layers['mylayer'].queries[0], Query)
def test_query_inherit_from_parents():
    """Query settings fall back through layer then recipe; layer wins over
    recipe (buffer), recipe fills gaps (srid), unknown keys are None."""
    recipe = Recipe({
        "name": "myrecipe",
        "srid": 3857,
        "buffer": 256,
        "layers": [{
            "name": "mylayer",
            "buffer": 128,
            "queries": [{
                "sql": "SELECT * FROM table"
            }]
        }]
    })
    query = recipe.layers['mylayer'].queries[0]
    assert query.sql == "SELECT * FROM table"
    assert query.buffer == 128
    assert query.srid == 3857
    assert query.unknown is None
6508943 | # coding: latin-1
# This program converts a folder of OBS text files
# in the older(?) format (one .txt file per OBS story)
# to a set of corresponding, OBS story files in Markdown format.
# Outputs .md files to a content folder under the input folder.
import re # regular expression module
import io
import os
import codecs
import string
# import json
import sys
def merge(image_list, txtfile, mdfile):
    """Convert one OBS story from .txt to Markdown, inserting image links.

    The first two non-blank lines become the title (#) and subtitle (##).
    When the number of images matches the number of story chunks, each image
    is written inline before its chunk; otherwise all images are appended at
    the end of the file.
    """
    titleLinesOut = 0  # 0 = title not yet written, 1 = subtitle pending, 2 = done
    lines = txtfile.readlines()
    nChunks = countChunks(lines)
    chunksOut = 0
    images_inline = (len(image_list) == nChunks)
    for line in lines:
        line = line.strip()
        if line:
            if titleLinesOut == 0:
                mdfile.write(u"# " + line + u"\n") # title
                titleLinesOut = 1
            elif titleLinesOut == 1:
                mdfile.write(u"## " + line + u"\n") # subtitle
                titleLinesOut = 2
            elif chunksOut < nChunks:
                # Story text: optionally preceded by its matching image line.
                if images_inline and chunksOut < len(image_list):
                    mdfile.write(u"\n" + image_list[chunksOut])
                mdfile.write(u"\n")
                mdfile.write(line + u"\n")
                chunksOut += 1
            else:
                # Past the story chunks (references section at the bottom).
                mdfile.write(u"\n" + line + u"\n")
                chunksOut += 1 # NOTE(review): counter no longer read here; likely a no-op
    if not images_inline:
        # Image count did not match the chunk count: dump all images at the end.
        mdfile.write(u"\n\n")
        for image in image_list:
            mdfile.write(image)
# Matches a line whose last non-space character is a colon -- the start of
# the references section at the bottom of a story file.
refstart_re = re.compile(r'\:\s*$')
def countChunks(lines):
    """Count the story-text chunks in *lines*.

    Blank lines are ignored, the first two non-blank lines (title and
    subtitle) are excluded via the -2 starting offset, and counting stops
    at the first line ending in a colon (the references section).
    """
    count = -2
    for raw in lines:
        stripped = raw.strip()
        if not stripped:
            continue
        if refstart_re.search(stripped):
            break
        count += 1
    return count
# Matches Markdown image lines of the form ![OBS Image](...) at line start.
image_re = re.compile(r'\!\[OBS Image\]')
def listImages(mdpath):
    """Return the image-link lines (in order) from the Markdown file *mdpath*.

    The file's encoding is sniffed from its BOM, defaulting to UTF-8.
    """
    image_list = []
    enc = detect_by_bom(mdpath, default="utf-8")
    # NOTE(review): "tr" mode and ``input`` shadowing the builtin are kept
    # as-is; the handle is explicitly closed below.
    input = io.open(mdpath, "tr", 1, encoding=enc)
    for line in input.readlines():
        if image_re.match(line):
            image_list.append(line)
    input.close()
    return image_list
def detect_by_bom(path, default):
    """Return the encoding indicated by *path*'s byte-order mark.

    Reads the first four bytes of the file and compares them against the
    known UTF BOMs; *default* is returned when no BOM is present.

    Fixes relative to the previous version:
    - single BOMs are wrapped in real tuples (``(codecs.BOM_UTF8)`` was just
      the bytes object, which broke iteration/``startswith`` on Python 3);
    - UTF-32 is tested before UTF-16, because the UTF-32-LE BOM begins with
      the UTF-16-LE BOM bytes and would otherwise be misdetected;
    - the stray no-op ``f.close`` is gone (the ``with`` block closes the file).
    """
    with open(path, 'rb') as f:
        raw = f.read(4)
    bom_table = (
        ('utf-8-sig', (codecs.BOM_UTF8,)),
        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
    )
    for enc, boms in bom_table:
        # str.startswith accepts a tuple of candidate prefixes.
        if raw.startswith(boms):
            return enc
    return default
# Hard-coded path to the English OBS content used as the image-layout source.
en_contentdir = r'C:\Users\Larry\Documents\GitHub\English\en_obs\content'
# Story files are named 00.txt .. 59.txt; group 1 captures the story number.
obsfilename = re.compile(r'([0-5][0-9])\.txt')
# Convert each .txt file in the specified folder to equivalent .md format
def convertStories(folder):
    """Convert every OBS story .txt in *folder* to Markdown.

    Image lines are copied from the corresponding English .md file; output
    goes to a ``content`` subfolder (created if needed) under *folder*.
    Stories whose English counterpart is missing are skipped with a message
    on stderr.
    """
    for filename in os.listdir(folder):
        obsmatch = obsfilename.match(filename)
        if obsmatch:
            sys.stdout.write(filename + "\n") # to show progress on stdout
            story = obsmatch.group(1)
            english_md_path = os.path.join(en_contentdir, story + '.md')
            if not os.access(english_md_path, os.F_OK):
                sys.stderr.write("Cannot access English OBS file: " + english_md_path + "\n")
                continue
            image_list = listImages(english_md_path)
            inputpath = os.path.join(folder, filename)
            enc = detect_by_bom(inputpath, default="utf-8")
            input = io.open(inputpath, "tr", 1, encoding=enc)
            outputpath = os.path.join(folder, "content")
            if not os.path.isdir(outputpath):
                os.mkdir(outputpath)
            outputpath = os.path.join(outputpath, story + ".md")
            output = io.open(outputpath, "tw", buffering=1, encoding='utf-8', newline='\n')
            merge(image_list, input, output) # converts this .txt file to .md
            output.close()
            input.close()
# Processes all .txt files in specified directory, one at a time
if __name__ == "__main__":
    # Resolve the target folder: no argument or "." means the current
    # directory; "hard-coded-path" is a developer convenience shortcut.
    if len(sys.argv) < 2 or sys.argv[1] == '.': # use current directory
        folder = os.getcwd()
    elif sys.argv[1] == 'hard-coded-path':
        folder = r'C:\Users\Larry\Documents\GitHub\Swahili\sw_obs_text_obs\01'
    else:
        folder = sys.argv[1]
    if folder and os.path.isdir(folder):
        convertStories(folder)
    else:
        sys.stderr.write("Usage: python obs_txt2md.py <folder>\n Use . for current folder.\n")
| StarcoderdataPython |
8033642 | from django.urls import include, path
from rest_framework.authtoken import views
from rest_framework.routers import DefaultRouter
from .views import UserCreate, UserViewSet
# DRF router provides the standard CRUD routes for the user viewset.
router = DefaultRouter()
router.register('users', UserViewSet)
urlpatterns = [
    path('', include(router.urls)),
    # Stand-alone registration endpoint and DRF token-auth endpoint.
    path('register/', UserCreate.as_view()),
    path('token/', views.obtain_auth_token),
]
| StarcoderdataPython |
6554775 | <reponame>ngc92/branchedflowsim<gh_stars>0
import os
try:
import unittest.mock as mock
except ImportError:
import mock as mock
from ..test_utils import *
from .result_file import ResultFile
from . import DataSpec, write_int
class Dummy(ResultFile):
    """Minimal concrete ResultFile used as the fixture class in these tests."""
    # Empty spec: no data fields; file name used for directory-based lookup.
    _SPEC_ = ()
    _FILE_NAME_ = "default_name"
    def __init__(self, arg=None):
        super(Dummy, self).__init__(arg)
def test_spec_check():
    """Subclassing ResultFile without a _SPEC_ must be rejected."""
    class Bad(ResultFile):
        pass
    with pytest.raises(ValueError):
        Bad()
###############################################################################
# Tests for init dispatch
###############################################################################
def test_init_from_file(file_):
    """Passing an open file object dispatches to from_file."""
    with mock.patch.object(Dummy, 'from_file') as ff:
        # open directly from file
        d = Dummy(file_)
        ff.assert_called_once_with(file_)
def mock_is_file(path):
    # Stand-in for os.path.isfile: only "test_file" exists.
    return path == "test_file"
@mock.patch("os.path.isfile", side_effect=mock_is_file)
def test_init_from_filename(_):
    """A path argument resolves either directly or via the default name."""
    # direct file name lookup
    with mock.patch.object(Dummy, 'from_file') as ff:
        # open directly from file
        d = Dummy("test_file")
        ff.assert_called_once_with("test_file")
    # using the default name
    with mock.patch.object(Dummy, 'from_file') as ff:
        # a directory path falls back to <path>/_FILE_NAME_
        d = Dummy("path")
        ff.assert_called_once_with(os.path.join("path", "default_name"))
def test_init_non_existing_file(monkeypatch):
    """Without a _FILE_NAME_ fallback, a missing file raises IOError."""
    monkeypatch.delattr(Dummy, "_FILE_NAME_")
    with pytest.raises(IOError):
        d = Dummy("test_file")
def test_init_from_dict():
    """Passing a dict dispatches to from_dict."""
    with mock.patch.object(Dummy, "from_dict") as fd:
        # construct from an in-memory dict
        data = {"a": 5, "b": 8}
        d = Dummy(data)
        fd.assert_called_once_with(data)
###############################################################################
# Test for from_file / from_dict
###############################################################################
def test_from_file_header_check(file_):
    """from_file rejects a file lacking the declared header, accepts a match."""
    d = Dummy()
    d._FILE_HEADER_ = "dummy"
    with pytest.raises(IOError):
        d.from_file(file_)
    d.from_dict = mock.MagicMock()
    file_.seek(0)
    file_.write("dummy")
    file_.seek(0)
    d.from_file(file_)
    d.from_dict.assert_called_once_with({})
def test_from_file(file_, monkeypatch):
    """from_file parses the spec'd fields, then calls _from_file and from_dict."""
    spec = (DataSpec("value", int, 1),)
    monkeypatch.setattr(Dummy, "_SPEC_", spec)
    write_int(file_, 5)
    file_.seek(0)
    with mock.patch.object(Dummy, "_from_file") as _from_file: # type: mock.MagicMock
        with mock.patch.object(Dummy, "from_dict") as from_dict:
            d = Dummy()
            d.from_file(file_)
            _from_file.assert_called_once_with(file_, {"value": 5})
            from_dict.assert_called_once_with({"value": 5})
def test_from_dict(monkeypatch):
    """from_dict sets attrs for is_attr specs only and forwards to _from_dict."""
    spec = (DataSpec("value", int, 1), DataSpec("spam", int, 1, is_attr=False))
    monkeypatch.setattr(Dummy, "_SPEC_", spec)
    data = {"value": 8, "spam": 10}
    with mock.patch.object(Dummy, "_from_dict") as _from_dict: # type: mock.MagicMock
        d = Dummy()
        d.from_dict(data)
        _from_dict.assert_called_once_with(data)
        assert d.value == 8
        assert not hasattr(d, "spam")
def test_from_dict_type_check():
    """from_dict only accepts dicts."""
    d = Dummy()
    with pytest.raises(TypeError):
        d.from_dict("spam")
###############################################################################
# Test for to_file
###############################################################################
@pytest.fixture()
def results(monkeypatch):
    """A Dummy populated with one attr field and one non-attr field."""
    spec = (DataSpec("value", int, 1), DataSpec("spam", int, 1, is_attr=False))
    monkeypatch.setattr(Dummy, "_SPEC_", spec)
    data = {"value": 5, "spam": 10}
    return Dummy(data)
def test_write_header(file_):
    """to_file emits the declared file header first."""
    d = Dummy()
    d._FILE_HEADER_ = "dummy"
    d.to_file(file_)
    file_.seek(0)
    assert file_.read() == "dummy"
def test_to_file(file_, results):
    """_to_file receives the attr data and may supply non-attr values."""
    def to_file_check(data):
        assert data == {"value": 5}
        data["spam"] = 10
    with mock.patch.object(results, "_to_file", side_effect=to_file_check) as _to_file: # type: mock.MagicMock
        results.to_file(file_)
        assert _to_file.call_count == 1
def test_to_file_errors_missing_data(file_, results):
    # missing data for non-spec value
    with pytest.raises(KeyError):
        results.to_file(file_)
    with pytest.raises(AttributeError):
        del results.value
        results.to_file(file_)
def test_to_file_errors_wrong_type(file_, results):
    # wrong data type for a spec
    results.value = 5.5
    with pytest.raises(TypeError):
        results.to_file(file_)
def test_to_file_errors_wrong_shape(file_, results):
    # wrong shape for a spec
    results.value = [1, 2, 3]
    with pytest.raises(ValueError):
        results.to_file(file_)
###############################################################################
# Test for reduce
###############################################################################
def test_reduce(monkeypatch):
    """reduce combines spec'd fields according to their reduction ("add")."""
    spec = (DataSpec("value", int, 1, reduction="add"), DataSpec("useless", int, 1, is_attr=False))
    monkeypatch.setattr(Dummy, "_SPEC_", spec)
    d1 = Dummy({"value": 5})
    d2 = Dummy({"value": 10})
    d3 = d1.reduce(d2)
    assert d3.value == 15
def test_reduce_errors():
    """reduce rejects operands of a different type."""
    d = Dummy()
    with pytest.raises(TypeError):
        d.reduce("str")
def test_reduce_unary():
    """Reducing with None is the identity and returns self."""
    d = Dummy()
    assert d.reduce(None) is d
| StarcoderdataPython |
1885127 | """
General functions to handle permutations, cyclic shifts, etc
Look for major bottlenecks here
"""
def create_keystring(key, size):
    """Render the integer *key* as a binary string zero-padded to *size* bits."""
    raw_bits = bin(key)
    # Drop the "0b" prefix, then pad on the left with zeros.
    return raw_bits[2:].zfill(size)
def permute_key(key, permutation):
    """Return the characters of *key* reordered by *permutation*.

    *permutation* is a sequence of indices into *key*; the result contains
    the character at each index, in order. Uses ``str.join`` instead of the
    previous repeated string concatenation, which is quadratic in the worst
    case.
    """
    return "".join(key[i] for i in permutation)
def left_shift(key, shift):
    """Cyclically rotate the string *key* left by *shift* positions."""
    head = key[:shift]
    tail = key[shift:]
    return tail + head
| StarcoderdataPython |
# Global settings. After finishing deployment, just reset the items below.
# LOCAL_ASDATA_PATH should point to an Aerosoft-format Navigraph data set.
SET_NAVDAT_PATH = "navidata_2201.map"
SET_APDAT_PATH = "airport_2201.air"
LOCAL_ASDATA_PATH = "/path/to/your/asdata"
# AIRAC cycle descriptor string (cycle ids and validity ranges).
NAVDAT_CYCLE = "AIRAC;2201,27JAN24FEB/22,2113,30DEC26JAN/22"
# Website functions.
LISTEN_PORT = 9807
METAR_UPDATE_MINUTE = 15
YourBingMapsKey = "Use Your Own Key"
BackstageKey = "set your self"
| StarcoderdataPython |
12814091 | <reponame>helotism/plarin
# -*- coding: utf-8 -*-
"""
An implementation of a ring buffer.
"""
#
#http://forum.micropython.org/viewtopic.php?t=601#p3491
#https://forum.micropython.org/viewtopic.php?t=1702
#
import array
class Circularbuffer:
    """A ring buffer that may be queried piecewise or by a threshold value.
    Attributes:
        size (int): How many items the buffer shall hold.
    """
    def __init__(self, size, typecode='f'):
        # Total number of append() calls ever made (not the fill level).
        self.appends = 0
        self.write_pointer = 0
        self.size = size
        self.read_pointer = -1  # -1 marks "no read started yet"
        # Half the capacity: how many items get_by_threshold consumes per call.
        self.threshold = self.size // 2
        self.data = array.array(typecode, [0 for i in range(self.size)])
        # NOTE(review): "emtpy" is a misspelling of "empty", but it is a
        # public attribute name, so renaming it would break callers.
        self.emtpy = True
        self.full = False
        # Exponentially decayed running average used for outlier rejection.
        self.avg_exp_decayed = 0
    def append(self, value):
        """Store *value*, replacing outliers with the decayed average."""
        self.appends += 1
        #print(value, self.avg_exp_decayed, abs(value - self.avg_exp_decayed))
        # After a warm-up of 10 samples, values deviating by more than twice
        # the decayed average are treated as sensor glitches and rewritten.
        if self.appends > 10 and abs(value - self.avg_exp_decayed) > 2 * self.avg_exp_decayed:
            value = self.avg_exp_decayed
            print("rewriting sensor history")
        self.avg_exp_decayed = (0.2 * value) + (0.8 * self.avg_exp_decayed)
        #print()
        self.data[self.write_pointer] = value
        if (self.write_pointer + 1) >= self.size:
            self.full = True
        if self.emtpy is True:
            self.emtpy = False
        # Advance and wrap the write position.
        self.write_pointer = (self.write_pointer + 1) % self.size
    def sum(self):
        # NOTE(review): shadows the builtin ``sum`` and prints instead of
        # returning the total; kept as-is to preserve the public interface.
        print(sum(self.data))
    def info(self):
        """Print the buffer contents and internal state (debug helper)."""
        print(self.data)
        print("len: ", len(self.data))
        print("write_pointer: ", self.write_pointer)
        print("full: ", self.full)
    def get_by_threshold(self):
        """Walk the read pointer backwards over up to ``threshold`` items.

        Prints each visited element; sets ``emtpy`` once no unread data
        remains. Assumes reads trail writes -- TODO confirm intended
        semantics, the branch conditions are intricate.
        """
        if self.read_pointer == -1:
            # Initial read: start at the most recently written slot.
            self.read_pointer = (self.write_pointer - 1) % self.size
        for j in range(self.threshold):
            if self.full is False and self.read_pointer > 0 and self.read_pointer >= self.write_pointer:
                print("+++1", self.read_pointer, self.data[self.read_pointer], self.read_pointer - 1)
                self.read_pointer = self.read_pointer - 1
            elif self.full is True:
                print("+++2", self.read_pointer, self.data[self.read_pointer], (self.read_pointer - 1) % self.size)
                if self.read_pointer == 0:
                    self.full = False
                self.read_pointer = (self.read_pointer - 1) % self.size
            else:
                self.emtpy = True
            # NOTE(review): ``j`` is the loop variable; this increment is a
            # no-op since ``range`` reassigns it each iteration.
            j += 1
#b = Circularbuffer(8)
#
#for i in [ 1, 2, 1, 3, 2, 4, 1, 3, 2, 4, 2, 12, 1]:
# b.append(i)
#This also works:
#http://www.onlamp.com/pub/a/python/excerpt/pythonckbk_chap1/index1.html
#class RingBuffer(object):
# def __init__(self,size_max):
# self.max = size_max
# self.data = [ ]
# def _full_append(self, x):
# print("f", x)
# self.data[self.cur] = x
# self.cur = (self.cur+1) % self.max
# def _full_get(self):
# return self.data[self.cur:]+self.data[:self.cur]
# def append(self, x):
# print("_", x)
# self.data.append(x)
# if len(self.data) == self.max:
# self.cur = 0
# # Permanently change self's methods from non-full to full
# self.append = self._full_append
# self.tolist = self._full_get
# def tolist(self):
# return self.data
| StarcoderdataPython |
272389 | <filename>unpack/unpack.py
#!/bin/env python3
import argparse, os
import fileformats, romfs, exefs, slb2, scecaf, self, ncch, ncsd
# Dispatch table: mode name -> handler(input, output, pack_flag).
modes = {
    "guess": fileformats.guess,
    "romfs": romfs.process,
    "exefs": exefs.process,
    "slb2": slb2.process,
    "scecaf": scecaf.process,
    "self": self.process,
    "ncch": ncch.process,
    "ncsd": ncsd.process
}
parser = argparse.ArgumentParser(description="pack/unpack tool for various 3ds file formats")
parser.add_argument("--input", "-i", type=str)
parser.add_argument("--output", "-o", type=str, default="dump")
parser.add_argument("--mode", "-m", type=str, default="guess")
parser.add_argument("--unpack", "-u", help="unpacks file", action="store_true")
parser.add_argument("--pack", "-p", help="packs directory", action="store_true")
args = parser.parse_args()
# Exactly one of --pack / --unpack must be given (both set or both unset is
# ambiguous), otherwise show usage and bail out.
if args.pack == args.unpack:
    parser.print_help()
    exit(1)
# Unknown mode: list the valid choices instead of raising a KeyError below.
if not args.mode in modes.keys():
    print("Available modes:")
    for key, val in modes.items():
        print(f"\t{key}")
    exit(1)
modes[args.mode](args.input, args.output, args.pack)
| StarcoderdataPython |
86543 | """Configuration for developing the remote project feature in TARGET mode"""
from .local import * # noqa
import socket
import os
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Uses django-environ to accept uri format
# See: https://django-environ.readthedocs.io/en/latest/#supported-types
DATABASES["default"]["NAME"] = "sodar_core_target"
DATABASES["default"]["ATOMIC_REQUESTS"] = False
# General site settings
# ------------------------------------------------------------------------------
SITE_TITLE = "SODAR Core Target Dev Site"
SITE_SUBTITLE = env.str("SITE_SUBTITLE", "Beta")
SITE_INSTANCE_TITLE = env.str(
"SITE_INSTANCE_TITLE", "SODAR Core Target Example"
)
# Local App Settings
# ------------------------------------------------------------------------------
PROJECTROLES_SITE_MODE = "TARGET"
# Admin user to replace non-LDAP project owners in remote sync (for TARGET site)
PROJECTROLES_ADMIN_OWNER = "admin_target"
| StarcoderdataPython |
1925137 | <filename>c3bottles/views/user.py
from json import loads
from re import sub
from flask import Blueprint, redirect, render_template, request, url_for
from flask_babel import lazy_gettext
from flask_login import current_user, login_user, logout_user
from werkzeug.routing import BuildError
from c3bottles.model.user import User
from c3bottles.views.forms import LoginForm
bp = Blueprint("user", __name__)
@bp.route("/login", methods=("POST", "GET"))
def login():
if request.method == "GET":
return redirect(url_for("main.index"))
form = LoginForm()
if form.validate_on_submit():
try:
back = redirect(url_for(form.back.data, **loads(sub("( u)?'", '"', form.args.data))))
except (BuildError, ValueError):
back = redirect(url_for("main.index"))
if current_user.is_authenticated:
return back
user = User.get(form.username.data)
if user and user.is_active and user.validate_password(form.password.data):
login_user(user, remember=True)
return back
return render_template(
"error.html",
heading=lazy_gettext("Login failed!"),
text=lazy_gettext("Wrong user name or password."),
back=form.back.data,
args=form.args.data,
)
@bp.route("/logout")
def logout():
logout_user()
return redirect(url_for("main.index"))
| StarcoderdataPython |
3371614 | """
It's somewhat of a fool's errand to introduce a Python ORM in 2013, with
`SQLAlchemy`_ ascendant (`Django's ORM`_ not-withstanding). And yet here we
are. SQLAlchemy is mature and robust and full-featured. This makes it complex,
difficult to learn, and kind of scary. The ORM we introduce here is simpler: it
targets PostgreSQL only, it depends on raw SQL (it has no object model for
schema definition nor one for query construction), and it never updates your
database for you. You are in full, direct control of your application's
database usage.
.. _SQLAlchemy: http://www.sqlalchemy.org/
.. _Django's ORM: http://www.djangobook.com/en/2.0/chapter05.html
The fundamental technique we employ, introduced by `<NAME> at PyOhio
2013`_, is to write SQL queries that "typecast" results to table types, and then
use a :class:`~psycopg2.extras.CompositeCaster` subclass to map
these to Python objects. This means we get to define our schema in SQL, and we
get to write our queries in SQL, and we get to explicitly indicate in our SQL
queries how Python should map the results to objects, and then we can write
Python objects that contain only business logic and not schema definitions.
.. _<NAME> at PyOhio 2013: https://www.youtube.com/watch?v=Wz1_GYc4GmU#t=25m06s
Introducing Table Types
-----------------------
Every table in PostgreSQL has a type associated with it, which is the column
definition for that table. These are composite types just like any other
composite type in PostgreSQL, meaning we can use them to cast query results.
When we do, we get a single field that contains our query result, nested one
level::
test=# CREATE TABLE foo (bar text, baz int);
CREATE TABLE
test=# INSERT INTO foo VALUES ('blam', 42);
INSERT 0 1
test=# INSERT INTO foo VALUES ('whit', 537);
INSERT 0 1
test=# SELECT * FROM foo;
+------+-----+
| bar | baz |
+------+-----+
| blam | 42 |
| whit | 537 |
+------+-----+
(2 rows)
test=# SELECT foo FROM foo;
+------------+
| foo |
+------------+
| (blam,42) |
| (whit,537) |
+------------+
(2 rows)
test=#
The same thing works for views::
test=# CREATE VIEW bar AS SELECT bar FROM foo;
CREATE VIEW
test=# SELECT * FROM bar;
+------+
| bar |
+------+
| blam |
| whit |
+------+
(2 rows)
test=# SELECT bar FROM bar;
+--------+
| bar |
+--------+
| (blam) |
| (whit) |
+--------+
(2 rows)
test=#
:mod:`psycopg2` provides a :func:`~psycopg2.extras.register_composite`
function that lets us map PostgreSQL composite types to Python objects. This
includes table and view types, and that is the basis for
:mod:`postgres.orm`. We map based on types, not tables.
.. _orm-tutorial:
ORM Tutorial
------------
First, write a Python class that subclasses :class:`~postgres.orm.Model`::
>>> from postgres.orm import Model
>>> class Foo(Model):
... typname = "foo"
...
Your model must have a :attr:`typname` attribute, which is the name of the
PostgreSQL type for which this class is an object mapping. (``typname``,
spelled without an "e," is the name of the relevant column in the ``pg_type``
table in your database.)
Second, register your model with your :class:`~postgres.Postgres` instance:
>>> db.register_model(Foo)
That will plug your model into the :mod:`psycopg2` composite casting
machinery, and you'll now get instances of your model back from
:meth:`~postgres.Postgres.one` and :meth:`~postgres.Postgres.all` when
you cast to the relevant type in your query. If your query returns more than
one column, you'll need to dereference the column containing the model just as
with any other query:
>>> rec = db.one(\"""
... SELECT foo, bar.*
... FROM foo
... JOIN bar ON foo.bar = bar.bar
... ORDER BY foo.bar
... LIMIT 1
... \""")
>>> rec.foo.bar
'blam'
>>> rec.bar
'blam'
And as usual, if your query only returns one column, then
:meth:`~postgres.Postgres.one` and :meth:`~postgres.Postgres.all`
will do the dereferencing for you:
>>> foo = db.one("SELECT foo FROM foo WHERE bar='blam'")
>>> foo.bar
'blam'
>>> [foo.bar for foo in db.all("SELECT foo FROM foo")]
['blam', 'whit']
To update your database, add a method to your model:
>>> db.unregister_model(Foo)
>>> class Foo(Model):
...
... typname = "foo"
...
... def update_baz(self, baz):
... self.db.run("UPDATE foo SET baz=%s WHERE bar=%s", (baz, self.bar))
... self.set_attributes(baz=baz)
...
>>> db.register_model(Foo)
Then use that method to update the database:
>>> db.one("SELECT baz FROM foo WHERE bar='blam'")
42
>>> foo = db.one("SELECT foo FROM foo WHERE bar='blam'")
>>> foo.update_baz(90210)
>>> foo.baz
90210
>>> db.one("SELECT baz FROM foo WHERE bar='blam'")
90210
We never update your database for you. We also never sync your objects for you:
note the use of the :meth:`~postgres.orm.Model.set_attributes` method to
sync our instance after modifying the database.
The Model Base Class
--------------------
"""
# Exceptions
# ==========
class ReadOnlyAttribute(AttributeError):
    """Raised when code assigns directly to a database-backed attribute."""
    def __str__(self):
        message = (
            "{} is a read-only attribute. Your Model should implement "
            "methods to change data; use set_attributes from your methods "
            "to sync local state."
        )
        return message.format(self.args[0])
class UnknownAttributes(AttributeError):
    """Raised by set_attributes when a keyword names no registered field."""
    def __str__(self):
        listed = ", ".join(self.args[0])
        return "The following attribute(s) are unknown to us: {}.".format(listed)
# Stuff
# =====
class Model:
    """Base class for models in :mod:`postgres.orm`.

    A registered subclass gains one read-only attribute per field of the
    composite type it maps (the columns, for table and view types). Writes
    to those attributes raise :exc:`ReadOnlyAttribute`; update the database
    yourself in methods on your subclass, then call
    :meth:`set_attributes` to keep the instance in sync.
    """
    __slots__ = ()
    typname = None  # the composite type's name, an entry in pg_type
    db = None  # bound to a Postgres object at registration time
    attnames = None  # field names; set in ModelCaster._from_db()
    def __init__(self, values):
        # Seed the field attributes, bypassing our read-only __setattr__.
        pairs = zip(self.__class__.attnames, values)
        if getattr(self, '__slots__', None):
            write = super(Model, self).__setattr__
            for attname, attvalue in pairs:
                write(attname, attvalue)
        else:
            self.__dict__.update(pairs)
    def __setattr__(self, name, value):
        # Refuse direct writes to database-backed fields.
        if name in self.__class__.attnames:
            raise ReadOnlyAttribute(name)
        return super(Model, self).__setattr__(name, value)
    def set_attributes(self, **kw):
        """Sync instance attributes after a database update, per :attr:`kw`.

        :raises: :exc:`~postgres.orm.UnknownAttributes`

        Only the fields handed to us by the psycopg2 composite-caster
        machinery (column names, for table and view types) may be set here.
        """
        known = self.__class__.attnames
        unknown = [name for name in kw if name not in known]
        if unknown:
            raise UnknownAttributes(unknown)
        write = super(Model, self).__setattr__
        for name, value in kw.items():
            write(name, value)
if __name__ == '__main__':  # pragma: no cover
    # Doctest fixture: rebuild a throwaway schema matching the module
    # docstring examples, then run them. Destroys the public schema —
    # only ever point this at a disposable test database.
    from postgres import Postgres
    db = Postgres()
    db.run("DROP SCHEMA IF EXISTS public CASCADE")
    db.run("CREATE SCHEMA public")
    db.run("DROP TABLE IF EXISTS foo CASCADE")
    db.run("CREATE TABLE foo (bar text, baz int)")
    db.run("INSERT INTO foo VALUES ('blam', 42)")
    db.run("INSERT INTO foo VALUES ('whit', 537)")
    db.run("CREATE VIEW bar AS SELECT bar FROM foo")
    import doctest
    doctest.testmod()
| StarcoderdataPython |
4913195 | # Generated by Django 2.0.8 on 2019-05-13 18:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.8: creates the ExportMedia model with an
    # id primary key and a FileField (upload_to='', i.e. MEDIA_ROOT itself).
    # Keep generated field definitions byte-stable so the migration history
    # stays in sync with the database schema.
    dependencies = [
        ('blitz_api', '0017_actiontoken_data_change_email'),
    ]
    operations = [
        migrations.CreateModel(
            name='ExportMedia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='', verbose_name='file')),
            ],
        ),
    ]
| StarcoderdataPython |
330566 | <reponame>osolovyoff/pixelate<filename>clear.py
import os
import shutil
# Generated build artifacts to delete: IDE solution/db files, plus the
# directories the engine regenerates on the next build.
files = ['Pixelate.sln', 'Pixelate.VC.db']
folders = ['Intermediate', 'Binaries', 'Saved']

for stale_file in files:
    if os.path.exists(stale_file):
        os.remove(stale_file)

for stale_dir in folders:
    if os.path.exists(stale_dir):
        shutil.rmtree(stale_dir)
6610420 |
from driver import (
db_msg,
)
contacts_map = dict()  # contact _id -> NickName cache, lazily filled by load_contacts()
def load_contacts():
    """Refresh the in-memory cache from the db_msg.contact_cleansed collection.

    Fetches only the NickName field of every contact and maps each
    document's ``_id`` to it in ``contacts_map``.
    """
    for doc in db_msg.contact_cleansed.find({}, {'NickName': 1}):
        contacts_map[doc['_id']] = doc['NickName']
def get_nickname(username):
    """Return the cached nickname for *username*, or *username* itself.

    Reloads the whole cache on a miss. Note: if the name is genuinely
    absent from the collection, every call re-queries the database.
    """
    if username not in contacts_map:
        load_contacts()
    return contacts_map.get(username, username)
if __name__ == '__main__':
    # load_contacts()
    # Smoke test: resolve one known hashed username to its nickname.
    print(get_nickname('@9b5e4f9b9ba78e43764f4bf4b332bb47'))
| StarcoderdataPython |
9628387 | ''' '''
'''
ISC License
Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
## \defgroup Tutorials_6_4
## @{
# Demonstrates how to use the MRP_Steering() module to stabilize the attitude relative to the Hill Frame using the
# BSK_sim architecture.
#
# BSK Simulation: Attitude Steering {#scenario_AttSteering}
# ====
#
# Scenario Description
# -----
# This script sets up a 6-DOF spacecraft orbiting Earth. The goal of this tutorial is to demonstrate
# how to configure and use the MRP_Steering module with a rate sub-servo system
# the new BSK_Sim architecture.
#
# To run the default scenario, call the python script from a Terminal window through
#
# python scenario_AttSteering.py
#
# The simulation mimics the basic simulation simulation in the earlier tutorial in
# [scenarioAttSteering.py](@ref scenarioAttitudeSteering).
#
# The simulation layout is shown in the following illustration.
# 
#
# The scenario is initialized through:
# ~~~~~~~~~~~~~{.py}
# class scenario_AttSteering(BSKScenario):
# def __init__(self, masterSim):
# super(scenario_AttSteering, self).__init__(masterSim)
# self.name = 'scenario_AttSteering'
# ~~~~~~~~~~~~~
#
# Within configure_initial_conditions(), the user needs to first define the spacecraft FSW mode for the simulation
# through:
# ~~~~~~~~~~~~~{.py}
# self.masterSim.modeRequest = "steeringRW"
# ~~~~~~~~~~~~~
#
# which triggers the `initiateSteeringRW` event within the BSK_FSW.py script.
#
# The initial conditions for the scenario are the same as found within [scenario_FeedbackRW.py](@ref scenario_FeedbackRW)
#
# Within BSK_Scenario.py log_outputs(), the user must log the relevant messages to observe how the spacecraft corrected
# for its initial tumbling through:
# ~~~~~~~~~~~~~{.py}
# # FSW process outputs
# samplingTime = self.masterSim.get_FswModel().processTasksTimeStep
# self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().trackingErrorData.outputDataName, samplingTime)
# self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().mrpSteeringData.outputDataName, samplingTime)
# self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName, samplingTime)
# ~~~~~~~~~~~~~
# The data is then pulled using:
# ~~~~~~~~~~~~~{.py}
# num_RW = 4 # number of wheels used in the scenario
#
# # Dynamics process outputs: pull log messages below if any
# RW_speeds = self.masterSim.pullMessageLogData( # dataOmegaRW
# self.masterSim.get_DynModel().rwStateEffector.OutputDataString + ".wheelSpeeds", range(num_RW))
# # FSW process outputs
# dataUsReq = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName + ".motorTorque", range(num_RW))
# sigma_BR = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".sigma_BR", range(3))
# omega_BR_B = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".omega_BR_B", range(3))
# omega_BR_ast = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().mrpSteeringData.outputDataName + ".omega_BastR_B", range(3))
#
# ~~~~~~~~~~~~~
# and then plot the results using:
# ~~~~~~~~~~~~~{.py}
# # Plot results
# timeData = dataUsReq[:, 0] * macros.NANO2MIN
# BSK_plt.plot_attitude_error(timeData, sigma_BR)
# BSK_plt.plot_rw_cmd_torque(timeData, dataUsReq, num_RW)
# BSK_plt.plot_rate_error(timeData, omega_BR_B)
# BSK_plt.plot_rw_speeds(timeData, RW_speeds, num_RW)
# ~~~~~~~~~~~~~
#
#
#
# Custom Dynamics Configurations Instructions
# -----
# The dynamics setup is the same as in [scenario_FeedbackRW.py](@ref scenario_FeedbackRW).
#
# Custom FSW Configurations Instructions
# -----
# To configure the desired "steeringRW" FSW mode the user must add the following modules to BSK_FSW.py
# within BSK_FSW.py:
# ~~~~~~~~~~~~~{.py}
# self.mrpSteeringData = MRP_Steering.MRP_SteeringConfig()
# self.mrpSteeringWrap = SimBase.setModelDataWrap(self.mrpSteeringData)
# self.mrpSteeringWrap.ModelTag = "MRP_Steering"
#
# self.rateServoData = rateServoFullNonlinear.rateServoFullNonlinearConfig()
# self.rateServoWrap = SimBase.setModelDataWrap(self.rateServoData)
# self.rateServoWrap.ModelTag = "rate_servo"
#
# ~~~~~~~~~~~~~
# each of which prepare various configuration messages to be attached to the various FSW task. The following code shows
# how to create a new control task, `mrpSteeringRWsTask`, and how to attach the configuration data to the task.
# ~~~~~~~~~~~~~{.py}
# SimBase.AddModelToTask("mrpSteeringRWsTask", self.mrpSteeringWrap, self.mrpSteeringData, 10)
# SimBase.AddModelToTask("mrpSteeringRWsTask", self.rateServoWrap, self.rateServoData, 9)
# SimBase.AddModelToTask("mrpSteeringRWsTask", self.rwMotorTorqueWrap, self.rwMotorTorqueData, 8)
# ~~~~~~~~~~~~~
# The advantage of the BSK_Sim architecture becomes apparent here again, as the `trackingErrorData` and `rwMotorTorqueData`
# data were already defined from an earlier scenario as well as the entire `hillPointingTask`.
# The user can simply add them to their desired task without
# having to manually reconfigure the messages. These tasks are then enabled when the user sets the modeRequest variable
# to `steeringRW` in BSK_scenario.py.
# ~~~~~~~~~~~~~{.py}
# SimBase.createNewEvent("initiateSteeringRW", self.processTasksTimeStep, True,
# ["self.modeRequest == 'steeringRW'"],
# ["self.fswProc.disableAllTasks()",
# "self.enableTask('hillPointTask')",
# "self.enableTask('mrpSteeringRWsTask')"])
# ~~~~~~~~~~~~~
#
#
# Numerical Simulation Results
# ------------
# If this simulation is run, then the following plots should be shown.
# 
# 
# 
# 
#
## @}
# Import utilities
from Basilisk.utilities import orbitalMotion, macros, unitTestSupport
# Get current file path
import sys, os, inspect
# Resolve this script's directory so sibling packages import regardless of CWD.
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
# Import master classes: simulation base class and scenario base class
sys.path.append(path + '/..')
from BSK_masters import BSKSim, BSKScenario
import BSK_Dynamics, BSK_Fsw
# Import plotting file for your scenario
sys.path.append(path + '/../plotting')
import BSK_Plotting as BSK_plt
# Create your own scenario child class
class scenario_AttitudeSteeringRW(BSKScenario):
    """BSK scenario: Hill-frame pointing stabilized with the MRP steering FSW mode."""

    def __init__(self, masterSim):
        super(scenario_AttitudeSteeringRW, self).__init__(masterSim)
        self.name = 'scenario_AttitudeSteeringRW'
        self.masterSim = masterSim

    def configure_initial_conditions(self):
        """Select the 'steeringRW' FSW mode and set the initial orbit and attitude."""
        # Fix: the original used Python-2-only `print ...` statements, which
        # make the module unparsable on Python 3. With a single argument,
        # print(...) parses and behaves identically on Python 2 and 3.
        print('%s: configure_initial_conditions' % self.name)

        # Configure FSW mode
        self.masterSim.modeRequest = 'steeringRW'

        # Configure Dynamics initial conditions (classic orbital elements)
        oe = orbitalMotion.ClassicElements()
        oe.a = 10000000.0  # semi-major axis [m]
        oe.e = 0.01
        oe.i = 33.3 * macros.D2R
        oe.Omega = 48.2 * macros.D2R
        oe.omega = 347.8 * macros.D2R
        oe.f = 85.3 * macros.D2R
        mu = self.masterSim.get_DynModel().gravFactory.gravBodies['earth'].mu
        rN, vN = orbitalMotion.elem2rv(mu, oe)
        orbitalMotion.rv2elem(mu, rN, vN)
        self.masterSim.get_DynModel().scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN)  # [m]
        self.masterSim.get_DynModel().scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN)  # [m/s]
        self.masterSim.get_DynModel().scObject.hub.sigma_BNInit = [[0.5], [0.6], [-0.3]]  # initial MRP attitude
        self.masterSim.get_DynModel().scObject.hub.omega_BN_BInit = [[0.01], [-0.01], [-0.01]]  # initial body rates

    def log_outputs(self):
        """Subscribe the dynamics and FSW messages needed by pull_outputs."""
        print('%s: log_outputs' % self.name)
        samplingTime = self.masterSim.get_DynModel().processTasksTimeStep
        # Dynamics process outputs:
        self.masterSim.TotalSim.logThisMessage(self.masterSim.get_DynModel().rwStateEffector.OutputDataString, samplingTime)
        # FSW process outputs
        samplingTime = self.masterSim.get_FswModel().processTasksTimeStep
        self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().trackingErrorData.outputDataName, samplingTime)
        self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().mrpSteeringData.outputDataName, samplingTime)
        self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName, samplingTime)
        return

    def pull_outputs(self, showPlots):
        """Pull the logged data, plot it, and return the saved-figure map."""
        print('%s: pull_outputs' % self.name)
        num_RW = 4  # number of wheels used in the scenario

        # Dynamics process outputs: pull log messages below if any
        RW_speeds = self.masterSim.pullMessageLogData(  # dataOmegaRW
            self.masterSim.get_DynModel().rwStateEffector.OutputDataString + ".wheelSpeeds", range(num_RW))
        # FSW process outputs
        dataUsReq = self.masterSim.pullMessageLogData(
            self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName + ".motorTorque", range(num_RW))
        sigma_BR = self.masterSim.pullMessageLogData(
            self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".sigma_BR", range(3))
        omega_BR_B = self.masterSim.pullMessageLogData(
            self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".omega_BR_B", range(3))
        # Pulled for parity with the tutorial; not plotted below.
        omega_BR_ast = self.masterSim.pullMessageLogData(
            self.masterSim.get_FswModel().mrpSteeringData.outputDataName + ".omega_BastR_B", range(3))

        # Plot results
        BSK_plt.clear_all_plots()
        timeData = dataUsReq[:, 0] * macros.NANO2MIN
        BSK_plt.plot_attitude_error(timeData, sigma_BR)
        BSK_plt.plot_rw_cmd_torque(timeData, dataUsReq, num_RW)
        BSK_plt.plot_rate_error(timeData, omega_BR_B)
        BSK_plt.plot_rw_speeds(timeData, RW_speeds, num_RW)
        figureList = {}
        if showPlots:
            BSK_plt.show_all_plots()
        else:
            fileName = os.path.basename(os.path.splitext(__file__)[0])
            figureNames = ["attitudeErrorNorm", "rwMotorTorque", "rateError", "rwSpeed"]
            figureList = BSK_plt.save_all_plots(fileName, figureNames)
        return figureList
def run(showPlots):
    """Build the BSK simulation, execute the scenario, and return its figures.

    Args:
        showPlots: when True, display plots interactively; otherwise save
            them to files and return the figure dictionary.
    """
    # Instantiate base simulation
    TheBSKSim = BSKSim()
    TheBSKSim.set_DynModel(BSK_Dynamics)
    TheBSKSim.set_FswModel(BSK_Fsw)
    TheBSKSim.initInterfaces()

    # Configure a scenario in the base simulation
    TheScenario = scenario_AttitudeSteeringRW(TheBSKSim)
    TheScenario.log_outputs()
    TheScenario.configure_initial_conditions()

    # Initialize simulation
    TheBSKSim.InitializeSimulationAndDiscover()

    # Configure run time and execute simulation
    simulationTime = macros.min2nano(10.)
    TheBSKSim.ConfigureStopTime(simulationTime)
    # Fix: Python-2-only `print ...` statements replaced with print(...),
    # which parses and behaves identically on Python 2 and 3.
    print('Starting Execution')
    TheBSKSim.ExecuteSimulation()
    print('Finished Execution. Post-processing results')

    # Pull the results of the base simulation running the chosen scenario
    figureList = TheScenario.pull_outputs(showPlots)
    return figureList
# Run the scenario with plots displayed when invoked directly.
if __name__ == "__main__":
    run(True)
| StarcoderdataPython |
11342693 | import datetime
import uuid
from datetime import timezone
from app.main import db
from app.main.model.channel import Channel
norilsk_time = timezone(datetime.timedelta(0, 25200), 'Asia/Krasnoyarsk')  # fixed UTC+7 (25200 s); no DST rules
def add_channel(data):
    """Create a channel unless one with data['public_id'] already exists.

    Returns a (response_dict, http_status) tuple: 201 on creation,
    409 when the given public_id is already taken.
    """
    channel = None
    if data.get('public_id'):
        channel = Channel.query.filter_by(public_id=data['public_id']).first()
    if not channel:
        new_channel = Channel(
            name=data['name'],
            last_change=datetime.datetime.now(tz=norilsk_time),
            channel_type=data['channel_type'],
            state=data['state'],
            dimmer_state=data['dimmer_state'],
            # Server-generated id: any client-supplied public_id is only used
            # for the duplicate check above, never stored.
            public_id=str(uuid.uuid4()),
        )
        channel_id = save_obj(new_channel)
        response_object = {
            'status': 'success',
            'message': 'Successfully created.',
            'public_id': channel_id,
        }
        return response_object, 201
    else:
        response_object = {
            'status': 'fail',
            'message': 'Channel already exists',
        }
        return response_object, 409
def update_channel(public_id, data):
    """Update the channel identified by *public_id* with values from *data*.

    Returns (response_dict, 404) when no row matched, otherwise
    (matched_row_count, 204).

    NOTE(review): Query.update() returns the number of matched rows, so the
    success branch returns an int, not a Channel instance — confirm callers
    expect that, given add_channel returns a dict.
    """
    channel = None
    if public_id:
        # channel = Channel.query.filter_by(public_id=public_id).first()
        channel = Channel.query.filter_by(public_id=public_id).update(
            dict(
                name=data['name'],
                dimmer_state=data['dimmer_state'],
                last_change=datetime.datetime.now(tz=norilsk_time),
                channel_type=data['channel_type'],
                state=data['state']
            )
        )
    # Falsy channel covers both "no public_id given" and "0 rows matched".
    if not bool(channel):
        response_object = {
            'status': 'fail',
            'message': 'Channel not found',
        }
        return response_object, 404
    else:
        db.session.commit()
        return channel, 204
def get_all_channels():
    """Return every Channel row (unfiltered, unpaginated)."""
    return Channel.query.all()
def get_channel_state(channel_id: object) -> object:
    """Return the Channel whose public_id equals *channel_id*, or None."""
    return Channel.query.filter_by(public_id=channel_id).first()
def save_obj(data):
    """Persist *data* in the current session and return its public id.

    NOTE(review): this returns ``data.get_public_id`` without calling it —
    correct only if the model exposes it as a property; verify on Channel.
    """
    db.session.add(data)
    db.session.commit()
    return data.get_public_id
| StarcoderdataPython |
1600405 | <reponame>DrugoLebowski/nram-executor
# Vendor
import numpy as np
# Project
from tasks.Task import Task
class TaskListK(Task):
    """ [ListK]
    Given a pointer to the head of a linked list and a number k, find the value of the
    k-th element on the list. List nodes are represented as two adjacent memory cells: a pointer
    to the next node and a value. Elements are in random locations in the memory, so that
    the network needs to follow the pointers to find the correct element. Input is given as:
    head, k, out, ... where head is a pointer to the first node on the list, k indicates how many
    hops are needed and out is a cell where the output should be put.
    """
    def create(self) -> (np.ndarray, np.ndarray, np.ndarray):
        # Memory layout per example: cell 0 = head pointer, cell 1 = k,
        # cell 2 = output cell index (always 2), cells 3.. = (next, value)
        # node pairs, last cell = -1 sentinel.
        list_size = int((self.max_int - 4) / 2)
        # k is drawn in [0, list_size): the walk below always stays on-list.
        hops = np.random.randint(0, list_size, size=(self.batch_size))
        list_elements = np.random.randint(0, self.max_int, size=(self.batch_size, list_size))
        # One permutation per example: node j's entry gives its rank in the
        # logical list order, decoupling list order from memory order.
        lists_elements_permutations = np.stack([np.random.permutation(list_size) for _ in range(self.batch_size)], axis=0)
        init_mem = np.zeros((self.batch_size, self.max_int), dtype=np.int32)
        # Create for each example the list
        for example in range(self.batch_size):
            for j, permidx in enumerate(lists_elements_permutations[example]):
                # Physical slot index of the node that follows node j in list order.
                next_element_pointer = np.where(lists_elements_permutations[example] == permidx + 1)[0]
                if permidx == 0: # If the node is the first than set the pointer in the first memory position
                    init_mem[example, 0] = 3 + 2 * j
                init_mem[example, 3 + (2 * j)] = \
                    -1.0 if len(next_element_pointer) == 0 else 3 + (2 * next_element_pointer[0]) # Set the pointer to the next list node
                init_mem[example, 3 + (2 * j) + 1] = list_elements[example, j] # Set the value of the list node
        init_mem[:, 2] = 2
        init_mem[:, 1] = hops
        init_mem[:, -1] = -1
        out_mem = init_mem.copy()
        for example in range(self.batch_size):
            output_value = -1.0
            pointer = out_mem[example, 0]
            # Walk k+1 iterations: after the loop, output_value is the value
            # of the node reached after k pointer hops from the head.
            for hop in range(out_mem[example, 1] + 1):
                output_value = out_mem[example, pointer + 1]
                pointer = out_mem[example, pointer]
            out_mem[example, out_mem[example, 2]] = output_value
        # Only the output cell (index 2) contributes to the training cost.
        cost_mask = np.zeros((self.batch_size, self.max_int), dtype=np.int8)
        cost_mask[:, 2] = 1
        return init_mem, out_mem, cost_mask
| StarcoderdataPython |
11322385 | <reponame>mcnigno/gea
from flask_appbuilder.widgets import ListWidget, FormWidget
class MyListWidget(ListWidget):
    # Override the default Flask-AppBuilder list rendering template.
    template = 'widgets/listRev1.html'
class MyEditWidget(FormWidget):
    # Override the default edit-form template.
    template = 'widgets/edit_form_sub.html'
| StarcoderdataPython |
1736022 | <gh_stars>1-10
from __future__ import annotations
import logging
import typing
from typing import Any, Dict, List
import fileseq
from silex_client.action.command_base import CommandBase
from silex_client.utils.parameter_types import ListParameterMeta, PathParameterMeta
if typing.TYPE_CHECKING:
from silex_client.action.action_query import ActionQuery
import os
import pathlib
class Rename(CommandBase):
    """
    Rename the given files
    """
    # Parameter schema consumed by the Silex action UI.
    parameters = {
        "src": {
            "label": "Source path",
            "type": PathParameterMeta(multiple=True),
            "value": None,
            "tooltip": "Select the file or the directory you want to rename",
        },
        "name": {
            "label": "New name",
            "type": ListParameterMeta(str),
            "value": None,
            "tooltip": "Insert the new name for the given file",
        },
    }
    @CommandBase.conform_command()
    async def __call__(
        self,
        parameters: Dict[str, Any],
        action_query: ActionQuery,
        logger: logging.Logger,
    ):
        """Rename each source file, preserving its file-sequence extension.

        Returns a dict with the original and the renamed paths.
        """
        source_paths: List[pathlib.Path] = parameters["src"]
        new_names: List[str] = parameters["name"]
        new_paths = []
        source_sequences = fileseq.findSequencesInList(source_paths)
        name_sequences = fileseq.findSequencesInList(new_names)
        logger.info("Renaming %s to %s", source_sequences, name_sequences)
        # Loop over all the files to copy
        for index, source_path in enumerate(source_paths):
            # If only one new name is given, this will still work thanks to the modulo
            new_name = new_names[index % len(new_names)]
            # Check the file to rename
            if not os.path.exists(source_path):
                raise Exception(f"Source path {source_path} does not exists")
            # Find the sequence this file belongs to
            # NOTE(review): next() without a default raises StopIteration if no
            # sequence contains source_path — consider a clearer error message.
            sequence = next(
                sequence
                for sequence in source_sequences
                if source_path in [pathlib.Path(file_path) for file_path in sequence]
            )
            # Construct the new name, keeping the source sequence's extension
            extension = str(sequence.extension())
            new_name = os.path.splitext(new_name)[0] + extension
            new_path = source_path.parent / new_name
            # Silently replaces any existing file at the destination.
            if new_path.exists():
                os.remove(new_path)
            os.rename(source_path, new_path)
            new_paths.append(new_path)
        return {
            "source_paths": source_paths,
            "new_paths": new_paths,
        }
| StarcoderdataPython |
11253148 | from dks.base.activation_getter import (
get_activation_function as _get_numpy_activation_function,
)
from dks.base.activation_transform import _get_activations_params
def subnet_max_func(x, r_fn):
    """Compose r_fn as a depth-7 residual stack of x; return the max of
    that composition and the single-application shortcut branch."""
    shortcut = r_fn(x)
    value = r_fn(x)
    for _ in range(7):
        # One residual block: r_fn applied twice, plus a skip connection.
        value = r_fn(r_fn(value)) + value
    return max(value, shortcut)
def subnet_max_func_v2(x, r_fn):
    """Depth-2 residual composition of r_fn with 0.8/0.2 mixing weights,
    maxed against the single-application shortcut branch."""
    shortcut = r_fn(x)
    value = r_fn(x)
    for _ in range(2):
        # Weighted residual block: 0.8 on the transform, 0.2 on the skip.
        value = 0.8 * r_fn(r_fn(value)) + 0.2 * value
    return max(value, shortcut)
def get_transformed_activations(
    activation_names,
    method="TAT",
    dks_params=None,
    tat_params=None,
    max_slope_func=None,
    max_curv_func=None,
    subnet_max_func=None,
    activation_getter=_get_numpy_activation_function,
):
    """Compute DKS/TAT transform parameters for the named activations.

    Thin wrapper around ``dks.base.activation_transform._get_activations_params``.

    NOTE(review): ``activation_getter`` is accepted but never forwarded to
    ``_get_activations_params`` — confirm whether it should be passed through.
    """
    params = _get_activations_params(
        activation_names,
        method=method,
        dks_params=dks_params,
        tat_params=tat_params,
        max_slope_func=max_slope_func,
        max_curv_func=max_curv_func,
        subnet_max_func=subnet_max_func,
    )
    return params
# Demo: derive TAT transform parameters under each subnet max function.
params = get_transformed_activations(
    ["swish"], method="TAT", subnet_max_func=subnet_max_func
)
print(params)
params = get_transformed_activations(
    ["leaky_relu"], method="TAT", subnet_max_func=subnet_max_func_v2
)
print(params)
| StarcoderdataPython |
1700382 | <filename>nlgen/tests/cfg/test_terminal.py
from nlgen.cfg import CFG, PTerminal
def test_simple_terminal():
    """A grammar whose start symbol maps to one terminal yields that terminal."""
    cfg = CFG([
        ("S", PTerminal("foo"))
    ])
    assert list(cfg.permutation_values("S")) == [("foo",)]
def test_equal():
    """Two PTerminals with the same word and features compare equal."""
    assert (PTerminal("I", features={"person": "1"}) ==
            PTerminal("I", features={"person": "1"}))
def test_coerce_features():
    """ we should be able to coerce common feature descriptions. """
    # string as value
    assert PTerminal("foo", features={"num": "1"}).features == {"num": {"1"}}
    # list as value
    assert PTerminal("foo", features={"num": ["1", "2"]}).features == {"num": {"1", "2"}}
    # coerce all else as strings.
    assert PTerminal("foo", features={"num": 1}).features == {"num": {"1"}}
| StarcoderdataPython |
11309920 | <gh_stars>10-100
from hashlib import sha256
from charm.schemes.pk_vrf import VRF10
from charm.toolbox.pairinggroup import PairingGroup
from pai.pouw.ticket_selection.similarity import cosine_similarity
# Ticket preference vector, compared against the task properties below via
# cosine similarity in main().
ticket_prefs = [0, 7, 100]
# Corresponding task properties vector (same dimensionality as ticket_prefs).
task_props = [0, 2, 1]
def get_bit(data, num):
    """Return bit *num* of *data*, counting LSB-first within each byte."""
    byte_index = int(num // 8)
    bit_index = int(num % 8)
    return (data[byte_index] >> bit_index) & 1
def bytearray_to_bits(data):
    """Expand *data* into a flat list of bits, LSB-first within each byte."""
    bits = []
    for position in range(len(data) * 8):
        bits.append(get_bit(data, position))
    return bits
def setup(message):
    """Hash *message* to a bit list and create a VRF keypair sized to it.

    Returns (bit_message, pk, sk, vrf) where bit_message is the SHA-256
    digest of *message* expanded to 256 bits, LSB-first per byte.
    """
    message_hash = sha256(message).digest()
    bit_message = bytearray_to_bits(message_hash)
    # block of bits
    n = len(bit_message)
    grp = PairingGroup('MNT224')
    vrf = VRF10(grp)
    (pk, sk) = vrf.setup(n)
    return bit_message, pk, sk, vrf
def get_hash_threshold(st):
    """Map the VRF output st['y'] to a deterministic ratio in [0, 1].

    Hashes repr(st['y']) with SHA-256 and normalizes the 256-bit integer
    by the maximum possible digest value.
    """
    digest = sha256(repr(st['y']).encode()).digest()
    numerator = int.from_bytes(digest, 'big')
    return numerator / (2 ** 256 - 1)
def main():
    """Demo: gate ticket/task matching on a VRF-derived random threshold."""
    # The message encodes the role ("miner", "supervisor", ...) plus a hex
    # identity digest.
    message = bytearray("miner9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08".encode())
    bit_message, pk, sk, vrf = setup(message)
    # generate proof over block x (using sk)
    st = vrf.prove(sk, bit_message)
    hash_threshold = get_hash_threshold(st)
    print(f'Ratio: {hash_threshold:.2f}')
    # verify bits using pk and proof
    if vrf.verify(pk, bit_message, st):
        print('Verification succeeded.')
    else:
        print('Verification failed.')
    cos_sim = cosine_similarity(ticket_prefs, task_props)
    print(f'Similarity: {cos_sim:.4f}')
    # The ticket works on the task only if its similarity clears the threshold.
    if cos_sim >= hash_threshold:
        print('Ticket selected to work.')
    else:
        print('Ticket should wait for another task.')
if __name__ == "__main__":
    main()
11324574 | <reponame>GaoSida/Neural-SampleRank
import os
import pytest
from collections import Counter
from torchtext.vocab import Vocab
@pytest.fixture()
def dummy_vocabs():
    """Build token, character, and label Vocabs for tests.

    torchtext vocabs are sorted by dictionary order internally.
    """
    tokens = ["at", "happy", "is", "peter", "such", "won"]
    chars = [chr(c) for c in range(ord('A'), ord('Z') + 1)]
    chars += [chr(c) for c in range(ord('a'), ord('z') + 1)]
    labels = ["B-LOC", "B-PER", "I-LOC", "I-PER", "O"]
    token_vocab = Vocab(Counter(tokens))
    char_vocab = Vocab(Counter(chars))
    label_vocab = Vocab(Counter(labels))
    return token_vocab, char_vocab, label_vocab
3351611 | <gh_stars>1-10
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library that defines a panoramic transformer which rotates the world.
The panoramic transformer performs a yaw rotation about the camera's z-axis
by translating an equirectangular representation. We assume a camera convention
of z-axis up.
Below is a top-down view of the camera coordinate system. The +z-axis points
into this viewpoint. A positive yaw rotation corresponds to a CCW rotation about
the z-axis when looking down.
y
^
|
|
+-----> x
One thing to note is that a CCW rotation of the camera is equivalent to a
CW rotation of the scene. Therefore rotating the scene by an offset phi
is equivalent to rotating the camera by -phi.
Usage:
pano_image = ... # a pano or a representation derived from a pano
phi = ... # a differentiable estimate of a yaw rotation that is
# predicted from pano_image. Units in radians.
# Rotating the scene by phi is equivalent to rotating the camera by -phi.
rotation_normalized_pano_image = shift_pano_by_rotation(pano_image, -phi)
rotation_normalized_output = network(rotation_normalized_pano_image)
# Optional inverse rotate to get restore pano_image's original orientation.
output = shift_pano_by_rotation(rotation_normalized_output, alpha_phi)
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import resampler
def rotate_pano_horizontally(input_feature_map, yaw_angle):
  """Rotates input_feature_map by yaw_angle by horizontally translating pixels.

  The layer is differentiable with respect to yaw_angle and input_feature_map.
  yaw_angle is positive for CCW rotation about the z-axis where the coordinates
  are constructed with z-axis facing up.

  Args:
    input_feature_map: panoramic image or neural feature maps of shape [B, H, W,
      C].
    yaw_angle: A tensor of shape `[B]` which represents the desired rotation of
      input_feature_map. yaw_angle is in units of radians. A positive yaw_angle
      rolls pixels left.

  Returns:
    A rotated feature map with dimensions `[B, H, W, C]`

  Reference:
    [1]: 'Spatial Transformer Networks', Jaderberg et. al,
    (https://arxiv.org/abs/1506.02025)
  """
  # Number of input dimensions.
  tfshape = tf.shape(input_feature_map)
  batch_size = tfshape[0]
  height = tfshape[1]
  width = tfshape[2]
  float32_width = tf.cast(width, dtype=tf.float32)
  float32_height = tf.cast(height, dtype=tf.float32)
  # A full 2*pi of yaw maps to one full image width of horizontal shift.
  x_offset = (yaw_angle / 2 / np.pi) * float32_width
  x_grid = tf.linspace(0., float32_width - 1, width)  # (W)
  # 0.5 * original_image_width to match the convention described in comment
  x_pixel_coord = x_grid[tf.newaxis] + x_offset[:, tf.newaxis]  # (B, W)
  x_pixel_coord = tf.tile(x_pixel_coord[:, tf.newaxis, :],
                          [1, height, 1])  # (B, H, W)
  y_pixel_coord = tf.linspace(0., float32_height - 1,
                              height)[tf.newaxis, :, tf.newaxis]  # (1, H, 1)
  y_pixel_coord = tf.tile(y_pixel_coord, [batch_size, 1, width])
  # Wrap sampling coordinates so the shift rolls around the 360-degree seam.
  wrapped_x_pixel_coord = tf.floormod(x_pixel_coord, float32_width)
  # Because these are panoramas, we can concatenate the first column to the
  # right side. This allows us to interpolate values for coordinates that
  # correspond to pixels that connects the left and right edges of the
  # panorama.
  input_feature_map = tf.concat(
      [input_feature_map, input_feature_map[:, :, :1]], axis=2)
  return resampler.resampler(
      input_feature_map,
      tf.stack([wrapped_x_pixel_coord, y_pixel_coord], axis=-1))
| StarcoderdataPython |
1642317 | <filename>baseline/tilscore.py
from .utils import dist_to_px, get_mask_area, write_json
from .nms import slide_nms, to_wsd
from .constants import TUMOR_STROMA_MASK_PATH
def create_til_score(image_path, xml_path, output_path):
    """Compute a TIL (tumor-infiltrating lymphocyte) score and write it as JSON.

    Applies slide-level non-maximum suppression to the detections in
    *xml_path*, converts the surviving points to WSD format, estimates the
    total lymphocyte area and normalizes it by the tumor-stroma mask area.
    """
    detections = slide_nms(image_path, xml_path, 256)
    wsd_detections = to_wsd(detections)
    print(len(wsd_detections))

    # Each detection is assigned a fixed area: an 8-unit disc at 0.5 spacing.
    single_til_area = dist_to_px(8, 0.5) ** 2
    total_til_area = len(wsd_detections) * single_til_area

    stroma_area = get_mask_area(TUMOR_STROMA_MASK_PATH)
    til_score = (100 / int(stroma_area)) * int(total_til_area)
    print(f"tilscore = {til_score}")
    write_json(til_score, output_path)
3378746 | <gh_stars>100-1000
import json
from concurrent.futures import ThreadPoolExecutor
from retry import RetryOnException as retry
from proxypool import (
ProxyPoolValidator,
ProxyPoolScraper,
RedisProxyPoolClient
)
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
class ProxyPoolOperator(BaseOperator):
    """Airflow operator that scrapes proxies, validates them concurrently and
    stores the healthiest ones in Redis."""

    @apply_defaults
    def __init__(
            self,
            proxy_webpage,
            number_of_proxies,
            testing_url,
            max_workers,
            redis_config,
            redis_key,
            *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.proxy_webpage = proxy_webpage
        self.testing_url = testing_url
        self.number_of_proxies = number_of_proxies
        self.max_workers = max_workers
        self.redis_config = redis_config
        self.redis_key = redis_key

    @retry(5)
    def execute(self, context):
        """Fetch candidate proxies, validate them in a thread pool and replace
        the Redis entry with the five healthiest valid proxies."""
        scraper = ProxyPoolScraper(self.proxy_webpage)
        validator = ProxyPoolValidator(self.testing_url)
        candidates = scraper.get_proxy_stream(self.number_of_proxies)

        with ThreadPoolExecutor(max_workers=self.max_workers) as pool:
            checked = pool.map(validator.validate_proxy, candidates)

        # Keep only proxies whose validation flag is exactly True, best first.
        healthy = [result for result in checked if result.is_valid is True]
        healthy.sort(key=lambda result: result.health, reverse=True)

        with RedisProxyPoolClient(self.redis_key, self.redis_config) as redis_client:
            redis_client.override_existing_proxies(
                [json.dumps(entry.proxy) for entry in healthy[:5]]
            )
| StarcoderdataPython |
1718989 | import threading
import time
def func_1():
    """Print a heartbeat message (with the current thread's name) every 2 seconds, forever."""
    while True:
        print(f"[{threading.current_thread().name}] Printing this message every 2 seconds")
        time.sleep(2)


# Initiate the thread with daemon set to True: a daemon thread is killed
# automatically when the main thread exits.
daemon_thread = threading.Thread(target=func_1, name="daemon-thread", daemon=True)
# or
# daemon_thread.daemon = True
# or
# daemon_thread.setDaemon(True)
daemon_thread.start()

# Sleep for 4 seconds and end the main thread; the daemon dies with it,
# so roughly two heartbeat messages are printed.
time.sleep(4)
# the main thread ends
3599458 | <gh_stars>10-100
# Exercise 7.10
# Author: <NAME>
class Hello:
    """Python 2 exercise class: callable greeter with a custom str()."""

    def __call__(self, string):
        # Prints the greeting rather than returning it, so the call
        # expression itself evaluates to None (seen in the sample run).
        print 'Hello, ' + string + '!'

    def __str__(self):
        return 'Hello, World!'


a = Hello()
print a('students')
print a
"""
Sample run:
python Hello.py
Hello, students!
None
Hello, World!
"""
| StarcoderdataPython |
176362 | <reponame>kkcookies99/UAST
def XXX(self, nums1: List[int], nums2: List[int]) -> float:
    """Return the median of two sorted lists via repeated k-th element search.

    Runs in O(log(m + n)) by discarding roughly k/2 elements per recursion.
    """

    def kth(a, b, k):
        # Keep the shorter list as `a` so the k//2 probe never overruns it.
        if len(a) > len(b):
            a, b = b, a
        if not a:
            return b[k - 1]
        if k == 1:
            return min(a[0], b[0])
        ia = min(k // 2, len(a)) - 1
        ib = min(k // 2, len(b)) - 1
        if a[ia] > b[ib]:
            # The first ib+1 elements of b cannot contain the k-th element.
            return kth(a, b[ib + 1:], k - ib - 1)
        return kth(a[ia + 1:], b, k - ia - 1)

    total = len(nums1) + len(nums2)
    # For odd totals both ranks coincide; for even totals they straddle the middle.
    lower, upper = (total + 1) // 2, (total + 2) // 2
    return (kth(nums1, nums2, lower) + kth(nums1, nums2, upper)) / 2
| StarcoderdataPython |
12845685 | # Nested Lists and Dictionaries
def run():
    """Demonstrate nested collections: a dict of lists and a list of dicts."""
    # Demo values only; not printed below.
    sample_list = [1, "Hello", True, 4.5]
    sample_dict = {
        "firstname": "Mauricio",
        "lastname": "Valadez"
    }

    people = [
        {"firstname": "Mauricio", "lastname": "Valadez"},
        {"firstname": "Carlos", "lastname": "García"},
        {"firstname": "Francisco", "lastname": "Hernández"},
        {"firstname": "Laura", "lastname": "Pérez"},
        {"firstname": "Gabriela", "lastname": "Rojas"},
    ]
    number_groups = {
        "natural_nums": [1, 2, 3, 4, 5],
        "integer_nums": [-1, -2, 0, 1, 2],
        "float_nums": [1.2, 3.7, 9.86],
    }

    for group_name, numbers in number_groups.items():
        print(group_name, "-", numbers)
    for person in people:
        print(person["firstname"], "-", person["lastname"])


if __name__ == '__main__':
    run()
5075378 | # -*- coding:utf-8 -*-
import re
import SpiderUtils
class ImageSpider(object):
    """Python 2 scraper that downloads the .jpg images referenced by a page."""

    # Running counter used to name downloaded files sequentially across calls.
    num = 0

    def __init__(self):
        pass

    def getImageFormUrl(self, url):
        """Fetch *url*, extract image URLs from its HTML and save each image.

        NOTE(review): relies on the module-global `imageDir` set in the
        __main__ block below — confirm before reusing this class elsewhere.
        """
        print "--------------------------------解析网页代码"
        print "--网页地址 | url = " + url
        content = SpiderUtils.getHtmlContent(url)
        # Debug: dump the raw page content.
        # print content
        # Lookbehind/lookahead match the src of <img> tags carrying this
        # site's onerror and referrerpolicy attributes.
        imageUrls = SpiderUtils.findTextArray(content, re.compile(r"(?<=onerror=\"img_error\(this\);\" src=\").+?\.jpg(?=\" referrerpolicy=\"no-referrer\")"))
        print "打印图片列表"
        print imageUrls
        for imageUrl in imageUrls:
            imagePath = imageDir + ("%d.jpg" % ImageSpider.num)
            SpiderUtils.saveImage(imageUrl, imagePath)
            ImageSpider.num += 1
if __name__ == '__main__':
    # Create the download directory under the user's home.
    print "--------------------------------创建下载目录"
    imageDir = SpiderUtils.getHomeDir() + '/Downloads/python/ImageSpider-dbmeinv/'
    print "--下载目录 | imageDir = " + str(imageDir)
    SpiderUtils.makeDirs(imageDir)
    # Ask the user for the page range to crawl (inclusive).
    print "--------------------------------输入页码"
    pageFrom = raw_input("输入开始页:")
    pageTo = raw_input("输入结束页:")
    # pageFrom = 1
    # pageTo = 1
    print "开始页码 | pageFrom = " + str(pageFrom)
    print "结束页码 | pageTo = " + str(pageTo)
    print "--------------------------------开始爬虫"
    imageSpider = ImageSpider()
    pageFromInt = int(pageFrom)
    pageToInt = int(pageTo)
    # Crawl each listing page in order.
    while pageFromInt <= pageToInt:
        print "--------------------------------开始解析"
        url = "http://www.dbmeinv.com/?pager_offset=" + str(pageFromInt)
        print ("正在处理第%d页" % pageFromInt) + " | url = " + url
        imageSpider.getImageFormUrl(url)
        pageFromInt += 1
    print "--------------------------------解析结束"
| StarcoderdataPython |
3541885 | <filename>gpytorch/lazy/interpolated_lazy_tensor.py
#!/usr/bin/env python3
import torch
# from .block_diag_lazy_tensor import BlockDiagLazyTensor
from .lazy_tensor import LazyTensor
from .non_lazy_tensor import lazify, NonLazyTensor
from .root_lazy_tensor import RootLazyTensor
from ..utils import sparse
from ..utils.broadcasting import _pad_with_singletons
from ..utils.getitem import _noop_index
from ..utils.interpolation import left_interp, left_t_interp
class InterpolatedLazyTensor(LazyTensor):
    """Lazy representation of ``W_left @ base_lazy_tensor @ W_right^T``.

    ``W_left``/``W_right`` are sparse interpolation matrices stored as index
    and value tensors of shape ``[*batch, rows, k]``: row ``i`` of ``W`` has
    nonzero entries ``values[..., i, :]`` at columns ``indices[..., i, :]``.
    When indices/values are omitted they default to the identity
    interpolation (one weight-1 entry per row).
    """

    def _check_args(
        self, base_lazy_tensor, left_interp_indices, left_interp_values, right_interp_indices, right_interp_values
    ):
        """Return an error string when interp/base shapes disagree, else None."""
        if left_interp_indices.size() != left_interp_values.size():
            return "Expected left_interp_indices ({}) to have the same size as left_interp_values ({})".format(
                left_interp_indices.size(), left_interp_values.size()
            )
        if right_interp_indices.size() != right_interp_values.size():
            return "Expected right_interp_indices ({}) to have the same size as right_interp_values ({})".format(
                right_interp_indices.size(), right_interp_values.size()
            )
        if left_interp_indices.shape[:-2] != right_interp_indices.shape[:-2]:
            return (
                "left interp size ({}) is incompatible with right interp size ({}). Make sure the two have the "
                "same number of batch dimensions".format(left_interp_indices.size(), right_interp_indices.size())
            )
        if left_interp_indices.shape[:-2] != base_lazy_tensor.shape[:-2]:
            return (
                "left interp size ({}) is incompatible with base lazy tensor size ({}). Make sure the two have the "
                "same number of batch dimensions".format(left_interp_indices.size(), base_lazy_tensor.size())
            )

    def __init__(
        self,
        base_lazy_tensor,
        left_interp_indices=None,
        left_interp_values=None,
        right_interp_indices=None,
        right_interp_values=None,
    ):
        """Wrap *base_lazy_tensor*; missing interp tensors default to identity.

        The base tensor's batch shape is expanded to match the interpolation
        batch shape when necessary.
        """
        base_lazy_tensor = lazify(base_lazy_tensor)
        if left_interp_indices is None:
            # Identity interpolation: row i selects base row i with weight 1.
            num_rows = base_lazy_tensor.size(-2)
            left_interp_indices = torch.arange(0, num_rows, dtype=torch.long, device=base_lazy_tensor.device)
            left_interp_indices.unsqueeze_(-1)
            left_interp_indices = left_interp_indices.expand(*base_lazy_tensor.batch_shape, num_rows, 1)
        if left_interp_values is None:
            left_interp_values = torch.ones(
                left_interp_indices.size(), dtype=base_lazy_tensor.dtype, device=base_lazy_tensor.device
            )
        if right_interp_indices is None:
            num_rows = base_lazy_tensor.size(-2)
            right_interp_indices = torch.arange(0, num_rows, dtype=torch.long, device=base_lazy_tensor.device)
            right_interp_indices.unsqueeze_(-1)
            right_interp_indices = right_interp_indices.expand(*base_lazy_tensor.batch_shape, num_rows, 1)
        if right_interp_values is None:
            right_interp_values = torch.ones(
                right_interp_indices.size(), dtype=base_lazy_tensor.dtype, device=base_lazy_tensor.device
            )
        if left_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
            try:
                base_lazy_tensor = base_lazy_tensor._expand_batch(left_interp_indices.shape[:-2])
            except RuntimeError:
                raise RuntimeError(
                    "interp size ({}) is incompatible with base_lazy_tensor size ({}). ".format(
                        right_interp_indices.size(), base_lazy_tensor.size()
                    )
                )
        super(InterpolatedLazyTensor, self).__init__(
            base_lazy_tensor, left_interp_indices, left_interp_values, right_interp_indices, right_interp_values
        )
        self.base_lazy_tensor = base_lazy_tensor
        self.left_interp_indices = left_interp_indices
        self.left_interp_values = left_interp_values
        self.right_interp_indices = right_interp_indices
        self.right_interp_values = right_interp_values

    def _approx_diag(self):
        """Cheap diagonal approximation: interpolate sqrt of the base diagonal
        on both sides and multiply elementwise."""
        base_diag_root = self.base_lazy_tensor.diag().sqrt()
        left_res = left_interp(self.left_interp_indices, self.left_interp_values, base_diag_root.unsqueeze(-1))
        right_res = left_interp(self.right_interp_indices, self.right_interp_values, base_diag_root.unsqueeze(-1))
        res = left_res * right_res
        return res.squeeze(-1)

    def _expand_batch(self, batch_shape):
        """Expand all five constituent tensors to *batch_shape*."""
        return self.__class__(
            self.base_lazy_tensor._expand_batch(batch_shape),
            self.left_interp_indices.expand(*batch_shape, *self.left_interp_indices.shape[-2:]),
            self.left_interp_values.expand(*batch_shape, *self.left_interp_values.shape[-2:]),
            self.right_interp_indices.expand(*batch_shape, *self.right_interp_indices.shape[-2:]),
            self.right_interp_values.expand(*batch_shape, *self.right_interp_values.shape[-2:]),
        )

    def _get_indices(self, row_index, col_index, *batch_indices):
        """Evaluate selected entries: gather the relevant base entries and
        contract them with the left/right interpolation weights."""
        left_interp_indices = self.left_interp_indices.__getitem__((*batch_indices, row_index)).unsqueeze(-2)
        right_interp_indices = self.right_interp_indices.__getitem__((*batch_indices, col_index)).unsqueeze(-1)
        base_vals = self.base_lazy_tensor._get_indices(
            left_interp_indices, right_interp_indices,
            *[batch_index.view(*batch_index.shape, 1, 1) for batch_index in batch_indices]
        )
        left_interp_values = self.left_interp_values.__getitem__((*batch_indices, row_index)).unsqueeze(-2)
        right_interp_values = self.right_interp_values.__getitem__((*batch_indices, col_index)).unsqueeze(-1)
        interp_values = left_interp_values * right_interp_values
        # Sum over the k_left x k_right interpolation cross terms.
        res = (base_vals * interp_values).sum([-2, -1])
        return res

    def _getitem(self, row_index, col_index, *batch_indices):
        """Index rows/cols by slicing the interpolation tensors; the base
        tensor itself is only batch-indexed, never row/col-indexed."""
        # Handle batch dimensions
        # Construt a new LazyTensor
        base_lazy_tensor = self.base_lazy_tensor
        left_interp_indices = self.left_interp_indices
        left_interp_values = self.left_interp_values
        right_interp_indices = self.right_interp_indices
        right_interp_values = self.right_interp_values
        if len(batch_indices):
            base_lazy_tensor = base_lazy_tensor._getitem(_noop_index, _noop_index, *batch_indices)
            # Special case: if both row and col are not indexed, then we are done
            if (row_index is _noop_index and col_index is _noop_index):
                left_interp_indices = left_interp_indices[batch_indices]
                left_interp_values = left_interp_values[batch_indices]
                right_interp_indices = right_interp_indices[batch_indices]
                right_interp_values = right_interp_values[batch_indices]
                return self.__class__(
                    base_lazy_tensor, left_interp_indices, left_interp_values,
                    right_interp_indices, right_interp_values, **self._kwargs
                )
        # Normal case: we have to do some processing on either the rows or columns
        # We will handle this through "interpolation"
        left_interp_indices = left_interp_indices[(*batch_indices, row_index, _noop_index)]
        left_interp_values = left_interp_values[(*batch_indices, row_index, _noop_index)]
        right_interp_indices = right_interp_indices[(*batch_indices, col_index, _noop_index)]
        right_interp_values = right_interp_values[(*batch_indices, col_index, _noop_index)]
        # Construct interpolated LazyTensor
        res = self.__class__(
            base_lazy_tensor, left_interp_indices, left_interp_values,
            right_interp_indices, right_interp_values, **self._kwargs
        )
        return res

    def _matmul(self, rhs):
        """Compute ``W_left @ base @ W_right^T @ rhs`` via two sparse matmuls
        around the base tensor's matmul."""
        # Get sparse tensor representations of left/right interp matrices
        left_interp_t = self._sparse_left_interp_t(self.left_interp_indices, self.left_interp_values)
        right_interp_t = self._sparse_right_interp_t(self.right_interp_indices, self.right_interp_values)
        if rhs.ndimension() == 1:
            is_vector = True
            rhs = rhs.unsqueeze(-1)
        else:
            is_vector = False
        # right_interp^T * rhs
        right_interp_res = sparse.bdsmm(right_interp_t, rhs)
        # base_lazy_tensor * right_interp^T * rhs
        base_res = self.base_lazy_tensor._matmul(right_interp_res)
        # left_interp * base_lazy_tensor * right_interp^T * rhs
        left_interp_mat = left_interp_t.transpose(-1, -2)
        res = sparse.bdsmm(left_interp_mat, base_res)
        # Squeeze if necessary
        if is_vector:
            res = res.squeeze(-1)
        return res

    def _mul_constant(self, other):
        # We're using a custom method here - the constant mul is applied to the base_lazy tensor
        # This preserves the interpolated structure
        return self.__class__(
            self.base_lazy_tensor._mul_constant(other),
            self.left_interp_indices,
            self.left_interp_values,
            self.right_interp_indices,
            self.right_interp_values,
        )

    def _t_matmul(self, rhs):
        """Transpose matmul: ``W_right @ base^T @ W_left^T @ rhs`` (mirror of
        _matmul with the interp roles swapped)."""
        # Get sparse tensor representations of left/right interp matrices
        left_interp_t = self._sparse_left_interp_t(self.left_interp_indices, self.left_interp_values)
        right_interp_t = self._sparse_right_interp_t(self.right_interp_indices, self.right_interp_values)
        if rhs.ndimension() == 1:
            is_vector = True
            rhs = rhs.unsqueeze(-1)
        else:
            is_vector = False
        # left_interp^T * rhs
        left_interp_res = sparse.bdsmm(left_interp_t, rhs)
        # base_lazy_tensor^T * left_interp^T * rhs
        base_res = self.base_lazy_tensor._t_matmul(left_interp_res)
        # right_interp * base_lazy_tensor^T * left_interp^T * rhs
        right_interp_mat = right_interp_t.transpose(-1, -2)
        res = sparse.bdsmm(right_interp_mat, base_res)
        # Squeeze if necessary
        if is_vector:
            res = res.squeeze(-1)
        return res

    def _quad_form_derivative(self, left_vecs, right_vecs):
        """Gradients of ``left_vecs^T (W_l B W_r^T) right_vecs`` w.r.t. the
        base tensor and the interp values (indices get zero grads)."""
        # Get sparse tensor representations of left/right interp matrices
        left_interp_t = self._sparse_left_interp_t(self.left_interp_indices, self.left_interp_values)
        right_interp_t = self._sparse_right_interp_t(self.right_interp_indices, self.right_interp_values)
        if left_vecs.ndimension() == 1:
            left_vecs = left_vecs.unsqueeze(1)
            right_vecs = right_vecs.unsqueeze(1)
        # base_lazy_tensor grad
        left_res = sparse.bdsmm(left_interp_t, left_vecs)
        right_res = sparse.bdsmm(right_interp_t, right_vecs)
        base_lv_grad = list(self.base_lazy_tensor._quad_form_derivative(left_res, right_res))
        # left_interp_values grad
        n_vecs = right_res.size(-1)
        n_left_rows = self.left_interp_indices.size(-2)
        n_right_rows = self.right_interp_indices.size(-2)
        n_left_interp = self.left_interp_indices.size(-1)
        n_right_interp = self.right_interp_indices.size(-1)
        n_inducing = right_res.size(-2)
        # left_interp_values grad
        right_interp_right_res = self.base_lazy_tensor._matmul(right_res).contiguous()
        batch_shape = torch.Size(right_interp_right_res.shape[:-2])
        batch_size = batch_shape.numel()
        if len(batch_shape):
            # Offset indices per batch so a flattened index_select can be used.
            batch_offset = torch.arange(0, batch_size, dtype=torch.long, device=self.device).view(*batch_shape)
            batch_offset.unsqueeze_(-1).unsqueeze_(-1).mul_(n_inducing)
            # NOTE(review): this assignment of batched_right_interp_indices
            # appears unused in this branch — confirm against upstream gpytorch.
            batched_right_interp_indices = self.right_interp_indices
            batched_left_interp_indices = (self.left_interp_indices + batch_offset).view(-1)
        else:
            batched_left_interp_indices = self.left_interp_indices.view(-1)
        flattened_right_interp_right_res = right_interp_right_res.view(batch_size * n_inducing, n_vecs)
        selected_right_vals = flattened_right_interp_right_res.index_select(0, batched_left_interp_indices)
        selected_right_vals = selected_right_vals.view(*batch_shape, n_left_rows, n_left_interp, n_vecs)
        left_values_grad = (selected_right_vals * left_vecs.unsqueeze(-2)).sum(-1)
        # right_interp_values_grad
        left_interp_left_res = self.base_lazy_tensor._t_matmul(left_res).contiguous()
        batch_shape = left_interp_left_res.shape[:-2]
        batch_size = batch_shape.numel()
        if len(batch_shape):
            batch_offset = torch.arange(0, batch_size, dtype=torch.long, device=self.device).view(*batch_shape)
            batch_offset.unsqueeze_(-1).unsqueeze_(-1).mul_(n_inducing)
            batched_right_interp_indices = (self.right_interp_indices + batch_offset).view(-1)
        else:
            batched_right_interp_indices = self.right_interp_indices.view(-1)
        flattened_left_interp_left_res = left_interp_left_res.view(batch_size * n_inducing, n_vecs)
        selected_left_vals = flattened_left_interp_left_res.index_select(0, batched_right_interp_indices)
        selected_left_vals = selected_left_vals.view(*batch_shape, n_right_rows, n_right_interp, n_vecs)
        right_values_grad = (selected_left_vals * right_vecs.unsqueeze(-2)).sum(-1)
        # Return zero grad for interp indices
        res = tuple(
            base_lv_grad
            + [
                torch.zeros_like(self.left_interp_indices),
                left_values_grad,
                torch.zeros_like(self.right_interp_indices),
                right_values_grad,
            ]
        )
        return res

    def _size(self):
        """Shape is batch x (left interp rows) x (right interp rows)."""
        return torch.Size(
            self.base_lazy_tensor.batch_shape + (self.left_interp_indices.size(-2), self.right_interp_indices.size(-2))
        )

    def _transpose_nonbatch(self):
        """Transpose by transposing the base and swapping left/right interp."""
        res = self.__class__(
            self.base_lazy_tensor.transpose(-1, -2),
            self.right_interp_indices,
            self.right_interp_values,
            self.left_interp_indices,
            self.left_interp_values,
            **self._kwargs
        )
        return res

    def _sparse_left_interp_t(self, left_interp_indices_tensor, left_interp_values_tensor):
        """Build (and memoize) the sparse transpose of the left interp matrix."""
        if hasattr(self, "_sparse_left_interp_t_memo"):
            # Reuse the cached sparse tensor only if indices/values are unchanged.
            if torch.equal(self._left_interp_indices_memo, left_interp_indices_tensor) and torch.equal(
                self._left_interp_values_memo, left_interp_values_tensor
            ):
                return self._sparse_left_interp_t_memo
        left_interp_t = sparse.make_sparse_from_indices_and_values(
            left_interp_indices_tensor, left_interp_values_tensor, self.base_lazy_tensor.size()[-1]
        )
        self._left_interp_indices_memo = left_interp_indices_tensor
        self._left_interp_values_memo = left_interp_values_tensor
        self._sparse_left_interp_t_memo = left_interp_t
        return self._sparse_left_interp_t_memo

    def _sparse_right_interp_t(self, right_interp_indices_tensor, right_interp_values_tensor):
        """Build (and memoize) the sparse transpose of the right interp matrix."""
        if hasattr(self, "_sparse_right_interp_t_memo"):
            if torch.equal(self._right_interp_indices_memo, right_interp_indices_tensor) and torch.equal(
                self._right_interp_values_memo, right_interp_values_tensor
            ):
                return self._sparse_right_interp_t_memo
        right_interp_t = sparse.make_sparse_from_indices_and_values(
            right_interp_indices_tensor, right_interp_values_tensor, self.base_lazy_tensor.size()[-1]
        )
        self._right_interp_indices_memo = right_interp_indices_tensor
        self._right_interp_values_memo = right_interp_values_tensor
        self._sparse_right_interp_t_memo = right_interp_t
        return self._sparse_right_interp_t_memo

    def _sum_batch(self, dim):
        """Sum over batch dim *dim* by block-diagonalizing the base tensor and
        folding that batch dimension into the interpolation dimension."""
        left_interp_indices = self.left_interp_indices
        left_interp_values = self.left_interp_values
        right_interp_indices = self.right_interp_indices
        right_interp_values = self.right_interp_values
        # Increase interpolation indices appropriately
        left_factor = torch.arange(0, left_interp_indices.size(dim), dtype=torch.long, device=self.device)
        left_factor = _pad_with_singletons(left_factor, 0, self.dim() - dim - 1)
        left_factor = left_factor * self.base_lazy_tensor.size(-2)
        left_interp_indices = left_interp_indices.add(left_factor)
        right_factor = torch.arange(0, right_interp_indices.size(dim), dtype=torch.long, device=self.device)
        right_factor = _pad_with_singletons(right_factor, 0, self.dim() - dim - 1)
        right_factor = right_factor * self.base_lazy_tensor.size(-1)
        right_interp_indices = right_interp_indices.add(right_factor)
        # Rearrange the indices and values
        permute_order = (*range(0, dim), *range(dim + 1, self.dim()), dim)
        left_shape = (*left_interp_indices.shape[:dim], *left_interp_indices.shape[dim + 1:-1], -1)
        right_shape = (*right_interp_indices.shape[:dim], *right_interp_indices.shape[dim + 1:-1], -1)
        left_interp_indices = left_interp_indices.permute(permute_order).reshape(left_shape)
        left_interp_values = left_interp_values.permute(permute_order).reshape(left_shape)
        right_interp_indices = right_interp_indices.permute(permute_order).reshape(right_shape)
        right_interp_values = right_interp_values.permute(permute_order).reshape(right_shape)
        # Make the base_lazy tensor block diagonal
        from .block_diag_lazy_tensor import BlockDiagLazyTensor
        block_diag = BlockDiagLazyTensor(self.base_lazy_tensor, block_dim=dim)
        # Finally! We have an interpolated lazy tensor again
        return InterpolatedLazyTensor(
            block_diag, left_interp_indices, left_interp_values, right_interp_indices, right_interp_values
        )

    def diag(self):
        """Exact diagonal; fast path when the base is a root decomposition."""
        if isinstance(self.base_lazy_tensor, RootLazyTensor) and isinstance(self.base_lazy_tensor.root, NonLazyTensor):
            # diag(W_l R R^T W_r^T) = rowwise dot of (W_l R) and (W_r R).
            left_interp_vals = left_interp(
                self.left_interp_indices, self.left_interp_values, self.base_lazy_tensor.root.evaluate()
            )
            right_interp_vals = left_interp(
                self.right_interp_indices, self.right_interp_values, self.base_lazy_tensor.root.evaluate()
            )
            return (left_interp_vals * right_interp_vals).sum(-1)
        else:
            return super(InterpolatedLazyTensor, self).diag()

    def matmul(self, tensor):
        # We're using a custom matmul here, because it is significantly faster than
        # what we get from the function factory.
        # The _matmul_closure is optimized for repeated calls, such as for inv_matmul
        if tensor.ndimension() == 1:
            is_vector = True
            tensor = tensor.unsqueeze(-1)
        else:
            is_vector = False
        # right_interp^T * tensor
        base_size = self.base_lazy_tensor.size(-1)
        right_interp_res = left_t_interp(self.right_interp_indices, self.right_interp_values, tensor, base_size)
        # base_lazy_tensor * right_interp^T * tensor
        base_res = self.base_lazy_tensor.matmul(right_interp_res)
        # left_interp * base_lazy_tensor * right_interp^T * tensor
        res = left_interp(self.left_interp_indices, self.left_interp_values, base_res)
        # Squeeze if necessary
        if is_vector:
            res = res.squeeze(-1)
        return res

    def zero_mean_mvn_samples(self, num_samples):
        """Draw samples from the base tensor and interpolate them to this
        tensor's row space; the sample dimension is moved last and back."""
        base_samples = self.base_lazy_tensor.zero_mean_mvn_samples(num_samples)
        batch_iter = tuple(range(1, base_samples.dim()))
        base_samples = base_samples.permute(*batch_iter, 0)
        res = left_interp(self.left_interp_indices, self.left_interp_values, base_samples).contiguous()
        batch_iter = tuple(range(res.dim() - 1))
        return res.permute(-1, *batch_iter).contiguous()
| StarcoderdataPython |
9682704 | <filename>resources/profiles.py
from os import environ
import jwt
from flask import request
from flask_restful import Resource
from marshmallow import ValidationError
from sqlalchemy.exc import IntegrityError
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.security import generate_password_hash, check_password_hash
from authorize import auth
from db import db
from models import CarHubUser
from schemas.profiles import RegisterSchema, LoginSchema, AllProfilesSchema
class RegisterUser(Resource):
    """Endpoint creating a new user account."""

    def post(self):
        """Validate the payload, hash the password and persist the user.

        Returns the serialized user with 201 on success, 422 on schema
        validation errors, and 400 when the username/email already exists.
        """
        register_schema = RegisterSchema()
        json_data = request.get_json()
        try:
            data = register_schema.load(json_data)
        except ValidationError as err:
            return err.messages, 422
        username, email, password = data["username"], data["email"], data["password"]
        password = generate_password_hash(password)
        user = CarHubUser(username, email, password)
        db.session.add(user)
        try:
            db.session.commit()
        except IntegrityError:
            # Roll back the failed transaction so the session remains usable
            # for subsequent requests (without this the session stays in a
            # failed state and later commits raise).
            db.session.rollback()
            raise BadRequest("User with this email and/or username already exists.")
        return register_schema.dump(user), 201
class LoginUser(Resource):
    """Endpoint authenticating a user and issuing a JWT."""

    def post(self):
        """Validate credentials and respond with ``{"token": <jwt>}``.

        Returns 422 on schema errors; raises 400 for an unknown user or a
        wrong password.
        """
        login_schema = LoginSchema()
        json_data = request.get_json()
        try:
            data = login_schema.load(json_data)
        except ValidationError as err:
            return err.messages, 422
        user = CarHubUser.query.filter_by(username=data["username"]).first()
        if not user:
            raise BadRequest("Invalid user.")
        if not check_password_hash(user.password, data["password"]):
            raise BadRequest("Wrong password.")
        # Sign with the key from the JWT_SECRET_KEY env var; the "sub" claim
        # carries the user id.
        token = jwt.encode({"sub": user.id}, key=environ.get('JWT_SECRET_KEY'))
        return {"token": token}
class DeleteProfile(Resource):
    """Endpoint deleting a user profile (admins or the profile owner only)."""

    @auth.login_required
    def delete(self, profile_id):
        requester = auth.current_user()
        target = CarHubUser.query.filter_by(id=profile_id).first()
        if not target:
            raise NotFound(
                'User does not exists. If you entered the URL manually please check your spelling and try again.')
        # Only an admin or the account owner may delete the profile.
        is_admin = requester.role == 'ADMIN'
        is_owner = target.id == requester.id
        if not (is_admin or is_owner):
            return 'Permission denied', 403
        db.session.delete(target)
        db.session.commit()
        return 'User Deleted', 200
class ListAllUsers(Resource):
    """Admin-only endpoint returning every registered profile."""

    @auth.login_required
    def get(self):
        requester = auth.current_user()
        if requester.role != 'ADMIN':
            return 'Permission denied', 403
        all_profiles = CarHubUser.query.all()
        return AllProfilesSchema().dump(all_profiles, many=True)
| StarcoderdataPython |
3214503 | #!/usr/bin/env python
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
#
import sys
import json
import boto3
import argparse
#aws ec2 describe-images --owners 309956199498 --region us-west-2 --filters Name=name,Values=RHEL-7.3_HVM_GA-20161026-x86_64-1-Hourly2-GP2
def arg_parse():
    """Build and run the CLI parser for base AMI IDs.

    Each distro gets one optional ``--<distro>`` flag taking an AMI ID
    (xx-xxxxxxxxx) that must exist in us-east-1. Returns the parsed
    argparse.Namespace (unset options are None).
    """
    # (flag, dest, help) for each supported distro. Data-driven to avoid
    # nine near-identical add_argument() calls.
    _AMI_OPTIONS = [
        ('--amzn', 'alinux',
         'Base Amazon Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Amazon Linux: '
         ' https://aws.amazon.com/amazon-linux-ami/'),
        ('--amzn2', 'alinux2',
         'Base Amazon Linux 2 AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Amazon Linux 2: '
         ' https://aws.amazon.com/amazon-linux-ami/'),
        ('--centos6', 'centos6',
         'Base CentOS6 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for CentOS AMI info: '
         ' https://wiki.centos.org/Cloud/AWS'),
        ('--centos7', 'centos7',
         'Base Centos7 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for CentOS AMI info: '
         ' https://wiki.centos.org/Cloud/AWS'),
        ('--rhel7', 'rhel7',
         'Base RHEL7 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for RHEL 7 AMI info'
         ' AWS Console'),
        ('--suse11', 'suse11',
         'Base SUSE 11 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for SuSE 11 info: '
         ' AWS Console'),
        ('--suse12', 'suse12',
         'Base SUSE 12 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for SuSE 12 info: '
         ' AWS Console'),
        ('--ubuntu14', 'ubuntu14',
         'Base Ubuntu 14 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Ubuntu14: '
         ' AWS Console'),
        ('--ubuntu16', 'ubuntu16',
         'Base Ubuntu 16 Linux AMI ID (xx-xxxxxxxxx) *specifically* in us-east-1, '
         'use this site for Ubuntu16: '
         ' AWS Console'),
    ]
    parser = argparse.ArgumentParser(prog='get_ami_id')
    for flag, dest, help_text in _AMI_OPTIONS:
        parser.add_argument(flag, dest=dest, type=str, help=help_text, required=False)
    return parser.parse_args()
def image_info(client, owners, ami_name, region):
    """Describe an AMI by (owner, name) in the client's region.

    Returns the full ``describe_images`` response when the image exists, or
    the string sentinel "NONE" (kept for backward compatibility with the
    callers in this script) when it does not.
    """
    response = client.describe_images(
        DryRun=False,
        Owners=[
            owners,
        ],
        Filters=[
            {
                'Name': 'name',
                'Values': [
                    ami_name,
                ]
            },
        ]
    )
    try:
        if response["Images"][0]["ImageId"]:
            return response
    except (IndexError, KeyError):
        # Replaces a bare `except:` that silently swallowed everything
        # (including KeyboardInterrupt); only a missing/empty Images list
        # is expected here.
        print("Does the AMI requested exist in {0}? Not adding region {0} to list. Continuing...".format(region))
        return "NONE"
def get_image_info(client, ami_id):
    """Return (name, owner, description, ena, sriov) for *ami_id* in us-east-1.

    Optional fields missing from the response are reported as the string
    'NONE'. Exits the whole program when the AMI cannot be described.
    """
    try:
        response = client.describe_images(
            DryRun=False,
            ImageIds=[
                ami_id,
            ],
        )
    except Exception as e:
        print(e)
        # NOTE(review): the message says "Checking next region" but this path
        # exits the program — confirm whether continuing was intended.
        print("Does {0} exist in us-east-1? Checking next region ...".format(ami_id))
        sys.exit(1)
    ami_name = response["Images"][0]["Name"]
    # Optional attributes default to the sentinel 'NONE' when absent.
    owners = 'NONE'
    description = 'NONE'
    ena = 'NONE'
    sriov = 'NONE'
    try:
        owners = response["Images"][0]["OwnerId"]
        description = response["Images"][0]["Description"]
        ena = response["Images"][0]["EnaSupport"]
        sriov = response["Images"][0]["SriovNetSupport"]
    except KeyError as e:
        # The first missing key aborts the chain, leaving the later fields
        # at 'NONE' even if they exist in the response.
        pass
    return ami_name, owners, description, ena, sriov
def print_image_info(args, client):
    """Print source-AMI metadata for every CLI option the user supplied."""
    for arg_name, ami_id in vars(args).items():
        if not ami_id:
            continue
        ami_name, owners, description, ena, sriov = get_image_info(client, ami_id)
        print('Building mappings for:\n'
              ' Argument Name: {0}\n'
              ' AMI Name: {1}\n'
              ' AMI ID: {2}\n'
              ' Owners ID: {3}\n'
              ' AMI Desc: {4}\n'
              ' ENA Support: {5}\n'
              ' SRIOV Support: {6}\n'
              .format(arg_name, ami_name, ami_id, owners, description, ena, sriov))
def main():
    """Collect per-region AMI IDs matching the us-east-1 base AMIs and print
    a CloudFormation-style "Mappings" JSON block.

    Returns 0, used as the process exit code.
    """
    rc = 0
    ami_map = dict()
    args = arg_parse()
    client_iad = boto3.client('ec2', region_name='us-east-1')
    r_response_iad = client_iad.describe_regions()
    print_image_info(args, client_iad)
    print("Getting AMI IDs from regions: ")
    for r in r_response_iad["Regions"]:
        region = r["RegionName"]
        print(" " + region)
        client = boto3.client('ec2', region_name=region)
        response = dict()
        ami_map[region] = dict()
        for arg_n, ami_id_iad in vars(args).items():
            if ami_id_iad:
                # Resolve the base AMI's (owner, name) in us-east-1, then look
                # up the AMI with the same name in the current region.
                (ami_name, owners, description, ena, sriov) = get_image_info(client_iad, ami_id_iad)
                response[arg_n] = image_info(client, owners, ami_name, region)
                # Compare by value: the original `is not "NONE"` relied on
                # string interning and raises a SyntaxWarning on modern Pythons.
                if response[arg_n] != "NONE":
                    ami_map[region].update({arg_n: response[arg_n]["Images"][0]["ImageId"]})
    ami_map = {"AWSRegionAMI": ami_map}
    ami_map = {"Mappings": ami_map}
    print(json.dumps(ami_map, indent=2, sort_keys=True))
    ##print(ami_map)
    return rc
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
print('\nReceived Keyboard interrupt.')
print('Exiting...')
except ValueError as e:
print('ERROR: {0}'.format(e))
| StarcoderdataPython |
8077449 | import unittest
from src.factory import DayFactory, TransactionsFactory
from src.value_objects import Stock, StockTransaction, CashTransaction
class FactoryTest(unittest.TestCase):
    """Unit tests for DayFactory and TransactionsFactory."""

    def test_create_day(self):
        """DayFactory.create parses '<SYMBOL> <amount>' strings plus cash
        into a Day value with catalog, cash and stock holdings."""
        D0 = DayFactory().create("D0", ["AAPL 100", "GOOG 200", "SP500 175.75"], 1000)
        self.assertEqual(D0.catalog, ["AAPL", "GOOG", "SP500"])
        self.assertEqual(D0.cash, 1000)
        stock = Stock('AAPL', 100)
        # Compare fields individually (Stock presumably has no __eq__ —
        # confirm; otherwise assertEqual on the objects would suffice).
        popped = D0.stocks.pop(0)
        self.assertEqual(popped.symbol, stock.symbol)
        self.assertEqual(popped.amount, stock.amount)

    def test_create_translation(self):
        """TransactionsFactory.create parses '<SYMBOL> <TYPE> <amount> <strike>' rows."""
        T0 = TransactionsFactory().create([
            "AAPL SELL 100 30000",
            "GOOG BUY 10 10000",
            "CASH DEPOSIT 0 1000",
            "CASH FEE 0 50",
            "GOOG DIVIDEND 0 50",
            "TD BUY 100 10000"
        ])
        """
        Assert factory method returns list of dictionary
        :return:
        """
        # A SELL is encoded with a negative amount.
        self.assertEqual(T0[0].symbol, "AAPL")
        self.assertEqual(T0[0].amount, -100.0)
        self.assertEqual(T0[0].strike, 30000.0)
        self.assertEqual(T0[0].type, "SELL")


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4822415 | <filename>src/application/__init__.py
from flask import Flask
from config import configure_app
application = Flask(__name__)
configure_app(application)


@application.errorhandler(500)
def internal_server_error(error):
    """Log unexpected server errors and return a 500 response.

    Flask does not set the status code of an error handler's response for
    you: without the explicit 500 the plain-string body would be sent with
    status 200. (Also removed a dead reassignment of ``error``.)
    """
    application.logger.error('Server Error: %s', (error))
    return 'Server Error:', 500
# @application.errorhandler(Exception)
# def unhandled_exception(error):
#     application.logger.error('Unhandled Exception: %s', (error))
#     error = 'Unhandled Exception: %s', (error)
#     return 'Server Error:'
# NOTE(review): this compares against 'application' (the package/module
# name), not '__main__' — presumably intentional so views register when the
# module is imported under that name; confirm before "fixing".
if __name__ == 'application':
    from application import views
| StarcoderdataPython |
76373 | import re
import os
import posixpath
from fabric.api import cd, sudo, puts
from fabric.contrib import files
from .containers import conf, MissingVarException
from .task import Task
from .users import list_users
from .files import read_file, exists
from .utils import home_path, split_lines
__all__ = [
'push_key',
'list_authorized_files',
'list_keys',
'enable_key',
'disable_key',
]
class PushKey(Task):
    """Append a local public SSH key to a remote user's authorized_keys."""

    @conf
    def abs_pub_key_file(self):
        # Expand '~' so the key file may be given relative to the local home.
        path = self.conf.pub_key_file
        return os.path.expanduser(path)

    def do(self):
        with open(self.conf.abs_pub_key_file, 'rt') as key_file:
            public_key = key_file.read()
        remote_home = home_path(self.conf.user)
        with cd(remote_home):
            sudo('mkdir --parents .ssh')
            files.append('.ssh/authorized_keys', public_key, use_sudo=True)
            sudo('chown --recursive %(user)s:%(user)s .ssh' % self.conf)


push_key = PushKey()
class SshManagementTask(Task):
    """Shared behaviour for tasks that manage remote authorized_keys files."""

    def before_do(self):
        super(SshManagementTask, self).before_do()
        # Guarantee the option exists so subclasses can read it blindly.
        self.conf.setdefault('exclude_users', [])

    @conf
    def authorized_file(self):
        # Only resolvable when a target user was configured.
        has_user = 'user' in self.conf and self.conf.user
        if not has_user:
            raise MissingVarException()
        return posixpath.join(
            home_path(self.conf.user), '.ssh', 'authorized_keys')
class ListAuthorizedFiles(SshManagementTask):
    """Locate the authorized_keys file of every (non-excluded) remote user."""

    def get_authorized_files(self, exclude_users=None):
        found = []
        for user in list_users.get_users(exclude_users=exclude_users):
            candidate = '%s/.ssh/authorized_keys' % home_path(user)
            # Only report files that actually exist on the remote host.
            if exists(candidate, use_sudo=True, shell=False):
                found.append((user, candidate))
        return found

    def do(self):
        pairs = self.get_authorized_files(
            exclude_users=self.conf.exclude_users)
        for _user, authorized_file in pairs:
            puts(authorized_file)


list_authorized_files = ListAuthorizedFiles()
class ListKeys(SshManagementTask):
    """Print every active (uncommented) key of every authorized_keys file."""

    def get_keys(self, authorized_file):
        contents = read_file(authorized_file, use_sudo=True, shell=False)
        is_active = lambda row: not row.startswith('#')
        return filter(is_active, split_lines(contents))

    def do(self):
        for user, auth_file in list_authorized_files.get_authorized_files():
            puts(user)
            puts('-' * 40)
            for key in self.get_keys(auth_file):
                puts(key)
            puts('-' * 40)


list_keys = ListKeys()
class DisableKey(SshManagementTask):
    """Comment out a given public key in one or all authorized_keys files."""

    def disable_key(self, authorized_file, key):
        # Anchor the escaped key so only exact whole-line matches are hit;
        # '/' needs no escaping in the remote sed expression.
        pattern = '^%s$' % re.escape(key).replace('\/', '/')
        backup_suffix = '.%s.bak' % self.conf.current_time
        files.comment(authorized_file, pattern, use_sudo=True,
                      backup=backup_suffix)

    def do(self):
        if 'authorized_file' in self.conf:
            self.disable_key(self.conf.authorized_file, self.conf.key)
            return
        targets = list_authorized_files.get_authorized_files(
            exclude_users=self.conf.exclude_users)
        for _user, authorized_file in targets:
            self.disable_key(authorized_file, self.conf.key)


disable_key = DisableKey()
class EnableKey(SshManagementTask):
    """Re-enable (uncomment) a key, or append it when it was never present."""

    def enable_key(self, authorized_file, key):
        backup_suffix = '.%s.bak' % self.conf.current_time
        escaped = '%s' % re.escape(key)
        commented = '#' + escaped
        if files.contains(
                authorized_file, commented, exact=True, use_sudo=True):
            # Key present but commented out: strip the comment marker.
            files.uncomment(authorized_file, escaped, use_sudo=True,
                            backup=backup_suffix)
        else:
            # Unknown key: append it verbatim.
            files.append(authorized_file, key, use_sudo=True)

    def do(self):
        if 'authorized_file' in self.conf:
            self.enable_key(self.conf.authorized_file, self.conf.key)
            return
        targets = list_authorized_files.get_authorized_files(
            exclude_users=self.conf.exclude_users)
        for _user, authorized_file in targets:
            self.enable_key(authorized_file, self.conf.key)


enable_key = EnableKey()
| StarcoderdataPython |
3209587 | <gh_stars>0
# Interactive triage of a shelter cat into a room based on age, neuter
# status, sex and FIV/FeLV test result (prompts are in Portuguese).
idade = int(input('Insira a idade do Gato: '))
castrado = input('O gato é Castrado (sim/não) ?')
sexo = input('Insira o sexo do gato (f/m): ')
fivFelv = input('Possui Fiv e Felv?(positivo/negativo): ')
# FIV/FeLV-positive males go to room D; positive females to another cattery.
if idade >= 0 and sexo == 'm' and fivFelv == 'positivo':
    print('SalaD')
elif idade >= 0 and fivFelv == 'positivo' and sexo == 'f':
    print('Fêmeas são alocadas para outro gatil')
# NOTE(review): in the branch below "sexo == 'm' and sexo == 'f'" can never
# be true, and mixing "and"/"or" without parentheses makes the intended rule
# unclear -- confirm the intent before changing the condition.
elif idade <= 2 and castrado == 'sim' or sexo == 'm' and sexo == 'f' and castrado == 'não' and fivFelv == 'negativo':
    print('SalaA')
elif idade > 2 and castrado == 'sim':
    print('SalaB')
elif idade > 2 and castrado == 'não' and sexo == 'm':
    print('SalaC')
185426 | <gh_stars>0
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.contrib.auth.models import User
from django.db import models
from django.conf import settings
class Config(models.Model):
    """Single-row runtime configuration; saving mirrors values into settings."""
    petrol_bonus_limit = models.PositiveSmallIntegerField(default=500)
    daily_car_create_limit = models.PositiveIntegerField(default=3)
    daily_car_bonus_check_limit = models.PositiveIntegerField(default=3)

    def __str__(self):
        return 'petrol bonus limit: %d, daily car create limit: %d' % (self.petrol_bonus_limit, self.daily_car_create_limit)

    def clean(self):
        # Enforce at most one Config row; checking self.pk keeps updates legal.
        if not self.pk and Config.objects.exists():
            # if you'll not check for self.pk
            # then error will also raised in update of exists model
            raise ValidationError('There can be only one Config instance')

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Push the persisted limits into the live settings module so the rest
        # of the process sees them without re-reading the database.
        settings.PETROL_BONUS_LIMIT = self.petrol_bonus_limit
        settings.PETROL_DAILY_CAR_CREATE_LIMIT = self.daily_car_create_limit
        settings.PETROL_DAILY_CAR_BONUS_CHECK_LIMIT = self.daily_car_bonus_check_limit
        return super(Config, self).save(force_insert, force_update, using, update_fields)
class Member(User):
    """Proxy over django.contrib.auth User that encodes a member's role in the
    first_name/last_name fields (e.g. first_name='staff', last_name='oil').
    """
    MANAGER = 'manager'
    STAFF = 'staff'
    OIL = 'oil'
    PETROL = 'petrol'
    GENERAL = 'general'

    class Meta:
        proxy = True

    def _role_fields(self):
        """Return (first_name, last_name) lower-cased and stripped for
        role comparisons. Private helper deduplicating the properties below
        (the old per-property ``if self:`` guard was always true and gone).
        """
        return self.first_name.lower().strip(), self.last_name.lower().strip()

    @property
    def is_general_staff(self):
        first, last = self._role_fields()
        return first == self.STAFF and last == self.GENERAL

    @property
    def is_oil_staff(self):
        first, last = self._role_fields()
        return first == self.STAFF and last == self.OIL

    @property
    def is_petrol_staff(self):
        first, last = self._role_fields()
        return first == self.STAFF and last == self.PETROL

    @property
    def is_manager(self):
        first, _last = self._role_fields()
        return first == self.MANAGER

    @property
    def is_user_staff(self):
        first, _last = self._role_fields()
        return first == self.STAFF
def get_super_user():
    # Default/on_delete target for many FKs below: the superuser's primary key.
    # NOTE(review): .get() raises if zero or multiple superusers exist --
    # confirm exactly one superuser is guaranteed in this deployment.
    return Member.objects.get(is_superuser=True).id
class CarModel(models.Model):
    """A car make/model, keyed by its name."""
    name = models.CharField(primary_key=True, max_length=31)
    description = models.TextField(blank=True, null=True)
    created = models.DateField(auto_now_add=True)
    last_updated = models.DateField(auto_now=True)

    def __str__(self):
        return self.name
class Car(models.Model):
    """A customer car tracked by plate number, with running petrol totals
    maintained by Trade.save()/delete()."""
    carNumber = models.CharField(primary_key=True, max_length=8)
    model = models.ForeignKey(CarModel, related_name='cars', on_delete=models.CASCADE)
    used_bonuses = models.PositiveSmallIntegerField(default=0)
    total_bought_litres = models.PositiveSmallIntegerField(default=0)
    total_bought_price = models.DecimalField(default=0, decimal_places=2, max_digits=12)
    total_litres_after_bonus = models.PositiveSmallIntegerField(default=0)
    created = models.DateField(auto_now_add=True)
    last_updated = models.DateField(auto_now=True)
    created_by = models.ForeignKey(Member, related_name='created_cars', on_delete=models.SET(get_super_user), default=get_super_user)

    @property
    def get_litres_after_bonuses(self):
        # Derived live value: litres bought minus litres already redeemed as
        # bonuses (settings.PETROL_BONUS_LIMIT litres per used bonus).
        return self.total_bought_litres - self.used_bonuses * settings.PETROL_BONUS_LIMIT

    def __str__(self):
        return '{carNumber} {model}'.format(carNumber=self.carNumber, model=self.model)
class Petrol(models.Model):  # Benzin
    """A petrol brand and its per-litre price."""
    brand = models.CharField(primary_key=True, max_length=50)
    price = models.DecimalField(max_digits=12, decimal_places=2)

    def __str__(self):
        return self.brand

    class Meta:
        # Admin display name (Russian for "Petrol").
        verbose_name = verbose_name_plural = 'Бензин'
class Trade(models.Model):  # for Petrol
    """A petrol purchase; save()/delete() keep the Car's running totals in sync."""
    car = models.ForeignKey(Car, related_name='trades', on_delete=models.CASCADE)
    petrol = models.ForeignKey(Petrol, related_name='trades', on_delete=models.CASCADE)
    litre = models.PositiveSmallIntegerField()
    tradeDateTime = models.DateTimeField(blank=True)
    price = models.DecimalField(max_digits=12, decimal_places=2, blank=True)
    created_by = models.ForeignKey(Member, related_name='created_petrol_trades', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return '{car} {petrol} {litre} litr {time}'.format(car=self.car.carNumber, petrol=self.petrol.brand, litre=self.litre, time=self.tradeDateTime.strftime("%Y-%m-%d %H:%M"))

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Only trades within the bonus limit price themselves and update the
        # car's totals.
        # NOTE(review): when litre exceeds PETROL_BONUS_LIMIT the trade is
        # still saved, but price is left unset -- confirm this is intended.
        if self.litre <= settings.PETROL_BONUS_LIMIT:
            try:
                self.price = self.litre * self.petrol.price
                self.car.total_bought_litres += self.litre
                self.car.total_litres_after_bonus += self.litre
                self.car.total_bought_price += self.price
                self.car.save()
            except ObjectDoesNotExist:
                # Related car/petrol missing: record the trade at zero price.
                self.price = 0
            except MultipleObjectsReturned:
                self.price = 0
        super(Trade, self).save(force_insert, force_update, using, update_fields)

    def delete(self, using=None, keep_parents=False):
        # Roll back this trade's contribution to the car's totals; when the
        # removal crosses a bonus boundary, also return one used bonus.
        self.car.total_bought_litres -= self.litre
        if self.litre > self.car.total_litres_after_bonus:
            self.car.total_litres_after_bonus = settings.PETROL_BONUS_LIMIT + self.car.total_litres_after_bonus - self.litre
            self.car.used_bonuses -= 1
        else:
            self.car.total_litres_after_bonus -= self.litre
        self.car.total_bought_price -= self.price
        self.car.save()
        super(Trade, self).delete(using, keep_parents)

    class Meta:
        verbose_name = verbose_name_plural = 'Refuelling'
class Oil(models.Model):
    """An oil product with its price and current stock (litres and bottles)."""
    name = models.CharField(primary_key=True, max_length=63)
    price = models.DecimalField(max_digits=12, decimal_places=2, default=0)
    RemainingLitres = models.DecimalField(max_digits=12, decimal_places=2, default=0)
    RemainingBottles = models.PositiveIntegerField(default=0)
    bottleVolume = models.DecimalField(max_digits=7, decimal_places=2, default=0)
    color = models.CharField(null=True, blank=True, max_length=7)
    created = models.DateField(auto_now=True)
    created_by = models.ForeignKey(Member, related_name='created_oils', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Oil'
        verbose_name_plural = 'Oils'
class OilTrade(models.Model):
    """An oil sale; save() decrements the stock, delete() restores it."""
    oil = models.ForeignKey(Oil, related_name='trades', on_delete=models.CASCADE)
    litreSold = models.DecimalField(decimal_places=1, max_digits=4)
    tradePrice = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    dateTime = models.DateTimeField(blank=True)
    created_by = models.ForeignKey(Member, related_name='created_oil_trades', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return '{oil} dan {litre} litr'.format(oil=self.oil, litre=self.litreSold)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        try:
            self.tradePrice = self.oil.price * self.litreSold
            # Litres left in the currently opened (partial) bottle.
            openedOilLitre = self.oil.RemainingLitres % self.oil.bottleVolume
            if self.oil.RemainingLitres < self.litreSold:
                # NOTE(review): insufficient stock aborts without saving the
                # trade and without signalling the caller -- confirm intended.
                return
            self.oil.RemainingLitres -= self.litreSold
            if openedOilLitre < self.litreSold:  # new bottle is opened
                if self.litreSold - openedOilLitre == self.oil.bottleVolume:
                    self.oil.RemainingBottles -= 1
                else:
                    self.oil.RemainingBottles -= ((self.litreSold - openedOilLitre) // self.oil.bottleVolume) + 1
            self.oil.save()
        except ObjectDoesNotExist:
            self.tradePrice = 0
        except MultipleObjectsReturned:
            self.tradePrice = 0
        super(OilTrade, self).save(force_insert, force_update, using, update_fields)

    def delete(self, using=None, keep_parents=False):
        # Return the sold litres to stock and recompute whole bottles.
        self.oil.RemainingLitres += self.litreSold
        self.oil.RemainingBottles = self.oil.RemainingLitres // self.oil.bottleVolume
        self.oil.save()
        super(OilTrade, self).delete(using, keep_parents)

    class Meta:
        verbose_name = 'Oil Trade'
        verbose_name_plural = 'Oil Trades'
class OilCheckIn(models.Model):
    """A delivery of oil bottles; save() adds them to stock, delete() removes."""
    oil = models.ForeignKey(Oil, related_name='checkins', on_delete=models.CASCADE)
    bottles = models.PositiveIntegerField(default=0)
    date = models.DateField(blank=True)
    created_by = models.ForeignKey(Member, related_name='created_oil_checkins', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return '{oil} dan {bottles} ta keldi'.format(oil=self.oil, bottles=self.bottles)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Add the delivered bottles and the equivalent litres to stock.
        self.oil.RemainingBottles += self.bottles
        self.oil.RemainingLitres += self.bottles * self.oil.bottleVolume
        self.oil.save()
        super(OilCheckIn, self).save(force_insert, force_update, using, update_fields)

    def delete(self, using=None, keep_parents=False):
        # Undo the stock contribution of this delivery.
        self.oil.RemainingBottles -= self.bottles
        self.oil.RemainingLitres -= self.bottles * self.oil.bottleVolume
        self.oil.save()
        super(OilCheckIn, self).delete(using, keep_parents)
class ProductCategory(models.Model):
    """Groups products and optionally defines a bonus threshold that save()
    mirrors into settings.PRODUCTS_BONUS_LIMITS."""
    LITRE = 'Litre'
    PIECE = 'Piece'
    METRE = 'Metre'
    KILOGRAM = 'Kilogram'
    LITRE_UZB = 'Litr'
    PIECE_UZB = 'Dona'
    METRE_UZB = 'Metr'
    KILOGRAM_UZB = 'Kg'
    # (stored value, human-readable label) pairs; stored values are Uzbek.
    QUANTITY_MEASURES = [
        (LITRE_UZB, LITRE),
        (PIECE_UZB, PIECE),
        (METRE_UZB, METRE),
        (KILOGRAM_UZB, KILOGRAM),
    ]
    name = models.CharField(max_length=63)
    quantity_measure = models.CharField(max_length=31, choices=QUANTITY_MEASURES)
    slug = models.SlugField(null=False, unique=True)
    has_bonus = models.BooleanField(default=False)
    bonus_limit_quantity = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    # NOTE(review): null=True has no effect on a ManyToManyField (Django
    # ignores it); blank=True alone is the conventional form.
    staffs = models.ManyToManyField(Member, related_name='product_categories', blank=True, null=True)

    def __str__(self):
        return self.name

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        # Keep the in-process bonus-limit registry in sync with this row.
        if self.has_bonus:
            settings.PRODUCTS_BONUS_LIMITS[self.slug] = self.bonus_limit_quantity
        else:
            self.bonus_limit_quantity = 0
            if self.slug in settings.PRODUCTS_BONUS_LIMITS:
                settings.PRODUCTS_BONUS_LIMITS.pop(self.slug)
        super(ProductCategory, self).save(force_insert, force_update, using, update_fields)
def get_product_default_category():
    """Return the pk of the fallback 'Default' category, creating it if absent.

    Used as the on_delete target for Product.category.
    """
    # get_or_create returns an (object, created) tuple; the previous code
    # called .id on the tuple itself, which raised AttributeError.
    category, _created = ProductCategory.objects.get_or_create(
        name='Default', quantity_measure='Piece', slug='default')
    return category.id
class Product(models.Model):
    """A sellable item within a category, with price and remaining stock."""
    category = models.ForeignKey(ProductCategory, related_name='products', on_delete=models.SET(get_product_default_category))
    name = models.CharField(max_length=63)
    price = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    remaining_quantity = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    created = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(Member, related_name='created_products', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return '%s: %s' % (self.category, self.name)
class ProductTrade(models.Model):
    """A product sale; save() decrements stock, delete() restores it."""
    product = models.ForeignKey(Product, related_name='trades', on_delete=models.CASCADE)
    tradePrice = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    sold_product_quantity = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    dateTime = models.DateTimeField()
    created_by = models.ForeignKey(Member, related_name='created_product_trades', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return '{product} dan {quantity}'.format(product=self.product, quantity=self.sold_product_quantity)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        try:
            if self.product.remaining_quantity < self.sold_product_quantity:
                # NOTE(review): insufficient stock aborts without saving and
                # without signalling the caller -- confirm intended.
                return
            self.tradePrice = self.product.price * self.sold_product_quantity
            self.product.remaining_quantity -= self.sold_product_quantity
            self.product.save()
        except ObjectDoesNotExist:
            self.tradePrice = 0
        except MultipleObjectsReturned:
            self.tradePrice = 0
        super(ProductTrade, self).save(force_insert, force_update, using, update_fields)

    def delete(self, using=None, keep_parents=False):
        # Restore the sold quantity to stock before removing the trade.
        self.product.remaining_quantity += self.sold_product_quantity
        self.product.save()
        super(ProductTrade, self).delete(using, keep_parents)
class ProductCheckIn(models.Model):
    """A stock delivery of a product; save() adds the quantity to stock."""
    product = models.ForeignKey(Product, related_name='checkins', on_delete=models.CASCADE)
    checkin_product_quantity = models.DecimalField(decimal_places=2, max_digits=12, default=0)
    date = models.DateField()
    created_by = models.ForeignKey(Member, related_name='created_product_checkins', on_delete=models.SET(get_super_user), default=get_super_user)

    def __str__(self):
        return '{product} dan {quantity}'.format(product=self.product, quantity=self.checkin_product_quantity)

    @property
    def cost(self):
        # Value of this delivery at the product's current price.
        # NOTE(review): implicitly returns None when quantity is 0 -- confirm.
        if self.checkin_product_quantity > 0:
            return round(self.product.price * self.checkin_product_quantity, 2)

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        self.product.remaining_quantity += self.checkin_product_quantity
        self.product.save()
        super(ProductCheckIn, self).save(force_insert, force_update, using, update_fields)

    def delete(self, using=None, keep_parents=False):
        self.product.remaining_quantity -= self.checkin_product_quantity
        self.product.save()
        super(ProductCheckIn, self).delete(using, keep_parents)
6600028 | from views import view
from PyInquirer import prompt
class MainView(view.View):
    """
    A base class for views which is not intended to be instantiated
    """

    def getMainAction(self):
        """
        Asks the user what course of action they want to take
        Returns
        -------
        'Post a question' or 'Search for posts' or 'Exit'
        """
        options = [
            {
                'type': 'list',
                'message': 'Select an action',
                'name': 'action method',
                'choices': [
                    "Post a question",
                    "Search for questions",
                    "Exit"
                ]
            }
        ]
        return prompt(options, style=self.style)

    def getQuestionPostValues(self):
        """
        Prompts the user to enter title, body and tags for a question
        Returns
        -------
        The title, body and tags of the question
        """
        postQuestionPrompts = [
            {
                'type': 'input',
                'message': 'Enter question title:',
                'name': 'title'
            },
            {
                'type': 'input',
                'message': 'Enter question body: ',
                'name': 'text'
            },
            {
                'type': 'input',
                'message': 'Enter zero or more tags: ',
                'name': 'tags'
            }
        ]
        return prompt(postQuestionPrompts, style=self.style)

    def getSearchValues(self):
        """
        Prompts the user to enter keyword to search for posts
        Returns
        -------
        keywords for searching
        """
        SearchPrompts = [
            {
                'type': 'input',
                'message': 'Enter one or more keyword to Search: ',
                'name': 'keywords'
            }
        ]
        return prompt(SearchPrompts, style=self.style)

    def findMaxLength(self, result):
        """
        Compute the widest value per display column over all result rows.
        Returns
        -------
        [width(Id), width(Title), width(CreationDate), width(Score),
         width(AnswerCount)]
        """
        max_len = [0, 0, 0, 0, 0]
        for r in result:
            if len(r["Id"]) > max_len[0]:
                max_len[0] = len(r["Id"])
            if len(r["Title"]) > max_len[1]:
                max_len[1] = len(r["Title"])
            if len(r["CreationDate"]) > max_len[2]:
                max_len[2] = len(r["CreationDate"])
            # Score and AnswerCount are numeric; measure their str() form.
            if len(str(r["Score"])) > max_len[3]:
                max_len[3] = len(str(r["Score"]))
            if len(str(r["AnswerCount"])) > max_len[4]:
                max_len[4] = len(str(r["AnswerCount"]))
        return max_len

    def getQuestionSearchAction(self, results, showprompt):
        """
        Prompts the user to choose post from Search result
        Returns
        -------
        list of results that are selectable
        """
        # Fixed-width column header; the spacing must line up with how the
        # caller formats each result row string.
        header = ' Id' + 5 * ' '
        header += 'Title' + 79 * ' '
        header += 'CreationDate' + 12 * ' '
        header += 'Score' + 2 * ' '
        header += 'AnswerCount'
        if(showprompt):
            # Paging controls are appended as extra selectable entries.
            results.append("Show more results")
            results.append("Back")
        postSearchPrompts = [
            {
                'type': 'list',
                'message': header,
                'name': 'action method',
                'choices': results
            }
        ]
        return prompt(postSearchPrompts, style=self.style)
| StarcoderdataPython |
3522472 | <gh_stars>10-100
import random
import pysam
import os
import sys
import argparse
import util
class ReadStats(object):
    """Counters recording why read pairs were kept or discarded."""

    def __init__(self):
        # number of reads discarded becaused not mapped
        self.discard_unmapped = 0
        # number of reads discarded because mate unmapped
        self.discard_mate_unmapped = 0
        # number of reads discarded because not proper pair
        self.discard_improper_pair = 0
        # paired reads map to different chromosomes
        self.discard_different_chromosome = 0
        # number of reads discarded because secondary match
        self.discard_secondary = 0
        # reads where we expected to see other pair, but it was missing
        # possibly due to read-pairs with different names
        self.discard_missing_pair = 0
        # reads with only one paired mapped
        self.discard_single = 0
        # reads discarded because duplicated
        self.discard_dup = 0
        # number of read pairs kept
        self.keep_pair = 0

    def write(self, file_handle):
        """Write a human-readable summary of the counters to file_handle.

        Fixes two defects in the old version: it always wrote to sys.stderr
        (ignoring file_handle), and it passed discard_improper_pair twice
        while never reporting discard_mate_unmapped.
        """
        file_handle.write("DISCARD reads:\n"
                          "  unmapped: %d\n"
                          "  mate unmapped: %d\n"
                          "  improper pair: %d\n"
                          "  different chromosome: %d\n"
                          "  secondary alignment: %d\n"
                          "  missing pairs (e.g. mismatched read names): %d\n"
                          "  not paired: %d\n"
                          "  duplicate pairs: %d\n"
                          "KEEP reads:\n"
                          "  pairs: %d\n" %
                          (self.discard_unmapped,
                           self.discard_mate_unmapped,
                           self.discard_improper_pair,
                           self.discard_different_chromosome,
                           self.discard_secondary,
                           self.discard_missing_pair,
                           self.discard_single,
                           self.discard_dup,
                           self.keep_pair))
def main(input_bam, output_bam):
    """Open the input SAM/BAM and output SAM/BAM, then filter duplicates."""
    # Text SAM (optionally gzipped) vs. binary BAM input.
    if input_bam.endswith(".sam") or input_bam.endswith("sam.gz"):
        read_mode = "r"
    else:
        # assume binary BAM file
        read_mode = "rb"
    infile = pysam.Samfile(input_bam, read_mode)
    if output_bam.endswith(".sam"):
        # output in text SAM format
        write_mode = "w"
    elif output_bam.endswith(".bam"):
        # output in binary compressed BAM format
        write_mode = "wb"
    else:
        raise ValueError("name of output file must end with .bam or .sam")
    outfile = pysam.Samfile(output_bam, write_mode, template=infile)
    filter_reads(infile, outfile)
    infile.close()
    outfile.close()
def update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                      read_stats, outfile):
    """For each group of reads sharing (position, mate position), randomly
    pick one read to keep and mark the rest as duplicates to discard.

    Kept reads wait in keep_cache until their mate arrives; discarded reads
    wait in discard_cache so the mate can be dropped too. Fully-overlapping
    pairs (both mates at the same position) are resolved immediately.
    """
    for mpos, read_list in list(cur_by_mpos.items()):
        # only keep one read from list with same pos,mate_pos pair
        # shuffle order of reads in list and take first
        # as 'keep' read
        random.shuffle(read_list)
        keep_read = read_list.pop()
        if keep_read.qname in keep_cache:
            raise ValueError("read %s is already "
                             "in keep cache" % keep_read.qname)
        keep_cache[keep_read.qname] = keep_read
        # rest of reads get discarded
        for discard_read in read_list:
            # corner case: if reads are completely overlapping
            # (same start pos) then we either want to keep both
            # or discard both right now
            if discard_read.qname in discard_cache:
                # discard both reads from pair
                del discard_cache[discard_read.qname]
            elif discard_read.qname == keep_read.qname:
                # keep both reads from pair
                read_stats.keep_pair += 1
                outfile.write(keep_read)
                outfile.write(discard_read)
                del keep_cache[keep_read.qname]
            else:
                discard_cache[discard_read.qname] = discard_read
def filter_reads(infile, outfile):
    """Remove duplicate read pairs from a coordinate-sorted paired-end file.

    Reads at the same (position, mate position) are grouped; one pair per
    group is kept at random (via update_read_cache) and written to outfile.
    Raises ValueError if the input is not sorted. Progress, warnings and
    final statistics are written to stderr.
    """
    read_stats = ReadStats()
    cur_tid = None
    seen_chrom = set([])
    # name of reads to keep
    keep_cache = {}
    # name of reads to discard
    discard_cache = {}
    cur_by_mpos = {}
    read_count = 0
    # current position on chromosome
    cur_pos = None
    # lists of reads at current position,
    # grouped by the mate pair position
    cur_by_mpos = {}
    for read in infile:
        read_count += 1
        if read.is_unmapped:
            read_stats.discard_unmapped += 1
            continue
        if (cur_tid is None) or (read.tid != cur_tid):
            # this is a new chromosome
            cur_chrom = infile.getrname(read.tid)
            if cur_pos:
                # flush reads buffered at the final position of the
                # previous chromosome
                update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                                  read_stats, outfile)
            if len(keep_cache) + len(discard_cache) != 0:
                # reads whose mate never showed up on the finished chromosome
                sys.stderr.write("WARNING: failed to find pairs for %d "
                                 "reads on this chromosome\n" %
                                 (len(keep_cache) + len(discard_cache)))
                read_stats.discard_missing_pair += len(keep_cache) + \
                    len(discard_cache)
                sys.stderr.write("keep_cache:\n")
                for r in list(keep_cache.values()):
                    sys.stderr.write(" %s\n" % r.qname)
                sys.stderr.write("discard_cache:\n")
                for r in list(discard_cache.values()):
                    sys.stderr.write(" %s\n" % r.qname)
                keep_cache = {}
                discard_cache = {}
            cur_pos = None
            cur_by_mpos = {}
            read_count = 0
            if cur_chrom in seen_chrom:
                # sanity check that input bam file is sorted
                raise ValueError("expected input BAM file to be sorted "
                                 "but chromosome %s is repeated\n" % cur_chrom)
            seen_chrom.add(cur_chrom)
            cur_tid = read.tid
            sys.stderr.write("starting chromosome %s\n" % cur_chrom)
            sys.stderr.write("processing reads\n")
        if read.mate_is_unmapped:
            read_stats.discard_mate_unmapped += 1
            continue
        if read.is_secondary:
            # this is a secondary alignment (i.e. read was aligned more than
            # once and this has align score that <= best score)
            read_stats.discard_secondary += 1
            continue
        if (not read.is_paired) or (read.next_reference_name is None):
            read_stats.discard_single += 1
            continue
        if (read.next_reference_name != cur_chrom) and \
                (read.next_reference_name != "="):
            # other side of pair mapped to different chromosome
            read_stats.discard_different_chromosome += 1
            continue
        if not read.is_proper_pair:
            read_stats.discard_improper_pair += 1
            continue
        if (cur_pos is not None) and (read.pos < cur_pos):
            raise ValueError("expected input BAM file to be sorted "
                             "but reads are out of order")
        if cur_pos is None or read.pos > cur_pos:
            # we have advanced to a new start position
            # decide which of reads at last position to keep or discard
            update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                              read_stats, outfile)
            # create new list of reads at current position
            cur_pos = read.pos
            cur_by_mpos = {}
        if read.qname in keep_cache:
            # we already saw prev side of pair, retrieve from cache
            read1 = keep_cache[read.qname]
            read2 = read
            del keep_cache[read.qname]
            if read2.next_reference_start != read1.reference_start:
                sys.stderr.write("WARNING: read pair positions "
                                 "do not match for pair %s\n" % read.qname)
            read_stats.keep_pair += 1
            outfile.write(read1)
            outfile.write(read2)
        elif read.qname in discard_cache:
            # we already saw prev side of pair, but decided to discard
            # because read duplicated
            del discard_cache[read.qname]
            read_stats.discard_dup += 1
        else:
            # we have not seen other side of this read yet
            # add read to list of those at current position
            # grouping by mate-pair position
            if read.mpos in cur_by_mpos:
                cur_by_mpos[read.mpos].append(read)
            else:
                cur_by_mpos[read.mpos] = [read]
    # final update of read cache is just to cache strange corner case
    # where final read pair on chromosome were overlapping (same start pos)
    if cur_pos:
        update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                          read_stats, outfile)
    if (len(keep_cache) + len(discard_cache)) != 0:
        sys.stderr.write("WARNING: failed to find pairs for %d "
                         "keep reads and %d discard reads on this "
                         "chromosome\n" % (len(keep_cache), len(discard_cache)))
        read_stats.discard_missing_pair += len(keep_cache) + len(discard_cache)
    read_stats.write(sys.stderr)
# Command-line entry point: log environment versions, check pysam, then
# run the duplicate filter on the given input/output files.
if __name__ == "__main__":
    sys.stderr.write("command line: %s\n" % " ".join(sys.argv))
    sys.stderr.write("python version: %s\n" % sys.version)
    sys.stderr.write("pysam version: %s\n" % pysam.__version__)
    util.check_pysam_version()
    parser = argparse.ArgumentParser()
    parser.add_argument('input_bam', help="input BAM or SAM file (must "
                        "be sorted!)")
    parser.add_argument("output_bam", help="output BAM or SAM file (not "
                        "sorted!)")
    options = parser.parse_args()
    main(options.input_bam, options.output_bam)
| StarcoderdataPython |
11376250 | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import time
import numpy as np
from PIL import Image
import dataset
import imagenet
import coco
from backend_tf import BackendTensorflow
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# pre-defined command line options so simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
"default": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
def get_backend(backend):
    """Instantiate the inference backend named by *backend*.

    Each framework is imported lazily so that only the selected one needs
    to be installed. Raises ValueError for an unknown name.
    """
    name = backend
    if name == "tensorflow":
        from backend_tf import BackendTensorflow
        return BackendTensorflow()
    if name == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        return BackendOnnxruntime()
    if name == "null":
        from backend_null import BackendNull
        return BackendNull()
    if name == "pytorch":
        from backend_pytorch import BackendPytorch
        return BackendPytorch()
    if name == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        return BackendPytorchNative()
    if name == "tflite":
        from backend_tflite import BackendTflite
        return BackendTflite()
    raise ValueError("unknown backend: " + backend)
# Module-level default backend; load_model() rebinds it via get_backend().
backend = BackendTensorflow()
def load_model():
    """Load the SSD-MobileNet frozen graph into the global TensorFlow backend.

    NOTE(review): the model path and tensor names are hard-coded -- confirm
    they match the mounted /model volume.
    """
    global backend
    # find backend
    backend = get_backend("tensorflow")
    # load model to backend
    t1 = time.time()
    model = backend.load("/model/ssd_mobilenet_v1_coco_2018_01_28.pb", inputs=['image_tensor:0'], outputs=['num_detections:0', 'detection_boxes:0', 'detection_scores:0', 'detection_classes:0'])
    t2 = time.time()
    print("load model time= %f" % (t2 - t1))


# COCO validation images used as random inference inputs (read at import time).
file_list = os.listdir("/data/coco-300/val2017/")
item_count = len(file_list)
def inference_model():
    """Run one timed prediction on a randomly chosen validation image.

    NOTE(review): assumes every file in /data/coco-300/val2017/ is an image
    the model accepts (presumably RGB at the expected size) -- confirm;
    np.array on a grayscale image would yield a rank-2 array. The opened
    image is also never closed and the prediction result is discarded.
    """
    global backend
    global file_list
    global item_count
    for _ in range(1):
        t1 = time.time()
        image = Image.open("/data/coco-300/val2017/"+file_list[np.random.randint(0, item_count)])
        imag_np = np.array(image)
        # Add a leading batch axis: (H, W, C) -> (1, H, W, C).
        image_new = imag_np[np.newaxis,:]
        t2 = time.time()
        print("image process time= %f" % (t2 - t1))
        t1 = time.time()
        backend.predict({backend.inputs[0]: image_new})
        t2 = time.time()
        print("inference time= %f" % (t2 - t1))
| StarcoderdataPython |
1831011 | # MIT License
#
# Copyright (c) 2019 74wny0wl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sim import directory
class Contact:
    """A SIM phonebook entry: a display name paired with a phone number."""

    contact_name: str
    phone_number: str

    def __init__(self, contact_name, phone_number):
        self.contact_name = contact_name
        self.phone_number = phone_number

    @staticmethod
    def empty():
        """Return a contact with a blank name and a blank number."""
        return Contact("", "")

    def to_string(self):
        """Render the contact as ``<name>::<number>``."""
        return f'{self.contact_name}::{self.phone_number}'

    def __str__(self):
        return self.to_string()

    def __unicode__(self):
        # Python 2 compatibility hook; delegates to the same rendering.
        return self.to_string()
class ContactNameFactory:
    """Decodes raw SIM name fields into readable strings."""

    @staticmethod
    def create_contact_name(contact_name_entry) -> str:
        """Strip the 0xFF padding bytes from a raw name field and decode it as UTF-8."""
        stripped = contact_name_entry.replace(b'\xff', b'')
        return stripped.decode('utf-8')
class PhoneNumberFactory:
    """Decodes raw SIM phone-number fields (swapped-nibble BCD) into strings."""

    @staticmethod
    def create_phone_number(phone_number_entry: bytes) -> str:
        """Decode a BCD-encoded SIM phone-number entry.

        Leading marker bytes map to dialling prefixes: 0xA1 -> ``*`` and
        0x81 -> ``+``.  Every remaining byte carries two digits with the
        nibbles swapped; a trailing ``f`` nibble is padding for odd-length
        numbers and is stripped.

        Args:
            phone_number_entry: raw bytes of the number field.

        Returns:
            The decoded phone number; an empty string for an empty entry
            (previously this raised IndexError).
        """
        phone_number = ""
        # Guard: an empty entry has no marker byte and no digits.
        if not phone_number_entry:
            return phone_number
        if phone_number_entry[0] == 0xa1:
            phone_number += '*'
            phone_number_entry = phone_number_entry[1::]
        # Guard the second marker check too: the entry may have consisted of
        # only the 0xA1 marker, leaving nothing to index.
        if phone_number_entry and phone_number_entry[0] == 0x81:
            phone_number += "+"
            phone_number_entry = phone_number_entry[1::]
        for phone_number_entry_part in phone_number_entry:
            # Swap the nibbles: the low-order digit is stored first.
            phone_number_element = ((phone_number_entry_part & 0x0F) << 4) | ((phone_number_entry_part & 0xF0) >> 4)
            phone_number_element = "{:02x}".format(phone_number_element)
            phone_number += phone_number_element
        # Drop the trailing padding nibble, if any; guard against an empty
        # result (entry with no digit bytes).
        if phone_number and phone_number[-1] == 'f':
            phone_number = phone_number[:-1]
        return phone_number
class ContactFactory:
    """Assembles Contact objects from raw SIM phonebook records."""

    contact_name_factory: ContactNameFactory
    phone_number_factory: PhoneNumberFactory

    def __init__(self, contact_name_factory, phone_number_factory):
        self.contact_name_factory = contact_name_factory
        self.phone_number_factory = phone_number_factory

    def create_contact(self, contact_bulk_data) -> Contact:
        """Split a raw record into name and number fields and decode both.

        Record layout: bytes 0-15 hold the 0xFF-padded name, byte 16 the
        length of the number field, and the following bytes the number.
        """
        name_field = contact_bulk_data[:16]
        number_length = contact_bulk_data[16]
        number_field = contact_bulk_data[17:17 + number_length]
        return Contact(
            contact_name=self.contact_name_factory.create_contact_name(name_field),
            phone_number=self.phone_number_factory.create_phone_number(number_field),
        )
def dump(sim_dump_directory_path: str, contacts_file_name="6F3A"):
    """Yield every contact stored in a SIM dump's phonebook file.

    Args:
        sim_dump_directory_path: directory containing the SIM dump.
        contacts_file_name: name of the phonebook file (EF_ADN is "6F3A").

    Yields:
        Contact: one decoded contact per 30-byte phonebook record, stopping
        at the all-0xFF filler tail of the file.
    """
    contacts_file_path = directory.find_file(sim_dump_directory_path, contacts_file_name)
    with open(contacts_file_path, 'rb') as contacts_file:
        contacts_file_content = contacts_file.read()
    contact_name_factory = ContactNameFactory()
    phone_number_factory = PhoneNumberFactory()
    contact_factory = ContactFactory(contact_name_factory, phone_number_factory)
    # Records are fixed 30-byte slots; the tail of the file is 0xFF filler.
    # Compute where the all-0xFF tail starts once, instead of rescanning the
    # remaining bytes with count() on every iteration (was O(n^2)).
    record_size = 30
    used_length = len(contacts_file_content.rstrip(b'\xff'))
    for offset in range(0, used_length, record_size):
        contact_bulk_data = contacts_file_content[offset:offset + record_size]
        yield contact_factory.create_contact(contact_bulk_data)
| StarcoderdataPython |
11212254 | # coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="imaging_base.py">
# Copyright (c) 2019 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
import os
import shutil
import asposeimagingcloud.models.requests as requests
class ImagingBase:
    """Base class for Aspose.Imaging Cloud examples.

    Provides shared helpers for locating sample images, uploading them to
    cloud storage, and saving processed results or image properties locally.
    Subclasses must implement :meth:`_get_sample_image_file_name`.
    """

    # The example images folder path
    EXAMPLE_IMAGES_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'Images')

    # The output folder path
    OUTPUT_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'Output')

    # The cloud path
    CLOUD_PATH = 'Examples'

    def __init__(self, imaging_api):
        # Configured Aspose Imaging API client used by all helpers below.
        self._imaging_api = imaging_api

    def _get_sample_image_file_name(self):
        """Return the sample image file name; must be overridden by examples."""
        raise NotImplementedError('get_sample_image_file_name must be implemented')

    def _get_modified_sample_image_file_name(self, from_request=False, new_format_extension=None):
        """Gets the name of the modified sample image file.

        Args:
            from_request: prefix with 'ModifiedFromRequest' instead of 'Modified'.
            new_format_extension: if given, replaces the file extension.
        """
        if new_format_extension:
            filename_part, extension = os.path.splitext(self._get_sample_image_file_name())
            filename = filename_part + '.' + new_format_extension
        else:
            filename = self._get_sample_image_file_name()
        return 'ModifiedFromRequest' + filename if from_request \
            else 'Modified' + filename

    def _upload_sample_image_to_cloud(self):
        """Uploads the example image to cloud"""
        local_input_image = os.path.join(ImagingBase.EXAMPLE_IMAGES_FOLDER, self._get_sample_image_file_name())
        self._upload_image_to_cloud(self._get_sample_image_file_name(), local_input_image)

    def _upload_image_to_cloud(self, image_name, image):
        """Uploads the image to cloud"""
        # NOTE(review): os.path.join uses the OS separator; on Windows this
        # produces a backslash in the cloud path -- confirm the storage API
        # normalizes it before changing.
        upload_file_request = requests.UploadFileRequest(os.path.join(ImagingBase.CLOUD_PATH, image_name), image)
        result = self._imaging_api.upload_file(upload_file_request)
        if result.errors:
            print('Uploading errors count: ' + str(len(result.errors)))
        else:
            print('Image ' + image_name + ' is uploaded to cloud storage')

    def _save_updated_sample_image_to_output(self, updated_image, from_request, new_format_extension=None):
        """Saves the updated image to local output folder"""
        new_file_name = self._get_modified_sample_image_file_name(from_request, new_format_extension)
        self._save_updated_image_to_output(new_file_name, updated_image)

    def _save_updated_image_to_output(self, image_name, updated_image):
        """Saves the updated image to output folder"""
        path = os.path.abspath(os.path.join(ImagingBase.OUTPUT_FOLDER, image_name))
        shutil.copy(updated_image, path)
        print('Image ' + image_name + ' is saved to ' + os.path.dirname(path))

    def _output_properties_to_file(self, file_name, imaging_response):
        """Write the image properties to a text file, one property per line."""
        path = os.path.abspath(os.path.join(ImagingBase.OUTPUT_FOLDER, file_name))
        with open(path, 'w') as output_file:
            # BUG FIX: the writes previously omitted line breaks, producing a
            # single unreadable run-on line; each property now gets its own line.
            output_file.write('Width: ' + str(imaging_response.width) + '\n')
            output_file.write('Height: ' + str(imaging_response.height) + '\n')
            output_file.write('Horizontal resolution: ' + str(imaging_response.horizontal_resolution) + '\n')
            output_file.write('Vertical resolution: ' + str(imaging_response.vertical_resolution) + '\n')
            output_file.write('Bits per pixel: ' + str(imaging_response.bits_per_pixel) + '\n')
            if imaging_response.tiff_properties:
                output_file.write('Tiff properties:\n')
                output_file.write('Frames count: ' + str(len(imaging_response.tiff_properties.frames)) + '\n')
                # BUG FIX: the old conditional expression bound over the whole
                # concatenation, so the 'Camera owner name: ' label was dropped
                # when EXIF data was missing; only the value is optional now.
                camera_owner_name = (imaging_response.tiff_properties.exif_data.camera_owner_name
                                     if imaging_response.tiff_properties.exif_data else '')
                output_file.write('Camera owner name: ' + camera_owner_name + '\n')
                output_file.write('Byte order: ' + str(imaging_response.tiff_properties.byte_order) + '\n')
        print('File ' + file_name + ' is saved to ' + os.path.dirname(path))

    def _print_header(self, header):
        print(header)
        print()
| StarcoderdataPython |
3387545 | """This module implements methods to load dictonaries from text-based configuration files.
""" # noqa: E501
import sys
import os
import io
from os import PathLike
import errno
from pathlib import Path
import yaml
import toml
import json
from functools import partial
from typing import Any, MutableMapping, Dict, Callable, cast, Union, Sequence, AnyStr
from pydantic import BaseSettings
from .config_data_types import ConfigDataTypes
from ..utility.typing import FilePathOrBuffer, Buffer, mmap
from ..utility.dict_deep_update import dict_deep_update
MAX_CONFIG_FILE_SIZE = 1024 * 1024 * 1024
class DictLoadError(Exception):
    """Uniform error for configuration documents that cannot be parsed
    into a dictionary, regardless of the backing library (PyYAML, toml,
    json's standard parser).

    The attributes mirror the constructor arguments so callers can report
    the failure location consistently across parsers.

    Args:
        message: parser error message describing the failure.
        document: full or partial parsed document (library-dependent).
        position: character offset of the error from document start.
        line_number: line of the error in the parsed file.
        column_number: column of the error within that line.
    """

    def __init__(self, message: str, document: str = None,
                 position: int = None, line_number: int = None,
                 column_number: int = None):
        super().__init__(message)
        self.message = message
        self.document = document
        self.position = position
        self.line_number = line_number
        self.column_number = column_number
def _determine_config_file_type(file_path: Union[Path, str]) -> ConfigDataTypes:
    """Infer a configuration file's data type from its suffix.

    Known suffixes: ``.json``/``.jsn`` -> JSON; ``.toml``/``.tml``/``.ini``/
    ``.config``/``.cfg`` -> TOML; ``.yaml``/``.yml`` -> YAML.  Any other
    suffix yields ``ConfigDataTypes.unknown``.

    Args:
        file_path: path of the file whose type should be inferred

    Returns:
        the matching ``ConfigDataTypes`` member, or ``ConfigDataTypes.unknown``

    Example:
        ```python
        >>> from pycmdlineapp_groundwork.config.config_file_loaders import _determine_config_file_type
        >>> _determine_config_file_type("my_config.ini")
        <ConfigDataTypes.toml: 'toml'>
        ```
    """
    suffix_to_type = {
        ".json": ConfigDataTypes.json,
        ".jsn": ConfigDataTypes.json,
        ".toml": ConfigDataTypes.toml,
        ".tml": ConfigDataTypes.toml,
        ".ini": ConfigDataTypes.toml,
        ".config": ConfigDataTypes.toml,
        ".cfg": ConfigDataTypes.toml,
        ".yml": ConfigDataTypes.yaml,
        ".yaml": ConfigDataTypes.yaml,
    }
    # Path() accepts both str and Path inputs, so no explicit branch needed.
    suffix = Path(file_path).suffix.lower()
    return suffix_to_type.get(suffix, ConfigDataTypes.unknown)
def _load_dict_from_json_stream_or_file(
    file_path: Union[PathLike[str], Buffer[AnyStr]],
    data_type: ConfigDataTypes = ConfigDataTypes.infer,
    encoding: str = "utf-8",
) -> Union[MutableMapping[str, Any], None]:
    """Load the content of a structured text file or stream into a dictionary using one
    of the JSON parsing library (from standard lib https://docs.python.org/3/library/json.html)

    Internal function which assumes checking of file existance, accessibility and size has been done elsewhere.

    Args:
        file_path: path to the file to be parsed or opened stream or buffer
        data_type: optional, pre-defines the data type to be parsed; if not provided, data type is determined by file name's suffix or file/stream content.
        encoding: encoding type passed to open-function, in case path is given; ignored in case an already opened file/stream/buffer is given as `file_path`

    Raises:
        DictLoadError: if the given file/stream/buffer could not be read into a dictionary (eg due to wrong syntax) and given data type is `ConfigDataTypes.json` (otherwise, no Exception is raised, instead None is returned)

    Returns:
        dictionary with parsed file/buffer/stream content or None in case of error and no exception was raised. In case of error, resets file-pointer to 0, if open file was given.
    """
    try:
        if isinstance(file_path, Path):
            # Paths are read and decoded here; streams/buffers are handed to
            # json.load directly so it consumes them as-is.
            return json.loads(file_path.read_text(encoding=encoding))
        else:
            return json.load(file_path)  # type: ignore
    except AttributeError as e:
        # The object was neither a Path nor file-like (no usable .read) --
        # this is always a hard error, independent of the requested data type.
        raise DictLoadError(
            message=f"Invalid file provided {str(file_path)}.",
            document="",
            position=0,
            line_number=0,
            column_number=0,
        )
    except json.JSONDecodeError as e:
        # Only escalate parse errors when the caller explicitly asked for
        # JSON; otherwise return None so the caller can try the next parser.
        if data_type == ConfigDataTypes.json:
            raise DictLoadError(
                message=e.msg,
                document=e.doc,
                position=e.pos,
                line_number=e.lineno,
                column_number=e.colno,
            )
        # on error, reset file pointer, if opened file-like was given
        if not isinstance(file_path, Path):
            file_path.seek(0)  # type: ignore
        return None
def _load_dict_from_toml_stream_or_file(
    file_path: Union[PathLike[str], Buffer[AnyStr]],
    data_type: ConfigDataTypes = ConfigDataTypes.infer,
    encoding: str = "utf-8",
) -> Union[MutableMapping[str, Any], None]:
    """Load the content of a structured text file or stream into a dictionary using one
    of the TOML parsing library (from https://github.com/uiri/toml)

    Internal function which assumes checking of file existance, accessibility and size has been done elsewhere.

    Args:
        file_path: path to the file to be parsed or opened stream or buffer
        data_type: optional, pre-defines the data type to be parsed; if not provided, data type is determined by file name's suffix or file/stream content.
        encoding: encoding type used to decode binary files/streams/buffers; ignored, if string, `Path` or file opened in text-mode is given as `file_path`

    Raises:
        DictLoadError: if the given file/stream/buffer could not be read into a dictionary (eg due to wrong syntax) and given data type is `ConfigDataTypes.toml` (otherwise, no Exception is raised, instead None is returned)

    Returns:
        dictionary with parsed file/buffer/stream content or None in case of error and no exception was raised. In case of error, resets file-pointer to 0, if open file was given.
    """
    try:
        # Binary file-likes (mode contains 'b', raw/buffered IO, mmap) must be
        # decoded manually because toml.load expects text.
        if (hasattr(file_path, "mode") and "b" in file_path.mode) or isinstance(  # type: ignore
            file_path, (io.RawIOBase, io.BufferedIOBase, mmap)
        ):
            return toml.loads(file_path.read().decode(encoding))  # type: ignore
        elif isinstance(file_path, io.StringIO):
            # StringIO: read via getvalue() so the buffer position is irrelevant.
            return toml.loads(file_path.getvalue())  # type: ignore
        else:
            return toml.load(file_path)  # type: ignore
    except TypeError as e:
        # toml.load rejects objects that are neither paths nor file-likes --
        # always a hard error, independent of the requested data type.
        raise DictLoadError(
            message=f"Invalid file provided {str(file_path)}.",
            document="",
            position=0,
            line_number=0,
            column_number=0,
        )
    except toml.TomlDecodeError as e:
        # Only escalate parse errors when the caller explicitly asked for
        # TOML; otherwise return None so the caller can try the next parser.
        if data_type == ConfigDataTypes.toml:
            raise DictLoadError(
                message=e.msg,  # type: ignore
                document=e.doc,  # type: ignore
                position=e.pos,  # type: ignore
                line_number=e.lineno,  # type: ignore
                column_number=e.colno,  # type: ignore
            )
        # on error, reset file pointer, if opened file-like was given
        if not isinstance(file_path, Path):
            file_path.seek(0)  # type: ignore
        return None
def _load_dict_from_yaml_stream_or_file(
    file_path: Union[PathLike[str], Buffer[AnyStr]],
    data_type: ConfigDataTypes = ConfigDataTypes.infer,
    encoding: str = "utf-8",
) -> Union[MutableMapping[str, Any], None]:
    """Load the content of a structured text file or stream into a dictionary using one
    of the YAML parsing library (from https://pyyaml.org/)

    Internal function which assumes checking of file existance, accessibility and size has been done elsewhere.

    Args:
        file_path: path to the file to be parsed or opened stream or buffer
        data_type: optional, pre-defines the data type to be parsed; if not provided, data type is determined by file name's suffix or file/stream content.
        encoding: encoding type passed to open-function, in case path is given; ignored in case an already opened file/stream/buffer is given as `file_path`

    Raises:
        DictLoadError: if the given file/stream/buffer could not be read into a dictionary (eg due to wrong syntax) and given data type is `ConfigDataTypes.yaml` (otherwise, no Exception is raised, instead None is returned)

    Returns:
        dictionary with parsed file/buffer/stream content or None in case of error and no exception was raised. In case of error, resets file-pointer to 0, if open file was given.
    """
    try:
        if isinstance(file_path, Path):
            # safe_load avoids arbitrary object construction from YAML tags.
            return yaml.safe_load(file_path.read_text(encoding=encoding))
        else:
            return yaml.safe_load(file_path)  # type: ignore
    except AttributeError as e:
        # The object was neither a Path nor stream-like -- always a hard
        # error, independent of the requested data type.
        raise DictLoadError(
            message=f"Invalid file provided {str(file_path)}.",
            document="",
            position=0,
            line_number=0,
            column_number=0,
        )
    except yaml.YAMLError as e:
        # Only escalate parse errors when the caller explicitly asked for
        # YAML; otherwise return None so the caller can try the next parser.
        if data_type == ConfigDataTypes.yaml:
            # problem_mark carries the error location when PyYAML can pinpoint it.
            if hasattr(e, 'problem_mark'):
                raise DictLoadError(
                    message=f"YAML syntax error. {e.problem_mark}",  # type: ignore
                )
            else:
                # NOTE(review): the message concatenation puts the file path and
                # its trailing period before the sentence -- consider reordering.
                raise DictLoadError(
                    message=(
                        f" {str(file_path)}."
                        "Undetermined error while trying to parse as yaml file"
                    )
                )
        # on error, reset file pointer, if opened file-like was given
        if not isinstance(file_path, Path):
            file_path.seek(0)  # type: ignore
        return None
def load_dict_from_file(
    file_path: FilePathOrBuffer,
    data_type: ConfigDataTypes = ConfigDataTypes.infer,
    encoding: str = "utf-8",
    max_file_size: int = MAX_CONFIG_FILE_SIZE,
) -> MutableMapping[str, Any]:
    """Load the content of a structured text file or stream into a dictionary using one
    of the standard parsing libraries (eg. PyYaml).

    Depending on the determined file type, the files are parsed using the following parsers:

    Data Type | Parser
    ---- | -----------
    `ConfigDataTypes.json` | Python standard-lib [JSON parser](https://docs.python.org/3/library/json.html#json.JSONDecodeError)
    `ConfigDataTypes.toml` | [toml parser](https://pypi.org/project/toml/)
    `ConfigDataTypes.yaml` | [PyYAML parser](https://pyyaml.org/)
    `ConfigDataTypes.infer`| try to determine one of the above by checking file suffix or by parsing the buffer/stream without errors
    `ConfigDataTypes.unknown`| raises `ValueError()`

    If `data_type` is `ConfigDataTypes.infer` (the standard), then the function tries to determine the file/stream/buffer
    content in the following order:

    1. if it is a file, check file suffix against known data-types, see: [_determine_config_file_type][pycmdlineapp_groundwork.config.config_file_loaders.determine_config_file_type]
    2. if it is not a file or suffix is not known, try to load the data into a dictionary, trying one parser after the other
    3. if a data format has been provided in `data_type`, but parsing did not succeed, raise a DictLoadError exception

    When trying parsers, JSON is always preferred, then TOML, finally YAML. This order is adapted, if
    a `data_type`has been provided.

    Data Type | Load order
    ---- | -----------
    `ConfigDataTypes.json` | JSON, TOML, YAML
    `ConfigDataTypes.toml` | TOML, JSON, YAML
    `ConfigDataTypes.yaml` | YAML, JSON, TOML
    `ConfigDataTypes.unknown`| JSON, TOML, YAML

    Args:
        file_path: path to the file to be parsed or opened stream or buffer
        data_type: optional, pre-defines the data type to be parsed; if `ConfigDataTypes.unknown` or `ConfigDataTypes.infer`, data type is tried to be determined by file name's suffix or file/stream content.
        encoding: encoding type passed to an open-function, in case path is given; ignored in case an already opened file/stream/buffer is given as `file_path`
        max_config_file_size: maximum size a config file may have, otherwise an exception is raised

    Raises:
        DictLoadError: if the given file/stream/buffer could not be read into a dictionary (eg due to wrong syntax)
        ValueError: if trying to read a file whose size > max_file_size
        FileNotFoundError: if `file_path` could not be resolved and/or file was not accessible
        IsADirectoryError: if `file_path` could be resolved, but is a directory instead of a file

    Returns:
        dictionary with parsed file/buffer/stream content

    Example:
        ```python
        >>> from tempfile import mkdtemp
        >>> from shutil import rmtree
        >>> from pycmdlineapp_groundwork.config.config_file_loaders import load_dict_from_file
        >>> # create a temporary config file for this example
        >>> dirpath = mkdtemp()
        >>> temp_config_name = Path(dirpath) / "config.ini"
        >>> temp_config = open(temp_config_name, "wt")
        >>> _ = temp_config.write('foobar = "johndoe"')
        >>> temp_config.close()
        >>> temp_config = open(temp_config_name)
        >>> # load dictionary from file, file type is determined by trying to parse content w/o error
        >>> load_dict_from_file(temp_config)
        {'foobar': 'johndoe'}
        >>> temp_config.close()
        >>> rmtree(dirpath)
        ```
    """
    if file_path is None:
        raise ValueError("Data input object is None.")
    if isinstance(file_path, str):
        file_path = Path(file_path)
    determined_data_type = data_type
    # if the given input is a path to a file try to make sure it exists
    # and is readable. Also try to determine the data type by looking at
    # the file suffix, in case the function's caller has not defined the data type
    if isinstance(file_path, Path):
        file_path = file_path.resolve()
        if not file_path.is_file():
            if not file_path.exists():
                raise FileNotFoundError(
                    errno.ENOENT, os.strerror(errno.ENOENT), str(file_path)
                )
            if file_path.is_dir():
                raise IsADirectoryError(
                    errno.ENOENT, "Is a directory instead of a file", str(file_path)
                )
        # Size check guards against accidentally slurping huge files into memory.
        file_size = file_path.stat().st_size
        if file_size > max_file_size:
            raise ValueError(
                f"File {str(file_path)}: File size {file_size} exceeds max allowed size"
                f" {max_file_size}."
            )
        determined_data_type = (
            _determine_config_file_type(file_path)
            if data_type == ConfigDataTypes.infer
            else data_type
        )
    # set the order in which file is tried to be loaded in a way that a given (by argument
    # or by file-ending) data-type is done first, then json as this is the most significant
    # regarding the data and finally the remaining types.
    resolve_order = [ConfigDataTypes.json, ConfigDataTypes.toml, ConfigDataTypes.yaml]
    if determined_data_type == ConfigDataTypes.toml:
        resolve_order = [
            ConfigDataTypes.toml,
            ConfigDataTypes.json,
            ConfigDataTypes.yaml,
        ]
    elif determined_data_type == ConfigDataTypes.yaml:
        resolve_order = [
            ConfigDataTypes.yaml,
            ConfigDataTypes.json,
            ConfigDataTypes.toml,
        ]
    # Each loader returns None for "not my format" and only raises when
    # determined_data_type names its own format, so falling through here
    # means "try the next parser".
    for resolve_data_type in resolve_order:
        if resolve_data_type == ConfigDataTypes.json:
            result = _load_dict_from_json_stream_or_file(
                file_path, determined_data_type, encoding=encoding
            )
            if result is not None:
                return result
        elif resolve_data_type == ConfigDataTypes.toml:
            result = _load_dict_from_toml_stream_or_file(
                file_path, determined_data_type, encoding=encoding
            )
            if result is not None:
                return result
        elif resolve_data_type == ConfigDataTypes.yaml:
            result = _load_dict_from_yaml_stream_or_file(
                file_path, determined_data_type, encoding=encoding
            )
            if result is not None:
                return result
    # All parsers declined: report the unparseable content to the caller.
    raise DictLoadError(
        message=(
            f"Format of config data {str(file_path)} (type {type(file_path)}) could not"
            " be determined to be one of"
            f" [{', '.join(ConfigDataTypes.allowed_names())}]."
        ),
        document=file_path.read_text() if isinstance(file_path, Path) else file_path.read(),  # type: ignore
        position=0,
        line_number=0,
        column_number=0,
    )
def _settings_config_load(
    settings: BaseSettings,
    file_path: Union[FilePathOrBuffer, Sequence[FilePathOrBuffer]] = None,
    data_type: ConfigDataTypes = ConfigDataTypes.infer,
    encoding: str = "utf-8",
    error_handling: str = "propagate",
) -> Dict[str, Any]:
    """Loads settings from a file, stream or buffer into a dictionary that can be loaded by pydantic into settings classes.

    This function is not intended to be called directly, but to be used in connection [get_settings_config_load_function][pycmdlineapp_groundwork.config.config_file_loaders.get_settings_config_load_function]

    In case of permission errors or load errors the standard behaviour of
    this function is to propagate the exceptions to the caller. If successful, this
    function loads the first data source found/accessible/readable into a dictionary
    and returns this dictionary so that pydantic can try to set values in settings from it. All further
    sources (e.g. not found files) do not lead to exceptions or abortion, but are silently discarded.

    Args:
        settings: the pydantic settings instance being populated; required by
            pydantic's settings-source calling convention but not read here.
        file_path: path, list of paths, stream or list of streams to configuration data
        data_type: type of configuration data, if known or pre-defined
        encoding: encoding type passed to an open-function, in case path is given; ignored in case an already opened file/stream/buffer is given as `file_path`
        error_handling: one of `["abort", "ignore", "propagate"]`, where
            `abort` calls `sys-exit()` on load error,
            `ignore` does nothing and ultimatley returns an empty dictionary, if no data could be loaded and
            `propagate` raises the exceptions and leaves handling to the caller
            Default to `propagate`, if no value or `None` is given.

    Raises:
        ValueError: if error_handling is not one of `["abort", "ignore", "propagate"]` or if file_path is None
        IOError: if eg. permission to a given file is denied and error_handling is `propagate`
        DictLoadError: if the given data could not be read into a dictionary (eg due to wrong syntax)

    Returns:
        a dictionary with the values and structures read from the given file, stream or buffer
    """
    allowed_error_handling = ["abort", "ignore", "propagate"]
    if error_handling is None:
        error_handling = "propagate"
    if error_handling not in allowed_error_handling:
        raise ValueError(
            f"Invalid error handling type. Expected one of: {allowed_error_handling}"
        )
    if file_path is None:
        if error_handling == "abort":
            print(f"File path is not a valid type.\nAbort!")
            sys.exit()
        elif error_handling == "propagate":
            raise ValueError("File path is not a valid type.")
        else:
            return {}
    # Normalize the input into a list so single paths/streams and sequences
    # are handled by the same loop below.
    config_data_elements: Sequence[FilePathOrBuffer] = list()
    if isinstance(file_path, list):
        config_data_elements = file_path
    elif isinstance(file_path, tuple):
        config_data_elements = list(file_path)
    else:
        config_data_elements = [file_path]  # type: ignore
    result_dict: Dict[str, Any] = {}
    for config_data in config_data_elements:
        exists = False
        if isinstance(config_data, str):
            config_data = Path(config_data)
        if isinstance(config_data, Path):
            config_data = config_data.resolve()
            # Missing files are silently skipped; only parse/permission errors
            # are subject to the error_handling policy.
            exists = config_data.is_file()
        else:
            # Streams/buffers cannot be probed for existence; assume readable.
            exists = True
        if exists:
            try:
                load_result: Dict[str, Any] = cast(
                    Dict[str, Any],
                    load_dict_from_file(config_data, data_type, encoding=encoding),
                )
                # Later sources are merged over earlier ones, recursively.
                dict_deep_update(result_dict, load_result)  # type: ignore
            except DictLoadError as e:
                if error_handling == "abort":
                    print(
                        f"{e.message}\nContext:\n{e.document}\nPosition ="
                        f" {e.position}, line number = {e.line_number},"
                        f" column_number = {e.column_number}"
                    )
                    sys.exit()
                elif error_handling == "propagate":
                    raise e
            except IOError as e:
                # catch permission errors, which are propagated instead of returning false for is_file()
                if error_handling == "abort":
                    print(f"{e}\nAbort!")
                    sys.exit()
                elif error_handling == "propagate":
                    raise e
    return result_dict
def get_settings_config_load_function(
    file_path: Union[FilePathOrBuffer, Sequence[FilePathOrBuffer]] = None,
    data_type: ConfigDataTypes = ConfigDataTypes.infer,
    encoding: str = "utf-8",
    error_handling: str = "abort",
) -> Callable[[BaseSettings], Dict[str, Any]]:
    """Build a settings-source callable for pydantic's ``BaseSettings``.

    The returned callable can be returned from a settings class's
    ``Config.customise_sources`` so that values are loaded from the given
    configuration file(s), stream(s) or buffer(s) at settings-construction
    time. See the [pydantic documentation](https://pydantic-docs.helpmanual.io/usage/settings/#adding-sources)
    for how sources are combined.

    Args:
        file_path: path or list of paths to configuration files, streams or buffers
        data_type: type of configuration data, if known/pre-defined
        encoding: encoding type passed to an open-function, in case path is given; ignored in case an already opened file/stream/buffer is given as `file_path`
        error_handling: one of `["abort", "ignore", "propagate"]`, where
            `abort` calls `sys-exit()` on load error,
            `ignore` does nothing and ultimatley returns an empty dictionary, if no data could be loaded and
            `propagate` raises the exceptions and leaves handling to the caller

    Raises:
        ValueError: raised by the returned callable if error_handling is not
            one of `["abort", "ignore", "propagate"]`

    Returns:
        a function that maps a ``BaseSettings`` instance to a dictionary of
        values read from file, buffer or stream (empty on ignored errors)
    """
    # Pre-bind every configuration argument; pydantic later calls the result
    # with only the settings instance as positional argument.
    bound_arguments = {
        "file_path": file_path,
        "data_type": data_type,
        "encoding": encoding,
        "error_handling": error_handling,
    }
    return partial(_settings_config_load, **bound_arguments)
| StarcoderdataPython |
5177122 | import os
import json
from .grades import AssignmentComponentGrade
from .constants import SUBMISSION_META_FILE, SUBMISSION_FILES_DIRECTORY
from .utils import ConfigDictMixin, datetime_to_string, copy_globs, \
FileNotFoundError
class BrokenSubmissionError(Exception):
    """Raised when a submission is missing files or otherwise ungradable.

    Attributes:
        message: short human-readable description of the problem.
        verbose: optional longer diagnostic text, or None.
    """

    def __init__(self, message, verbose=None):
        super(BrokenSubmissionError, self).__init__(message)
        self.message = message
        self.verbose = verbose
class Submission(ConfigDictMixin):
def __init__(self, student_name, assignment, metadata_path, files_path,
graded, id=None, seconds_late=None, error=None,
component_grades=None):
self.student_name = student_name
self.assignment = assignment
self.metadata_path = metadata_path
self.files_path = files_path
self.graded = graded
self.id = id
self.seconds_late = seconds_late
if component_grades is not None and error is not None:
raise ValueError('either specify component_grades or error, '
'but not both')
self.error = error
if component_grades is None:
self.component_grades = None
else:
self.component_grades = [
AssignmentComponentGrade.from_config_dict(grade_dict)
for grade_dict in component_grades]
# TODO: Validate the path - make sure everything that's needed for the
# assignment is available in the path
@staticmethod
def get_paths(path):
"""
Calculate the metadata and files paths for a standard submission
from `zucc load'.
"""
metadata_path = os.path.join(path, SUBMISSION_META_FILE)
files_path = os.path.join(path, SUBMISSION_FILES_DIRECTORY)
return metadata_path, files_path
@classmethod
def load_from_empty_dir(cls, assignment, path, **kwargs):
"""
Load a Submission instance from an uninitialized submission directory.
"""
metadata_path, files_path = cls.get_paths(path)
return cls(assignment=assignment, metadata_path=metadata_path,
files_path=files_path, **kwargs)
@classmethod
def load_from_dir(cls, assignment, path):
"""Load a Submission instance from a submission directory."""
metadata_path, files_path = cls.get_paths(path)
with open(metadata_path) as meta_file:
meta_json = json.load(meta_file)
return cls.from_config_dict(meta_json, assignment=assignment,
metadata_path=metadata_path,
files_path=files_path)
@classmethod
def load_from_raw_files(cls, assignment, files_path):
return Submission(student_name='', assignment=assignment,
metadata_path=None, files_path=files_path,
graded=False)
@classmethod
def load_from_component_grades_json(cls, assignment, component_grades_fp,
seconds_late=None):
component_grades = json.load(component_grades_fp)
return Submission(student_name='', assignment=assignment,
metadata_path=None, files_path=None, graded=True,
seconds_late=seconds_late,
component_grades=component_grades)
def _meta_json(self):
"""Return a json representation of this instance"""
meta = self.to_config_dict('assignment', 'metadata_path', 'files_path')
if 'submission-time' in meta:
meta['submission-time'] = datetime_to_string(self.submission_time)
return meta
def _write_meta_json(self):
"""Json-ify this instance and write to the metadata json file"""
meta = self._meta_json()
with open(self.metadata_path, 'w') as meta_file:
json.dump(meta, meta_file, sort_keys=True, indent=2,
separators=(',', ': '))
def initialize_metadata(self):
"""Create initial meta.json"""
self._write_meta_json()
def is_broken(self):
    """Return True if grading flagged a problem with this submission.

    A submission is broken when a grading error was recorded, or when it
    has component grades and at least one of them reports itself broken.
    (The original relied on `and` binding tighter than `or`; the explicit
    structure below has identical semantics but is unambiguous.)
    """
    if self.error is not None:
        return True
    return (self.component_grades is not None
            and any(component.is_broken()
                    for component in self.component_grades))
# XXX Support copying directories
def copy_files(self, files, path, allow_fail=False):
    """Copy the glob patterns in *files* from this submission into *path*.

    :param files: iterable of glob patterns relative to the files dir
    :param path: destination directory
    :param allow_fail: when True, silently ignore unmatched patterns
    :raises BrokenSubmissionError: a pattern matched nothing and
        *allow_fail* is False
    """
    try:
        copy_globs(files, self.files_path, path)
    except FileNotFoundError as err:
        if not allow_fail:
            # Chain the original error so the traceback keeps its cause.
            raise BrokenSubmissionError(str(err)) from err
def write_grade(self, component_grades): # (Dict[object, object]) -> None
    """
    Set the component grades to `component_grades' and write the
    new submission metadata to the metadata file.
    """
    self.graded = True
    # Clear any previous grading error; this submission is now graded.
    self.error = None
    self.component_grades = component_grades
    self._write_meta_json()
| StarcoderdataPython |
1875459 | <reponame>ljmcgann/python-ironicclient<filename>ironicclient/tests/functional/osc/v1/test_baremetal_deploy_template_basic.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import ddt
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from ironicclient.tests.functional.osc.v1 import base
@ddt.ddt
class BaremetalDeployTemplateTests(base.TestCase):
    """Functional tests for baremetal deploy template commands."""
    @staticmethod
    def _get_random_trait():
        # Deploy template names must be valid traits: CUSTOM_ prefix with
        # underscores, no hyphens.
        return data_utils.rand_name('CUSTOM', '').replace('-', '_')
    def setUp(self):
        # Each test gets a fresh deploy template with one BIOS deploy step.
        super(BaremetalDeployTemplateTests, self).setUp()
        self.steps = json.dumps([{
            'interface': 'bios',
            'step': 'apply_configuration',
            'args': {},
            'priority': 10,
        }])
        name = self._get_random_trait()
        self.template = self.deploy_template_create(
            name, params="--steps '%s'" % self.steps)
    def tearDown(self):
        # self.template is set to None by tests that delete it themselves
        # (see test_delete), so only clean up when it still exists.
        if self.template is not None:
            self.deploy_template_delete(self.template['uuid'])
        super(BaremetalDeployTemplateTests, self).tearDown()
    def test_list(self):
        """Check baremetal deploy template list command.
        Test steps:
        1) Create baremetal deploy template in setUp.
        2) List baremetal deploy templates.
        3) Check deploy template name and UUID in deploy templates list.
        """
        template_list = self.deploy_template_list()
        self.assertIn(self.template['name'],
                      [template['Name']
                       for template in template_list])
        self.assertIn(self.template['uuid'],
                      [template['UUID']
                       for template in template_list])
    def test_list_long(self):
        """Check baremetal deploy template list --long command
        Test steps:
        1) Create baremetal deploy template in setUp.
        2) List baremetal deploy templates with detail=True.
        3) Check deploy template fields in output.
        """
        template_list = self.deploy_template_list(params='--long')
        # Pick out our template from the detailed listing by name.
        template = [template for template in template_list
                    if template['Name'] == self.template['name']][0]
        self.assertEqual(self.template['extra'], template['Extra'])
        self.assertEqual(self.template['name'], template['Name'])
        self.assertEqual(self.template['steps'], template['Steps'])
        self.assertEqual(self.template['uuid'], template['UUID'])
    def test_show(self):
        """Check baremetal deploy template show command with UUID.
        Test steps:
        1) Create baremetal deploy template in setUp.
        2) Show baremetal deploy template calling it by UUID.
        3) Check deploy template fields in output.
        """
        template = self.deploy_template_show(self.template['uuid'])
        self.assertEqual(self.template['extra'], template['extra'])
        self.assertEqual(self.template['name'], template['name'])
        self.assertEqual(self.template['steps'], template['steps'])
        self.assertEqual(self.template['uuid'], template['uuid'])
    def test_delete(self):
        """Check baremetal deploy template delete command.
        Test steps:
        1) Create baremetal deploy template in setUp.
        2) Delete baremetal deploy template by UUID.
        3) Check that deploy template deleted successfully and not in list.
        """
        output = self.deploy_template_delete(self.template['uuid'])
        self.assertIn('Deleted deploy template {0}'.format(
                      self.template['uuid']), output)
        template_list = self.deploy_template_list()
        self.assertNotIn(self.template['name'],
                         [template['Name'] for template in template_list])
        self.assertNotIn(self.template['uuid'],
                         [template['UUID'] for template in template_list])
        # Signal tearDown that the template is already gone.
        self.template = None
    def test_set_steps(self):
        """Check baremetal deploy template set command for steps.
        Test steps:
        1) Create baremetal deploy template in setUp.
        2) Set steps for deploy template.
        3) Check that baremetal deploy template steps were set.
        """
        steps = [{
            'interface': 'bios',
            'step': 'apply_configuration',
            'args': {},
            'priority': 20,
        }]
        self.openstack("baremetal deploy template set --steps '{0}' {1}"
                       .format(json.dumps(steps), self.template['uuid']))
        show_prop = self.deploy_template_show(self.template['uuid'],
                                              fields=['steps'])
        self.assertEqual(steps, show_prop['steps'])
    def test_set_unset(self):
        """Check baremetal deploy template set and unset commands.
        Test steps:
        1) Create baremetal deploy template in setUp.
        2) Set extra data for deploy template.
        3) Check that baremetal deploy template extra data was set.
        4) Unset extra data for deploy template.
        5) Check that baremetal deploy template extra data was unset.
        """
        extra_key = 'ext'
        extra_value = 'testdata'
        self.openstack(
            'baremetal deploy template set --extra {0}={1} {2}'
            .format(extra_key, extra_value, self.template['uuid']))
        show_prop = self.deploy_template_show(self.template['uuid'],
                                              fields=['extra'])
        self.assertEqual(extra_value, show_prop['extra'][extra_key])
        self.openstack('baremetal deploy template unset --extra {0} {1}'
                       .format(extra_key, self.template['uuid']))
        show_prop = self.deploy_template_show(self.template['uuid'],
                                              fields=['extra'])
        self.assertNotIn(extra_key, show_prop['extra'])
    # Each tuple is (argument, value, expected-error-regex); ddt expands
    # these into one test case per tuple.
    @ddt.data(
        ('--uuid', '', 'expected one argument'),
        ('--uuid', '!@#$^*&%^', 'Expected a UUID'),
        ('', '', 'the following arguments are required'),
        ('', 'not/a/name', 'Deploy template name must be a valid trait'),
        ('', 'foo', 'Deploy template name must be a valid trait'),
        ('--steps', '', 'expected one argument'),
        ('--steps', '[]', 'No deploy steps specified'))
    @ddt.unpack
    def test_create_negative(self, argument, value, ex_text):
        """Check errors on invalid input parameters."""
        base_cmd = 'baremetal deploy template create'
        if argument != '':
            base_cmd += ' %s' % self._get_random_trait()
        if argument != '--steps':
            base_cmd += " --steps '%s'" % self.steps
        command = self.construct_cmd(base_cmd, argument, value)
        self.assertRaisesRegex(exceptions.CommandFailed, ex_text,
                               self.openstack, command)
| StarcoderdataPython |
5074436 | <gh_stars>1-10
### Kmeans Algorithm for IRIS dataset
#This is easily extandable to any problem/dataset
### author: Dr. <NAME>
### year: 2015
### contact: <EMAIL>
import numpy as np
class KMeans:
    """Plain k-means clustering with three fixed clusters (iris-style data)."""
    def __init__(self, dataraw):
        # Last column holds the class label; the rest are features.
        self.y = dataraw[:, -1]
        self.x = dataraw[:, 0:-1]
    def initializecluster(self, x, y):
        """Pick three random rows of *x* as the initial centroids."""
        n = len(y)
        first = x[np.random.randint(0, n), :]
        second = x[np.random.randint(0, n), :]
        third = x[np.random.randint(0, n), :]
        return first, second, third
    def assignmentstep(self, x, C1, C2, C3):
        """Label each row of *x* with the index (1-3) of its strictly
        closest centroid; equidistant ties keep the default label 0."""
        labels = np.zeros(len(x))
        for idx, point in enumerate(x):
            d1, d2, d3 = (np.linalg.norm(point - c) for c in (C1, C2, C3))
            if d1 < d2 and d1 < d3:
                labels[idx] = 1
            elif d2 < d1 and d2 < d3:
                labels[idx] = 2
            elif d3 < d1 and d3 < d2:
                labels[idx] = 3
        return labels
    def movecentroidstep(self, x, C):
        """Recompute each centroid as the mean of its assigned points."""
        return tuple(np.mean(x[C == label], axis=0) for label in (1, 2, 3))
    def calculatecost(self, x, C, C1, C2, C3):
        """Return the summed per-cluster normalized squared distance of
        points to their centroid (empty clusters contribute 0)."""
        total = 0
        for label, centroid in ((1, C1), (2, C2), (3, C3)):
            members = x[C == label]
            if len(members) != 0:
                total += (1. / len(members)) * np.power(
                    np.linalg.norm(members - centroid), 2)
        return total
############################################################################
def runKM():
    """Run one full k-means pass over the iris dataset ("iris.txt").

    Returns (accuracy_percent, labels) where *labels* is the final
    cluster assignment for every row. Cleanups vs. the original: the
    unused initial assignment was removed, and the per-iteration cost is
    stored in a 1-D array (the cost is a single scalar, not a row).
    """
    dataraw = np.loadtxt("iris.txt", comments="#", delimiter=",", unpack=False)
    KM = KMeans(dataraw)
    x = KM.x
    y = KM.y
    [C1, C2, C3] = KM.initializecluster(x, y)
    Maxit = 1000
    J = np.zeros(Maxit)  # cost history, useful for convergence checks
    for i in range(0, Maxit):
        C = KM.assignmentstep(x, C1, C2, C3)
        [C1, C2, C3] = KM.movecentroidstep(x, C)
        J[i] = KM.calculatecost(x, C, C1, C2, C3)
    # Labels happen to match the dataset's 1/2/3 encoding only when the
    # random initialization lands in the right order.
    accuracy = (np.mean(C == y)) * 100
    return accuracy, C
##########################################################################
if __name__ == '__main__':
    # Restart k-means several times and keep the best-scoring run
    # (k-means is sensitive to the random initial centroids).
    acc = 0
    Center = np.zeros(150)
    for i in range(0, 10):  # np.random.seed(145) gives 88.67% accuracy
        [accuracy, C] = runKM()
        if acc < accuracy:
            acc = accuracy
            Center = C
    # print() calls work on both Python 2 and 3; the original used the
    # Python-2-only `print acc` statement form.
    print(acc)
    print(Center)
| StarcoderdataPython |
1928352 | # LIBTBX_SET_DISPATCHER_NAME mmtbx.ssm_rmsd_for_chains
from __future__ import absolute_import, division, print_function
import sys
from mmtbx.geometry_restraints.torsion_restraints import utils
from libtbx.utils import Sorry
def run(args):
    """Print pairwise SSM RMSD between all protein/NA chains of two PDB files.

    :param args: sequence of exactly two PDB file paths
    :raises Sorry: when the argument count is wrong
    """
    if len(args) != 2:
        raise Sorry("mmtbx.ssm_rmsd_for_chains requires two PDB files as input")
    file1 = args[0]
    file2 = args[1]
    import iotbx.pdb
    pdb1 = iotbx.pdb.input(file_name=file1).construct_hierarchy()
    pdb2 = iotbx.pdb.input(file_name=file2).construct_hierarchy()
    # Collect only macromolecular chains (protein or nucleic acid) from
    # every model of each hierarchy.
    chains1 = []
    chains2 = []
    for model in pdb1.models():
        for i, chain_i in enumerate(model.chains()):
            if not chain_i.is_protein() and not chain_i.is_na():
                continue
            chains1.append(chain_i)
    for model in pdb2.models():
        for i, chain_i in enumerate(model.chains()):
            if not chain_i.is_protein() and not chain_i.is_na():
                continue
            chains2.append(chain_i)
    print("### SSM RMSD for chains")
    print()
    print("PDB_1 = %s" % file1)
    print("PDB_2 = %s" % file2)
    print()
    print("PDB_1 chainID PDB_2 chainID SSM RMSD")
    print("--------------------------------------------")
    for i, chain_i in enumerate(chains1):
        for j, chain_j in enumerate(chains2):
            ssm = None
            try: #do SSM alignment
                ssm, ssm_align = utils._ssm_align(
                    reference_chain = chain_i,
                    moving_chain = chain_j)
            except RuntimeError as e:
                # These two messages indicate "no alignment possible" for
                # this pair and are expected; anything else is a real error.
                if (str(e) != "can't make graph for first structure" and \
                    str(e) != "secondary structure does not match"):
                    raise e
            if ssm is not None:
                print("%13s"%chain_i.id, "%17s"%chain_j.id, "%12.3f"%ssm.ssm.rmsd)
            else:
                # Pair could not be aligned.
                print("%13s"%chain_i.id, "%17s"%chain_j.id, "%12s"% ("None"))
if __name__ == "__main__":
    # CLI entry point: expects exactly two PDB file paths.
    run(args=sys.argv[1:])
| StarcoderdataPython |
8077609 | # -----------------------------------------------------
# Simulation Constants
# -----------------------------------------------------
# Simulation steps
MAX_TIME = 60
ACTION_REPEAT = 10
# Simulation timing
NUM_BULLET_SOLVER_ITERATIONS = 30
SIMULATION_TIME_STEP = 0.001
# Camera
RENDER_HEIGHT = 360
RENDER_WIDTH = 480
CAMERA_DISTANCE = 1.0
CAMERA_YAW = 0
CAMERA_PITCH = -30
| StarcoderdataPython |
3547008 |
# Prompt for a whole number and derive b/c from its magnitude.
a = int(input("Please enter a whole number for variable 'a': "))
b = None
c = None
if a < 10:
    b = 0
    c = 1
    print(f'Since a is less than 10, b = {b} and c = {c}')
else:
    # b and c are still None at this point, so the original
    # `if b is None and c is None` guard was always true; removed.
    b = "'Nothing'"
    c = "'Nothing'"
    print(f'Since a is greater than or equal to 10, b = {b} and c = {c}.')
| StarcoderdataPython |
5189490 | <filename>pyscf/pbc/gto/_pbcintor.py
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import numpy
from pyscf import lib
# Shared library with the PBC C routines; loaded once at import time.
libpbc = lib.load_library('libpbc')
def _fpointer(name):
    """Return the C address of symbol *name* in the libpbc shared library."""
    symbol = getattr(libpbc, name)
    return ctypes.addressof(symbol)
class PBCOpt(object):
    """Thin ctypes wrapper around the C-side PBC integral optimizer."""
    def __init__(self, cell):
        # Opaque pointer to the C PBCOpt struct, filled in by the C side.
        self._this = ctypes.POINTER(_CPBCOpt)()
        natm = ctypes.c_int(cell._atm.shape[0])
        nbas = ctypes.c_int(cell._bas.shape[0])
        libpbc.PBCinit_optimizer(ctypes.byref(self._this),
                                 cell._atm.ctypes.data_as(ctypes.c_void_p), natm,
                                 cell._bas.ctypes.data_as(ctypes.c_void_p), nbas,
                                 cell._env.ctypes.data_as(ctypes.c_void_p))
    def init_rcut_cond(self, cell, precision=None):
        """Install a per-shell cutoff-radius screening condition.

        Returns self so calls can be chained.
        """
        if precision is None: precision = cell.precision
        # One cutoff radius per basis shell at the requested precision.
        rcut = numpy.array([cell.bas_rcut(ib, precision)
                            for ib in range(cell.nbas)])
        natm = ctypes.c_int(cell._atm.shape[0])
        nbas = ctypes.c_int(cell._bas.shape[0])
        libpbc.PBCset_rcut_cond(self._this,
                                rcut.ctypes.data_as(ctypes.c_void_p),
                                cell._atm.ctypes.data_as(ctypes.c_void_p), natm,
                                cell._bas.ctypes.data_as(ctypes.c_void_p), nbas,
                                cell._env.ctypes.data_as(ctypes.c_void_p))
        return self
    def del_rcut_cond(self):
        """Disable screening by pointing the C callback at a no-op; chains."""
        self._this.contents.fprescreen = _fpointer('PBCnoscreen')
        return self
    def __del__(self):
        # Free the C-side optimizer when the Python wrapper is collected.
        libpbc.PBCdel_optimizer(ctypes.byref(self._this))
class _CPBCOpt(ctypes.Structure):
    # Mirror of the C-side PBCOpt struct; field order and types must match
    # the C definition in libpbc -- TODO confirm against the C headers.
    _fields_ = [('rrcut', ctypes.c_void_p),
                ('fprescreen', ctypes.c_void_p)]
| StarcoderdataPython |
159893 | <gh_stars>0
import click
import pickle
from tqdm import tqdm
import os
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Pool
from utility.dataset import Preprocess_Dataset, buildPreprocessDataset
from utility.transform import ExtractCliques, ExtractMel
from utility.algorithmsWrapper import (
AlgoSeqRecur,
GenerateSSM,
GroudTruthStructure,
MsafAlgos,
MsafAlgosBdryOnly,
)
from models.classifier import GetAlgoData
from configs.configs import NUM_WORKERS, logger
from configs.trainingConfigs import (
CHORUS_CLASSIFIER_TRAIN_DATA_FILE,
CHORUS_CLASSIFIER_VAL_DATA_FILE,
CLF_TRAIN_SET,
CLF_VAL_SET,
USING_DATASET,
)
from models.classifier import ChorusClassifier
def starGetCliqueClassData(t):
    """Unpack ``(getData, baseset, idx)`` and apply ``getData`` -- a
    star-call shim so the arguments can travel through ``Pool.imap`` as a
    single tuple."""
    func, dataset, index = t
    return func(dataset, index)
def buildCCDataset(cpath, baseset, getData, force=True):
    """Build and pickle (X, y) clique-classification data to *cpath*.

    :param cpath: output pickle path
    :param baseset: indexable dataset passed to *getData*
    :param getData: callable ``(baseset, idx) -> (features, clabels)``
    :param force: rebuild even if *cpath* exists.
        NOTE(review): defaults to True, so callers that omit it always
        rebuild -- confirm this is intended before relying on caching.
    """
    if not os.path.exists(cpath) or force:
        X = []
        y = []
        logger.info(
            f"building clique class Data for <{baseset.__class__.__name__}> @ {cpath}"
        )
        # Fan the per-index extraction out over a worker pool; tqdm shows
        # progress as results stream back in order.
        with Pool(NUM_WORKERS) as p:
            N = len(baseset)
            results = list(
                tqdm(
                    p.imap(
                        starGetCliqueClassData,
                        zip([getData] * N, [baseset] * N, range(N)),
                    ),
                    total=N,
                )
            )
        # Flatten per-sample feature/label lists into the global X/y.
        for features, clabels in results:
            X.extend([feature for feature in features])
            y.extend([clabel for clabel in clabels])
        with open(cpath, "wb") as f:
            pickle.dump((X, y), f)
def testCCDataset(method):
    """Train the chorus classifier for *method* and report validation score.

    :param method: key into the CHORUS_CLASSIFIER_*_DATA_FILE mappings
    """
    logger.info(f"testCC method:{method}")
    cpath_train = CHORUS_CLASSIFIER_TRAIN_DATA_FILE[method]
    cpath_val = CHORUS_CLASSIFIER_VAL_DATA_FILE[method]
    _clf = ChorusClassifier(cpath_train)
    _clf.train()
    clf = _clf.clf
    Xt, yt = _clf.loadData(cpath_val)
    with np.printoptions(precision=3, suppress=True):
        # Only tree-based estimators expose feature_importances_.
        if hasattr(clf, "feature_importances_"):
            logger.info(
                f'feature importance, {[f"{s}={x*len(_clf.feature_names):.3f}" for x, s in sorted(zip(clf.feature_importances_, _clf.feature_names))]}'
            )
        logger.info(f"test classifier on valid data, score={clf.score(Xt, yt):.3f}")
# build Preprocess Dataset for feature extraction
# Registry of feature-extraction transforms selectable from the CLI.
transforms = {
    "extract-mel": ExtractMel(),
    "generate-ssm": GenerateSSM(dataset=USING_DATASET),
    "extract-cliques": ExtractCliques(dataset=USING_DATASET),
}
trainData = CHORUS_CLASSIFIER_TRAIN_DATA_FILE
# Registry of structure-analysis algorithms, each wrapped so it yields
# clique-classification training data.
methods = {
    "seqRecur": GetAlgoData(AlgoSeqRecur(trainData["seqRecur"])),
    "scluster": GetAlgoData(MsafAlgos("scluster", trainData["scluster"])),
    "cnmf": GetAlgoData(MsafAlgos("cnmf", trainData["cnmf"])),
    "sf": GetAlgoData(MsafAlgosBdryOnly("sf", trainData["sf"])),
    "olda": GetAlgoData(MsafAlgosBdryOnly("olda", trainData["olda"])),
    "foote": GetAlgoData(MsafAlgosBdryOnly("foote", trainData["foote"])),
    "gtBoundary": GetAlgoData(GroudTruthStructure(trainData["gtBoundary"])),
}
@click.group()
def cli():
    """Root click command group; subcommands are attached below."""
    pass
@click.command()
@click.option(
    "--transform", nargs=1, type=click.Choice(transforms.keys()), default=None
)
@click.option("--force", nargs=1, type=click.BOOL, default=False)
def build(transform, force):
    """Precompute the selected feature transform(s) over the dataset;
    with no --transform, build all of them."""
    if transform is None:
        selected = list(transforms.values())
    else:
        selected = [transforms[transform]]
    for feature_tf in selected:
        buildPreprocessDataset(USING_DATASET, feature_tf, force=force)
@click.command()
@click.option("--method", nargs=1, type=click.Choice(methods.keys()), default=None)
def train(method):
    """Build train/validation clique data for the selected method(s) and
    evaluate the classifier; with no --method, run all of them."""
    if method is None:
        selected = list(methods.items())
    else:
        selected = [(method, methods[method])]
    for name, getDataFun in selected:
        cpath_train = CHORUS_CLASSIFIER_TRAIN_DATA_FILE[name]
        cpath_val = CHORUS_CLASSIFIER_VAL_DATA_FILE[name]
        buildCCDataset(cpath_train, CLF_TRAIN_SET, getDataFun)
        buildCCDataset(cpath_val, CLF_VAL_SET, getDataFun)
        testCCDataset(name)
# Register subcommands on the root group.
cli.add_command(build)
cli.add_command(train)
if __name__ == "__main__":
    cli()
| StarcoderdataPython |
9663249 | import ast
import re
from django.utils.deprecation import MiddlewareMixin
from django.utils.timezone import now
from .conf import settings, TrackingConfig
from .models import RequestLog
class LoggingMiddleware(MiddlewareMixin):
    """
    Adapted from DRF-Tracking - drf-tracking.readthedocs.io
    Applied as middleware to catch all API requests rather than per view/apiview

    In-flight log data is stored on the *request* object rather than on the
    middleware instance: Django instantiates middleware once and reuses it
    for every request, so instance attributes would be clobbered by
    concurrent requests.
    """
    _CLEANED_SUBSTITUTE = "********************"
    _SENSITIVE_FIELDS = {"api", "token", "key", "secret", "password", "<PASSWORD>", "<PASSWORD>", "signature"}
    _PREFIX = TrackingConfig.Meta.prefix
    # Attribute name used to stash per-request log data on the request.
    _LOG_ATTR = "_tracking_log"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Merge user-configured sensitive field names (lower-cased) into
        # the default set.
        self._SENSITIVE_FIELDS.update({f.lower() for f in getattr(settings, f"{self._PREFIX}_SENSITIVE_FIELDS")})
    def process_request(self, request):
        """
        Begin processing request, make initial log key/values
        :param request: request instance
        :return: None
        """
        setattr(request, self._LOG_ATTR, dict(
            requested_at=now(),
            method=request.method,
            path=request.path,
            host=request.get_host(),
            # request.GET / request.POST when present, raw body otherwise
            data=self._clean_data(getattr(request, request.method, request.body))
        ))
    def process_response(self, request, response):
        """
        Finish processing request, make final log key/values and save log to database
        :param request: request instance
        :param response: response instance
        :return: the unchanged response
        """
        log = getattr(request, self._LOG_ATTR, None)
        if log is not None and self._should_log(request, response):
            log.update(dict(
                remote_addr=self._get_ip_address(request),
                view=self._get_view_name(request),
                view_method=self._get_view_method(request),
                query_params=self._clean_data(getattr(request, "query_params", {})),
                user=self._get_user(request),
                response_ms=self._get_response_ms(log["requested_at"]),
                response=response.rendered_content if hasattr(response, "rendered_content") else response.getvalue(),
                status_code=response.status_code
            ))
            RequestLog.objects.create(**log)
        return response
    def process_exception(self, request, exception):
        """
        Gracefully process the exception that was raised
        :param request: request instance
        :param exception: exception raised
        :return: None (the exception still propagates to Django's handler)
        """
        print(f"Tracking Exception - {exception.__class__.__name__} - {exception}")
    def _should_log(self, request, response):
        """
        Check if the request should be logged
        :param request: request instance
        :param response: response instance
        """
        log_prefixes = getattr(settings, f"{self._PREFIX}_URL_PREFIXES")
        log_levels = getattr(settings, f"{self._PREFIX}_REQUEST_LEVELS")
        return (
            any(re.compile(prefix).match(request.path) for prefix in log_prefixes)
            and
            any(response.status_code in levels for levels in log_levels)
        )
    def _get_user(self, request):
        """
        Get requesting user, if authenticated
        :param request: request instance
        :return: user of the request or None
        """
        user = request.user
        return None if user.is_anonymous else user
    def _get_ip_address(self, request):
        """
        Get the remote ip address the request was generated from
        :param request: request instance
        :return: remote IP Address
        """
        ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
        # X_FORWARDED_FOR returns client1, proxy1, proxy2,...
        return ipaddr.split(",")[0].strip() if ipaddr else request.META.get("REMOTE_ADDR", "")
    def _get_view_method(self, request):
        """
        Get view method
        :param request: request instance
        :return: method of the request, lower-cased
        """
        # The original read getattr(self, "action", ...) -- a leftover from
        # the DRF viewset mixin; middleware never has an "action" attribute,
        # so the lookup always fell through to the request method.
        return request.method.lower()
    def _get_view_name(self, request):
        """
        Get view name
        :param request: request instance
        :return: function name that was called
        """
        return getattr(request.resolver_match, 'view_name', None)
    def _get_response_ms(self, requested_at):
        """
        Get the duration of the request response cycle in milliseconds, 0 if negative
        :param requested_at: timestamp captured in process_request
        :return: duration of the response in milliseconds
        """
        response_timedelta = now() - requested_at
        response_ms = int(response_timedelta.total_seconds() * 1000)
        return max(response_ms, 0)
    def _clean_data(self, data):
        """
        Clean a dictionary of data of potentially sensitive info before sending to the database
        :param data: dictionary to clean
        :return: cleaned dictionary
        """
        if isinstance(data, list):
            return [self._clean_data(d) for d in data]
        if isinstance(data, dict):
            clean_data = dict(data)
            for key, value in clean_data.items():
                # Values may be stringified containers; parse them so
                # nested sensitive keys are also scrubbed.
                try:
                    value = ast.literal_eval(value)
                except (ValueError, SyntaxError):
                    pass
                if isinstance(value, (dict, list)):
                    clean_data[key] = self._clean_data(value)
                if key.lower() in self._SENSITIVE_FIELDS:
                    clean_data[key] = self._CLEANED_SUBSTITUTE
            return clean_data
        return data
| StarcoderdataPython |
264308 | <reponame>mkduer/code-nibbles
from helpers import Helpers
def fibonacci_recurse(flist: [int], limit: int) -> [int]:
"""
Recursively calculates the fibonacci sequence up to the defined limit value (inclusive)
:param flist: the list of fibonacci numbers
:param limit: the maximum value for the fibonacci sequence
:return: the fibonacci list of integers
"""
next_fib = 1
if limit < 1:
return flist
if not flist:
return fibonacci_recurse([next_fib], limit)
length = len(flist)
if flist[length - 1] <= limit:
if length == 1:
next_fib = flist[0] + flist[0]
else:
next_fib = flist[length - 1] + flist[length - 2]
if next_fib <= limit:
flist.append(next_fib)
return fibonacci_recurse(flist, limit)
return flist
def fibonacci_iter(limit: int) -> [int]:
"""
Iteratively calculates the fibonacci sequence up to the defined limit value (inclusive)
:param flist: the list of fibonacci numbers
:param limit: the maximum value for the fibonacci sequence
:return: the fibonacci list of integers
"""
flist = [1, 2]
length = len(flist)
while flist[length - 1] < limit:
next_fib = flist[length - 1] + flist[length - 2]
if next_fib > limit:
return flist
flist.append(next_fib)
length += 1
return flist
def even_modulo(value: int) -> int:
"""
Returns a value if it is even
:param value: the value to check
:return: the even value or -1 if not even
"""
if not value % 2:
return value
return -1
def fibonacci_even(start_list: [int]) -> [int]:
"""
Takes an original list of fibonacci numbers and returns only the even values as a list
:param start_list: the original list of fibonacci numbers
:return: list of even fibonacci integers
"""
even_list = []
evens = map(even_modulo, start_list)
for e in evens:
even_list.append(e)
return list(filter(lambda x: x is not -1, even_list))
def main():
    """Demo driver: print iterative, recursive and even-only Fibonacci runs."""
    helper = Helpers()
    limit = 5432
    print(f'\niterating fibonacci values up to {limit}: ')
    helper.multiline_print(fibonacci_iter(limit))
    limit = 1010
    print(f'\nrecursing fibonacci values up to {limit}: ')
    helper.multiline_print(fibonacci_recurse([], limit))
    limit = 51235324
    print(f'\neven fibonacci values up to {limit}: ')
    helper.multiline_print(fibonacci_even(fibonacci_iter(limit)))
if __name__ == '__main__':
    # Run the demo when executed as a script.
    main()
| StarcoderdataPython |
6556502 | """ Serializers for course advanced settings"""
from typing import Type, Dict as DictType
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.fields import Field as SerializerField
from xblock.fields import (
Boolean,
DateTime,
Dict,
Field as XBlockField,
Float,
Integer,
List,
String,
)
from xmodule.course_module import CourseFields, EmailString
from xmodule.fields import Date
from cms.djangoapps.models.settings.course_metadata import CourseMetadata
# Maps xblock fields to their corresponding Django Rest Framework serializer field.
# Order matters: lookups walk this list and return the first isinstance()
# match, so subclasses must appear before their base classes. EmailString
# subclasses String, so it is listed first -- with String first, every
# EmailString would match the String entry and EmailField was unreachable.
XBLOCK_DRF_FIELD_MAP = [
    (Boolean, serializers.BooleanField),
    (EmailString, serializers.EmailField),
    (String, serializers.CharField),
    (List, serializers.ListField),
    (Dict, serializers.DictField),
    (Date, serializers.DateField),
    (DateTime, serializers.DateTimeField),
    (Integer, serializers.IntegerField),
    (Float, serializers.FloatField),
]
class AdvancedSettingsFieldSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Serializer for a single course setting field.
    This serializer accepts a ``value_field`` parameter that allows you to
    specify what field to use for a particular instance of this serializer.
    Args:
        value_field (SerializerField): The ``value`` field will have this type
    """
    deprecated = serializers.BooleanField(read_only=True, help_text=_("Marks a field as deprecated."))
    display_name = serializers.CharField(read_only=True, help_text=_("User-friendly display name for the field"))
    help = serializers.CharField(read_only=True, help_text=_("Help text that describes the setting."))
    def __init__(self, value_field: SerializerField, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The "value" field's type varies per setting, so it is injected at
        # instantiation time rather than declared on the class.
        self.fields["value"] = value_field
class CourseAdvancedSettingsSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Serializer for course advanced settings.
    """
    @staticmethod
    def _get_drf_field_type_from_xblock_field(xblock_field: XBlockField) -> Type[SerializerField]:
        """
        Return the corresponding DRF Serializer field for an XBlock field.
        Args:
            xblock_field (XBlockField): An XBlock field
        Returns:
            Type[SerializerField]: Return the DRF Serializer type
            corresponding to the XBlock field.
        """
        # First isinstance() match wins, so entries earlier in
        # XBLOCK_DRF_FIELD_MAP shadow later ones for subclasses.
        for xblock_type, drf_type in XBLOCK_DRF_FIELD_MAP:
            if isinstance(xblock_field, xblock_type):
                return drf_type
        # Fallback for XBlock field types with no dedicated mapping.
        return serializers.JSONField
    def get_fields(self) -> DictType[str, SerializerField]:
        """
        Return the fields for this serializer.
        This method dynamically generates the fields and field types based on
        fields available on the Course.
        Returns:
            DictType[str, SerializerField]: A mapping of field names to field serializers
        """
        fields = {}
        for field, field_type in vars(CourseFields).items():
            # Only real XBlock fields that are not explicitly excluded are
            # exposed as advanced settings.
            if isinstance(field_type, XBlockField) and field not in CourseMetadata.FIELDS_EXCLUDE_LIST:
                fields[field] = AdvancedSettingsFieldSerializer(
                    required=False,
                    label=field_type.name,
                    help_text=field_type.help,
                    value_field=self._get_drf_field_type_from_xblock_field(field_type)(),
                )
        return fields
| StarcoderdataPython |
8059946 | <filename>tests/test_py_examples.py<gh_stars>10-100
"""
Validate all shaders in our examples. This helps ensure that our
exampples are actually valid, but also allows us to increase test
coverage simply by writing examples.
"""
import os
import types
import importlib.util
import pyshader
import pytest
from testutils import validate_module, run_test_and_print_new_hashes
EXAMPLES_DIR = os.path.abspath(os.path.join(__file__, "..", "..", "examples_py"))
def get_pyshader_examples():
    """Import every example module under EXAMPLES_DIR and return a mapping
    of qualified shader name -> ShaderModule.

    Raises RuntimeError when an example contains a plain function whose
    name suggests it should have been decorated as a shader.
    """
    found = {}
    for entry in os.listdir(EXAMPLES_DIR):
        if not entry.endswith(".py"):
            continue
        # Import the example file as a standalone module.
        module_name = entry[:-3]
        spec = importlib.util.spec_from_file_location(
            module_name, os.path.join(EXAMPLES_DIR, entry))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        # Harvest shader objects, qualifying each name with its module.
        for value in module.__dict__.values():
            if isinstance(value, pyshader.ShaderModule):
                qualified = module_name + "." + value.input.__qualname__
                value.input.__qualname__ = qualified
                found[qualified] = value
            elif isinstance(value, types.FunctionType):
                funcname = value.__name__
                if "_shader" in funcname:
                    raise RuntimeError(f"Undecorated shader {funcname}")
    return found
# Discover all example shaders once at import time for parametrization.
shader_modules = get_pyshader_examples()
# One generated test case per discovered shader; validates its compiled
# output against the expected HASHES.
@pytest.mark.parametrize("shader_name", list(shader_modules.keys()))
def test(shader_name):
    print("Testing shader", shader_name)
    shader = shader_modules[shader_name]
    validate_module(shader, HASHES)
# Expected hash pairs per shader; regenerate via
# run_test_and_print_new_hashes when shaders legitimately change.
HASHES = {
    "compute.compute_shader_copy": ("6e6849aa811ccf8a", "1ac33233b60b9f13"),
    "compute.compute_shader_multiply": ("a2d0cb9798632bd1", "3229b7f2d61e79a8"),
    "compute.compute_shader_tex_colorwap": ("454cefdbf0ce1acc", "0dc6c0301d583b8e"),
    "mesh.vertex_shader": ("fdc3b4b279b3a31e", "80db45b376a75fe3"),
    "mesh.fragment_shader_flat": ("21049f547e057152", "bca0edd57ffb8e98"),
    "textures.compute_shader_tex_add": ("74c7c482a598349d", "9e271b832b0971d1"),
    "textures.fragment_shader_tex": ("7188891541d70435", "28c84baac74b973e"),
    "triangle.vertex_shader": ("738e0ac3bd22ebac", "e4209550a51f8b5a"),
    "triangle.fragment_shader": ("494975dea607787e", "4c6ac6942205ebfc"),
}
if __name__ == "__main__":
    # Running as a script prints freshly computed hashes for updating HASHES.
    run_test_and_print_new_hashes(globals())
| StarcoderdataPython |
6601573 | """Utility functions to help other callback functions"""
import base64
from PIL import Image
import io
from pathlib import Path
from skimage import draw, morphology
from skimage.transform import resize
from scipy import ndimage
import numpy as np
import matplotlib.image as mpimg
import json
def b64_2_numpy(string):
    """Decode a base64-encoded image string into a numpy pixel array."""
    raw = base64.b64decode(string)
    return np.array(Image.open(io.BytesIO(raw)))
def numpy_2_b64(arr, enc_format='png'):
    """Encode a numpy image array as a base64 string in *enc_format*."""
    buffer = io.BytesIO()
    Image.fromarray(arr).save(buffer, format=enc_format)
    encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode("utf-8")
def upload_demo():
    """Return the bundled demo image as a base64-encoded JPEG string."""
    demo_path = Path(__file__).parents[1] / 'demo_img.jpg'
    return numpy_2_b64(mpimg.imread(demo_path), enc_format='jpeg')
def apply_edits(data, ypred, size_distr_json):
    """Applies user applied edits from dash canvas.

    :param data: dash-canvas JSON; data['objects'][1:] are stroke objects
        (the first object is skipped -- presumably the background image;
        TODO confirm)
    :param ypred: binary prediction mask, assumed (576, 768) to match
        parse_obj's output -- TODO confirm
    :param size_distr_json: JSON string containing 'labeled_list', the
        labeled-particle array for red-stroke removal
    :return: edited binary mask
    """
    labeled = {}
    for obj in data['objects'][1:]:
        mask = parse_obj(obj).astype(np.uint8)
        # if the stroke is white, add stroke to prediction
        if obj['stroke'] == 'white':
            ypred = np.bitwise_or(ypred, mask)
        # if the stroke is red, remove entire particle labeled
        # by stroke
        elif obj['stroke'] == 'red':
            # cache labeled array for if there are multiple red strokes
            if 'cache' not in labeled:
                size_distr = json.loads(size_distr_json)
                cache = np.asarray(size_distr['labeled_list'])
                # Nearest-neighbor resize (order=0) keeps integer labels intact.
                labeled['cache'] = resize(
                    cache,
                    (576, 768),
                    order=0,
                    preserve_range=True
                ).astype(np.int32)
            # remove any particles that "touches" red stroke
            remove = np.unique(labeled['cache'][np.nonzero(mask)])
            for r in remove:
                ypred[np.where(labeled['cache'] == r)] = 0
        # otherwise, the stroke is black and stroke should be erased
        # from image
        else:
            ypred = np.bitwise_and(ypred, 1 - mask)
    return ypred
def parse_obj(obj):
    """Create a (576, 768) binary mask from a dash-canvas stroke object.

    The stroke's SVG path is rasterized as a chain of Bezier curves, then
    dilated by half the stroke width to approximate the drawn line.

    Fix vs. the original: ``np.bool`` was removed from NumPy (deprecated in
    1.20, removed in 1.24); the builtin ``bool`` is the documented
    replacement and behaves identically as a dtype.
    """
    scale = 1 / obj['scaleX']
    path = obj['path']
    rr, cc = [], []
    # find indices of the SVG path using bezier curves
    for (Q1, Q2) in zip(path[:-2], path[1:-1]):
        inds = draw.bezier_curve(int(round(Q1[-1] / scale)),
                                 int(round(Q1[-2] / scale)),
                                 int(round(Q2[2] / scale)),
                                 int(round(Q2[1] / scale)),
                                 int(round(Q2[4] / scale)),
                                 int(round(Q2[3] / scale)), 1)
        rr += list(inds[0])
        cc += list(inds[1])
    radius = round(obj['strokeWidth'] / 2. / scale)
    # create mask
    mask = np.zeros((576, 768), dtype=bool)
    mask[rr, cc] = 1
    # Thicken the 1-pixel curve to the stroke's drawn width.
    mask = ndimage.binary_dilation(
        mask,
        morphology.disk(radius)
    )
    return mask
| StarcoderdataPython |
6420421 | <reponame>StylishTriangles/adversarial_patch<gh_stars>1-10
from keras import Model
from keras import backend as K
from keras.preprocessing import image
import numpy as np
import os
# Assumes 3 channels in input
def get_input_shape(img_width: int, img_height: int):
    """Return the 3-channel input shape expected by the Keras backend,
    honoring its channels-first vs channels-last image data format."""
    if K.image_data_format() == 'channels_first':
        return (3, img_width, img_height)
    return (img_width, img_height, 3)
def summarize_accuracy(model: Model, classes: list, validation_dir: str, img_width: int, img_height: int):
    """
    Runs predictions on all validation data and displays accuracy for each class.
    """
    hits = {name: 0 for name in classes}
    counts = {name: 0 for name in classes}
    for label in os.listdir(validation_dir):
        if label not in classes:
            print(label, "directory skipped, not in classes list")
            continue
        class_dir = os.path.join(validation_dir, label)
        for fname in os.listdir(class_dir):
            img = image.load_img(os.path.join(class_dir, fname),
                                 target_size=(img_width, img_height))
            batch = np.expand_dims(image.img_to_array(img), axis=0)
            scores = model.predict(batch)[0]
            counts[label] += 1
            best = max(scores)
            # Count a hit when the true class's score ties or beats the
            # best score (ties in favor of the true class).
            for name, score in zip(classes, scores):
                if name == label and score >= best:
                    hits[label] += 1
    for name in classes:
        rate = 1
        if counts[name] != 0:
            rate = hits[name] / counts[name]
        print(
            name,
            "\tcount:", counts[name],
            "\tsuccesses:", hits[name],
            "\trate:", rate
        )
9650299 | import os
from time import sleep
from Big_Data_Platform.Kubernetes.Cognition.example.src.classes.KubeAPI import KubeAPI
from Big_Data_Platform.Kubernetes.Kafka_Client.Confluent_Kafka_Python.src.classes.CKafkaPC import KafkaPC
def send_job_metrics():
    """Sum CPU/RAM usage across each job's containers and publish one
    Kafka message per job.

    Relies on the module-level ``k_api`` (Kubernetes metrics source) and
    ``new_pc`` (Kafka producer).
    """
    print("Entering function send_job_metrics()")
    job_res = k_api.get_job_metrics()
    for job, job_info in job_res.items():
        cpu = 0
        mem = 0
        for container in job_info.containers:
            cpu += container.cpu_usage
            mem += container.mem_usage
        # A job with no (or zero) usage leaves cpu/mem as a plain int 0,
        # which has no pint ``.to()``/``.m`` -- accessing ``cpu.m``
        # unconditionally would raise AttributeError.  Convert units and
        # take the magnitude only when we actually have a Quantity.
        cpu_ms = cpu.to('millicpu').m if cpu > 0 else 0
        ram_mi = mem.to('Mi').m if mem > 0 else 0
        new_data_point = {
            "algorithm": job,
            "CPU_ms": cpu_ms,
            "RAM": ram_mi,
        }
        print(f"Send: {new_data_point}")
        new_pc.send_msg(new_data_point)
# Module-level setup: Kubernetes API handle and Kafka producer configured
# from environment variables.
k_api = KubeAPI()
k_api.init_kube_connection()
env_vars = {
    "config_path": os.getenv("config_path"),
    "config_section": os.getenv("config_section"),
}
new_pc = KafkaPC(**env_vars)
# Main loop: publish job metrics and reap finished jobs once per second.
while True:
    send_job_metrics()
    k_api.kube_cleanup_finished_jobs()
    sleep(1)
| StarcoderdataPython |
1610233 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group (see file header) -- hand edits
# will be lost on regeneration; only comments are added here.
class LogAnalyticsOperations(object):
    """LogAnalyticsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.cdn.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def get_log_analytics_metrics(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        metrics,  # type: List[Union[str, "_models.Get4ItemsItem"]]
        date_time_begin,  # type: datetime.datetime
        date_time_end,  # type: datetime.datetime
        granularity,  # type: Union[str, "_models.Enum31"]
        group_by=None,  # type: Optional[List[Union[str, "_models.Get8ItemsItem"]]]
        continents=None,  # type: Optional[List[str]]
        country_or_regions=None,  # type: Optional[List[str]]
        custom_domains=None,  # type: Optional[List[str]]
        protocols=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.MetricsResponse"
        """Get log report for AFD profile.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param profile_name: Name of the CDN profile which is unique within the resource group.
        :type profile_name: str
        :param metrics:
        :type metrics: list[str or ~azure.mgmt.cdn.models.Get4ItemsItem]
        :param date_time_begin:
        :type date_time_begin: ~datetime.datetime
        :param date_time_end:
        :type date_time_end: ~datetime.datetime
        :param granularity:
        :type granularity: str or ~azure.mgmt.cdn.models.Enum31
        :param group_by:
        :type group_by: list[str or ~azure.mgmt.cdn.models.Get8ItemsItem]
        :param continents:
        :type continents: list[str]
        :param country_or_regions:
        :type country_or_regions: list[str]
        :param custom_domains:
        :type custom_domains: list[str]
        :param protocols:
        :type protocols: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricsResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.cdn.models.MetricsResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MetricsResponse"]
        # Map common HTTP failures to typed azure-core exceptions; callers
        # may extend/override the mapping through kwargs['error_map'].
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_log_analytics_metrics.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (optional list parameters are sent as
        # comma-separated values only when provided)
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['metrics'] = self._serialize.query("metrics", metrics, '[str]', div=',')
        query_parameters['dateTimeBegin'] = self._serialize.query("date_time_begin", date_time_begin, 'iso-8601')
        query_parameters['dateTimeEnd'] = self._serialize.query("date_time_end", date_time_end, 'iso-8601')
        query_parameters['granularity'] = self._serialize.query("granularity", granularity, 'str')
        if group_by is not None:
            query_parameters['groupBy'] = self._serialize.query("group_by", group_by, '[str]', div=',')
        if continents is not None:
            query_parameters['continents'] = self._serialize.query("continents", continents, '[str]', div=',')
        if country_or_regions is not None:
            query_parameters['countryOrRegions'] = self._serialize.query("country_or_regions", country_or_regions, '[str]', div=',')
        if custom_domains is not None:
            query_parameters['customDomains'] = self._serialize.query("custom_domains", custom_domains, '[str]', div=',')
        if protocols is not None:
            query_parameters['protocols'] = self._serialize.query("protocols", protocols, '[str]', div=',')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 is the only documented success status; anything else is
        # deserialized as an AfdErrorResponse and raised.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.AfdErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MetricsResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_log_analytics_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getLogAnalyticsMetrics'}  # type: ignore

    def get_log_analytics_rankings(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        rankings,  # type: List[Union[str, "_models.Enum35"]]
        metrics,  # type: List[Union[str, "_models.Get5ItemsItem"]]
        max_ranking,  # type: float
        date_time_begin,  # type: datetime.datetime
        date_time_end,  # type: datetime.datetime
        custom_domains=None,  # type: Optional[List[str]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.RankingsResponse"
        """Get log analytics ranking report for AFD profile.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param profile_name: Name of the CDN profile which is unique within the resource group.
        :type profile_name: str
        :param rankings:
        :type rankings: list[str or ~azure.mgmt.cdn.models.Enum35]
        :param metrics:
        :type metrics: list[str or ~azure.mgmt.cdn.models.Get5ItemsItem]
        :param max_ranking:
        :type max_ranking: float
        :param date_time_begin:
        :type date_time_begin: ~datetime.datetime
        :param date_time_end:
        :type date_time_end: ~datetime.datetime
        :param custom_domains:
        :type custom_domains: list[str]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RankingsResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.cdn.models.RankingsResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RankingsResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_log_analytics_rankings.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['rankings'] = self._serialize.query("rankings", rankings, '[str]', div=',')
        query_parameters['metrics'] = self._serialize.query("metrics", metrics, '[str]', div=',')
        query_parameters['maxRanking'] = self._serialize.query("max_ranking", max_ranking, 'float')
        query_parameters['dateTimeBegin'] = self._serialize.query("date_time_begin", date_time_begin, 'iso-8601')
        query_parameters['dateTimeEnd'] = self._serialize.query("date_time_end", date_time_end, 'iso-8601')
        if custom_domains is not None:
            query_parameters['customDomains'] = self._serialize.query("custom_domains", custom_domains, '[str]', div=',')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.AfdErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RankingsResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_log_analytics_rankings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getLogAnalyticsRankings'}  # type: ignore

    def get_log_analytics_locations(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ContinentsResponse"
        """Get all available location names for AFD log analytics report.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param profile_name: Name of the CDN profile which is unique within the resource group.
        :type profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ContinentsResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.cdn.models.ContinentsResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ContinentsResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_log_analytics_locations.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.AfdErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ContinentsResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_log_analytics_locations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getLogAnalyticsLocations'}  # type: ignore

    def get_log_analytics_resources(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ResourcesResponse"
        """Get all endpoints and custom domains available for AFD log report.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param profile_name: Name of the CDN profile which is unique within the resource group.
        :type profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ResourcesResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.cdn.models.ResourcesResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ResourcesResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_log_analytics_resources.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.AfdErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ResourcesResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_log_analytics_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getLogAnalyticsResources'}  # type: ignore

    def get_waf_log_analytics_metrics(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        metrics,  # type: List[str]
        date_time_begin,  # type: datetime.datetime
        date_time_end,  # type: datetime.datetime
        granularity,  # type: Union[str, "_models.Enum37"]
        actions=None,  # type: Optional[List[Union[str, "_models.Enum38"]]]
        group_by=None,  # type: Optional[List[Union[str, "_models.Enum39"]]]
        rule_types=None,  # type: Optional[List[Union[str, "_models.Enum40"]]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.WafMetricsResponse"
        """Get Waf related log analytics report for AFD profile.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param profile_name: Name of the CDN profile which is unique within the resource group.
        :type profile_name: str
        :param metrics:
        :type metrics: list[str]
        :param date_time_begin:
        :type date_time_begin: ~datetime.datetime
        :param date_time_end:
        :type date_time_end: ~datetime.datetime
        :param granularity:
        :type granularity: str or ~azure.mgmt.cdn.models.Enum37
        :param actions:
        :type actions: list[str or ~azure.mgmt.cdn.models.Enum38]
        :param group_by:
        :type group_by: list[str or ~azure.mgmt.cdn.models.Enum39]
        :param rule_types:
        :type rule_types: list[str or ~azure.mgmt.cdn.models.Enum40]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WafMetricsResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.cdn.models.WafMetricsResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WafMetricsResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_waf_log_analytics_metrics.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['metrics'] = self._serialize.query("metrics", metrics, '[str]', div=',')
        query_parameters['dateTimeBegin'] = self._serialize.query("date_time_begin", date_time_begin, 'iso-8601')
        query_parameters['dateTimeEnd'] = self._serialize.query("date_time_end", date_time_end, 'iso-8601')
        query_parameters['granularity'] = self._serialize.query("granularity", granularity, 'str')
        if actions is not None:
            query_parameters['actions'] = self._serialize.query("actions", actions, '[str]', div=',')
        if group_by is not None:
            query_parameters['groupBy'] = self._serialize.query("group_by", group_by, '[str]', div=',')
        if rule_types is not None:
            query_parameters['ruleTypes'] = self._serialize.query("rule_types", rule_types, '[str]', div=',')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.AfdErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('WafMetricsResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_waf_log_analytics_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getWafLogAnalyticsMetrics'}  # type: ignore

    def get_waf_log_analytics_rankings(
        self,
        resource_group_name,  # type: str
        profile_name,  # type: str
        metrics,  # type: List[str]
        date_time_begin,  # type: datetime.datetime
        date_time_end,  # type: datetime.datetime
        max_ranking,  # type: float
        rankings,  # type: List[Union[str, "_models.Enum42"]]
        actions=None,  # type: Optional[List[Union[str, "_models.Enum43"]]]
        rule_types=None,  # type: Optional[List[Union[str, "_models.Enum44"]]]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.WafRankingsResponse"
        """Get WAF log analytics charts for AFD profile.

        :param resource_group_name: Name of the Resource group within the Azure subscription.
        :type resource_group_name: str
        :param profile_name: Name of the CDN profile which is unique within the resource group.
        :type profile_name: str
        :param metrics:
        :type metrics: list[str]
        :param date_time_begin:
        :type date_time_begin: ~datetime.datetime
        :param date_time_end:
        :type date_time_end: ~datetime.datetime
        :param max_ranking:
        :type max_ranking: float
        :param rankings:
        :type rankings: list[str or ~azure.mgmt.cdn.models.Enum42]
        :param actions:
        :type actions: list[str or ~azure.mgmt.cdn.models.Enum43]
        :param rule_types:
        :type rule_types: list[str or ~azure.mgmt.cdn.models.Enum44]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WafRankingsResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.cdn.models.WafRankingsResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WafRankingsResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get_waf_log_analytics_rankings.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        query_parameters['metrics'] = self._serialize.query("metrics", metrics, '[str]', div=',')
        query_parameters['dateTimeBegin'] = self._serialize.query("date_time_begin", date_time_begin, 'iso-8601')
        query_parameters['dateTimeEnd'] = self._serialize.query("date_time_end", date_time_end, 'iso-8601')
        query_parameters['maxRanking'] = self._serialize.query("max_ranking", max_ranking, 'float')
        query_parameters['rankings'] = self._serialize.query("rankings", rankings, '[str]', div=',')
        if actions is not None:
            query_parameters['actions'] = self._serialize.query("actions", actions, '[str]', div=',')
        if rule_types is not None:
            query_parameters['ruleTypes'] = self._serialize.query("rule_types", rule_types, '[str]', div=',')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.AfdErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('WafRankingsResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_waf_log_analytics_rankings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/getWafLogAnalyticsRankings'}  # type: ignore
| StarcoderdataPython |
1818393 | <reponame>tpudlik/sbf
"""Spherical Bessel function algorithms developed by <NAME>
(2011).
"""
import numpy as np
def recurrence_pattern(n, z, f0, f1):
    """Evaluate the order-n spherical Bessel function at z by downward
    recurrence (Cai 2011), normalized against the caller-supplied exact
    order-0 and order-1 values f0 and f1.
    """
    if n == 0:
        return f0
    if n == 1:
        return f1
    start_order = order(n, z)
    jlp1 = 0
    jl = 10**(-305)  # tiny seed; downward recurrence self-normalizes later
    zinv = 1/z  # Complex division is slower than multiplication
    # `range` replaces the Python-2-only `xrange`, which raises NameError
    # on Python 3; behavior is identical.
    for idx in range(start_order - n):
        jlm1 = (2*(start_order - idx) + 1)*jl*zinv - jlp1
        jlp1 = jl
        jl = jlm1
    out = jlm1
    for idx in range(n):
        jlm1 = (2*(n - idx) + 1)*jl*zinv - jlp1
        jlp1 = jl
        jl = jlm1
    # Normalize by whichever reference value is larger in magnitude,
    # which is the numerically safer choice.
    if np.abs(f1) <= np.abs(f0):
        return out*(f0/jlm1)
    else:
        return out*(f1/jlp1)
def order(n, z):
    """Choose the starting order for the downward recurrence at argument z,
    clamped between n + 1 and an argument-dependent upper bound."""
    s = np.abs(np.sin(np.angle(z)))
    o_approx = np.floor(
        (1.83 + 4.1*s**0.36)*np.abs(z)**(0.91 - 0.43*s**0.33)
        + 9*(1 - np.sqrt(s))
    )
    o_min = n + 1
    o_max = np.floor(235 + 50*np.sqrt(np.abs(z)))
    if o_approx < o_min:
        return o_min
    return int(min(o_approx, o_max))
@np.vectorize
def sph_jn(n, z):
    """Spherical Bessel function j_n(z), seeded with the closed-form
    j_0 and j_1 and evaluated by downward recurrence."""
    j0 = np.sin(z)/z
    j1 = np.sin(z)/z**2 - np.cos(z)/z
    return recurrence_pattern(n, z, j0, j1)
| StarcoderdataPython |
8007761 | #coding=utf-8
#-*- coding: utf-8 -*-
import os
import re
import sys
import time
import math
import pytz
import numpy
import talib
import datetime
import urllib2
sys.path.append("../frame/")
import fetch_data
from loggingex import LOG_INFO
from loggingex import LOG_ERROR
from loggingex import LOG_WARNING
from job_base import job_base
from prepare_table import prepare_table
from mysql_manager import mysql_manager
from stock_conn_manager import stock_conn_manager
class update_stock_daily_average_info(job_base):
def __init__(self):
pass
def run(self):
share_ids = self._get_all_share_ids()
for share_id_item in share_ids:
share_id = share_id_item[0]
self._update_average(share_id)
LOG_INFO("run update_stock_daily_average_info")
def _get_all_share_ids(self):
date_info = time.strftime('%Y_%m_%d')
trade_table_name = "trade_info_%s" % (date_info)
#share_ids = fetch_data.get_data(fetch_data.select_db("daily_temp", trade_table_name, ["share_id"],{"share_id":[["000001","000010","000301","000601","000901","002101","002401","002701","300001","300301","600301","600601","601801","603001","603601","603901",],"in"]}, pre = "distinct"))
share_ids = fetch_data.get_data(fetch_data.select_db("daily_temp", trade_table_name, ["share_id"],{}, pre = "distinct"))
return share_ids
def _get_ma_empty_start_time(self, share_id, table_name):
stock_conn_manager_obj = stock_conn_manager()
conn_name = stock_conn_manager_obj.get_conn_name(share_id)
last_time = fetch_data.get_data(fetch_data.select_db(conn_name, table_name, ["time"], {"close_ma5":[0, "="]}, extend="order by time asc limit 1"))
if len(last_time) > 0:
last_day = last_time[0][0]
tz = pytz.timezone('Asia/Shanghai')
last_day_obj = datetime.datetime.fromtimestamp(last_day, tz)
time_str = last_day_obj.strftime("%Y%m%d")
return time.mktime(time.strptime(time_str, '%Y%m%d'))
else:
return 0
def _get_start_time(self, share_id, table_name, ma_empty_start_time):
stock_conn_manager_obj = stock_conn_manager()
conn_name = stock_conn_manager_obj.get_conn_name(share_id)
last_time = fetch_data.get_data(fetch_data.select_db(conn_name, table_name, ["time"], {"time":[ma_empty_start_time, "<="]}, extend="order by time desc limit 180"))
if len(last_time) > 0:
last_day = last_time[-1][0]
tz = pytz.timezone('Asia/Shanghai')
last_day_obj = datetime.datetime.fromtimestamp(last_day, tz)
time_str = last_day_obj.strftime("%Y%m%d")
return time.mktime(time.strptime(time_str, '%Y%m%d'))
else:
return ma_empty_start_time
def _get_close_volume(self, share_id, table_name, start_time):
stock_conn_manager_obj = stock_conn_manager()
conn_name = stock_conn_manager_obj.get_conn_name(share_id)
data = fetch_data.get_data(fetch_data.select_db(conn_name, table_name, ["time", "today_close", "volume"], {"time":[start_time, ">="]}))
time_list = []
close_list = []
volume_list = []
for item in data:
time_int = item[0]
close = item[1]
volume = item[2]
time_list.append(time_int)
close_list.append(close)
volume_list.append(volume)
return {"time":time_list, "close":close_list, "volume":volume_list}
def _get_ma_data(self, ori_data, periods):
ret_data = {}
float_data = [float(x) for x in ori_data]
for period in periods:
data = talib.MA(numpy.array(float_data), timeperiod = period)
data_list = data.tolist()
data_list = self._filter_data(data_list)
ret_data["%d" % period] = data_list
return ret_data
def _update_average(self, share_id):
table_name = "daily_info_ex_dividend_%s" % (share_id)
infos = self._calc_average_data(share_id, table_name)
for item in infos:
self._save_data(share_id, table_name, item)
def _calc_average_data(self, share_id, table_name):
ma_empty_start_time_int = self._get_ma_empty_start_time(share_id, table_name)
if ma_empty_start_time_int == 0:
return []
start_time_int = self._get_start_time(share_id, table_name, ma_empty_start_time_int)
stock_info = self._get_close_volume(share_id, table_name, start_time_int)
periods = [5, 10, 20, 30, 60, 90, 120, 150, 180]
#periods = [90, 180]
close_data = self._get_ma_data(stock_info["close"], periods)
volume_data = self._get_ma_data(stock_info["volume"], periods)
if len(stock_info["time"]) == len(close_data["180"]) and len(close_data["180"]) == len(volume_data["180"]):
pass
else:
LOG_WARNING("calc %s daily average error" % share_id)
return
infos = []
data_len = len(stock_info["time"])
for index in range(data_len):
info = {}
time_int = stock_info["time"][index]
if time_int < ma_empty_start_time_int:
continue
info["time"] = time_int
for period in periods:
info["close_ma%s" % period] = close_data["%s" % period][index]
info["volume_ma%s" % period] = volume_data["%s" % period][index]
infos.append(info)
return infos
def _filter_data(self, data):
for index in range(len(data)):
if math.isnan(data[index]):
data[index] = 0.01
else:
break
return data
def _save_data(self, share_id, table_name, data):
    """Write one average record back to the share's table, matched on "time"."""
    if len(data) < 2:
        # A record holding only the "time" key carries no MA values to store.
        return
    conn = stock_conn_manager().get_conn(share_id)
    conn.update(table_name, data, ["time"])
if __name__ == "__main__":
    import os
    import sys
    # Run relative to the project root so conf/ and src/ paths resolve.
    os.chdir("../../")
    # BUGFIX: ``sys`` must be imported before it is used; the original called
    # ``sys.path.append`` one line before ``import sys`` (NameError).
    sys.path.append("./src/frame/")
    # Python 2 only: force utf8 as the process-wide default string encoding.
    reload(sys)
    sys.setdefaultencoding("utf8")
    from j_load_mysql_conf import j_load_mysql_conf
    from j_load_regular_conf import j_load_regular_conf
    from scheduler_frame_conf_inst import scheduler_frame_conf_inst
    frame_conf_inst = scheduler_frame_conf_inst()
    frame_conf_inst.load("./conf/frame.conf")
    j_load_regular_conf_obj = j_load_regular_conf()
    j_load_regular_conf_obj.run()
    j_load_mysql_conf_obj = j_load_mysql_conf()
    j_load_mysql_conf_obj.run()
    a = update_stock_daily_average_info()
    a.run()
| StarcoderdataPython |
# 1. gpu id
gpu_id = 0
# 2. preprocess
# Per-channel normalization constants (RGB order; std of 1 leaves the raw
# pixel scale unchanged) -- presumably ImageNet means, TODO confirm.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[1,1,1], to_rgb=True)
# Input sides are padded up to a multiple of this value.
size_divisor = 32
preprocess = dict(
    typename='Compose',
    pipeline=[
        dict(typename='Resize', dst_shape=(1100, 1650), keep_ratio=True),
        dict(typename='ToFloat', keys=['img']),
        # Pads with the per-channel mean, reversed -- presumably because the
        # image is still BGR at this stage (to_rgb happens in Normalize);
        # verify against the pipeline implementation.
        dict(typename='PadIfNeeded', size_divisor=size_divisor,
             value=img_norm_cfg['mean'][::-1]),
        dict(typename='ImageToTensor', use_gpu=True),
        dict(typename='Normalize', **img_norm_cfg, use_gpu=True),
        dict(typename='Collect', keys=['img'])])
# 3. model
model = dict(
    typename='Onnx',
    model='tinaface_r50_fpn_bn.onnx',
    max_batch_size=1,
    min_input_shapes=[(3, 128, 128)], # Should be set when onnx model has dynamic shapes, the shape format is CxHxW. Otherwise, set None.
    max_input_shapes=[(3, 1664, 1664)], # Should be set when onnx model has dynamic shapes, the shape format is CxHxW. Otherwise, set None.
    fp16_mode=True)
# 4. postprocess
# Single-class detector (faces only); scores come from a sigmoid head.
num_classes = 1
strides = [4, 8, 16, 32, 64, 128]
use_sigmoid = True
scales_per_octave = 3
ratios = [1.3]
# 3 scales x 1 aspect ratio = 3 anchors per feature-map location.
num_anchors = scales_per_octave * len(ratios)
meshgrid = dict(
    typename='BBoxAnchorMeshGrid',
    strides=strides,
    base_anchor=dict(
        typename='BBoxBaseAnchor',
        octave_base_scale=2**(4 / 3),
        scales_per_octave=scales_per_octave,
        ratios=ratios,
        base_sizes=strides))
# Delta (dx, dy, dw, dh) box encoding with the usual mean/std scaling.
bbox_coder = dict(
    typename='DeltaXYWHBBoxCoder',
    target_means=[.0, .0, .0, .0],
    target_stds=[0.1, 0.1, 0.2, 0.2])
converter=dict(
    typename='IoUBBoxAnchorConverter',
    num_classes=num_classes,
    bbox_coder=bbox_coder,
    nms_pre=10000,
    use_sigmoid=use_sigmoid)
# Score threshold / NMS settings applied after decoding.
infer_cfg = dict(
    min_bbox_size=0,
    score_thr=0.4,
    nms=dict(
        typename='nms', iou_thr=0.45),
    max_per_img=300)
postprocess = dict(
    typename='Compose',
    pipeline=[
        dict(typename='ObjDetPostProcess', meshgrid=meshgrid,
             converter=converter, num_classes=num_classes,
             use_sigmoid=use_sigmoid, infer_cfg=infer_cfg),
        dict(typename='Collect', keys=['out'])
    ])
# 5. class name
class_names = ('face',)
| StarcoderdataPython |
import pandas as pd
import matplotlib.pylab as plt
import numpy as np

# IBM Developer Skills Network automobile dataset (no header row in the CSV).
filepath = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/auto.csv"
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
           "drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
           "num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
           "peak-rpm","city-mpg","highway-mpg","price"]
dataframe_base = pd.read_csv(filepath, names=headers)
#dataframe_base.head() #to read the dataframe

# Replace the dataset's "?" placeholders with NaN so pandas can detect them
# as missing values.
# BUGFIX: DataFrame.replace(..., inplace=True) returns None, so the original
# assignment ``dataframe_base = dataframe_base.replace("?", np.NaN,
# inplace=True)`` rebound dataframe_base to None and broke everything below.
# Mutate in place without reassigning instead.
dataframe_base.replace("?", np.nan, inplace=True)

missing_data = dataframe_base.isnull()
for column in missing_data.columns.values.tolist():
    ''' #you can optionally print
    print(column)
    print(missing_data[column].value_counts())
    print("")
    '''
    #using a variable to visualize in the variable explorer
    missing_data_count = missing_data[column].value_counts()

''' To normalize the data and treat issues related to missing values in the
database, we can use several approaches:
    1 - simply delete the whole row with a missing value
    2 - replace the missing value by the mean of its column
    3 - replace by the most frequent value in the column
'''
#first method: drop rows whose "normalized-losses" is missing
# NOTE(review): this is an alias, not a copy -- the dropna/reset_index below
# also mutate dataframe_base.  Use .copy() if the three methods are meant to
# be demonstrated independently (the later astype("int") relies on the
# current shared-mutation behavior, so it is only flagged here).
dataframe_ln_drop = dataframe_base
dataframe_ln_drop.dropna(subset=['normalized-losses'], axis=0, inplace=True)
dataframe_base.reset_index(drop=True, inplace=True)
'''first we drop the whole row with missing values, and later we refresh the
index in the dataframe because we removed rows.'''

#second method: replace missing values with the column mean
average_column_normalizedLosses = dataframe_base['normalized-losses'].astype("float").mean(axis=0)
dataframe_nl_mean = dataframe_base
dataframe_nl_mean['normalized-losses'].replace(np.nan, average_column_normalizedLosses, inplace=True)
#this calculates the mean of the normalized-losses column and replaces all NaN values put there previously

#last method: replace missing values with the most frequent value
# BUGFIX: value_counts is a method; the original ``.value_counts.idxmax()``
# accessed the bound method object itself and raised AttributeError.
frequent_normalizedLosses = dataframe_base['normalized-losses'].value_counts().idxmax()
dataframe_nl_frequent = dataframe_base
dataframe_nl_frequent['normalized-losses'].replace(np.nan, frequent_normalizedLosses, inplace=True)
'''this uses idxmax with value_counts to find the most frequent value in the
column and replace NaN with it in the original dataframe'''

#Changing the data type of the columns from the dataframe to the correct format
#first we inspect the current data type of each column
'''we can do it using print(dataframe_base.dtypes)
or by looking at 'dataframe_dtypes' in the variable explorer
'''
dataframe_dtypes = dataframe_base.dtypes
#and now we convert to the correct types
dataframe_base[["bore", "stroke"]] = dataframe_base[["bore", "stroke"]].astype("float")
dataframe_base[["normalized-losses"]] = dataframe_base[["normalized-losses"]].astype("int")
dataframe_base[["price"]] = dataframe_base[["price"]].astype("float")
dataframe_base[["peak-rpm"]] = dataframe_base[["peak-rpm"]].astype("float")
| StarcoderdataPython |
4893028 | # Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from os.path import expanduser, join
from platformio import __version__, app, util
from platformio.project.helpers import (get_project_core_dir,
is_platformio_project)
class AppRPC(object):
    """RPC facade over the persisted PlatformIO Home application state."""

    APPSTATE_PATH = join(get_project_core_dir(), "homestate.json")

    @staticmethod
    def load_state():
        """Load the saved state, refresh its derived fields and return it."""
        with app.State(AppRPC.APPSTATE_PATH, lock=True) as state:
            storage = state.get("storage", {})

            # Base metadata about the running PlatformIO Core instance.
            caller_id = app.get_session_var("caller_id")
            storage['cid'] = app.get_cid()
            storage['coreVersion'] = __version__
            storage['coreSystype'] = util.get_systype()
            if caller_id:
                storage['coreCaller'] = str(caller_id).lower()
            else:
                storage['coreCaller'] = None

            # Snapshot of every core setting: description, default, current.
            settings = {}
            for name, data in app.DEFAULT_SETTINGS.items():
                settings[name] = {
                    "description": data['description'],
                    "default_value": data['value'],
                    "value": app.get_setting(name)
                }
            storage['coreSettings'] = settings

            storage['homeDir'] = expanduser("~")
            storage['projectsDir'] = settings['projects_dir']['value']

            # Drop recent projects that are no longer valid on disk.
            storage['recentProjects'] = [
                project for project in storage.get("recentProjects", [])
                if is_platformio_project(project)
            ]

            state['storage'] = storage
            return state.as_dict()

    @staticmethod
    def get_state():
        """Return the current application state."""
        return AppRPC.load_state()

    @staticmethod
    def save_state(state):
        """Merge *state* into the persisted application state; returns True."""
        with app.State(AppRPC.APPSTATE_PATH, lock=True) as persisted:
            persisted.update(state)
            return True
| StarcoderdataPython |
4812204 | <reponame>The-CJ/Phaazebot
from datetime import datetime
from Utils.Classes.undefined import UNDEFINED
from Utils.Classes.contentclass import ContentClass
class OsuUser(ContentClass):
	"""
	Represents a osu! user with all its stats in a specific game mode
	"""

	# Every API field that is stored verbatim as a string attribute.
	_STRING_FIELDS = (
		"user_id", "username",
		"count300", "count100", "count50",
		"playcount", "ranked_score", "total_score",
		"pp_rank", "pp_country_rank", "level", "pp_raw", "accuracy",
		"count_rank_ssh", "count_rank_ss", "count_rank_sh",
		"count_rank_s", "count_rank_a",
		"country", "total_seconds_played",
	)

	# Numeric mode id -> human readable game mode name.
	_MODE_NAMES = {
		"0": "osu!",
		"1": "osu!taiko",
		"2": "osu!ctb",
		"3": "osu!mania",
	}

	def __repr__(self):
		return f"<{self.__class__.__name__} name='{self.username}' mode='{self.mode}'>"

	def __init__(self, data:dict, mode:str="0"):
		self.mode_number:str = mode
		self.JoinDate:datetime = self.asDatetime(data.get("join_date", "1970-01-01 00:00:00"))
		for field_name in self._STRING_FIELDS:
			setattr(self, field_name, self.asString(data.get(field_name, UNDEFINED)))

	@property
	def mode(self) -> str:
		return self._MODE_NAMES.get(self.mode_number, "Unknown")

	def toJSON(self, count_objects:bool=True, ranks:bool=True) -> dict:
		""" Returns a json save dict representation of all values for API, storage, etc... """
		base_pairs = (
			("mode", self.mode),
			("user_id", self.user_id),
			("username", self.username),
			("join_date", self.JoinDate),
			("playcount", self.playcount),
			("country", self.country),
			("ranked_score", self.ranked_score),
			("total_score", self.total_score),
			("pp_rank", self.pp_rank),
			("pp_country_rank", self.pp_country_rank),
			("level", self.level),
			("pp_raw", self.pp_raw),
			("accuracy", self.accuracy),
			("total_seconds_played", self.total_seconds_played),
		)
		j:dict = {key: self.asString(value) for key, value in base_pairs}
		if count_objects:
			for key in ("count300", "count100", "count50"):
				j[key] = self.asString(getattr(self, key))
		if ranks:
			for key in ("count_rank_ssh", "count_rank_ss", "count_rank_sh", "count_rank_s", "count_rank_a"):
				j[key] = self.asString(getattr(self, key))
		return j
| StarcoderdataPython |
3449953 | <gh_stars>0
"""JSON plugin module."""
from dataclasses import asdict
from json import dumps
from dbsg.lib.plugin import PluginABC
REGISTRY_NAME = 'json'
class Plugin(PluginABC):
    """JSON plugin: dumps each database's IR as a pretty-printed JSON file."""

    def __init__(self, configuration, introspection, ir, **kwargs):
        """Initialize the JSON plugin with configuration, introspection and IR."""
        self.configuration = configuration
        self.introspection = introspection
        self.ir = ir
        if kwargs:
            self.kwargs = kwargs
        else:
            # Default json.dumps options: readable, non-escaped unicode.
            self.kwargs = {
                'ensure_ascii': False,
                'indent': 4,
            }

    @classmethod
    def name(cls):
        """Alias in REGISTRY."""
        return REGISTRY_NAME

    def save(self, **kwargs):
        """Serialize every database in the IR to <path>/<db>/<db>.json."""
        dump_options = kwargs or self.kwargs
        out_root = self.configuration.path.absolute()
        out_root.mkdir(parents=True, exist_ok=True)
        for db in self.ir:
            serialized = dumps({db.name: asdict(db)}, **dump_options)
            db_dir = out_root / db.name
            db_dir.mkdir(exist_ok=True)
            target = db_dir / f'{db.name}.json'
            with target.open('w', encoding='utf8') as fh:
                fh.write(str(serialized))


JSONPlugin = Plugin  # for direct imports
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.