id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1861037 | <reponame>fish-quant/sim-fish
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Functions to simulate spots patterns.
"""
import numpy as np
import bigfish.stack as stack
# TODO add a pattern with different densities per area
def simulate_ground_truth(n_spots=30, random_n_spots=False,
                          n_clusters=0, random_n_clusters=False,
                          n_spots_cluster=0, random_n_spots_cluster=False,
                          centered_cluster=False, frame_shape=(128, 128),
                          voxel_size_z=None, voxel_size_yx=100,
                          sigma_z=None, sigma_yx=150, random_sigma=0.05,
                          amplitude=5000, random_amplitude=0.05,
                          probability_map=None):
    """Simulate ground truth information about the simulated spots like their
    coordinates, standard deviations and amplitude.

    Parameters
    ----------
    n_spots : int
        Expected number of spots to simulate.
    random_n_spots : bool
        Make the number of spots follow a Poisson distribution with
        expectation n_spots, instead of a constant predefined value.
    n_clusters : int
        Expected number of clusters to simulate.
    random_n_clusters : bool
        Make the number of clusters follow a Poisson distribution with
        expectation n_clusters, instead of a constant predefined value.
    n_spots_cluster : int
        Expected number of spots to simulate per cluster.
    random_n_spots_cluster : bool
        Make the number of spots follow a Poisson distribution with
        expectation n_spots_cluster, instead of a constant predefined value.
    centered_cluster : bool
        Center the simulated cluster. Only used if one cluster is simulated.
    frame_shape : Tuple[int or float] or List[int or float]
        Shape (z, y, x) or (y, x) of the image to simulate.
    voxel_size_z : int or float or None
        Height of a voxel, along the z axis, in nanometer. If None, we
        consider a 2-d image.
    voxel_size_yx : int or float
        Size of a voxel on the yx plan, in nanometer.
    sigma_z : int, float or None
        Standard deviation of the gaussian along the z axis, in nanometer. If
        None, we consider a 2-d image.
    sigma_yx : int or float
        Standard deviation of the gaussian along the yx axis, in nanometer.
    random_sigma : int or float
        Sigmas follow a normal distribution around the provided sigma values.
        The scale used is scale = sigma_axis * random_sigma.
    amplitude : int or float
        Amplitude of the gaussians.
    random_amplitude : int or float
        Margin allowed around the amplitude value. The formula used is
        margin = parameter * random_level.
    probability_map : np.ndarray, np.float32 or None
        Array of probability, with shape (z, y, x) or (y, x). Sums to one.

    Returns
    -------
    ground_truth : np.ndarray, np.float64
        Ground truth array with shape (nb_spots, 6) or (nb_spots, 4).
        - coordinate_z (optional)
        - coordinate_y
        - coordinate_x
        - sigma_z (optional)
        - sigma_yx
        - amplitude
    """
    # check parameters
    stack.check_parameter(n_spots=int,
                          random_n_spots=bool,
                          n_clusters=int,
                          random_n_clusters=bool,
                          n_spots_cluster=int,
                          random_n_spots_cluster=bool,
                          centered_cluster=bool,
                          frame_shape=(tuple, list),
                          voxel_size_z=(int, float, type(None)),
                          voxel_size_yx=(int, float),
                          sigma_z=(int, float, type(None)),
                          sigma_yx=(int, float),
                          random_sigma=(int, float),
                          amplitude=(int, float),
                          random_amplitude=(int, float))
    if probability_map is not None:
        stack.check_array(probability_map,
                          ndim=[2, 3],
                          dtype=[np.float32, np.float64])

    # check dimensions
    ndim = len(frame_shape)
    if ndim not in [2, 3]:
        raise ValueError("'frame_shape' should have 2 or 3 elements, not {0}."
                         .format(ndim))
    if probability_map is not None and ndim != probability_map.ndim:
        raise ValueError("'probability_map' dimension and 'frame_shape' are "
                         "not consistent.")
    if ndim == 3 and sigma_z is None:
        raise ValueError("Frame to simulate has 3 dimensions but 'sigma_z' is "
                         "missing.")
    if ndim == 3 and voxel_size_z is None:
        raise ValueError("Frame to simulate has 3 dimensions but "
                         "'voxel_size_z' is missing.")

    # generate number of spots to simulate
    nb_spots = _get_nb_spots(n_spots, random_n_spots)

    # generate clusters
    (positions_z_clusters, positions_y_clusters, positions_x_clusters,
     remaining_spots) = _get_clusters(
        frame_shape, ndim, nb_spots, n_clusters, random_n_clusters,
        n_spots_cluster, random_n_spots_cluster, voxel_size_z, voxel_size_yx,
        sigma_z, sigma_yx, centered_cluster, probability_map)

    # simulate positions of the remaining (non-clustered) spots
    (positions_z_spots, positions_y_spots,
     positions_x_spots) = _get_spots_coordinates(
        frame_shape, ndim, remaining_spots, probability_map)

    # merge coordinates
    if ndim == 3:
        positions_z = np.concatenate((positions_z_clusters, positions_z_spots))
    else:
        positions_z = None
    positions_y = np.concatenate((positions_y_clusters, positions_y_spots))
    positions_x = np.concatenate((positions_x_clusters, positions_x_spots))

    # size sigma and amplitude arrays by the number of spots actually kept:
    # when a probability map filters out some spots, this can be lower than
    # 'nb_spots' and using 'nb_spots' would make np.stack fail on a shape
    # mismatch
    nb_spots_simulated = len(positions_y)

    # generate sigma values
    sigmas_z, sigmas_yx = _get_sigma(ndim, sigma_z, sigma_yx, random_sigma,
                                     nb_spots_simulated)

    # generate amplitude values
    amplitudes = _get_amplitude(amplitude, random_amplitude,
                                nb_spots_simulated)

    # stack and format ground truth
    if ndim == 3:
        ground_truth = np.stack((positions_z, positions_y, positions_x,
                                 sigmas_z, sigmas_yx, amplitudes)).T
    else:
        ground_truth = np.stack((positions_y, positions_x,
                                 sigmas_yx, amplitudes)).T
    ground_truth = ground_truth.astype(np.float64)

    return ground_truth
def _get_nb_spots(n, random_n):
"""Generate number of spots to simulate.
Parameters
----------
n : int
Expected number of spots to simulate.
random_n : bool
Make the number of spots follow a Poisson distribution with
expectation n, instead of a constant predefined value.
Returns
-------
nb_spots : int
Number of spots to simulate.
"""
# generate number of spots to simulate
if random_n:
nb_spots = int(np.random.poisson(lam=n, size=1))
else:
nb_spots = n
return nb_spots
def _get_clusters(frame_shape, ndim, nb_spots, n_clusters, random_n_clusters,
                  n_spots_cluster, random_n_spots_cluster, voxel_size_z,
                  voxel_size_yx, sigma_z, sigma_yx, centered=False,
                  probability_map=None):
    """Generate number of clusters and coordinates for clustered spots.

    Parameters
    ----------
    frame_shape : Tuple[int or float] or List[int or float]
        Shape (z, y, x) or (y, x) of the image to simulate.
    ndim : int
        Number of dimensions of the simulated image (2 or 3).
    nb_spots : int
        Total number of spots to simulate in the image (clustered or not).
    n_clusters : int
        Expected number of clusters to simulate.
    random_n_clusters : bool
        Make the number of clusters follow a Poisson distribution with
        expectation n_clusters, instead of a constant predefined value.
    n_spots_cluster : int
        Expected number of spots to simulate per cluster.
    random_n_spots_cluster : bool
        Make the number of spots follow a Poisson distribution with
        expectation n_spots_cluster, instead of a constant predefined value.
    voxel_size_z : int or float or None
        Height of a voxel, along the z axis, in nanometer. If None, we
        consider a 2-d image.
    voxel_size_yx : int or float
        Size of a voxel on the yx plan, in nanometer.
    sigma_z : int, float or None
        Standard deviation of the gaussian along the z axis, in nanometer. If
        None, we consider a 2-d image.
    sigma_yx : int or float
        Standard deviation of the gaussian along the yx axis, in nanometer.
    centered : bool
        Center the simulated cluster. Only used if one cluster is simulated.
    probability_map : np.ndarray, np.float32 or None
        Array of probability, with shape (z, y, x) or (y, x). Sums to one.

    Returns
    -------
    positions_z : np.ndarray, np.int64
        Array of coordinates along the z axis, or None.
    positions_y : np.ndarray, np.int64
        Array of coordinates along the y axis.
    positions_x : np.ndarray, np.int64
        Array of coordinates along the x axis.
    remaining_spots : int
        Remaining spots to simulate in the image.
    """
    # generate number of clusters to simulate
    nb_clusters = _get_nb_spots(n_clusters, random_n_clusters)

    # no cluster to simulate: return empty coordinate arrays and keep the
    # whole spot budget for the non-clustered spots
    if nb_clusters == 0:
        positions_z = np.array([], dtype=np.int64).reshape((0,))
        positions_y = np.array([], dtype=np.int64).reshape((0,))
        positions_x = np.array([], dtype=np.int64).reshape((0,))
        return positions_z, positions_y, positions_x, nb_spots

    # multiple clusters can't be simulated in the center of the frame
    if nb_clusters > 1:
        centered = False

    # simulate cluster centers
    if probability_map is not None:
        # sample centers according to the probability map (its shape replaces
        # 'frame_shape' for the bound checks below)
        frame_shape = probability_map.shape
        sample = _sample_coordinates(nb_clusters, probability_map)
        center_cluster_z = None
        if ndim == 3:
            center_cluster_z = sample[:, 0]
        center_cluster_y = sample[:, ndim - 2]
        center_cluster_x = sample[:, ndim - 1]
    elif centered:
        # a single cluster pinned to the middle of the frame
        center_cluster_z = None
        if ndim == 3:
            center_cluster_z = np.array(frame_shape[0] / 2, dtype=np.int64)
            center_cluster_z = np.reshape(center_cluster_z, -1)
        center_cluster_y = np.array(frame_shape[ndim - 2] / 2, dtype=np.int64)
        center_cluster_y = np.reshape(center_cluster_y, -1)
        center_cluster_x = np.array(frame_shape[ndim - 1] / 2, dtype=np.int64)
        center_cluster_x = np.reshape(center_cluster_x, -1)
    else:
        # uniformly random centers
        center_cluster_z = None
        if ndim == 3:
            center_cluster_z = np.random.uniform(0, frame_shape[0],
                                                 size=nb_clusters)
        center_cluster_y = np.random.uniform(0, frame_shape[ndim - 2],
                                             size=nb_clusters)
        center_cluster_x = np.random.uniform(0, frame_shape[ndim - 1],
                                             size=nb_clusters)

    # get spots coordinates per cluster
    remaining_spots = nb_spots
    if ndim == 3:
        positions_z = []
    else:
        positions_z = None
    positions_y = []
    positions_x = []
    for i in range(nb_clusters):

        # get number of spots for this cluster, capped by the global budget
        nb_spots_cluster = _get_nb_spots(n_spots_cluster,
                                         random_n_spots_cluster)
        nb_spots_cluster = min(nb_spots_cluster, remaining_spots)
        remaining_spots -= nb_spots_cluster

        # the cluster spread scales with the PSF size in pixels
        # (sigma / voxel size) and with the number of spots in the cluster
        scale_z = None
        if ndim == 3:
            spots_scale_z = sigma_z / voxel_size_z
            scale_z = spots_scale_z * 0.2 * nb_spots_cluster
        spots_scale_yx = sigma_yx / voxel_size_yx
        scale_yx = spots_scale_yx * 0.2 * nb_spots_cluster
        if ndim == 3:
            # spherical coordinates around the cluster center
            rho_z = np.abs(np.random.normal(loc=0.0, scale=scale_z,
                                            size=nb_spots_cluster))
            rho_yx = np.abs(np.random.normal(loc=0.0, scale=scale_yx,
                                             size=nb_spots_cluster))
            theta = np.random.uniform(0, np.pi, nb_spots_cluster)
            phi = np.random.uniform(0, 2 * np.pi, nb_spots_cluster)
            z = center_cluster_z[i] + rho_z * np.cos(theta)
            positions_z.append(z)
            y = center_cluster_y[i] + rho_yx * np.sin(phi) * np.sin(theta)
            positions_y.append(y)
            x = center_cluster_x[i] + rho_yx * np.cos(phi) * np.sin(theta)
            positions_x.append(x)
        else:
            # polar coordinates around the cluster center
            rho_yx = np.random.normal(loc=0.0, scale=scale_yx,
                                      size=nb_spots_cluster)
            phi = np.random.uniform(-np.pi, np.pi, nb_spots_cluster)
            y = center_cluster_y[i] + rho_yx * np.sin(phi)
            positions_y.append(y)
            x = center_cluster_x[i] + rho_yx * np.cos(phi)
            positions_x.append(x)

    # concatenate and cast coordinates
    if ndim == 3:
        positions_z = np.concatenate(positions_z).astype(np.int64)
    positions_y = np.concatenate(positions_y).astype(np.int64)
    positions_x = np.concatenate(positions_x).astype(np.int64)

    # filter out spots simulated outside the frame
    mask_y = (positions_y >= 0) & (positions_y < frame_shape[ndim - 2])
    mask_x = (positions_x >= 0) & (positions_x < frame_shape[ndim - 1])
    if ndim == 3:
        mask_z = (positions_z >= 0) & (positions_z < frame_shape[0])
        mask = mask_z & mask_y & mask_x
        positions_z = positions_z[mask]
        positions_y = positions_y[mask]
        positions_x = positions_x[mask]
    else:
        mask = mask_y & mask_x
        positions_y = positions_y[mask]
        positions_x = positions_x[mask]
    if probability_map is not None:
        # drop clustered spots that fell on null-probability pixels
        if ndim == 3:
            mask = probability_map[positions_z, positions_y, positions_x]
            mask = mask > 0.
            positions_z = positions_z[mask]
            positions_y = positions_y[mask]
            positions_x = positions_x[mask]
        else:
            mask = probability_map[positions_y, positions_x]
            mask = mask > 0.
            positions_y = positions_y[mask]
            positions_x = positions_x[mask]

    # compute remaining spots from the number of clustered spots actually
    # kept, so filtered-out spots are re-simulated as free spots
    # NOTE(review): 'mask' here is whichever filter ran last — confirm this
    # is intended when both the bound filter and the probability filter apply
    remaining_spots = nb_spots - mask.sum()

    return positions_z, positions_y, positions_x, remaining_spots
def _get_spots_coordinates(frame_shape, ndim, nb_spots, probability_map=None):
"""Generate spots coordinates in 2-d or 3-d.
Parameters
----------
frame_shape : Tuple[int or float] or List[int of float]
Shape (z, y, x) or (y, x) of the image to simulate.
ndim : int
Number of dimensions of the simulated image (2 or 3).
nb_spots : int
Number of spots to simulate.
probability_map : np.ndarray, np.float32 or None
Array of probability, with shape (z, y, x) or (y, x). Sum to one.
Returns
-------
positions_z : np.ndarray, np.int64
Array of coordinates along the z axis, or None.
positions_y : np.ndarray, np.int64
Array of coordinates along the y axis.
positions_x : np.ndarray, np.int64
Array of coordinates along the x axis.
"""
# simulate positions from a probability map
if probability_map is not None:
sample = _sample_coordinates(nb_spots, probability_map)
positions_z = None
if ndim == 3:
positions_z = sample[:, 0]
positions_y = sample[:, ndim - 2]
positions_x = sample[:, ndim - 1]
# simulate positions from scratch
else:
positions_z = None
if ndim == 3:
positions_z = np.random.uniform(0, frame_shape[0], nb_spots)
positions_y = np.random.uniform(0, frame_shape[ndim - 2], nb_spots)
positions_x = np.random.uniform(0, frame_shape[ndim - 1], nb_spots)
# cast coordinates
if ndim == 3:
positions_z = np.round(positions_z).astype(np.int64)
positions_y = np.round(positions_y).astype(np.int64)
positions_x = np.round(positions_x).astype(np.int64)
# filter out spots incorrectly simulated
if probability_map is not None:
if ndim == 3:
mask = probability_map[positions_z, positions_y, positions_x]
mask = mask > 0.
positions_z = positions_z[mask]
positions_y = positions_y[mask]
positions_x = positions_x[mask]
else:
mask = probability_map[positions_y, positions_x]
mask = mask > 0.
positions_y = positions_y[mask]
positions_x = positions_x[mask]
return positions_z, positions_y, positions_x
def _get_sigma(ndim, sigma_z, sigma_yx, random_sigma, nb_spots):
"""Get standard deviations of the gaussians.
Parameters
----------
ndim : int
Number of dimensions of the simulated image (2 or 3).
sigma_z : int, float or None
Standard deviation of the gaussian along the z axis, in nanometer. If
None, we consider a 2-d image.
sigma_yx : int or float
Standard deviation of the gaussian along the yx axis, in nanometer.
random_sigma : int of float
Sigmas follow a normal distribution around the provided sigma values.
The scale used is scale = sigma_axis * random_sigma
nb_spots : int
Number of spots to simulate.
Returns
-------
sigmas_z : np.ndarray, np.float64
Array of standard deviation along the z axis or None.
sigmas_yx : np.ndarray, np.float64
Array of standard deviation along the y or x axis.
"""
# generate sigma values
sigmas_z = None
if ndim == 3:
scale = sigma_z * random_sigma
sigmas_z = np.random.normal(loc=sigma_z, scale=scale, size=nb_spots)
sigmas_z[sigmas_z < 1] = 1.
scale = sigma_yx * random_sigma
sigmas_yx = np.random.normal(loc=sigma_yx, scale=scale, size=nb_spots)
sigmas_yx[sigmas_yx < 1] = 1.
return sigmas_z, sigmas_yx
def _get_amplitude(amplitude, random_amplitude, nb_spots):
"""Get amplitude of the simulated gaussians.
Parameters
----------
amplitude : int or float
Amplitude of the gaussians.
random_amplitude : int or float
Margin allowed around the amplitude value. The formula used is
margin = parameter * random_level.
nb_spots : int
Number of spots to simulate.
Returns
-------
amplitudes : np.ndarray, np.float64
Array of gaussian amplitudes.
"""
# generate amplitude values
margin = amplitude * random_amplitude
limit_down = amplitude - margin
limit_up = amplitude + margin
amplitudes = np.random.uniform(limit_down, limit_up, size=nb_spots)
return amplitudes
def _sample_coordinates(n, probability_map):
"""Randomly sample coordinates in 2-d or 3-d,according to a probability
map.
Parameters
----------
n : int
Number of coordinates to sample.
probability_map : np.ndarray, np.float32 or None
Array of probability, with shape (z, y, x) or (y, x). Sum to one.
Returns
-------
sample : np.ndarray, np.int64
Array of coordinates with shape (n, 3) or (n, 2).
"""
# get frame dimension
ndim = probability_map.ndim
# get frame shape
if ndim == 3:
z_size, y_size, x_size = probability_map.shape
z = np.linspace(0, z_size - 1, z_size)
else:
y_size, x_size = probability_map.shape
z_size = None
z = None
y = np.linspace(0, y_size - 1, y_size)
x = np.linspace(0, x_size - 1, x_size)
# get frame coordinates
if ndim == 3:
zz, yy, xx = np.meshgrid(z, y, x, indexing="ij")
coord_matrix = np.stack([zz, yy, xx], axis=-1)
coord = coord_matrix.reshape((z_size * y_size * x_size, 3))
else:
yy, xx = np.meshgrid(y, x, indexing="ij")
coord_matrix = np.stack([yy, xx], axis=-1)
coord = coord_matrix.reshape((y_size * x_size, 2))
# get coordinate indices
index_coord = np.array([i for i in range(coord.shape[0])])
# format probability array
probability_map = probability_map.ravel()
# sample coordinates
index_sample = np.random.choice(index_coord, size=n, replace=False,
p=probability_map)
sample = coord[index_sample]
return sample
| StarcoderdataPython |
299010 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ManufacturersAppConfig(AppConfig):
    """Django application configuration for the ``manufacturers`` app."""
    name = 'manufacturers'
    # translated label shown in the Django admin
    verbose_name = _('Manufacturers')
# Pre-Django-3.2 convention: point Django at the config class to use by
# default. NOTE(review): the dotted path omits the module holding this class
# (usually 'manufacturers.apps.ManufacturersAppConfig') — confirm it resolves.
default_app_config = 'manufacturers.ManufacturersAppConfig'
| StarcoderdataPython |
248680 |
def test_example():
    """Trivial pytest sanity check: always passes."""
    assert 20 > 3
| StarcoderdataPython |
229083 | import sys
import json
def gen_label(taxo, topic_wts):
    """Recursively print a Graphviz node declaration for every taxonomy topic.

    Topics present in ``topic_wts`` get a ``weight`` attribute with four
    decimal places; other topics are printed bare.
    """
    if "root" in taxo:
        node = taxo["root"]
        if node in topic_wts:
            print(f' "{node}" [ weight={topic_wts[node]:.4f} ];')
        else:
            print(f' "{node}";')
    for child in taxo.get("children", []):
        gen_label(child, topic_wts)
def gen_link(taxo):
    """Recursively print Graphviz edges from each taxonomy node to its children."""
    if "root" not in taxo or "children" not in taxo:
        return
    parent = taxo["root"]
    for child in taxo["children"]:
        print(f' "{parent}" -> "{child["root"]}";')
        gen_link(child)
def main(args):
    """Print the taxonomy stored in a JSON file as a Graphviz digraph.

    Parameters
    ----------
    args : list of str
        ``args[0]`` is the taxonomy JSON path. Optional ``args[1]`` is a
        topics JSON path providing a weight ("score") per "topic_string".
    """
    # use context managers so the file handles are closed deterministically
    # (the previous json.load(open(...)) calls leaked them)
    with open(args[0]) as taxonomy_file:
        taxonomy = json.load(taxonomy_file)
    if len(args) >= 2:
        with open(args[1]) as topics_file:
            topics = json.load(topics_file)
        topic_wts = {x["topic_string"]: x["score"] for x in topics}
    else:
        topic_wts = {}
    print("digraph G {")
    gen_label(taxonomy, topic_wts)
    gen_link(taxonomy)
    print("}")

if __name__ == "__main__":
    main(sys.argv[1:])
| StarcoderdataPython |
380629 | <reponame>andriidem308/python_practice
def decorator_type(Cls):
    """Class decorator: wrap ``Cls`` so every attribute access prints the
    attribute's type before returning it.

    The wrapped instance is stored in ``oInstance`` and lookups are delegated
    to it. A missing attribute prints a message and terminates the program
    (matching the error-handling style used elsewhere in this file).

    Fixes over the original: ``__getattribute__`` now returns the looked-up
    value (it previously returned None), avoids infinite recursion when
    reading ``self.oInstance``, and actually delegates to the wrapped
    instance instead of failing on the wrapper's own (empty) namespace.
    """
    class NewCls(object):
        def __init__(self, *args, **kwargs):
            self.oInstance = Cls(*args, **kwargs)

        def __getattribute__(self, s):
            # fetch the wrapped instance via object.__getattribute__ so we
            # do not recurse back into this method
            inner = object.__getattribute__(self, "oInstance")
            if s == "oInstance":
                return inner
            try:
                x = getattr(inner, s)
            except AttributeError:
                print("Incorrect Attribute!")
                exit(0)
            print(type(x))
            return x

    return NewCls
@decorator_type  # fix: the decorator defined above is 'decorator_type'; '@cls_dec' was undefined (NameError)
class Rlist:
    '''Circular (ring) list implemented on top of a Python list.'''

    def __init__(self):
        '''Create an empty ring list.'''
        self._lst = []    # stored elements
        self._cur = None  # index of the current element

    def len(self):
        # number of stored elements
        return len(self._lst)

    def next(self):
        # advance the cursor by one, wrapping around to the first element
        l = self.len()
        if l != 0:
            if self._cur == l-1:
                self._cur = 0
            else:
                self._cur += 1

    def getcurrent(self):
        # return the current element; exits if the list is empty
        # NOTE(review): the message below is mojibake of a Ukrainian
        # "list is empty" string — kept byte-identical, confirm encoding fix
        if self.len() == 0:
            print('getcurrent: СЃРїРёСЃРѕРє РїРѕСЂРѕР¶РЅС–Р№')
            exit(1)
        data = self._lst[self._cur]
        return data

    def update(self, data):
        # replace the current element; exits if the list is empty
        if self.len() == 0:
            print('update: СЃРїРёСЃРѕРє РїРѕСЂРѕР¶РЅС–Р№')
            exit(1)
        self._lst[self._cur] = data

    def insert(self, data):
        # insert before the current element and make the new element current
        if self.len() == 0:
            self._lst.append(data)
            self._cur = 0
        else:
            self._lst.insert(self._cur, data)
            self._cur += 1

    def delete(self):
        # remove the current element; exits if the list is empty
        if self.len() == 0:
            print('delete: СЃРїРёСЃРѕРє РїРѕСЂРѕР¶РЅС–Р№')
            exit(1)
        l = self.len()
        del self._lst[self._cur]
        if l == 1:
            self._cur = None
        elif self._cur == l-1:
            self._cur = 0
        #else: pass

    def __del__(self):
        del self._lst
# Demo usage.
# NOTE(review): ``Rlist`` defines no ``save`` method, and the class object is
# replaced by the decorator's wrapper class, so the call below raises
# AttributeError — confirm what was intended before relying on this code.
obj = Rlist()
Rlist.save('output.txt')
# obj.save('output.txt')
# obj.save('output.txt')
| StarcoderdataPython |
6486378 | <filename>cintas01/apps/movimientos/admin.py
# Admin registration for the 'movimientos' app: expose its models in the
# Django admin site with default ModelAdmin options.
from django.contrib import admin
from apps.movimientos.models import (Cinta,Alojadores,Movimiento)
admin.site.register(Cinta)
admin.site.register(Alojadores)
admin.site.register(Movimiento)
| StarcoderdataPython |
3229459 | import _global
# Bootstrap the project import path, then probe the GSM modem on /dev/ttyUSB0
# and print its signal strength, reporting each pygsm failure mode separately.
_global._import()
import sys
from pysms import Sms
from pygsm.errors import GsmError, GsmConnectError, GsmModemError, GsmWriteError
try:
    # NOTE(review): 'logger=False' presumably disables pygsm logging — confirm
    sms = Sms("/dev/ttyUSB0", logger = False)
    strangth = sms.gsm.signal_strength()  # [sic] variable name kept as-is
    print(strangth)
except GsmConnectError as err:
    print ("connect error", err)
except GsmModemError as err:
    print ("modem error", err)
except GsmWriteError as err:
    print ("write error", err)
except GsmError as err:
    # catch-all for the pygsm error hierarchy; must come after the subclasses
    print ("error", err)
except IndexError as err:
    # NOTE(review): nothing above indexes sys.argv — this handler looks copied
    # from a variant that read the phone number from the command line; confirm
    print ("Phone number and message are required")
5174518 | import base64
def encode(value):
    """Encode *value* as URL-safe base64 with the '=' padding stripped.

    Non-bytes values are converted with str() and UTF-8 encoded first
    (the original str()-based call breaks on Python 3, where b64encode
    requires bytes).
    """
    raw = value if isinstance(value, bytes) else str(value).encode('utf-8')
    return base64.urlsafe_b64encode(raw).decode('ascii').rstrip('=')
def decode(value):
    """Decode an (optionally unpadded) URL-safe base64 string to bytes.

    Fixes over the original: on Python 3 the input is normalized to str and
    the result is bytes; and when the input length was already a multiple of
    4 the original appended four spurious '=' characters, which makes
    b64decode raise.
    """
    text = value.decode('ascii') if isinstance(value, bytes) else str(value)
    padding = '=' * (-len(text) % 4)
    return base64.urlsafe_b64decode(text + padding)
def decode_dict(value):
    """Decode base64-encoded text of CRLF-separated ``key=value`` pairs.

    Returns
    -------
    dict
        Mapping of each key to its (string) value. Pairs without '=' are
        ignored.
    """
    text = decode(str(value))
    if isinstance(text, bytes):
        # decode() returns bytes on Python 3
        text = text.decode('utf-8')
    output = {}
    for pair in text.split('\r\n'):
        if '=' in pair:
            # split only on the first '=' so values may themselves contain '='
            key, val = pair.split('=', 1)
            output[key] = val
    return output
1760132 | <reponame>Retraces/UkraineBot<gh_stars>1-10
/home/runner/.cache/pip/pool/87/a8/65/46c8c75345440a6c7fb21b2e2adcb806971af94ea0c9a196d612bb1adb | StarcoderdataPython |
3381449 | <reponame>afrozchakure/Python-Games
import pygame
import sys
import random # Pythons random library
pygame.init()  # initialize all pygame modules
# Screen dimensions (pixels)
WIDTH = 800  # Global variables
HEIGHT = 600
# Colors (RGB) for the player, enemies and text
RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255,255,0)
GREEN = (0, 255, 0)
BACKGROUND_COLOR = (0, 0, 0)  # Setting the background color
# Player block size and starting position
PLAYER_SIZE = 50  # side length of the square block
PLAYER_POS = [WIDTH/2, (HEIGHT-(2*PLAYER_SIZE))]  # starting (x, y) position
# Enemy block size and starting position
ENEMY_SIZE = 50
X_POS = random.randint(0, WIDTH - ENEMY_SIZE)  # random starting x of the first enemy
ENEMY_POS = [X_POS, 0]  # enemy starts at the top of the screen
ENEMY_LIST = [ENEMY_POS]  # list holding every live enemy position
SPEED = 10  # pixels per frame the enemies fall
# Creating the game window
screen = pygame.display.set_mode((WIDTH, HEIGHT))  # window of WIDTH x HEIGHT pixels
game_over = False
score = 0  # Initializing the score
clock = pygame.time.Clock()  # clock used to cap the frame rate
myFont = pygame.font.SysFont("monospace", 35)  # in-game score font (35 px)
endFont = pygame.font.SysFont("comicsansms", 40, True, False)  # game-over font (bold, not italic)
def set_level(score, SPEED):
    """Map the current score to the enemy falling speed (difficulty level).

    The SPEED argument is accepted for interface compatibility but the
    returned value depends only on the score.
    """
    for threshold, level_speed in ((20, 10), (40, 12), (60, 15)):
        if score < threshold:
            return level_speed
    return 20
def draw_enemies(enemy_list):
    """Render each enemy as a blue square at its (x, y) position on the
    global screen."""
    for x_pos, y_pos in enemy_list:
        pygame.draw.rect(screen, BLUE, (x_pos, y_pos, ENEMY_SIZE, ENEMY_SIZE))
def drop_enemies(ENEMY_LIST):
    """With probability ~0.1 per frame, spawn a new enemy at a random x
    position along the top edge, capping the population at 10 enemies."""
    chance = random.random()  # drawn unconditionally to keep the RNG stream identical
    if len(ENEMY_LIST) < 10 and chance < 0.1:
        spawn_x = random.randint(0, WIDTH - ENEMY_SIZE)
        ENEMY_LIST.append([spawn_x, 0])
def update_enemy_positions(ENEMY_LIST, score):
    """Advance every enemy down by SPEED pixels; remove enemies that left the
    frame and award one point for each.

    Mutates ENEMY_LIST in place and returns the updated score.

    Fix: the original popped from the list while iterating it forward with
    enumerate, which skips the element following each removal; iterating
    backwards makes deletion safe.
    """
    for idx in range(len(ENEMY_LIST) - 1, -1, -1):
        enemy_pos = ENEMY_LIST[idx]
        # enemy positions are integers in this game, so this is equivalent to
        # the original 'in range(0, HEIGHT)' membership test
        if 0 <= enemy_pos[1] < HEIGHT:
            enemy_pos[1] += SPEED
        else:
            del ENEMY_LIST[idx]
            score += 1
    return score
def collision_check(enemy_list, player_pos):
    """Return True when the player overlaps at least one enemy."""
    return any(detect_collision(player_pos, enemy_pos)
               for enemy_pos in enemy_list)
def detect_collision(PLAYER_POS, ENEMY_POS):
    """Axis-aligned bounding-box overlap test between the player square and
    one enemy square (sizes come from the PLAYER_SIZE / ENEMY_SIZE globals)."""
    p_x, p_y = PLAYER_POS
    e_x, e_y = ENEMY_POS
    x_overlap = ((e_x >= p_x and e_x < p_x + PLAYER_SIZE)
                 or (p_x >= e_x and p_x < e_x + ENEMY_SIZE))
    y_overlap = ((e_y >= p_y and e_y < p_y + PLAYER_SIZE)
                 or (p_y >= e_y and p_y < e_y + ENEMY_SIZE))
    return x_overlap and y_overlap
def limit(PLAYER_POS):
    """Clamp the player position so the whole block stays on screen.

    The original eight-branch cascade is exactly a clamp of x into
    [0, WIDTH - PLAYER_SIZE] and y into [0, HEIGHT - PLAYER_SIZE]
    (750 and 550 with the module's constants); the magic numbers are
    replaced by those expressions.
    """
    p_x = min(max(PLAYER_POS[0], 0), WIDTH - PLAYER_SIZE)
    p_y = min(max(PLAYER_POS[1], 0), HEIGHT - PLAYER_SIZE)
    return [p_x, p_y]
# Main game loop: handle input, spawn/advance enemies, detect collisions,
# draw the frame, and stop when the player is hit.
while not game_over :  # runs until a collision sets game_over
    for event in pygame.event.get():  # drain the pygame event queue
        # print(event) # It prints the event each time
        if event.type == pygame.QUIT:  # window close button exits the program
            sys.exit()
        if event.type == pygame.KEYDOWN:
            x = PLAYER_POS[0]
            y = PLAYER_POS[1]  # grab the current x and y coordinates
            if event.key == pygame.K_LEFT:  # move one whole block left
                x -= PLAYER_SIZE
            elif event.key == pygame.K_RIGHT:  # move one whole block right
                x += PLAYER_SIZE
            elif event.key == pygame.K_UP:
                y -= PLAYER_SIZE
            elif event.key == pygame.K_DOWN:
                y += PLAYER_SIZE
            PLAYER_POS = [x, y]  # tentative new position
            PLAYER_POS = limit(PLAYER_POS)  # clamp it inside the screen
    screen.fill(BACKGROUND_COLOR)  # clear the frame
    drop_enemies(ENEMY_LIST)  # maybe spawn a new enemy this frame
    score = update_enemy_positions(ENEMY_LIST, score)  # advance enemies, count escapes
    # print(score) # Prints score to the console
    SPEED = set_level(score, SPEED)  # difficulty scales with the score
    text = "Score:" + str(score)
    final_score = "Final Score: " + str(score)
    msg = "Better Luck next time!!"
    label1 = myFont.render(text, 1, YELLOW)
    screen.blit(label1, (WIDTH-200, HEIGHT-50))  # draw the live score bottom-right
    if collision_check(ENEMY_LIST, PLAYER_POS):  # player hit an enemy -> game over
        label2 = endFont.render(final_score, 1, RED)
        label3 = endFont.render(msg, 1, (0, 255, 0))
        screen.blit(label2, (250, 250))  # game-over text roughly centered
        screen.blit(label3, (250, 300))
        game_over = True
        # break # It breaks out of the loop without showing the overlap
    draw_enemies(ENEMY_LIST)
    # Drawing the player rectangle
    pygame.draw.rect(screen, RED, (PLAYER_POS[0], PLAYER_POS[1], PLAYER_SIZE, PLAYER_SIZE))  # rect(Surface, Color, Rect, width=0) — see pygame documentation
    clock.tick(30)  # cap at 30 frames per second
    pygame.display.update()  # flip the finished frame to the screen
if game_over:
    pygame.time.wait(3000)  # keep the game-over screen visible for 3 seconds
1951222 | # Copyright (c) 2020, <NAME>PORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import os
import pickle
import sys
from examples.tensorflow.decoder.utils.position import SinusoidalPositionEncoder
def finalize(beam_width, parent_ids, sequence_lengths, outputs, end_id, max_seq_len=None):
    """Back-track the beam-search trees to produce the final token ids.

    Reshapes the flat per-step outputs/parent indices to
    [max_len, batch, beam], follows the parent pointers with
    ``gather_tree`` to reconstruct each hypothesis, and returns the ids as
    [batch, beam, max_len] together with the per-hypothesis lengths
    (number of non-``end_id`` tokens).
    """
    # longest hypothesis per batch entry (max over the beams)
    maximum_lengths = tf.reduce_max(tf.reshape(
        sequence_lengths, [-1, beam_width]), axis=-1)
    if max_seq_len != None:
        array_shape = [max_seq_len, -1, beam_width]
    else:
        array_shape = [tf.reduce_max(maximum_lengths), -1, beam_width]
    step_ids = tf.reshape(outputs, array_shape)
    parent_ids = tf.reshape(parent_ids, array_shape)
    # follow the parent pointers backwards to reconstruct each beam's tokens
    ids = tf.contrib.seq2seq.gather_tree(
        step_ids, parent_ids, maximum_lengths, end_id)
    # [time, batch, beam] -> [batch, beam, time]
    ids = tf.transpose(ids, perm=[1, 2, 0])
    # effective length = number of tokens different from end_id
    lengths = tf.not_equal(ids, end_id)
    lengths = tf.cast(lengths, tf.int32)
    lengths = tf.reduce_sum(lengths, axis=-1)
    return ids, lengths
def ft_decoding(memory_tensor,
                memory_sequence_length,
                embedding_table,
                decoding_vars,
                decoding_args,
                using_model_var=True,
                checkpoint_filename=None):
    '''Run the decoding with beam search by TensorFlow.

    Parameters
    ----------
    memory_tensor : tf.Tensor
        Shape [batch_size * beam_width, max(memory_sequence_length),
        encoder_hidden_dimension]; the encoder transformer outputs,
        already extended by beam_width times. The rank must be 3.
    memory_sequence_length : tf.Tensor, tf.int
        Shape [batch_size * beam_width]; length of each encoder result,
        also extended by beam_width times.
    embedding_table : tf.Tensor
        Shape [vocab_size, hidden_dimension]; embedding lookup table used
        at each decoding step.
    decoding_vars : list of tf.Tensor
        Model variables of the TensorFlow decoder.
    decoding_args : DecodingBeamsearchArgument
        Arguments for decoding (see common.py).
    using_model_var : bool
        NOTE(review): unused in this body; documented elsewhere as part of
        'preprocess_decoder_var'.
    checkpoint_filename : str or None
        NOTE(review): unused in this body; documented elsewhere as part of
        'preprocess_decoder_var'.

    Returns
    -------
    output_ids : tf.Tensor, tf.int
        Shape [batch_size, beam_width, max(sequence_lengths)] (beam search)
        or [batch_size, max(sequence_lengths)] (sampling); decoded token ids.
    sequence_lengths : tf.Tensor, int
        Shape [batch_size * beam_width]; decoded sequence lengths.
    (three trailing None values are kept for interface compatibility)
    '''
    decoder_args = decoding_args.decoder_args
    # load the FasterTransformer custom decoding op
    decoding_op_module = tf.load_op_library(os.path.join('./lib/libtf_decoding.so'))

    # replicate encoder outputs and lengths for every beam
    extended_memory = tf.contrib.seq2seq.tile_batch(
        memory_tensor, multiplier=decoder_args.beam_width)
    extended_memory_sequence_length = tf.contrib.seq2seq.tile_batch(
        memory_sequence_length, multiplier=decoder_args.beam_width)

    position_encoder = SinusoidalPositionEncoder()
    position_encoding_table = position_encoder._create_position_encoding_table(
        decoding_args.max_seq_len, decoder_args.head_num * decoder_args.size_per_head, decoder_args.dtype)
    # shape of position_encoding_table: [max_seq_len, hidden_dim]

    # the fused cross-attention key/value projections are split into
    # separate key and value kernels/biases, as the custom op expects
    cross_key_kernel_list = []
    cross_value_kernel_list = []
    cross_key_bias_list = []
    cross_value_bias_list = []

    # index the decoder variables by name for the per-layer lookups below
    var_dict = {}
    for v in decoding_vars:
        var_dict[v.name] = v

    for l in range(decoder_args.num_layer):
        layer_prefix_name = "transformer/decoder/layer_%d/" % l
        cross_key_kernel, cross_value_kernel = tf.split(var_dict[layer_prefix_name + 'multi_head/conv1d_1/kernel:0'], 2, axis=-1)
        cross_key_bias, cross_value_bias = tf.split(var_dict[layer_prefix_name + 'multi_head/conv1d_1/bias:0'], 2, axis=-1)
        cross_key_kernel_list.append(cross_key_kernel)
        cross_value_kernel_list.append(cross_value_kernel)
        cross_key_bias_list.append(cross_key_bias)
        cross_value_bias_list.append(cross_value_bias)

    # the argument order below is fixed by the custom op's signature — do not
    # reorder (the trailing numbers are the original author's position notes)
    output_ids, parent_ids, sequence_lengths = decoding_op_module.decoding(
        extended_memory, # 1
        extended_memory_sequence_length, # 2
        [var_dict["transformer/decoder/layer_%d/masked_multi_head/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 7
        [var_dict["transformer/decoder/layer_%d/masked_multi_head/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 8
        [var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 9
        [var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 10
        [var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d_1/kernel:0" % l] for l in range(decoder_args.num_layer)], # 11
        [var_dict["transformer/decoder/layer_%d/masked_multi_head/conv1d_1/bias:0" % l] for l in range(decoder_args.num_layer)], # 12
        [var_dict["transformer/decoder/layer_%d/multi_head/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 13
        [var_dict["transformer/decoder/layer_%d/multi_head/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 14
        [var_dict["transformer/decoder/layer_%d/multi_head/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 15
        [var_dict["transformer/decoder/layer_%d/multi_head/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 16
        cross_key_kernel_list, # 17
        cross_key_bias_list, # 18
        cross_value_kernel_list, # 19
        cross_value_bias_list, # 20
        [var_dict["transformer/decoder/layer_%d/multi_head/conv1d_2/kernel:0" % l] for l in range(decoder_args.num_layer)], # 21
        [var_dict["transformer/decoder/layer_%d/multi_head/conv1d_2/bias:0" % l] for l in range(decoder_args.num_layer)], # 22
        [var_dict["transformer/decoder/layer_%d/ffn/LayerNorm/beta:0" % l] for l in range(decoder_args.num_layer)], # 23
        [var_dict["transformer/decoder/layer_%d/ffn/LayerNorm/gamma:0" % l] for l in range(decoder_args.num_layer)], # 24
        [var_dict["transformer/decoder/layer_%d/ffn/conv1d/kernel:0" % l] for l in range(decoder_args.num_layer)], # 25
        [var_dict["transformer/decoder/layer_%d/ffn/conv1d/bias:0" % l] for l in range(decoder_args.num_layer)], # 26
        [var_dict["transformer/decoder/layer_%d/ffn/conv1d_1/kernel:0" % l] for l in range(decoder_args.num_layer)], # 27
        [var_dict["transformer/decoder/layer_%d/ffn/conv1d_1/bias:0" % l] for l in range(decoder_args.num_layer)], # 28
        var_dict['transformer/decoder/LayerNorm/beta:0'], # 28
        var_dict['transformer/decoder/LayerNorm/gamma:0'], # 29
        position_encoding_table, # 33
        embedding_table, # 30
        var_dict['transformer/decoder/dense/kernel:0'], # 31
        var_dict['transformer/decoder/dense/bias:0'], # 32
        max_seq_len=decoding_args.max_seq_len,
        beam_width=decoder_args.beam_width,
        head_num=decoder_args.head_num,
        size_per_head=decoder_args.size_per_head,
        inter_size=decoder_args.inter_size,
        num_layer=decoder_args.num_layer,
        start_id=decoding_args.start_id,
        end_id=decoding_args.end_id,
        beam_search_diversity_rate=decoding_args.beam_search_diversity_rate,
        top_k=decoding_args.top_k,
        top_p=decoding_args.top_p,
        temperature=1.0,
        len_penalty=1.0,
        repetition_penalty=1.0)

    if decoder_args.beam_width > 1:
        # [time, batch, beam] -> [batch, beam, time]
        output_ids = tf.transpose(output_ids, [1, 2, 0])
        # TODO(bhsueh) Remove useless outputs
        return output_ids, sequence_lengths, None, None, None
    else:
        # sampling path: [time, batch] -> [batch, time]
        output_ids = tf.transpose(output_ids, [1, 0])
        return output_ids, sequence_lengths, None, None, None
8043082 | <filename>test/api/gen/default/test_iproc.py
import unittest
import numpy as np
import pandas as pd
import xarray as xr
from xcube.api.gen.default.iproc import DefaultInputProcessor
from xcube.util.timecoord import get_time_in_days_since_1970
class DefaultInputProcessorTest(unittest.TestCase):
    """Unit tests for xcube's DefaultInputProcessor (requires the xcube package)."""

    def setUp(self):
        # Fresh processor per test so configure() calls cannot leak between tests.
        self.processor = DefaultInputProcessor()

    def test_props(self):
        """Check the static metadata and the configurable input reader."""
        self.assertEqual('default', self.processor.name)
        self.assertEqual('Single-scene NetCDF/CF inputs in xcube standard format', self.processor.description)
        self.assertEqual('netcdf4', self.processor.input_reader)
        self.processor.configure(input_reader="zarr")
        self.assertEqual('zarr', self.processor.input_reader)

    def test_reprojection_info(self):
        # noinspection PyNoneFunctionAssignment
        reprojection_info = self.processor.get_reprojection_info(create_default_dataset())
        self.assertIsNotNone(reprojection_info)

    def test_get_time_range(self):
        """The time range must be derivable from any supported time encoding."""
        ds = create_default_dataset(time_mode="time")
        t1, t2 = self.processor.get_time_range(ds)
        # A single 'time' coordinate yields a degenerate (instantaneous) range.
        self.assertEqual(get_time_in_days_since_1970("20100301T120000Z"), t1)
        self.assertEqual(get_time_in_days_since_1970("20100301T120000Z"), t2)
        ds = create_default_dataset(time_mode="time_bnds")
        t1, t2 = self.processor.get_time_range(ds)
        self.assertEqual(get_time_in_days_since_1970("20100301T000000Z"), t1)
        self.assertEqual(get_time_in_days_since_1970("20100301T235959Z"), t2)
        ds = create_default_dataset(time_mode="time_coverage")
        t1, t2 = self.processor.get_time_range(ds)
        self.assertEqual(get_time_in_days_since_1970("20100301T000000Z"), t1)
        self.assertEqual(get_time_in_days_since_1970("20100301T235959Z"), t2)
        ds = create_default_dataset(time_mode="start_stop_time")
        t1, t2 = self.processor.get_time_range(ds)
        self.assertEqual(get_time_in_days_since_1970("20100301T000000Z"), t1)
        self.assertEqual(get_time_in_days_since_1970("20100301T235959Z"), t2)
        # Without any time information the processor must fail loudly.
        ds = create_default_dataset(time_mode="no_time")
        with self.assertRaises(ValueError) as cm:
            self.processor.get_time_range(ds)
        self.assertEqual("invalid input: missing time coverage information in dataset", f"{cm.exception}")

    def test_pre_process(self):
        """pre_process() returns a new dataset for coordinate-encoded time, and
        passes attribute-encoded datasets through unchanged (identity)."""
        ds1 = create_default_dataset(time_mode="time")
        ds2 = self.processor.pre_process(ds1)
        self.assertIsNot(ds1, ds2)
        ds1 = create_default_dataset(time_mode="time_bnds")
        ds2 = self.processor.pre_process(ds1)
        self.assertIsNot(ds1, ds2)
        ds1 = create_default_dataset(time_mode="time_coverage")
        ds2 = self.processor.pre_process(ds1)
        self.assertIs(ds1, ds2)
        ds1 = create_default_dataset(time_mode="start_stop_time")
        ds2 = self.processor.pre_process(ds1)
        self.assertIs(ds1, ds2)

    def test_post_process(self):
        # post_process() is expected to be the identity for this processor.
        ds1 = create_default_dataset()
        ds2 = self.processor.post_process(ds1)
        self.assertIs(ds1, ds2)
def create_default_dataset(time_mode: str = "time_bnds"):
    """Build a global 0.05-degree test dataset with three zero-filled variables.

    *time_mode* selects how time information is encoded:

    * ``"time"`` -- a single 'time' coordinate;
    * ``"time_bnds"`` -- 'time' plus 'time_bnds' coordinates (default);
    * ``"time_coverage"`` / ``"start_stop_time"`` -- global attributes only;
    * anything else (e.g. ``"no_time"``) -- no time information at all.
    """
    w = 7200
    h = 3600
    res = 180. / h
    # cell-centre coordinates: first/last cells inset by half a cell size
    lon = np.linspace(-180 + 0.5 * res, 180 - 0.5 * res, w)
    lat = np.linspace(-90 + 0.5 * res, 90 - 0.5 * res, h)
    time = np.array([pd.to_datetime("20100301T120000Z")])
    time_bnds = np.array([[pd.to_datetime("20100301T000000Z"), pd.to_datetime("20100301T235959Z")]])
    coords = dict(
        lon=(("lon",), lon, dict(long_name="longitude", units="degrees_east")),
        lat=(("lat",), lat, dict(long_name="latitude", units="degrees_north")),
    )
    if time_mode == "time":
        coords.update(dict(
            time=(("time",), time,
                  dict(long_name="time", units="nanoseconds since 1970-01-01"))
        ))
        var_dims = ("time", "lat", "lon")
        var_shape = (1, h, w)
    elif time_mode == "time_bnds":
        coords.update(dict(
            time=(
                ("time",), time,
                dict(long_name="time", units="nanoseconds since 1970-01-01")),
            time_bnds=(
                ("time", "bnds"), time_bnds,
                dict(long_name="time bounds", units="nanoseconds since 1970-01-01")),
        ))
        var_dims = ("time", "lat", "lon")
        var_shape = (1, h, w)
    else:
        # no time dimension; time info (if any) goes into the attributes below
        var_dims = ("lat", "lon")
        var_shape = (h, w)
    analysed_sst = np.zeros(shape=var_shape, dtype=np.float32)
    analysis_error = np.zeros(shape=var_shape, dtype=np.float32)
    mask = np.zeros(shape=var_shape, dtype=np.int32)
    data_vars = dict(
        analysed_sst=(var_dims, analysed_sst),
        analysis_error=(var_dims, analysis_error),
        mask=(var_dims, mask),
    )
    # CF/Unidata-style global attributes mimicking an ESA SST CCI L4 product
    attrs = dict([
        ('title', 'ESA SST CCI OSTIA L4 product'),
        ('institution', 'ESACCI'),
        ('publisher_name', 'ESACCI'),
        ('processing_level', 'L4'),
        ('Metadata_Conventions', 'Unidata Dataset Discovery v1.0'),
        ('Conventions', 'CF-1.5, Unidata Observation Dataset v1.0'),
        ('geospatial_lat_max', 90.0),
        ('geospatial_lat_min', -90.0),
        ('geospatial_lon_max', 180.0),
        ('geospatial_lon_min', -180.0),
    ])
    if time_mode == "time_coverage":
        attrs.update(dict([
            ('time_coverage_start', '20100301T000000Z'),
            ('time_coverage_end', '20100301T235959Z'),
            ('time_coverage_duration', 'P1D'),
            ('time_coverage_resolution', 'P1D'),
        ]))
    elif time_mode == "start_stop_time":
        attrs.update(dict([
            ('start_time', '20100301T000000Z'),
            ('stop_time', '20100301T235959Z'),
        ]))
    return xr.Dataset(coords=coords, data_vars=data_vars, attrs=attrs)
| StarcoderdataPython |
43531 | <gh_stars>10-100
#!/usr/bin/env python
import roslib
roslib.load_manifest('turtlebot_actions')
import rospy
import os
import sys
import time
from turtlebot_actions.msg import *
from actionlib_msgs.msg import *
import actionlib
def main():
    """Send one chessboard-detection goal to the 'find_fiducial_pose' action server."""
    rospy.init_node("find_fiducial_pose_test")
    # Construct action ac
    rospy.loginfo("Starting action client...")
    action_client = actionlib.SimpleActionClient('find_fiducial_pose', FindFiducialAction)
    action_client.wait_for_server()  # blocks until the server is available
    rospy.loginfo("Action client connected to action server.")
    # Call the action
    rospy.loginfo("Calling the action server...")
    action_goal = FindFiducialGoal()
    action_goal.camera_name = "/camera/rgb"
    # 7x6 chessboard pattern with 27 mm squares
    action_goal.pattern_width = 7
    action_goal.pattern_height = 6
    action_goal.pattern_size = 0.027
    action_goal.pattern_type = 1 # CHESSBOARD
    # generous 50 s timeouts for execution and preemption
    if action_client.send_goal_and_wait(action_goal, rospy.Duration(50.0), rospy.Duration(50.0)) == GoalStatus.SUCCEEDED:
        rospy.loginfo('Call to action server succeeded')
    else:
        rospy.logerr('Call to action server failed')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3328392 | <filename>PersonalFinance/PersonalFinance/accountDb.py
import sqlite3
import clr
import uuid
class insertData(object):
    """Insert an accounting group name into the ``tblGroup`` table.

    Creates the table on first use; duplicate group names are skipped with a
    printed notice instead of a second insert.
    """

    def __init__(self, gName, db_path="test_finance.db"):
        """Insert *gName* into the SQLite database at *db_path*.

        The default path preserves the original hard-coded behavior for
        existing callers; *db_path* makes the class usable against other
        databases (and testable).
        """
        conn = sqlite3.connect(db_path)
        try:
            curs = conn.cursor()
            curs.execute('''CREATE TABLE IF NOT EXISTS tblGroup(
            guid VARCHAR(32) PRIMARY KEY NOT NULL,
            groupName VARCHAR(255) NOT NULL)''')
            conn.commit()
            # Parameterized query -- safe against quoting/injection issues.
            curs.execute("SELECT groupName FROM tblGroup WHERE groupName = ?", (gName,))
            result = curs.fetchone()
            if result:
                # BUG FIX: was ``Print(...)`` which raises NameError at runtime.
                print("Record already exists.")
            else:
                guid = str(uuid.uuid4().hex)
                curs.execute("INSERT INTO tblGroup (guid, groupName) values (? ,?)", (guid, gName))
                conn.commit()
        finally:
            # Always release the connection, even if an execute() fails.
            conn.close()
def main():
    """Seed the database with the standard set of accounting groups."""
    default_groups = (
        'Branch / Divisions', 'Capital Account', 'Current Assets', 'Current Liabilities',
        'Direct Expenses', 'Direct Incomes', 'Fixed Assets', 'Indirect Expenses',
        'Indirect Incomes', 'Investments', 'Loans (Liability)', 'Misc. Expenses (ASSET)',
        'Purchase Accounts', 'Sales Accounts', 'Suspense A/c',
    )
    for name in default_groups:
        insertData(name)


if __name__ == "__main__":
    main()
353222 | <reponame>nebulx29/LearnPython
def multi_print(number=3, word="Hallo"):
    """Print *word* on *number* consecutive lines, each prefixed by its index."""
    lines = (f"{i} {word}" for i in range(number))
    for line in lines:
        print(line)
# Demonstrate the various ways of combining positional, default and keyword args.
multi_print(1, "Hallo")
print("--")
multi_print()  # both defaults: three lines of "Hallo"
print("--")
multi_print(2)  # positional overrides only the count
print("--")
multi_print(word = "Welt")  # keyword overrides only the word
print("--")
multi_print(word = "Welt", number = 5)  # keyword args may appear in any order
print("--")
| StarcoderdataPython |
5008285 | from restio.state import ModelState, ModelStateMachine, Transition
class TestModelStateMachine:
    """Tests for restio's ModelStateMachine transition table (requires restio)."""

    def test_get(self):
        # GET on an UNBOUND model transitions it to CLEAN;
        # GET on an already DIRTY model must leave it DIRTY.
        next_state_existing = ModelStateMachine.transition(
            Transition.GET_OBJECT, ModelState.UNBOUND
        )
        next_state_missing = ModelStateMachine.transition(
            Transition.GET_OBJECT, ModelState.DIRTY
        )
        assert next_state_existing == ModelState.CLEAN
        assert next_state_missing == ModelState.DIRTY
| StarcoderdataPython |
6479277 | """ Import all the nodes for STL tree """
from .nodes import *
| StarcoderdataPython |
9674664 | """Config flow to configure the AIS WIFI Service component."""
from homeassistant import config_entries
from homeassistant.core import callback
from .const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_NAME
from homeassistant.components.ais_dom import ais_global
import time
import voluptuous as vol
import logging
import asyncio
G_WIFI_NETWORKS = []
_LOGGER = logging.getLogger(__name__)
DATA_AIS_WIFI_SERVICE_IMPL = "ais_wifi_service_flow_implementation"
@callback
def register_flow_implementation(hass, client_name, client_secret):
    """Register a ais wifi service implementation.

    Stores the name/secret pair under DATA_AIS_WIFI_SERVICE_IMPL in hass.data
    so the config flow can retrieve it later.
    """
    hass.data.setdefault(DATA_AIS_WIFI_SERVICE_IMPL, {})
    # Overwrites any previously registered implementation.
    hass.data[DATA_AIS_WIFI_SERVICE_IMPL] = {
        CONF_NAME: client_name,
        CONF_PASSWORD: client_secret,
    }
@callback
def configured_connections(hass):
    """Return a set of configured connections instances."""
    # CONF_NAME of every existing config entry for this domain.
    return set(
        entry.data.get(CONF_NAME) for entry in hass.config_entries.async_entries(DOMAIN)
    )
def scan_for_wifi(hass, loop) -> list:
    """One polling step of the WiFi scan.

    On the first call (loop == 0) triggers the Android scan script, then waits
    3 seconds and returns the networks published so far via the
    ``input_select.ais_android_wifi_network`` entity. The result is also cached
    in the module-global G_WIFI_NETWORKS.
    """
    global G_WIFI_NETWORKS
    _LOGGER.info("scan_for_wifi, no of try: " + str(loop))
    # send scan request to frame
    if loop == 0:
        # reset the current status
        hass.services.call("script", "ais_scan_android_wifi_network")
    # wait
    time.sleep(3)
    # and check the answer
    wifi_networks = hass.states.get("input_select.ais_android_wifi_network")
    G_WIFI_NETWORKS = wifi_networks.attributes["options"]
    return G_WIFI_NETWORKS
def connect_to_wifi(network, password) -> None:
    """Ask the AIS frame (local REST service) to join the given WiFi network.

    *network* is a ';'-separated scan-result string whose first field is the
    SSID and whose last field is 'MAC: <bssid>'. Errors are logged, not raised.
    """
    import requests
    # send add request to frame
    url = ais_global.G_HTTP_REST_SERVICE_BASE_URL.format("127.0.0.1")
    # NOTE(review): this logs the WiFi password in clear text -- consider redacting.
    _LOGGER.info("connect_to_wifi: " + network + " pass: " + password)
    try:
        ssid = network.split(";")[0]
        # assumes the network type is the third field from the end of the
        # scan-result string -- TODO confirm against the frame's scan format
        wifi_type = network.split(";")[-3]
        bssid = network.split(";")[-1].replace("MAC:", "").strip()
        requests.post(
            url + "/command",
            json={
                "WifiConnectToSid": ssid,
                "WifiNetworkPass": password,
                "WifiNetworkType": wifi_type,
                "bssid": bssid,
            },
            timeout=5,
        )
    except Exception as e:
        _LOGGER.error("connect_to_wifi: " + str(e))
def check_wifi_connection(hass, loop) -> str:
    """Wait 3 s, then return the SSID currently reported by the
    ``sensor.ais_wifi_service_current_network_info`` entity ('' when unknown)."""
    global G_WIFI_NETWORKS  # declared but not used in this function
    _LOGGER.info("check_wifi_connection, no of try: " + str(loop))
    # wait
    time.sleep(3)
    # and check the answer
    net_info = hass.states.get("sensor.ais_wifi_service_current_network_info")
    ssid = net_info.attributes.get("ssid", "")
    return ssid
@callback
def configured_service(hass):
    """Return a set of the configured hosts."""
    # NOTE(review): this yields the literal string "ais_wifi_service" once per
    # entry (so at most a one-element set), not actual hosts -- confirm intent.
    return set(
        "ais_wifi_service" for entry in hass.config_entries.async_entries(DOMAIN)
    )
@config_entries.HANDLERS.register("ais_wifi_service")
class AisWiFilowHandler(config_entries.ConfigFlow):
"""AIS WiFi config flow."""
VERSION = 1
    def __init__(self):
        """Initialize zone configuration flow."""
        # No per-flow state is needed yet.
        pass
    async def async_step_discovery(self, discovery_info):
        """Handle a discovered AIS WiFi integration."""
        # Abort if other flows in progress or an entry already exists
        if self._async_in_progress() or self._async_current_entries():
            return self.async_abort(reason="single_instance_allowed")
        # Show selection form
        return self.async_show_form(step_id="user")
    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        # Delegate straight to the confirmation step.
        return await self.async_step_confirm(user_input)
    async def async_step_confirm(self, user_input=None):
        """Handle a flow start - confirmation from user"""
        errors = {}
        # Any submission (even empty) counts as confirmation.
        if user_input is not None:
            return await self.async_step_one(user_input=None)
        return self.async_show_form(step_id="confirm", errors=errors)
    async def async_step_one(self, user_input=None):
        """Step one: choose between a hidden-SSID connection and a network scan."""
        errors = {}
        if user_input is not None:
            if "connect_to_hidden_ssid" in user_input:
                if user_input["connect_to_hidden_ssid"]:
                    return await self.async_step_connect_to_hidden_wifi(user_input=None)
                else:
                    return await self.async_step_search_wifi(user_input=None)
        # First display (or missing answer): ask the hidden-SSID question.
        data_schema = vol.Schema(
            {vol.Optional("connect_to_hidden_ssid", default=False): bool}
        )
        return self.async_show_form(
            step_id="one", errors=errors, data_schema=data_schema
        )
    async def async_step_search_wifi(self, user_input=None):
        """Step - scan the wifi.

        Resets the network list, then polls up to 9 times; proceeds to the
        connect step as soon as more than one option (i.e. at least one real
        network besides the empty placeholder) is available.
        """
        errors = {}
        # reset wifi's list
        self.hass.async_run_job(
            self.hass.services.async_call(
                "input_select",
                "set_options",
                {
                    "entity_id": "input_select.ais_android_wifi_network",
                    "options": [ais_global.G_EMPTY_OPTION],
                },
            )
        )
        for x in range(0, 9):
            result = await self.hass.async_add_executor_job(scan_for_wifi, self.hass, x)
            _LOGGER.info("Szukam sieci WiFi: " + str(result))
            if len(result) > 1:
                return await self.async_step_connect_to_wifi(user_input=None)
            else:
                errors = {"base": "search_failed"}
        # nothing found: fall back to the step-one form with the error shown
        data_schema = vol.Schema(
            {vol.Optional("connect_to_hidden_ssid", default=False): bool}
        )
        return self.async_show_form(
            step_id="one", errors=errors if errors else {}, data_schema=data_schema
        )
async def async_step_connect_to_wifi(self, user_input=None):
"""Step four - connect to wifi"""
wifi_network = self.hass.states.get("input_select.ais_android_wifi_network")
networks = wifi_network.attributes["options"]
# remove empty option
if networks[0] == ais_global.G_EMPTY_OPTION:
networks.pop(0)
errors = {}
if len(networks) == 0:
errors["general"] = "wifi_error"
return self.async_abort(reason="search_failed")
if user_input is None:
data_schema = vol.Schema(
{
vol.Required("networks", default=networks[0]): vol.In(
list(networks)
),
vol.Optional(CONF_PASSWORD): str,
vol.Optional("rescan_wifi", default=False): bool,
}
)
else:
# check if user want to rescan
if "rescan_wifi" in user_input:
if user_input["rescan_wifi"]:
return await self.async_step_one(user_input=None)
password = ""
if CONF_PASSWORD in user_input:
password = <PASSWORD>_input[CONF_PASSWORD]
data_schema = vol.Schema(
{
vol.Required("networks", default=user_input["networks"]): vol.In(
list(networks)
),
vol.Optional(CONF_PASSWORD, default=password): str,
vol.Optional("rescan_wifi", default=False): bool,
}
)
# try to connect
if errors == {}:
# send a request to frame to add the new device
network = user_input["networks"]
text = "Łączę z siecią " + network.split(";")[0]
self.hass.async_run_job(
self.hass.services.async_call(
"ais_ai_service", "say_it", {"text": text}
)
)
await self.hass.async_add_executor_job(
connect_to_wifi, network, password
)
# request was correctly send, now check and wait for the answer
for x in range(0, 7):
result = await self.hass.async_add_executor_job(
check_wifi_connection, self.hass, x
)
_LOGGER.info("Spawdzam połączenie z siecią WiFi: " + str(result))
if result == network.split(";")[0]:
# remove if exists
exists_entries = [
entry.entry_id for entry in self._async_current_entries()
]
if exists_entries:
await asyncio.wait(
[
self.hass.config_entries.async_remove(entry_id)
for entry_id in exists_entries
]
)
# return await self.async_step_connect_to_wifi(user_input=None)
return self.async_create_entry(title="WiFi", data=user_input)
else:
errors = {"base": "conn_failed"}
# check wifi list len - without empty option
l_net = str(len(networks) - 1)
return self.async_show_form(
step_id="connect_to_wifi",
errors=errors if errors else {},
data_schema=data_schema,
description_placeholders={"wifi_number_info": l_net},
)
async def async_step_connect_to_hidden_wifi(self, user_input=None):
"""Step four - connect to hidden wifi"""
errors = {}
description_placeholders = {}
networks_types = ["WEP", "WPA", "WPA2", "Open"]
if user_input is None:
data_schema = vol.Schema(
{
vol.Required("networks_types", default="WPA"): vol.In(
list(networks_types)
),
vol.Required("hidden_ssid"): str,
vol.Optional(CONF_PASSWORD): str,
vol.Optional("rescan_wifi", default=False): bool,
}
)
else:
if user_input["rescan_wifi"]:
return await self.async_step_one(user_input=None)
else:
network_type = user_input["networks_types"]
password = ""
if CONF_PASSWORD in user_input:
password = <PASSWORD>_input[CONF_PASSWORD]
data_schema = vol.Schema(
{
vol.Required(
"networks_types", default=user_input["networks_types"]
): vol.In(list(networks_types)),
vol.Required(
"hidden_ssid", default=user_input["hidden_ssid"]
): str,
vol.Optional(CONF_PASSWORD, default=password): str,
vol.Optional("rescan_wifi", default=False): bool,
}
)
hidden_ssid = user_input["hidden_ssid"]
# custom validation
# if the netowrk type is selected not Open then password need to be provided
if network_type != "Open" and password == "":
errors = {"base": "no_password_to_protected_wifi"}
description_placeholders = {"selected_net_type": network_type}
# try to connect
if errors == {}:
# send a request to frame to add the new device
text = "Łączę z siecią " + hidden_ssid
self.hass.async_run_job(
self.hass.services.async_call(
"ais_ai_service", "say_it", {"text": text}
)
)
wni = (
hidden_ssid
+ "; "
+ "moc mnieznana (-10); "
+ network_type
+ "; MAC: 00:00:00:00:00:00"
)
await self.hass.async_add_executor_job(
connect_to_wifi, wni, password
)
# request was correctly send, now check and wait for the answer
for x in range(0, 9):
result = await self.hass.async_add_executor_job(
check_wifi_connection, self.hass, x
)
_LOGGER.info(
"Spawdzam połączenie z siecią WiFi: " + str(result)
)
if result == hidden_ssid:
# remove if exists
exists_entries = [
entry.entry_id
for entry in self._async_current_entries()
]
if exists_entries:
await asyncio.wait(
[
self.hass.config_entries.async_remove(entry_id)
for entry_id in exists_entries
]
)
# return await self.async_step_connect_to_wifi(user_input=None)
return self.async_create_entry(
title="WiFi", data=user_input
)
else:
errors = {"base": "conn_failed"}
return self.async_show_form(
step_id="connect_to_hidden_wifi",
errors=errors if errors else {},
data_schema=data_schema,
description_placeholders=description_placeholders,
)
| StarcoderdataPython |
3293219 | from flask import render_template, redirect, url_for, request, flash
from flask_login import login_user, login_required, logout_user
from .forms import RegistrationForm, LoginForm
from . import auth
from ..import db
from ..models import User
# registration route
# NOTE(review): the route string looks like a template path and contains a typo
# ('reqister'); '/register' was probably intended -- confirm before changing,
# since any links pointing at this URL would need to change too.
@auth.route('templates/auth/reqister',methods=['GET','POST'])
def register():
    """
    Function that registers the users
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        # NOTE(review): password handling (hashing) is assumed to happen inside
        # the User model -- confirm.
        user = User(email = form.email.data, username = form.username.data, password = form.password.data)
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('auth.login'))
    title = "Registration"
    return render_template('auth/register.html', registration_form = form, title = title)
# Login function
@auth.route('/login',methods=['GET','POST'])
def login():
    """
    Function that checks if the form is validated
    """
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email = login_form.email.data).first()
        if user is not None and user.verify_password(login_form.password.data):
            # 'remember' controls the persistent session cookie
            login_user(user,login_form.remember.data)
            # NOTE(review): redirecting to an unvalidated 'next' query parameter
            # is an open-redirect risk -- consider validating the target URL.
            return redirect(request.args.get('next')or url_for('main.index'))
        flash('Invalid Username or Password')
    title = "60sec Pitch|Login"
    return render_template('auth/login.html', login_form = login_form, title = title)
#logout function
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and redirect to the landing page."""
    logout_user()
    return redirect(url_for('main.index'))
3311998 | """
Example usage:
python event_average --wt <int value representing the window limit in seconds>
python event_average --wt 3
OR
DEFAULT
python event_average
"""
from __future__ import division
import random
import time
import sys
import argparse
class AverageWindow(object):
    """Collect random (timestamp, event_id, value) samples for a fixed time
    window and report the average value per event id."""

    def __init__(self, window_limit):
        # window_limit: collection window length in seconds
        self.window_limit = window_limit
        self.data_list = []  # raw (time_stamp, event_id, value) tuples
        self.avg_dict = {}   # event_id -> average value

    def flush_data(self):
        """Discard all collected samples and computed averages."""
        self.data_list = []
        self.avg_dict = {}

    @staticmethod
    def get_value():
        """Produce one synthetic sample: (now, random id in 1..10, random value)."""
        event_id = random.randint(1, 10)
        value = random.randint(1, 100) * random.random()
        return time.time(), event_id, value

    def process(self):
        """Busy-collect samples until window_limit seconds have elapsed.

        Note: this is a tight loop -- it generates samples as fast as possible
        for the whole window.
        """
        start_time = time.time()
        while True:
            time_stamp, event_id, value = self.get_value()
            if time_stamp - start_time >= self.window_limit:
                return
            self.data_list.append((time_stamp, event_id, value))

    def get_aggregate_data(self):
        """Return {event_id: (sum_of_values, sample_count)} for the collected data."""
        data_dict = {}
        for each_data in self.data_list:
            event_id = each_data[1]
            value = each_data[2]
            if event_id in data_dict.keys():
                cur_sum = data_dict[event_id][0]
                cur_counter = data_dict[event_id][1]
                data_dict[event_id] = (cur_sum + value, cur_counter + 1)
            else:
                data_dict[event_id] = (value, 1)
        return data_dict

    def get_average(self):
        """Fill avg_dict with the mean value per event id."""
        data_dict = self.get_aggregate_data()
        for event_id, sum_counter in data_dict.items():
            self.avg_dict[event_id] = sum_counter[0] / sum_counter[1]

    def print_average(self):
        """Print one 'Average for <id> is <avg>' line per event id."""
        for event_id, avg in self.avg_dict.items():
            print("Average for {} is {}".format(event_id, avg))

    @staticmethod
    def main(window_limit):
        """Run one collection window (default 60 s when None) and print results."""
        if window_limit is None:
            window_limit = 60
        awd = AverageWindow(window_limit)
        awd.process()
        awd.get_average()
        awd.print_average()
        awd.flush_data()
awd.flush_data()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Find average per event over a window.')
    parser.add_argument('--wt', metavar='wt', type=int,
                        help='An integer value for max limit on the data collection window')
    args = parser.parse_args()
    # --wt is optional; AverageWindow.main falls back to 60 s when it is None
    AverageWindow.main(args.wt)
| StarcoderdataPython |
1835292 | #!/usr/bin/python2.7
from __future__ import division
import os
import urllib, cStringIO
import pymongo as pm
import numpy as np
import scipy.stats as stats
import pandas as pd
import json
import re
from PIL import Image
import base64
import sys
'''
To generate main dataframe from pymongo database, run, e.g.:
exp1 = ['run3_size4_waiting', 'run4_generalization']
exp2 = ['run5_submitButton']
python generate_refgame_dataframe.py --iterationName run3_size4_waiting
python generate_refgame_dataframe.py --iterationName run4_generalization
python generate_refgame_dataframe.py --iterationName run5_submitButton
'''
# directory & file hierarchy
proj_dir = os.path.abspath('../../..')
analysis_dir = os.getcwd()
results_dir = os.path.join(proj_dir,'results')
plot_dir = os.path.join(results_dir,'plots')
csv_dir = os.path.join(results_dir,'csv')
exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments'))
sketch_dir = os.path.abspath(os.path.join(proj_dir,'sketches'))
# set vars
auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user
pswd = auth.values[0][0]
user = 'sketchloop'
host = 'stanford-cogsci.org' ## cocolab ip address
# have to fix this to be able to analyze from local
import pymongo as pm
conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1')
db = conn['3dObjects']
coll = db['graphical_conventions']
# list of researcher mturk worker ID's to ignore
jefan = ['A1MMCS8S8CTWKU','A1MMCS8S8CTWKV','A1MMCS8S8CTWKS']
hawkrobe = ['A1BOIDKD33QSDK']
megsano = ['A1DVQQLVZR7W6I']
researchers = jefan + hawkrobe + megsano
# Assign variables within imported analysis helpers
import df_generation_helpers as h
if sys.version_info[0]>=3:
from importlib import reload
## add helpers to python path
if os.path.join(proj_dir,'analysis','python') not in sys.path:
sys.path.append(os.path.join(proj_dir,'analysis','python'))
if not os.path.exists(results_dir):
os.makedirs(results_dir)
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
if not os.path.exists(csv_dir):
os.makedirs(csv_dir)
if __name__ == '__main__':
    # Parse the experiment iteration, pull the matching refgame data from
    # MongoDB, build the master dataframe and write the CSV outputs.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--iterationName', type=str, \
        help='options: run3_size4_waiting, run4_generalization, run5_submitButton',
        default='run5_submitButton')
    args = parser.parse_args()
    iterationName = args.iterationName
    ## get total number of stroke and clickedObj events in the collection as a whole
    S = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'stroke'}]}).sort('time')
    C = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'clickedObj'}]}).sort('time')
    ## get list of all candidate games
    all_games = coll.find({'iterationName':iterationName}).distinct('gameid')
    ## get list of complete and valid games
    ## BUG FIX: was 'iterationName1', an undefined name (NameError); the parsed
    ## --iterationName argument is what must be passed through.
    complete_games = h.get_complete_and_valid_games(all_games,coll,iterationName,researchers=researchers, tolerate_undefined_worker=False)
    ## generate actual dataframe and get only valid games (filtering out games with low accuracy, timeouts)
    D = h.generate_dataframe(coll, complete_games, iterationName, csv_dir)
    ## filter crazies and add column
    D = h.find_crazies(D)
    ## add features for recognition experiment
    D = h.add_recog_session_ids(D)
    D = h.add_distractors_and_shapenet_ids(D)
    ## if generalization column still capitalized, fix it
    try:
        D = D.rename(index=str, columns={"Generalization": "generalization"})
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt etc. still propagate
        pass
    ## filter out single low accuracy game
    D = D[D['low_acc'] != True]
    ## filter out games with missing data
    missing_data_games = D[D['drawDuration'].isna()]['gameID'].values
    # use '~' for boolean negation; unary '-' on boolean Series is deprecated
    D = D[~D['gameID'].isin(missing_data_games)]
    ## assign extra columns to keep track of category/subset/condition combinations
    if iterationName=='run5_submitButton':
        D = D.assign(category_subset = pd.Series(D['category'] + D['subset']))
        D = D.assign(category_subset_condition = pd.Series(D['category'] + D['subset'] + D['condition']))
    # save out master dataframe
    D.to_csv(os.path.join(csv_dir, 'graphical_conventions_group_data_{}.csv'.format(iterationName)), index=False)
    ## write out bis dataframe to results dir
    h.save_bis(D, csv_dir, iterationName)
| StarcoderdataPython |
3465972 | <filename>Code/SVD.py
# -*- coding: utf-8 -*-
import numpy as np
from scipy.sparse.linalg import eigs
def calculate_SandV(A):
    '''
    Calculate right singular vectors V and obtain homography matrix H
    '''
    # Eigen-decomposition of A^T A yields the right singular vectors of A.
    A_Transpose_A = np.matmul(np.transpose(A), A)
    # NOTE(review): eigs(..., 8) returns at most 8 eigenpairs of the 9x9 matrix,
    # so V below has only 8 columns and V[:,8] raises IndexError. The homography
    # needs the eigenvector of the *smallest* (9th) eigenvalue; a dense
    # decomposition (np.linalg.eigh / np.linalg.svd) would provide it.
    eigen_values, eigen_vectors = eigs(A_Transpose_A, 8)
    # sort the eigenpairs in descending order of eigenvalue
    idx = eigen_values.argsort()[::-1]
    sorted_eigen_values = np.real(np.round(eigen_values[idx]))
    sorted_eigen_vectors = np.real(eigen_vectors[:,idx])
    # singular values are the square roots of the eigenvalues of A^T A
    S_matrix = np.diag(np.sqrt(sorted_eigen_values))
    V = sorted_eigen_vectors
    H = np.dot(np.reshape(V[:,8],(3,3)),-1)
    # NOTE(review): the *unsorted* eigen_values are returned while V is sorted;
    # callers must not assume matching order.
    return S_matrix, V, eigen_values, H
def calculateU(A, V_matrix, eigen_values):
    '''
    Calculate diagonal matrix S and left singular vectors U
    '''
    '''
    A_A_Transpose = np.matmul(A, np.transpose(A))
    eigen_values, eigen_vectors = eigs(A_A_Transpose, 7)
    idx = eigen_values.argsort()[::-1]
    # sorted_eigen_values = eigen_values[idx]
    sorted_eigen_vectors = eigen_vectors[:,idx]
    U_matrix = sorted_eigen_vectors
    '''
    # Left singular vectors: columns of A.V scaled by 1/sigma_i.
    U_matrix = np.matmul(A, V_matrix)
    # NOTE(review): the loop bound is shape[0] (rows) while columns [:, i] are
    # indexed, and eigen_values arrive *unsorted* from eigs although V's
    # columns are sorted -- the scaling may pair the wrong eigenvalue with a
    # column; confirm against calculate_SandV.
    for i in range(U_matrix.shape[0]):
        U_matrix[:,i] = U_matrix[:,i]/np.sqrt(eigen_values[i])
    return U_matrix
def main():
    """Demo: build the DLT matrix for four point correspondences and check
    that A is reproduced from the computed U, S and V factors."""
    # four point correspondences (x, y) -> (x', y')
    x1, y1 = 5, 5
    xp1, yp1 = 100, 100
    x2, y2 = 150, 5
    xp2, yp2 = 200, 80
    x3, y3 = 150, 150
    xp3, yp3 = 220, 80
    x4, y4 = 5, 150
    xp4, yp4 = 100, 200
    # standard 8x9 DLT system: two rows per correspondence
    A = np.array([[-x1,-y1,-1,0,0,0,x1*xp1,y1*xp1,xp1],
                  [0,0,0,-x1,-y1,-1,x1*yp1,y1*yp1,yp1],
                  [-x2,-y2,-1,0,0,0,x2*xp2,y2*xp2,xp2],
                  [0,0,0,-x2,-y2,-1,x2*yp2,y2*yp2,yp2],
                  [-x3,-y3,-1,0,0,0,x3*xp3,y3*xp3,xp3],
                  [0,0,0,-x3,-y3,-1,x3*yp3,y3*yp3,yp3],
                  [-x4,-y4,-1,0,0,0,x4*xp4,y4*xp4,xp4],
                  [0,0,0,-x4,-y4,-1,x4*yp4,y4*yp4,yp4]], dtype = np.float64)
    S_matrix, V_matrix, eigen_values, H = calculate_SandV(A)
    U_matrix = calculateU(A, V_matrix, eigen_values)
    # sanity check: A should be approximately U.S.V^T
    A_estimate = np.matmul(U_matrix, np.matmul(S_matrix, np.transpose(V_matrix)))
    print(np.round(A_estimate))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6412347 | <gh_stars>0
def ficha(nome='<desconhecido>', gols=0):
    """Print a player's scoring summary (unknown player with 0 goals by default)."""
    resumo = f'O jogador {nome} fez {gols} gol(s) no campeonato.'
    print(resumo)
nome = input('Nome do jogador: ')
gols = input('Número de gols: ')
if gols.isnumeric and gols != '':
gols = int(gols)
else:
gols = 0
if nome != '':
ficha(nome, gols)
else:
ficha(gols=gols) | StarcoderdataPython |
7218 | <reponame>mentaal/r_map
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='r_map', # Required
version='0.9.0', # Required
description='A data structure for working with register map information', # Required
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/mentaal/r_map', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='<NAME>', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='<EMAIL>', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
keywords='register bitfield registermap', # Optional
packages=['r_map'],
python_requires='>=3.6',
project_urls={ # Optional
'Bug Reports': 'https://github.com/mentaal/r_map/issues',
'Source': 'https://github.com/mentaal/r_map',
},
)
| StarcoderdataPython |
117166 | import torch
def flipud(tensor):
    """
    Reverse the row order of a tensor (flip along its first dimension).

    Parameters
    ----------
    tensor
        a tensor at least two-dimensional

    Returns
    -------
    Tensor
        a copy of *tensor* with the first dimension reversed
    """
    first_dim = 0
    return torch.flip(tensor, dims=[first_dim])
| StarcoderdataPython |
3481171 | """
Generating a txt file with the network architecture used in the experiment
Denoising_in_superresolution/src
@author: <NAME>
"""
import os
from tqdm import tqdm
import numpy as np
from matplotlib import pyplot as plt
import torch
import models as models
import lib.model_setup as model_setup
import lib.utils as utils
import lib.arguments as arguments
def main():
    """
    Main logic for getting the network architecture and saving it as txt file
    """
    # relevant paths and experiment data
    exp_path = arguments.get_directory_argument()
    exp_data = utils.load_configuration_file(exp_path)
    model = model_setup.setup_model(exp_data=exp_data, exp_path=exp_path)
    network_file = os.path.join(exp_path, "network_architecture.txt")
    # str(model) relies on the model's __repr__ to describe the architecture
    with open(network_file, "w") as file:
        file.write(str(model))
    return


if __name__ == "__main__":
    os.system("clear")  # clear the terminal before running
    main()
| StarcoderdataPython |
4812471 | <reponame>p1r473/opencanary<filename>opencanary/__init__.py
__version__="0.6.3" | StarcoderdataPython |
4832616 | <reponame>kullo/server
import os
from fabric.api import *
env.hosts = ['kullo2.kullo.net']
env.user = 'root'
KULLOSERVER_DIR = '/opt/kulloserver'
@task(default=True)
def deploy():
    """Build locally, push configs, swap in the new binary and restart the service."""
    local('make')
    #TODO run tests
    with cd(KULLOSERVER_DIR):
        execute(update_preregistrations)
        execute(update_hooks)
        execute(update_message_templates)
        # Upload next to the live binary; keep the previous one as -old
        # so 'rollback' can restore it.
        put('kulloserver', 'kulloserver-new', mode=0755)
        run('rm kulloserver-old', warn_only=True)
        run('mv kulloserver kulloserver-old', warn_only=True)
        run('mv kulloserver-new kulloserver')
        run('systemctl stop kulloserver', warn_only=True)
        #TODO migrations
        run('systemctl start kulloserver')
@task
def rollback():
    """Swap the previously deployed binary back in and restart the service."""
    with cd(KULLOSERVER_DIR):
        run('mv kulloserver kulloserver-new && mv kulloserver-old kulloserver')
        run('systemctl stop kulloserver', warn_only=True)
        #TODO migrations
        run('systemctl start kulloserver')
@task
def update_preregistrations():
    """Upload the preregistrations CSV to the server's config directory."""
    with cd(KULLOSERVER_DIR):
        put('config/preregistrations.csv', 'config/preregistrations.csv')
@task
def update_hooks():
    """Upload the hook scripts and make them executable."""
    with cd(KULLOSERVER_DIR):
        put('config/hooks', 'config')
        run('chmod +x config/hooks/*')
@task
def update_message_templates():
    """Upload the message templates to the server's config directory."""
    with cd(KULLOSERVER_DIR):
        put('config/message_templates', 'config')
@task
def update_goose():
    """Build the goose migration tool locally and upload it to the server."""
    # requires GOPATH to be set in the local environment
    gopath = os.environ['GOPATH']
    local('make goose')
    with cd(KULLOSERVER_DIR):
        put(gopath + '/bin/goose', 'goose', mode=0755)
8198512 | from vaccontrib.covid import get_reduced_vaccinated_susceptible_contribution_matrix_covid, get_reduced_population_contribution_matrix_covid
from vaccontrib.covid import get_next_generation_matrix_covid
from vaccontrib.main import get_reduced_vaccinated_susceptible_eigenvector, get_reduced_vaccinated_susceptible_contribution_matrix, get_next_generation_matrix_from_matrices, convert_4d_matrix_to_2d_block
from vaccontrib.linalg import get_spectral_radius_and_eigenvector
import numpy as np
# Exploratory script: compare next-generation matrices / contribution
# matrices for two vaccination scenarios and plot the growth dynamics.
np.set_printoptions(linewidth=100)
_a = np.array  # shorthand for building the small example matrices
# Contact matrix between the two (vaccinated/unvaccinated) groups.
gamma = _a([[ 1., 1.],
            [ 10., 1.]])
# Susceptibility-reduction matrix and population sizes.
S = _a([ [ 0.3, 0.7] ,
         [ 1.5, 0.5] ])
N = _a([ 1., 2.])
# s: susceptibility reduction, r: transmissibility reduction,
# a/b: relative infection/recovery modifiers (here all ones).
s = _a([ [0., 0.8],
         [0., 0.2] ])
r = _a([ [0., 0.8],
         [0., 0.2] ])
a = _a([ [1., 1],
         [1., 1] ])
b = _a([ [1., 1],
         [1., 1] ])
R0 = 2
# Scenario 0: baseline next-generation matrix, eigenvector and contributions.
K0 = get_next_generation_matrix_from_matrices(R0, gamma, S, N, s, r, a, b)
y0 = get_reduced_vaccinated_susceptible_eigenvector(K0)
C0 = get_reduced_vaccinated_susceptible_contribution_matrix(K0)
# NOTE(review): the next two assignments are immediately overwritten by the
# "= 1" lines below, so the sqrt(1/6) scenario is dead code — presumably a
# leftover experiment; confirm which scenario was intended.
s[:,0] = 1-np.sqrt(1/6)
r[:,0] = 1-np.sqrt(1/6)
s[:,0] = 1
r[:,0] = 1
# Scenario 1: fully effective first column (s = r = 1).
K1 = get_next_generation_matrix_from_matrices(R0, gamma, S, N, s, r, a, b)
y1 = get_reduced_vaccinated_susceptible_eigenvector(K1)
C1 = get_reduced_vaccinated_susceptible_contribution_matrix(K1)
# Flatten the 4-d matrices into 2-d block form and get full eigenvectors.
_K0 = convert_4d_matrix_to_2d_block(K0)
_K1 = convert_4d_matrix_to_2d_block(K1)
_, y0_full = get_spectral_radius_and_eigenvector(_K0)
_, y1_full = get_spectral_radius_and_eigenvector(_K1)
print("K_reduced =\n", K0.sum(axis=0).sum(axis=0))
print("K =\n", convert_4d_matrix_to_2d_block(K0))
print("C =\n", C0)
print("y_reduced =\n", y0)
print("y0_full =\n", y0_full)
print("R_eff =", C0.sum(axis=0))
print("C_normed =\n", C0/C0.sum())
print()
#print("with R =", R1)
print("K_reduced =\n", K1.sum(axis=0).sum(axis=0))
print("K =\n", convert_4d_matrix_to_2d_block(K1))
print("C =\n", C1)
print("y_reduced =\n", y1)
print("y0_full =\n", y1_full)
print("R_eff =", C1.sum(axis=0))
print("C_normed =\n", C1/C1.sum())
print()
# Iterate the next-generation map from a uniform start and record the
# total growth factor, the vaccinated-to-vaccinated growth, and the
# alignment of the state with the leading eigenvector.
y = np.array([1,1,1,1.])
growth = [1.]
VV_growth = [1.]
ytot = sum(y)
ytots = [ytot]
y_d_eig = []
for K in [_K0,_K1]:
    _, eig = get_spectral_radius_and_eigenvector(K)
    for g in range(10):
        y_d_eig.append((y/np.linalg.norm(y)).dot(eig/np.linalg.norm(eig)))
        ytot_old = ytot
        yold = y.copy()
        y = K.dot(y)
        ytot = y.sum()
        ytots.append(ytot)
        growth.append(ytot/ytot_old)
        # indices [1, 3] select the vaccinated compartments — TODO confirm
        VV_growth.append(y[[1,3]].sum()/yold[[1,3]].sum())
    y_d_eig.append((y/np.linalg.norm(y)).dot(eig/np.linalg.norm(eig)))
import matplotlib.pyplot as pl
# 2x2 panel: growth factor, VV growth, total size, eigenvector alignment.
fig, ax = pl.subplots(2,2,figsize=(8,4))
ax[0,0].plot(growth)
ax[0,0].plot([0,len(growth)-1],[C1.sum()]*2,':')
ax[0,0].plot([0,len(growth)-1],[C0.sum()]*2,':')
ax[1,0].plot(VV_growth)
ax[1,0].plot([0,len(VV_growth)-1],[C1[1,1]]*2,':')
ax[1,0].plot([0,len(VV_growth)-1],[C0[1,1]]*2,':')
ax[0,1].plot(ytots)
ax[0,0].set_yscale('log')
ax[1,1].plot(y_d_eig)
pl.show()
| StarcoderdataPython |
108338 | import yaml
import inspect
from pcl2depth import velo_points_2_pano
import scipy.io
import numpy as np
import os
from os.path import join
import sys
from tqdm import tqdm
import matplotlib.pyplot as plt
import cv2
# Make the repository parent directory importable (script lives in a subdir).
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
sys.path.insert(1, parentdir)
# get config
with open(join(parentdir, 'config.yaml'), 'r') as f:
    cfg = yaml.safe_load(f)
# Select Platform
# platform = 'dataset_creation_handheld'
platform = 'dataset_creation_drone' # UAV
exp_names = cfg[platform]['all_exp_files']
pendrive_dir = cfg[platform]['dataroot']
# FOV strings look like "(lo,hi)"; strip the parens and parse to int tuples.
v_fov = tuple(map(int, cfg[platform]['pcl2depth']['v_fov'][1:-1].split(',')))
h_fov = tuple(map(int, cfg[platform]['pcl2depth']['h_fov'][1:-1].split(',')))
nb_overlay_frames = cfg[platform]['pcl2depth']['nb_overlay_frames']
save_dir = pendrive_dir
# For each recorded experiment: load per-frame mmWave point clouds, overlay
# neighbouring frames to densify them, project to a panoramic depth image
# and write one PNG per timestamp.
for BAG_DATE in exp_names:
    print('********* Processing {} *********'.format(BAG_DATE))
    ROS_SAVE_DIR = join(save_dir, BAG_DATE)
    MMWAVE_SAVE_PATH = os.path.join(*[ROS_SAVE_DIR, 'mmwave_middle'])
    print(" Creating folder for mmWave depth images {}".format(MMWAVE_SAVE_PATH))
    if not os.path.exists(MMWAVE_SAVE_PATH):
        os.makedirs(MMWAVE_SAVE_PATH)
    MMWAVE_READ_PATH = os.path.join(*[ROS_SAVE_DIR, 'mmwave_middle_pcl'])
    mmwave_file_list = os.listdir(MMWAVE_READ_PATH)
    mmwave_file_list.sort()
    # File names are "<timestamp>.mat"; drop the 4-char extension.
    mmwave_ts_list = [int(i[:-4]) for i in mmwave_file_list]
    ###################
    # Read all frames #
    ###################
    frames = []
    for file in mmwave_file_list:
        mat = scipy.io.loadmat(os.path.join(MMWAVE_READ_PATH, file))
        pc = np.array(mat['frame'])
        # Keep points whose Euclidean range lies inside [dist_min, dist_max].
        upper_row_filter = (pc[:, 0] ** 2 + pc[:, 1] ** 2 + pc[:, 2] ** 2) ** 0.5 < cfg[platform]['pcl2depth']['mmwave_dist_max']
        lower_row_filter = (pc[:, 0] ** 2 + pc[:, 1] ** 2 + pc[:, 2] ** 2) ** 0.5 > cfg[platform]['pcl2depth']['mmwave_dist_min']
        row_filter = np.bitwise_and(upper_row_filter, lower_row_filter)
        frames.append(pc[row_filter, :])
    ###################
    # Overlay frames  #
    ###################
    frames = np.array(frames)
    # overlay frames accounting for sparse pcl
    overlay_frames = list()
    # frames_array = np.array(frames)
    for i in range(frames.shape[0]):
        if i < nb_overlay_frames:
            tmp = frames[i: i + nb_overlay_frames]
        else:
            tmp = frames[i - nb_overlay_frames:i]
        try:
            overlay_frames.append(np.concatenate(tmp))
        except:
            # NOTE(review): bare except hides the real failure (likely an
            # empty slice); narrow to ValueError and log the index.
            print('error')
    ###################
    # Save Images     #
    ###################
    for timestamp, frame in tqdm(zip(mmwave_ts_list, overlay_frames), total=len(mmwave_ts_list)):
        pano_img = velo_points_2_pano(frame,
                                      cfg[platform]['pcl2depth']['v_res'],
                                      cfg[platform]['pcl2depth']['h_res'],
                                      v_fov,
                                      h_fov,
                                      cfg[platform]['pcl2depth']['max_v'],
                                      depth=True)
        try:
            # Upscale x4 for readability before writing.
            pano_img = cv2.resize(pano_img, (pano_img.shape[1] * 4, pano_img.shape[0] * 4))
            pc_path = os.path.join(MMWAVE_SAVE_PATH, str(timestamp) + '.png')
            cv2.imwrite(pc_path, pano_img)
        except:
            # NOTE(review): bare except; an empty frame yields an image
            # cv2.resize cannot handle, so a blank placeholder is written.
            width = (h_fov[1] - h_fov[0] + 2) * 2
            height = (v_fov[1] - v_fov[0] + 2) * 2
            blank_image = np.zeros((height, width), np.uint8)
            pc_path = os.path.join(MMWAVE_SAVE_PATH, str(timestamp) + '.png')
            print('No point in the frame, empty image at: ' + pc_path)
            cv2.imwrite(pc_path, blank_image)
| StarcoderdataPython |
9673164 | <filename>2017/day24/port.py
#!/usr/bin/env python
import sys
def chainsum(chain):
    """Print the total port strength and length of a completed chain."""
    strength = sum(a + b for a, b in chain)
    print("DONE %d %d" % (strength, len(chain)))
def findlink(part1, part2):
    """Return the free port of *part2* given that it connects to *part1*.

    Returns None implicitly when the two components share no port value.
    """
    prev_a, prev_b = part1
    if part2[0] in (prev_a, prev_b):
        return part2[1]
    if part2[1] in (prev_a, prev_b):
        return part2[0]
def findnext(parts, nextlink):
    """Return every remaining component with a port equal to *nextlink*."""
    return [part for part in parts if nextlink in part]
def buildchain(start, parts, chain):
    """Extend *chain* with *start* and recursively explore continuations.

    Every maximal chain reached is reported through chainsum().  The
    recursion copies *parts*/*chain* so sibling branches stay independent.
    """
    chain.append(start)
    if len(chain) == 1:
        # First component: continue from its larger port (the 0-port is
        # the anchor end).
        nextlink = max(start)
    else:
        nextlink = findlink(chain[-2], chain[-1])
    parts.remove(start)
    candidates = findnext(parts, nextlink)
    if not candidates:
        return chainsum(chain)
    for candidate in candidates:
        buildchain(candidate, parts[:], chain[:])
def both(parts):
    """Start a chain search from every component that has a 0-port."""
    for seed in [p for p in parts if 0 in p]:
        buildchain(seed, parts[:], [])
def main(args):
    """Read "a/b" component lines from the file named in args[0] and
    print every completed chain via both().

    Fix: the original left the input file handle open (bare
    ``open(...).readlines()``); a ``with`` block now guarantees closure.
    """
    with open(args[0]) as handle:
        lines = [line.strip() for line in handle]
    parts2 = [x.split("/") for x in lines]
    parts = [(int(x, 10), int(y, 10)) for x, y in parts2]
    both(parts)
# CLI entry point: expects the puzzle input path as the first argument.
if __name__ == "__main__":
    main(sys.argv[1:])

# eof
| StarcoderdataPython |
1657980 | <reponame>cuiliang0302/myblog
# Generated by Django 3.1.3 on 2020-11-22 14:57
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: replaces the single Catalogue model with
# a two-level FirstCatalogue / SecondCatalogue hierarchy for notes.
# Do not hand-edit the operations; generate a follow-up migration instead.
class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0014_auto_20201122_1420'),
    ]

    operations = [
        migrations.CreateModel(
            name='FirstCatalogue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='名称')),
                ('order', models.IntegerField(verbose_name='序号')),
                ('note', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='blog.note', verbose_name='笔记名称')),
            ],
            options={
                'verbose_name': '笔记一级目录',
                'verbose_name_plural': '笔记一级目录',
            },
        ),
        migrations.CreateModel(
            name='SecondCatalogue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField(verbose_name='序号')),
                ('content', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='blog.content', verbose_name='笔记名称')),
                ('father', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='blog.firstcatalogue', verbose_name='一级目录名称')),
            ],
            options={
                'verbose_name': '笔记二级目录',
                'verbose_name_plural': '笔记二级目录',
            },
        ),
        migrations.DeleteModel(
            name='Catalogue',
        ),
    ]
| StarcoderdataPython |
6403938 | <reponame>avaddon/django-polymorphic-tree
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from django.db.models import Q
from django.test import TestCase
from polymorphic_tree.managers import PolymorphicMPTTModelManager
from .models import *
class PolymorphicTests(TestCase):
    """
    Test Suite, largely derived from django-polymorphic tests

    TODO: potentially port these tests from django_polymorphic.tests

    test_foreignkey_field()
    test_onetoone_field()
    test_manytomany_field()
    test_extra_method()
    test_instance_of_filter()
    test_polymorphic___filter()
    test_delete()
    test_combine_querysets()
    test_multiple_inheritance()
    test_relation_base()
    test_user_defined_manager()
    test_manager_inheritance()
    test_queryset_assignment()
    test_proxy_models()
    test_proxy_get_real_instance_class()
    test_content_types_for_proxy_models()
    test_proxy_model_inheritance()
    test_custom_pk()
    test_fix_getattribute()
    test_parent_link_and_related_name()
    test_child_type_validation_in_memory()
    """

    def create_model2abcd(self):
        """
        Create the chain of objects of Model2,
        this is reused in various tests.
        """
        # Four sibling roots: each ends up in its own MPTT tree (tree_id 1-4),
        # which the expected repr strings below rely on.
        Model2A.objects.create(field1='A1')
        Model2B.objects.create(field1='B1', field2='B2')
        Model2C.objects.create(field1='C1', field2='C2', field3='C3')
        Model2D.objects.create(field1='D1', field2='D2', field3='D3', field4='D4')

    def test_simple_inheritance(self):
        # Polymorphic query: each row comes back as its most derived class.
        self.create_model2abcd()
        objects = list(Model2A.objects.all())
        self.assertEqual(repr(objects[0]), '<Model2A: id 1, parent None, field1 "A1", lft 1, rght 2, tree_id 1, level 0>')
        self.assertEqual(repr(objects[1]), '<Model2B: id 2, parent None, field1 "B1", lft 1, rght 2, tree_id 2, level 0, field2 "B2">')
        self.assertEqual(repr(objects[2]), '<Model2C: id 3, parent None, field1 "C1", lft 1, rght 2, tree_id 3, level 0, field2 "C2", field3 "C3">')
        self.assertEqual(repr(objects[3]), '<Model2D: id 4, parent None, field1 "D1", lft 1, rght 2, tree_id 4, level 0, field2 "D2", field3 "D3", field4 "D4">')

    def test_manual_get_real_instance(self):
        self.create_model2abcd()
        o = Model2A.objects.non_polymorphic().get(field1='C1')
        self.assertEqual(repr(o.get_real_instance()), '<Model2C: id 3, parent None, field1 "C1", lft 1, rght 2, tree_id 3, level 0, field2 "C2", field3 "C3">')

    def test_non_polymorphic(self):
        # non_polymorphic() must return every row as the base class Model2A.
        self.create_model2abcd()
        objects = list(Model2A.objects.all().non_polymorphic())
        self.assertEqual(repr(objects[0]), '<Model2A: id 1, parent None, field1 "A1", lft 1, rght 2, tree_id 1, level 0>')
        self.assertEqual(repr(objects[1]), '<Model2A: id 2, parent None, field1 "B1", lft 1, rght 2, tree_id 2, level 0>')
        self.assertEqual(repr(objects[2]), '<Model2A: id 3, parent None, field1 "C1", lft 1, rght 2, tree_id 3, level 0>')
        self.assertEqual(repr(objects[3]), '<Model2A: id 4, parent None, field1 "D1", lft 1, rght 2, tree_id 4, level 0>')

    def test_get_real_instances(self):
        self.create_model2abcd()
        qs = Model2A.objects.all().non_polymorphic()
        # from queryset
        objects = qs.get_real_instances()
        self.assertEqual(repr(objects[0]), '<Model2A: id 1, parent None, field1 "A1", lft 1, rght 2, tree_id 1, level 0>')
        self.assertEqual(repr(objects[1]), '<Model2B: id 2, parent None, field1 "B1", lft 1, rght 2, tree_id 2, level 0, field2 "B2">')
        self.assertEqual(repr(objects[2]), '<Model2C: id 3, parent None, field1 "C1", lft 1, rght 2, tree_id 3, level 0, field2 "C2", field3 "C3">')
        self.assertEqual(repr(objects[3]), '<Model2D: id 4, parent None, field1 "D1", lft 1, rght 2, tree_id 4, level 0, field2 "D2", field3 "D3", field4 "D4">')
        # from a manual list
        objects = Model2A.objects.get_real_instances(list(qs))
        self.assertEqual(repr(objects[0]), '<Model2A: id 1, parent None, field1 "A1", lft 1, rght 2, tree_id 1, level 0>')
        self.assertEqual(repr(objects[1]), '<Model2B: id 2, parent None, field1 "B1", lft 1, rght 2, tree_id 2, level 0, field2 "B2">')
        self.assertEqual(repr(objects[2]), '<Model2C: id 3, parent None, field1 "C1", lft 1, rght 2, tree_id 3, level 0, field2 "C2", field3 "C3">')
        self.assertEqual(repr(objects[3]), '<Model2D: id 4, parent None, field1 "D1", lft 1, rght 2, tree_id 4, level 0, field2 "D2", field3 "D3", field4 "D4">')

    def test_translate_polymorphic_q_object(self):
        self.create_model2abcd()
        q = Model2A.translate_polymorphic_Q_object(Q(instance_of=Model2C))
        objects = Model2A.objects.filter(q)
        # instance_of=Model2C matches Model2C and its subclass Model2D.
        self.assertEqual(repr(objects[0]), '<Model2C: id 3, parent None, field1 "C1", lft 1, rght 2, tree_id 3, level 0, field2 "C2", field3 "C3">')
        self.assertEqual(repr(objects[1]), '<Model2D: id 4, parent None, field1 "D1", lft 1, rght 2, tree_id 4, level 0, field2 "D2", field3 "D3", field4 "D4">')

    def test_base_manager(self):
        def base_manager(model):
            return (
                type(model._base_manager),
                model._base_manager.model
            )
        self.assertEqual(base_manager(PlainA), (models.Manager, PlainA))
        self.assertEqual(base_manager(PlainB), (models.Manager, PlainB))
        self.assertEqual(base_manager(PlainC), (models.Manager, PlainC))

        # Unlike standard polymorphic, the manager persists everywhere.
        # This makes sure that the features of MPTT are also available everywhere.
        self.assertEqual(base_manager(Model2A), (PolymorphicMPTTModelManager, Model2A))
        self.assertEqual(base_manager(Model2B), (PolymorphicMPTTModelManager, Model2B))
        self.assertEqual(base_manager(Model2C), (PolymorphicMPTTModelManager, Model2C))
        self.assertEqual(base_manager(One2OneRelatingModel), (PolymorphicMPTTModelManager, One2OneRelatingModel))
        self.assertEqual(base_manager(One2OneRelatingModelDerived), (PolymorphicMPTTModelManager, One2OneRelatingModelDerived))

    def test_instance_default_manager(self):
        # Django 1.10.1 changed how the default manager is exposed, hence
        # the version-dependent helper.
        if django.VERSION >= (1, 10, 1):
            def show_default_manager(instance):
                return "{0} {1}".format(
                    repr(type(instance.__class__.objects)),
                    repr(instance.__class__.objects.model)
                )
        else:
            def show_default_manager(instance):
                return "{0} {1}".format(
                    repr(type(instance.__class__._default_manager)),
                    repr(instance.__class__._default_manager.model)
                )
        plain_a = PlainA(field1='C1')
        plain_b = PlainB(field2='C1')
        plain_c = PlainC(field3='C1')
        model_2a = Model2A(field1='C1')
        model_2b = Model2B(field2='C1')
        model_2c = Model2C(field3='C1')
        self.assertEqual(show_default_manager(plain_a), "<class 'django.db.models.manager.Manager'> <class 'polymorphic_tree.tests.models.PlainA'>")
        self.assertEqual(show_default_manager(plain_b), "<class 'django.db.models.manager.Manager'> <class 'polymorphic_tree.tests.models.PlainB'>")
        self.assertEqual(show_default_manager(plain_c), "<class 'django.db.models.manager.Manager'> <class 'polymorphic_tree.tests.models.PlainC'>")
        self.assertEqual(show_default_manager(model_2a), "<class 'polymorphic_tree.managers.PolymorphicMPTTModelManager'> <class 'polymorphic_tree.tests.models.Model2A'>")
        self.assertEqual(show_default_manager(model_2b), "<class 'polymorphic_tree.managers.PolymorphicMPTTModelManager'> <class 'polymorphic_tree.tests.models.Model2B'>")
        self.assertEqual(show_default_manager(model_2c), "<class 'polymorphic_tree.managers.PolymorphicMPTTModelManager'> <class 'polymorphic_tree.tests.models.Model2C'>")
class MPTTTests(TestCase):
    """
    Tests relating to tree structure of polymorphic objects

    TODO: port some tests from https://github.com/django-mptt/django-mptt/blob/master/tests/myapp/tests.py
    """

    def test_sibling_methods(self):
        """ https://github.com/edoburu/django-polymorphic-tree/issues/37 """
        root_node = Base.objects.create(field_b='root')
        sibling_a = Base.objects.create(field_b='first', parent=root_node)
        sibling_b = ModelX.objects.create(field_b='second', field_x='ModelX', parent=root_node)
        sibling_c = ModelY.objects.create(field_b='third', field_y='ModelY', parent=root_node)

        # sanity checks
        self.assertEqual(list(root_node.get_descendants()), [sibling_a, sibling_b, sibling_c])
        self.assertEqual(list(sibling_a.get_siblings()), [sibling_b, sibling_c])
        self.assertEqual(list(sibling_b.get_siblings()), [sibling_a, sibling_c])
        self.assertEqual(list(sibling_c.get_siblings()), [sibling_a, sibling_b])

        # When looking for siblings, it should be done from the base model,
        # not and not the child model type (which may not find all instances)
        self.assertEqual(sibling_a.get_previous_sibling(), None)
        self.assertEqual(sibling_a.get_next_sibling(), sibling_b)
        self.assertEqual(sibling_b.get_previous_sibling(), sibling_a)
        self.assertEqual(sibling_b.get_next_sibling(), sibling_c)
        self.assertEqual(sibling_c.get_previous_sibling(), sibling_b)
        self.assertEqual(sibling_c.get_next_sibling(), None)

    def test_get_ancestors(self):
        """ https://github.com/edoburu/django-polymorphic-tree/issues/32 """
        # Three-level chain with mixed polymorphic child types.
        root_node = Base.objects.create(field_b='root')
        child = ModelX.objects.create(field_b='child', field_x='ModelX', parent=root_node)
        grandchild = ModelY.objects.create(field_b='grandchild', field_y='ModelY', parent=child)
        self.assertEqual(list(root_node.get_ancestors()), [])
        self.assertEqual(list(child.get_ancestors()), [root_node])
        self.assertEqual(list(grandchild.get_ancestors()), [root_node, child])
        self.assertEqual(list(root_node.get_ancestors(include_self=True)), [root_node])
        self.assertEqual(list(child.get_ancestors(include_self=True)), [root_node, child])
        self.assertEqual(list(grandchild.get_ancestors(include_self=True)), [root_node, child, grandchild])
        self.assertEqual(list(root_node.get_ancestors(ascending=True)), [])
        self.assertEqual(list(child.get_ancestors(ascending=True)), [root_node])
        self.assertEqual(list(grandchild.get_ancestors(ascending=True)), [child, root_node])

    def test_is_ancestor_of(self):
        root_node = Base.objects.create(field_b='root')
        child = ModelX.objects.create(field_b='child', field_x='ModelX', parent=root_node)
        grandchild = ModelY.objects.create(field_b='grandchild', field_y='ModelY', parent=child)
        self.assertTrue(root_node.is_ancestor_of(child))
        self.assertTrue(root_node.is_ancestor_of(grandchild))
        self.assertFalse(child.is_ancestor_of(root_node))
        self.assertTrue(child.is_ancestor_of(grandchild))
        self.assertFalse(grandchild.is_ancestor_of(child))
        self.assertFalse(grandchild.is_ancestor_of(root_node))

    def test_node_type_checking(self):
        root_node = Base.objects.create(field_b='root')
        child = ModelX.objects.create(field_b='child', field_x='ModelX', parent=root_node)
        grandchild = ModelY.objects.create(field_b='grandchild', field_y='ModelY', parent=child)
        self.assertFalse(root_node.is_child_node())
        self.assertFalse(root_node.is_leaf_node())
        self.assertTrue(root_node.is_root_node())
        self.assertTrue(child.is_child_node())
        self.assertFalse(child.is_leaf_node())
        self.assertFalse(child.is_root_node())
        self.assertTrue(grandchild.is_child_node())
        self.assertTrue(grandchild.is_leaf_node())
        self.assertFalse(grandchild.is_root_node())

    def test_child_type_validation_in_memory(self):
        # Parent type restricts allowed child classes; clean() must reject
        # a disallowed child before anything hits the database.
        root_node = ModelRestrictedChildren.objects.create(field_b='root')
        valid_child = ModelX(field_b='valid_child', field_x='ModelX', parent=root_node)
        valid_child.clean()
        with self.assertRaises(ValidationError) as context:
            invalid_child = ModelY(field_b='invalid_child', field_y='ModelY', parent=root_node)
            invalid_child.clean()
        msg = context.exception.args[0]['parent']
        self.assertIn('a model restricted children does not allow model y as a child!', msg)

    def test_tree_manager(self):
        # Having the tree manager correct is absolutely essential,
        # so our move validation is also triggered.
        self.assertIsInstance(Model2A()._tree_manager, PolymorphicMPTTModelManager)
        self.assertIsInstance(Model2B()._tree_manager, PolymorphicMPTTModelManager)
        self.assertIsInstance(Model2C()._tree_manager, PolymorphicMPTTModelManager)

    def test_can_be_root(self):
        node = ModelMustBeChild(field8="foo")
        self.assertRaisesMessage(ValidationError, 'This node type should have a parent', lambda: node.clean())
        parent = ModelMustBeChildRoot(field8="test")
        parent.clean()
        parent.save()
        node.parent = parent
        node.clean()
        node.save()
| StarcoderdataPython |
6602202 | <filename>code/wordclock/__init__.py
'''
The wordclock package
'''
# Distribution metadata; kept in the package so setup tooling and the
# running application share a single source of truth.
__version__ = '2'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__url__ = 'https://github.com/marksidell/wordclock'
__license__ = '(c) 2021 <NAME>'
__description__ = 'The Word Clock'
11223451 | <gh_stars>0
# -*- coding: utf-8 -*-
# @Time : 2019/7/8 15:19
# @Author : <NAME>
| StarcoderdataPython |
317304 | from sys import argv
from io import FileIO
# Path of the sample file the exercise script below reads.
Test_file_path = "ex15_sample.txt"
def print_all(file: FileIO):
    """Dump the remaining contents of *file* to stdout."""
    contents = file.read()
    print(contents)
def rewind(file: FileIO):
    """Move the read position back to the very start of *file*."""
    file.seek(0, 0)
def print_line(count: int, file: FileIO):
    """Print the next line of *file*, prefixed with its 1-based number.

    Note: readline() keeps the trailing newline and print() adds another,
    so the output is double-spaced (intentional in the original exercise).
    """
    line = file.readline()
    print("%d:\t %s" % (count, line))
# Demonstration: print the whole sample file, rewind it, then echo its
# first three lines with line numbers.  A context manager guarantees the
# file is closed even if an exception occurs mid-demo (the original only
# closed it on the happy path), and the repeated manual increments are
# replaced by a loop.
with open(Test_file_path, "r") as current_file:
    print("First let's print the whole file:")
    print_all(current_file)
    print("Now let's rewind the file:")  # fixed output typo: "rewint"
    rewind(current_file)
    print("Let's print three lines:")
    for line_count in range(1, 4):
        print_line(line_count, current_file)
1879641 | # Copyright 2018 Owkin, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from subprocess import call
from utils.setup_utils import registerIdentities, registerUsers, generateGenesis, enrollWithFiles, genTLSCert, writeFile
from utils.common_utils import create_directory
SUBSTRA_PATH = os.getenv('SUBSTRA_PATH', '/substra')
def generateMSPandTLS(node, org, msp_dir, admincerts=False):
    """Create server/client TLS material for *node* and enroll it to build
    its local MSP directory.

    Args:
        node (dict): peer or orderer description from the conf file.
        org (dict): owning organisation configuration.
        msp_dir (str): destination directory for the node's local MSP.
        admincerts (bool): include admin certificates in the MSP
            (required by orderers).
    """
    ##################################################################################################################
    # Although a peer may use the same TLS key and certificate file for both inbound and outbound TLS,               #
    # we generate a different key and certificate for inbound and outbound TLS simply to show that it is permissible #
    ##################################################################################################################
    # Node peer/orderer mounted volume, see docker_utils 'Client/Server TLS' binded volume.
    tls_setup_dir = node['tls']['dir']['external']
    # create external folders (client and server)
    tls_server_dir = os.path.join(tls_setup_dir, node['tls']['server']['dir'])
    tls_client_dir = os.path.join(tls_setup_dir, node['tls']['client']['dir'])
    # Generate server TLS cert and key pair in container
    genTLSCert(node, org,
               cert_file=os.path.join(tls_server_dir, node['tls']['server']['cert']),
               key_file=os.path.join(tls_server_dir, node['tls']['server']['key']),
               ca_file=os.path.join(tls_server_dir, node['tls']['server']['ca']))
    # Generate client TLS cert and key pair for the peer CLI (will be used by external tools)
    # in a binded volume
    genTLSCert(node, org,
               cert_file=os.path.join(tls_client_dir, node['tls']['client']['cert']),
               key_file=os.path.join(tls_client_dir, node['tls']['client']['key']),
               ca_file=os.path.join(tls_client_dir, node['tls']['client']['ca']))
    # Enroll the node to get an enrollment certificate and set up the core's local MSP directory for starting node
    enrollWithFiles(node, org, msp_dir, admincerts=admincerts)
def init_org(conf, enrollmentAdmin):
    """Build the local MSP for every peer of the organisation and seed it
    with the enrolled admin's certificate (needed to install chaincode)."""
    for peer in conf['peers']:
        setup_peer_msp_dir = os.path.join(conf['core_dir']['internal'], peer['name'], 'msp')
        generateMSPandTLS(peer, conf, setup_peer_msp_dir, admincerts=False)
        # copy the admincerts from the admin user for being able to install chaincode
        # https://stackoverflow.com/questions/48221810/what-is-difference-between-admincerts-and-signcerts-in-hyperledge-fabric-msp
        # https://lists.hyperledger.org/g/fabric/topic/17549225#1250
        # https://github.com/hyperledger/fabric-sdk-go/blob/master/internal/github.com/hyperledger/fabric/msp/mspimpl.go#L460
        # https://jira.hyperledger.org/browse/FAB-3840
        admin = conf['users']['admin']
        filename = os.path.join(setup_peer_msp_dir, 'admincerts', '%s-cert.pem' % admin['name'])
        writeFile(filename, enrollmentAdmin._cert)
def init_orderer(conf):
    """Enroll every orderer node and build its local MSP, admin certs
    included (the orderer needs them to launch)."""
    internal_core_dir = conf['core_dir']['internal']
    for node in conf['orderers']:
        msp_dir = os.path.join(internal_core_dir, node['name'], 'msp')
        generateMSPandTLS(node, conf, msp_dir, admincerts=True)
def init(conf, enrollmentAdmin):
    """Initialise whichever node types the configuration declares, then
    prepare the broadcast directory and the genesis block."""
    has_peers = 'peers' in conf
    has_orderers = 'orderers' in conf
    if has_peers:
        init_org(conf, enrollmentAdmin)
    if has_orderers:
        init_orderer(conf)
    create_directory(conf['broadcast_dir']['external'])
    generateGenesis(conf)
# Entry point: load the generated configuration, register CA identities
# and users, build all MSP/TLS material plus the genesis block, then
# touch a marker file so other containers know setup succeeded.
if __name__ == '__main__':
    # NOTE(review): the file handle from open() is never closed; wrap in
    # a `with` block in a future change.
    conf = json.load(open(f'{SUBSTRA_PATH}/conf.json', 'r'))
    registerIdentities(conf)
    enrollmentAdmin = registerUsers(conf)
    init(conf, enrollmentAdmin)
    print('Finished setup', flush=True)
    call(['touch', conf['misc']['setup_success_file']])
| StarcoderdataPython |
6454028 | <reponame>tavioalves/computerscience-psu
# My first program - <NAME>
# Prints a short self-introduction using one triple-quoted string literal.
print("""<NAME>
<EMAIL>
OS X Yosemite
Computer Engineer""")
| StarcoderdataPython |
8089681 | <gh_stars>0
"""
Class to save/restore configuration from file
"""
# CM0004
from __future__ import annotations
import ast
import base64
import logging
import pickle
import xml
import xml.etree.ElementTree as ET
import xml.dom.minidom as DOM
from pathlib import Path
from typing import ClassVar, Dict, List, Optional, Tuple, Union
# Module-level logger; NullHandler keeps the library silent unless the
# host application configures logging.
MODULELOG = logging.getLogger(__name__)
MODULELOG.addHandler(logging.NullHandler())
class ConfigurationSettings:
"""
Maintains a dictionary that can be saved to an
xml file. It can restore the values reading the
file.
Examples:
.. code-block:: python
>>> cfg = ConfigurationSettings('file.xml')
>>> value = "Configuration Value"
>>> value
'Configuration Value'
>>> cfg.set('key', value)
>>> cfg.get('key')
'Configuration Value'
>>> cfg.saveToFile()
>>> cfg2 = ConfigurationSettings('file.xml')
>>> cfg2.readFromFile()
>>> value2 = cfg2.get('key')
>>> value2
'Configuration Value'
Next example using a class
.. code-block:: python
>>> from vsutillib.files import ConfigurationSettings
>>> class Abc():
... def __init__(self, param):
... self.value = param
... def getValue(self):
... return self.value
...
>>> cfg = ConfigurationSettings('file.xml')
>>> c = Abc(13)
>>> c.getValue()
13
>>> cfg.set('class', c, valueType='pickle')
>>> cfg.saveToFile()
>>> cfg2 = ConfigurationSettings('file.xml')
>>> cfg2.readFromFile()
>>> c2 = cfg2.get('class')
>>> c2.getValue()
13
keys have to be strings
It works with basic data types:
basic data types and lists, dictionaries,
tuples or sets of these data types
- bool, bytes, numbers: [int, float, complex], strings, set
theese types can be saved but not in lists, dictionaries,
tuples or sets
- range, bytearray, frozenset, function
Binary data or lists, dictionaries, tuple and sets with
binary data can be tried with **valueType='pickle'**. As seen
in the second example using a class is possible but it does not
always work. The scope of the class ConfigurationSettings is
for saving simple items that serve as a configuration setting
for an application in different operating systems. Is recommended
not to use values that have to be pickled this will make it system
dependent also it may be dependent of the Python version. That
said binary values have to be througly tested.
Args:
configFile (str): xml file name or Path object
Raises:
ValueError: Raised when the xml file can not be created
when first run and the file still don't exist
"""
# log state
__log = False
# data that can be treated as literal
# theese are human readable easier
# to use on different systems
_literal: ClassVar[List[str]] = [
"bool",
"bytes",
"complex",
"float",
"int",
"str",
"dict",
"list",
"tuple",
"set",
]
# pickable types are python specific maybe
# even version specific
_pickable: ClassVar[List[str]] = [
"range",
"bytearray",
"frozenset",
"function",
"pickle",
]
@classmethod
def classLog(cls, setLogging: Optional[Union[bool, None]] = ...) -> bool:
"""
get/set logging at class level
every class instance will log
unless overwritten
Args:
setLogging (bool):
- True class will log
- False turn off logging
- None returns current Value
Returns:
bool:
returns the current value set
"""
if setLogging is not None:
if isinstance(setLogging, bool):
cls.__log = setLogging
return cls.__log
def __init__(self, configFile: Union[str, Path] = None) -> None:
self._config = {}
self._configType = {}
self._configFile = configFile
# for iteration
self._current = 0
self._len = 0
# global logging override
self.__log = None
    def __contains__(self, value: object) -> bool:
        # Membership test on the configuration keys ("key" in cfg).
        return value in self._config

    def __getitem__(self, key: str) -> object:
        # Raw dict access; unlike get() this raises KeyError when missing.
        return self._config[key]

    def __setitem__(self, key: str, value: object) -> None:
        # NOTE(review): bypasses set(), so _configType and _len are NOT
        # updated; prefer set() for anything that will be saved to XML.
        self._config[key] = value

    def __iter__(self) -> ConfigurationSettings:
        # The instance is its own iterator; state lives in _current/_len.
        return self

    def __next__(self) -> Tuple[int, object]:
        # Yields [key, value] pairs.  NOTE(review): rebuilds
        # list(self._config) on every step (O(n^2) overall) and trusts the
        # cached _len, which goes stale if the dict is mutated without
        # going through set()/delete().
        if self._current >= self._len:
            self._current = 0
            raise StopIteration
        else:
            self._current += 1
            key = list(self._config)[self._current - 1]
            return [key, self._config[key]]

    def __bool__(self) -> bool:
        # True when at least one setting is stored.
        return bool(len(self._config))

    def __len__(self) -> int:
        # Number of stored settings.
        return len(self._config)
@property
def log(self) -> bool:
"""
class property can be used to override the class global
logging setting
Returns:
bool:
True if logging is enable
False otherwise
"""
if self.__log is not None:
return self.__log
return ConfigurationSettings.classLog()
    @property
    def configDictionary(self) -> Dict[str, object]:
        """Direct (mutable) view of the underlying key -> value mapping."""
        return self._config
    @log.setter
    def log(self, value: bool) -> None:
        """set instance log variable"""
        # Only bool or None (meaning "defer to the class flag") are
        # accepted; anything else is silently ignored.
        if isinstance(value, bool) or value is None:
            self.__log = value
def set(self, key: str, value: object, valueType: Optional[str] = None) -> None:
"""
set value at key in dictionary
Args:
key (str): configuration element
value (obj): element value the type is as explained
valueType (str): specify the type to use to save value
"""
if isinstance(key, str):
self._config[key] = value
_valueType = type(value).__name__
if valueType is not None:
_valueType = valueType
if not ((_valueType in self._pickable) or (_valueType in self._literal)):
s = str(_valueType)
if self.log:
MODULELOG.debug("CFG0003: value type not supported - %s", str(s))
raise TypeError("value type not supported - {}".format(s))
self._configType[key] = _valueType
self._len = len(self._config)
else:
s = str(key)
if self.log:
MODULELOG.debug("CFG0002: key must be a string - %s", str(s))
raise TypeError("key must be a string - {}".format(s))
def get(self, key: str) -> Union[object, None]:
"""
get value from dictionary
Args:
key (str): configuration element
Returns:
object as explained
"""
if key in self._config:
return self._config[key]
if self.log:
s = str(key)
MODULELOG.debug("CFG0001: key not found - %s", s)
return None
def delete(self, key: str) -> Union[object, None]:
"""
delete remove value from dictionary
Args:
key (str): configuration element
Returns:
object removed None if key not found
"""
retVal = self._config.pop(key, None)
if retVal is None:
if self.log:
s = str(key)
MODULELOG.debug("CFG0004: key not found - %s", s)
return retVal
def toXML(
    self, root: Optional[ET.Element] = None, name: Optional[str] = None
) -> ET.Element:
    """
    Serialize the configuration to XML.

    Each entry becomes a <ConfigSetting id=... type=...> element whose text
    is either the value's repr (for literal-evaluable types) or a
    base64-encoded pickle rendered through str() as "b'...'" -- the exact
    form fromXML() recovers with ast.literal_eval.

    Args:
        root (xml.etree.ElementTree.Element): optional document root the
            configuration element is appended to
        name (str): tag for the configuration element (default "Config")

    Returns:
        the new configuration element when root is None, otherwise root
    """
    if name is None:
        name = "Config"
    config = ET.Element(name)
    if root is not None:
        root.append(config)
    # NOTE(review): assumes the class defines __iter__ yielding (key, value)
    # pairs -- confirm against the full class definition.
    for key, value in self:
        valueType = type(value).__name__
        if key in self._configType:
            # Prefer the explicitly registered type (e.g. "pickle").
            valueType = self._configType[key]
        tValue = None
        if valueType in self._literal:
            tValue = value
        elif valueType in self._pickable:
            p = pickle.dumps(value)
            u = base64.b64encode(p)
            tValue = u  # bytes; str() below renders this as "b'...'"
        configElement = ET.SubElement(config, "ConfigSetting")
        configElement.attrib = {"id": key, "type": valueType}
        configElement.text = str(tValue)
    if root is None:
        return config
    return root
def fromXML(self, xmlDoc: ET, name: Optional[str] = None):
    """
    Replace the current configuration with the settings found in *xmlDoc*.

    The *name* parameter selects which configuration set to read, allowing
    several sets to coexist in one XML document.

    Args:
        xmlDoc (xml.etree.ElementTree): xml document containing
            configuration data
        name (str): parent tag of the ConfigSetting elements
            (default "Config")
    """
    self._config = {}
    if name is None:
        searchIn = "Config/ConfigSetting"
    else:
        searchIn = name + "/ConfigSetting"
    for setting in xmlDoc.findall(searchIn):
        key = setting.attrib["id"]
        valueType = setting.attrib["type"]
        if valueType == "str":
            # Strings are stored verbatim; literal_eval would require quoting.
            value = setting.text
        elif valueType in self._pickable:
            # Text looks like "b'...'": literal_eval restores the bytes,
            # which are then base64-decoded and unpickled (see toXML()).
            u = ast.literal_eval(setting.text)
            value = pickle.loads(base64.b64decode(u))
        else:
            value = ast.literal_eval(setting.text)
        # Re-registering through set() also rebuilds _configType.
        self.set(key, value, valueType=valueType)
def xmlPrettyPrint(self, root: Optional[ET.Element] = None) -> str:
    """
    Pretty-print the configuration XML.

    Args:
        root (xml.etree.ElementTree.Element): optional element to print;
            when omitted, the current configuration is serialized first

    Returns:
        str: the indented XML document, or None when *root* is given but
        is not an Element
    """
    if root is not None:
        # Use the ET alias for the isinstance check: the previous
        # fully-qualified xml.etree.ElementTree.Element reference relied on
        # the bare "xml" package name being bound at module level, which
        # risks a NameError depending on the import style used.
        if not isinstance(root, ET.Element):
            return None
    else:
        root = self.toXML()
    xmlDoc = DOM.parseString(ET.tostring(root))
    xmlPretty = xmlDoc.toprettyxml(indent="    ")
    return xmlPretty
def setConfigFile(self, xmlFile: Union[str, Path]):
    """
    Register the file used by saveToFile()/readFromFile().

    Relative paths (no drive/root anchor) are resolved against the
    user's home directory.
    """
    path = Path(xmlFile)
    self._configFile = path if path.anchor else Path(Path.home(), xmlFile)
def saveToFile(
    self, xmlFile: Optional[Union[str, Path]] = None, rootName: Optional[str] = None
) -> None:
    """
    Write the configuration to *xmlFile* as an XML document.

    Args:
        xmlFile (Path|str): destination file; defaults to the file
            registered via setConfigFile()
        rootName (str): document root tag (default "VergaraSoft")
    """
    target = self._configFile if xmlFile is None else xmlFile
    rootTag = "VergaraSoft" if rootName is None else rootName
    root = ET.Element(rootTag)
    tree = ET.ElementTree(self.toXML(root))
    tree.write(str(target))
def readFromFile(self, xmlFile: Optional[Union[str, Path]] = None):
    """
    Load the configuration from an XML file.

    Args:
        xmlFile (Path|str): file to read; defaults to the file registered
            via setConfigFile()

    Note:
        A missing file is silently ignored -- no exception is raised.
    """
    xf = xmlFile
    if xmlFile is None:
        xf = self._configFile
    f = Path(xf)
    if f.is_file():
        tree = ET.ElementTree(file=str(xf))
        root = tree.getroot()
        self.fromXML(root)
class Abc:
    """Small helper class used by the module self-test (pickle round-trip)."""

    def __init__(self, param: object) -> None:
        # Keep whatever was supplied; getValue() hands it back unchanged.
        self.value = param

    def getValue(self) -> object:
        """Return the value stored at construction time."""
        return self.value
def print13() -> None:
    """Print a fixed marker line; used to verify functions survive pickling."""
    print("Print from function = 13")
def test() -> None:
    """Self-test: populate a configuration, save, reload and pretty-print it."""
    classInstance = Abc(13)
    configFile = Path(Path.cwd(), "configmanager.xml")
    configuration = ConfigurationSettings(configFile=configFile)
    b = b"Sanson"
    # Container types round-tripped via repr/literal_eval or pickle.
    configuration.set("range", range(13))
    configuration.set("set", {"r", "n", "a", "f", "e", "i"})
    configuration.set("bytearray", bytearray(b"Itsue"))
    configuration.set("frozenset", frozenset("Itsue"))
    # Functions and class instances must be pickled explicitly.
    configuration.set("function", print13, valueType="pickle")
    configuration.set("class", classInstance, valueType="pickle")
    configuration.set("bool", True)
    configuration.set(
        "base64sting",
        "AdnQywACAAAAAAHmAAAAoAAACM4AAAR5AAAB7wAAAMYAAAjFAAAEcAAAAAAAAAAACgA=",
    )
    configuration.set(
        "base86bytes",
        "AdnQywACAAAAAAHmAAAAoAAACM4AAAR5AAAB7wAAAMYAAAjFAAAEcAAAAAAAAAAACgA=".encode(),
    )
    configuration.set("dict", {"key1": 1, "key2": 2, 3: b})
    configuration.set("list", [2, 3, "list", {"key1": 1, 2: [2]}], valueType="pickle")
    configuration.set("int", 13)
    configuration.set("float", 1.3e200)
    configuration.set("complex", 1 + 3j)
    configuration.set("tuple", (1.11, 2.22, 3.33))
    print("\nConfiguration set\n")
    for key, value in configuration:
        print(
            "Key = {0}, type = {2} value = {1}".format(key, value, type(value).__name__)
        )
    # Round-trip through the XML file, then show what came back.
    configuration.saveToFile()
    configuration.readFromFile()
    root = configuration.toXML()
    print("\nRead from configuration file\n")
    for key, value in configuration:
        print(
            "Key = {0}, type = {2}, value = {1}".format(
                key, value, type(value).__name__
            )
        )
    prettyXML = configuration.xmlPrettyPrint(root)
    print()
    print(prettyXML)
    # Pickled callables/instances come back usable.
    print("Call function: ")
    f = configuration.get("function")
    f()
    c = configuration.get("class")
    print("Calling class method = {} ".format(c.getValue()))
# Run the module self-test when executed as a script.
if __name__ == "__main__":
    test()
| StarcoderdataPython |
131944 | """ Management command to create an ApiAccessRequest for given users """
import logging
from contextlib import contextmanager
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from django.db.models.signals import post_save, pre_save
from openedx.core.djangoapps.api_admin.models import (
ApiAccessConfig,
ApiAccessRequest,
send_decision_email,
send_request_email
)
logger = logging.getLogger(__name__)
@contextmanager
def disconnect_request_email_signals():
    """
    Context manager to be used for temporarily disconnecting the `send_request_email`
    and `send_decision_email` pre/post_save signal receivers from the `ApiAccessRequest` model.

    The receivers are reconnected in a ``finally`` block, so they are
    restored even if the wrapped code raises.
    """
    post_save.disconnect(
        send_request_email, sender=ApiAccessRequest, dispatch_uid="api_access_request_post_save_email"
    )
    pre_save.disconnect(
        send_decision_email, sender=ApiAccessRequest, dispatch_uid="api_access_request_pre_save_email"
    )
    try:
        yield
    finally:
        # Reconnect with the same dispatch_uid so duplicate registration is avoided.
        post_save.connect(
            send_request_email, sender=ApiAccessRequest, dispatch_uid="api_access_request_post_save_email"
        )
        pre_save.connect(
            send_decision_email, sender=ApiAccessRequest, dispatch_uid="api_access_request_pre_save_email"
        )
class Command(BaseCommand):
    """
    Create an ApiAccessRequest for the given user

    Example usage:
        $ ./manage.py lms create_api_request <username> --create-config
    """
    help = 'Create an ApiAccessRequest for the given user'

    # Defaults used when the caller does not supply --website / --reason.
    DEFAULT_WEBSITE = 'www.test-edx-example-website.edu'
    DEFAULT_REASON = 'Generated by management job create_api_request'

    def add_arguments(self, parser):
        """Declare the positional username and the optional flags."""
        parser.add_argument('username')
        parser.add_argument(
            '--create-config',
            action='store_true',
            help='Create ApiAccessConfig if it does not exist'
        )
        parser.add_argument(
            '--disconnect-signals',
            action='store_true',
            help='Disconnect the Django signal receivers that send emails when ApiAccessRequest records are saved'
        )
        parser.add_argument(
            '--status',
            choices=[choice[0] for choice in ApiAccessRequest.STATUS_CHOICES],
            default=ApiAccessRequest.APPROVED,
            help='Status of the created ApiAccessRequest'
        )
        parser.add_argument(
            '--reason',
            default=self.DEFAULT_REASON,
            help='Reason that the ApiAccessRequest is being created'
        )
        parser.add_argument(
            '--website',
            default=self.DEFAULT_WEBSITE,
            help='Website associated with the user of the created ApiAccessRequest'
        )

    def handle(self, *args, **options):
        """Entry point: optionally mute email signals around the real work."""
        if options.get('disconnect_signals'):
            with disconnect_request_email_signals():
                self._handle(*args, **options)
        else:
            self._handle(*args, **options)

    def _handle(self, *args, **options):  # pylint: disable=unused-argument
        """Create the config (if requested), resolve the user, create the request."""
        if options.get('create_config'):
            self.create_api_access_config()
        user = self.get_user(options.get('username'))
        self.create_api_access_request(
            user,
            options.get('status'),
            options.get('reason'),
            options.get('website'),
        )

    def get_user(self, username):
        """Resolve *username* to a User, or raise CommandError if unknown."""
        try:
            return User.objects.get(username=username)
        except User.DoesNotExist:
            raise CommandError(f'User {username} not found')  # lint-amnesty, pylint: disable=raise-missing-from

    def create_api_access_request(self, user, status, reason, website):
        """
        Creates an ApiAccessRequest with the given values.
        """
        try:
            ApiAccessRequest.objects.create(
                user=user,
                status=status,
                website=website,
                reason=reason,
                site=Site.objects.get_current(),
            )
        except OSError as e:
            # Ignore a specific error that occurs in the downstream `send_request_email` receiver.
            # see https://openedx.atlassian.net/browse/EDUCATOR-4478
            error_msg = str(e)
            if 'Permission denied' in error_msg and 'mako_lms' in error_msg:
                logger.warning(f'Error sending email about access request: {error_msg}')
            else:
                raise CommandError(error_msg)  # lint-amnesty, pylint: disable=raise-missing-from
        except Exception as e:
            msg = 'Unable to create ApiAccessRequest for {}. Exception is {}: {}'.format(
                user.username,
                type(e).__name__,
                e
            )
            raise CommandError(msg)  # lint-amnesty, pylint: disable=raise-missing-from
        logger.info(f'Created ApiAccessRequest for user {user.username}')

    def create_api_access_config(self):
        """
        Creates an active ApiAccessConfig if one does not currently exist.
        """
        try:
            _, created = ApiAccessConfig.objects.get_or_create(enabled=True)
        except Exception as e:
            msg = f'Unable to create ApiAccessConfig. Exception is {type(e).__name__}: {e}'
            raise CommandError(msg)  # lint-amnesty, pylint: disable=raise-missing-from
        if created:
            logger.info('Created ApiAccessConfig')
        else:
            logger.info('ApiAccessConfig already exists')
| StarcoderdataPython |
6498952 | #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# Created by CaoDa on 2021/7/11 13:09
import json
import os
from typing import List
from myvc_app.db_info import DBInfo
from myvc_app.config import DATA_PATH
class DBs:
    """In-memory collection of DBInfo records persisted as JSON at DATA_PATH."""

    def __init__(self):
        # All known database configurations.
        self.dbs = []  # type: List[DBInfo]

    def get_db_info_by_id(self, db_id):
        """Return the first DBInfo whose id equals *db_id*, or None."""
        for record in self.dbs:
            if record.id == db_id:
                return record
        return None

    def delete_db_info_by_id(self, db_id):
        """Remove the first DBInfo whose id equals *db_id*, if present."""
        for index, record in enumerate(self.dbs):
            if record.id == db_id:
                self.dbs.pop(index)
                break

    def save(self):
        """Serialize every record via to_json() and write the list to DATA_PATH."""
        payload = [record.to_json() for record in self.dbs]
        with open(DATA_PATH, 'w') as f:
            json.dump(payload, f)

    @classmethod
    def load(cls):
        # type: () -> DBs
        """Build a DBs instance from DATA_PATH, or an empty one if the file is absent."""
        dbs = DBs()
        if os.path.exists(DATA_PATH):
            with open(DATA_PATH, 'rb') as f:
                for record in json.load(f):
                    dbs.dbs.append(DBInfo.load_from_json(record))
        return dbs
| StarcoderdataPython |
1815351 | '''
@author: doronv
'''
import numpy as np
import math
import re
# read line from file split it according to separator and convert it to type
def processInputLine(inputFile, inputSeparator = ' ', inputNumber = None, inputType = int):
    """Read one line from *inputFile*, split it on *inputSeparator*
    (optionally limiting to *inputNumber* splits) and convert each token
    with *inputType*. Returns the result of map()."""
    line = inputFile.readline().rstrip()
    if inputNumber is None:
        tokens = line.split(inputSeparator)
    else:
        tokens = line.split(inputSeparator, inputNumber)
    return map(inputType, tokens)
def GCD(a, b):
    """Greatest common divisor of a and b (always non-negative; GCD(0, 0) == 0)."""
    a = abs(a)
    b = abs(b)
    while a:
        a, b = b % a, a
    return b

def LCM(a, b):
    """Least common multiple of a and b (non-negative; LCM(0, x) == 0).

    Fixed: the previous version raised ZeroDivisionError on LCM(0, 0)
    and could return a negative result for negative inputs.
    """
    if a == 0 or b == 0:
        return 0
    return abs(a * b) // GCD(a, b)
def GCDList(v):
    """GCD of all numbers in *v* (Python 2: relies on the builtin reduce)."""
    return reduce(GCD, v)

def LCMList(v):
    """LCM of all numbers in *v* (Python 2: relies on the builtin reduce)."""
    return reduce(LCM, v)
# solution
def solve(inputFile, outputFile):
    """
    Solve all test cases of the barber-queue problem (Python 2 code:
    uses xrange and integer division via /).

    Each case gives B barbers with per-customer times Mks and a customer
    index N; the answer is the 1-based index of the barber serving
    customer N.
    """
    # read case number
    T = int(inputFile.readline())
    # iterate on all cases
    for t in range(T):
        # read input B & N
        B, N = processInputLine(inputFile)
        # read input Mks
        Mks = processInputLine(inputFile)
        # After LCM(Mks) minutes every barber is simultaneously free again,
        # and exactly customerCycle customers have been served.
        cycle = LCMList(Mks)
        customerCycle = sum([cycle / Mk for Mk in Mks])
        # Only the position within one full rotation matters.
        N_ = N % customerCycle
        if N_ == 0:
            N_ = customerCycle
        # Minute-by-minute simulation of the reduced problem; N_ >= 1, so
        # servingCustomerN is always assigned before the loop ends.
        Time = 0
        customer = 1
        working = [0] * B
        while customer <= N_:
            for i in xrange(B):
                if working[i] == 0:
                    # assign customer to barber i
                    customer += 1
                    working[i] = Mks[i] - 1
                    if customer > N_:
                        servingCustomerN = i + 1
                        break
                else:
                    working[i] -= 1
            Time += 1
        # Output case result
        OutputLine = 'Case #' + str(t + 1) + ': ' + str(servingCustomerN)
        if t < T - 1: OutputLine += '\r\n'
        outputFile.write(OutputLine)
1993893 | from braces.views import MultiplePermissionsRequiredMixin
from django.contrib import messages
from django.contrib.auth import get_permission_codename
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
TemplateView,
UpdateView,
View,
)
from django.views.generic.edit import FormView
from django.views.generic.dates import TodayArchiveView
from django.views.generic.dates import DayArchiveView
from extra_views import CreateWithInlinesView, NamedFormsetsMixin, UpdateWithInlinesView
from multi_form_view import MultiFormView, MultiModelFormView
from ..mixins import HasPermissionsMixin, ModelOptsMixin, SuccessMessageMixin
from ..utils import admin_urlname, get_deleted_objects
# User-facing flash-message templates shared by the CRUD views below.
MSG_CREATED = '"{}" created successfully.'
MSG_UPDATED = '"{}" updated successfully.'
MSG_DELETED = '"{}" deleted successfully.'
MSG_CANCELED = '"{}" canceled successfully.'
# -----------------------------------------------------------------------------
# Generic Views
# -----------------------------------------------------------------------------
class MyView(LoginRequiredMixin, PermissionRequiredMixin, View):
    """Base View requiring an authenticated user and an explicit permission."""
    pass

class MyLoginRequiredView(LoginRequiredMixin, View):
    """Base View requiring only an authenticated user."""
    pass

class MyTemplateView(LoginRequiredMixin, PermissionRequiredMixin, TemplateView):
    """TemplateView requiring login and an explicit permission."""
    pass

class MyFormView(LoginRequiredMixin, PermissionRequiredMixin, FormView):
    """FormView requiring login and an explicit permission."""
    pass

class MyListView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    ListView,
):
    """ListView requiring login/permission, exposing model opts and perms to templates."""
    pass

class MyDetailView(
    LoginRequiredMixin, PermissionRequiredMixin, ModelOptsMixin, DetailView
):
    """DetailView requiring login/permission, exposing model opts to templates."""
    pass
class MyCreateView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    SuccessMessageMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    CreateView,
):
    """CreateView with login/permission checks and a success flash message."""

    def get_success_message(self):
        """Flash message shown after a successful create."""
        return MSG_CREATED.format(self.object)

    def get_success_url(self):
        """Redirect to the model's admin-style list view."""
        # print("MyCreateView::get_success_url")
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))
class MyUpdateView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    SuccessMessageMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    UpdateView,
):
    """UpdateView with login/permission checks and a success flash message.

    Unlike the default single-permission behavior, editing requires BOTH
    the model's view and change permissions.
    """

    def get_permission_required(self):
        """Default to view and change perms."""
        # perms = super().get_permission_required()
        opts = self.model._meta
        codename_view = get_permission_codename("view", opts)
        codename_change = get_permission_codename("change", opts)
        view_perm = f"{opts.app_label}.{codename_view}"
        change_perm = f"{opts.app_label}.{codename_change}"
        perms = (view_perm, change_perm)
        # print(perms)
        return perms

    def get_success_message(self):
        """Flash message shown after a successful update."""
        return MSG_UPDATED.format(self.object)

    def get_success_url(self):
        """Redirect to the model's admin-style list view."""
        # print("MyUpdateView::get_success_url")
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))
        # try:
        #     return reverse(admin_urlname(opts, "list"))
        # except NoReverseMatch:
        #     return reverse(
        #         admin_urlname(opts, "update"), kwargs={"pk": self.get_object().pk}
        #     )
class MyDeleteView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    SuccessMessageMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    DeleteView,
):
    """CBV to delete a model record - both Ajax and POST requests."""

    def get_success_message(self):
        """Flash message shown after a successful delete."""
        return MSG_DELETED.format(self.object)

    def get_success_url(self):
        """Redirect to the model's admin-style list view."""
        # print("MyDeleteView:: get_success_url")
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))

    def delete(self, request, *args, **kwargs):
        """Override delete method: flash a message, answer JSON for Ajax."""
        response = super().delete(request, *args, **kwargs)
        messages.success(self.request, self.get_success_message())
        # NOTE(review): request.is_ajax() was removed in Django 4.0 --
        # confirm the project's Django version.
        if self.request.is_ajax():
            response_data = {}
            response_data["result"] = True
            response_data["message"] = self.get_success_message()
            return JsonResponse(response_data)
        return response

    def get_context_data(self, **kwargs):
        """Get deletable objects (admin-style cascade preview for the template)."""
        # TODO: Move to deleted objects mixin and reference self?
        ctx = super().get_context_data(**kwargs)
        # print(ctx["opts"].__dict__.keys())
        # Do some extra work here
        opts = self.model._meta
        # Populate deleted_objects, a data structure of all related objects that
        # will also be deleted.
        # deleted_objects, model_count, perms_needed, protected = self.get_deleted_objects([obj], request)
        deleted_objects, model_count, protected = get_deleted_objects([self.object])
        object_name = str(opts.verbose_name)
        # if perms_needed or protected:
        if protected:
            title = _("Cannot delete %(name)s") % {"name": object_name}
        else:
            title = _("Are you sure?")
        ctx["title"] = title
        ctx["deleted_objects"] = deleted_objects
        ctx["model_count"] = dict(model_count).items()
        ctx["protected"] = protected
        return ctx
class MyCancelView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    SuccessMessageMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    DetailView,
):
    """DetailView that cancels its object on POST (Ajax-aware).

    Delegates to the model's ``cancel()`` method; the model is expected
    to provide it.
    """

    def get_success_message(self):
        """Flash message shown after a successful cancel."""
        return MSG_CANCELED.format(self.object)

    def get_success_url(self):
        """Redirect to the model's admin-style list view."""
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))

    def cancel(self, request, *args, **kwargs):
        """Call `cancel` method on object."""
        self.object = self.get_object()
        success_url = self.get_success_url()
        # A "force" URL kwarg escalates to a forced cancellation.
        if "force" in kwargs:
            self.object.cancel(force=True)
        else:
            self.object.cancel()
        # TODO: self.request or request?
        # NOTE(review): request.is_ajax() was removed in Django 4.0.
        if self.request.is_ajax():
            response_data = {}
            response_data["result"] = True
            response_data["message"] = self.get_success_message()
            return JsonResponse(response_data)
        messages.success(self.request, self.get_success_message())
        return HttpResponseRedirect(success_url)

    def post(self, request, *args, **kwargs):
        """POST triggers the cancellation."""
        return self.cancel(request, *args, **kwargs)
# -----------------------------------------------------------------------------
# Formset Views
# -----------------------------------------------------------------------------
class MyNewFormsetCreateView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    NamedFormsetsMixin,
    CreateWithInlinesView,
):
    """CreateView with named inline formsets (extra_views)."""

    def get_success_url(self):
        """Flash a message and redirect to the model's list view."""
        # TODO: Should be moved to form_valid
        messages.success(self.request, MSG_CREATED.format(self.object))
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))

class MyNewFormsetUpdateView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    NamedFormsetsMixin,
    UpdateWithInlinesView,
):
    """UpdateView with named inline formsets (extra_views)."""

    def get_success_url(self):
        """Flash a message and redirect to the model's list view."""
        # TODO: Should be moved to form_valid
        messages.success(self.request, MSG_UPDATED.format(self.object))
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))
# -----------------------------------------------------------------------------
# Multi Form Views
# -----------------------------------------------------------------------------
class MyMultiModelFormView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    SuccessMessageMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    MultiModelFormView,
):
    """MultiModelFormView editing several model forms on one page."""

    model = None
    # Object should be set within `forms_valid`
    object = None
    pk_url_kwarg = "pk"

    def get_context_data(self, **kwargs):
        """Expose the primary object (if any) to the template."""
        ctx = super().get_context_data(**kwargs)
        # Inject the primary object we may be editing
        ctx["object"] = self.object
        return ctx

    def get_success_url(self):
        """Redirect to the model's admin-style list view."""
        # print("MyMultiModelFormView::get_success_url")
        # return self.object.get_absolute_url()
        opts = self.model._meta
        return reverse(admin_urlname(opts, "list"))
# -----------------------------------------------------------------------------
# Date Views
# -----------------------------------------------------------------------------
class MyDayArchiveView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    DayArchiveView,
):
    """DayArchiveView requiring login/permission, exposing model opts and perms."""
    pass

class MyTodayArchiveView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    ModelOptsMixin,
    HasPermissionsMixin,
    TodayArchiveView,
):
    """TodayArchiveView requiring login/permission, exposing model opts and perms."""
    pass
| StarcoderdataPython |
6496104 | import discord
from discord import embeds
from discord.ext import commands
import os,datetime,json,sys
import koreanbots
from variable import *
from channels.log_channels import *
from embed.help_embed import *
import other
class on(commands.Cog):
    """Event-listener cog: startup, error logging, guild join/leave and command audit logs."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        """Set presence, announce startup in the log channel, back up user data."""
        print(f"{self.bot.user} 실행 완료")
        await other.set_status(self.bot)
        # NOTE(review): this embed is built but never sent anywhere -- confirm intent.
        embed = discord.Embed(title="봇 실행 완료", description=f"[{datetime.datetime.now()}] 에 [{host_name}] 에서 봇이 실행되었습니다.", color=0x62c1cc)
        await other.send_log(self.bot,log_bot_start_channel,f"`[{datetime.datetime.now()}] [{host_name}] 에서 봇이 실행되었습니다.`")
        await other.user_data_backup(self.bot)

    @commands.Cog.listener()
    async def on_error(self,event, *args, **kwargs):
        """Append the active exception to today's error log file and DM the admin."""
        exc = sys.exc_info()  # inspect the currently handled exception via sys
        user = await self.bot.fetch_user(int(ADMIN_ID))
        today = datetime.datetime.today().strftime("%Y_%m_%d")
        now = datetime.datetime.now()
        file = "./logs/error/" + today + '.txt'
        # Append to today's log if it exists, otherwise create it.
        if os.path.isfile(file):
            f = open(file, 'a', encoding='utf-8')
            f.write(f'\n[ {now.hour}:{now.minute}:{now.second} ] {event} : {str(exc[0].__name__)} : {str(exc[1])}')
            f.close()
        else:
            f = open(file, 'w', encoding='utf-8')
            f.write(f'[ {now.hour}:{now.minute}:{now.second} ] {event} : {str(exc[0].__name__)} : {str(exc[1])}')
            f.close()
        await user.send(f"에러 발생 : {event} : {str(exc[0].__name__)} : {str(exc[1])}")

    @commands.Cog.listener()
    async def on_guild_join(self,guild):
        """Refresh the koreanbots listing, log the join and post the help embed."""
        kb = koreanbots.Koreanbots(self.bot, KOR_TOKEN, run_task=True)
        print(f"서버 갱신 완료 : {kb}")
        await other.send_log(self.bot,log_server_join,f"[{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] `{guild.member_count-1}명`이 있는 [{guild}] 서버에 자가진단 봇이 추가되었습니다.")
        try:
            channels = guild.channels
            global help_embed
            system_channel = guild.system_channel.id
            if system_channel != None:
                channel = self.bot.get_channel(int(system_channel))
                await channel.send(embed = help_embed)
        except:
            # Guild may have no system channel (AttributeError on .id).
            pass
        # Also show the help embed in text channels whose name matches one of these chat keywords.
        try:
            for i in channels:
                if i.id != system_channel and str(i.type) == "text":
                    if i.name in ["채팅","챗","수다","chat","Chat"]:
                        channel = self.bot.get_channel(int(i.id))
                        await channel.send(embed = help_embed)
        except:
            pass

    @commands.Cog.listener()
    async def on_guild_remove(self,guild):
        """Log that the bot was removed from a guild."""
        await other.send_log(self.bot,log_server_remove,f"`[{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] {guild.member_count}명이 있는 [{guild}] 서버에 자가진단 봇이 삭제되었습니다.`")

    @commands.Cog.listener()
    async def on_command(self,ctx):
        """Append every command invocation (guild or DM) to today's command log file."""
        today = datetime.datetime.today().strftime("%Y_%m_%d")
        now = datetime.datetime.now()
        file = "./logs/command/" + today + '.txt'
        # Guild commands and DMs are logged with different wording.
        if isinstance(ctx.channel, discord.abc.PrivateChannel) != True:
            if os.path.isfile(file):
                f = open(file, 'a', encoding='utf-8')
                f.write(f'\n[ {now.hour}:{now.minute}:{now.second} ] {ctx.author}님이 {ctx.guild}에서 {ctx.command} 명령어를 사용했습니다.')
                f.close()
            else:
                f = open(file, 'w', encoding='utf-8')
                f.write(f'[ {now.hour}:{now.minute}:{now.second} ] {ctx.author}님이 {ctx.guild}에서 {ctx.command} 명령어를 사용했습니다.')
                f.close()
        else:
            if os.path.isfile(file):
                f = open(file, 'a', encoding='utf-8')
                f.write(f'\n[ {now.hour}:{now.minute}:{now.second} ] {ctx.author}님이 DM에서 {ctx.command} 명령어를 사용했습니다.')
                f.close()
            else:
                f = open(file, 'w', encoding='utf-8')
                f.write(f'[ {now.hour}:{now.minute}:{now.second} ] {ctx.author}님이 DM에서 {ctx.command} 명령어를 사용했습니다.')
                f.close()
def setup(bot):
    """discord.py extension entry point: register the `on` cog with the bot."""
    bot.add_cog(on(bot))
import requests
from sense_hat import SenseHat
import smbus
import time

# Create the Sense HAT handle once; the previous version re-instantiated
# SenseHat twice on every loop iteration.
sense = SenseHat()

while True:
    try:
        pressure = sense.get_pressure()
        data = {'pressure': pressure}
        print(pressure)
        # send http request to sense serverless function with pressure
        # data
        r = requests.post('http://127.0.0.1:8080/function/sensor', data)
        print(r.text)
        sense.show_message(r.text)
    except KeyboardInterrupt:
        # BUGFIX: the original called sys.exit() without importing sys,
        # raising NameError on Ctrl-C; break exits the loop cleanly.
        break
| StarcoderdataPython |
1874777 | <gh_stars>0
#!/usr/bin/env python
#
# note total_ordering makes this Python 2.7 dependent.
#
import os
import time
import urllib
import re
import boto
from functools import total_ordering
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from optparse import make_option
import kelvinator.tasks
from c2g.models import Video
class Command(BaseCommand):
help = """ Audit videos to see what commands need to be run to fix them
out. Looks out for missing thumbnails or smaller video files.
Does this by comparing contents of database to what is in S3.
Note that this doesn't do any *semantic* validation, just checks
for presence of files. For example, doesn't tell if they are
good thumbnails, or even the right thumbnails, just that there
are thumbnails.
Output is a set of commands to be run to fix it up.
"""
option_list = (
# Main options
make_option("-c", "--class", dest="handle",
help="restrict to course, identified by handle, eg \"cs144--Fall2012\""),
make_option("-q", "--quiet", dest="quiet", action="store_true",
help="don't print helpful warnings or summary info (still prints video names)"),
) + BaseCommand.option_list
def handle(self, *args, **options):
@total_ordering
class FoundVideo(object):
"""
Simple class to store info about videos. For identity and comparison, we use
{prefix,suffix,video_id}. But the params() command prints slug, since that's
what's used for further commands.
"""
def __init__(self, prefix, suffix, video_id, slug=None, file=None):
self.prefix = str(prefix)
self.suffix = str(suffix)
self.video_id = str(video_id)
self.slug = str(slug)
if file != None:
self.file = str(file)
def __eq__(self, other):
return self.prefix == other.prefix \
and self.suffix == other.suffix \
and self.video_id == other.video_id
def __lt__(self, other):
return self.prefix < other.prefix \
and self.suffix < other.suffix \
and self.video_id < self.video_id
def __hash__(self):
return hash((self.prefix, self.suffix, self.video_id))
def __str__(self):
return "%s %s %s %s %s" % (self.prefix, self.suffix, self.video_id, self.slug, self.file)
def fixup_params(self):
return "%s %s %s" % (self.prefix, self.suffix, self.slug)
def searchDatabase(limitHandle=None):
"""
Search the database and return the set of FoundVideo
objects that we should consider when looking for problems.
Optionally limit to one course identified by handle
(eg. "nlp--Fall2012")
"""
if limitHandle is None:
videosInDB = Video.objects.filter(is_deleted=0,mode="draft")
else:
videosInDB = Video.objects.filter(is_deleted=0,mode="draft",
course__handle=limitHandle)
foundVideos=set([])
for v in videosInDB:
fv = FoundVideo(v.course.prefix, v.course.suffix, v.id, v.slug, v.file)
foundVideos.add(fv)
return foundVideos
def searchStorage(awsKey, awsSecret, awsBucket, limitHandle=None):
"""
Search the S3 storage bucket to see what videos are. Returns a bunch of sets
for what's found in there:
videos
manifests -- proxy for existence of thumbnails
small -- small size
large -- large / normal size
FoundVideo objects. Optionally limit to one course identified by handle
(eg. "nlp--Fall2012")
Contents of S3 look like this:
nlp/Fall2012/videos/39/intro.m4v
nlp/Fall2012/videos/39/small/intro.m4v
nlp/Fall2012/videos/39/large/intro.m4v
nlp/Fall2012/videos/39/jpegs/manifest.txt
"""
store_contents=[]
if awsKey == "local" or awsSecret == "local":
media_root = getattr(settings, 'MEDIA_ROOT')
for (path, dirs, files) in os.walk(media_root):
for f in files:
p=os.path.join(path, f)
if p.startswith(media_root + "/"):
p = p[len(media_root)+1:]
store_contents.append(p)
else:
conn=boto.connect_s3(awsKey, awsSecret)
bucket_conn=conn.get_bucket(awsBucket)
store_contents_s3=bucket_conn.list(limitHandle)
store_contents=map(lambda x: x.name, store_contents_s3)
def filterStoragePaths(paths, regexp):
"""
Return set of FoundVideos for all paths matching regexp. Expect RE has four
match sections: prefix, suffix, video_id, filename
"""
foundSet=set([])
path_regexp=re.compile(regexp)
for store_entry in store_contents:
match = path_regexp.match(store_entry)
if match:
fv = FoundVideo(prefix=match.group(1), suffix=match.group(2),
video_id=match.group(3), file=match.group(4))
foundSet.add(fv)
return foundSet
# remember that video regexp'es need to handle spaces in file names
foundVideos = filterStoragePaths(store_contents,
r"(\w*)/(\w*)/videos/(\w*)/([^/]+)$")
foundManifests = filterStoragePaths(store_contents,
r"(\w*)/(\w*)/videos/(\w*)/jpegs/(manifest.txt)$") # dummy filename
foundSmalls = filterStoragePaths(store_contents,
r"(\w*)/(\w*)/videos/(\w*)/small/([^/]+)$")
foundLarges = filterStoragePaths(store_contents,
r"(\w*)/(\w*)/videos/(\w*)/large/([^/]+)$")
return (foundVideos, foundManifests, foundSmalls, foundLarges)
## MAIN
dbVideoSet=searchDatabase(options['handle'])
if not options['quiet']:
print "Videos found in database: %d " % len(dbVideoSet)
awsKey=getattr(settings, 'AWS_ACCESS_KEY_ID')
awsSecret=getattr(settings, 'AWS_SECRET_ACCESS_KEY')
awsBucket=getattr(settings, 'AWS_STORAGE_BUCKET_NAME')
(storeVideoSet, storeManifestSet, storeSmallSet, storeLargeSet) = \
searchStorage(awsKey, awsSecret, awsBucket, options['handle'])
if not options['quiet']:
print "Bucket: " + awsBucket
print "\tvideos found: %d" % len(storeVideoSet)
print "\tmanifests found: %d" % len(storeManifestSet)
print "\tsmall formats found: %d" % len(storeSmallSet)
print "\tlarge formats found: %d" % len(storeLargeSet)
missingVideoSet = dbVideoSet.difference(storeVideoSet)
missingThumbSet = dbVideoSet.difference(missingVideoSet).difference(storeManifestSet)
missingSmallSet = dbVideoSet.difference(missingVideoSet).difference(storeSmallSet)
missingLargeSet = dbVideoSet.difference(missingVideoSet).difference(storeLargeSet)
if not options['quiet']:
print "in database, but not in storage: %d" % len(missingVideoSet)
print "in database and storage, but no thumbnails: %d" % len(missingThumbSet)
print "in database and storage, but no small format: %d" % len(missingSmallSet)
print "in database and storage, but no large format: %d" % len(missingSmallSet)
print "=================================="
problems = sorted(missingThumbSet.union(missingSmallSet).union(missingLargeSet))
for p in problems:
if p in missingThumbSet:
print "./manage.py kelvinate " + p.fixup_params()
if p in missingSmallSet:
print "./manage.py resize small " + p.fixup_params()
if p in missingLargeSet:
print "./manage.py resize large " + p.fixup_params()
for m in missingVideoSet:
print "# missing video file \"%s\" for %s " % (m.file, m.fixup_params())
| StarcoderdataPython |
5180576 | <reponame>schmichael/firewall-admin<filename>firewall-admin/firewalladmin/lib/template.py
import commands, os
import cherrypy
from genshi.core import Stream
from genshi.output import encode, get_serializer
from genshi.template import Context, TemplateLoader
# Genshi template loader rooted at ../templates relative to this module,
# re-reading templates when they change on disk.
loader = TemplateLoader(
    os.path.join(os.path.dirname(__file__), '..', 'templates'),
    auto_reload=True
)
def theme(filename, method='xhtml', encoding='utf-8', **options):
    """Decorator for exposed methods specifying which template they should use
    for rendering, and which serialization method and options should be
    applied.
    """
    def decorate(func):
        def wrapper(*args, **kwargs):
            # Stash the loaded template per-thread so render() can find it.
            cherrypy.thread_data.template = loader.load(filename)
            if method == 'html':
                options.setdefault('doctype', 'html')
            serializer = get_serializer(method, **options)
            stream = func(*args, **kwargs)
            # Non-stream return values (e.g. redirects) pass through untouched.
            if not isinstance(stream, Stream):
                return stream
            # NOTE(review): the serializer object is passed as encode()'s
            # *method* argument -- confirm against the genshi.output API.
            return encode(serializer(stream), method=serializer,
                          encoding=encoding)
        return wrapper
    return decorate
def render(*args, **kwargs):
    """Render *kwargs* through a Genshi template.

    If a template filename is given as the single positional argument it is
    loaded directly; otherwise the template stashed on the current request
    thread by the @theme decorator is used.

    Recognized keyword arguments:
        show_menu: when true (default), a navigation menu is injected.
        menu: optional extra entry inserted after "home".
        system_status: defaults to the output of ``uptime``.
    """
    if args:
        assert len(args) == 1, \
            'Expected exactly one argument, but got %r' % (args,)
        template = loader.load(args[0])
    else:
        template = cherrypy.thread_data.template
    ctxt = Context(url=cherrypy.url)
    # Set up the navigation menu. (The original used bare string literals
    # here, which are no-op statements; plain comments are what was meant.)
    if kwargs.get('show_menu', True):
        menu = [{'title': 'home', 'url': '/'}]
        if kwargs.get('menu'):
            menu.append(kwargs.pop('menu'))
        menu.extend([{'title': 'allow list', 'url': '/allowlist/' },
            {'title': 'deny lists', 'url': '/denylist/'},
            {'title': 'logout', 'url': '/do_logout'}])
        kwargs['menu'] = menu
    # Add the system status line shown by the templates.
    kwargs.setdefault('system_status', commands.getoutput('uptime'))
    ctxt.push(kwargs)
    return template.generate(ctxt)
| StarcoderdataPython |
11372015 | <filename>BreaksPPU/PPU_Python/BaseDemo.py
"""
Demonstration of the use of basic logic primitives.
"""
import os
from BaseLogic import *
if __name__ == '__main__':
    # Walk through the combinational primitives first.
    lo, hi = 0, 1
    print ("not(0): ", NOT(lo))
    print ("nor(0,1): ", NOR(lo, hi))
    print ("nand(0,1): ", NAND(lo, hi))
    # Sequential part: a D-latch.
    # Setting with D=0 leaves the stored value at 0, so /out reads 1.
    latch = DLatch()
    latch.set(1, 0)
    print ("latch1: ", latch.nget())
    # Setting with D=1 captures 1, so the inverted output /out drops to 0.
    latch.set(1, 1)
    print ("latch1: ", latch.nget())
    # Multiplexer demonstration (see BaseLogic.MUX for the argument order).
    print ("MUX out:", MUX(1, 0, 1))
| StarcoderdataPython |
11359088 | # Example filename: deepgram_test.py
from deepgram import Deepgram
import asyncio, json
import pyaudio
import wave
# the file name output you want to record into
# NOTE(review): the data written below is a RIFF/WAVE stream, but the file is
# named .mp4 (and later sent with mimetype audio/mp4) -- confirm this is what
# the transcription endpoint expects; "main.wav" + "audio/wav" looks intended.
filename = "main.mp4"
# set the chunk size of 1024 samples
chunk = 1024
# sample format
FORMAT = pyaudio.paInt16
# mono, change to 2 if you want stereo
channels = 1
# 44100 samples per second
sample_rate = 44100
record_seconds = 20
# initialize PyAudio object
p = pyaudio.PyAudio()
# open stream object as input & output
stream = p.open(format=FORMAT,
                channels=channels,
                rate=sample_rate,
                input=True,
                output=True,
                frames_per_buffer=chunk)
frames = []
print("Recording...")
# Read sample_rate/chunk buffers per second for record_seconds seconds.
for i in range(int(sample_rate / chunk * record_seconds)):
    data = stream.read(chunk)
    # if you want to hear your voice while recording
    # stream.write(data)
    frames.append(data)
print("Finished recording.")
# stop and close stream
stream.stop_stream()
stream.close()
# terminate pyaudio object
p.terminate()
# save audio file
# open the file in 'write bytes' mode
wf = wave.open(filename, "wb")
# set the channels
wf.setnchannels(channels)
# set the sample format
wf.setsampwidth(p.get_sample_size(FORMAT))
# set the sample rate
wf.setframerate(sample_rate)
# write the frames as bytes
wf.writeframes(b"".join(frames))
# close the file
wf.close()
# Your Deepgram API Key
DEEPGRAM_API_KEY = 'YOUR_DEEPGRAM_API_KEY'
# Location of the file you want to transcribe. Should include filename and extension.
# Example of a local file: ../../Audio/life-moves-pretty-fast.wav
# Example of a remote file: https://static.deepgram.com/examples/interview_speech-analytics.wav
FILE = 'main.mp4'
# Mimetype for the file you want to transcribe
# Include this line only if transcribing a local file
# Example: audio/wav
MIMETYPE = 'audio/mp4'
async def main():
    """Send FILE to Deepgram for transcription and print the JSON response."""
    # Initialize the Deepgram SDK
    deepgram = Deepgram(DEEPGRAM_API_KEY)
    # Check whether requested file is local or remote, and prepare source
    if FILE.startswith('http'):
        # file is remote
        # Set the source
        source = {
            'url': FILE
        }
    else:
        # file is local
        # Open the audio file
        # NOTE(review): the handle is never closed; acceptable for a
        # one-shot script, but use a context manager if this grows.
        audio = open(FILE, 'rb')
        # Set the source
        source = {
            'buffer': audio,
            'mimetype': MIMETYPE
        }
    # Send the audio to Deepgram and get the response
    response = await asyncio.create_task(
        deepgram.transcription.prerecorded(
            source,
            {
                'punctuate': True
            }
        )
    )
    # Write the response to the console
    print(json.dumps(response, indent=4))
# Python program to create
# a pdf file
# NOTE(review): this section executes at module level but reads ``response``,
# which exists only inside main() -- as written it raises NameError before
# main() ever runs. It appears to belong at the end of main(), indented.
from fpdf import FPDF
# save FPDF() class into a
# variable pdf
pdf = FPDF()
# Add a page
pdf.add_page()
# set style and size of font
# that you want in the pdf
pdf.set_font("Arial", size = 15)
# create a cell
pdf.cell(200, 10, txt = "Title",
         ln = 1, align = 'C')
# add another cell
pdf.cell(200, 10, txt = response["results"]["channels"][0]["alternatives"][0]["transcript"],
         ln = 2, align = 'C')
# save the pdf with name .pdf
pdf.output("GFG.pdf")
# Write only the transcript to the console
#print(response["results"]["channels"][0]["alternatives"][0]["transcript"])
# BUG FIX: ``sys`` is used by the handler below but was never imported, so
# any failure raised NameError inside the except clause instead of the
# intended one-line report.
import sys

try:
    # If running in a Jupyter notebook, Jupyter is already running an event loop, so run main with this line instead:
    #await main()
    asyncio.run(main())
except Exception as e:
    # Report the failing line number along with the exception type/message.
    exception_type, exception_object, exception_traceback = sys.exc_info()
    line_number = exception_traceback.tb_lineno
    print(f'line {line_number}: {exception_type} - {e}')
| StarcoderdataPython |
8161907 | import argparse
import numpy as np
import cv2
# Code for parsing command line arguments
parser = argparse.ArgumentParser(description='Captures video from the webcam and saves it to a file')
parser.add_argument('output', type=str, help='name of the output file')
args = parser.parse_args()
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter(args.output+'.mp4', fourcc, 50.0, (640,480))
while(cap.isOpened()):
ret, frame = cap.read()
if ret==True:
out.write(frame)
cv2.imshow('Webcam', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows() | StarcoderdataPython |
6416180 | <reponame>BlackBoxOperator/GotchaTheNames
#!/usr/bin/env python3.6
# coding: utf-8
from tqdm import *
import numpy as np
import time, os, json, csv, re, sys
import shutil
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import joblib
from gensim import corpora
from gensim.models import Phrases
from bm25 import BM25Transformer
from logger import EpochLogger
from make_token import token_maker, method, usage
from ckip_util import name_extractor
from fit import fitness
###########################
# modifiable parameters #
###########################
title_weight = 2      # how many times a document's title is appended to its body/query
retrieve_size = 300   # number of top-ranked documents kept per query
w2vType = 't2m3w7d100e100'  # default word2vec model identifier
amlFile = os.path.join('..', 'data', 'aml.csv')  # CSV whose first column is known-AML doc ids (header skipped)
def mapTrim(f):
    """Strip whitespace from every entry of *f*, dropping entries that end up empty."""
    stripped = (entry.strip() for entry in f)
    return [entry for entry in stripped if entry]
def retain_chinese(line):
    """Keep only CJK characters in U+4E00..U+9FA5, normalizing 臺 to 台."""
    han_only = re.sub(r"[^\u4e00-\u9fa5]", "", line)
    return han_only.replace('臺', '台')
def get_screen_len(line):
    """Terminal display width of *line*: CJK characters take two columns, others one."""
    # len(line) counts every char once; add one extra column per CJK char.
    cjk_count = len(retain_chinese(line))
    return len(line) + cjk_count
def get_full_screen_width():
    """Current terminal width in columns, defaulting to 80 when undetectable."""
    size = shutil.get_terminal_size((80, 20))
    return size.columns
def old_models(config):
    """Load every previously cached model artifact listed in *config*.

    Only called by load_models() once it has verified that all files in
    config['caches'] exist. Returns a dict with keys 'bm25', 'doc_bm25',
    'vectorizer', 'w2v' and 'docWv'.
    """
    models = {}
    print("loading bm25Cache...", end='');
    sys.stdout.flush()
    models['bm25'] = joblib.load(config['bm25Cache'])
    print("ok")
    print("loading docBM25Cache...", end='')
    sys.stdout.flush()
    models['doc_bm25'] = joblib.load(config['docBM25Cache'])
    print("ok")
    print("loading vectorizerCache...", end='')
    sys.stdout.flush()
    models['vectorizer'] = joblib.load(config['vectorizerCache'])
    print("ok")
    print("loading w2v model...", end='')
    sys.stdout.flush()
    # The word2vec model itself is loaded through the callable prepared by
    # getConfig(), not from a joblib cache.
    models['w2v'] = config['load_w2v']()
    print("ok")
    print("loading docW2VCache...", end='')
    sys.stdout.flush()
    models['docWv'] = joblib.load(config['docW2VCache'])
    print("ok")
    return models
def new_models(config):
    """Build all model artifacts from the token/title files and cache them.

    Returns the same dict shape as old_models(): 'bm25', 'doc_bm25',
    'vectorizer', 'w2v', 'docWv'. Exits the process when the token, tokey
    and title files disagree in length.
    """
    models = {}
    # BUG FIX (resource): the token/title files were opened without being
    # closed; use context managers.
    with open(config['tokenFile'], encoding="UTF-8") as fh:
        token = mapTrim(fh.read().split('\n'))
    with open(config['titleFile'], encoding="UTF-8") as fh:
        title = mapTrim(fh.read().split('\n'))
    if len(config['tokey']) != len(token) or len(token) != len(title):
        print('len(token) {} != len(tokey) {}'.format(len(token), len(config['tokey'])))
        # BUG FIX: was exit(0) -- a length mismatch is an error, so signal
        # failure (matches the exit(1) convention used elsewhere in this file).
        exit(1)
    # append title to doc
    print("\nappending title to document...\n")
    for i, key in enumerate(tqdm(config['tokey'])):
        # NOTE(review): this tests the whole ``title`` list rather than
        # ``title[i]``, so it is True whenever any titles were loaded --
        # confirm whether ``title[i] != "Non"`` was intended.
        if title and title != "Non":
            token[i] += ' {}'.format(title[i]) * title_weight
    print("\nbuilding corpus vector space...\n")
    models['bm25'] = BM25Transformer()
    models['vectorizer'] = TfidfVectorizer()
    doc_tf = models['vectorizer'].fit_transform(tqdm(token))
    print("fitting bm25...", end='')
    sys.stdout.flush()
    models['bm25'].fit(doc_tf)
    print("transforming...", end='')
    models['doc_bm25'] = models['bm25'].transform(doc_tf)
    print("ok")
    print("saving bm25Cache...", end='')
    sys.stdout.flush()
    joblib.dump(models['bm25'], config['bm25Cache'])
    print("ok")
    print("saving docBM25Cache...", end='')
    sys.stdout.flush()
    joblib.dump(models['doc_bm25'], config['docBM25Cache'])
    print("ok")
    print("saving vectorizerCache...", end='')
    sys.stdout.flush()
    joblib.dump(models['vectorizer'], config['vectorizerCache'])
    print("ok")
    print('\ncorpus vector space - ok\n')
    docsTokens = [t.split() for t in token]
    print("loading w2v model...", end='')
    sys.stdout.flush()
    models['w2v'] = config['load_w2v']()
    print("ok")
    print("making document word vector")
    # One row per document: the sum of the vectors of its in-vocabulary tokens.
    models['docWv'] = np.array(
        [np.sum(models['w2v'][[t for t in docsTokens[i] if t in models['w2v']]], axis=0) \
         for i in tqdm(range(len(docsTokens)))])
    print("saving docW2VCache...", end='')
    sys.stdout.flush()
    joblib.dump(models['docWv'], config['docW2VCache'])
    print("ok")
    return models
def load_models(config):
    """Return the retrieval models for *config*, using caches when present.

    Side effect: populates config['tokey'] with the document keys read from
    config['tokeyFile']. Falls back to new_models() (which builds and dumps
    the caches) when any cache file is missing.
    """
    # BUG FIX (resource): the tokey file was opened without being closed.
    with open(config['tokeyFile'], encoding="UTF-8") as fh:
        config['tokey'] = mapTrim(fh.read().split('\n'))
    if all(os.path.isfile(cache) for cache in config['caches']):
        return old_models(config)
    return new_models(config)
def getConfig(name, check_w2v = True, check_token = True,
        tokname = '', model_type = w2vType, pred_w2v = None):
    """Assemble the file paths, cache paths and loader callables for *name*.

    name: tokenizer/model mode (a key of make_token.method).
    check_w2v / check_token: verify that the corresponding files exist,
        exiting with status 1 when one is missing.
    tokname: optional token-set qualifier inserted into the file names.
    model_type: word2vec model identifier, ignored when pred_w2v is given.
    pred_w2v: explicit path to a .w2v model file, overriding model_type.
    """
    config = {}
    if tokname: tokname += '_'
    config['tokenFile'] = os.path.join('..', 'token', '{}_{}token.txt'.format(name, tokname))
    config['tokeyFile'] = os.path.join('..', 'token', '{}_{}tokey.txt'.format(name, tokname))
    config['titleFile'] = os.path.join('..', 'token', '{}_{}title.txt'.format(name, tokname))
    if pred_w2v:
        config['w2vFile'] = os.path.splitext(os.path.basename(pred_w2v))[0]
        config['w2vPath'] = pred_w2v
    else:
        config['w2vFile'] = '{}_{}{}'.format(name, tokname, model_type)
        config['w2vPath'] = os.path.join('..', 'w2v', config['w2vFile'] + '.w2v')
    # Verify that the required input files exist before doing any work.
    check = [amlFile]
    if check_token:
        check = check + [config['tokenFile'], config['tokeyFile'], config['titleFile']]
    if check_w2v: check.append(config['w2vPath'])
    for fn in check:
        if not os.path.isfile(fn):
            print(fn, '''not found.
use `make [data|token]` or `python make_token.py [mode]` to prepare it.''')
            exit(1)
    # Cache locations consumed by load_models()/old_models()/new_models().
    bm25Cache = os.path.join('..', 'cache', name + '_bm25.pkl')
    docBM25Cache = os.path.join('..', 'cache', name + '_doc_bm25.pkl')
    vectorizerCache = os.path.join('..', 'cache', name + '_vectorizer.pkl')
    docW2VCache = os.path.join('..', 'cache', config['w2vFile'] + '_doc_w2v.pkl')
    caches = [bm25Cache, docBM25Cache, vectorizerCache, docW2VCache]
    config['bm25Cache'] = bm25Cache
    config['docBM25Cache'] = docBM25Cache
    config['vectorizerCache'] = vectorizerCache
    config['docW2VCache'] = docW2VCache
    config['caches'] = caches
    # Callables: tokenizer for query/corpus text, and a lazy w2v loader.
    config['tokenize'] = token_maker(name)
    config['load_w2v'] = method[name]['w2v'](config['w2vPath'])
    return config
def ranking_by_stage(config, models, info, cap, cont, stages):
    """Rank the corpus against one query with iterative relevance feedback.

    cap/cont: query caption and content (caption tokens are repeated
        title_weight times, mirroring how documents were indexed).
    stages: per feedback round, how many top-ranked documents are folded
        back into the query vectors.
    info: progress-format string printed once per stage.
    Returns the final list of (doc_key, score) pairs, best first.
    """
    query = config['tokenize'](cont)
    query += ' {}'.format(config['tokenize'](cap)) * title_weight
    # w2v
    indexs = [_ for _ in query.split() if _ in models['w2v'].vocab]
    if indexs:
        qryWv = np.sum(
            models['w2v'][indexs],
            axis=0)
        #scores = models['w2v'].cosine_similarities(qryWv, models['docWv'])
    else:
        # No in-vocabulary token: fall back to the zero vector.
        qryWv = np.zeros((models['w2v'].vector_size,))
        #scores = np.zeros((models['doc_bm25'].shape[0],))
    # bm25
    qryTf = models['vectorizer'].transform([query])
    qryBm25 = models['bm25'].transform(qryTf)
    def ranking(qry_bm25, qry_wv):
        # Combined score = BM25 cosine similarity + word-vector cosine.
        sims = cosine_similarity(qry_bm25, models['doc_bm25']).reshape(-1)
        sims += models['w2v'].cosine_similarities(qry_wv, models['docWv'])
        return sorted(zip(config['tokey'], sims),
                key=lambda e: e[-1], reverse=True)
    def feedback(pre_qrybm25, pre_qryWv, rks, n):
        # Feedback update: add 0.5 x each of the top-n ranked documents'
        # BM25 and word vectors to the query vectors.
        weight = lambda rk: 0.5
        rk2docBm = lambda rk: models['doc_bm25'][config['tokey'].index(rks[rk][0])]
        rk2docWv = lambda rk: models['docWv'][config['tokey'].index(rks[rk][0])]
        # NOTE(review): np.sum over a generator relies on deprecated NumPy
        # behaviour; see the "for 3.7" comment below for the intended form.
        new_qrybm25 = pre_qrybm25 + np.sum(rk2docBm(rk) * weight(rk) for rk in range(n))
        #new_qryWv = pre_qryWv # previous impl
        new_qryWv = pre_qryWv + np.sum(rk2docWv(rk) * weight(rk) for rk in range(n))
        return new_qrybm25, new_qryWv
        #np.sum(np.fromiter(models ... ))) # for 3.7
    for stage, n in enumerate(stages):
        print("\033[F" + info.format(stage + 1))
        ranks = ranking(qryBm25, qryWv)
        qryBm25, qryWv = feedback(qryBm25, qryWv, ranks, n)
    return ranking(qryBm25, qryWv)
def exist_english(s):
    """Return True if *s* contains at least one ASCII (English) letter.

    BUG FIX: the original tested ``c.isalpha()``, which is also True for
    CJK characters, so every Chinese name counted as "english" and the
    english/Chinese branch at the call site never distinguished anything.
    """
    return any(c.isascii() and c.isalpha() for c in s)
def aml_name_extractor(name):
    """Build a closure that extracts person names from AML-related documents.

    *name* selects the tokenizer/model configuration (see getConfig). The
    returned ``extract(doc)`` gives ``(names, secs)``: a de-duplicated list
    of person names, and the extraction time reported by the CKIP extractor.
    """
    config = getConfig(name)
    models = load_models(config)
    aml_list = [id for id, *_ in list(csv.reader(open(amlFile, encoding="UTF-8")))[1:]]
    extract_name = name_extractor()
    stages = [20, 40, 60, 100]
    def extract(doc):
        # Minimum number of known-AML documents among the retrieved set
        # before running the (expensive) name extraction.
        threshold = 50
        info = '[ stage {{}}/{} ]'.format(len(stages))
        ranks = ranking_by_stage(config, models, info, '', doc, stages)
        ranks = ranks[:retrieve_size]  # cut ranks
        hit_count = sum(1 for id, _ in ranks if id in aml_list)
        # FIX: the threshold was hardcoded as ``> 50`` while the named
        # ``threshold`` variable went unused (an unused hit_score list was
        # also computed here); use the named constant.
        names, secs = extract_name(doc)[0] if hit_count > threshold else ([], 0)
        # remove space (if any) in chinese name
        names = [''.join(n.split()) if exist_english(n) else n for n in names]
        # remove substring
        names = [this for idx, this in enumerate(names)
                 if not any(this in other for other in names[idx + 1:])]
        return names, secs
    return extract
def keyword_tuner(table, queryFile = os.path.join('..', 'query', 'query.csv')):
    """Build a fitness function over keyword weight tables.

    table: maps keyword -> weight; only its keys matter here (they fix the
        keyword set that each query is pre-filtered against).
    The returned ``tuning_by_keyword(table)`` scores a candidate weight
    table via fit.fitness() on the known-AML ('v') vs other ('x') queries.
    """
    name = "search_big"
    config = getConfig(name)
    #models = load_models(config)
    #print("query file: {}\nw2vFile: {}".format(queryFile, config['w2vPath']))
    aml_list = [id for id, *_ in list(csv.reader(open(amlFile, encoding="UTF-8")))[1:]]
    q_tokens = []
    keywords = set(x for x in table.keys())
    queries = list(csv.reader(open(queryFile, 'r', encoding="UTF-8")))
    # Tokenize each query once up front; keep its label ('v' = known AML
    # document), its token count, and the tokens that are tuneable keywords.
    for idx, [q_id, q_cap, q_cont] in enumerate(tqdm(queries)):
        tokens = config['tokenize'](q_cont).split()
        q_tokens.append(
                ('v' if q_id in aml_list else 'x',
                    len(tokens), [tok for tok in tokens if tok in keywords]))
    def tuning_by_keyword(table):
        # NOTE(review): hit_count/hit_score are assigned but never used here.
        hit_count = [0 for _ in range(len(queries))]
        hit_score = [[] for _ in range(len(queries))]
        d = {'v': [], 'x': []}
        for l, doclen, tokens in q_tokens:
            # Sum this query's keyword weights, normalized by query length.
            val = 0
            for token in tokens:
                if token in table:
                    val += table[token]
            d[l].append(val / doclen)
        return fitness(d['v'], d['x'], retrieve_size)
    return tuning_by_keyword
if __name__ == '__main__':
    # Command line: <mode> [query.csv] [corpus-token-name] [predict.w2v]
    if len(sys.argv) < 2:
        usage('[query.csv] [corpus] [predict.w2v]')
        exit(0)
    else:
        name = sys.argv[1]
    if name not in method:
        print("no such mode")
        usage('[query.csv] [corpus] [predict.w2v]')
        exit(1)
    qname = ''
    pred_w2v = None
    queryFile = os.path.join('..', 'query', 'query.csv')
    tokname = ''
    # Classify the remaining arguments by file extension.
    for arg in sys.argv[2:]:
        bname, ext = os.path.splitext(os.path.basename(arg))
        if ext == '.csv':
            queryFile = arg
            qname = os.path.splitext(os.path.basename(queryFile))[0] + '_'
        elif ext == '.w2v':
            pred_w2v = arg
        else:
            tokname = arg
    # BUG FIX: model_type was only assigned when no .w2v file was supplied,
    # so passing a predict.w2v crashed with NameError at the getConfig call
    # below. Default it first; getConfig ignores it when pred_w2v is given.
    model_type = w2vType
    if not pred_w2v:
        if name == 'search':
            model_type = 't2m5w5d100e100'
        elif name == 'search_big':
            model_type = 't2m3w7d100e100'
        else:
            model_type = w2vType
    config = getConfig(name, model_type = model_type, tokname = tokname, pred_w2v = pred_w2v)
    models = load_models(config)
    score_result = os.path.join('{}{}_score.txt'.format(qname, config['w2vFile']))
    rank_result = os.path.join('{}{}_submit.csv'.format(qname, config['w2vFile']))
    print("query file: {}\ncorpus file: {}\nw2vFile: {}".format(
        queryFile, config['tokenFile'], config['w2vPath']))
    aml_list = [id for id, *_ in list(csv.reader(open(amlFile, encoding="UTF-8")))[1:]]
    showLastQueryProgress = False
    recordRanks = False
    score_file = open(score_result, 'w', newline='', encoding="UTF-8")
    if recordRanks:
        csvfile = open(rank_result, 'w', newline='', encoding="UTF-8")
        writer = csv.writer(csvfile)
        headers = ['Query_Index'] + ['Rank_{:03d}'.format(i) for i in range(1, retrieve_size + 1)]
        writer.writerow(headers)
    queries = list(csv.reader(open(queryFile, 'r', encoding="UTF-8")))
    hit_count = [0 for _ in range(len(queries))]
    hit_score = [[] for _ in range(len(queries))]
    for idx, [q_id, q_cap, q_cont, *_] in enumerate(tqdm(queries)):
        stages = [20, 40, 60, 100]
        info = '[ stage {{}}/{} ]'.format(len(stages))
        print('{} Query{}: {}'.format(
            info.format(0), idx + 1,
            q_cap[:min(30, (get_full_screen_width() // 4))]))
        #ranks = ranking_by_stage(config, models, info, q_cap, q_cont, stages)
        ranks = ranking_by_stage(config, models, info, '', q_cont, stages)
        ranks = ranks[:retrieve_size] # cut ranks
        hit_count[idx] = sum([1 for id, _ in ranks if id in aml_list])
        hit_score[idx] = [s for id, s in ranks if id in aml_list]
        # ANSI cursor movements to overwrite the per-query progress lines.
        if showLastQueryProgress and idx == len(queries) - 1:
            print("\033[F" * 3)
            print("\033[B", end='')
        else:
            print("\033[F" + ' ' * get_full_screen_width())
            print("\033[F" * 3)
        if recordRanks: writer.writerow([q_id] + [e[0] for e in ranks])
        line = '[ {} {:3d} / {} = {:4f} score = {:6.2f} ] Query{}: {}'.format(
            'v' if q_id in aml_list else 'x',
            hit_count[idx],
            retrieve_size,
            hit_count[idx] / retrieve_size,
            sum(hit_score[idx]),
            idx + 1, q_cap[:30])
        score_file.write(line + '\n')
        if idx % 100 == 0: score_file.flush()
    if recordRanks: csvfile.close()
    score_file.close()
    exit(0) # not show
    # Everything below is deliberately disabled by the exit(0) above: it
    # re-writes the score file and echoes the per-query summary lines.
    print('-' * get_full_screen_width())
    with open(score_result, 'w', newline='', encoding="UTF-8") as score_file:
        for idx, [q_id, q_cap, q_cont] in enumerate(queries):
            line = '{} [ {:3d}/{} = {:4f}, score = {:6.2f} ] Query{}: {}'.format(
                'v' if q_id in aml_list else 'x',
                hit_count[idx],
                retrieve_size,
                hit_count[idx] / retrieve_size,
                sum(hit_score[idx]),
                idx + 1, q_cap[:30])
            print(line)
            score_file.write(line + '\n')
            #score_file.write('\n'.join([str(s) for s in hit_score[idx]]) + '\n')
12802115 | #! /usr/bin/env python
"""
Script that uses the CLI module to set a number of attributes for a bulk
account list.
"""
"""
Copyright (c) since 2007, GECAD Technologies. All rights reserved.
For feedback and/or bugs in this script, please send an e-mail to:
"AXIGEN Team" <<EMAIL>>
"""
_CVSID='$Id: set-bulk-accounts.py,v 1.5 2016/05/23 16:06:33 nini@qa1 Exp $'
if __name__=='__main__':
    import sys
    # BUG FIX: os.path.join below raised NameError -- os was never imported.
    import os
    # Make the bundled cli2 module importable from either location.
    sys.path.append(os.path.join(sys.path[0],'lib'))
    sys.path.append('/opt/axigen/scripts/lib')
# cli2 ships next to this script (or under /opt/axigen/scripts/lib); bail out
# with a message listing the search path when it cannot be found.
try:
    import cli2
except ImportError:
    print >>sys.stderr, 'ERROR: AXIGEN CLI Module could not be imported.'
    print >>sys.stderr, 'Please place cli2.py in one of the following directories:'
    for x in sys.path:
        print >>sys.stderr, '-',x
    sys.exit(1)
def show_help():
    # Print command usage and examples to stderr. Does not exit; callers
    # follow up with sys.exit() themselves.
    # FIX: corrected the user-facing typo "connect ro" -> "connect to".
    print >>sys.stderr, """
Basic usage:
 %s file=<accounts file> [host=<cli host>] \\
        [port=<cli port>] [debug=<debug level>] \\
        [pass=<<PASSWORD>>] [[context]:setting=value]...
Where, each parameter is:
 file - the filename containing each account per line, including @domain
 host - CLI host to connect to; default: localhost
 port - CLI port to connect to; default: 7000
 debug - if set to 1 will display all the protocol communication over CLI
 pass - if specified, will use this password, otherwise will ask for one
 context:setting=value - for each account, you may specify one or multiple
settings with this format.
Context may be one of: webmaildata, quotas, limits, etc (anything that starts
with "CONFIG" in the account context's HELP command), also you may specify an
empty context to set attributes directly in the account
Examples of usage
- set the totalMessageSize quota setting to 100MB:
 %s file=myaccounts.txt host=192.168.102.24 port=7001 quotas:totalMessageSize=102400
- set the language to "de":
 %s file=myaccounts.txt host=192.168.102.24 port=7001 webmaildata:language=de
- reset the password to "<PASSWORD>":
 %s file=myaccounts.txt :passwd=<PASSWORD>
""" % (sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0])
#defaults
acctFile=None
cliHost=None
cliPort=None
cliPass=None
cliDebug=None
sets=[]
for param in sys.argv[1:]:
if param.startswith('file='):
acctFile=param[5:]
continue
if param.startswith('host='):
cliHost=param[5:]
continue
if param.startswith('port='):
cliPort=param[5:]
continue
if param.startswith('pass='):
cliPass=param[5:]
continue
if param.startswith('debug='):
cliDebug=param[6:]
continue
if ':' not in param or '=' not in param:
#print >>sys.stderr, "param: %s ignored" % param
continue
sets.append(param)
if not len(sets):
print >>sys.stderr, "Nothig to set! Exiting..."
show_help()
sys.exit(1)
if cliHost==None:
cliHost="127.0.0.1"
if cliPort==None:
cliPort="7000"
if not cliPort.isdigit():
print >>sys.stderr, "Port must be a number"
sys.exit(1)
cliPort=int(cliPort)
try:
fd=open(acctFile, 'r')
except:
print >>sys.stderr, "Could not open accounts file (%s), or none specified" % acctFile
show_help()
sys.exit(1)
c=cli2.CLI(cliHost, cliPort)
if not cliPass:
import getpass
while not cliPass:
cliPass=getpass.getpass('Enter CLI Admin password: ')
if not cliPass:
print >>sys.stderr, 'Empty passwords are not allowed!'
sets.sort()
if cliDebug=="1":
cli2.CLI.debug=1
c.auth(cliPass, "admin")
prevDomain=None
lineNo=0
for line in fd:
lineNo+=1
acc_dom=line.strip().split('@')
if len(acc_dom)!=2:
print >>sys.stderr, "Ignored line %d: %s" % (lineNo, line.strip())
continue
myAccount=acc_dom[0]
myDomain=acc_dom[1]
if myDomain!=prevDomain:
if prevDomain!=None:
c.commit()
c.update_domain(myDomain)
prevDomain=myDomain
c.update_account(myAccount)
prevContext=None
for set in sets:
eq_split=set.split('=')
dc_split=eq_split[0].split(':')
myContext=dc_split[0]
mySetting=dc_split[1]
myValue=eq_split[1]
if myContext!=prevContext:
if prevContext!=None and myContext!='':
c.done()
if myContext!='':
c.config(myContext)
prevContext=myContext
c.set_data({mySetting: myValue})
print 'Ok: %s->%s->%s->%s: %s' % (myDomain, myAccount, myContext, mySetting, myValue)
if myContext!='':
c.done()
c.commit()
fd.close()
| StarcoderdataPython |
9736708 | <filename>cntapp/migrations/0005_document_thumbnail.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an optional ``thumbnail`` FileField (stored under
    MEDIA_ROOT/thumbnails) to the ``document`` model."""

    dependencies = [
        ('cntapp', '0004_auto_20150410_1020'),
    ]
    operations = [
        migrations.AddField(
            model_name='document',
            name='thumbnail',
            field=models.FileField(null=True, upload_to='thumbnails', blank=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
9626373 | <filename>py/vtproto/throttlerservice_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: throttlerservice.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import throttlerdata_pb2 as throttlerdata__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='throttlerservice.proto',
package='throttlerservice',
syntax='proto3',
serialized_pb=_b('\n\x16throttlerservice.proto\x12\x10throttlerservice\x1a\x13throttlerdata.proto2\xf3\x03\n\tThrottler\x12M\n\x08MaxRates\x12\x1e.throttlerdata.MaxRatesRequest\x1a\x1f.throttlerdata.MaxRatesResponse\"\x00\x12S\n\nSetMaxRate\x12 .throttlerdata.SetMaxRateRequest\x1a!.throttlerdata.SetMaxRateResponse\"\x00\x12\x65\n\x10GetConfiguration\x12&.throttlerdata.GetConfigurationRequest\x1a\'.throttlerdata.GetConfigurationResponse\"\x00\x12n\n\x13UpdateConfiguration\x12).throttlerdata.UpdateConfigurationRequest\x1a*.throttlerdata.UpdateConfigurationResponse\"\x00\x12k\n\x12ResetConfiguration\x12(.throttlerdata.ResetConfigurationRequest\x1a).throttlerdata.ResetConfigurationResponse\"\x00\x62\x06proto3')
,
dependencies=[throttlerdata__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaThrottlerServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def MaxRates(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def SetMaxRate(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def GetConfiguration(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UpdateConfiguration(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ResetConfiguration(self, request, context):
raise NotImplementedError()
class BetaThrottlerStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def MaxRates(self, request, timeout):
raise NotImplementedError()
MaxRates.future = None
@abc.abstractmethod
def SetMaxRate(self, request, timeout):
raise NotImplementedError()
SetMaxRate.future = None
@abc.abstractmethod
def GetConfiguration(self, request, timeout):
raise NotImplementedError()
GetConfiguration.future = None
@abc.abstractmethod
def UpdateConfiguration(self, request, timeout):
raise NotImplementedError()
UpdateConfiguration.future = None
@abc.abstractmethod
def ResetConfiguration(self, request, timeout):
raise NotImplementedError()
ResetConfiguration.future = None
def beta_create_Throttler_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
request_deserializers = {
('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationRequest.FromString,
('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesRequest.FromString,
('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationRequest.FromString,
('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateRequest.FromString,
('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationRequest.FromString,
}
response_serializers = {
('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationResponse.SerializeToString,
('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesResponse.SerializeToString,
('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationResponse.SerializeToString,
('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateResponse.SerializeToString,
('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationResponse.SerializeToString,
}
method_implementations = {
('throttlerservice.Throttler', 'GetConfiguration'): face_utilities.unary_unary_inline(servicer.GetConfiguration),
('throttlerservice.Throttler', 'MaxRates'): face_utilities.unary_unary_inline(servicer.MaxRates),
('throttlerservice.Throttler', 'ResetConfiguration'): face_utilities.unary_unary_inline(servicer.ResetConfiguration),
('throttlerservice.Throttler', 'SetMaxRate'): face_utilities.unary_unary_inline(servicer.SetMaxRate),
('throttlerservice.Throttler', 'UpdateConfiguration'): face_utilities.unary_unary_inline(servicer.UpdateConfiguration),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Throttler_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
import throttlerdata_pb2
request_serializers = {
('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationRequest.SerializeToString,
('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesRequest.SerializeToString,
('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationRequest.SerializeToString,
('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateRequest.SerializeToString,
('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationRequest.SerializeToString,
}
response_deserializers = {
('throttlerservice.Throttler', 'GetConfiguration'): throttlerdata_pb2.GetConfigurationResponse.FromString,
('throttlerservice.Throttler', 'MaxRates'): throttlerdata_pb2.MaxRatesResponse.FromString,
('throttlerservice.Throttler', 'ResetConfiguration'): throttlerdata_pb2.ResetConfigurationResponse.FromString,
('throttlerservice.Throttler', 'SetMaxRate'): throttlerdata_pb2.SetMaxRateResponse.FromString,
('throttlerservice.Throttler', 'UpdateConfiguration'): throttlerdata_pb2.UpdateConfigurationResponse.FromString,
}
cardinalities = {
'GetConfiguration': cardinality.Cardinality.UNARY_UNARY,
'MaxRates': cardinality.Cardinality.UNARY_UNARY,
'ResetConfiguration': cardinality.Cardinality.UNARY_UNARY,
'SetMaxRate': cardinality.Cardinality.UNARY_UNARY,
'UpdateConfiguration': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'throttlerservice.Throttler', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
6616457 | <gh_stars>1-10
# Rectangle dimensions.
length = 10
width = 5
# Derived measurements.
area = length * width
perimeter = 2 * (length + width)
print(area)
print(perimeter)
8129760 | # -*- coding: utf-8 -*-
#
# Convert unicode (encoded as utf-8) to the closest ascii equivalent.
#
# See README.md for more information.
#
# See LICENSE for licensing information.
#
#
# The basic idea is to assemble a regular expression that detects
# unicode that we know about. This happens the first time uni2ascii is
# called.
#
# Most of the meat is in setting up this regular expression, which
# happens in get_translits(). If you find new transliterations, you'd
# add them to get_translits().
#
# Note: I added transliterations and normalizations as I found them in
# our data by copying/pasting into get_translits() below. There are
# surely many more that aren't there yet. I'm happy to add more!
#
import re
import unicodedata
name = 'uni2ascii'
class Global:
    """Stores globals. There should be no instances of Global."""
    # Map of utf-8=>ascii transliterations. Loaded the first time uni2ascii is called.
    translits = None
    # Regexp of lhs of translits. Loaded the first time uni2ascii is called.
    # NOTE(review): keep this attribute name in sync with the one uni2ascii()
    # actually writes the compiled pattern to.
    unicode_re = None
# end class Global
def uni2ascii(line):
    """
    Replace unicode characters that look similar to ASCII with their ASCII
    equivalent.
    """
    if Global.translits is None:
        # First call: load the table and compile a single alternation regex
        # over its keys, longest first so multi-char ligatures win over
        # single-char prefixes.
        # FIX: the original cached the pattern on ``Global.unicodere`` (a new
        # attribute) while the class declares ``unicode_re``; the declared
        # attribute therefore stayed None forever. Use the declared name.
        Global.translits = get_translits()
        Global.unicode_re = re.compile('|'.join(map(re.escape,
                                                    sorted(Global.translits.keys(),
                                                           key=len,
                                                           reverse=True))))
    # NFC-normalize first so composed/decomposed forms hit the same keys.
    return re.sub(Global.unicode_re,
                  lambda mo: Global.translits[mo.group()],
                  unicodedata.normalize('NFC', line))
def get_translits():
"""
Convenience function to make it easy to add translits in place.
Returns a dict of unicode=>ascii.
"""
translitstr = """
¡ i
² 2
³ 3
´ '
À A
Á A
 A
à A
Ä A
Å A
Æ AE
Ç C
È E
É E
Ê E
Ë E
Ì I
Í I
Î I
Ï I
Ð D
Ñ N
Ò O
Ó O
Ô O
Õ O
Ö O
× x
Ù U
Ú U
Û U
Ü U
Ý Y
à a
á a
â a
ã a
ä a
å a
æ ae
ç c
è e
é e
ê e
ë e
ì i
í i
î i
ï i
ñ n
ò o
ó o
ô o
õ o
ö o
ù u
ú u
û u
ü u
ý y
ÿ y
ć c
ę e
ğ g
ģ g
ī i
ń n
ō o
Œ OE
œ oe
š s
Ÿ Y
Ž Z
ƒ f
ɑ a
ɡ g
ʻ '
̂ ^
̑ ^
ν v
ο o
ρ p
а a
б 6
е e
о o
р p
с c
у y
х x
ѕ s
і i
ј j
ѵ v
ӕ ae
։ :
৪ 8
৭ q
੧ q
ଃ 8
୨ 9
ᵫ ue
ṭ t
‐ -
‒ -
– -
— -
― -
’ '
“ "
” "
… ...
′ '
⁄ /
₁ 1
₂ 2
∕ /
≤ <=
≥ >=
★ *
Ꜳ AA
ꜳ aa
ff ff
fi fi
ffi ffi
ffl ffl
st st
︰ :
"""
ret = {}
for line in translitstr.split('\n'):
line = line.strip()
if line.startswith('#') or line == '':
continue
(lhs, rhs) = line.split()
ret[unicodedata.normalize('NFC', lhs)] = rhs
# The following are various width spaces with various other
# interpretations (e.g. non-breaking). We render all these as a
# single space. A codepoint goes here if it separates a word but
# renders with no pixels, even if it's zero width.
whites = """ : : : : : : : : ::::: : : :"""
for sp in whites.split(':'):
ret[sp] = ' '
# The following are very thin spaces. They seem to be used for
# kerning rather than word separation, so we map them to
# nothing. YMMV.
nothings = """ : """
for sp in nothings.split(':'):
ret[sp] = ''
return ret
# end translits()
| StarcoderdataPython |
6446060 | import random
from datetime import datetime
from web3 import Web3
from src.tasks.playlists import parse_playlist_event, lookup_playlist_record
from src.utils.db_session import get_db
from src.utils.playlist_event_constants import playlist_event_types_lookup
from src.utils import helpers
from src.challenges.challenge_event_bus import setup_challenge_bus
from tests.index_helpers import AttrDict, IPFSClient, UpdateTask
block_hash = b"0x8f19da326900d171642af08e6770eedd83509c6c44f6855c98e6a752844e2521"
# event_type: PlaylistCreated
def get_playlist_created_event():
    """Build a mock (event_type, event) pair for a PlaylistCreated event."""
    args = AttrDict(
        {
            "_playlistId": 1,
            "_playlistOwnerId": 1,
            "_isPrivate": True,
            "_isAlbum": False,
            "_trackIds": [],  # This is a list of numbers (track ids)
        }
    )
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_created"], event
# event_type: PlaylistNameUpdated
def get_playlist_name_updated_event():
    """Build a mock (event_type, event) pair for a PlaylistNameUpdated event."""
    args = AttrDict({"_playlistId": 1, "_updatedPlaylistName": "asdfg"})
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_name_updated"], event
# event_type: PlaylistCoverPhotoUpdated
def get_playlist_cover_photo_updated_event():
    """Build a mock (event_type, event) pair for PlaylistCoverPhotoUpdated."""
    # Raw multihash digest bytes, split across two literals for line length.
    digest = (
        b"\xad\x8d\x1eeG\xf2\x12\xe3\x817"
        + b"\x7f\xb1A\xc6 M~\xfe\x03F\x98f\xab\xfa3\x17ib\xdcC>\xed"
    )
    args = AttrDict(
        {"_playlistId": 1, "_playlistImageMultihashDigest": digest}
    )
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_cover_photo_updated"], event
# event_type: PlaylistDescriptionUpdated
def get_playlist_description_updated_event():
    """Build a mock (event_type, event) pair for PlaylistDescriptionUpdated."""
    args = AttrDict({"_playlistId": 1, "_playlistDescription": "adf"})
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_description_updated"], event
# event_type: PlaylistTrackAdded
def get_playlist_track_added_event(playlistId, addedTrackId):
    """Build a mock (event_type, event) pair for PlaylistTrackAdded."""
    args = AttrDict({"_playlistId": playlistId, "_addedTrackId": addedTrackId})
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_track_added"], event
# event_type: PlaylistTracksOrdered
def get_playlist_tracks_ordered_event():
    """Build a mock (event_type, event) pair for PlaylistTracksOrdered."""
    args = AttrDict({"_playlistId": 1, "_orderedTrackIds": [2, 1]})
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_tracks_ordered"], event
# event_type: PlaylistTrackDeleted
def get_playlist_track_delete_event(playlistId, deletedTrackId, deletedTrackTimestamp):
    """Build a mock (event_type, event) pair for PlaylistTrackDeleted."""
    args = AttrDict(
        {
            "_playlistId": playlistId,
            "_deletedTrackId": deletedTrackId,
            "_deletedTrackTimestamp": deletedTrackTimestamp,
        }
    )
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_track_deleted"], event
# event_type: PlaylistPrivacyUpdated
def get_playlist_privacy_updated_event():
    """Build a mock (event_type, event) pair for PlaylistPrivacyUpdated."""
    args = AttrDict({"_playlistId": 1, "_updatedIsPrivate": False})
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_privacy_updated"], event
# event_type: PlaylistDeleted
def get_playlist_deleted_event():
    """Build a mock (event_type, event) pair for PlaylistDeleted."""
    args = AttrDict({"_playlistId": 1})
    event = AttrDict({"blockHash": block_hash, "args": args})
    return playlist_event_types_lookup["playlist_deleted"], event
def test_index_playlist(app):
    """Tests that playlists are indexed correctly.

    Applies every playlist event type in sequence to a single playlist
    record and asserts the record's state after each parse step. The
    steps are order-dependent: later sections rely on the track ids and
    timestamps added by earlier ones.
    """
    with app.app_context():
        db = get_db()
        ipfs_client = IPFSClient({})
        web3 = Web3()
        challenge_event_bus = setup_challenge_bus()
        update_task = UpdateTask(ipfs_client, web3, challenge_event_bus)

    with db.scoped_session() as session:
        # ================= Test playlist_created Event =================
        event_type, entry = get_playlist_created_event()

        block_number = random.randint(1, 10000)
        block_timestamp = 1585336422

        # Some sqlalchemy playlist instance
        playlist_record = lookup_playlist_record(
            update_task, session, entry, block_number, "0x"  # txhash
        )

        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )

        assert playlist_record.playlist_owner_id == entry.args._playlistOwnerId
        assert playlist_record.is_private == entry.args._isPrivate
        assert playlist_record.is_album == entry.args._isAlbum

        block_datetime = datetime.utcfromtimestamp(block_timestamp)
        block_integer_time = int(block_timestamp)

        # Each track entry stores the time it was added to the playlist.
        playlist_content_array = []
        for track_id in entry.args._trackIds:
            playlist_content_array.append(
                {"track": track_id, "time": block_integer_time}
            )

        assert playlist_record.playlist_contents == {
            "track_ids": playlist_content_array
        }
        assert playlist_record.created_at == block_datetime

        # ================= Test playlist_name_updated Event =================
        event_type, entry = get_playlist_name_updated_event()

        assert playlist_record.playlist_name == None
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        assert playlist_record.playlist_name == entry.args._updatedPlaylistName

        # ================= Test playlist_cover_photo_updated Event =================
        event_type, entry = get_playlist_cover_photo_updated_event()
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        # The digest event populates the "sizes" multihash; the legacy
        # single-image field stays unset.
        assert playlist_record.playlist_image_sizes_multihash == (
            helpers.multihash_digest_to_cid(entry.args._playlistImageMultihashDigest)
        )
        assert playlist_record.playlist_image_multihash == None

        # ================= Test playlist_description_updated Event =================
        event_type, entry = get_playlist_description_updated_event()
        assert playlist_record.description == None
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        assert playlist_record.description == entry.args._playlistDescription

        # ================= Test playlist_privacy_updated Event =================
        event_type, entry = get_playlist_privacy_updated_event()
        assert playlist_record.is_private == True
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        assert playlist_record.is_private == entry.args._updatedIsPrivate

        # ================= Test playlist_track_added Event =================
        event_type, entry = get_playlist_track_added_event(1, 1)
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            12,  # block_timestamp,
            session,
        )
        assert len(playlist_record.playlist_contents["track_ids"]) == 1
        last_playlist_content = playlist_record.playlist_contents["track_ids"][-1]
        assert last_playlist_content == {"track": entry.args._addedTrackId, "time": 12}

        # ================= Test playlist_track_added with second track Event =================
        event_type, entry = get_playlist_track_added_event(1, 2)
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            13,  # block_timestamp,
            session,
        )
        assert len(playlist_record.playlist_contents["track_ids"]) == 2
        last_playlist_content = playlist_record.playlist_contents["track_ids"][-1]
        assert last_playlist_content == {"track": entry.args._addedTrackId, "time": 13}

        # ================= Test playlist_tracks_ordered Event =================
        event_type, entry = get_playlist_tracks_ordered_event()
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        # Reordering keeps each track's original "time" value.
        assert playlist_record.playlist_contents["track_ids"] == [
            {"track": 2, "time": 13},
            {"track": 1, "time": 12},
        ]

        # ================= Test playlist_track_delete_event Event =================
        event_type, entry = get_playlist_track_delete_event(1, 1, 12)
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        assert len(playlist_record.playlist_contents["track_ids"]) == 1
        last_playlist_content = playlist_record.playlist_contents["track_ids"][-1]
        assert playlist_record.playlist_contents["track_ids"] == [
            {"track": 2, "time": 13}
        ]

        # ================= Test playlist_track_delete_event Event =================
        # This should be a no-op
        event_type, entry = get_playlist_track_delete_event(1, 1, 12)
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        assert len(playlist_record.playlist_contents["track_ids"]) == 1
        assert playlist_record.playlist_contents["track_ids"] == [
            {"track": 2, "time": 13}
        ]

        # ================= Test playlist_deleted Event =================
        event_type, entry = get_playlist_deleted_event()
        assert playlist_record.is_delete == False
        parse_playlist_event(
            None,  # self - not used
            None,  # update_task - not used
            entry,
            event_type,
            playlist_record,
            block_timestamp,
            session,
        )
        assert playlist_record.is_delete == True
| StarcoderdataPython |
from django.conf.urls import url
from resources import views
urlpatterns = [
    # Vote on a resource; <action> is restricted to "likes" or "unlikes".
    url(r'^(?P<resource_id>[0-9]+)/(?P<action>likes|unlikes)$',
        views.ResourceVoteView.as_view(), name='resource_vote'),
    url(r'^create$', views.CommunityView.as_view(), name='resource_create'),
    # AJAX listing by community; the trailing "|" lets <community> be empty.
    url(r'^ajax/community/(?P<community>\w+|)$',
        views.CommunityView.as_view(), name='community'),
    url(r'^post/(?P<resource_id>[0-9]+)$', views.SinglePostView.as_view(), name='single_post'),
]
| StarcoderdataPython |
4937404 | <filename>LintCode/uncategorized/196. Missing Number/.ipynb_checkpoints/solution-checkpoint.py
class Solution:
    """
    @param nums: An array of integers
    @return: An integer
    """

    def findMissing(self, nums):
        """Return the single value missing from nums, which holds the
        integers 0..n with exactly one absent.

        Improvement over the original sort-based version: O(n) time and
        O(1) extra space via the Gauss sum formula, and the caller's list
        is no longer mutated (the original called nums.sort() in place).
        """
        n = len(nums)
        # Sum of 0..n; subtracting the actual sum isolates the missing value.
        expected_total = n * (n + 1) // 2
        return expected_total - sum(nums)
112385 | <reponame>romybauch/IML.HUJI
import numpy as np
from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes
from typing import Tuple
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from math import atan2, pi
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers.
    File is assumed to be an ndarray of shape (n_samples, 3) where the first 2
    columns represent features and the third column the class

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used

    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    raw = np.load(filename)
    X = raw[:, :2]
    y = raw[:, 2].astype(int)
    return X, y
def run_perceptron():
    """
    Fit and plot fit progression of the Perceptron algorithm over both the
    linearly separable and inseparable datasets

    Create a line plot that shows the perceptron algorithm's training loss
    values (y-axis) as a function of the training iterations (x-axis).
    """
    for n, f in [("Linearly Separable", "linearly_separable.npy"),
                 ("Linearly Inseparable", "linearly_inseparable.npy")]:
        # Load dataset
        X,Y = load_dataset(f'../datasets/{f}')

        # Fit Perceptron and record loss in each fit iteration
        losses = []

        # Callback invoked by Perceptron during fitting; closes over X, Y
        # and appends the current whole-training-set loss to `losses`.
        def my_call_back(fit: Perceptron, x: np.ndarray, y: int):
            losses.append(fit.loss(X,Y))

        perc_model = Perceptron(callback=my_call_back)
        perc_model.fit(X,Y)
        perc_model.predict(X)
        print(losses)

        # Plot figure of loss as function of fitting iteration
        iters = np.arange(0, len(losses))
        go.Figure([go.Scatter(x=iters, y=np.array(losses),mode='lines')],
                  layout=go.Layout(
            title="Perceptrone algorithm loss as a function of it iterations",
            xaxis_title="number of iterations",
            yaxis_title="loss", height=400)).show()
def get_ellipse(mu: np.ndarray, cov: np.ndarray):
    """
    Draw an ellipse centered at given location and according to specified
    covariance matrix.

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse

    cov: ndarray of shape (2,2)
        Covariance of Gaussian

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # Semi-axis lengths: eigenvalues of the covariance, largest first.
    l1, l2 = tuple(np.linalg.eigvalsh(cov)[::-1])
    # Rotation angle of the major axis.
    if cov[0, 1] != 0:
        theta = atan2(l1 - cov[0, 0], cov[0, 1])
    elif cov[0, 0] < cov[1, 1]:
        theta = np.pi / 2
    else:
        theta = 0

    # Parametrize the rotated ellipse over one full revolution.
    t = np.linspace(0, 2 * pi, 100)
    cos_t, sin_t = np.cos(t), np.sin(t)
    xs = l1 * np.cos(theta) * cos_t - l2 * np.sin(theta) * sin_t
    ys = l1 * np.sin(theta) * cos_t + l2 * np.cos(theta) * sin_t

    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines",
                      marker_color="black")
def compare_gaussian_classifiers():
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and
    gaussians2 datasets
    """
    for f in ["gaussian1.npy", "gaussian2.npy"]:
        # Load dataset
        X,y = load_dataset(f'../datasets/{f}')

        # Fit models and predict over training set
        lda_model = LDA()
        lda_model.fit(X,y)
        gnb_model = GaussianNaiveBayes()
        gnb_model.fit(X,y)

        # Plot a figure with two suplots, showing the Gaussian Naive Bayes
        # predictions on the left and LDA predictions on the right. Plot title
        # should specify dataset used and subplot titles should specify
        # algorithm and accuracy
        # Create subplots
        from IMLearn.metrics import accuracy
        accur_model = [round(accuracy(y, gnb_model.predict(X)),2),
                       round(accuracy(y, lda_model.predict(X)),2)]
        # Axis limits padded slightly beyond the data's bounding box.
        lims = np.array([X.min(axis=0), X.max(axis=0)]).T + np.array([-.4, .4])
        models = [gnb_model,lda_model]
        model_names = ["Gaussian Naive Bayes","LDA"]
        # Marker symbol encodes the *true* class; color encodes the prediction.
        symbols = np.array(["circle", "star", "diamond", "X"])
        fig = make_subplots(rows=2, cols=3,
                            subplot_titles=[f"{m} <br> Accuracy is: "
                                            f"{accur_model[i]}" for i,m in
                                            enumerate(model_names)],
                            horizontal_spacing=0.01, vertical_spacing=.03)

        for i, m in enumerate(models):
            fig.add_traces([decision_surface(m.predict, lims[0], lims[1],
                                             showscale=False),
                            go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers",
                                       showlegend=False,
                                       marker=dict(color=m.predict(X),
                                                   symbol=symbols[y],
                                                   colorscale=[custom[0],
                                                               custom[-1]],
                                                   line=dict(color="black",
                                                             width=1)))],
                           rows=(i // 3) + 1, cols=(i % 3) + 1)

        fig.update_layout(
            title=rf"$\textbf{{Decision Boundaries Of Models - {f} Dataset}}$",
            margin=dict(t=100)) \
            .update_xaxes(visible=False).update_yaxes(visible=False)
        fig.show()

        # Add traces for data-points setting symbols and colors
        # Add `X` dots specifying fitted Gaussians' means
        for i, m in enumerate(models):
            fig.add_traces([go.Scatter(x=m.mu_.T[0], y=m.mu_.T[1],
                                       mode='markers', showlegend=False,
                                       marker=dict(color="green", opacity=.7,
                                                   symbol='x'))],
                           rows = (i // 3) + 1, cols = (i % 3) + 1)
        fig.update_layout(
            title=rf"$\textbf{{Decision Boundaries Of Models - {f} Dataset}}$",
            margin=dict(t=100)) \
            .update_xaxes(visible=False).update_yaxes(visible=False)
        fig.show()

        # Add ellipses depicting the covariances of the fitted Gaussians
        # GNB stores per-class diagonal variances; LDA shares one covariance.
        for i in range(3):
            fig.add_trace(row=1, col=1, trace=get_ellipse(models[0].mu_[i],
                                            np.diag(models[0].vars_[i])))
            fig.add_trace(row=1, col=2, trace=get_ellipse(models[1].mu_[i],
                                                          models[1].cov_))
        fig.update_layout(
            title=rf"$\textbf{{Decision Boundaries Of Models - {f} Dataset}}$",
            margin=dict(t=100)) \
            .update_xaxes(visible=False).update_yaxes(visible=False)
        fig.show()
if __name__ == '__main__':
    # Fixed seed so the exercise plots are reproducible.
    np.random.seed(0)
    run_perceptron()
    compare_gaussian_classifiers()
| StarcoderdataPython |
# Generated by Django 2.2.1 on 2019-08-07 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Feed table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Feed',
            fields=[
                # Explicit (non auto-increment) integer primary key —
                # presumably the id comes from the upstream feed source.
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('link', models.CharField(max_length=255)),
                ('name', models.CharField(max_length=255)),
                ('created_time', models.DateTimeField()),
                ('updated_time', models.DateTimeField()),
                ('author_id', models.IntegerField()),
                ('author_name', models.CharField(max_length=255)),
            ],
        ),
    ]
| StarcoderdataPython |
9613145 | <reponame>ondrejholecek/fortimonitor
from GenericModel import GenericModel, NICCounters, NICDrops
class Model(GenericModel):
    """Port/counter wiring for a FortiGate model with one NP6 unit (#0).

    Front-panel ports map to NP6 XAUI (XE) or GigE port-stat counters plus
    the shared NP6 drop counters; mgmt/ha are kernel-only interfaces.
    """

    def init(self):
        self.gen_ports()
        # Single NP6 unit, index 0.
        self.np6 = [0]

    def gen_ports(self):
        """Register every port with its TX/RX counter sources and drops."""
        kernel = NICCounters(NICCounters.SRC_HWNIC, "kernel",
                             NICCounters.SPD_IFACE)
        # Two 10G XAUI ports (XE0, XE2) on NP6 #0.
        xe = {
            i: NICCounters(NICCounters.SRC_NP6_PORTSTATS, "0/XE%d" % i,
                           NICCounters.SPD_S10G)
            for i in (0, 2)
        }
        # Sixteen GigE ports (GIGE0..GIGE15) on NP6 #0.
        gige = {
            i: NICCounters(NICCounters.SRC_NP6_PORTSTATS, "0/GIGE%d" % i,
                           NICCounters.SPD_S10G)
            for i in range(16)
        }
        np6drops = NICDrops(NICDrops.SRC_NP6_DROPS, 0)

        # Front-panel name -> NP6 GigE index (physical wiring is not
        # sequential on this model).
        gige_wiring = [
            ("port1", 13), ("port2", 12), ("port3", 15), ("port4", 14),
            ("port5", 9), ("port6", 8), ("port7", 11), ("port8", 10),
            ("port9", 6), ("port10", 7), ("port11", 4), ("port12", 5),
            ("s1", 2), ("s2", 3), ("vw1", 0), ("vw2", 1),
        ]
        for name, idx in gige_wiring:
            self.add_port(name, gige[idx], gige[idx], kernel, np6drops)

        self.add_port("x1", xe[0], xe[0], kernel, np6drops)
        self.add_port("x2", xe[2], xe[2], kernel, np6drops)
        # Management and HA ports are served by the kernel only (no NP6
        # port stats, no NP6 drop counters).
        self.add_port("mgmt", kernel, None, kernel, None)
        self.add_port("ha", kernel, None, kernel, None)
| StarcoderdataPython |
import errno
import os
import stat
from unittest import mock
import pytest
from outrun.filesystem.service import LocalFileSystemService
from outrun.filesystem.caching.service import LocalCacheService
from outrun.filesystem.caching.filesystem import RemoteCachedFileSystem
from outrun.filesystem.caching.cache import RemoteCache
def create_cache(tmp_path, **override_args):
    """Construct a RemoteCache with test defaults, overridable per test."""
    kwargs = dict(
        base_path=str(tmp_path / "cache"),
        machine_id="machine",
        client=LocalCacheService(),
        prefetch=False,
        max_entries=1024,
        max_size=1024 * 1024,
        cacheable_paths=["/"],
    )
    kwargs.update(override_args)
    return RemoteCache(**kwargs)
def create_remote_file_system(tmp_path, **override_args):
    """Construct a RemoteCachedFileSystem over local services, with overrides."""
    kwargs = dict(
        client=LocalFileSystemService(),
        mount_callback=None,
        cache=create_cache(tmp_path),
    )
    kwargs.update(override_args)
    return RemoteCachedFileSystem(**kwargs)
def test_cacheable_paths(tmp_path):
    """Only paths under cacheable_paths go through the metadata cache."""
    (tmp_path / "cache").mkdir()
    (tmp_path / "cached").mkdir()
    (tmp_path / "notcached").mkdir()

    mock_client = mock.Mock()
    mock_client.get_metadata.return_value = LocalCacheService().get_metadata("/")

    fs = create_remote_file_system(
        tmp_path,
        client=mock_client,
        cache=create_cache(
            tmp_path, client=mock_client, cacheable_paths=[str(tmp_path / "cached")]
        ),
    )

    # Two distinct cached paths -> two fetches; the repeat hits the cache.
    fs.getattr(str(tmp_path / "cached" / "a"), None)
    fs.getattr(str(tmp_path / "cached" / "b"), None)
    fs.getattr(str(tmp_path / "cached" / "a"), None)
    assert mock_client.get_metadata.call_count == 2

    # Should not even be retrieved through cache
    fs.getattr(str(tmp_path / "notcached" / "a"), 123)
    fs.getattr(str(tmp_path / "notcached" / "b"), 456)
    fs.getattr(str(tmp_path / "notcached" / "a"), 789)
    assert mock_client.get_metadata.call_count == 2
def test_cached_readlink(tmp_path):
    """readlink through the cache: missing path, non-link, and real symlink."""
    (tmp_path / "cache").mkdir()
    (tmp_path / "cached").mkdir()
    os.symlink("a", tmp_path / "cached/b")

    mock_client = mock.Mock()
    mock_client.get_metadata.side_effect = LocalCacheService().get_metadata

    fs = create_remote_file_system(
        tmp_path,
        client=mock_client,
        cache=create_cache(
            tmp_path, client=mock_client, cacheable_paths=[str(tmp_path / "cached")]
        ),
    )

    # Both failures resolve from one cached (negative) metadata lookup.
    with pytest.raises(FileNotFoundError):
        fs.getattr(str(tmp_path / "cached" / "a"), None)
    with pytest.raises(FileNotFoundError):
        fs.readlink(str(tmp_path / "cached" / "a"))
    assert mock_client.get_metadata.call_count == 1

    # readlink on a directory (not a symlink) must fail with EINVAL.
    with pytest.raises(OSError) as e:
        fs.readlink(str(tmp_path / "cached"))
    assert e.value.args == (errno.EINVAL,)

    assert fs.readlink(str(tmp_path / "cached/b")) == "a"
def test_uncached_readlink(tmp_path):
    """A symlink outside every cacheable path is resolved directly."""
    (tmp_path / "cache").mkdir()
    os.symlink("bar", tmp_path / "foo")

    uncached = create_cache(tmp_path, cacheable_paths=[])
    fs = create_remote_file_system(tmp_path, cache=uncached)
    assert fs.readlink(str(tmp_path / "foo")) == "bar"
def test_cached_read(tmp_path):
    """Out-of-order ranged reads through the cache return correct bytes."""
    (tmp_path / "cache").mkdir()
    (tmp_path / "file").write_text("abcd")

    fs = create_remote_file_system(tmp_path)
    path = str(tmp_path / "file")
    fd = fs.open(path, os.O_RDONLY)
    try:
        assert fs.read(path, fd, 1, 2) == b"bc"
        assert fs.read(path, fd, 0, 2) == b"ab"
    finally:
        fs.release(path, fd)
def test_uncached_read(tmp_path):
    """Ranged reads on a non-cacheable path still return correct bytes."""
    (tmp_path / "cache").mkdir()
    (tmp_path / "file").write_text("abcd")

    fs = create_remote_file_system(
        tmp_path, cache=create_cache(tmp_path, cacheable_paths=[]),
    )
    path = str(tmp_path / "file")
    fd = fs.open(path, os.O_RDONLY)
    try:
        assert fs.read(path, fd, 1, 2) == b"bc"
        assert fs.read(path, fd, 0, 2) == b"ab"
    finally:
        fs.release(path, fd)
def test_cache_not_writable(tmp_path):
    """Cached files must be reported read-only; uncached keep write bits."""
    (tmp_path / "cache").mkdir()
    (tmp_path / "file").write_text("abcd")

    write_bits = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
    path = str(tmp_path / "file")

    # Without caching the real (writable) mode is passed through.
    fs = create_remote_file_system(
        tmp_path, cache=create_cache(tmp_path, cacheable_paths=[]),
    )
    assert fs.getattr(path, None)["st_mode"] & write_bits != 0

    # With caching all write permission bits are stripped.
    fs = create_remote_file_system(tmp_path)
    assert fs.getattr(path, None)["st_mode"] & write_bits == 0
def test_cache_flush_file(tmp_path):
    """flush() on a cached file must be a silent no-op (no exception)."""
    (tmp_path / "cache").mkdir()
    cached_fs = create_remote_file_system(tmp_path)
    cached_fs.flush(str(tmp_path / "file"), 0)
9735214 | <reponame>archibate/h2os
#!/usr/bin/env python
# GCC inline-asm constraint letters used for syscall operands, indexed by
# argument position: 'b'=ebx, 'D'=edi, 'S'=esi, 'd'=edx.
regs = 'bDSd'
# Registers declared as clobbered when not consumed as operands.
vregs = ['ebx', 'edi', 'esi']
print('''#pragma once\n''')
print('''#include <l4/sys/syskip.h>\n''')
print('''#define _$E(x) x''')
def mksys(nx, ny):
    """Emit a C macro _SYS<nx><ny>(...) wrapping an L4 syscall stub.

    nx is the number of input arguments (register constraints taken from
    `regs` by position) and ny the number of output pointer arguments
    (written back from the same registers). ebp is unconditionally
    pushed/popped around the call — the guarding conditions are commented
    out — because edx is moved into ebp when a 4th operand exists.
    """
    print('''
#define _SYS%d%d(rett, func''' % (nx, ny) + \
''.join([', t%d, x%d' % (i,i) for i in range(nx)] + \
[', u%d, y%d' % (i,i) for i in range(ny)]) + \
''') \\
rett sys_##func(''' + \
','.join([' t%d x%d' % (i,i) for i in range(nx)] + \
[' u%d*y%d' % (i,i) for i in range(ny)]) + \
''') \\
{ \\
rett res; \\
asm volatile ( \\''')
    #if ny >= 4 or nx >= 4:
    print('''\t\t"push %%ebp\\n" \\''')
    if nx >= 4:
        print('''\t\t"mov %%edx, %%ebp\\n" \\''')
    print('''\t\t"call "_SYSKIP_SYM_syscall_in"\\n" \\''')
    if ny >= 4:
        print('''\t\t"mov %%edx, %%ebp\\n" \\''')
    #if ny >= 4 or nx >= 4:
    print('''\t\t"pop %%ebp\\n" \\''')
    print('''\t\t: "=a" (res) \\
''' + ''.join('\t\t, "=%s" (*y%d) \\\n' % (regs[i], i) for i in range(ny)) + \
'''\t\t: "a" (_$E(_SYS_##func)) \\''' + \
''.join('\n\t\t, "%s" (x%d) \\' % (regs[i], i) for i in range(nx)) + '''
: "ecx", ''' + ''.join('"%s", ' % x for x in vregs[max(ny,nx):]) + '''"cc", "memory"); \\
return res; \\
}''')
# Emit _SYS macros for every combination of 0-4 inputs and 0-4 outputs.
for nx in range(5):
    for ny in range(5):
        mksys(nx, ny)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
from .fetcher import from_all
| StarcoderdataPython |
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import random
import argparse
import numpy as np
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from utils import load_data, accuracy
from models import GAT, SpGAT
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False, help='Validate during training pass.')
parser.add_argument('--sparse', action='store_true', default=False, help='GAT with sparse version or not.')
parser.add_argument('--epochs', type=int, default=10000, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.005, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=8, help='Number of hidden units.')
parser.add_argument('--nb_heads', type=int, default=8, help='Number of head attentions.')
parser.add_argument('--dropout', type=float, default=0.6, help='Dropout rate (1 - keep probability).')
parser.add_argument('--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.')
parser.add_argument('--patience', type=int, default=100, help='Patience')
parser.add_argument('--seed', type=int, default=72, help='Random seed.')
parser.add_argument('--time_file', type=str, default='', help='timing output file')
parser.add_argument('--pkl_file', type=str, default='trained-model.pkl', help='trained model input file (pkl)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed every RNG so runs are reproducible.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()

# Model and optimizer
if args.sparse:
    model = SpGAT(nfeat=features.shape[1],
                nhid=args.hidden,
                nclass=int(labels.max()) + 1,
                dropout=args.dropout,
                nheads=args.nb_heads,
                alpha=args.alpha)
else:
    model = GAT(nfeat=features.shape[1],
                nhid=args.hidden,
                nclass=int(labels.max()) + 1,
                dropout=args.dropout,
                nheads=args.nb_heads,
                alpha=args.alpha)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

features, adj, labels = Variable(features), Variable(adj), Variable(labels)
def compute_test():
    """Evaluate the model on the test split under the autograd profiler,
    printing test loss/accuracy and a memory-usage table.

    Reads module globals: model, features, adj, labels, idx_test.
    """
    with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
        with profiler.record_function("model_inference"):
            model.eval()
            output = model(features, adj)
            loss_test = F.nll_loss(output[idx_test], labels[idx_test])
            acc_test = accuracy(output[idx_test], labels[idx_test])
            print("Test set results:",
                  "loss= {:.4f}".format(loss_test.data.item()),
                  "accuracy= {:.4f}".format(acc_test.data.item()))

    #print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
    print(prof.key_averages().table(sort_by="cpu_memory_usage", row_limit=10))
def time_model(file):
    """Measure the model's average inference time and record it.

    Runs n_warmup untimed passes to stabilize caches/JIT, then times
    n_sample passes and writes the average seconds-per-pass (one line)
    to `file`. Reads module globals: model, features, adj.

    Parameters
    ----------
    file : str
        Path of the output file receiving the average runtime.
    """
    model.eval()
    n_warmup = 50
    n_sample = 50

    print("=== Running Warmup Passes")
    for _ in range(n_warmup):
        model(features, adj)

    print("=== Collecting Runtime over ", str(n_sample), " Passes")
    tic = time.perf_counter()
    for _ in range(n_sample):
        model(features, adj)
    toc = time.perf_counter()
    avg_runtime = float(toc - tic) / n_sample
    print("average runtime = ", avg_runtime)

    # write runtime to file; `with` guarantees the handle is closed even
    # if the write fails (original used bare open()/close()).
    with open(file, "w") as f:
        f.write(str(avg_runtime) + "\n")
if __name__ == "__main__":
    # Bug fix: map_location was computed but never passed to torch.load,
    # so a GPU-trained checkpoint could not be loaded on a CPU-only host.
    map_location = torch.device('cpu')
    model.load_state_dict(torch.load(args.pkl_file, map_location=map_location))

    if len(args.time_file) != 0:  # time and send time to file
        time_model(args.time_file)
    compute_test()
#!/usr/bin/python3
import argparse
import collections
from collections import defaultdict
import imageio
import numpy as np
from pathlib import Path
import pdb
from typing import Any, List, Mapping, Tuple
from mseg.utils.multiprocessing_utils import send_list_to_workers
from mseg.utils.txt_utils import generate_all_img_label_pair_fpaths
from mseg.utils.names_utils import get_classname_to_dataloaderid_map
from mseg.utils.mask_utils import swap_px_inside_mask
from mseg.dataset_apis.Ade20kMaskLevelDataset import Ade20kMaskDataset
from mseg.dataset_apis.BDDImageLevelDataset import BDDImageLevelDataset
from mseg.dataset_apis.COCOPanopticJsonMaskDataset import COCOPanopticJsonMaskDataset
from mseg.dataset_apis.JsonMaskLevelDataset import JsonMaskDataset
from mseg.dataset_apis.MapillaryMaskDataset import MapillaryMaskDataset
from mseg.dataset_apis.SunrgbdImageLevelDataset import SunrgbdImageLevelDataset
from mseg.label_preparation.relabeled_data_containers import LabelImageUpdateRecord, DatasetClassUpdateRecord
from mseg.label_preparation.remap_dataset import remap_dataset
from mseg.label_preparation.dataset_update_records import (
cocop_update_records,
ade20k_update_records,
bdd_update_records,
idd_update_records,
cityscapes_update_records,
sunrgbd_update_records,
mapillary_update_records,
)
from mseg.utils.dataset_config import infos
"""
Simple utilities to write a remapped version of the dataset in a universal
taxonomy, then overwrite the masks in place. Each mask is defined by a
tuple of three IDs -- (sequence_ID, image_ID, segment_ID).
We subdivide multiprocessing over the image level, rather than mask level,
to prevent race conditions.
TODO: DILATE IDD MASKS BY 1 PIXEL TO FIX WEIRD BOUNDARY DIFFERENT CLASS
OR MAP TO UNLABELED FIRST FOR ALL OF RIDER...
"""
_ROOT = Path(__file__).resolve().parent.parent
def get_unique_mask_identifiers(dname: str, annot_fname: str, data_split: str) -> Tuple[str, str, int]:
    """Derive the (parent, image stem, segment id) triple naming one mask.

    Image IDs can repeat across sequences/splits, so three identifiers are
    needed per mask. The sequence ID (parent) is always the 2nd-to-last
    component of the label file path, so an absolute path locates it directly.

    Args:
        dname: dataset name.
        annot_fname: annotation file name, e.g. '000000024880_7237508.png'.
        data_split: string representing data subset, e.g. 'train' or 'val'.

    Returns:
        fname_parent: label file path parent (sequence ID).
        fname_stem: rgb file name stem.
        segment_id: integer unique segment identifier.
    """
    parts = Path(annot_fname).stem.split("_")
    segment_id = parts[-1]
    if dname in ["ade20k-150", "ade20k-150-relabeled", "coco-panoptic-133", "coco-panoptic-133-relabeled"]:
        # e.g. '000000024880_7237508.png' -> ('000000024880', 7237508)
        # (ADE20K stems themselves contain underscores, so only the last
        # underscore separates the stem from the segment ID)
        fname_stem = "_".join(parts[:-1])
        if "ade20k" in dname:
            fname_parent = "training" if data_split == "train" else "validation"
        else:
            # COCO panoptic
            fname_parent = f"{data_split}2017"
    elif dname in ["bdd", "bdd-relabeled"]:
        # e.g. 0f4e8f1e-6ba53d52_11.jpg
        fname_stem = parts[0]
        fname_parent = data_split
    elif dname in ["cityscapes-19", "cityscapes-19-relabeled", "cityscapes-34", "cityscapes-34-relabeled"]:
        # e.g. seqfrankfurt_frankfurt_000000_013942_leftImg8bit_28.jpg
        fname_stem = "_".join(parts[1:-1])
        fname_parent = parts[0][3:]  # strip the leading 'seq'
    elif dname in ["idd-39", "idd-39-relabeled"]:
        # e.g. seq173_316862_leftImg8bit_34.jpg
        fname_stem = "_".join(parts[1:-1])
        fname_parent = parts[0][3:]  # strip the leading 'seq'
    elif dname in ["sunrgbd-37", "sunrgbd-37-relabeled"]:
        # we refer to the `test` split (on disk) as `val` split, since
        # `val` is undefined for SUN RGB-D.
        fname_stem = parts[0]
        fname_parent = "train" if data_split == "train" else "test"
    elif dname in ["mapillary-public65", "mapillary-public65-relabeled"]:
        # e.g. mapillary_czkO_9In4u30opBy5H1uLg_259.jpg
        fname_stem = "_".join(parts[1:-1])
        fname_parent = "labels"
    else:
        print("Unknown dataset")
        quit()
    return fname_parent, fname_stem, int(segment_id)
def form_fname_to_updatelist_dict(
    dname: str, update_records: List[DatasetClassUpdateRecord], split: str
) -> Mapping[str, List[LabelImageUpdateRecord]]:
    """
    Form a large dictionary mapping (parent, filename) -> (update objects).

    Later we can check to see if an image is in this dictionary of
    filename -> updates, so we know whether any of its masks must be
    overwritten.

    Args:
        dname: dataset name, e.g. 'ade20k-150'.
        update_records: per-class relabeling records to expand per image.
        split: data subset to keep, e.g. 'train' or 'val'; records from
            other splits are skipped.

    Returns:
        parent_fname_to_updatelist_dict: maps sequence ID (parent) ->
            filename stem -> list of LabelImageUpdateRecord.
    """
    parent_fname_to_updatelist_dict = defaultdict(dict)
    for rec in update_records:
        # The split test is invariant over the record's image list, so hoist
        # it out of the inner loop (previously re-checked per image).
        if rec.split != split:
            continue
        for annot_fname in rec.img_list:
            # ADE20K has underscores in the stem, so last underscore separates
            # fname and segment ID.
            parent, fname_stem, segmentid = get_unique_mask_identifiers(dname, annot_fname, rec.split)
            img_rec = LabelImageUpdateRecord(
                rec.dataset_name, fname_stem, segmentid, rec.split, rec.orig_class, rec.relabeled_class
            )
            # Check for duplicated annotations (same segment listed twice);
            # a generator avoids building a throwaway list.
            same_img_recs = parent_fname_to_updatelist_dict[parent].get(fname_stem, [])
            is_dup = any(int(segmentid) == same_img_rec.segmentid for same_img_rec in same_img_recs)
            if is_dup:
                print("Found Duplicate!")
                pdb.set_trace()
            parent_fname_to_updatelist_dict[parent].setdefault(fname_stem, []).append(img_rec)
    return parent_fname_to_updatelist_dict
def write_out_updated_dataset(
    num_processes: int, mld: Any, dname: str, dataroot: str, update_records, require_strict_boundaries: bool
):
    """
    Overwrite relabeled masks on disk, one data split at a time.

    By using remap.py, we already have converted label img from original
    taxonomy, to universal taxonomy.

    Args:
        num_processes: number of processes to launch; shouldn't exceed number of cores on machine
        mld: Mask Level Dataset (mask-level API providing segment masks)
        dname: string representing name of a dataset taxonomy
        dataroot: string representing path to a file directory
        update_records: hand-curated per-class relabeling records to apply
        require_strict_boundaries: whether mask boundaries must match label
            boundaries exactly when swapping pixel values
    """
    classname_to_id_map = get_classname_to_dataloaderid_map(dname, include_ignore_idx_cls=True)
    # Check for incorrect class names up front, before touching any file.
    for rec in update_records:
        valid_orig = rec.orig_class in classname_to_id_map.keys()
        valid_relabeled = rec.relabeled_class in classname_to_id_map.keys()
        if not (valid_orig and valid_relabeled):
            print(rec.__dict__)
            print(f"Invalid universal classname: {rec.orig_class}, {rec.relabeled_class}")
            quit()
    for split in ["train", "val"]:
        # Create for each split separately, since SUNRGBD has same file names
        # for different images in train vs. val
        parent_fname_to_updatelist_dict = form_fname_to_updatelist_dict(dname, update_records, split)
        split_txt_fpath = f"{_ROOT}/dataset_lists/{dname}/list/{split}.txt"
        # load up the data root and all absolute paths from the txt file
        img_label_pairs = generate_all_img_label_pair_fpaths(data_root=dataroot, split_txt_fpath=split_txt_fpath)
        if num_processes > 1:
            # fan the (rgb, label) pairs out over worker processes
            send_list_to_workers(
                num_processes=num_processes,
                list_to_split=img_label_pairs,
                worker_func_ptr=overwrite_mask_worker,
                parent_fname_to_updatelist_dict=parent_fname_to_updatelist_dict,
                mld=mld,
                classname_to_id_map=classname_to_id_map,
                require_strict_boundaries=require_strict_boundaries,
                split=split,
            )
        elif num_processes == 1:
            # useful for debugging in a single thread
            for (img_fpath, label_img_fpath) in img_label_pairs:
                overwrite_label_img_masks(
                    img_fpath,
                    label_img_fpath,
                    parent_fname_to_updatelist_dict,
                    mld,
                    classname_to_id_map,
                    require_strict_boundaries,
                    split,
                )
def overwrite_mask_worker(
    pairs: List[Tuple[str, str]], start_idx: int, end_idx: int, kwargs: Mapping[str, Any]
) -> None:
    """Worker entry point: relabel masks for pairs[start_idx:end_idx].

    Args:
        pairs: list of (rgb image path, label image path) tuples.
        start_idx: first index (inclusive) this worker is responsible for.
        end_idx: last index (exclusive) this worker is responsible for.
        kwargs: dictionary with argument names mapped to argument values.
    """
    update_dict = kwargs["parent_fname_to_updatelist_dict"]
    mask_dataset = kwargs["mld"]
    classname_to_id = kwargs["classname_to_id_map"]
    strict_boundaries = kwargs["require_strict_boundaries"]
    data_split = kwargs["split"]
    chunk_size = end_idx - start_idx

    # Walk this worker's slice, logging progress periodically.
    for pair_idx in range(start_idx, end_idx):
        if pair_idx % 500 == 0:
            pct_completed = (pair_idx - start_idx) / chunk_size * 100
            print(f"Completed {pct_completed:.2f}%")
        rgb_fpath, label_fpath = pairs[pair_idx]
        overwrite_label_img_masks(
            rgb_fpath,
            label_fpath,
            update_dict,
            mask_dataset,
            classname_to_id,
            strict_boundaries,
            data_split,
        )
def overwrite_label_img_masks(
    img_fpath: str,
    label_img_fpath: str,
    parent_fname_to_updatelist_dict,
    mld: Any,
    classname_to_id_map: Mapping[str, int],
    require_strict_boundaries: bool,
    split: str,
) -> None:
    """
    Swap the pixel values inside a label map's mask to a new value. This
    effectively changes the mask's category on disk.

    Get fname stem from rgb image file path.
    Get sequence ID/parent from label image path file system parent.

    Args:
        img_fpath: path to the RGB image; its stem keys the update dict.
        label_img_fpath: path to the label image rewritten in place.
        parent_fname_to_updatelist_dict: parent -> fname stem -> update records.
        mld: mask-level dataset API providing get_segment_mask().
        classname_to_id_map: universal classname -> dataloader class index.
        require_strict_boundaries: whether mask boundaries must match exactly.
        split: data subset, e.g. 'train' or 'val'.
    """
    fname_stem = Path(img_fpath).stem
    parent = Path(label_img_fpath).parts[-2]  # get parent name, functions as sequence ID
    if fname_stem not in parent_fname_to_updatelist_dict[parent]:
        # we loop through the entire dataset, and many images won't have any
        # updated masks. they'll pass through here.
        return
    # load up each label_img
    # label image will already by in universal taxonomy
    label_img = imageio.imread(label_img_fpath)
    update_records = parent_fname_to_updatelist_dict[parent][fname_stem]
    for rec in update_records:
        # if it is, perform each update as described in the object
        segment_mask = mld.get_segment_mask(parent, rec.segmentid, fname_stem, split)
        if segment_mask is None:
            print("No such mask found, exiting...")
            exit()
        # update the label image each time
        orig_class_idx = classname_to_id_map[rec.orig_class]
        new_class_idx = classname_to_id_map[rec.relabeled_class]
        label_img = swap_px_inside_mask(
            label_img, segment_mask, orig_class_idx, new_class_idx, require_strict_boundaries
        )
    # save it to disk, at new data root, using same abs path as before
    overwrite = True  # set to False for a dry run that changes nothing on disk
    if overwrite:
        imageio.imwrite(label_img_fpath, label_img)
# ---------------------------------------------------------------------------
# Parameters needed to rewrite a dataset on disk.
# Requires providing a mask-level dataset API as argument.
DatasetRewritingTask = collections.namedtuple(
    typename="DatasetRewritingTask",
    field_names=[
        "orig_dname",
        "remapped_dname",
        "mapping_tsv_fpath",
        "orig_dataroot",
        "remapped_dataroot",
        "dataset_api",
        "update_records",
        "require_strict_boundaries",
    ],
)
def get_relabeling_task(dname: str) -> DatasetRewritingTask:
    """
    Build the fixed rewriting configuration for one dataset.

    Each branch pins: the original/remapped dataset names, the TSV mapping the
    original taxonomy to the relabeled one, both data roots, the mask-level
    dataset API used to fetch segment masks, the hand-curated update records,
    and whether strict mask boundaries are required (datasets whose masks come
    from polygon rasterization use non-strict boundaries).

    Args:
        dname: name of dataset to apply re-labeling to

    Returns:
        DatasetRewritingTask to complete
    """
    if dname == "ade20k-150":
        return DatasetRewritingTask(
            orig_dname="ade20k-150",
            remapped_dname="ade20k-150-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/ade20k-150_to_ade20k-150-relabeled.tsv",
            orig_dataroot=infos["ade20k-150"].dataroot,
            remapped_dataroot=infos["ade20k-150-relabeled"].dataroot,
            # masks come from the 151-class semantic + instance versions
            dataset_api=Ade20kMaskDataset(
                semantic_version_dataroot=infos["ade20k-151"].dataroot,
                instance_version_dataroot=infos["ade20k-151-inst"].dataroot,
            ),
            update_records=ade20k_update_records,
            require_strict_boundaries=False,
        )
    elif dname == "bdd":
        return DatasetRewritingTask(
            orig_dname="bdd",
            remapped_dname="bdd-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/bdd_to_bdd-relabeled.tsv",
            orig_dataroot=infos["bdd"].dataroot,
            remapped_dataroot=infos["bdd-relabeled"].dataroot,
            dataset_api=BDDImageLevelDataset(infos["bdd"].dataroot),
            update_records=bdd_update_records,
            require_strict_boundaries=True,
        )
    elif dname == "cityscapes-19":
        return DatasetRewritingTask(
            orig_dname="cityscapes-19",
            remapped_dname="cityscapes-19-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/cityscapes-19_to_cityscapes-19-relabeled.tsv",
            orig_dataroot=infos["cityscapes-19"].dataroot,
            remapped_dataroot=infos["cityscapes-19-relabeled"].dataroot,
            dataset_api=JsonMaskDataset(dataroot=infos["cityscapes-34"].dataroot),
            update_records=cityscapes_update_records,
            require_strict_boundaries=False,
        )  # polygon->raster will be nonstrict
    elif dname == "cityscapes-34":
        return DatasetRewritingTask(
            orig_dname="cityscapes-34",
            remapped_dname="cityscapes-34-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/cityscapes-34_to_cityscapes-34-relabeled.tsv",
            orig_dataroot=infos["cityscapes-34"].dataroot,
            remapped_dataroot=infos["cityscapes-34-relabeled"].dataroot,
            dataset_api=JsonMaskDataset(dataroot=infos["cityscapes-34"].dataroot),
            update_records=cityscapes_update_records,
            require_strict_boundaries=False,
        )  # polygon->raster will be nonstrict
    elif dname == "coco-panoptic-133":
        return DatasetRewritingTask(
            orig_dname="coco-panoptic-133",
            remapped_dname="coco-panoptic-133-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/coco-panoptic-133_to_coco-panoptic-133-relabeled.tsv",
            orig_dataroot=infos["coco-panoptic-133"].dataroot,
            remapped_dataroot=infos["coco-panoptic-133-relabeled"].dataroot,
            dataset_api=COCOPanopticJsonMaskDataset(coco_dataroot=infos["coco-panoptic-133"].dataroot),
            update_records=cocop_update_records,
            require_strict_boundaries=True,
        )
    elif dname == "idd-39":
        return DatasetRewritingTask(
            orig_dname="idd-39",
            remapped_dname="idd-39-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/idd-39_to_idd-39-relabeled.tsv",
            orig_dataroot=infos["idd-39"].dataroot,
            remapped_dataroot=infos["idd-39-relabeled"].dataroot,
            dataset_api=JsonMaskDataset(infos["idd-39-relabeled"].dataroot),
            update_records=idd_update_records,
            require_strict_boundaries=False,
        )  # polygon->raster will be nonstrict
    elif dname == "mapillary-public65":
        return DatasetRewritingTask(
            orig_dname="mapillary-public65",
            remapped_dname="mapillary-public65-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/mapillary-public65_to_mapillary-public65-relabeled.tsv",
            orig_dataroot=infos["mapillary-public65"].dataroot,
            remapped_dataroot=infos["mapillary-public65-relabeled"].dataroot,
            # masks live in the 66-class version of the dataset
            dataset_api=MapillaryMaskDataset(dataroot=infos["mapillary-public66"].dataroot),
            update_records=mapillary_update_records,
            require_strict_boundaries=True,
        )
    elif dname == "sunrgbd-37":
        return DatasetRewritingTask(
            orig_dname="sunrgbd-37",
            remapped_dname="sunrgbd-37-relabeled",
            mapping_tsv_fpath=f"{_ROOT}/class_remapping_files/sunrgbd-37_to_sunrgbd-37-relabeled.tsv",
            orig_dataroot=infos["sunrgbd-37"].dataroot,
            remapped_dataroot=infos["sunrgbd-37-relabeled"].dataroot,
            dataset_api=SunrgbdImageLevelDataset(infos["sunrgbd-37"].dataroot),
            update_records=sunrgbd_update_records,
            require_strict_boundaries=True,
        )
    else:
        print(f"This dataset {dname} is not currently configured for re-labeling")
        print("Exiting...")
        exit()
def main(args):
    """
    Run the two-stage relabeling pipeline for the requested dataset.

    We use the MSeg dataroot explicitly, as specified in
    mseg/utils/dataset_config.py.

    Args:
        args: parsed CLI namespace with `dataset_to_relabel` and
            `num_processes` attributes.
    """
    task = get_relabeling_task(args.dataset_to_relabel)
    # Stage 1: Rewrite original dataset into universal label space.
    remap_dataset(
        task.orig_dname,
        task.remapped_dname,
        task.mapping_tsv_fpath,
        old_dataroot=task.orig_dataroot,
        remapped_dataroot=task.remapped_dataroot,
        num_processes=args.num_processes,
    )
    # Stage 2: Overwrite the universal labels using mask/segment updates.
    write_out_updated_dataset(
        args.num_processes,
        task.dataset_api,
        task.remapped_dname,
        task.remapped_dataroot,
        task.update_records,
        task.require_strict_boundaries,
    )
if __name__ == "__main__":
    """Command-line entry point: parse the two required flags and run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_processes", type=int, required=True, help="number of processes to use (multiprocessing)")
    parser.add_argument("--dataset_to_relabel", type=str, required=True, help="name of dataset to apply re-labeling to")
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
6520216 | '''
Unittests/General/Mapping/hashable
__________________________________
Test suite for hashable, mapping object definitions.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules/submodules
import unittest
from xldlib.general.mapping import hashable
# CASES
# -----
class HashableMapping(unittest.TestCase):
    '''Tests for methods of an ABC on a derived class'''

    def test_mutable(self):
        '''Test hashable.HashableMapping ABC for abstract methods'''
        inst = hashable.HashableDict({1: 3})
        # Membership works through the concrete subclass...
        self.assertTrue(1 in inst)
        # ...but the ABC's abstract __getitem__, invoked unbound on the
        # instance, raises KeyError rather than returning the value.
        with self.assertRaises(KeyError):
            hashable.HashableMapping.__getitem__(inst, 1)
class HashableDict(unittest.TestCase):
    '''Tests for hashable, immutable mapping objects'''

    def test_mutable(self):
        '''Every mutating dict method on HashableDict must raise
        NotImplementedError, while read access still works.'''
        inst = hashable.HashableDict({1: 3})
        with self.assertRaises(NotImplementedError):
            inst['key'] = "value"
        with self.assertRaises(NotImplementedError):
            del inst['key']
        with self.assertRaises(NotImplementedError):
            inst.clear()
        with self.assertRaises(NotImplementedError):
            inst.update({})
        with self.assertRaises(NotImplementedError):
            inst.pop('key')
        with self.assertRaises(NotImplementedError):
            inst.popitem()
        with self.assertRaises(NotImplementedError):
            inst.setdefault('key', 'value')
        # assertEquals was a deprecated alias removed in Python 3.12;
        # assertEqual is the supported spelling.
        self.assertEqual(inst[1], 3)
# SUITE
# -----
def add_tests(suite):
    '''Register this module's test cases on the given unittest suite.'''
    for test_case in (HashableMapping('test_mutable'), HashableDict('test_mutable')):
        suite.addTest(test_case)
| StarcoderdataPython |
4888882 | <reponame>JessyLeal/flyfood<gh_stars>1-10
# Swap the positions of 'bem' and 'meu' in the list, then show the result.
lista = ['oi', 'bem', 'meu']
idx_bem = lista.index('bem')
idx_meu = lista.index('meu')
lista[idx_meu], lista[idx_bem] = lista[idx_bem], lista[idx_meu]
print(lista)
1734350 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Directory containing this settings module; used to locate test templates.
TESTS_DIR = os.path.dirname(__file__)

# In-memory SQLite keeps the test database fast and disposable.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    },
}

INSTALLED_APPS = [
    "django.contrib.sessions",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.messages",
    "widget_tweaks",
    "contactform",
]

SITE_ID = 1

# Test-only secret; not a real deployment credential.
SECRET_KEY = 'aTi1Pi1EsaiJoh1O'

MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

ROOT_URLCONF = 'tests.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(TESTS_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Recipient list for contactform submissions during tests.
CONTACTFORM_RECIPIENTS = ['<EMAIL>']
| StarcoderdataPython |
56758 | # -*- coding: utf-8 -*-
# Copyright (c) ©2019, Cardinal Operations and/or its affiliates. All rights reserved.
# CARDINAL OPERATIONS PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
# @author: <EMAIL>
# @date: 2019/10/03
import decimal
import math
import typing
from marshmallow.fields import Integer, Float, Decimal
import numbers
from {{cookiecutter.app_name}}.commons.utils import str_util
class UdfInt(Integer):
    """Integer field tolerant of empty-string input.

    Empty strings deserialize to ``None``; blank or non-numeric strings
    become ``None`` for optional fields and raise the field's "invalid"
    error otherwise.
    """

    def _validated(self, value):
        """Validate *value*, mapping '' to None and enforcing strict mode."""
        if value == '':
            return None
        if self.strict:
            # Strict mode accepts only true integral numbers (note: bool is
            # Integral too, so True/False pass -- presumably intended; verify).
            if isinstance(value, numbers.Number) and isinstance(
                value, numbers.Integral
            ):
                return super()._validated(value)
            raise self.make_error("invalid", input=value)
        return super()._validated(value)

    def _format_num(self, value) -> typing.Any:
        """Return the number value for value, given this field's `num_type`."""
        # isinstance (rather than the original `type(value) == str`) also
        # accepts str subclasses; behavior is otherwise unchanged.
        if isinstance(value, str):
            stripped = value.strip()
            if not stripped or not str_util.is_number(value):
                # Blank/non-numeric strings: None when optional, else error.
                if not self.required or self.allow_none:
                    return None
                raise self.make_error("invalid", input=value)
        return self.num_type(value)
class UdfFloat(Float):
    """Float field tolerant of empty-string input.

    Empty strings deserialize to ``None``; NaN and +/-infinity are rejected
    when ``allow_nan`` is False.
    """

    def _validated(self, value):
        """Validate *value*, mapping '' to None and honouring allow_nan."""
        if value == '':
            return None
        num = super()._validated(value)
        if self.allow_nan is False:
            # Reject NaN and both infinities when allow_nan is disabled.
            if math.isnan(num) or num == float("inf") or num == float("-inf"):
                raise self.make_error("special")
        return num

    def _format_num(self, value) -> typing.Any:
        """Return the number value for value, given this field's `num_type`."""
        # isinstance (rather than the original `type(value) == str`) also
        # accepts str subclasses; behavior is otherwise unchanged.
        if isinstance(value, str):
            stripped = value.strip()
            if not stripped or not str_util.is_number(value):
                # Blank/non-numeric strings: None when optional, else error.
                if not self.required or self.allow_none:
                    return None
                raise self.make_error("invalid", input=value)
        return self.num_type(value)
class UdfDecimal(Decimal):
    """Decimal field tolerant of empty-string input.

    Empty strings deserialize to ``None``; invalid decimal literals raise
    the field's "invalid" error, and NaN/infinity are rejected unless
    ``allow_nan`` is set.
    """

    def _validated(self, value):
        # Treat an empty string as "no value" rather than a validation error.
        if value == '':
            return None
        try:
            num = super()._validated(value)
        except decimal.InvalidOperation as error:
            # Surface malformed decimal input as the field's own error,
            # chaining the original cause for debugging.
            raise self.make_error("invalid") from error
        # Reject NaN and +/-infinity unless explicitly allowed.
        if not self.allow_nan and (num.is_nan() or num.is_infinite()):
            raise self.make_error("special")
        return num
| StarcoderdataPython |
#selection by level
#.loc[row start idx : row end idx, column start name : column end name]
import pandas as pd

# Demo table: five students observed in the same year.
# NOTE: the original source was missing the closing brace of this dict,
# which made the whole script a SyntaxError.
data = {'Name':['Nahid','Hassan', 'Mursalin', 'Rafi', 'Rakib'],
        'Year':[2018,2018,2018,2018,2018],
        'Income':['5 core','10 core','3 core','7 core','6 core'],
        'Age':[21,22,21,22,23],
        'Sex':['Male','Male','Male','Male','Male'],
        }

df = pd.DataFrame(data)
# Use the fully-qualified option name: the bare 'max_rows' shorthand is
# ambiguous (matches multiple options) in modern pandas.
pd.set_option('display.max_rows', 10)  # show max 10 rows
print(df.loc[:,:]) #Going to display all rows and all columns
'''
Name Year Income Age Sex
0 Nahid 2018 5 core 21 Male
1 Hassan 2018 10 core 22 Male
2 Mursalin 2018 3 core 21 Male
3 Rafi 2018 7 core 22 Male
4 Rakib 2018 6 core 23 Male
'''
print(df.loc[:2]) # .loc is label-based and END-INCLUSIVE: shows rows 0-2 (positional slicing would stop before 2)
'''
Name Year Income Age Sex
0 Nahid 2018 5 core 21 Male
1 Hassan 2018 10 core 22 Male
2 Mursalin 2018 3 core 21 Male
'''
#Display rows 0 to 2 and columns Name to Income
print(df.loc[:2,'Name':'Income'])
'''
Name Year Income
0 Nahid 2018 5 core
1 Hassan 2018 10 core
2 Mursalin 2018 3 core
'''
#****************Selecting a single row and single column***************
print(df.loc[5:5]) #Empty DataFrame (no row labelled 5)
print(df.loc[4:4])
'''
Name Year Income Age Sex
4 Rakib 2018 6 core 23 Male
'''
print(df.loc[:,'Name'])
'''
0 Nahid
1 Hassan
2 Mursalin
3 Rafi
4 Rakib
'''
#**************assigning a value to a particular row and column************
df.loc[:,'Age'] = 21
print(df.loc[:,:])
'''
Name Year Income Age Sex
0 Nahid 2018 5 core 21 Male
1 Hassan 2018 10 core 21 Male
2 Mursalin 2018 3 core 21 Male
3 Rafi 2018 7 core 21 Male
4 Rakib 2018 6 core 21 Male
'''
#****************A list or array of labels inside loc***************
print(df.loc[:2,['Name', 'Income']])
'''
Name Income
0 Nahid 5 core
1 Hassan 10 core
2 Mursalin 3 core
'''
print(df.loc[[0,2,4],['Name', 'Age', 'Income']])
'''
Name Age Income
0 Nahid 21 5 core
2 Mursalin 21 3 core
4 Rakib 21 6 core
'''
df.loc[:,'Age'] = [21 , 17, 23, 20, 27]
#***************Getting values from a boolean array*******************
print(df.loc[:,'Age'] > 20)
'''
0 True
1 False
2 True
3 False
4 True
'''
print(df.loc[df.loc[:,'Age']>20]) #print all the rows having age > 20
'''
Name Year Income Age Sex
0 Nahid 2018 5 core 21 Male
2 Mursalin 2018 3 core 23 Male
4 Rakib 2018 6 core 27 Male
'''
print(df.loc[df.loc[:,'Age']>20,'Age':'Age']) #display only the age column
'''
Age
0 21
2 23
4 27
'''
print(df.loc[df.loc[:,'Age']>20,['Age']]) #display only the age column using a list
'''
Age
0 21
2 23
4 27
'''
#**********************Getting a particular cell value**********************
print(df.loc[4,'Name']) #Rakib
#***************sorting by the index*************************
df = pd.DataFrame(data, index=[5,4,1,2,3])
print(df)
'''
Name Year Income Age Sex
5 Nahid 2018 5 core 21 Male
4 Hassan 2018 10 core 22 Male
1 Mursalin 2018 3 core 21 Male
2 Rafi 2018 7 core 22 Male
3 Rakib 2018 6 core 23 Male
'''
df = df.sort_index() #sort by index value
print(df.head())
'''
Name Year Income Age Sex
1 Mursalin 2018 3 core 21 Male
2 Rafi 2018 7 core 22 Male
3 Rakib 2018 6 core 23 Male
4 Hassan 2018 10 core 22 Male
5 Nahid 2018 5 core 21 Male
'''
| StarcoderdataPython |
3470031 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tldsearch
# Purpose: SpiderFoot plug-in for identifying the existence of this target
# on other TLDs.
#
# Author: <NAME> <<EMAIL>>
#
# Created: 31/08/2013
# Copyright: (c) <NAME> 2013
# Licence: MIT
# -------------------------------------------------------------------------------
import random
import threading
import time
import dns.resolver
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_tldsearch(SpiderFootPlugin):
    """Probe every known TLD for <target-keyword>.<tld> via DNS resolution,
    reporting resolvable names as SIMILARDOMAIN events."""

    meta = {
        'name': "TLD Searcher",
        'summary': "Search all Internet TLDs for domains with the same name as the target (this can be very slow.)",
        'flags': ["slow"],
        'useCases': ["Footprint"],
        'categories': ["DNS"]
    }

    # Default options
    opts = {
        'activeonly': False,  # Only report domains that have content (try to fetch the page)
        'skipwildcards': True,
        '_maxthreads': 50
    }

    # Option descriptions
    optdescs = {
        'activeonly': "Only report domains that have content (try to fetch the page)?",
        "skipwildcards": "Skip TLDs and sub-TLDs that have wildcard DNS.",
        "_maxthreads": "Maximum threads"
    }

    # Internal results tracking
    results = None
    # Track TLD search results between threads
    tldResults = dict()
    lock = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise plugin state and merge user-supplied options."""
        # NOTE(review): mutable default argument `userOpts=dict()` is shared
        # across calls -- harmless only if callers never mutate it; confirm.
        self.sf = sfc
        self.results = self.tempStorage()
        self.__dataSource__ = "DNS"
        self.lock = threading.Lock()
        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["INTERNET_NAME"]

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["SIMILARDOMAIN"]

    def tryTld(self, target, tld):
        """Attempt to resolve `target`; record the outcome in self.tldResults.

        Runs on a worker thread; writes to the shared dict are guarded by
        self.lock.
        """
        resolver = dns.resolver.Resolver()
        resolver.timeout = 1
        resolver.lifetime = 1
        resolver.search = list()
        if self.opts.get('_dnsserver', "") != "":
            resolver.nameservers = [self.opts['_dnsserver']]
        # NOTE(review): `resolver` is configured above but never used below;
        # resolution goes through self.sf.resolveHost*. Confirm whether this
        # is dead code.
        if self.opts['skipwildcards'] and self.sf.checkDnsWildcard(tld):
            return
        try:
            if not self.sf.resolveHost(target) and not self.sf.resolveHost6(target):
                with self.lock:
                    self.tldResults[target] = False
            else:
                with self.lock:
                    self.tldResults[target] = True
        except Exception:
            # Any resolution failure is treated the same as "did not resolve".
            with self.lock:
                self.tldResults[target] = False

    def tryTldWrapper(self, tldList, sourceEvent):
        """Spawn one thread per (domain, tld) pair, wait for them all to
        finish, then emit events for every domain that resolved."""
        self.tldResults = dict()
        running = True
        t = []
        # Spawn threads for scanning
        self.info(f"Spawning threads to check TLDs: {tldList}")
        for i, pair in enumerate(tldList):
            (domain, tld) = pair
            tn = 'thread_sfp_tldsearch_' + str(random.SystemRandom().randint(0, 999999999))
            t.append(threading.Thread(name=tn, target=self.tryTld, args=(domain, tld,)))
            t[i].start()
        # Block until all threads are finished (poll by thread-name prefix)
        while running:
            found = False
            for rt in threading.enumerate():
                if rt.name.startswith("thread_sfp_tldsearch_"):
                    found = True
            if not found:
                running = False
            time.sleep(0.1)
        for res in self.tldResults:
            if self.tldResults[res] and res not in self.results:
                self.sendEvent(sourceEvent, res)

    # Store the result internally and notify listening modules
    def sendEvent(self, source, result):
        """Record `result` and emit a SIMILARDOMAIN event; with 'activeonly'
        set, only domains that actually serve content are reported."""
        self.info("Found a TLD with the target's name: " + result)
        self.results[result] = True
        # Inform listening modules
        if self.opts['activeonly']:
            if self.checkForStop():
                return
            pageContent = self.sf.fetchUrl('http://' + result,
                                           timeout=self.opts['_fetchtimeout'],
                                           useragent=self.opts['_useragent'],
                                           noLog=True,
                                           verify=False)
            if pageContent['content'] is not None:
                evt = SpiderFootEvent("SIMILARDOMAIN", result, self.__name__, source)
                self.notifyListeners(evt)
        else:
            evt = SpiderFootEvent("SIMILARDOMAIN", result, self.__name__, source)
            self.notifyListeners(evt)

    # Search for similar sounding domains
    def handleEvent(self, event):
        """Derive the target's domain keyword and test it against every TLD,
        batching candidates into groups of up to _maxthreads."""
        eventData = event.data
        if eventData in self.results:
            return
        self.results[eventData] = True
        keyword = self.sf.domainKeyword(eventData, self.opts['_internettlds'])
        if not keyword:
            self.error(f"Failed to extract keyword from {eventData}")
            return
        self.debug(f"Keyword extracted from {eventData}: {keyword}")
        if keyword in self.results:
            return
        self.results[keyword] = True
        # Look through all TLDs for the existence of this target keyword
        targetList = list()
        for tld in self.opts['_internettlds']:
            if type(tld) != str:
                # bytes entry: decode, ignoring undecodable characters
                tld = str(tld.strip(), errors='ignore')
            else:
                tld = tld.strip()
            # skip comments, wildcard/negation entries and reverse-DNS zones
            if tld.startswith("//") or len(tld) == 0:
                continue
            if tld.startswith("!") or tld.startswith("*") or tld.startswith(".."):
                continue
            if tld.endswith(".arpa"):
                continue
            tryDomain = keyword + "." + tld
            if self.checkForStop():
                return
            if len(targetList) <= self.opts['_maxthreads']:
                targetList.append([tryDomain, tld])
            else:
                # NOTE(review): when the batch is full, the current tryDomain
                # is discarded rather than carried into the next batch --
                # looks like a dropped-candidate bug; confirm intent.
                self.tryTldWrapper(targetList, event)
                targetList = list()
        # Scan whatever may be left over.
        if len(targetList) > 0:
            self.tryTldWrapper(targetList, event)
# End of sfp_tldsearch class
| StarcoderdataPython |
11286673 | <filename>tests/ast/cairo-keywords-for-names_test.py
import pytest
from utils import check_ast
from warp.yul.Renamer import MangleNamesVisitor
@check_ast(__file__)
def test_changing_names(ast):
    """Apply the name-mangling pass to the Yul AST.

    NOTE(review): `check_ast` presumably compares the returned, transformed
    AST against a stored fixture for this test file -- confirm in
    utils.check_ast.
    """
    return MangleNamesVisitor().map(ast)
| StarcoderdataPython |
3352432 | import pandas as pd
ATTRIBUTES = {
"type0": {"be", "bg", "ur", "vi"},
"type1": {"ga"},
"type2": {"mhr", "mt", "ug", "wo"},
"latin": {"ga", "mt", "vi", "wo"},
"cyrillic": {"be", "bg", "mhr"},
"arabic": {"ug", "ur"},
"all": {"be", "bg", "ga", "mhr", "mt", "ug", "ur", "vi", "wo"},
}
COMMON_LANGS = ["be", "bg", "ga", "mt", "ug", "ur", "vi"]
UD_COLUMNS = COMMON_LANGS + ["wo", "avg"]
PANX_COLUMNS = COMMON_LANGS + ["mhr", "avg"]
ROWS = ["fasttext", "roberta", "mbert", "lapt", "tva"]
POS_TUPLES = [
(68.84, 88.86, 86.87, 89.68, 89.45, 90.81, 81.84, 87.48, 85.48),
(91.00, 94.48, 90.36, 92.61, 90.87, 89.88, 84.73, 87.71, 90.20),
(94.57, 96.98, 91.91, 94.01, 78.07, 91.77, 88.97, 93.04, 91.16),
(95.74, 97.15, 93.28, 95.76, 79.88, 92.18, 89.64, 94.58, 92.28),
(95.28, 97.20, 93.33, 96.33, 91.49, 92.24, 89.49, 94.48, 93.73),
]
UD_TUPLES = [
(35.81, 84.03, 65.58, 68.45, 54.52, 79.33, 54.91, 70.39, 64.13),
(45.77, 84.61, 64.02, 65.92, 60.34, 78.07, 54.70, 60.12, 64.19),
(71.83, 91.62, 71.68, 76.63, 47.70, 81.45, 64.58, 76.24, 72.72),
(72.77, 92.08, 74.79, 81.53, 50.67, 81.78, 66.15, 80.34, 75.01),
(73.22, 91.90, 74.35, 82.00, 67.55, 81.88, 65.64, 80.22, 77.09),
]
NER_TUPLES = [
(84.26, 87.98, 67.21, 33.53, 00.00, 92.85, 85.57, 35.28, 60.84),
(88.08, 90.31, 76.58, 54.64, 61.54, 94.04, 88.08, 54.17, 75.93),
(91.13, 92.56, 82.82, 61.86, 50.76, 94.60, 92.13, 61.85, 78.46),
(91.61, 92.96, 84.13, 81.53, 56.76, 95.17, 92.41, 59.17, 81.72),
(91.38, 92.70, 84.82, 80.00, 68.93, 95.43, 92.43, 64.23, 83.74),
]
TRANSLIT_COLUMNS = [
"mhr_ner",
"mhrlatin_ner",
"ug_ner",
"uglatin_ner",
"ug_pos",
"uglatin_pos",
"ug_ud",
"uglatin_ud",
]
TRANSLIT_TUPLES = [
(35.28, 41.32, 00.00, 00.00, 89.45, 89.03, 54.52, 54.45),
(54.17, 48.45, 61.54, 63.05, 90.87, 90.76, 60.34, 60.08),
(61.85, 63.84, 50.76, 56.80, 78.07, 91.34, 47.70, 65.85),
(59.17, 63.68, 56.76, 67.57, 79.88, 92.59, 50.67, 69.39),
(64.23, 63.19, 68.93, 67.10, 91.49, 92.64, 67.55, 68.58),
]
def get_task_data():
    """Bundle the per-task score tables (plus language attributes) into a dict."""
    table_specs = {
        "pos": (POS_TUPLES, UD_COLUMNS),
        "ud": (UD_TUPLES, UD_COLUMNS),
        "ner": (NER_TUPLES, PANX_COLUMNS),
        "translit": (TRANSLIT_TUPLES, TRANSLIT_COLUMNS),
    }
    task_data = {"attributes": ATTRIBUTES}
    for task_name, (score_rows, column_names) in table_specs.items():
        task_data[task_name] = pd.DataFrame(score_rows, columns=column_names, index=ROWS)
    return task_data
| StarcoderdataPython |
6526736 | from django.apps import AppConfig
class SchemesConfig(AppConfig):
    """Django application configuration for the ``schemes`` app."""
    name = 'schemes'
| StarcoderdataPython |
9775288 | <filename>20210916/demo.py
import re
from collections import Counter
import pdb
def words(text):
    """Tokenize *text* into lowercase word-character runs.

    The original pattern was r"w", which matched only the literal letter
    'w'; r"\w+" extracts whole words, matching the tokenization used later
    in this script (re.findall(r"[\w]+", ...)).
    """
    return re.findall(r"\w+", text.lower())
# Load the reference corpus once at import time (presumably Norvig's
# big.txt word-frequency corpus -- confirm).
with open("big.txt", "r") as f:
    t = f.read()
def calculate_frequency(tokens):
    """Count how many times each token occurs.

    Args:
        tokens: iterable of hashable tokens (typically words).

    Returns:
        dict mapping token -> occurrence count, e.g.::

            {
                'the': 79809,
                'project': 288,
                ...
            }
    """
    frequency = {}
    for token in tokens:
        # dict.get replaces the original bare try/except, which silently
        # swallowed *every* exception, not just the expected KeyError.
        frequency[token] = frequency.get(token, 0) + 1
    return frequency
def print_top_n(counter, n=10):
    """Print the n most frequent tokens, one "token count" pair per line.

    Ties on count are broken by token, descending (tuple sort order).
    """
    ranked = sorted(((count, token) for token, count in counter.items()), reverse=True)
    for count, token in ranked[:n]:
        print("{} {}".format(token, count))
def get_ngram(tokens, n=2):
    """Return the n-grams of *tokens* as space-joined strings.

    Sequences shorter than n yield an empty list.
    """
    last_start = len(tokens) - n
    return [" ".join(tokens[start:start + n]) for start in range(last_start + 1)]
# Quick smoke test of the helpers above.
a = "Hello! I'm your TA!"
tokens = re.findall(r"[\w]+", t.lower())
print(re.findall(r"[\w]+", a.lower()))
counter = calculate_frequency(tokens)
c = print_top_n(counter)  # NOTE(review): print_top_n returns None, so `c` is always None
get_ngram(tokens)  # NOTE(review): result discarded -- presumably demonstration only
pdb.set_trace()  # drop into the debugger for interactive exploration
9726253 | """
pylatch.processlatch のユニットテストが定義されています。
REFERENCES::
http://d.hatena.ne.jp/pythonco/20061015/p3
https://www.yoheim.net/blog.php?q=20160903
https://docs.python.jp/3/library/doctest.html
"""
import unittest as ut
import doctest as dc
import pylatch.processlatch as pl
class TestCountDownLatch(ut.TestCase):
    """Unit tests for pylatch.processlatch.CountDownLatch."""

    def test_doctest(self):
        """Run the doctests embedded in the processlatch module."""
        ut.TextTestRunner().run(dc.DocTestSuite(pl))

    def test_ctor_nagative_count(self):
        """A negative initial count passed to the constructor must raise ValueError."""
        # arrange
        # act
        # assert
        with self.assertRaises(ValueError):
            pl.CountDownLatch(-1)

    def test_count_should_not_be_negative(self):
        """Repeated count_down() calls must never drive count below zero."""
        # arrange
        sut = pl.CountDownLatch(2)
        # act: decrement far more times than the initial count
        for i in range(10):
            sut.count_down()
        # assert
        self.assertEqual(0, sut.count)
| StarcoderdataPython |
3396257 | from django import forms
from django.forms.models import inlineformset_factory
from .models import Course, Module
# Inline formset for editing a Course's Modules (title + description)
# together, with two blank extra forms and deletable rows.
ModuleFormSet = inlineformset_factory(
    Course,
    Module,
    fields=['title', 'description'],
    extra=2,
    can_delete=True,
)
6453113 | <reponame>ankitrajbiswal/SEM_5
# Keep prompting for numbers and accumulate their sum; an empty entry
# ends the loop and prints the total.
total = 0
while True:
    entry = input("Enter the number -> ")
    if entry == '':
        break
    total += int(entry)
print(total)
5059483 | # init file for the config module | StarcoderdataPython |
3489764 | #!/usr/bin/env python3
import os,sys
import ctypes
import time
from smvScope import lib61850
import json
from datetime import datetime
import types
from flask import Flask, Response, render_template, request
import socket
from struct import unpack
import threading
import binascii
application = Flask(__name__)
control_data_d = {}
control_data_d_update = True
# streamlistener data
streamListingThread = threading.Thread()
streamList = []
StreamDetails = {}
#stream data
subscribers_list = []
# subscribe/unsibscribe data
receiver = None
subscribers = {}
streamFilter = {}
# subscriber callback data
smv_data = {}
sec_counter = {}
streamInfo = {}
oldSmpCnt = {}
log_list = []
# listbox data
control_data_d['streamSelect_items'] = [] # list of streams
control_data_d['streamSelect'] = { "streamValue": [], "enableListener": True } # selected stream
# duration can be > 0 to set a timeout, 0 for immediate and -1 for infinite
def getSMVStreams(interface, duration):
    """Sniff raw IEC 61850-9-2 SMV frames (ethertype 0x88ba) on `interface`
    and collect observed "svID,channel" items.

    duration > 0: listen that many seconds; 0: one non-blocking read;
    < 0: run until control_data_d["streamSelect"]["enableListener"] clears,
    continuously publishing into the global streamList.
    """
    global streamList
    global StreamDetails
    #Convert a string of 6 characters of ethernet address into a dash separated hex string
    def eth_addr (a) :
        b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % ((a[0]) , (a[1]) , (a[2]), (a[3]), (a[4]) , (a[5]))
        return b
    # promiscuous mode is required to receive the multicast SMV traffic
    ret = os.system("ifconfig %s promisc" % interface)
    if ret != 0:
        print_to_log("error setting promiscuous mode on %s" % sys.argv[1])
        sys.exit(-1)
    #create an INET, raw socket
    #define ETH_P_ALL    0x0003          /* Every packet (be careful!!!) */
    # SMV 0x88ba
    # GOOSE 0x88b8
    s = socket.socket( socket.AF_PACKET , socket.SOCK_RAW , socket.ntohs(0x88ba))
    #s.setsockopt(socket.SOL_SOCKET, 25, str(interface + '\0').encode('utf-8'))
    s.bind((interface,0))
    streams = []
    # handle duration
    if duration < 0:
        s.settimeout(1)
    if duration == 0:
        s.settimeout(0)
        s.setblocking(0)
    if duration > 0:
        s.settimeout(1)
        deadline = time.perf_counter() + duration
    print("streamListingThread started!")
    while control_data_d["streamSelect"]["enableListener"] == True:
        if duration > 0 and time.perf_counter() > deadline:
            break
        try:
            packet = s.recvfrom(65565)
        except:
            # timeout / would-block: just poll the enable flag again
            continue
        #packet string from tuple
        packet = packet[0]
        #parse ethernet header
        eth_length = 14
        dst = eth_addr(packet[0:6])
        src = eth_addr(packet[6:12])
        # parse GOOSE streams, and make a list of them (record appid, MAC, gocbRef, )
        # when an element is chosen, the subscriber can be initialised
        # when a different element is chosen, re-init subscriber with new gocbRef
        appid = unpack('!H' , packet[eth_length:eth_length+2] )[0]
        # NOTE(review): svID offset 31 assumes a fixed-layout 9-2 LE frame — confirm
        svID_length = 31
        svID_size = int(packet[svID_length + 1])
        svID = packet[svID_length + 2 : svID_length + 2 + svID_size].decode("utf-8")
        #print_to_log("mac: %s, appid: %i, gocbRef: %s, gocbRef_size: %i" % (dst, appid, gocbRef, gocbRef_size))
        #item = "%s %i %s" % (dst,appid,gocbRef)
        if svID not in StreamDetails:
            StreamDetails[svID] = {'src': src, 'dst': dst, 'appid': appid}
        else:
            if StreamDetails[svID]['src'] != src or StreamDetails[svID]['dst'] != dst:
                print_to_log("ERROR: goose collision! message received with matching gocbref: %s but;" % svID)
                if StreamDetails[svID]['src'] != src:
                    print_to_log(" src mac not matching: expected: %s, received: %s" % (StreamDetails[svID]['src'], src))
                if StreamDetails[svID]['dst'] != dst:
                    print_to_log(" dst mac not matching: expected: %s, received: %s" % (StreamDetails[svID]['dst'], dst))
                if StreamDetails[svID]['appid'] != appid:
                    print_to_log(" appid not matching: expected: %s, received: %s" % (StreamDetails[svID]['appid'], appid))
                print_to_log("NOTE: gocbref are expected to be unique for each stream")
        for channel in range(8):# TODO: base range on decoded size
            item = "%s,%i" % (svID,channel)
            if item not in streams:
                streams.append(item)
        if duration == 0:
            break
    if duration < 0:
        streamList = streams
    s.close()
    print("streamListingThread stopped!")
    return streams
@application.route('/')
def index():
    """Serve the main page; mark the control state dirty so the (re)connecting
    client receives a full state push."""
    global control_data_d_update
    control_data_d_update = True
    return render_template('index.html')
def update_setting(subject, control, value):
    """Apply one client control change.

    "enableListener": (re)start the stream-listing sniffer thread.
    "streamValue": diff the newly selected stream items against the current
    subscriptions and (un)subscribe accordingly.
    Returns True when the control was recognised and handled.
    """
    global control_data_d_update
    global control_data_d
    if control == "enableListener":
        global streamListingThread
        if value == True:
            if streamListingThread == None or streamListingThread.is_alive() == False:
                streamListingThread = threading.Thread(target=getSMVStreams, args=(sys.argv[1],-1))
                streamListingThread.start()
        control_data_d[subject][control] = value
        control_data_d_update = True
        return True
    if control == "streamValue":
        global streamList
        global subscribers_list
        global receiver
        global smv_data
        # items deselected vs. newly selected, relative to the current state
        dif_off = set(subscribers_list) - set(value)
        dif_on = set(value) - set(subscribers_list)
        #print_to_log(dif_off)
        #print_to_log(dif_on)
        for item in dif_off:
            stream = streamList[int(item)-1].split(',') # svID from itemlist
            svID = stream[0]
            channel = int(stream[1])
            unsubscribe(receiver, svID, channel, start = True)
            print_to_log("INFO: SMV item %s unsubscribed" % item)
        for item in dif_on:
            stream = streamList[int(item)-1].split(',') # svID from itemlist
            svID = stream[0]
            channel = int(stream[1])
            if svID not in smv_data:
                sec_counter[svID] = 0
                smv_data[svID] = {} # ensure we initialised the dataset
                smv_data[svID][0] = []
                oldSmpCnt[svID] = 0
            subscribe(receiver, svID, channel, start = True)
        # differences have been processed, value is the actual state
        subscribers_list = value
        if lib61850.SVReceiver_isRunning(receiver) == False:
            print_to_log("ERROR: Failed to enable SMV subscriber")
        else:# set control-data in the client control if succesfull
            control_data_d[subject][control] = value
        # update the control now
        control_data_d_update = True
        return True
    return False
@application.route('/control-setting', methods=['POST'])
def control_setting(): # post requests with data from client-side javascript events
    """Handle a posted control change of the form {'id': ..., 'value': ...}."""
    global control_data_d
    content = request.get_json(silent=True)
    if content['id'] == "refresh":
        global control_data_d_update
        control_data_d_update = True
    else:
        # locate which settings group owns the posted control id
        for subject in control_data_d:
            if isinstance(control_data_d[subject], dict):
                for item in control_data_d[subject]:
                    if item == content['id']:
                        if update_setting(subject, content['id'],content['value']) != True: # update the setting
                            print_to_log("ERROR: could not update setting: " + content['id'])
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
def control_data_g():
    """SSE generator: push the full control state whenever it is marked dirty,
    refreshing the stream listbox whenever new streams were sniffed."""
    global control_data_d
    global control_data_d_update
    global streamList
    streamList_Length = 0
    while True:
        time.sleep(0.1) # check for changes every 0.1 seconds, and if so send update to client
        # update the stream list, if a new entry is found
        if len(streamList) > streamList_Length:
            control_data_d['streamSelect_items'] = []
            for stream in streamList:
                control_data_d['streamSelect_items'].append(stream)
            streamList_Length = len(streamList)
            control_data_d_update = True
        # update the controls when a control is updated
        if control_data_d_update == True:
            control_data_d_update = False
            json_data = json.dumps(control_data_d)
            yield f"data:{json_data}\n\n"
@application.route('/control-data')
def control_data():
    """Server-sent-events endpoint streaming control-state updates."""
    return Response(control_data_g(), mimetype='text/event-stream')
def stream_data_g():
    """SSE generator: each time a new acquisition second completes, push that
    second's samples plus stream info for every subscribed stream."""
    global smv_data
    global streamInfo
    global sec_counter
    global streamFilter
    second_update = 0
    while True:
        allData = {}
        allData['dataSets'] = {}
        allData['stream_info'] = {}
        new_data = False
        index = 0
        for svID in streamFilter:
            # last fully completed second for this stream
            second = sec_counter[svID] - 1
            if second < 1: #ignore the first 2 seconds, so items can be initialised
                continue
            if index == 0 and second > second_update: # check for the first active item if second was incremented
                second_update = second # reset it until next increment
                new_data = True # record data from all datasets
            #if we have new data
            if new_data == True:
                allData['dataSets'][svID] = smv_data[svID][second]
                allData['stream_info'][svID] = streamInfo[svID]
            index = index + 1
        if new_data == True:
            json_data = json.dumps(allData)
            yield f"data:{json_data}\n\n"
            new_data = False
        time.sleep(0.1)
@application.route('/stream-data')
def stream_data():
    """Server-sent-events endpoint streaming sampled-value data per second."""
    return Response(stream_data_g(), mimetype='text/event-stream')
def print_to_log(message):
    """Append `message` to the in-memory log streamed to clients via /log-data."""
    global log_list
    log_list.append(message)
def log_data_g():
    """SSE generator: push only the log lines added since the previous poll."""
    global log_list
    log_length = 0
    while True:
        if len(log_list) > log_length:
            # send just the new tail of the log
            json_data = json.dumps(log_list[log_length : ])
            log_length = len(log_list)
            yield f"data:{json_data}\n\n"
        time.sleep(0.3)
@application.route('/log-data')
def log_data():
    """Server-sent-events endpoint streaming log messages."""
    return Response(log_data_g(), mimetype='text/event-stream')
def svUpdateListener_cb(subscriber, parameter, asdu):
    """libiec61850 callback: one Sampled-Values ASDU received.

    Appends the subscribed channels' INT32 samples to smv_data[svID][second]
    and advances the per-stream second counter when smpCnt wraps around.
    """
    svID = lib61850.SVSubscriber_ASDU_getSvId(asdu).decode("utf-8")
    global streamFilter
    if svID not in streamFilter:
        print_to_log("DEBUG: filter not matched for svID: " + svID)
        return
    #print_to_log("SMV event: (svID: %s)" % svID)
    global smv_data
    global sec_counter
    global oldSmpCnt
    seconds = sec_counter[svID]
    size = lib61850.SVSubscriber_ASDU_getDataSize(asdu)
    smpCnt = lib61850.SVSubscriber_ASDU_getSmpCnt(asdu)
    #print_to_log(" confRev: %u" % lib61850.SVSubscriber_ASDU_getConfRev(asdu))
    #print_to_log(" smpSynch: %u" % lib61850.SVSubscriber_ASDU_getSmpSynch(asdu))
    # list with all y values (4x amp and 4x volt for 9-2 LE)
    indices = {}
    for channel in streamFilter[svID]:
        # each channel occupies 8 bytes in the ASDU payload
        if channel * 8 < size:
            indices[channel] = {'y': lib61850.SVSubscriber_ASDU_getINT32(asdu, channel * 8) }
        else:
            print_to_log("ERROR: cannot retrieve channel %i for svID: %s, size = %i" % (channel,svID,size))
    # json list with { x: samplecount, index: [{y:_},{y:_},{y:_},...] }
    smv_data[svID][seconds].append( {'x': smpCnt, 'channels': indices } )
    # increment the secod counter each 4000 sampled, i.e each second
    if oldSmpCnt[svID] > smpCnt: # trigger second increment when the counter loops.(i.e. when the previous smpCnt is higher then the current, we assume we looped around from 4000 to 0)
        global streamInfo
        streamInfo[svID] = {
            'size': size,
            'seconds': seconds,
            'svID': str(lib61850.SVSubscriber_ASDU_getSvId(asdu)),
            'confRev': lib61850.SVSubscriber_ASDU_getConfRev(asdu),
            'smpSync': lib61850.SVSubscriber_ASDU_getSmpSynch(asdu),
        }
        # OPTIONAL; not in 9-2 LE, source:https://knowledge.rtds.com/hc/en-us/article_attachments/360074685173/C_Kriger_Adewole_RTDS.pdf
        # NOTE(review): the optional fields below are written under top-level keys of
        # the streamInfo dict rather than under streamInfo[svID] — looks unintended; confirm.
        if lib61850.SVSubscriber_ASDU_hasDatSet(asdu) == True:
            streamInfo['datset'] = lib61850.SVSubscriber_ASDU_getDatSet(asdu)
        if lib61850.SVSubscriber_ASDU_hasSmpRate(asdu) == True:
            streamInfo['smpRate'] = lib61850.SVSubscriber_ASDU_getSmpRate(asdu)
        if lib61850.SVSubscriber_ASDU_hasRefrTm(asdu) == True:
            streamInfo['RefTm'] = lib61850.SVSubscriber_ASDU_getRefrTmAsMs(asdu)
        if lib61850.SVSubscriber_ASDU_hasSmpMod(asdu) == True:
            streamInfo['smpMod'] = lib61850.SVSubscriber_ASDU_getSmpMod(asdu)
    #increment counter
        seconds = seconds + 1
        smv_data[svID][seconds] = [] # create a new list to store the samples
        sec_counter[svID] = seconds
    oldSmpCnt[svID] = smpCnt
# make the callback pointer global to prevent cleanup
# (the wrapped callback must outlive every subscriber it is attached to)
svUpdateListener = lib61850.SVUpdateListener(svUpdateListener_cb)
def subscribe(receiver, svID, channel, start = True):
    """Add `channel` of stream `svID` to the receive filter; when the stream's
    APPID is not yet subscribed, create and register an SVSubscriber for it."""
    global streamFilter
    global StreamDetails
    global subscribers
    # check if appid already in use in other filters
    inuse = False
    appid = StreamDetails[svID]['appid']
    for key in streamFilter:
        if StreamDetails[key]['appid'] == appid:
            inuse = True
    # if appid not yet subscribed to, subscribe
    if inuse == False:
        global svUpdateListener
        # the receiver must be stopped while registering a new subscriber
        if lib61850.SVReceiver_isRunning(receiver) == True:
            lib61850.SVReceiver_stop(receiver)
        subscriber = lib61850.SVSubscriber_create(None, appid)
        subscribers[appid] = subscriber
        lib61850.SVSubscriber_setListener(subscriber, svUpdateListener, None)
        lib61850.SVReceiver_addSubscriber(receiver, subscriber)
        streamFilter[svID] = set()
        if start == True:
            lib61850.SVReceiver_start(receiver)
            if lib61850.SVReceiver_isRunning(receiver) == False:
                print_to_log("Failed to start SMV subscriber. Reason can be that the Ethernet interface doesn't exist or root permission are required.")
                sys.exit(-1)
    # add the filter
    streamFilter[svID].add(channel)
    print_to_log("INFO: SMV subscribed with: %i %s %i" % (appid, svID, channel))
def unsubscribe(receiver, svID, channel, start = True):
    """Remove `channel` of stream `svID` from the receive filter; when the
    stream's APPID is no longer referenced by any filter, unregister its
    SVSubscriber from the receiver."""
    global streamFilter
    global StreamDetails
    global subscribers
    streamFilter[svID].remove(channel)
    if len(streamFilter[svID]) == 0:
        streamFilter.pop(svID) # remove filter
    # check if appid still in use in other filters
    inuse = False
    appid = StreamDetails[svID]['appid']
    for key in streamFilter:
        if StreamDetails[key]['appid'] == appid:
            inuse = True
    if inuse == False:
        # the receiver must be stopped while removing the subscriber
        if lib61850.SVReceiver_isRunning(receiver) == True:
            lib61850.SVReceiver_stop(receiver)
        lib61850.SVReceiver_removeSubscriber(receiver, subscribers[appid])
        if start == True:
            lib61850.SVReceiver_start(receiver)
            if lib61850.SVReceiver_isRunning(receiver) == False:
                print_to_log("Failed to start SMV subscriber. Reason can be that the Ethernet interface doesn't exist or root permission are required.")
                sys.exit(-1)
    print_to_log("INFO: SMV %s, %i unsubscribed" % (svID, channel))
def determine_path():
    """Return the absolute directory containing this script, resolving a
    symlinked script file to its real location. (Borrowed from wxglade.py.)

    Exits the process when __file__ is unavailable (e.g. embedded interpreters).
    """
    try:
        root = __file__
    except NameError:
        # BUG FIX: the original bare `except:` swallowed *any* error (including
        # KeyboardInterrupt); only the missing-__file__ case is expected here.
        print("ERROR: __file__ variable missing")
        sys.exit()
    if os.path.islink(root):
        root = os.path.realpath(root)
    return os.path.dirname(os.path.abspath(root))
def start ():
    """Entry point: start the stream-listing sniffer thread, create the SMV
    receiver for the interface given as argv[1] (default eth0) and run Flask.
    Blocks until the Flask server exits, then tears the receiver down."""
    global receiver
    global streamListingThread
    path = determine_path()
    print( "path:" + path )
    print("Data files path:")
    files = [f for f in os.listdir(path + "/templates")]
    print("\n" + path + "/templates")
    print(files)
    print("\n" + path + "/static")
    files = [f for f in os.listdir(path + "/static")]
    print(files)
    print("\n")
    receiver = lib61850.SVReceiver_create()
    if len(sys.argv) > 1:
        print_to_log("Set interface id: %s" % sys.argv[1])
        lib61850.SVReceiver_setInterfaceId(receiver, sys.argv[1])
    else:
        print_to_log("Using interface eth0")
        lib61850.SVReceiver_setInterfaceId(receiver, "eth0")
    # general stream listener thread to catch all streams(subscribed and unsubscribed)
    streamListingThread = threading.Thread(target=getSMVStreams, args=(sys.argv[1],-1))
    streamListingThread.start()
    #subs = subscribe(receiver, None, None, "simpleIOGenericIO/LLN0$GO$gcbAnalogValues",str(1))
    application.run(host="0.0.0.0", debug=False, threaded=True) # debug=true will start 2 subscriber threads
    # Flask returned: shut the SMV receiver down cleanly
    lib61850.SVReceiver_stop(receiver)
    lib61850.SVReceiver_destroy(receiver)
# script entry point
if __name__ == "__main__":
    start()
| StarcoderdataPython |
1797144 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython.utilities
from compas.artists import PrimitiveArtist
from compas.colors import Color
from .artist import GHArtist
class PointArtist(GHArtist, PrimitiveArtist):
    """Artist for drawing points.

    Parameters
    ----------
    point : :class:`~compas.geometry.Point`
        A COMPAS point.
    **kwargs : dict, optional
        Additional keyword arguments.
        See :class:`~compas_ghpython.artists.GHArtist` and :class:`~compas.artists.PrimitiveArtist` for more info.

    """

    def __init__(self, point, **kwargs):
        super(PointArtist, self).__init__(primitive=point, **kwargs)

    def draw(self, color=None):
        """Draw the point.

        Parameters
        ----------
        color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
            The RGB color of the point.
            Default is :attr:`compas.artists.PrimitiveArtist.color`.

        Returns
        -------
        :rhino:`Rhino.Geometry.Point3d`

        """
        draw_color = (Color.coerce(color) or self.color).rgb255
        geometry = [{'pos': list(self.primitive), 'color': draw_color}]
        return compas_ghpython.utilities.draw_points(geometry)[0]
| StarcoderdataPython |
221971 | import pandas as pd
import numpy as np
from imblearn.metrics import specificity_score
from sklearn import metrics
def eval_precision(gt, pred, average='macro'):
    """Precision score; NaNs in pandas inputs are treated as 0.0 first."""
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        gt = gt.fillna(0.0)
        pred = pred.fillna(0.0)
    return metrics.precision_score(gt, pred, average=average)
def eval_acc(gt, pred, average='binary'):
    """Accuracy score; NaNs in pandas inputs are treated as 0.0 first.

    `average` is accepted for signature symmetry with the other eval_*
    functions but is not used by accuracy.
    """
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        gt = gt.fillna(0.0)
        pred = pred.fillna(0.0)
    return metrics.accuracy_score(gt, pred)
def eval_cohen(gt, pred, average='quadratic'):
    """Cohen's kappa with quadratic weights unless average == 'binary'.

    NaNs in pandas inputs are treated as 0.0 first.
    """
    weight_method = None if average == 'binary' else "quadratic"
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        gt = gt.fillna(0.0)
        pred = pred.fillna(0.0)
    return metrics.cohen_kappa_score(gt, pred, weights=weight_method)
def eval_acc_multiple_classes(gt, pred, label=0):
    """One-vs-rest accuracy for `label`: keep only rows where either gt or
    pred equals `label`, binarize both columns, then score."""
    tmp_pd = pd.DataFrame()
    tmp_pd['gt'] = gt
    tmp_pd['pred'] = pred
    # restrict to rows where the target label appears on either side
    tmp_pd = tmp_pd[(tmp_pd['gt'] == label) | (tmp_pd['pred'] == label)]
    tmp_pd['pred'] = tmp_pd['pred'].apply(lambda x: 1 if x == label else 0)
    tmp_pd['gt'] = tmp_pd['gt'].apply(lambda x: 1 if x == label else 0)
    return metrics.accuracy_score(tmp_pd['gt'], tmp_pd['pred'])
def eval_recall(gt, pred, average='macro'):
    """Recall (sensitivity) score; NaNs in pandas inputs are treated as 0.0 first."""
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        gt = gt.fillna(0.0)
        pred = pred.fillna(0.0)
    return metrics.recall_score(gt, pred, average=average)
def eval_specificity(gt, pred, average='macro'):
    """Specificity score (imblearn); NaNs in pandas inputs are treated as 0.0 first."""
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        gt = gt.fillna(0.0)
        pred = pred.fillna(0.0)
    return specificity_score(gt, pred, average=average)
# def eval_specificity(gt, pred, average='macro'):
# if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
# return metrics.recall_score(gt.fillna(0.0).astype(bool) == False, pred.fillna(0.0).astype(bool) == False,
# average=average)
# else:
# return metrics.recall_score(gt == False, pred == False, average=average)
def eval_f1(gt, pred, average='macro'):
    """F1 score; NaNs in pandas inputs are treated as 0.0 first.

    BUG FIX: the non-pandas branch previously hard-coded average='macro',
    silently ignoring the caller's `average` argument.
    """
    if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
        gt = gt.fillna(0.0)
        pred = pred.fillna(0.0)
    return metrics.f1_score(gt, pred, average=average)
# def eval_f1_awake(gt, pred, average='macro'):
# if type(gt) is pd.core.frame.DataFrame or type(gt) is pd.core.frame.Series:
# return metrics.f1_score(gt.fillna(0.0).astype(bool) == False, pred.fillna(0.0).astype(bool) == False, average=average)
# else:
# return metrics.f1_score(gt == False, pred == False, average=average) | StarcoderdataPython |
3265949 | """This is a OCR with rule based model for finding question candidates.
"""
from .model import Model
from typing import List
from .popo import Question
from correlator.correlation import Correlation
from .popo import BaseQuestion
class OCRModel(Model):
    """Rule-based OCR model: a correlation becomes a question candidate when
    its simple score clears a fixed threshold."""
    def __init__(self) -> None:
        super().__init__()
        # minimum simple_score for a correlation to become a question
        self.threshold = 0.2

    def generate_questions(self) -> List[Question]:
        """This model generates no standalone questions.

        NOTE(review): the original evaluated `super().generate_questions`
        without calling it — a no-op attribute access, removed here. Call
        `super().generate_questions()` instead if the base implementation
        has required side effects.
        """
        return []

    def process_correlation(self, correlation: Correlation) -> BaseQuestion:
        """Turn `correlation` into a BaseQuestion, or return None when its
        simple score is at or below the threshold."""
        if correlation.simple_score() > self.threshold:
            # TODO: more advanced model would use NER
            return BaseQuestion(
                question=correlation.ocr_block_text(),
                answer=correlation.caption,
                yt_link=correlation.yt_ts_link(),
                confidence=correlation.simple_score(),
            )
        # not good enough correlation
        return None
| StarcoderdataPython |
6440894 | import abc
import json
from typing import Callable, Type, TypeVar, Generic, Any, Union
from amino import List, Either, __, Left, Eval, ADT, Right, Try, Path, Map, Lists
from amino.do import do, Do
from amino.boolean import false, true
from amino.json.decoder import decode_json_type
from ribosome.nvim.io.compute import NvimIO
from ribosome.nvim.api.variable import variable_prefixed, variable, variable_set, variable_set_prefixed
from ribosome.nvim.api.util import cons_checked_e
from ribosome.nvim.io.api import N
from ribosome.util.doc.data import DocBlock
A = TypeVar('A', contravariant=True)
B = TypeVar('B')
class Setting(Generic[B], ADT['Setting[B]']):
    """Abstract neovim setting: a readable (and possibly writable) value of type B."""
    @abc.abstractproperty
    def value(self) -> NvimIO[Either[str, B]]:
        ...
    @abc.abstractproperty
    def default_e(self) -> Either[str, B]:
        ...
    @abc.abstractmethod
    def update(self, value: B) -> NvimIO[None]:
        ...
    @do(NvimIO[None])
    def ensure(self, fallback: B) -> Do:
        """Write `fallback` only when no value is currently set."""
        current = yield self.value
        yield current.cata(lambda e: self.update(fallback), lambda a: N.unit)
    @do(NvimIO[Either[str, B]])
    def value_or_default_e(self) -> Do:
        """Current value, falling back to the configured default (still an Either)."""
        value = yield self.value
        return value.o(self.default_e)
    @do(NvimIO[B])
    def value_or_default(self) -> Do:
        """Current value or default; fails the NvimIO when neither exists."""
        value = yield self.value_or_default_e()
        yield N.from_either(value)
class StrictSetting(Generic[A, B], Setting[B]):
    """Setting backed by a vim variable of raw type A, converted to B by `ctor`."""
    def __init__(
            self,
            name: str,
            desc: str,
            help: str,
            prefix: bool,
            tpe: Type[A],
            ctor: Callable[[A], Either[str, B]],
            default: Either[str, B],
    ) -> None:
        self.name = name
        self.desc = desc
        self.help = help
        self.prefix = prefix
        self.tpe = tpe
        self.ctor = ctor
        self.default = default
    def _arg_desc(self) -> List[str]:
        return List(self.name, str(self.prefix), str(self.tpe))
    @property
    def value(self) -> NvimIO[Either[str, B]]:
        # read the plugin-prefixed variable when `prefix` is set, else the bare name
        api_call = variable_prefixed if self.prefix else variable
        return api_call(self.name, cons_checked_e(self.tpe, self.ctor))
    def value_or(self, default: B) -> NvimIO[B]:
        return self.value / __.get_or_else(default)
    @property
    def default_e(self) -> Either[str, B]:
        return self.default
    def update(self, value: B) -> NvimIO[None]:
        api_call = variable_set_prefixed if self.prefix else variable_set
        return api_call(self.name, value)
class EvalSetting(Generic[B], Setting[B]):
    """Setting whose value is produced by an NvimIO thunk instead of a vim
    variable; `update` is a no-op."""
    def __init__(
            self,
            name: str,
            f: Eval[NvimIO[Either[str, B]]],
            default: Either[str, B]=Left('no default specified')
    ) -> None:
        self.name = name
        self.f = f
        self.default = default
    def _arg_desc(self) -> List[str]:
        return List(self.name)
    @property
    def value(self) -> NvimIO[Either[str, B]]:
        return self.f.value
    @property
    def default_e(self) -> Either[str, B]:
        return self.default
    def update(self, value: B) -> NvimIO[None]:
        # eval-backed settings cannot be written
        return N.pure(None)
# signature of the concrete setting constructors produced by `setting_ctor`
SettingCtor = Callable[[str, str, str, bool, Either[str, B]], Setting[B]]
def no_default(name: str) -> Either[str, A]:
    """Left value used when a setting has no configured default."""
    return Left(f'no default specified for setting `{name}`')
def setting_ctor(tpe: Type[A], ctor: Callable[[A], Either[str, B]]) -> SettingCtor:
    """Build a constructor for StrictSettings of raw vim type `tpe`, converting
    raw values with `ctor`."""
    def setting(
            name: str,
            desc: str,
            help: Union[str, DocBlock],
            prefix: bool,
            default: Either[str, B]=None,
    ) -> Setting[B]:
        return StrictSetting(
            name,
            desc,
            DocBlock.string(help) if isinstance(help, str) else help,
            prefix,
            tpe,
            ctor,
            default or no_default(name),
        )
    return setting
def path_list(data: list) -> Either[str, List[Path]]:
    """Convert a vim list of strings to user-expanded Paths; Left on first failure."""
    return Lists.wrap(data).traverse(lambda a: Try(Path, a) / __.expanduser(), Either)
def str_list(data: list) -> Either[str, List[str]]:
    """Validate that every element of a vim list is a string."""
    return Lists.wrap(data).traverse(lambda a: Right(a) if isinstance(a, str) else Left(f'not a string: {a}'), Either)
@do(Either[str, A])
def decode_json_setting(tpe: Type[A], data: Any) -> Do:
    """Round-trip `data` through JSON to decode it into an instance of `tpe`."""
    js = yield Try(json.dumps, data)
    yield decode_json_type(js, tpe)
def json_setting(tpe: Type[A]) -> SettingCtor[A]:
    """Setting constructor that decodes arbitrary vim data into `tpe` via JSON."""
    return setting_ctor(object, lambda a: decode_json_setting(tpe, a))
# ready-made setting constructors for the common vim variable types
str_setting = setting_ctor(str, Right)
int_setting = setting_ctor(int, Right)
float_setting = setting_ctor(float, Right)
list_setting = setting_ctor(list, Right)
str_list_setting = setting_ctor(list, str_list)
path_setting = setting_ctor(str, (lambda a: Try(Path, a)))
path_list_setting = setting_ctor(list, path_list)
map_setting = setting_ctor(dict, lambda a: Right(Map(a)))
path_map_setting = setting_ctor(dict, lambda a: Try(Map, a).valmap(lambda b: Path(b).expanduser()))
# vim has no real booleans: 0 is false, anything else true
bool_setting = setting_ctor(int, lambda a: Right(false if a == 0 else true))
__all__ = ('Setting', 'StrictSetting', 'EvalSetting', 'setting_ctor', 'str_setting', 'int_setting', 'float_setting',
           'list_setting', 'path_setting', 'path_list_setting', 'map_setting', 'path_map_setting', 'bool_setting',
           'str_list_setting', 'json_setting',)
| StarcoderdataPython |
4935287 | <filename>meson/post_install.py
#!/usr/bin/env python3
"""Meson post-install hook: make the dynagenic/resources tree readable by all users."""
import os, sys, shutil, stat, subprocess

# get absolute input path from the install script arguments
input_path = sys.argv[1]

# make sure destination directory exists
os.makedirs(os.path.dirname(input_path), exist_ok=True)

for directory, subdirectories, files in os.walk(input_path):
    for folder in subdirectories:
        target = os.path.join(directory, folder)
        if "dynagenic/resources" in target:
            print("Changing permission of: " + target)
            # Add read/write/execute permission for "others".
            # BUG FIX: os.chmod *replaces* the whole mode, so passing stat.S_IRWXO
            # alone stripped the owner/group bits; OR the new bits into the
            # current mode instead.
            os.chmod(target, os.stat(target).st_mode | stat.S_IRWXO)
    for file in files:
        target = os.path.join(directory, file)
        if "dynagenic/resources" in target:
            print("Changing permission of: " + target)
            # Add read/write permission for "others", preserving existing bits.
            os.chmod(target, os.stat(target).st_mode | stat.S_IROTH | stat.S_IWOTH)
6654007 | # SPDX-FileCopyrightText: 2020 <NAME>
#
# SPDX-License-Identifier: MIT
"""Decoder interfaces for SIRC protocol."""
class DecodeException(Exception):
    """Raised when a set of pulse timings are not a valid SIRC command."""
class SIRCDecodeException(DecodeException):
    """Decode failure specific to the SIRC (Sony) protocol."""
    pass
class NECDecodeException(DecodeException):
    """Decode failure specific to the NEC protocol."""
    pass
def _bits_to_value_lsb(bits: list):
result = 0
for position, value in enumerate(bits):
result += value << position
return result
def _pulses_to_bits(pulses: list):
bits = []
evens = pulses[0::2]
odds = pulses[1::2]
pairs = zip(evens, odds)
for even, odd in pairs:
if odd > even * 1.75:
bits.append(1)
else:
bits.append(0)
return bits
def decode_sirc(pulses: list) -> tuple:
    """Decode SIRC (Sony) protocol commands from raw pulses.

    The Sony command protocol uses a different format than the normal RC-5,
    which requires a different decoding scheme. Returns (command, device) for
    12- and 15-bit frames, (command, device, extended) for 20-bit frames.

    Details of the protocol can be found at:
        https://www.sbprojects.net/knowledge/ir/sirc.php
        http://www.righto.com/2010/03/understanding-sony-ir-remote-codes-lirc.html
    """
    # SIRC supports 12-, 15- and 20-bit commands: one header pulse plus two
    # pulses per bit, hence 25, 31 or 41 pulses in total.
    count = len(pulses)
    if count not in (25, 31, 41):
        raise SIRCDecodeException("Invalid number of pulses %d" % count)
    header = pulses[0]
    if not 2200 <= header <= 2600:
        raise SIRCDecodeException("Invalid header pulse length (%d usec)" % header)
    bits = _pulses_to_bits(pulses[1:])
    command = _bits_to_value_lsb(bits[0:7])
    if count == 31:
        # 15-bit frame: 8-bit device address, no extension
        device = _bits_to_value_lsb(bits[7:15])
        return (command, device)
    # 12- and 20-bit frames use a 5-bit device address
    device = _bits_to_value_lsb(bits[7:12])
    if count == 25:
        # 12-bit frame: no extension bits
        return (command, device)
    # 20-bit frame: 8-bit extension follows the device address
    extended = _bits_to_value_lsb(bits[12:])
    return (command, device, extended)
# Constant object to signify a NEC repeat code.
NEC_REPEAT = "NEC Repeat"
def decode_nec(pulses: list):
    """Decode (extended) NEC protocol commands from raw pulses.

    Returns NEC_REPEAT for a repeat frame, otherwise (address, command).
    Plain NEC frames (address byte followed by its complement) yield the 8-bit
    address; extended NEC frames yield the full 16-bit little-endian address.

    The NEC command protocol is a structured 16-bit protocol that can be validated.
    Details of the protocol can be found at:
        https://www.sbprojects.net/knowledge/ir/nec.php
    """
    if not len(pulses) in [67, 3]:
        raise NECDecodeException("Invalid number of pulses %d" % len(pulses))
    if not 8800 <= pulses[0] <= 9300:
        raise NECDecodeException("Invalid AGC pulse length (%d usec)" % pulses[0])
    # a ~2.25 ms space followed by a short burst after the AGC mark is a repeat code
    if 2100 <= pulses[1] <= 2300 and 450 <= pulses[2] <= 700:
        return NEC_REPEAT
    if not 4400 <= pulses[1] <= 4600:
        raise NECDecodeException("Invalid AGC space length (%d usec)" % pulses[1])
    bits = _pulses_to_bits(pulses[2:])
    # Decode the command first, because that is always sent twice, once straight and
    # once inverted. The address _might_ be inverted.
    command = _bits_to_value_lsb(bits[16:24])
    command_inverted = _bits_to_value_lsb(bits[24:32])
    if command_inverted != (~command & 0xFF):
        raise NECDecodeException(
            "Not a valid NEC command: command != ~command_inverted"
        )
    address = _bits_to_value_lsb(bits[0:8])
    address_inverted = _bits_to_value_lsb(bits[8:16])
    if address_inverted == (~address & 0xFF):
        return address, command
    # BUG FIX: the extended NEC address is address | (address_inverted << 8);
    # the original `(address + address_inverted << 8)` shifted the *sum* left
    # by 8 due to operator precedence, producing a wrong 16-bit address.
    return address | (address_inverted << 8), command
| StarcoderdataPython |
11383055 | import argparse
import os
import sys
import numpy as np
# Sweep outlier ratio x noise sigma x PnP on/off and invoke the test shell
# script for every parameter combination.
parser = argparse.ArgumentParser(description="run sphere synt test")
parser.add_argument("--cfg", default="configs/gdrn_sphere_synt/a6_cPnP_sphere.py", help="cfg path")
parser.add_argument("--ckpt", default="output/gdrn_sphere_synt/a6_cPnP_sphere/model_final.pth", help="ckpt path")
# parser.add_argument("--noise_sigma", default=0.0, type=float, 'noise sigma')
# parser.add_argument("--outlier", default=0.1, type=float, 'outlier ratio')
# parser.add_argument("--use_pnp", default=False, action="store_true")
args = parser.parse_args()
print(args.cfg)
print(args.ckpt)
for outlier in [0.1, 0.3]:
    # for outlier in [0.3]:
    for noise_level in range(0, 31):
        noise_sigma = noise_level * 0.002  # [0, 0.06]
        for use_pnp in [True, False]:
            print("outlier: ", outlier, "noise sigma: ", noise_sigma, "use_pnp:", use_pnp)
            cmd = "./core/gdrn_sphere_synt/test_gdrn_sphere_synt.sh {} 0 {} INPUT.XYZ_NOISE_SIGMA_TEST={} TEST.USE_PNP={} INPUT.MIN_XYZ_OUTLIER_TEST={} INPUT.MAX_XYZ_OUTLIER_TEST={}".format(
                args.cfg, args.ckpt, noise_sigma, use_pnp, outlier, outlier
            )
            print(cmd)
            os.system(cmd)
| StarcoderdataPython |
9731250 | '''
@ Author: <NAME>, songkai13 _at_ iccas.ac.cn
@ Notes : 1. Here I use RNN-LSTM to learn the pattern of our curves. We could see that the peaks is
relatively hard to learn. This is straightforward to understand. Physically, we could regrad
these bumps as rare events in the rate theory.
@ Refs : 1. https://keras.io/layers/recurrent/
'''
import keras
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras import optimizers
from keras.layers.core import Dense, Activation,Dropout
from keras.layers.recurrent import LSTM
import numpy as np
import support_part
in_out_neurons = 1
hidden_neurons = 208
# length of the input window (number of past samples fed to the LSTM)
n_input = 60
X_train11, Y_train11, X_test11, Y_test11 = support_part.load_data(n_input)
#X_train12, Y_train12, X_test12, Y_test12 = sk_lstm.load_data('init-rho12.dat', n_input)
#X_train21, Y_train21, X_test21, Y_test21 = sk_lstm.load_data('init-rho21.dat', n_input)
#X_train22, Y_train22, X_test22, Y_test22 = sk_lstm.load_data('init-rho22.dat', n_input)
#X_train = np.concatenate((X_train11,X_train12,X_train21,X_train22),axis=2)
#Y_train = np.concatenate((Y_train11,Y_train12,Y_train21,Y_train22),axis=1)
#X_test = np.concatenate((X_test11,X_test12,X_test21,X_test22),axis=2)
#Y_test = np.concatenate((Y_test11,Y_test12,Y_test21,Y_test22),axis=1)
# only the rho11 component is used in this run
X_train = X_train11
Y_train = Y_train11
X_test = X_test11
Y_test = Y_test11
print(len(X_train[0]))
print(X_train.shape,Y_train.shape)
# single-LSTM-layer model with dropout and a linear readout
model = Sequential()
model.add(LSTM(hidden_neurons, return_sequences=False,
               input_shape=(None, in_out_neurons)))
#model.add(Dropout(0.3))
#model.add(LSTM(70, return_sequences=False))
model.add(Dropout(0.3))
#model.add(LSTM(20, return_sequences=False))
#model.add(Dropout(0.3))
model.add(Dense(in_out_neurons, input_dim=hidden_neurons))
model.add(Activation('linear'))
#myopt = optimizers.SGD(lr=0.01, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(loss='mse', optimizer='rmsprop')
# generally, the epochs have to be large engough.
ss = model.fit(X_train, Y_train, batch_size=128, epochs=20, validation_split=0.05)
#print(type(ss))
# plot train and validation loss
#plt.plot(ss.history['loss'])
#plt.plot(ss.history['val_loss'])
#plt.title('model train vs validation loss')
#plt.ylabel('loss')
#plt.xlabel('epoch')
#plt.legend(['train', 'validation'], loc='best')
#plt.show()
# predict over the full (train + test) trajectory and plot against the analytic curve
predicted = model.predict(np.concatenate((X_train,X_test)))
print(predicted.shape)
analytic = np.concatenate((Y_train,Y_test),axis=0)
lstm = predicted
plt.figure(figsize=(8, 7))
colors = ['r','g','b','c', 'm', 'y', 'k', 'w']
#for i in range(1):
#print(dt_array[:10])
print(analytic.shape)
plt.plot(analytic[:,0],c=colors[0],label='Analytic')#analytic
plt.plot(lstm[:,0],'--',c=colors[1],label='RNN-LSTM')
#const = np.array([9]*1000)
#plt.plot(const,analytic[:,0],'--')
plt.legend(loc='best')
plt.xlabel('t(a.u.)')
plt.ylabel('Population')
#plt.xlim((2,30))
plt.title('RNN-LSTM to Learn the Pattern of a Curve')
plt.show()
8196421 | <reponame>odemiral/Bluepost-Crawler
class Configuration(object):
    """Battle.net 'Blizzard tracker' forum URLs for each franchise (US / English)."""
    def __init__(self):
        #TODO: parse .us and en separately to give users option to change language and location.
        base = 'http://us.battle.net'
        tracker = '/en/forum/blizztracker/'
        self.bnet_diablo_url = base + '/d3' + tracker
        self.bnet_sc2_url = base + '/sc2' + tracker
        self.bnet_wow_url = base + '/wow' + tracker
        self.bnet_hots_url = base + '/heroes' + tracker
        self.bnet_hearthstone_url = base + '/hearthstone' + tracker
        self.bnet_US = base
9726438 | <reponame>pcaston/core
"""Class to hold all lock accessories."""
import logging
from pyhap.const import CATEGORY_DOOR_LOCK
from openpeerpower.components.lock import DOMAIN, STATE_LOCKED, STATE_UNLOCKED
from openpeerpower.const import ATTR_CODE, ATTR_ENTITY_ID, STATE_UNKNOWN
from openpeerpower.core import callback
from .accessories import TYPES, HomeAccessory
from .const import CHAR_LOCK_CURRENT_STATE, CHAR_LOCK_TARGET_STATE, SERV_LOCK
_LOGGER = logging.getLogger(__name__)
# Map Open Peer Power lock states to HomeKit LockCurrentState values.
OPP_TO_HOMEKIT = {
    STATE_UNLOCKED: 0,
    STATE_LOCKED: 1,
    # Value 2 is Jammed which opp doesn't have a state for
    STATE_UNKNOWN: 3,
}
# Inverse mapping: HomeKit numeric state -> opp state string.
HOMEKIT_TO_OPP = {c: s for s, c in OPP_TO_HOMEKIT.items()}
# Lock-domain service to call for each requested target state.
STATE_TO_SERVICE = {STATE_LOCKED: "lock", STATE_UNLOCKED: "unlock"}
@TYPES.register("Lock")
class Lock(HomeAccessory):
    """Generate a Lock accessory for a lock entity.
    The lock entity must support: unlock and lock.
    """
    def __init__(self, *args):
        """Initialize a Lock accessory object."""
        super().__init__(*args, category=CATEGORY_DOOR_LOCK)
        # Optional PIN/code forwarded with every lock/unlock service call.
        self._code = self.config.get(ATTR_CODE)
        state = self.opp.states.get(self.entity_id)
        serv_lock_mechanism = self.add_preload_service(SERV_LOCK)
        # Current state starts as UNKNOWN (3) until the first update arrives.
        self.char_current_state = serv_lock_mechanism.configure_char(
            CHAR_LOCK_CURRENT_STATE, value=OPP_TO_HOMEKIT[STATE_UNKNOWN]
        )
        # Target-state changes coming from HomeKit are routed to set_state().
        self.char_target_state = serv_lock_mechanism.configure_char(
            CHAR_LOCK_TARGET_STATE,
            value=OPP_TO_HOMEKIT[STATE_LOCKED],
            setter_callback=self.set_state,
        )
        # Sync both characteristics with the entity's current state.
        self.async_update_state(state)
    def set_state(self, value):
        """Set lock state to value if call came from HomeKit."""
        _LOGGER.debug("%s: Set state to %d", self.entity_id, value)
        opp_value = HOMEKIT_TO_OPP.get(value)
        service = STATE_TO_SERVICE[opp_value]
        # Mirror the requested value immediately so HomeKit shows progress.
        if self.char_current_state.value != value:
            self.char_current_state.set_value(value)
        params = {ATTR_ENTITY_ID: self.entity_id}
        if self._code:
            params[ATTR_CODE] = self._code
        self.async_call_service(DOMAIN, service, params)
    @callback
    def async_update_state(self, new_state):
        """Update lock after state changed."""
        opp_state = new_state.state
        # States without a HomeKit equivalent are ignored entirely.
        if opp_state in OPP_TO_HOMEKIT:
            current_lock_state = OPP_TO_HOMEKIT[opp_state]
            _LOGGER.debug(
                "%s: Updated current state to %s (%d)",
                self.entity_id,
                opp_state,
                current_lock_state,
            )
            # LockTargetState only supports locked and unlocked
            # Must set lock target state before current state
            # or there will be no notification
            if (
                opp_state in (STATE_LOCKED, STATE_UNLOCKED)
                and self.char_target_state.value != current_lock_state
            ):
                self.char_target_state.set_value(current_lock_state)
            # Set lock current state ONLY after ensuring that
            # target state is correct or there will be no
            # notification
            if self.char_current_state.value != current_lock_state:
                self.char_current_state.set_value(current_lock_state)
| StarcoderdataPython |
9625799 | <gh_stars>100-1000
import math
import diffrax
import jax
import jax.numpy as jnp
import jax.random as jrandom
import pytest
import scipy.stats as stats
# Endpoint values used by test_shape, keyed by scalar type: two
# representative time values (0 and 2) per Python/JAX dtype, so that
# evaluate() is exercised with every dtype combination.
_vals = {
    int: [0, 2],
    float: [0.0, 2.0],
    jnp.int32: [jnp.array(0, dtype=jnp.int32), jnp.array(2, dtype=jnp.int32)],
    jnp.float32: [jnp.array(0.0, dtype=jnp.float32), jnp.array(2.0, dtype=jnp.float32)],
}
@pytest.mark.parametrize(
    "ctr", [diffrax.UnsafeBrownianPath, diffrax.VirtualBrownianTree]
)
def test_shape(ctr, getkey):
    """evaluate() must return the requested shape for every dtype pairing."""
    t0 = 0
    t1 = 2
    shapes = ((0,), (1, 0), (2,), (3, 4), (1, 2, 3, 4))
    for shape in shapes:
        if ctr is diffrax.UnsafeBrownianPath:
            path = ctr(shape, getkey())
            # An unsafe path has no fixed interval.
            assert path.t0 is None
            assert path.t1 is None
        elif ctr is diffrax.VirtualBrownianTree:
            path = ctr(t0, t1, 2**-5, shape, getkey())
            assert path.t0 == 0
            assert path.t1 == 2
        else:
            assert False
        # Query with every combination of start/end scalar types.
        for start, _ in _vals.values():
            for _, end in _vals.values():
                assert path.evaluate(start, end).shape == shape
@pytest.mark.parametrize(
    "ctr", [diffrax.UnsafeBrownianPath, diffrax.VirtualBrownianTree]
)
def test_statistics(ctr):
    """W(5) over many sample paths should be distributed as N(0, 5)."""
    # Deterministic key for this test; not using getkey()
    num_samples = 10000
    keys = jrandom.split(jrandom.PRNGKey(5678), num_samples)

    def _sample_endpoint(key):
        if ctr is diffrax.UnsafeBrownianPath:
            brownian = ctr(shape=(), key=key)
        elif ctr is diffrax.VirtualBrownianTree:
            brownian = ctr(t0=0, t1=5, tol=2**-5, shape=(), key=key)
        else:
            assert False
        return brownian.evaluate(0, 5)

    samples = jax.vmap(_sample_endpoint)(keys)
    assert samples.shape == (num_samples,)
    # Kolmogorov-Smirnov test against the exact N(0, sqrt(5)) law; require
    # the fit not be rejected at the 10% level.
    _, pval = stats.kstest(samples, stats.norm(loc=0, scale=math.sqrt(5)).cdf)
    assert pval > 0.1
def test_conditional_statistics():
    """Midpoints conditioned on their neighbours should follow the standard
    Brownian-bridge distribution."""
    key = jrandom.PRNGKey(5678)
    bm_key, sample_key, permute_key = jrandom.split(key, 3)
    # Get >80 randomly selected points; not too close to avoid discretisation error.
    t0 = 0.3
    t1 = 8.7
    ts = jrandom.uniform(sample_key, shape=(100,), minval=t0, maxval=t1)
    sorted_ts = jnp.sort(ts)
    ts = []
    prev_ti = sorted_ts[0]
    # Drop any point closer than 2**-10 to its predecessor.
    for ti in sorted_ts[1:]:
        if ti < prev_ti + 2**-10:
            continue
        prev_ti = ti
        ts.append(ti)
    ts = jnp.stack(ts)
    assert len(ts) > 80
    # Evaluate in a random order to exercise the tree's caching/recursion.
    ts = jrandom.permutation(permute_key, ts)
    # Get some random paths
    bm_keys = jrandom.split(bm_key, 100000)
    path = jax.vmap(
        lambda k: diffrax.VirtualBrownianTree(
            t0=t0, t1=t1, shape=(), tol=2**-12, key=k
        )
    )(bm_keys)
    # Sample some points
    out = []
    for ti in ts:
        vals = jax.vmap(lambda p: p.evaluate(t0, ti))(path)
        out.append((ti, vals))
    out = sorted(out, key=lambda x: x[0])
    # Test their conditional statistics
    # NOTE(review): range(1, 98) assumes at least 99 surviving points; the
    # fixed seed satisfies this, but a seed change could break it.
    for i in range(1, 98):
        prev_t, prev_vals = out[i - 1]
        this_t, this_vals = out[i]
        next_t, next_vals = out[i + 1]
        # Brownian-bridge conditional moments given W(prev_t) and W(next_t).
        mean = prev_vals + (next_vals - prev_vals) * (
            (this_t - prev_t) / (next_t - prev_t)
        )
        var = (next_t - this_t) * (this_t - prev_t) / (next_t - prev_t)
        std = math.sqrt(var)
        # Standardised values should be i.i.d. N(0, 1) if sampling is correct.
        normalised_vals = (this_vals - mean) / std
        _, pval = stats.kstest(normalised_vals, stats.norm.cdf)
        # Raise if the failure is statistically significant at 10%, subject to
        # multiple-testing correction.
        assert pval > 0.001
| StarcoderdataPython |
6703069 | import numpy as np
from src.activation_functions import ReLU, Softmax, LeakyReLU
from src.evaluation import plot_loss_and_accuracy, accuracy
from src.loss_functions import SquaredLoss
from src.neural_net.layers import InputLayer, Layer
from src.neural_net.network import NeuralNetwork
from src.preprocessing import to_categorical, split
# Load the small-MNIST train and test splits (comma-separated text files);
# X holds input rows, Y the corresponding digit labels.
X, Y = np.loadtxt('small_mnist/mnist_small_train_in.txt', delimiter=','),\
       np.loadtxt('small_mnist/mnist_small_train_out.txt', delimiter=',')
Xtest, Ytest = np.loadtxt('small_mnist/mnist_small_test_in.txt', delimiter=','),\
               np.loadtxt('small_mnist/mnist_small_test_out.txt', delimiter=',')
# Hold out 10% of the training data for validation.
(X, Y), (Xval, Yval) = split(X, Y, train_frac=0.9)
# Convert labels to a categorical encoding (presumably one-hot, per
# src.preprocessing.to_categorical) for the 10-way softmax output.
Y = to_categorical(Y)
hidden_layer_act = LeakyReLU(alpha=0.01)
# Architecture: input -> 25 -> 25 -> 10 (softmax).
layers = [InputLayer(X.shape[1]),
          Layer(25, hidden_layer_act),
          Layer(25, hidden_layer_act),
          Layer(10, Softmax())]
nn = NeuralNetwork(layers)
nn.compile(SquaredLoss(), metric=accuracy)
history = nn.fit(X, Y, Xval, Yval, learning_rate=0.003, n_epochs=100, batch_size=16)
nn.print_predict(Xtest, Ytest)
plot_loss_and_accuracy(history)
| StarcoderdataPython |
8068842 | <filename>Mundo3/Desafio085b.py
# Read seven integers from the user, splitting them into evens (num[0])
# and odds (num[1]); both sub-lists are sorted before the combined
# structure is printed.
num = [[], []]
for _ in range(7):
    valor = int(input('Digite um numero: '))
    # valor % 2 is 0 for even and 1 for odd, so it doubles as the index.
    num[valor % 2].append(valor)
for grupo in num:
    grupo.sort()
print(num)
| StarcoderdataPython |
5093063 | <reponame>rystrauss/bax<filename>bax/utils.py
import os
def set_jax_memory_preallocation(value: bool):
    """Enable or disable JAX's upfront GPU memory allocation.

    Stores the lowercase string "true" or "false" in the
    XLA_PYTHON_CLIENT_PREALLOCATE environment variable.
    """
    flag = "false"
    if value:
        flag = "true"
    os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = flag
def set_tf_memory_preallocation(value: bool):
    """Enable or disable TensorFlow's upfront GPU memory allocation.

    Bug fix: TensorFlow's ``set_memory_growth(gpu, True)`` means "allocate
    on demand", i.e. preallocation *disabled*.  The original passed
    ``value`` straight through, inverting the polarity relative to
    ``set_jax_memory_preallocation``; the flag is negated here so that
    ``True`` means "preallocate" for both functions.

    A RuntimeError from TensorFlow (e.g. raised when devices were already
    initialized) is caught and printed rather than propagated.
    """
    from tensorflow import config

    gpus = config.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                # Memory growth is the opposite of preallocation.
                config.experimental.set_memory_growth(gpu, not value)
        except RuntimeError as e:
            print(e)
def set_tf_min_log_level(level: int):
    """Control TensorFlow's C++ logging verbosity.

    Stores ``level`` (as a string) in the TF_CPP_MIN_LOG_LEVEL
    environment variable; presumably read when TensorFlow is first
    imported -- set it before importing TensorFlow.
    """
    os.environ.update(TF_CPP_MIN_LOG_LEVEL=str(level))
| StarcoderdataPython |
11262644 | <reponame>strickergt128/tytus<filename>parser/team08/Tytus_SQLPARSER_G8/nodo_arbol.py
class Nodo_Arbol():
    """A generic tree node holding a value, a type tag and child nodes."""

    def __init__(self, valor, tipo):
        self.valor = valor  # node value
        self.tipo = tipo    # node type/category tag
        self.hijos = []     # ordered list of child Nodo_Arbol instances

    def agregarHijo(self, hijo):
        """Append a child node to this node.

        Bug fix: the original called ``self.hijos.insert(hijo)``, which
        raises TypeError because ``list.insert`` requires an index; also
        removed stray dataset-separator text fused to that line.
        """
        self.hijos.append(hijo)
# Plot a smoothed distribution of the Weight column from data.csv.
# (Removed stray dataset-separator text that was fused to the first and
# last lines and made them invalid Python.)
import plotly.figure_factory as ff
import pandas as pd
import csv

df = pd.read_csv("data.csv")
# show_hist=False -> draw only the density curve, no histogram bars.
fig = ff.create_distplot([df["Weight(Pounds)"].tolist()], ["Weight"], show_hist=False)
fig.show()
4834720 | import gng2
from pygraph.classes.graph import graph
from pygraph.algorithms.minmax import cut_tree
from pygraph.algorithms.accessibility import connected_components
from utils import __dict_reverse as dict_reverse
import itertools
import time
from numpy import array,sum,sqrt
class data_block:
    """This is the programming and user interface for GNG. data is the training dataset, should be array or list, with each element numpy arrays with the same dimension.
    no_label is True or False. While set to False, the last element of each array in data will be treated as labels.
    The rest of the variables are training settings for GNG.
    NOTE: this module is written for Python 2 (print statements)."""
    nodes = [] #:Weight of neurons.
    gr = {} #:Topology structure, implemented with python-graph.
    def __init__(self,data,no_label = True,age_max = 300,nn_lambda = 88,ann = 0.5,bnn = 0.0005,eb = 0.05,en = 0.0006):
        # Push the training hyper-parameters into the gng2 module, then
        # stream the data through it one sample at a time.
        gng2.set_parameter(age_max,nn_lambda,ann,bnn,eb,en)
        un_label = 0
        timecost = time.time()
        t = 0
        # NOTE(review): this local graph() is never used; the trained
        # topology is taken from gng2.gr below.
        gr = graph()
        if no_label:
            # Unlabelled data: feed each sample with the dummy label 0.
            for n_point in data:
                t += 1
                gng2.step(n_point,un_label,t)
        else:
            # Labelled data: last element of each sample is its label.
            for n_point in data:
                t += 1
                n_data = list(n_point)
                n_X = array(n_data[0:-1])
                n_Y = n_data[-1]
                gng2.step(n_X,n_Y,t)
        # Empty sample with t == -1 -- presumably signals gng2 to finalise
        # training; TODO confirm against gng2.step.
        gng2.step(array([]),0,-1)
        print 'time cost',time.time() - timecost
        # Harvest the trained neurons and their topology from gng2.
        self.nodes = gng2.setN
        self.gr = gng2.gr
        print len(self.nodes)
    def output_graph(self):
        """Return the topology structure as a python-graph."""
        return self.gr
    def output_nodes(self):
        """Return the list of neuron weights."""
        return self.nodes
    def graph_features(self):
        """Generating topological features including vertice orders for future use.

        Returns a list: [average_length, node_count, edge_count,
        average_order, max_order, redundant_edges, missing_edges] followed
        by five cumulative kernel-order values."""
        gr_nodes = self.gr.nodes()
        gr_edges = self.gr.edges()
        node_count = len(gr_nodes)
        # python-graph lists each undirected edge twice (once per direction).
        edge_count = len(gr_edges) / 2.0
        average_order = 0.0
        clustering_coefficient = 0.0
        max_order = 0
        for each_node in gr_nodes:
            #for orders
            current_node_order = self.gr.node_order(each_node)
            average_order += current_node_order
            max_order = max(max_order,current_node_order)
            #now for clustering coefficient: ratio of realised to possible
            #edges among this node's direct neighbours.
            direct_neighbors = self.gr.neighbors(each_node)
            tmp_v_edge_count = 0.0
            tmp_r_edge_count = 0.0
            for virtual_edge in itertools.product(direct_neighbors,direct_neighbors):
                if virtual_edge[0] != virtual_edge[1]:
                    tmp_v_edge_count += 1.0
                    if self.gr.has_edge(tuple(virtual_edge)):
                        tmp_r_edge_count += 1.0
            if tmp_v_edge_count == 0:
                clustering_coefficient += 0.0
            else:
                clustering_coefficient += (tmp_r_edge_count / tmp_v_edge_count)
        # NOTE(review): clustering_coefficient is computed but never
        # returned from this method.
        clustering_coefficient /= float(node_count)
        average_order /= float(node_count)
        #for kernel order: cumulative count of how many of the cut-tree
        #values 0..4 appear among the graph's cut places.
        cut_dict = cut_tree(self.gr)
        cut_places = set(cut_dict.values())
        how_many_kernel_orders = range(5)
        kernel_orders = []
        bloods = 0.0
        for kernel_tick in how_many_kernel_orders:
            if kernel_tick in cut_places:
                bloods += 1.0
            kernel_orders.append(bloods)
        #for redundant edges and missing edges
        redundant_edges = 0.0
        missing_edges = 0.0
        for each_edge in gr_edges:
            node0 = each_edge[0]
            node1 = each_edge[1]
            #find common set of nodes' neighbors
            common_set = set(self.gr.neighbors(node0)).intersection(set(self.gr.neighbors(node1)))
            if len(common_set) == 0:
                # No shared neighbour: the triangle around this edge is missing.
                missing_edges += 1.0
            elif len(common_set) > 1:
                # Several shared neighbours: if none of them are connected to
                # each other, the edge is counted as redundant.
                in_cell_edges = list(itertools.combinations(list(common_set),2))
                cell_judge = True
                for cell_edge in in_cell_edges:
                    if self.gr.has_edge(cell_edge):
                        cell_judge = False
                if cell_judge == False:
                    redundant_edges += 1.0
        if edge_count != 0.0:
            redundant_edges /= float(edge_count)
            missing_edges /= float(edge_count)
        #average edge length (Euclidean distance between neuron weights)
        total_length = 0.0
        for each_edge in gr_edges:
            node0 = each_edge[0]
            node1 = each_edge[1]
            total_length += sqrt(sum((self.nodes[node0] - self.nodes[node1])**2))
        if len(gr_edges) == 0:
            average_length = 0.0
        else:
            average_length = total_length / float(len(gr_edges))
        return [average_length,node_count,edge_count,average_order,max_order,redundant_edges,missing_edges] + kernel_orders
    def draw_2d(self, scale = 1, axis_ = False):
        """Draws the topology structure and neurons. scale is a real number, it can be set arbitrarily to adjust the size
        of drawn neuron clusters. axis_ is True or False, and means whether to enable axis in the final drawings.
        In this method, MDS is used for drawing high dimensional Euclidean graphs. If you do not use this method, sklearn is
        not a prerequisite for running the pygks software."""
        groups = connected_components(self.gr)
        if len(self.nodes[0]) != 2:
            # Non-2d weights: embed them in the plane with metric MDS on the
            # pairwise Euclidean distances.
            print('using MDS for none 2d drawing')
            from sklearn import manifold
            from sklearn.metrics import euclidean_distances
            similarities = euclidean_distances(self.nodes)
            # Distances within the same connected component are multiplied
            # by `scale`, adjusting how tightly clusters are drawn.
            for i in range(len(self.nodes)):
                for j in range(len(self.nodes)):
                    if groups[i] == groups[j]:
                        similarities[i,j] *= scale
            mds = manifold.MDS(n_components=2, max_iter=500, eps=1e-7,dissimilarity="precomputed", n_jobs=1)
            pos = mds.fit(similarities).embedding_
            draw_nodes = pos
        else:
            draw_nodes = self.nodes
        print('now_drawing')
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)
        node_count = len(draw_nodes)
        # Draw every topology edge as a black line segment.
        for i in range(node_count):
            for j in range(i,node_count):
                if self.gr.has_edge((i,j)):
                    ax.plot([draw_nodes[i][0], draw_nodes[j][0]],[draw_nodes[i][1], draw_nodes[j][1]], color='k', linestyle='-', linewidth=1)
        group_counts = len(set(groups.values()))
        style_tail = ['.','o','x','^','s','+']
        style_head = ['b','r','g','k']
        style_list = []
        # Build colour+marker combinations so each cluster gets its own style.
        for each in itertools.product(style_head,style_tail):
            style_list.append(each[0]+each[1])
        i = 0
        for each in draw_nodes:
            ax.plot(each[0],each[1],style_list[groups[i]-1])
            i += 1
        if not axis_:
            plt.axis('off')
        plt.show()
    def outlier_nn(self,positive = 1,negative = -1):
        """This method finds the largest neuron cluster. If a neuron belongs to this cluster, a label specified by positive will
        be added to this neuron, else this neuron will be labeled by the negative variable. The labeled results will be output in a
        list as labels_final."""
        groups = connected_components(self.gr)
        #find the largest group
        group_counts = dict_reverse(groups)
        max_count = 0
        for keys,values in group_counts.items():
            if len(values) > max_count:
                max_count = len(values)
                max_group = keys
        affines = {}
        # Map each component id to the positive/negative output label.
        for keys,values in groups.items():
            if values == max_group:
                affines[values] = positive
            else:
                affines[values] = negative
        #this is only for outlier detection
        for values in groups.values():
            if values not in affines.keys():
                affines[values] = -1
        # Re-label every neuron with its component's output label.
        for keys,values in groups.items():
            groups[keys] = affines[values]
        labels_final = []
        for i in range(len(self.nodes)):
            labels_final.append(groups[i])
        print labels_final
        return self.nodes, labels_final
    def counts(self):
        """Output the winning times of each neuron and the accumulated errors of the GNG network."""
        return gng2.accumulated, gng2.accumulated_error
if __name__ == '__main__':
    # Placeholder entry point; the module is intended to be imported.
    print 'sb'
| StarcoderdataPython |
3455794 | <reponame>Lookin44/KVINT_test_bot<filename>main.py
# Entry point for the KVINT test Telegram bot.
# NOTE(review): main() runs at import time -- there is no
# `if __name__ == "__main__":` guard.
from connector.bot_for_telegram import main
main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.