id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8040844 |
# This code is based on: https://github.com/nutonomy/second.pytorch.git
#
# MIT License
# Copyright (c) 2018
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import inspect
import sys
from collections import OrderedDict
import numba
import numpy as np
import torch
def get_pos_to_kw_map(func):
    """Map positional index -> parameter name for *func*.

    Only POSITIONAL_OR_KEYWORD parameters are counted; keyword-only,
    *args and **kwargs parameters are skipped, matching the way positional
    call arguments line up with the signature.
    """
    signature = inspect.signature(func)
    positional_names = [
        name for name, param in signature.parameters.items()
        if param.kind is param.POSITIONAL_OR_KEYWORD
    ]
    return dict(enumerate(positional_names))
def get_kw_to_default_map(func):
    """Map parameter name -> declared default value for *func*.

    Considers POSITIONAL_OR_KEYWORD parameters only; parameters without a
    default are omitted.
    """
    signature = inspect.signature(func)
    return {
        name: param.default
        for name, param in signature.parameters.items()
        if param.kind is param.POSITIONAL_OR_KEYWORD
        and param.default is not param.empty
    }
def change_default_args(**kwargs):
    """Class-decorator factory that overrides default arguments of a layer.

    Example: ``change_default_args(bias=False)(nn.Conv2d)`` returns a
    subclass of ``nn.Conv2d`` whose ``__init__`` behaves as if ``bias``
    defaulted to ``False``.  An override is only applied when the caller
    supplied the argument neither positionally nor as a keyword.
    """
    def layer_wrapper(layer_class):
        class DefaultArgLayer(layer_class):
            def __init__(self, *args, **kw):
                pos_to_kw = get_pos_to_kw_map(layer_class.__init__)
                # Renamed the comprehension variable: the original shadowed
                # the outer `kw` dict inside the comprehension.
                kw_to_pos = {name: pos for pos, name in pos_to_kw.items()}
                for key, val in kwargs.items():
                    # Position 0 is `self`, so the supplied positional args
                    # cover parameter positions 1..len(args).
                    # ROBUSTNESS FIX: keys that are not positional-or-keyword
                    # parameters (e.g. keyword-only) used to raise KeyError
                    # here; they are now treated as never positionally
                    # supplied, so the override still applies.
                    if key not in kw and kw_to_pos.get(key, len(args) + 1) > len(args):
                        kw[key] = val
                super().__init__(*args, **kw)
        return DefaultArgLayer
    return layer_wrapper
def torch_to_np_dtype(ttype):
    """Translate a torch dtype to the corresponding numpy dtype.

    Raises KeyError for torch dtypes without a numpy counterpart here.
    """
    # BUG FIX: the original dict listed torch.float16 twice — the second
    # entry mapped it to np.float64, so float16 wrongly resolved to float64
    # and torch.float64 was not supported at all.
    type_map = {
        torch.float16: np.dtype(np.float16),
        torch.float32: np.dtype(np.float32),
        torch.float64: np.dtype(np.float64),
        torch.int32: np.dtype(np.int32),
        torch.int64: np.dtype(np.int64),
        torch.uint8: np.dtype(np.uint8),
    }
    return type_map[ttype]
6466441 | <gh_stars>0
from app.Model.Model import Model
class Session(Model):
    """DB model for a user session row (backed by table 'sessions')."""
    def __init__(self, json=None, pk=None):
        # Declare the columns before Model.__init__ populates them from
        # json / pk (exact population semantics live in the base Model).
        self.id = None
        self.sessionKey = None
        self.orgId = None
        super().__init__(json, pk)
    @staticmethod
    def table_name():
        # Table name consumed by the base Model.
        return 'sessions'
    @staticmethod
    def fields_mapping():
        # Attribute -> column mapping consumed by the base Model.
        # NOTE(review): 'orgId' -> 'organizationId' AND 'organizationId' ->
        # 'orgId' map in both directions; only one direction looks intended —
        # confirm against how Model consumes fields_mapping().
        return {
            'id': 'id',
            'sessionKey': 'sessionKey',
            'orgId': 'organizationId',
            'organizationId': 'orgId'
        }
| StarcoderdataPython |
23270 | <reponame>Kortemme-Lab/benchmark_set_construct<filename>benchmark_constructor/file_normalizers/ContactSelectFileNormalizer.py
import os
from .FileNormalizer import FileNormalizer
class ContactSelectFileNormalizer(FileNormalizer):
    '''ContactSelectFileNormalizer creates a pymol script that selects
    residues which have contacts to asymmetric units.
    '''
    def __init__(self):
        pass
    def normalize_one_file(self, path, crystal_contact_res_set):
        """Write a single PyMOL 'select' command for the given residues to *path*."""
        # NOTE(review): the per-residue clauses are concatenated with no 'or'
        # separator and use 'res' rather than PyMOL's 'resi' keyword —
        # presumably this should read '... or (resi {0} and chain {1})';
        # confirm against the PyMOL selection-language documentation.
        cmd = 'select crystal_contact_res,'
        for res in crystal_contact_res_set:
            # Each res is a (chain, residue_id) pair: res[0]=chain, res[1]=resid.
            cmd += ' res {0} and chain {1}'.format(res[1], res[0])
        with open(path, 'w') as f:
            f.write(cmd)
    def apply(self, info_dict):
        """Write a <name>_show_crystal_contact.pml script next to each
        candidate structure that carries a 'crystal_contact_res_set'."""
        for structure_dict in info_dict['candidate_list']:
            d = os.path.dirname(structure_dict['path'])
            n = '.'.join([structure_dict['name']+'_show_crystal_contact', 'pml'])
            if 'crystal_contact_res_set' in structure_dict.keys():
                self.normalize_one_file(os.path.join(d, n), structure_dict['crystal_contact_res_set'])
| StarcoderdataPython |
1819555 | <reponame>freedomtan/caffe2
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package db_input
# Module caffe2.python.helpers.db_input
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def db_input(model, blobs_out, batch_size, db, db_type):
    """Attach a DB reader to *model* and stream TensorProtos batches from it.

    A CreateDB op (named after the db) is added to the param-init net, and a
    TensorProtosDBInput op reading batches of *batch_size* into *blobs_out*
    is added to the main net; the latter op is returned.
    """
    reader_name = "dbreader_" + db
    dbreader = model.param_init_net.CreateDB(
        [],
        reader_name,
        db=db,
        db_type=db_type,
    )
    return model.net.TensorProtosDBInput(
        dbreader, blobs_out, batch_size=batch_size)
| StarcoderdataPython |
11219863 | <reponame>Ftan91/IG_API_Experiment<gh_stars>0
from elasticsearch.client import IndicesClient
from elasticsearch import Elasticsearch
from pprint import pprint
from configurations import configurations
import requests
import facebook
import os
# make sure you have the 3 variables below set as env variable
# set within .bash_profile OR .zshenv within your $HOME directory
# to refresh use source ~/.bash_profile OR source ~/.zshenv
# to check, printenv in your terminal
# Read credentials injected via the environment (see comments above).
short_lived_token = os.environ['IG_API_SHORT_TOKEN']
app_id = os.environ['APP_ID']
app_secret = os.environ['APP_SECRET']
version = 'v13.0'
# get the long lived token - should last 60 days
url = f'https://graph.facebook.com/{version}/oauth/access_token?grant_type=fb_exchange_token&client_id={app_id}&client_secret={app_secret}&fb_exchange_token={short_lived_token} '
try:
    request = requests.get(url)
    time_left = request.json()['expires_in']/3600
    print(f'Long lived token remaining time: {round(time_left/24, 1)}')
    # NOTE: mutating os.environ only affects this process, not the shell.
    os.environ['IG_API_LONG_TOKEN'] = request.json()['access_token']
except KeyError:
    # The exchange response had no 'expires_in'/'access_token' key.
    print('Short lived access token has expired. Defaulting to long lived access token.')
# sample request to graph api
graph = facebook.GraphAPI(access_token=os.environ['IG_API_LONG_TOKEN'])
test = graph.request('/17841404648986880?fields=media{media_type, like_count, insights.metric(reach)}')
# Local Elasticsearch with default dev credentials.
es_client = Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200, 'scheme': 'http'}],
                          http_auth=['elastic', 'changeme'])
es_index_client = IndicesClient(es_client)
index = 'ig_media'
try:
    es_index_client.create(index=index, body=configurations)
except Exception:
    # NOTE(review): broad except assumed to mean "index exists" — other
    # ES failures are silently mislabeled here; confirm.
    print(f'Index {index} already exists')
# this is a list of all media that has been uploaded by user
media_data = test['media']['data']
for i in media_data:
    i.pop('insights')
    es_client.index(index=index, id=i['id'], body=i)
    print(f"Uploaded to ES ID: {i['id']}")
| StarcoderdataPython |
9789052 | <reponame>sanjitjain2/BlackBoard-Image-Mosaicing
import numpy as np
import cv2
import sys
import click
def calcHomography(pts1, pts2):
    """Estimate the homography H mapping pts1 -> pts2 with the DLT algorithm.

    pts1, pts2: sequences of [x, y] point correspondences (at least 4 pairs;
    generalized from the original hard-coded 4 — extra pairs give a
    least-squares solution).
    Returns a 3x3 numpy array normalized so that H[2, 2] == 1.
    """
    # BUG FIX: the original indexed with pts1[i/2], which is a float index
    # under Python 3 and raises TypeError; integer arithmetic is used here.
    n = len(pts1)
    A = np.zeros((2 * n, 9), dtype='float')
    for k in range(n):
        x, y = pts1[k][0], pts1[k][1]
        u, v = pts2[k][0], pts2[k][1]
        # Two DLT rows per correspondence.
        A[2 * k] = [x, y, 1, 0, 0, 0, -u * x, -u * y, -u]
        A[2 * k + 1] = [0, 0, 0, x, y, 1, -v * x, -v * y, -v]
    # The homography is the right-singular vector of the smallest
    # singular value.
    U, S, V = np.linalg.svd(A)
    V = V[-1, :] / V[-1, -1]
    H = V.reshape(3, 3)
    return H
def findNewCorners(r, c, inv_H):
    """Project the four corners of an r x c image through inv_H.

    Returns a list of four [x, y] integer coordinates (perspective division
    applied, truncated toward zero), in the order: top-left, top-right,
    bottom-left, bottom-right.
    """
    # Homogeneous source corners, stacked as columns of a 3x4 matrix.
    corners = np.array([[0, 0, 1],
                        [c - 1, 0, 1],
                        [0, r - 1, 1],
                        [c - 1, r - 1, 1]]).T
    mapped = np.dot(inv_H, corners)
    return [
        [int(mapped[0, k] / mapped[2, k]), int(mapped[1, k] / mapped[2, k])]
        for k in range(4)
    ]
def findMaxMin(r, c, new_corners):
    """Bounding box of the base image rectangle (0,0)-(c-1,r-1) together
    with the four projected corner points.

    Returns (min_x, min_y, max_x, max_y).
    """
    xs = [p[0] for p in new_corners] + [0, c - 1]
    ys = [p[1] for p in new_corners] + [0, r - 1]
    return min(xs), min(ys), max(xs), max(ys)
def StichImage(max_x,max_y,min_x,min_y,r,c,H,img1,img2):
    """Paste img1 onto a padded canvas with an offset, then inverse-warp
    img2 (via H, which maps canvas coordinates back into img2) to fill the
    remaining white pixels.  Returns the combined canvas.
    """
    # Creating a new canvas to paste and stitch images on
    # NOTE(review): np.full without dtype yields an int canvas, and the
    # fixed +1000 padding is presumably headroom for these inputs — confirm.
    canvas = np.full((max_y+1000,max_x+1000,3),255)
    row_canvas, col_canvas,_ = canvas.shape
    # Translate and paste destiantion image on canvas
    for y in range(r):
        for x in range(c):
            canvas[y+abs(min_y)][x+abs(min_x)] = img1[y][x]
    # Iterate over empty canvas and find corresponding alligned points from source image
    for i in range(min_y,row_canvas-abs(min_y)):
        for j in range(min_x,col_canvas-abs(min_x)):
            # If point is white, find its alligned point
            # NOTE(review): pure-white (255,255,255) is used as the "empty"
            # marker, so genuinely white destination pixels get overwritten.
            if canvas[i+abs(min_y),j+abs(min_x)][0] == 255 and canvas[i+abs(min_y),j+abs(min_x)][1] == 255 and canvas[i+abs(min_y),j+abs(min_x)][2] == 255:
                point = np.array([i,j,1]).reshape(3,1)
                homographed_point = np.dot(H,point)
                row_homographed = int(homographed_point[0]/homographed_point[2])
                col_homographed = int(homographed_point[1]/homographed_point[2])
                # if calculated point lies within source image,bring it into the canvas
                if row_homographed >= 0 and row_homographed < img2.shape[0]:
                    if col_homographed >= 0 and col_homographed < img2.shape[1]:
                        canvas[i+abs(min_y),j+abs(min_x)] = img2[row_homographed][col_homographed]
    return canvas
def main():
    """Stitch m1.jpg (source) onto m0.jpg (destination) and write result.jpg."""
    i1 = 'm0.jpg'
    i2 = 'm1.jpg'
    # Reading Destination(base) image
    img1 = cv2.imread(i1)
    # Reading Source(need to be aligned) image
    img2 = cv2.imread(i2)
    # dimmensions of Destination image
    # NOTE(review): no None-check after imread — a missing file crashes here.
    r,c,p = img1.shape
    if (len(sys.argv) == 1) or (len(sys.argv) == 2 and str(sys.argv[1]) == '0'):
        # ---------------Common points----------------------
        # pts1 => corner points of destiantion image
        # pts2 => corner points of source image
        # The triple-quoted blocks below are disabled point sets for other
        # image combinations, kept for reference.
        """
        pts1 = [[1543,1924],[918,1705],[1054,584],[1786,960]] # m0+m1+m2+m3+m4
        pts2 = [[1174,2240],[397,2086],[279,713],[925,882]] # m5
        """
        """
        pts1 = [[1169,2430],[993,1310],[1537,1173],[1527,2674]] # m0+m1+m2+m3
        pts2 = [[735,1946],[596,375],[979,214],[1131,2385]] # m4
        """
        """
        pts1 = [[1038,2050],[1040,2491],[1273,2119],[1617,2425]] # m0+m1+m2
        pts2 = [[263,264],[120,1300],[549,365],[1026,978]] # m3
        """
        """
        pts1 = [[894,1766],[971,2484],[1576,1930],[1594,2389]] # mo+m1
        pts2 = [[519,59],[767,1479],[1289,30],[1618,870]] # m2
        """
        pts1 = [[1541,1926],[534,1779],[972,2483],[1617,2423]] # m0
        pts2 = [[1364,1484],[470,500],[96,2057],[901,2396]] # m1
    else:
        # Interactively pick correspondences via the helper module.
        pts1,pts2 = click.main(i1,i2)
    # Calculating Homography Matrix
    H = calcHomography(pts1,pts2)
    # Calculating Inverse_homography matrix
    inv_H = np.linalg.inv(H)
    # New corners found by by multiplying with inv_H
    new_corners = findNewCorners(r,c,inv_H)
    # Calculate min_x, max_x, min_y, max_y and offset values
    min_x,min_y,max_x,max_y = findMaxMin(r,c,new_corners)
    new_image = StichImage(max_x,max_y,min_x,min_y,r,c,H,img1,img2)
    # Write and save the final stitched canvas image
    cv2.imwrite('result.jpg',new_image)
main()
4915929 | # ---------- requirements ----------
from download import download
import pandas as pd
import os
# Define global path target on which to join our files.
# Ends up in the data folder (i.e. <repo>/data relative to this module).
path_target = os.path.join(
    os.path.dirname(
        os.path.realpath(__file__)),
    "..", "data")
# ---------- chiffres-cles ----------
url_cc = "https://www.data.gouv.fr/fr/datasets/r/0b66ca39-1623-4d9c-83ad-5434b7f9e2a4"
path_target_cc = os.path.join(path_target, "chiffres-cles.csv")
class Load_chiffres_cles:
    """
    Download and save 'chiffres-cles.csv',
    a dataset containing general Covid-19 information
    """
    def __init__(self, url=url_cc, target_name=path_target_cc):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
        # above, set replace to True to always get the updated version
    @staticmethod
    def save_as_df():
        """Read the downloaded CSV into a pandas DataFrame."""
        # convert last lines type to str to avoid DtypeWarning
        converters = {'source_nom': str, 'source_url': str,
                      'source_archive': str, 'source_type': str}
        df_covid = pd.read_csv(path_target_cc, converters=converters)
        return df_covid
# ---------- transfer ----------
url_tr = "https://www.data.gouv.fr/fr/datasets/r/70cf1fd0-60b3-4584-b261-63fb2281359e"
path_target_tr = os.path.join(path_target, "transfer.csv")
class Load_transfer:
    """
    Download and save 'transfer.csv',
    a dataset containing information about Covid-19 patient transfers
    """
    def __init__(self, url=url_tr, target_name=path_target_tr):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
        # above, set replace to True to always get the updated version
    @staticmethod
    def save_as_df():
        """Read the downloaded CSV into a pandas DataFrame."""
        df_tr = pd.read_csv(path_target_tr)
        return df_tr
# ---------- stocks-es-national ----------
url_sen = "https://www.data.gouv.fr/fr/datasets/r/519e2699-27d2-47c0-840b-81dbb30d4318"
path_target_sen = os.path.join(path_target, "./stocks-es-national.csv")
class Load_Vaccine_storage:
    """
    Download and save 'stocks-es-national.csv',
    a dataset containing Covid-19 vaccination informations
    """
    def __init__(self, url=url_sen, target_name=path_target_sen):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded CSV into a pandas DataFrame."""
        df = pd.read_csv(path_target_sen)
        return df
# ---------- covid-19-france-vaccinations-age-dep ----------
url_vac = 'https://public.opendatasoft.com/explore/dataset/covid-19-france-vaccinations-age-sexe-dep/download/?format=csv&disjunctive.variable_label=true&refine.variable=Par+tranche+d%E2%80%99%C3%A2ge&refine.date=2021&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B'
path_target_vac = os.path.join(
    path_target, "./covid-19-france-vaccinations-age-dep.csv")
class Load_vaccination:
    """
    Download and save 'covid-19-france-vaccinations-age-dep.csv',
    a dataset containing Covid-19 vaccination information
    """
    def __init__(self, url=url_vac, target_name=path_target_vac):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_vac, sep=";")
        return df
# ---------- chiffres-fr ----------
url_cfr = "https://www.data.gouv.fr/fr/datasets/r/d3a98a30-893f-47f7-96c5-2f4bcaaa0d71"
path_target_cfr = os.path.join(path_target, "./chiffres-fr.csv")
class Load_chiffres_fr:
    """
    Download and save 'chiffres-fr.csv',
    a dataset containing global information for France as a whole
    """
    def __init__(self, url=url_cfr, target_name=path_target_cfr):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded CSV into a pandas DataFrame."""
        df_covid = pd.read_csv(path_target_cfr)
        return df_covid
# ---------- posquotreg ----------
url_posreg = "https://www.data.gouv.fr/fr/datasets/r/001aca18-df6a-45c8-89e6-f82d689e6c01"
path_target_posreg = os.path.join(path_target, "./posquotreg.csv")
class Load_posquotreg:
    """
    Download and save 'posquotreg.csv',
    a dataset containing positivity information by region.
    """
    def __init__(self, url=url_posreg, target_name=path_target_posreg):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_posreg, sep=";")
        return df
# ---------- posquotdep ----------
url_posdep = "https://www.data.gouv.fr/fr/datasets/r/406c6a23-e283-4300-9484-54e78c8ae675"
path_target_posdep = os.path.join(path_target, "./posquotdep.csv")
class Load_posquotdep:
    """
    Download and save 'posquotdep.csv',
    a dataset containing positivity information by departments
    """
    def __init__(self, url=url_posdep, target_name=path_target_posdep):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_posdep, sep=";")
        return df
# ---------- posquotfr ----------
url_posfr = "https://www.data.gouv.fr/fr/datasets/r/dd0de5d9-b5a5-4503-930a-7b08dc0adc7c"
path_target_posfr = os.path.join(path_target, "./posquotfr.csv")
class Load_posquotfr:
    """
    Download and save 'posquotfr.csv',
    a dataset containing positivity information for France
    """
    def __init__(self, url=url_posfr, target_name=path_target_posfr):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_posfr, sep=";")
        return df
# ---------- poshebreg ----------
url_poshebreg = "https://www.data.gouv.fr/fr/datasets/r/1ff7af5f-88d6-44bd-b8b6-16308b046afc"
path_target_poshebreg = os.path.join(path_target, "./poshebreg.csv")
class Load_poshebreg:
    """
    Download and save 'poshebreg.csv',
    a dataset containing positivity informations by regions weekly
    """
    def __init__(self, url=url_poshebreg, target_name=path_target_poshebreg):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_poshebreg, sep=";")
        return df
# ---------- poshebfr ----------
url_poshebfr = "https://www.data.gouv.fr/fr/datasets/r/2f0f720d-fbd2-41a7-95b4-3a70ff5a9253"
path_target_poshebfr = os.path.join(path_target, "./poshebfr.csv")
class Load_poshebfr:
    """
    Download and save 'poshebfr.csv',
    a dataset containing positivity informations in France weekly
    """
    def __init__(self, url=url_poshebfr, target_name=path_target_poshebfr):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_poshebfr, sep=";")
        return df
# ---------- incquotreg ----------
url_increg = "https://www.data.gouv.fr/fr/datasets/r/ad09241e-52fa-4be8-8298-e5760b43cae2"
path_target_increg = os.path.join(path_target, "./incquotreg.csv")
class Load_incquotreg:
    """
    Download and save 'incquotreg.csv',
    a dataset containing incidence information by regions
    """
    def __init__(self, url=url_increg, target_name=path_target_increg):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_increg, sep=";")
        return df
# ---------- incfr ----------
url_incfr = "https://www.data.gouv.fr/fr/datasets/r/57d44bd6-c9fd-424f-9a72-7834454f9e3c"
path_target_incfr = os.path.join(path_target, "./incquotfr.csv")
class Load_incquotfr:
    """
    Download and save 'incquotfr.csv',
    a dataset containing incidence information for France
    """
    def __init__(self, url=url_incfr, target_name=path_target_incfr):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_incfr, sep=";")
        return df
# ---------- inchebreg ----------
url_incregheb = "https://www.data.gouv.fr/fr/datasets/r/66b09e9a-41b5-4ed6-b03c-9aef93a4b559"
path_target_incregheb = os.path.join(path_target, "./inchebreg.csv")
class Load_inchebreg:
    """
    Download and save 'inchebreg.csv',
    a dataset containing incidence information by regions weekly
    """
    def __init__(self, url=url_incregheb, target_name=path_target_incregheb):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_incregheb, sep=";")
        return df
# ---------- inchebfr ----------
url_incfrheb = "https://www.data.gouv.fr/fr/datasets/r/2360f82e-4fa4-475a-bc07-9caa206d9e32"
path_target_incfrheb = os.path.join(path_target, "./inchebfr.csv")
class Load_inchebfr:
    """
    Download and save 'inchebfr.csv',
    a dataset containing incidence information for France weekly
    """
    def __init__(self, url=url_incfrheb, target_name=path_target_incfrheb):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target_incfrheb, sep=";")
        return df
# ---------- classe_age ----------
url_classe_age = "https://www.data.gouv.fr/fr/datasets/r/08c18e08-6780-452d-9b8c-ae244ad529b3"
path_target6 = os.path.join(
    os.path.dirname(
        os.path.realpath(__file__)),
    "..", "data", "./classe_age.csv")
class Load_classe_age:
    """
    Download and save 'classe_age.csv',
    a dataset of Covid-19 indicators broken down by age group.
    """
    # NOTE(review): unlike its siblings, the path (path_target6) is rebuilt
    # from __file__ instead of reusing the module-level path_target; the
    # resulting directory is the same.
    def __init__(self, url=url_classe_age, target_name=path_target6):
        # Network side effect: instantiation (re)downloads the file.
        download(url, target_name, replace=True)
    @staticmethod
    def save_as_df():
        """Read the downloaded semicolon-separated CSV into a DataFrame."""
        df = pd.read_csv(path_target6, sep=";")
        return df
| StarcoderdataPython |
from django.dispatch import Signal
# Signals fired when an admin starts/stops impersonating ("hijacking")
# another user.  Receivers are connected elsewhere; the keyword arguments
# carried by each send() are defined by the sender — presumably per the
# django-hijack conventions, confirm against that package's docs.
hijack_started = Signal()
hijack_ended = Signal()
| StarcoderdataPython |
283958 | <reponame>minyiky/xSACdb
# Generated by Django 2.2.7 on 2019-11-06 17:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes the BooleanFields on 'kit' and
    # 'loan' blank-able with explicit defaults, and pins the 'type' choice
    # list.  Generated code — avoid hand-editing beyond comments.
    dependencies = [
        ('xsd_kit', '0002_auto_20141109_1711'),
    ]
    operations = [
        migrations.AlterField(
            model_name='kit',
            name='club_owned',
            field=models.BooleanField(blank=True, default=True),
        ),
        migrations.AlterField(
            model_name='kit',
            name='needs_testing',
            field=models.BooleanField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='kit',
            name='type',
            field=models.CharField(choices=[('WETS', 'Wetsuit'), ('SEMI', 'Semidry'), ('DRYS', 'Drysuit'), ('BCD', 'BCD'), ('WING', 'Wing'), ('REGS', 'Regs'), ('CYL', 'Cylinder'), ('MASK', 'Mask'), ('FINS', 'Fins'), ('SNRK', 'Snorkel'), ('COMP', 'Computer'), ('TORH', 'Torch'), ('SMB', 'SMB'), ('DSMB', 'DSMB'), ('REEL', 'Reel')], max_length=64),
        ),
        migrations.AlterField(
            model_name='loan',
            name='approved',
            field=models.BooleanField(blank=True, default=False),
        ),
    ]
| StarcoderdataPython |
12831948 | <gh_stars>1-10
import abc
import asyncio
import fnmatch
from abc import ABC
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Tuple
@dataclass
class Notification:
    """An event record: something happened to *subject_id* under *topic*."""
    # Topic string; subscribers may match it with fnmatch-style wildcards.
    topic: str
    # Identifier of the entity the event is about.
    subject_id: str
    # Arbitrary event payload.
    data: dict
class Handler(ABC):
    """Interface for notification subscribers."""
    @abc.abstractmethod
    async def handle(self, notification: Notification):
        """Process one delivered notification."""
        pass
class NotificationService:
    """In-process async pub/sub dispatcher.

    Handlers subscribe under a (subject_id, topic) pattern pair; '*' matches
    any subject, and the topic pattern supports fnmatch-style wildcards.
    """

    def __init__(self) -> None:
        # Subject pattern -> list of (topic pattern, handler) subscriptions.
        # Idiom fix: defaultdict(list) instead of defaultdict(lambda: []).
        self.handlers: Dict[str, List[Tuple[str, Handler]]] = \
            defaultdict(list)

    def register_handler(
            self, subject_id: str,
            topic: str, handler: Handler):
        """Subscribe *handler* to notifications matching the given patterns."""
        self.handlers[subject_id].append((topic, handler))

    def unregister_handler(
            self, subject_id: str,
            topic: str, handler: Handler):
        """Remove a previously registered subscription.

        Raises ValueError if the (topic, handler) pair was never registered.
        """
        self.handlers[subject_id].remove((topic, handler))

    def notify(self, notification: Notification):
        """Fire-and-forget dispatch; requires a running event loop."""
        # FIX: dropped the redundant local `import asyncio` — the module
        # already imports asyncio at the top.
        asyncio.create_task(self.notify_now(notification))

    async def notify_now(self, notification: Notification):
        """Dispatch *notification* to all matching handlers concurrently.

        Each handler runs at most once, even if several of its
        subscriptions match the notification.
        """
        matched = set()
        for subject_pattern, subscriptions in self.handlers.items():
            for topic_pattern, handler in subscriptions:
                if self._subject_matches(notification.subject_id, subject_pattern) and \
                        self._topic_matches(notification.topic, topic_pattern):
                    matched.add(handler)
        await asyncio.gather(*[h.handle(notification) for h in matched])

    def _subject_matches(self, subject_id: str, subject_id_wildcard: str):
        # '*' subscribes to every subject; otherwise exact comparison.
        if subject_id_wildcard == '*':
            return True
        return subject_id == subject_id_wildcard

    def _topic_matches(self, topic: str, topic_wildcard: str):
        # fnmatch provides shell-style wildcard matching ('a.*', 'x.?', ...).
        if topic_wildcard == '*':
            return True
        # FIX: the original fell off the end and implicitly returned None on
        # no match; now returns an explicit bool.
        return bool(fnmatch.filter([topic], topic_wildcard))
| StarcoderdataPython |
341510 | <reponame>prithagupta/ml-sca
from .baseline import RandomClassifier, MajorityVoting, PriorClassifier
from .csv_reader import CSVReader
from .synthetic_dataset_reader import SyntheticDatasetGenerator
from .real_dataset_generator import RealDatasetGenerator
from .classification_test import optimize_search_cv
from .classifiers import classifiers_space, custom_dict
from .constants import *
from .statistical_tests import *
from .utils import *
4958365 | # -*- coding: utf-8 -*-
import os
import cv2
import numpy as np
from utils.util import get_arguments
from utils.palette import get_palette
from PIL import Image as PILImage
import importlib
# Parse CLI args and load the runtime configuration module.
args = get_arguments()
config = importlib.import_module('config')
cfg = getattr(config, 'cfg')
# Paddle garbage-collection strategy flag; the ACE2P model is large, so
# enabling eager tensor deletion is recommended when GPU memory is tight.
os.environ['FLAGS_eager_delete_tensor_gb']='0.0'
import paddle.fluid as fluid
# Dataset class used for inference (prediction).
class TestDataSet():
    def __init__(self):
        self.data_dir = cfg.data_dir
        self.data_list_file = cfg.data_list_file
        self.data_list = self.get_data_list()
        self.data_num = len(self.data_list)
    def get_data_list(self):
        # Build the list of image paths to run inference on.
        data_list = []
        data_file_handler = open(self.data_list_file, 'r')
        for line in data_file_handler:
            img_name = line.strip()
            name_prefix = img_name.split('.')[0]  # NOTE(review): unused here
            if len(img_name.split('.')) == 1:
                # Entries without an extension are assumed to be .jpg files.
                img_name = img_name + '.jpg'
            img_path = os.path.join(self.data_dir, img_name)
            data_list.append(img_path)
        return data_list
    def preprocess(self, img):
        # Image preprocessing: ACE2P has its own pipeline, other models get
        # resize + mean/std normalization + NCHW layout.
        if cfg.example == 'ACE2P':
            reader = importlib.import_module('reader')
            ACE2P_preprocess = getattr(reader, 'preprocess')
            img = ACE2P_preprocess(img)
        else:
            img = cv2.resize(img, cfg.input_size).astype(np.float32)
            img -= np.array(cfg.MEAN)
            img /= np.array(cfg.STD)
            img = img.transpose((2, 0, 1))
            img = np.expand_dims(img, axis=0)
        return img
    def get_data(self, index):
        # Load the image at `index`; returns (original, preprocessed,
        # name prefix, (h, w)) or (None, None, path, None) on read failure.
        img_path = self.data_list[index]
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            return img, img, img_path, None
        img_name = img_path.split(os.sep)[-1]
        name_prefix = img_name.replace('.'+img_name.split('.')[-1],'')
        img_shape = img.shape[:2]
        img_process = self.preprocess(img)
        return img, img_process, name_prefix, img_shape
def infer():
    """Run segmentation inference over the test set and save visualizations."""
    if not os.path.exists(cfg.vis_dir):
        os.makedirs(cfg.vis_dir)
    palette = get_palette(cfg.class_num)
    # Display threshold for the human-segmentation (alpha) result.
    thresh = 120
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Load the inference model.
    test_prog, feed_name, fetch_list = fluid.io.load_inference_model(
        dirname=cfg.model_path, executor=exe, params_filename='__params__')
    # Load the prediction dataset.
    test_dataset = TestDataSet()
    data_num = test_dataset.data_num
    for idx in range(data_num):
        # Fetch one sample.
        ori_img, image, im_name, im_shape = test_dataset.get_data(idx)
        if image is None:
            print(im_name, 'is None')
            continue
        # Inference.
        if cfg.example == 'ACE2P':
            # The ACE2P model uses multi-scale testing.
            reader = importlib.import_module('reader')
            multi_scale_test = getattr(reader, 'multi_scale_test')
            parsing, logits = multi_scale_test(exe, test_prog, feed_name, fetch_list, image, im_shape)
        else:
            # HumanSeg / RoadLine models use single-scale prediction.
            result = exe.run(program=test_prog, feed={feed_name[0]: image}, fetch_list=fetch_list)
            parsing = np.argmax(result[0][0], axis=0)
            parsing = cv2.resize(parsing.astype(np.uint8), im_shape[::-1])
        # Save the prediction result.
        result_path = os.path.join(cfg.vis_dir, im_name + '.png')
        if cfg.example == 'HumanSeg':
            logits = result[0][0][1]*255
            logits = cv2.resize(logits, im_shape[::-1])
            ret, logits = cv2.threshold(logits, thresh, 0, cv2.THRESH_TOZERO)
            logits = 255 *(logits - thresh)/(255 - thresh)
            # Put the segmentation mask into the alpha channel.
            rgba = np.concatenate((ori_img, np.expand_dims(logits, axis=2)), axis=2)
            cv2.imwrite(result_path, rgba)
        else:
            output_im = PILImage.fromarray(np.asarray(parsing, dtype=np.uint8))
            output_im.putpalette(palette)
            output_im.save(result_path)
        if (idx + 1) % 100 == 0:
            print('%d processd' % (idx + 1))
    print('%d processd done' % (idx + 1))
    return 0
if __name__ == "__main__":
    # Script entry point: run inference over the configured dataset.
    infer()
| StarcoderdataPython |
3386937 | import math
import random
import unittest
from cog_abm.ML.core import (NominalAttribute, NumericAttribute, Sample,
load_samples_arff, split_data, split_data_cv)
# Shared symbol list used across all the attribute/sample tests below.
animals = ["dog", "cat", "lion", "duck", "python:)"]
class TestAttributes(unittest.TestCase):
    """Value/index round-trips and equality semantics of attribute types.

    NOTE: uses xrange — this suite targets Python 2.
    """
    def setUp(self):
        self.symbols = animals
        self.na = NominalAttribute(self.symbols)
    def test_numeric_attr_getting_value(self):
        # NumericAttribute acts as an identity mapping over values.
        na = NumericAttribute()
        for i in xrange(10):
            self.assertEqual(i, na.get_value(i))
    def test_nominal_attr_getting_value(self):
        # Symbols map to their index and back.
        na = NominalAttribute(self.symbols)
        for i, s in enumerate(self.symbols):
            self.assertEqual(s, na.get_value(i))
            self.assertEqual(s, na.get_symbol(i))
            self.assertEqual(i, na.get_idx(s))
    def test_equality(self):
        # Equality depends on the full symbol list (order and content).
        self.assertEqual(self.na, NominalAttribute(animals))
        self.assertEqual(self.na,
            NominalAttribute(["dog", "cat", "lion", "duck", "python:)"]))
        self.assertNotEqual(self.na, NominalAttribute(animals + ["donkey"]))
        self.assertEqual(NumericAttribute(), NumericAttribute())
        self.assertNotEqual(self.na, NumericAttribute())
class TestSample(unittest.TestCase):
    """Construction, accessors and equality of Sample with/without a class label."""
    def setUp(self):
        self.meta = [NumericAttribute(), NominalAttribute(animals)]
        # Unlabelled sample.
        self.sample = Sample([1.2, self.meta[1].get_idx("dog")], self.meta)
        self.meta_cl = NominalAttribute(animals)
        # Sample with a class label ("duck").
        self.sample_cl = Sample([100, self.meta[1].get_idx("cat")], self.meta,
            self.meta_cl.get_idx("duck"), self.meta_cl)
    def test_basic(self):
        self.assertIsNone(self.sample.get_cls())
        self.assertEqual(self.sample_cl.get_cls(), "duck")
        self.assertEqual(self.sample.get_values(), [1.2, "dog"])
        self.assertEqual(self.sample_cl.get_values(), [100, "cat"])
    def test_equality(self):
        # Samples differ when values, metadata layout, or class label differ.
        self.assertNotEqual(self.sample, self.sample_cl)
        meta = [NumericAttribute(), NominalAttribute(animals)]
        sample = Sample([1.2, meta[1].get_idx("dog")], meta)
        self.assertEqual(self.sample, sample)
        self.assertNotEqual(self.sample, self.sample_cl)
        meta = [NumericAttribute(), NominalAttribute(animals), NumericAttribute()]
        sample = Sample([1.2, meta[1].get_idx("dog"), 3.14], meta)
        self.assertNotEqual(self.sample, sample)
        self.assertNotEqual(self.sample_cl, sample)
        meta = [NumericAttribute(), NominalAttribute(animals)]
        sample = Sample([1.2, meta[1].get_idx("cat")], meta)
        self.assertNotEqual(self.sample, sample)
        self.assertNotEqual(self.sample_cl, sample)
        sample = Sample([1.3, meta[1].get_idx("dog")], meta)
        self.assertNotEqual(self.sample, sample)
        self.assertNotEqual(self.sample_cl, sample)
        sample = Sample([100, self.meta[1].get_idx("cat")], self.meta,
            self.meta_cl.get_idx("duck"), self.meta_cl)
        self.assertEqual(self.sample_cl, sample)
        self.assertNotEqual(self.sample, sample)
        sample = Sample([10.20, self.meta[1].get_idx("cat")], self.meta,
            self.meta_cl.get_idx("duck"), self.meta_cl)
        self.assertNotEqual(self.sample, sample)
        self.assertNotEqual(self.sample_cl, sample)
class TestSamplePreparation(unittest.TestCase):
    """ARFF loading plus split_data / split_data_cv invariants (Python 2)."""
    def setUp(self):
        # Loads the classic iris dataset shipped with the test suite.
        self.samples = load_samples_arff("test/iris.arff")
    def test_loading_arff(self):
        # Iris: four numeric features and a three-valued nominal class.
        expected_meta = [NumericAttribute() for _ in xrange(4)]
        expected_cls_meta = NominalAttribute(
            ["Iris-setosa", "Iris-versicolor", "Iris-virginica"])
        sample = self.samples[0]
        self.assertEqual(sample.meta, expected_meta)
        self.assertEqual(sample.cls_meta, expected_cls_meta)
    def test_spliting_samples(self):
        # Train size is ceil(n * ratio) and no sample is lost.
        for _ in xrange(100):
            split_ratio = random.random()
            train, test = split_data(self.samples, split_ratio)
            self.assertEqual(math.ceil(len(self.samples) * split_ratio), len(train))
            self.assertEqual(len(self.samples), len(train) + len(test))
    def test_split_data_cv(self):
        # Folds are disjoint from their train sets and near-equal in size.
        N = 100
        for _ in xrange(100):
            samples = range(N)
            folds = random.randint(2, N / 3)
            sets = split_data_cv(samples, folds)
            for train, test in sets:
                for ts in test:
                    self.assertTrue(ts not in train)
                self.assertTrue(N / folds <= len(test) <= N / folds + 1)
                self.assertEqual(N, len(test) + len(train))
| StarcoderdataPython |
6503273 | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-log-packets.py
#
# Set the following commands in the local system:
#
# lisp debug {
# itr = yes
# etr = yes
# }
# lisp xtr-parameters {
# data-plane-logging = yes
# }
#
# By using the lispapi. The command will toggle the setting. When environment
# variable LISP_PW is set, the value is the password that is used to connect.
# to the LISP API.
#
# Usage: python -O lisp-log-packets.py [force] [<api-port>] [help]
#
# Note when <api-port> is negative, it is passed into the lispapi to tell it
# to use http versus https. This port number should be the same value as
# passed on the ./RESTART-LISP command.
#
#------------------------------------------------------------------------------
import lispapi
import sys
import os
#
# Get command-line parameters.
#
force = True
api_port = 8080
for arg in sys.argv[1::]:
if (arg == "help"):
print "Usage: log-packets [force] [<api-port>] [help]"
exit(0)
#endif
if (arg == "force"): force = True
if (arg.isdigit()): api_port = int(arg)
if (arg[0] == "-" and arg[1::].isdigit()): api_port = -int(arg[1::])
#endfor
#
# Get user supplied password, if any.
#
pw = os.getenv("LISP_PW")
if (pw == None): pw = ""
#
# Open API to localhost. If debug status dict array None, the open failed.
#
lisp = lispapi.api_init("localhost", "root", pw, port=api_port)
if (lisp.debug_status == None):
print ("Could not connect to API, is lispers.net running?, " + \
"LISP_PW set?, or using the correct port number?")
exit(1)
#endif
#
# Get current settings for control-plane.
#
itr = lisp.is_itr_debug_enabled()
etr = lisp.is_etr_debug_enabled()
rtr = lisp.is_rtr_debug_enabled()
#
# Get current settings for data-plane logging.
#
xtr_parms = lisp.get_xtr_parameters()
if (xtr_parms == None):
print "Could not get xtr-parameters from API"
exit(1)
#endif
dp_logging = xtr_parms["data-plane-logging"] == "yes"
rtr_running = lisp.is_rtr_enabled()
if (dp_logging):
lisp.disable_xtr_data_plane_logging()
lisp.disable_itr_debug()
lisp.disable_etr_debug()
if (rtr_running): lisp.disable_rtr_debug()
print "Data-plane logging has been disabled"
else:
if (rtr_running):
if (rtr and force == False):
print "Control-plane logging is enabled, no action taken"
exit(0)
#endif
lisp.enable_rtr_debug()
else:
if ((itr or etr) and force == False):
print "Control-plane logging is enabled, no action taken"
exit(0)
#endif
lisp.enable_itr_debug()
lisp.enable_etr_debug()
#endif
lisp.enable_xtr_data_plane_logging()
print "Data-plane logging has been enabled"
#endif
exit(0)
#------------------------------------------------------------------------------
| StarcoderdataPython |
383392 | <filename>demo/networks/Attention.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import random
from dataset import START, PAD
# Run on GPU when available; decoder-side tensors are created on this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CNN(nn.Module):
    """Seven-layer VGG-style convolutional feature extractor.

    Pooling shrinks height faster than width, producing a wide, short
    feature map suitable for sequence decoding.
    """

    def __init__(self, nc, leakyRelu=False):
        super(CNN, self).__init__()
        # Per-layer kernel size / padding / stride / output channels.
        kernel_sizes = [3, 3, 3, 3, 3, 3, 2]
        paddings = [1, 1, 1, 1, 1, 1, 0]
        strides = [1, 1, 1, 1, 1, 1, 1]
        channels = [64, 128, 256, 256, 512, 512, 512]

        def conv_block(idx, use_bn=False):
            # Conv2d (+ optional BatchNorm) + ReLU/LeakyReLU for layer idx.
            in_channels = nc if idx == 0 else channels[idx - 1]
            block = nn.Sequential()
            block.add_module('conv{0}'.format(idx),
                             nn.Conv2d(in_channels, channels[idx],
                                       kernel_sizes[idx], strides[idx],
                                       paddings[idx]))
            if use_bn:
                block.add_module('batchnorm{0}'.format(idx),
                                 nn.BatchNorm2d(channels[idx]))
            if leakyRelu:
                block.add_module('relu{0}'.format(idx),
                                 nn.LeakyReLU(0.2, inplace=True))
            else:
                block.add_module('relu{0}'.format(idx), nn.ReLU(True))
            return block

        self.conv0 = conv_block(0)
        self.pooling0 = nn.MaxPool2d(2, 2)
        self.conv1 = conv_block(1)
        self.pooling1 = nn.MaxPool2d(2, 2)
        self.conv2 = conv_block(2, True)
        self.conv3 = conv_block(3)
        # Stride (2, 1) halves height while (almost) preserving width.
        self.pooling3 = nn.MaxPool2d((2, 2), (2, 1), (0, 1))
        self.conv4 = conv_block(4, True)
        self.conv5 = conv_block(5)
        self.pooling5 = nn.MaxPool2d((2, 2), (2, 1), (0, 1))
        self.conv6 = conv_block(6, True)

    def forward(self, input):
        """Apply the conv/pool stack; e.g. a 128x128 input yields a
        [batch, 512, 7, 33] feature map."""
        features = self.pooling0(self.conv0(input))
        features = self.pooling1(self.conv1(features))
        features = self.conv3(self.conv2(features))
        features = self.pooling3(features)
        features = self.conv5(self.conv4(features))
        features = self.pooling5(features)
        return self.conv6(features)
class AttentionCell(nn.Module):
    """One decoding step: Bahdanau-style additive attention over the encoder
    features, followed by an LSTM/GRU cell (or a stack of them).

    src_dim: channel size of the encoder feature vectors.
    hidden_dim: hidden size of the recurrent cell(s).
    embedding_dim: size of the target-token embedding concatenated with the
        attention context before the RNN input.
    """

    def __init__(self, src_dim, hidden_dim, embedding_dim, num_layers=1, cell_type='LSTM'):
        super(AttentionCell, self).__init__()
        self.num_layers = num_layers
        # Additive attention: score = v . tanh(W_i * src + W_h * hidden)
        self.i2h = nn.Linear(src_dim, hidden_dim, bias=False)
        self.h2h = nn.Linear(
            hidden_dim, hidden_dim
        )  # either i2i or h2h should have bias
        self.score = nn.Linear(hidden_dim, 1, bias=False)
        if num_layers == 1:
            if cell_type == 'LSTM':
                self.rnn = nn.LSTMCell(src_dim + embedding_dim, hidden_dim)
            elif cell_type == 'GRU':
                self.rnn = nn.GRUCell(src_dim + embedding_dim, hidden_dim)
            else:
                raise NotImplementedError
        else:
            # Stacked cells: the first consumes [context; embedding], each
            # subsequent layer consumes the hidden state of the layer below.
            if cell_type == 'LSTM':
                self.rnn = nn.ModuleList(
                    [nn.LSTMCell(src_dim + embedding_dim, hidden_dim)]
                    + [
                        nn.LSTMCell(hidden_dim, hidden_dim)
                        for _ in range(num_layers - 1)
                    ]
                )
            elif cell_type == 'GRU':
                self.rnn = nn.ModuleList(
                    [nn.GRUCell(src_dim + embedding_dim, hidden_dim)]
                    + [
                        nn.GRUCell(hidden_dim, hidden_dim)
                        for _ in range(num_layers - 1)
                    ]
                )
            else:
                raise NotImplementedError
        self.hidden_dim = hidden_dim

    def forward(self, prev_hidden, src, tgt):  # src: [b, L, c]
        # Returns (new_hidden, alpha); alpha is the [b, L, 1] attention
        # weight over the L source positions.
        src_features = self.i2h(src)  # [b, L, h]
        # Query with the (top layer's) previous hidden state. Indexing [0]
        # assumes an LSTM-style (h, c) tuple — NOTE(review): the GRU path
        # would need plain tensors here; confirm callers only use LSTM.
        if self.num_layers == 1:
            prev_hidden_proj = self.h2h(prev_hidden[0]).unsqueeze(1)  # [b, 1, h]
        else:
            prev_hidden_proj = self.h2h(prev_hidden[-1][0]).unsqueeze(1)  # [b, 1, h]
        attention_logit = self.score(
            torch.tanh(src_features + prev_hidden_proj)  # [b, L, h]
        )  # [b, L, 1]
        alpha = F.softmax(attention_logit, dim=1)  # [b, L, 1]
        # Context vector: attention-weighted sum of encoder features.
        context = torch.bmm(alpha.permute(0, 2, 1), src).squeeze(1)  # [b, c]
        concat_context = torch.cat([context, tgt], 1)  # [b, c+e]
        if self.num_layers == 1:
            cur_hidden = self.rnn(concat_context, prev_hidden)
        else:
            cur_hidden = []
            for i, layer in enumerate(self.rnn):
                if i == 0:
                    concat_context = layer(concat_context, prev_hidden[i])
                else:
                    # feed the hidden state (tuple element 0) of the layer
                    # below as this layer's input
                    concat_context = layer(concat_context[0], prev_hidden[i])
                cur_hidden.append(concat_context)
        return cur_hidden, alpha
class AttentionDecoder(nn.Module):
    """Autoregressive decoder: embeds the previous token, attends over the
    encoder features via AttentionCell, and projects each hidden state to
    per-class logits. Supports teacher forcing during training and greedy
    decoding otherwise."""

    def __init__(
        self,
        num_classes,
        src_dim,
        embedding_dim,
        hidden_dim,
        pad_id,
        st_id,
        num_layers=1,
        cell_type='LSTM',
        checkpoint=None,
    ):
        super(AttentionDecoder, self).__init__()
        # num_classes + 1 embedding rows — presumably one extra special
        # token; TODO confirm against the vocabulary construction.
        self.embedding = nn.Embedding(num_classes + 1, embedding_dim)
        self.attention_cell = AttentionCell(
            src_dim, hidden_dim, embedding_dim, num_layers, cell_type
        )
        self.hidden_dim = hidden_dim
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.generator = nn.Linear(hidden_dim, num_classes)  # logits head
        self.pad_id = pad_id
        self.st_id = st_id  # id of the [START] token
        if checkpoint is not None:
            self.load_state_dict(checkpoint)

    def forward(
        self, src, text, is_train=True, teacher_forcing_ratio=1.0, batch_max_length=50
    ):
        """
        input:
            batch_H : contextual_feature H = hidden state of encoder. [batch_size x num_steps x contextual_feature_channels]
            text : the text-index of each image. [batch_size x (max_length+1)]. +1 for [START] token. text[:, 0] = [START].
        output: probability distribution at each step [batch_size x num_steps x num_classes]
        """
        batch_size = src.size(0)
        # One step fewer than batch_max_length: the [START] column of `text`
        # is input only and gets no prediction slot.
        num_steps = batch_max_length - 1
        output_hiddens = (
            torch.FloatTensor(batch_size, num_steps, self.hidden_dim)
            .fill_(0)
            .to(device)
        )
        # Zero-initialised (h, c) state, one tuple per layer.
        if self.num_layers == 1:
            hidden = (
                torch.FloatTensor(batch_size, self.hidden_dim).fill_(0).to(device),
                torch.FloatTensor(batch_size, self.hidden_dim).fill_(0).to(device),
            )
        else:
            hidden = [
                (
                    torch.FloatTensor(batch_size, self.hidden_dim).fill_(0).to(device),
                    torch.FloatTensor(batch_size, self.hidden_dim).fill_(0).to(device),
                )
                for _ in range(self.num_layers)
            ]
        if is_train and random.random() < teacher_forcing_ratio:
            # Teacher forcing: feed the ground-truth token at every step.
            for i in range(num_steps):
                # one-hot vectors for a i-th char. in a batch
                embedd = self.embedding(text[:, i])
                # hidden : decoder's hidden s_{t-1}, batch_H : encoder's hidden H, char_onehots : one-hot(y_{t-1})
                hidden, alpha = self.attention_cell(hidden, src, embedd)
                if self.num_layers == 1:
                    output_hiddens[:, i, :] = hidden[
                        0
                    ]  # LSTM hidden index (0: hidden, 1: Cell)
                else:
                    output_hiddens[:, i, :] = hidden[-1][0]
            # Project all collected hidden states to logits in one call.
            probs = self.generator(output_hiddens)
        else:
            # Greedy decoding: feed back the argmax prediction each step.
            targets = (
                torch.LongTensor(batch_size).fill_(self.st_id).to(device)
            )  # [START] token
            probs = (
                torch.FloatTensor(batch_size, num_steps, self.num_classes)
                .fill_(0)
                .to(device)
            )
            for i in range(num_steps):
                embedd = self.embedding(targets)
                hidden, alpha = self.attention_cell(hidden, src, embedd)
                if self.num_layers == 1:
                    probs_step = self.generator(hidden[0])
                else:
                    probs_step = self.generator(hidden[-1][0])
                probs[:, i, :] = probs_step
                _, next_input = probs_step.max(1)
                targets = next_input
        return probs  # batch_size x num_steps x num_classes
class Attention(nn.Module):
    """CNN encoder plus attention-based recurrent decoder for predicting a
    token sequence from an image."""

    def __init__(
        self,
        FLAGS,
        train_dataset,
        checkpoint=None,
    ):
        super(Attention, self).__init__()
        # Convolutional feature extractor over the raw input image.
        self.encoder = CNN(FLAGS.data.rgb)
        cfg = FLAGS.Attention
        self.decoder = AttentionDecoder(
            num_classes=len(train_dataset.id_to_token),
            src_dim=cfg.src_dim,
            embedding_dim=cfg.embedding_dim,
            hidden_dim=cfg.hidden_dim,
            pad_id=train_dataset.token_to_id[PAD],
            st_id=train_dataset.token_to_id[START],
            num_layers=cfg.layer_num,
            cell_type=cfg.cell_type)
        self.criterion = nn.CrossEntropyLoss()
        if checkpoint:
            self.load_state_dict(checkpoint)

    def forward(self, input, expected, is_train, teacher_forcing_ratio):
        features = self.encoder(input)
        batch, channels, height, width = features.size()
        # Flatten the spatial grid into a sequence of feature vectors:
        # [b, c, h, w] -> [b, h*w, c]
        features = features.view(batch, channels, height * width).transpose(1, 2)
        # -> [b, sequence length, class size]
        return self.decoder(features, expected, is_train, teacher_forcing_ratio,
                            batch_max_length=expected.size(1))
5047905 | <reponame>lmyybh/pytorch-networks
import torch
import torch.nn as nn
class ProWGANDLoss(nn.Module):
    """WGAN-GP discriminator loss for a progressively growing GAN:
    -E[D(real)] + E[D(fake)] + gradient penalty."""

    def __init__(self):
        super(ProWGANDLoss, self).__init__()

    def gradient_penalty(self, D, real_img, fake_img, step, alpha, LAMDA=10):
        """Penalty pushing the critic's gradient norm towards 1, evaluated
        on random per-sample interpolations of real and fake images."""
        batch = real_img.size(0)
        device = real_img.device
        # One mixing weight per sample, broadcast to the full image shape.
        mix = torch.rand(batch, 1)
        mix = mix.expand(batch, real_img.nelement() // batch).reshape(real_img.shape).to(device)
        interpolated = (mix * real_img + (1 - mix) * fake_img).requires_grad_(True).to(device)
        critic_out = D(interpolated, step=step, alpha=alpha)
        ones = torch.ones(critic_out.shape).to(device)
        grads = torch.autograd.grad(outputs=critic_out, inputs=interpolated,
                                    grad_outputs=ones, create_graph=True,
                                    only_inputs=True)[0]
        grads = grads.reshape(batch, -1)
        return LAMDA * ((grads.norm(2, dim=1) - 1) ** 2).mean()

    def forward(self, G, D, z, img, step, alpha):
        """Full critic loss for one batch: real term, fake term (generator
        detached), and the gradient penalty."""
        real_term = -D(img, step=step, alpha=alpha).mean()
        generated = G(z, step=step, alpha=alpha)
        fake_term = D(generated.detach(), step=step, alpha=alpha).mean()
        penalty = self.gradient_penalty(D, img.detach(), generated.detach(),
                                        step=step, alpha=alpha, LAMDA=10)
        return real_term + fake_term + penalty
| StarcoderdataPython |
12827197 | <gh_stars>1-10
from django.db import models
from address.models import AddressField
class Entity(models.Model):
    """Abstract base model adding created/updated timestamps and a
    best-effort bulk attribute updater."""
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True

    def update_attrs(self, **attrs):
        """Set each keyword argument as an attribute, then save.

        Best-effort: failures are swallowed so callers never crash on a
        partial update. Attributes assigned before a failure remain set on
        the in-memory instance but are not persisted.
        """
        try:
            for key in attrs:
                setattr(self, key, attrs[key])
            self.save()
        except AttributeError:
            pass
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception while keeping the
            # original silent best-effort semantics.
            pass
class Licensee(Entity):
    """A licensee, identified by an external customer id."""
    customer_id = models.BigIntegerField()

    @property
    def first_cert(self):
        """Return this licensee's first certificate record, or None.

        Bug fix: LicenseeCert.licensee declares related_name='certs', so the
        reverse accessor is `self.certs`; `self.cert` raised AttributeError.
        """
        return self.certs.first()
class LicenseeCert(Entity):
    """A certificate held by a licensee, as captured from the source system."""
    licensee = models.ForeignKey(Licensee, related_name='certs', null=True)
    cert_id = models.BigIntegerField()  # external certificate record id
    certificate = models.CharField(max_length=9)  # certificate number
    certificate_status = models.NullBooleanField()
    status_date = models.DateTimeField(null=True)
    legal_name = models.CharField(max_length=1024)
    dba = models.CharField(max_length=1024)  # "doing business as" name
    address = AddressField(null=True)
class Scrape(Entity):
    """Record of a scraping attempt; `error` holds any failure message."""
    licensee = models.ForeignKey(Licensee, null=True)
    error = models.TextField()
class InspectionReport(Entity):
    """An inspection report attached to a licensee certificate."""
    licensee_cert = models.ForeignKey(LicenseeCert, null=True)
    inspection_site_name = models.CharField(max_length=1024)
    inspection_date = models.DateField(null=True)
    inspection_type = models.CharField(max_length=256)
    prepared_by = models.CharField(max_length=2048)
    prepared_by_title = models.CharField(max_length=512)
    inspector_number = models.BigIntegerField(null=True)
    # report body text — presumably filled by ocr_pdf_to_text once that is
    # implemented; TODO confirm
    text = models.TextField()
    img_link = models.URLField(max_length=1024)
    img_file = models.ImageField(upload_to='inspection_reports/')

    def ocr_pdf_to_text(self):
        # Placeholder: OCR extraction of the stored report image is not
        # implemented yet.
        raise NotImplementedError
class AnimalInventory(Entity):
    """Animal counts listed on an inspection report."""
    inspection_report = models.ForeignKey(InspectionReport, null=True)
    animal_count = models.IntegerField(null=True)
    animal_name = models.CharField(max_length=1024)
    animal_group_name = models.CharField(max_length=2048)
class NonCompliance(Entity):
    """One non-compliance item cited on an inspection report."""
    inspection_report = models.ForeignKey(InspectionReport, null=True)
    cfrs_id = models.BigIntegerField(null=True)  # regulation citation id
    regulation_section = models.CharField(max_length=128)
    description = models.CharField(max_length=2048)
    direct_non_compliance = models.NullBooleanField()
    repeat_non_compliance = models.NullBooleanField()
260649 | <filename>music21/tree/fromStream.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: timespans/fromStream.py
# Purpose: Tools for creating timespans from Streams
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2013-16 <NAME> and the music21
# Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
'''
Tools for creating timespans (fast, manipulable objects) from Streams
'''
import unittest
from music21.base import Music21Object
from music21 import common
from music21 import key
from music21.tree import spans
from music21.tree import timespanTree
from music21.tree import trees
def listOfTreesByClass(inputStream,
                       currentParentage=None,
                       initialOffset=0.0,
                       flatten=False,
                       classLists=None,
                       useTimespans=False):
    r'''
    Recurses through `inputStream`, and constructs TimespanTrees for each
    encountered substream and PitchedTimespan for each encountered non-stream
    element.

    `classLists` should be a sequence of elements contained in `classSet`. One
    TimespanTree will be constructed for each element in `classLists`, in
    a single optimized pass through the `inputStream`.

    This is used internally by `streamToTimespanTree`.

    >>> score = tree.makeExampleScore()

    Get everything in the score

    >>> treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True)
    >>> treeList
    [<TimespanTree {2} (-inf to inf) <music21.stream.Score ...>>]
    >>> tl0 = treeList[0]
    >>> for t in tl0:
    ...     print(t)
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>
    <TimespanTree {4} (-inf to inf) <music21.stream.Part ...>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 1 offset=0.0>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 2 offset=2.0>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 3 offset=4.0>>
        <TimespanTree {0} (-inf to inf) <music21.stream.Measure 4 offset=6.0>>

    Now filter the Notes and the Clefs & TimeSignatures of the score
    (flattened) into a list of two TimespanTrees

    >>> classLists = ['Note', ('Clef', 'TimeSignature')]
    >>> treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True,
    ...            classLists=classLists, flatten=True)
    >>> treeList
    [<TimespanTree {12} (0.0 to 8.0) <music21.stream.Score ...>>,
     <TimespanTree {4} (0.0 to 0.0) <music21.stream.Score ...>>]
    '''
    if currentParentage is None:
        currentParentage = (inputStream,)
        # fix non-tuple classLists -- first call only...
        if classLists:
            for i, cl in enumerate(classLists):
                if not common.isIterable(cl):
                    classLists[i] = (cl,)

    lastParentage = currentParentage[-1]

    if useTimespans:
        treeClass = timespanTree.TimespanTree
    else:
        treeClass = trees.OffsetTree

    # one output tree per requested class filter (or a single unfiltered one)
    if classLists is None or not classLists:
        outputTrees = [treeClass(source=lastParentage)]
        classLists = []
    else:
        outputTrees = [treeClass(source=lastParentage) for _ in classLists]

    # do this to avoid munging activeSites
    inputStreamElements = inputStream._elements[:] + inputStream._endElements

    for element in inputStreamElements:
        offset = lastParentage.elementOffset(element) + initialOffset
        wasStream = False

        if element.isStream:
            # recurse into the substream, then either splice its spans into
            # our trees (flatten) or insert the subtree whole (nested)
            localParentage = currentParentage + (element,)
            containedTrees = listOfTreesByClass(element,
                                               currentParentage=localParentage,
                                               initialOffset=offset,
                                               flatten=flatten,
                                               classLists=classLists,
                                               useTimespans=useTimespans)
            for outputTree, subTree in zip(outputTrees, containedTrees):
                if flatten is not False:  # True or semiFlat
                    outputTree.insert(subTree[:])
                else:
                    outputTree.insert(subTree.lowestPosition(), subTree)
            wasStream = True

        # 'semiFlat' inserts the stream element itself as well as recursing
        if not wasStream or flatten == 'semiFlat':
            parentOffset = initialOffset
            parentEndTime = initialOffset + lastParentage.duration.quarterLength
            endTime = offset + element.duration.quarterLength

            for classBasedTree, classList in zip(outputTrees, classLists):
                if classList and element.classSet.isdisjoint(classList):
                    continue
                if useTimespans:
                    # Keys are wrapped in ElementTimespan (has no pitches)
                    if hasattr(element, 'pitches') and not isinstance(element, key.Key):
                        spanClass = spans.PitchedTimespan
                    else:
                        spanClass = spans.ElementTimespan
                    elementTimespan = spanClass(element=element,
                                                parentage=tuple(reversed(currentParentage)),
                                                parentOffset=parentOffset,
                                                parentEndTime=parentEndTime,
                                                offset=offset,
                                                endTime=endTime)
                    classBasedTree.insert(elementTimespan)
                else:
                    classBasedTree.insert(offset, element)

    return outputTrees
def asTree(inputStream, flatten=False, classList=None, useTimespans=False, groupOffsets=False):
    '''
    Converts a Stream and constructs an :class:`~music21.tree.trees.ElementTree` based on this.

    Use Stream.asTree() generally since that caches the ElementTree.

    >>> score = tree.makeExampleScore()
    >>> elementTree = tree.fromStream.asTree(score)
    >>> elementTree
    <ElementTree {2} (0.0 <0.-20...> to 8.0) <music21.stream.Score exampleScore>>
    >>> for x in elementTree.iterNodes():
    ...     x
    <ElementNode: Start:0.0 <0.-20...> Indices:(l:0 *0* r:1) Payload:<music21.stream.Part ...>>
    <ElementNode: Start:0.0 <0.-20...> Indices:(l:0 *1* r:2) Payload:<music21.stream.Part ...>>

    >>> etFlat = tree.fromStream.asTree(score, flatten=True)
    >>> etFlat
    <ElementTree {20} (0.0 <0.-25...> to 8.0) <music21.stream.Score exampleScore>>

    The elementTree's classSortOrder has changed to -25 to match the lowest positioned element
    in the score, which is an Instrument object (classSortOrder=-25)

    >>> for x in etFlat.iterNodes():
    ...     x
    <ElementNode: Start:0.0 <0.-25...> Indices:(l:0 *0* r:2)
        Payload:<music21.instrument.Instrument 'PartA: : '>>
    <ElementNode: Start:0.0 <0.-25...> Indices:(l:1 *1* r:2)
        Payload:<music21.instrument.Instrument 'PartB: : '>>
    <ElementNode: Start:0.0 <0.0...> Indices:(l:0 *2* r:4) Payload:<music21.clef.BassClef>>
    <ElementNode: Start:0.0 <0.0...> Indices:(l:3 *3* r:4) Payload:<music21.clef.BassClef>>
    ...
    <ElementNode: Start:0.0 <0.20...> Indices:(l:5 *6* r:8) Payload:<music21.note.Note C>>
    <ElementNode: Start:0.0 <0.20...> Indices:(l:7 *7* r:8) Payload:<music21.note.Note C#>>
    <ElementNode: Start:1.0 <0.20...> Indices:(l:0 *8* r:20) Payload:<music21.note.Note D>>
    ...
    <ElementNode: Start:7.0 <0.20...> Indices:(l:15 *17* r:20) Payload:<music21.note.Note C>>
    <ElementNode: Start:End <0.-5...> Indices:(l:18 *18* r:20)
        Payload:<music21.bar.Barline type=final>>
    <ElementNode: Start:End <0.-5...> Indices:(l:19 *19* r:20)
        Payload:<music21.bar.Barline type=final>>

    >>> etFlat.getPositionAfter(0.5)
    SortTuple(atEnd=0, offset=1.0, priority=0, classSortOrder=20, isNotGrace=1, insertIndex=...)

    >>> etFlatNotes = tree.fromStream.asTree(score, flatten=True, classList=(note.Note,))
    >>> etFlatNotes
    <ElementTree {12} (0.0 <0.20...> to 8.0) <music21.stream.Score exampleScore>>
    '''
    def recurseGetTreeByClass(
        inputStream,
        currentParentage,
        initialOffset,
        outputTree=None):
        # Walk the stream (recursively when flattening) and insert each
        # matching element into `outputTree`.
        lastParentage = currentParentage[-1]

        if outputTree is None:
            outputTree = treeClass(source=lastParentage)

        # do this to avoid munging activeSites
        inputStreamElements = inputStream._elements[:] + inputStream._endElements
        parentEndTime = initialOffset + lastParentage.duration.quarterLength

        for element in inputStreamElements:
            flatOffset = common.opFrac(lastParentage.elementOffset(element) + initialOffset)
            if element.isStream and flatten is not False:  # True or 'semiFlat'
                localParentage = currentParentage + (element,)
                recurseGetTreeByClass(element,  # put the elements into the current tree...
                                      currentParentage=localParentage,
                                      initialOffset=flatOffset,
                                      outputTree=outputTree)
                if flatten != 'semiFlat':
                    continue  # do not insert the stream itself unless we are doing semiflat

            if classList and element.classSet.isdisjoint(classList):
                continue

            endTime = flatOffset + element.duration.quarterLength

            if useTimespans:
                pitchedTimespan = spans.PitchedTimespan(
                    element=element,
                    parentage=tuple(reversed(currentParentage)),
                    parentOffset=initialOffset,
                    parentEndTime=parentEndTime,
                    offset=flatOffset,
                    endTime=endTime)
                outputTree.insert(pitchedTimespan)
            elif groupOffsets is False:
                # for sortTuples: keep each element's full sort position,
                # shifted to its flattened offset
                position = element.sortTuple(lastParentage)
                flatPosition = position.modify(offset=flatOffset)
                outputTree.insert(flatPosition, element)
            else:
                outputTree.insert(flatOffset, element)

        return outputTree

    # first time through...
    if useTimespans:
        treeClass = timespanTree.TimespanTree
    elif groupOffsets is False:
        treeClass = trees.ElementTree
    else:
        treeClass = trees.OffsetTree

    # this lets us use the much faster populateFromSortedList -- the one-time
    # sort in C is faster than the node implementation.
    if not inputStream.isSorted and inputStream.autoSort:
        inputStream.sort()

    # check to see if we can shortcut and make a Tree very fast from a sorted list.
    if (inputStream.isSorted
            and groupOffsets is False  # currently we can't populate for an OffsetTree*
            and (inputStream.isFlat or flatten is False)):
        outputTree = treeClass(source=inputStream)
        inputStreamElements = inputStream._elements[:] + inputStream._endElements
        # Can use tree.populateFromSortedList and speed up by an order of magnitude
        if classList is None:
            elementTupleList = [(e.sortTuple(inputStream), e) for e in inputStreamElements]
        else:
            elementTupleList = [(e.sortTuple(inputStream), e) for e in inputStreamElements
                                if not e.classSet.isdisjoint(classList)]
        outputTree.populateFromSortedList(elementTupleList)
        if outputTree.rootNode is not None:
            outputTree.rootNode.updateEndTimes()
        return outputTree
        # * to make this work for an OffsetTree, we'd need to use OffsetIterator
        #   first to make it so that the midpoint of the list is also the root node, etc.
    else:
        return recurseGetTreeByClass(inputStream,
                                     currentParentage=(inputStream,),
                                     initialOffset=0.0)
def asTimespans(inputStream, flatten, classList):
    r'''
    Recurses through a score and constructs a
    :class:`~music21.tree.trees.TimespanTree`. Use Stream.asTimespans() generally
    since that caches the TimespanTree.

    >>> score = corpus.parse('bwv66.6')
    >>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
    ...            classList=(note.Note, chord.Chord))
    >>> scoreTree
    <TimespanTree {165} (0.0 to 36.0) <music21.stream.Score ...>>
    >>> for x in scoreTree[:5]:
    ...     x
    ...
    <PitchedTimespan (0.0 to 0.5) <music21.note.Note C#>>
    <PitchedTimespan (0.0 to 0.5) <music21.note.Note A>>
    <PitchedTimespan (0.0 to 0.5) <music21.note.Note A>>
    <PitchedTimespan (0.0 to 1.0) <music21.note.Note E>>
    <PitchedTimespan (0.5 to 1.0) <music21.note.Note B>>

    >>> scoreTree = tree.fromStream.asTimespans(score, flatten=False, classList=())

    Each of these has 11 elements -- mainly the Measures

    >>> for x in scoreTree:
    ...     x
    ...
    <ElementTimespan (0.0 to 0.0) <music21.metadata.Metadata object at 0x...>>
    <ElementTimespan (0.0 to 0.0) <music21.layout.StaffGroup ...>>
    <TimespanTree {11} (0.0 to 36.0) <music21.stream.Part Soprano>>
    <TimespanTree {11} (0.0 to 36.0) <music21.stream.Part Alto>>
    <TimespanTree {11} (0.0 to 36.0) <music21.stream.Part Tenor>>
    <TimespanTree {11} (0.0 to 36.0) <music21.stream.Part Bass>>

    >>> tenorElements = scoreTree[4]
    >>> tenorElements
    <TimespanTree {11} (0.0 to 36.0) <music21.stream.Part Tenor>>

    >>> tenorElements.source
    <music21.stream.Part Tenor>

    >>> tenorElements.source is score[3]
    True
    '''
    # With no filter, accept every Music21Object.
    effectiveClassList = Music21Object if classList is None else classList
    forest = listOfTreesByClass(inputStream,
                                initialOffset=0.0,
                                flatten=flatten,
                                classLists=[effectiveClassList],
                                useTimespans=True)
    # exactly one class filter was passed, so exactly one tree comes back
    return forest[0]
# --------------------
class Test(unittest.TestCase):

    def testFastPopulate(self):
        '''
        tests that the isSorted speed up trick ends up producing identical results.
        '''
        from music21 import corpus
        sf = corpus.parse('bwv66.6').flatten()
        sfTree = sf.asTree()
        # print(sfTree)

        # Invalidate the sorted flag and the cached tree so asTree() takes
        # the slow (node-by-node) path instead of populateFromSortedList.
        sf.isSorted = False
        sf._cache = {}
        sfTreeSlow = sf.asTree()
        for i in range(len(sf)):
            fastI = sfTree[i]
            slowI = sfTreeSlow[i]
            # identical objects, not merely equal ones
            self.assertIs(fastI, slowI)

    def testAutoSortExample(self):
        from music21.tree import makeExampleScore
        sc = makeExampleScore()
        sc.sort()
        t = asTree(sc)
        # the example score spans 8 quarter notes
        self.assertEqual(t.endTime, 8.0)
        # print(repr(t))

    # def x_testExampleScoreAsTimespans(self):
    #     from music21 import tree
    #     score = tree.makeExampleScore()
    #     treeList = tree.fromStream.listOfTreesByClass(score, useTimespans=True)
    #     tl0 = treeList[0]
# --------------------
if __name__ == '__main__':
    import music21
    # mainTest runs this module's doctests plus the Test case above.
    music21.mainTest(Test)  # , runTest='testAutoSortExample')
| StarcoderdataPython |
3204200 | BBBB BBBBBBBBBBBBBB BBBB
XXXXX XXXXXXXXBBB BBBBBBBBX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX gettext('Search') X XXXXXXXXXXX XXXXXXXX XXXXXXXXX
XXXXXX
BB BBBBBBBBBBBBBBBBBBBB
BB BBBBBBBBBBBBBBBBBBBBBBBBBBB BB B
XXXXXX XXXXXXXXXXXXX XXXXXXXXXXX XXXXXXXXX
BBBB
XXXX XXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXX gettext('Everything') XXXXXXXXX
BBB BBBBBBBBBBBBB BBBBB BB BBBBBBBBBBBBBBBBBBBB
XXXXXXX XXXXXXXX
BB BBBBB BB BBBBBBBBBBBBBBBBBBBBXXXXXXXXBBBBBX
XXXXXXXXX
BBBBBB
XXXXXXXXX
XXXXXX
BBBBB
BBBBB
XXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXX XXXXXXX gettext('Go') XX
XXXXXXX
| StarcoderdataPython |
11227489 | <filename>setup/audioparser.py
"""
This module contains helper functions to:
- generate the list of file paths of the audio samples
- extract audio features for each audio sample
- create a dataframe containing the extracted features
"""
import os
from multiprocessing import Pool
# import time ## uncomment to measure execution time
import librosa
import numpy as np
import pandas as pd
# Root folder scanned for audio samples (relative to the working directory).
DATA_DIR = os.path.join(os.curdir, "data")
# Framing parameters used for the frame-based audio features.
FRAME_LENGTH = 2048
HOP_LENGTH = 1024
def get_file_paths(data_dir=None):
    """
    Return a dictionary {index: path} with the paths of all files found
    (recursively) under *data_dir*.

    Parameters
    ----------
    data_dir : str, optional
        Root folder to scan. Defaults to the module-level DATA_DIR, keeping
        the original no-argument call working unchanged.
    """
    root = DATA_DIR if data_dir is None else data_dir
    # extract file names per folder path into a dictionary
    folder_contents = {a: c for a, b, c in os.walk(root) if len(c) > 0}
    # create a list to collect file paths
    paths = []
    for path, files in folder_contents.items():
        for file in files:
            paths.append(os.path.join(path, file))
    # convert the list of file paths into a dictionary with id's
    file_paths = {idx: path for (idx, path) in enumerate(paths)}
    return file_paths
def calculate_amplitude_envelope(y, frame_size):
    """
    Return the amplitude envelope of *y*: the maximum sample value of each
    non-overlapping frame of *frame_size* samples.
    """
    frame_maxima = [
        max(y[start:start + frame_size])
        for start in range(0, len(y), frame_size)
    ]
    return np.array(frame_maxima)
def calculate_split_frequency(S, split_freq, sr):
    """
    Map *split_freq* (in Hz) to its row (bin) index in spectrogram *S*.

    The S.shape[0] rows cover the 0 .. sr/2 Hz range, so each bin spans
    (sr / 2) / S.shape[0] Hz.
    """
    hz_per_bin = (sr / 2) / S.shape[0]
    return int(np.floor(split_freq / hz_per_bin))
def calculate_band_energy_ratio(S, split_freq, sr):
    """
    Return the per-frame Band Energy Ratio (BER): the summed dB power below
    *split_freq* divided by the summed dB power above it.
    """
    split_bin = calculate_split_frequency(S, split_freq, sr)
    # transpose so each row is one time frame's dB power spectrum
    power_frames = librosa.amplitude_to_db(S).T
    ratios = [
        np.sum(frame[:split_bin]) / np.sum(frame[split_bin:])
        for frame in power_frames
    ]
    return np.array(ratios)
def get_features(file_path):
    """
    This function gets a feature vector for a file on a specified file path.

    When extracting features for a collection of audio samples, we want to
    sequentially access audio files, perform the desired computations, extract the
    useful information, and close the files to get them out of memory. By combining
    these steps file by file, we can use multiprocessing to distribute the workload.
    That is the purpose of this function.

    As input, the function takes a tuple of index and file path.
    As output, the function returns a one-dimensional array; a feature vector
    whose first entry is the index (the "row_index" column).
    """
    # load the file in librosa (sr=None keeps the native sample rate)
    y, sr = librosa.load(path=file_path[1], sr=None)

    # compute time domain features
    # !!! when adding features, also add labels in get_column_labels func !!!
    # Amplitude Envelope
    ae = calculate_amplitude_envelope(y=y, frame_size=FRAME_LENGTH)
    # Root Mean Square Energy
    rms = librosa.feature.rms(y=y)
    # Zero Crossing Rate
    zcr = librosa.feature.zero_crossing_rate(y=y)

    # apply Short Time Fourier Transform (magnitude spectrogram)
    spec = np.abs(librosa.stft(y=y))

    # compute frequency domain features
    # !!! when adding features, also add labels in get_column_labels func !!!
    # BER
    ber = calculate_band_energy_ratio(S=spec, split_freq=1000, sr=sr)
    # Spectral centroid
    sce = librosa.feature.spectral_centroid(S=spec, sr=sr)
    # Spectral bandwidth
    sbw = librosa.feature.spectral_bandwidth(S=spec, sr=sr)
    # Spectral contrast
    sco = librosa.feature.spectral_contrast(S=spec, sr=sr)
    # Spectral flatness
    sfl = librosa.feature.spectral_flatness(S=spec)
    # Spectral roll-off (default 85%, plus near-max and near-min percentiles)
    sro = librosa.feature.spectral_rolloff(S=spec, sr=sr)
    sro_99 = librosa.feature.spectral_rolloff(S=spec, sr=sr, roll_percent=0.99)
    sro_01 = librosa.feature.spectral_rolloff(S=spec, sr=sr, roll_percent=0.01)

    # list all features
    features = [ae, rms, zcr, ber, sce, sbw, sco, sfl, sro, sro_99, sro_01]
    # compute aggregations for features
    aggregations = [(f.mean(), f.min(), f.max(), f.std()) for f in features]

    # compose list of features; the sample index comes first
    audio_features = [file_path[0]]
    audio_features.extend([i for tup in aggregations for i in tup])

    # append distributed features (coarse per-second / per-2-second values;
    # counts must match the fN labels in get_column_labels)
    # append distributed AE
    audio_features.extend(calculate_amplitude_envelope(y=y, frame_size=sr*2))
    # append distributed RMS
    rms_distr = librosa.feature.rms(y=y, hop_length=sr, frame_length=sr*2)[0].tolist()
    audio_features.extend(rms_distr)
    # append distributed ZCR
    zcr_distr = librosa.feature.zero_crossing_rate(y=y, hop_length=sr, frame_length=sr*2)[0].tolist()
    audio_features.extend(zcr_distr)

    return audio_features
def get_column_labels():
    """
    Build the list of column names matching the feature vector produced by
    get_features: a row index, four aggregate statistics per feature, and
    the distributed AE / RMS / ZCR values.
    """
    # names of the extracted features, in get_features order
    feature_labels = ["amplitude_envelope",
                      "root_mean_square_energy",
                      "zero_crossing_rate",
                      "band_energy_ratio",
                      "spectral_centroid",
                      "spectral_bandwidth",
                      "spectral_contrast",
                      "spectral_flatness",
                      "spectral_rolloff",
                      "spectral_rolloff_99",
                      "spectral_rolloff_01"]
    # names of the used descriptive statistics
    stat_suffixes = ["_mean", "_min", "_max", "_std"]

    columns = ["row_index"]
    # aggregate statistic columns: feature-major, statistic-minor
    columns.extend(label + suffix
                   for label in feature_labels
                   for suffix in stat_suffixes)
    # distributed amplitude envelope (five 2-second frames)
    columns.extend("amplitude_envelope_f%d" % i for i in range(1, 6))
    # distributed RMS and ZCR (eleven 1-second hops each)
    columns.extend("root_mean_square_energy_f%d" % i for i in range(11))
    columns.extend("zero_crossing_rate_f%d" % i for i in range(11))
    return columns
def get_features_data(file_paths):
    """
    Build a dictionary containing:
      - "columns": the list of column labels
      - "data":    one row of extracted features per audio file
    suitable for constructing a pandas DataFrame.  Feature extraction is
    fanned out over a multiprocessing pool to speed up the process.
    """
    # start_time = time.time() ## uncomment to measure execution time
    pool = Pool()
    # one get_features call per (key, path) item, distributed over the workers
    rows = pool.map(get_features, file_paths.items())
    pool.close()
    pool.join()
    # duration = time.time() - start_time ## uncomment to measure execution time
    # print(f"Processing {len(file_paths)} samples took {duration:.0f} seconds.") ## uncomment to measure execution time
    return {"columns": get_column_labels(), "data": rows}
def get_base_df(file_paths):
    """
    Derive per-file metadata from each audio file path.

    The folder hierarchy is assumed to encode the metadata as
    ``.../<snr>_dB_.../<machine_type>/<machine_id>/<normal|abnormal>/<file>``
    (TODO confirm against the actual dataset layout -- this block carried a
    FIXME and referenced undefined names before this fix).

    :param file_paths: dict mapping an index to an audio file path
    :return: list of ``[file_path, machine_type, machine_id, snr, is_defect]``
             rows, one per input path
    """
    audio_samples = []
    for path in file_paths.values():
        # split folder paths; the last component is the file name itself
        parts = path.split(sep=os.sep)
        folder_names = parts[:-1]
        # deduce information by combining folder names and the level
        # of the folder in the folder hierarchy
        is_defect = folder_names[-1] == "abnormal"
        machine_id = folder_names[-2]
        machine_type = folder_names[-3].capitalize()
        snr = folder_names[-4].split("_dB_")[0]
        # one metadata row per file, keyed by its full path
        audio_samples.append([path, machine_type, machine_id, snr, is_defect])
    return audio_samples
def get_df(features, file_paths):
    """
    Build the features dataframe.

    :param features: dict with "columns" and "data" (see get_features_data)
    :param file_paths: dict mapping an index to an audio file path
    :return: pandas DataFrame of the extracted features

    NOTE(review): merging the per-file metadata from get_base_df into this
    frame is still unfinished (get_base_df carries a FIXME); until then only
    the extracted-features frame is returned so callers no longer get None.
    """
    # per-file base info -- kept for the pending merge, currently unused
    df_base = get_base_df(file_paths)
    # the extracted audio features
    df_extra = pd.DataFrame(data=features["data"], columns=features["columns"])
    # TODO: join df_base onto df_extra once get_base_df is finalized
    return df_extra
if __name__ == '__main__':
    # Goal: get file paths, calculate the features, construct a dataframe,
    # and (eventually) export it to csv.
    # collect the audio files to process
    file_paths = get_file_paths()
    # extract the features for every file (multiprocessed)
    features = get_features_data(file_paths)
    # construct the dataframe from the extracted features
    df = get_df(features, file_paths)
    # csv export is not implemented yet
    print("Still under construction.")
3306241 | import os
import ctypes
import copy
import struct
import windows
from windows import winproxy
from windows import utils
import windows.generated_def as gdef
from windows.winobject import process
from windows.winobject import network
from windows.winobject import registry
from windows.winobject import exception
from windows.winobject import service
from windows.winobject import volume
from windows.winobject import wmi
from windows.winobject import object_manager
from windows.winobject import handle
from windows.winobject import event_log
from windows.winobject import task_scheduler
from windows.winobject import system_module
from windows.generated_def.winstructs import *
from windows.dbgprint import dbgprint
class System(object):
    """The state of the current ``Windows`` system ``Python`` is running on"""

    # Setup these in a fixedproperty ?
    network = network.Network()
    """Object of class :class:`windows.winobject.network.Network`"""
    registry = registry.Registry()
    """Object of class :class:`windows.winobject.registry.Registry`"""

    @property
    def processes(self):
        """The list of running processes

        :type: [:class:`~windows.winobject.process.WinProcess`] -- A list of Process
        """
        return self.enumerate_processes()

    @property
    def threads(self):
        """The list of running threads

        :type: [:class:`~windows.winobject.process.WinThread`] -- A list of Thread
        """
        return self.enumerate_threads_setup_owners()

    @property
    def logicaldrives(self):
        """List of logical drives [C:\\, ...]

        :type: [:class:`~windows.winobject.volume.LogicalDrive`] -- A list of LogicalDrive
        """
        return volume.enum_logical_drive()

    @property
    def services(self):
        """The list of services

        :type: [:class:`~windows.winobject.service.ServiceA`] -- A list of Service"""
        return service.enumerate_services()

    @property
    def handles(self):
        """The list of system handles

        :type: [:class:`~windows.winobject.handle.Handle`] -- A list of Handle"""
        return handle.enumerate_handles()

    @property
    def modules(self):
        """The list of system modules

        :type: [:class:`~windows.winobject.system_module.SystemModule`] -- A list of :class:`~windows.winobject.system_module.SystemModule` or :class:`~windows.winobject.system_module.SystemModuleWow64`
        """
        return system_module.enumerate_kernel_modules()

    @utils.fixedpropety
    def bitness(self):
        """The bitness of the system

        :type: :class:`int` -- 32 or 64
        """
        # A 32-bit Python on a 64-bit system still sees the real architecture:
        # PROCESSOR_ARCHITEW6432 is only set for processes running under WOW64.
        if os.environ["PROCESSOR_ARCHITECTURE"].lower() != "x86":
            return 64
        if "PROCESSOR_ARCHITEW6432" in os.environ:
            return 64
        return 32

    @utils.fixedpropety
    def wmi(self):
        r"""An object to perform wmi requests to various namespaces

        :type: :class:`~windows.winobject.wmi.WmiManager`"""
        return wmi.WmiManager()

    @utils.fixedpropety
    def event_log(self):
        """An object to query the system event logs

        :type: :class:`~windows.winobject.event_log.EvtlogManager`
        """
        return event_log.EvtlogManager()

    @utils.fixedpropety
    def task_scheduler(self):
        """An object able to manage scheduled tasks on the local system

        :type: :class:`~windows.winobject.task_scheduler.TaskService`
        """
        windows.com.init()
        clsid_task_scheduler = gdef.IID.from_string("0f87369f-a4e5-4cfc-bd3e-73e6154572dd")
        task_service = task_scheduler.TaskService()
        # What if non-implemented (WinXP) ?
        # Raise (NotImplementedError?) ? Return NotImplemented ?
        windows.com.create_instance(clsid_task_scheduler, task_service)
        task_service.connect()
        return task_service

    @utils.fixedpropety
    def object_manager(self):
        """An object to query the objects in the kernel object manager.

        :type: :class:`~windows.winobject.object_manager.ObjectManager`
        """
        return windows.winobject.object_manager.ObjectManager()

    # TODO: use GetComputerNameExA ? and recover other names ?
    @utils.fixedpropety
    def computer_name(self):
        """The name of the computer

        :type: :class:`str`
        """
        size = DWORD(0x1000)
        buf = ctypes.c_buffer(size.value)
        winproxy.GetComputerNameA(buf, ctypes.byref(size))
        # GetComputerNameA rewrites ``size`` with the number of chars written
        return buf[:size.value]

    @utils.fixedpropety
    def version(self):
        """The version of the system

        :type: (:class:`int`, :class:`int`) -- (Major, Minor)
        """
        data = self.get_version()
        result = data.dwMajorVersion, data.dwMinorVersion
        if result == (6, 2):
            # GetVersionEx caps the answer at 6.2 for manifest-less binaries
            # (Windows 8.1+); fall back to kernel32's file version instead.
            result_str = self.get_file_version("kernel32")
            result_tup = [int(x) for x in result_str.split(".")]
            result = tuple(result_tup[:2])
        return result

    @utils.fixedpropety
    def version_name(self):
        """The name of the system version, values are:

        * Windows Server 2016
        * Windows 10
        * Windows Server 2012 R2
        * Windows 8.1
        * Windows Server 2012
        * Windows 8
        * Windows Server 2008 R2
        * Windows 7
        * Windows Server 2008
        * Windows Vista
        * Windows XP Professional x64 Edition
        * TODO: version (5.2) + is_workstation + bitness == 32 (don't even know if possible..)
        * Windows Server 2003 R2
        * Windows Server 2003
        * Windows XP
        * Windows 2000
        * "Unknow Windows <version={0} | is_workstation={1}>".format(version, is_workstation)

        :type: :class:`str`
        """
        version = self.version
        is_workstation = self.product_type == VER_NT_WORKSTATION
        if version == (10, 0):
            return ["Windows Server 2016", "Windows 10"][is_workstation]
        elif version == (6, 3):
            return ["Windows Server 2012 R2", "Windows 8.1"][is_workstation]
        elif version == (6, 2):
            return ["Windows Server 2012", "Windows 8"][is_workstation]
        elif version == (6, 1):
            return ["Windows Server 2008 R2", "Windows 7"][is_workstation]
        elif version == (6, 0):
            return ["Windows Server 2008", "Windows Vista"][is_workstation]
        elif version == (5, 2):
            # SM_SERVERR2 is non-zero on Windows Server 2003 R2 only
            metric = winproxy.GetSystemMetrics(SM_SERVERR2)
            if is_workstation:
                if self.bitness == 64:
                    return "Windows XP Professional x64 Edition"
                else:
                    return "TODO: version (5.2) + is_workstation + bitness == 32"
            elif metric != 0:
                return "Windows Server 2003 R2"
            else:
                return "Windows Server 2003"
        elif version == (5, 1):
            return "Windows XP"
        elif version == (5, 0):
            return "Windows 2000"
        else:
            return "Unknow Windows <version={0} | is_workstation={1}>".format(version, is_workstation)

    VERSION_MAPPER = gdef.FlagMapper(VER_NT_WORKSTATION, VER_NT_DOMAIN_CONTROLLER, VER_NT_SERVER)

    @utils.fixedpropety
    def product_type(self):
        """The product type, value might be:

        * VER_NT_WORKSTATION(0x1L)
        * VER_NT_DOMAIN_CONTROLLER(0x2L)
        * VER_NT_SERVER(0x3L)

        :type: :class:`long` or :class:`int` (or subclass)
        """
        version = self.get_version()
        return self.VERSION_MAPPER[version.wProductType]

    EDITION_MAPPER = gdef.FlagMapper(PRODUCT_UNDEFINED,
                                     PRODUCT_ULTIMATE,
                                     PRODUCT_HOME_BASIC,
                                     PRODUCT_HOME_PREMIUM,
                                     PRODUCT_ENTERPRISE,
                                     PRODUCT_HOME_BASIC_N,
                                     PRODUCT_BUSINESS,
                                     PRODUCT_STANDARD_SERVER,
                                     PRODUCT_DATACENTER_SERVER,
                                     PRODUCT_SMALLBUSINESS_SERVER,
                                     PRODUCT_ENTERPRISE_SERVER,
                                     PRODUCT_STARTER,
                                     PRODUCT_DATACENTER_SERVER_CORE,
                                     PRODUCT_STANDARD_SERVER_CORE,
                                     PRODUCT_ENTERPRISE_SERVER_CORE,
                                     PRODUCT_ENTERPRISE_SERVER_IA64,
                                     PRODUCT_BUSINESS_N,
                                     PRODUCT_WEB_SERVER,
                                     PRODUCT_CLUSTER_SERVER,
                                     PRODUCT_HOME_SERVER,
                                     PRODUCT_STORAGE_EXPRESS_SERVER,
                                     PRODUCT_STORAGE_STANDARD_SERVER,
                                     PRODUCT_STORAGE_WORKGROUP_SERVER,
                                     PRODUCT_STORAGE_ENTERPRISE_SERVER,
                                     PRODUCT_SERVER_FOR_SMALLBUSINESS,
                                     PRODUCT_SMALLBUSINESS_SERVER_PREMIUM,
                                     PRODUCT_HOME_PREMIUM_N,
                                     PRODUCT_ENTERPRISE_N,
                                     PRODUCT_ULTIMATE_N,
                                     PRODUCT_WEB_SERVER_CORE,
                                     PRODUCT_MEDIUMBUSINESS_SERVER_MANAGEMENT,
                                     PRODUCT_MEDIUMBUSINESS_SERVER_SECURITY,
                                     PRODUCT_MEDIUMBUSINESS_SERVER_MESSAGING,
                                     PRODUCT_SERVER_FOUNDATION,
                                     PRODUCT_HOME_PREMIUM_SERVER,
                                     PRODUCT_SERVER_FOR_SMALLBUSINESS_V,
                                     PRODUCT_STANDARD_SERVER_V,
                                     PRODUCT_DATACENTER_SERVER_V,
                                     PRODUCT_ENTERPRISE_SERVER_V,
                                     PRODUCT_DATACENTER_SERVER_CORE_V,
                                     PRODUCT_STANDARD_SERVER_CORE_V,
                                     PRODUCT_ENTERPRISE_SERVER_CORE_V,
                                     PRODUCT_HYPERV,
                                     PRODUCT_STORAGE_EXPRESS_SERVER_CORE,
                                     PRODUCT_STORAGE_STANDARD_SERVER_CORE,
                                     PRODUCT_STORAGE_WORKGROUP_SERVER_CORE,
                                     PRODUCT_STORAGE_ENTERPRISE_SERVER_CORE,
                                     PRODUCT_STARTER_N,
                                     PRODUCT_PROFESSIONAL,
                                     PRODUCT_PROFESSIONAL_N,
                                     PRODUCT_SB_SOLUTION_SERVER,
                                     PRODUCT_SERVER_FOR_SB_SOLUTIONS,
                                     PRODUCT_STANDARD_SERVER_SOLUTIONS,
                                     PRODUCT_STANDARD_SERVER_SOLUTIONS_CORE,
                                     PRODUCT_SB_SOLUTION_SERVER_EM,
                                     PRODUCT_SERVER_FOR_SB_SOLUTIONS_EM,
                                     PRODUCT_SOLUTION_EMBEDDEDSERVER,
                                     PRODUCT_SOLUTION_EMBEDDEDSERVER_CORE,
                                     PRODUCT_SMALLBUSINESS_SERVER_PREMIUM_CORE,
                                     PRODUCT_ESSENTIALBUSINESS_SERVER_MGMT,
                                     PRODUCT_ESSENTIALBUSINESS_SERVER_ADDL,
                                     PRODUCT_ESSENTIALBUSINESS_SERVER_MGMTSVC,
                                     PRODUCT_ESSENTIALBUSINESS_SERVER_ADDLSVC,
                                     PRODUCT_CLUSTER_SERVER_V,
                                     PRODUCT_EMBEDDED,
                                     PRODUCT_STARTER_E,
                                     PRODUCT_HOME_BASIC_E,
                                     PRODUCT_HOME_PREMIUM_E,
                                     PRODUCT_PROFESSIONAL_E,
                                     PRODUCT_ENTERPRISE_E,
                                     PRODUCT_ULTIMATE_E,
                                     PRODUCT_ENTERPRISE_EVALUATION,
                                     PRODUCT_MULTIPOINT_STANDARD_SERVER,
                                     PRODUCT_MULTIPOINT_PREMIUM_SERVER,
                                     PRODUCT_STANDARD_EVALUATION_SERVER,
                                     PRODUCT_DATACENTER_EVALUATION_SERVER,
                                     PRODUCT_ENTERPRISE_N_EVALUATION,
                                     PRODUCT_STORAGE_WORKGROUP_EVALUATION_SERVER,
                                     PRODUCT_STORAGE_STANDARD_EVALUATION_SERVER,
                                     PRODUCT_CORE_ARM,
                                     PRODUCT_CORE_N,
                                     PRODUCT_CORE_COUNTRYSPECIFIC,
                                     PRODUCT_CORE_LANGUAGESPECIFIC,
                                     PRODUCT_CORE,
                                     PRODUCT_PROFESSIONAL_WMC,
                                     PRODUCT_UNLICENSED)

    @utils.fixedpropety
    def edition(self):  # Find a better name ?
        """The edition of the system as one of the ``PRODUCT_*`` flags

        :type: :class:`int` (or subclass)
        """
        version = self.get_version()
        edition = DWORD()
        try:
            winproxy.GetProductInfo(version.dwMajorVersion,
                                    version.dwMinorVersion,
                                    version.wServicePackMajor,
                                    version.wServicePackMinor,
                                    edition)
        except winproxy.ExportNotFound:
            # Windows XP does not implement GetProductInfo.
            # BUGFIX: the original ``assert a, b == (5, 1)`` parsed as
            # ``assert a`` with ``b == (5, 1)`` as the message, so the
            # version tuple was never actually checked.
            assert (version.dwMajorVersion, version.dwMinorVersion) == (5, 1)
            return self._edition_windows_xp()
        return self.EDITION_MAPPER[edition.value]

    def _edition_windows_xp(self):
        """Fallback edition detection for Windows XP (no GetProductInfo)."""
        # Emulate standard response from IsOS(gdef.OS_PROFESSIONAL)
        if winproxy.IsOS(gdef.OS_PROFESSIONAL):
            return PRODUCT_PROFESSIONAL
        return PRODUCT_HOME_BASIC

    @utils.fixedpropety
    def windir(self):
        """The path of the Windows directory

        :type: :class:`str`
        """
        buffer = ctypes.c_buffer(0x100)
        reslen = winproxy.GetWindowsDirectoryA(buffer)
        return buffer[:reslen]

    def get_version(self):
        """Return the raw ``OSVERSIONINFOEXA`` structure filled by GetVersionExA."""
        data = windows.generated_def.OSVERSIONINFOEXA()
        data.dwOSVersionInfoSize = ctypes.sizeof(data)
        winproxy.GetVersionExA(ctypes.cast(ctypes.pointer(data), ctypes.POINTER(windows.generated_def.OSVERSIONINFOA)))
        return data

    def get_file_version(self, name):
        """Return the ``ProductVersion`` string of file ``name`` (e.g. ``"kernel32"``)."""
        size = winproxy.GetFileVersionInfoSizeA(name)
        buf = ctypes.c_buffer(size)
        winproxy.GetFileVersionInfoA(name, 0, size, buf)
        bufptr = PVOID()
        bufsize = UINT()
        # Retrieve the (language, codepage) pair to build the query path
        winproxy.VerQueryValueA(buf, "\\VarFileInfo\\Translation", ctypes.byref(bufptr), ctypes.byref(bufsize))
        bufstr = ctypes.cast(bufptr, LPCSTR)
        tup = struct.unpack("<HH", bufstr.value[:4])
        req = "{0:04x}{1:04x}".format(*tup)
        winproxy.VerQueryValueA(buf, "\\StringFileInfo\\{0}\\ProductVersion".format(req), ctypes.byref(bufptr), ctypes.byref(bufsize))
        bufstr = ctypes.cast(bufptr, LPCSTR)
        return bufstr.value

    @utils.fixedpropety
    def build_number(self):
        """The build version string taken from comctl32's file version

        :type: :class:`str`
        """
        # This returns the last version where ntdll was updated
        # Should look at HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion
        # values: CurrentBuild + UBR
        # windows.system.registry(r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion")["CurrentBuild"].value
        # windows.system.registry(r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion")["UBR"].value
        return self.get_file_version("comctl32")

    @staticmethod
    def enumerate_processes():
        """Return the list of all processes via a Toolhelp32 snapshot."""
        dbgprint("Enumerating processes with CreateToolhelp32Snapshot", "SLOW")
        process_entry = PROCESSENTRY32()
        process_entry.dwSize = ctypes.sizeof(process_entry)
        snap = winproxy.CreateToolhelp32Snapshot(gdef.TH32CS_SNAPPROCESS, 0)
        winproxy.Process32First(snap, process_entry)
        res = []
        res.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
        while winproxy.Process32Next(snap, process_entry):
            res.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
        winproxy.CloseHandle(snap)
        return res

    @staticmethod
    def enumerate_threads_generator():
        """Yield a raw ``THREADENTRY32`` for every thread on the system."""
        # Possibly dangerous: the *same* THREADENTRY32 structure is yielded
        # each time and mutated in place -- copy it to keep it across turns.
        dbgprint("Enumerating threads with CreateToolhelp32Snapshot <generator>", "SLOW")
        thread_entry = THREADENTRY32()
        thread_entry.dwSize = ctypes.sizeof(thread_entry)
        snap = winproxy.CreateToolhelp32Snapshot(gdef.TH32CS_SNAPTHREAD, 0)
        dbgprint("New handle CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD) <generator> | {0:#x}".format(snap), "HANDLE")
        try:
            winproxy.Thread32First(snap, thread_entry)
            yield thread_entry
            while winproxy.Thread32Next(snap, thread_entry):
                yield thread_entry
        finally:
            winproxy.CloseHandle(snap)
            dbgprint("CLOSE CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD) <generator> | {0:#x}".format(snap), "HANDLE")

    @staticmethod
    def enumerate_threads():
        """Return the list of all threads (owner processes are not resolved)."""
        # BUGFIX: the original referenced the unimported name ``WinThread``;
        # use ``process.WinThread`` as everywhere else in this class.
        return [process.WinThread._from_THREADENTRY32(th) for th in System.enumerate_threads_generator()]

    def enumerate_threads_setup_owners(self):
        """Return all threads with their ``owner`` process already resolved."""
        # Enumerating threads is a special operation concerning the owner process.
        # We may not be able to retrieve the name of the owning process by normal way
        # (as we need to get a handle on the process)
        # So, this implementation of enumerate_thread also setup the owner with the result of enumerate_processes
        dbgprint("Enumerating threads with CreateToolhelp32Snapshot and setup owner", "SLOW")
        # One snapshot for both enumerations, to prevent a race between them
        snap = winproxy.CreateToolhelp32Snapshot(gdef.TH32CS_SNAPTHREAD | gdef.TH32CS_SNAPPROCESS, 0)
        process_entry = PROCESSENTRY32()
        process_entry.dwSize = ctypes.sizeof(process_entry)
        winproxy.Process32First(snap, process_entry)
        processes = []
        processes.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
        while winproxy.Process32Next(snap, process_entry):
            processes.append(process.WinProcess._from_PROCESSENTRY32(process_entry))
        # Forge a dict pid -> process
        proc_dict = {proc.pid: proc for proc in processes}
        thread_entry = THREADENTRY32()
        thread_entry.dwSize = ctypes.sizeof(thread_entry)
        threads = []
        winproxy.Thread32First(snap, thread_entry)
        parent = proc_dict[thread_entry.th32OwnerProcessID]
        threads.append(process.WinThread._from_THREADENTRY32(thread_entry, owner=parent))
        while winproxy.Thread32Next(snap, thread_entry):
            parent = proc_dict[thread_entry.th32OwnerProcessID]
            threads.append(process.WinThread._from_THREADENTRY32(thread_entry, owner=parent))
        winproxy.CloseHandle(snap)
        return threads
# Legacy (pre-Django 3.2) hook pointing Django at this app's AppConfig class.
default_app_config = 'tenant_workspace.apps.TenantWorkspaceConfig'
8029450 | <gh_stars>0
from rest_framework import serializers
from restapi.models import book
from django.contrib.postgres.fields import ArrayField
class bookSerializer(serializers.Serializer):
    """Manually-declared DRF serializer for the ``book`` model."""
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(max_length=100)
    isbn = serializers.CharField(max_length=14)
    authors = serializers.ListField(child=serializers.CharField())
    number_of_pages = serializers.IntegerField()
    publisher = serializers.CharField(max_length=100)
    country = serializers.CharField(max_length=25)
    release_date = serializers.DateField()

    def create(self, validated_data):
        """Persist a new book row from the validated payload."""
        return book.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Apply any provided fields to *instance*, keep the rest, then save."""
        for field_name in ("name", "isbn", "authors", "number_of_pages",
                           "publisher", "country", "release_date"):
            current = getattr(instance, field_name)
            setattr(instance, field_name, validated_data.get(field_name, current))
        instance.save()
        return instance
| StarcoderdataPython |
"""
@Author : xiaotao
@Email : <EMAIL>
@Last modified : 2020/4/24 10:02
@Filename : __init__.py.py
@Description :
@Software : PyCharm
"""
6413530 | <gh_stars>1-10
import argparse
import logging
import gpxpy as gpxpy
from tqdm import tqdm
from coordinates_label_photos.coordinates import exif_to_coordinates
from coordinates_label_photos.photos import list_photo_filenames, get_photo_exif
def main():
    """CLI entry point: read the EXIF GPS data of every photo in a folder and
    write the coordinates out as a single-segment GPX track.

    Fixes over the previous version: the redundant ``fd_out.close()`` inside
    the ``with`` block is gone, and the final log message uses lazy %-style
    arguments instead of eager string formatting.
    """
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description='extract GPS coordinates from an image folder and create a gpx track')
    parser.add_argument("--images", help="image directory (edited in place)", type=str, required=True,
                        metavar='/path/to/images')
    parser.add_argument("--output", help=".gpx coordinates file", type=str, required=True, metavar='/path/to/gpx')
    args = parser.parse_args()

    photos = list_photo_filenames(args.images)

    # one GPX document holding a single track with a single segment
    gpx = gpxpy.gpx.GPX()
    gpx_track = gpxpy.gpx.GPXTrack()
    gpx.tracks.append(gpx_track)
    gpx_segment = gpxpy.gpx.GPXTrackSegment()
    gpx_track.segments.append(gpx_segment)

    # one track point per photo, in directory-listing order
    for photo in tqdm(photos, desc='Parsing images'):
        exif = get_photo_exif(photo)
        coord = exif_to_coordinates(exif)
        gpx_segment.points.append(
            gpxpy.gpx.GPXTrackPoint(coord.lat, coord.lon, elevation=coord.elevation, time=coord.timestamp))

    gpx_file = args.output
    # ``with`` closes the file itself -- no explicit close() needed
    with open(gpx_file, 'w') as fd_out:
        fd_out.write(gpx.to_xml())
    logging.info('Saved %s points in GPX %s', len(photos), gpx_file)


if __name__ == '__main__':
    main()
11206662 | import cloudpickle
import sys
from typing import Dict
def _execute_fun(path_to_serialized_fun: str) -> None:
    """Load a cloudpickle payload ``{'func': ..., 'args': ...}`` from the given
    file and invoke the function with its arguments."""
    with open(path_to_serialized_fun, "rb") as fd:
        payload: Dict = cloudpickle.load(fd)
    target = payload['func']
    target(*payload['args'])


if __name__ == "__main__":
    # the single CLI argument is the path of the serialized function
    _execute_fun(sys.argv[1])
| StarcoderdataPython |
4928979 | from decimal import Decimal
from serde import serde
from serde.json import from_json, to_json
@serde
class Foo:
    """Sample serde model holding an arbitrary-precision decimal value."""
    v: Decimal  # serialized to/from a JSON string so no precision is lost
def main():
    """Serialize a Foo to JSON, then parse one back, printing both results."""
    sample = Foo(Decimal(0.1))
    print("Into Json: {}".format(to_json(sample)))
    raw = '{"v": "0.1000000000000000055511151231257827021181583404541015625"}'
    print("From Json: {}".format(from_json(Foo, raw)))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11317954 | # Stack
from typing import TypeVar, Generic, List

T = TypeVar('T')


class Stack(Generic[T]):
    """A minimal generic LIFO stack backed by a Python list."""

    def __init__(self) -> None:
        # the top of the stack is the end of this list
        self.items: List[T] = []

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self) -> T:
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self) -> T:
        """Return the top item without removing it."""
        return self.items[-1]

    def empty(self) -> bool:
        """Return True when the stack holds no items."""
        return len(self.items) == 0

    def get_max(self) -> T:
        """Return the largest item currently on the stack."""
        return max(self.items)

    def print(self) -> None:
        """Print the items from top to bottom, one per line."""
        for element in reversed(self.items):
            print(element, " ")
# Construct an empty Stack[int] instance and exercise it
stack = Stack[int]()
for value in (2, 4, 12, 25, 22):
    stack.push(value)
stack.pop()
stack.push(3)
stack.print()
print(stack.get_max())
172434 | # Generated by Django 3.1.12 on 2021-06-28 08:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.12, 2021-06-28).

    Alters three ``vehicle`` fields: ``brand`` and ``model`` become optional
    (blank, empty-string default) and ``type`` gets a fixed choice set.
    """

    dependencies = [
        ('business_register', '0123_auto_20210622_1059'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vehicle',
            name='brand',
            field=models.CharField(blank=True, default='', help_text='brand', max_length=80, verbose_name='brand'),
        ),
        migrations.AlterField(
            model_name='vehicle',
            name='model',
            field=models.CharField(blank=True, default='', help_text='model', max_length=140, verbose_name='model'),
        ),
        migrations.AlterField(
            model_name='vehicle',
            name='type',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Car'), (2, 'Truck'), (4, 'Boat'), (5, 'Agricultural machinery'), (6, 'Air_means'), (10, 'Other')], help_text='type of the vehicle', verbose_name='type'),
        ),
    ]
3572071 | from selenium.webdriver.common.by import By
class MainPageLocators(object):
    """Selenium locators for elements on the main page."""
    # the search submit button
    GO_BUTTON = (By.ID, 'submit')
class SearchResultsPageLocators(object):
    """Selenium locators for the search-results page (none defined yet)."""
    pass
1737428 | <reponame>suhassatish/h2o-dev<gh_stars>0
import sys
sys.path.insert(1, "../../../")
import h2o
from h2o.expr import Expr
def expr_slicing(ip,port):
    """Exercise H2O expression slicing on both pending and evaluated frames.

    Covers expr[int], expr[int,int], expr[int,slice], expr[slice,int] and
    expr[slice,slice], comparing against known values of the iris dataset.
    """
    # Connect to h2o
    h2o.init(ip,port)

    iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
    iris.show()

    ###################################################################
    # expr[int] (column slice), expr is pending
    res = 2 - iris
    res2 = h2o.as_list(res[0])
    assert abs(res2[3][0] - -2.6) < 1e-10 and abs(res2[17][0] - -3.1) < 1e-10 and abs(res2[24][0] - -2.8) < 1e-10, \
        "incorrect values"

    # expr[int,int], expr is remote
    res.eager()  # force evaluation so the remaining slices hit the remote frame
    res3 = h2o.as_list(res[13, 3])
    assert abs(res3[0][0] - 1.9) < 1e-10, "incorrect values"

    # expr[int, slice], expr is remote
    res4 = h2o.as_list(res[12, 0:3])
    assert abs(res4[0][0] - -2.8) < 1e-10 and abs(res4[0][1] - -1.0) < 1e-10 and abs(res4[0][2] - 0.6) < 1e-10 and \
        abs(res4[0][3] - 1.9) < 1e-10, "incorrect values"

    # expr[slice, int], expr is remote
    res5 = h2o.as_list(res[5:8, 1])
    assert abs(res5[0][0] - -1.9) < 1e-10 and abs(res5[1][0] - -1.4) < 1e-10 and abs(res5[2][0] - -1.4) < 1e-10 and \
        abs(res5[3][0] - -0.9) < 1e-10, "incorrect values"

    # expr[slice, slice], expr is pending
    res = iris * 2
    res6 = h2o.as_list(res[5:8, 0:3])
    assert abs(res6[0][0] - 10.8) < 1e-10 and abs(res6[1][1] - 6.8) < 1e-10 and abs(res6[2][2] - 3.0) < 1e-10 and \
        abs(res6[3][3] - 0.4) < 1e-10, "incorrect values"


if __name__ == "__main__":
    h2o.run_test(sys.argv, expr_slicing)
11207749 | """Mass Equality Set Graph (MESGraph)."""
from SBMLLint.common import constants as cn
from SBMLLint.common.molecule import Molecule, MoleculeStoichiometry
from SBMLLint.common.reaction import Reaction
from SBMLLint.games.som import SOM
from SBMLLint.common.simple_sbml import SimpleSBML
import collections
import itertools
import networkx as nx
# Sentinel empty string used when building identifiers.
NULL_STR = ""

# Aggregated report of the five categories of mass-balance errors
# detected by MESGraph (type_one .. type_five).
MESGraphReport = collections.namedtuple("MESGraphReport",
    "type_one type_two type_three type_four type_five")

# Associates a molecule and its stoichiometry with the SOM containing it.
SOMMoleculeStoichiometry = collections.namedtuple("SOMStoichiometry",
    "som molecule stoichiometry")
class MESGraph(nx.DiGraph):
"""
The MESGraph class represents a collection of SOMs as nodes
and their inequality relationships as edges (arcs).
Mass inequality between SOMs from reactions can help us
detect their relationship.
Type I Error occurs when we find inequality between two molecules
in the same SOM, because each element in a SOM has the same weight.
Type II Error implies there is cyclism between molecules, such as
A < B < C < ... < A, which is physically impossible.
"""
def __init__(self, simple=None):
"""
:param SimpleSBML simple:
"""
super(MESGraph, self).__init__()
self.simple = simple
self.soms = self.initializeSOMs(simple)
self.add_nodes_from(self.soms)
self.identifier = self.makeId()
self.multimulti_reactions = []
self.type_one_error = False
self.type_two_error = False
self.type_one_errors = []
self.type_two_errors = []
self.type_three_errors = []
self.type_four_errors = []
self.type_five_errors = []
def __repr__(self):
return self.identifier
def initializeSOMs(self, simple):
"""
Create a list of one-molecule SOMs
:param SimpleSBML simple:
:return list-SOM:
"""
soms = []
if type(simple) == SimpleSBML:
for molecule in simple.molecules:
if molecule.name == cn.EMPTYSET:
continue
else:
soms.append(SOM({molecule}))
return soms
def makeId(self):
"""
Construct an identifier for the graph.
:return str:
"""
identifier = ""
if self.edges:
for edge in self.edges:
identifier = identifier + str(edge[0]) + cn.ARC_ARROW + str(edge[1]) + "\n"
for key, node in enumerate(nx.isolates(self)):
identifier = identifier + str(node)
if key < (len(list(nx.isolates(self)))-1):
identifier = identifier + cn.KINETICS_SEPARATOR
# Return the identifier
return identifier
def getNode(self, molecule):
"""
Find a node(SOM) containing the given molecule.
If no such SOM exists, return False
:param Molecule molecule:
:return SOM/False:
"""
for som in list(self.nodes):
for mole in som.molecules:
if mole.name == molecule.name:
return som
return False
def mergeNodes(self, som1, som2, reaction):
"""
Merge two nodes (SOMs).
Update arcs if applicable.
:param SOM som1:
:param SOM som2:
:param Reaction reaction:
:return SOM new_som:
"""
new_som = som1.merge(som2)
new_som.reactions.add(reaction)
for som in [som1, som2]:
for edge in list(self.in_edges(som)):
remaining_som = edge[0]
reaction_label = self.get_edge_data(edge[0], edge[1])[cn.REACTION]
self.add_edge(remaining_som, new_som, reaction=reaction_label)
for edge in list(self.out_edges(som)):
remaining_som = edge[1]
reaction_label = self.get_edge_data(edge[0], edge[1])[cn.REACTION]
self.add_edge(new_som, remaining_som, reaction=reaction_label)
self.remove_nodes_from([som1, som2])
if not self.has_node(new_som):
self.add_node(new_som)
return new_som
def processUniUniReaction(self, reaction):
"""
Process a 1-1 reaction to merge nodes.
If no need to merge, return None.
:param Reaction reaction:
"""
if reaction.category != cn.REACTION_1_1:
pass
else:
reactant_som = self.getNode(reaction.reactants[0].molecule)
product_som = self.getNode(reaction.products[0].molecule)
if reactant_som == product_som:
return None
else:
# new_som = reactant_som.merge(product_som)
# new_som.reactions.add(reaction)
# # TODO: if there are edges, need to also check them: self.mergeNodes(som1, som2, reaction)
# self.remove_node(reactant_som)
# self.remove_node(product_som)
# self.add_node(new_som)
new_som = self.mergeNodes(reactant_som, product_som, reaction)
self.identifier = self.makeId()
return new_som
def processUniMultiReaction(self, reaction):
"""
Process a 1-n reaction to add arcs.
Since the mass of reactant is greater than
that of each product, it adds arcs by
addArc(source=products, destination=reactant).
:param Reaction reaction:
"""
if reaction.category != cn.REACTION_1_n:
pass
else:
destination = [reaction.reactants[0].molecule]
source = [product.molecule for product in reaction.products]
arcs = itertools.product(source, destination)
for arc in arcs:
if not self.checkTypeOneError(arc, reaction):
som_source = self.getNode(arc[0])
som_destination = self.getNode(arc[1])
self.addArc(som_source, som_destination, reaction)
self.identifier = self.makeId()
def processMultiUniReaction(self, reaction):
"""
Process a n-1 reaction to add arcs.
Since the mass of product is greater than
that of each reactant, it adds arcs by
addArc(source=reactants, destination=product).
:param Reaction reaction:
"""
if reaction.category != cn.REACTION_n_1:
pass
else:
destination = [reaction.products[0].molecule]
source = [reactant.molecule for reactant in reaction.reactants]
arcs = itertools.product(source, destination)
for arc in arcs:
if not self.checkTypeOneError(arc, reaction):
som_source = self.getNode(arc[0])
som_destination = self.getNode(arc[1])
self.addArc(som_source, som_destination, reaction)
self.identifier = self.makeId()
def addMultiMultiReaction(self, reaction=None):
"""
Add a multi-multi reaction to self.multimulti_reactions
:param reaction Reaction:
:return bool:
"""
if reaction not in self.multimulti_reactions:
self.multimulti_reactions.append(reaction)
def addTypeThreeError(self, som1, som2, reaction):
"""
Add Type III Error components to self.type_three_errors
All components of resulting PathComponents are str
:param SOM som1:
:param SOM som2:
:param Reaction reaction:
:return bool flag:
"""
flag = False
for component in self.type_three_errors:
if (component.node1==som1) and (component.node2==som2):
new_component = cn.PathComponents(node1=som1,
node2=som2,
reactions=component.reactions+[reaction.label])
self.type_three_errors.remove(component)
self.type_three_errors.append(new_component)
flag = True
break
if not flag:
self.type_three_errors.append(cn.PathComponents(node1=som1,
node2=som2,
reactions=[reaction.label]))
flag = True
return flag
def checkTypeThreeError(self, som1, som2, reaction):
"""
Check type III error, which is when
we cannot merge two nodes because there is an arc.
Add the error to type three error.
:param SOM som1:
:param SOM som2:
:param reaction Reaction:
:return bool:
"""
if self.has_edge(som1, som2):
self.addTypeThreeError(som1, som2, reaction)
return True
elif self.has_edge(som2, som1):
self.addTypeThreeError(som2, som1, reaction)
return True
else:
return False
def reduceReaction(self, reaction):
"""
Reduce the given reaction
:param Reaction reaction:
:return False/Reaction reaction:
"""
if reaction.category != cn.REACTION_n_n:
return False
# Reduces the reaction by examining for each SOM
for som in list(self.nodes):
reactants_in = collections.deque([mole_stoich for mole_stoich in
reaction.reactants if
self.getNode(mole_stoich.molecule)==som])
reactants_out = [mole_stoich for mole_stoich in
reaction.reactants if
self.getNode(mole_stoich.molecule)!=som]
products_in = collections.deque([mole_stoich for mole_stoich in
reaction.products if
self.getNode(mole_stoich.molecule)==som])
products_out = [mole_stoich for mole_stoich in
reaction.products if
self.getNode(mole_stoich.molecule)!=som]
#
while reactants_in and products_in:
reactant = reactants_in[0]
product = products_in[0]
if reactant.stoichiometry > product.stoichiometry:
reactants_in[0] = MoleculeStoichiometry(reactant.molecule,
reactant.stoichiometry - product.stoichiometry)
products_in.popleft()
elif reactant.stoichiometry < product.stoichiometry:
products_in[0] = MoleculeStoichiometry(product.molecule,
product.stoichiometry - reactant.stoichiometry)
reactants_in.popleft()
else:
reactants_in.popleft()
products_in.popleft()
reactants = list(reactants_in) + reactants_out
products = list(products_in) + products_out
#
if (len(reaction.reactants) > len(reactants)) | \
(len(reaction.products) > len(products)):
reaction.reactants = reactants
reaction.products = products
reaction.identifier = reaction.makeIdentifier()
reaction.category = reaction.getCategory()
# reduced_reaction = cn.ReactionComponents(label = reaction.label, reactants=reactants, products=products)
return reaction
# def addTypeFourError(self, reduced_reaction):
# """
# Add type IV error, which is when reduced reaction
# only has one elements on one side.
# :param Reaction reduced_reaction:
# :return bool:
# """
# if som1 == som2:
# self.addTypeFourError(som1, som2, reaction)
# return True
# return False
def processMultiMultiReaction(self, reaction):
    """
    Process a multi-multi reaction.
    Returns True when the reaction was consumed: the graph was updated
    (merge or new arcs) or a type IV error was recorded.
    Returns False when nothing was done: the reaction is not
    multi-multi, reduced to EmptySet -> EmptySet, or the error already
    existed. May return None if no case applied (should not happen).
    :param Reaction reaction:
    :return bool/None:
    """
    # Reduce the reaction using existing SOMs
    reduced_reaction = self.reduceReaction(reaction)
    # If the reaction was not a multi-multi reaction, return False
    if not reduced_reaction:
        return False
    # if reduced reaction is EmptySet -> EmptySet, don't do anything
    if len(reduced_reaction.reactants)==0 and len(reduced_reaction.products)==0:
        return False
    # elif reduced reaction has exactly one side EmptySet, add type four error
    elif len(reduced_reaction.reactants)==0 or len(reduced_reaction.products)==0:
        if reduced_reaction in self.type_four_errors:
            return False
        else:
            self.type_four_errors.append(reduced_reaction)
            return True
    # distinct SOMs and total stoichiometry on each side
    reactant_soms = list({self.getNode(ms.molecule) for ms in reduced_reaction.reactants})
    product_soms = list({self.getNode(ms.molecule) for ms in reduced_reaction.products})
    reactant_stoichiometry = [ms.stoichiometry for ms in reduced_reaction.reactants]
    product_stoichiometry = [ms.stoichiometry for ms in reduced_reaction.products]
    sum_reactant_stoichiometry = sum(reactant_stoichiometry)
    sum_product_stoichiometry = sum(product_stoichiometry)
    #
    # if both sides have more than one SOMS, we cannot process (return False)
    # if (len(reactant_soms)>1) and (len(product_soms)>1):
    #   return False
    # if both sides have exactly one SOM
    if (len(reactant_soms)==1) and (len(product_soms)==1):
        reactant_som = reactant_soms[0]
        product_som = product_soms[0]
        if sum_reactant_stoichiometry == sum_product_stoichiometry:
            # equal mass on both sides: the two SOMs must be equal
            if reactant_som != product_som:
                if not self.checkTypeThreeError(reactant_som, product_som, reaction):
                    self.mergeNodes(reactant_som, product_som, reaction)
        # Add reactant_som -> product_som
        elif sum_reactant_stoichiometry > sum_product_stoichiometry:
            self.addArc(reactant_som, product_som, reaction)
        # Add product_som -> reactant_som
        else:
            self.addArc(product_som, reactant_som, reaction)
        self.identifier = self.makeId()
        return True
    # if one side has exactly one SOM, and the other side multiple SOMs
    else:
        # SOM uni-multi reaction
        if (len(reactant_soms)==1) and \
            (sum_reactant_stoichiometry==1) and \
            (len(product_soms)>1):
            som_arcs = itertools.product(product_soms, reactant_soms)
        # SOM multi-uni reaction
        elif (len(reactant_soms)>1) and \
            (len(product_soms)==1) and \
            (sum_product_stoichiometry==1):
            som_arcs = itertools.product(reactant_soms, product_soms)
        # The rest are all multi-multi
        # that is, multiple SOMs on both sides, or one side one SOM with stoichiometry>1
        else:
            return self.processByInequality(reduced_reaction)
        for arc in som_arcs:
            self.addArc(arc[0], arc[1], reaction)
        self.identifier = self.makeId()
        return True
    # return none if none of above applied (should not happen)
    return None
def processByInequality(self, reduced_reaction):
    """
    Cross-examine inequality and add arcs if possible.
    One example is, A + B -> C + D where A < C is given
    by existing arc. We can conclude B > D and add an arc.
    Return True if processed (arc added or type IV error recorded),
    False otherwise.
    :param Reaction reduced_reaction:
    :return bool:
    """
    # annotate each molecule with its SOM so both sides compare at SOM level
    reactants = collections.deque([
        SOMMoleculeStoichiometry(som = self.getNode(moles.molecule),
                                 molecule = moles.molecule,
                                 stoichiometry = moles.stoichiometry) for \
        moles in reduced_reaction.reactants])
    products = collections.deque([
        SOMMoleculeStoichiometry(som = self.getNode(moles.molecule),
                                 molecule = moles.molecule,
                                 stoichiometry = moles.stoichiometry) for \
        moles in reduced_reaction.products])
    reactant_soms = list({reactant.som for reactant in reactants})
    product_soms = list({product.som for product in products})
    # classify every (reactant SOM, product SOM) pair by existing arcs
    reactant_lessthan_product = []
    product_lessthan_reactant = []
    no_relationship = []
    for pair in itertools.product(reactant_soms, product_soms):
        if self.has_edge(pair[0], pair[1]):
            reactant_lessthan_product.append(pair)
        elif self.has_edge(pair[1], pair[0]):
            product_lessthan_reactant.append(pair)
        else:
            no_relationship.append(pair)
    # nothing left to infer: every pair already has a known relationship
    if not no_relationship:
        return False
    # if both directions exist, let's say we cannot do anything; return False
    if reactant_lessthan_product and product_lessthan_reactant:
        return False
    def processPairs(pairs, small, big, idx_small, idx_big):
        # under product_lessthan_reactant, idx_small = 1, idx_big = 0
        # under the same, small = products, big = reactants
        # soms_buffer is same side as small_som
        # remaining_soms is same side as big_som
        big_som_stoichiometry = 0
        small_som_stoichiometry = 0
        soms_buffer = [pair[idx_small] for pair in no_relationship]
        remaining_soms = [pair[idx_big] for pair in no_relationship]
        for pair in pairs:
            big_som_stoichiometry += sum([
                sms.stoichiometry for sms in big if sms.som==pair[idx_big]])
            small_som_stoichiometry += sum([
                sms.stoichiometry for sms in small if sms.som==pair[idx_small]])
            if pair[idx_small] in soms_buffer:
                soms_buffer.remove(pair[idx_small])
            if pair[idx_big] in remaining_soms:
                remaining_soms.remove(pair[idx_big])
        # if big_som_stoichiometry is bigger, it's okay
        # if not, check if there is at least one buffer on the small side;
        # if yes, try adding an arc if the buffer is exactly one
        if big_som_stoichiometry < small_som_stoichiometry:
            return False
        elif soms_buffer:
            if len(soms_buffer)==1:
                # add arc
                for arc_source in remaining_soms:
                    # the SOMs cannot be the same because they were already reduced
                    # BUG FIX: addArc requires the reaction that justifies the
                    # arc; the original call omitted it (TypeError at runtime)
                    self.addArc(arc_source, soms_buffer[0], reduced_reaction)
                return True
            # cannot decide now because there are more than two buffers
            else:
                return False
        # no buffer; add error
        else:
            if reduced_reaction in self.type_four_errors:
                return False
            else:
                self.type_four_errors.append(reduced_reaction)
                return True
    if product_lessthan_reactant:
        return processPairs(
            pairs=product_lessthan_reactant,
            big=reactants,
            small=products,
            idx_big=0, idx_small=1)
    elif reactant_lessthan_product:
        return processPairs(
            pairs=reactant_lessthan_product,
            big=products,
            small=reactants,
            idx_big=1, idx_small=0)
    return False
def addArc(self, arc_source, arc_destination, reaction):
    """
    Add a single arc (edge) between two SOMs, tagging the edge with
    the label of the reaction that justified it.
    :param SOM arc_source:
    :param SOM arc_destination:
    :param Reaction reaction:
    """
    if self.has_edge(arc_source, arc_destination):
        existing_labels = self.get_edge_data(arc_source, arc_destination)[cn.REACTION]
        # append the label only if this reaction is not recorded yet
        if reaction.label in set(existing_labels):
            labels = existing_labels
        else:
            labels = existing_labels + [reaction.label]
    else:
        labels = [reaction.label]
    # (re)adding the edge overwrites its reaction-label attribute
    self.add_edge(arc_source, arc_destination, reaction=labels)
def getSOMPath(self, som, mole1, mole2):
    """
    Build an undirected graph from the 1-1 reactions stored in *som*
    and return the shortest path between the two molecules.
    :param SOM som:
    :param Molecule mole1:
    :param Molecule mole2:
    :return list-PathComponents som_path:
    """
    source_name = mole1.name
    target_name = mole2.name
    subgraph = nx.Graph()
    # here, every reaction is a 1-1 reaction
    for som_reaction in list(som.reactions):
        left = som_reaction.reactants[0].molecule.name
        right = som_reaction.products[0].molecule.name
        if subgraph.has_edge(left, right):
            labels = subgraph.get_edge_data(left, right)[cn.REACTION]
            # record each reaction label at most once per edge
            if som_reaction.label not in set(labels):
                labels = labels + [som_reaction.label]
        else:
            labels = [som_reaction.label]
        subgraph.add_edge(left, right, reaction=labels)
    shortest = list(nx.shortest_path(subgraph,
                                     source=source_name,
                                     target=target_name))
    som_path = []
    for start, end in zip(shortest[:-1], shortest[1:]):
        edge_reactions = subgraph.get_edge_data(start, end)[cn.REACTION]
        som_path.append(cn.PathComponents(node1=start,
                                          node2=end,
                                          reactions=edge_reactions))
    return som_path
def printSOMPath(self, molecule_name1, molecule_name2):
    """
    Render the shortest SOM path between two molecules as text.
    Both names are str and both molecules must belong to the same SOM.
    :param str molecule_name1:
    :param str molecule_name2:
    :return bool/str: False if the molecules are in different SOMs
    """
    som1 = self.getNode(self.simple.getMolecule(molecule_name1))
    som2 = self.getNode(self.simple.getMolecule(molecule_name2))
    if som1 != som2:
        return False
    # trivial case: a molecule is always equal to itself
    if molecule_name1 == molecule_name2:
        return NULL_STR + "Clearly, %s %s %s\n" % (
            molecule_name1, cn.EQUAL, molecule_name2)
    pieces = [NULL_STR]
    som_path = self.getSOMPath(som1,
                               self.simple.getMolecule(molecule_name1),
                               self.simple.getMolecule(molecule_name2))
    for component in som_path:
        pieces.append("\n%s %s %s by reaction(s):\n" % (
            component.node1, cn.EQUAL, component.node2))
        for label in component.reactions:
            som_reaction = self.simple.getReaction(label)
            pieces.append("%s\n" % (
                som_reaction.makeIdentifier(is_include_kinetics=False)))
    return "".join(pieces)
def addTypeOneError(self, mole1, mole2, reaction):
    """
    Add Type I Error components to self.type_one_errors, merging the
    reaction label into an existing record for the same molecule pair
    when one exists. All components of the resulting PathComponents
    are str.
    :param Molecule mole1:
    :param Molecule mole2:
    :return bool flag: always True (an entry is added or merged)
    """
    for existing in self.type_one_errors:
        if existing.node1 == mole1.name and existing.node2 == mole2.name:
            # merge: replace the old record with one carrying the new label
            merged = cn.PathComponents(
                node1=mole1.name,
                node2=mole2.name,
                reactions=existing.reactions + [reaction.label])
            self.type_one_errors.remove(existing)
            self.type_one_errors.append(merged)
            break
    else:
        # no record for this pair yet: create a fresh one
        self.type_one_errors.append(cn.PathComponents(
            node1=mole1.name,
            node2=mole2.name,
            reactions=[reaction.label]))
    return True
def checkTypeOneError(self, arc, inequality_reaction=None):
    """
    Check Type I Error of an arc: if both endpoints already belong to
    the same SOM, the implied inequality contradicts an established
    equality, so record the error and return True; otherwise False.
    :param tuple-Molecule arc:
    :param Reaction inequality_reaction:
    :return bool:
    """
    source_som = self.getNode(arc[0])
    destination_som = self.getNode(arc[1])
    if source_som != destination_som:
        return False
    self.addTypeOneError(arc[0], arc[1], inequality_reaction)
    return True
def addTypeTwoError(self, cycle):
    """
    Add Type II Error components to self.type_two_errors, which is a
    list of lists. All components of the resulting PathComponents are
    str.
    :param list-SOM cycle:
    """
    # exceptionally, here PathComponents are
    # node1=[], node2=[], reactions=[] and their index
    # of each component will match. All elements within nodes
    # are in the same SOM.
    # The original duplicated the whole edge-processing body for the
    # wrap-around (last -> first) edge; iterate the pairs with a modulo
    # index instead so there is a single copy of the logic.
    error_cycle = []
    num_soms = len(cycle)
    for node_idx in range(num_soms):
        som1 = cycle[node_idx]
        som2 = cycle[(node_idx + 1) % num_soms]
        error_cycle.append(self._makeTypeTwoPathComponent(som1, som2))
    self.type_two_errors.append(error_cycle)

def _makeTypeTwoPathComponent(self, som1, som2):
    """
    Build one PathComponents entry for the graph edge som1 -> som2,
    collecting the molecule names and reaction labels that created it.
    :param SOM som1:
    :param SOM som2:
    :return PathComponents:
    """
    som1_moles = {mole.name for mole in list(som1.molecules)}
    som2_moles = {mole.name for mole in list(som2.molecules)}
    reactions = self.get_edge_data(som1, som2)[cn.REACTION]
    # all reactions (in an edge) should create a single PathComponent
    nodes1 = []
    nodes2 = []
    reaction_labels = []
    for r in reactions:
        reaction = self.simple.getReaction(r)
        if reaction.category == cn.REACTION_n_1:
            sources = {rt.molecule.name for rt in reaction.reactants}
            destinations = {p.molecule.name for p in reaction.products}
        elif reaction.category == cn.REACTION_1_n:
            sources = {p.molecule.name for p in reaction.products}
            destinations = {rt.molecule.name for rt in reaction.reactants}
        # for any reaction that adds arcs, len(nodes2)==1
        node2 = list(destinations.intersection(som2_moles))[0]
        for node1 in list(sources.intersection(som1_moles)):
            nodes1.append(node1)
            nodes2.append(node2)
            reaction_labels.append(reaction.label)
    return cn.PathComponents(node1=nodes1,
                             node2=nodes2,
                             reactions=reaction_labels)
def checkTypeTwoError(self):
    """
    Check Type II Error (cycles) of a MESGraph.
    If at least one cycle exists, record each one via addTypeTwoError,
    set the type_two_error flag, and return True; otherwise False.
    :return bool:
    """
    digraph = nx.DiGraph()
    digraph.add_edges_from(self.edges)
    found_cycles = list(nx.simple_cycles(digraph))
    if not found_cycles:
        return False
    for found_cycle in found_cycles:
        self.addTypeTwoError(found_cycle)
    if not self.type_two_error:
        self.type_two_error = True
    return True
def checkTypeFiveError(self):
    """
    Check Type V Error (cycles) of a MESGraph.
    Stores any cycles found in self.type_five_errors. Unlike type II,
    this applies to multi-multi reactions, so the cycle is reported at
    SOM level.
    :return bool: True if at least one cycle was found
    """
    digraph = nx.DiGraph()
    digraph.add_edges_from(self.edges)
    som_cycles = list(nx.simple_cycles(digraph))
    if not som_cycles:
        return False
    self.type_five_errors = som_cycles
    return True
def analyze(self, reactions=None, error_details=True):
    """
    Sort list of reactions and process them.
    Add arcs or sending error messages using
    checkTypeOneError or checkTypeTwoError.
    :param list-Reaction reactions: defaults to all model reactions
    :param bool error_details: if True, append per-error details to the report
    :return str: human-readable report of detected errors
    """
    if reactions is None:
        reactions = self.simple.reactions
    # Associate the reaction category with the function
    # that processes that category
    report = NULL_STR
    reaction_dic = {
        cn.REACTION_1_1: self.processUniUniReaction,
        cn.REACTION_1_n: self.processUniMultiReaction,
        cn.REACTION_n_1: self.processMultiUniReaction,
        cn.REACTION_n_n: self.addMultiMultiReaction,
        }
    # Process each type of reaction
    for category in reaction_dic.keys():
        for reaction in [r for r in reactions if r.category == category]:
            func = reaction_dic[category]
            func(reaction)
    #
    self.checkTypeTwoError()
    #
    if error_details:
        # if (len(self.type_one_errors)==0) and (len(self.type_two_errors)==0):
        #   report = report + "No error found in uni-uni and mulit-uni(uni-multi) reactions.\n"
        #
        # Type I errors: show the SOM path that proves two molecules equal,
        # then the reactions whose arcs imply the contradictory inequality.
        for error_path in self.type_one_errors:
            path_report = self.printSOMPath(error_path.node1, error_path.node2)
            # print(path_report)
            report = report + "%s\n" % path_report
            # print("\nHowever, the following reaction(s)")
            report = report + "However, the following reaction(s)\n"
            for arc_reaction in error_path.reactions:
                # print(self.simple.getReaction(arc_reaction).makeIdentifier(is_include_kinetics=False))
                report = "%s%s\n" % (report, self.simple.getReaction(arc_reaction).makeIdentifier(is_include_kinetics=False))
            # print("imply " + error_path.node1, cn.LESSTHAN, error_path.node2)
            report = report + "imply %s %s %s\n" % (error_path.node1, cn.LESSTHAN, error_path.node2)
            # print("------------------------------------")
            report = report + "------------------------------------\n"
        #print("************************************")
        #
        # print("We Do have type II Errors", self.type_two_errors)
        # Type II errors: walk each cycle of PathComponents and spell out
        # the chain of inequalities the cycle implies.
        for cycle in self.type_two_errors:
            for idx, path_comp in enumerate(cycle):
                nodes1 = collections.deque(path_comp.node1)
                nodes2 = collections.deque(path_comp.node2)
                if idx < len(cycle)-1:
                    next_nodes1 = collections.deque(cycle[idx+1].node1)
                else:
                    # last component wraps around to the first
                    next_nodes1 = collections.deque(cycle[0].node1)
                reactions = collections.deque(path_comp.reactions)
                # print SOM path between node elements
                if len(nodes1)>1:
                    for node_idx in range(len(nodes1)-1):
                        path_report = self.printSOMPath(nodes1[node_idx], nodes1[node_idx+1])
                        report = report + "%s\n" % path_report
                if not set(nodes2).intersection(set(next_nodes1)):
                    path_report = self.printSOMPath(nodes2[0], next_nodes1[0])
                    report = report + "%s\n" % path_report
                #
                # one inequality line per (node1, node2, reaction) triple
                while nodes1:
                    report = report + "\n%s %s %s by reaction:\n" % (nodes1[0], cn.LESSTHAN, nodes2[0])
                    arc_reaction = self.simple.getReaction(reactions[0])
                    report = report + "%s\n" % (arc_reaction.makeIdentifier(is_include_kinetics=False))
                    nodes1.popleft()
                    nodes2.popleft()
                    reactions.popleft()
                report = report + "%s" % ("------------------------------------")
    # report = report + "%s\n" % ("*************************************************************")
    # Process multi-multi reactions only if there's no elementary errors
    if len(self.type_one_errors)==0 and len(self.type_two_errors)==0:
        # NOTE(review): sub_multimulti and unsuccessful_load are never read
        sub_multimulti = self.multimulti_reactions
        unsuccessful_load = 0
        max_loop = 0
        # Re-run the remaining multi-multi reactions up to 5 passes: each
        # pass may merge SOMs / add arcs that unblock other reactions.
        while (self.multimulti_reactions) and (max_loop<5):
            max_loop = max_loop + 1
            flag_loop = [False] * len(self.multimulti_reactions)
            for idx, multimulti in enumerate(self.multimulti_reactions):
                result = self.processMultiMultiReaction(multimulti)
                if result is None:
                    print("This reaction returned None")
                    print(multimulti)
                    pass
                else:
                    flag_loop[idx] = result
            # if nothing was processed, quit the while loop
            if sum(flag_loop)==0:
                break
            # if at least one was processed, subset unpressed reactions
            self.multimulti_reactions = [self.multimulti_reactions[idx] for idx, tr \
                in enumerate(flag_loop) if not tr]
        # check SOM cycles (type V error)
        self.checkTypeFiveError()
    # if len(self.type_three_errors)==0 and \
    #    len(self.type_four_errors)==0 and \
    #    len(self.type_five_errors)==0:
    #   # print("No error found in multi-multi reactions.")
    #   # print("*************************************************************")
    #   report = report + "%s\n%s\n" % (
    #       "No error found in multi-multi reactions.",
    #       "*************************************************************")
    #
    # if error_details:
    #   if self.type_three_errors:
    #     print("We have type III errors\n", self.type_three_errors)
    #   else:
    #     print("We don't have type III errors")
    #   if self.type_four_errors:
    #     print("We have type IV errors\n", self.type_four_errors)
    #   else:
    #     print("We don't have type IV errors")
    #   if self.type_five_errors:
    #     print("We have type V errors\n", self.type_five_errors)
    #   else:
    #     print("We don't have type V errors")
    #
    self.identifier = self.makeId()
    print("Model analyzed...")
    if self.type_one_errors or \
        self.type_two_errors or \
        self.type_three_errors or \
        self.type_four_errors or \
        self.type_five_errors:
        print("At least one error found.")
    else:
        print("No error detected.")
    #return self
    return report
| StarcoderdataPython |
250173 | import copy
import heapq
from datetime import datetime, timedelta, time
import itertools
from dateutil.rrule import rrulestr
from django.utils import timezone
# from .models import RepeatingEvent, Event, Accrual
class UnboundedOverlapError(Exception):
    """Raised when an overlap query has no upper time bound."""

    _MESSAGE = 'unbounded overlap detected'

    def __init__(self):
        super().__init__(self._MESSAGE)
class AggregationError(Exception):
    """Raised when an aggregation cannot be performed safely
    (e.g. too many repeating-event objects to merge)."""
    pass
def make_future_dtrange(dtrange):
    """ Convert a time range to be lower bounded from now.

    The effective lower bound is tomorrow's date; an already-future
    range is returned unchanged, and a fully-past range collapses to
    an empty (tomorrow, tomorrow) range.
    """
    tomorrow = timezone.now().date() + timedelta(1)
    lower, upper = dtrange
    if lower is not None and lower >= tomorrow:
        return dtrange
    if upper is None or upper > tomorrow:
        return (tomorrow, upper)
    return (tomorrow, tomorrow)
def make_repeating_event_iterator(rev, future):
    """ Yield (date, rev) pairs for each occurrence of *rev* in *future*.

    Occurrences come from the event's rrule, clipped to the
    intersection of *future* and the event's own validity range.
    """
    cur_range = (
        max(future[0], rev.range.lower),
        min(future[1], rev.range.upper) if rev.range.upper is not None else future[1]
    )
    rr = rrulestr(rev.repetition, dtstart=cur_range[0])
    for dt in rr.xafter(datetime.combine(cur_range[0], time()), inc=True):
        date = dt.date()
        if date >= cur_range[1]:
            # BUG FIX (PEP 479): raising StopIteration inside a generator
            # is converted to RuntimeError on Python 3.7+; finish the
            # generator with a plain return instead.
            return
        yield (date, rev)
def canonicalize_events(events):
    """ Reduce events to a set of (source, occurred) pairs.

    Only events that are not amendments (``parent`` is None) and that
    carry a truthy ``source`` are kept. ``None`` is treated as an
    empty collection.
    """
    events = events if events is not None else []
    # BUG FIX: the filter previously read ``events.parent`` /
    # ``events.source`` (attributes of the *list*), which raised
    # AttributeError; it must inspect each event ``e``.
    pairs = [(e.source, e.occurred)
             for e in events
             if e.parent is None and e.source]
    return set(pairs)
def overlapping_repeating_events(RepeatingEvent, dtrange):
    """ Yield all repeating events that overlap the provided time range.

    Occurrences of all matching repeating events are merged in
    chronological order and instantiated one by one.
    """
    future = make_future_dtrange(dtrange)
    if future[1] is None:
        raise UnboundedOverlapError
    occurrence_streams = []
    for repeating in RepeatingEvent.objects.filter(range__overlap=future):  # TODO: chunking
        occurrence_streams.append(iter(make_repeating_event_iterator(repeating, future)))
        # guard against pathological models producing unbounded work
        if len(occurrence_streams) > 1000:
            raise AggregationError('probably too many repeating-event objects')
    merged = heapq.merge(*occurrence_streams, key=lambda occurrence: occurrence[0])
    for occurred_on, repeating in merged:
        yield repeating.instantiate(occurred_on)
def overlapping_events(Event, dtrange):
    """ Yield all non-repeating events that overlap the provided time range.
    Note that this will return events that are amendments. This is because I
    want the sources to be available for the `overlap` function in order to
    remove future duplicates. I will filter out non-amendments in `overlap`.
    """
    lower, upper = dtrange
    filters = {}
    if lower is not None:
        filters['occurred__gte'] = lower
    if upper is not None:
        filters['occurred__lt'] = upper
    # TODO: chunking
    yield from Event.objects.filter(**filters).order_by('occurred')
def overlap(Event, RepeatingEvent, dtrange):
    """ Find all overlapping events.

    Merges concrete (non-repeating) events and generated occurrences of
    repeating events in chronological order. Generated occurrences that
    were already pregenerated as concrete events for the same date and
    source are skipped, as are concrete events that have amendments.
    """
    all_evs = [
        iter(overlapping_events(Event, dtrange)),
        iter(overlapping_repeating_events(RepeatingEvent, dtrange))
    ]
    lce_date = None     # last concrete event date
    lce_sources = set() # last concrete event sources
    for ev in heapq.merge(*all_evs, key=lambda x: x.occurred):
        # This part allows us to prevent duplicate generation of future events
        # when they have been pregenerated. It hinges on `heapq.merge` respecting
        # iterator order; I pass in the concrete event iterator first.
        # Concrete events carry a database id; generated occurrences do not.
        if ev.id is not None:
            if lce_date != ev.occurred:
                lce_date = ev.occurred
                lce_sources = set()
            if ev.source:
                lce_sources.add(ev.source.id)
        elif lce_date is not None and lce_date == ev.occurred and ev.source.id in lce_sources:
            # a concrete event for the same date and source already exists
            continue
        # Don't send through concrete events that have amendments.
        if ev.amendments.exists(): # TODO: perhaps use a boolean "is_amended" for performance?
            continue
        yield ev
def aggregate(Event, RepeatingEvent, dtrange, op, initial=None,
              exclude_tagged=True):
    """ Perform an aggregate over overlapping events.
    By default any event with tags is excluded from the aggregation. To include
    tagged events set `exclude_tagged` to True.
    """
    accumulator = initial
    for event in overlap(Event, RepeatingEvent, dtrange):
        # optionally skip events carrying any tags at all
        if exclude_tagged and event.tags.keys():
            continue
        accumulator = op(accumulator, op.coerce(event))
    return op.final(accumulator)
def aggregate_tags(Event, RepeatingEvent, dtrange, op, initial=None):
    """ Aggregate event tags.

    For every key in *initial*, folds in each overlapping event whose
    tags are empty or contain that key, then finalizes each value with
    ``op.final``. The caller's *initial* dict is never mutated.
    """
    # avoid a mutable default argument; deepcopy keeps the caller's dict safe
    val = copy.deepcopy(initial) if initial is not None else {}
    for ev in overlap(Event, RepeatingEvent, dtrange):
        for kk in val.keys():
            # BUG FIX: ``overlap`` yields event objects, not tuples, so the
            # tags live on ``ev`` (cf. ``aggregate``), not ``ev[1]``.
            if not ev.tags or kk in ev.tags:
                val[kk] = op(val[kk], op.coerce(ev))
    for kk, vv in val.items():
        val[kk] = op.final(vv)
    return val
# def accrue(dt, op, initial_tags=[]):
# """ Aggregate up until a datetime.
# """
# now = timezone.now().date()
# latest = Accrual.objects.filter(timestamp__lte=now).order_by('-timestamp')
# if latest:
# base = reduce(
# op, [latest[0].values.get(t, 0) for t in initial_tags], None
# )
# start = latest[0].timestamp
# else:
# base = None
# start = None
# addition = aggregate((start, dt), op) # TODO: initial?
# return op(base, addition)
| StarcoderdataPython |
3482091 | <reponame>ufo2011/NXP-MCUBootUtility<filename>src/ui/ui_cfg_usdhcmmc.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import wx
import sys
import os
import RTyyyy_uidef
import uidef
import uivar
import uilang
sys.path.append(os.path.abspath(".."))
from win import bootDeviceWin_UsdhcMmc
from utils import sound
class secBootUiUsdhcMmc(bootDeviceWin_UsdhcMmc.bootDeviceWin_UsdhcMmc):
def __init__(self, parent):
    """Create the uSDHC MMC boot-device dialog and restore saved settings.

    :param parent: parent wx window for this dialog.
    """
    bootDeviceWin_UsdhcMmc.bootDeviceWin_UsdhcMmc.__init__(self, parent)
    self._setLanguage()
    # Whether the target has multiple uSDHC boot instances; stays None
    # until setNecessaryInfo() is called.
    self.hasMultiUsdhcBootInstance = None
    # Raw 32-bit option words persisted for the uSDHC MMC boot device
    usdhcMmcOpt0, usdhcMmcOpt1 = uivar.getBootDeviceConfiguration(RTyyyy_uidef.kBootDevice_UsdhcMmc)
    self.usdhcMmcOpt0 = usdhcMmcOpt0
    self.usdhcMmcOpt1 = usdhcMmcOpt1
    self._recoverLastSettings()
def _setLanguage( self ):
    """Apply the language-dependent UI strings to every widget.

    Replaces 17 copy-pasted SetLabel calls with a data-driven loop;
    the notebook tabs need SetPageText and are handled separately.
    """
    runtimeSettings = uivar.getRuntimeSettings()
    langIndex = runtimeSettings[3]
    self.m_notebook_mmcOpt0.SetPageText(0, uilang.kSubLanguageContentDict['panel_mmcOpt0'][langIndex])
    self.m_notebook_mmcOpt1.SetPageText(0, uilang.kSubLanguageContentDict['panel_mmcOpt1'][langIndex])
    # (widget, language-dictionary key) pairs; one SetLabel per widget
    labelPairs = (
        (self.m_staticText_busWidth, 'sText_busWidth'),
        (self.m_staticText_timingInterface, 'sText_timingInterface'),
        (self.m_staticText_partitionAccess, 'sText_partitionAccess'),
        (self.m_staticText_enableBootConfig, 'sText_enableBootConfig'),
        (self.m_staticText_bootBusWidth, 'sText_bootBusWidth'),
        (self.m_staticText_bootMode, 'sText_bootMode'),
        (self.m_staticText_enableBootPartition, 'sText_enableBootPartition'),
        (self.m_staticText_enableBootAck, 'sText_enableBootAck'),
        (self.m_staticText_resetBootBusConditions, 'sText_resetBootBusConditions'),
        (self.m_staticText_instance, 'sText_instance'),
        (self.m_staticText_enable1V8, 'sText_enable1V8'),
        (self.m_staticText_enablePowerCycle, 'sText_enablePowerCycle'),
        (self.m_staticText_powerPolarity, 'sText_powerPolarity'),
        (self.m_staticText_powerUpTime, 'sText_powerUpTime'),
        (self.m_staticText_powerDownTime, 'sText_powerDownTime'),
        (self.m_button_ok, 'button_usdhcmmc_ok'),
        (self.m_button_cancel, 'button_usdhcmmc_cancel'),
    )
    for widget, key in labelPairs:
        widget.SetLabel(uilang.kSubLanguageContentDict[key][langIndex])
def setNecessaryInfo( self, hasMultiUsdhcBootInstance ):
    """Record whether the SoC has multiple uSDHC boot instances, then
    refresh the dialog controls from the stored option words.

    :param bool hasMultiUsdhcBootInstance: True enables the instance selector.
    """
    self.hasMultiUsdhcBootInstance = hasMultiUsdhcBootInstance
    self._recoverLastSettings()
def _updateInstanceField ( self, isEnabled ):
    """Enable or disable the uSDHC instance choice control.

    :param isEnabled: truthy to enable; None/False to disable.
    """
    # bool() collapses the initial None state to False, matching the
    # original if/else branches without duplicating the call.
    self.m_choice_instance.Enable( bool(isEnabled) )
def _updateBootCfgField ( self, isEnabled ):
    """Enable or disable all boot-configuration choice controls.

    :param isEnabled: truthy to enable; falsy to disable.
    """
    enable = bool(isEnabled)
    # the five boot-config widgets always toggle together
    for widget in (self.m_choice_bootBusWidth,
                   self.m_choice_bootMode,
                   self.m_choice_enableBootPartition,
                   self.m_choice_enableBootAck,
                   self.m_choice_resetBootBusConditions):
        widget.Enable( enable )
def _recoverLastSettings ( self ):
    """Decode the usdhcMmcOpt0/usdhcMmcOpt1 bitfields into the UI controls.

    Field layouts mirror the _getXxx() encoders below; each comment
    names the bit range being read.
    """
    # opt0[10:8]: bus width. BUG FIX: the mask was 0x00000100 (one bit),
    # but _getBusWidth writes values up to 0x6 into bits [10:8], so the
    # decode must use 0x00000700 to recover DDR widths (0x5/0x6).
    busWidth = (self.usdhcMmcOpt0 & 0x00000700) >> 8
    if busWidth <= 2:
        self.m_choice_busWidth.SetSelection(busWidth)
    elif busWidth >= 5:
        # DDR encodings 0x5/0x6 map to choice indices 3/4
        self.m_choice_busWidth.SetSelection(busWidth - 2)
    else:
        pass
    # opt0[14:12]: timing interface
    timingInterface = (self.usdhcMmcOpt0 & 0x00007000) >> 12
    self.m_choice_timingInterface.SetSelection(timingInterface)
    # opt0[26:24]: partition access
    partitionAccess = (self.usdhcMmcOpt0 & 0x07000000) >> 24
    self.m_choice_partitionAccess.SetSelection(partitionAccess)
    # opt0[0]: boot-config enable; gates the five boot-config widgets
    enableBootConfig = self.usdhcMmcOpt0 & 0x00000001
    self.m_choice_enableBootConfig.SetSelection(enableBootConfig)
    if enableBootConfig == 0:
        self._updateBootCfgField(False)
    else:
        self._updateBootCfgField(True)
    # opt0[17:16]: boot bus width
    bootBusWidth = (self.usdhcMmcOpt0 & 0x00030000) >> 16
    self.m_choice_bootBusWidth.SetSelection(bootBusWidth)
    # opt0[5:4]: boot mode
    bootMode = (self.usdhcMmcOpt0 & 0x00000030) >> 4
    self.m_choice_bootMode.SetSelection(bootMode)
    # opt0[22:20]: boot partition; 0x7 ("User Area") maps to choice index 3
    enableBootPartition = (self.usdhcMmcOpt0 & 0x00700000) >> 20
    if enableBootPartition <= 2:
        self.m_choice_enableBootPartition.SetSelection(enableBootPartition)
    elif enableBootPartition >= 7:
        self.m_choice_enableBootPartition.SetSelection(enableBootPartition - 4)
    else:
        pass
    # opt0[2]: boot ack; opt0[3]: reset boot bus conditions
    enableBootAck = (self.usdhcMmcOpt0 & 0x00000004) >> 2
    self.m_choice_enableBootAck.SetSelection(enableBootAck)
    resetBootBusConditions = (self.usdhcMmcOpt0 & 0x00000008) >> 3
    self.m_choice_resetBootBusConditions.SetSelection(resetBootBusConditions)
    # opt1[3:0]: uSDHC instance (only when the SoC has several)
    self._updateInstanceField(self.hasMultiUsdhcBootInstance)
    if self.hasMultiUsdhcBootInstance:
        instance = self.usdhcMmcOpt1 & 0x0000000F
        self.m_choice_instance.SetSelection(instance)
    # opt1[18]: 1.8V enable; opt1[19]: power cycle; opt1[23]: polarity
    enable1V8 = (self.usdhcMmcOpt1 & 0x00040000) >> 18
    self.m_choice_enable1V8.SetSelection(enable1V8)
    enablePowerCycle = (self.usdhcMmcOpt1 & 0x00080000) >> 19
    self.m_choice_enablePowerCycle.SetSelection(enablePowerCycle)
    powerPolarity = (self.usdhcMmcOpt1 & 0x00800000) >> 23
    self.m_choice_powerPolarity.SetSelection(powerPolarity)
    # opt1[20]: power-up time; opt1[25:24]: power-down time
    powerUpTime = (self.usdhcMmcOpt1 & 0x00100000) >> 20
    self.m_choice_powerUpTime.SetSelection(powerUpTime)
    powerDownTime = (self.usdhcMmcOpt1 & 0x03000000) >> 24
    self.m_choice_powerDownTime.SetSelection(powerDownTime)
    # perm-config field position depends on the instance-selector layout
    enablePermConfig = 0
    if self.hasMultiUsdhcBootInstance:
        enablePermConfig = (self.usdhcMmcOpt1 & 0x00000030) >> 4
    else:
        enablePermConfig = self.usdhcMmcOpt1 & 0x00000003
    self.m_textCtrl_enablePermConfig.Clear()
    self.m_textCtrl_enablePermConfig.write(str(enablePermConfig))
    # opt1[17:16]: permanent boot-config protection
    permBootConfigProt = (self.usdhcMmcOpt1 & 0x00030000) >> 16
    self.m_textCtrl_permBootConfigProt.Clear()
    self.m_textCtrl_permBootConfigProt.write(str(permBootConfigProt))
    # opt1[31:28]: driver strength
    driverStrength = (self.usdhcMmcOpt1 & 0xF0000000) >> 28
    self.m_textCtrl_driverStrength.Clear()
    self.m_textCtrl_driverStrength.write(str(driverStrength))
def _getBusWidth( self ):
    """Encode the selected data bus width into usdhcMmcOpt0 bits [10:8]."""
    # DDR widths use the 0x5/0x6 encodings (see _recoverLastSettings)
    encoding = {'1bit': 0x0, '4bit': 0x1, '8bit': 0x2,
                '4bit DDR': 0x5, '8bit DDR': 0x6}
    txt = self.m_choice_busWidth.GetString(self.m_choice_busWidth.GetSelection())
    if txt not in encoding:
        # fix: the original fell through to an undefined `val` (NameError);
        # leave the option word untouched for an unknown selection
        return
    self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFFF0FF) | (encoding[txt] << 8)
def _getTimingInterface( self ):
    """Encode the selected timing interface into usdhcMmcOpt0 bits [15:12]."""
    encoding = {'Non-HighSpeed': 0x0,
                'HighSpeed': 0x1,
                'HighSpeed 200': 0x2,
                'HighSpeed 400': 0x3,
                'HighSpeed 26MHz': 0x4,
                'HighSpeed 52MHz': 0x5,
                'HighSpeed DDR52': 0x6}
    txt = self.m_choice_timingInterface.GetString(self.m_choice_timingInterface.GetSelection())
    if txt not in encoding:
        # fix: the original fell through to an undefined `val` (NameError)
        return
    self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFF0FFF) | (encoding[txt] << 12)
def _getPartitionAccess( self ):
    """Encode the selected partition access into usdhcMmcOpt0 bits [26:24]."""
    encoding = {'User Area Normal': 0x0,
                'Read/Write Boot1': 0x1,
                'Read/Write Boot2': 0x2,
                'Replay Protected Mem Block': 0x3,
                'General Purpose1': 0x4,
                'General Purpose2': 0x5,
                'General Purpose3': 0x6,
                'General Purpose4': 0x7}
    txt = self.m_choice_partitionAccess.GetString(self.m_choice_partitionAccess.GetSelection())
    if txt not in encoding:
        # fix: the original fell through to an undefined `val` (NameError)
        return
    self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xF8FFFFFF) | (encoding[txt] << 24)
def _getEnableBootConfig( self ):
    """Encode the boot-config enable flag into usdhcMmcOpt0 bit [0]."""
    encoding = {'No': 0x0, 'Yes': 0x1}
    txt = self.m_choice_enableBootConfig.GetString(self.m_choice_enableBootConfig.GetSelection())
    if txt not in encoding:
        # fix: the original fell through to an undefined `val` (NameError)
        return
    self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFFFFFE) | (encoding[txt] << 0)
def _getBootBusWidth( self ):
txt = self.m_choice_bootBusWidth.GetString(self.m_choice_bootBusWidth.GetSelection())
if txt == '1bit-SDR, 4bit-DDR':
val = 0x0
elif txt == '4bit-SDR, 4bit-DDR':
val = 0x1
elif txt == '8bit-SDR, 8bit-DDR':
val = 0x2
else:
pass
self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFCFFFF) | (val << 16)
def _getBootMode( self ):
txt = self.m_choice_bootMode.GetString(self.m_choice_bootMode.GetSelection())
if txt == 'SDR Non-HighSpeed':
val = 0x0
elif txt == 'SDR HighSpeed':
val = 0x1
elif txt == 'DDR':
val = 0x2
else:
pass
self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFFFFCF) | (val << 4)
def _getEnableBootPartition( self ):
txt = self.m_choice_enableBootPartition.GetString(self.m_choice_enableBootPartition.GetSelection())
if txt == 'No':
val = 0x0
elif txt == 'Boot1':
val = 0x1
elif txt == 'Boot2':
val = 0x2
elif txt == 'User Area':
val = 0x7
else:
pass
self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFF8FFFFF) | (val << 20)
def _getEnableBootAck( self ):
txt = self.m_choice_enableBootAck.GetString(self.m_choice_enableBootAck.GetSelection())
if txt == 'No':
val = 0x0
elif txt == 'Yes':
val = 0x1
else:
pass
self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFFFFFB) | (val << 2)
def _getResetBootBusConditions( self ):
txt = self.m_choice_resetBootBusConditions.GetString(self.m_choice_resetBootBusConditions.GetSelection())
if txt == 'Reset to 1bit-SDR':
val = 0x0
elif txt == 'Retain Boot Bus Width':
val = 0x1
else:
pass
self.usdhcMmcOpt0 = (self.usdhcMmcOpt0 & 0xFFFFFFF7) | (val << 3)
    def _getInstance( self ):
        """Store the selected uSDHC controller instance index in usdhcMmcOpt1 bits [3:0]."""
        # GetSelection() returns the zero-based choice index, used directly as the value.
        val = self.m_choice_instance.GetSelection()
        self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFFFFFF0) | val
def _getEnable1V8( self ):
txt = self.m_choice_enable1V8.GetString(self.m_choice_enable1V8.GetSelection())
if txt == 'No':
val = 0x0
elif txt == 'Yes':
val = 0x1
else:
pass
self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFFBFFFF) | (val << 18)
def _getEnablePowerCycle( self ):
txt = self.m_choice_enablePowerCycle.GetString(self.m_choice_enablePowerCycle.GetSelection())
if txt == 'No':
val = 0x0
elif txt == 'Yes':
val = 0x1
else:
pass
self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFF7FFFF) | (val << 19)
def _getPowerPolarity( self ):
txt = self.m_choice_powerPolarity.GetString(self.m_choice_powerPolarity.GetSelection())
if txt == 'RST Low-Disable':
val = 0x0
elif txt == 'RST High-Disable':
val = 0x1
else:
pass
self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFF7FFFFF) | (val << 23)
def _getPowerUpTime( self ):
txt = self.m_choice_powerUpTime.GetString(self.m_choice_powerUpTime.GetSelection())
if txt == '5ms':
val = 0x0
elif txt == '2.5ms':
val = 0x1
else:
pass
self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFEFFFFF) | (val << 20)
def _getPowerDownTime( self ):
txt = self.m_choice_powerDownTime.GetString(self.m_choice_powerDownTime.GetSelection())
if txt == '20ms':
val = 0x0
elif txt == '10ms':
val = 0x1
elif txt == '5ms':
val = 0x2
elif txt == '2.5ms':
val = 0x3
else:
pass
self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFCFFFFFF) | (val << 24)
    def _getRsvFields(self):
        """Read the free-form text fields back into usdhcMmcOpt1 bit fields."""
        # Enable-permanent-config flag; its bit position depends on whether this
        # part exposes multiple uSDHC boot instances.
        val = int(self.m_textCtrl_enablePermConfig.GetLineText(0))
        if self.hasMultiUsdhcBootInstance:
            self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFFFFFCF) | (val << 4)
        else:
            self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFFFFFFC) | val
        # Permanent boot-config protection -> bits [17:16].
        val = int(self.m_textCtrl_permBootConfigProt.GetLineText(0))
        self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0xFFFCFFFF) | (val << 16)
        # Driver strength -> bits [31:28].
        val = int(self.m_textCtrl_driverStrength.GetLineText(0))
        self.usdhcMmcOpt1 = (self.usdhcMmcOpt1 & 0x0FFFFFFF) | (val << 28)
    def callbackEnableBootConfig( self, event ):
        """Toggle the boot-config widget group when the user changes 'Enable Boot Config'."""
        txt = self.m_choice_enableBootConfig.GetString(self.m_choice_enableBootConfig.GetSelection())
        if txt == 'No':
            self._updateBootCfgField(False)
        elif txt == 'Yes':
            self._updateBootCfgField(True)
        else:
            # Unrecognized label (e.g. no selection): leave the UI state as-is.
            pass
    def callbackOk( self, event ):
        """Collect every widget value into usdhcMmcOpt0/1, persist the boot-device
        configuration, hide the dialog and play the progress sound."""
        self._getBusWidth()
        self._getTimingInterface()
        self._getPartitionAccess()
        self._getEnableBootConfig()
        # Boot-specific fields are only read back when boot config is enabled (bit 0).
        enableBootConfig = self.usdhcMmcOpt0 & 0x00000001
        if enableBootConfig:
            self._getBootBusWidth()
            self._getBootMode()
            self._getEnableBootPartition()
            self._getEnableBootAck()
            self._getResetBootBusConditions()
        # Only parts with more than one uSDHC boot instance expose the instance choice.
        if self.hasMultiUsdhcBootInstance:
            self._getInstance()
        self._getEnable1V8()
        self._getEnablePowerCycle()
        self._getPowerPolarity()
        self._getPowerUpTime()
        self._getPowerDownTime()
        self._getRsvFields()
        uivar.setBootDeviceConfiguration(RTyyyy_uidef.kBootDevice_UsdhcMmc, self.usdhcMmcOpt0, self.usdhcMmcOpt1)
        uivar.setRuntimeSettings(False)
        self.Show(False)
        # runtimeSettings[1]/[2] are passed straight to the sound player
        # (presumably enable flag + volume — TODO confirm against uivar).
        runtimeSettings = uivar.getRuntimeSettings()
        sound.playSoundEffect(runtimeSettings[1], runtimeSettings[2], uidef.kSoundEffectFilename_Progress)
    def callbackCancel( self, event ):
        """Discard any pending changes: mark the dialog inactive and hide it."""
        uivar.setRuntimeSettings(False)
        self.Show(False)
def callbackClose( self, event ):
uivar.setRuntimeSettings(False)
self.Show(False) | StarcoderdataPython |
11399801 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
from collections import OrderedDict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
import funsor
import funsor.torch.distributions as dist
import funsor.ops as ops
from funsor.domains import Bint, Reals
REPO_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_PATH = os.path.join(REPO_PATH, 'data')
class Encoder(nn.Module):
    """Amortized inference network: maps a 28x28 image to the parameters
    (loc, scale) of a diagonal Normal over a 20-dimensional latent code."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)

    def forward(self, image):
        # Flatten the trailing 28x28 pixel grid into a 784-vector,
        # preserving any leading batch dimensions.
        flat = image.reshape(image.shape[:-2] + (-1,))
        hidden = F.relu(self.fc1(flat))
        # Two heads: mean, and a positive scale via exponentiation.
        return self.fc21(hidden), self.fc22(hidden).exp()
class Decoder(nn.Module):
    """Generative network: maps a 20-dimensional latent code to Bernoulli
    pixel probabilities laid out as a 28x28 grid."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def forward(self, z):
        hidden = F.relu(self.fc3(z))
        # Sigmoid keeps every pixel probability in [0, 1].
        pixels = torch.sigmoid(self.fc4(hidden))
        # Un-flatten the 784-vector back to a 28x28 image grid.
        return pixels.reshape(pixels.shape[:-1] + (28, 28))
def main(args):
    """Train a funsor-based VAE on MNIST.

    args must provide `num_epochs`, `batch_size` and `smoke_test`
    (see the argparse setup in the __main__ block).
    """
    funsor.set_backend("torch")
    # XXX Temporary fix after https://github.com/pyro-ppl/pyro/pull/2701
    import pyro
    pyro.enable_validation(False)
    encoder = Encoder()
    decoder = Decoder()
    # Wrap the torch modules as funsor functions with declared input/output domains.
    encode = funsor.function(Reals[28, 28], (Reals[20], Reals[20]))(encoder)
    decode = funsor.function(Reals[20], Reals[28, 28])(decoder)
    @funsor.interpretation(funsor.montecarlo.MonteCarlo())
    def loss_function(data, subsample_scale):
        # Lazily sample from the guide.
        loc, scale = encode(data)
        q = funsor.Independent(
            dist.Normal(loc['i'], scale['i'], value='z_i'),
            'z', 'i', 'z_i')
        # Evaluate the model likelihood at the lazy value z.
        probs = decode('z')
        p = dist.Bernoulli(probs['x', 'y'], value=data['x', 'y'])
        p = p.reduce(ops.add, {'x', 'y'})
        # Construct an elbo. This is where sampling happens.
        elbo = funsor.Integrate(q, p - q, 'z')
        # Scale up to the full-dataset ELBO from the minibatch estimate.
        elbo = elbo.reduce(ops.add, 'batch') * subsample_scale
        loss = -elbo
        return loss
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(DATA_PATH, train=True, download=True,
                       transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True)
    encoder.train()
    decoder.train()
    optimizer = optim.Adam(list(encoder.parameters()) +
                           list(decoder.parameters()), lr=1e-3)
    for epoch in range(args.num_epochs):
        train_loss = 0
        for batch_idx, (data, _) in enumerate(train_loader):
            subsample_scale = float(len(train_loader.dataset) / len(data))
            # Drop the channel dimension: MNIST tensors are (batch, 1, 28, 28).
            data = data[:, 0, :, :]
            data = funsor.Tensor(data, OrderedDict(batch=Bint[len(data)]))
            optimizer.zero_grad()
            loss = loss_function(data, subsample_scale)
            assert isinstance(loss, funsor.Tensor), loss.pretty()
            # Backprop through the raw torch tensor held by the funsor.
            loss.data.backward()
            train_loss += loss.item()
            optimizer.step()
            if batch_idx % 50 == 0:
                print(' loss = {}'.format(loss.item()))
            # Smoke-test mode: bail out after the second batch.
            if batch_idx and args.smoke_test:
                return
        print('epoch {} train_loss = {}'.format(epoch, train_loss))
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters and launch training.
    parser = argparse.ArgumentParser(description='VAE MNIST Example')
    parser.add_argument('-n', '--num-epochs', type=int, default=10)
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--smoke-test', action='store_true')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
9744550 | <reponame>jazzathoth/DS-Unit-3-Sprint-1-Software-Engineering
#! /usr/bin/env python
from random import randint, sample, uniform
from acme import Product
# Useful to use with random.sample to generate names
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']
def generate_products(num_products=30):
    """Instantiate and return a list of `num_products` random Products.

    Names are "<Adjective> <Noun>"; price and weight are uniform random
    ints in [1, 50]; flammability is a uniform random float in [0, 1).
    """
    products = []
    for _ in range(num_products):
        # Fix: random.sample(xs, 1) returns a 1-element LIST, so the original
        # produced names like "['Awesome'] ['Anvil']"; index [0] to get the string.
        name = "{} {}".format(sample(ADJECTIVES, 1)[0], sample(NOUNS, 1)[0])
        products.append(Product(name,
                                randint(1, 50),
                                randint(1, 50),
                                uniform(0, 1)))
    return products
def inventory_report(products):
    """Print summary statistics for a list of product objects.

    Each product must expose `name`, `price`, `weight` and `flammability`.
    Prints the number of unique names and the average price, weight and
    flammability. Returns None.
    """
    print('ACME CORPORATION INVENTORY REPORT')
    if not products:
        # Guard: avoid ZeroDivisionError on an empty inventory.
        print('No products to report.')
        return
    prices = [p.price for p in products]
    weights = [p.weight for p in products]
    flammability = [p.flammability for p in products]
    # A set counts unique names in O(n) (the original used an O(n^2)
    # membership scan over a list) — the count is identical.
    print('Unique product names: {}'.format(len({p.name for p in products})))
    print('Average price: {}'.format(sum(prices)/len(prices)))
    print('Average weight: {}'.format(sum(weights)/len(weights)))
    print('Average flammability: {}'
          .format(sum(flammability)/len(flammability)))
if __name__ == '__main__':
    # Script entry point: generate a random inventory and print its report.
    inventory_report(generate_products())
| StarcoderdataPython |
11265689 | <gh_stars>1-10
from colander import Email
from colander import Length
from colander import SchemaNode
from colander import String
from colander import drop
from colander import SequenceSchema
from sandglass.time.schemas import BaseModelSchema
from sandglass.time.schemas import Dictionary
class UserSchema(BaseModelSchema):
    """
    Schema definition for user model.
    """
    # E-mail address; must be syntactically valid.
    email = SchemaNode(
        String(),
        validator=Email())
    # Given name, at most 60 characters.
    first_name = SchemaNode(
        String(),
        validator=Length(max=60))
    # Family name, at most 80 characters.
    last_name = SchemaNode(
        String(),
        validator=Length(max=80))
    # Stored password value, at most 255 characters; optional — colander's
    # `drop` removes the field from the result when it is missing.
    password = SchemaNode(
        String(),
        validator=Length(max=255),
        missing=drop)
    # Free-form per-user data mapping; optional.
    data = SchemaNode(
        Dictionary(),
        missing=drop)
class UserListSchema(SequenceSchema):
    # A sequence of user records, each validated by UserSchema.
    user = UserSchema()
class UserSignupSchema(UserSchema):
    """
    Schema definition for user signup.
    """
    # On signup the password is required (no `missing=drop`, unlike the
    # parent schema) and limited to 30 characters.
    password = SchemaNode(
        String(),
        validator=Length(max=30))
class UserSigninSchema(BaseModelSchema):
    """
    Schema definition for user logins.
    """
    # E-mail used as the login identifier.
    email = SchemaNode(
        String(),
        validator=Email())
    # Plain-text password from the login form, at most 30 characters.
    password = SchemaNode(
        String(),
        validator=Length(max=30))
| StarcoderdataPython |
165943 | <reponame>abhishekkushwaha4u/vitolx-backend
from django.contrib import admin
from exchange.models import (
Exchange
)
admin.site.register(Exchange)  # expose the Exchange model in the Django admin site
| StarcoderdataPython |
5032406 | import pyjion
import gc
from test.libregrtest import main
# Run the CPython regression test suite with the Pyjion JIT enabled, then
# restore the default interpreter and force a garbage-collection pass.
pyjion.enable()
main()
pyjion.disable()
gc.collect()
| StarcoderdataPython |
5079917 | <reponame>Poehavshi/Hackaton_telegram_bot
TOKEN = '<YOUR_TOKEN>'  # Telegram Bot API token placeholder; set locally, never commit a real token
6478172 | import pytest
from dolib.client import AsyncClient, Client
from dolib.models import Firewall, FirewallTargets
@pytest.mark.vcr
@pytest.mark.block_network()
def test_crud_firewalls(client: Client) -> None:
    """End-to-end CRUD exercise of the firewalls API against VCR cassettes
    (network access is blocked; responses are replayed from recordings)."""
    fw = Firewall(
        name="test",
        inbound_rules=[
            Firewall.InboundRule(
                protocol="tcp",
                ports="80",
                sources=FirewallTargets(addresses=["0.0.0.0/0"]),
            )
        ],
    )
    # create firewall
    created_fw = client.firewalls.create(fw)
    assert isinstance(created_fw, Firewall)
    assert created_fw.id is not None
    # list firewalls
    fws = client.firewalls.all()
    assert len(fws) > 0
    # read firewall
    read_fw = client.firewalls.get(str(fws[0].id))
    assert read_fw.id == fws[0].id
    assert isinstance(read_fw, Firewall)
    # update firewall
    read_fw.tags = ["test"]
    updated_fw = client.firewalls.update(read_fw)
    assert read_fw.tags == updated_fw.tags
    droplets = client.droplets.all()
    # add droplets
    client.firewalls.add_droplets(str(read_fw.id), droplet_ids=[d.id for d in droplets])
    # remove droplets
    client.firewalls.remove_droplets(
        str(read_fw.id), droplet_ids=[d.id for d in droplets]
    )
    # add tags
    client.firewalls.add_tags(str(read_fw.id), tags=["test"])
    # remove tags
    client.firewalls.remove_tags(str(read_fw.id), tags=["test"])
    in_rule = Firewall.InboundRule(
        protocol="tcp", ports="81", sources=FirewallTargets(addresses=["0.0.0.0/0"])
    )
    out_rule = Firewall.OutboundRule(
        protocol="tcp",
        ports="81",
        destinations=FirewallTargets(addresses=["0.0.0.0/0"]),
    )
    # add rules
    client.firewalls.add_rules(str(read_fw.id), inbound_rules=[in_rule])
    client.firewalls.add_rules(str(read_fw.id), outbound_rules=[out_rule])
    # remove rules
    client.firewalls.remove_rules(str(read_fw.id), inbound_rules=[in_rule])
    client.firewalls.remove_rules(str(read_fw.id), outbound_rules=[out_rule])
    # delete firewall
    client.firewalls.delete(firewall=read_fw)
@pytest.mark.vcr
@pytest.mark.block_network()
@pytest.mark.asyncio
def test_async_crud_firewalls(async_client: AsyncClient) -> None:
    """Async twin of test_crud_firewalls: the same CRUD sequence through
    AsyncClient, replayed from VCR cassettes with real network blocked."""
    fw = Firewall(
        name="test",
        inbound_rules=[
            Firewall.InboundRule(
                protocol="tcp",
                ports="80",
                sources=FirewallTargets(addresses=["0.0.0.0/0"]),
            )
        ],
    )
    # create firewall
    created_fw = await async_client.firewalls.create(fw)
    assert isinstance(created_fw, Firewall)
    assert created_fw.id is not None
    # list firewalls
    fws = await async_client.firewalls.all()
    assert len(fws) > 0
    # read firewall
    read_fw = await async_client.firewalls.get(str(fws[0].id))
    assert read_fw.id == fws[0].id
    assert isinstance(read_fw, Firewall)
    # update firewall
    read_fw.tags = ["test"]
    updated_fw = await async_client.firewalls.update(read_fw)
    assert read_fw.tags == updated_fw.tags
    droplets = await async_client.droplets.all()
    # add droplets
    await async_client.firewalls.add_droplets(
        str(read_fw.id), droplet_ids=[d.id for d in droplets]
    )
    # remove droplets
    await async_client.firewalls.remove_droplets(
        str(read_fw.id), droplet_ids=[d.id for d in droplets]
    )
    # add tags
    await async_client.firewalls.add_tags(str(read_fw.id), tags=["test"])
    # remove tags
    await async_client.firewalls.remove_tags(str(read_fw.id), tags=["test"])
    in_rule = Firewall.InboundRule(
        protocol="tcp", ports="81", sources=FirewallTargets(addresses=["0.0.0.0/0"])
    )
    out_rule = Firewall.OutboundRule(
        protocol="tcp",
        ports="81",
        destinations=FirewallTargets(addresses=["0.0.0.0/0"]),
    )
    # add rules
    await async_client.firewalls.add_rules(str(read_fw.id), inbound_rules=[in_rule])
    await async_client.firewalls.add_rules(str(read_fw.id), outbound_rules=[out_rule])
    # remove rules
    await async_client.firewalls.remove_rules(str(read_fw.id), inbound_rules=[in_rule])
    await async_client.firewalls.remove_rules(
        str(read_fw.id), outbound_rules=[out_rule]
    )
    # delete firewall
    await async_client.firewalls.delete(firewall=read_fw)
| StarcoderdataPython |
51142 | from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.regularizers import get_regularizer
from torchreid.losses.wrapped_cross_entropy_loss import WrappedCrossEntropyLoss
from torchreid.models.tricks.dropout import DropoutOptimizer
import logging
# Default log level is CRITICAL unless overridden via the LOGLEVEL env var.
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'CRITICAL'))
# global variables
# NOTE: CLI args are parsed at import time; `args` and `dropout_optimizer`
# are treated as module-level globals by main() and extract_train_info().
parser = argument_parser()
args = parser.parse_args()
dropout_optimizer = DropoutOptimizer(args)
# Redirect torch's pretrained-model cache into the repo-local .torch directory.
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for
the specified values of k.
Args:
output (torch.Tensor): prediction matrix with shape (batch_size, num_classes).
target (torch.LongTensor): ground truth labels with shape (batch_size).
topk (tuple, optional): accuracy at top-k will be computed. For example,
topk=(1, 5) means accuracy at top-1 and top-5 will be computed.
Returns:
list: accuracy at top-k.
Examples::
>>> from torchreid import metrics
>>> metrics.accuracy(output, target)
"""
maxk = max(topk)
batch_size = target.size(0)
if isinstance(output, (tuple, list)):
output = output[0]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
acc = correct_k.mul_(100.0 / batch_size)
res.append(acc)
return res
def get_criterions(num_classes: int, use_gpu: bool, args) -> ('criterion', 'fix_criterion', 'switch_criterion'):
    """Build the main training criterion plus the 'fix' and 'switch' criteria
    selected by args.criterion.

    Returns a 4-tuple (criterion, fix_criterion, switch_criterion,
    htri_param_controller) — note the annotation above understates this;
    it is kept for interface compatibility.
    """
    from torchreid.losses.wrapped_triplet_loss import WrappedTripletLoss
    from torchreid.regularizers.param_controller import HtriParamController
    htri_param_controller = HtriParamController()
    # fix/switch criteria default to xent unless a triplet-based loss is requested.
    if 'htri' in args.criterion:
        fix_criterion = WrappedTripletLoss(num_classes, use_gpu, args, htri_param_controller)
        switch_criterion = WrappedTripletLoss(num_classes, use_gpu, args, htri_param_controller)
    else:
        fix_criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
        switch_criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    # Main criterion dispatch; losses are imported lazily to avoid paying for
    # unused variants at module import time.
    if args.criterion == 'xent':
        criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    elif args.criterion == 'spectral':
        from torchreid.losses.spectral_loss import SpectralLoss
        criterion = SpectralLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth, penalty_position=args.penalty_position)
    elif args.criterion == 'batch_spectral':
        from torchreid.losses.batch_spectral_loss import BatchSpectralLoss
        criterion = BatchSpectralLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    elif args.criterion == 'lowrank':
        from torchreid.losses.lowrank_loss import LowRankLoss
        criterion = LowRankLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
    elif args.criterion == 'singular':
        from torchreid.losses.singular_loss import SingularLoss
        criterion = SingularLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth, penalty_position=args.penalty_position)
    elif args.criterion == 'htri':
        criterion = WrappedTripletLoss(num_classes=num_classes, use_gpu=use_gpu, args=args, param_controller=htri_param_controller)
    elif args.criterion == 'singular_htri':
        from torchreid.losses.singular_triplet_loss import SingularTripletLoss
        criterion = SingularTripletLoss(num_classes, use_gpu, args, htri_param_controller)
    elif args.criterion == 'incidence':
        from torchreid.losses.incidence_loss import IncidenceLoss
        criterion = IncidenceLoss()
    elif args.criterion == 'incidence_xent':
        from torchreid.losses.incidence_xent_loss import IncidenceXentLoss
        criterion = IncidenceXentLoss(num_classes, use_gpu, args.label_smooth)
    else:
        # Fix: the original formatted the unbound local `criterion` here, which
        # raised NameError instead of the intended RuntimeError.
        raise RuntimeError('Unknown criterion {!r}'.format(args.criterion))
    if args.fix_custom_loss:
        fix_criterion = criterion
    # A negative switch_loss swaps the roles of the main and switch criteria.
    if args.switch_loss < 0:
        criterion, switch_criterion = switch_criterion, criterion
    return criterion, fix_criterion, switch_criterion, htri_param_controller
def main():
    """Set up data, model, losses and optimizer from the module-level `args`,
    optionally load pretrained/resume checkpoints, then dump per-head
    training-set accuracies via extract_train_info()."""
    global args, dropout_optimizer
    torch.manual_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    # Tee stdout/stderr into the log file.
    sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU, however, GPU is highly recommended")
    print("Initializing image data manager")
    dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
    trainloader, testloader_dict = dm.return_dataloaders()
    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, use_gpu=use_gpu, dropout_optimizer=dropout_optimizer)
    print(model)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    # criterion = WrappedCrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
    criterion, fix_criterion, switch_criterion, htri_param_controller = get_criterions(dm.num_train_pids, use_gpu, args)
    regularizer, reg_param_controller = get_regularizer(args.regularizer)
    optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
    if args.load_weights and check_isfile(args.load_weights):
        # load pretrained weights but ignore layers that don't match in size
        try:
            checkpoint = torch.load(args.load_weights)
        except Exception as e:
            # Fall back to CPU mapping when the checkpoint was saved on GPU.
            print(e)
            checkpoint = torch.load(args.load_weights, map_location={'cuda:0': 'cpu'})
        # dropout_optimizer.set_p(checkpoint.get('dropout_p', 0))
        # print(list(checkpoint.keys()), checkpoint['dropout_p'])
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))
    if args.resume and check_isfile(args.resume):
        # Resume loads the full state dict (strict key overlap assumed).
        checkpoint = torch.load(args.resume)
        state = model.state_dict()
        state.update(checkpoint['state_dict'])
        model.load_state_dict(state)
        # args.start_epoch = checkpoint['epoch'] + 1
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, checkpoint['rank1']))
    if use_gpu:
        model = nn.DataParallel(model, device_ids=list(range(len(args.gpu_devices.split(','))))).cuda()
    extract_train_info(model, trainloader)
def extract_train_info(model, trainloader):
    """Run the model in eval mode over the training loader and write the
    average accuracy of each classification head to '<load_weights>.acc'."""
    model.eval()
    # NOTE(review): the 'fake' env var is presumably read inside the model's
    # forward pass — confirm against the model implementation.
    os.environ['fake'] = '1'
    # One meter per classification head; assumes the model has 3 heads and
    # returns (features, [head_logits, ...]) — TODO confirm.
    accs = [AverageMeter() for _ in range(3)]
    with torch.no_grad():
        for imgs, pids, _, paths in trainloader:
            xent_features = model(imgs.cuda())[1]
            for i, xent_feature in enumerate(xent_features):
                accs[i].update(
                    accuracy(xent_feature, pids.cuda())[0].item(),
                    pids.size(0),
                )
    # Persist the per-head averages next to the loaded weights file.
    with open(args.load_weights + '.acc', 'w') as f:
        print(*(acc.avg for acc in accs), file=f)
if __name__ == '__main__':
    # Script entry point (args are parsed at module import, see top of file).
    main()
| StarcoderdataPython |
1947972 | import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from network import Protocol, BCPNNFast, NetworkManager
from connectivity_functions import create_artificial_manager
sns.set(font_scale=2.0)
# Script: train a BCPNN network on a simple sequence protocol, build an
# equivalent "artificial" connectivity matrix, and plot both weight matrices
# side by side for comparison.
# Patterns parameters
hypercolumns = 4
minicolumns = 15
n_patterns = 15
# Manager properties
dt = 0.001
T_recalling = 5.0
values_to_save = []
# Protocol
training_time = 0.1
inter_sequence_interval = 1.0
inter_pulse_interval = 0.0
epochs = 2
tau_z = 0.150
# Build the network
nn = BCPNNFast(hypercolumns, minicolumns, tau_z)
# Build the manager
manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
# Build the protocol for training
protocol = Protocol()
patterns_indexes = [i for i in range(n_patterns)]
protocol.simple_protocol(patterns_indexes, training_time=training_time, inter_pulse_interval=inter_pulse_interval,
                         inter_sequence_interval=inter_sequence_interval, epochs=epochs)
# Train
manager.run_network_protocol(protocol=protocol, verbose=True)
# Artificial matrix: hand-constructed connectivity meant to approximate the
# trained weights for the same single sequence 0..n_patterns-1.
beta = False
value = 1.0
inhibition = -0.3
extension = 3
decay_factor = 0.45
sequence_decay = 0.0
ampa = True
self_influence = False
sequences = [[i for i in range(n_patterns)]]
manager_art = create_artificial_manager(hypercolumns, minicolumns, sequences, value, inhibition, extension, decay_factor,
                                        sequence_decay, dt, BCPNNFast, NetworkManager, ampa, beta, beta_decay=False,
                                        self_influence=self_influence)
cmap = 'coolwarm'
# Left panel: trained weight matrix (minicolumn block only), symmetric color scale.
w = manager.nn.w
w = w[:nn.minicolumns, :nn.minicolumns]
aux_max = np.max(np.abs(w))
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(121)
im1 = ax1.imshow(w, cmap=cmap, interpolation='None', vmin=-aux_max, vmax=aux_max)
ax1.set_title('Training Procedure')
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
ax1.grid()
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes('right', size='5%', pad=0.05)
ax1.get_figure().colorbar(im1, ax=ax1, cax=cax1)
# Right panel: artificial matrix, shown on the trained matrix's color scale.
w_art = manager_art.nn.w
w_art = w_art[:nn.minicolumns, :nn.minicolumns]
aux_max = np.max(np.abs(w))
ax2 = fig.add_subplot(122)
im2 = ax2.imshow(w_art, cmap=cmap, interpolation='None', vmin=-aux_max, vmax=aux_max)
ax2.set_title('Artificial Matrix')
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax2.grid()
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes('right', size='5%', pad=0.05)
ax2.get_figure().colorbar(im2, ax=ax2, cax=cax2)
# Save the figure
fname = './plots/comparison.pdf'
plt.savefig(fname, format='pdf', dpi=90, bbox_inches='tight', frameon=False, transparent=True)
| StarcoderdataPython |
3515361 | <filename>exceptions/01_catch_exceptions.py
# This code snippet explains how Exception hierarchy works
# For more information check: https://docs.python.org/3.8/library/exceptions.html
# Demonstration: the raised Exception is caught by the first matching handler,
# so the BaseException branch below never fires for this raise.
try:
    raise Exception("My custom exception")
except Exception as e: # catching Exception is not the best idea, try to be more specific
    print("Exception occurred:", repr(e))
except BaseException as e: # catching BaseException is even worse, as we can catch KeyboardInterrupt here as well
    print("BaseException occurred:", repr(e))
3353653 | <filename>Conf/sentinel.py<gh_stars>1-10
# Sentinel polling periods (presumably seconds — TODO confirm against consumer code).
COMMAND_CONN_INFO_CYCLE = 10  # interval between connection-info refreshes
COMMAND_CONN_PING_CYCLE = 1   # interval between ping probes
COMMAND_CONN_SELF_CYCLE = 2   # interval between self-check cycles
| StarcoderdataPython |
11312524 | import urllib.request
# Demo: fetch a sitemap URL and inspect the HTTP response headers.
url='https://www.samsclub.com/sitemap.xml'
someRequest = urllib.request.urlopen(url)  # performs a blocking HTTP GET
someRequest.getheaders()  # list of all (name, value) response headers; NOTE: return value is discarded
someRequest.getheader("Content-Type")  # value of the 'Content-Type' header; NOTE: return value is discarded
| StarcoderdataPython |
1893954 | import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from argparse import Namespace
from .metrics import accuracy
from sopa.src.solvers.utils import noise_params
from MegaAdversarial.src.attacks import (
Clean,
PGD,
FGSM
)
CONFIG_PGD_TRAIN = {"eps": 0.3, "lr": 2.0 / 255, "n_iter": 7}
CONFIG_FGSM_TRAIN = {"eps": 0.3}
def train(itr,
          model,
          data_gen,
          solvers,
          solver_options,
          criterion,
          optimizer,
          batch_time_meter,
          f_nfe_meter,
          b_nfe_meter,
          device = 'cpu',
          dtype = torch.float32,
          is_odenet = True,
          args = None,
          logger = None,
          wandb_logger = None):
    """Run one training step (a single minibatch) of a Neural-ODE classifier.

    Optionally perturbs the ODE solvers' Butcher-tableau parameters with noise,
    applies adversarial training (clean / FGSM / PGD per args.adv_training_mode),
    tracks forward/backward function evaluations (NFE) for ODE blocks, and
    updates the timing/NFE meters in place.
    """
    end = time.time()
    optimizer.zero_grad()
    x, y = data_gen.__next__()
    x = x.to(device)
    y = y.to(device)
    ##### Noise params
    # Perturb each solver's (u, v) tableau parameters around their nominal
    # (u0, v0) values, then rebuild the tableau.
    if args.noise_type is not None:
        for i in range(len(solvers)):
            solvers[i].u, solvers[i].v = noise_params(solvers[i].u0,
                                                      solvers[i].v0,
                                                      std = args.noise_sigma,
                                                      bernoulli_p = args.noise_prob,
                                                      noise_type = args.noise_type)
            solvers[i].build_ButcherTableau()
    # Select the (possibly adversarial) input transformation.
    if args.adv_training_mode == "clean":
        train_attack = Clean(model)
    elif args.adv_training_mode == "fgsm":
        train_attack = FGSM(model, **CONFIG_FGSM_TRAIN)
    elif args.adv_training_mode == "at":
        train_attack = PGD(model, **CONFIG_PGD_TRAIN)
    else:
        raise ValueError("Attack type not understood.")
    x, y = train_attack(x, y, {"solvers": solvers, "solver_options": solver_options})
    # Add noise:
    if args.data_noise_std > 1e-12:
        with torch.no_grad():
            x = x + args.data_noise_std * torch.randn_like(x)
    ##### Forward pass
    if is_odenet:
        logits = model(x, solvers, solver_options, Namespace(ss_loss=args.ss_loss))
    else:
        logits = model(x)
    xentropy = criterion(logits, y)
    # Optional stability/smoothness regularizer provided by the model.
    if args.ss_loss:
        ss_loss = model.get_ss_loss()
        loss = xentropy + args.ss_loss_reg * ss_loss
    else:
        ss_loss = 0.
        loss = xentropy
    if wandb_logger is not None:
        wandb_logger.log({"xentropy": xentropy.item(),
                          "ss_loss": ss_loss,
                          "loss": loss.item(),
                          "log_func": "train"})
    # if logger is not None:
    # fix
    ##### Compute NFE-forward
    # rhs_func.nfe counts right-hand-side evaluations; read and reset so the
    # backward pass is counted separately below.
    if is_odenet:
        nfe_forward = 0
        for i in range(len(model.blocks)):
            nfe_forward += model.blocks[i].rhs_func.nfe
            model.blocks[i].rhs_func.nfe = 0
    loss.backward()
    optimizer.step()
    ##### Compute NFE-backward
    if is_odenet:
        nfe_backward = 0
        for i in range(len(model.blocks)):
            nfe_backward += model.blocks[i].rhs_func.nfe
            model.blocks[i].rhs_func.nfe = 0
    ##### Denoise params
    # Restore the nominal solver parameters so evaluation uses clean solvers.
    if args.noise_type is not None:
        for i in range(len(solvers)):
            solvers[i].u, solvers[i].v = solvers[i].u0, solvers[i].v0
            solvers[i].build_ButcherTableau()
    batch_time_meter.update(time.time() - end)
    if is_odenet:
        f_nfe_meter.update(nfe_forward)
        b_nfe_meter.update(nfe_backward)
def validate_standalone(best_acc,
                        itr,
                        model,
                        train_eval_loader,
                        test_loader,
                        batches_per_epoch,
                        solvers,
                        solver_options,
                        batch_time_meter,
                        f_nfe_meter,
                        b_nfe_meter,
                        device = 'cpu',
                        dtype = torch.float32,
                        args = None,
                        logger = None,
                        wandb_logger=None):
    """Evaluate the model with each solver individually ('standalone' mode).

    For every solver, computes train/test accuracy (dataset-level `accuracy`
    from .metrics), checkpoints the model whenever that solver's test accuracy
    beats its best so far, and logs the results. Returns the updated per-solver
    `best_acc` list.
    """
    nsolvers = len(solvers)
    with torch.no_grad():
        train_acc = [0] * nsolvers
        val_acc = [0] * nsolvers
        for solver_id, solver in enumerate(solvers):
            train_acc_id = accuracy(model, train_eval_loader, device, [solver], solver_options)
            val_acc_id = accuracy(model, test_loader, device, [solver], solver_options)
            train_acc[solver_id] = train_acc_id
            val_acc[solver_id] = val_acc_id
            if val_acc_id > best_acc[solver_id]:
                best_acc[solver_id] = val_acc_id
                # One best-model checkpoint per solver id.
                torch.save({'state_dict': model.state_dict(),
                            'args': args,
                            'solver_id':solver_id,
                            'val_solver_mode':solver_options.solver_mode,
                            'acc': val_acc_id},
                           os.path.join(os.path.join(args.save, str(args.timestamp)),
                                        'model_best_{}.pth'.format(solver_id)))
                if wandb_logger is not None:
                    wandb_logger.save(os.path.join(os.path.join(args.save, str(args.timestamp)),
                                                   'model_best_{}.pth'.format(solver_id)))
            if logger is not None:
                logger.info("Epoch {:04d} | SolverMode {} | SolverId {} | "
                            "TrainAcc {:.10f} | TestAcc {:.10f} | BestAcc {:.10f}".format(
                    itr // batches_per_epoch, solver_options.solver_mode, solver_id,
                    train_acc_id, val_acc_id, best_acc[solver_id]))
            if wandb_logger is not None:
                wandb_logger.log({
                    "epoch": itr // batches_per_epoch,
                    "solver_mode": solver_options.solver_mode,
                    "solver_id": solver_id,
                    "train_acc": train_acc_id,
                    "test_acc": val_acc_id,
                    "best_acc": best_acc[solver_id],
                    "log_func": "validate_standalone"
                })
    # Reset NFE counters accumulated during evaluation.
    for i in range(len(model.blocks)):
        model.blocks[i].rhs_func.nfe = 0
    return best_acc
def validate_ensemble_switch(best_acc,
                             itr,
                             model,
                             train_eval_loader,
                             test_loader,
                             batches_per_epoch,
                             solvers,
                             solver_options,
                             batch_time_meter,
                             f_nfe_meter,
                             b_nfe_meter,
                             device = 'cpu',
                             dtype = torch.float32,
                             args = None,
                             logger=None,
                             wandb_logger=None):
    """Evaluate the model with all solvers jointly ('ensemble' or 'switch'
    solver mode, as encoded in solver_options).

    Computes a single train/test accuracy over the solver collection,
    checkpoints when the test accuracy improves, logs the result, and
    returns the updated scalar `best_acc`.
    """
    nsolvers = len(solvers)
    with torch.no_grad():
        train_acc = accuracy(model, train_eval_loader, device, solvers, solver_options)
        val_acc = accuracy(model, test_loader, device, solvers, solver_options)
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save({'state_dict': model.state_dict(),
                        'args': args,
                        'solver_id':None,
                        'val_solver_mode':solver_options.solver_mode,
                        'acc': val_acc},
                       os.path.join(os.path.join(args.save, str(args.timestamp)),
                                    'model_best.pth'))
            if wandb_logger is not None:
                wandb_logger.save(os.path.join(os.path.join(args.save, str(args.timestamp)),
                                               'model_best.pth'))
        if logger is not None:
            logger.info("Epoch {:04d} | SolverMode {} | SolverId {} | "
                        "TrainAcc {:.10f} | TestAcc {:.10f} | BestAcc {:.10f}".format(
                itr // batches_per_epoch, solver_options.solver_mode, None,
                train_acc, val_acc, best_acc))
        if wandb_logger is not None:
            wandb_logger.log({
                "epoch": itr // batches_per_epoch,
                "solver_mode": solver_options.solver_mode,
                "solver_id": None,
                "train_acc": train_acc,
                "test_acc": val_acc,
                "best_acc": best_acc,
                "log_func": "validate_ensemble_switch"
            })
    # Reset NFE counters accumulated during evaluation.
    for i in range(len(model.blocks)):
        model.blocks[i].rhs_func.nfe = 0
    return best_acc
def validate(best_acc,
             itr,
             model,
             train_eval_loader,
             test_loader,
             batches_per_epoch,
             solvers,
             val_solver_modes,
             batch_time_meter,
             f_nfe_meter,
             b_nfe_meter,
             device = 'cpu',
             dtype = torch.float32,
             args = None,
             logger = None,
             wandb_logger = None):
    """Run validation for every requested solver mode.

    ``best_acc`` is a dict keyed by solver mode ('standalone', 'ensemble',
    'switch'); each requested mode dispatches to the matching validate_*
    helper and its entry is updated in place. Timing/NFE statistics are
    logged once at the end. Returns the updated ``best_acc`` dict.

    Fix: removed dataset-dump residue that was fused onto the final return
    statement and made the function a syntax error.
    """
    for solver_mode in val_solver_modes:
        if solver_mode == 'standalone':
            val_solver_options = Namespace(solver_mode = 'standalone')
            best_acc['standalone'] = validate_standalone(best_acc['standalone'],
                                                         itr,
                                                         model,
                                                         train_eval_loader,
                                                         test_loader,
                                                         batches_per_epoch,
                                                         solvers = solvers,
                                                         solver_options = val_solver_options,
                                                         batch_time_meter = batch_time_meter,
                                                         f_nfe_meter = f_nfe_meter,
                                                         b_nfe_meter = b_nfe_meter,
                                                         device = device,
                                                         dtype = dtype,
                                                         args = args,
                                                         logger = logger,
                                                         wandb_logger = wandb_logger)
        elif solver_mode == 'ensemble':
            val_solver_options = Namespace(solver_mode = 'ensemble',
                                           ensemble_weights = args.ensemble_weights,
                                           ensemble_prob = args.ensemble_prob)
            best_acc['ensemble'] = validate_ensemble_switch(best_acc['ensemble'],
                                                            itr,
                                                            model,
                                                            train_eval_loader,
                                                            test_loader,
                                                            batches_per_epoch,
                                                            solvers = solvers,
                                                            solver_options = val_solver_options,
                                                            batch_time_meter = batch_time_meter,
                                                            f_nfe_meter = f_nfe_meter,
                                                            b_nfe_meter = b_nfe_meter,
                                                            device = device,
                                                            dtype = dtype,
                                                            args = args,
                                                            logger = logger,
                                                            wandb_logger = wandb_logger)
        elif solver_mode == 'switch':
            val_solver_options = Namespace(solver_mode = 'switch', switch_probs = args.switch_probs)
            best_acc['switch'] = validate_ensemble_switch(best_acc['switch'],
                                                          itr,
                                                          model,
                                                          train_eval_loader,
                                                          test_loader,
                                                          batches_per_epoch,
                                                          solvers = solvers,
                                                          solver_options = val_solver_options,
                                                          batch_time_meter = batch_time_meter,
                                                          f_nfe_meter = f_nfe_meter,
                                                          b_nfe_meter = b_nfe_meter,
                                                          device = device,
                                                          dtype = dtype,
                                                          args = args,
                                                          logger = logger,
                                                          wandb_logger = wandb_logger)
    # Log per-epoch timing and forward/backward NFE statistics once.
    if logger is not None:
        logger.info("Epoch {:04d} | Time {:.3f} ({:.3f}) | NFE-F {:.1f} | NFE-B {:.1f}".format(
            itr // batches_per_epoch,
            batch_time_meter.val, batch_time_meter.avg,
            f_nfe_meter.avg, b_nfe_meter.avg))
    if wandb_logger is not None:
        wandb_logger.log({
            "epoch": itr // batches_per_epoch,
            "batch_time_val": batch_time_meter.val,
            "nfe": f_nfe_meter.avg,
            "nbe": b_nfe_meter.avg,
            "log_func": "validate"
        })
    return best_acc
def test_model_with_one_index():
    """Placeholder test: DDL for a table with a single unique index.

    The DDL below is currently unused; the actual assertions are still to be
    written. Fix: removed dataset-dump residue ('382526 | ' prefix and a
    '| StarcoderdataPython |' suffix) that made this function a syntax error.
    """
    ddl = """
    CREATE table v2.task_requests (
      runid decimal(21) not null
      ,job_id decimal(21) not null
      ,object_id varchar(100) not null default 'none'
      ,pipeline_id varchar(100) not null default 'none'
      ,sequence smallint not null
      ,processor_id varchar(100) not null
      ,source_file varchar(1000) not null default 'none'
      ,job_args varchar array null
      ,request_time timestamp not null default now()
      ,status varchar(25) not null
      ,status_update_time timestamp null default now()
    ) ;
    create unique index task_requests_pk on v2.task_requests (runid) ;
    """
    pass
# Imports for the Enron feature-selection analysis.
# Fix: removed dataset-dump residue ('3373390 | ') fused onto the first import.
import pickle
import matplotlib.pyplot as plt
import numpy as np
from feature_format import featureFormat, targetFeatureSplit
from sklearn.feature_selection import SelectKBest, SelectPercentile, f_classif
import pprint
import operator
# loading the enron data dictionary
# Fix: pickle files must be opened in binary mode ("rb"); text mode only
# worked on Python 2 and breaks pickle.load on Python 3.
with open("final_project_dataset.pkl", "rb") as data_file:
    data_dict = pickle.load(data_file)
# removing 'TOTAL' outlier
del data_dict['TOTAL']
'''
Possible new features:
    total_payments + total_stock_value
    shared_receipt_with_poi/ to_messages
    from_this_person_to_poi/ from_messages
    from_poi_to_this_person/ to_messages
'''
# helper returning the poi labels and two chosen features as numpy arrays
def labels_features(feature1, feature2):
    """Extract ('poi', feature1, feature2) from data_dict and split it.

    Returns a (labels, features) pair of numpy arrays.
    """
    extracted = featureFormat(data_dict, ['poi', feature1, feature2], sort_keys = True)
    target, inputs = targetFeatureSplit(extracted)
    return np.array(target), np.array(inputs)
# creating labels and features for the new features mentioned above
labels1, features1 = labels_features('total_payments', 'total_stock_value')
labels2, features2 = labels_features('shared_receipt_with_poi', 'to_messages')
labels3, features3 = labels_features('from_this_person_to_poi', 'from_messages')
labels4, features4 = labels_features('from_poi_to_this_person', 'to_messages')
# creating new features: one sum feature and three message-ratio features.
# NOTE(review): the divisions assume the denominator column is nonzero;
# a person with zero messages would produce inf/nan here — confirm upstream.
new_features1 = features1[:,0] + features1[:,1]
new_features2 = features2[:,0] / features2[:,1]
new_features3 = features3[:,0] / features3[:,1]
new_features4 = features4[:,0] / features4[:,1]
# helper that draws and shows a poi-vs-feature scatter plot
def scatter_plot(labels, features, feature_name):
    """Show a scatter plot of *features* against the binary poi *labels*."""
    plt.scatter(labels, features, s = 50, c = "b", alpha = 0.5)
    plt.xlabel('poi')
    plt.ylabel(feature_name)
    plt.xticks(np.arange(0,1.5,1))
    plt.show()
# plotting new features vs poi
scatter_plot(labels1, new_features1, "total payments and stock value")
scatter_plot(labels2, new_features2, "shared poi receipt/ to messages")
scatter_plot(labels3, new_features3, "to poi/ from messages")
scatter_plot(labels4, new_features4, "from poi/ to messages")
# creating a list of all labels and features ('poi' first: it is the label)
all_features_list = ['poi',
                     'salary',
                     'to_messages',
                     'deferral_payments',
                     'total_payments',
                     'exercised_stock_options',
                     'bonus',
                     'restricted_stock',
                     'shared_receipt_with_poi',
                     'restricted_stock_deferred',
                     'total_stock_value',
                     'expenses',
                     'loan_advances',
                     'from_messages',
                     'other',
                     'from_this_person_to_poi',
                     'director_fees',
                     'deferred_income',
                     'long_term_incentive',
                     'from_poi_to_this_person']
# creating list of labels and list of numpy arrays containing the features
enron_data = featureFormat(data_dict, all_features_list, sort_keys = True)
enron_labels, enron_features = targetFeatureSplit(enron_data)
# adding the combined payments+stock feature to each person's feature vector;
# feature indices 3 and 9 correspond to 'total_payments' and
# 'total_stock_value' ('poi' is the label and not part of the vector).
# Fix: plain range() instead of np.arange() for index iteration.
for i in range(len(enron_features)):
    ttl_pay_stock = enron_features[i][3] + enron_features[i][9]
    enron_features[i] = np.append(enron_features[i], ttl_pay_stock)
# addining new feature names to the all features list
all_features_list.extend(["total_payments_stock"])
# listing out the best features in descending score order using SelectKBest;
# scores_[i] maps to all_features_list[i + 1] because 'poi' is the label.
# Fix: removed a '| StarcoderdataPython |' residue line that followed this
# block and broke the file's syntax.
best_features = {}
selector = SelectKBest(f_classif, k=20)
selector.fit(enron_features, enron_labels)
for i, s in enumerate(selector.scores_):
    best_features[all_features_list[(i+1)]] = s
pprint.pprint(sorted(best_features.items(), key=operator.itemgetter(1), reverse=True))
def hours_to_seconds(hours):
    """Convert a number of hours to seconds (1 hour = 3600 seconds)."""
    # Fix: the original used the literal letter 'x' ("hours x 60 x 60"),
    # which is a syntax error; multiplication is '*'. Also stripped the
    # dataset-dump residue lines wrapping this snippet.
    seconds = hours * 60 * 60
    return seconds
# Originally from the Correct-Syntax/wgpu-py repository.
"""
WGPU backend implementation based on wgpu-native
The wgpu-native project (https://github.com/gfx-rs/wgpu) is a Rust library
based on gfx-hal, which wraps Metal, Vulkan, DX12 and more in the
future. It can compile into a dynamic library exposing a C-API,
accompanied by a C header file. We wrap this using cffi, which uses the
header file to do most type conversions for us.
Developer notes and tips:
* The purpose of this module is to tie our Pythonic API, which closely
resembles the WebGPU spec, to the C API of wgpu-native.
* Most of it is converting dicts to ffi structs. You may think that
this can be automated, and this would indeed be possible for 80-90%
of the methods. However, the API's do not always line up, and there's
async stuff to take into account too. Therefore we do it manually.
In the end, I think that this will make the code easier to maintain.
* Run the wgpu.help() call that is listed above each API method. This
will usually give you all info that you need from webgpu.idl and
wgpu.h.
* Though sometimes you may also need webgpu.idl and wgpu.h from the
resource dir as a reference.
* Use new_struct() to create a C structure with minimal boilerplate.
It also converts string enum values to their corresponding integers.
* When we update the upstream webgpu.idl or wgpu.h, the
codegen-script.py should be run. This will update base.py and this
module. Then run git diff to see what changed in webgpu.idl, wgpu.h
and in this file, and make adjustments as needed.
"""
import os
import sys
import ctypes
import logging
import ctypes.util
from weakref import WeakKeyDictionary
from cffi import FFI, __version_info__ as cffi_version_info
from .. import base, flags, _structs
from .. import _register_backend
from .._coreutils import get_resource_filename, logger_set_level_callbacks
from .._mappings import cstructfield2enum, enummap
logger = logging.getLogger("wgpu") # noqa
# wgpu-native version that we target/expect. A mismatch with the loaded
# library only produces a warning (see the check after dlopen below).
__version__ = "0.5.2"
__commit_sha__ = "160be433dbec0fc7a27d25f2aba3423666ccfa10"
version_info = tuple(map(int, __version__.split(".")))
# cffi >= 1.10 is required for the cdef/dlopen usage in this module.
if cffi_version_info < (1, 10):  # no-cover
    raise ImportError(f"{__name__} needs cffi 1.10 or later.")
# %% Load the lib and integrate logging system
def _get_wgpu_h():
    """Return the contents of wgpu.h with lines cffi cannot parse removed.

    cffi stumbles on preprocessor includes, conditionals and a handful of
    macro defines, so those lines are filtered out before the header text
    is handed to ffi.cdef().
    """
    skip_prefixes = (
        "#include ",
        "#define WGPU_LOCAL",
        "#define WGPUColor",
        "#define WGPUOrigin3d_ZERO",
        "#if defined",
        "#endif",
    )
    with open(get_resource_filename("wgpu.h")) as f:
        kept = [ln for ln in f.readlines() if not ln.startswith(skip_prefixes)]
    return "".join(kept)
def _get_wgpu_lib_path():
    """Get the path to the wgpu library, taking into account the
    WGPU_LIB_PATH environment variable.
    """
    # An explicit override always wins (or fails trying).
    override_path = os.getenv("WGPU_LIB_PATH", "").strip()
    if override_path:
        return override_path
    # Load the debug binary if requested.
    build = "debug" if os.getenv("WGPU_DEBUG", "").strip() == "1" else "release"
    # Pick the library file name for the current platform.
    platform_to_filename = (
        ("win", f"wgpu_native-{build}.dll"),
        ("darwin", f"libwgpu_native-{build}.dylib"),
        ("linux", f"libwgpu_native-{build}.so"),
    )
    for prefix, filename in platform_to_filename:  # no-cover
        if sys.platform.startswith(prefix):
            lib_filename = filename
            break
    else:  # no-cover
        raise RuntimeError(
            f"No WGPU library shipped for platform {sys.platform}. Set WGPU_LIB_PATH instead."
        )
    # Note that this can be a false positive, e.g. ARM linux.
    embedded_path = get_resource_filename(lib_filename)
    if os.path.isfile(embedded_path):
        return embedded_path
    raise RuntimeError(f"Could not find WGPU library in {embedded_path}")  # no-cover
# Configure cffi and load the dynamic library
# NOTE: `import wgpu.backends.rs` is used in pyinstaller tests to verify
# that we can load the DLL after freezing
ffi = FFI()
ffi.cdef(_get_wgpu_h())  # teach cffi the declarations from the shipped wgpu.h
ffi.set_source("wgpu.h", None)
_lib = ffi.dlopen(_get_wgpu_lib_path())  # load the wgpu-native shared library
# Get the actual wgpu-native version (packed as three bytes: major.minor.patch)
# and warn if it differs from the version this module targets.
_version_int = _lib.wgpu_get_version()
version_info_lib = tuple((_version_int >> bits) & 0xFF for bits in (16, 8, 0))
if version_info_lib != version_info:  # no-cover
    logger.warning(
        f"Expected wgpu-native version {version_info} but got {version_info_lib}"
    )
@ffi.callback("void(int level, const char *)")
def _logger_callback(level, c_msg):
    """Called when Rust emits a log message; forwards it to Python logging."""
    msg = ffi.string(c_msg).decode(errors="ignore")  # make a copy
    # todo: We currently skip some false negatives to avoid spam.
    false_negatives = (
        "Unknown decoration",
        "Failed to parse shader",
        "Shader module will not be validated",
    )
    if msg.startswith(false_negatives):
        return
    # Map the wgpu-native log level onto the matching Python logger method;
    # unknown levels fall back to warning.
    m = {
        _lib.WGPULogLevel_Error: logger.error,
        _lib.WGPULogLevel_Warn: logger.warning,
        _lib.WGPULogLevel_Info: logger.info,
        _lib.WGPULogLevel_Debug: logger.debug,
        _lib.WGPULogLevel_Trace: logger.debug,
    }
    func = m.get(level, logger.warning)
    func(msg)
def _logger_set_level_callback(level):
    """Called when the log level is set from Python.

    Translates a Python logging level (int) into the closest wgpu-native
    log level; anything below 5 turns Rust-side logging off.
    """
    thresholds = (
        (40, _lib.WGPULogLevel_Error),
        (30, _lib.WGPULogLevel_Warn),
        (20, _lib.WGPULogLevel_Info),
        (10, _lib.WGPULogLevel_Debug),
        (5, _lib.WGPULogLevel_Trace),  # extra level
    )
    for min_level, rust_level in thresholds:
        if level >= min_level:
            _lib.wgpu_set_log_level(rust_level)
            return
    _lib.wgpu_set_log_level(_lib.WGPULogLevel_Off)
# Connect Rust logging with Python logging
_lib.wgpu_set_log_callback(_logger_callback)
logger_set_level_callbacks.append(_logger_set_level_callback)
_logger_set_level_callback(logger.level)  # sync the initial level
# %% Helper functions and objects
# Object to be able to bind the lifetime of objects to other objects
_refs_per_struct = WeakKeyDictionary()
# Some enum keys need a shortcut: the field name alone does not identify
# the enum type, so map these fields directly to their enum name
# (see _new_struct_p).
_cstructfield2enum_alt = {
    "load_op": "LoadOp",
    "store_op": "StoreOp",
    "depth_store_op": "StoreOp",
    "stencil_store_op": "StoreOp",
}
def new_struct_p(ctype, **kwargs):
    """Create a pointer to an ffi struct.

    Provides a flatter syntax and converts our string enums to the int
    enums needed in C. The passed kwargs are also bound to the lifetime
    of the new struct: some kwargs may be other ffi objects or represent
    pointers, and "being in" the struct does not stop Python's garbage
    collector from cleaning them up. Keeping hold of those objects in the
    calling code is painful and prone to missing cases, so we pin them
    here via a global WeakKeyDictionary (we cannot attach an attribute to
    the struct directly). Also see issue #52.
    """
    assert ctype.endswith(" *")
    pointer = _new_struct_p(ctype, **kwargs)
    _refs_per_struct[pointer] = kwargs
    return pointer
def new_struct(ctype, **kwargs):
    """Create an ffi value struct.

    The passed kwargs are bound to the lifetime of the new struct; see
    new_struct_p for why that is necessary.
    """
    assert not ctype.endswith("*")
    pointer = _new_struct_p(ctype + " *", **kwargs)
    value = pointer[0]
    _refs_per_struct[value] = kwargs
    return value
def _new_struct_p(ctype, **kwargs):
    """Allocate the struct and fill its fields from kwargs.

    A string value assigned to an int field is treated as an enum name and
    converted to its integer value via the generated enum mappings.
    """
    struct_p = ffi.new(ctype)
    for key, val in kwargs.items():
        if isinstance(val, str) and isinstance(getattr(struct_p, key), int):
            if key in _cstructfield2enum_alt:
                # Ambiguous field names (e.g. "load_op") map straight to an enum
                structname = _cstructfield2enum_alt[key]
            else:
                # Lookup key is "StructName.field" with the "WGPU" prefix stripped
                structname = cstructfield2enum[ctype.strip(" *")[4:] + "." + key]
            ival = enummap[structname + "." + val]
            setattr(struct_p, key, ival)
        else:
            setattr(struct_p, key, val)
    return struct_p
def get_surface_id_from_canvas(canvas):
    """Get an id representing the surface to render to. The way to
    obtain this id differs per platform and GUI toolkit.

    The canvas must provide get_window_id() (and, on Linux,
    get_display_id()); the returned value is a wgpu-native surface id.
    """
    win_id = canvas.get_window_id()
    if sys.platform.startswith("win"):  # no-cover
        # wgpu_create_surface_from_windows_hwnd(void *_hinstance, void *hwnd)
        hwnd = ffi.cast("void *", int(win_id))
        hinstance = ffi.NULL
        return _lib.wgpu_create_surface_from_windows_hwnd(hinstance, hwnd)
    elif sys.platform.startswith("darwin"):  # no-cover
        # wgpu_create_surface_from_metal_layer(void *layer)
        # This is what the triangle example from wgpu-native does:
        # #if WGPU_TARGET == WGPU_TARGET_MACOS
        #     {
        #         id metal_layer = NULL;
        #         NSWindow *ns_window = glfwGetCocoaWindow(window);
        #         [ns_window.contentView setWantsLayer:YES];
        #         metal_layer = [CAMetalLayer layer];
        #         [ns_window.contentView setLayer:metal_layer];
        #         surface = wgpu_create_surface_from_metal_layer(metal_layer);
        #     }
        window = ctypes.c_void_p(win_id)
        # Use the Objective-C runtime directly to attach a CAMetalLayer
        # to the window's content view.
        objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("objc"))
        objc.objc_getClass.restype = ctypes.c_void_p
        objc.sel_registerName.restype = ctypes.c_void_p
        objc.objc_msgSend.restype = ctypes.c_void_p
        objc.objc_msgSend.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
        content_view_sel = objc.sel_registerName(b"contentView")
        set_wants_layer_sel = objc.sel_registerName(b"setWantsLayer:")
        responds_to_sel_sel = objc.sel_registerName(b"respondsToSelector:")
        layer_sel = objc.sel_registerName(b"layer")
        set_layer_sel = objc.sel_registerName(b"setLayer:")
        # Try some duck typing to see what kind of object the window pointer points to
        # Qt doesn't return a NSWindow, but a QNSView instead, which is subclass of NSView.
        if objc.objc_msgSend(
            window, responds_to_sel_sel, ctypes.c_void_p(content_view_sel)
        ):
            # NSWindow instances respond to contentView selector
            content_view = objc.objc_msgSend(window, content_view_sel)
        elif objc.objc_msgSend(window, responds_to_sel_sel, ctypes.c_void_p(layer_sel)):
            # NSView instances respond to layer selector
            # Let's assume that the given window pointer is actually the content view
            content_view = window
        else:
            # If the code reaches this part, we know that `window` is an
            # objective-c object but the type is neither NSView or NSWindow.
            raise RuntimeError("Received unidentified objective-c object.")
        # [ns_window.contentView setWantsLayer:YES]
        objc.objc_msgSend(content_view, set_wants_layer_sel, True)
        # metal_layer = [CAMetalLayer layer];
        ca_metal_layer_class = objc.objc_getClass(b"CAMetalLayer")
        metal_layer = objc.objc_msgSend(ca_metal_layer_class, layer_sel)
        # [ns_window.content_view setLayer:metal_layer];
        objc.objc_msgSend(content_view, set_layer_sel, ctypes.c_void_p(metal_layer))
        metal_layer_ffi_pointer = ffi.cast("void *", metal_layer)
        return _lib.wgpu_create_surface_from_metal_layer(metal_layer_ffi_pointer)
    elif sys.platform.startswith("linux"):  # no-cover
        # wgpu_create_surface_from_wayland(void *surface, void *display)
        # wgpu_create_surface_from_xlib(const void **display, uint64_t window)
        display_id = canvas.get_display_id()
        is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower()
        if is_wayland:
            # todo: works, but have not yet been able to test drawing to the window
            surface = ffi.cast("void *", win_id)
            display = ffi.cast("void *", display_id)
            return _lib.wgpu_create_surface_from_wayland(surface, display)
        else:
            display = ffi.cast("void **", display_id)
            return _lib.wgpu_create_surface_from_xlib(display, win_id)
    else:  # no-cover
        raise RuntimeError("Cannot get surface id: unsupported platform.")
def _tuple_from_tuple_or_dict(ob, fields):
"""Given a tuple/list/dict, return a tuple. Also checks tuple size.
>> # E.g.
>> _tuple_from_tuple_or_dict({"x": 1, "y": 2}, ("x", "y"))
(1, 2)
>> _tuple_from_tuple_or_dict([1, 2], ("x", "y"))
(1, 2)
"""
error_msg = "Expected tuple/key/dict with fields: {}"
if isinstance(ob, (list, tuple)):
if len(ob) != len(fields):
raise ValueError(error_msg.format(", ".join(fields)))
return tuple(ob)
elif isinstance(ob, dict):
try:
return tuple(ob[key] for key in fields)
except KeyError:
raise ValueError(error_msg.format(", ".join(fields)))
else:
raise TypeError(error_msg.format(", ".join(fields)))
def _loadop_and_clear_from_value(value):
"""In WebGPU, the load op, can be given either as "load" or a value.
The latter translates to "clear" plus that value in wgpu-native.
The value can be float/int/color, but we don't deal with that here.
"""
if isinstance(value, str):
assert value == "load"
return 1, 0 # WGPULoadOp_Load and a stub value
else:
return 0, value # WGPULoadOp_Clear and the value
def _get_memoryview_and_address(data):
    """Get a memoryview for the given data and its memory address.

    The data object must support the buffer protocol.
    Returns a (memoryview, int address) tuple; the memoryview is cast to
    a flat unsigned-byte view.
    """
    # To get the address from a memoryview, there are multiple options.
    # The most obvious is using ctypes:
    #
    #   c_array = (ctypes.c_uint8 * nbytes).from_buffer(m)
    #   address = ctypes.addressof(c_array)
    #
    # Unfortunately, this call fails if the memoryview is readonly, e.g. if
    # the data is a bytes object or readonly numpy array. One could then
    # use from_buffer_copy(), but that introduces an extra data copy, which
    # can hurt performance when the data is large.
    #
    # Another alternative that can be used for objects implementing the array
    # interface (like numpy arrays) is to directly read the address:
    #
    #   address = data.__array_interface__["data"][0]
    #
    # But what seems to work best (at the moment) is using cffi.
    # Convert data to a memoryview. That way we have something consistent
    # to work with, which supports all objects implementing the buffer protocol.
    m = memoryview(data)
    # Get the address via ffi. In contrast to ctypes, this also
    # works for readonly data (e.g. bytes)
    c_data = ffi.from_buffer("uint8_t []", m)
    address = int(ffi.cast("uintptr_t", c_data))
    return m, address
def _get_memoryview_from_address(address, nbytes, format="B"):
"""Get a memoryview from an int memory address and a byte count,"""
# The default format is "<B", which seems to confuse some memoryview
# operations, so we always cast it.
c_array = (ctypes.c_uint8 * nbytes).from_address(address)
return memoryview(c_array).cast(format, shape=(nbytes,))
def _check_struct(what, d):
    """Raise ValueError if dict *d* contains keys that are not fields of
    the struct named *what* (catches typos and api changes early).
    """
    ref_fields = getattr(_structs, what).keys()
    unexpected = set(d.keys()).difference(ref_fields)
    if unexpected:
        s1 = ", ".join(unexpected)
        s2 = ", ".join(ref_fields)
        raise ValueError(f"Unexpected keys: {s1}.\n -> for {what}: {s2}.")
# %% The API
# wgpu.help('RequestAdapterOptions', 'requestadapter', dev=True)
def request_adapter(*, canvas, power_preference: "GPUPowerPreference"):
    """Get a :class:`GPUAdapter`, the object that represents an abstract wgpu
    implementation, from which one can request a :class:`GPUDevice`.

    This is the implementation based on the Rust wgpu-native library.

    Arguments:
        canvas (WgpuCanvas): The canvas that the adapter should be able to
            render to (to create a swap chain for, to be precise). Can be None
            if you're not rendering to screen (or if you're confident that the
            returned adapter will work just fine).
        powerPreference(PowerPreference): "high-performance" or "low-power"
    """
    # Get surface id that the adapter must be compatible with. If we
    # don't pass a valid surface id, there is no guarantee we'll be
    # able to create a swapchain for it (from this adapter).
    if canvas is None:
        surface_id = 0
    else:
        surface_id = get_surface_id_from_canvas(canvas)
    # Convert the descriptor
    struct = new_struct_p(
        "WGPURequestAdapterOptions *",
        power_preference=power_preference,
        compatible_surface=surface_id,
    )
    # Select possible backends. This is not exposed in the WebGPU API
    # 1 => Backend::Empty,
    # 2 => Backend::Vulkan,
    # 4 => Backend::Metal,
    # 8 => Backend::Dx12,  (buggy)
    # 16 => Backend::Dx11,  (not implemented yet)
    # 32 => Backend::Gl,  (not implemented yet)
    backend_mask = 2 | 4  # Vulkan or Metal
    # Do the API call and get the adapter id; the callback stores the id
    # in the enclosing scope via `nonlocal`.
    adapter_id = None

    @ffi.callback("void(unsigned long long, void *)")
    def _request_adapter_callback(received, userdata):
        nonlocal adapter_id
        adapter_id = received

    _lib.wgpu_request_adapter_async(
        struct, backend_mask, _request_adapter_callback, ffi.NULL
    )  # userdata, stub
    # For now, Rust will call the callback immediately
    # todo: when wgpu gets an event loop -> while run wgpu event loop or something
    assert adapter_id is not None
    extensions = []
    return GPUAdapter("WGPU", extensions, adapter_id)
# wgpu.help('RequestAdapterOptions', 'requestadapter', dev=True)
async def request_adapter_async(*, canvas, power_preference: "GPUPowerPreference"):
    """Async version of ``request_adapter()``.
    This function uses the Rust WGPU library.

    Note: currently performs the request synchronously (wgpu-native calls
    the callback immediately), so this coroutine never actually awaits.
    """
    return request_adapter(canvas=canvas, power_preference=power_preference)  # no-cover
# Mark as the backend at import time: importing this module registers these
# two functions as the active wgpu backend.
_register_backend(request_adapter, request_adapter_async)
class GPUAdapter(base.GPUAdapter):
    """Adapter implementation wrapping an opaque wgpu-native adapter id."""

    def __init__(self, name, extensions, id):
        super().__init__(name, extensions)
        self._id = id  # the wgpu-native adapter handle

    # wgpu.help('DeviceDescriptor', 'adapterrequestdevice', dev=True)
    def request_device(
        self,
        *,
        label="",
        extensions: "GPUExtensionName-list" = [],
        limits: "GPULimits" = {},
    ):
        """Request a GPUDevice from this adapter (without command tracing)."""
        return self._request_device(label, extensions, limits, "")

    def request_device_tracing(
        self,
        trace_path,
        *,
        label="",
        extensions: "GPUExtensionName-list" = [],
        limits: "GPULimits" = {},
    ):
        """Write a trace of all commands to a file so it can be reproduced
        elsewhere. The trace is cross-platform!
        """
        if not os.path.isdir(trace_path):
            os.makedirs(trace_path, exist_ok=True)
        elif os.listdir(trace_path):
            logger.warning(f"Trace directory not empty: {trace_path}")
        return self._request_device(label, extensions, limits, trace_path)

    def _request_device(self, label, extensions, limits, trace_path):
        """Create the device; an empty trace_path disables tracing."""
        c_trace_path = ffi.NULL
        if trace_path:  # no-cover
            c_trace_path = ffi.new("char []", trace_path.encode())
        # Handle default limits: user-supplied values override the defaults.
        _check_struct("Limits", limits)
        limits2 = base.default_limits.copy()
        limits2.update(limits or {})
        c_extensions = new_struct(
            "WGPUExtensions",
            anisotropic_filtering="anisotropic_filtering" in extensions,
        )
        c_limits = new_struct("WGPULimits", max_bind_groups=limits2["max_bind_groups"])
        struct = new_struct_p(
            "WGPUDeviceDescriptor *", extensions=c_extensions, limits=c_limits
        )
        device_id = _lib.wgpu_adapter_request_device(self._id, struct, c_trace_path)
        # Get the actual limits reported by the device
        c_limits = new_struct_p("WGPULimits *")
        _lib.wgpu_device_get_limits(device_id, c_limits)
        limits3 = {key: getattr(c_limits, key) for key in dir(c_limits)}
        # Get the queue to which commands can be submitted
        queue_id = _lib.wgpu_device_get_default_queue(device_id)
        queue = GPUQueue("", queue_id, None)
        return GPUDevice(label, device_id, self, extensions, limits3, queue)

    # wgpu.help('DeviceDescriptor', 'adapterrequestdevice', dev=True)
    async def request_device_async(
        self,
        *,
        label="",
        extensions: "GPUExtensionName-list" = [],
        limits: "GPULimits" = {},
    ):
        """Async variant; currently performs the request synchronously."""
        return self._request_device(label, extensions, limits, "")  # no-cover

    def _destroy(self):
        # Release the native adapter exactly once; subsequent calls are no-ops.
        if self._id is not None:
            self._id, id = None, self._id
            _lib.wgpu_adapter_destroy(id)
class GPUDevice(base.GPUDevice):
    # wgpu.help('BufferDescriptor', 'devicecreatebuffer', dev=True)
    def create_buffer(
        self,
        *,
        label="",
        size: int,
        usage: "GPUBufferUsageFlags",
        mapped_at_creation: bool = False,
    ):
        """Create a GPUBuffer of ``size`` bytes with the given usage flags.

        ``mapped_at_creation`` is not supported by this backend; use
        ``create_buffer_with_data()`` to create an initialized buffer.
        """
        size = int(size)
        if mapped_at_creation:
            raise ValueError(
                "In wgpu-py, mapped_at_creation must be False. Use create_buffer_with_data() instead."
            )
        # Create a buffer object
        c_label = ffi.new("char []", label.encode())
        struct = new_struct_p(
            "WGPUBufferDescriptor *", label=c_label, size=size, usage=usage
        )
        id = _lib.wgpu_device_create_buffer(self._internal, struct)
        # Return wrapped buffer
        return GPUBuffer(label, id, self, size, usage, "unmapped")
    def create_buffer_with_data(self, *, label="", data, usage: "GPUBufferUsageFlags"):
        """Create a GPUBuffer initialized with ``data``.

        ``data`` must support the buffer protocol and be contiguous. The
        bytes are copied into the buffer's mapped memory, then the buffer
        is unmapped and returned in the "unmapped" state.
        """
        # Get a memoryview of the data
        m, src_address = _get_memoryview_and_address(data)
        if not m.contiguous:  # no-cover
            raise ValueError("The given texture data is not contiguous")
        m = m.cast("B", shape=(m.nbytes,))
        # Create a buffer object, and get a memory pointer to its mapped memory
        c_label = ffi.new("char []", label.encode())
        struct = new_struct_p(
            "WGPUBufferDescriptor *", label=c_label, size=m.nbytes, usage=usage
        )
        buffer_memory_pointer = ffi.new("uint8_t * *")
        id = _lib.wgpu_device_create_buffer_mapped(
            self._internal, struct, buffer_memory_pointer
        )
        # Copy the data to the mapped memory
        dst_address = int(ffi.cast("intptr_t", buffer_memory_pointer[0]))
        dst_m = _get_memoryview_from_address(dst_address, m.nbytes)
        dst_m[:] = m  # nicer than ctypes.memmove(dst_address, src_address, m.nbytes)
        _lib.wgpu_buffer_unmap(id)
        # Return the wrapped buffer
        return GPUBuffer(label, id, self, m.nbytes, usage, "unmapped")
    # wgpu.help('TextureDescriptor', 'devicecreatetexture', dev=True)
    def create_texture(
        self,
        *,
        label="",
        size: "GPUExtent3D",
        mip_level_count: "GPUIntegerCoordinate" = 1,
        sample_count: "GPUSize32" = 1,
        dimension: "GPUTextureDimension" = "2d",
        format: "GPUTextureFormat",
        usage: "GPUTextureUsageFlags",
    ):
        """Create a GPUTexture.

        ``size`` is a (width, height, depth) tuple/list or a dict with those
        keys. The descriptor values are also kept on the Python wrapper
        (``tex_info``) for later inspection.
        """
        c_label = ffi.new("char []", label.encode())
        size = _tuple_from_tuple_or_dict(size, ("width", "height", "depth"))
        c_size = new_struct(
            "WGPUExtent3d",
            width=size[0],
            height=size[1],
            depth=size[2],
        )
        struct = new_struct_p(
            "WGPUTextureDescriptor *",
            label=c_label,
            size=c_size,
            mip_level_count=mip_level_count,
            sample_count=sample_count,
            dimension=dimension,
            format=format,
            usage=usage,
        )
        id = _lib.wgpu_device_create_texture(self._internal, struct)
        tex_info = {
            "size": size,
            "mip_level_count": mip_level_count,
            "sample_count": sample_count,
            "dimension": dimension,
            "format": format,
            "usage": usage,
        }
        return GPUTexture(label, id, self, tex_info)
# wgpu.help('SamplerDescriptor', 'devicecreatesampler', dev=True)
def create_sampler(
self,
*,
label="",
address_mode_u: "GPUAddressMode" = "clamp-to-edge",
address_mode_v: "GPUAddressMode" = "clamp-to-edge",
address_mode_w: "GPUAddressMode" = "clamp-to-edge",
mag_filter: "GPUFilterMode" = "nearest",
min_filter: "GPUFilterMode" = "nearest",
mipmap_filter: "GPUFilterMode" = "nearest",
lod_min_clamp: float = 0,
lod_max_clamp: float = 0xFFFFFFFF,
compare: "GPUCompareFunction" = None,
):
c_label = ffi.new("char []", label.encode())
struct = new_struct_p(
"WGPUSamplerDescriptor *",
label=c_label,
address_mode_u=address_mode_u,
address_mode_v=address_mode_v,
mag_filter=mag_filter,
min_filter=min_filter,
mipmap_filter=mipmap_filter,
lod_min_clamp=lod_min_clamp,
lod_max_clamp=lod_max_clamp,
compare=0 if compare is None else compare,
)
id = _lib.wgpu_device_create_sampler(self._internal, struct)
return base.GPUSampler(label, id, self)
    # wgpu.help('BindGroupLayoutDescriptor', 'devicecreatebindgrouplayout', dev=True)
    def create_bind_group_layout(
        self, *, label="", entries: "GPUBindGroupLayoutEntry-list"
    ):
        """Create a GPUBindGroupLayout from a list of entry dicts.

        Texture-type entries must additionally specify ``view_dimension``
        plus ``storage_texture_format`` (storage textures) or
        ``texture_component_type`` (sampled textures).
        """
        c_entries_list = []
        for entry in entries:
            _check_struct("BindGroupLayoutEntry", entry)
            type = entry["type"]
            if "texture" in type:
                # Fail early if the required texture fields are missing.
                need = {"view_dimension"}
                if "storage" in type:
                    need.add("storage_texture_format")
                else:
                    need.add("texture_component_type")
                assert all(
                    x in entry for x in need
                ), f"{type} binding should specify {need}"
            c_entry = new_struct(
                "WGPUBindGroupLayoutEntry",
                binding=int(entry["binding"]),
                visibility=int(entry["visibility"]),
                ty=type,
                # Used for uniform buffer and storage buffer bindings.
                has_dynamic_offset=bool(entry.get("has_dynamic_offset", False)),
                # Used for sampled texture and storage texture bindings.
                view_dimension=entry.get("view_dimension", "2d"),
                # Used for sampled texture bindings.
                texture_component_type=entry.get("texture_component_type", "float"),
                # Used for sampled texture bindings.
                multisampled=bool(entry.get("multisampled", False)),
                # Used for storage texture bindings.
                storage_texture_format=entry.get("storage_texture_format", 0),
            )
            c_entries_list.append(c_entry)
        c_label = ffi.new("char []", label.encode())
        struct = new_struct_p(
            "WGPUBindGroupLayoutDescriptor *",
            label=c_label,
            entries=ffi.new("WGPUBindGroupLayoutEntry []", c_entries_list),
            entries_length=len(c_entries_list),
        )
        id = _lib.wgpu_device_create_bind_group_layout(self._internal, struct)
        return base.GPUBindGroupLayout(label, id, self, entries)
    # wgpu.help('BindGroupDescriptor', 'devicecreatebindgroup', dev=True)
    def create_bind_group(
        self,
        *,
        label="",
        layout: "GPUBindGroupLayout",
        entries: "GPUBindGroupEntry-list",
    ):
        """Create a GPUBindGroup binding concrete resources to a layout.

        Each entry's ``resource`` may be a GPUSampler, a GPUTextureView, or
        a buffer-binding dict with ``buffer``, ``offset`` and ``size`` keys.
        """
        c_entries_list = []
        for entry in entries:
            _check_struct("BindGroupEntry", entry)
            # The resource can be a sampler, texture view, or buffer descriptor
            resource = entry["resource"]
            if isinstance(resource, base.GPUSampler):
                c_resource_kwargs = {
                    "tag": 1,  # WGPUBindingResource_Tag.WGPUBindingResource_Sampler
                    "sampler": new_struct(
                        "WGPUBindingResource_WGPUSampler_Body", _0=resource._internal
                    ),
                }
            elif isinstance(resource, base.GPUTextureView):
                c_resource_kwargs = {
                    "tag": 2,  # WGPUBindingResource_Tag.WGPUBindingResource_TextureView
                    "texture_view": new_struct(
                        "WGPUBindingResource_WGPUTextureView_Body",
                        _0=resource._internal,
                    ),
                }
            elif isinstance(resource, dict):  # Buffer binding
                _check_struct("BufferBinding", resource)
                c_buffer_entry = new_struct(
                    "WGPUBufferBinding",
                    buffer=resource["buffer"]._internal,
                    offset=resource["offset"],
                    size=resource["size"],
                )
                c_resource_kwargs = {
                    "tag": 0,  # WGPUBindingResource_Tag.WGPUBindingResource_Buffer
                    "buffer": new_struct(
                        "WGPUBindingResource_WGPUBuffer_Body", _0=c_buffer_entry
                    ),
                }
            else:
                raise TypeError(f"Unexpected resource type {type(resource)}")
            c_resource = new_struct("WGPUBindingResource", **c_resource_kwargs)
            c_entry = new_struct(
                "WGPUBindGroupEntry",
                binding=int(entry["binding"]),
                resource=c_resource,
            )
            c_entries_list.append(c_entry)
        c_label = ffi.new("char []", label.encode())
        c_entries_array = ffi.new("WGPUBindGroupEntry []", c_entries_list)
        struct = new_struct_p(
            "WGPUBindGroupDescriptor *",
            label=c_label,
            layout=layout._internal,
            entries=c_entries_array,
            entries_length=len(c_entries_list),
        )
        id = _lib.wgpu_device_create_bind_group(self._internal, struct)
        return GPUBindGroup(label, id, self, entries)
    # wgpu.help('PipelineLayoutDescriptor', 'devicecreatepipelinelayout', dev=True)
    def create_pipeline_layout(
        self, *, label="", bind_group_layouts: "GPUBindGroupLayout-list"
    ):
        """Create a GPUPipelineLayout from a list of bind group layouts.

        NOTE(review): ``label`` is stored on the Python wrapper only; it is
        not passed into the C descriptor — confirm whether the descriptor
        supports a label field.
        """
        bind_group_layouts_ids = [x._internal for x in bind_group_layouts]
        c_layout_array = ffi.new("WGPUBindGroupLayoutId []", bind_group_layouts_ids)
        struct = new_struct_p(
            "WGPUPipelineLayoutDescriptor *",
            bind_group_layouts=c_layout_array,
            bind_group_layouts_length=len(bind_group_layouts),
        )
        id = _lib.wgpu_device_create_pipeline_layout(self._internal, struct)
        return GPUPipelineLayout(label, id, self, bind_group_layouts)
# wgpu.help('ShaderModuleDescriptor', 'devicecreateshadermodule', dev=True)
def create_shader_module(self, *, label="", code: str, source_map: "dict" = None):
if isinstance(code, bytes):
data = code # Assume it's Spirv
elif hasattr(code, "to_bytes"):
data = code.to_bytes()
elif hasattr(code, "to_spirv"):
data = code.to_spirv()
else:
raise TypeError("Need bytes or ob with ob.to_spirv() for shader.")
magic_nr = b"\x03\x02#\x07" # 0x7230203
if data[:4] != magic_nr:
raise ValueError("Given shader data does not look like a SpirV module")
# From bytes to WGPUU32Array
data_u8 = ffi.new("uint8_t[]", data)
data_u32 = ffi.cast("uint32_t *", data_u8)
c_code = ffi.new(
"WGPUU32Array *", {"bytes": data_u32, "length": len(data) // 4}
)[0]
struct = new_struct_p("WGPUShaderModuleDescriptor *", code=c_code)
id = _lib.wgpu_device_create_shader_module(self._internal, struct)
return GPUShaderModule(label, id, self)
    # wgpu.help('ComputePipelineDescriptor', 'devicecreatecomputepipeline', dev=True)
    def create_compute_pipeline(
        self,
        *,
        label="",
        layout: "GPUPipelineLayout" = None,
        compute_stage: "GPUProgrammableStageDescriptor",
    ):
        """Create a compute pipeline from a pipeline layout and a compute
        stage (a dict with a "module" shader and an "entry_point" name).

        NOTE(review): layout defaults to None, but layout._internal is
        accessed unconditionally below - a layout appears to be required.
        """
        _check_struct("ProgrammableStageDescriptor", compute_stage)
        c_compute_stage = new_struct(
            "WGPUProgrammableStageDescriptor",
            module=compute_stage["module"]._internal,
            entry_point=ffi.new("char []", compute_stage["entry_point"].encode()),
        )
        struct = new_struct_p(
            "WGPUComputePipelineDescriptor *",
            layout=layout._internal,
            compute_stage=c_compute_stage,
        )
        id = _lib.wgpu_device_create_compute_pipeline(self._internal, struct)
        return GPUComputePipeline(label, id, self, layout)
    # wgpu.help('RenderPipelineDescriptor', 'devicecreaterenderpipeline', dev=True)
    def create_render_pipeline(
        self,
        *,
        label="",
        layout: "GPUPipelineLayout" = None,
        vertex_stage: "GPUProgrammableStageDescriptor",
        fragment_stage: "GPUProgrammableStageDescriptor" = None,
        primitive_topology: "GPUPrimitiveTopology",
        rasterization_state: "GPURasterizationStateDescriptor" = {},
        color_states: "GPUColorStateDescriptor-list",
        depth_stencil_state: "GPUDepthStencilStateDescriptor" = None,
        vertex_state: "GPUVertexStateDescriptor" = {},
        sample_count: "GPUSize32" = 1,
        sample_mask: "GPUSampleMask" = 0xFFFFFFFF,
        alpha_to_coverage_enabled: bool = False,
    ):
        """Create a render pipeline from shader stages plus fixed-function
        state (rasterization, blending, depth/stencil, vertex layout).

        NOTE(review): the mutable dict defaults for rasterization_state and
        vertex_state are only read via .get()/[] below, so they are not
        mutated - but they are an anti-pattern; consider None + fallback.
        NOTE(review): layout defaults to None, yet layout._internal is
        accessed unconditionally at the end - a layout appears required.
        """
        _check_struct("ProgrammableStageDescriptor", vertex_stage)
        _check_struct("RasterizationStateDescriptor", rasterization_state)
        _check_struct("VertexStateDescriptor", vertex_state)
        # --- Shader stages: vertex is mandatory, fragment is optional ---
        c_vertex_stage = new_struct(
            "WGPUProgrammableStageDescriptor",
            module=vertex_stage["module"]._internal,
            entry_point=ffi.new("char []", vertex_stage["entry_point"].encode()),
        )
        c_fragment_stage = ffi.NULL
        if fragment_stage is not None:
            _check_struct("ProgrammableStageDescriptor", fragment_stage)
            c_fragment_stage = new_struct_p(
                "WGPUProgrammableStageDescriptor *",
                module=fragment_stage["module"]._internal,
                entry_point=ffi.new("char []", fragment_stage["entry_point"].encode()),
            )
        # --- Rasterization state (all fields have defaults) ---
        c_rasterization_state = new_struct_p(
            "WGPURasterizationStateDescriptor *",
            front_face=rasterization_state.get("front_face", "ccw"),
            cull_mode=rasterization_state.get("cull_mode", "none"),
            depth_bias=rasterization_state.get("depth_bias", 0),
            depth_bias_slope_scale=rasterization_state.get("depth_bias_slope_scale", 0),
            depth_bias_clamp=rasterization_state.get("depth_bias_clamp", 0),
        )
        # --- Color states: one blend descriptor pair per render target ---
        c_color_states_list = []
        for color_state in color_states:
            _check_struct("ColorStateDescriptor", color_state)
            alpha_blend = _tuple_from_tuple_or_dict(
                color_state["alpha_blend"],
                ("src_factor", "dst_factor", "operation"),
            )
            c_alpha_blend = new_struct(
                "WGPUBlendDescriptor",
                src_factor=alpha_blend[0],
                dst_factor=alpha_blend[1],
                operation=alpha_blend[2],
            )
            color_blend = _tuple_from_tuple_or_dict(
                color_state["color_blend"],
                ("src_factor", "dst_factor", "operation"),
            )
            c_color_blend = new_struct(
                "WGPUBlendDescriptor",
                src_factor=color_blend[0],
                dst_factor=color_blend[1],
                operation=color_blend[2],
            )
            c_color_state = new_struct(
                "WGPUColorStateDescriptor",
                format=color_state["format"],
                alpha_blend=c_alpha_blend,
                color_blend=c_color_blend,
                write_mask=color_state.get("write_mask", 0xF),  # default: all rgba
            )
            c_color_states_list.append(c_color_state)
        c_color_states_array = ffi.new(
            "WGPUColorStateDescriptor []", c_color_states_list
        )
        # --- Depth/stencil state (optional) ---
        if depth_stencil_state is None:
            c_depth_stencil_state = ffi.NULL
        else:
            _check_struct("DepthStencilStateDescriptor", depth_stencil_state)
            stencil_front = depth_stencil_state.get("stencil_front", {})
            _check_struct("StencilStateFaceDescriptor", stencil_front)
            c_stencil_front = new_struct(
                "WGPUStencilStateFaceDescriptor",
                compare=stencil_front.get("compare", "always"),
                fail_op=stencil_front.get("fail_op", "keep"),
                depth_fail_op=stencil_front.get("depth_fail_op", "keep"),
                pass_op=stencil_front.get("pass_op", "keep"),
            )
            stencil_back = depth_stencil_state.get("stencil_back", {})
            c_stencil_back = new_struct(
                "WGPUStencilStateFaceDescriptor",
                compare=stencil_back.get("compare", "always"),
                fail_op=stencil_back.get("fail_op", "keep"),
                depth_fail_op=stencil_back.get("depth_fail_op", "keep"),
                pass_op=stencil_back.get("pass_op", "keep"),
            )
            c_depth_stencil_state = new_struct_p(
                "WGPUDepthStencilStateDescriptor *",
                format=depth_stencil_state["format"],
                depth_write_enabled=bool(
                    depth_stencil_state.get("depth_write_enabled", False)
                ),
                depth_compare=depth_stencil_state.get("depth_compare", "always"),
                stencil_front=c_stencil_front,
                stencil_back=c_stencil_back,
                stencil_read_mask=depth_stencil_state.get(
                    "stencil_read_mask", 0xFFFFFFFF
                ),
                stencil_write_mask=depth_stencil_state.get(
                    "stencil_write_mask", 0xFFFFFFFF
                ),
            )
        # --- Vertex buffer layouts and their attributes ---
        c_vertex_buffer_descriptors_list = []
        for buffer_des in vertex_state["vertex_buffers"]:
            _check_struct("VertexBufferLayoutDescriptor", buffer_des)
            c_attributes_list = []
            for attribute in buffer_des["attributes"]:
                _check_struct("VertexAttributeDescriptor", attribute)
                c_attribute = new_struct(
                    "WGPUVertexAttributeDescriptor",
                    format=attribute["format"],
                    offset=attribute["offset"],
                    shader_location=attribute["shader_location"],
                )
                c_attributes_list.append(c_attribute)
            c_attributes_array = ffi.new(
                "WGPUVertexAttributeDescriptor []", c_attributes_list
            )
            c_vertex_buffer_descriptor = new_struct(
                "WGPUVertexBufferLayoutDescriptor",
                array_stride=buffer_des["array_stride"],
                step_mode=buffer_des.get("step_mode", "vertex"),
                attributes=c_attributes_array,
                attributes_length=len(c_attributes_list),
            )
            c_vertex_buffer_descriptors_list.append(c_vertex_buffer_descriptor)
        c_vertex_buffer_descriptors_array = ffi.new(
            "WGPUVertexBufferLayoutDescriptor []", c_vertex_buffer_descriptors_list
        )
        c_vertex_state = new_struct(
            "WGPUVertexStateDescriptor",
            index_format=vertex_state.get("index_format", "uint32"),
            vertex_buffers=c_vertex_buffer_descriptors_array,
            vertex_buffers_length=len(c_vertex_buffer_descriptors_list),
        )
        # --- Top-level descriptor and the native call ---
        struct = new_struct_p(
            "WGPURenderPipelineDescriptor *",
            layout=layout._internal,
            vertex_stage=c_vertex_stage,
            fragment_stage=c_fragment_stage,
            primitive_topology=primitive_topology,
            rasterization_state=c_rasterization_state,
            color_states=c_color_states_array,
            color_states_length=len(c_color_states_list),
            depth_stencil_state=c_depth_stencil_state,
            vertex_state=c_vertex_state,
            sample_count=sample_count,
            sample_mask=sample_mask,
            alpha_to_coverage_enabled=alpha_to_coverage_enabled,
        )
        id = _lib.wgpu_device_create_render_pipeline(self._internal, struct)
        return GPURenderPipeline(label, id, self, layout)
# wgpu.help('CommandEncoderDescriptor', 'devicecreatecommandencoder', dev=True)
def create_command_encoder(self, *, label=""):
c_label = ffi.new("char []", label.encode())
struct = new_struct_p("WGPUCommandEncoderDescriptor *", label=c_label)
id = _lib.wgpu_device_create_command_encoder(self._internal, struct)
return GPUCommandEncoder(label, id, self)
# Not yet implemented in wgpu-native
# def create_render_bundle_encoder(
# self,
# *,
# label="",
# color_formats: "GPUTextureFormat-list",
# depth_stencil_format: "GPUTextureFormat" = None,
# sample_count: "GPUSize32" = 1,
# ):
# pass
def configure_swap_chain(self, canvas, format, usage=None):
usage = flags.TextureUsage.OUTPUT_ATTACHMENT if usage is None else usage
return GPUSwapChain(self, canvas, format, usage)
class GPUBuffer(base.GPUBuffer):
    """Buffer object on top of wgpu-native, with blocking read/write helpers."""

    # The WebGPU map/unmap API is not (yet) exposed here; the commented
    # code below sketches what it could look like.
    # def map(self, mode, offset=0, size=0):
    #     if not size:
    #         size = self.size - offset
    #     if not (offset == 0 and size == self.size):  # no-cover
    #         raise ValueError(
    #             "Cannot (yet) map buffers with nonzero offset and non-full size."
    #         )
    #
    #     if mode == flags.MapMode.READ:
    #         return self._map_read()
    #     elif mode == flags.MapMode.WRITE:
    #         return self._map_write()
    #     else:  # no-cover
    #         raise ValueError(f"Invalid MapMode flag: {mode}")

    def read_data(self, offset=0, size=0):
        """Synchronously read this buffer's data; returns a memoryview copy.

        If size is 0 (the default), reads from offset to the end.
        """
        if not size:
            size = self.size - offset
        assert 0 <= offset < self.size
        assert 0 <= size <= (self.size - offset)
        mapped_mem = self._map_read(offset, size)
        # Copy into CPU-owned memory so the data stays valid after unmapping
        new_mem = memoryview((ctypes.c_uint8 * mapped_mem.nbytes)()).cast("B")
        new_mem[:] = mapped_mem
        self._unmap()
        return new_mem

    async def read_data_async(self, offset=0, size=0):
        """Async wrapper of read_data() (currently blocks under the hood)."""
        return self.read_data(offset, size)

    def write_data(self, data, offset=0):
        """Write the given contiguous buffer-like object into this buffer,
        starting at offset (in bytes)."""
        m = memoryview(data).cast("B")
        if not m.contiguous:  # no-cover
            raise ValueError("The given buffer data is not contiguous")
        size = m.nbytes
        assert 0 <= offset < self.size
        assert 0 <= size <= (self.size - offset)
        mapped_mem = self._map_write(offset, size)
        mapped_mem[:] = m
        self._unmap()

    def _map_read(self, start, size):
        # Map (start, start+size) for reading; returns a memoryview into
        # the mapped memory (only valid until _unmap()).
        data = None

        @ffi.callback("void(WGPUBufferMapAsyncStatus, uint8_t*, uint8_t*)")
        def _map_read_callback(status, buffer_data_p, user_data_p):
            nonlocal data
            if status == 0:
                address = int(ffi.cast("intptr_t", buffer_data_p))
                data = _get_memoryview_from_address(address, size)

        _lib.wgpu_buffer_map_read_async(
            self._internal, start, size, _map_read_callback, ffi.NULL
        )
        # Let it do some cycles
        self._state = "mapping pending"
        self._map_mode = flags.MapMode.READ
        _lib.wgpu_device_poll(self._device._internal, True)
        if data is None:  # no-cover
            raise RuntimeError("Could not read buffer data.")
        self._state = "mapped"
        return data

    def _map_write(self, start, size):
        # Map (start, start+size) for writing; returns a memoryview into
        # the mapped memory (only valid until _unmap()).
        data = None

        @ffi.callback("void(WGPUBufferMapAsyncStatus, uint8_t*, uint8_t*)")
        def _map_write_callback(status, buffer_data_p, user_data_p):
            nonlocal data
            if status == 0:
                address = int(ffi.cast("intptr_t", buffer_data_p))
                data = _get_memoryview_from_address(address, size)

        _lib.wgpu_buffer_map_write_async(
            self._internal, start, size, _map_write_callback, ffi.NULL
        )
        # Let it do some cycles. Set the state before polling, consistent
        # with _map_read (it was previously set only after a success).
        self._state = "mapping pending"
        self._map_mode = flags.MapMode.WRITE
        _lib.wgpu_device_poll(self._device._internal, True)
        if data is None:  # no-cover
            # Fixed: this message wrongly said "read" (copy-paste from _map_read)
            raise RuntimeError("Could not write buffer data.")
        self._state = "mapped"
        return memoryview(data)

    def _unmap(self):
        _lib.wgpu_buffer_unmap(self._internal)
        self._state = "unmapped"
        self._map_mode = 0

    # wgpu.help('bufferdestroy', dev=True)
    def destroy(self):
        self._destroy()  # no-cover

    def _destroy(self):
        # Idempotent: detach the handle first so a second call is a no-op
        if self._internal is not None:
            self._internal, internal = None, self._internal
            self._state = "destroyed"
            self._map_mode = 0
            _lib.wgpu_buffer_destroy(internal)
class GPUTexture(base.GPUTexture):
    """Texture object on top of wgpu-native."""

    # wgpu.help('TextureViewDescriptor', 'texturecreateview', dev=True)
    def create_view(
        self,
        *,
        label="",
        format: "GPUTextureFormat" = None,
        dimension: "GPUTextureViewDimension" = None,
        aspect: "GPUTextureAspect" = "all",
        base_mip_level: "GPUIntegerCoordinate" = 0,
        mip_level_count: "GPUIntegerCoordinate" = 0,
        base_array_layer: "GPUIntegerCoordinate" = 0,
        array_layer_count: "GPUIntegerCoordinate" = 0,
    ):
        """Create a view on this texture.

        With no arguments a default (full) view is created. Otherwise both
        format and dimension must be given.
        """
        if format is None or dimension is None:
            # Only a fully-default call may omit format/dimension; wgpu-native
            # then picks a default view via a NULL descriptor.
            if not (
                format is None
                and dimension is None
                and aspect == "all"
                and base_mip_level == 0
                and mip_level_count == 0
                and base_array_layer == 0
                and array_layer_count == 0
            ):
                raise ValueError(
                    "In create_view() if any parameter is given, "  # fixed typo
                    + "both format and dimension must be specified."
                )
            id = _lib.wgpu_texture_create_view(self._internal, ffi.NULL)
        else:
            c_label = ffi.new("char []", label.encode())
            struct = new_struct_p(
                "WGPUTextureViewDescriptor *",
                label=c_label,
                format=format,
                dimension=dimension,
                aspect=aspect or "all",
                base_mip_level=base_mip_level,
                level_count=mip_level_count,
                base_array_layer=base_array_layer,
                array_layer_count=array_layer_count,
            )
            id = _lib.wgpu_texture_create_view(self._internal, struct)
        return base.GPUTextureView(label, id, self._device, self, self.texture_size)

    # wgpu.help('texturedestroy', dev=True)
    def destroy(self):
        self._destroy()  # no-cover

    def _destroy(self):
        # Idempotent: detach the handle first so a second call is a no-op
        if self._internal is not None:
            self._internal, internal = None, self._internal
            _lib.wgpu_texture_destroy(internal)
class GPUBindGroup(base.GPUBindGroup):
    def _destroy(self):
        if self._internal is not None:
            self._internal, internal = None, self._internal
            # Fixed: this called wgpu_bind_group_layout_destroy on a bind
            # group handle; every sibling class destroys its own kind.
            _lib.wgpu_bind_group_destroy(internal)
class GPUPipelineLayout(base.GPUPipelineLayout):
    def _destroy(self):
        """Release the native pipeline layout (idempotent)."""
        internal = self._internal
        if internal is not None:
            self._internal = None
            _lib.wgpu_pipeline_layout_destroy(internal)
class GPUShaderModule(base.GPUShaderModule):
    # wgpu.help('shadermodulecompilationinfo', dev=True)
    def compilation_info(self):
        """Return compilation info (delegates to the base implementation)."""
        return super().compilation_info()
    def _destroy(self):
        """Release the native shader module (idempotent)."""
        internal = self._internal
        if internal is not None:
            self._internal = None
            _lib.wgpu_shader_module_destroy(internal)
class GPUComputePipeline(base.GPUComputePipeline):
    def _destroy(self):
        """Release the native compute pipeline (idempotent)."""
        internal = self._internal
        if internal is not None:
            self._internal = None
            _lib.wgpu_compute_pipeline_destroy(internal)
class GPURenderPipeline(base.GPURenderPipeline):
    def _destroy(self):
        """Release the native render pipeline (idempotent)."""
        internal = self._internal
        if internal is not None:
            self._internal = None
            _lib.wgpu_render_pipeline_destroy(internal)
class GPUCommandEncoder(base.GPUCommandEncoder):
    """Command encoder on top of wgpu-native: records compute/render passes
    and copy operations, then finish() produces a command buffer."""
    # wgpu.help('ComputePassDescriptor', 'commandencoderbegincomputepass', dev=True)
    def begin_compute_pass(self, *, label=""):
        """Begin recording a compute pass; returns a compute pass encoder."""
        struct = new_struct_p("WGPUComputePassDescriptor *", todo=0)
        raw_pass = _lib.wgpu_command_encoder_begin_compute_pass(self._internal, struct)
        return GPUComputePassEncoder(label, raw_pass, self)
    # wgpu.help('RenderPassDescriptor', 'commandencoderbeginrenderpass', dev=True)
    def begin_render_pass(
        self,
        *,
        label="",
        color_attachments: "GPURenderPassColorAttachmentDescriptor-list",
        depth_stencil_attachment: "GPURenderPassDepthStencilAttachmentDescriptor" = None,
        occlusion_query_set: "GPUQuerySet" = None,
    ):
        """Begin recording a render pass; returns a render pass encoder.

        Each color attachment dict has an "attachment" texture view, an
        optional "resolve_target", a "load_value" (a load-op or clear color)
        and an optional "store_op".
        """
        # Note that occlusion_query_set is ignored because wgpu-native does not have it.
        c_color_attachments_list = []
        for color_attachment in color_attachments:
            _check_struct("RenderPassColorAttachmentDescriptor", color_attachment)
            assert isinstance(color_attachment["attachment"], base.GPUTextureView)
            texture_view_id = color_attachment["attachment"]._internal
            c_resolve_target = (
                0
                if color_attachment.get("resolve_target", None) is None
                else color_attachment["resolve_target"]._internal
            ) # this is a TextureViewId or null
            c_load_op, clear_color = _loadop_and_clear_from_value(
                color_attachment["load_value"]
            )
            # clear_color == 0 means "load", so the clear value is unused
            clr = (
                (0.0, 0.0, 0.0, 0.0)
                if clear_color == 0
                else _tuple_from_tuple_or_dict(clear_color, "rgba")
            )
            c_clear_color = new_struct(
                "WGPUColor", r=clr[0], g=clr[1], b=clr[2], a=clr[3]
            )
            c_attachment = new_struct(
                "WGPURenderPassColorAttachmentDescriptor",
                attachment=texture_view_id,
                resolve_target=c_resolve_target,
                load_op=c_load_op,
                store_op=color_attachment.get("store_op", "store"),
                clear_color=c_clear_color,
            )
            c_color_attachments_list.append(c_attachment)
        c_color_attachments_array = ffi.new(
            "WGPURenderPassColorAttachmentDescriptor []", c_color_attachments_list
        )
        c_depth_stencil_attachment = ffi.NULL
        if depth_stencil_attachment is not None:
            _check_struct(
                "RenderPassDepthStencilAttachmentDescriptor", depth_stencil_attachment
            )
            c_depth_load_op, c_depth_clear = _loadop_and_clear_from_value(
                depth_stencil_attachment["depth_load_value"]
            )
            c_stencil_load_op, c_stencil_clear = _loadop_and_clear_from_value(
                depth_stencil_attachment["stencil_load_value"]
            )
            c_depth_stencil_attachment = new_struct_p(
                "WGPURenderPassDepthStencilAttachmentDescriptor *",
                attachment=depth_stencil_attachment["attachment"]._internal,
                depth_load_op=c_depth_load_op,
                depth_store_op=depth_stencil_attachment["depth_store_op"],
                clear_depth=float(c_depth_clear),
                stencil_load_op=c_stencil_load_op,
                stencil_store_op=depth_stencil_attachment["stencil_store_op"],
                clear_stencil=int(c_stencil_clear),
            )
        struct = new_struct_p(
            "WGPURenderPassDescriptor *",
            color_attachments=c_color_attachments_array,
            color_attachments_length=len(c_color_attachments_list),
            depth_stencil_attachment=c_depth_stencil_attachment,
        )
        raw_pass = _lib.wgpu_command_encoder_begin_render_pass(self._internal, struct)
        return GPURenderPassEncoder(label, raw_pass, self)
    # wgpu.help('Buffer', 'Size64', 'commandencodercopybuffertobuffer', dev=True)
    def copy_buffer_to_buffer(
        self, source, source_offset, destination, destination_offset, size
    ):
        """Record a buffer-to-buffer copy of the given size (in bytes)."""
        assert isinstance(source, GPUBuffer)
        assert isinstance(destination, GPUBuffer)
        _lib.wgpu_command_encoder_copy_buffer_to_buffer(
            self._internal,
            source._internal,
            int(source_offset),
            destination._internal,
            int(destination_offset),
            int(size),
        )
    # wgpu.help('BufferCopyView', 'Extent3D', 'TextureCopyView', 'commandencodercopybuffertotexture', dev=True)
    def copy_buffer_to_texture(self, source, destination, copy_size):
        """Record a copy from a buffer (BufferCopyView dict) into a texture
        (TextureCopyView dict); copy_size is an (width, height, depth) extent."""
        _check_struct("BufferCopyView", source)
        _check_struct("TextureCopyView", destination)
        c_source = new_struct_p(
            "WGPUBufferCopyView *",
            buffer=source["buffer"]._internal,
            layout=new_struct(
                "WGPUTextureDataLayout",
                offset=int(source.get("offset", 0)),
                bytes_per_row=int(source["bytes_per_row"]),
                rows_per_image=int(source.get("rows_per_image", 0)),
            ),
        )
        ori = _tuple_from_tuple_or_dict(destination["origin"], "xyz")
        c_origin = new_struct("WGPUOrigin3d", x=ori[0], y=ori[1], z=ori[2])
        c_destination = new_struct_p(
            "WGPUTextureCopyView *",
            texture=destination["texture"]._internal,
            mip_level=int(destination.get("mip_level", 0)),
            origin=c_origin,
        )
        size = _tuple_from_tuple_or_dict(copy_size, ("width", "height", "depth"))
        c_copy_size = new_struct_p(
            "WGPUExtent3d *",
            width=size[0],
            height=size[1],
            depth=size[2],
        )
        _lib.wgpu_command_encoder_copy_buffer_to_texture(
            self._internal,
            c_source,
            c_destination,
            c_copy_size,
        )
    # wgpu.help('BufferCopyView', 'Extent3D', 'TextureCopyView', 'commandencodercopytexturetobuffer', dev=True)
    def copy_texture_to_buffer(self, source, destination, copy_size):
        """Record a copy from a texture (TextureCopyView dict) into a buffer
        (BufferCopyView dict); copy_size is an (width, height, depth) extent."""
        _check_struct("TextureCopyView", source)
        _check_struct("BufferCopyView", destination)
        ori = _tuple_from_tuple_or_dict(source["origin"], "xyz")
        c_origin = new_struct("WGPUOrigin3d", x=ori[0], y=ori[1], z=ori[2])
        c_source = new_struct_p(
            "WGPUTextureCopyView *",
            texture=source["texture"]._internal,
            mip_level=int(source.get("mip_level", 0)),
            origin=c_origin,
        )
        c_destination = new_struct_p(
            "WGPUBufferCopyView *",
            buffer=destination["buffer"]._internal,
            layout=new_struct(
                "WGPUTextureDataLayout",
                offset=int(destination.get("offset", 0)),
                bytes_per_row=int(destination["bytes_per_row"]),
                rows_per_image=int(destination.get("rows_per_image", 0)),
            ),
        )
        size = _tuple_from_tuple_or_dict(copy_size, ("width", "height", "depth"))
        c_copy_size = new_struct_p(
            "WGPUExtent3d *",
            width=size[0],
            height=size[1],
            depth=size[2],
        )
        _lib.wgpu_command_encoder_copy_texture_to_buffer(
            self._internal,
            c_source,
            c_destination,
            c_copy_size,
        )
    # wgpu.help('Extent3D', 'TextureCopyView', 'commandencodercopytexturetotexture', dev=True)
    def copy_texture_to_texture(self, source, destination, copy_size):
        """Record a texture-to-texture copy between two TextureCopyView
        dicts; copy_size is an (width, height, depth) extent."""
        _check_struct("TextureCopyView", source)
        _check_struct("TextureCopyView", destination)
        ori = _tuple_from_tuple_or_dict(source["origin"], "xyz")
        c_origin1 = new_struct("WGPUOrigin3d", x=ori[0], y=ori[1], z=ori[2])
        c_source = new_struct_p(
            "WGPUTextureCopyView *",
            texture=source["texture"]._internal,
            mip_level=int(source.get("mip_level", 0)),
            origin=c_origin1,
        )
        ori = _tuple_from_tuple_or_dict(destination["origin"], "xyz")
        c_origin2 = new_struct("WGPUOrigin3d", x=ori[0], y=ori[1], z=ori[2])
        c_destination = new_struct_p(
            "WGPUTextureCopyView *",
            texture=destination["texture"]._internal,
            mip_level=int(destination.get("mip_level", 0)),
            origin=c_origin2,
        )
        size = _tuple_from_tuple_or_dict(copy_size, ("width", "height", "depth"))
        c_copy_size = new_struct_p(
            "WGPUExtent3d *",
            width=size[0],
            height=size[1],
            depth=size[2],
        )
        _lib.wgpu_command_encoder_copy_texture_to_texture(
            self._internal,
            c_source,
            c_destination,
            c_copy_size,
        )
    # wgpu.help('CommandBufferDescriptor', 'commandencoderfinish', dev=True)
    def finish(self, *, label=""):
        """Finish recording; returns the resulting command buffer."""
        struct = new_struct_p("WGPUCommandBufferDescriptor *", todo=0)
        id = _lib.wgpu_command_encoder_finish(self._internal, struct)
        return base.GPUCommandBuffer(label, id, self)
    # todo: these do not exist yet for command_encoder in wgpu-native
    # def push_debug_group(self, group_label):
    # def pop_debug_group(self):
    # def insert_debug_marker(self, marker_label):
class GPUProgrammablePassEncoder(base.GPUProgrammablePassEncoder):
    """Shared pass-encoder functionality. Each call is dispatched to the
    compute-pass or render-pass variant of the native function, depending
    on whether we are a GPUComputePassEncoder."""
    # wgpu.help('BindGroup', 'Index32', 'Size32', 'Size64', 'programmablepassencodersetbindgroup', dev=True)
    def set_bind_group(
        self,
        index,
        bind_group,
        dynamic_offsets_data,
        dynamic_offsets_data_start,
        dynamic_offsets_data_length,
    ):
        """Bind the given bind group to the given slot index."""
        offsets = list(dynamic_offsets_data)
        c_offsets = ffi.new("WGPUDynamicOffset []", offsets)
        if isinstance(self, GPUComputePassEncoder):
            native_func = _lib.wgpu_compute_pass_set_bind_group
        else:
            native_func = _lib.wgpu_render_pass_set_bind_group
        native_func(
            self._internal, index, bind_group._internal, c_offsets, len(offsets)
        )
    # wgpu.help('programmablepassencoderpushdebuggroup', dev=True)
    def push_debug_group(self, group_label):
        """Open a named debug group (for debugging/profiling tools)."""
        c_group_label = ffi.new("char []", group_label.encode())
        if isinstance(self, GPUComputePassEncoder):
            native_func = _lib.wgpu_compute_pass_push_debug_group
        else:
            native_func = _lib.wgpu_render_pass_push_debug_group
        native_func(self._internal, c_group_label)
    # wgpu.help('programmablepassencoderpopdebuggroup', dev=True)
    def pop_debug_group(self):
        """Close the most recently opened debug group."""
        if isinstance(self, GPUComputePassEncoder):
            _lib.wgpu_compute_pass_pop_debug_group(self._internal)
            return
        _lib.wgpu_render_pass_pop_debug_group(self._internal)
    # wgpu.help('programmablepassencoderinsertdebugmarker', dev=True)
    def insert_debug_marker(self, marker_label):
        """Insert a single debug marker at this point of the pass."""
        c_marker_label = ffi.new("char []", marker_label.encode())
        if isinstance(self, GPUComputePassEncoder):
            native_func = _lib.wgpu_compute_pass_insert_debug_marker
        else:
            native_func = _lib.wgpu_render_pass_insert_debug_marker
        native_func(self._internal, c_marker_label)
class GPUComputePassEncoder(GPUProgrammablePassEncoder):
    """Encoder for recording the commands of a compute pass."""
    # wgpu.help('ComputePipeline', 'computepassencodersetpipeline', dev=True)
    def set_pipeline(self, pipeline):
        """Set the active compute pipeline."""
        _lib.wgpu_compute_pass_set_pipeline(self._internal, pipeline._internal)
    # wgpu.help('Size32', 'computepassencoderdispatch', dev=True)
    def dispatch(self, x, y=1, z=1):
        """Dispatch an x * y * z grid of workgroups."""
        _lib.wgpu_compute_pass_dispatch(self._internal, x, y, z)
    # wgpu.help('Buffer', 'Size64', 'computepassencoderdispatchindirect', dev=True)
    def dispatch_indirect(self, indirect_buffer, indirect_offset):
        """Dispatch with the workgroup counts read from a GPU buffer."""
        _lib.wgpu_compute_pass_dispatch_indirect(
            self._internal, indirect_buffer._internal, int(indirect_offset)
        )
    # wgpu.help('computepassencoderendpass', dev=True)
    def end_pass(self):
        """Finalize this compute pass."""
        _lib.wgpu_compute_pass_end_pass(self._internal)
    def _destroy(self):
        internal = self._internal
        if internal is not None:
            self._internal = None
            internal  # todo: crashes _lib.wgpu_compute_pass_destroy(internal)
class GPURenderEncoderBase(GPUProgrammablePassEncoder):
    """Encoder functionality shared by render passes and render bundles."""
    # wgpu.help('RenderPipeline', 'renderencoderbasesetpipeline', dev=True)
    def set_pipeline(self, pipeline):
        """Set the active render pipeline."""
        _lib.wgpu_render_pass_set_pipeline(self._internal, pipeline._internal)
    # wgpu.help('Buffer', 'Size64', 'renderencoderbasesetindexbuffer', dev=True)
    def set_index_buffer(self, buffer, offset=0, size=0):
        """Set the index buffer; size 0 means "to the end of the buffer"."""
        nbytes = size or (buffer.size - offset)
        _lib.wgpu_render_pass_set_index_buffer(
            self._internal, buffer._internal, int(offset), int(nbytes)
        )
    # wgpu.help('Buffer', 'Index32', 'Size64', 'renderencoderbasesetvertexbuffer', dev=True)
    def set_vertex_buffer(self, slot, buffer, offset=0, size=0):
        """Set a vertex buffer on the given slot; size 0 means "to the end"."""
        nbytes = size or (buffer.size - offset)
        _lib.wgpu_render_pass_set_vertex_buffer(
            self._internal, int(slot), buffer._internal, int(offset), int(nbytes)
        )
    # wgpu.help('Size32', 'renderencoderbasedraw', dev=True)
    def draw(self, vertex_count, instance_count=1, first_vertex=0, first_instance=0):
        """Draw primitives from the bound vertex buffer(s)."""
        _lib.wgpu_render_pass_draw(
            self._internal, vertex_count, instance_count, first_vertex, first_instance
        )
    # wgpu.help('Buffer', 'Size64', 'renderencoderbasedrawindirect', dev=True)
    def draw_indirect(self, indirect_buffer, indirect_offset):
        """Draw with the parameters read from a GPU buffer."""
        _lib.wgpu_render_pass_draw_indirect(
            self._internal, indirect_buffer._internal, int(indirect_offset)
        )
    # wgpu.help('SignedOffset32', 'Size32', 'renderencoderbasedrawindexed', dev=True)
    def draw_indexed(
        self,
        index_count,
        instance_count=1,
        first_index=0,
        base_vertex=0,
        first_instance=0,
    ):
        """Draw primitives using the bound index buffer."""
        args = (index_count, instance_count, first_index, base_vertex, first_instance)
        _lib.wgpu_render_pass_draw_indexed(self._internal, *args)
    # wgpu.help('Buffer', 'Size64', 'renderencoderbasedrawindexedindirect', dev=True)
    def draw_indexed_indirect(self, indirect_buffer, indirect_offset):
        """Indexed draw with the parameters read from a GPU buffer."""
        _lib.wgpu_render_pass_draw_indexed_indirect(
            self._internal, indirect_buffer._internal, int(indirect_offset)
        )
    def _destroy(self):
        internal = self._internal
        if internal is not None:
            self._internal = None
            internal  # todo: crashes _lib.wgpu_render_pass_destroy(internal)
class GPURenderPassEncoder(GPURenderEncoderBase):
    """Encoder for recording the commands of a render pass."""
    # NB: deliberately not a subclass of base.GPURenderPassEncoder!
    # wgpu.help('renderpassencodersetviewport', dev=True)
    def set_viewport(self, x, y, width, height, min_depth, max_depth):
        """Set the viewport to render into (values in pixels)."""
        args = (x, y, width, height, min_depth, max_depth)
        _lib.wgpu_render_pass_set_viewport(
            self._internal, *[float(v) for v in args]
        )
    # wgpu.help('IntegerCoordinate', 'renderpassencodersetscissorrect', dev=True)
    def set_scissor_rect(self, x, y, width, height):
        """Restrict rendering to the given rectangle."""
        _lib.wgpu_render_pass_set_scissor_rect(
            self._internal, int(x), int(y), int(width), int(height)
        )
    # wgpu.help('Color', 'renderpassencodersetblendcolor', dev=True)
    def set_blend_color(self, color):
        """Set the constant blend color (an rgba tuple or dict)."""
        r, g, b, a = _tuple_from_tuple_or_dict(color, "rgba")
        c_color = new_struct_p("WGPUColor *", r=r, g=g, b=b, a=a)
        _lib.wgpu_render_pass_set_blend_color(self._internal, c_color)
    # wgpu.help('StencilValue', 'renderpassencodersetstencilreference', dev=True)
    def set_stencil_reference(self, reference):
        """Set the stencil reference value."""
        _lib.wgpu_render_pass_set_stencil_reference(self._internal, int(reference))
    # execute_bundles() is omitted because bundles cannot be created yet
    # (see GPURenderBundleEncoder). For reference, it would look like:
    # def execute_bundles(self, bundles):
    #     bundles2 = []
    #     for bundle in bundles:
    #         if isinstance(bundle, base.GPURenderBundle):
    #             bundles2.append(bundle._internal)
    #         else:
    #             bundles2.append(int(bundle))
    #     c_bundles_array = ffi.new("WGPURenderBundleId []", bundles2)
    #     _lib.wgpu_render_pass_execute_bundles(
    #         self._internal, c_bundles_array, len(bundles2),
    #     )
    # wgpu.help('renderpassencoderendpass', dev=True)
    def end_pass(self):
        """Finalize this render pass."""
        _lib.wgpu_render_pass_end_pass(self._internal)
class GPURenderBundleEncoder(base.GPURenderBundleEncoder):
    # Render bundles are not implemented in wgpu-native yet, so this class
    # adds nothing on top of the base class.
    pass
    # Not yet implemented in wgpu-native
    # def finish(self, *, label=""):
    #     ...
class GPUQueue(base.GPUQueue):
    """Queue object on top of wgpu-native: submits command buffers and
    writes data directly into buffers and textures."""
    # wgpu.help('queuesubmit', dev=True)
    def submit(self, command_buffers):
        """Submit the given command buffers for execution on this queue."""
        command_buffer_ids = [cb._internal for cb in command_buffers]
        c_command_buffers = ffi.new("WGPUCommandBufferId []", command_buffer_ids)
        _lib.wgpu_queue_submit(
            self._internal, c_command_buffers, len(command_buffer_ids)
        )
    # Seems not yet implemented in wgpu-native
    # def copy_image_bitmap_to_texture(self, source, destination, copy_size):
    #     ...
    # wgpu.help('Buffer', 'Size64', 'queuewritebuffer', dev=True)
    def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None):
        """Write data into the given GPU buffer, starting at buffer_offset.

        data_offset and size (in bytes) select a region of the source data;
        size None or 0 means "from data_offset to the end of the data".
        """
        # We support anything that memoryview supports, i.e. anything
        # that implements the buffer protocol, including, bytes,
        # bytearray, ctypes arrays, numpy arrays, etc.
        m, address = _get_memoryview_and_address(data)
        nbytes = m.nbytes
        # Checks
        if not m.contiguous: # no-cover
            raise ValueError("The given buffer data is not contiguous")
        # Deal with offset and size
        buffer_offset = int(buffer_offset)
        data_offset = int(data_offset)
        if not size:
            data_length = nbytes - data_offset
        else:
            data_length = int(size)
        # Bounds checks (note: asserts are stripped when running python -O)
        assert 0 <= buffer_offset < buffer.size
        assert 0 <= data_offset < nbytes
        assert 0 <= data_length <= (nbytes - data_offset)
        assert data_length <= buffer.size - buffer_offset
        # Make the call. Note that this call copies the data - it's ok
        # if we lose our reference to the data once we leave this function.
        c_data = ffi.cast("uint8_t *", address + data_offset)
        _lib.wgpu_queue_write_buffer(
            self._internal, buffer._internal, buffer_offset, c_data, data_length
        )
    # wgpu.help('Extent3D', 'TextureCopyView', 'TextureDataLayout', 'queuewritetexture', dev=True)
    def write_texture(self, destination, data, data_layout, size):
        """Write data into a texture.

        destination is a TextureCopyView dict, data_layout a
        TextureDataLayout dict describing the source rows, and size an
        (width, height, depth) extent.
        """
        m, address = _get_memoryview_and_address(data)
        # todo: could we not derive the size from the shape of m?
        # Checks
        if not m.contiguous: # no-cover
            raise ValueError("The given texture data is not contiguous")
        c_data = ffi.cast("uint8_t *", address)
        data_length = m.nbytes
        ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz")
        c_origin = new_struct("WGPUOrigin3d", x=ori[0], y=ori[1], z=ori[2])
        c_destination = new_struct_p(
            "WGPUTextureCopyView *",
            texture=destination["texture"]._internal,
            mip_level=destination.get("mip_level", 0),
            origin=c_origin,
        )
        c_data_layout = new_struct_p(
            "WGPUTextureDataLayout *",
            offset=data_layout.get("offset", 0),
            bytes_per_row=data_layout["bytes_per_row"],
            rows_per_image=data_layout.get("rows_per_image", 0),
        )
        size = _tuple_from_tuple_or_dict(size, ("width", "height", "depth"))
        c_size = new_struct_p(
            "WGPUExtent3d *",
            width=size[0],
            height=size[1],
            depth=size[2],
        )
        _lib.wgpu_queue_write_texture(
            self._internal, c_destination, c_data, data_length, c_data_layout, c_size
        )
class GPUSwapChain(base.GPUSwapChain):
    """Swap chain implementation on top of wgpu-native, tied to a canvas.
    Used as a context manager: entering yields the next texture view to
    render to, exiting presents it."""
    def __init__(self, device, canvas, format, usage):
        super().__init__("", None, device)
        self._canvas = canvas
        self._format = format
        self._usage = usage
        self._surface_size = (-1, -1)  # invalid, forces creation on first use
        self._surface_id = None
        self._create_native_swap_chain_if_needed()
    def _create_native_swap_chain_if_needed(self):
        # (Re)create the native swap chain when the canvas' physical size changed
        canvas = self._canvas
        psize = canvas.get_physical_size()
        if psize == self._surface_size:
            return
        self._surface_size = psize
        # logger.info(str((psize, canvas.get_logical_size(), canvas.get_pixel_ratio())))
        struct = new_struct_p(
            "WGPUSwapChainDescriptor *",
            usage=self._usage,
            format=self._format,
            width=max(1, psize[0]),
            height=max(1, psize[1]),
            present_mode=1,
        )
        # present_mode -> 0: Immediate, 1: Mailbox, 2: Fifo
        if self._surface_id is None:
            self._surface_id = get_surface_id_from_canvas(canvas)
        self._internal = _lib.wgpu_device_create_swap_chain(
            self._device._internal, self._surface_id, struct
        )
    def __enter__(self):
        # Get the current texture view, and make sure it is presented when done
        self._create_native_swap_chain_if_needed()
        sc_output = _lib.wgpu_swap_chain_get_next_texture(self._internal)
        status, view_id = sc_output.status, sc_output.view_id
        if status == _lib.WGPUSwapChainStatus_Good:
            pass
        elif status == _lib.WGPUSwapChainStatus_Suboptimal: # no-cover
            # Warn only once per swap chain instance
            if not getattr(self, "_warned_swap_chain_suboptimal", False):
                logger.warning(f"Swap chain status of {self} is suboptimal")
                self._warned_swap_chain_suboptimal = True
        else: # no-cover
            status_str = swap_chain_status_map.get(status, "")
            raise RuntimeError(
                f"Swap chain status is not good: {status_str} ({status})"
            )
        size = self._surface_size[0], self._surface_size[1], 1
        return base.GPUTextureView("swap_chain", view_id, self._device, None, size)
    def __exit__(self, type, value, tb):
        # Present the current texture
        _lib.wgpu_swap_chain_present(self._internal)
# Reverse lookup: native status enum value -> readable status name.
swap_chain_status_map = dict(
    (getattr(_lib, "WGPUSwapChainStatus_" + name), name)
    for name in ("Good", "Suboptimal", "Lost", "Outdated", "OutOfMemory", "Timeout")
)
# %%
def _copy_docstrings():
for ob in globals().values():
if not (isinstance(ob, type) and issubclass(ob, base.GPUObject)):
continue
elif ob.__module__ != __name__:
continue # no-cover
base_cls = ob.mro()[1]
ob.__doc__ = base_cls.__doc__
for name, attr in ob.__dict__.items():
if name.startswith("_") or not hasattr(attr, "__doc__"):
continue # no-cover
base_attr = getattr(base_cls, name, None)
if base_attr is not None:
attr.__doc__ = base_attr.__doc__
_copy_docstrings()
| StarcoderdataPython |
3428451 | <reponame>datatalking/Python-Natural-Language-Processing
# Python 2 demo: word-vector arithmetic with the pretrained Google News model.
from gensim import models
# NOTE(review): Word2Vec.load_word2vec_format was deprecated in later gensim
# releases in favour of KeyedVectors.load_word2vec_format -- confirm version.
w = models.Word2Vec.load_word2vec_format('/home/jalaj/Downloads/GoogleNews-vectors-negative300.bin', binary=True)
print 'King - man + woman:'
print ''
# Classic analogy query: vector(king) - vector(man) + vector(woman)
print w.wv.most_similar(positive=['woman', 'king'], negative=['man'])
print 'Similarity between man and woman:'
# Cosine similarity between the two word vectors
print w.similarity('woman', 'man')
| StarcoderdataPython |
355621 | <gh_stars>0
''' Doc_String '''
from os import listdir, mkdir, getcwd, chdir, path
from my_headers import ii
####################################################################
####################################################################
####################################################################
####################################################################
# Walk each map folder and stamp every empty .txt file with a '.' placeholder;
# non-empty files are left untouched.
print()
root_dir = getcwd()
map_folders = [
    'cherrygrove', 'home', 'house1', 'house2', 'my_room',
    'newbark', 'professor_elm', 'route_27', 'route_29',
]
for map_folder in map_folders:
    print('{:#^80}'.format(f" {map_folder} "))
    chdir(map_folder)
    for txt_name in (n for n in listdir() if n[-4:] == '.txt'):
        with open(txt_name, 'r') as handle:
            contents = handle.read()
        if len(contents):
            continue  # already has content
        print(txt_name, len(contents))
        with open(txt_name, 'w') as handle:
            handle.write('.')
    chdir(root_dir)
print()
##files = listdir()
####################################################################
####################################################################
##def main():
## to_make = 'bgs','doors','music','signs','tiles'
## new_data = {k:[] for k in to_make}
## npcs = {}
## if 'data.txt' not in listdir(): return 0
## with open('data.txt','r') as f: data = f.read().splitlines()
##
## for line in data:
## if not line: continue
##
## if line.startswith('sign='):
## new_data['signs'].append(line.replace('sign=',''))
## if line.startswith('extra_bg='):
## new_data['bgs'].append(line.replace('extra_bg=',''))
## if line.startswith('music='):
## new_data['music'].append(line.replace('music=',''))
##
## if line.startswith('move='):
## if line == 'move=#': continue
## if ':' in line:
## new_line = [[j.strip() for j in i.split(':')]
## for i in line.split('=')[-1].split(',')]
## dat = ''.join([f"{k}:{v:>3}, " for k,v in new_line]
## )[:-4].replace('tile:','tile: ')
## else:
## x,y = [i for i in line.split('=')[-1].split(',')]
## dat = f"x:{x:>3}, y:{y:>3}, tile: None"
## new_data['tiles'].append(dat)
##
## if line.startswith('door='):
## new_line = [[j.strip() for j in i.split(':')]
## for i in line.split('=')[-1].split(',')]
## bg = new_line.pop(2)
## wash = ['washout',' True']
## if len(new_line) == 6:
## new_line.pop()
## wash = ['washout',' False']
## new_line.extend([wash,bg])
## dat = ''.join([f"{k}:{v:>3}, " for k,v in new_line]
## )[:-4].replace('bg:','bg: ')
## new_data['doors'].append(dat)
##
##
## if line.startswith('npc='):
## _,values = [i.strip() for i in line.split('=')]
## name = values.split(',')[0].split(':')[-1]
## print(name)
## facing = ['facing','0']
## frozen = ['frozen','False']
## type_ = ['type',name]
## if ':' in line:
## vv = [i.strip().split(':') for i in values.split(',') if 'name' not in i]
## if 'type' in [k for k,v in vv]:
## type_ = vv.pop([k for k,v in vv].index('type'))
## if 'facing' in [k for k,v in vv]:
## facing = vv.pop([k for k,v in vv].index('facing'))
## if 'frozen' in [k for k,v in vv]:
## frozen = vv.pop([k for k,v in vv].index('frozen'))
## frozen = [f"{i:>6}" for i in frozen]
##
## vv.extend([facing,frozen,type_])
## dat = 'pos='+''.join([f"{k}:{v:>3}, " for k,v in vv])[:-4].replace(
## 'type:','type: ')
## else:
## name, x, y = [i.strip() for i in values.split(',')]
## vv = [['x',x], ['y',y]]
## vv.extend([facing,frozen,type_])
## dat = 'pos='+''.join([f"{k}:{v:>3}, " for k,v in vv])[:-4].replace(
## 'type:','type: ')
## npcs[name] = {'pos':dat,'move':[],'data':''}
##
## if line.startswith('npc_move='):
## _,values = line.split('=')
## name = values.split(',')[0]
## x,y = values.replace(f"{name},",'').split(',')
## dat = f"move=x:{x.strip():>3}, y:{y.strip():>3}"
## npcs[name]['move'].append(dat)
##
## if line.startswith('npc_interact='):
## _,values = line.split('=')
## name = values.split(',')[0].split(':')[-1]
## dat = f"interact={values.replace('name:','').replace(name+', ','')}"
## npcs[name]['interact'] = dat
##
##
## ####################################################################
##
## for name,value_dict in npcs.items():
## for key,val in value_dict.items():
## if key == 'data': continue
## if key == 'move': npcs[name]['move'] = '\n'.join(npcs[name]['move'])
## npcs[name]['data'] += npcs[name][key]
## npcs[name]['data'] += '\n\n'
## npcs[name]['data'] = npcs[name]['data'].replace('\n\n\n','\n')[:-2]
##
##
##
## for k,v in new_data.items():
## joiner = '\n\n'
## if k == 'tiles': joiner = '\n'
## new_data[k] = joiner.join(new_data[k])
##
##
## ####################################################################
##
##
## for k,v in new_data.items():
## print(f"\t {k.upper()} \n")
## file_name = f"{k}.txt"
## with open(file_name,'w') as f: f.write(v)
##
##
## try: mkdir('npcs')
## except: print('already made NPC file','\n')
##
## mainfile = getcwd()
## npcdir = path.join(mainfile,'npcs')
## chdir(npcdir)
##
## for name,value_dict in npcs.items():
## print(f"\t {name.upper()} \n")
## file_name = f"{name}.txt"
## with open(file_name,'w') as f: f.write(npcs[name]['data'])
##
## chdir(mainfile)
####################################################################
####################################################################
####################################################################
####################################################################
##for file in files:
## ext = file.split('.')[-1]
## if (ext != 'txt') or (file in ['data.txt','rebuild.txt']): continue
##
#### with open(file,'r') as f:
#### data = f.read().splitlines()
#### tofile = []
####
#### for line in data:
#### if not line: continue
#### k,v = line.split('=')
#### tofile.append(v)
####
#### tofile = '\n'.join(tofile)
#### print(f"\n{'#'*80}\n")
## print(file.upper(),'\n')
#### print(f'"{tofile}"')
####
#### with open(file,'w') as f:
#### f.write(tofile)
##with open('bgs.txt','r') as file:
## data = file.read()
##data = data.replace(' ','')
##data = data.replace('glow_0','grass')
##data = data.replace('glow_1','ledge')
##data = data.replace('glow_2','encounter_grass')
##print(data)
##
##
##with open('data.txt','w') as file:
## file.write(data)
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
| StarcoderdataPython |
12830389 | ################
# Dependencies #
################
import importlib
from os import replace
from datetime import datetime, timedelta
from .helpers import route_url
from flask import make_response, jsonify, render_template, request as flask_request, abort as flask_abort, redirect as flask_redirect, session as flask_session
from werkzeug.security import check_password_hash, generate_password_hash
# Flask objects re-exported so app code can import request/session from here
request = flask_request
session = flask_session
# Fetch configuration module (project-level config.py)
config = importlib.import_module('config')
debug = getattr(config, "DEBUG")
default_lang = getattr(config, "DEFAULT_LANG")
multi_lang = getattr(config, "MULTI_LANG")
languages = getattr(config, "LANGUAGES")
# Fetch apps module (project-level _apps.py with registered apps)
apps_module = importlib.import_module('_apps')
apps = getattr(apps_module, "apps")
##
# @desc Redirects to HTTP error pages
#
# @param code: int - HTTP status code
#
# @return object
##
def abort(code:int=404):
    """Abort the current request with the given HTTP status code (default 404)."""
    return flask_abort(status=code)
##
# @desc Redirects to relative URL
#
# @param url: str
#
# @return object
##
def redirect(url:str, code:int=302):
    """Issue an HTTP redirect to the given relative URL (default 302)."""
    return flask_redirect(location=url, code=code)
##
# @desc Redirects to app URL
#
# @param app: str - The app name
# @param controller: str - The app controller name
#
# @return object
##
def redirect_to(app:str, controller:str=None, code:int=302):
    """Redirect to the resolved URL of an app (and optional controller)."""
    target = route_url(app, controller)
    return redirect(url=target, code=code)
##
# @desc Checks session for existence
#
# @param name: str -- *Required session name
#
# @return bool
##
def check_session(name:str):
    """Return True if *name* exists in the Flask session, else False."""
    # Membership test replaces the verbose if/else True/False pattern.
    return name in session
##
# @desc Gets session
#
# @param name: str -- *Required session name
#
# @return object
##
def get_session(name:str):
    """Return the session value stored under *name* (KeyError if absent)."""
    return session[name]
##
# @desc Sets session
#
# @param name: str -- *Required session name
# @param value: str -- *Required session value
##
def set_session(name:str, value:str):
    """Store *value* under *name* in the Flask session."""
    session[name] = value
##
# @desc Unset session
#
# @param name: str -- *Required session name
##
def unset_session(name:str):
    """Remove *name* from the session; silently ignores a missing key."""
    session.pop(name, None)
##
# @desc Checks cookie for existence
#
# @param name: str -- *Required cookie name
#
# @return bool
##
def check_cookie(name:str):
    """Return True if the current request carries a cookie called *name*."""
    # Membership test replaces the verbose if/else True/False pattern.
    return name in request.cookies
##
# @desc Get cookie
#
# @param name: str -- *Required cookie name
#
# @return object
##
def get_cookie(name:str):
    """Return the value of cookie *name*, or None when it is not set."""
    return request.cookies.get(name)
##
# @desc Sets cookie
#
# @param name: str -- *Required cookie name
# @param value: str -- *Required cookie value
# @param days: int -- Optional expiry days
# @param data: dictionary -- Optional data
#
# @return object
##
def set_cookie(name:str, value:str, data:dict={}, days:int=30):
    """Set a cookie and return the Flask response that carries it.

    name  -- *Required cookie name
    value -- *Required cookie value
    data  -- Optional {'type': 'redirect'|'render'|'json'|'text',
             'response': ...} controlling the response body; any other
             (or no) type falls back to a plain confirmation message.
             (The shared {} default is safe: the dict is only read.)
    days  -- Expiry in days from now (default 30).
    """
    # BUG FIX: the original used `and`, which only rejected the call when
    # BOTH arguments were missing; either one missing is an error.
    if not name or not value:
        error = 'Please provide the required parameters!'
        if debug:
            raise Exception(error)
        else:
            print(error)
            exit()
    # Build the response the cookie rides on
    if data and data["type"] == "redirect":
        res = make_response(redirect(data["response"]))
    elif data and data["type"] == "render":
        res = make_response(render_template(data["response"]))
    elif data and data["type"] == "json":
        res = make_response(jsonify(data["response"]))
    elif data and data["type"] == "text":
        res = make_response(data["response"])
    else:
        # Also covers an unrecognized data['type'], which previously left
        # `res` undefined and raised NameError.
        res = make_response("Cookie set successfully!")
    # Expires `days` days from now
    expire = datetime.utcnow() + timedelta(days=days)
    res.set_cookie(name, value, expires=expire)
    return res
##
# @desc Unsets cookie
#
# @param name: str -- *Required cookie name
# @param data: dictionary -- Optional data
##
def unset_cookie(name:str, data:dict={}):
    """Expire cookie *name* and return the Flask response that removes it.

    name -- *Required cookie name
    data -- Optional {'type': ..., 'response': ...} dict as in set_cookie;
            any other (or no) type falls back to a plain confirmation.
    """
    # Check required params
    if not name:
        error = 'Please provide the required parameters!'
        if debug:
            raise Exception(error)
        else:
            print(error)
            exit()
    # Build the response the cookie removal rides on
    if data and data["type"] == "redirect":
        res = make_response(redirect(data["response"]))
    elif data and data["type"] == "render":
        res = make_response(render_template(data["response"]))
    elif data and data["type"] == "json":
        res = make_response(jsonify(data["response"]))
    elif data and data["type"] == "text":
        res = make_response(data["response"])
    else:
        # Also covers an unrecognized data['type'], which previously left
        # `res` undefined and raised NameError.
        res = make_response("Cookie unset successfully!")
    # Setting an already-expired cookie removes it client-side
    res.set_cookie(name, '', expires=0)
    return res
##
# @desc Finds active language
#
# @var active_lang: str - The active language code
#
# @return str
##
def find_lang():
    """Resolve the active language: URL prefix > cookie > session > default.

    Returns {'active_language': code, 'LANGUAGE': url_prefix}; the prefix
    is '' when multi-language support is disabled.
    """
    url_lang = request.path.split('/')[1]
    if not multi_lang:
        # Single-language site: no URL prefix, always the default language.
        return {
            'active_language': default_lang,
            'LANGUAGE': '',
        }
    if url_lang in languages:
        active = url_lang
        set_session('active_lang', url_lang)
    elif check_cookie('active_lang'):
        active = get_cookie('active_lang')
        set_session('active_lang', get_cookie('active_lang'))
    elif check_session('active_lang'):
        active = get_session('active_lang')
    else:
        active = default_lang
        set_session('active_lang', default_lang)
    return {
        'active_language': active,
        'LANGUAGE': '/' + active,
    }
##
# @desc Redirects not logged-in users
#
# @param url: str -- *Required url for users app
#
# @var next: str -- The next url
#
# @return object
##
def login_required(app:str, controller:str=None, validate:str='user'):
    """Decorator factory: redirect anonymous visitors to the login URL.

    app        -- *Required app name used to resolve the login route
    controller -- Optional controller name of the login route
    validate   -- Session/cookie key that marks a logged-in user

    The originally requested path is appended as ?next=... so the user can
    be sent back after logging in.
    """
    from functools import wraps

    # Fetch the route final url
    url = route_url(app, controller)

    def wrapper(inner):
        # wraps preserves the view's __name__/__doc__; without it every
        # wrapped view was named 'decorator', which breaks routing/debugging.
        @wraps(inner)
        def decorator(*args, **kwargs):
            # Relative URL of the page the user originally asked for
            next = request.url.replace(request.url_root, '/')
            # Promote a remembered cookie login into the session
            if check_cookie(validate):
                set_session(validate, get_cookie(validate))
            # User is not logged-in: send to the (language-prefixed) login page
            if not check_session(validate):
                if multi_lang:
                    if check_session('active_lang'):
                        return redirect(f'''/{get_session('active_lang')}/{url}?next={next}''')
                return redirect(f'{url}?next={next}')
            # User is logged-in: run the wrapped view
            return inner(*args, **kwargs)
        return decorator
    return wrapper
##
# @desc Redirects logged-in users
#
# @param url: str -- *Required url for app
#
# @return object
##
def login_abort(app:str, controller:str=None, validate:str='user'):
    """Decorator factory: redirect already-logged-in users away.

    Intended for views like login/register pages that a logged-in user
    should not see; anonymous users fall through to the wrapped view.
    """
    from functools import wraps

    # Fetch the route final url
    url = route_url(app, controller)

    def wrapper(inner):
        # wraps preserves the view's identity (see login_required).
        @wraps(inner)
        def decorator(*args, **kwargs):
            # Promote a remembered cookie login into the session
            if check_cookie(validate):
                set_session(validate, get_cookie(validate))
            # User is logged-in: bounce to the target URL
            if check_session(validate):
                return redirect(url)
            # User is not logged-in: run the wrapped view
            return inner(*args, **kwargs)
        return decorator
    return wrapper
##
# @desc Hashing password
#
# @param password: str
#
# @return str
##
def hash_password(password):
    """Return a salted hash of *password* via werkzeug's generate_password_hash."""
    return generate_password_hash(password)
##
# @desc Check hashed password with requested password
#
# @param hashed_password: str -- Hashed password from database
# @param requested_password: str -- Requested password by the user
#
# @return bool
##
def check_password(hashed_password, requested_password):
    """Return True if *requested_password* matches *hashed_password*.

    hashed_password    -- Hashed password from the database
    requested_password -- Plain-text password supplied by the user
    """
    # check_password_hash already returns a bool; no if/else needed.
    return check_password_hash(hashed_password, requested_password)
| StarcoderdataPython |
240085 | <reponame>stjordanis/owid-importer
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-05 07:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds publish-tracking fields to Chart."""
    dependencies = [
        ('grapher_admin', '0025_auto_20180104_1124'),
    ]
    operations = [
        # Timestamp of publication; null while the chart is unpublished.
        migrations.AddField(
            model_name='chart',
            name='published_at',
            field=models.DateTimeField(null=True),
        ),
        # User who published the chart, keyed on the user's name field.
        migrations.AddField(
            model_name='chart',
            name='published_by',
            field=models.ForeignKey(blank=True, db_column='published_by', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='published_charts', to=settings.AUTH_USER_MODEL, to_field='name'),
        ),
    ]
]
| StarcoderdataPython |
1900339 | # -*- coding: utf-8 -*-
"""Top-level package for LightBox."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| StarcoderdataPython |
6563210 | from db_context.Context import Context
class QueryContext(Context):
    """Read-only accessor over the database handle provided by Context (self.db)."""
    def __init__(self):
        super().__init__()
    def get_item_by_id(self, collection_name, item_id):
        """Return the single document whose _id equals *item_id* (or None)."""
        return self.db[collection_name].find_one({"_id": item_id})
    def get_all(self, collection_name):
        """Return a cursor/iterable over every document in the collection."""
        return self.db[collection_name].find({})
    def get_all_by_condition(self, collection_name, query):
        """Return a cursor/iterable over documents matching the *query* dict."""
        return self.db[collection_name].find(query)
| StarcoderdataPython |
3334954 | <gh_stars>1-10
from .ass_toggle_comment import AssToggleCommentCommand
from .ass_toggle_comment import AssToggleCommentEventListener
__all__ = (
"AssToggleCommentCommand",
"AssToggleCommentEventListener",
)
| StarcoderdataPython |
277285 | <filename>src/utils/grid.py<gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
import code
import cv2
from mpl_toolkits.axes_grid1 import ImageGrid
def nBilateral(img, n, params):
    """Apply cv2.bilateralFilter (diameter 5, extra args from *params*) n times."""
    for _ in range(n):
        img = cv2.bilateralFilter(img, 5, *params)
    return img
def nSharpen(img, n):
    """Convolve *img* n times with a 3x3 sharpening kernel."""
    kernel = np.array([
        [-1, -1, -1],
        [-1, 9, -1],
        [-1, -1, -1],
    ])
    for _ in range(n):
        img = cv2.filter2D(img, -1, kernel)
    return img
# Render a 10x10 sweep of bilateral filtering: one axis varies the number of
# filter passes, the other the first sigma parameter.
test_image = cv2.cvtColor(cv2.imread("0123_sub.png"), cv2.COLOR_BGR2RGB)
iteration_counts = np.arange(10) * 4
sigma_values = np.arange(10) * 4
fig = plt.figure(figsize=(10, 10))
grid = ImageGrid(fig, 111, nrows_ncols=(10, 10), axes_pad=0)
filtered = [
    nBilateral(test_image, n, (s, 50))
    for n in iteration_counts
    for s in sigma_values
]
for axis, img in zip(grid, filtered):
    axis.imshow(img)
plt.savefig("test.png", dpi=400)
4912464 | from actions.RequestOpenFood import RequestOpenFood
def get_open_food_info(req):
    """Extract the barcode from a webhook request and look it up on OpenFood.

    Returns {'info': <lookup result>} on success, {'info': {'source': None}}
    when the lookup fails, and {} when no barcode parameter is present.
    """
    print(req)
    result = req.get("result")
    parameters = result.get("parameters")
    barcode = parameters.get("barcode")
    data = {}
    if barcode:
        try:
            res = RequestOpenFood.get_product(barcode=barcode)
            data["info"] = res
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; lookup failures still degrade gracefully.
            data["info"] = {"source": None}
    return data
def make_product_info_webhook_result(req):
    """Build the api.ai webhook response for a product-info request.

    Returns {} when no data could be fetched, a plain speech response when
    the product is unknown, and a Facebook generic-template card (name plus
    image) when product info is available.
    """
    data = get_open_food_info(req)
    if data is None:
        return {}
    if data.get("info") is None:
        return {}
    info = data.get("info").get("_source")
    if info is None:
        return {
            "speech": "Can't find info about this product",
            "displayText": "Can't find info about this product",
            "source": "jarvis-on-apiai"
        }
    # Prefer the English name, falling back through the other locales.
    # NOTE(review): 'name_ge' looks like it may mean German ('name_de'?) --
    # kept as-is to match the upstream index schema.
    speech = info.get("name_en")
    if speech is None:
        speech = info.get("name_fr")
    if speech is None:
        speech = info.get("name_ge")
    if speech is None:
        speech = info.get("name_it")
    print("Response:")
    print(speech)
    # BUG FIX: the original did info.get("images")[0] unconditionally, which
    # raised TypeError/IndexError when 'images' was missing or empty, and
    # could leave image_url undefined (NameError); guard every level.
    image_url = ""
    images = info.get("images") or []
    if images:
        images_data = images[0].get("data")
        if images_data is not None:
            image_url = images_data.get("url") or ""
    if not image_url:
        print("Can't find image url")
    else:
        print("Image URL:")
        print(image_url)
    # A dict literal can never be None, so the original `if json_response is
    # None` check was dead code and has been removed.
    return {
        "speech": speech,
        "displayText": speech,
        "data": {
            "facebook": {
                "attachment": {
                    "type": "template",
                    "payload": {
                        "template_type": "generic",
                        "elements": [
                            {
                                "title": speech,
                                "image_url": image_url,
                                "subtitle": "Provided by OpenFood.ch"
                            }
                        ]
                    }
                }
            }
        },
        "source": "jarvis-on-apiai"
    }
'''
json_response = {
"speech": speech,
"displayText": speech,
"data": {
"facebook": {
"attachment":{
"type":"image",
"payload":{
"url":image_url
}
}
}
},
# "contextOut": [],
"source": "apiai-weather-webhook-sample",
}
'''
| StarcoderdataPython |
12858964 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from .config import settings
# Postgres connection URL assembled from the settings object.
SQLALCHEMY_DATABASE_URL = 'postgresql://{user}:{password}@{host}:{port}/{db}'.format(
    user=settings.DB_USER,
    password=settings.DB_PASSWORD,
    host=settings.DB_HOST,
    port=settings.DB_PORT,
    db=settings.DB_NAME
)
engine = create_engine(SQLALCHEMY_DATABASE_URL)
# Session factory: explicit commit/flush, bound to the Postgres engine.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Declarative base class for the ORM models.
Base = declarative_base()
def get_db():
    """Dependency generator: yield a database session, always closing it after use."""
    db_session = SessionLocal()
    try:
        yield db_session
    finally:
        db_session.close()
"""
while True:
try:
conn = psycopg2.connect(
host=settings.DB_HOST,
port=settings.DB_PORT,
database=settings.DB_NAME,
user=settings.DB_USER,
password=<PASSWORD>,
cursor_factory=RealDictCursor
)
cur = conn.cursor()
print("Connected to the database")
break
except (Exception, psycopg2.Error) as error:
print(error)
print(f"Retrying in {settings.SLEEP_TIME} secs")
sleep(settings.SLEEP_TIME)
"""
| StarcoderdataPython |
3440644 | <filename>python/py-itertools/compress-the-string.py
# Compress the String!
# groupby()
#
# https://www.hackerrank.com/challenges/compress-the-string/problem
#
import itertools
# For each run of equal digits, emit a "(run_length, digit)" pair.
s = input()
encoded = ((len(list(run)), int(ch)) for ch, run in itertools.groupby(s))
print(' '.join(str(pair) for pair in encoded))
| StarcoderdataPython |
6644198 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.consistency import (
MultilineLoopViolation,
)
from wemake_python_styleguide.visitors.ast.loops import WrongLoopVisitor
incorrect_loop1 = """
def wrapper():
for x in some_func(1,
3):
...
"""
incorrect_loop2 = """
def wrapper():
for x in (1,
2, 3, 4):
...
"""
incorrect_loop3 = """
while some_func(1,
3):
...
"""
correct_loop1 = """
def wrapper():
for x in (1, 2, 3, 4):
...
"""
correct_loop2 = """
def wrapper():
for x in (1, 2, 3, 4):
...
return
"""
correct_loop3 = """
while some_func(1,3):
...
"""
@pytest.mark.parametrize('code', [
    incorrect_loop1,
    incorrect_loop2,
    incorrect_loop3,
])
def test_incorrect_multiline_loops(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Loops whose header spans several lines must be reported."""
    checker = WrongLoopVisitor(default_options, tree=parse_ast_tree(mode(code)))
    checker.run()
    assert_errors(checker, [MultilineLoopViolation])
@pytest.mark.parametrize('code', [
    correct_loop1,
    correct_loop2,
    correct_loop3,
])
def test_correct_multiline_loops(
    assert_errors,
    parse_ast_tree,
    code,
    default_options,
    mode,
):
    """Single-line loop headers must produce no violations."""
    checker = WrongLoopVisitor(default_options, tree=parse_ast_tree(mode(code)))
    checker.run()
    assert_errors(checker, [])
| StarcoderdataPython |
8145182 | from .stub import Stub
from .calls import Call
from .doubles import Spy
from .doubles import TestDouble
from .interfaces import Observer
from .wrapper import AttributeWrapper
__all__ = ('Spy', 'Call', 'Stub', 'TestDouble', 'Observer', 'AttributeWrapper')
| StarcoderdataPython |
5096466 | <reponame>awesome-archive/webrecorder<gh_stars>0
import json
import time
import redis
import os
import re
import gevent
from operator import itemgetter
from bottle import request, HTTPError, response
from datetime import datetime
from re import sub
from webrecorder.basecontroller import BaseController, wr_api_spec
from webrecorder.models import Stats, User
from datetime import datetime, timedelta
# Custom Stats
# Labels for stats that are computed in code rather than read from a plain
# redis counter hash; the label string itself doubles as the lookup key.
USER_TABLE = 'User Table'
COLL_TABLE = 'Collections Public'
TEMP_TABLE = 'Temp Table'
ACTIVE_SESSIONS = 'Active Sessions'
TOTAL_USERS = 'Total Users'
USER_LOGINS = 'User-Logins-Any'
USER_LOGINS_100 = 'User-Logins-100MB'
USER_LOGINS_1000 = 'User-Logins-1GB'
COLL_SIZES_CREATED = 'Collections Created'
COLL_SIZES_UPDATED = 'Collections Updated'
# ============================================================================
class AdminController(BaseController):
STATS_LABELS = {
'All Capture Logged In': Stats.ALL_CAPTURE_USER_KEY,
'All Capture Temp': Stats.ALL_CAPTURE_TEMP_KEY,
'Replay Logged In': Stats.REPLAY_USER_KEY,
'Replay Temp': Stats.REPLAY_TEMP_KEY,
'Patch Logged In': Stats.PATCH_USER_KEY,
'Patch Temp': Stats.PATCH_TEMP_KEY,
'Delete Logged In': Stats.DELETE_USER_KEY,
'Delete Temp': Stats.DELETE_TEMP_KEY,
'Num Downloads Logged In': Stats.DOWNLOADS_USER_COUNT_KEY,
'Downloaded Size Logged In': Stats.DOWNLOADS_USER_SIZE_KEY,
'Num Downloads Temp': Stats.DOWNLOADS_TEMP_COUNT_KEY,
'Downloaded Size Temp': Stats.DOWNLOADS_TEMP_SIZE_KEY,
'Num Uploads': Stats.UPLOADS_COUNT_KEY,
'Uploaded Size': Stats.UPLOADS_SIZE_KEY,
'Bookmarks Added': Stats.BOOKMARK_ADD_KEY,
'Bookmarks Changed': Stats.BOOKMARK_MOD_KEY,
'Bookmarks Deleted': Stats.BOOKMARK_DEL_KEY,
'Num Temp Collections Added': Stats.TEMP_MOVE_COUNT_KEY,
'Temp Collection Size Added': Stats.TEMP_MOVE_SIZE_KEY,
}
CUSTOM_STATS = [
USER_TABLE, COLL_TABLE, TEMP_TABLE,
ACTIVE_SESSIONS, TOTAL_USERS,
USER_LOGINS, USER_LOGINS_100, USER_LOGINS_1000,
COLL_SIZES_CREATED, COLL_SIZES_UPDATED
]
CACHE_TTL = 600
CACHE_USER_TABLE = 'stc:users'
CACHE_COLL_TABLE = 'stc:colls'
    def __init__(self, *args, **kwargs):
        """Wire up config values, redis key templates and handles; warm the stats map in the background."""
        super(AdminController, self).__init__(*args, **kwargs)
        config = kwargs['config']
        self.default_user_desc = config['user_desc']
        self.user_usage_key = config['user_usage_key']
        self.temp_usage_key = config['temp_usage_key']
        #self.temp_user_key = config['temp_prefix']
        self.tags_key = config['tags_key']
        # Key prefix / scan pattern for temporary (anonymous) users
        self.temp_user_key = 'u:{0}'.format(User.TEMP_PREFIX)
        self.temp_user_search = 'u:{0}*:info'.format(User.TEMP_PREFIX)
        self.announce_list = os.environ.get('ANNOUNCE_MAILING_LIST_ENDPOINT', False)
        self.session_redis = kwargs.get('session_redis')
        # label -> redis key map; populated off the request path (scans redis)
        self.all_stats = {}
        gevent.spawn(self.init_all_stats)
def init_all_stats(self):
for name, key in self.STATS_LABELS.items():
self.all_stats[name] = key
for key in self.redis.scan_iter(Stats.BROWSERS_KEY.format('*')):
name = 'Browser ' + key[len(Stats.BROWSERS_KEY.format('')):]
self.all_stats[name] = key
for key in self.redis.scan_iter(Stats.SOURCES_KEY.format('*')):
name = 'Sources ' + key[len(Stats.SOURCES_KEY.format('')):]
self.all_stats[name] = key
for key in self.CUSTOM_STATS:
self.all_stats[key] = key
def admin_view(self, function):
def check_access(*args, **kwargs):
if not self.access.is_superuser():
self._raise_error(404)
return function(*args, **kwargs)
return check_access
    def grafana_time_stats(self, req):
        """Grafana SimpleJson /query handler: one day-granularity series/table per target."""
        # NOTE(review): the `req` argument is immediately discarded in favor
        # of the live request body -- confirm whether the parameter is needed.
        req = request.json or {}
        # Grafana sends ISO timestamps; keep only the YYYY-MM-DD date part.
        from_var = req['range']['from'][:10]
        to_var = req['range']['to'][:10]
        from_dt = datetime.strptime(from_var, '%Y-%m-%d')
        to_dt = datetime.strptime(to_var, '%Y-%m-%d')
        td = timedelta(days=1)
        dates = []
        timestamps = []
        # Enumerate every day in the inclusive range; timestamps are ms epoch.
        while from_dt <= to_dt:
            dates.append(from_dt.date().isoformat())
            timestamps.append(from_dt.timestamp() * 1000)
            from_dt += td
        resp = [self.load_series(target, dates, timestamps) for target in req['targets']]
        return resp
def load_series(self, target, dates, timestamps):
name = target.get('target', '')
if target['type'] == 'timeserie':
if name == ACTIVE_SESSIONS:
return self.load_active_sessions(name)
elif name == TOTAL_USERS:
return self.load_total_users(name)
elif name == USER_LOGINS:
return self.load_user_logins(name, dates, timestamps)
elif name == USER_LOGINS_100:
return self.load_user_logins(name, dates, timestamps, 100000000)
elif name == USER_LOGINS_1000:
return self.load_user_logins(name, dates, timestamps, 1000000000)
elif name == COLL_SIZES_CREATED or name == COLL_SIZES_UPDATED:
return self.load_coll_series_by_size(name, dates, timestamps)
return self.load_time_series(name, dates, timestamps)
elif target['type'] == 'table':
if name == USER_TABLE:
return self.load_user_table()
elif name == TEMP_TABLE:
return self.load_temp_table()
elif name == COLL_TABLE:
return self.load_coll_table()
return {}
def load_time_series(self, key, dates, timestamps):
datapoints = []
redis_key = self.all_stats.get(key, 'st:' + key)
if dates:
results = self.redis.hmget(redis_key, dates)
else:
results = []
for count, ts in zip(results, timestamps):
count = int(count or 0)
datapoints.append((count, ts))
return {'target': key,
'datapoints': datapoints
}
def load_temp_table(self):
columns = [
{'text': 'Id', 'type': 'string'},
{'text': 'Size', 'type': 'number'},
{'text': 'Creation Date', 'type': 'time'},
{'text': 'Updated Date', 'type': 'time'},
]
column_keys = ['size', 'created_at', 'updated_at']
users = []
for user_key in self.redis.scan_iter(self.temp_user_search, count=100):
user_data = self.redis.hmget(user_key, column_keys)
user_data.insert(0, user_key.split(':')[1])
user_data[1] = int(user_data[1])
user_data[2] = self.parse_iso_or_ts(user_data[2])
user_data[3] = self.parse_iso_or_ts(user_data[3])
users.append(user_data)
return {'columns': columns,
'rows': users,
'type': 'table'
}
def load_total_users(self, key):
ts = int(datetime.utcnow().timestamp()) * 1000
num_users = self.redis.scard('s:users')
datapoints = [[num_users, ts]]
return {'target': key,
'datapoints': datapoints
}
def load_active_sessions(self, key):
ts = int(datetime.utcnow().timestamp()) * 1000
num_sessions = sum(1 for i in self.session_redis.scan_iter('sesh:*', count=10))
datapoints = [[num_sessions, ts]]
return {'target': key,
'datapoints': datapoints
}
# USER TABLE
    def fetch_user_table(self):
        """Return rows for the user table, cached in redis for CACHE_TTL seconds.

        Row layout (positional, consumed by load_user_table/load_user_logins):
        [id, size, max_size, percent, last_login_ms, created_ms, updated_ms,
         role, email].
        """
        users = self.redis.get(self.CACHE_USER_TABLE)
        if users:
            return json.loads(users)
        column_keys = ['size', 'max_size', 'last_login', 'created_at', 'updated_at', 'role', 'email_addr']
        users = []
        for user_key in self.redis.scan_iter(User.INFO_KEY.format(user='*'), count=100):
            # Skip temporary/anonymous users
            if user_key.startswith(self.temp_user_key):
                continue
            user_data = self.redis.hmget(user_key, column_keys)
            user_data.insert(0, user_key.split(':')[1])  # prepend user id
            user_data[1] = int(user_data[1])  # size (bytes)
            user_data[2] = int(user_data[2])  # max_size (bytes)
            # Insert percent-used at index 3, shifting the date columns right
            user_data.insert(3, 100.0 * user_data[1] / user_data[2])
            user_data[4] = self.parse_iso_or_ts(user_data[4])  # last_login -> ms
            user_data[5] = self.parse_iso_or_ts(user_data[5])  # created_at -> ms
            user_data[6] = self.parse_iso_or_ts(user_data[6])  # updated_at -> ms
            users.append(user_data)
        self.redis.setex(self.CACHE_USER_TABLE, self.CACHE_TTL, json.dumps(users))
        return users
def load_user_table(self):
columns = [
{'text': 'Id', 'type': 'string'},
{'text': 'Size', 'type': 'number'},
{'text': 'Max Size', 'type': 'number'},
{'text': 'Percent', 'type': 'number'},
{'text': 'Last Login Date', 'type': 'time'},
{'text': 'Creation Date', 'type': 'time'},
{'text': 'Updated Date', 'type': 'time'},
{'text': 'Role', 'type': 'string'},
{'text': 'Email', 'type': 'string'},
]
return {'columns': columns,
'rows': self.fetch_user_table(),
'type': 'table'
}
    def load_user_logins(self, key, dates, timestamps, size_threshold=None):
        """Daily user counts, optionally only users above *size_threshold* bytes.

        NOTE(review): this buckets by row index 6, which is the *updated_at*
        column of fetch_user_table, not index 4 (last_login) -- confirm that
        'logins' is intended to mean 'last activity'.
        """
        date_bucket = {}
        for user_data in self.fetch_user_table():
            if size_threshold is not None and user_data[1] < size_threshold:
                continue
            # note: ts should already be utc!
            dt = datetime.fromtimestamp(user_data[6] / 1000)
            dt = dt.date().isoformat()
            date_bucket[dt] = date_bucket.get(dt, 0) + 1
        datapoints = []
        for dt, ts in zip(dates, timestamps):
            count = date_bucket.get(dt, 0)
            datapoints.append((count, ts))
        return {'target': key,
                'datapoints': datapoints
                }
# COLL TABLE
def fetch_coll_table(self):
colls = self.redis.get(self.CACHE_COLL_TABLE)
if colls:
return json.loads(colls)
column_keys = ['slug', 'title', 'size', 'owner', 'created_at', 'updated_at', 'public']
colls = []
for coll_key in self.redis.scan_iter('c:*:info', count=100):
coll_data = self.redis.hmget(coll_key, column_keys)
# exclude temp user collections
try:
user = self.user_manager.all_users[coll_data[3]]
if user.is_anon():
continue
except:
continue
coll_data[2] = int(coll_data[2])
coll_data[4] = self.parse_iso_or_ts(coll_data[4])
coll_data[5] = self.parse_iso_or_ts(coll_data[5])
colls.append(coll_data)
self.redis.setex(self.CACHE_COLL_TABLE, self.CACHE_TTL, json.dumps(colls))
return colls
def load_coll_table(self):
columns = [
{'text': 'Slug', 'type': 'string'},
{'text': 'Title', 'type': 'string'},
{'text': 'Size', 'type': 'number'},
{'text': 'Owner', 'type': 'string'},
{'text': 'Creation Date', 'type': 'time'},
{'text': 'Updated Date', 'type': 'time'},
{'text': 'Public', 'type': 'string'},
]
public_colls = [row for row in self.fetch_coll_table() if row[6] == '1']
return {'columns': columns,
'rows': public_colls,
'type': 'table'
}
def load_coll_series_by_size(self, key, dates, timestamps):
date_bucket = {}
if key == COLL_SIZES_CREATED:
index = 4
else:
index = 5
for coll_data in self.fetch_coll_table():
# note: ts should already be utc!
dt = datetime.fromtimestamp(coll_data[index] / 1000)
dt = dt.date().isoformat()
date_bucket[dt] = date_bucket.get(dt, 0) + coll_data[2]
datapoints = []
for dt, ts in zip(dates, timestamps):
count = date_bucket.get(dt, 0)
datapoints.append((count, ts))
return {'target': key,
'datapoints': datapoints
}
@classmethod
def parse_iso_or_ts(self, value):
try:
return int(value) * 1000
except:
pass
try:
return int(datetime.strptime(value[:19], '%Y-%m-%d %H:%M:%S').timestamp()) * 1000
except:
return 0
def init_routes(self):
wr_api_spec.set_curr_tag('Admin')
@self.app.get('/api/v1/admin/defaults')
@self.admin_view
def get_defaults():
data = self.redis.hgetall('h:defaults')
data['max_size'] = int(data['max_size'])
data['max_anon_size'] = int(data['max_anon_size'])
return {'defaults': data}
@self.app.put('/api/v1/admin/defaults')
def update_defaults():
data = request.json
if 'max_size' in data:
try:
self.redis.hset('h:defaults', 'max_size', int(data['max_size']))
except Exception as e:
return {'error': 'error setting max_size'}
if 'max_anon_size' in data:
try:
self.redis.hset('h:defaults', 'max_anon_size', int(data['max_anon_size']))
except Exception as e:
return {'error': 'error setting max_anon_size'}
data = self.redis.hgetall('h:defaults')
data['max_size'] = int(data['max_size'])
data['max_anon_size'] = int(data['max_anon_size'])
return {'defaults': data}
@self.app.get('/api/v1/admin/user_roles')
@self.admin_view
def api_get_user_roles():
return {"roles": self.user_manager.get_roles()}
@self.app.get('/api/v1/admin/dashboard')
@self.admin_view
def api_dashboard():
cache_key = self.cache_template.format('dashboard')
expiry = 5 * 60 # 5 min
cache = self.redis.get(cache_key)
if cache:
return json.loads(cache)
users = self.user_manager.all_users
temp = self.redis.hgetall(self.temp_usage_key)
user = self.redis.hgetall(self.user_usage_key)
temp = [(k, int(v)) for k, v in temp.items()]
user = [(k, int(v)) for k, v in user.items()]
all_collections = []
for username in users:
u = self.get_user(user=username)
all_collections.extend(
[c.serialize() for c in u.get_collections()]
)
data = {
'user_count': len(users),
'collections': all_collections,
'temp_usage': sorted(temp, key=itemgetter(0)),
'user_usage': sorted(user, key=itemgetter(0)),
}
self.redis.setex(cache_key, expiry,
json.dumps(data, cls=CustomJSONEncoder))
return data
@self.app.get('/api/v1/admin/users')
@self.admin_view
def api_users():
"""Full admin API resource of all users.
Containing user info and public collections
- Provides basic (1 dimension) RESTful sorting
- TODO: Pagination
"""
sorting = request.query.getunicode('sort', None)
sort_key = sub(r'^-{1}?', '', sorting) if sorting is not None else None
reverse = sorting.startswith('-') if sorting is not None else False
def dt(d):
return datetime.strptime(d, '%Y-%m-%d %H:%M:%S.%f')
# sortable fields, with optional key unpacking functions
filters = {
'created': {'key': lambda obj: dt(obj[1]['creation_date'])},
'email': {'key': lambda obj: obj[1]['email_addr']},
'last_login': {'key': lambda obj: dt(obj[1]['last_login'])},
'name': {'key': lambda obj: json.loads(obj[1]['desc'] or '{}')['name']},
'username': {},
}
if sorting is not None and sort_key not in filters:
raise HTTPError(400, 'Bad Request')
sort_by = filters[sort_key] if sorting is not None else None
users = sorted(self.user_manager.all_users,
key=sort_by,
reverse=reverse)
return {'users': [self.user_manager.all_users[user].serialize() for user in users]}
@self.app.get('/api/v1/admin/temp-users')
@self.admin_view
def temp_users():
""" Resource returning active temp users
"""
temp_user_keys = list(self.redis.scan_iter(self.temp_user_search))
temp_user_data = []
for user_key in temp_user_keys:
username = user_key.split(':')[1]
user = self.user_manager.all_users[username]
if not user or not user.get_prop('created_at'):
continue
temp_user_data.append(user.serialize())
return {'users': temp_user_data}
@self.app.post('/api/v1/admin/users')
@self.admin_view
def api_create_user():
"""API enpoint to create a user"""
data = request.json
errs, res = self.user_manager.create_user_as_admin(
email=data['email'],
username=data['username'],
role=data['role'],
passwd=data['password'],
passwd2=data['password'],
name=data.get('full_name', ''))
# validate
if errs:
return {'errors': errs}
user, first_coll = res
return {'user': user.name, 'first_coll': first_coll.name if first_coll else ''}
@self.app.put('/api/v1/admin/user/<username>')
@self.admin_view
def api_update_user(username):
"""API enpoint to update user info (full access)
"""
user = self.get_user(user=username)
errs = self.user_manager.update_user_as_admin(user, request.json)
if errs:
return {'errors': errs}
return {'user': user.serialize()}
# Grafana Stats APIs
wr_api_spec.set_curr_tag('Stats')
@self.app.get('/api/v1/stats/')
@self.admin_view
def stats_ping():
return {}
@self.app.post('/api/v1/stats/search')
@self.admin_view
def stats_search():
stats = sorted(list(self.all_stats.keys()))
response.content_type = 'application/json'
return json.dumps(stats)
@self.app.post('/api/v1/stats/query')
@self.admin_view
def stats_query():
stats = self.grafana_time_stats(request.json)
response.content_type = 'application/json'
return json.dumps(stats)
@self.app.post('/api/v1/stats/annotations')
@self.admin_view
def stats_annotations():
return []
| StarcoderdataPython |
3296803 | <reponame>nprez83/hyundai_kia_connect_api
import os
from hyundai_kia_connect_api.VehicleManager import VehicleManager
def test_login():
    """Log in to the Kia Canada API and verify at least one vehicle exists."""
    credentials = {
        'username': os.environ["KIA_CA_CDNNINJA_USERNAME"],
        'password': os.environ["KIA_CA_CDNNINJA_PASSWORD"],
        'pin': os.environ["KIA_CA_CDNNINJA_PIN"],
    }
    # region=2 / brand=1 selects Kia Canada
    manager = VehicleManager(region=2, brand=1, **credentials)
    print(manager.check_and_refresh_token())
    print(manager.vehicles)
    assert len(manager.vehicles.keys()) > 0
| StarcoderdataPython |
11313109 | """set up a new table for the Titanic data """
import os
import psycopg2
from dotenv import load_dotenv
load_dotenv() # looks inside the .env file for some env vars
# passes env var values to python var
DB_HOST = os.getenv("DB_HOST", default="OOPS")
DB_NAME = os.getenv("DB_NAME", default="OOPS")
DB_USER = os.getenv("DB_USER", default="OOPS")
DB_PASSWORD = os.getenv("DB_PASSWORD", default="<PASSWORD>")
conn= psycopg2.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST
)
# # A "cursor", a structure to iterate over db record to perform queries
cur = conn.cursor()
# ### An example query
cur.execute ('SELECT * from test_table;')
# # fetch the query
result = cur.fetchall()
print(result)
for row in results:
print(row)
print("------------")
insert_query = """
INSERT INTO test_table (name, data) VALUES
(
'A row name',
null
),
(
'Another row, with JSON',
'{ "a": 1, "b": ["dog", "cat", 42], "c": true }'::JSONB
);
"""
print(insert_query)
cursor.execute(insert_query)
print("------------")
query = "SELECT * from test_table;"
print(query)
cursor.execute(query)
#results = cursor.fetchone()
results = cursor.fetchall()
#print(results)
for row in results:
print(row)
# committing the transaction:
connection.commit()
| StarcoderdataPython |
1904477 | import os.path
from gzip import GzipFile
from typing import List, Dict, Optional, Union
import structlog
import rapidjson
import pandas as pd
from .handle_value_error import handle_value_error
class DsReaderFs:
    """Filesystem-backed dataset reader.

    Loads a (gzipped) JSON manifest and parquet channel files from a
    directory tree rooted at *root_path*, with an optional key *prefix*.
    """

    def __init__(
        self,
        *,
        root_path: str,
        log: object = None,
        manifest_key: Optional[str] = None,
        prefix: Optional[str] = None,
    ):
        """
        root_path    -- base directory containing the dataset
        log          -- structlog-style logger (defaults to structlog.get_logger())
        manifest_key -- manifest file path relative to root_path
        prefix       -- optional path component prepended to every key
        """
        self._log = log if log is not None else structlog.get_logger()
        self._log = self._log.bind(
            client="ds_reader_fs", root_path=root_path, manifest_key=manifest_key
        )
        # Manifests are assumed gzip-compressed (flag kept for symmetry
        # with the plain-text branch in read_manifest).
        self._is_gzipped = True
        self._root_path = root_path
        self._manifest_key = manifest_key
        self._prefix = prefix

    def read_manifest(self) -> Dict:
        """Load and parse the JSON manifest (gzipped or plain text)."""
        self._log.info("Read Manifest: Start")
        file_location = os.path.join(
            self._root_path, add_prefix(self._manifest_key, self._prefix)
        )
        if self._is_gzipped:
            with GzipFile(file_location, "r") as fin:
                data = rapidjson.loads(fin.read().decode("utf-8"))
        else:
            with open(file_location, "r", encoding="utf-8") as fin:
                data = rapidjson.loads(fin.read())
        return data

    def read_metadata(self) -> Dict:
        """The filesystem backend stores no metadata; always returns {}."""
        self._log.info("Read Metadata: Start")
        return {}

    def read_parquet_channel(self, channel: Dict, columns: List[str]) -> pd.DataFrame:
        """Read the requested *columns* of a parquet channel into a DataFrame."""
        self._log.info(
            "Read parquet Channel: Start", channel=channel["channel"], columns=columns
        )
        file_location = self._get_file_location(channel)
        try:
            df = pd.read_parquet(file_location, columns=columns)
        except ValueError as value_error:
            # e.g. requested columns missing from the file -- delegate recovery
            df = handle_value_error(value_error, channel)
        return df

    def _get_file_location(self, channel, /) -> str:
        """Return the absolute filesystem path for a channel."""
        key = self._get_file_path(channel)
        return os.path.join(self._root_path, key)

    def _get_file_path(self, channel: Union[str, Dict]) -> str:
        """Resolve a channel (bare filename or manifest entry dict) to a
        key relative to root_path, honoring the optional prefix.

        BUG FIX: the original passed a *list* as the single argument to
        os.path.join(), which raises TypeError -- components must be
        unpacked as separate arguments.
        """
        if isinstance(channel, str):
            # A bare string is resolved relative to the manifest's directory.
            parts = os.path.normpath(self._manifest_key).split(os.path.sep)[:-1]
            # Empty parts => manifest sits directly under root_path.
            key = os.path.join(*parts) if parts else ""
            key = os.path.join(key, channel)
        elif isinstance(channel, dict):
            key = os.path.normpath(channel["key"])
        else:
            raise Exception(f"Unknown channel type {type(channel)}")
        return add_prefix(key, self._prefix)
def add_prefix(key, prefix, /) -> str:
    """Join *prefix* onto *key*; a None prefix leaves *key* unchanged."""
    return key if prefix is None else os.path.join(prefix, key)
| StarcoderdataPython |
11278901 | <filename>generated-libraries/python/netapp/ntdtest/ntdtest_nonlist_group_info.py
from netapp.ntdtest.group3_stats_info import Group3StatsInfo
from netapp.ntdtest.group1_stats_info import Group1StatsInfo
from netapp.ntdtest.group4_stats_info import Group4StatsInfo
from netapp.ntdtest.group2_stats_info import Group2StatsInfo
from netapp.netapp_object import NetAppObject
class NtdtestNonlistGroupInfo(NetAppObject):
    """
    Top-level typedef wrapping four nested stats typedefs, all at the
    same nesting level.

    As output, every element is reported unless the caller restricts
    the set of desired attributes.  As a desired-attributes input,
    omitting an element suppresses it from the output, while supplying
    it (even without a value) requests that it be returned when the
    value can be retrieved.  As a query input, an omitted element
    places no constraint on the matching objects.
    """
    _group1_stats = None

    @property
    def group1_stats(self):
        """First nested typedef at level 1."""
        return self._group1_stats

    @group1_stats.setter
    def group1_stats(self, new_val):
        if new_val != None:
            self.validate('group1_stats', new_val)
        self._group1_stats = new_val

    _group2_stats = None

    @property
    def group2_stats(self):
        """Second nested typedef at level 1."""
        return self._group2_stats

    @group2_stats.setter
    def group2_stats(self, new_val):
        if new_val != None:
            self.validate('group2_stats', new_val)
        self._group2_stats = new_val

    _group3_stats = None

    @property
    def group3_stats(self):
        """Third nested typedef at level 1."""
        return self._group3_stats

    @group3_stats.setter
    def group3_stats(self, new_val):
        if new_val != None:
            self.validate('group3_stats', new_val)
        self._group3_stats = new_val

    _group4_stats = None

    @property
    def group4_stats(self):
        """Fourth nested typedef at level 1."""
        return self._group4_stats

    @group4_stats.setter
    def group4_stats(self, new_val):
        if new_val != None:
            self.validate('group4_stats', new_val)
        self._group4_stats = new_val

    @staticmethod
    def get_api_name():
        return "ntdtest-nonlist-group-info"

    @staticmethod
    def get_desired_attrs():
        # Order mirrors the upstream API definition.
        return [
            'group3-stats',
            'group1-stats',
            'group4-stats',
            'group2-stats',
        ]

    def describe_properties(self):
        return {
            'group3_stats': {'class': Group3StatsInfo, 'is_list': False, 'required': 'optional'},
            'group1_stats': {'class': Group1StatsInfo, 'is_list': False, 'required': 'optional'},
            'group4_stats': {'class': Group4StatsInfo, 'is_list': False, 'required': 'optional'},
            'group2_stats': {'class': Group2StatsInfo, 'is_list': False, 'required': 'optional'},
        }
| StarcoderdataPython |
3455018 | <reponame>yausername/whatsapp-cli<filename>whatsappCli/feed/pb_feeder.py
from collections import OrderedDict
import os
import io
import json
import requests
import threading
import time
from feeder import Feeder
import pb_stream
from pb_stream import contacts
from pb_stream import config
from pkg_resources import resource_string
class PBFeeder(Feeder):
    """WhatsApp chat feed backed by the Pushbullet streaming API."""

    def __init__(self, pb_token, dir=os.path.join(os.path.expanduser("~"), ".whatsapp-cli")):
        """Store the token, ensure the cache directory exists, load the
        reply template, and start the Pushbullet stream in the background.

        pb_token -- Pushbullet access token
        dir      -- working directory for cached contacts/messages
        """
        self.pb_token = pb_token
        if not os.path.exists(dir):
            os.makedirs(dir)
        self.dir = dir
        # number of trailing lines to show when tailing starts
        self.starting_lines = 10
        self.reply_template = json.loads(resource_string(__name__, 'data/reply.json'), object_pairs_hook=OrderedDict)
        self.__pb_start_stream()

    def __pb_start_stream(self):
        """Start the Pushbullet stream listener on a daemon thread."""
        pb_stream.init(self.dir)
        thread = threading.Thread(
            target=pb_stream.start_stream, args=(self.pb_token,))
        thread.daemon = True
        thread.start()

    def users(self):
        """Return the display names of all known contacts."""
        return contacts.values()

    def add_user(self, number, name):
        """Register a contact by phone number and display name."""
        pb_stream.update_user_info(None, name, number + '@s.whatsapp.net')

    def get(self, user=None):
        """Return a generator of chat lines, optionally filtered to *user*."""
        if user is not None:
            tag = self.resolve_user(user)
            full_name = contacts[tag]
            return self.__tail(self.__filter(full_name))
        else:
            return self.__tail()

    def post(self, user, msg):
        """Send *msg* to the contact resolved from *user* via a
        Pushbullet ephemeral push.

        BUG FIX: dict.copy() is shallow, so mutating the nested 'push'
        dict corrupted the shared reply template for every later call.
        A JSON round-trip yields an independent deep copy instead.
        """
        tag = self.resolve_user(user)
        req = json.loads(json.dumps(self.reply_template), object_pairs_hook=OrderedDict)
        req["push"]["conversation_iden"]["tag"] = tag
        req["push"]["message"] = msg
        req["push"]["source_user_iden"] = config["source_user_iden"]
        req["push"]["target_device_iden"] = config["source_device_iden"]
        headers = {}
        headers["Content-Type"] = "application/json"
        headers["Access-Token"] = self.pb_token
        url = 'https://api.pushbullet.com/v2/ephemerals'
        requests.post(url, data=json.dumps(req), headers=headers)

    def resolve_user(self, name):
        """Resolve *name* (12-digit number or partial contact name) to a
        single WhatsApp tag; raises ValueError on 0 or multiple matches.
        """
        results = []
        if name.isnumeric() and len(name) == 12:
            results.append(name + '@s.whatsapp.net')
        else:
            # NOTE(review): dict.iteritems() is Python 2 only -- confirm
            # the target runtime before porting.
            for c_tag, c_name in contacts.iteritems():
                if name.lower() in c_name.lower():
                    results.append(c_tag)
        if len(results) == 1:
            return results[0]
        else:
            raise ValueError('0 or more than 1 matches', results)

    def __filter(self, pattern):
        """Return a predicate matching lines that start with *pattern*."""
        return lambda line: line.startswith(pattern)

    def __seek_to_n_lines_from_end(self, f, numlines=10):
        """
        Seek to `numlines` lines from the end of the file `f`.
        Uses an assumed average line length rather than counting lines.
        """
        f.seek(0, 2)    # seek to the end of the file
        file_pos = f.tell()
        avg_line_len = 100
        toread = min(avg_line_len * numlines, file_pos)
        f.seek(file_pos - toread, 1)

    def __tail(self, filter=None):
        """
        A generator for reading new lines off of the end of a file. To start with,
        the last `starting_lines` lines will be read from the end.
        Handles truncation by reopening the file and emitting a warning banner.
        """
        filename = self.dir + "/msg"
        with io.open(filename, mode='r', encoding='utf-8') as f:
            current_size = os.stat(filename).st_size
            #no seek to n lines. read from beginning instead
            #self.__seek_to_n_lines_from_end(f, self.starting_lines)
            while True:
                new_size = os.stat(filename).st_size
                where = f.tell()
                line = f.readline()
                if not line:
                    if new_size < current_size:
                        # the file was probably truncated, reopen
                        f.close()
                        f = io.open(filename, mode='r', encoding='utf-8')
                        current_size = new_size
                        dashes = "-" * 20
                        yield "\n"
                        yield "\n"
                        yield "%s messages might be missing %s" % (dashes, dashes)
                        yield "\n"
                        yield "\n"
                        #time.sleep(0.25)
                    else:
                        # no new data yet; poll briefly and retry
                        time.sleep(0.04)
                        f.seek(where)
                else:
                    current_size = new_size
                    if filter is not None and not filter(line):
                        pass #filtered out
                    else:
                        yield line
| StarcoderdataPython |
1908343 | <filename>bin/upload_cdms.py
''' This program will Upload Color Depth MIPs to AWS S3.
'''
__version__ = '1.3.3'
import argparse
from datetime import datetime
import glob
import json
import os
import re
import socket
import sys
from time import strftime, time
import boto3
from botocore.exceptions import ClientError
import colorlog
import inquirer
import jwt
import requests
from simple_term_menu import TerminalMenu
from tqdm import tqdm
import MySQLdb
from PIL import Image
import neuronbridge_lib as NB
# Configuration
CONFIG = {'config': {'url': 'http://config.int.janelia.org/'}}
AWS = dict()
CLOAD = dict()
LIBRARY = dict()
MANIFOLDS = ['dev', 'prod', 'devpre', 'prodpre']
VARIANTS = ["gradient", "searchable_neurons", "zgap"]
WILL_LOAD = list()
# Database
CONN = dict()
CURSOR = dict()
# General use
COUNT = {'Amazon S3 uploads': 0, 'Files to upload': 0, 'Samples': 0, 'No Consensus': 0,
'No sampleRef': 0, 'No publishing name': 0, 'No driver': 0, 'Sample not published': 0,
'Line not published': 0, 'Skipped': 0, 'Already on S3': 0, 'Already on JACS': 0,
'Bad driver': 0, 'Duplicate objects': 0, 'Unparsable files': 0, 'Updated on JACS': 0,
'FlyEM flips': 0, 'Images': 0}
SUBDIVISION = {'prefix': 1, 'counter': 0, 'limit': 100} #PLUG
TRANSACTIONS = dict()
PNAME = dict()
REC = {'line': '', 'slide_code': '', 'gender': '', 'objective': '', 'area': ''}
S3_CLIENT = S3_RESOURCE = ''
FULL_NAME = ''
MAX_SIZE = 500
CREATE_THUMBNAIL = False
S3_SECONDS = 60 * 60 * 12
VARIANT_UPLOADS = dict()
UPLOADED_NAME = dict()
KEY_LIST = list()
def terminate_program(code):
''' Terminate the program gracefully
Keyword arguments:
code: return code
Returns:
None
'''
if S3CP:
ERR.close()
S3CP.close()
if not os.path.getsize(S3CP_FILE):
os.remove(S3CP_FILE)
for fpath in [ERR_FILE, S3CP_FILE]:
if os.path.exists(fpath) and not os.path.getsize(fpath):
os.remove(fpath)
sys.exit(code)
def call_responder(server, endpoint, payload='', authenticate=False):
''' Call a responder
Keyword arguments:
server: server
endpoint: REST endpoint
payload: payload for POST requests
authenticate: pass along token in header
Returns:
JSON response
'''
if not server in TRANSACTIONS:
TRANSACTIONS[server] = 1
else:
TRANSACTIONS[server] += 1
url = (CONFIG[server]['url'] if server else '') + endpoint
try:
if payload or authenticate:
headers = {"Content-Type": "application/json",
"Authorization": "Bearer " + os.environ['JACS_JWT']}
if payload:
headers['Accept'] = 'application/json'
headers['host'] = socket.gethostname()
req = requests.put(url, headers=headers, json=payload)
else:
if authenticate:
req = requests.get(url, headers=headers)
else:
req = requests.get(url)
except requests.exceptions.RequestException as err:
LOGGER.critical(err)
terminate_program(-1)
if req.status_code == 200:
return req.json()
print("Could not get response from %s: %s" % (url, req.text))
terminate_program(-1)
return False
def sql_error(err):
""" Log a critical SQL error and exit """
try:
LOGGER.critical('MySQL error [%d]: %s', err.args[0], err.args[1])
except IndexError:
LOGGER.critical('MySQL error: %s', err)
terminate_program(-1)
def db_connect(dbd):
""" Connect to a database
Keyword arguments:
dbd: database dictionary
"""
LOGGER.info("Connecting to %s on %s", dbd['name'], dbd['host'])
try:
conn = MySQLdb.connect(host=dbd['host'], user=dbd['user'],
passwd=dbd['password'], db=dbd['name'])
except MySQLdb.Error as err:
sql_error(err)
try:
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
return conn, cursor
except MySQLdb.Error as err:
sql_error(err)
def decode_token(token):
''' Decode a given JWT token
Keyword arguments:
token: JWT token
Returns:
decoded token JSON
'''
try:
response = jwt.decode(token, verify=False)
except jwt.exceptions.DecodeError:
LOGGER.critical("Token failed validation")
terminate_program(-1)
except jwt.exceptions.InvalidTokenError:
LOGGER.critical("Could not decode token")
terminate_program(-1)
return response
def initialize_s3():
""" Initialize
"""
global S3_CLIENT, S3_RESOURCE # pylint: disable=W0603
LOGGER.info("Opening S3 client and resource")
if "dev" in ARG.MANIFOLD:
S3_CLIENT = boto3.client('s3')
S3_RESOURCE = boto3.resource('s3')
else:
sts_client = boto3.client('sts')
aro = sts_client.assume_role(RoleArn=AWS['role_arn'],
RoleSessionName="AssumeRoleSession1",
DurationSeconds=S3_SECONDS)
credentials = aro['Credentials']
S3_CLIENT = boto3.client('s3',
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'])
S3_RESOURCE = boto3.resource('s3',
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'])
def get_parms():
""" Query the user for the CDM library and manifold
Keyword arguments:
None
Returns:
None
"""
if not ARG.LIBRARY:
print("Select a library:")
cdmlist = list()
liblist = list()
for cdmlib in LIBRARY:
if ARG.MANIFOLD not in LIBRARY[cdmlib]:
LIBRARY[cdmlib][ARG.MANIFOLD] = {'updated': None}
liblist.append(cdmlib)
text = cdmlib
if 'updated' in LIBRARY[cdmlib][ARG.MANIFOLD] and \
LIBRARY[cdmlib][ARG.MANIFOLD]['updated'] \
and "0000-00-00" not in LIBRARY[cdmlib][ARG.MANIFOLD]['updated']:
text += " (last updated %s on %s)" \
% (LIBRARY[cdmlib][ARG.MANIFOLD]['updated'], ARG.MANIFOLD)
cdmlist.append(text)
terminal_menu = TerminalMenu(cdmlist)
chosen = terminal_menu.show()
if chosen is None:
LOGGER.error("No library selected")
terminate_program(0)
ARG.LIBRARY = liblist[chosen].replace(' ', '_')
if not ARG.NEURONBRIDGE:
ARG.NEURONBRIDGE = NB.get_neuronbridge_version()
if not ARG.NEURONBRIDGE:
LOGGER.error("No NeuronBridge version selected")
terminate_program(0)
print(ARG.NEURONBRIDGE)
if not ARG.JSON:
print("Select a JSON file:")
json_base = CLOAD['json_dir'] + "/%s" % (ARG.NEURONBRIDGE)
jsonlist = list(map(lambda jfile: jfile.split('/')[-1],
glob.glob(json_base + "/*.json")))
jsonlist.sort()
terminal_menu = TerminalMenu(jsonlist)
chosen = terminal_menu.show()
if chosen is None:
LOGGER.error("No JSON file selected")
terminate_program(0)
ARG.JSON = '/'.join([json_base, jsonlist[chosen]])
def set_searchable_subdivision(smp):
""" Set the first searchable_neurons subdivision
Keyword arguments:
smp: first sample from JSON file
Returns:
Alignment space
"""
if "alignmentSpace" not in smp:
LOGGER.critical("Could not find alignment space in first sample")
terminate_program(-1)
bucket = AWS["s3_bucket"]["cdm"]
if ARG.INTERNAL:
bucket += '-int'
elif ARG.MANIFOLD != 'prod':
bucket += '-' + ARG.MANIFOLD
library = LIBRARY[ARG.LIBRARY]['name'].replace(' ', '_')
prefix = "/".join([smp["alignmentSpace"], library, "searchable_neurons"])
maxnum = 0
for pag in S3_CLIENT.get_paginator("list_objects")\
.paginate(Bucket=bucket, Prefix=prefix+"/", Delimiter="/"):
if "CommonPrefixes" not in pag:
break
for obj in pag["CommonPrefixes"]:
num = obj["Prefix"].split("/")[-2]
if num.isdigit() and int(num) > maxnum:
maxnum = int(num)
SUBDIVISION['prefix'] = maxnum + 1
LOGGER.warning("Will upload searchable neurons starting with subdivision %d",
SUBDIVISION['prefix'])
return smp["alignmentSpace"]
def select_uploads():
""" Query the user for which image types to upload
Keyword arguments:
None
Returns:
None
"""
global WILL_LOAD # pylint: disable=W0603
quest = [inquirer.Checkbox('checklist',
message='Select image types to upload',
choices=VARIANTS, default=VARIANTS)]
WILL_LOAD = inquirer.prompt(quest)['checklist']
def initialize_program():
""" Initialize
"""
global AWS, CLOAD, CONFIG, FULL_NAME, LIBRARY # pylint: disable=W0603
CONFIG = (call_responder('config', 'config/rest_services'))["config"]
CLOAD = (call_responder('config', 'config/upload_cdms'))["config"]
AWS = (call_responder('config', 'config/aws'))["config"]
LIBRARY = (call_responder('config', 'config/cdm_library'))["config"]
if ARG.WRITE:
if 'JACS_JWT' not in os.environ:
LOGGER.critical("Missing token - set in JACS_JWT environment variable")
terminate_program(-1)
response = decode_token(os.environ['JACS_JWT'])
if int(time()) >= response['exp']:
LOGGER.critical("Your token is expired")
terminate_program(-1)
FULL_NAME = response['full_name']
LOGGER.info("Authenticated as %s", FULL_NAME)
if not ARG.MANIFOLD:
print("Select a manifold")
terminal_menu = TerminalMenu(MANIFOLDS)
chosen = terminal_menu.show()
if chosen is None:
LOGGER.critical("You must select a manifold")
terminate_program(-1)
ARG.MANIFOLD = MANIFOLDS[chosen]
get_parms()
select_uploads()
data = call_responder('config', 'config/db_config')
(CONN['sage'], CURSOR['sage']) = db_connect(data['config']['sage']['prod'])
if ARG.LIBRARY not in LIBRARY:
LOGGER.critical("Unknown library %s", ARG.LIBRARY)
terminate_program(-1)
initialize_s3()
def log_error(err_text):
''' Log an error and write to error output file
Keyword arguments:
err_text: error message
Returns:
None
'''
LOGGER.error(err_text)
ERR.write(err_text + "\n")
def get_s3_names(bucket, newname):
''' Return an S3 bucket and prefixed object name
Keyword arguments:
bucket: base bucket
newname: file to upload
Returns:
bucket and object name
'''
if ARG.INTERNAL:
bucket += '-int'
elif ARG.MANIFOLD != 'prod':
bucket += '-' + ARG.MANIFOLD
library = LIBRARY[ARG.LIBRARY]['name'].replace(' ', '_')
if ARG.LIBRARY in CLOAD['version_required']:
library += '_v' + ARG.VERSION
object_name = '/'.join([REC['alignment_space'], library, newname])
return bucket, object_name
def upload_aws(bucket, dirpath, fname, newname, force=False):
''' Transfer a file to Amazon S3
Keyword arguments:
bucket: S3 bucket
dirpath: source directory
fname: file name
newname: new file name
force: force upload (regardless of AWS parm)
Returns:
url
'''
COUNT['Files to upload'] += 1
complete_fpath = '/'.join([dirpath, fname])
bucket, object_name = get_s3_names(bucket, newname)
LOGGER.debug("Uploading %s to S3 as %s", complete_fpath, object_name)
if object_name in UPLOADED_NAME:
if complete_fpath != UPLOADED_NAME[object_name]:
err_text = "%s was already uploaded from %s, but is now being uploaded from %s" \
% (object_name, UPLOADED_NAME[object_name], complete_fpath)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
COUNT['Duplicate objects'] += 1
return False
LOGGER.debug("Already uploaded %s", object_name)
COUNT['Duplicate objects'] += 1
return 'Skipped'
UPLOADED_NAME[object_name] = complete_fpath
url = '/'.join([AWS['base_aws_url'], bucket, object_name])
url = url.replace(' ', '+')
if "/searchable_neurons/" in object_name:
KEY_LIST.append(object_name)
S3CP.write("%s\t%s\n" % (complete_fpath, '/'.join([bucket, object_name])))
LOGGER.info("Upload %s", object_name)
COUNT['Images'] += 1
if (not ARG.AWS) and (not force):
return url
if not ARG.WRITE:
COUNT['Amazon S3 uploads'] += 1
return url
if newname.endswith('.png'):
mimetype = 'image/png'
elif newname.endswith('.jpg'):
mimetype = 'image/jpeg'
else:
mimetype = 'image/tiff'
try:
payload = {'ContentType': mimetype}
if ARG.MANIFOLD == 'prod':
payload['ACL'] = 'public-read'
S3_CLIENT.upload_file(complete_fpath, bucket,
object_name,
ExtraArgs=payload)
except ClientError as err:
LOGGER.critical(err)
return False
COUNT['Amazon S3 uploads'] += 1
return url
def get_line_mapping():
''' Create a mapping of publishing names to drivers. Note that "GAL4-Collection"
is remapped to "GAL4".
Keyword arguments:
None
Returns:
driver dictionary
'''
driver = dict()
LOGGER.info("Getting line/driver mapping")
try:
CURSOR['sage'].execute("SELECT DISTINCT publishing_name,driver FROM image_data_mv " \
+ "WHERE publishing_name IS NOT NULL")
rows = CURSOR['sage'].fetchall()
except MySQLdb.Error as err:
sql_error(err)
for row in rows:
if row['driver']:
driver[row['publishing_name']] = \
row['driver'].replace("_Collection", "").replace("-", "_")
return driver
def get_image_mapping():
''' Create a dictionary of published sample IDs
Keyword arguments:
None
Returns:
sample ID dictionary
'''
LOGGER.info("Getting image mapping")
published_ids = dict()
stmt = "SELECT DISTINCT workstation_sample_id FROM image_data_mv WHERE " \
+ "to_publish='Y' AND alps_release IS NOT NULL"
try:
CURSOR['sage'].execute(stmt)
rows = CURSOR['sage'].fetchall()
except MySQLdb.Error as err:
sql_error(err)
for row in rows:
published_ids[row['workstation_sample_id']] = 1
return published_ids
def convert_file(sourcepath, newname):
''' Convert file to PNG format
Keyword arguments:
sourcepath: source filepath
newname: new file name
Returns:
New filepath
'''
LOGGER.debug("Converting %s to %s", sourcepath, newname)
newpath = CLOAD['temp_dir']+ newname
with Image.open(sourcepath) as image:
image.save(newpath, 'PNG')
return newpath
def process_flyem(smp, convert=True):
''' Return the file name for a FlyEM sample.
Keyword arguments:
smp: sample record
Returns:
New file name
'''
# Temporary!
#bodyid, status = smp['name'].split('_')[0:2]
bodyid = smp['publishedName']
#field = re.match('.*-(.*)_.*\..*', smp['name'])
#status = field[1]
#if bodyid.endswith('-'):
# return False
newname = '%s-%s-CDM.png' \
% (bodyid, REC['alignment_space'])
if convert:
smp['filepath'] = convert_file(smp['filepath'], newname)
else:
newname = newname.replace('.png', '.tif')
if '_FL' in smp['imageName']: # Used to be "name" for API call
newname = newname.replace('CDM.', 'CDM-FL.')
return newname
def translate_slide_code(isc, line0):
''' Translate a slide code to remove initials.
Keyword arguments:
isc: initial slide doce
line0: line
Returns:
New slide code
'''
if 'sample_BJD' in isc:
return isc.replace("BJD", "")
if 'GMR' in isc:
new = isc.replace(line0 + "_", "")
new = new.replace("-", "_")
return new
return isc
def get_smp_info(smp, published_ids):
''' Return the sample ID and publishing name
Keyword arguments:
smp: sample record
published_ids: sample dictionary
Returns:
Sample ID and publishing name, or None if error
'''
if 'sampleRef' not in smp or not smp['sampleRef']:
COUNT['No sampleRef'] += 1
err_text = "No sampleRef for %s (%s)" % (smp['_id'], smp['name'])
LOGGER.warning(err_text)
ERR.write(err_text + "\n")
return None, None
sid = (smp['sampleRef'].split('#'))[-1]
LOGGER.debug(sid)
if ARG.LIBRARY in ['flylight_splitgal4_drivers']:
if sid not in published_ids:
COUNT['Sample not published'] += 1
err_text = "Sample %s was not published" % (sid)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
return None, None
if 'publishedName' not in smp or not smp['publishedName']:
COUNT['No publishing name'] += 1
err_text = "No publishing name for sample %s" % (sid)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
return None, None
publishing_name = smp['publishedName']
if publishing_name == 'No Consensus':
COUNT['No Consensus'] += 1
err_text = "No consensus line for sample %s (%s)" % (sid, publishing_name)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
if ARG.WRITE:
return False
if publishing_name not in PNAME:
PNAME[publishing_name] = 1
else:
PNAME[publishing_name] += 1
return sid, publishing_name
def process_light(smp, driver, published_ids):
''' Return the file name for a light microscopy sample.
Keyword arguments:
smp: sample record
driver: driver mapping dictionary
published_ids: sample dictionary
Returns:
New file name
'''
sid, publishing_name = get_smp_info(smp, published_ids)
if not sid:
return False
REC['line'] = publishing_name
REC['slide_code'] = smp['slideCode']
REC['gender'] = smp['gender']
REC['objective'] = smp['objective']
REC['area'] = smp['anatomicalArea'].lower()
if publishing_name in driver:
if not driver[publishing_name]:
COUNT['No driver'] += 1
err_text = "No driver for sample %s (%s)" % (sid, publishing_name)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
if ARG.WRITE:
terminate_program(-1)
return False
drv = driver[publishing_name]
if drv not in CLOAD['drivers']:
COUNT['Bad driver'] += 1
err_text = "Bad driver for sample %s (%s)" % (sid, publishing_name)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
if ARG.WRITE:
terminate_program(-1)
return False
else:
COUNT['Line not published'] += 1
err_text = "Sample %s (%s) is not published in SAGE" % (sid, publishing_name)
LOGGER.error(err_text)
ERR.write(err_text + "\n")
if ARG.WRITE:
terminate_program(-1)
return False
fname = os.path.basename(smp['filepath'])
if 'gamma' in fname:
chan = fname.split('-')[-2]
else:
chan = fname.split('-')[-1]
chan = chan.split('_')[0].replace('CH', '')
if chan not in ['1', '2', '3', '4']:
LOGGER.critical("Could not find channel for %s (%s)", fname, chan)
terminate_program(-1)
newname = '%s-%s-%s-%s-%s-%s-%s-CDM_%s.png' \
% (REC['line'], REC['slide_code'], drv, REC['gender'],
REC['objective'], REC['area'], REC['alignment_space'], chan)
return newname
def calculate_size(dim):
''' Return the fnew dimensions for an image. The longest side will be scaled down to MAX_SIZE.
Keyword arguments:
dim: tuple with (X,Y) dimensions
Returns:
Tuple with new (X,Y) dimensions
'''
xdim, ydim = list(dim)
if xdim <= MAX_SIZE and ydim <= MAX_SIZE:
return dim
if xdim > ydim:
ratio = xdim / MAX_SIZE
xdim, ydim = [MAX_SIZE, int(ydim/ratio)]
else:
ratio = ydim / MAX_SIZE
xdim, ydim = [int(xdim/ratio), MAX_SIZE]
return tuple((xdim, ydim))
def resize_image(image_path, resized_path):
    ''' Read in an image, resize it, and write a copy.
        Keyword arguments:
          image_path: CDM image path
          resized_path: path for resized image
        Returns:
          None
    '''
    # Image.open as a context manager guarantees the source file is closed.
    with Image.open(image_path) as source:
        target_size = calculate_size(source.size)
        source.thumbnail(target_size)
        source.save(resized_path, 'JPEG')
def produce_thumbnail(dirpath, fname, newname, url):
    ''' Produce the thumbnail URL for an image and, if enabled, create and
        upload the thumbnail itself to Amazon S3.
        Keyword arguments:
          dirpath: source directory
          fname: file name
          newname: computed (published) file name
          url: primary image URL
        Returns:
          thumbnail url
    '''
    # Derive the thumbnail URL from the primary URL: same key with a .jpg
    # extension, hosted in the thumbnail bucket.
    turl = url.replace('.png', '.jpg')
    turl = turl.replace(AWS['s3_bucket']['cdm'], AWS['s3_bucket']['cdm-thumbnail'])
    if CREATE_THUMBNAIL:
        tname = newname.replace('.png', '.jpg')
        complete_fpath = '/'.join([dirpath, fname])
        # Resize into /tmp, then push the resized copy to the thumbnail bucket.
        resize_image(complete_fpath, '/tmp/' + tname)
        turl = upload_aws(AWS['s3_bucket']['cdm-thumbnail'], '/tmp', tname, tname)
    return turl
def update_jacs(sid, url, turl):
    ''' Update a sample in JACS with URL and thumbnail URL for viewable image
        Keyword arguments:
          sid: sample ID
          url: image URL
          turl: thumbnail URL
        Returns:
          None
    '''
    payload = {
        "class": "org.janelia.model.domain.gui.cdmip.ColorDepthImage",
        "publicImageUrl": url,
        "publicThumbnailUrl": turl,
    }
    endpoint = 'colorDepthMIPs/' + sid + '/publicURLs'
    call_responder('jacsv2', endpoint, payload, True)
    COUNT['Updated on JACS'] += 1
def set_name_and_filepath(smp):
    ''' Determine a sample's name and filepath
        Keyword arguments:
          smp: sample record
        Returns:
          None
    '''
    # The CDM path doubles as the file path; the display name is its basename.
    filepath = smp['cdmPath']
    smp['filepath'] = filepath
    smp['name'] = os.path.basename(filepath)
def upload_flyem_variants(smp, newname):
    ''' Upload variant files for FlyEM
        Keyword arguments:
          smp: sample record
          newname: computed filename
        Returns:
          None
    '''
    if 'variants' not in smp:
        LOGGER.warning("No variants for %s", smp['name'])
        return
    # Base of the published name, without extension.
    fbase = newname.split('.')[0]
    for variant in smp['variants']:
        if variant not in VARIANTS:
            LOGGER.error("Unknown variant %s", variant)
            terminate_program(-1)
        # Only upload variant types selected for loading.
        if variant not in WILL_LOAD:
            continue
        # NOTE(review): split('.') assumes exactly one dot in the file name;
        # a multi-dot name would raise ValueError here — confirm inputs.
        fname, ext = os.path.basename(smp['variants'][variant]).split('.')
        # Published key for the variant: <variant>/<fbase>.<ext>
        ancname = '.'.join([fbase, ext])
        ancname = '/'.join([variant, ancname])
        dirpath = os.path.dirname(smp['variants'][variant])
        fname = os.path.basename(smp['variants'][variant])
        if variant == 'searchable_neurons':
            # Spread searchable neurons over numbered subdirectories holding
            # at most SUBDIVISION['limit'] files each.
            if SUBDIVISION['counter'] >= SUBDIVISION['limit']:
                SUBDIVISION['prefix'] += 1
                SUBDIVISION['counter'] = 0
            ancname = ancname.replace('searchable_neurons/',
                                      'searchable_neurons/%s/' % str(SUBDIVISION['prefix']))
            SUBDIVISION['counter'] += 1
        _ = upload_aws(AWS['s3_bucket']['cdm'], dirpath, fname, ancname)
        # Tally uploads per variant type.
        if variant not in VARIANT_UPLOADS:
            VARIANT_UPLOADS[variant] = 1
        else:
            VARIANT_UPLOADS[variant] += 1
def upload_flylight_variants(smp, newname):
    ''' Upload variant files for FlyLight
        Keyword arguments:
          smp: sample record
          newname: computed filename
        Returns:
          None
    '''
    if 'variants' not in smp:
        LOGGER.warning("No variants for %s", smp['name'])
        return
    # Base of the published name, without extension.
    fbase = newname.split('.')[0]
    for variant in smp['variants']:
        if variant not in VARIANTS:
            LOGGER.error("Unknown variant %s", variant)
            terminate_program(-1)
        # Only upload variant types selected for loading.
        if variant not in WILL_LOAD:
            continue
        fname = os.path.basename(smp['variants'][variant])
        if '.' not in fname:
            # Bug fix: this error path previously logged "fname" before the
            # variable was assigned, raising NameError instead of reporting
            # the unparsable file.
            LOGGER.error("%s file %s has no extension", variant, fname)
            COUNT['Unparsable files'] += 1
            continue
        # rsplit keeps any dots inside the stem intact instead of raising
        # ValueError on multi-dot file names.
        base, ext = fname.rsplit('.', 1)
        # MB002B-20121003_31_B2-f_20x_c1_01
        seqsearch = re.search(r"-CH\d+-(\d+)", base)
        if seqsearch is None:
            LOGGER.error("Could not extract sequence number from %s file %s", variant, base)
            COUNT['Unparsable files'] += 1
            continue
        seq = seqsearch[1]
        # Published key for the variant: <variant>/<fbase>-<seq>.<ext>
        ancname = '.'.join(['-'.join([fbase, seq]), ext])
        ancname = '/'.join([variant, ancname])
        dirpath = os.path.dirname(smp['variants'][variant])
        if variant == 'searchable_neurons':
            # Spread searchable neurons over numbered subdirectories holding
            # at most SUBDIVISION['limit'] files each.
            if SUBDIVISION['counter'] >= SUBDIVISION['limit']:
                SUBDIVISION['prefix'] += 1
                SUBDIVISION['counter'] = 0
            ancname = ancname.replace('searchable_neurons/',
                                      'searchable_neurons/%s/' % str(SUBDIVISION['prefix']))
            SUBDIVISION['counter'] += 1
        _ = upload_aws(AWS['s3_bucket']['cdm'], dirpath, fname, ancname)
        # Tally uploads per variant type.
        if variant not in VARIANT_UPLOADS:
            VARIANT_UPLOADS[variant] = 1
        else:
            VARIANT_UPLOADS[variant] += 1
def check_image(smp):
    ''' Check that the image exists and see if the URL is already specified
        Keyword arguments:
          smp: sample record
        Returns:
          False if error, True otherwise
    '''
    if 'imageName' not in smp:
        LOGGER.critical("Missing imageName in sample")
        print(smp)
        terminate_program(-1)
    LOGGER.debug('----- %s', smp['imageName'])
    # Skip samples already published to JACS unless a rewrite was requested.
    already_published = bool(smp.get('publicImageUrl'))
    if already_published and not ARG.REWRITE:
        COUNT['Already on JACS'] += 1
        return False
    return True
def upload_primary(smp, newname):
    ''' Handle uploading of the primary image
        Keyword arguments:
          smp: sample record
          newname: new file name
        Returns:
          None
    '''
    dirpath = os.path.dirname(smp['filepath'])
    fname = os.path.basename(smp['filepath'])
    url = upload_aws(AWS['s3_bucket']['cdm'], dirpath, fname, newname)
    if url:
        # 'Skipped' means the file was already present; nothing more to do.
        if url != 'Skipped':
            turl = produce_thumbnail(dirpath, fname, newname, url)
            if ARG.WRITE:
                # For FlyEM libraries the local file is removed after a real
                # AWS upload.
                if ARG.AWS and ('flyem_' in ARG.LIBRARY):
                    os.remove(smp['filepath'])
                update_jacs(smp['_id'], url, turl)
            else:
                LOGGER.info("Primary %s", url)
    elif ARG.WRITE:
        LOGGER.error("Did not transfer primary image %s", fname)
def handle_primary(smp, driver, published_ids):
    ''' Handle the primary image
        Keyword arguments:
          smp: sample record
          driver: driver mapping dictionary
          published_ids: sample dictionary
        Returns:
          New file name (None if no publishing name could be determined)
    '''
    skip_primary = False
    newname = None
    if 'flyem_' in ARG.LIBRARY:
        # FlyEM flips (imageName containing "_FL") are counted but their
        # primary image is not uploaded.
        if '_FL' in smp['imageName']:
            COUNT['FlyEM flips'] += 1
            skip_primary = True
        else:
            set_name_and_filepath(smp)
            newname = process_flyem(smp)
            if not newname:
                err_text = "No publishing name for FlyEM %s" % smp['name']
                LOGGER.error(err_text)
                ERR.write(err_text + "\n")
                COUNT['No publishing name'] += 1
                return None
    else:
        # FlyLight: if the requested gamma variant exists, substitute it for
        # the primary CDM path (and remove it from the variants to upload).
        if 'variants' in smp and ARG.GAMMA in smp['variants']:
            smp['cdmPath'] = smp['variants'][ARG.GAMMA]
            del smp['variants'][ARG.GAMMA]
        set_name_and_filepath(smp)
        newname = process_light(smp, driver, published_ids)
        if not newname:
            err_text = "No publishing name for FlyLight %s" % smp['name']
            LOGGER.error(err_text)
            ERR.write(err_text + "\n")
            return None
    # Record the full archive path used later for searchable neurons.
    if 'imageArchivePath' in smp and 'imageName' in smp:
        smp['searchableNeuronsName'] = '/'.join([smp['imageArchivePath'], smp['imageName']])
    if not skip_primary:
        upload_primary(smp, newname)
    return newname
def handle_variants(smp, newname):
    ''' Handle uploading of the variants
        Keyword arguments:
          smp: sample record
          newname: new file name
        Returns:
          None
    '''
    if 'flyem_' in ARG.LIBRARY:
        # For FlyEM flips the published name has to be recomputed first.
        if '_FL' in smp['imageName']:
            set_name_and_filepath(smp)
            newname = process_flyem(smp, False)
            if not newname:
                return
            # A second dot would mean the name computation went wrong.
            if newname.count('.') > 1:
                LOGGER.critical("Internal error for newname computation")
                terminate_program(-1)
        upload_flyem_variants(smp, newname)
        #newname = 'searchable_neurons/' + newname
        #dirpath = os.path.dirname(smp['filepath'])
        #fname = os.path.basename(smp['filepath'])
        #url = upload_aws(AWS['s3_bucket']['cdm'], dirpath, fname, newname)
    else:
        upload_flylight_variants(smp, newname)
def confirm_run(alignment_space):
    ''' Display parms and confirm run
        Keyword arguments:
          alignment_space: alignment space
        Returns:
          True or False
    '''
    # Show the run parameters, then ask for interactive confirmation.
    summary = [
        "Manifold: %s" % (ARG.MANIFOLD),
        "Library: %s" % (ARG.LIBRARY),
        "Alignment space: %s" % (alignment_space),
        "NeuronBridge version: %s" % (ARG.NEURONBRIDGE),
        "JSON file: %s" % (ARG.JSON),
        "Files to upload: %s" % (", ".join(WILL_LOAD)),
        "Upload files to AWS: %s" % ("Yes" if ARG.AWS else "No"),
        "Update JACS: %s" % ("Yes" if ARG.WRITE else "No"),
        "Do you want to proceed?",
    ]
    print("\n".join(summary))
    allowed = ['No', 'Yes']
    chosen = TerminalMenu(allowed).show()
    # Aborting the menu (None) counts as "No".
    return chosen is not None and allowed[chosen] == "Yes"
def upload_cdms_from_file():
    ''' Upload color depth MIPs and other files to AWS S3.
        The list of color depth MIPs comes from a supplied JSON file.
        Keyword arguments:
          None
        Returns:
          None
    '''
    # FlyEM libraries need no line/driver mappings.
    if 'flyem_' not in ARG.LIBRARY:
        print("Getting image mapping")
        driver = get_line_mapping()
        published_ids = get_image_mapping()
    else:
        driver = {}
        published_ids = {}
    print("Loading JSON file")
    # Context manager ensures the file handle is closed even if parsing fails
    # (the original opened/closed it manually).
    with open(ARG.JSON, 'r') as jfile:
        data = json.load(jfile)
    entries = len(data)
    print("Number of entries in JSON: %d" % entries)
    alignment_space = set_searchable_subdivision(data[0])
    if not confirm_run(alignment_space):
        return
    print("Processing %s on %s manifold" % (ARG.LIBRARY, ARG.MANIFOLD))
    for smp in tqdm(data):
        # Every record in the file must share one alignment space.
        if alignment_space != smp["alignmentSpace"]:
            log_error("JSON file contains multiple alignment spaces")
            terminate_program(-1)
        smp['_id'] = smp['id']
        # Optional cap on the number of samples processed.
        if ARG.SAMPLES and COUNT['Samples'] >= ARG.SAMPLES:
            break
        COUNT['Samples'] += 1
        if not check_image(smp):
            continue
        REC['alignment_space'] = smp['alignmentSpace']
        # Primary image
        newname = handle_primary(smp, driver, published_ids)
        # Variants
        if newname:
            handle_variants(smp, newname)
def update_library_config():
    ''' Update the config JSON for this library
        Keyword arguments:
          None
        Returns:
          None
    '''
    # Make sure the nested manifold/JSON dictionaries exist before writing.
    if ARG.MANIFOLD not in LIBRARY[ARG.LIBRARY]:
        LIBRARY[ARG.LIBRARY][ARG.MANIFOLD] = {}
    if ARG.JSON not in LIBRARY[ARG.LIBRARY][ARG.MANIFOLD]:
        LIBRARY[ARG.LIBRARY][ARG.MANIFOLD][ARG.JSON] = {}
    # Alias the per-JSON entry to avoid repeating the deep index expression.
    entry = LIBRARY[ARG.LIBRARY][ARG.MANIFOLD][ARG.JSON]
    entry['samples'] = COUNT['Samples']
    entry['images'] = COUNT['Images']
    # Timestamp with the fractional seconds stripped.
    entry['updated'] = re.sub(r"\..*", '', str(datetime.now()))
    LIBRARY[ARG.LIBRARY][ARG.MANIFOLD]['updated'] = entry['updated']
    entry['updated_by'] = FULL_NAME
    entry['method'] = 'JSON file'
    if ARG.WRITE or ARG.CONFIG:
        # NOTE(review): no timeout is set on this request, so a hung config
        # server would block forever — consider adding timeout=.
        resp = requests.post(CONFIG['config']['url'] + 'importjson/cdm_library/' + ARG.LIBRARY,
                             {"config": json.dumps(LIBRARY[ARG.LIBRARY])})
        if resp.status_code != 200:
            LOGGER.error(resp.json()['rest']['message'])
        else:
            LOGGER.info("Updated cdm_library configuration")
if __name__ == '__main__':
    # Command-line interface.
    PARSER = argparse.ArgumentParser(
        description="Upload Color Depth MIPs to AWS S3")
    PARSER.add_argument('--library', dest='LIBRARY', action='store',
                        default='', help='color depth library')
    PARSER.add_argument('--neuronbridge', dest='NEURONBRIDGE', action='store',
                        help='NeuronBridge version')
    PARSER.add_argument('--json', dest='JSON', action='store',
                        help='JSON file')
    PARSER.add_argument('--internal', dest='INTERNAL', action='store_true',
                        default=False, help='Upload to internal bucket')
    PARSER.add_argument('--gamma', dest='GAMMA', action='store',
                        default='gamma1_4', help='Variant key for gamma image to replace cdmPath')
    PARSER.add_argument('--rewrite', dest='REWRITE', action='store_true',
                        default=False,
                        help='Flag, Update image in AWS and on JACS')
    PARSER.add_argument('--aws', dest='AWS', action='store_true',
                        default=False, help='Write files to AWS')
    PARSER.add_argument('--config', dest='CONFIG', action='store_true',
                        default=False, help='Update configuration')
    PARSER.add_argument('--samples', dest='SAMPLES', action='store', type=int,
                        default=0, help='Number of samples to transfer')
    PARSER.add_argument('--version', dest='VERSION', action='store',
                        default='1.0', help='EM Version')
    PARSER.add_argument('--check', dest='CHECK', action='store_true',
                        default=False,
                        help='Flag, Check for previous AWS upload')
    PARSER.add_argument('--manifold', dest='MANIFOLD', action='store',
                        choices=MANIFOLDS, help='S3 manifold')
    PARSER.add_argument('--write', dest='WRITE', action='store_true',
                        default=False,
                        help='Flag, Actually write to JACS (and AWS if flag set)')
    PARSER.add_argument('--verbose', dest='VERBOSE', action='store_true',
                        default=False, help='Flag, Chatty')
    PARSER.add_argument('--debug', dest='DEBUG', action='store_true',
                        default=False, help='Flag, Very chatty')
    ARG = PARSER.parse_args()
    # Logging: DEBUG > VERBOSE > default WARNING.
    LOGGER = colorlog.getLogger()
    if ARG.DEBUG:
        LOGGER.setLevel(colorlog.colorlog.logging.DEBUG)
    elif ARG.VERBOSE:
        LOGGER.setLevel(colorlog.colorlog.logging.INFO)
    else:
        LOGGER.setLevel(colorlog.colorlog.logging.WARNING)
    HANDLER = colorlog.StreamHandler()
    HANDLER.setFormatter(colorlog.ColoredFormatter())
    LOGGER.addHandler(HANDLER)
    S3CP = ERR = ''
    initialize_program()
    # Per-run output files for errors and s3cp commands, tagged by timestamp.
    STAMP = strftime("%Y%m%dT%H%M%S")
    ERR_FILE = '%s_errors_%s.txt' % (ARG.LIBRARY, STAMP)
    ERR = open(ERR_FILE, 'w')
    S3CP_FILE = '%s_s3cp_%s.txt' % (ARG.LIBRARY, STAMP)
    S3CP = open(S3CP_FILE, 'w')
    START_TIME = datetime.now()
    upload_cdms_from_file()
    STOP_TIME = datetime.now()
    print("Elapsed time: %s" % (STOP_TIME - START_TIME))
    update_library_config()
    # Persist uploaded S3 keys, if any were collected.
    if KEY_LIST:
        KEY_FILE = '%s_keys_%s.txt' % (ARG.LIBRARY, STAMP)
        KEY = open(KEY_FILE, 'w')
        KEY.write("%s\n" % json.dumps(KEY_LIST))
        KEY.close()
    # Final summary of counters and per-variant upload totals.
    for key in sorted(COUNT):
        print("%-20s %d" % (key + ':', COUNT[key]))
    if VARIANT_UPLOADS:
        print('Uploaded variants:')
        for key in sorted(VARIANT_UPLOADS):
            print(" %-20s %d" % (key + ':', VARIANT_UPLOADS[key]))
    print("Server calls (excluding AWS)")
    print(TRANSACTIONS)
    terminate_program(0)
| StarcoderdataPython |
3459589 | import datetime
from django.test import TestCase
from dimagi.utils.dates import DateSpan
from pillowtop.es_utils import initialize_index_and_mapping
from corehq.apps.app_manager.const import AMPLIFIES_YES
from corehq.apps.app_manager.models import Application
from corehq.apps.data_analytics.const import NOT_SET, YES
from corehq.apps.data_analytics.malt_generator import MALTTableGenerator
from corehq.apps.data_analytics.models import MALTRow
from corehq.apps.data_analytics.tests.utils import save_to_es_analytics_db
from corehq.apps.domain.models import Domain
from corehq.apps.smsforms.app import COMMCONNECT_DEVICE_ID
from corehq.apps.users.models import CommCareUser
from corehq.const import MISSING_APP_ID
from corehq.elastic import get_es_new
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
class MaltGeneratorTest(TestCase):
    """End-to-end test of MALT table generation.

    setUpClass builds a domain, a user, a WAM and a non-WAM app, indexes a
    mix of in-range and out-of-range forms into Elasticsearch, then runs the
    MALT generator once; the test methods assert on the resulting MALTRows.
    """
    DOMAIN_NAME = "test"
    USERNAME = "malt-user"
    DEVICE_ID = "my_phone"
    UNKNOWN_ID = "UNKNOWN_ID"
    # Forms dated out_of_range_date fall outside malt_month and must be
    # excluded from the generated table.
    correct_date = datetime.datetime.now()
    out_of_range_date = correct_date - datetime.timedelta(days=32)
    malt_month = DateSpan.from_month(correct_date.month, correct_date.year)
    @classmethod
    def setUpClass(cls):
        super(MaltGeneratorTest, cls).setUpClass()
        cls.es = get_es_new()
        ensure_index_deleted(XFORM_INDEX_INFO.index)
        initialize_index_and_mapping(cls.es, XFORM_INDEX_INFO)
        cls._setup_domain_user()
        cls._setup_apps()
        cls._setup_forms()
        # Refresh so the just-saved forms are visible to the generator.
        cls.es.indices.refresh(XFORM_INDEX_INFO.index)
        cls.run_malt_generation()
    @classmethod
    def tearDownClass(cls):
        cls.domain.delete()
        MALTRow.objects.all().delete()
        ensure_index_deleted(XFORM_INDEX_INFO.index)
        super(MaltGeneratorTest, cls).tearDownClass()
    @classmethod
    def _setup_domain_user(cls):
        # One domain with a single CommCare user.
        cls.domain = Domain(name=cls.DOMAIN_NAME)
        cls.domain.save()
        cls.user = CommCareUser.create(cls.DOMAIN_NAME, cls.USERNAME, '*****')
        cls.user.save()
        cls.user_id = cls.user._id
    @classmethod
    def _setup_apps(cls):
        # One app without WAM set and one flagged AMPLIFIES_YES.
        cls.non_wam_app = Application.new_app(cls.DOMAIN_NAME, "app 1")
        cls.wam_app = Application.new_app(cls.DOMAIN_NAME, "app 2")
        cls.wam_app.amplifies_workers = AMPLIFIES_YES
        cls.non_wam_app.save()
        cls.wam_app.save()
        cls.non_wam_app_id = cls.non_wam_app._id
        cls.wam_app_id = cls.wam_app._id
    @classmethod
    def _setup_forms(cls):
        # Index form submissions into the analytics ES index.
        def _save_form_data(app_id, received_on=cls.correct_date, device_id=cls.DEVICE_ID):
            save_to_es_analytics_db(
                domain=cls.DOMAIN_NAME,
                received_on=received_on,
                device_id=device_id,
                user_id=cls.user_id,
                app_id=app_id,
            )
        def _save_multiple_forms(app_ids, received_on):
            for app_id in app_ids:
                _save_form_data(app_id, received_on=received_on)
        out_of_range_form_apps = [
            cls.non_wam_app_id,
            cls.wam_app_id,
        ]
        in_range_form_apps = [
            # should be included in MALT
            cls.non_wam_app_id,
            cls.non_wam_app_id,
            cls.non_wam_app_id,
            cls.wam_app_id,
            cls.wam_app_id,
            # should be included in MALT
            '',
        ]
        _save_multiple_forms(out_of_range_form_apps, cls.out_of_range_date)
        _save_multiple_forms(in_range_form_apps, cls.correct_date)
        # should be included in MALT
        _save_form_data(cls.non_wam_app_id, device_id=COMMCONNECT_DEVICE_ID)
    @classmethod
    def run_malt_generation(cls):
        generator = MALTTableGenerator([cls.malt_month])
        generator.build_table()
    def _assert_malt_row_exists(self, query_filters):
        # Exactly one MALTRow for this user must match the given filters.
        rows = MALTRow.objects.filter(username=self.USERNAME, **query_filters)
        self.assertEqual(rows.count(), 1)
    def test_wam_yes_malt_counts(self):
        # 2 forms for WAM.YES app
        self._assert_malt_row_exists({
            'app_id': self.wam_app_id,
            'num_of_forms': 2,
            'wam': YES,
        })
    def test_wam_not_set_malt_counts(self):
        # 3 forms from self.DEVICE_ID for WAM not-set app
        self._assert_malt_row_exists({
            'app_id': self.non_wam_app_id,
            'num_of_forms': 3,
            'wam': NOT_SET,
            'device_id': self.DEVICE_ID,
        })
        # 1 form from COMMONCONNECT_DEVICE_ID for WAM not-set app
        self._assert_malt_row_exists({
            'app_id': self.non_wam_app_id,
            'num_of_forms': 1,
            'wam': NOT_SET,
            'device_id': COMMCONNECT_DEVICE_ID,
        })
    def test_missing_app_id_is_included(self):
        # apps with MISSING_APP_ID should be included in MALT
        self._assert_malt_row_exists({
            'app_id': MISSING_APP_ID,
            'num_of_forms': 1,
            'wam': NOT_SET,
        })
| StarcoderdataPython |
3375849 | from PIL import Image
def plus(str):
    """Zero-pad the given string on the left to a fixed width of 8."""
    padded = str.zfill(8)
    return padded
def getCode(img):
    """Extract a watermark image's pixels as one long binary string.

    Each pixel's R, G and B values are rendered as 8 binary digits and
    concatenated, iterating x (width) in the outer loop and y (height)
    in the inner loop. The string is also printed, matching the original
    behavior.

    Keyword arguments:
      img: RGB image object providing .size and .getpixel((x, y))
    Returns:
      Binary string with 24 bits per pixel
    """
    # Collect chunks in a list and join once: the original built the string
    # with += inside the nested loop, which is quadratic.
    bits = []
    # Walk the watermark's width and height.
    for i in range(img.size[0]):
        for j in range(img.size[1]):
            # Read one pixel and record each channel as 8 binary digits.
            rgb = img.getpixel((i, j))
            bits.append(format(rgb[0], '08b'))
            bits.append(format(rgb[1], '08b'))
            bits.append(format(rgb[2], '08b'))
    code = ''.join(bits)
    print(code)
    return code
def encry(img, code):
    """Embed the binary string *code* into the least-significant bits of
    img's pixels and save the result to 'output/encryption.bmp'.

    Keyword arguments:
      img: carrier PIL image (RGB mode); modified in place
      code: string of '0'/'1' characters to embed
    """
    # Counter: index of the next bit of `code` to embed.
    count = 0
    # Length of the binary payload (i.e. the amount of text to write into the
    # image); the extraction (decryption) side needs this same value too.
    codeLen = len(code)
    print(codeLen)
    # Walk the carrier image's width and height.
    for i in range(img.size[0]):
        for j in range(img.size[1]):
            # Read the current pixel's value.
            data = img.getpixel((i, j))
            # When the counter reaches the payload length, embedding is done.
            if count == codeLen:
                break
            # Keep the R, G and B components separately.
            r = data[0]
            g = data[1]
            b = data[2]
            """
            下面的是像素值替换,通过取模2得到最后一位像素值(0或1),
            然后减去最后一位像素值,在将code的值添加过来
            """
            # (Translation of the note above: replace each channel's value by
            # taking mod 2 to get its last bit (0 or 1), subtracting that bit,
            # and then adding the next bit from `code`.)
            r = (r - r % 2) + int(code[count])
            count += 1
            if count == codeLen:
                img.putpixel((i, j), (r, g, b))
                break
            g = (g - g % 2) + int(code[count])
            count += 1
            if count == codeLen:
                img.putpixel((i, j), (r, g, b))
                break
            b = (b - b % 2) + int(code[count])
            count += 1
            if count == codeLen:
                img.putpixel((i, j), (r, g, b))
                break
            # Every 3 increments one full RGB triple has been replaced, so the
            # pixel can be written back.
            if count % 3 == 0:
                img.putpixel((i, j), (r, g, b))
    img.save('output/encryption.bmp')
# Get the image objects.
# This is the carrier (original) image.
img1 = Image.open('pic/original.bmp')
# This is the 96x96 QR-code image that holds the copyright information.
img2 = Image.open('pic/QR.bmp')
# Convert the images to RGB mode so the R, G and B values can be read individually.
rgb_im1 = img1.convert('RGB')
rgb_im2 = img2.convert('RGB')
# Turn the watermark's pixel values into a binary string.
code = getCode(rgb_im2)
# Embed the watermark into the carrier image.
encry(rgb_im1, code)
| StarcoderdataPython |
# Copyright (c) 2020 fortiss GmbH
#
# Authors: <NAME>
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.networks import network, utils
@gin.configurable
class GNNCriticNetwork(network.Network):
  """Critic network whose observation branch is a graph neural network.

  Observations are encoded by `gnn` into per-node embeddings; the ego node's
  embedding and the (flattened) action pass through separate MLPs before
  being concatenated and mapped to a scalar Q-value.
  """
  def __init__(self,
               input_tensor_spec,
               gnn,
               observation_fc_layer_params=None,
               observation_dropout_layer_params=None,
               observation_conv_layer_params=None,
               observation_activation_fn=tf.nn.relu,
               action_fc_layer_params=None,
               action_dropout_layer_params=None,
               action_conv_layer_params=None,
               action_activation_fn=tf.nn.relu,
               joint_fc_layer_params=None,
               joint_dropout_layer_params=None,
               joint_activation_fn=tf.nn.relu,
               output_activation_fn=None,
               name='CriticNetwork'):
    """Creates an instance of `GNNCriticNetwork`.
    Args:
      input_tensor_spec: A tuple of (observation, action) each a nest of
        `tensor_spec.TensorSpec` representing the inputs.
      gnn: The graph neural network that accepts the input observations and
        computes node embeddings.
      observation_fc_layer_params: Optional list of fully connected parameters
        for observations, where each item is the number of units in the layer.
      observation_dropout_layer_params: Optional list of dropout layer
        parameters, each item is the fraction of input units to drop or a
        dictionary of parameters according to the keras.Dropout documentation.
        The additional parameter `permanent', if set to True, allows to apply
        dropout at inference for approximated Bayesian inference. The dropout
        layers are interleaved with the fully connected layers; there is a
        dropout layer after each fully connected layer, except if the entry in
        the list is None. This list must have the same length of
        observation_fc_layer_params, or be None.
      observation_conv_layer_params: Optional list of convolution layer
        parameters for observations, where each item is a length-three tuple
        indicating (num_units, kernel_size, stride).
      observation_activation_fn: Activation function applied to the observation
        layers, e.g. tf.nn.relu, slim.leaky_relu, ...
      action_fc_layer_params: Optional list of fully connected parameters for
        actions, where each item is the number of units in the layer.
      action_dropout_layer_params: Optional list of dropout layer parameters,
        each item is the fraction of input units to drop or a dictionary of
        parameters according to the keras.Dropout documentation. The additional
        parameter `permanent', if set to True, allows to apply dropout at
        inference for approximated Bayesian inference. The dropout layers are
        interleaved with the fully connected layers; there is a dropout layer
        after each fully connected layer, except if the entry in the list is
        None. This list must have the same length of action_fc_layer_params, or
        be None.
      action_conv_layer_params: Optional list of convolution layer
        parameters for actions, where each item is a length-three tuple
        indicating (num_units, kernel_size, stride).
      action_activation_fn: Activation function applied to the action layers,
        e.g. tf.nn.relu, slim.leaky_relu, ...
      joint_fc_layer_params: Optional list of fully connected parameters after
        merging observations and actions, where each item is the number of units
        in the layer.
      joint_dropout_layer_params: Optional list of dropout layer parameters,
        each item is the fraction of input units to drop or a dictionary of
        parameters according to the keras.Dropout documentation. The additional
        parameter `permanent', if set to True, allows to apply dropout at
        inference for approximated Bayesian inference. The dropout layers are
        interleaved with the fully connected layers; there is a dropout layer
        after each fully connected layer, except if the entry in the list is
        None. This list must have the same length of joint_fc_layer_params, or
        be None.
      joint_activation_fn: Activation function applied to the joint layers,
        e.g. tf.nn.relu, slim.leaky_relu, ...
      output_activation_fn: Activation function for the last layer. This can be
        used to restrict the range of the output. For example, one can pass
        tf.keras.activations.sigmoid here to restrict the output to be bounded
        between 0 and 1.
      name: A string representing name of the network.
    Raises:
      ValueError: If `observation_spec` or `action_spec` contains more than one
        observation.
    """
    super(GNNCriticNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec,
        state_spec=(),
        name=name)
    observation_spec, action_spec = input_tensor_spec
    if len(tf.nest.flatten(observation_spec)) > 1:
      raise ValueError('Only a single observation is supported by this network')
    if len(tf.nest.flatten(action_spec)) > 1:
      raise ValueError('Only a single action is supported by this network')
    if gnn is None:
      raise ValueError('`gnn` must not be `None`.')
    self._gnn = gnn
    # Separate MLP encoders for the observation embedding and the action.
    self._observation_layers = utils.mlp_layers(
        observation_conv_layer_params,
        observation_fc_layer_params,
        observation_dropout_layer_params,
        activation_fn=observation_activation_fn,
        kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
            scale=1./3., mode='fan_in', distribution='uniform'),
        name='observation_encoding')
    self._action_layers = utils.mlp_layers(
        action_conv_layer_params,
        action_fc_layer_params,
        action_dropout_layer_params,
        activation_fn=action_activation_fn,
        kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
            scale=1./3., mode='fan_in', distribution='uniform'),
        name='action_encoding')
    self._joint_layers = utils.mlp_layers(
        None,
        joint_fc_layer_params,
        joint_dropout_layer_params,
        activation_fn=joint_activation_fn,
        kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
            scale=1./3., mode='fan_in', distribution='uniform'),
        name='joint_mlp')
    # Final scalar value head with a small uniform initializer.
    self._joint_layers.append(
        tf.keras.layers.Dense(
            units=1,
            activation=output_activation_fn,
            kernel_initializer=tf.keras.initializers.RandomUniform(
                minval=-0.003, maxval=0.003),
            name='value'))
  def call(self, inputs, step_type=(), network_state=(), training=False):
    del step_type  # unused.
    observations, actions = inputs
    batch_size = observations.shape[0]
    # Per-node embeddings from the GNN.
    embeddings = self._gnn(observations, training=training)
    if batch_size > 0:
      embeddings = embeddings[:, 0]  # extract ego state
      actions = tf.reshape(actions, [batch_size, -1])
    else:
      # Empty batch: keep shapes consistent for the layers below.
      actions = tf.zeros([0, actions.shape[-1]])
    embeddings = tf.cast(tf.nest.flatten(embeddings)[0], tf.float32)
    for layer in self._observation_layers:
      embeddings = layer(embeddings, training=training)
    actions = tf.cast(tf.nest.flatten(actions)[0], tf.float32)
    for layer in self._action_layers:
      actions = layer(actions, training=training)
    # Concatenate the two encodings and map them to a scalar Q-value.
    joint = tf.concat([embeddings, actions], 1)
    for layer in self._joint_layers:
      joint = layer(joint, training=training)
    with tf.name_scope("GNNCriticNetwork"):
      tf.summary.histogram("critic_output_value", joint)
    return tf.reshape(joint, [-1]), network_state
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 21 17:28:04 2015
@author: <NAME>
"""
import pytest
from aneris.entity import Simulation
from aneris.entity.data import DataCatalog, DataPool
from aneris.control.pipeline import Sequencer
from aneris.control.simulation import Loader, Controller
from aneris.control.data import DataStorage, DataValidation
import data_plugins
import interface_plugins as interfaces
from data_plugins.definitions import UnitData
@pytest.fixture(scope="module")
def loader():
    """Module-scoped Loader built on a DataStorage over the data plugins."""
    return Loader(DataStorage(data_plugins))
@pytest.fixture(scope="module")
def controller():
    """Module-scoped Controller wired to a DummyInterface pipeline."""
    storage = DataStorage(data_plugins)
    pipeline = Sequencer(["DummyInterface"], interfaces)
    return Controller(storage, pipeline)
@pytest.fixture(scope="module")
def catalog():
    """Module-scoped DataCatalog populated from the plugin definitions."""
    result = DataCatalog()
    checker = DataValidation(meta_cls=data_plugins.MyMetaData)
    checker.update_data_catalog_from_definitions(result, data_plugins)
    return result
def test_init_loader(loader):
    '''The loader fixture should construct a Loader instance'''
    assert isinstance(loader, Loader)
def test_get_structure(loader):
    '''The loader should resolve "UnitData" to a UnitData structure'''
    structure = loader.get_structure("UnitData")
    assert isinstance(structure, UnitData)
def test_has_data(loader):
    '''Test for existing variable in a data state'''
    # A fresh simulation has no data, so the variable must be absent.
    sim = Simulation("Hello World!")
    found = loader.has_data(sim, 'site:wave:dir')
    assert found == False
def test_get_data_value(loader, controller, catalog):
    '''A value stored in a datastate can be read back through the loader'''
    pool = DataPool()
    sim = Simulation("Hello World!")
    variable = "Technology:Common:DeviceType"
    controller.add_datastate(pool,
                             sim,
                             "executed",
                             catalog,
                             [variable],
                             ["Tidal"])
    assert loader.get_data_value(pool, sim, variable) == "Tidal"
def test_create_merged_state_none(loader):
    '''Merging a simulation with no datastates returns None'''
    sim = Simulation("Hello World!")
    assert loader.create_merged_state(sim) is None
def test_create_merged_state(loader, controller, catalog):
    '''Merging two datastates exposes all three stored variables'''
    pool = DataPool()
    sim = Simulation("Hello World!")
    controller.add_datastate(pool, sim, "input1", catalog,
                             ['demo:demo:high', 'demo:demo:rows'], [2, 5])
    controller.add_datastate(pool, sim, "input2", catalog,
                             ['demo:demo:low'], [1])
    # Drop any cached merged state so a fresh merge is forced.
    sim.set_merged_state(None)
    merged = loader.create_merged_state(sim)
    assert len(merged) == 3
def test_create_merged_state_existing(loader, controller, catalog):
    '''Merging again without changes returns the cached merged state'''
    pool = DataPool()
    sim = Simulation("Hello World!")
    controller.add_datastate(pool, sim, "input1", catalog,
                             ['demo:demo:high', 'demo:demo:rows'], [2, 5])
    controller.add_datastate(pool, sim, "input2", catalog,
                             ['demo:demo:low'], [1])
    cached = sim.get_merged_state()
    assert loader.create_merged_state(sim) == cached
def test_create_merged_state_not_existing(loader, controller, catalog):
    '''use_existing=False forces a fresh merged state to be built'''
    pool = DataPool()
    sim = Simulation("Hello World!")
    controller.add_datastate(pool, sim, "input1", catalog,
                             ['demo:demo:high', 'demo:demo:rows'], [2, 5])
    controller.add_datastate(pool, sim, "input2", catalog,
                             ['demo:demo:low'], [1])
    cached = sim.get_merged_state()
    rebuilt = loader.create_merged_state(sim, use_existing=False)
    assert cached != rebuilt
| StarcoderdataPython |
3584527 | <reponame>magechaP/WayaHoodie<filename>hood/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-09-18 04:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the hood app.

    Creates the Business, Neighbourhood, Post and Profile models and their
    foreign-key links; do not hand-edit the operations below.
    """
    initial = True
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('business_name', models.CharField(max_length=64, unique=True)),
                ('business_email', models.EmailField(max_length=64, unique=True)),
                ('description', models.TextField(max_length=500)),
                ('descriptive_image', models.ImageField(default='photos/default_biz.jpg', upload_to='photos/')),
            ],
        ),
        migrations.CreateModel(
            name='Neighbourhood',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=50, null=True)),
                ('members_count', models.IntegerField(default=0, null=True)),
                ('police_dept', models.CharField(max_length=50)),
                ('police_dept_address', models.CharField(max_length=50)),
                ('health_dept', models.CharField(max_length=50)),
                ('health_dept_address', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post_title', models.CharField(max_length=20)),
                ('text', models.TextField(max_length=500)),
                ('descriptive_picture', models.ImageField(default='photos/default_post.jpg', upload_to='photos/')),
                ('post_date', models.DateTimeField(auto_now_add=True)),
                ('hood', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_hood', to='hood.Neighbourhood')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('bio', models.CharField(max_length=200)),
                ('profile_pic', models.ImageField(default='photos/default.jpg', upload_to='photos/')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('contact', models.CharField(max_length=12)),
                ('neighbourhood', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='people_count', to='hood.Neighbourhood')),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='owner',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_name', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='post',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='hood.Profile'),
        ),
        migrations.AddField(
            model_name='neighbourhood',
            name='headman',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='business',
            name='biz_hood',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='hood.Neighbourhood'),
        ),
        migrations.AddField(
            model_name='business',
            name='biz_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| StarcoderdataPython |
# TODO: maybe all this could be removed, and loading the model done in specific commands
# like in populate_db and
import numpy as np
import sys
import os
from pathlib import Path
# Importing rse_watch package and config
# NOTE(review): sys.path is mutated so rse_watch (under polls/rse_model) can
# be imported; the imports below intentionally come after this hack.
dir_path = Path(os.getcwd())
sys.path.append(str(dir_path / 'polls/rse_model'))
model_directory = dir_path / "data/model"
from rse_watch.conf import Config, DebugConfig
from rse_watch.indexer import run as run_indexer
# The NLP model is only loaded when serving (wsgi/runserver), not for
# management commands.
nlp = None
if {'batch7rse.wsgi', 'runserver'}.intersection(sys.argv):
    # Load config, and will load existing vectorizer if already exists, else will create it.
    config = Config(model_directory)
    nlp = run_indexer(config)
    # todo: revert when dev finished
    # class nlp:
    #     vector = np.random.random((300,))
    #
    #     def __init__(self, query):
    #         pass
9651331 | <reponame>ollelauribostrom/pi-home-monitor<filename>tests/MockResponse.py
class MockResponse:
    """Minimal test double for a response object.

    Remembers the most recent message handed to :meth:`message` so tests can
    assert on what was sent.
    """

    def __init__(self):
        # Nothing received yet.
        self.msg = None

    def message(self, msg):
        """Record *msg* as the last message received."""
        self.msg = msg
import numpy as np
import math

# Landmark recognition from one 360-degree LIDAR sweep of 400 range samples
# (one sample per pi/200 rad). The script isolates the nearest object,
# extracts its corner points, classifies the landmark shape (triangle "T",
# square "S", wall edge, long edge "T2", ...) and converts the key points to
# world coordinates using the robot pose (x_in, y_in, heading).
# NOTE(review): this used to be a function recognit(forCounter, x_in, y_in,
# heading); the wrapper and the CSV loads are commented out below.
#def recognit(forCounter,x_in,y_in,heading):
#data = np.genfromtxt('data'+str(forCounter)+'.csv',delimiter=',')
x_in = 1000
y_in = -800
heading = math.pi*0.1355
#data = np.genfromtxt('T1/data36.csv',delimiter=',')
# NOTE(review): 'data' is undefined here because both genfromtxt loads above
# are commented out -- as written this line raises NameError. Rotate the scan
# so it starts at sample 211.
data = np.concatenate ((data[211:400], data[0:211]),axis=0)
pos_flag = 0
heading-=math.pi/2
# NOTE(review): 'global' is a no-op at module scope; leftover from the
# function version of this code.
global min_ind,max_ind
# Angle of each of the 400 samples, in radians.
theta = []
for i in range(400):
    theta.append(i*math.pi/200)
# Replace dropout readings (0) with a large range so min() ignores them.
for i in range(400):
    if (data[i]==0):
        data[i] = 500
rmin=min(data)
rmin_ind=np.argmin(data)
# If the nearest point sits near the wrap-around, rotate the scan by 100
# samples (a quarter turn) so the object is contiguous; pos_flag records the
# extra pi/2 to add back when converting to world coordinates.
if(rmin_ind>370)|(rmin_ind<30):
    pos_flag = 1
    data = np.concatenate ((data[100:400], data[0:100]),axis=0)
    rmin=min(data)
    rmin_ind=np.argmin(data)
# Segment the object: samples within 30 indices of the minimum and closer
# than 240 units belong to it.
for i in range(30):
    if(data[(rmin_ind+i)] < 240):
        max_ind = rmin_ind+i+1
    if(data[(rmin_ind-i)] < 240):
        min_ind = rmin_ind-i
sel_r = data[min_ind:(max_ind+1)]
sel_th = theta[min_ind:(max_ind+1)]
rm_ind=np.argmin(sel_r)
# Cartesian coordinates of the selected points (robot frame).
sel_x = np.multiply(sel_r,np.cos(sel_th))
sel_y = np.multiply(sel_r,np.sin(sel_th))
# First difference of the range profile, smoothed with a 3-tap box filter.
der = sel_r[1:len(sel_r)]-sel_r[0:(len(sel_r)-1)]
filt_der = np.convolve(der,[1/3, 1/3, 1/3])
filt_der = filt_der[1:(len(filt_der)-1)]
# Feature points: closest point (xmin, ymin), segment ends p1/p4, and the
# first/last points where the smoothed derivative flattens out (p2/p3).
xmin = sel_x[rm_ind]
ymin = sel_y[rm_ind]
p1x = sel_x[0]
p1y = sel_y[0]
p4x = sel_x[(len(sel_x)-1)]
p4y = sel_y[(len(sel_y)-1)]
rms = math.sqrt(sum(np.multiply(der,der))/len(der))
rms = math.sqrt(rms)
for i in range(1,len(der)):
    if(filt_der[i]>=-rms):
        p2x = sel_x[i]
        p2y = sel_y[i]
        break
for i in range(1,len(der)):
    if(filt_der[(len(filt_der)-i)] <= rms):
        p3x = sel_x[(len(sel_r)-i)]
        p3y = sel_y[(len(sel_th)-i)]
        break
# Squared edge (de*) and diagonal (dq*) lengths between the feature points,
# and the interior angles a1 (at p2) / a2 (at p3) in degrees.
de1 = np.power((p1x-p2x),2)+np.power((p1y-p2y),2);
de2 = np.power((p3x-p2x),2)+np.power((p3y-p2y),2);
de3 = np.power((p4x-p3x),2)+np.power((p3y-p4y),2);
dq1 = np.power((p1x-p3x),2)+np.power((p1y-p3y),2);
dq2 = np.power((p2x-p4x),2)+np.power((p2y-p4y),2);
a1 = (de1+de2-dq1)/(2*math.sqrt(de1*de2));
a2 = (de3+de2-dq2)/(2*math.sqrt(de3*de2));
a1 = math.acos(a1)*180/math.pi
a2 = math.acos(a2)*180/math.pi
# Orientation of the middle edge p2-p3 relative to the midpoint direction.
orian = (p3x-p2x)*(p2x+p3x)+(p3y-p2y)*(p2y+p3y)
orian = orian/(math.sqrt(de2*(np.power((p2x+p3x),2)+np.power((p2y+p3y),2))))
orian = math.acos(orian)*180/math.pi
# Angle at the closest point between the two segment ends ("corner angle").
d1 = np.power((ymin-p1y),2)+np.power((xmin-p1x),2)
d2 = np.power((ymin-p4y),2)+np.power((xmin-p4x),2)
corner_angle = np.power((p1x-p4x),2)+np.power((p1y-p4y),2);
corner_angle = math.acos((d1+d2-corner_angle)/(2*math.sqrt(d1*d2)))
corner_angle = corner_angle*180/math.pi;
#classification
# NOTE(review): nesting below reconstructed from the control-flow logic; the
# original indentation was lost. Each branch prints the classified landmark's
# world-frame key points plus a numeric class id, and a sanity-check length.
# Short middle edge: corner of a triangle (<75 deg) or a square (>75 deg).
if(de2 < 1300):
    if(corner_angle < 75):
        print("T cor.an= ",corner_angle)
        r_center = rmin+ 49.0748
        x_center = x_in+r_center*np.cos(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        y_center = y_in+r_center*np.sin(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        x_corner = x_in+rmin*np.cos(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        y_corner = y_in+rmin*np.sin(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        print(x_center,y_center,x_corner,y_corner,0)
        print( math.sqrt(np.power((x_center-x_corner),2)+np.power((y_center-y_corner),2))*math.sqrt(3))
        #return x_center,y_center,x_corner,y_corner,0
    if(corner_angle > 75):
        print("S cor.an= ",corner_angle)
        r_center = rmin+49.5
        x_center = x_in+r_center*np.cos(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        y_center = y_in+r_center*np.sin(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        x_corner = x_in+rmin*np.cos(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        y_corner = y_in+rmin*np.sin(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        print(x_center,y_center,x_corner,y_corner,1)
        print( math.sqrt(np.power((x_center-x_corner),2)+np.power((y_center-y_corner),2))*math.sqrt(2))
        #return x_center,y_center,x_corner,y_corner,1
# Flat angle at p2 with a long first edge: triangle edge seen end-on; extend
# 85 units from p3 towards p2 to reconstruct the far corner.
if((a1 >= 130)&(de1 > 400)):
    if(a2 < 80):
        print("T ed.an2= ",a2)
        xc1 = p2x
        yc1 = p2y
        xc2 = p3x
        yc2 = p3y
        leng = math.sqrt(np.power((xc1-xc2),2)+np.power((yc1-yc2),2))
        unit_vectorx = 85*(xc1-xc2)/leng
        unit_vectory = 85*(yc1-yc2)/leng
        x_corner1 = xc2 + unit_vectorx
        y_corner1 = yc2 + unit_vectory
        x_corner2 = xc2
        y_corner2 = yc2
        r1 = math.sqrt(np.power(x_corner1,2)+np.power(y_corner1,2))
        t1 = np.arctan2(y_corner1,x_corner1)
        r2 = math.sqrt(np.power(x_corner2,2)+np.power(y_corner2,2))
        t2 = np.arctan2(y_corner2,x_corner2)
        x_corner1 = x_in+r1*np.cos(t1+heading+pos_flag*math.pi/2)
        y_corner1 = y_in+r1*np.sin(t1+heading+pos_flag*math.pi/2)
        x_corner2 = x_in+r2*np.cos(t2+heading+pos_flag*math.pi/2)
        y_corner2 = y_in+r2*np.sin(t2+heading+pos_flag*math.pi/2)
        print(x_corner1,y_corner1,x_corner2,y_corner2,3)
        print(math.sqrt(np.power((x_corner1-x_corner2),2)+np.power((y_corner1-y_corner2),2)))
        #return x_corner1,y_corner1,x_corner2,y_corner2,3
# Mirror case: flat angle at p3 with a long last edge; extend from p2.
if((a2 >= 130)&(de3 > 400)):
    if(a1 < 80):
        print("T ed.an1= ",a1)
        xc1 = p2x
        yc1 = p2y
        xc2 = p3x
        yc2 = p3y
        leng = math.sqrt(np.power((xc1-xc2),2)+np.power((yc1-yc2),2))
        unit_vectorx = 85*(xc2-xc1)/leng
        unit_vectory = 85*(yc2-yc1)/leng
        x_corner2 = xc1 + unit_vectorx
        y_corner2 = yc1 + unit_vectory
        x_corner1 = xc1
        y_corner1 = yc1
        r1 = math.sqrt(np.power(x_corner1,2)+np.power(y_corner1,2))
        t1 = np.arctan2(y_corner1,x_corner1)
        r2 = math.sqrt(np.power(x_corner2,2)+np.power(y_corner2,2))
        t2 = np.arctan2(y_corner2,x_corner2)
        x_corner1 = x_in+r1*np.cos(t1+heading+pos_flag*math.pi/2)
        y_corner1 = y_in+r1*np.sin(t1+heading+pos_flag*math.pi/2)
        x_corner2 = x_in+r2*np.cos(t2+heading+pos_flag*math.pi/2)
        y_corner2 = y_in+r2*np.sin(t2+heading+pos_flag*math.pi/2)
        print(x_corner1,y_corner1,x_corner2,y_corner2,3)
        print(math.sqrt(np.power((x_corner1-x_corner2),2)+np.power((y_corner1-y_corner2),2)))
        #return x_corner1,y_corner1,x_corner2,y_corner2,3
# Rotated edge cases, discriminated by the orientation of the middle edge.
if(orian<90):
    rotation = 90-orian
    a = a1
    if((rotation >= 30)&(a <= 85)):
        print("T1< ")
        xc1 = p2x
        yc1 = p2y
        xc2 = p3x
        yc2 = p3y
        leng = math.sqrt(np.power((xc1-xc2),2)+np.power((yc1-yc2),2))
        unit_vectorx = 85*(xc2-xc1)/leng
        unit_vectory = 85*(yc2-yc1)/leng
        x_corner2 = xc1 + unit_vectorx
        y_corner2 = yc1 + unit_vectory
        x_corner1 = xc1
        y_corner1 = yc1
        r1 = math.sqrt(np.power(x_corner1,2)+np.power(y_corner1,2))
        t1 = np.arctan2(y_corner1,x_corner1)
        r2 = math.sqrt(np.power(x_corner2,2)+np.power(y_corner2,2))
        t2 = np.arctan2(y_corner2,x_corner2)
        x_corner1 = x_in+r1*np.cos(t1+heading+pos_flag*math.pi/2)
        y_corner1 = y_in+r1*np.sin(t1+heading+pos_flag*math.pi/2)
        x_corner2 = x_in+r2*np.cos(t2+heading+pos_flag*math.pi/2)
        y_corner2 = y_in+r2*np.sin(t2+heading+pos_flag*math.pi/2)
        print( x_corner1,y_corner1,x_corner2,y_corner2,3)
        print(math.sqrt(np.power((x_corner1-x_corner2),2)+np.power((y_corner1-y_corner2),2)))
        #return x_corner1,y_corner1,x_corner2,y_corner2,3
if(orian>90):
    rotation = orian-90
    a = a2
    if((rotation >= 30)&(a <= 85)):
        print("T1> ")
        xc1 = p2x
        yc1 = p2y
        xc2 = p3x
        yc2 = p3y
        leng = math.sqrt(np.power((xc1-xc2),2)+np.power((yc1-yc2),2))
        unit_vectorx = 85*(xc1-xc2)/leng
        unit_vectory = 85*(yc1-yc2)/leng
        x_corner1 = xc2 + unit_vectorx
        y_corner1 = yc2 + unit_vectory
        x_corner2 = xc2
        y_corner2 = yc2
        r1 = math.sqrt(np.power(x_corner1,2)+np.power(y_corner1,2))
        t1 = np.arctan2(y_corner1,x_corner1)
        r2 = math.sqrt(np.power(x_corner2,2)+np.power(y_corner2,2))
        t2 = np.arctan2(y_corner2,x_corner2)
        x_corner1 = x_in+r1*np.cos(t1+heading+pos_flag*math.pi/2)
        y_corner1 = y_in+r1*np.sin(t1+heading+pos_flag*math.pi/2)
        x_corner2 = x_in+r2*np.cos(t2+heading+pos_flag*math.pi/2)
        y_corner2 = y_in+r2*np.sin(t2+heading+pos_flag*math.pi/2)
        print(x_corner1,y_corner1,x_corner2,y_corner2,3)
        print(math.sqrt(np.power((x_corner1-x_corner2),2)+np.power((y_corner1-y_corner2),2)))
        #return x_corner1,y_corner1,x_corner2,y_corner2,3
# Angles of the end segments around the closest point; near-straight on both
# sides means the ends lie on the edges meeting at the minimum ("SX" square).
ax1 = np.power((p2x-xmin),2)+np.power((p2y-ymin),2)
ay1 = np.power((p1x-xmin),2)+np.power((p1y-ymin),2)
ay1 = (ax1+de1-ay1)/(2*math.sqrt(ax1*de1))
ay1 = math.acos(ay1)*180/math.pi
ax2 = np.power((p3x-xmin),2)+np.power((p3y-ymin),2)
ay2 = np.power((p4x-xmin),2)+np.power((p4y-ymin),2)
ay2 = (ax2+de3-ay2)/(2*math.sqrt(ax2*de3))
ay2 = math.acos(ay2)*180/math.pi
if((ay1>155)&(ay2>155)):
    if(corner_angle <= 75):
        print("SX")
        r_center = rmin+49.5
        x_center = x_in+r_center*np.cos(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        y_center = y_in+r_center*np.sin(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        x_corner = x_in+rmin*np.cos(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        y_corner = y_in+rmin*np.sin(math.pi*rmin_ind/200+heading+pos_flag*math.pi/2)
        print(x_center,y_center,x_corner,y_corner,1)
        print( math.sqrt(np.power((x_center-x_corner),2)+np.power((y_center-y_corner),2))*math.sqrt(2))
        #return x_center,y_center,x_corner,y_corner,1
# Long middle edge (>= 69 units): an edge seen broadside ("T2").
if(math.sqrt(de2) >= 69):
    print("T2 ")
    xc1 = p2x
    yc1 = p2y
    xc2 = p3x
    yc2 = p3y
    leng = math.sqrt(np.power((xc1-xc2),2)+np.power((yc1-yc2),2))
    unit_vectorx = 85*(xc1-xc2)/leng
    unit_vectory = 85*(yc1-yc2)/leng
    x_corner1 = xc2 + unit_vectorx
    y_corner1 = yc2 + unit_vectory
    x_corner2 = xc2
    y_corner2 = yc2
    r1 = math.sqrt(np.power(x_corner1,2)+np.power(y_corner1,2))
    t1 = np.arctan2(y_corner1,x_corner1)
    r2 = math.sqrt(np.power(x_corner2,2)+np.power(y_corner2,2))
    t2 = np.arctan2(y_corner2,x_corner2)
    x_corner1 = x_in+r1*np.cos(t1+heading+pos_flag*math.pi/2)
    y_corner1 = y_in+r1*np.sin(t1+heading+pos_flag*math.pi/2)
    x_corner2 = x_in+r2*np.cos(t2+heading+pos_flag*math.pi/2)
    y_corner2 = y_in+r2*np.sin(t2+heading+pos_flag*math.pi/2)
    print(x_corner1,y_corner1,x_corner2,y_corner2,3)
    print(math.sqrt(np.power((x_corner1-x_corner2),2)+np.power((y_corner1-y_corner2),2)))
    #return x_corner1,y_corner1,x_corner2,y_corner2,3
111997 | from bullet import Bullet, YesNo
from pathlib import Path
class BlueprintManager:
    """Resolves blueprint folders inside the configured library directory."""

    def __init__(self):
        # Configuration mapping; populated by setup().
        self.config = None

    def setup(self, config: dict):
        """Attach the configuration mapping used to locate the library."""
        self.config = config

    def get_blueprint_folder(self, blueprint_name: str = None):
        """Return the folder for *blueprint_name*, or prompt the user to pick one.

        The library root comes from ``config["library_folder"]`` (default
        ``~/.ptah/library/``). Without a name, every ``shelving/blueprint``
        pair found on disk is offered through an interactive Bullet menu.
        """
        root = Path(self.config.get("library_folder", "~/.ptah/library/")).expanduser()
        if blueprint_name:
            return root / blueprint_name
        # Display label (" shelving/name") -> blueprint path.
        choices = {
            f" {shelf.name}/{entry.name}": entry
            for shelf in root.iterdir()
            for entry in shelf.iterdir()
        }
        picker = Bullet(prompt="Choose your blueprint:", choices=list(choices.keys()))
        return choices[picker.launch()]
| StarcoderdataPython |
6706609 | <reponame>amcclead7336/Enterprise_Data_Science_Final
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReleaseStartMetadata(Model):
    # NOTE: generated msrest model (see file header: "Generated file, DO NOT
    # EDIT") -- hand changes will be lost when the client is regenerated.
    """ReleaseStartMetadata.

    :param artifacts: Sets list of artifact to create a release.
    :type artifacts: list of :class:`ArtifactMetadata <release.v4_1.models.ArtifactMetadata>`
    :param definition_id: Sets definition Id to create a release.
    :type definition_id: int
    :param description: Sets description to create a release.
    :type description: str
    :param is_draft: Sets 'true' to create release in draft mode, 'false' otherwise.
    :type is_draft: bool
    :param manual_environments: Sets list of environments to manual as condition.
    :type manual_environments: list of str
    :param properties:
    :type properties: :class:`object <release.v4_1.models.object>`
    :param reason: Sets reason to create a release.
    :type reason: object
    """

    # msrest (de)serialization map: python attribute -> REST field name and
    # wire type ('[X]' denotes a list of X).
    _attribute_map = {
        'artifacts': {'key': 'artifacts', 'type': '[ArtifactMetadata]'},
        'definition_id': {'key': 'definitionId', 'type': 'int'},
        'description': {'key': 'description', 'type': 'str'},
        'is_draft': {'key': 'isDraft', 'type': 'bool'},
        'manual_environments': {'key': 'manualEnvironments', 'type': '[str]'},
        'properties': {'key': 'properties', 'type': 'object'},
        'reason': {'key': 'reason', 'type': 'object'}
    }

    def __init__(self, artifacts=None, definition_id=None, description=None, is_draft=None, manual_environments=None, properties=None, reason=None):
        super(ReleaseStartMetadata, self).__init__()
        self.artifacts = artifacts
        self.definition_id = definition_id
        self.description = description
        self.is_draft = is_draft
        self.manual_environments = manual_environments
        self.properties = properties
        self.reason = reason
| StarcoderdataPython |
4984541 | <reponame>Mudassaralimosu/imdb-rotten_tomatoes-metacritic<filename>src/imdb_rtomatoes_metacritic.py
import requests
from bs4 import BeautifulSoup
class Rating:
    """Scrape IMDb, Rotten Tomatoes and Metacritic ratings for one title.

    The three source URLs are resolved once, at construction time, from the
    first Google search results for *name*.

    Fix: the URLs were previously *class* attributes assigned via
    ``Rating.xxx = url`` inside ``__init__``, so every instance shared -- and
    clobbered -- the same three URLs; they are now per-instance attributes.
    """

    def __init__(self, name):
        self.name = name
        self.imdb_url = ""
        self.rtomatoes_url = ""
        self.metacritic_url = ""
        google_search = requests.get("https://www.google.com/search?q=" + self.name)
        soup = BeautifulSoup(google_search.text, 'html.parser')
        span = soup.find_all('span', class_="BNeawe")
        # assumes the first three results are IMDb, Rotten Tomatoes and
        # Metacritic in that order -- TODO confirm; Google markup changes often.
        for i in range(3):
            for link in span[i]:
                last = link['href'].find('&')
                # Strip the leading "/url?q=" (7 chars) and trailing params.
                url = link['href'][7:last]
                if i == 0:
                    self.imdb_url = url
                if i == 1:
                    self.rtomatoes_url = url
                if i == 2:
                    self.metacritic_url = url

    def soup(self, url):
        """Fetch *url* with a browser-like User-Agent and return parsed HTML."""
        user_agent = {'User-agent': 'Mozilla/5.0'}
        return BeautifulSoup(requests.get(url, headers=user_agent).content, 'html.parser')

    def imdb_score(self):
        """Return the aggregate rating text from the IMDb title page."""
        imdb_r = self.soup(self.imdb_url).find_all('span', class_="AggregateRatingButton__RatingScore-sc-1ll29m0-1 iTLWoV")
        return (imdb_r[0].contents[0])

    def imdb_votes(self):
        """Return the vote-count text from the IMDb ratings page."""
        imdb_v = self.soup(self.imdb_url + "ratings/?ref_=tt_ov_rt").find_all('div', class_="allText")
        votes = imdb_v[0].contents[1].contents[0]
        end = votes.rfind('\n')
        start = votes[:23].rfind(' ')
        return (votes[start+1:end])

    def metacritic_score(self):
        """Return the Metascore badge text (scraped from the IMDb page)."""
        meta_s = self.soup(self.imdb_url).find_all('span', class_="score-meta")
        return (meta_s[0].contents[0])

    def metacritic_votes(self):
        """Return the review count from the Metacritic "based on" element."""
        meta_c = self.soup(self.metacritic_url).find_all('span', class_="based_on")
        return (meta_c[0].contents[0].split(' ')[2])

    def tomatometer(self):
        """Return the tomatometer score attribute from the RT score board."""
        t_meter = self.soup(self.rtomatoes_url).find('score-board').attrs['tomatometerscore']
        return (t_meter)

    def tomatometer_reviews(self):
        """Return the first three characters of the RT tomatometer link text."""
        t_review = self.soup(self.rtomatoes_url).find_all('a', class_="scoreboard__link scoreboard__link--tomatometer")
        return (t_review[0].contents[0][:3])
| StarcoderdataPython |
3541557 | <reponame>PauloLuna/ansible-pan<filename>library/panos_gre_tunnel.py<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_gre_tunnel
short_description: Create GRE tunnels on PAN-OS devices.
description:
- Create GRE tunnel objects on PAN-OS devices.
author:
- <NAME> (@shinmog)
version_added: "2.9"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
- 'Minimum PAN-OS version: 9.0'
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.full_template_support
- panos.state
options:
name:
description:
- Name of object to create.
required: true
interface:
description:
- Interface to terminate the tunnel.
local_address_type:
description:
Type of local address.
choices:
- ip
- floating-ip
default: ip
local_address_value:
description:
- IP address value.
peer_address:
description:
- Peer IP address.
tunnel_interface:
description:
- To apply GRE tunnels to tunnel interface.
ttl:
description:
- TTL.
type: int
default: 64
copy_tos:
description:
- Copy IP TOS bits from inner packet to GRE packet.
type: bool
enable_keep_alive:
description:
- Enable tunnel monitoring.
type: bool
keep_alive_interval:
description:
- Keep alive interval.
type: int
default: 10
keep_alive_retry:
description:
- Keep alive retry time.
type: int
default: 3
keep_alive_hold_timer:
description:
- Keep alive hold timer.
type: int
default: 5
disabled:
description:
- Disable the GRE tunnel.
type: bool
'''
EXAMPLES = '''
- name: Create GRE tunnel
panos_gre_tunnel:
provider: '{{ provider }}'
name: 'myGreTunnel'
interface: 'ethernet1/5'
local_address_value: '10.1.1.1/24'
peer_address: '192.168.1.1'
tunnel_interface: 'tunnel.7'
ttl: 42
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.network import GreTunnel
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
    """Ansible entry point: ensure a PAN-OS GRE tunnel object is present/absent.

    Builds the module argument spec via the shared connection helper, turns
    the parameters into a pandevice GreTunnel object, and reconciles it with
    the device's current configuration.
    """
    helper = get_connection(
        template=True,
        template_stack=True,
        with_classic_provider_spec=True,
        with_state=True,
        min_pandevice_version=(0, 13, 0),
        min_panos_version=(9, 0, 0),
        argument_spec=dict(
            name=dict(required=True),
            interface=dict(),
            local_address_type=dict(default='ip', choices=['ip', 'floating-ip']),
            local_address_value=dict(),
            peer_address=dict(),
            tunnel_interface=dict(),
            ttl=dict(type='int', default=64),
            copy_tos=dict(type='bool'),
            enable_keep_alive=dict(type='bool'),
            keep_alive_interval=dict(type='int', default=10),
            keep_alive_retry=dict(type='int', default=3),
            keep_alive_hold_timer=dict(type='int', default=5),
            disabled=dict(type='bool'),
        ),
    )
    module = AnsibleModule(
        argument_spec=helper.argument_spec,
        required_one_of=helper.required_one_of,
        supports_check_mode=True,
    )

    # Verify libs are present and resolve the pandevice parent object.
    parent = helper.get_pandevice_parent(module)

    # GreTunnel takes the module parameters verbatim, so build its kwargs
    # directly from the parameter names.
    param_names = (
        'name', 'interface', 'local_address_type', 'local_address_value',
        'peer_address', 'tunnel_interface', 'ttl', 'copy_tos',
        'enable_keep_alive', 'keep_alive_interval', 'keep_alive_retry',
        'keep_alive_hold_timer', 'disabled',
    )
    spec = {key: module.params[key] for key in param_names}

    # Snapshot the GRE tunnels currently configured on the device.
    try:
        listing = GreTunnel.refreshall(parent, add=False)
    except PanDeviceError as e:
        module.fail_json(msg='Failed refresh: {0}'.format(e))

    # Build the desired object and apply the requested state.
    obj = GreTunnel(**spec)
    parent.add(obj)
    changed = helper.apply_state(obj, listing, module)
    module.exit_json(changed=changed)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3591936 | <reponame>sararob/python-aiplatform
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import log_pipeline_job_to_experiment_sample
import test_constants as constants
def test_log_pipeline_job_sample(
    mock_sdk_init, mock_pipeline_job_create, mock_pipeline_job_submit
):
    """Check the sample wires aiplatform.init / PipelineJob / submit correctly.

    The three fixtures mock the Vertex AI SDK, so no real API calls are made.
    """
    # Run the sample under test with the shared test constants.
    log_pipeline_job_to_experiment_sample.log_pipeline_job_to_experiment_sample(
        experiment_name=constants.EXPERIMENT_NAME,
        pipeline_job_display_name=constants.DISPLAY_NAME,
        template_path=constants.TEMPLATE_PATH,
        pipeline_root=constants.STAGING_BUCKET,
        parameter_values=constants.PARAMS,
        project=constants.PROJECT,
        location=constants.LOCATION,
    )
    # The SDK must be initialised with the sample's project/location ...
    mock_sdk_init.assert_called_with(
        project=constants.PROJECT, location=constants.LOCATION
    )
    # ... the pipeline job created from the sample's arguments ...
    mock_pipeline_job_create.assert_called_with(
        display_name=constants.DISPLAY_NAME,
        template_path=constants.TEMPLATE_PATH,
        pipeline_root=constants.STAGING_BUCKET,
        parameter_values=constants.PARAMS,
    )
    # ... and submitted to the requested experiment.
    mock_pipeline_job_submit.assert_called_with(experiment=constants.EXPERIMENT_NAME)
| StarcoderdataPython |
4809317 | <filename>scripts/vwize.py<gh_stars>10-100
import sys
import argparse
import os
from collections import Counter
def parse_args():
    """Parse the command-line options of the VW feature-file generator."""
    parser = argparse.ArgumentParser()
    # Inputs: optional strain list, required search files (one per sample).
    parser.add_argument("-s", "--strains", dest="strains", nargs="+", type=str)
    parser.add_argument("-i", "--input", dest="searchfiles", nargs="+", type=str, required=True)
    # Sketch parameters (recorded only; see vw_line).
    parser.add_argument("-K", "--kmer", dest="kmer", type=int, default=16)
    parser.add_argument("-N", "--sketchSize", dest="sketchSize", type=int, default=1000)
    # Per-input coinfection flags (0/1), parallel to --input.
    parser.add_argument("-C", "--coinf", dest="coinf", nargs="+", type=int, required=True)
    # Output-shaping switches.
    parser.add_argument("-M", "--multiclass", dest="multiclass", action="store_true")
    parser.add_argument("-n", "--normalize", dest="normalize", action="store_true")
    parser.add_argument("-c", "--collapse", dest="collapse", action="store_true")
    parser.add_argument("-L", "--label", dest="label", type=str, default="hpv")
    parser.add_argument("-P", "--predict", dest="predict", type=str, help="A file 2-column TSV file with mappings from strains names to prediction labels (i.e. integer keys for the wabbit")
    return parser.parse_args()
def make_vw_classes(strainlist):
    """Map each strain name to a VW class index, plus a trailing class.

    :param strainlist: ordered list of strain names
    :returns: dict of name -> 0-based index, with "coinfected" assigned the
        index after the last strain
    """
    # enumerate() instead of the old xrange() index loop: same mapping,
    # idiomatic, and compatible with both Python 2 and 3.
    vw_dict = {strain: idx for idx, strain in enumerate(strainlist)}
    vw_dict["coinfected"] = len(strainlist)
    return vw_dict
def quantify_strains(strainlist, searchfile, collapse=False):
    """Count classification labels in a tab-separated search-result file.

    Each line is expected to have the label as the second space-separated
    token of its second tab column. With *collapse* only the label's first
    character is counted. Lines lacking that token are tallied as
    "unclassified" (or "U" when collapsing).

    :param strainlist: optional list of labels to pre-seed with zero counts
    :param searchfile: path to the input file
    :returns: collections.Counter of label -> occurrences
    """
    counts = Counter()
    if strainlist is not None:
        for strain in strainlist:
            counts[strain] = 0
    with open(searchfile, "r") as handle:
        for line in handle:
            columns = line.split("\t")
            try:
                token = columns[1].strip().split(" ")[1]
                label = token[0] if collapse else token
            except IndexError:
                # Missing second token: bucket as unclassified.
                label = "U" if collapse else "unclassified"
            counts[label] += 1
    return counts
def vw_line(str_d, isCoinfected, sketchSize, kmer, multiclass, class_d, label_str, normalize, arg_label):
    """Format one Vowpal Wabbit input line from strain counts.

    :param str_d: mapping of strain label -> read count
    :param isCoinfected: truthy when the sample is coinfected (binary mode)
    :param sketchSize: unused, kept for interface compatibility
    :param kmer: unused, kept for interface compatibility
    :param multiclass: when true, emit class_d[label_str] instead of +/-1
    :param class_d: mapping of label -> VW multiclass id
    :param label_str: label used for the multiclass lookup
    :param normalize: emit count/total fractions instead of raw counts
    :param arg_label: VW example tag (emitted with a leading apostrophe)
    :returns: one VW-formatted line, e.g. "-1 1.0 'hpv |vir A:2 B:3"
    """
    total = sum(str_d.values())
    if multiclass:
        head = str(class_d[label_str])
    else:
        head = "1" if isCoinfected else "-1"
    features = []
    for strain in str_d.keys():
        value = float(str_d[strain]) / float(total) if normalize else str_d[strain]
        features.append(" %s:%s" % (strain, value))
    return head + " 1.0 '" + arg_label + " |vir" + "".join(features)
if __name__ == "__main__":
    # NOTE(review): Python 2 script -- this block uses `print` statements
    # (and make_vw_classes uses xrange); run under python2.
    args = parse_args()
    class_d = {}
    if args.multiclass:
        # Default 4-class key (A-D collapse groups) plus a coinfected class.
        class_d["A"] = "1"
        class_d["B"] = "2"
        class_d["C"] = "3"
        class_d["D"] = "4"
        class_d["coinfected"] = "5"
    if args.predict is not None:
        # A prediction-mapping TSV implies multiclass mode with custom ids.
        args.multiclass = True
        custom_key_d = {}
        with open(args.predict, "r") as ifi:
            for line in ifi:
                tokens = line.split("\t")
                custom_key_d[tokens[0]] = int(tokens[1])
        class_d = custom_key_d
    for i in xrange(0, len(args.searchfiles)):
        str_d = quantify_strains(args.strains, args.searchfiles[i], args.collapse)
        if args.coinf[i]:
            label_str = "coinfected"
        else:
            # Label = first character of the file basename's "_"-prefix.
            label_str = os.path.basename(args.searchfiles[i]).split("_")[0][0]
        print vw_line(str_d, args.coinf[i], args.sketchSize, args.kmer, args.multiclass, class_d, label_str, args.normalize, args.label)
| StarcoderdataPython |
135830 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
https://developer.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
"""
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class EndPointsClient(rest_client.RestClient):
    """Client for the Keystone v3 service-catalog endpoints API."""
    api_version = "v3"

    def list_endpoints(self):
        """List all endpoints (GET /endpoints)."""
        resp, body = self.get('endpoints')
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))

    def create_endpoint(self, **kwargs):
        """Create an endpoint (POST /endpoints).

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/identity/v3/index.html#create-endpoint
        """
        resp, body = self.post('endpoints', json.dumps({'endpoint': kwargs}))
        self.expected_success(201, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))

    def update_endpoint(self, endpoint_id, **kwargs):
        """Update an endpoint (PATCH /endpoints/{id}).

        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref/identity/v3/index.html#update-endpoint
        """
        resp, body = self.patch('endpoints/%s' % endpoint_id,
                                json.dumps({'endpoint': kwargs}))
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))

    def delete_endpoint(self, endpoint_id):
        """Delete an endpoint (DELETE /endpoints/{id})."""
        resp, body = self.delete('endpoints/%s' % endpoint_id)
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp, body)

    def show_endpoint(self, endpoint_id):
        """Show a single endpoint (GET /endpoints/{id})."""
        resp, body = self.get('endpoints/%s' % endpoint_id)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))
| StarcoderdataPython |
3576518 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 <NAME> ( <EMAIL> )
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""
Short functions for working with collections of raw reads.
"""
from trseeker.seqio.sra_file import fastq_reader
from trseeker.tools.jellyfish_tools import query_kmers, query_kmers_new
from collections import Counter, defaultdict
from trseeker.tools.edit_distance import get_ed_similarity
def raw_reads_load_fastq_reads(fastq_file, limit=1000000):
    ''' Return list of raw reads from fastq file.
    @param fastq_file: fastq file with reads.
    @param limit: maximum number of reads returned
    '''
    result = []
    for i, read in enumerate(fastq_reader(fastq_file)):
        # Stop *before* appending so at most `limit` reads are returned
        # (the old `if i > limit: break` after the append let limit+2 through).
        if i >= limit:
            break
        # Progress indicator, overwriting the same console line.
        print(i, "\r", end=" ")
        result.append(read.sequence)
    # A bare `print` was a no-op expression in Python 3; print() actually
    # emits the newline that terminates the progress line.
    print()
    return result
def get_reads_by_kmer(kmer, all_reads):
    ''' Return reads containing the given kmer (plain substring match).
    @param kmer: kmer
    @param all_reads: iterable of read sequences
    '''
    return [read for read in all_reads if kmer in read]
def raw_reads_get_flanks(kmer, reads):
    ''' Get kmer flanks from raw reads.

    A read contributes a flank pair only when it contains exactly one
    occurrence of the kmer (split() yields exactly two parts); reads with
    zero or multiple occurrences are collected separately.

    @param kmer: given kmer
    @param reads: list of reads
    @return: ([(left_flank, right_flank), ...], unsplit_reads)
    '''
    result = []
    errors = []
    for read in reads:
        try:
            left, right = read.split(kmer)
        except ValueError:
            # Zero or >1 occurrences. The old code fell through and still
            # appended (left, right) here -- stale values from the previous
            # read, or a NameError on the very first failure.
            errors.append(read)
            continue
        result.append((left, right))
    return result, errors
def raw_reads_get_next_kmers(right_flanks, k, i=0):
''' Return next kmers for right flanks
@param right_flanks: list of flanks
@param k: kmer length
@param i: start position
'''
result = []
for flank in right_flanks:
if len(flank)-i < k:
continue
result.append(flank[i:k])
return result
def raw_reads_get_shifted_kmers(right_flanks, kmer, i):
    ''' Return k-mers shifted i bases to the right of *kmer* in each flank.

    For shifts smaller than k the window still overlaps the seed kmer, so the
    overlapping suffix of *kmer* is reused as a prefix and only the missing
    tail is read from the flank; for larger shifts the whole window comes
    from the flank itself.

    @param right_flanks: flank sequences to the right of the seed kmer
    @param kmer: seed kmer
    @param i: shift in bases
    @return: (list_of_kmers, number_of_too_short_flanks)
    '''
    k = len(kmer)
    if i < k:
        # Overlapping shift: keep kmer[i:] and borrow i bases from the flank.
        prefix, start, take = kmer[i:], 0, i
    else:
        # Disjoint shift: take the full window out of the flank.
        prefix, start, take = None, i - k, k
    kmers = []
    short = 0
    for flank in right_flanks:
        window = flank[start:start + take]
        candidate = prefix + window if prefix else window
        if len(candidate) == k:
            kmers.append(candidate)
        else:
            short += 1
    return kmers, short
def raw_reads_get_variants(kmer, case="upper"):
    ''' Return one-base left and right shift variants of the given kmer.

    @param kmer: given kmer (surrounding whitespace is stripped)
    @param case: "upper" (default) for ACGT, anything else for acgt
    @return: (left_variants, right_variants) where each left variant is
        N + kmer[:-1] and each right variant is kmer[1:] + N, for N in the
        alphabet order A, C, G, T
    '''
    alphabet = "ACGT" if case == "upper" else "acgt"
    seed = kmer.strip()
    suffix = seed[1:]
    prefix = seed[:-1]
    left_variants = [base + prefix for base in alphabet]
    right_variants = [suffix + base for base in alphabet]
    return left_variants, right_variants
def raw_reads_continue_kmer_right(kmer, jellyfish_db, cutoff=0):
    ''' Return (count, kmer) candidates that extend *kmer* one base to the
    right, sorted ascending by count.

    @param kmer: seed kmer
    @param jellyfish_db: either a jellyfish database (queried via
        query_kmers) or an in-memory defaultdict of kmer -> count
    @param cutoff: counts below this value are zeroed before sorting
    '''
    _, right_variants = raw_reads_get_variants(kmer)
    if isinstance(jellyfish_db, defaultdict):
        scored = [(int(jellyfish_db[variant]), variant) for variant in right_variants]
    else:
        counts = query_kmers(jellyfish_db, right_variants, both_strands=True, verbose=False)
        scored = [(int(count), variant) for variant, count in counts.items()]
    if cutoff:
        # Zero out sub-cutoff counts so they sink to the front of the sort.
        scored = [(count, variant) if count >= cutoff else (0, variant)
                  for count, variant in scored]
    scored.sort()
    return scored
def raw_reads_continue_kmer_left(kmer, jellyfish_db, cutoff=0):
    ''' Return (count, kmer) candidates that precede *kmer* one base to the
    left, sorted ascending by count; mirrors raw_reads_continue_kmer_right.

    @param kmer: seed kmer
    @param jellyfish_db: either a jellyfish database (queried via
        query_kmers) or an in-memory defaultdict of kmer -> count
    @param cutoff: counts below this value are zeroed before sorting
    '''
    left_data, right_data = raw_reads_get_variants(kmer)
    if not isinstance(jellyfish_db, defaultdict):
        L = query_kmers(jellyfish_db, left_data, both_strands=True, verbose=False)
        L = [(int(v), k) for k, v in L.items()]
    else:
        L = [(int(jellyfish_db[_kmer]), _kmer) for _kmer in left_data]
    # Apply the cutoff *before* sorting, exactly like the right-hand twin:
    # the old order (sort, then zero) left the zeroed entries scattered
    # through an otherwise count-sorted list.
    if cutoff:
        for i, x in enumerate(L):
            if x[0] < cutoff:
                L[i] = (0, x[1])
    L.sort()
    return L
def raw_reads_get_variability(right_flanks, k, used_kmers, ed_cutoff=80):
    ''' Return variability of shifted kmers.
    @param right_flanks: list of flanks
    @param k: k
    @param used_kmers: set of previously used kmers
    @param ed_cutoff: cutoff of edit distance
    @return: (fraction_of_ok, ref_kmer, used_kmers, new_kmers)
    '''
    # The most frequent leading k-mer of the flanks serves as the reference.
    next_kmers = raw_reads_get_next_kmers(right_flanks, k)
    c = Counter(next_kmers)
    # NOTE(review): raises IndexError when no flank yields a full k-mer.
    ref_kmer = c.most_common()[0][0]
    new_kmers = set()
    ok = 0
    error = 0
    for x in c:
        # Similarity (percent-like; compared against ed_cutoff=80) between
        # the reference and each observed k-mer.
        ed = get_ed_similarity(ref_kmer, x)
        if ed > ed_cutoff:
            ok += c[x]
            used_kmers.add(x)
            # NOTE(review): get_revcomp is not imported anywhere in this
            # module -- this line raises NameError when reached; it needs to
            # be imported from trseeker's sequence tools.
            used_kmers.add(get_revcomp(x))
            new_kmers.add(x)
        else:
            error += c[x]
    # NOTE(review): ZeroDivisionError when ok + error == 0 (empty Counter).
    return float(ok)/(error + ok), ref_kmer, used_kmers, new_kmers
def group_kmers_by_hamming(kmers, d=1):
    ''' Return group of kmer joined by hamming distance.
    @param kmers: list of kmers
    @param d: hamming distance
    '''
    # NOTE(review): unimplemented stub -- always returns None. Callers must
    # not rely on it until grouping by Hamming distance <= d is implemented.
    pass
9797947 | import string
import secrets # for random token generation
from datetime import datetime
from .extensions import db
class Link(db.Model):
    """A shortened-URL record with a visit counter and creation timestamp."""
    id = db.Column(db.Integer, primary_key=True)
    original_url = db.Column(db.String(512))
    short_url = db.Column(db.String(8), unique = True)
    visits = db.Column(db.Integer, default = 0)
    date_created = db.Column(db.DateTime, default = datetime.now)

    def __init__(self, **kwargs):
        # Flask-SQLAlchemy models accept column values as keyword arguments;
        # forward them, then assign a freshly generated short code.
        super().__init__(**kwargs)
        self.short_url = self.generate_short_link()

    def generate_short_link(self):
        """Return an 8-hex-character code not yet present in the table."""
        while True:
            candidate = secrets.token_hex(4)  # 4 random bytes -> 8 hex chars
            if not self.query.filter_by(short_url=candidate).first():
                return candidate
| StarcoderdataPython |
8184509 | <reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
import iotbx.pdb
from libtbx.str_utils import split_keeping_spaces
import sys
import six
# Copy the PDB -> monomer-library atom-name translation table with
# whitespace-stripped keys so lookups by bare atom name succeed regardless
# of column padding.
trans_dict = {}
for k,v in six.iteritems(iotbx.pdb.rna_dna_atom_names_reference_to_mon_lib_translation_dict):
    trans_dict[k.strip()] = v
# Manual override: map H2' to H2* (not present in the reference table with
# this value, presumably for old-style '*' naming — TODO confirm).
trans_dict["H2'"] = "H2*"
def trans_field(flds, i):
    """Translate the atom name in flds[i] in place via trans_dict.

    Column alignment is preserved: when the translated name is longer or
    shorter, the whitespace field that follows is shrunk or grown to match.
    """
    old_name = flds[i]
    new_name = trans_dict[old_name]
    flds[i] = new_name
    if i + 1 < len(flds):
        pad = len(flds[i + 1])
        growth = len(new_name) - len(old_name)
        # The trailing whitespace field must be wide enough to absorb growth.
        assert pad > growth
        flds[i + 1] = " " * (pad - growth)
def iter_until_loop(lines):
    """Yield lines until a comment ('#...') or a 'loop_' record is reached.

    The terminating line itself is echoed to stdout and not yielded.
    """
    for current in lines:
        hit_terminator = current.startswith("#") or current == "loop_"
        if hit_terminator:
            print(current)
            return
        yield current
def rename_generic(lines, len_flds, i_list):
    """Translate the atom-name columns i_list of each row until the next loop_.

    Each row must split into exactly len_flds fields (values and the
    whitespace between them); the rewritten row is printed.
    """
    for row in iter_until_loop(lines):
        fields = split_keeping_spaces(row)
        assert len(fields) == len_flds
        for column in i_list:
            trans_field(fields, column)
        print("".join(fields))
def rename_atom(lines):
    # _chem_comp_atom loop: 10 fields per row, atom name in column 3.
    rename_generic(lines, 10, [3])
def rename_tree(lines):
    """Translate the _chem_comp_tree loop; structural keywords are kept as-is."""
    keep_as_is = ("n/a", "START", "ADD", "END", ".")
    for row in iter_until_loop(lines):
        fields = split_keeping_spaces(row)
        assert len(fields) == 10
        for column in (3, 5, 7, 9):
            if fields[column] not in keep_as_is:
                trans_field(fields, column)
        print("".join(fields))
# Thin wrappers over rename_generic: each pair below is the (total field
# count, atom-name column indices) layout of one CIF loop category.
def rename_bond(lines):
    rename_generic(lines, 12, [3, 5])
def rename_angle(lines):
    rename_generic(lines, 12, [3, 5, 7])
def rename_tor(lines):
    rename_generic(lines, 18, [5, 7, 9, 11])
def rename_chir(lines):
    rename_generic(lines, 14, [5, 7, 9, 11])
def rename_plan(lines):
    rename_generic(lines, 8, [5])
def rename_link_bond(lines):
    rename_generic(lines, 16, [5, 9])
def rename_link_angle(lines):
    rename_generic(lines, 18, [5, 9, 13])
def rename_link_tor(lines):
    rename_generic(lines, 26, [7, 11, 15, 19])
def run(args):
    """Translate atom names in a monomer-library CIF file.

    Echoes every line of the input file; when the *last* header tag of a known
    loop category is seen, the data rows that follow are rewritten with
    translated atom names.

    @param args: list with exactly one element, the CIF file path
    """
    assert len(args) == 1
    # BUG FIX: the original leaked the file handle (open(...).read() without
    # close); use a context manager so the file is closed deterministically.
    with open(args[0]) as f:
        lines = iter(f.read().splitlines())
    for line in lines:
        print(line)
        if (line == "_chem_comp_atom.partial_charge"):
            rename_atom(lines)
        elif (line == "_chem_comp_tree.connect_type"):
            rename_tree(lines)
        elif (line == "_chem_comp_bond.value_dist_esd"):
            rename_bond(lines)
        elif (line == "_chem_comp_angle.value_angle_esd"):
            rename_angle(lines)
        elif (line == "_chem_comp_tor.period"):
            rename_tor(lines)
        elif (line == "_chem_comp_chir.volume_sign"):
            rename_chir(lines)
        elif (line == "_chem_comp_plane_atom.dist_esd"):
            rename_plan(lines)
        #
        elif (line == "_chem_link_bond.value_dist_esd"):
            rename_link_bond(lines)
        elif (line == "_chem_link_angle.value_angle_esd"):
            rename_link_angle(lines)
        elif (line == "_chem_link_tor.period"):
            rename_link_tor(lines)
        elif (line == "_chem_link_chir.volume_sign"):
            raise RuntimeError("Not implemented.")
        elif (line == "_chem_link_plane.dist_esd"):
            raise RuntimeError("Not implemented.")

if (__name__ == "__main__"):
    run(args=sys.argv[1:])
| StarcoderdataPython |
1943589 |
from jumpscale import j
# TODO: P2 S4 :eduard TextLineEditor tool does not work any more, is a
# pitty because ideal to parse config files on a filesystem (id:83)
JSBASE = j.application.jsbase_get_class()
class TextLineEditor(JSBASE):
    """
    represents a piece of text but broken appart in blocks/tokens
    this one works on a line basis

    Every line is wrapped in an LTLine that can be tagged with a block name
    and block number; whole blocks can then be matched, fetched, appended,
    inserted, replaced or deleted, and the result saved back to disk.
    """

    def __init__(self, text, path):
        """
        @param text: the text to edit, split on newlines
        @param path: destination file path used by save()
        """
        JSBASE.__init__(self)
        self.lines = []
        self._higestblocknr = {}  # key is name of block, the value is the last used blocknr
        self.path = path
        for line in text.split("\n"):
            self.lines.append(LTLine(line))

    def getNrLines(self):
        """Return the number of lines currently held."""
        return len(self.lines)

    def existsBlock(self, blockname):
        """Return True when a block with this name has been registered."""
        return blockname in self._higestblocknr

    def getBlockNames(self):
        """Return the list of registered block names."""
        return list(self._higestblocknr.keys())

    def matchBlocks(
            self,
            blockname,
            blockStartPatterns=['.*'],
            blockStartPatternsNegative=[],
            blockStopPatterns=[],
            blockStopPatternsNegative=[],
            blockfilter=""):
        """
        walk over blocks which are marked as matched and split blocks in more blocks depending criteria
        can be usefull to do this multiple times (sort of iterative) e.g. find class and then in class remove comments
        @param blockfilter will always match beginning of blockname e.g. can use userauth.sites then change userauth.sites will match all sites
        look for blocks starting with line which matches one of patterns in blockStartPatterns and not matching one of patterns in blockStartPatternsNegative
        block will stop when line found which matches one of patterns in blockStopPatterns and not in blockStopPatternsNegative or when next match for start is found
        example pattern: '^class ' looks for class at beginning of line with space behind
        """
        # check types of input
        if not (isinstance(blockStartPatterns, list) and isinstance(blockStartPatternsNegative, list) and
                isinstance(blockStopPatterns, list) and isinstance(blockStopPatternsNegative, list)):
            raise j.exceptions.RuntimeError(
                "Blockstartpatterns,blockStartPatternsNegative,blockStopPatterns,blockStopPatternsNegative has to be of type list")
        state = "scan"
        lines = self.lines
        line = ""
        for t in range(len(lines)):
            lineObject = lines[t]
            line = lineObject.line
            if t == len(lines) - 1:
                # end of file
                if state == "foundblock":  # still in found block so add the last line
                    self._processLine(lineObject, blockname)  # add last line
                return
            if state == "foundblock" and j.data.regex.matchMultiple(
                    blockStopPatterns, line) and not j.data.regex.matchMultiple(blockStopPatternsNegative, line):
                # found end of block
                state = "scan"  # can continue to scan for next line
                self._processLine(lineObject, blockname)
                continue
            if state == "foundblock":  # still in found block so add the last line
                self._processLine(lineObject, blockname)  # add last line
            # NOTE(review): when a start pattern matches while already inside a
            # block, _processLine runs twice for the same line and will report
            # a bug via _processLine's guard - confirm this is intended.
            if j.data.regex.matchMultiple(blockStartPatterns, line) and not j.data.regex.matchMultiple(
                    blockStartPatternsNegative, line):
                # found beginning of block
                state = "foundblock"
                self._processLine(lineObject, blockname, next=True)

    def _processLine(self, lineObject, blockname, next=False):
        """Tag a line with blockname; allocate a fresh block number when next=True."""
        if lineObject.block == blockname:
            # BUG FIX: the original referenced the undefined name 'blo' here,
            # raising NameError instead of reporting the bug.
            j.errorhandler.raiseBug(
                message="Cannot find block with name %s in block which has already same name" %
                blockname, category="lineeditor")
        lineObject.block = blockname
        if next:
            lineObject.blocknr = self.getNextBlockNr(blockname)
        else:
            lineObject.blocknr = self.getHighestBlockNr(blockname)

    def getNextBlockNr(self, name):
        """Allocate and return the next block number for this block name."""
        if name not in self._higestblocknr:
            self._higestblocknr[name] = 1
        else:
            self._higestblocknr[name] += 1
        return self._higestblocknr[name]

    def getHighestBlockNr(self, name):
        """Return the last allocated block number for this block name."""
        if name not in self._higestblocknr:
            raise j.exceptions.RuntimeError("Cound not find block with name %s" % name)
        else:
            return self._higestblocknr[name]

    def appendBlock(self, text, blockname=""):
        """Append text at the end as a new block with a fresh block number."""
        blocknr = self.getNextBlockNr(blockname)
        for line in text.split("\n"):
            self.lines.append(LTLine(line, blockname, blocknr))

    def insertBlock(self, linenr, text, blockname="", blocknr=None):
        """
        block will be inserted at linenr, means line with linenr will be moved backwards
        """
        if blocknr is None:
            blocknr = self.getNextBlockNr(blockname)
        # BUG FIX: lists have no revert() method (the original raised
        # AttributeError); iterate in reverse so repeated insertion at the
        # same index preserves the original line order.
        for line in reversed(text.split("\n")):
            self.lines.insert(linenr, LTLine(line, blockname, blocknr))

    def deleteBlock(self, blockname, blocknr=None):
        """Delete all lines of the named block (all numbers, or just blocknr)."""
        if blocknr is None:
            if not self.existsBlock(blockname):
                return
        else:
            self.getBlock(blockname, blocknr)  # just to check if block exists
        if blocknr is None:
            self.lines = [line for line in self.lines if (line.block != blockname)]
        else:
            # BUG FIX: the original kept lines where "block != blockname AND
            # blocknr == blocknr", which also deleted lines of *other* blocks
            # with different numbers; keep everything except the targeted
            # (blockname, blocknr) pair.
            self.lines = [line for line in self.lines
                          if not (line.block == blockname and line.blocknr == blocknr)]

    def getBlock(self, blockname, blocknr):
        """
        get block based on startline; returns the block's text
        """
        block = [line for line in self.lines if (line.block == blockname and line.blocknr == blocknr)]
        if len(block) == 0:
            raise j.exceptions.RuntimeError(
                "Cannot find block from text with blockname %s and blocknr %s" % (blockname, blocknr))
        # BUG FIX: str.join(block) is invalid (unbound join over LTLine
        # objects raised TypeError); join the raw line text instead.
        # NOTE(review): assuming newline-joined text was intended - confirm.
        return "\n".join(line.line for line in block)

    def replaceBlock(self, blockname, text, blocknr=1):
        """
        set block based on startline with new content
        """
        if text[-1] != "\n":
            text += "\n"
        state = "scan"
        lastBlockNameNr = ""
        nrlines = len(self.lines)
        x = 0
        lastx = 0
        while True:
            if x > nrlines - 1:
                break
            line = self.lines[x]
            currentBlockNameNr = "%s_%s" % (line.block, line.blocknr)
            if line.block == blockname and line.blocknr == blocknr:
                state = "found"
                lastBlockNameNr = currentBlockNameNr
                self.lines.pop(x)
                x = x - 1
                nrlines = len(self.lines)
                lastx = x
            # BUG FIX: the original compared lastBlockNameNr with itself
            # (always False); compare against the current line's id so the
            # scan stops at the first line after the removed block.
            if state == "found" and currentBlockNameNr != lastBlockNameNr:
                # end of block
                break
            x = x + 1
        # Sentinel so the insert position below is always valid; removed after.
        self.lines.append(None)
        x = lastx + 1
        text = text.rstrip()
        for line in text.split("\n"):
            self.lines.insert(x, LTLine(line, blockname, blocknr))
            x += 1
        self.lines = self.lines[:-1]

    def save(self):
        """Write the current text back to self.path."""
        txt = "\n".join([item.line for item in self.lines])
        j.sal.fs.writeFile(filename=self.path, contents=txt)

    def getFirstLineNrForBlock(self, blockname, blocknr):
        """Return the index of the first line belonging to the given block."""
        for linenr in range(len(self.lines)):
            line = self.lines[linenr]
            # BUG FIX: LTLine stores the block name on 'block', not 'name'
            # (the original raised AttributeError on every call).
            if line.block == blockname and line.blocknr == blocknr:
                return linenr
        raise j.exceptions.RuntimeError("Could not find block with name %s and blocknr %s" % (blockname, blocknr))

    def addBlock(self, blockname, text):
        """Append text as a new block; all its lines share one new block number."""
        first = True
        for line in text.split("\n"):
            if first:
                self.lines.append(LTLine(line, blockname, self.getNextBlockNr(blockname)))
                first = False
            else:
                self.lines.append(LTLine(line, blockname, self.getHighestBlockNr(blockname)))

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        if len(self.lines) > 0:
            return "".join([str(block) for block in self.lines])
        else:
            return ""
class LTLine(JSBASE):
    """One line of text, tagged with the block (name + number) it belongs to."""

    def __init__(self, line, blockname="", blocknr=0):
        # An empty blockname means this line is not part of any tracked block.
        JSBASE.__init__(self)
        self.block = blockname
        self.line = line
        self.blocknr = blocknr

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        if self.block == "":
            return "- %s\n" % (self.line)
        return "+ %s %s: %s\n" % (self.block, self.blocknr, self.line)
| StarcoderdataPython |
8094997 | <reponame>cermegno/mqtt-dash
import paho.mqtt.client as mqtt
import time
import redis
import json
# Choose the right Redis database (local instance by default).
r = redis.Redis(host='127.0.0.1', port='6379')
#r = redis.Redis(host='myredis.in.pws.cloud', port='12345', password='<PASSWORD>')
# NOTE(review): avoid committing real credentials; load them from the
# environment or a config file instead.
# Select the right MQTT broker
#broker_address="127.0.0.1"
#broker_address="test.mosquitto.org"
broker_address="broker.hivemq.com"
# We will tune in to this MQTT topic
topic = "albpiper123"
# This tells where the readings are from; also used as the Redis hash key.
location = "Alberto"
print "Collecting data for location: " + location
def on_message(client, userdata, message):
    """MQTT callback: parse the JSON payload and store the readings in Redis."""
    m = str(message.payload.decode("utf-8"))
    #print("- New message received: " + m)
    # Create a dictionary and extract the current values.
    # NOTE(review): assumes a payload of the form
    # {"readings": [{"value": <humidity>}, {"value": <temp>}]} with humidity
    # first and temperature second - confirm with the publisher.
    obj = json.loads(m)
    humidity = obj["readings"][0]["value"]
    temp = obj["readings"][1]["value"]
    # Write the received data to a database (hash keyed by location)
    print "Temp: " + str(temp) + " - Humidity: " + str(humidity)
    r.hmset(location,{'temp':temp, 'humidity': humidity})
    # You can also use other Redis structures, ex:
    #r.set('RPIvalue',m)
    # These are other things you can extract from the message
    #print("message topic=",message.topic)
    #print("message qos=",message.qos)
    #print("message retain flag=",message.retain)
print("Creating new instance ...")
client = mqtt.Client("sub1")  # create new instance
client.on_message = on_message  # attach function to callback
print("Connecting to broker ...")
client.connect(broker_address)  # connect to broker
# BUG FIX: the original re-subscribed to the topic once per second inside the
# loop; subscribing once is enough - the broker keeps the subscription alive.
client.subscribe(topic)  ### USE YOUR OWN TOPIC NAME
client.loop_start()  # start the network loop in a background thread
# NOTE: the original ended with an unreachable client.loop_stop() after the
# infinite loop; messages are handled on the loop thread via on_message.
while True:
    time.sleep(1)  # wait
| StarcoderdataPython |
1937436 | import os
from pysnark import runtime
from pysnark.runtime import LinComb
from pysnark.fixedpoint import LinCombFxp
from pysnark.boolean import LinCombBool
from pysnark.poseidon_constants import poseidon_constants
"""
Implements the Poseidon hash function with 128-bit security and 4-1 reduction
Currently supports the zkinterface and zkifbellman backends
"""
# Load Poseidon parameters for the backend selected via the environment.
try:
    backend = os.environ["PYSNARK_BACKEND"]
except KeyError:
    # No backend configured; fall back to a stub name (rejected below).
    backend = "nobackend"
if backend in poseidon_constants:
    constants = poseidon_constants[backend]
else:
    raise NotImplementedError("Poseidon is currently not implemented for this backend")
# Round counts (full/partial), state width t, S-box exponent a, and the
# per-round ARK constants plus the MDS matrix for this backend.
R_F = constants["R_F"]
R_P = constants["R_P"]
t = constants["t"]
a = constants["a"]
round_constants = constants["round_constants"]
matrix = constants["matrix"]
def matmul(x, y):
    """Multiply matrices x (n x m) and y (m x p); entries accumulate from LinComb.ZERO."""
    assert(len(x[0]) == len(y))
    n_rows, inner, n_cols = len(x), len(y), len(y[0])
    out = [[LinComb.ZERO for _ in range(n_cols)] for _ in range(n_rows)]
    for r in range(n_rows):
        for c in range(n_cols):
            acc = out[r][c]
            for k in range(inner):
                acc += x[r][k] * y[k][c]
            out[r][c] = acc
    return out
def transpose(inputs):
    """Return the transpose of a rectangular list-of-lists matrix."""
    n_rows = len(inputs)
    n_cols = len(inputs[0])
    return [[inputs[r][c] for r in range(n_rows)] for c in range(n_cols)]
def _add_round_constants(sponge, round_idx):
    # ARK step: add this round's constants element-wise.
    return [x + y for (x, y) in zip(sponge, round_constants[round_idx])]

def _mix(sponge):
    # Mix layer: multiply the state vector by the MDS matrix.
    return transpose(matmul(matrix, transpose([sponge])))[0]

def _normalize(sponge):
    # Reduce the internal PySNARK representation of each LinComb value modulo
    # the prime field. Does not affect the output circuit.
    for x in sponge:
        x.value %= runtime.backend.get_modulus()

def permute(sponge):
    """
    Runs the Poseidon permutation
    Costs 400 constraints per permutation call for a power of 5 with a 4-1 reduction

    Refactored: the three round phases previously triplicated the ARK/mix/
    normalize code inline; they now share the helpers above.
    """
    # First half of the full rounds: S-box applied to the whole state.
    for r in range(R_F // 2):
        sponge = _add_round_constants(sponge, r)
        sponge = [x ** a for x in sponge]
        sponge = _mix(sponge)
        _normalize(sponge)
    # Partial rounds: S-box applied to the first element only.
    for r in range(R_P):
        sponge = _add_round_constants(sponge, R_F // 2 + r)
        sponge[0] = sponge[0] ** a
        sponge = _mix(sponge)
        _normalize(sponge)
    # Final half of the full rounds.
    for r in range(R_F // 2):
        sponge = _add_round_constants(sponge, R_F // 2 + R_P + r)
        sponge = [x ** a for x in sponge]
        sponge = _mix(sponge)
        _normalize(sponge)
    return sponge
def poseidon_hash(inputs):
    """
    Runs the Poseidon hash on a list of LinCombs (fixed-point and boolean
    wrappers are unwrapped to their underlying LinComb first).
    """
    if not isinstance(inputs, list):
        raise RuntimeError("Can only hash lists of LinCombs")
    if not all(isinstance(v, (LinComb, LinCombFxp, LinCombBool)) for v in inputs):
        raise RuntimeError("Can only hash lists of LinCombs")
    # Unwrap fixed-point / boolean wrappers down to plain LinCombs.
    elems = [v.lc if isinstance(v, (LinCombFxp, LinCombBool)) else v for v in inputs]
    # Pad with a single ONE followed by ZEROs up to a multiple of the rate
    # (t - 1 inputs are absorbed per permutation call).
    rate = t - 1
    pad_len = rate - len(elems) % rate
    elems = elems + [LinComb.ONE] + [LinComb.ZERO] * (pad_len - 1)
    assert len(elems) % rate == 0
    # Absorb rate-many inputs at a time, permuting after each absorption.
    sponge = [LinComb.ZERO] * t
    for start in range(0, len(elems), rate):
        chunk = elems[start:start + rate]
        sponge[1:] = [s + v for (s, v) in zip(sponge[1:], chunk)]
        sponge = permute(sponge)
    return sponge[1:]
1983795 | from swagger_spec_validator import validate_spec_url
import sys
# Validate the Swagger/OpenAPI spec at the URL given as the first CLI
# argument; raises on an invalid spec (non-zero exit).
validate_spec_url(sys.argv[1])
| StarcoderdataPython |
1836470 | <filename>ProblemSet2/ProblemSet2-2.py
# Find the lowest fixed monthly payment (in $10 increments) that pays off
# the starting balance within 12 months. Python 2 script.
balance = 3926
annualInterestRate = 0.2
#import time
# Monthly interest rate derived from the annual rate.
monthlyInterest = annualInterestRate / 12.0
workingBalance = balance
minimumPayment = 0
# Brute-force search: bump the candidate payment by $10 and re-simulate the
# full year until the final balance is no longer positive. Interest accrues
# on the post-payment balance each month.
while workingBalance > 0:
    minimumPayment += 10
    workingBalance = balance
    for month in range(1, 13):
        workingBalance -= minimumPayment
        workingBalance = workingBalance + (workingBalance * monthlyInterest)
#        print month
#        print minimumPayment
#        print workingBalance
#        time.sleep(0.01)
print "Lowest Payment: " + str(minimumPayment)
| StarcoderdataPython |
6680921 | <filename>stockTrainerApp/views.py
import stripe
import quandl
import json
import datetime
import pandas as pd
from functools import wraps
from jose import jwt
from decouple import config
import json
from rest_framework.decorators import api_view
from . models import Test, Stock, User, Portfolio, Study
from . serializers import TestSerializer, StudySerializer
from rest_framework import generics
from django.shortcuts import render
from rest_framework.decorators import api_view
from django.conf import settings
from django.views.generic.base import TemplateView
from django.http import JsonResponse
from django.http import HttpResponseRedirect
# from django.contrib.auth.models import User
from rest_framework import permissions, status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import UserSerializer, UserSerializerWithToken
# Configure third-party clients at import time: Stripe uses the *test*
# secret key from Django settings; Quandl reads its key from the environment.
stripe.api_key = settings.STRIPE_SECRET_TEST_KEY
quandl.ApiConfig.api_key = config("QUANDL_API_KEY")
class TestListCreate(generics.ListCreateAPIView):
    """List all Test objects (GET) or create a new one (POST)."""
    queryset = Test.objects.all()
    serializer_class = TestSerializer
def stock(request):
    """GET endpoint: return daily WIKI price data for one stock symbol.

    Query parameters:
        NAME      -- stock symbol (required)
        STARTDATE -- YYYY-MM-DD, defaults to 2018-01-01
        ENDDATE   -- YYYY-MM-DD, defaults to 2018-01-02
        FIELDS    -- comma-separated optional columns (default "Close")
    Results for a previously requested (symbol, dates) combination are served
    from the Study cache table instead of re-querying Quandl.
    """
    if request.method != 'GET':
        return JsonResponse(status=405, data={
            'error': 'Please use get request'
        })
    stockName = request.GET.get('NAME', '')
    # without a name, hard to know what to request
    if stockName == '':
        return JsonResponse(status=400, data={
            'error': 'Please include stock symbol'
        })
    # the "2018-01-01" is the default value if STARTDATE isn't set
    startDate = request.GET.get('STARTDATE', "2018-01-01")
    try:
        datetime.datetime.strptime(startDate, '%Y-%m-%d')
    except ValueError:
        return JsonResponse(status=400, data={
            'error': 'Please include a valid date in the format YYYY-MM-DD'
        })
    endDate = request.GET.get('ENDDATE', "2018-01-02")
    try:
        datetime.datetime.strptime(endDate, '%Y-%m-%d')
    except ValueError:
        return JsonResponse(status=400, data={
            'error': 'Please include a valid date in the format YYYY-MM-DD'
        })
    # gets FIELDS, converts to uppercase, then splits into an array
    fields = request.GET.get('FIELDS', "Close").upper().split(',')
    # DL data from the Quandl API
    quandl.ApiConfig.api_key = config('QUANDL_API_KEY')
    try:
        df = quandl.get(f"WIKI/{stockName}", start_date=startDate,
                        end_date=endDate)
    # BUG FIX: the original bare 'except:' also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    except Exception:
        # This might need to get changed to a more generic answer
        print("Query error: please change your inputs (possibly invaild NAME, STARTDATE, ENDDATE) or check your API "
              "key.")
        return JsonResponse(status=500, data={
            'error': 'query error'
        })
    # frustratingly enough is quandl doesn't have data due to something be impossible it won't error, it'll just
    # return an empty dataframe. For example requesting google stock from 1999, before they went public. This won't
    # pop if the dates are set wrong, but sometimes will if they're set to the same day.
    if df.empty:
        return JsonResponse(status=404, data={
            'error': 'Data was not found for this stock, please verify that the dates and stock symbol are valid and '
                     'try again '
        })
    returnObj = {'symbol': stockName, 'startDate': startDate,
                 'endDate': endDate, 'data': []}
    # check if study exists in the database, if it does, then it returns the study
    # NOTE(review): new studies are saved below without stock_name set
    # explicitly; confirm this filter actually matches cached rows.
    check_study = Study.objects.all().filter(stock_name=stockName, start_date=startDate, end_date=endDate)
    if check_study:
        print('already here')
        temp = {}
        for check_data in check_study.values("data"):
            # json.loads allow for our data to be "unstringified" so we can return it as readable data
            temp = json.loads(check_data['data'])
        returnObj['data'] = temp
        return JsonResponse(status=200, data=returnObj)
    # this moves the date from being a row key, to another column, then converts the whole dataframe to strings. Even
    # all the numbers. This is to avoid problems with handling the date
    df_r = df.reset_index().astype(str)
    # Build the response: one dict per row, in date order (an array preserves
    # order, which a date-keyed object would not guarantee).
    for index, row in df_r.iterrows():
        rowObj = {'date': row['Date']}
        # open/close/low/high/volume are always included; the remaining
        # columns are opt-in via FIELDS.
        rowObj['open'] = row['Open']
        rowObj['close'] = row['Close']
        rowObj['low'] = row['Low']
        rowObj['high'] = row['High']
        if 'EXDIVIDEND' in fields:
            rowObj['exdividend'] = row['Ex-Dividend']
        rowObj['volume'] = row['Volume']
        if 'SPLITRATIO' in fields:
            rowObj['splitRatio'] = row['Split Ratio']
        if 'ADJHIGH' in fields:
            rowObj['adjHigh'] = row['Adj. High']
        if 'ADJOPEN' in fields:
            rowObj['adjOpen'] = row['Adj. Open']
        if 'ADJCLOSE' in fields:
            rowObj['adjClose'] = row['Adj. Close']
        if 'ADJLOW' in fields:
            rowObj['adjLow'] = row['Adj. Low']
        if 'ADJVOLUME' in fields:
            rowObj['adjVolume'] = row['Adj. Volume']
        returnObj["data"].append(rowObj)
    string_json = json.dumps(returnObj["data"])
    stock = Stock.objects.all().filter(symbol=returnObj['symbol']).first()
    if not stock:
        stock = Stock(symbol=returnObj['symbol'])
        stock.save()
    # Data is being saved as a stringified json
    new_study = Study(start_date=returnObj["startDate"], end_date=returnObj["endDate"], data=string_json)
    new_study.save()
    stock.study_set.add(new_study)
    # TODO: Need to save study into user's portfolio when this route becomes protected.
    return JsonResponse(status=200, data=returnObj)
def favorite(request):
    """Add (POST) or remove (DELETE) a stock from the current user's favorites.

    The request must carry an access token; the body is JSON of the form
    {"symbol": "AMZN"}. Returns the updated favorites list (stock ids).
    """
    # this route must have an access token attached
    if request.method == 'POST':
        # body should have stock symbol
        # i.e. {symbol: "AMZN"}
        username = get_username(request)
        body = json.loads(request.body)
        # NOTE(review): unlike the DELETE branch, POST does not validate that
        # 'symbol' is present in the body - confirm whether that is intended.
        # user is found, and then stock is added to favorite
        user = User.objects.all().filter(username=username).first()
        stock = Stock.objects.all().filter(symbol=body.get('symbol')).first()
        # Free-tier users are capped at 10 favorites.
        if not user.premium and len(list(user.favorites.all())) >= 10:
            return JsonResponse(status=405, data={'message': 'You must be a premium user to have more than 10 favorites'})
        if not stock:
            # if stock doesn't exist in DB, creates one
            stock = Stock(symbol=body.get('symbol'))
            stock.save()
        user.favorites.add(stock)
        # Re-query to build the flat list of favorite ids for the response.
        user = User.objects.all().filter(username=username)
        fav_ret = []
        for fav in list(user.values('favorites')):
            if fav['favorites'] is not None:
                fav_ret.append(fav['favorites'])
        return JsonResponse(status=200, data={'favorites': fav_ret})
    if request.method == 'DELETE':
        # body should contain stock symbol
        # i.e. {symbol: "AMZN"}
        username = get_username(request)
        body = json.loads(request.body)
        if not body.get('symbol'):
            return JsonResponse(status=400, data={'message': 'Please check if the stock symbol is in the request body'})
        user = User.objects.all().filter(username=username).first()
        stock = Stock.objects.all().filter(symbol=body.get('symbol')).first()
        user.favorites.remove(stock)
        # Re-query to build the flat list of favorite ids for the response.
        user = User.objects.all().filter(username=username)
        fav_ret = []
        for fav in list(user.values('favorites')):
            if fav['favorites'] is not None:
                fav_ret.append(fav['favorites'])
        return JsonResponse(status=200, data={'favorites': fav_ret})
    return JsonResponse(status=404, data={'message': 'Please use a POST or DELETE request.'})
class HomePageView(TemplateView):
    """Serve the app shell (index.html) with the Stripe publishable key in context."""
    template_name = 'index.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['STRIPE_PUBLISHABLE_TEST_KEY'] = settings.STRIPE_PUBLISHABLE_TEST_KEY
        # TODO: set this key to STRIPE_PUBLISHABLE_KEY post testing
        return context
def get_username(request):
    """Extract the Auth0 username from the request's bearer token.

    The JWT 'sub' claim looks like 'github|asdf' or 'google-oauth2|asdf';
    the pipe is replaced with a dot so it can be stored as a username.
    """
    claims = jwt.decode(get_token_auth_header(request), OAUTH_CERT, algorithms=['RS256'],
                        audience='https://stock-trainer.auth0.com/api/v2/')
    return claims.get('sub').replace('|', '.')
# look into protecting this route, so that only logged in and users in DB can actually be charged
def charge(request):
    """Charge the authenticated user's card via Stripe and mark them premium.

    Expects a POST whose JSON body contains a Stripe card token: {"token": ...}.
    NOTE(review): non-POST requests fall through and return None - confirm
    whether a 405 response was intended.
    """
    if request.method == 'POST':
        # username is taken from the request header
        username = get_username(request)
        # body of request is parsed by the loads function
        body = json.loads(request.body)
        # currently we're looking at the token only, but there we can add more to the body to id the user
        token = body['token']
        charge = stripe.Charge.create(
            amount=500,  # amount is in cents: $5.00
            currency='usd',
            description="It's just stuff... Don't worry about it...",
            source=token
        )
        print(charge)
        print("status:", charge['status'])
        # we can change our jsonresponse depending on the error from stripe, or the status of the charge
        if charge['status'] == 'succeeded':  # hard coded for now, there are WAY better ways to check for this and errors
            print('payment success')
            # currently, whether a user is premium or not is a boolean, but should be updated to be an expiration date
            User.objects.all().filter(username=username).update(premium=True)
            return JsonResponse({
                'message': 'The payment has been successful'
            })
        else:
            print('payment failed')
            return JsonResponse({
                'message': 'The payment was not successful'
            })
# Oauth cert
# this was attempted to add to .env, but didn't work
OAUTH_CERT = """-----<KEY>
-----END CERTIFICATE-----"""
# @api_view(['GET'])
def current_user(request):
    """
    Determine the current user by their token, and return their data

    Returns {'portfolio': [...], 'favorites': [...]}; creates the user and an
    empty portfolio on first sight of a new token.
    """
    username = get_username(request)
    user = User.objects.all().filter(username=username)
    # Can DRY this up probably
    if user:
        # Existing user: collect the studies in their portfolio plus the
        # flat list of favorite stock ids.
        portfolio_id_dict = user.values('portfolio_id_id').first()
        portfolio_id = portfolio_id_dict.get('portfolio_id_id')
        studies = Study.objects.all().filter(portfolio_id=portfolio_id).values()
        favorites = list(user.values('favorites'))
        fav_ret = []
        for fav in favorites:
            if fav['favorites'] is not None:
                fav_ret.append(fav['favorites'])
        return JsonResponse({'portfolio': list(studies), 'favorites': fav_ret})
    else:
        # creates new user and portfolio if user does not exist.
        new_user = User.objects.create_user(username=username)
        new_user.save()
        new_portfolio = Portfolio.objects.create()
        new_portfolio.save()
        new_portfolio.user_set.add(new_user)
        # for some reason, new_user is just a string, need to requery for now, but there should be a more elegant
        # implementation for that
        user = User.objects.all().filter(username=username)
        portfolio_id_iter = user.values('portfolio_id_id')
        portfolio_id = 0
        for portfolio in portfolio_id_iter:
            portfolio_id = portfolio.get('portfolio_id_id')
        studies = Study.objects.all().filter(portfolio_id=portfolio_id).values()
        return JsonResponse({'portfolio': list(studies), 'favorites': []})
class UserList(APIView):
    """
    Create a new user. It's called 'UserList' because normally we'd have a get
    method here too, for retrieving a list of all User objects.
    """
    # Registration must be open to unauthenticated clients.
    permission_classes = (permissions.AllowAny,)
    def post(self, request, format=None):
        # The serializer presumably also issues a token for the new user
        # (per its name) - confirm in serializers.py.
        serializer = UserSerializerWithToken(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Auth0 check for granted scopes from access_token
def get_token_auth_header(request):
    """Obtains the Access Token from the Authorization Header

    The header is expected to look like "Bearer <token>"; the part after the
    scheme is returned.
    """
    header = request.META.get("HTTP_AUTHORIZATION", None)
    scheme_and_token = header.split()
    return scheme_and_token[1]
def requires_scope(required_scope):
    """Determines if the required scope is present in the Access Token

    Args:
        required_scope (str): The scope required to access the resource
    """
    def require_scope(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            # args[0] is the request object of the wrapped view.
            token = get_token_auth_header(args[0])
            # NOTE(review): the claims are read without signature
            # verification here; the token is presumably validated elsewhere
            # in the auth pipeline - confirm.
            unverified_claims = jwt.get_unverified_claims(token)
            token_scopes = unverified_claims["scope"].split()
            for token_scope in token_scopes:
                if token_scope == required_scope:
                    return f(*args, **kwargs)
            # No matching scope: respond 403 without calling the view.
            response = JsonResponse(
                {'message': 'You don\'t have access to this resource'})
            response.status_code = 403
            return response
        return decorated
    return require_scope
def public(request):
    # Unauthenticated sanity-check endpoint.
    return JsonResponse({'message': 'Hello from a public endpoint! You don\'t need to be authenticated to see this.'})
@api_view(['GET'])
def private(request):
    # Sanity-check endpoint requiring an authenticated request.
    return JsonResponse({'message': 'Hello from a private endpoint! You need to be authenticated to see this.'})
@api_view(['GET'])
@requires_scope('read:messages')
def private_scoped(request):
    # Sanity-check endpoint requiring authentication plus the read:messages scope.
    # BUG FIX: JsonResponse raises TypeError for non-dict data unless
    # safe=False is passed; wrap the text in a dict like the sibling views.
    return JsonResponse({'message': "Hello from a private endpoint! You need to be authenticated and have a scope of read:messages to see this."})
| StarcoderdataPython |
8131888 | <gh_stars>1-10
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look on text below...
__author__ = "VirtualV <github.com/virtualvfix>"
__date__ = "07/10/17 13:16"
import platform
from pkg_resources import parse_version
# check python version: parse_version compares release numbers semantically
# (e.g. '3.10' > '3.5'), unlike a plain string comparison.
PYTHON_MINIMAL_VERSION_SUPPORT = '3.5'
if parse_version(platform.python_version()) < parse_version(PYTHON_MINIMAL_VERSION_SUPPORT):
    raise ImportError('Incompatible python version ! Minimum require: %s; Found: %s'
                      % (PYTHON_MINIMAL_VERSION_SUPPORT, platform.python_version()))
# Imported only after the version gate above has passed.
from build.debug_import import EncImport
# Public API of this module.
__all__ = ['EncImport']
| StarcoderdataPython |
40533 | <gh_stars>1-10
# Copyright 2016 Canonical Limited.
#
# This file is part of charms.hardening.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
from charms_hardening.utils import get_settings
from charms_hardening.audits.apt import (
AptConfig,
RestrictedPackages,
)
def get_audits():
    """Get OS hardening apt audits.

    :returns: list of audit objects (AptConfig, optionally RestrictedPackages)
    """
    audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
                          'expected': 'false'}])]
    security = get_settings('os')['security']
    # A RestrictedPackages audit is added only when package cleaning is
    # enabled and a non-empty package list is configured.
    if security['packages_clean']:
        restricted = security['packages_list']
        if restricted:
            audits.append(RestrictedPackages(restricted))
    return audits
| StarcoderdataPython |
1778626 | import requests
from firepunch.slack_notifier import SlackNotifier
def test_post_slack(mocker):
    """post() must call requests.post exactly once with the chat.postMessage URL and params."""
    # Replace requests.post with a mock so no real HTTP request is made
    # (mocker is the pytest-mock fixture).
    mocker.patch("requests.post")
    token = "dummy_<PASSWORD>"
    channel_name = "#channel_name"
    text = \
        "1 commits between 2019-03-20 12:39:59 and 2019-03-21 12:39:58.\n" + \
        "------------------------\ndate: 2019-03-21T12:39:58Z\nInitial commit"
    SlackNotifier(token=token, channel_name=channel_name).post(text=text)
    exptected_url = SlackNotifier.SLACK_POST_MESSAGE_URL
    expected_params = {
        "token": token,
        "channel": channel_name,
        "text": text,
        "unfurl_links": True
    }
    requests.post.assert_called_once_with(url=exptected_url,
                                          params=expected_params)
| StarcoderdataPython |
1966013 | <filename>Algorithms_easy/0203. Remove Linked List Elements.py
"""
203. Remove Linked List Elements
Remove all elements from a linked list of integers that have value val.
Example:
Input: 1->2->6->3->4->5->6, val = 6
Output: 1->2->3->4->5
"""
# Definition for singly-linked list.
# Defined here for real (the original left it commented out, so the module
# raised NameError on ListNode when run standalone). On LeetCode the judge
# predefines an identical class, so this definition stays compatible.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def removeElements(self, head, val):
        """Remove every node whose value equals ``val`` from the list.

        Uses a dummy head node so removals at the front of the list need
        no special casing.

        :type head: ListNode
        :type val: int
        :rtype: ListNode
        """
        dummy = ListNode(0)
        dummy.next = head
        prev = dummy
        curr = head
        while curr:
            if curr.val == val:
                # Unlink curr; prev stays put so consecutive matches
                # (e.g. 6->6) are all removed.
                prev.next = curr.next
            else:
                prev = curr
            curr = curr.next
        return dummy.next
1761354 | import testbase
from sqlalchemy import *
class CompileTest(testbase.AssertMixin):
    """test various mapper compilation scenarios

    NOTE(review): this file targets Python 2 (``except ..., e:`` syntax)
    and a 0.3-era SQLAlchemy API (BoundMetaData, select_table,
    polymorphic select().alias() mapping); it will not run under Python 3
    or any modern SQLAlchemy release.
    """
    def tearDownAll(self):
        # Wipe the global mapper registry so each test can re-map the same
        # classes from scratch without "already has a primary mapper" errors.
        clear_mappers()
    def testone(self):
        # Verifies that a mapper built over a select().alias() ("surrogate
        # mapper") can be compiled lazily, after its MapperProperty objects
        # have been set up -- see the comment before the compile() call.
        global metadata, order, employee, product, tax, orderproduct
        metadata = BoundMetaData(testbase.db)
        order = Table('orders', metadata,
            Column('id', Integer, primary_key=True),
            Column('employee_id', Integer, ForeignKey('employees.id'), nullable=False),
            Column('type', Unicode(16)))
        employee = Table('employees', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', Unicode(16), unique=True, nullable=False))
        product = Table('products', metadata,
            Column('id', Integer, primary_key=True),
            )
        orderproduct = Table('orderproducts', metadata,
            Column('id', Integer, primary_key=True),
            Column('order_id', Integer, ForeignKey("orders.id"), nullable=False),
            Column('product_id', Integer, ForeignKey("products.id"), nullable=False),
            )
        class Order(object):
            pass
        class Employee(object):
            pass
        class Product(object):
            pass
        class OrderProduct(object):
            pass
        # Map Order against an aliased selectable rather than the plain
        # table; this is what forces the "surrogate mapper" code path.
        order_join = order.select().alias('pjoin')
        order_mapper = mapper(Order, order,
            select_table=order_join,
            polymorphic_on=order_join.c.type,
            polymorphic_identity='order',
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='order')}
            )
        mapper(Product, product,
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='product')}
            )
        mapper(Employee, employee,
            properties={
                'orders': relation(Order, lazy=True, backref='employee')})
        mapper(OrderProduct, orderproduct)
        # this requires that the compilation of order_mapper's "surrogate mapper" occur after
        # the initial setup of MapperProperty objects on the mapper.
        class_mapper(Product).compile()
    def testtwo(self):
        """test that conflicting backrefs raises an exception"""
        global metadata, order, employee, product, tax, orderproduct
        metadata = BoundMetaData(testbase.db)
        order = Table('orders', metadata,
            Column('id', Integer, primary_key=True),
            Column('type', Unicode(16)))
        product = Table('products', metadata,
            Column('id', Integer, primary_key=True),
            )
        orderproduct = Table('orderproducts', metadata,
            Column('id', Integer, primary_key=True),
            Column('order_id', Integer, ForeignKey("orders.id"), nullable=False),
            Column('product_id', Integer, ForeignKey("products.id"), nullable=False),
            )
        class Order(object):
            pass
        class Product(object):
            pass
        class OrderProduct(object):
            pass
        order_join = order.select().alias('pjoin')
        # Both the Order and Product mappers declare a backref named
        # 'product' on OrderProduct -- this collision is intentional and is
        # what the test expects to fail at compile time below.
        order_mapper = mapper(Order, order,
            select_table=order_join,
            polymorphic_on=order_join.c.type,
            polymorphic_identity='order',
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='product')}
            )
        mapper(Product, product,
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='product')}
            )
        mapper(OrderProduct, orderproduct)
        try:
            class_mapper(Product).compile()
            assert False
        except exceptions.ArgumentError, e:
            # The error message must identify the backref mismatch.
            assert str(e).index("Backrefs do not match") > -1
    def testthree(self):
        # Verifies that a relation() backref collection loads as an empty
        # list for a row that has no related rows.
        metadata = BoundMetaData(testbase.db)
        node_table = Table("node", metadata,
            Column('node_id', Integer, primary_key=True),
            Column('name_index', Integer, nullable=True),
            )
        node_name_table = Table("node_name", metadata,
            Column('node_name_id', Integer, primary_key=True),
            Column('node_id', Integer, ForeignKey('node.node_id')),
            Column('host_id', Integer, ForeignKey('host.host_id')),
            Column('name', String(64), nullable=False),
            )
        host_table = Table("host", metadata,
            Column('host_id', Integer, primary_key=True),
            Column('hostname', String(64), nullable=False,
                   unique=True),
            )
        metadata.create_all()
        try:
            # NOTE(review): the 'node' table has no 'node_index' column
            # (it is 'name_index'); this kwarg looks like a typo -- verify
            # against the SQLAlchemy version's handling of unknown kwargs.
            node_table.insert().execute(node_id=1, node_index=5)
            class Node(object):pass
            class NodeName(object):pass
            class Host(object):pass
            node_mapper = mapper(Node, node_table)
            host_mapper = mapper(Host, host_table)
            node_name_mapper = mapper(NodeName, node_name_table,
                                      properties = {
                                          'node' : relation(Node, backref=backref('names')),
                                          'host' : relation(Host),
                                          }
                                      )
            sess = create_session()
            # Node 1 has no NodeName rows, so the backref collection must
            # come back empty rather than raising.
            assert sess.query(Node).get(1).names == []
        finally:
            # Always drop the tables so later tests start from a clean DB.
            metadata.drop_all()
# Allow running this test module directly; testbase.main() drives the runner.
if __name__ == '__main__':
    testbase.main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.