id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
25050 | from webdriver_manager.driver import EdgeDriver, IEDriver
from webdriver_manager.manager import DriverManager
from webdriver_manager import utils
class EdgeDriverManager(DriverManager):
    """Driver manager that downloads the Microsoft Edge webdriver binary."""

    def __init__(self, version=None,
                 os_type=utils.os_name()):
        # NOTE(review): the sibling IEDriverManager defaults to utils.os_type();
        # this class uses utils.os_name() -- confirm the asymmetry is intentional.
        super(EdgeDriverManager, self).__init__()
        self.driver = EdgeDriver(version=version,
                                 os_type=os_type)

    def install(self, path=None):
        # type: () -> str
        # Download the Edge driver and return the local path of the binary.
        # NOTE(review): IEDriverManager.install calls download_driver(); this one
        # calls download_binary() -- verify both methods exist on the file manager.
        return self._file_manager.download_binary(self.driver, path).path
class IEDriverManager(DriverManager):
    """Driver manager that downloads the Internet Explorer webdriver binary."""

    def __init__(self, version=None, os_type=utils.os_type()):
        super(IEDriverManager, self).__init__()
        self.driver = IEDriver(version=version, os_type=os_type)

    def install(self, path=None):
        # type: () -> str
        # Download the IE driver and return the local path of the binary.
        return self._file_manager.download_driver(self.driver, path).path
| StarcoderdataPython |
1739777 | import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms, models
from collections import OrderedDict
from torch import nn
from torch import optim
import torch.nn.functional as F
import time
from workspace_utils import active_session
import numpy as np
from PIL import Image
from torch.autograd import Variable
import argparse
def load_checkpoint(filepath):
    """Rebuild a trained model and its training companions from a checkpoint.

    The checkpoint is assumed to contain 'model', 'classifier', 'criterion',
    'state_dict', 'optimizer' and 'class_to_idx' entries -- confirm against
    the training script that wrote the file.

    Returns:
        tuple: (model, optimizer, criterion, class_to_idx, device)
    """
    checkpoint = torch.load(filepath)
    model = checkpoint['model']
    classifier=checkpoint['classifier']
    # Re-attach the saved classifier head before loading the weights.
    model.classifier = classifier
    criterion=checkpoint['criterion']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer=checkpoint['optimizer']
    class_to_idx=checkpoint['class_to_idx']
    # Run on GPU when one is available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    return model,optimizer,criterion,class_to_idx,device
def process_image(image):
    """Scale, center-crop and normalize a PIL image for a PyTorch model.

    Args:
        image: path to an image file readable by PIL.

    Returns:
        numpy.ndarray: channel-first (C, H, W) float array with ImageNet
        channel normalization applied.
    """
    img = Image.open(image)
    img.load()
    # Shrink so the short side becomes 256 px, preserving aspect ratio.
    if img.size[0] > img.size[1]:
        img.thumbnail((100000, 256))
    else:
        img.thumbnail((256, 100000))
    # Center-crop a 224x224 window; PIL's crop box is (left, top, right, bottom).
    width, height = img.size
    left = (width - 224) / 2
    top = (height - 224) / 2
    right = (width + 224) / 2
    bottom = (height + 224) / 2
    cropped = img.crop((left, top, right, bottom))
    # Scale pixel values to [0, 1] and apply the ImageNet channel statistics.
    scaled = np.array(cropped) / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    normalized = (scaled - mean) / std
    # Reorder from HWC to the CHW layout PyTorch expects.
    return normalized.transpose((2, 0, 1))
def imshow(image, ax=None, title=None):
    """Display a normalized channel-first image array on a matplotlib axis.

    Undoes the ImageNet normalization applied by process_image before drawing.

    Args:
        image: channel-first (C, H, W) numpy array.
        ax: matplotlib axis to draw on; a new one is created when None.
        title: optional plot title.

    Returns:
        The matplotlib axis the image was drawn on.
    """
    if ax is None:
        fig, ax = plt.subplots()
    if title:
        plt.title(title)
    # PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes it is the third dimension.
    image = image.transpose((1, 2, 0))
    # Undo the ImageNet normalization from preprocessing.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = std * image + mean
    # Image needs to be clipped between 0 and 1 or it looks like noise when displayed.
    image = np.clip(image, 0, 1)
    ax.imshow(image)
    return ax
def predict(image_path, model, device, topk, cat_to_name):
    """Predict the top-k classes of an image using a trained deep learning model.

    Args:
        image_path: path to the image file.
        model: trained torch model; must expose a class_to_idx mapping
            (presumably set by the caller after load_checkpoint -- TODO confirm).
        device: torch.device to run inference on.
        topk (int): number of top classes to return.
        cat_to_name (dict): maps class label -> human-readable flower name.

    Returns:
        tuple: (top probabilities, class labels, flower names), lists of length topk.
    """
    # TODO: Implement the code to predict the class from an image file
    model.to(device)
    model.eval()
    with torch.no_grad():
        image = process_image(image_path)
        image = torch.from_numpy(image)
        # Add a batch dimension and match the model's expected float dtype.
        image = image.unsqueeze(0)
        image = image.type(torch.FloatTensor)
        image = image.to(device)
        output = model.forward(image)
        # Exponentiate the log-probabilities back into probabilities.
        ps = torch.exp(output)
        top_p, top_c = torch.topk(ps, topk)
        tp = []
        for p in top_p[0]:
            tp.append(float(p))
        tc = []
        for c in top_c[0]:
            tc.append(float(c))
        # Invert class_to_idx: find the class label for each predicted index.
        cti = dict(model.class_to_idx.items())
        ind = []
        for i in tc:
            ind.append(list(cti.keys())[list(cti.values()).index(i)])
        flower_names = []
        for i in ind:
            flower_names.append(cat_to_name[i])
        return tp, ind, flower_names
3369172 | <filename>DataGeneration_database2_question2.py
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 01:30:30 2020
@author: <NAME>
"""
import json
import pandas as pd
import numpy as np
import re
import random
import sqlite3
import datetime
import calendar
from dateutil.relativedelta import *
# Lookup tables for entity phrasings and state names.
with open('lookup1.json') as json_file:
    data = json.load(json_file)
with open('uniquelookup.json') as json_file:
    data2 = json.load(json_file)
with open('state_dict.json') as json_file:
    state_dict = json.load(json_file)
# Question template db2q2 and the entity slots it contains.
question_template = 'What state has the (Value Entity) number of people (Hospitalization Entity)?'
question_template_id = 'db2q2'
output = {}
entities = ['Value Entity', 'Hospitalization Entity']
# COVID tracking database connection.
conn = sqlite3.connect('testQ.db')
c = conn.cursor()
question_key = {}  # tracks generated questions so duplicates are skipped
count = 1
def hospitalizationQuery(query, hospitalization):
    """Fill the hospitalization placeholders of a SQL query template.

    Replaces "Hospitalization Entity Column" with the database column matching
    the hospitalization entity, "(Null)" with a not-null filter on that same
    column, and "given date" with yesterday's date formatted as YYYYMMDD.

    Args:
        query (str): SQL template containing the placeholders above.
        hospitalization (str): hospitalization entity phrase; unrecognized
            phrases fall back to the "hospitalizedCurrently" column, matching
            the original else-branch behavior.

    Returns:
        str: the query with all placeholders substituted.
    """
    # Yesterday's date, rendered as YYYYMMDD to match the date column format.
    date = str(datetime.date.today() - datetime.timedelta(days=1)).replace("-", "")
    # Table-driven mapping replaces the original repetitive if/elif chain.
    column_by_entity = {
        'Currently in ICU': 'inICUCurrently',
        'Cumulatively in ICU': 'inICUCumulative',
        'Currently on ventilators': 'onVentilatorCurrently',
        'Cumulatively on ventilators': 'onVentilatorCumulative',
        'Cumulatively hospitalized': 'hospitalizedCumulative',
    }
    column = column_by_entity.get(hospitalization, 'hospitalizedCurrently')
    query = query.replace("Hospitalization Entity Column", column)
    query = query.replace("(Null)", column + " is not null")
    query = query.replace("given date", date)
    return query
# Generate 139 unique question/query pairs by random sampling of entities.
while count <140:
    populated_entities = []
    output[count] = []
    val = random.choice(data['Value Entity'])
    # "(x)" marks an ordinal slot: pick a rank 1-10 and fix the suffix (st/nd/rd).
    if val.find("(x)") >= 0:
        order = random.randint(1,10)
        val = val.replace("(x)", str(order))
        if order == 2:
            val = val.replace("th", "nd")
        if order == 3:
            val = val.replace("th", "rd")
        if order == 1:
            val = val.replace("th", "st")
    else:
        order = 1
    # "most"/"highest" phrasings sort descending; all others ascending.
    if val.find("most") >= 0 or val.find("highest") >=0 or val.find("Highest") >=0:
        ascending = False
    else:
        ascending = True
    hospitalization = random.choice(data['Hospitalization Entity'])
    sql_template = "Select state from db2state where date = 'given date' and (Null) order by Hospitalization Entity Column Value Entity"
    query = sql_template
    # The LIMIT offset (order-1) selects the (order)-th row of the sorted result.
    if ascending == False:
        query = query.replace('Value Entity','desc limit ' + str(order-1) + ', 1')
    else:
        query = query.replace('Value Entity','asc limit ' + str(order-1) + ', 1')
    query = hospitalizationQuery(query, hospitalization)
    real_question = question_template.replace("(Hospitalization Entity)", hospitalization)
    real_question = real_question.replace("(Value Entity)", val)
    populated_entities.append(val)
    populated_entities.append(hospitalization)
    c.execute(query)
    result = c.fetchall()
    #if len(result) == 0 or result[0][0] = None:
    #    continue
    # Retry without advancing the counter when this question was already generated.
    if real_question in question_key.keys():
        continue
    else:
        question_key[real_question] = True
    output[count].append({'question_template_id' : question_template_id, 'question_template' : question_template,
                          'entities' : entities, 'question' : real_question,
                          'populated_entities': populated_entities, 'query_template' : sql_template, 'query' : query, 'database': 'database 2'})
    print(count)
    print(question_template)
    print(sql_template)
    print(real_question)
    print(query)
    print(result)
    count = count +1
# Persist the generated dataset.
with open('db2q2data.json', 'w') as outfile:
    json.dump(output,outfile)
print("done")
164572 | import pandas as pd
import numpy as np
def downgrade_dtypes(df):
    """Downcast float64/int64 columns of a DataFrame to float32/int32 in place.

    Roughly halves the memory footprint of wide numeric frames. The input
    DataFrame is mutated and also returned for call-chaining convenience.

    Parameters
    ----------
    df : DataFrame

    Returns
    -------
    DataFrame
        The same DataFrame, with float64 columns cast to float32 and
        int64 columns cast to int32.
    """
    # select_dtypes is the idiomatic way to find columns by dtype.
    float_cols = df.select_dtypes(include="float64").columns
    int_cols = df.select_dtypes(include="int64").columns
    # Downcast in place.
    df[float_cols] = df[float_cols].astype(np.float32)
    df[int_cols] = df[int_cols].astype(np.int32)
    return df
def create_submission_csv(file_name, test, y_test, *, clip_min=0, clip_max=20):
    """Write a submission csv with "ID" and "item_cnt_month" columns.

    Predictions are clipped to [clip_min, clip_max] (the competition's
    target range by default) before writing.

    Parameters
    ----------
    file_name : str
        Name or path of the csv file to create.
    test : DataFrame
        Test-set DataFrame; its index supplies the "ID" column, in the order
        given in the test.csv file.
    y_test : ndarray
        Predictions of (n,) shape, aligned with ``test``.
    clip_min, clip_max : numeric, optional
        Inclusive bounds applied to the predictions. Defaults: 0 and 20.

    Returns
    -------
    None
    """
    y_test = np.clip(y_test, clip_min, clip_max)
    submission = pd.DataFrame({"ID": test.index, "item_cnt_month": y_test})
    submission.to_csv(file_name, index=False)
def rename_shop_ids(df):
    """Unify duplicate shop ids in place: 11 -> 10, 0 -> 57, 1 -> 58.

    (The original docstring stated "10 -> 11", but the code maps 11 to 10;
    the docstring is corrected here to match the actual behavior.)

    Parameters
    ----------
    df : DataFrame
        Must contain a "shop_id" column; modified in place.

    Returns
    -------
    None
    """
    # The three replacements are collision-free, so their order does not matter.
    df.loc[df["shop_id"]==11,"shop_id"] = 10
    df.loc[df["shop_id"]==0,"shop_id"] = 57
    df.loc[df["shop_id"]==1,"shop_id"] = 58
def get_X_y(df, target_name):
    """Split a DataFrame into a feature frame and a target series.

    Parameters
    ----------
    df : DataFrame
    target_name : str
        Name of the target column present in ``df``.

    Returns
    -------
    X : DataFrame
        ``df`` without the target column.
    y : Series
        The target column.
    """
    features = df.drop(columns=target_name)
    target = df[target_name]
    return features, target
| StarcoderdataPython |
1743984 | <reponame>Utsav-Patel/Partial-Sensing
from src.TheBlindfoldedAgent import TheBlindfoldedAgent
from src.TheFourNeighborAgent import TheFourNeighborAgent
from src.TheExampleInferenceAgent import TheExampleInferenceAgent
from src.helper import generate_grid_manually, generate_grid_with_probability_p
from constants import GOAL_POSITION_OF_AGENT
# Build the full maze that the agent will explore.
full_maze = generate_grid_manually()
print(full_maze)

# Check Implementation for blindfolded agent
# blinded_folded_agent = TheBlindfoldedAgent()
# while blinded_folded_agent.current_position != GOAL_POSITION_OF_AGENT:
#     blinded_folded_agent.planning()
#     blinded_folded_agent.execution(full_maze)
#
# print("Hurray! Reach Goal")
# print(blinded_folded_agent.final_paths)

# Check Implementation for four dimension agent
# four_neighbor_agent = TheFourNeighborAgent()
#
# while four_neighbor_agent.current_position != GOAL_POSITION_OF_AGENT:
#     four_neighbor_agent.planning()
#     four_neighbor_agent.execution(full_maze)
#
# print("Hurray! Reach Goal")
# print(four_neighbor_agent.final_paths)

# Check Implementation for example inference agent
example_inference_agent = TheExampleInferenceAgent()

# Alternate planning (on the agent's belief state) and execution (against the
# real maze) until the agent reaches the goal cell.
while example_inference_agent.current_position != GOAL_POSITION_OF_AGENT:
    example_inference_agent.planning()
    example_inference_agent.execution(full_maze)

print("Hurray! Reach Goal")
print(example_inference_agent.final_paths)
| StarcoderdataPython |
189742 | <gh_stars>10-100
# Process: Manaaki Whenua Land Cover Database (LCDB v5.0)
# Import required packages
import sys, subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Import helper functions relevant to this script
sys.path.append('E:/mdm123/D/scripts/geo/')
from geo_helpers import extract_projection_info
# List paths to GDAL scripts
ogr2ogr = 'C:/Anaconda3/envs/geo/Library/bin/ogr2ogr.exe'
gdal_warp = 'C:/Anaconda3/envs/geo/Library/bin/gdalwarp.exe'
gdal_rasterise = 'C:/Anaconda3/envs/geo/Library/bin/gdal_rasterize.exe'
# Define paths to SRTM & LCDB folders
folder_srtm = 'E:/mdm123/D/data/DSM/SRTM/'
folder_lcdb = 'E:/mdm123/D/data/LRIS/lris-lcdb-v50'
folder_inputs = 'E:/mdm123/D/ML/inputs/1D'
folder_fig = 'E:/mdm123/D/figures'
# Define list of zones to be processed (separate LiDAR coverage areas)
zones = ['MRL18_WPE', 'MRL18_WVL', 'MRL18_WKW', 'MRL18_FGA', 'TSM17_STA', 'TSM17_LDM', 'TSM17_GLB', 'TSM16_ATG']
# Define dictionary to hold information relating to each zone covered by the Marlborough (2018) survey
dtm_dict = {'MRL18_WPE':{'label':'Wairau Plains East (Marlborough 2018)', 'year':'2018'},
'MRL18_WVL':{'label':'Wairau Valley (Marlborough 2018)', 'year':'2018'},
'MRL18_WKW':{'label':'Picton - Waikawa (Marlborough 2018)', 'year':'2018'},
'MRL18_FGA':{'label':'Flaxbourne, Grassmere & Lower Awatere (Marlborough 2018)', 'year':'2018'},
'TSM17_STA':{'label':'St Arnaud (Tasman 2017)', 'year':'2017'},
'TSM17_LDM':{'label':'Lee Dam (Tasman 2017)', 'year':'2017'},
'TSM17_GLB':{'label':'Golden Bay & Farewell Spit (Tasman 2017)', 'year':'2017'},
'TSM16_ATG':{'label':'Abel Tasman & Golden Bay (Tasman 2016)', 'year':'2016'}}
# Define the number of cells of padding to add along each raster boundary
pad = 44
###############################################################################
# 1. Reproject LCDB SHP (from NZTM2000 to WGS84) #
###############################################################################
# Reproject the Manaaki Whenua land cover SHP from NZTM2000 to WGS84 (EPSG:4326)
LCDB_shp_NZTM2000 = '{}/raw/lcdb-v50-land-cover-database-version-50-mainland-new-zealand.shp'.format(folder_lcdb)
LCDB_shp_WGS84 = '{}/proc/LCDB_v50_WGS84.shp'.format(folder_lcdb)
WGS84 = 'EPSG:4326'
reproject_command = [ogr2ogr, LCDB_shp_WGS84, LCDB_shp_NZTM2000, '-t_srs', WGS84, '-overwrite']
reproject_result = subprocess.run(reproject_command, stdout=subprocess.PIPE)
# Surface ogr2ogr's captured output only when the reprojection failed.
if reproject_result.returncode != 0: print(reproject_result.stdout)
###############################################################################
# 2. ArcMap codeblocks for reclassification of sub-classes to main groupings #
###############################################################################
# Used for Python codeblock of field calculation in ArcMap (based on LCDB v4+ definitions of CLASS & NAME)
def classify_group(c):
    """Map an LCDB v4+ CLASS code to its main land-cover group name.

    Codes not belonging to any of the seven named groups fall into 'Other'.
    """
    groups = (
        ('Artificial Surfaces', {1, 2, 5, 6}),
        ('Bare or Lightly-vegetated Surfaces', {10, 12, 14, 15, 16}),
        ('Water Bodies', {20, 21, 22}),
        ('Cropland', {30, 33}),
        ('Grassland, Sedgeland and Marshland', {40, 41, 43, 44, 45, 46, 47}),
        ('Scrub and Shrubland', {50, 51, 52, 54, 55, 56, 58, 80, 81}),
        ('Forest', {64, 68, 69, 70, 71}),
    )
    for name, codes in groups:
        if c in codes:
            return name
    # Any unrecognised code falls through to the catch-all group.
    return 'Other'
# Used for Python codeblock of field calculation in ArcMap (based on LCDB v4+ definitions of CLASS & NAME)
def classify_ID(c):
    """Map an LCDB v4+ CLASS code to its main-group integer id (1-8).

    Ids follow the same ordering as classify_group; unknown codes map to 8.
    """
    code_sets = (
        {1, 2, 5, 6},                           # 1: Artificial Surfaces
        {10, 12, 14, 15, 16},                   # 2: Bare or Lightly-vegetated Surfaces
        {20, 21, 22},                           # 3: Water Bodies
        {30, 33},                               # 4: Cropland
        {40, 41, 43, 44, 45, 46, 47},           # 5: Grassland, Sedgeland and Marshland
        {50, 51, 52, 54, 55, 56, 58, 80, 81},   # 6: Scrub and Shrubland
        {64, 68, 69, 70, 71},                   # 7: Forest
    )
    for group_id, codes in enumerate(code_sets, start=1):
        if c in codes:
            return group_id
    # 8: Other (any unrecognised code)
    return 8
###############################################################################
# 3. Resample LCDB rasters to match padded SRTM grids for each zone #
###############################################################################
# Loop through all available DTM survey zones
for zone in zones:
    print('\nProcessing {} zone:'.format(zone))
    # Open a template raster (DEM for that zone, with pad=44) & extract its properties
    print(' - Analysing zonal SRTM raster to align grids...')
    srtm_filename = '{}proc/{}/SRTM_{}_Z.tif'.format(folder_srtm, zone, zone)
    srtm_proj, srtm_res_x, srtm_res_y, srtm_x_min, srtm_x_max, srtm_y_min, srtm_y_max, srtm_width, srtm_height = extract_projection_info(srtm_filename)
    # Define a new bounding box, including the padding required for the 2D convnet data pre-processing
    # (the -srtm_res_y below presumably compensates for a negative north-up y resolution -- TODO confirm)
    pad_x_min = srtm_x_min - pad*srtm_res_x
    pad_x_max = srtm_x_max + pad*srtm_res_x
    pad_y_min = srtm_y_min - pad*-srtm_res_y
    pad_y_max = srtm_y_max + pad*-srtm_res_y
    pad_width = srtm_width + 2*pad
    pad_height = srtm_height + 2*pad
    # Rasterise the Manaaki Whenua land cover SHP to a raster (aligning it with the others, in terms of resolution & extent)
    print(' - Rasterising land cover SHP to zone GeoTIFF...')
    LCDB_shp = '{}/proc/LCDB_v50_WGS84.shp'.format(folder_lcdb)
    LCDB_tif = '{}/proc/LCDB_GroupID_{}_Pad44.tif'.format(folder_lcdb, zone)
    rasterise_command = [gdal_rasterise, '-a', 'GrpID_2018', '-l', LCDB_shp.split('/')[-1][:-4], LCDB_shp, LCDB_tif, '-a_nodata', '-9999', '-tr', str(srtm_res_x), str(-srtm_res_y), '-te', str(pad_x_min), str(pad_y_min), str(pad_x_max), str(pad_y_max)]
    rasterise_result = subprocess.run(rasterise_command, stdout=subprocess.PIPE)
    # Abort the zone loop on the first gdal_rasterize failure.
    if rasterise_result.returncode != 0:
        print('\nProcess failed, with error message: {}\n'.format(rasterise_result.stdout))
        break
###############################################################################
# 4. More processing of LCDB rasters within the geo_process_LiDAR_SRTM script #
###############################################################################
# Further processing & visualisation of the LCDB raster data was done in the "geo_process_LiDAR_SRTM.py" script
###############################################################################
# 5. Generate thumbnail histograms for each zone, to include in LCDB map #
###############################################################################
# Set up a dictionary of properties for each Manaaki Whenua landclass type present
lcdb_dict = {1:{'label':'Artificial\nsurfaces', 'colour':(78/255, 78/255, 78/255)},
2:{'label':'Bare or Lightly-\nvegetated Surfaces', 'colour':(255/255, 235/255, 190/255)},
3:{'label':'Water\nBodies', 'colour':(0/255, 197/255, 255/255)},
4:{'label':'Cropland', 'colour':(255/255, 170/255, 0/255)},
5:{'label':'Grassland, Sedgeland\nand Marshland', 'colour':(255/255, 255/255, 115/255)},
6:{'label':'Scrub and\nShrubland', 'colour':(137/255, 205/255, 102/255)},
7:{'label':'Forest', 'colour':(38/255, 115/255, 0/255)},
8:{'label':'Other', 'colour':'red'}}
# Set up list of bin edges & colours
lcdb_bins = np.linspace(0.5, 7.5, num=8)
lcdb_colours = [lcdb_dict[l]['colour'] for l in range(1,8)]
# Loop through each zone, generating a very simple histogram (colours only) of the land cover classes present
for zone in zones:
    # Read 1D vector of processed input data for that zone
    df = pd.read_csv('{}/Input1D_ByZone_{}.csv'.format(folder_inputs, zone))
    # Get a list of LCDB class codes for all valid pixels (-9999 marks nodata)
    lcdb_list = df['lcdb'].loc[df['diff'] != -9999].tolist()
    lcdb_list = [l for l in lcdb_list if (not np.isnan(l) and l != None and l != -9999)]
    # Generate histogram manually, to ensure all classes covered (even if not present in that zone)
    fig, axes = plt.subplots(figsize=(2,0.8))
    _,_,patches = axes.hist(lcdb_list, bins=lcdb_bins, edgecolor='dimgrey', linewidth=0.3)
    # Colour each bar with its land-cover class colour.
    for patch, colour in zip(patches, lcdb_colours):
        patch.set_facecolor(colour)
    # Tidy up figure & save: hide all spines but the x axis, drop ticks/labels.
    [axes.spines[edge].set_visible(False) for edge in ['left','top','right']]
    axes.spines['bottom'].set_color('black')
    axes.spines['bottom'].set_linewidth(0.5)
    axes.yaxis.set_visible(False)
    axes.set_xticklabels([])
    axes.set_xticks([])
    fig.tight_layout()
    fig.savefig('{}/All/Distributions/LCDB/landcover_hist_{}.png'.format(folder_fig, zone), dpi=150, transparent=True, bbox='tight')
    plt.close()
1680043 | # Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import annotations
import itertools
from abc import ABC, abstractmethod
from functools import singledispatch
from typing import Any, Dict, List, Optional, Union
import numpy as np
from braket.default_simulator.observables import (
Hadamard,
Hermitian,
Identity,
PauliX,
PauliY,
PauliZ,
TensorProduct,
)
from braket.default_simulator.operation import Observable
from braket.default_simulator.operation_helpers import ir_matrix_to_ndarray
from braket.default_simulator.simulation import StateVectorSimulation
from braket.ir import jaqcd
def from_braket_result_type(result_type) -> ResultType:
    """ Creates a `ResultType` corresponding to the given Braket instruction.

    Args:
        result_type: Result type for a circuit specified using the `braket.ir.jacqd` format.

    Returns:
        ResultType: Instance of specific `ResultType` corresponding to the type of result_type

    Raises:
        ValueError: If no concrete `ResultType` class has been registered
            for the Braket instruction type
    """
    # Dispatch on the concrete jaqcd type via the singledispatch helper below.
    return _from_braket_result_type(result_type)
@singledispatch
def _from_braket_result_type(result_type):
    # Fallback for unregistered jaqcd result types; concrete types are
    # registered below with @_from_braket_result_type.register.
    raise ValueError(f"Result type {result_type} not recognized")
class ResultType(ABC):
    """
    An abstract class that when implemented defines a calculation on a
    quantum state simulation.

    Concrete subclasses are constructed from jaqcd instructions via
    from_braket_result_type.
    """

    @abstractmethod
    def calculate(self, simulation: StateVectorSimulation) -> Any:
        # Return type of any due to lack of sum type support in Python
        """ Calculate a result from the given quantum state vector simulation.

        Args:
            simulation (StateVectorSimulation): The quantum state vector simulation
                to use in the calculation

        Returns:
            Any: The result of the calculation
        """
class ObservableResultType(ResultType, ABC):
    """
    Holds an observable to perform a calculation in conjunction with a state.
    """

    def __init__(self, observable: Observable):
        """
        Args:
            observable (Observable): The observable for which the desired result is calculated
        """
        self._observable = observable

    @property
    def observable(self):
        """ Observable: The observable for which the desired result is calculated."""
        return self._observable

    def calculate(self, simulation: StateVectorSimulation) -> Union[float, List[float]]:
        # With explicit measured qubits, compute a single value on those targets;
        # without targets, compute the result independently for every qubit.
        state = simulation.state_with_observables
        qubit_count = simulation.qubit_count
        eigenvalues = self._observable.eigenvalues
        targets = self._observable.measured_qubits
        if targets:
            return ObservableResultType._calculate_for_targets(
                state, qubit_count, targets, eigenvalues, self._calculate_from_prob_distribution,
            )
        else:
            return [
                ObservableResultType._calculate_for_targets(
                    state, qubit_count, [i], eigenvalues, self._calculate_from_prob_distribution,
                )
                for i in range(qubit_count)
            ]

    @staticmethod
    @abstractmethod
    def _calculate_from_prob_distribution(
        probabilities: np.ndarray, eigenvalues: np.ndarray
    ) -> float:
        """ Calculates a result from the probabilities of eigenvalues.

        Args:
            probabilities (np.ndarray): The probability of measuring each eigenstate
            eigenvalues (np.ndarray): The eigenvalue corresponding to each eigenstate

        Returns:
            float: The result of the calculation
        """

    @staticmethod
    def _calculate_for_targets(
        state, qubit_count, targets, eigenvalues, calculate_from_prob_distribution
    ):
        # Marginalize the state onto the target qubits, then reduce with the
        # subclass-specific statistic (expectation, variance, ...).
        prob = _marginal_probability(state, qubit_count, targets)
        return calculate_from_prob_distribution(prob, eigenvalues)
class StateVector(ResultType):
    """
    Simply returns the given state vector.
    """

    def calculate(self, simulation: StateVectorSimulation) -> np.ndarray:
        """ Return the given state vector of the simulation.

        Args:
            simulation (StateVectorSimulation): The simulation whose state vector will be returned

        Returns:
            np.ndarray: The state vector (before observables) of the simulation
        """
        # Delegates directly to the simulation's state_vector property.
        return simulation.state_vector
@_from_braket_result_type.register
def _(statevector: jaqcd.StateVector):
    # jaqcd.StateVector carries no parameters to translate.
    return StateVector()
class Amplitude(ResultType):
    """
    Extracts the amplitudes of the desired computational basis states.
    """

    def __init__(self, states: List[str]):
        """
        Args:
            states (List[str]): The computational basis states whose amplitudes are desired
        """
        self._states = states

    def calculate(self, simulation: StateVectorSimulation) -> Dict[str, complex]:
        """ Return the amplitudes of the desired computational basis states in the state
        of the given simulation.

        Args:
            simulation (StateVectorSimulation): The simulation whose state vector amplitudes
                will be returned

        Returns:
            Dict[str, complex]: A dict keyed on computational basis states as bitstrings,
            with corresponding values the amplitudes
        """
        state = simulation.state_vector
        # Each bitstring is interpreted as a base-2 index into the state vector.
        return {basis_state: state[int(basis_state, 2)] for basis_state in self._states}
@_from_braket_result_type.register
def _(amplitude: jaqcd.Amplitude):
    # Translate the jaqcd instruction's basis-state bitstrings.
    return Amplitude(amplitude.states)
class Probability(ResultType):
    """
    Computes the marginal probabilities of computational basis states on the desired qubits.
    """

    def __init__(self, targets: Optional[List[int]] = None):
        """
        Args:
            targets (Optional[List[int]]): The qubit indices on which probabilities are desired.
                If no targets are specified, the probabilities are calculated on the entire state.
                Default: `None`
        """
        self._targets = targets

    def calculate(self, simulation: StateVectorSimulation) -> np.ndarray:
        """ Return the marginal probabilities of computational basis states on the target qubits.

        Probabilities are marginalized over all non-target qubits.

        Args:
            simulation (StateVectorSimulation): The simulation from which probabilities
                are calculated

        Returns:
            np.ndarray: An array of probabilities of length equal to 2^(number of target qubits),
            indexed by the decimal encoding of the computational basis state on the target qubits
        """
        # The marginalization itself lives in the module-level helper.
        return _marginal_probability(simulation.state_vector, simulation.qubit_count, self._targets)
@_from_braket_result_type.register
def _(probability: jaqcd.Probability):
    # Translate the jaqcd instruction's target qubit list.
    return Probability(probability.targets)
class Expectation(ObservableResultType):
    """
    Holds an observable :math:`O` to calculate its expected value.
    """

    def __init__(self, observable: Observable):
        """
        Args:
            observable (Observable): The observable for which expected value is calculated
        """
        super().__init__(observable)

    @staticmethod
    def _calculate_from_prob_distribution(
        probabilities: np.ndarray, eigenvalues: np.ndarray
    ) -> float:
        # Expected value: sum_i p_i * lambda_i, taking the real part.
        return (probabilities @ eigenvalues).real
@_from_braket_result_type.register
def _(expectation: jaqcd.Expectation):
    # Build the Observable from its IR form before wrapping it.
    return Expectation(_from_braket_observable(expectation.observable, expectation.targets))
class Variance(ObservableResultType):
    """
    Holds an observable :math:`O` to calculate its variance.
    """

    def __init__(self, observable: Observable):
        """
        Args:
            observable (Observable): The observable for which variance is calculated
        """
        super().__init__(observable)

    @staticmethod
    def _calculate_from_prob_distribution(
        probabilities: np.ndarray, eigenvalues: np.ndarray
    ) -> float:
        # Variance: E[O^2] - E[O]^2 over the eigenvalue distribution.
        return probabilities @ (eigenvalues.real ** 2) - (probabilities @ eigenvalues).real ** 2
@_from_braket_result_type.register
def _(variance: jaqcd.Variance):
    # Build the Observable from its IR form before wrapping it.
    return Variance(_from_braket_observable(variance.observable, variance.targets))
def _from_braket_observable(
    ir_observable: List[Union[str, List[List[List[float]]]]], ir_targets: Optional[List[int]] = None
) -> Observable:
    """Convert a jaqcd observable spec (plus its targets) into an Observable."""
    # Copy the targets: factor parsing consumes entries destructively via pop.
    targets = list(ir_targets) if ir_targets else None
    if len(ir_observable) == 1:
        return _from_single_observable(ir_observable[0], targets)
    else:
        observable = TensorProduct(
            [_from_single_observable(factor, targets, is_factor=True) for factor in ir_observable]
        )
        # Every target must be consumed by the tensor-product factors.
        if targets:
            raise ValueError(
                f"Found {len(targets)} more target qubits than the tensor product acts on"
            )
        return observable
def _from_single_observable(
    observable: Union[str, List[List[List[float]]]],
    targets: Optional[List[int]] = None,
    # IR tensor product observables are decoupled from targets
    is_factor: bool = False,
) -> Observable:
    """Build a single (non-tensor-product) Observable from its IR form."""
    # Named single-qubit observables resolve through a dispatch table; every
    # other spec is treated as a Hermitian matrix.
    named_observables = {
        "i": Identity,
        "h": Hadamard,
        "x": PauliX,
        "y": PauliY,
        "z": PauliZ,
    }
    if isinstance(observable, str):
        observable_class = named_observables.get(observable)
        if observable_class is not None:
            return observable_class(_actual_targets(targets, 1, is_factor))
    try:
        matrix = ir_matrix_to_ndarray(observable)
        if is_factor:
            # A factor consumes as many targets as the matrix has qubits.
            num_qubits = int(np.log2(len(matrix)))
            return Hermitian(matrix, _actual_targets(targets, num_qubits, True))
        return Hermitian(matrix, targets)
    except Exception:
        raise ValueError(f"Invalid observable specified: {observable}")
def _actual_targets(targets: List[int], num_qubits: int, is_factor: bool):
if not is_factor:
return targets
try:
return [targets.pop(0) for _ in range(num_qubits)]
except Exception:
raise ValueError("Insufficient qubits for tensor product")
def _marginal_probability(
state: np.ndarray, qubit_count: int, targets: List[int] = None
) -> np.ndarray:
""" Return the marginal probability of the computational basis states.
The marginal probability is obtained by summing the probabilities on
the unused qubits. If no targets are specified, then the probability
of all basis states is returned.
"""
probabilities = np.abs(state) ** 2
if targets is None or targets == list(range(qubit_count)):
# All qubits targeted, no need to marginalize
return probabilities
targets = np.hstack(targets)
# Find unused qubits and sum over them
unused_qubits = list(set(range(qubit_count)) - set(targets))
as_tensor = probabilities.reshape([2] * qubit_count)
marginal = np.apply_over_axes(np.sum, as_tensor, unused_qubits).flatten()
# Reorder qubits to match targets
basis_states = np.array(list(itertools.product([0, 1], repeat=len(targets))))
perm = np.ravel_multi_index(
basis_states[:, np.argsort(np.argsort(targets))].T, [2] * len(targets)
)
return marginal[perm]
| StarcoderdataPython |
1642556 | from django.conf.urls import url
from home import views
# URL routes for the "home" app; names are referenced via the app namespace.
app_name = 'home'

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^maps/$', views.maps_api, name='maps'),
    # url(r'^msgs/$', views.message_api, name='msgs')
]
| StarcoderdataPython |
1632821 | <filename>dashboard/config/prod/settings.py
import sys
from os.path import join
from dashboard.config.base.settings import *
# Addresses allowed to see debug tooling (e.g. the debug toolbar).
# Fixed: the original ('127.0.0.1') was a plain string -- the trailing comma
# was missing -- so membership tests matched substrings instead of the IP.
INTERNAL_IPS = ('127.0.0.1',)

ROOT_URLCONF = 'dashboard.config.prod.urls'
WSGI_APPLICATION = 'dashboard.config.prod.wsgi.application'

# Default to sqlite
DATABASES = {
    'default': {
        'ENGINE':'django.db.backends.sqlite3',
        'NAME': join(PROJECT_ROOT, 'dashboard.db'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# If not using sqlite, move database settings to
# 'local_settings.py' outside of version control
try:
    from dashboard.config.local_settings import *
except ImportError:
    pass

INSTALLED_APPS += (
    #'debug_toolbar',
    'django_extensions',
    'south',
    #'test_utils',
)

# Test config tweaks/customizations
if 'test' in sys.argv:
    """
    #DATABASES['default'] = {'ENGINE':'django.db.backends.sqlite3'}
    #FIXTURE_DIRS = (
    #    PROJECT_ROOT + '/foo/bar/fixtures',
    #)
    """
    # Skip South migrations during tests for speed.
    SOUTH_TESTS_MIGRATE = False
1795638 | # Grade: 12.5 / 15
# This grade applies to the whole assignment, not just this part. Assignments should be completed in one file unless otherwise stated.
#<NAME>
#5/25/2016
#Homework2
numbers = [22,90,0,-10,3,22, 48]

# Display the number of elements in the list
# TA-COMMENT: (-0.5) The question asks for the number of elements in the list (not the list itself). We were looking for len(numbers)
print(numbers)

# Display the 4th element
print("The 4th element is", numbers[3])

# Display the sum of the 2nd and 4th element of the list.
print("The sum of 2nd and 4th element is", numbers[1] + numbers[3])

# Display the 2nd-largest value in the list.
sorted_numbers = sorted(numbers)
print("The second largest number is", sorted_numbers[5])
# TA-COMMENT: There is a programmatic way to display the second largest value in a list (other than calling it by its index). For example: numbers_sorted[(len(numbers_sorted) - 2)] OR: numbers_sorted[-2].

# Display the last element of the original unsorted list
print("The last element of the original list is", numbers[6])
# TA-COMMENT: Same comment applies -- numbers[-1] is the most efficient way.

# Sum the result of each of the numbers divided by two.
new = 0
sum = 0
for i in numbers:
    new = i / 2
    sum = sum + new
print("The sum is", sum)

# TA-COMMENT: This would also work:
sum = 0
for i in numbers:
    sum = sum + (i / 2)
print(sum)

# question no:6
# For each number, display a number:
for number in numbers:
    print(number)

# If your original number is less than 10, multiply it by thirty.
for number in numbers:
    if number < 10:
        new_num= number * 30
        print("If original number is less than 10, this is the answer", new_num)
        # If it's ALSO (less than ten and even I assume) even, add six
        if number%2 ==0:
            new_new_no = new_num + 6
            print("If the number is less than 10 and an even number, the result is", new_new_no)
    # If it's greater than 50 subtract ten.
    # NOTE(review): the prompt says subtract ten, but the code subtracts 50.
    if number > 50:
        x_no = number - 50
        print("If the number is greater than 50, the result is", x_no)
    # If it's not negative ten, subtract one
    if number !=-10:
        xy_no = number - 1
        print("if the number is not -10, then the result is ", xy_no)
# TA-COMMENT: (-1) Each number should be only be printed once. Though the logic of your if-statements are correct, you switch the name of the variable where you're saving the newly calculated number. This leads to their being multiple results for each original number and the final calculated number to be incorrect (some numbers should go through multiple conditions successively). Let me know if you have any questions about this!
3206710 | from functools import partial
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from ..editor import EditorScene
from .layerlist import LayerListWidget
class InspectorWidget(QWidget):
    """Dock-panel widget combining an image slider and a layer list for an
    :class:`EditorScene`.

    Signals:
        image_changed: emitted after the displayed image has been switched.
        scene_changed(EditorScene): emitted after a new scene is attached.
    """

    image_changed = Signal()
    scene_changed = Signal(EditorScene)

    def __init__(self):
        super().__init__()
        self.scene = None            # attached EditorScene, or None
        self.current_image = 0       # index of the currently shown image
        self._slider_down_value = 0  # slider value remembered on mouse press
        self._setup_ui()
        # Controls stay hidden until a scene is attached via set_scene().
        self.slider_box.hide()
        self.layer_box.hide()

    def set_scene(self, scene):
        """Attach *scene* (or detach with a falsy value) and reset to image 0."""
        self.scene = scene
        self.current_image = 0
        if not scene:
            self.slider_box.hide()
            self.layer_box.hide()
            self._remove_tool_inspector()
            return
        self._add_layer_widgets()
        self.slider.setValue(0)
        self.slider.setMaximum(self.scene.image_count-1)
        self.scene_changed.emit(scene)
        self.slider_box.show()
        self.layer_box.show()

    def show_next(self):
        """Advance to the next image via an undoable command."""
        if self.current_image < self.scene.image_count-1:
            command = ChangeImageCommand(
                self.slider, self.current_image, self.current_image + 1)
            command.setText("Next Image")
            self.scene.undo_stack.push(command)

    def show_previous(self):
        """Step back to the previous image via an undoable command."""
        if self.current_image > 0:
            command = ChangeImageCommand(
                self.slider, self.current_image, self.current_image - 1)
            command.setText("Previous Image")
            self.scene.undo_stack.push(command)

    def change_image(self, idx):
        """Load image *idx* into the scene, preserving the active layer."""
        self.current_image = idx
        active_layer = self.scene.active_layer
        self.scene.load(idx)
        self.slider_box.setTitle("Image {0}/{1}".format(idx+1, self.scene.image_count))
        self._activate_layer(active_layer)
        self.image_changed.emit()

    def show_tool_inspector(self):
        """Replace the current tool inspector with the active layer's widget."""
        self._remove_tool_inspector()
        self._add_tool_inspector()

    def _activate_layer(self, idx):
        # Make layer *idx* active, repaint the scene and swap its tool widget in.
        self.scene.active_layer = idx
        self.scene.update()
        self.show_tool_inspector()

    def _slider_pressed(self):
        # Remember where the drag started so release can build one undo
        # command spanning the whole drag.
        self._slider_down_value = self.slider.value()

    def _slider_released(self):
        command = ChangeImageCommand(
            self.slider, self._slider_down_value, self.slider.value())
        command.setText("Change Image")
        self.scene.undo_stack.push(command)

    def _add_tool_inspector(self):
        # BUG FIX: removed an unused read of self.scene.active_layer.
        widget = self.scene.layers.tool_widget
        if widget:
            self.dock_layout.insertWidget(1, widget)

    def _remove_tool_inspector(self):
        # Layout holds slider box, (optional inspector), layer box and spacer;
        # with only 3 items there is no inspector to remove.
        if self.dock_layout.count() <= 3:
            return
        widget = self.dock_layout.itemAt(1).widget()
        if widget:
            widget.deleteLater()

    def _add_layer_widgets(self):
        self.layer_box.clear()
        # BUG FIX: iterate the folder names directly -- the enumerate() index
        # was never used.
        for name in self.scene.data_store.folders:
            self.layer_box.add(name.title())

    def _change_layer_opacity(self, idx, value):
        self.scene.set_layer_opacity(idx, value)

    def _setup_ui(self):
        """Build the image-slider group box, layer list and stretch spacer."""
        self.dock_layout = QVBoxLayout(self)
        self.dock_layout.setContentsMargins(4, 4, 4, 0)
        self.slider_box = QGroupBox("Images")
        self.slider_box.setObjectName("imageSlider")
        hlayout = QHBoxLayout(self.slider_box)
        arrow_left = QToolButton(self)
        arrow_left.setMaximumSize(25, 25)
        arrow_left.setArrowType(Qt.LeftArrow)
        left_action = QAction()
        left_action.triggered.connect(self.show_previous)
        arrow_left.setDefaultAction(left_action)
        hlayout.addWidget(arrow_left)
        self.slider = QSlider(Qt.Horizontal)
        self.slider.setValue(0)
        self.slider.valueChanged.connect(self.change_image)
        self.slider.sliderPressed.connect(self._slider_pressed)
        self.slider.sliderReleased.connect(self._slider_released)
        hlayout.addWidget(self.slider)
        arrow_right = QToolButton()
        arrow_right.setMaximumSize(25, 25)
        arrow_right.setArrowType(Qt.RightArrow)
        right_action = QAction()
        right_action.triggered.connect(self.show_next)
        arrow_right.setDefaultAction(right_action)
        hlayout.addWidget(arrow_right)
        self.dock_layout.addWidget(self.slider_box)
        self.layer_box = LayerListWidget()
        self.layer_box.opacity_changed.connect(self._change_layer_opacity)
        self.layer_box.layer_activated.connect(self._activate_layer)
        self.dock_layout.addWidget(self.layer_box)
        self.dock_layout.addItem(
            QSpacerItem(1, 1, QSizePolicy.Minimum, QSizePolicy.Expanding))
class ChangeImageCommand(QUndoCommand):
    """Undoable command that moves an image slider between two positions."""

    def __init__(self, slider, old_value, new_value):
        super().__init__()
        # Keep the slider plus both endpoints so the change can be replayed
        # in either direction.
        self.slider = slider
        self.old_value = old_value
        self.new_value = new_value

    def _apply(self, value):
        # Guard against a missing slider (e.g. already torn down).
        if self.slider:
            self.slider.setValue(value)

    def undo(self):
        """Restore the slider to its position before the change."""
        self._apply(self.old_value)

    def redo(self):
        """Apply the new slider position."""
        self._apply(self.new_value)
| StarcoderdataPython |
4819578 | import random
import itertools
import string
import cProfile
from constants import * # @UnusedWildImport
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.
    Depending on the size of the word list, this function may
    take a while to finish.
    """
    # NOTE(review): Python 2 only -- open(..., 'r', 0) (unbuffered mode flag)
    # and string.strip() do not exist in Python 3.
    # print "Loading word list from file..." # DEBUG
    # open file
    the_file = open(WORDLIST_FILENAME, 'r', 0)
    # create word_list
    word_list = []
    # word_list: list of strings separated by line
    for line in the_file:
        word_list.append(string.strip(line))
    # print " ", len(word_list), "words loaded." # DEBUG
    return word_list
def create_simple_dict_from_word_list(word_list):
    """
    DEPRECATED: Use create_dict_from_word_list

    word_list (list): a list of words
    Returns a dict mapping word length -> list of words of that length,
    preserving the input order within each bucket.
    """
    by_length = {}
    for word in word_list:
        by_length.setdefault(len(word), []).append(word)
    return by_length
def create_dict_from_word_list(word_list):
    """
    word_list (list): a list of words
    Returns a two-level index: word length -> first letter -> list of words,
    preserving the input order within each bucket.
    """
    index = {}
    for word in word_list:
        # order by word length, then by first letter
        index.setdefault(len(word), {}).setdefault(word[0], []).append(word)
    return index
def choose_seq_letters(letter_freqs, num_letters):
    """
    letter_freqs (dict): a dictionary of the frequencies of letters appearing the English language (float)
    num_letters (int): number of letters to be included in the sequence

    Draws num_letters letters *with replacement*, weighted by frequency
    (roulette-wheel selection; frequencies are assumed to sum to ~100,
    i.e. percentages -- TODO confirm against LETTER_FREQS).

    Raises ValueError if num_letters is not in interval [0,26]
    Returns a list of letters
    """
    # NOTE(review): Python 2 only -- xrange, iterkeys/itervalues and
    # iterator .next() were removed in Python 3; .keys() below returns a
    # list only under Python 2.
    letter_freqs = letter_freqs.copy()
    if num_letters not in xrange(0,27):
        raise ValueError(str(num_letters) + ' is not in [0,26]')
    elif num_letters == 0:
        return []
    elif num_letters == 26:
        # Full alphabet requested: every letter exactly once.
        return letter_freqs.keys()
    else:
        letter_list = []
        for i in xrange(num_letters): # @UnusedVariable
            # Pick a point on the cumulative-frequency wheel.
            pos = random.random() * 100
            s = 0
            letter = 'a'
            # Parallel key/value iterators walk the same (unmutated) dict,
            # so they stay in matching order.
            iter_keys = letter_freqs.iterkeys()
            iter_vals = letter_freqs.itervalues()
            while (s < pos):
                letter = iter_keys.next()
                s += iter_vals.next()
            letter_list.append(letter)
        return letter_list
def find_valid_words(letter_list, word_dict):
    """
    letter_list (list): a list of letters
    word_dict (dict): a dict. Supports simple and advanced word_dicts

    Seeds the search with every 3-letter permutation of letter_list, records
    those that are words, then extends all permutations one letter at a time
    via find_valid_words_helper.

    Raises ValueError if len(letter_list) < 3
    Returns all possible words of length 3 or greater that can be constructed using the letters in letter_list
    """
    # NOTE(review): Python 2 only -- iterator.next() and string.join() were
    # removed in Python 3.
    if len(letter_list) < 3:
        raise ValueError('letter_list contains less than 3 elements.')
    word_matches = set() # 3 letter matches only
    prefixes = [] # possible words based off of all 3 letter permutations
    iterator = itertools.permutations(letter_list, 3)
    while (True): # find 3 letter matches and prefixes
        try:
            candidate = iterator.next()
            candidate = string.join(candidate, '')
            prefixes.append(candidate) # all candidates are added to prefixes
            if is_word(candidate, word_dict):
                word_matches.add(candidate)
        except StopIteration:
            break
    return list(word_matches) + find_valid_words_helper(letter_list, prefixes, word_dict)
def find_valid_words_helper(letter_list, prefixes, word_dict):
    """
    Helper method to find_valid_words.

    Recursively extends each prefix by one still-unused letter (duplicate
    letters are tracked through Letter_Dict counts), recurses to full
    length, then keeps only the extensions that are real words.
    """
    if len(letter_list) > len(prefixes[0]):
        candidates = set() # doubles as new prefixes to be passed into recursive call
        for prefix in prefixes:
            # Count the available letters, then remove those the prefix
            # already consumed.
            letter_dict = Letter_Dict(letter_list)
            for i in xrange(len(prefix)):
                letter_dict.decrement_letter(prefix[i])
            for letter in letter_dict.get_list():
                candidates.add(prefix + letter) # add prefix-letter combination to candidates
        new_word_matches = find_valid_words_helper(letter_list, list(candidates), word_dict)
        # Filter after recursing: the unfiltered set still serves as prefixes.
        for candidate in candidates.copy():
            if not is_word(candidate, word_dict):
                candidates.remove(candidate)
        return list(candidates) + new_word_matches
    else:
        return []
def is_word(candidate, word_dict):
    """Return True if candidate appears in word_dict.

    Dispatches on the dict's value type: list values = simple dict
    (length -> words), dict values = advanced dict (length -> first
    letter -> words).
    """
    # NOTE(review): Python 2 only -- .values()[0] is not subscriptable in
    # Python 3.  Also presumes word_dict has an entry for len(candidate)
    # (and, for the advanced form, for candidate's first letter) -- a full
    # word list satisfies this; confirm for other inputs.
    word_dict_key_type = type(word_dict.values()[0])
    if word_dict_key_type == list: # backwards compatibility
        if candidate in word_dict[len(candidate)]:
            return True
        else:
            return False
    else:
        assert (word_dict_key_type == dict)
        if candidate in word_dict[len(candidate)][candidate[0]]:
            return True
        else:
            return False
def find_valid_words_brute_force(letter_list, word_list):
    """
    DEPRECATED: Brute force. Slows especially when len(letter_list > 7. Use with caution.
    letter_list (list): a list of letters

    Tries every permutation of every length from 3 up to len(letter_list)
    and tests each against the flat word_list (an O(n) membership test per
    candidate), hence the combinatorial blow-up.

    Raises ValueError if len(letter_list) < 3
    Returns all possible words of length 3 or greater that can be constructed using the letters in letter_list
    """
    # NOTE(review): Python 2 only -- xrange, iterator.next() and
    # string.join() were removed in Python 3.
    if len(letter_list) < 3:
        raise ValueError('letter_list contains less than 3 elements.')
    word_matches = set()
    for num_letters in xrange(3, len(letter_list)+1):
        hasNext = True
        iterator = itertools.permutations(letter_list, num_letters)
        while hasNext:
            try:
                candidate = iterator.next()
                candidate = string.join(candidate, '')
                if candidate in word_list:
                    word_matches.add(candidate)
            except StopIteration:
                hasNext = False
    return list(word_matches)
class Letter_Dict(object):
    """Multiset of lowercase letters stored as {letter: count}.

    Used by find_valid_words_helper to track which letters remain available
    after a prefix has consumed some of them.  A letter's entry is removed
    entirely once its count reaches zero.
    """

    def __init__(self, letter_list):
        # letter -> remaining count (always > 0 while present)
        self.letter_dict = {}
        for letter in letter_list:
            self.increment_letter(letter)

    def increment_letter(self, letter):
        """Add one occurrence of *letter*; rejects non-lowercase input."""
        if len(letter) == 1 and letter in string.ascii_lowercase:
            if letter in self.letter_dict:
                self.letter_dict[letter] += 1
            else:
                self.letter_dict[letter] = 1
        else:
            raise ValueError(letter + ' is not a single lowercase letter')

    def decrement_letter(self, letter):
        """Remove one occurrence of *letter*; raises KeyError if absent."""
        if letter in self.letter_dict:
            if len(letter) == 1 and letter in string.ascii_lowercase:
                if self.letter_dict[letter] > 0:
                    self.letter_dict[letter] -= 1
                    if self.letter_dict[letter] == 0:
                        self.letter_dict.pop(letter) # remove letter if its value is 0
                else:
                    raise ValueError(letter + '\'s value is not a positive number')
            else:
                raise ValueError(letter + ' is not a single lowercase letter')
        else:
            raise KeyError(letter + ' does not exist in letter_dict')

    def get_dict(self):
        """Return the underlying {letter: count} mapping (not a copy)."""
        return self.letter_dict

    def get_list(self):
        """Expand the multiset back into a flat list of letters."""
        letter_list = []
        for letter in self.letter_dict:
            for i in xrange(self.letter_dict[letter]): # @UnusedVariable
                letter_list.append(letter)
        return letter_list
# TEST SUITES
def test_suite_1(): # test choose_seq_letters
    """Exercise choose_seq_letters: out-of-range count, empty draw, full alphabet."""
    try:
        print choose_seq_letters(LETTER_FREQS, 30) # raise error
    except ValueError:
        print 'Correctly raised error'
    print choose_seq_letters(LETTER_FREQS, 0), '0 letters'
    print choose_seq_letters(LETTER_FREQS, 26), '26 letters'
def test_suite_2(): # test Letter_Dict class
    """Smoke-test Letter_Dict increment/decrement/get_list (visual check)."""
    letter_dict = Letter_Dict(['a','b','c','d','e','f'])
    print letter_dict.get_dict(), 'abcdef all 1s'
    letter_dict.increment_letter('g')
    print letter_dict.get_dict(), 'abcdefg all 1s'
    letter_dict.increment_letter('b')
    print letter_dict.get_dict(), 'acdefg all 1s, b is 2'
    letter_dict.decrement_letter('c')
    print letter_dict.get_dict(), 'adefg all 1s, b is 2'
    print letter_dict.get_list()
def test_suite_3(letter_list): # test brute force find_valid_words_brute_force function WITHOUT dictionary optimization
    """Run the brute-force search, but only for small inputs (it explodes fast)."""
    if len(letter_list) <= 6: # time constraints... if len(letter_list) == 7, then it would take over 30 seconds for completion
        word_list = load_words()
        print 'Starting brute force function'
        bf = find_valid_words_brute_force(letter_list, word_list)
        print bf, len(bf)
    else:
        print 'Quit brute force function to save time.'
def test_suite_4(letter_list): # test optimized find_valid_words function WITH dictionary optimization
    """Run find_valid_words against the simple (length-keyed) dictionary."""
    if len(letter_list) <= 8: # time increases drastically at len(letter_list) == 9
        word_list = load_words()
        word_dict = create_simple_dict_from_word_list(word_list)
        print 'Starting optimized function'
        op = find_valid_words(letter_list, word_dict)
        print op, len(op)
    else:
        print 'Quit optimized w/ simple dict to save time.'
def test_suite_5(letter_list): # test optimized find_valid_words function WITH advanced dictionary optimization
    """Run find_valid_words against the two-level (length + first letter) dictionary."""
    word_list = load_words()
    word_dict = create_dict_from_word_list(word_list)
    print 'Starting optimized function'
    adv = find_valid_words(letter_list, word_dict)
    print adv, len(adv)
def test_suite_6(): # cProfile test suites 3, 4, 5
    """Profile all three find_valid_words variants on one letter list."""
    # cProfile.run() evaluates its command string in the module namespace,
    # so letter_list has to be published as a global.
    global letter_list
    # ['t','r','a','c','e','s']
    # ['b','o','m','b','a','r','d']
    # ['e','a','s','i','n','e','s','s']
    # ['v','a','r','i','a','b','l','e','s']
    letter_list = ['v','a','r','i','a','b','l','e','s']
    print 'Brute Force'
    cProfile.run('test_suite_3(letter_list)')
    print 'Optimized w/ Simple Dict'
    cProfile.run('test_suite_4(letter_list)')
    print 'Optimized w/ Advanced Dict'
    cProfile.run('test_suite_5(letter_list)') # at len(letter_list) == 8, time starts to increase more and more...
def test_suite_7(): # test simple_word_dict
    """Dump the 3- and 4-letter buckets of the simple dictionary."""
    word_list = load_words()
    word_dict = create_simple_dict_from_word_list(word_list)
    print word_dict[3] # print all three letter words
    print word_dict[4] # print all four letter words
def test_suite_8(): # test word_dict
    """Dump the length-3 level and its 'a' sub-bucket of the advanced dictionary."""
    word_list = load_words()
    word_dict = create_dict_from_word_list(word_list)
    print word_dict[3]
    print word_dict[3]['a']
def test_suite_final(): # test valid words
    """End-to-end demo: three rounds of drawing letters and listing all words."""
    word_dict = create_dict_from_word_list(load_words())
    # Round number -> how many letters are drawn that round.
    round_to_num_letters = {1:6, 2:7, 3:8}
    for rnd in xrange(1, len(round_to_num_letters)+1):
        letter_seq = choose_seq_letters(LETTER_FREQS, round_to_num_letters[rnd])
        print 'Your letters for round', rnd, 'are', letter_seq
        all_words = find_valid_words(letter_seq, word_dict)
        print all_words
        print 'There are', len(all_words), 'possible words in this round\n'

if __name__ == '__main__':
    # Run the end-to-end demo by default.
    test_suite_final()
3284789 | <filename>UVC/T2TViT/models/token_performer.py
"""
Take Performer as T2T Transformer
"""
import math
import torch
import torch.nn as nn
class Token_performer(nn.Module):
    """Performer-style (linear-attention) transformer block, used as the
    T2T transformer in T2T-ViT.

    Attention is approximated with positive random features (``prm_exp``),
    making cost linear in sequence length.  ``forward`` returns a
    ``(output, macs)`` tuple, where ``macs`` is the analytic
    multiply-accumulate count of the block for the given input shape.
    """

    def __init__(self, dim, in_dim, head_cnt=1, kernel_ratio=0.5, dp1=0.1, dp2 = 0.1):
        super().__init__()
        self.emb = in_dim * head_cnt # we use 1, so it is no need here
        self.kqv = nn.Linear(dim, 3 * self.emb)  # joint projection to k, q, v
        self.dp = nn.Dropout(dp1)
        self.proj = nn.Linear(self.emb, self.emb)
        self.head_cnt = head_cnt
        self.norm1 = nn.LayerNorm(dim)       # pre-norm before attention
        self.norm2 = nn.LayerNorm(self.emb)  # pre-norm before MLP
        self.epsilon = 1e-8  # for stable in division
        self.mlp = nn.Sequential(
            nn.Linear(self.emb, 1 * self.emb),
            nn.GELU(),
            nn.Linear(1 * self.emb, self.emb),
            nn.Dropout(dp2),
        )

        # m random features; w is a frozen (requires_grad=False) orthogonal
        # projection used by the positive-feature kernel map below.
        self.m = int(self.emb * kernel_ratio)
        self.w = torch.randn(self.m, self.emb)
        self.w = nn.Parameter(nn.init.orthogonal_(self.w) * math.sqrt(self.m), requires_grad=False)

    def prm_exp(self, x):
        """Positive random-feature map: exp(w^T x - |x|^2/2) / sqrt(m)."""
        # part of the function is borrow from https://github.com/lucidrains/performer-pytorch
        # and <NAME> (https://github.com/cloneofsimo)
        # ==== positive random features for gaussian kernels ====
        # x = (B, T, hs)
        # w = (m, hs)
        # return : x : B, T, m
        # SM(x, y) = E_w[exp(w^T x - |x|/2) exp(w^T y - |y|/2)]
        # therefore return exp(w^Tx - |x|/2)/sqrt(m)
        xd = ((x * x).sum(dim=-1, keepdim=True)).repeat(1, 1, self.m) / 2
        wtx = torch.einsum('bti,mi->btm', x.float(), self.w)
        return torch.exp(wtx - xd) / math.sqrt(self.m)

    def single_attn(self, x):
        """Linear attention over *x*; returns (attended tensor, MAC count)."""
        k, q, v = torch.split(self.kqv(x), self.emb, dim=-1)
        kp, qp = self.prm_exp(k), self.prm_exp(q)  # (B, T, m), (B, T, m)
        # Normalizer: phi(q) . sum_t phi(k_t)
        D = torch.einsum('bti,bi->bt', qp, kp.sum(dim=1)).unsqueeze(dim=2)  # (B, T, m) * (B, m) -> (B, T, 1)
        kptv = torch.einsum('bin,bim->bnm', v.float(), kp)  # (B, emb, m)
        y = torch.einsum('bti,bni->btn', qp, kptv) / (D.repeat(1, 1, self.emb) + self.epsilon)  # (B, T, emb)/Diag
        # skip connection
        y = v + self.dp(self.proj(y))  # same as token_transformer in T2T layer, use v as skip connection
        # Analytic MAC count of this attention pass (one term per op above).
        single_attn_macs = x.shape[0]*(
            x.shape[1]*x.shape[2]*3*self.emb +  # self.kqv
            k.shape[1]*k.shape[2] + self.emb*k.shape[1]*k.shape[2] +  # prm_exp(k)
            q.shape[1]*q.shape[2] + self.emb*q.shape[1]*q.shape[2] +  # prm_exp(q)
            qp.shape[1]*qp.shape[2] +  # D
            v.shape[1]*v.shape[2]* kp.shape[2] +  # kptv
            qp.shape[1]*qp.shape[2]*kptv.shape[1] +  # y
            qp.shape[1]*self.emb*self.emb)  # proj
        return y, single_attn_macs

    def forward(self, x):
        """Pre-norm attention + residual MLP; returns (output, total MACs)."""
        x, single_attn_macs = self.single_attn(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        # MACs of the two-layer MLP (both Linear layers map emb -> emb).
        mlp_macs = x.shape[0]*(x.shape[1]*x.shape[2] *self.emb + x.shape[2]*self.emb*self.emb)
        return x, single_attn_macs+mlp_macs
| StarcoderdataPython |
1761621 | <filename>setup.py
from setuptools import setup
# Runtime dependencies come straight from requirements.txt so pip and the
# package metadata stay in sync.
requirements = []
with open("requirements.txt") as f:
    requirements = f.read().splitlines()

# The PyPI long description is the README (reStructuredText).
readme = ""
with open("README.rst") as f:
    readme = f.read()

setup(
    name="nekos.life-async",
    author="igna",
    project_urls={
        "Website": "https://nekos.life",
        "Issue tracker": "https://github.com/ysIgnacio/nekos.life-async/issues",
    },
    version="1.0.5",
    packages=["nekos"],
    license="MIT",
    description="An unofficial asynchronous wrapper for nekos.life API",
    long_description=readme,
    # BUG FIX: "text/restructured" is not an accepted content type; PyPI only
    # accepts text/plain, text/x-rst or text/markdown.  The README is reST.
    long_description_content_type="text/x-rst",
    include_package_data=True,
    install_requires=requirements,
    # NOTE(review): floor says 3.7 but classifiers start at 3.8 -- confirm.
    python_requires=">=3.7.0",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries",
        "Topic :: Utilities",
    ],
)
| StarcoderdataPython |
189459 | import matplotlib
import matplotlib.pyplot as plt
import pickle
# Output format switch: SVG (keeping text as text) or PNG.
To_Svg = True
if To_Svg:
    figure_spike_sweep = "n_spikes_sweep.svg"
    figure_tem_sweep ="n_tems_sweep.svg"
    plt.rc('text', usetex=False)
    # NOTE(review): the 'unicode' key of the text.latex rc group was removed
    # in newer matplotlib releases -- this line may raise there; confirm the
    # targeted matplotlib version.
    plt.rc('text.latex', unicode = False)
    plt.rc('svg',fonttype = 'none')  # keep SVG text editable rather than paths
else:
    figure_spike_sweep = "n_spikes_sweep.png"
    figure_tem_sweep = "n_tems_sweep.png"

# Default color cycle; used for the vertical marker lines below.
clr = plt.rcParams["axes.prop_cycle"].by_key()["color"]

# Shared axis limits for the spike-sweep subplots.
# (Deliberately re-defined further down with other limits for figure 2.)
def set_x_y_lims():
    plt.xlim(3,36)
    plt.ylim(1e-14,10)
    pass

# ---- Figure 1: reconstruction error vs. number of spikes per machine ----
plt.figure(figsize = (6,6))
plt.subplot(3,1,1)
data_filename = "nspike_sweep_9x15_spacing.pkl"
with open(data_filename, "rb") as f: # Python 3: open(..., 'wb')
    obj = pickle.load(f, encoding="latin1")
    n_spikes, error = obj
plt.title("Mean-Squared Reconstruction Error")
plt.plot(n_spikes-1.5, error)
ax = plt.gca()
ax.set_yscale("log")
plt.axvline(6, color = clr[1], linewidth = 3)
plt.axvline(6, linestyle = (0, (5,5)), color = clr[8], linewidth = 3)
plt.ylabel("9x15 TEMs")#, rotation = 0, ha = 'right', va = 'center')
set_x_y_lims()

plt.subplot(3,1,2)
data_filename = "nspike_sweep_9x9_spacing.pkl"
with open(data_filename, "rb") as f: # Python 3: open(..., 'wb')
    obj = pickle.load(f, encoding="latin1")
    n_spikes, error = obj
plt.plot(n_spikes-1.5, error)
ax = plt.gca()
ax.set_yscale("log")
plt.axvline(9, color = clr[1], linewidth = 3)
plt.axvline(9, linestyle = (0, (5,5)), color = clr[8], linewidth = 3)
plt.ylabel("9x9 TEMs")#, rotation = 0, ha = 'right', va = 'center')
set_x_y_lims()

plt.subplot(3,1,3)
data_filename = "nspike_sweep_9x5_spacing.pkl"
with open(data_filename, "rb") as f: # Python 3: open(..., 'wb')
    obj = pickle.load(f, encoding="latin1")
    n_spikes, error = obj
plt.plot(n_spikes-1.5, error)
ax = plt.gca()
ax.set_yscale("log")
plt.axvline(17, linestyle = (0, (5,5)), color = clr[8], linewidth = 3)
plt.ylabel("9x5 TEMs")#, rotation = 0, ha = 'right', va = 'center')
plt.xlim(3, 36)
plt.ylim(0.1,1)
# print(plt.gca().get_yticks())
# Pin the log-scale ticks so only 0.1 and 1 are labelled on this panel.
plt.gca().set_yticks([], minor = True)
plt.gca().set_yticks([0.1,1], minor = False)
print(plt.gca().get_yticks())
plt.xlabel("Number of Spikes per Machine")
plt.tight_layout()
plt.savefig(figure_spike_sweep)

# ---- Figure 2: reconstruction error vs. number of TEMs ----
plt.figure(figsize = (6,6))

# Shadows the earlier definition on purpose: the TEM sweep spans 25..196 TEMs.
def set_x_y_lims():
    plt.xlim(25,14*14)
    plt.ylim(1e-14,10)
    pass

plt.subplot(3,1,1)
# NOTE(review): the data file says "_5_spikes" but the ylabel below says
# "8 spikes per TEM" -- confirm which is correct.
data_filename = "ntem_sweep_9_5_spikes.pkl"
with open(data_filename, "rb") as f: # Python 3: open(..., 'wb')
    obj = pickle.load(f, encoding="latin1")
    n_tems, error = obj
plt.title("Mean-Squared Reconstruction Error")
plt.plot([n_t**2 for n_t in n_tems], error)
ax = plt.gca()
ax.set_yscale("log")
plt.axvline(169, color = clr[1], linewidth = 3)
plt.axvline(169, linestyle = (0, (5,5)), color = clr[8], linewidth = 3)
yl = plt.ylabel("8 spikes\n per TEM")#, ha = 'right', va = 'center')
set_x_y_lims()

plt.subplot(3,1,2)
data_filename = "ntem_sweep_9_9_spikes.pkl"
with open(data_filename, "rb") as f: # Python 3: open(..., 'wb')
    obj = pickle.load(f, encoding="latin1")
    n_tems, error = obj
plt.plot([n_t**2 for n_t in n_tems], error)
ax = plt.gca()
ax.set_yscale("log")
plt.axvline(81, color = clr[1], linewidth = 3)
plt.axvline(81, linestyle = (0, (5,5)), color = clr[8], linewidth = 3)
yl = plt.ylabel("15 spikes\n per TEM")#, rotation = 0, ha = 'right', va = 'center')
set_x_y_lims()

plt.subplot(3,1,3)
data_filename = "ntem_sweep_9_15_spikes.pkl"
with open(data_filename, "rb") as f: # Python 3: open(..., 'wb')
    obj = pickle.load(f, encoding="latin1")
    n_tems, error = obj
plt.plot([n_t**2 for n_t in n_tems], error)
ax = plt.gca()
ax.set_yscale("log")
plt.axvline(81, color = clr[1], linewidth = 3)
plt.axvline(49, linestyle = (0, (5,5)), color = clr[8], linewidth = 3)
yl = plt.ylabel("25 spikes\n per TEM")#, rotation = 0, ha = 'right', va = 'center')
# plt.xlim(25,15*15)
plt.xlabel("Number of TEMs")
set_x_y_lims()
plt.tight_layout()
plt.savefig(figure_tem_sweep)
| StarcoderdataPython |
596 | from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
# Shared random feature matrix for every test case: 10 rows, 4 numeric columns.
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
# Parameter grid: with/without categorical columns x two percentile ladders.
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
    """Indirect fixture: request.param indexes into the `tests` grid."""
    cat, perc = tests[request.param]
    return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
    """Discretizer should bin every non-categorical column into percentile
    buckets 0..len(perc) and leave categorical columns untouched."""
    cat, perc = cats_and_percentiles
    disc = Discretizer(x, cat, feature_names, perc)
    to_disc = list(disc.names.keys())
    # Only the non-categorical features get discretized.
    assert len(to_disc) == (x.shape[1] - len(cat))
    x_disc = disc.discretize(x)
    for k, v in disc.names.items():
        # n percentile cut points produce at most n+1 bin labels.
        assert len(v) <= len(perc) + 1
        assert callable(disc.lambdas[k])
        # Bin ids span the full 0..len(perc) range.
        assert (x_disc[:, k].min() == 0).all()
        assert (x_disc[:, k].max() == len(perc)).all()
    for i in range(x.shape[1]):
        if i not in to_disc:
            # Categorical columns must pass through unchanged.
            assert (x_disc[:, i] == x[:, i]).all()
| StarcoderdataPython |
1707541 | import requests
# Known Tumblr apex A-record(s) and CNAME target(s).
APEX_VALUES = ['172.16.31.10']
CNAME_VALUE = ["domains.tumblr.com"]
# Body text Tumblr serves for an unclaimed custom domain.
RESPONSE_FINGERPRINT = "Whatever you were looking for doesn't currently exist at this address."


def detector(domain, ip, cname):
    """Return True when *domain* appears to point at an unclaimed Tumblr.

    Checks, in order: apex A-record match, CNAME substring match, and
    finally the Tumblr "not found" fingerprint in the HTTP response body.
    """
    if ip in APEX_VALUES:
        return True
    # BUG FIX: under Python 3, filter() returns a lazy filter object which
    # is always truthy, so the original `if filter(...)` flagged every
    # domain.  any() performs the intended substring test.
    if any(value in cname for value in CNAME_VALUE):
        return True
    try:
        if RESPONSE_FINGERPRINT in requests.get('http://%s' % domain).text:
            return True
    except Exception:
        # Network/HTTP failures mean "no evidence of takeover".
        pass
    return False
3331438 | <reponame>AiondaDotCom/tools<filename>kimaiCSV2PDF/helperUnitTest.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# File: helperUnitTest.py
# Author: arwk
# Github: https://github.com/AiondaDotCom/tools
# Created: 25.10.17
# Modified: 25.10.17
##
import helper as hlp
import unittest
class TestUM(unittest.TestCase):
    """Round-trip tests for the time-conversion helpers in helper.py."""

    def setUp(self):
        pass

    def testDecimalToTimedelta(self):
        """
        Test decimalToTimedelta & timedeltaToDecimal
        """
        # Every hour/minute combination must survive a decimal round trip
        # unchanged.
        for h in range(0,24):
            for m in range(0,60):
                timedelta = hlp.hoursMinutesToTimedelta(h, m)
                newTimedelta = hlp.decimalToTimedelta(hlp.timedeltaToDecimal(timedelta))
                #print h, m, timedelta, newTimedelta
                #print timedelta, newTimedelta, timedelta == newTimedelta
                self.assertEqual(timedelta, newTimedelta)

# Test roundToNearest()
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1609364 | # Generated by Django 3.2.3 on 2021-07-05 09:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Move gesture images off QuizAttempt: drop QuizAttempt.images and add
    an optional many-to-many QuizChoice.images -> engine.Gesture."""

    dependencies = [
        ('engine', '0009_quizattempt_images'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='quizattempt',
            name='images',
        ),
        migrations.AddField(
            model_name='quizchoice',
            name='images',
            # blank=True: the relation is optional in forms/admin.
            field=models.ManyToManyField(blank=True, to='engine.Gesture'),
        ),
    ]
| StarcoderdataPython |
58257 | <filename>tests/test_landsat_c2.py
from pathlib import Path
from unittest.mock import patch
import numpy
import pytest
import rasterio
from rio_tiler.errors import InvalidBandName, MissingBands, TileOutsideBounds
from rio_tiler_pds.errors import InvalidLandsatSceneId
from rio_tiler_pds.landsat.aws import LandsatC2Reader
from rio_tiler_pds.landsat.utils import (
ETM_L1_BANDS,
MSS_L1_BANDS,
OLI_L1_BANDS,
OLI_L1_QA_BANDS,
OLI_SR_BANDS,
TIRS_L1_BANDS,
TIRS_L1_QA_BANDS,
TIRS_ST_BANDS,
TM_L1_BANDS,
TM_SR_BANDS,
TM_ST_BANDS,
sceneid_parser,
)
# sceneid,expected_content
LANDSAT_SCENE_PARSER_TEST_CASES = (
# Collection 2 Level 2 OLI-TIRS 8 SP (both SR and ST)
(
"LC08_L2SP_001062_20201031_20201106_02_T2",
{
"sensor": "C",
"satellite": "08",
"processingCorrectionLevel": "L2SP",
"path": "001",
"row": "062",
"acquisitionYear": "2020",
"acquisitionMonth": "10",
"acquisitionDay": "31",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "06",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LC08_L2SP_001062_20201031_20201106_02_T2",
"date": "2020-10-31",
"_processingLevelNum": "2",
"sensor_name": "oli-tirs",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_SR_BANDS + TIRS_ST_BANDS,
"category": "standard",
},
),
# Collection 2 Level 2 OLI-TIRS 8 SR (no ST)
(
"LC08_L2SR_122108_20201031_20201106_02_T2",
{
"sensor": "C",
"satellite": "08",
"processingCorrectionLevel": "L2SR",
"path": "122",
"row": "108",
"acquisitionYear": "2020",
"acquisitionMonth": "10",
"acquisitionDay": "31",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "06",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LC08_L2SR_122108_20201031_20201106_02_T2",
"date": "2020-10-31",
"_processingLevelNum": "2",
"sensor_name": "oli-tirs",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_SR_BANDS,
"category": "standard",
},
),
# Collection 2 Level 2 TM SP (both SR and ST)
(
"LT05_L2SP_014032_20111018_20200820_02_T1",
{
"sensor": "T",
"satellite": "05",
"processingCorrectionLevel": "L2SP",
"path": "014",
"row": "032",
"acquisitionYear": "2011",
"acquisitionMonth": "10",
"acquisitionDay": "18",
"processingYear": "2020",
"processingMonth": "08",
"processingDay": "20",
"collectionNumber": "02",
"collectionCategory": "T1",
"scene": "LT05_L2SP_014032_20111018_20200820_02_T1",
"date": "2011-10-18",
"_processingLevelNum": "2",
"sensor_name": "tm",
"_sensor_s3_prefix": "tm",
"bands": TM_SR_BANDS + TM_ST_BANDS,
"category": "standard",
},
),
# Collection 2 Level 2 TM SR (no ST)
(
"LT05_L2SR_089076_20110929_20200820_02_T2",
{
"sensor": "T",
"satellite": "05",
"processingCorrectionLevel": "L2SR",
"path": "089",
"row": "076",
"acquisitionYear": "2011",
"acquisitionMonth": "09",
"acquisitionDay": "29",
"processingYear": "2020",
"processingMonth": "08",
"processingDay": "20",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LT05_L2SR_089076_20110929_20200820_02_T2",
"date": "2011-09-29",
"_processingLevelNum": "2",
"sensor_name": "tm",
"_sensor_s3_prefix": "tm",
"bands": TM_SR_BANDS,
"category": "standard",
},
),
# Collection 2 Level 2 ETM SP (both SR and ST)
(
"LE07_L2SP_175066_20201026_20201121_02_T1",
{
"sensor": "E",
"satellite": "07",
"processingCorrectionLevel": "L2SP",
"path": "175",
"row": "066",
"acquisitionYear": "2020",
"acquisitionMonth": "10",
"acquisitionDay": "26",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "21",
"collectionNumber": "02",
"collectionCategory": "T1",
"scene": "LE07_L2SP_175066_20201026_20201121_02_T1",
"date": "2020-10-26",
"_processingLevelNum": "2",
"sensor_name": "etm",
"_sensor_s3_prefix": "etm",
"bands": TM_SR_BANDS + TM_ST_BANDS,
"category": "standard",
},
),
# Collection 2 Level 2 ETM SR (no ST)
(
"LE07_L2SR_123067_20201030_20201126_02_T1",
{
"sensor": "E",
"satellite": "07",
"processingCorrectionLevel": "L2SR",
"path": "123",
"row": "067",
"acquisitionYear": "2020",
"acquisitionMonth": "10",
"acquisitionDay": "30",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "26",
"collectionNumber": "02",
"collectionCategory": "T1",
"scene": "LE07_L2SR_123067_20201030_20201126_02_T1",
"date": "2020-10-30",
"_processingLevelNum": "2",
"sensor_name": "etm",
"_sensor_s3_prefix": "etm",
"bands": TM_SR_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 OLI, L1GT
(
"LO08_L1GT_108030_20201114_20201119_02_T2",
{
"sensor": "O",
"satellite": "08",
"processingCorrectionLevel": "L1GT",
"path": "108",
"row": "030",
"acquisitionYear": "2020",
"acquisitionMonth": "11",
"acquisitionDay": "14",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "19",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LO08_L1GT_108030_20201114_20201119_02_T2",
"date": "2020-11-14",
"_processingLevelNum": "1",
"sensor_name": "oli",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_L1_BANDS + OLI_L1_QA_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 OLI, L1TP
(
"LO08_L1TP_108070_20201114_20201119_02_T1",
{
"sensor": "O",
"satellite": "08",
"processingCorrectionLevel": "L1TP",
"path": "108",
"row": "070",
"acquisitionYear": "2020",
"acquisitionMonth": "11",
"acquisitionDay": "14",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "19",
"collectionNumber": "02",
"collectionCategory": "T1",
"scene": "LO08_L1TP_108070_20201114_20201119_02_T1",
"date": "2020-11-14",
"_processingLevelNum": "1",
"sensor_name": "oli",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_L1_BANDS + OLI_L1_QA_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 OLI-TIRS, L1GT
(
"LC08_L1GT_229113_20201129_20201211_02_T2",
{
"sensor": "C",
"satellite": "08",
"processingCorrectionLevel": "L1GT",
"path": "229",
"row": "113",
"acquisitionYear": "2020",
"acquisitionMonth": "11",
"acquisitionDay": "29",
"processingYear": "2020",
"processingMonth": "12",
"processingDay": "11",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LC08_L1GT_229113_20201129_20201211_02_T2",
"date": "2020-11-29",
"_processingLevelNum": "1",
"sensor_name": "oli-tirs",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_L1_BANDS + TIRS_L1_BANDS + OLI_L1_QA_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 OLI-TIRS, L1TP
(
"LC08_L1TP_092017_20201129_20201210_02_T1",
{
"sensor": "C",
"satellite": "08",
"processingCorrectionLevel": "L1TP",
"path": "092",
"row": "017",
"acquisitionYear": "2020",
"acquisitionMonth": "11",
"acquisitionDay": "29",
"processingYear": "2020",
"processingMonth": "12",
"processingDay": "10",
"collectionNumber": "02",
"collectionCategory": "T1",
"scene": "LC08_L1TP_092017_20201129_20201210_02_T1",
"date": "2020-11-29",
"_processingLevelNum": "1",
"sensor_name": "oli-tirs",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_L1_BANDS + TIRS_L1_BANDS + OLI_L1_QA_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 TIRS, L1GT
(
"LT08_L1GT_019213_20201130_20201210_02_T2",
{
"sensor": "T",
"satellite": "08",
"processingCorrectionLevel": "L1GT",
"path": "019",
"row": "213",
"acquisitionYear": "2020",
"acquisitionMonth": "11",
"acquisitionDay": "30",
"processingYear": "2020",
"processingMonth": "12",
"processingDay": "10",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LT08_L1GT_019213_20201130_20201210_02_T2",
"date": "2020-11-30",
"_processingLevelNum": "1",
"sensor_name": "tirs",
"_sensor_s3_prefix": "oli-tirs",
"bands": TIRS_L1_BANDS + TIRS_L1_QA_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 ETM, L1GS
(
"LE07_L1GS_189036_20201129_20201129_02_RT",
{
"sensor": "E",
"satellite": "07",
"processingCorrectionLevel": "L1GS",
"path": "189",
"row": "036",
"acquisitionYear": "2020",
"acquisitionMonth": "11",
"acquisitionDay": "29",
"processingYear": "2020",
"processingMonth": "11",
"processingDay": "29",
"collectionNumber": "02",
"collectionCategory": "RT",
"scene": "LE07_L1GS_189036_20201129_20201129_02_RT",
"date": "2020-11-29",
"_processingLevelNum": "1",
"sensor_name": "etm",
"_sensor_s3_prefix": "etm",
"bands": ETM_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 ETM, L1GT
(
"LE07_L1GT_023046_20201204_20201206_02_RT",
{
"sensor": "E",
"satellite": "07",
"processingCorrectionLevel": "L1GT",
"path": "023",
"row": "046",
"acquisitionYear": "2020",
"acquisitionMonth": "12",
"acquisitionDay": "04",
"processingYear": "2020",
"processingMonth": "12",
"processingDay": "06",
"collectionNumber": "02",
"collectionCategory": "RT",
"scene": "LE07_L1GT_023046_20201204_20201206_02_RT",
"date": "2020-12-04",
"_processingLevelNum": "1",
"sensor_name": "etm",
"_sensor_s3_prefix": "etm",
"bands": ETM_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 ETM, L1TP
(
"LE07_L1TP_030042_20201205_20201205_02_RT",
{
"sensor": "E",
"satellite": "07",
"processingCorrectionLevel": "L1TP",
"path": "030",
"row": "042",
"acquisitionYear": "2020",
"acquisitionMonth": "12",
"acquisitionDay": "05",
"processingYear": "2020",
"processingMonth": "12",
"processingDay": "05",
"collectionNumber": "02",
"collectionCategory": "RT",
"scene": "LE07_L1TP_030042_20201205_20201205_02_RT",
"date": "2020-12-05",
"_processingLevelNum": "1",
"sensor_name": "etm",
"_sensor_s3_prefix": "etm",
"bands": ETM_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 TM, L1GS
(
"LT05_L1GS_127054_20111111_20200820_02_T2",
{
"sensor": "T",
"satellite": "05",
"processingCorrectionLevel": "L1GS",
"path": "127",
"row": "054",
"acquisitionYear": "2011",
"acquisitionMonth": "11",
"acquisitionDay": "11",
"processingYear": "2020",
"processingMonth": "08",
"processingDay": "20",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LT05_L1GS_127054_20111111_20200820_02_T2",
"date": "2011-11-11",
"_processingLevelNum": "1",
"sensor_name": "tm",
"_sensor_s3_prefix": "tm",
"bands": TM_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 TM, L1TP
(
"LT05_L1TP_014032_20111018_20200820_02_T1",
{
"sensor": "T",
"satellite": "05",
"processingCorrectionLevel": "L1TP",
"path": "014",
"row": "032",
"acquisitionYear": "2011",
"acquisitionMonth": "10",
"acquisitionDay": "18",
"processingYear": "2020",
"processingMonth": "08",
"processingDay": "20",
"collectionNumber": "02",
"collectionCategory": "T1",
"scene": "LT05_L1TP_014032_20111018_20200820_02_T1",
"date": "2011-10-18",
"_processingLevelNum": "1",
"sensor_name": "tm",
"_sensor_s3_prefix": "tm",
"bands": TM_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 MSS, L1GS
(
"LM05_L1GS_176025_20120901_20200820_02_T2",
{
"sensor": "M",
"satellite": "05",
"processingCorrectionLevel": "L1GS",
"path": "176",
"row": "025",
"acquisitionYear": "2012",
"acquisitionMonth": "09",
"acquisitionDay": "01",
"processingYear": "2020",
"processingMonth": "08",
"processingDay": "20",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LM05_L1GS_176025_20120901_20200820_02_T2",
"date": "2012-09-01",
"_processingLevelNum": "1",
"sensor_name": "mss",
"_sensor_s3_prefix": "mss",
"bands": MSS_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level 1 MSS, L1TP
(
"LM05_L1TP_015032_20121230_20200820_02_T2",
{
"sensor": "M",
"satellite": "05",
"processingCorrectionLevel": "L1TP",
"path": "015",
"row": "032",
"acquisitionYear": "2012",
"acquisitionMonth": "12",
"acquisitionDay": "30",
"processingYear": "2020",
"processingMonth": "08",
"processingDay": "20",
"collectionNumber": "02",
"collectionCategory": "T2",
"scene": "LM05_L1TP_015032_20121230_20200820_02_T2",
"date": "2012-12-30",
"_processingLevelNum": "1",
"sensor_name": "mss",
"_sensor_s3_prefix": "mss",
"bands": MSS_L1_BANDS,
"category": "standard",
},
),
# Collection 2 Level2 Albers
(
"LC08_L2SP_077010_20210616_20210623_02_A1",
{
"sensor": "C",
"satellite": "08",
"processingCorrectionLevel": "L2SP",
"path": "077",
"row": "010",
"acquisitionYear": "2021",
"acquisitionMonth": "06",
"acquisitionDay": "16",
"processingYear": "2021",
"processingMonth": "06",
"processingDay": "23",
"collectionNumber": "02",
"collectionCategory": "A1",
"scene": "LC08_L2SP_077010_20210616_20210623_02_A1",
"date": "2021-06-16",
"_processingLevelNum": "2",
"sensor_name": "oli-tirs",
"_sensor_s3_prefix": "oli-tirs",
"bands": OLI_SR_BANDS + TIRS_ST_BANDS,
"category": "albers",
},
),
)
@pytest.mark.parametrize("sceneid,expected_content", LANDSAT_SCENE_PARSER_TEST_CASES)
def test_landsat_sceneid_parser(sceneid, expected_content):
    """Each valid Landsat scene id parses to the expected metadata dict."""
    parsed = sceneid_parser(sceneid)
    assert parsed == expected_content
# Collection 2 Level-2 surface-reflectance scene used by the reader tests below.
LANDSAT_SCENE_C2 = "LC08_L2SP_001062_20201031_20201106_02_T2"
# Local fixture directory mirroring the layout of the `usgs-landsat` S3 bucket.
LANDSAT_BUCKET = Path(__file__).resolve().parent / "fixtures" / "usgs-landsat"
# Full path to the fixture scene: collection / level / category / sensor /
# year / WRS path / WRS row / sceneid.
LANDSAT_PATH = (
    LANDSAT_BUCKET
    / "collection02"
    / "level-2"
    / "standard"
    / "oli-tirs"
    / "2020"
    / "001"
    / "062"
    / LANDSAT_SCENE_C2
)
# Missing the processing-level segment (e.g. "L2SP"), so sceneid parsing must fail.
INVALID_LANDSAT_SCENE_C2 = "LC08_001062_20201031_20201106_02_T2"
# STAC item for the scene, kept as bytes to match what get_object returns from S3.
with open(LANDSAT_PATH / f"{LANDSAT_SCENE_C2}_SR_stac.json", "r") as f:
    LANDSAT_METADATA = f.read().encode("utf-8")
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
    """Point AWS/GDAL environment at fake values so no real AWS service is hit."""
    fake_environment = {
        "AWS_ACCESS_KEY_ID": "jqt",
        "AWS_SECRET_ACCESS_KEY": "rde",
        "AWS_CONFIG_FILE": "/tmp/noconfigheere",
        "AWS_SHARED_CREDENTIALS_FILE": "/tmp/noconfighereeither",
        "GDAL_DISABLE_READDIR_ON_OPEN": "TRUE",
    }
    for name, value in fake_environment.items():
        monkeypatch.setenv(name, value)
    monkeypatch.delenv("AWS_PROFILE", raising=False)
def mock_rasterio_open(band):
    """Open the local fixture file standing in for the requested S3 object."""
    s3_prefix = "s3://usgs-landsat"
    assert band.startswith(s3_prefix)
    local_path = band.replace(s3_prefix, str(LANDSAT_BUCKET))
    return rasterio.open(local_path)
@patch("rio_tiler_pds.landsat.aws.landsat_collection2.get_object")
@patch("rio_tiler.io.cogeo.rasterio")
def test_LandsatC2L2Reader(rio, get_object):
    """End-to-end check of LandsatC2Reader: info/stats/metadata/tile/preview/point/part/feature.

    `rasterio.open` is redirected to the local fixture files and the STAC
    metadata fetch is stubbed, so the test never touches AWS.
    """
    # Route all reads to the local fixture "bucket" and canned STAC metadata.
    rio.open = mock_rasterio_open
    get_object.return_value = LANDSAT_METADATA
    # A scene id missing the processing level must be rejected at construction.
    with pytest.raises(InvalidLandsatSceneId):
        with LandsatC2Reader(INVALID_LANDSAT_SCENE_C2):
            pass
    with LandsatC2Reader(LANDSAT_SCENE_C2) as landsat:
        # Scene-level attributes derived from the sceneid and STAC metadata.
        assert landsat.scene_params["scene"] == LANDSAT_SCENE_C2
        assert landsat.minzoom == 5
        assert landsat.maxzoom == 12
        assert len(landsat.bounds) == 4
        assert landsat.bands == OLI_SR_BANDS + TIRS_ST_BANDS
        # info() requires an explicit, valid band selection.
        with pytest.raises(MissingBands):
            landsat.info()
        with pytest.raises(InvalidBandName):
            landsat.info(bands="BAND5")
        metadata = landsat.info(bands="SR_B5")
        assert len(metadata["band_metadata"]) == 1
        assert metadata["band_descriptions"] == [("SR_B5", "")]
        metadata = landsat.info(bands=landsat.bands)
        assert len(metadata["band_metadata"]) == len(OLI_SR_BANDS + TIRS_ST_BANDS)
        # stats() also requires bands; spot-check percentiles and options.
        with pytest.raises(MissingBands):
            landsat.stats()
        stats = landsat.stats(bands="SR_B1")
        assert stats["SR_B1"]["percentiles"] == [7926, 49017]
        stats = landsat.stats(bands=landsat.bands)
        assert len(stats.items()) == len(OLI_SR_BANDS + TIRS_ST_BANDS)
        assert list(stats) == list(landsat.bands)
        stats = landsat.stats(bands="SR_B1", hist_options=dict(bins=20))
        assert len(stats["SR_B1"]["histogram"][0]) == 20
        stats = landsat.stats(pmin=10, pmax=90, bands="SR_B1")
        assert stats["SR_B1"]["percentiles"] == [8524, 43038]
        stats = landsat.stats(bands="QA_PIXEL")
        assert stats["QA_PIXEL"]["min"] == 1
        stats = landsat.stats(bands="QA_PIXEL", nodata=0, resampling_method="bilinear")
        # nodata and resampling_method are fixed at reader level, so passing
        # them here should not change the result.
        assert stats["QA_PIXEL"]["min"] == 1
        # metadata() combines statistics with band metadata/descriptions.
        with pytest.raises(MissingBands):
            landsat.metadata()
        metadata = landsat.metadata(bands="SR_B1")
        assert metadata["statistics"]["SR_B1"]["percentiles"] == [7926, 49017]
        assert metadata["band_metadata"] == [("SR_B1", {})]
        assert metadata["band_descriptions"] == [("SR_B1", "")]
        metadata = landsat.metadata(bands=("SR_B1", "SR_B2"))
        assert metadata["band_metadata"] == [("SR_B1", {}), ("SR_B2", {})]
        assert metadata["band_descriptions"] == [("SR_B1", ""), ("SR_B2", "")]
        # nodata and resampling_method are fixed at reader level and shouldn't
        # affect metadata either.
        metadata = landsat.metadata(
            bands="QA_PIXEL", nodata=0, resampling_method="bilinear"
        )
        assert metadata["statistics"]["QA_PIXEL"]["min"] == 1
        # Mercator tile fully inside the scene footprint.
        tile_z = 8
        tile_x = 81
        tile_y = 130
        with pytest.raises(MissingBands):
            landsat.tile(tile_x, tile_y, tile_z)
        data, mask = landsat.tile(
            tile_x, tile_y, tile_z, bands=("SR_B4", "SR_B3", "SR_B2")
        )
        assert data.shape == (3, 256, 256)
        assert data.dtype == numpy.uint16
        assert mask.shape == (256, 256)
        assert not mask.all()
        # Level 2 collection 2 temperatures are uint16
        data, mask = landsat.tile(tile_x, tile_y, tile_z, bands="ST_B10")
        assert data.shape == (1, 256, 256)
        assert data.dtype == numpy.uint16
        assert mask.shape == (256, 256)
        data, mask = landsat.tile(
            tile_x,
            tile_y,
            tile_z,
            bands="QA_PIXEL",
            nodata=0,
            resampling_method="bilinear",
        )
        assert data.shape == (1, 256, 256)
        assert not mask.all()
        # Pansharpening not yet implemented
        # data, mask = landsat.tile(
        #     tile_x, tile_y, tile_z, bands=("SR_B4", "SR_B3", "SR_B2"), pan=True
        # )
        # assert data.shape == (3, 256, 256)
        # assert data.dtype == numpy.uint16
        # assert mask.shape == (256, 256)
        with pytest.raises(TileOutsideBounds):
            landsat.tile(701, 102, 8, bands=("SR_B4", "SR_B3", "SR_B2"))
        # Band-math expressions return float arrays.
        data, mask = landsat.tile(
            tile_x, tile_y, tile_z, expression="SR_B5*0.8, SR_B4*1.1, SR_B3*0.8"
        )
        assert data.shape == (3, 256, 256)
        assert data.dtype == numpy.float64
        assert mask.shape == (256, 256)
        # preview() renders a downsampled view of the whole scene.
        with pytest.raises(MissingBands):
            landsat.preview()
        data, mask = landsat.preview(bands=("SR_B4", "SR_B3", "SR_B2"))
        assert data.shape == (3, 386, 379)
        assert data.dtype == numpy.uint16
        assert mask.shape == (386, 379)
        assert not mask.all()
        # Level 2 collection 2 temperatures are uint16
        data, mask = landsat.preview(bands="ST_B10")
        assert data.shape == (1, 386, 379)
        assert data.dtype == numpy.uint16
        assert mask.shape == (386, 379)
        # Pansharpening not yet implemented
        # data, mask = landsat.preview(
        #     bands=("SR_B4", "SR_B3", "SR_B2"), pan=True, width=256, height=256
        # )
        # assert data.shape == (3, 256, 256)
        # assert data.dtype == numpy.uint16
        # assert mask.shape == (256, 256)
        data, mask = landsat.preview(expression="SR_B5*0.8, SR_B4*1.1, SR_B3*0.8")
        assert data.shape == (3, 386, 379)
        assert data.dtype == numpy.float64
        assert mask.shape == (386, 379)
        data, mask = landsat.preview(bands="QA_PIXEL")
        assert data.shape == (1, 386, 379)
        assert mask.all()
        # point() samples values at the scene center.
        bbox = landsat.bounds
        point_x = (bbox[0] + bbox[2]) / 2
        point_y = (bbox[1] + bbox[3]) / 2
        with pytest.raises(MissingBands):
            landsat.point(point_x, point_y)
        values = landsat.point(point_x, point_y, bands="SR_B7")
        assert values == [16414]
        values = landsat.point(point_x, point_y, bands="QA_PIXEL")
        assert values[0] == 22280
        values = landsat.point(point_x, point_y, bands=("SR_B7", "SR_B4"))
        assert len(values) == 2
        values = landsat.point(
            point_x, point_y, expression="SR_B5*0.8, SR_B4*1.1, SR_B3*0.8"
        )
        assert len(values) == 3
        # part() reads an arbitrary bounding box (a box derived from the bounds).
        bbox = landsat.bounds
        minx = (bbox[0] + bbox[2]) * 0.25
        miny = (bbox[1] + bbox[3]) * 0.25
        maxx = (bbox[0] + bbox[2]) * 0.75
        maxy = (bbox[1] + bbox[3]) * 0.75
        part = (minx, miny, maxx, maxy)
        with pytest.raises(MissingBands):
            landsat.part(part)
        data, mask = landsat.part(part, bands="SR_B7")
        assert 1024 in data.shape
        assert data.dtype == numpy.uint16
        assert not mask.all()
        data, _ = landsat.part(
            part, bands="QA_PIXEL", nodata=0, resampling_method="bilinear",
        )
        assert data.shape == (1, 46, 1024)
        data, mask = landsat.part(part, expression="SR_B5*0.8, SR_B4*1.1, SR_B3*0.8")
        assert 1024 in data.shape
        assert data.shape[0] == 3
        assert data.dtype == numpy.float64
        assert not mask.all()
        data, mask = landsat.part(
            part, bands=("SR_B4", "SR_B3", "SR_B2"), width=80, height=80,
        )
        assert data.shape == (3, 80, 80)
        assert data.dtype == numpy.uint16
        assert mask.shape == (80, 80)
        # feature() clips to a GeoJSON polygon built from the same box.
        ll = (minx, miny)
        lr = (maxx, miny)
        ul = (minx, maxy)
        ur = (maxx, maxy)
        feat = {
            "type": "Feature",
            "properties": {},
            "geometry": {"type": "Polygon", "coordinates": [[ll, lr, ur, ul, ll]]},
        }
        with pytest.raises(MissingBands):
            landsat.feature(feat)
        data, mask = landsat.feature(feat, bands="SR_B7")
        assert 1024 in data.shape
        assert data.dtype == numpy.uint16
        assert 1024 in mask.shape
        data, _ = landsat.feature(
            feat, bands="QA_PIXEL", nodata=0, resampling_method="bilinear",
        )
        assert data.any()
        data, mask = landsat.feature(feat, expression="SR_B5*0.8, SR_B4*1.1, SR_B3*0.8")
        assert data.any()
        assert data.shape[0] == 3
        assert data.dtype == numpy.float64
        data, mask = landsat.feature(
            feat, bands=("SR_B4", "SR_B3", "SR_B2"), width=80, height=80,
        )
        assert data.shape == (3, 80, 80)
        assert data.dtype == numpy.uint16
        assert mask.shape == (80, 80)
# (sceneid, expected band tuple) pairs: one case per Collection 2 Level-2
# sensor / processing-level combination (SP products carry both SR and ST
# bands, SR-only products carry just the SR bands).
C2_SENSOR_TEST_CASES = [
    # Collection 2 Level 2 OLI-TIRS 8 SP (both SR and ST)
    ("LC08_L2SP_001062_20201031_20201106_02_T2", OLI_SR_BANDS + TIRS_ST_BANDS),
    # Collection 2 Level 2 OLI-TIRS 8 SR (no ST)
    ("LC08_L2SR_122108_20201031_20201106_02_T2", OLI_SR_BANDS),
    # Collection 2 Level 2 TM SP (both SR and ST)
    ("LT05_L2SP_014032_20111018_20200820_02_T1", TM_SR_BANDS + TM_ST_BANDS),
    # Collection 2 Level 2 TM SR (no ST)
    ("LT05_L2SR_089076_20110929_20200820_02_T2", TM_SR_BANDS),
    # Collection 2 Level 2 ETM SP (both SR and ST)
    ("LE07_L2SP_175066_20201026_20201121_02_T1", TM_SR_BANDS + TM_ST_BANDS),
    # Collection 2 Level 2 ETM SR (no ST)
    ("LE07_L2SR_123067_20201030_20201126_02_T1", TM_SR_BANDS),
]
@patch("rio_tiler_pds.landsat.aws.landsat_collection2.get_object")
@patch("rio_tiler.io.cogeo.rasterio")
def test_LandsatC2L2Reader_bands(rio, get_object):
    """The reader exposes the band set matching each scene's sensor and level."""
    # Keep everything local: fixture files plus canned STAC metadata.
    rio.open = mock_rasterio_open
    get_object.return_value = LANDSAT_METADATA
    # Malformed scene ids are still rejected.
    with pytest.raises(InvalidLandsatSceneId):
        with LandsatC2Reader(INVALID_LANDSAT_SCENE_C2):
            pass
    for scene_id, wanted_bands in C2_SENSOR_TEST_CASES:
        with LandsatC2Reader(scene_id) as reader:
            assert reader.bands == wanted_bands
| StarcoderdataPython |
1735580 | #!/usr/bin/env python
from datetime import datetime
import annotate_support_functions
import subprocess
import difflib, sys
import threading, Queue
import time
import os.path
import struct
import hashlib
start = time.time()
import cPickle as pickle
######################################
#
# Task => Annotate variants
# infile => VCF file with complete sample wise genotype information
# outfile => text file (csv) with complete annotation with sample wise genotype information
# Extra outfile => text file (csv) without genic annotation with sample wise information for variants that are not bi-allelic (e.g tri-allelic)
#
#######################################
#if needed print usage:
##############################################################################################
## SETTINGS
##############################################################################################
## variables
help_ = 0
infile = ""
geneDef = "refGene" ## gene Definition
sep = "," ## separator for annotation outfile currently comma (,) is default
type_var = "all" ## type of variants to annotate from input vcf file
gtMode = "complete" ## type of variants to annotate from input vcf file
onlygenic = False ## variable for only genic annotation
forceDel = False ## varibale for force deleting the output annotation file (if exists)
qlookup = "NA" ## varibale for enabling quick lookup mode of the program
templocation = "INPATH" ## scratch place for creating the temp files while annotating
variants = dict() ## hash to hold input variants from VCF
not_biallelic_variants = dict() ## hash to hold input variants from VCF where the site is not bi-allelic and annovar annotation is absent
thrds = list() ## list to hold threads
ediva = dict() ## hash to hold input INDELs with ediva annotation SHARED
Annovar = dict() ## hash to hold input INDELs with ANNOVAR annotation SHARED
samples = dict() ## hash to hold sample information SHARED
edivaStr = dict() ## hash to hold simple tandem repeat data from ediva public omics database SHARED
headers = list()
fileSuffix = str(datetime.now().time())
fileSuffix = fileSuffix.replace(':','_')
fileSuffix = fileSuffix.replace('.','_')
## ANNOVAR settings
ANNOVAR = "/users/GD/tools/eDiVaCommandLine/lib/Annovar"
ANNOVARWEB ="/home/rrahman/soft/Annovar"
TABIX = "PATH=$PATH:/users/GD/tools/tabix/"
##############################################################################################
## INPUT PARSE - ANNOVAR CONFIGURATION CHECK - OUTPUT CREATION
##############################################################################################
parser_ = {"geneDef": geneDef, "type": type_var,"infile":infile,
"onlygenic":onlygenic,"qlookup":qlookup,
"forcedel":forceDel,"gtmode":gtMode,
"templocation":templocation,"help":help_,"csvfile":''}# Arguments of the input parser
mailer_path='/home/rrahman/soft/python-mailer/pymailer.py'
parser_ = annotate_support_functions.input_parse(parser_)
help_ = parser_["help"]
infile = os.path.abspath(parser_["infile"])
geneDef = parser_["geneDef"]
type_var = parser_["type"]
gtMode = parser_["gtmode"]
onlygenic = parser_["onlygenic"]
forceDel = parser_["forcedel"]
qlookup = parser_["qlookup"]
templocation = parser_["templocation"]
csvfile = parser_["csvfile"]
try:
if os.path.isdir(ANNOVARWEB):
ANNOVAR = ANNOVARWEB
(vcftoAnn,genicAnn) = annotate_support_functions.annovar_check(ANNOVAR)
except IOError:
sys.exit(1)
##############################################################################################
## MAIN starts
##############################################################################################
## start processing input VCF file
print "MESSAGE :: Processing input file - %s "%infile
MAF =0
## start browsing over the VCF file
try:
if infile.endswith('.vcf') or infile.endswith('.vcf.gz'):
(samples,variants,not_biallelic_variants,headers) = annotate_support_functions.vcf_processing(infile,qlookup,gtMode,type_var)
elif infile.endswith('.maf') :
print 'MAF file processing'
MAF=1
(samples,variants,not_biallelic_variants,headers) = annotate_support_functions.vcf_processing(infile,infile,gtMode,type_var)
else:
print 'The extension is not recognized. By default process like a VCF file '
if qlookup!='NA' and qlookup.endswith('.maf'): MAF =1
(samples,variants,not_biallelic_variants,headers) = annotate_support_functions.vcf_processing(infile,qlookup,gtMode,type_var)
except IOError:
sys.exit(1)
## Initialization completed
print "MESSAGE :: Finished processing input VCF file - %s "%(infile);
try:
(outFile,SortedoutFile,outFileIns,templocation) = annotate_support_functions.out_file_generate(infile,qlookup,templocation,forceDel,fileSuffix,MAF)
except IOError:
sys.exit(1)
## prepare missing data handler for db annotation
(missandb,missandb_coordinate,missanndbindel) = annotate_support_functions.preparemissdb(sep) #<<<<<<<<<<< missdb etc values!
##?This should be set to multithread once we checked it works properly [at least for annotation inside the pipeline]
if (qlookup == "NA"):
out_queue = Queue.Queue()
if (onlygenic):
## start a sigle thread for annovar genic annotation
print "MESSAGE :: Annotation starting "
Annovar = annotate_support_functions.AnnovarAnnotation(infile,templocation,fileSuffix,geneDef,ANNOVAR,Annovar,TABIX,MAF) ## spawn a thread for Annovar annotation
else:
## start threading and annotating
print "MESSAGE :: Annotation starting "
#ediva = annotate_support_functions.edivaAnnotation(variants,not_biallelic_variants,sep,missandb,missandb_coordinate,missanndbindel)## spawn a thread for ediva annotation
#Annovar = annotate_support_functions.AnnovarAnnotation(infile,templocation,fileSuffix,geneDef,ANNOVAR,Annovar) ## spawn a thread for Annovar annotation
#edivaStr = annotate_support_functions.edivaPublicOmics() ## spawn a thread for ediva public omics
thread_ediva = threading.Thread(out_queue.put(annotate_support_functions.edivaAnnotation(variants,not_biallelic_variants,sep,missandb,missandb_coordinate,missanndbindel)))
thread_annovar= threading.Thread(out_queue.put(annotate_support_functions.AnnovarAnnotation(infile,templocation,fileSuffix,geneDef,ANNOVAR,Annovar,TABIX,MAF))) ## spawn a thread for Annovar annotation
#thread_pub = threading.Thread(out_queue.put(annotate_support_functions.edivaPublicOmics())) ## spawn a thread for ediva public omics
thread_ediva.start()
thread_annovar.start()
#thread_pub.start()
# now get the queue values and join threads
thread_ediva.join()
thread_annovar.join()
#thread_pub.join()
ediva = out_queue.get()
Annovar = out_queue.get()
#edivaStr = out_queue.get()
print 'threading done'
else:
out_queue = Queue.Queue()
thread_ediva = threading.Thread(out_queue.put(annotate_support_functions.edivaAnnotation(variants,not_biallelic_variants,sep,missandb,missandb_coordinate,missanndbindel)))
thread_ediva.start()
#if os.path.exists(qlookup):
# thread_pub = threading.Thread(out_queue.put(annotate_support_functions.edivaPublicOmics())) ## spawn a thread for ediva public omic
# thread_pub.start()
# now get the queue values and join threads
thread_ediva.join()
#if os.path.exists(qlookup):
#thread_pub.join()
ediva = out_queue.get()
#if os.path.exists(qlookup):
# edivaStr = out_queue.get()
print 'threading done'
## join spawned threads
## write annotation to file or ender output
if qlookup == "NA":
## write annotaton in file
print "MESSAGE :: Writing annotation to output file %s" % (outFile)
## open file handler
dir_path = os.path.dirname(os.path.realpath(__file__))
pLI_dict = pickle.load( open( dir_path + "/" + "pLI_db.pkl", "rb" ) )
with open(outFile,'w+') as ANN, open(infile) as FL:
## write header to output file
headerOutputFile = annotate_support_functions.getHeader(onlygenic,geneDef,headers,gtMode)
if MAF ==0 :
ANN.write(headerOutputFile+'\n')
header_fields = len(headerOutputFile.split(','))
print 'len header_fields : %d'%header_fields
counter= 0
for line in FL:
if counter==0 :
if line.startswith('##'):
pass
else:
counter +=1
else:
fields = line.strip().split('\t')
# print fields
(chr_col,position,ref,alt) = [fields[0],fields[1],fields[3],fields[4]]
if chr_col.startswith('chr') or chr_col.startswith('Chr'):chr_col=chr_col[3:]
alt= alt.split(',')[0]
if len(ref)+len(alt)>2:
## indel then recover the key
hash_ref = hashlib.md5(str(ref).encode())
hash_alt = hashlib.md5(str(alt).encode())
token_ref = str(struct.unpack('<L', hash_ref.digest()[:4])[0])
token_alt = str(struct.unpack('<L', hash_alt.digest()[:4])[0])
key=';'.join((chr_col,position,token_ref,token_alt))
else:
key=';'.join((chr_col,position,ref,alt))
if variants.get(key,False):
(chr_col,position,ref,alt,aftoprint,qual,filter_) = variants.get(key,"NA").split(';')
elif not_biallelic_variants.get(key,False):
(chr_col,position,ref,alt,aftoprint,qual,filter_) = not_biallelic_variants.get(key2,"NA").split(';')
else:
print 'skipping',
print ';'.join((chr_col,position,ref,alt))
continue
annovarValueToMatch = ';'.join((chr_col,position,ref,alt))
### now get the info from ediva annotatio and annovar annotation
edivaannotationtoprint = ediva.get(key,"NA")
if geneDef != 'all':
annovarannotationtoprint = Annovar.get(annovarValueToMatch,"NA,"*3+"NA")
gene_name = annovarannotationtoprint.split(',')[1]
else:
annovarannotationtoprint = Annovar.get(annovarValueToMatch,"NA,"*11+"NA")
gene_name = annovarannotationtoprint.split(',')[5]
### pLI and pRec part
(pLI,pRec) = pLI_dict.get(gene_name,['NA','NA'])
samplewiseinfortoprint = samples.get(key,"NA")
write_str=(chr_col+sep+position+sep+ref+sep+alt+sep+
qual+sep+filter_+sep+
aftoprint+sep+
annovarannotationtoprint+sep+edivaannotationtoprint+sep+
pLI + sep + pRec + sep +
samplewiseinfortoprint)
#edivapublicanntoprint+sep+samplewiseinfortoprint)
write_str.replace('\n','')
tmpstr = write_str.split(',')[0:header_fields]
write_str=','.join(tmpstr)
ANN.write(write_str+'\n')
#### write data lines to main output file
##for key, value in variants.items():
## (chr_col,position,ref,alt,aftoprint,qual,filter_) = value.split(';')
## annovarValueToMatch = ';'.join((chr_col,position,ref,alt))
## edivaannotationtoprint = ediva.get(key,"NA")
## #print edivaannotationtoprint
## annovarannotationtoprint = Annovar.get(annovarValueToMatch,"NA,"*3+"NA")
## samplewiseinfortoprint = samples.get(key,"NA")
## #edivapublicanntoprint = edivaStr.get(';'.join((chr_col,position)),"NA,NA")
## # write annotation to file
##
## write_str=(chr_col+sep+position+sep+ref+sep+alt+sep+
## qual+sep+filter_+sep+
## aftoprint+sep+
## annovarannotationtoprint+sep+edivaannotationtoprint+sep+samplewiseinfortoprint)
## #edivapublicanntoprint+sep+samplewiseinfortoprint)
##
## write_str.replace('\n','')
##
## ANN.write(write_str+'\n')
else:
maf_separator='\t'
counter= 0
for line in FL:
if counter==0 :
if line.startswith('#'):
ANN.write(line)
else:
counter +=1
headerOutputFile=headerOutputFile.split(sep)[7:-1]
annovar_head = headerOutputFile[:4]
headerOutputFile= headerOutputFile[4:]
headerOutputFile.extend(annovar_head)
print headerOutputFile
missing_entry =maf_separator.join(["NA"]*(len(headerOutputFile)-4)) #compensates for Annovar header
headerOutputFile = maf_separator.join(headerOutputFile)
ANN.write(line.strip()+maf_separator+headerOutputFile+'\n')
else:
fields = line.strip().split('\t')
if fields[11] == fields[10]:
(chr_col,position,ref,alt) = [fields[4],fields[5],fields[10],fields[12]]
else:
(chr_col,position,ref,alt) = [fields[4],fields[5],fields[10],fields[11]]
if chr_col.startswith('chr') or chr_col.startswith('Chr'):chr_col=chr_col[3:]
if len(ref)+len(alt)>2:
## indel then recover the key
hash_ref = hashlib.md5(str(ref).encode())
hash_alt = hashlib.md5(str(alt).encode())
token_ref = str(struct.unpack('<L', hash_ref.digest()[:4])[0])
token_alt = str(struct.unpack('<L', hash_alt.digest()[:4])[0])
key=';'.join((chr_col,position,token_ref,token_alt))
else:
key=';'.join((chr_col,position,ref,alt))
annovarValueToMatch = ';'.join((chr_col,position,ref,alt))
### now get the info from ediva annotatio and annovar annotation
edivaannotationtoprint = ediva.get(key,missing_entry).replace(sep,maf_separator)
if geneDef != 'all':
annovarannotationtoprint = Annovar.get(annovarValueToMatch,"NA,"*3+"NA").replace(sep,maf_separator)
else:
annovarannotationtoprint = Annovar.get(annovarValueToMatch,"NA,"*11+"NA").replace(sep,maf_separator)
#print edivaannotationtoprint
#edivapublicanntoprint = edivaStr.get(';'.join((chr_col,position)),"NA,NA").replace(sep,maf_separator)
write_str=(line.strip() + maf_separator + edivaannotationtoprint +maf_separator +annovarannotationtoprint)
write_str.replace('\n','')
ANN.write(write_str+'\n')
#if MAF ==0:
# with open(outFileIns,'w+') as ANNINS:
# ## write header for inconsistent file
# headerOutputFile = annotate_support_functions.getHeaderIns(headers)
#
# print "MESSAGE :: Writing annotation to output file %s" % (outFileIns)
# ANNINS.write(headerOutputFile+'\n')
# ## write data lines to main output file
# for key, value in not_biallelic_variants.items():
# edivaannotationtoprint,annovarannotationtoprint,samplewiseinfortoprint = ("NA","NA","NA")
# edivapublicanntoprint = "NA,NA"
# (chr_col,position,ref,alt,aftoprint,qual,filter_) = value.split(';')
# edivaannotationtoprint = ediva.get(key,"NA")
# samplewiseinfortoprint = samples.get(key,"NA")
# #edivapublicanntoprint = edivaStr.get(';'.join((chr_col,position)),"NA,NA")
# ## write annotation to fileprint
#
# write_str=(chr_col+sep+position+sep+ref+sep+alt+sep+
# qual+sep+filter_+sep+
# aftoprint+sep+
# annovarannotationtoprint+sep+edivaannotationtoprint+sep+samplewiseinfortoprint)
# #edivapublicanntoprint+sep+samplewiseinfortoprint)
# write_str.replace('\n','')
# ANNINS.write(write_str+'\n')
#
if MAF ==0:## sort the file
mvCmm = 'mv %s %s'%(outFile,SortedoutFile)
subprocess.call(mvCmm,shell=True)
#srtCmm = "sort -k1,1 -n -k2,2 --field-separator=, %s > %s " %(outFile,SortedoutFile)
#subprocess.call(srtCmm,shell=True)
## writing completed
print "MESSAGE :: Writing annotation completed "
print "MESSAGE :: Your annotated file is %s " %(outFile)
if MAF ==0:
print "MESSAGE :: Your sorted annotated file is %s "%(SortedoutFile)
#print "MESSAGE :: Reported non bi-allelic sites are in %s " %(outFileIns)
## Finalize everything
print "MESSAGE :: Finalizing annotation process "
annotate_support_functions.finalize(templocation,fileSuffix)
print "MESSAGE :: Finalization completed "
print "MESSAGE :: Templocation %s cleared"%(templocation)
else:
if os.path.isfile(qlookup):
with open(qlookup) as rd:
for line in rd:
tmp = line.strip()
if not(line.startswith('#')):
if line.count(':') != 3 and line.count('\t')>1:
print 'Reading file as a MAF file'
MAF=1
break
## render annotation to output file
with open(outFile,'a') as ANN,open(qlookup) as FL:
#MAF = 0
#tmp= open(qlookup)
#line= tmp.readline().strip()
#if line.count(':') != 3 and line.count('\t')>1:
# print 'Reading file as a MAF file'
# MAF=1
#tmp.close()
counter = 0
headerOutputFile = annotate_support_functions.getHeaderQlookup(headers)
if MAF ==0:
var = line.rstrip('\n').split(':')
ANN.write(headerOutputFile+'\n')
## get header
for key, value in variants.items():
#print value
(chr_col,position,ref,alt,dummy) = value.split(';')
annovarValueToMatch = ';'.join((chr_col,position,ref,alt))
edivaannotationtoprint = ediva.get(key,"NA")
#print edivaannotationtoprint
#edivapublicanntoprint = edivaStr.get(';'.join((chr_col,position)),"NA,NA")
write_str=(chr_col+sep+position+sep+ref+sep+alt+sep+edivaannotationtoprint)
write_str.replace('\n','')
ANN.write(write_str+'\n')
else:
maf_separator='\t'
for line in FL:
if counter==0 :
if line.startswith('#'):
ANN.write(line)
else:
counter +=1
headerOutputFile=headerOutputFile.split(sep)[4:]
missing_entry =maf_separator.join(["NA"]*len(headerOutputFile))
headerOutputFile = maf_separator.join(headerOutputFile)
ANN.write(line.strip()+maf_separator+headerOutputFile+'\n')
else:
fields = line.strip().split('\t')
if fields[11] == fields[10]:
(chr_col,position,ref,alt) = [fields[4],fields[5],fields[10],fields[12]]
else:
(chr_col,position,ref,alt) = [fields[4],fields[5],fields[10],fields[11]]
if len(ref)+len(alt)>2:
## indel then recover the key
hash_ref = hashlib.md5(str(ref).encode())
hash_alt = hashlib.md5(str(alt).encode())
token_ref = str(struct.unpack('<L', hash_ref.digest()[:4])[0])
token_alt = str(struct.unpack('<L', hash_alt.digest()[:4])[0])
key=';'.join((chr_col,position,token_ref,token_alt))
else:
key=';'.join((chr_col,position,ref,alt))
### now get the info from ediva annotatio and annovar annotation
edivaannotationtoprint = ediva.get(key,missing_entry).replace(sep,maf_separator)
#print edivaannotationtoprint
#edivapublicanntoprint = edivaStr.get(';'.join((chr_col,position)),"NA,NA").replace(sep,maf_separator)
write_str=(line.strip() + maf_separator + edivaannotationtoprint )
write_str.replace('\n','')
ANN.write(write_str+'\n')
## sort the file
print "MESSAGE :: Writing annotation completed"
if MAF ==0:
srtCmm = "sort -k1,1 -n -k2,2 --field-separator=, %s > %s " %(outFile,SortedoutFile)
subprocess.call(srtCmm,shell=True)
## writing completed
print "MESSAGE :: Your annotated file is %s " %outFile
print "MESSAGE :: Your sorted annotated file is %s " %SortedoutFile
else:
for key, value in variants.items():
(chr_col,position,ref,alt,aftoprint) = value.split(';')
annovarValueToMatch = ';'.join((chr_col,position,ref,alt))
edivaannotationtoprint = ediva.get(key,"NA")
#print edivaannotationtoprint
edivapublicanntoprint = annotate_support_functions.edivaPublicOmics_search(chr_col,position)
# edivaStr.get(';'.join((chr_col,position)),"NA,NA")
edivavals = edivaannotationtoprint.split(',')
edivapublicvals = edivapublicanntoprint.split(',')
## render to command line output
print "chromosome: %s " % chr_col
print "position: %s " %position
print "Reference: %s " %ref
print "Alteration: %s " %alt
print "dbSNP identifier: %s" % edivavals[0]
print "EVS european frequency: %s " % edivavals[1]
print "EVS african frequency: %s " % edivavals[2]
print "EVS total frequency: %s " % edivavals[3]
print "1000genomes european frequency: %s " % edivavals[4]
print "1000genomes african frequency: %s " % edivavals[5]
print "1000genomes american frequency: %s " % edivavals[6]
print "1000genomes asian frequency: %s " % edivavals[7]
print "1000genomes total frequency: %s " % edivavals[8]
print "Segment duplication: %s " % edivavals[9]
print "Placental mammal phyloP: %s " % edivavals[10]
print "Primates phyloP: %s " % edivavals[11]
print "Vertebrates phyloP: %s " % edivavals[12]
print "Placental mammal phastcons: %s" % edivavals[13]
print "Primates phastcons: %s " % edivavals[14]
print "Vertebrates phastcons: %s " % edivavals[15]
print "Gerp score1: %s " % edivavals[16]
print "Gerp score2: %s " % edivavals[17]
print "Sift: %s " % edivavals[18]
print "polyphen2: %s " % edivavals[19]
print "Mutationassessor: %s " % edivavals[20]
print "Condel: %s " % edivavals[21]
print "Cadd score1: %s " % edivavals[22]
print "Cadd score2: %s " % edivavals[23]
print "Eigen raw: %s " % edivavals[24]
print "Eigen Phred: %s " % edivavals[25]
print "Simple tandem repeat region: %s " % edivapublicvals[0]
print "Simple tandem repeat length: %s " % edivapublicvals[1]
end = time.time()
py_time = end-start
if len(csvfile)>1 and os.path.isfile(csvfile):
mailCmd = 'python '+ mailer_path +' -s /home/rrahman/soft/python-mailer/annotation.html '+ str(csvfile) +' Annotation'
print mailCmd
os.system(mailCmd)
| StarcoderdataPython |
1726259 | <reponame>SummerOf15/Robot-Control-and-Planification
"""
Test ICP localisation
Apply a random displacement to a scan and check the error of the recovered position through ICP
author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import readDatasets as datasets
import icp
def test(dist_thres, percent):
    """Benchmark ICP localisation accuracy on the u2is dataset.

    Applies a random rigid displacement to scan 55, recovers it with ICP
    against a randomly chosen reference scan, and logs the pose error over
    nb_test trials.  Depends on the project modules ``readDatasets`` (as
    ``datasets``) and ``icp``.

    :param dist_thres: matching distance threshold forwarded to icp.icp().
    :param percent: inlier percentage forwarded to icp.icp().
    :return: (mean translation error, var, mean rotation error, var,
              mean computation time per trial).
    """
    # Reading some data
    scanList = datasets.read_u2is(56)
    scanOriginal = scanList[55]
    scanTruePose = np.array([0.3620, 0.0143, 0.0483]) # Manual estimation for scan 55 of u2is dataset
    # Initialise error log
    nb_test = 10
    poseError = np.zeros((3, nb_test))
    time_start = time.process_time()
    for a in range(nb_test):
        idref = np.random.randint(50)
        refscan = scanList[idref]
        # Generate random displacement and applies it to the second scan:
        # translation in [-0.5, 0.5)^2, rotation in [-0.3, 0.3) rad.
        randT = np.random.rand(2, 1) - 0.5
        randR = 0.6*np.random.rand(1, 1) - 0.3
        R = np.array([[math.cos(randR), -math.sin(randR)], [math.sin(randR), math.cos(randR)]])
        scan = datasets.transform_scan(scanOriginal, R, randT)
        # Displays initial positions (ref scan + displaced scan).
        plt.cla()
        # for stopping simulation with the esc key.
        plt.gcf().canvas.mpl_connect('key_release_event', lambda event: [exit(0) if event.key == 'escape' else None])
        plt.plot(refscan["x"], refscan["y"], "ob", label='Ref Scan')
        plt.plot(scan["x"], scan["y"], ".r", label='Scan before ICP')
        plt.axis("equal")
        # perform ICP (max 200 iterations, 1e-7 convergence tolerance)
        R, t, error = icp.icp(refscan, scan, 200, 1e-7, dist_thres, percent)
        # Apply recovered motion to scan
        scan = datasets.transform_scan(scan, R, t)
        # NOTE(review): error is measured against the manual pose of scan 55,
        # while the reference scan is random — confirm the scans share a frame.
        poseError[:, a] = np.transpose(scan["pose"] - scanTruePose)
        # Display the corrected scan on top of the reference.
        plt.axis("equal")
        plt.plot(scan["x"], scan["y"], ".g", label='Scan after ICP')
        plt.legend()
        plt.pause(0.1)
    time_elapsed = time.process_time() - time_start
    # Translation error = Euclidean norm of (dx, dy); rotation error = |dtheta|.
    tErrors = np.sqrt(np.square(poseError[0, :]) + np.square(poseError[1, :]))
    oErrors = np.sqrt(np.square(poseError[2, :]))
    print("Mean (var) translation error : {:e} ({:e})".format(np.mean(tErrors), np.var(tErrors)))
    print("Mean (var) rotation error : {:e} ({:e})".format(np.mean(oErrors), np.var(oErrors)))
    print("Mean computation time : {:f}".format(time_elapsed/nb_test))
    print("Press Q in figure to finish...")
    plt.show()
    return np.mean(tErrors),np.var(tErrors),np.mean(oErrors),np.var(oErrors),time_elapsed/nb_test
if __name__=="__main__":
    # The commented-out sweep below evaluated ICP performance across a range
    # of distance thresholds; kept for reference.
    # dist_thres_list=[0.02*i for i in range(20)]
    # # percent_list=[0.05*i for i in range(3,20)]
    # te_list=[]
    # vt_list=[]
    # oe_list=[]
    # vo_list=[]
    # time_list=[]
    # for t in dist_thres_list:
    #     te,vt,oe,vo,ti=test(t,0.85)
    #     te_list.append(te)
    #     vt_list.append(vt)
    #     oe_list.append(oe)
    #     vo_list.append(vo)
    #     time_list.append(ti)
    #
    # plt.figure()
    # plt.errorbar(dist_thres_list,te_list,yerr=vt_list,elinewidth=2,fmt="-ro",label="translation error")
    # plt.errorbar(dist_thres_list,oe_list,yerr=vo_list,elinewidth=2,fmt="-bo",label="rotation error")
    # plt.plot(dist_thres_list, time_list, "-yo", label="time")
    # plt.title("Performance with different thresholds")
    # plt.xlabel("threshold")
    # plt.ylabel("value")
    # plt.legend()
    # plt.show()
test(0.3,0.85) | StarcoderdataPython |
3231990 | <filename>pyKairosDB/tests/test_get_all_metric_names.py
#!/usr/bin/env python
import pyKairosDB
import time
import sys
# Connect and dump all metric names; NOTE: Python 2 syntax (print statement).
c = pyKairosDB.connect() # use localhost:8080, the default, no ssl
print pyKairosDB.metadata.get_all_metric_names(c)
| StarcoderdataPython |
113413 | <filename>smzdmCheckin/smzdmCheckinForSCF.py
# -*- coding: utf8 -*-
import requests, json, time, os
requests.packages.urllib3.disable_warnings()
cookie = os.environ.get("cookie_smzdm")
def main(*arg):
    """Perform one smzdm.com daily check-in using the module-level cookie.

    Sends the check-in request; on a non-zero error_code (stale cookie) and a
    configured $SCKEY, pushes a ServerChan notification.  Extra positional
    args are accepted for SCF compatibility and ignored.

    :return: one status line (Chinese) terminated by a newline.
    """
    try:
        msg = ""
        SCKEY = os.environ.get('SCKEY')
        s = requests.Session()
        # Browser-like UA so the endpoint treats this as a normal web client.
        s.headers.update({'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'})
        # Millisecond timestamp used as a cache-busting query parameter.
        t = round(int(time.time() * 1000))
        url = f'https://zhiyou.smzdm.com/user/checkin/jsonp_checkin?_={t}'
        headers = {
            "cookie" : cookie,
            'Referer': 'https://www.smzdm.com/'
        }
        r = s.get(url, headers=headers, verify=False)
        # Response escapes non-ASCII; decode it for readable logging.
        print(r.text.encode('latin-1').decode('unicode_escape'))
        if r.json()["error_code"] != 0 and SCKEY:
            # Cookie expired: push a ServerChan alert with the raw response.
            scurl = f"https://sc.ftqq.com/{SCKEY}.send"
            data = {
                "text" : "smzdm Cookie过期",
                "desp" : r.text
            }
            requests.post(scurl, data=data)
            print("smzdm cookie失效")
            msg += "smzdm cookie失效"
        else:
            msg += "smzdm签到成功"
    except Exception as e:
        print('repr(e):', repr(e))
        msg += '运行出错,repr(e):'+repr(e)
    return msg + "\n"
def smzdm_pc(*arg):
    """Run the check-in once per account.

    The module-level ``cookie`` may hold several cookies separated by
    newlines; each is installed as the current global cookie and checked in
    via :func:`main`.  Extra positional args are accepted and ignored.

    :return: concatenated per-account status report.
    """
    global cookie
    report = ""
    for idx, account_cookie in enumerate(cookie.split("\n"), start=1):
        report += f"第 {idx} 个账号开始执行任务\n"
        # main() reads the global cookie; the argument is ignored by design.
        cookie = account_cookie
        report += main(cookie)
    return report
if __name__ == "__main__":
    # Local/manual entry point; the SCF runtime calls smzdm_pc() directly.
    if cookie:
        print("----------什么值得买开始尝试签到----------")
        smzdm_pc()
        print("----------什么值得买签到执行完毕----------")
| StarcoderdataPython |
99306 | from django.db import transaction
from django.utils.translation import gettext_lazy as _
import django_filters
import reversion
from rest_framework import exceptions, serializers, viewsets
from resources.api.base import NullableDateTimeField, TranslatedModelSerializer, register_view
from .models import CateringProduct, CateringProductCategory, CateringOrder, CateringOrderLine, CateringProvider
class CateringProviderSerializer(TranslatedModelSerializer):
    """Read serializer for catering providers (id, name, price list, units)."""
    class Meta:
        model = CateringProvider
        fields = ('id', 'name', 'price_list_url', 'units')
class CateringProviderFilter(django_filters.rest_framework.FilterSet):
    """Filter providers by unit id (?unit=...) via the M2M 'units' relation."""
    unit = django_filters.CharFilter(field_name='units')
    class Meta:
        model = CateringProvider
        fields = ('unit',)
class CateringProvider(viewsets.ReadOnlyModelViewSet):
    # NOTE(review): this viewset deliberately(?) reuses the model's name and
    # shadows the imported CateringProvider model at module level once defined.
    # Inside the class body the name still resolves to the model, so the
    # queryset below is correct — but consider renaming to
    # CateringProviderViewSet for clarity (would change the module interface).
    queryset = CateringProvider.objects.prefetch_related('units')
    serializer_class = CateringProviderSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filterset_class = CateringProviderFilter
register_view(CateringProvider, 'catering_provider')
class CateringProductCategorySerializer(TranslatedModelSerializer):
    """Read serializer for product categories, including their product ids."""
    class Meta:
        model = CateringProductCategory
        fields = ('id', 'name', 'products', 'provider')
class CateringProductCategoryFilter(django_filters.rest_framework.FilterSet):
    """Filter categories by provider id (?provider=...)."""
    class Meta:
        model = CateringProductCategory
        fields = ('provider',)
class CateringProductCategoryViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint for catering product categories."""
    queryset = CateringProductCategory.objects.prefetch_related('products')
    serializer_class = CateringProductCategorySerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filterset_class = CateringProductCategoryFilter
register_view(CateringProductCategoryViewSet, 'catering_product_category')
class CateringProductSerializer(TranslatedModelSerializer):
    """Read serializer for individual catering products."""
    class Meta:
        model = CateringProduct
        fields = ('id', 'name', 'category', 'description')
class CateringProductFilter(django_filters.rest_framework.FilterSet):
    """Filter products by category, or by provider via category__provider."""
    provider = django_filters.NumberFilter(field_name='category__provider')
    class Meta:
        model = CateringProduct
        fields = ('provider', 'category')
class CateringProductViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint for catering products."""
    queryset = CateringProduct.objects.all()
    serializer_class = CateringProductSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filterset_class = CateringProductFilter
register_view(CateringProductViewSet, 'catering_product')
# taken from https://github.com/encode/django-rest-framework/issues/3847
# needed because product field must be required always, also with PATCH
class MonkeyPatchPartial:
    """Context manager that temporarily forces ``partial = False`` on *root*.

    Works around djangorestframework bug #3847: while the ``with`` block is
    active, the root serializer's ``partial`` attribute is False so nested
    fields are fully validated (e.g. during validate_empty_values); the
    previous value is restored on exit.
    """
    def __init__(self, root):
        self._root = root
    def __enter__(self):
        self._old = self._root.partial
        self._root.partial = False
    def __exit__(self, *args):
        self._root.partial = self._old
class CateringOrderLineSerializer(serializers.ModelSerializer):
    """One order line (product + quantity) nested inside a catering order."""
    class Meta:
        model = CateringOrderLine
        fields = ('product', 'quantity')
    def run_validation(self, *args, **kwargs):
        # Force non-partial validation even on PATCH so that 'product' is
        # always required (see MonkeyPatchPartial / DRF issue #3847).
        with MonkeyPatchPartial(self.root):
            return super().run_validation(*args, **kwargs)
class CateringOrderSerializer(serializers.ModelSerializer):
    """Writable serializer for catering orders with nested order lines.

    Order lines are replaced wholesale on create/update; lines with
    quantity == 0 are dropped before validation.  validate() enforces
    reservation permissions and that all products share one provider
    available in the reservation's unit.
    """
    created_at = NullableDateTimeField(read_only=True)
    modified_at = NullableDateTimeField(read_only=True)
    order_lines = CateringOrderLineSerializer(many=True, required=True, allow_empty=False)
    class Meta:
        model = CateringOrder
        fields = (
            'id', 'created_at', 'modified_at', 'reservation', 'order_lines', 'invoicing_data', 'message',
            'serving_time',
        )
    def _handle_order_lines(self, order, order_line_data):
        # Replace-all strategy: drop existing lines, recreate from payload.
        order.order_lines.all().delete()
        for order_line_datum in order_line_data:
            CateringOrderLine.objects.create(order=order, **order_line_datum)
    @transaction.atomic
    def create(self, validated_data):
        order_line_data = validated_data.pop('order_lines', [])
        new_order = super().create(validated_data)
        self._handle_order_lines(new_order, order_line_data)
        return new_order
    @transaction.atomic
    def update(self, instance, validated_data):
        order_line_data = validated_data.pop('order_lines', [])
        updated_order = super().update(instance, validated_data)
        self._handle_order_lines(updated_order, order_line_data)
        return updated_order
    def to_internal_value(self, data):
        # Remove order lines with quantity == 0
        if 'order_lines' in data and isinstance(data['order_lines'], list):
            order_lines = data['order_lines']
            data['order_lines'] = [x for x in order_lines if x.get('quantity') != 0]
        return super().to_internal_value(data)
    def validate(self, validated_data):
        # On partial update the reservation may be absent from the payload.
        reservation = validated_data.get('reservation') or self.instance.reservation
        if reservation:
            resource = reservation.resource
            user = self.context['request'].user
            if reservation.user != user and not resource.can_modify_catering_orders(user):
                raise exceptions.PermissionDenied(_("No permission to modify this reservation's catering orders."))
        # All lines must come from a single provider; derive it from the first.
        provider = validated_data['order_lines'][0]['product'].category.provider
        validated_data['provider'] = provider
        for order_line in validated_data['order_lines'][1:]:
            if order_line['product'].category.provider != provider:
                raise exceptions.ValidationError(_('The order contains products from several providers.'))
        # NOTE(review): this dereferences reservation unconditionally — if
        # reservation is None above, this raises AttributeError; confirm a
        # reservation is always required by the model/field definition.
        if reservation.resource.unit not in provider.units.all():
            raise exceptions.ValidationError(
                "The provider isn't available in the reservation's unit."
            )
        return validated_data
class CateringOrderFilter(django_filters.rest_framework.FilterSet):
    """Filter catering orders by reservation id (?reservation=...)."""
    class Meta:
        model = CateringOrder
        fields = ('reservation',)
class CateringOrderViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for catering orders.

    Visibility is restricted per-user via the queryset's can_view();
    create/update/delete are wrapped in django-reversion revisions and
    trigger notification emails.
    """
    queryset = CateringOrder.objects.prefetch_related('order_lines')
    serializer_class = CateringOrderSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filterset_class = CateringOrderFilter
    def get_queryset(self):
        # Limit results to orders the requesting user is allowed to see.
        return super().get_queryset().can_view(self.request.user)
    def perform_create(self, serializer):
        with reversion.create_revision():
            instance = serializer.save()
            reversion.set_user(self.request.user)
            reversion.set_comment('Created using the API.')
        instance.send_created_notification(request=self.request)
    def perform_update(self, serializer):
        with reversion.create_revision():
            instance = serializer.save()
            reversion.set_user(self.request.user)
            reversion.set_comment('Updated using the API.')
        # TODO somehow check that the order is actually modified before sending the notification?
        instance.send_modified_notification(request=self.request)
    def perform_destroy(self, instance):
        # Notify before deletion so the instance data is still available.
        instance.send_deleted_notification(request=self.request)
        super().perform_destroy(instance)
register_view(CateringOrderViewSet, 'catering_order')
| StarcoderdataPython |
3247721 | <reponame>alpha-leo/ComputationalPhysics-Fall2020<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 12:36:26 2020
@author: win_10
"""
import numpy as np
import random
import matplotlib.pyplot as plt
from numpy.random import choice
class rand_walk_1:
    """One-dimensional random walker (the drunk crossing the street).

    The walker starts at position 0 and takes steps of size ``V_p`` in a
    uniformly random direction each tick, clamped at 0 on the near side;
    reaching position >= ``L`` counts as a successful crossing.
    """
    def __init__(self, V_p, L):
        self.speed = V_p      # step size per tick
        self.end = L          # street width (success threshold)
        self.pos = 0          # current position, clamped to >= 0
        self.lifetime = 0     # ticks survived so far
    def next(self):
        """Advance one tick; return 1 when the far side is reached, else 0."""
        direction = choice([-1, 1])
        self.pos += self.speed * direction
        self.lifetime += 1
        # The walker cannot step off the near kerb.
        self.pos = max(self.pos, 0)
        return 1 if self.pos >= self.end else 0
def init(grid, N):
    """Place N cars (value -1) on distinct random empty cells of *grid*.

    Cells are drawn uniformly at random (row first, then column) and
    re-drawn on collision, so exactly N cells end up occupied.  Generalized
    from the original hard-coded 10x10 street to any 2-D grid shape; on a
    10x10 grid the random draws are identical to the original.

    :param grid: 2-D numpy array; 0 marks an empty cell (mutated in place).
    :param N: number of cars to place; must not exceed the number of empty
        cells, otherwise the rejection loop never terminates.
    :return: the same grid object, for call-chaining convenience.
    """
    rows, cols = grid.shape
    placed = 0
    while placed < N:
        row = random.randint(0, rows - 1)
        col = random.randint(0, cols - 1)
        if grid[row, col] == 0:
            grid[row, col] = -1
            placed += 1
    return grid
def car_mov(grid, v, N):
    """Advance every car (cell value -1) by *v* rows toward row 0.

    Cars that would leave the top of the street are removed and respawned on
    a random empty cell via new_car().  Assumes a 10x10 grid (L is
    hard-coded).  The grid is mutated in place and also returned.

    NOTE(review): cells are scanned in row order while being mutated — a car
    relocated by new_car() into a not-yet-visited cell can be moved twice in
    the same call; with the caller's v=40 every car respawns each tick, so
    this may be benign there. Confirm intended.
    """
    L = 10
    for row in range(L):
        for col in range(L):
            if grid[row, col] == -1:
                if row - v > -1:
                    # Car stays on the street: shift it v rows up.
                    grid[row, col] = 0
                    grid[row - v, col] = -1
                else:
                    # Car left the street: respawn on a random empty cell.
                    row_prime, col_prime = new_car(grid, N, L)
                    grid[row, col] = 0
                    grid[row_prime, col_prime] = -1
    return grid
def new_car(grid, N, L):
    """Return (row, col) of a uniformly random empty cell of the L x L grid.

    Uses rejection sampling: draw positions until one lands on a 0 cell.
    *N* is unused but kept for interface compatibility with callers.
    """
    while True:
        row_prime = random.randint(0, L - 1)
        col_prime = random.randint(0, L - 1)
        if grid[row_prime, col_prime] == 0:
            return row_prime, col_prime
def death_check(drunk, grid, v):
    """Return 1 if any car in the drunk's column runs him over this tick, else 0.

    The drunk stands at row L//2 of the (hard-coded) 10-row street, in column
    int(drunk.pos).  A car at row ``row`` moving *v* rows up kills him when
    its sweep [row - ... ] crosses the middle row, with wrap-around handling
    for cars that leave the top of the street.
    """
    L = 10
    col = int(drunk.pos)
    half = L // 2
    for row in range(L):
        if grid[row, col] != -1:
            continue
        if row + v < L:
            # Car stays on the street: dead iff its sweep crosses the middle.
            if row + v > half > row:
                return 1
        elif half < (row + v) % L or half > row:
            # Car wraps past the top edge of the street.
            return 1
    return 0
def main():
    """Run the full crossing simulation and plot the results.

    For each car count N in 1..39, simulates sample_number attempts of the
    drunk crossing a 10-wide street with cars of speed v, recording the
    crossing success rate and the mean lifetime of fatal attempts.  Saves
    plots to success.jpg and meanlife.jpg.
    """
    L = 10
    v = 40 #velocity of cars
    V_p = 2 #velocity of drunk man
    N_array = np.arange(1, 40, 1) #N is number of cars in street
    sample_number = 10000
    cross_prob_array = np.zeros(len(N_array)) #says that considering an 'N', with what probability the drunk passes the street
    mean_life = np.zeros(len(N_array))
    n = 0
    while n < len(N_array):
        N = N_array[n] # How many cars in the street
        drunk = rand_walk_1(V_p, L) # initialize drunk
        grid = np.zeros((L, L))
        grid = init(grid, N) # Make the street
        # to calculate the mean life span of drunk
        sum_life = 0
        death_count = 0
        for i in range(sample_number):
            while True:
                # Evolve one time step
                temp = drunk.next() #if temp==1,drunk has crossed the street
                grid = car_mov(grid, v, N)
                if temp == 1:
                    # reset drunk
                    drunk.pos = 0
                    drunk.lifetime = 0
                    # update cross probability
                    cross_prob_array[n] += 1
                    break
                if death_check(drunk, grid, v) == 1:
                    # update sum life and death count
                    death_count += 1
                    sum_life += drunk.lifetime
                    # reset drunk
                    drunk.pos = 0
                    drunk.lifetime = 0
                    break
        # NOTE(review): raises ZeroDivisionError if every attempt succeeded
        # (death_count == 0) — unlikely at these parameters, but worth guarding.
        mean_life[n] = sum_life / death_count
        cross_prob_array[n] = cross_prob_array[n] / sample_number
        n += 1
    # Graphics
    plt.plot(N_array, cross_prob_array)
    plt.xlabel("Number of cars in the street")
    plt.ylabel("success rate")
    plt.savefig("success.jpg", bbox_inches='tight')
    plt.show()
    plt.plot(N_array, mean_life)
    plt.xlabel("Number of cars in the street")
    plt.ylabel("mean life time in unsuccessful attemps")
    plt.savefig("meanlife.jpg", bbox_inches='tight')
    plt.show()
if __name__ == "__main__":
    # Run the full simulation when executed as a script.
    main()
| StarcoderdataPython |
181815 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
class VM(object):
    """Tiny register machine for Advent of Code 2018 day 19.

    Six integer registers; register ``ip`` mirrors the instruction pointer.
    Each opcode is a plain four-argument function stored in the class dict
    and dispatched by name, so it receives the register file explicitly
    instead of ``self``.
    """
    def __init__(self, ip):
        self.ip = ip
        self.reg = [0, 0, 0, 0, 0, 0]
        # Pre-decrement the bound register: run() increments before fetching,
        # so execution starts at instruction 0.
        self.reg[self.ip] -= 1
    def run(self, prog):
        """Execute *prog* (a list of [op, a, b, c] instructions) until the
        instruction pointer leaves the program; print and return the final
        register file."""
        while -1 <= self.reg[self.ip] < len(prog) - 1:
            self.reg[self.ip] += 1
            op, a, b, c = prog[self.reg[self.ip]]
            VM.__dict__[op](self.reg, a, b, c)
        print(self.reg)
        return self.reg
    # Arithmetic and bitwise opcodes (r = register operand, i = immediate).
    def addr(R, a, b, c): R[c] = R[a] + R[b]
    def addi(R, a, b, c): R[c] = R[a] + b
    def mulr(R, a, b, c): R[c] = R[a] * R[b]
    def muli(R, a, b, c): R[c] = R[a] * b
    def banr(R, a, b, c): R[c] = R[a] & R[b]
    def bani(R, a, b, c): R[c] = R[a] & b
    def borr(R, a, b, c): R[c] = R[a] | R[b]
    def bori(R, a, b, c): R[c] = R[a] | b
    # Assignment opcodes.
    def setr(R, a, b, c): R[c] = R[a]
    def seti(R, a, b, c): R[c] = a
    # Comparison opcodes: store 1 when the relation holds, else 0.
    def gtir(R, a, b, c): R[c] = int(a > R[b])
    def gtri(R, a, b, c): R[c] = int(R[a] > b)
    def gtrr(R, a, b, c): R[c] = int(R[a] > R[b])
    def eqir(R, a, b, c): R[c] = int(a == R[b])
    def eqri(R, a, b, c): R[c] = int(R[a] == b)
    def eqrr(R, a, b, c): R[c] = int(R[a] == R[b])
def MAIN(argv):
    """Load the elfcode program from argv.fname and run part 2.

    Lines starting with "#ip " bind the instruction-pointer register; all
    other lines are [op, a, b, c] instructions.
    """
    with open(argv.fname, 'r', encoding='UTF-8') as fh:
        prog = []
        for line in fh.readlines():
            if line.startswith("#ip "):
                # NOTE(review): reads a single character — assumes the bound
                # register index is one digit (0-5), true for AoC inputs.
                vm = VM(int(line[4]))
            else:
                l = line.split()
                prog.append([ l[0], int(l[1]), int(l[2]), int(l[3]) ])
    # print("Part 1:", vm.run(prog))
    # Part 2: restart with register 0 seeded to 1 and the ip pre-decremented.
    vm.reg = [1, 0, 0, 0, 0, 0]
    vm.reg[vm.ip] -= 1
    print("Part 2:", vm.run(prog))
def getopts():
    """Parse the command line: one optional positional file name (default 19.in)."""
    parser = argparse.ArgumentParser(description="""Advent of code day 19""")
    parser.add_argument(
        'fname',
        type=str,
        nargs='?',
        default="19.in",
        help='File name',
    )
    return parser.parse_args()
if __name__ == '__main__':
    # Parse arguments and run the solver when executed as a script.
    MAIN(getopts())
| StarcoderdataPython |
155584 | import numpy as np
def PCA_numpy(data, n_components=2):
    """Project *data* onto its top *n_components* principal components.

    Fixes two defects in the original implementation: ``n_components`` was
    ignored (the result was always two-dimensional), and eigenpairs were
    ordered by sorting Python lists containing numpy arrays.  The covariance
    matrix is symmetric, so ``numpy.linalg.eigh`` is used, guaranteeing real
    eigenvalues and orthonormal eigenvectors.

    Note: as in the original, the raw (uncentered) data is projected, and
    component signs are arbitrary (inherent to PCA).

    :param data: (n_samples, n_features) array.
    :param n_components: number of components to keep (<= n_features).
    :return: (n_samples, n_components) projected data.
    """
    # 1st step: covariance of the feature columns (rows are observations).
    cov_matrix = np.cov(data, rowvar=False)
    # 2nd step: eigendecomposition; eigh returns eigenvalues in ascending order.
    eig_values, eig_vectors = np.linalg.eigh(cov_matrix)
    # 3rd step: keep the eigenvectors of the largest |eigenvalue|s,
    # in descending order, and project the data onto them.
    order = np.argsort(np.abs(eig_values))[::-1][:n_components]
    return data.dot(eig_vectors[:, order])
1725404 | <gh_stars>0
from asyncio import create_task, sleep
from os import environ
from random import randrange
from typing import Optional
from discord import Client, Status, Game, TextChannel, Message
def verbose(*args) -> None:
    """Print the given args (prefixed with "verbose:") only if $VERBOSE is set."""
    if "VERBOSE" in environ:
        print("verbose:", *args)
class OobClient(Client):
    """Discord bot that periodically posts "oob" and replies to mentions.

    A background task posts an "oob" after a randomized delay; every message
    seen in the configured channel shortens the next delay (delay ^ 0.9),
    and the delay resets to DELAY_MAX after each autonomous post.
    """
    # These delay values do not apply to replies to mentions.
    DELAY_MIN = 1 # 1 second
    DELAY_MAX = 72 * 60 * 60 # 72 hours
    DELAY_POW = 0.9 # delay = delay ^ 0.9
    def __init__(self, channel_id: int, **options) -> None:
        super().__init__(**options)
        self.channel_id = channel_id
        self.delay_secs = self.DELAY_MAX
        self.delay_task = None
    async def oob(self, message: Optional[Message]) -> None:
        """Send an oob, optionally as a reply to a message."""
        # If message is provided, send to the same channel as that message.
        # Otherwise, send to the channel specified with $DISCORD_CHANNEL.
        channel: TextChannel = (
            message.channel if message else self.get_channel(self.channel_id)
        )
        verbose(
            f"sending an oob to #{channel.name}"
            + (f" as a reply to {message.author}" if message else "")
        )
        # Send the message, spending a random amount of time "typing" to make
        # things a little more fun :).
        with channel.typing():
            await sleep(randrange(1, 5))
            if message:
                await message.reply("oob")
            else:
                await channel.send("oob")
    def start_delayed_oob(self) -> None:
        """Set an oob to be sent in the future.
        This will replace the existing delay task if there is one. The delay
        will be in the range of [0.5 * self.delay_secs, self.delay_secs)."""
        # If there is already an oob waiting, cancel it.
        if self.delay_task:
            verbose(f"cancelling existing delay task '{self.delay_task.get_name()}'")
            self.delay_task.cancel()
        # Randomize the delay based on self.delay_secs (never below DELAY_MIN).
        delay = max(
            self.DELAY_MIN, int(randrange(self.delay_secs // 2, self.delay_secs))
        )
        # Create a task that waits delay seconds before calling oob().
        async def oob_delay_fn():
            await sleep(delay)
            # While unlikely, it is possible that oob_delay could be called
            # while the current task is in the middle of running oob() (since
            # it has a small delay to simulate typing). Running oob() in a new
            # task should prevent this.
            create_task(self.oob(None))
            # Reset the delay to the maximum and start a new delay task.
            # Restarting the task ensures that the bot will eventually send an
            # oob again even if no one else sends one.
            self.delay_secs = self.DELAY_MAX
            self.start_delayed_oob()
        self.delay_task = create_task(oob_delay_fn(), name=f"oob_delay_fn.{delay}")
        verbose(f"started new delay task '{self.delay_task.get_name()}'")
        # Log the delay in human-readable d/h/m/s form.
        m, s = divmod(delay, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        verbose(f"next oob will be in {delay}s", f"({d}d {h}h {m}m {s}s)")
    async def on_ready(self) -> None:
        """Called when the bot is ready to start."""
        print(f"logged in as {self.user}!")
        await self.change_presence(status=Status.idle, activity=Game("oob"))
        self.start_delayed_oob()
    async def on_message(self, message: Message) -> None:
        """Called when a message is sent."""
        # Never respond to our own messages.
        if message.author == self.user:
            return
        # If the message mentions us directly, respond immediately.
        if self.user.mentioned_in(message):
            await self.oob(message)
            return
        # Otherwise, handle the message if it is in $DISCORD_CHANNEL.
        elif message.channel.id == self.channel_id:
            # Reduce the delay by DELAY_POW and start a new delayed oob task.
            self.delay_secs = int(self.delay_secs ** self.DELAY_POW)
            self.start_delayed_oob()
if __name__ == "__main__":
    # Read required configuration from the environment and start the bot.
    token = environ["DISCORD_TOKEN"]
    channel = int(environ["DISCORD_CHANNEL"])
    print(f"loaded configuration from environment:")
    print(f"  DISCORD_TOKEN=***")
    print(f"  DISCORD_CHANNEL={channel}")
    print("connecting to Discord...")
    OobClient(channel).run(token)
| StarcoderdataPython |
95576 | from fastapi_users.db.base import BaseUserDatabase, UserDatabaseDependency
__all__ = ["BaseUserDatabase", "UserDatabaseDependency"]
# Optional database backends: re-export each one only when its package is
# installed, extending __all__ accordingly.
try:  # pragma: no cover
    from fastapi_users_db_sqlalchemy import (  # noqa: F401
        SQLAlchemyBaseOAuthAccountTable,
        SQLAlchemyBaseOAuthAccountTableUUID,
        SQLAlchemyBaseUserTable,
        SQLAlchemyBaseUserTableUUID,
        SQLAlchemyUserDatabase,
    )
except ImportError:  # pragma: no cover
    pass
else:
    __all__ += [
        "SQLAlchemyBaseUserTable",
        "SQLAlchemyBaseUserTableUUID",
        "SQLAlchemyBaseOAuthAccountTable",
        "SQLAlchemyBaseOAuthAccountTableUUID",
        "SQLAlchemyUserDatabase",
    ]
try:  # pragma: no cover
    from fastapi_users_db_beanie import (  # noqa: F401
        BaseOAuthAccount,
        BeanieBaseUser,
        BeanieUserDatabase,
        ObjectIDIDMixin,
    )
except ImportError:  # pragma: no cover
    pass
else:
    __all__ += [
        "BeanieBaseUser",
        "BaseOAuthAccount",
        "BeanieUserDatabase",
        "ObjectIDIDMixin",
    ]
| StarcoderdataPython |
4827716 | <reponame>intact-solutions/pysparse<gh_stars>0
import math, os, sys, time
import numpy as np
from pysparse.sparse import spmatrix
from pysparse.itsolvers.krylov import pcg, minres, qmrs, cgs
from pysparse.precon import precon
ll = spmatrix.ll_mat(5,5)
print(ll)
print(ll[1,1])
print(ll)
ll[2,1] = 1.0
ll[1,3] = 2.0
print(ll)
print(ll.to_csr())
print(ll[1,3])
print(ll[1,-1])
print(ll.nnz)
ll.export_mtx('test.mtx')
L = spmatrix.ll_mat(10, 10)
for i in range(0, 10):
L[i,i] = float(i+1)
A = L.to_csr()
x = np.ones([10], 'd')
y = np.zeros([10], 'd')
print(A, x, y)
A.matvec(x, y)
print(y)
ll = spmatrix.ll_mat(100, 100)
for i in range(0, 100, 5):
for j in range(0, 100, 4):
ll[i,j] = 1.0/float(i+j+1)
A = ll.to_csr()
x = np.arange(100).astype(np.float)
y = np.zeros(100, 'd')
z = np.zeros(100, 'd')
A.matvec(x, y)
print(y)
print('norm(y) = ', math.sqrt(np.add.reduce(y)))
##A.matvec_transp(x, z)
##print z
##print 'norm(z) = ', math.sqrt(np.add.reduce(z))
L = spmatrix.ll_mat(10,10)
for i in range(10):
L[i,i] = float(i+1)
A = L.to_csr()
print(A)
x = np.zeros(10, 'd')
b = np.ones(10, 'd')
info, iter, relres = pcg(A, b, x, 1e-8, 100)
print(info, iter, relres)
print(x)
if (info != 0):
print('cg not converged', file=sys.stderr)
L2 = L.copy()
x = np.zeros(10, 'd')
info, iter, relres = pcg(A, b, x, 1e-8, 100)
print(info, iter, relres)
# -----------------------------------------------------------
print('remove test')
n = 100
L = spmatrix.ll_mat(n, n)
for run in range(5):
print('adding elements...')
for i in range(0,n,2):
for j in range (n):
L[i,j] = i+j+1
# print L
print(L.nnz)
print('removing elements...')
for j in range(0,n,2):
for i in range (n):
L[i,j] = 0.0
# print L
print(L.nnz)
# -----------------------------------------------------------
print('submatrix test')
n = 100
L = spmatrix.ll_mat(n, n)
for i in range (0, n, 2):
for j in range (1, n, 2):
L[i,j] = float(n*i + j);
print(L[10:18,75:80])
print(L[10:15,35:10])
print(L[19:15,35:10])
# -----------------------------------------------------------
print('submatrix assign test')
n = 10
L = spmatrix.ll_mat(n, n);
for i in range (0, n, 1):
for j in range (0, n, 1):
L[i,j] = 1.0;
print(L)
Z = spmatrix.ll_mat(n-2, n-2)
L[1:n-1,1:n-1] = Z
print(L)
print(L.nnz)
#------------------------------------------------------------
if 0:
f = open(os.environ['HOME']+'/matrices/poi2d_300.mtx')
t1 = time.clock()
L = ll_mat_from_mtx(f)
t_read = time.clock() - t1
f.close()
print('time for reading matrix data from file: %.2f sec' % t_read)
if 1:
t1 = time.clock()
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/poi2d_300.mtx')
t_read = time.clock() - t1
print('time for reading matrix data from file: %.2f sec' % t_read)
#------------------------------------------------------------
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/node4x3x1_A.mtx')
print(L.shape, L.nnz)
A = L.to_sss()
class diag_prec:
    """Diagonal (Jacobi) preconditioner: applies y = D^-1 x for D = diag(A).

    A must support 2-D indexing A[i, i] and expose a .shape attribute.
    """
    def __init__(self, A):
        self.shape = A.shape
        n = self.shape[0]
        # Reciprocal of the matrix diagonal, as a double-precision vector.
        self.dinv = np.array([1.0 / A[i, i] for i in range(n)], 'd')
    def precon(self, x, y):
        """Write D^-1 * x into the preallocated output vector *y* (in place)."""
        np.multiply(x, self.dinv, y)
def resid(A, b, x):
    """Return the Euclidean norm of the residual b - A*x.

    *A* must expose ``matvec(x, out)`` writing A*x into *out*; *b* and *x*
    are 1-D numpy vectors.
    """
    Ax = x.copy()
    A.matvec(x, Ax)
    diff = b - Ax
    return math.sqrt(np.dot(diff, diff))
K_diag = diag_prec(A)
K_jac = precon.jacobi(A, 1.0, 1)
K_ssor = precon.ssor(A, 1.0, 1)
# K_ilu = precon.ilutp(L)
n = L.shape[0];
b = np.arange(n).astype(np.Float)
x = np.zeros(n, 'd')
info, iter, relres = pcg(A, b, x, 1e-6, 1000)
print('pcg, K_none: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = pcg(A, b, x, 1e-6, 1000, K_diag)
print('pcg, K_diag: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = pcg(A, b, x, 1e-6, 1000, K_jac)
print('pcg, K_jac: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = pcg(A, b, x, 1e-6, 1000, K_ssor)
print('pcg, K_ssor: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = minres(A, b, x, 1e-6, 1000)
print('minres, K_none: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = minres(A, b, x, 1e-6, 1000, K_diag)
print('minres, K_diag: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = minres(A, b, x, 1e-6, 1000, K_jac)
print('minres, K_jac: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = minres(A, b, x, 1e-6, 1000, K_ssor)
print('minres, K_ssor: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = qmrs(A, b, x, 1e-6, 1000)
print('qmrs, K_none: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = qmrs(A, b, x, 1e-6, 1000, K_diag)
print('qmrs, K_diag: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = qmrs(A, b, x, 1e-6, 1000, K_jac)
print('qmrs, K_jac: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = qmrs(A, b, x, 1e-6, 1000, K_ssor)
print('qmrs, K_ssor: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = cgs(A, b, x, 1e-6, 1000)
print('cgs, K_none: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = cgs(A, b, x, 1e-6, 1000, K_diag)
print('cgs, K_diag: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = cgs(A, b, x, 1e-6, 1000, K_jac)
print('cgs, K_jac: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = cgs(A, b, x, 1e-6, 1000, K_ssor)
print('cgs, K_ssor: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = bicgstab(A, b, x, 1e-6, 1000)
print('bicgstab, K_none: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = bicgstab(A, b, x, 1e-6, 1000, K_diag)
print('bicgstab, K_diag: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = bicgstab(A, b, x, 1e-6, 1000, K_jac)
print('bicgstab, K_jac: ', info, iter, relres, resid(A, b, x))
x = np.zeros(n, 'd')
info, iter, relres = bicgstab(A, b, x, 1e-6, 1000, K_ssor)
print('bicgstab, K_ssor: ', info, iter, relres, resid(A, b, x))
#------------------------------------------------------------
import superlu
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/cop18_el3_A.mtx')
##f = open('cop18_el5_A.mtx')
##L = ll_mat_from_mtx(f)
##f.close()
n11 = 4688
L = L[0:n11, 0:n11] # extract (1,1)-block
# make matrix regular
for i in range(n11):
L[i,i] = 1
print(L.shape, L.nnz)
n = L.shape[0]
B = L.to_csr()
su = superlu.factorize(B, diag_pivot_thresh=0.0)
print(su.nnz)
b = np.arange(n).astype(np.Float) / n
x = np.zeros(n, 'd')
su.solve(b, x)
print('norm(b) = %g' % math.sqrt(np.dot(b, b)))
print('norm(x) = %g' % math.sqrt(np.dot(x, x)))
r = np.zeros(n, 'd')
B.matvec(x, r)
r = b - r
print('norm(b - A*x) = %g' % math.sqrt(np.dot(r, r)))
if 1:
for panel_size in [5, 10, 15]:
for relax in [1, 3, 5]:
for permc_spec in [0, 1, 2]:
for diag_pivot_thresh in [0.0, 0.5, 1.0]:
t1 = time.clock()
su = superlu.factorize(B,
panel_size=panel_size,
relax=relax,
permc_spec=permc_spec,
diag_pivot_thresh=diag_pivot_thresh)
t_fact = time.clock() - t1
t1 = time.clock()
su.solve(b, x)
t_solve = time.clock() - t1
print('panel_size=%2d, relax=%d, permc_spec=%d, diag_pivot_thresh=%.1f nnz=%d, t_fact=%.2f, t_solve=%.2f' % \
(panel_size, relax, permc_spec, diag_pivot_thresh, su.nnz, t_fact, t_solve))
| StarcoderdataPython |
3306831 | # -*- coding: utf-8 -*-
import bimon.core
def main():
    # Entry point: instantiate and run the BiMon application (bimon.core).
    bimon.core.BiMon().run()
| StarcoderdataPython |
90616 | <reponame>ProjectPepperHSB/Backend-Services<filename>get-mensa-data/get_mensa_data.py
#pip install pdf2image
#-- poppler --
#https://github.com/oschwartz10612/poppler-windows/releases/
#env: "C:\path\to\poppler-xx\bin"
import requests
import urllib.request
import json
import re
import traceback
from bs4 import BeautifulSoup
from pdf2image import convert_from_path
from PIL import Image
url = "https://www.stw-bremen.de/de/cafeteria/bremerhaven"
folder_location = "/home/docker-hbv-kms/repositories/NodeJS_Server4Pepper/public/"
def getMensaData():
    """Scrape the Bremerhaven cafeteria page: download the weekly menu PDF,
    render it to a cropped PNG, and extract the two daily offers per weekday
    into mensadata.json.  All errors are logged and swallowed (best-effort
    cron-style job)."""
    try:
        #download newest cafeteria plan
        response = requests.get(url)
        soup= BeautifulSoup(response.text, "html.parser")
        # The print link's href is extracted from the raw anchor markup.
        menucard = ((str(soup.select("a[href$='/print']")[0]).rsplit(' target', 1)[0]).split("href=",1)[1]).replace('"','')
        urllib.request.urlretrieve(menucard, folder_location+"Mensaplan.pdf")
        print("pdf downloaded")
        #pdf to img (first page only, 500 dpi)
        img = convert_from_path(folder_location+"Mensaplan.pdf", 500)[0]
        # Crop two bands of the rendered page and stack them into one image.
        # NOTE(review): the pixel boxes assume the 500-dpi page layout of the
        # current PDF template -- confirm if the plan format changes.
        area = (0, 200, 4134, 2800) # R T L B
        cropped_img = img.crop(area)
        area = (0, 5200, 4134, 5800)
        cropped_img2 = img.crop(area)
        mergeImgs([cropped_img, cropped_img2]).save(folder_location+'images/mensaplan.png', 'JPEG')
        print("images created from pdf")
        #get all tbody tags from the site
        menulist = soup.select("tbody")
        menu = {}
        day = ["Montag","Dienstag", "Mittwoch", "Donnerstag", "Freitag"]
        offer1 = []
        offer2 = []
        # Tables alternate offer 1 / offer 2 for each of the five weekdays.
        for i in range(10):
            tmp = (str(menulist[i]).split('description">',1)[1]).rsplit("</td><td",2)[0]
            # strip newlines, footnote markers and HTML entities/superscripts
            tmp = tmp.replace("\n","").replace("\r","").replace("a1","").replace("amp;","")
            tmp = re.sub('<sup>.*?</sup>', '', tmp)
            if(i%2==0):
                offer1.append(tmp)
            else:
                offer2.append(tmp)
        menu["day"] = day
        menu["offer1"] = offer1
        menu["offer2"] = offer2
        with open(folder_location+"mensadata.json", "w+") as f:
            json.dump(menu, f, ensure_ascii=False)
        print("menu dataframe created")
    except Exception:
        traceback.print_exc()
def mergeImgs(imgs):
    """Stack *imgs* vertically into one image.

    Any image wider than the narrowest input is scaled down (preserving
    aspect ratio) so that all parts share one width.  Mutates the *imgs*
    list in place (resized copies replace the originals).
    """
    min_img_width = min(i.width for i in imgs)
    total_height = 0
    for i, img in enumerate(imgs):
        # If the image is larger than the minimum width, resize it.
        if img.width > min_img_width:
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
            # same filter under its canonical name.
            imgs[i] = img.resize(
                (min_img_width, int(img.height / img.width * min_img_width)),
                Image.LANCZOS)
        total_height += imgs[i].height
    img_merge = Image.new(imgs[0].mode, (min_img_width, total_height))
    y = 0
    for img in imgs:
        img_merge.paste(img, (0, y))
        y += img.height
    return img_merge
getMensaData()  # runs on import/execution -- presumably invoked by a scheduler (cron); confirm
1711286 | <filename>vim_debug/subwindows.py
from window import VimWindow
import errors
import base64
class StackWindow(VimWindow):
    '''Displays the current execution stack, most recent frame first.'''
    name = 'STACK'
    dtext = '[[Execution Stack - most recent call first]]'

    def __init__(self, name = None):
        VimWindow.__init__(self, name)
        self.at = 0  # index of the currently highlighted frame

    def refresh(self, node):
        """Rebuild the window from a <stack> response *node*.

        Returns the top (most recent) frame as a
        (level, where, filename, lineno) tuple of strings.
        """
        self.at = 0
        stack = node.getElementsByTagName('stack')
        # Materialize each frame as a tuple up front.  The original stored
        # one-shot map iterators; under Python 3 the formatting pass below
        # exhausted them, so the frame returned to the caller was empty.
        self.stack = [tuple(item.getAttribute(attr)
                            for attr in ('level', 'where', 'filename', 'lineno'))
                      for item in stack]
        self.clear()
        tpl = '%-2s %-15s %s:%s'
        self.writelines([tpl % frame for frame in self.stack])
        self.highlight(0)
        return self.stack[0]

    def on_create(self):
        self.command('highlight CurStack term=reverse ctermfg=White ctermbg=Red gui=reverse')
        self.highlight(0)

    def highlight(self, num):
        # Re-apply syntax so only stack level *num* is marked as current.
        self.command('syntax clear')
        self.command('syntax region CurStack start="^%d " end="$"' % num)
class LogWindow(VimWindow):
    '''Scratch window that accumulates the raw debugger traffic log.'''
    name = 'LOG'
    dtext = '[[Logs all traffic]]'

    def on_create(self):
        # no wrapping; {{{ }}} marker folds, all folds closed by default
        self.command('set nowrap fdm=marker fmr={{{,}}} fdl=0')
class OutputWindow(VimWindow):
    '''Logs the stdout + stderr'''
    name = 'STDOUT_STDERR'
    dtext = '[[Stdout and Stderr are copied here for your convenience]]\n'

    def on_create(self):
        self.command('set wrap fdm=marker fmr={{{,}}} fdl=0')
        self.command('setlocal wfw')
        # Stream the last chunk came from; a [[STDOUT]]/[[STDERR]] banner is
        # emitted only when the stream switches.
        self.last = 'stdout'

    def add(self, type, text):
        """Append *text* from stream *type* ('stdout' or 'stderr')."""
        # TODO: highlight stderr
        if type != self.last:
            self.last = type
            if type == 'stderr':
                self.write('[[STDERR]]')
            else:
                self.write('[[STDOUT]]')
        lines = text.split('\n')
        # First fragment continues the current last buffer line;
        # remaining fragments become new lines.
        self.buffer[-1] += lines[0]
        for line in lines[1:]:
            self.buffer.append(line)
        self.command('normal G')
class WatchWindow:
    ''' Paired windows: one for typing watch expressions, one for their
    evaluation results (aligned line-by-line). '''

    def __init__(self):
        self.expressions = VimWindow('WATCH')
        self.expressions.dtext = '[[Type expressions here]]'
        self.results = VimWindow('RESULTS')
        self.results.dtext = '[[type \w for them to be evaluated]]'

    def create(self, where=None):
        # expressions on top, results in a vertical split below-right
        self.expressions.create('leftabove new')
        self.results.create('vertical belowright new')

    def destroy(self):
        self.expressions.destroy()
        self.results.destroy()

    def set_result(self, line, node):
        """Write the eval result parsed from response *node* onto results
        buffer line *line*, padding the buffer as needed."""
        l = len(self.results.buffer)
        # pad so index *line* exists
        for a in range(len(self.results.buffer)-1, line):
            self.results.buffer.append('')
        errors = node.getElementsByTagName('error')
        if len(errors):
            res = 'ERROR: ' + str(get_child_text(errors[0], 'message'))
        else:
            # prefer the property's own text, fall back to its <value> child
            prop = node.getElementsByTagName('property')[0]
            res = str(get_text(prop))
            if not res:
                res = str(get_child_text(prop, 'value'))
        self.results.buffer[line] = res
def get_text(node):
    """Return the text content of DOM element *node*.

    Returns '' when the node has no text child.  If the element carries
    encoding="base64" the decoded bytes are returned.
    """
    if not hasattr(node.firstChild, 'data'):
        return ''
    data = node.firstChild.data
    if node.getAttribute('encoding') == 'base64':
        # base64.decodestring was removed in Python 3.9; b64decode is the
        # portable replacement (accepts str, returns bytes).
        return base64.b64decode(data)
    return data
def get_child_text(node, child_tag):
    """Return the text of the first *child_tag* child of *node*, or ''."""
    for child in node.getElementsByTagName(child_tag):
        return get_text(child)
    return ''
class ScopeWindow(VimWindow):
    ''' lists the current scope (context) '''
    name = 'SCOPE'
    dtext = '[[Current scope variables...]]'

    def refresh(self, node):
        """Rebuild the window from a context response *node*, writing one
        "name = value /* type */" line per <property> element."""
        self.clear()
        for child in node.getElementsByTagName('property'):
            name = child.getAttribute('fullname')
            vtype = child.getAttribute('type')
            if not name:
                # fullname/value supplied as child elements instead
                text = get_child_text(child, 'value')
                name = get_child_text(child, 'fullname')
            else:
                if not child.firstChild:
                    text = ''
                elif hasattr(child.firstChild, 'data'):
                    text = child.firstChild.data
                else:
                    text = ''
                if child.hasAttribute('encoding') and child.getAttribute('encoding') == 'base64':
                    # base64.decodestring was removed in Python 3.9;
                    # b64decode is the portable replacement.
                    text = base64.b64decode(text)
            self.write('%-20s = %-10s /* type: %s */' % (name, text, vtype))
help_text = '''\
[ Function Keys ] |
<F1> resize | [ Normal Mode ]
<F2> step into | ,e eval
<F3> step over |
<F4> step out |
<F5> run | [ Command Mode ]
<F6> quit debugging | :Bp toggle breakpoint
| :Up stack up
<F11> get all context | :Dn stack down
<F12> get property at cursor |
'''
# vim: et sw=4 sts=4
| StarcoderdataPython |
3396333 | <reponame>sony/nnabla-nas
# Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import module as Mo
class Model(Mo.Module):
    r"""This class is a base `Model`. Your model should be based on this
    class.
    """

    def get_net_parameters(self, grad_only=False):
        r"""Returns an `OrderedDict` containing all network parameters of
        the model.

        Args:
            grad_only (bool, optional): If sets to `True`, then only
                parameters with `need_grad=True` will be retrieved. Defaults
                to `False`.

        Raises:
            NotImplementedError: must be implemented by subclasses.
        """
        raise NotImplementedError

    def get_arch_parameters(self, grad_only=False):
        r"""Returns an `OrderedDict` containing all architecture parameters of
        the model.

        Args:
            grad_only (bool, optional): If sets to `True`, then only
                parameters with `need_grad=True` will be retrieved. Defaults
                to `False`.

        Raises:
            NotImplementedError: must be implemented by subclasses.
        """
        raise NotImplementedError

    def summary(self):
        r"""Returns a string summarizing the model (empty by default)."""
        return ''

    def loss(self, outputs, targets, loss_weights=None, *args):
        r"""Return a loss computed from a list of outputs and a list of targets.

        Args:
            outputs (list of nn.Variable): A list of output variables
                computed from the model.
            targets (list of nn.Variable): A list of target variables
                loaded from the data.
            loss_weights (list of float, optional): A list specifying scalar
                coefficients to weight the loss contributions of different
                model outputs. It is expected to have a 1:1 mapping to model
                outputs. Defaults to None.

        Returns:
            nn.Variable: A scalar NNabla Variable represents the loss.

        Raises:
            NotImplementedError: must be implemented by subclasses.
        """
        raise NotImplementedError

    def metrics(self, outputs, targets):
        r"""Return a dictionary of metrics to monitor during training.

        It is expected to have a 1:1 mapping between the model outputs
        and targets.

        Args:
            outputs (list of nn.Variable): A list of output variables
                (nn.Variable) computed from the model.
            targets (list of nn.Variable): A list of target variables
                (nn.Variable) loaded from the data.

        Returns:
            dict: A dictionary containing all metrics to monitor, e.g.,
                {
                    'accuracy': nn.Variable((1,)),
                    'F1': nn.Variable((1,))
                }

        Raises:
            NotImplementedError: must be implemented by subclasses.
        """
        raise NotImplementedError
| StarcoderdataPython |
30215 | <filename>robotframework-ls/tests/robotframework_ls_tests/_resources/case_vars_file/robotvars.py
# Variable file consumed by Robot Framework suites (per the source path
# case_vars_file/robotvars.py); each module-level name becomes a suite
# variable such as ${VARIABLE_1}.
VARIABLE_1 = 10
VARIABLE_2 = 20
| StarcoderdataPython |
165244 | <filename>Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalNoise_cff.py
import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
# HLT filter: accept events in which HLT_MET25 fired; unknown path names are
# tolerated (throw=False) rather than aborting the job.
noiseHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    HLTPaths = ['HLT_MET25'],
#    eventSetupPathsKey='HcalCalNoise',
    throw = False #dont throw except on unknown path name
)

# Prescale filter driven by the HLT trigger results.
prescaler = cms.EDFilter("PrescalerFHN",
    TriggerResultsTag = cms.InputTag("TriggerResults", "", "HLT"),
    # Will select OR of all specified HLTs
    # And increment if HLT is seen, regardless if
    # others cause selection
    Prescales = cms.VPSet(
        cms.PSet(
            HLTName = cms.string("HLT_MET25"),
            PrescaleFactor = cms.uint32(1)
        )
#        cms.PSet(
#            name = cms.string("HLTPath2"),
#            factor = cms.uint32(100)
#        )
    ))

from Calibration.HcalAlCaRecoProducers.alcahcalnoise_cfi import *

# Full AlCaReco sequence: HLT selection -> prescale -> HCAL noise producer.
seqALCARECOHcalCalNoise = cms.Sequence(noiseHLT*prescaler*HcalNoiseProd)
| StarcoderdataPython |
3357907 | <filename>setup.py
#!/usr/bin/python -tt
# coding:utf-8
from setuptools import setup
if __name__ == '__main__':
    # Packaging metadata for the GoCD pipeline builder; the console_scripts
    # entries below are the public command-line entry points.
    setup(
        name='gocdpb',
        version='9.2',
        description='Configure GoCD pipeline from the commandline.',
        long_description=(
            'The Go CD Pipeline Builder is designed to have the same '
            'function in the GoCD eco system, as the Jenkins Job Builder '
            'has in the Jenkins eco system. '
            'Given a (git) repository and appropriate configuration, '
            'it should be able to add a suitable pipeline to a Go-server. '
            'The current version does not trigger on git events. '
            'It is simply a command line driven tool.'
        ),
        author='<NAME>',
        author_email='<EMAIL>',
        license='MIT',
        url='https://github.com/magnus-lycka/gocd-pipeline-builder',
        classifiers=[
            "Programming Language :: Python :: 2.7",
            "License :: OSI Approved :: MIT License",
            "Operating System :: OS Independent",
            "Development Status :: 4 - Beta",
            "Intended Audience :: Developers",
            "Topic :: Software Development :: Build Tools",
            "Environment :: Console",
        ],
        keywords='continuous deployment integration build automation go.cd',
        package_dir={'gocdpb': 'src/gocdpb'},
        packages=['gocdpb'],
        install_requires=['jinja2', 'requests', 'PyYAML'],
        entry_points={
            'console_scripts': [
                'gocdpb=gocdpb.gocdpb:main',
                'gocdrepos=gocdpb.gocdpb:repos',
                'gocdtagrepos=gocdpb.tagrepos:main',
                'gocdbranchrepos=gocdpb.tagrepos:main_branchrepos',
                'gocdupdaterepolist=gocdpb.tagrepos:main_updaterepolist',
            ]
        }
    )
| StarcoderdataPython |
1786532 | <filename>t66y-spider/src/http_request/__init__.py
from urllib.parse import urlsplit
import urllib3
from log import LoggerObject
urllib3.disable_warnings()
class Downloader(LoggerObject):
    """Wrapper around an urllib3 pool that persists per-domain session
    cookies (PHPSESSID) and ETags between requests."""

    def __init__(self, response_processor=None, num_pools=10, **kw):
        super().__init__("downloader")
        if kw.get("proxy_url") is not None:
            # (removed a leftover debug print(11) here)
            self.logger.info("user proxy: %s" % kw.get("proxy_url"))
            self.__pool = urllib3.ProxyManager(num_pools=num_pools, proxy_url=kw["proxy_url"])
        else:
            self.__pool = urllib3.PoolManager(num_pools=num_pools, timeout=18, retries=3)
        self.__headers = kw.get("headers", None)
        self.__response_processor = response_processor
        # domain -> {"Cookie": ..., "If-None-Match": ...}
        self.__headers_cache = dict()

    def __request(self, url, method, fields=None, headers=None, body=None, response_processor=None):
        """Issue the request, update the per-domain cookie/ETag cache, and
        hand the (possibly None) response to the processor."""
        if headers is None:
            headers = self.__headers
        response = None
        try:
            _domain = urlsplit(url).netloc
            cookies = self.__headers_cache.get(_domain, dict())
            headers.update(cookies)
            if body is not None:
                # urllib3 rejects a request carrying both "fields" and
                # "body"; the original dropped "body" entirely.
                response = self.__pool.request(method, url=url, headers=headers, body=body)
            else:
                response = self.__pool.request(method, url=url, fields=fields, headers=headers)
            response_headers = response.headers
            _cookie = None
            if "Set-Cookie" in response_headers:
                _cookie = response_headers.get("Set-Cookie").split(";")
            elif "set-cookie" in response_headers:
                # original read "Set-Cookie" in this branch
                _cookie = response_headers.get("set-cookie").split(";")
            if _cookie is None:
                _cookie = list()
            # remember the PHP session id for this domain
            tmp_list = list(filter(lambda cookie: "PHPSESSID" in cookie, _cookie))
            if len(tmp_list) == 1:
                phpsessid = tmp_list[0][tmp_list[0].index("PHPSESSID"):]
                cookies.update({"Cookie": phpsessid})
            etag = response_headers.get("ETag")
            if etag is None:
                etag = response_headers.get("etag")
            if etag is not None:
                cookies.update({"If-None-Match": etag})
            self.__headers_cache.update({_domain: cookies})
        except Exception as e:
            # Log the actual failure; the original discarded the exception
            # and labelled every error a timeout.
            self.logger.error("request failed: %s (%s)", url, e)
        if response_processor is None:
            return self.__response_processor.process(response)
        else:
            return response_processor.process(response)

    def get(self, url, fields=None, headers=None, response_processor=None):
        self.logger.debug("GET %s" % url)
        return self.__request(url=url, method="GET", fields=fields, headers=headers,
                              response_processor=response_processor)

    def post(self, url, fields=None, headers=None, body=None, response_processor=None):
        self.logger.debug("POST %s" % url)
        return self.__request(url=url, method="POST", fields=fields, headers=headers, body=body,
                              response_processor=response_processor)

    @property
    def response_processor(self):
        return self.__response_processor

    @response_processor.setter
    def response_processor(self, response_processor):
        self.__response_processor = response_processor
class ResponseProcessor(LoggerObject):
    """Base class: validates an HTTP response and delegates to _process."""

    def __init__(self):
        super().__init__("responseProcess")

    def process(self, response):
        """Return the processed payload, or None for a missing/non-200 response."""
        if response is None:
            # The original formatted response.geturl() here, raising
            # AttributeError on the very None it was reporting.
            self.logger.error("response is None")
            return None
        if response.status != 200:
            self.logger.error("%s response code: %s" % (response.geturl(), str(response.status)))
            return None
        return self._process(response)

    def _process(self, response):
        # Hook for subclasses; the base implementation returns None.
        pass
class HtmlResponseProcessor(ResponseProcessor):
    """Decodes the response body to text using a configurable charset
    (GBK by default; undecodable bytes are dropped)."""
    def __init__(self, charset="gbk"):
        super().__init__()
        self.__charset = charset

    def _process(self, response):
        return response.data.decode(self.__charset, "ignore")
class FileResponseProcessor(ResponseProcessor):
    """Returns the raw response bytes (e.g. for file/image downloads)."""
    def _process(self, response):
        return response.data
| StarcoderdataPython |
3230058 | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ujson
from polyaxon.utils.formatting import Printer, dict_tabulate, dict_to_tabulate
def get_entity_details(entity: any, entity_name: str):
    """Pretty-print an entity's description, settings and readme (when set),
    then tabulate its remaining fields via Printer."""
    if entity.description:
        Printer.print_heading("{} description:".format(entity_name))
        Printer.print("{}\n".format(entity.description))

    if entity.settings:
        Printer.print_heading("{} settings:".format(entity_name))
        Printer.print(
            "{}\n".format(
                entity.settings.to_dict()
                if hasattr(entity.settings, "to_dict")
                else entity.settings
            )
        )

    if entity.readme:
        Printer.print_heading("{} readme:".format(entity_name))
        Printer.print_md(entity.readme)

    # Tabulate everything else, excluding the sections already printed.
    response = dict_to_tabulate(
        entity.to_dict(),
        humanize_values=True,
        exclude_attrs=["description", "settings", "readme"],
    )
    Printer.print_heading("{} info:".format(entity_name))
    dict_tabulate(response)
def handle_output(response: any, output: str):
    """Emit *response* per the *output* spec: "json" pretty-prints to the
    console; "path=<file>" writes JSON to that file."""
    if output == "json":
        Printer.pprint(response)
        return
    if "path=" in output:
        # str.strip removes *characters*, not a prefix: the original
        # 'output.strip("path=")' corrupted paths ending in p/a/t/h/=
        # (e.g. "path=/tmp/data" -> "/tmp/d").  Split on the marker instead.
        json_path = output.split("path=", 1)[1]
        with open(json_path, "w", encoding="utf8", newline="") as output_file:
            output_file.write(ujson.dumps(response))
| StarcoderdataPython |
3342130 | #!/usr/bin/env python3
"""
BSc University Grade Viewer.
Usage:
grades.py [--y1] [--y2] [--y3] [--y4] [--pause]
grades.py -h | --help | -v | --version
Options:
-h --help Show this screen.
-v --version Show version.
-p --pause Pause between each graph
--y1 Show year one
--y2 Show year two
--y3 Show year three
--y4 Show year four
Dependencies:
pip install matplotlib
pip install docopt
Examples:
grades.py --y3 --y4 --pause
grades.py --y1 --y2
"""
from matplotlib import pyplot as plot
from docopt import docopt
year_one = {
"2D Graphics Programming": 81,
"Computing Systems": 60.03,
"Mathematics of Space & Change": 79,
"Introduction to Programming": 65,
"Intro to Games Development": 74,
"Creative Computing Profession": 61.5
}
year_two = {
"Real Time 3D Graphics": 81.08,
"Interactive Physical Modelling": 68,
"Computing Project": 84,
"Structures and Algorithms": 75.8,
"Computer Games Design": 73,
"Game Engine Design": 82.2
}
year_three = {
"Computer Game AI": 85,
"Games Technology Project": 81.6,
"Mobile Games Development": 87.83,
"Games Project : Design and Plan": 71.4,
"Advanced Games Programming": 92.8,
"Algorithms and Collections": 79.2
}
year_four = {
"Games Console Development": 85.8,
"Serious Games": 81,
"GPGPU & Accelerator Programming": 86,
"Computing Honours Project": 85.5,
"3D Level Design": 95
}
def next_figure():
    """Hand out sequential matplotlib figure numbers, starting at 1."""
    value = next_figure.counter + 1
    next_figure.counter = value
    return value

next_figure.counter = 0
def show_year(graph_title, year, pause):
    """Plot one academic year's module grades and the year average.

    year: mapping of module name -> grade percentage.
    pause: if True, block on plot.show() before the next figure is built.
    """
    grades = list(year.values())
    grade_count = len(grades)
    labels = list(range(grade_count))
    average = sum(grades) / grade_count
    plot.rcParams["figure.figsize"] = (12, 6)
    fig = plot.figure(next_figure())
    # NOTE(review): set_window_title moved to fig.canvas.manager in newer
    # matplotlib releases -- confirm against the installed version.
    fig.canvas.set_window_title('University Grades')
    plot.title(graph_title)
    plot.plot(labels, list(year.values()))
    plot.plot(labels, list(year.values()), 'gs')  # green square markers
    # horizontal average line, extended one unit beyond each end
    plot.plot([-1] + labels + [grade_count + 1], [average] * (2 + grade_count))
    plot.axis([-1, len(grades), 0, 100])
    plot.xticks(labels, list(year.keys()), fontsize=5)
    plot.ylabel('Grades %')
    if pause:
        plot.show()
def main():
    """Parse the docopt flags from the module docstring and plot each
    requested year."""
    config = docopt(__doc__, version='0.1')
    pause = config['--pause']
    if config['--y1']:
        show_year("Year One", year_one, pause)
    if config['--y2']:
        show_year("Year Two", year_two, pause)
    if config['--y3']:
        show_year("Year Three", year_three, pause)
    if config['--y4']:
        show_year("Year Four", year_four, pause)
    if not pause:
        # show all figures at once at the end
        plot.show()
if __name__ == "__main__":
main()
| StarcoderdataPython |
18041 | <gh_stars>1-10
"""Support for IHC binary sensors."""
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import CONF_TYPE
from . import IHC_CONTROLLER, IHC_INFO
from .const import CONF_INVERTING
from .ihcdevice import IHCDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the IHC binary sensor platform from discovery info."""
    if discovery_info is None:
        return
    devices = []
    for name, device in discovery_info.items():
        ihc_id = device["ihc_id"]
        product_cfg = device["product_cfg"]
        product = device["product"]
        # Find controller that corresponds with device id
        ctrl_id = device["ctrl_id"]
        # Reconstructed: this line was a redacted f-string in the dataset
        # copy (a syntax error); upstream Home Assistant uses f"ihc{ctrl_id}".
        ihc_key = f"ihc{ctrl_id}"
        info = hass.data[ihc_key][IHC_INFO]
        ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
        sensor = IHCBinarySensor(
            ihc_controller,
            name,
            ihc_id,
            info,
            product_cfg.get(CONF_TYPE),
            product_cfg[CONF_INVERTING],
            product,
        )
        devices.append(sensor)
    add_entities(devices)
class IHCBinarySensor(IHCDevice, BinarySensorDevice):
    """IHC Binary Sensor.

    The associated IHC resource can be any in or output from a IHC product
    or function block, but it must be a boolean ON/OFF resources.
    """

    def __init__(
        self,
        ihc_controller,
        name,
        ihc_id: int,
        info: bool,
        sensor_type: str,
        inverting: bool,
        product=None,
    ) -> None:
        """Initialize the IHC binary sensor."""
        super().__init__(ihc_controller, name, ihc_id, info, product)
        self._state = None  # unknown until the first resource update arrives
        self._sensor_type = sensor_type
        self.inverting = inverting

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._sensor_type

    @property
    def is_on(self):
        """Return true if the binary sensor is on/open."""
        return self._state

    def on_ihc_change(self, ihc_id, value):
        """IHC resource has changed; apply optional inversion and refresh HA."""
        if self.inverting:
            self._state = not value
        else:
            self._state = value
        self.schedule_update_ha_state()
| StarcoderdataPython |
3352031 | <reponame>cyberphantom/Selfie-Drone-Stick
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from phone import phone_IO
from drone import drone_IO
if __name__ == '__main__':
''' After Starting the ARDrone, We start getting Images from the Phone'''
rospy.init_node('ucf_selfiestick_drone', anonymous=True)
dio = drone_IO()
phio = phone_IO()
try:
phio.phone_subscriber()
dio.drone_subscriber()
rospy.spin()
if rospy.is_shutdown():
dio.co.SendLand()
dio.co.SendLand()
except CvBridgeError as e:
print(e)
except KeyboardInterrupt:
print("Shutting down")
except rospy.ROSInterruptException:
pass
| StarcoderdataPython |
16125 | # Create your views here.
from django.core.urlresolvers import reverse
from django.http.response import JsonResponse, HttpResponse
from settings.settings import AUTHORIZED_KEYS_FILE, SITE_URL
from bioshareX.models import Share, SSHKey, MetaData, Tag
from bioshareX.forms import MetaDataForm, json_form_validate
from guardian.shortcuts import get_perms, get_users_with_perms, remove_perm, assign_perm
from bioshareX.utils import JSONDecorator, json_response, json_error, share_access_decorator, safe_path_decorator, validate_email, fetchall,\
test_path, du
from django.contrib.auth.models import User, Group
from django.db.models import Q
import os
from rest_framework.decorators import api_view, detail_route, throttle_classes,\
action
from bioshareX.forms import ShareForm
from guardian.decorators import permission_required
from bioshareX.utils import ajax_login_required, email_users
from rest_framework import generics, viewsets, status
from bioshareX.models import ShareLog, Message
from bioshareX.api.serializers import ShareLogSerializer, ShareSerializer,\
GroupSerializer, UserSerializer, MessageSerializer
from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated
from bioshareX.permissions import ManageGroupPermission
from rest_framework.response import Response
from guardian.models import UserObjectPermission
from django.contrib.contenttypes.models import ContentType
import datetime
from bioshareX.api.filters import UserShareFilter, ShareTagFilter,\
GroupShareFilter, ActiveMessageFilter
from rest_framework.throttling import UserRateThrottle
from django.utils import timezone
import csv
@ajax_login_required
def get_user(request):
    # Look up a user by exact username or email; JSON 404 on miss.
    # NOTE: this file is Python 2 ("except Exception, e" syntax).
    query = request.GET.get('query')
    try:
        user = User.objects.get(Q(username=query)|Q(email=query))
        return JsonResponse({'user':UserSerializer(user).data})
    except Exception, e:
        return JsonResponse({'status':'error','query':query,'errors':[e.message]},status=status.HTTP_404_NOT_FOUND)

@ajax_login_required
def get_address_book(request):
    # Distinct emails of everyone the current user has shared with, plus all
    # group names (used by the share-with autocomplete).
    try:
        emails = User.objects.filter(shareuserobjectpermission__content_object__in=Share.objects.filter(owner=request.user).values_list('id')).values_list('email').distinct().order_by('email')
        groups = Group.objects.all().order_by('name')
        return json_response({'emails':[email[0] for email in emails], 'groups':[g.name for g in groups]})
    except Exception, e:
        return json_error([e.message])

@ajax_login_required
def get_tags(request):
    # Case-insensitive substring search over tag names.
    try:
        tags = Tag.objects.filter(name__icontains=request.GET.get('tag'))
        return json_response({'tags':[tag.name for tag in tags]})
    except Exception, e:
        return json_error([e.message])
@share_access_decorator(['admin'])
def share_with(request,share):
    # Classify a comma-separated query into existing users, new users,
    # groups ("group:<name>") and invalid entries.
    query = request.POST.get('query',request.GET.get('query'))
    exists = []
    new_users = []
    groups = []
    invalid = []
    try:
        emails = [email.strip().lower() for email in query.split(',')]
        for email in emails:
            if email == '':
                continue
            if email.startswith('group:'):
                name = email.split('group:')[1].lower()
                try:
                    group = Group.objects.get(name__iexact=name)
                    groups.append({'group':{'id':group.id,'name':group.name}})
                except:
                    invalid.append(name)
            elif validate_email(email):
                try:
                    user = User.objects.get(email=email)
                    exists.append({'user':{'username':email}})
                except:
                    new_users.append({'user':{'username':email}})
            else:
                invalid.append(email)
        return json_response({'exists':exists, 'groups':groups,'new_users':new_users,'invalid':invalid})
    except Exception, e:
        return json_error([e.message])

@ajax_login_required
def share_autocomplete(request):
    # AND together icontains filters, one per whitespace-separated term;
    # return at most the 10 newest matching shares.
    terms = [term.strip() for term in request.GET.get('query').split()]
    query = reduce(lambda q,value: q&Q(name__icontains=value), terms , Q())
    try:
        share_objs = Share.user_queryset(request.user).filter(query).order_by('-created')[:10]
        shares = [{'id':s.id,'url':reverse('list_directory',kwargs={'share':s.id}),'name':s.name,'notes':s.notes} for s in share_objs]
        return json_response({'status':'success','shares':shares})
    except Exception, e:
        return json_error([e.message])

def get_group(request):
    # Resolve a group by exact name.
    query = request.GET.get('query')
    try:
        group = Group.objects.get(name=query)
        return json_response({'group':{'name':group.name}})
    except Exception, e:
        return json_error([e.message])

@api_view(['GET'])
@share_access_decorator(['admin'])
def get_permissions(request,share):
    # Per-user permission map for the share.
    data = share.get_permissions(user_specific=True)
    return json_response(data)

@share_access_decorator(['admin'])
@JSONDecorator
def update_share(request,share,json=None):
    # Only the "secure" flag is editable through this endpoint.
    share.secure = json['secure']
    share.save()
    return json_response({'status':'okay'})
@api_view(['POST'])
@share_access_decorator(['admin'])
@JSONDecorator
def set_permissions(request,share,json=None):
    """Replace group and per-user permissions on *share* from the request
    JSON ({'groups': {...}, 'users': {...}, 'email': bool}).

    Users gaining first-time access are (optionally) emailed; unknown
    usernames with non-empty permissions get an account created for them.
    Returns the resulting permission map plus status messages.
    (Python 2 code: has_key/iteritems retained; removed leftover debug
    prints and an unused smtplib import.)
    """
    emailed=[]
    created=[]
    failed=[]
    if json.has_key('groups'):
        for group, permissions in json['groups'].iteritems():
            g = Group.objects.get(id__iexact=group)
            current_perms = get_perms(g,share)
            removed_perms = list(set(current_perms) - set(permissions))
            added_perms = list(set(permissions) - set(current_perms))
            for u in g.user_set.all():
                # notify members who are gaining access for the first time
                if len(share.get_user_permissions(u,user_specific=True)) == 0 and len(added_perms) > 0 and json['email']:
                    email_users([u],'share/share_subject.txt','share/share_email_body.txt',{'user':u,'share':share,'sharer':request.user,'site_url':SITE_URL})
                    emailed.append(u.username)
            for perm in removed_perms:
                remove_perm(perm,g,share)
            for perm in added_perms:
                assign_perm(perm,g,share)
    if json.has_key('users'):
        for username, permissions in json['users'].iteritems():
            username = username.lower()
            try:
                u = User.objects.get(username__iexact=username)
                if len(share.get_user_permissions(u,user_specific=True)) == 0 and json['email']:
                    try:
                        email_users([u],'share/share_subject.txt','share/share_email_body.txt',{'user':u,'share':share,'sharer':request.user,'site_url':SITE_URL})
                        emailed.append(username)
                    except:
                        failed.append(username)
            except:
                # unknown username: create an account with a random password
                if len(permissions) > 0:
                    password = User.objects.make_random_password()
                    u = User(username=username,email=username)
                    u.set_password(password)
                    u.save()
                    try:
                        email_users([u],'share/share_subject.txt','share/share_new_email_body.txt',{'user':u,'password':password,'share':share,'sharer':request.user,'site_url':SITE_URL})
                        created.append(username)
                    except:
                        # roll back the account if the welcome mail failed
                        failed.append(username)
                        u.delete()
            current_perms = share.get_user_permissions(u,user_specific=True)
            removed_perms = list(set(current_perms) - set(permissions))
            added_perms = list(set(permissions) - set(current_perms))
            for perm in removed_perms:
                if u.username not in failed:
                    remove_perm(perm,u,share)
            for perm in added_perms:
                if u.username not in failed:
                    assign_perm(perm,u,share)
    data = share.get_permissions(user_specific=True)
    data['messages']=[]
    if len(emailed) > 0:
        data['messages'].append({'type':'info','content':'%s has/have been emailed'%', '.join(emailed)})
    if len(created) > 0:
        data['messages'].append({'type':'info','content':'Accounts has/have been created and emails have been sent to the following email addresses: %s'%', '.join(created)})
    if len(failed) > 0:
        data['messages'].append({'type':'info','content':'Delivery has failed to the following addresses: %s'%', '.join(failed)})
    data['json']=json
    return json_response(data)
@share_access_decorator(['view_share_files'])
def search_share(request,share,subdir=None):
    # Filename glob search (*query*) within the share, optionally scoped to
    # a subdirectory.
    from bioshareX.utils import find
    query = request.GET.get('query',False)
    response={}
    if query:
        response['results'] = find(share,"*%s*"%query,subdir)
    else:
        response = {'status':'error'}
    return json_response(response)

@safe_path_decorator()
@share_access_decorator(['write_to_share'])
def edit_metadata(request, share, subpath):
    # Create/update notes and tags attached to a file or folder in the share.
    try:
        if share.get_path_type(subpath) is None:
            raise Exception('The specified file or folder does not exist in this share.')
        metadata = MetaData.objects.get_or_create(share=share, subpath=subpath)[0]
        form = MetaDataForm(request.POST if request.method == 'POST' else request.GET)
        data = json_form_validate(form)
        if not form.is_valid():
            return json_response(data)#return json_error(form.errors)
        tags = []
        # tags shorter than 3 characters are silently dropped
        for tag in form.cleaned_data['tags'].split(','):
            tag = tag.strip()
            if len(tag) >2 :
                tags.append(Tag.objects.get_or_create(name=tag)[0])
        metadata.tags = tags
        metadata.notes = form.cleaned_data['notes']
        metadata.save()
        name = os.path.basename(os.path.normpath(subpath))
        return json_response({'name':name,'notes':metadata.notes,'tags':[tag.name for tag in tags]})
    except Exception, e:
        return json_error([str(e)])

@ajax_login_required
def delete_ssh_key(request):
    # Remove one of the requesting user's SSH keys and scrub every
    # authorized_keys line containing it.
    try:
        id = request.POST.get('id')
        key = SSHKey.objects.get(user=request.user,id=id)
        keystring = key.get_key()
        # rewrite authorized_keys without any line containing the key
        f = open(AUTHORIZED_KEYS_FILE,"r")
        lines = f.readlines()
        f.close()
        f = open(AUTHORIZED_KEYS_FILE,"w")
        for line in lines:
            if line.find(keystring) ==-1:
                f.write(line)
        f.close()
        key.delete()
        # also delete any other DB rows holding the same key material
        SSHKey.objects.filter(key__contains=keystring).delete()
        response = {'status':'success','deleted':id}
    except Exception, e:
        response = {'status':'error','message':'Unable to delete ssh key'+str(e)}
    return json_response(response)
"""
Requires: "name", "notes", "filesystem" arguments.
Optional: "link_to_path", "read_only"
"""
@api_view(['POST'])
@permission_required('bioshareX.add_share', return_403=True)
def create_share(request):
    # Create a share owned by the requester; "link_to_path" is gated behind
    # a dedicated permission.
    form = ShareForm(request.user,request.data)
    if form.is_valid():
        share = form.save(commit=False)
        share.owner=request.user
        link_to_path = request.data.get('link_to_path',None)
        if link_to_path:
            if not request.user.has_perm('bioshareX.link_to_path'):
                return JsonResponse({'error':"You do not have permission to link to a specific path."},status=400)
        try:
            share.save()
        except Exception, e:
            # share.save() may partially create filesystem state; clean up
            share.delete()
            return JsonResponse({'error':e.message},status=400)
        return JsonResponse({'url':"%s%s"%(SITE_URL,reverse('list_directory',kwargs={'share':share.id})),'id':share.id})
    else:
        return JsonResponse({'errors':form.errors},status=400)

@ajax_login_required
@share_access_decorator(['view_share_files'])
def email_participants(request,share,subdir=None):
    # Email everyone with access to the share (optionally restricted to the
    # posted "emails" list); the owner is always included.
    try:
        subject = request.POST.get('subject')
        emails = request.POST.getlist('emails',[])
        users = [u for u in get_users_with_perms(share, attach_perms=False, with_superusers=False, with_group_users=True)]
        if len(emails) > 0:
            users = [u for u in User.objects.filter(id__in=[u.id for u in users]).filter(email__in=emails)]
        body = request.POST.get('body')
        users.append(share.owner)
        email_users(users, ctx_dict={}, subject=subject, body=body,from_email=request.user.email,content_subtype='plain')
        response = {'status':'success','sent_to':[u.email for u in users]}
        return json_response(response)
    except Exception, e:
        return JsonResponse({'errors':[str(e)]},status=400)
class ShareLogList(generics.ListAPIView):
    """Read-only listing of ShareLog entries for shares the user can access."""
    serializer_class = ShareLogSerializer
    permission_classes = (IsAuthenticated,)
    # Substring filters on action/user/text/paths, exact filter on share id.
    filter_fields = {'action':['icontains'],'user__username':['icontains'],'text':['icontains'],'paths':['icontains'],'share':['exact']}
    def get_queryset(self):
        # Restrict logs to shares visible to the requesting user.
        shares = Share.user_queryset(self.request.user,include_stats=False)
        return ShareLog.objects.filter(share__in=shares)
class ShareViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only API over shares visible to the requesting user, with a
    per-share directory-size action and a tab-separated export."""
    serializer_class = ShareSerializer
    permission_classes = (IsAuthenticated,)
    filter_backends = generics.ListAPIView.filter_backends + [UserShareFilter,ShareTagFilter,GroupShareFilter]
    filter_fields = {'name':['icontains'],'notes':['icontains'],'owner__username':['icontains'],'path_exists':['exact']}
    ordering_fields = ('name','owner__username','created','updated','stats__num_files','stats__bytes')
    def get_queryset(self):
        # Only shares the user can access; select/prefetch the relations the
        # serializer touches to avoid N+1 queries.
        return Share.user_queryset(self.request.user,include_stats=False).select_related('owner','stats').prefetch_related('tags','user_permissions__user','group_permissions__group')
    @detail_route(['GET'])
    @throttle_classes([UserRateThrottle])
    def directory_size(self, request, *args, **kwargs):
        # Disk usage of a subdirectory of the share.  Throttled because
        # sizing a large tree is expensive.
        share = self.get_object()
        subdir = request.query_params.get('subdir','')
        # Reject paths that escape the share (path traversal guard).
        test_path(subdir,share=share)
        size = du(os.path.join(share.get_path(),subdir))
        return Response({'share':share.id,'subdir':subdir,'size':size})
    @action(detail=False, methods=['GET'], permission_classes=[IsAuthenticated])
    def export(self, request):
        # Dump the user's shares as a tab-delimited "csv" attachment.
        queryset = self.get_queryset()
        serializer = self.get_serializer(queryset, many=True)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="shares_{}.csv"'.format(str(timezone.now())[:19].replace(' ','_'))
        writer = csv.writer(response, delimiter='\t')
        writer.writerow(['id','name','url','users','groups','bytes','tags','owner','slug','created','updated','secure','read_only','notes','path_exists'])
        for r in serializer.data:
            row = [r['id'],r['name'],r['url'],', '.join(r['users']),', '.join(r['groups']),r['stats'].get('bytes') if r['stats'] else '',', '.join([t['name'] for t in r['tags']]),r['owner'].get('username'),r['slug'],r['created'],r['updated'],r['secure'],r['read_only'],r['notes'],r['path_exists'] ]
            # Python-2 era guard: ascii-encode string-like cells so the csv
            # writer does not choke on non-ascii characters.
            writer.writerow([c.encode('ascii', 'replace') if hasattr(c,'decode') else c for c in row])
        return response
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only group API; staff/superusers see all groups, other users only
    their own.  Membership and per-group management rights are replaced
    wholesale via the update_users action."""
    serializer_class = GroupSerializer
    permission_classes = (IsAuthenticated,DjangoModelPermissions,)
    filter_fields = {'name':['icontains']}
    model = Group
    def get_queryset(self):
        if self.request.user.is_superuser or self.request.user.is_staff:
            return Group.objects.all()
        else:
            return self.request.user.groups.all()
    @detail_route(['POST'],permission_classes=[ManageGroupPermission])
    def update_users(self, request, *args, **kwargs):
        # Replace the group's membership with the posted user list and
        # rebuild the per-user 'manage_group' object permissions from scratch.
        users = request.data.get('users')
        group = self.get_object()
        # old_users = GroupSerializer(group).data['users']
        # old_user_ids = [u['id'] for u in old_users]
        # remove_users = set(old_user_ids) - set(user_ids)
        # add_users = set(user_ids) - set(old_user_ids)
        group.user_set = [u['id'] for u in users]
        #clear permissions (drop all object-level perms on this group)
        ct = ContentType.objects.get_for_model(Group)
        UserObjectPermission.objects.filter(content_type=ct,object_pk=group.id).delete()
        #assign permissions for users flagged as group managers
        for user in users:
            if 'manage_group' in user['permissions']:
                user = User.objects.get(id=user['id'])
                assign_perm('manage_group', user, group)
        return self.retrieve(request,*args,**kwargs)#Response({'status':'success'})
    # @detail_route(['POST'])
    # def remove_user(self,request,*args,**kwargs):
    #     # user = request.query_params.get('user')
    #     # self.get_object().user_set.remove(user)
    #     return Response({'status':'success'})
class MessageViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only site-message API.  'dismiss' records that the requesting
    user has viewed a message so the ActiveMessageFilter stops serving it."""
    serializer_class = MessageSerializer
    permission_classes = (IsAuthenticated,)
    filter_backends = (ActiveMessageFilter,)
    model = Message
    def get_queryset(self):
        # Newest first; ActiveMessageFilter narrows to active/unseen messages.
        return Message.objects.all().order_by('-created')
    # return Message.objects.filter(active=True).filter(Q(expires__gte=datetime.datetime.today())|Q(expires=None)).exclude(viewed_by__id=self.request.user.id)
    @detail_route(['POST','GET'],permission_classes=[IsAuthenticated])
    def dismiss(self, request, pk=None):
        # Mark the message as viewed by this user.
        message = self.get_object()
        message.viewed_by.add(request.user)
        message.save()
        return Response({'status':'Message dismissed'})
| StarcoderdataPython |
4822965 | <filename>app/core/utils.py
import os
import requests
from django.contrib.gis.geos import Point
from rest_framework import status
from typing import Dict
def get_geolocation_from_address(address: str) -> Dict[str, object]:
    """Resolve a street address via the Google Geocoding API.

    Returns a dict with string keys (the previous annotation
    ``Dict[Point, str]`` wrongly claimed Point keys):
      * ``'point'``   -- a GEOS ``Point(lng, lat)`` for the first match,
                         or ``Point(0, 0)`` if the lookup failed;
      * ``'address'`` -- the formatted address, or ``'ERROR: <status>'``
                         on failure.
    """
    payload = {
        'key': os.environ.get('GOOGLE_GEOCODING_API_KEY'),
        'language': 'cs',
        'address': address
    }
    # Call API
    response = requests.get(
        os.environ.get('GOOGLE_GEOCODING_API_LINK'),
        params=payload
    )
    # Parse response
    r = response.json()
    if response.status_code == status.HTTP_200_OK and r['status'] == 'OK':
        first = r['results'][0]
        lat = first['geometry']['location']['lat']
        lng = first['geometry']['location']['lng']
        formatted = first['formatted_address']
        # GEOS points are (x, y) == (longitude, latitude).
        return {
            'point': Point(lng, lat),
            'address': formatted
        }
    # If error, return the default point (0, 0)
    # and the error message as the address
    return {
        'point': Point(0, 0),
        'address': 'ERROR: ' + r['status']
    }
| StarcoderdataPython |
1732322 | <filename>CMC_coin_info_fetch.py<gh_stars>1-10
#Author : <NAME>
#Name : CMC_Coin_Info_Fetch.py
#Description :
#To pull out twitter handle of all the currency and make a xls file. This uses APIs provided by CMC.
#This twitter handles will be used for monitoring using twitter APIs in other module.
#
#
#production link: https://pro-api.coinmarketcap.com
#Test link: https://sandbox-api.coinmarketcap.com/
#
#Documentation : https://coinmarketcap.com/api/documentation/v1/
#Pro usage Tracking : https://pro.coinmarketcap.com/account
#Sandbox Tracking : https://sandbox.coinmarketcap.com/account
#
#This example uses Python 2.7 and the python-request library.
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
import csv
import math
import openpyxl
#sandbox
url_map = 'https://sandbox-api.coinmarketcap.com/v1/cryptocurrency/map'
url_info = 'https://sandbox-api.coinmarketcap.com/v1/cryptocurrency/info'
#production
#url_map = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/map'
#url_info = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/info'
parameters_map = {
}
parameters_info_ids = {
'id': '1,2'
}
headers = {
'Accepts': 'application/json',
#TEST
'X-CMC_PRO_API_KEY': 'Your key here',
#PRO
#'X-CMC_PRO_API_KEY': 'Your key here',
}
Map_details_list = [ 'Dummy',
'Main_id' ,
'Name',
'Symbol',
'URL_name',
'Is_Active',
'status',
'first_historical_data',
'last_historical_data',
'platform',
'platform_name',
'platform_token_address' ,
'catagory_coin_tocken',
'logo_url',
'description',
'date_added',
'notice',
'tags_minable_or_not',
'website',
'technical_doc',
'explorer',
'source_code',
'message_board',
'chat',
'announcement',
'reddit',
'twitter'
]
session = Session()
session.headers.update(headers)
#creating an excel file for writing data.
wb = openpyxl.Workbook()
sheet = wb.active
sheet.title = "CMC_details"
# writing titles in excel file
# carefull about index: Map_details_list[0] is a 'Dummy' placeholder so that
# list indexes line up with openpyxl's 1-based column numbers.
for i in range(1, len(Map_details_list)):
    c1 = sheet.cell(row = 1, column = i)
    c1.value = Map_details_list[i]
#fetching list of available currency and ids.
try:
    response = session.get(url_map, params=parameters_map)
    data_map = json.loads(response.text)
    data_map_list = data_map['data']
    length_map = len(data_map_list)
    id_list = []
    # One spreadsheet row per currency.  Rows are 1-based and row 1 holds
    # the titles, hence row = i + 2.
    for i in range(length_map):
        id_list.append(data_map_list[i]['id'])
        sheet.cell(row = i+2, column = Map_details_list.index('Main_id')).value = data_map_list[i]['id']
        sheet.cell(row = i+2, column = Map_details_list.index('Name')).value = data_map_list[i]['name']
        sheet.cell(row = i+2, column = Map_details_list.index('Symbol')).value = data_map_list[i]['symbol']
        sheet.cell(row = i+2, column = Map_details_list.index('URL_name')).value = data_map_list[i]['slug']
        sheet.cell(row = i+2, column = Map_details_list.index('Is_Active')).value = data_map_list[i]['is_active']
        sheet.cell(row = i+2, column = Map_details_list.index('status')).value = "NA"
        sheet.cell(row = i+2, column = Map_details_list.index('first_historical_data')).value = \
            data_map_list[i]['first_historical_data']
        sheet.cell(row = i+2, column = Map_details_list.index('last_historical_data')).value = \
            data_map_list[i]['last_historical_data']
        # Token entries carry a parent platform; plain coins do not.
        if data_map_list[i]['platform']:
            curr_platform_dict = data_map_list[i]['platform']
            sheet.cell(row = i+2, column = Map_details_list.index('platform')).value = "YES"
            sheet.cell(row = i+2, column = Map_details_list.index('platform_name')).value = \
                curr_platform_dict['name']
            sheet.cell(row = i+2, column = Map_details_list.index('platform_token_address')).value = \
                curr_platform_dict['token_address']
        else:
            sheet.cell(row = i+2, column = Map_details_list.index('platform')).value = "NA"
    #print(id_list)
    #print(data['data'][1]['id'])
    #print(data['data'])
except (ConnectionError, Timeout, TooManyRedirects) as e:
    print("There is some issue fetching ID map , here is the error !!! " )
    print(e)
#fetching list of twitter accounts
length_id = len(id_list)
# The /info endpoint accepts a limited number of ids per call, so page
# through id_list in chunks of 100.
for i in range(0, length_id, 100 ):
    Curr_id_list = list(id_list[i: i+100 if i+100 < length_id else length_id ])
    parameters_info_ids['id'] = ','.join(map(str, Curr_id_list))
    try:
        response = session.get(url_info, params=parameters_info_ids)
        data_info = json.loads(response.text)
        cnt = 0
        # j indexes the spreadsheet row; cnt indexes within this chunk.
        for j in range(i, i+100 if i+100 < length_id else length_id ):
            sheet.cell(row = j+2, column = Map_details_list.index('catagory_coin_tocken')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['category'])
            sheet.cell(row = j+2, column = Map_details_list.index('logo_url')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['logo'])
            sheet.cell(row = j+2, column = Map_details_list.index('description')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['description'])
            sheet.cell(row = j+2, column = Map_details_list.index('date_added')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['date_added'])
            sheet.cell(row = j+2, column = Map_details_list.index('notice')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['notice'])
            sheet.cell(row = j+2, column = Map_details_list.index('tags_minable_or_not')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['tags'])
            sheet.cell(row = j+2, column = Map_details_list.index('website')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['website'])
            sheet.cell(row = j+2, column = Map_details_list.index('technical_doc')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['technical_doc'])
            sheet.cell(row = j+2, column = Map_details_list.index('explorer')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['explorer'])
            sheet.cell(row = j+2, column = Map_details_list.index('source_code')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['source_code'])
            sheet.cell(row = j+2, column = Map_details_list.index('message_board')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['message_board'])
            sheet.cell(row = j+2, column = Map_details_list.index('chat')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['chat'])
            sheet.cell(row = j+2, column = Map_details_list.index('announcement')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['announcement'])
            sheet.cell(row = j+2, column = Map_details_list.index('reddit')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['reddit'])
            sheet.cell(row = j+2, column = Map_details_list.index('twitter')).value = \
                str(data_info['data'][str(Curr_id_list[cnt])]['urls']['twitter'])
            cnt = cnt + 1
        #data_info = str(data_info).encode('utf8')
        #print(data_info)
        #print("************************************************************")
        #print("************************************************************")
    except (ConnectionError, Timeout, TooManyRedirects) as e:
        print("There is some issue fetching meta data of currency , here is the error !!! " )
        print(e)
wb.save("CMC_Coin_Details_Auto.xlsx")
| StarcoderdataPython |
156824 | from itertools import combinations
import numpy as np
import time
def friend_numbers_exhaustive_count(count_till_number):
    """Count unordered pairs (a, b), 1 <= a < b < count_till_number, whose
    decimal representations share at least one digit.

    Still an exhaustive O(n^2) pair scan, but each number's digit set is
    computed once up front instead of re-stringifying both numbers inside
    the inner loop (and the numpy round-trip for a simple any() is gone).
    """
    # digit_sets[k] holds the digit characters of the number k + 1.
    digit_sets = [set(str(n)) for n in range(1, count_till_number)]
    # Two numbers are "friends" iff their digit sets intersect.
    return sum(1 for a, b in combinations(digit_sets, 2) if a & b)
# Benchmark the exhaustive count for all numbers below N.
N=1000
start = time.time()
result = friend_numbers_exhaustive_count(N)
end = time.time()
# Report the result together with the wall-clock time taken.
print("Counting friend numbers exhaustively till %s yields %s, took %0.5f seconds" %(N,result, end-start))
3237422 | <reponame>tzakrajs/yaiges<filename>run_server.py
#!/usr/bin/env python3
import socket
import sys
import pytest
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import yaml
from core import logging, main_loop, route
# YAML Config is located at the path below
YAML_CONFIG_PATH = './config.yml'
application = tornado.web.Application(route.get_routes())
def generate_redirect_app(hostname='localhost', port=443):
    """Build a Tornado application whose only job is to redirect every
    plain-HTTP request to the equivalent https:// URL on *port*.

    A falsy *hostname* falls back to this machine's FQDN; the port is
    omitted from the redirect target when it is the https default (443).
    """
    effective_host = hostname if hostname else socket.getfqdn()
    if port != 443:
        effective_host = '{0}:{1}'.format(effective_host, port)
    # Define http/ws server (only redirect to https/wss)
    class RedirectToSSL(tornado.web.RequestHandler):
        def get(self, path):
            self.redirect('https://{0}/{1}'.format(effective_host, path))
    return tornado.web.Application([(r"/(.*)", RedirectToSSL),],)
# Load the configuration file
try:
config_file = open(YAML_CONFIG_PATH, 'r')
except FileNotFoundError:
error_message = "YAML config not found: {0}".format(YAML_CONFIG_PATH)
logging.error(error_message)
sys.exit(error_message)
config = yaml.load(config_file)
# Entry point: bind plain-HTTP and/or HTTPS listeners according to the
# config, then start the shared IO loop.
if __name__ == "__main__":
    # Load the top level config dictionaries
    general_config = config.get('general', {})
    non_ssl_config = config.get('non_ssl', {})
    ssl_config = config.get('ssl', {})
    # Set our listening IP addresses and ports
    ipv4_ip = general_config.get('ipv4_ip')
    ipv6_ip = general_config.get('ipv6_ip')
    http_port = non_ssl_config.get('port', 8080)
    https_port = ssl_config.get('port', 8443)
    # Non SSL
    if non_ssl_config.get('enabled'):
        # Setup the http/ws server
        if non_ssl_config.get('redirect'):
            # This one only redirects to https, ws does not and cannot be
            # redirected since browsers disallow ws requests originating from
            # https websites
            hostname = general_config.get('hostname')
            # Redirect to the externally visible https port if it differs
            # from the local listening port (e.g. behind a port forward).
            external_port = ssl_config.get('external_port', https_port)
            redirect_app = generate_redirect_app(hostname, external_port)
            http_server = tornado.httpserver.HTTPServer(redirect_app)
        else:
            # This one is the full application
            http_server = tornado.httpserver.HTTPServer(application)
        if ipv6_ip:
            # Listen on IPv6 address
            http_server.listen(http_port, ipv6_ip)
        if ipv4_ip:
            # Listen on IPv4 address
            http_server.listen(http_port, ipv4_ip)
    # SSL
    if ssl_config.get('enabled'):
        # Setup https/wss server
        ssl_crt = ssl_config.get('crt_path')
        ssl_key = ssl_config.get('key_path')
        try:
            ssl_options = {'certfile': ssl_crt,
                           'keyfile': ssl_key}
            https_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
        except Exception as e:
            error_message = "Unable to setup SSL: {0}".format(e)
            logging.error(error_message)
            sys.exit(error_message)
        if ipv6_ip:
            # Listen on IPv6 address
            https_server.listen(https_port, ipv6_ip)
        if ipv4_ip:
            # Listen on IPv4 address
            https_server.listen(https_port, ipv4_ip)
    # Initialize Main Loop
    main_loop.start()
@pytest.fixture
def app():
    # pytest-tornado fixture providing the application under test.
    return application
@pytest.mark.gen_test
def test_hello_world(http_client, base_url):
    # Smoke test: fetching the app's root URL must return HTTP 200.
    response = yield http_client.fetch(base_url)
    assert response.code == 200
| StarcoderdataPython |
1620645 | #!/usr/bin/python
#
# HTTP Authentication: Basic and Digest Access Authentication
#
import traceback
import falcon
import json
import uuid
import time
import re
import base64
import hashlib
import random
from wsgiref.simple_server import make_server
DOMAIN = 'github.com'
USERS = {'admin': '<EMAIL>', 'minh': 'nguyen-hoang-minh@github'}
LISTEN_ADDR = '0.0.0.0'
LISTEN_PORT = 8088
def logger(message):
    # Minimal stdout logging shim used throughout this module.
    print(message)
class BasicAuthentication:
    """HTTP Basic access authentication credential checker (RFC 7617)."""

    def __init__(self, users, realm):
        """Precompute the base64 token for every username/password pair.

        users -- mapping of username -> password
        realm -- protection-space name advertised in the challenge
        """
        self.realm = realm
        self.secrets = []
        for username, password in users.items():
            # Encode to bytes before b64encode (required on Python 3,
            # harmless on Python 2) and decode back so the stored token
            # compares equal to the str header value.
            token = base64.b64encode((username + ':' + password).encode('utf-8')).decode('ascii')
            self.secrets.append(token)

    def gen_challenge(self):
        """Return the WWW-Authenticate challenge value for this realm."""
        return 'Basic realm="{}"'.format(self.realm)

    def authenticate(self, credentials):
        """Check an 'Authorization: Basic <token>' header value."""
        # credentials = 'Basic <secret>' -- strip the 6-char prefix and
        # compare against the precomputed tokens.
        return credentials[6:] in self.secrets
class DigestAuthentication:
    """HTTP Digest access authentication (RFC 2617).

    Supports the MD5 and MD5-sess algorithms and the 'auth'/'auth-int'
    quality-of-protection modes.  Issued nonces are remembered (with a
    60 s lifetime) in self.txnids and discarded after a successful
    authentication, so each challenge can be answered at most once.
    """
    def __init__(self, users, realm):
        # users: mapping of username -> password; realm: protection space.
        self.users = users
        self.realm = realm
        # nonce -> {'expire': unix_ts, 'data': {'qop': ..., 'algorithm': ...}}
        self.txnids = {}
    def gen_challenge(self):
        # challenge = "Digest (realm|[domain]|nonce|[opaque]|[stale]|[algorithm]|[qop]|[auth-param])"
        current, ttl = int(time.time()), 60
        # Purge expired nonces (iterate over a copy; we mutate the dict).
        for txnid in [key for key in self.txnids]:
            if self.txnids[txnid]['expire'] < current:
                self.txnids.pop(txnid, None)
        # Generate params for the www-auth header.  Encode before b64encode
        # so this works on Python 3 (bytes-only) as well as Python 2.
        nonce = base64.b64encode((self.realm + uuid.uuid4().hex).encode('utf-8')).decode('ascii')
        qop = random.choice(['', 'auth', 'auth-int'])
        algorithm = random.choice(['', 'MD5', 'MD5-sess'])  # RFC7616 adds (SHA256, SHA256-sess)
        # store nonce data per txn
        nonce_data = {'qop': qop, 'algorithm': algorithm}
        self.txnids[nonce] = {'expire': current + ttl, 'data': nonce_data}
        # return challenge
        challenge = 'Digest realm="{}", nonce="{}"'.format(self.realm, nonce)
        for key, value in nonce_data.items():
            if value:
                challenge += ', {}="{}"'.format(key, value)
        return challenge
    @staticmethod
    def _parse_credentials(_credentials):
        # Parse 'k1="v1", k2=v2, ...' pairs from the Authorization header
        # into a dict (quotes optional around values).
        _credentials_pattern = re.compile('([^", ]+) ?[=] ?"?([^", ]+)"?')
        return dict(_credentials_pattern.findall(_credentials))
    @staticmethod
    def _H(data):
        # H(data) = MD5(data).  hashlib on Python 3 accepts only bytes, so
        # transparently encode str input (no-op semantics on Python 2).
        if isinstance(data, str):
            data = data.encode('utf-8')
        return hashlib.md5(data).hexdigest()
    def _KD(self, secret, data):
        # KD(secret, data) = H(concat(secret, ":", data))
        return self._H(secret + ':' + data)
    def _A1(self, username, password, nonce, cnonce, algorithm):
        # If the "algorithm" directive's value is "MD5" or is unspecified:
        #   A1 = unq(username-value) ":" unq(realm-value) ":" passwd
        # If the "algorithm" directive's value is "MD5-sess":
        #   A1 = H( unq(username) ":" unq(realm) ":" passwd ) ":" unq(nonce) ":" unq(cnonce)
        a1 = username + ':' + self.realm + ':' + password
        if algorithm[-5:] == '-sess':
            a1 = self._H(username + ':' + self.realm + ':' + password) + ':' + nonce + ':' + cnonce
        return a1
    def _A2(self, qop, method, uri, entity_body):
        # If the "qop" directive's value is "auth" or is unspecified:
        #   A2 = Method ":" digest-uri-value
        # If the "qop" value is "auth-int":
        #   A2 = Method ":" digest-uri-value ":" H(entity-body)
        a2 = method + ':' + uri
        if qop == 'auth-int':
            a2 = method + ':' + uri + ':' + self._H(entity_body)
        return a2
    def response(self, method, username, password, nonce, qop, algorithm, cnonce, nc, uri, entity_body):
        """Compute the expected 'response' digest for the given parameters."""
        A1 = self._A1(username, password, nonce, cnonce, algorithm)
        A2 = self._A2(qop, method, uri, entity_body)
        if qop in ['auth', 'auth-int']:
            response = self._KD(self._H(A1), nonce +
                                ':' + nc +
                                ':' + cnonce +
                                ':' + qop +
                                ':' + self._H(A2))
        else:
            response = self._KD(self._H(A1), nonce + ':' + self._H(A2))
        return response
    def authenticate(self, method, _credentials, entity_body):
        """Validate an 'Authorization: Digest ...' header value.

        True only if: the nonce is one we issued (and unexpired), the
        qop/algorithm echo what the challenge specified, qop/cnonce/nc are
        present together or absent together, the user is known, and the
        client's response digest matches ours.  Nonces are single-use.
        """
        result = False
        digest_credentials = self._parse_credentials(_credentials)
        _username = digest_credentials.get('username')
        _nonce = digest_credentials.get('nonce')
        _realm = digest_credentials.get('realm')
        _algorithm = digest_credentials.get('algorithm', '')
        _cnonce = digest_credentials.get('cnonce', '')
        _uri = digest_credentials.get('uri')
        _nc = digest_credentials.get('nc', '')
        _qop = digest_credentials.get('qop', '')
        _response = digest_credentials.get('response')
        if _username and _realm and _nonce and _uri and _response:
            if _nonce in self.txnids:
                qop = self.txnids[_nonce]['data']['qop']
                algorithm = self.txnids[_nonce]['data']['algorithm']
                if _realm == self.realm and _qop == qop and _algorithm == algorithm:
                    if (_qop and _cnonce and _nc) or (not _qop and not _cnonce and not _nc):
                        if _username in self.users:
                            password = self.users[_username]
                            response = self.response(method, _username, password, _nonce, _qop, _algorithm,
                                                     _cnonce, _nc, _uri, entity_body)
                            # NOTE(review): logging the expected digest leaks
                            # material usable for offline attacks -- consider
                            # removing outside of debugging.
                            logger('compare 2 responses ' + response + '|' + _response)
                            if response == _response:
                                result = True
                                # clear nonce (single use)
                                self.txnids.pop(_nonce, None)
        return result
class Server:
    """Falcon WSGI sink implementing HTTP authentication.

    Replies with the challenge status when no credentials are supplied,
    200 on a valid Authorization header, 403 on an invalid one, and 500
    on unexpected errors.
    """
    def __init__(self, scheme='digest'):
        """Configure headers/status for the chosen scheme ('basic' or
        'digest'; anything else falls back to 'digest')."""
        self.challenge_code = falcon.HTTP_401 # 401 Unauthorized | 407 Proxy Authentication Required
        self.challenge_header = 'WWW-Authenticate' # WWW-Authenticate | Proxy-Authenticate
        self.credentials_header = 'Authorization' # Authorization | Proxy-Authorization
        self.scheme = 'digest'
        if scheme.lower() in ['basic', 'digest']:
            self.scheme = scheme.lower()
        self.http_auth = DigestAuthentication(USERS, DOMAIN)
        if self.scheme == 'basic':
            self.http_auth = BasicAuthentication(USERS, DOMAIN)
    def __call__(self, req, resp):
        """Handle one request: verify credentials or issue a challenge."""
        logger("-------------------------------------------------------------------------------------------------------------------------------------------------------------------")
        response = None
        status = falcon.HTTP_400
        try:
            request_method = req.method
            request_url = req.uri
            logger('REQUEST: {} {}'.format(request_method, request_url))
            request_headers = req.headers
            logger('HEADERS: {}'.format(request_headers))
            dict_request_params = req.params
            logger('PARAMS: {}'.format(dict_request_params))
            request_body = req.stream.read()
            logger('BODY: {}'.format(request_body))
            authorization_header = req.get_header(self.credentials_header)
            logger(authorization_header)
            if authorization_header:
                # Basic takes only the header; Digest also needs the method
                # and body (for qop=auth-int integrity checking).
                if self.scheme == 'basic':
                    verify = self.http_auth.authenticate(authorization_header)
                else:
                    verify = self.http_auth.authenticate(request_method, authorization_header, request_body)
                if verify:
                    status = falcon.HTTP_200
                else:
                    status = falcon.HTTP_403
            else:
                # No credentials: issue a fresh challenge.
                challenge_auth_header = self.http_auth.gen_challenge()
                resp.set_header(self.challenge_header, challenge_auth_header)
                logger(challenge_auth_header)
                status = self.challenge_code
                response = 'failure'
        except Exception as e:
            logger('{} | {}'.format(e, traceback.format_exc()))
            status = falcon.HTTP_500
            response = 'failure'
        finally:
            # Always emit a JSON body with the outcome, whatever happened.
            resp.content_type = 'application/json'
            resp.status = status
            resp.body = json.dumps({'ack': response}).encode('utf-8')
            logger('RESPONSE [{}] {}'.format(resp.status, resp.body))
##################################################################################
# Falcon app with a catch-all sink: every path is handled by Server.
api = application = falcon.API()
api.add_sink(Server(), r'/*')
if __name__ == '__main__':
    # Simple wsgiref server for local/standalone use.
    logger("Start HTTP Server {}:{}".format(LISTEN_ADDR, LISTEN_PORT))
    httpd = make_server(LISTEN_ADDR, LISTEN_PORT, api)
    httpd.serve_forever()
| StarcoderdataPython |
1771050 | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: The root of binary tree.
    @return: An integer
    """
    def maxPathSum(self, root):
        """Return the maximum path sum over all node-to-node paths.

        A path may start and end at any nodes; it does not have to pass
        through the root.
        """
        self._best = float('-inf')
        self._max_ending_at(root)
        return self._best

    def _max_ending_at(self, node):
        """Post-order walk.  Updates self._best with the best path that
        bends at *node*, and returns the best downward path sum starting
        at *node* (so the parent can extend it)."""
        if node is None:
            return 0
        # A negative branch never helps; clamp it to zero (drop that side).
        left = self._max_ending_at(node.left)
        if left < 0:
            left = 0
        right = self._max_ending_at(node.right)
        if right < 0:
            right = 0
        # The best path bending at this node may use both branches.
        candidate = node.val + left + right
        if candidate > self._best:
            self._best = candidate
        # Only one branch can be extended upward to the parent.
        return node.val + (left if left > right else right)
return max(root.val + leftSum, root.val + rightSum) | StarcoderdataPython |
3349977 | <filename>config.py
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# ==============================================================================
# File description: Realize the parameter configuration function of data set, model, training and verification code.
# ==============================================================================
import os
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from model import ContentLoss
from model import Discriminator
from model import Generator
# ==============================================================================
# Common configure
# ==============================================================================
torch.manual_seed(0) # Set random seed.
upscale_factor = 4 # How many times the size of the high-resolution image in the data set is than the low-resolution image.
device = torch.device("cuda:0") # Use the first GPU for processing by default.
cudnn.benchmark = True # If the dimension or type of the input data of the network does not change much, turn it on, otherwise turn it off.
mode = "valid" # Run mode. Specific mode loads specific variables.
exp_name = "exp000" # Experiment name.
# ==============================================================================
# Train configure
# ==============================================================================
if mode == "train":
# Configure dataset.
train_dir = "data/DFO2K/train" # The address of the training dataset.
valid_dir = "data/DFO2K/valid" # Verify the address of the dataset.
image_size = 128 # High-resolution image size in the training dataset.
batch_size = 16 # Data batch size.
# Configure model.
discriminator = Discriminator().to(device) # Load the discriminator model.
generator = Generator().to(device) # Load the generator model.
# Resume training.
start_p_epoch = 0 # The number of initial iterations of the generator training phase. When set to 0, it means incremental training.
start_epoch = 0 # The number of initial iterations of the adversarial training phase. When set to 0, it means incremental training.
resume = False # Set to `True` to continue training from the previous training progress.
resume_p_weight = "" # Restore the weight of the generator model during generator training.
resume_d_weight = "" # Restore the weight of the generator model during the training of the adversarial network.
resume_g_weight = "" # Restore the weight of the discriminator model during the training of the adversarial network.
# Train epochs.
p_epochs = 1162 # The total number of cycles of the generator training phase.
epochs = 465 # The total number of cycles in the training phase of the adversarial network.
# Loss function.
psnr_criterion = nn.MSELoss().to(device) # PSNR metrics.
pixel_criterion = nn.L1Loss().to(device) # Pixel loss.
content_criterion = ContentLoss().to(device) # Content loss.
adversarial_criterion = nn.BCELoss().to(device) # Adversarial loss.
# Perceptual loss function weight.
pixel_weight = 0.01
content_weight = 1.0
adversarial_weight = 0.005
# Optimizer.
p_optimizer = optim.Adam(generator.parameters(), 0.0002, (0.9, 0.999)) # Generator model learning rate during generator training.
d_optimizer = optim.Adam(discriminator.parameters(), 0.0001, (0.9, 0.999)) # Discriminator learning rate during adversarial network training.
g_optimizer = optim.Adam(generator.parameters(), 0.0001, (0.9, 0.999)) # Generator learning rate during adversarial network training.
# Scheduler.
milestones = [epochs * 0.125, epochs * 0.250, epochs * 0.500, epochs * 0.750]
p_scheduler = CosineAnnealingLR(p_optimizer, p_epochs // 4, 1e-7) # Generator model scheduler during generator training.
d_scheduler = MultiStepLR(d_optimizer, list(map(int, milestones)), 0.5) # Discriminator model scheduler during adversarial training.
g_scheduler = MultiStepLR(g_optimizer, list(map(int, milestones)), 0.5) # Generator model scheduler during adversarial training.
# Training log.
writer = SummaryWriter(os.path.join("samples", "logs", exp_name))
# Additional variables.
exp_dir1 = os.path.join("samples", exp_name)
exp_dir2 = os.path.join("results", exp_name)
# ==============================================================================
# Validate configure
# ==============================================================================
if mode == "valid":
# Additional variables.
exp_dir = os.path.join("results", "test", exp_name)
# Load model.
model = Generator().to(device)
model_path = f"weights/Weights_ESRGAN.pth"
# Test data address.
lr_dir = f"data/Set5/LRbicx4"
sr_dir = f"results/test/{exp_name}"
hr_dir = f"data/Set5/GTmod12"
| StarcoderdataPython |
4840944 | <filename>setup.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from codecs import open
import os.path as osp
import re
from setuptools import setup, find_packages
with open('nyc_signature/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
here = osp.abspath(osp.dirname(__file__))
with open(osp.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='nyc_signature',
version=version,
description='New York City Signature Canvasing Project',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Build Tools',
],
keywords='common tools utility',
packages=find_packages(exclude=['docs', 'tests*']),
install_requires=[
'bokeh',
'geocoder',
'matplotlib',
'mock',
'numpy',
'pandas',
'pytest',
'requests',
'seaborn',
],
package_dir={'nyc_signature': 'nyc_signature'},
include_package_data=True,
)
if __name__ == '__main__':
pass
| StarcoderdataPython |
113009 | <reponame>jfmaes/transformationsuite
import argparse
import sys

from transformer import Transformer
from format import Formatter
from Crypto.Hash import MD5
parser = argparse.ArgumentParser(description="Transformer next generation by jfmaes")
# Transformation flags are optional; -f/--file is the only required argument.
# Argument combinations are validated later by check_args().
parser.add_argument("-f", "--file", help="the payload file", required=True)
parser.add_argument("-x", "--xor", help="use xor encryption", action="store_true")
parser.add_argument("-key", help="the xor key")
parser.add_argument("-c", "--caesar", help="use caesar cipher", action="store_true")
parser.add_argument("-rotation", help="the rotation to follow, can be + or - ")
parser.add_argument("-b64","-base64","--base64", help= "base 64 encode payload", action="store_true")
parser.add_argument("-rev","--reverse", help= "reverse payload", action="store_true")
parser.add_argument("-o", "--output-file", help="the output file")
# Output format selectors, consumed by the Formatter.
parser.add_argument("-vba", help="format to vba", action="store_true")
parser.add_argument("-csharp", help="format to csharp", action="store_true")
parser.add_argument("-cpp", help="format to cpp", action="store_true")
parser.add_argument("-raw", help="format to raw payload", action="store_true")
parser.add_argument("-v", "--verbose", help="print shellcode to terminal", action="store_true")
parser.add_argument("--no-transform", help="doesnt transform payload, just formats.", action="store_true")
def check_args(args):
    """Validate CLI argument combinations; exit with a hint on the first violation."""
    def _fail(message):
        # Print the operator-facing hint, then terminate the program.
        print(message)
        quit()

    # Each check is evaluated lazily, in order, exactly like the original chain.
    if args.xor and not args.key:
        _fail(f"[!] XOR encryption needs a key")
    if args.caesar and not args.rotation:
        _fail(f"[!] Caesar encryption needs a rotation")
    if not args.verbose and not args.output_file:
        _fail(f"[!] Your payload needs to go somewhere. Use either verbose or outfile params, or both.")
def get_shellcode_from_file(inFile):
    """Read a raw payload file and return its contents as a bytearray.

    Prints a short summary (MD5 digest and byte size) for operator feedback,
    and exits the program if the file cannot be opened or read.
    """
    import hashlib  # stdlib replacement for Crypto.Hash.MD5; no third-party dep needed

    try:
        # The context manager closes the handle; the explicit close() inside
        # the original `with` block was redundant and has been removed.
        with open(inFile, "rb") as shellcode_file:
            shellcode_bytes = bytearray(shellcode_file.read())
        print(f"[*] Payload file [{inFile}] successfully loaded")
    except IOError:
        print(f"[!] Could not open or read file [{inFile}]")
        quit()
    print("[*] MD5 hash of the initial payload: [{}]".format(hashlib.md5(shellcode_bytes).hexdigest()))
    print("[*] Payload size: [{}] bytes".format(len(shellcode_bytes)))
    return shellcode_bytes
def main(args):
    """Load the payload, optionally transform it, and emit it via the formatter.

    Args:
        args: parsed argparse.Namespace from the module-level parser
              (already validated by check_args).
    """
    transformer = Transformer()
    formatter = Formatter()
    data = get_shellcode_from_file(args.file)
    if args.no_transform:
        # BUG FIX: the original ran transformer.transform() unconditionally
        # and then discarded the result when --no-transform was set; skip the
        # (potentially expensive) transformation entirely in that case.
        formatter.format(args, data)
    else:
        formatter.format(args, transformer.transform(args, data))


if __name__ == '__main__':
    args = parser.parse_args()
    check_args(args)
    main(args)
| StarcoderdataPython |
3270315 | <gh_stars>10-100
from pwn import *
# CTF solver (Python 2 / pwntools): connects to a remote "math quiz" service,
# parses each arithmetic question, evaluates it, and sends back the answer.
cn = remote('172.16.17.32', 9999) # nc 172.16.17.32 9999
cn.recv() # title banner
while True:
    print cn.recvline() # stage info
    prob = cn.recvline()
    cn.recv()
    log.info(prob)
    # Strip the prompt decoration, leaving only the arithmetic expression.
    prob = prob.replace('Question> ', '').replace('= ?', '')
    log.info('PROBLEM : ' + prob)
    # NOTE(review): eval() of server-supplied text is deliberate for this
    # solver, but it executes arbitrary expressions from the remote host.
    ans = str(eval(prob))
    log.info('ANSWER : ' + str(ans))
    cn.sendline(ans)
    log.info(cn.recv())
    print '\n'
| StarcoderdataPython |
3303196 | <reponame>MinchinWeb/papermerge<gh_stars>0
from datetime import timedelta
import logging
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, redirect
from knox.models import AuthToken
from papermerge.core.forms import AuthTokenForm
logger = logging.getLogger(__name__)
@login_required
def tokens_view(request):
    """List API auth tokens; a POST of the bulk-action form with
    'delete_selected' deletes the tokens whose digests were checked."""
    if request.method == 'POST':
        selected_action = request.POST.getlist('_selected_action')
        go_action = request.POST['action']
        if go_action == 'delete_selected':
            AuthToken.objects.filter(
                digest__in=selected_action
            ).delete()
    # NOTE(review): lists tokens of every user, not just request.user —
    # confirm this view is intentionally admin-wide.
    tokens = AuthToken.objects.all()
    return render(
        request,
        'admin/tokens.html',
        {
            'tokens': tokens,
        }
    )
@login_required
def token_view(request):
    """Create a new API auth token for the current user.

    GET renders an empty form; a valid POST creates a token with the
    requested expiry (in hours) and shows it once via a flash message.
    """
    if request.method == 'POST':
        form = AuthTokenForm(request.POST)
        if form.is_valid():
            instance, token = AuthToken.objects.create(
                user=request.user,
                expiry=timedelta(hours=form.cleaned_data['hours'])
            )
            # The raw token is only available at creation time, so it is
            # surfaced once here and never shown again.
            html_message = "Please remember the token: "
            html_message += f" <span class='text-success'>{token}</span>"
            html_message += " It won't be displayed again."
            messages.success(
                request, html_message
            )
            return redirect('core:tokens')
        # BUG FIX: the original rebuilt an unbound AuthTokenForm() here,
        # silently discarding validation errors on an invalid POST; keep the
        # bound form so its errors are rendered back to the user.
    else:
        form = AuthTokenForm()
    return render(
        request,
        'admin/token.html',
        {
            'form': form,
        }
    )
| StarcoderdataPython |
72662 | <reponame>xihuaiwen/chinese_bert<gh_stars>0
# Copyright 2020 Graphcore Ltd.
import logging
import textwrap
from abc import abstractmethod
from copy import copy
from typing import Any, Iterable, List, Optional
import numpy as np
from scipy.stats import truncnorm
import popart
from pingpong.scope_manager import Scope
def normal_init_data(dtype, shape, mean, std_dev):
    """Draw initial weights from a normal distribution truncated at +/-2 std devs.

    Returns an array of the requested ``shape`` cast to ``dtype``.
    """
    flat = truncnorm.rvs(-2, 2, loc=mean, scale=std_dev, size=np.prod(shape))
    return flat.reshape(shape).astype(dtype)
logger = logging.getLogger(__name__)
class Parameter:
    """A named tensor parameter, optionally backed by host data and/or a popart tensor id."""

    def __init__(self,
                 name: str,
                 shape: tuple,
                 value: Optional[np.ndarray] = None,
                 popart_tensor: Optional[str] = None):
        # Identity and geometry of the parameter.
        self.name, self.shape = name, shape
        # Host-side initial data and (if already materialised) the popart
        # tensor id; both may be filled in later by the owning Block.
        self.value, self.popart_tensor = value, popart_tensor
        # Virtual-graph id; assigned when the tensor is created on device.
        self.vgid = None
def flatten(l):
    """Yield items from arbitrarily nested iterables depth-first,
    treating str/bytes as atomic values rather than recursing into them."""
    for item in l:
        is_atomic = isinstance(item, (str, bytes)) or not isinstance(item, Iterable)
        if is_atomic:
            yield item
        else:
            yield from flatten(item)
class Block:
    """Base class for popart layers.

    A Block owns a placement Scope, a list of Parameters, and any sub-blocks
    assigned to its attributes (auto-registered via __setattr__).  Calling a
    block materialises its parameters inside its scope and runs forward().
    """

    def __init__(self,
                 scope: Scope,
                 params: Optional[List[Parameter]] = None,
                 dtype: np.dtype = np.float16,
                 builder=None,
                 initializers=None,
                 scope_provider=None):
        self.scope = scope
        # BUG FIX: the original default was a shared mutable list ([]), so
        # instances constructed without params could alias the same object.
        self._params = [] if params is None else params
        self._dtype = dtype
        self.num_params = 0        # params created by this block itself
        self.total_params = None   # lazily cached by count_params()
        self.line_length = 98      # width of the summary() table
        self.sub_blocks = []
        if builder is None:
            raise ValueError('Builder must be passed in as kwarg.')
        if scope_provider is None:
            raise ValueError('ScopeProvider must be passed in as kwarg.')
        self._builder = builder
        self.scope_provider = scope_provider
        self.initializers = {} if initializers is None else initializers

    def __setattr__(self, name: str, value: Any) -> None:
        # Auto-register sub-blocks: any Block assigned directly, or nested
        # anywhere inside an iterable attribute, is tracked so summary() and
        # count_params() can walk the hierarchy.
        if isinstance(value, Block):
            self.sub_blocks.append(value)
        elif isinstance(value, Iterable):
            for el in flatten(value):
                if isinstance(el, Block):
                    self.sub_blocks.append(el)
        super(Block, self).__setattr__(name, value)

    @property
    def builder(self):
        return self._builder

    @builder.setter
    def builder(self, _):
        # The builder is fixed at construction; rebinding it would
        # desynchronise already-created tensors from the graph.
        raise ValueError('Cannot set builder property outside of Block class.')

    def format_row(self, fields, indent=False):
        """Render one fixed-width row of the summary table."""
        positions = [int(self.line_length * p) for p in [.45, .7, .8, 1.]]
        # BUG FIX: work on a copy so indenting does not mutate the caller's list.
        fields = list(fields)
        line = ''
        if indent:
            fields[0] = ' ' + fields[0]
        for i in range(len(fields)):
            if i > 0:
                line = line[:-1] + ' '
            line += str(fields[i])
            line = line[:positions[i]]
            line += ' ' * (positions[i] - len(line))
        return line

    def __repr__(self):
        name = self.scope.name
        cls_name = self.__class__.__name__
        pp_phase = self.scope.ping_pong_phase if hasattr(
            self.scope, 'ping_pong_phase') else None
        fields = [
            name + ' (' + cls_name + ')', pp_phase, self.scope.vgid,
            f'{self.count_params():,}'.rjust(10)
        ]
        s = self.format_row(fields)
        # Recursively render sub-blocks beneath this row.
        if len(self.sub_blocks) > 1:
            modstr = "\n".join([
                "{block}".format(block=block.__repr__())
                for block in self.sub_blocks
            ])
            s = "\n".join([s, modstr])
        return s

    def summary(self):
        """Print a Keras-style table of sub-blocks and their parameter counts."""
        header = ['Layer (type)', 'Pingpong Phase', 'VGID', 'Param #']
        print(self.format_row(header))
        print('_' * self.line_length)
        total_params = 0
        for blks in self.sub_blocks:
            print(blks)
            total_params += blks.count_params()
        print('Total params: {:,}'.format(total_params))

    def count_params(self):
        """Return this block's parameter count including sub-blocks (cached)."""
        if self.total_params is not None:
            return self.total_params
        child_params = 0
        for item in self.sub_blocks:
            child_params += item.count_params()
        self.total_params = child_params + self.num_params
        return self.total_params

    def __create_params__(self):
        # Create params only for a single block - not recursive.
        for param in self._params:
            if param.popart_tensor is None:
                data = self.initializers.get(param.name, None)
                if isinstance(data, np.ndarray):
                    if data.dtype != self._dtype:
                        raise ValueError(
                            f"Type of {param.name} does not match value provided. \n"
                            f"Provided {data.dtype}. Required {self._dtype}")
                    if np.any(data.shape != np.array(param.shape)):
                        # Accept a transposed initializer as a convenience.
                        if np.all(data.T.shape == np.array(param.shape)):
                            data = data.T.copy()
                            logger.warning(
                                f"Initializer for {param.name} was provided transposed."
                            )
                        else:
                            raise RuntimeError(
                                f"Initializer {param.name} does not match shapes. \n"
                                f" Provided {data.shape}. Required {param.shape}")
                    param.value = data
                else:
                    # No initializer supplied: truncated normal, mean 0, std 1.
                    param.value = normal_init_data(self._dtype,
                                                   param.shape,
                                                   mean=0,
                                                   std_dev=1)
                popart_tensor = self._builder.addInitializedInputTensor(
                    param.value, param.name)
                # NOTE(review): the created tensor id is not stored back into
                # param.popart_tensor here — confirm whether repeated calls
                # are meant to re-register the initialised input.
                self.num_params += np.prod(param.value.shape)
                param.vgid = self.scope.vgid
            else:
                # Shared (tied) parameter created by an earlier block.
                if param.vgid != self.scope.vgid:
                    raise RuntimeError(
                        'Shared parameter {} appears in different virtual graphs.'.format(
                            param.name))
                # BUG FIX: the original compared the shape list against
                # reversed(param.shape) — a reversed *iterator*, which never
                # compares equal to a list, so the transpose branch was dead.
                if self.builder.getTensorShape(param.popart_tensor) == list(reversed(param.shape)):
                    popart_tensor = self._builder.aiOnnx.transpose([param.popart_tensor])
                else:
                    popart_tensor = param.popart_tensor
                # Create copy of shared param so as to not modify
                # the param for the previous layer.
                param = copy(param)
                param.popart_tensor = popart_tensor

    @property
    def params(self):
        """Return this Block's parameter list, not recursive.

        Returns:
            List[Parameter]: parameters owned by this block.
        """
        return self._params

    @params.setter
    def params(self, value):
        self._params = value

    def next_phase(self):
        return self.scope_provider.get_next_phase()

    def total_phases(self):
        return self.scope_provider.get_prev_phase() + 1

    def __call__(self, *args):
        """Calls forward inside this block's scope. Only accepts positional arguments."""
        with self.scope_provider(self._builder, self.scope):
            self.__create_params__()
            return self.forward(*args)

    @abstractmethod
    def forward(self, *args):
        raise NotImplementedError
| StarcoderdataPython |
3294879 | <filename>doc/jupyter_execute/notebooks/explainer_examples_v2.py
#!/usr/bin/env python
# coding: utf-8
# # Example model explanations with Seldon and v2 Protocol - Incubating
#
# In this notebook we will show examples that illustrate how to explain models using [MLServer] (https://github.com/SeldonIO/MLServer).
#
# MLServer is a Python server for your machine learning models through a REST and gRPC interface, fully compliant with KFServing's v2 Dataplane spec.
# ## Running this Notebook
#
# This should install the required package dependencies, if not please also install:
#
# - install and configure `mc`, follow the relevant section in this [link](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html)
#
# - run this jupyter notebook in conda environment
# ```bash
# $ conda create --name python3.8-example python=3.8 -y
# $ conda activate python3.8-example
# $ pip install jupyter
# $ jupyter notebook
# ```
#
# - instal requirements
# - [alibi package](https://pypi.org/project/alibi/)
# - `sklearn`
# In[ ]:
get_ipython().system('pip install sklearn alibi')
# ## Setup Seldon Core
#
# Follow the instructions to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core).
#
# Then port-forward to that ingress on localhost:8003 in a separate terminal either with:
#
# * Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`
# * Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:8080`
#
# ### Setup MinIO
#
# Use the provided [notebook](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html) to install Minio in your cluster and configure `mc` CLI tool.
# Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html).
# ## Train `iris` model using `sklearn`
# In[ ]:
import os
import shutil
from joblib import dump
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
# ### Train model
# In[ ]:
# Fit a simple one-vs-rest logistic regression on the iris dataset.
iris_data = load_iris()
clf = LogisticRegression(solver="liblinear", multi_class="ovr")
clf.fit(iris_data.data, iris_data.target)
# ### Save model
# In[ ]:
# Persist the fitted model where SKLEARN_SERVER expects a model.joblib file.
modelpath = "/tmp/sklearn_iris"
if os.path.exists(modelpath):
    shutil.rmtree(modelpath)
os.makedirs(modelpath)
modelfile = os.path.join(modelpath, "model.joblib")
dump(clf, modelfile)
# ## Create `AnchorTabular` explainer
# ### Create explainer artifact
# In[ ]:
from alibi.explainers import AnchorTabular
# Anchor explanations need the model's predict function plus the training
# data to build discretisation bins (25th/50th/75th percentiles here).
explainer = AnchorTabular(clf.predict, feature_names=iris_data.feature_names)
explainer.fit(iris_data.data, disc_perc=(25, 50, 75))
# ### Save explainer
# In[ ]:
explainerpath = "/tmp/iris_anchor_tabular_explainer_v2"
if os.path.exists(explainerpath):
    shutil.rmtree(explainerpath)
explainer.save(explainerpath)
# ## Install dependencies to pack the environment for deployment
# In[ ]:
# BUG FIX: the bare `pip install ...` line was a SyntaxError in this
# notebook-converted script; shell commands must go through
# get_ipython().system() like every other shell cell in the file.
get_ipython().system('pip install conda-pack mlserver==0.6.0.dev2 mlserver-alibi-explain==0.6.0.dev2')
# ## Pack enviornment
# In[ ]:
import conda_pack
# Pack the active conda environment so MLServer can recreate it server-side.
env_file_path = os.path.join(explainerpath, "environment.tar.gz")
conda_pack.pack(
    output=str(env_file_path),
    force=True,
    verbose=True,
    ignore_editable_packages=False,
    ignore_missing_files=True,
)
# ## Copy artifacts to object store (`minio`)
#
# ### Configure `mc` to access the minio service in the local kind cluster
# note: make sure that minio ip is reflected properly below, run:
# - `kubectl get service -n minio-system`
# - `mc config host add minio-seldon [ip] minioadmin minioadmin`
# In[ ]:
# Recreate the bucket from scratch, then upload both model and explainer.
target_bucket = "minio-seldon/models"
os.system(f"mc rb --force {target_bucket}")
os.system(f"mc mb {target_bucket}")
os.system(f"mc cp --recursive {modelpath} {target_bucket}")
os.system(f"mc cp --recursive {explainerpath} {target_bucket}")
# ## Deploy to local `kind` cluster
# ### Create deployment CRD
# In[ ]:
get_ipython().run_cell_magic('writefile', 'iris-with-explainer-v2.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: iris\nspec:\n protocol: kfserving # Activate v2 protocol / mlserver usage\n name: iris\n annotations:\n seldon.io/rest-timeout: "100000"\n predictors:\n - graph:\n children: []\n implementation: SKLEARN_SERVER\n modelUri: s3://models/sklearn_iris\n envSecretRefName: seldon-rclone-secret\n name: classifier\n explainer:\n type: AnchorTabular\n modelUri: s3://models/iris_anchor_tabular_explainer_v2\n envSecretRefName: seldon-rclone-secret\n name: default\n replicas: 1')
# ### Deploy
# In[1]:
get_ipython().system('kubectl apply -f iris-with-explainer-v2.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=iris -o jsonpath='{.items[0].metadata.name}')")
# ### Test explainer
# In[ ]:
get_ipython().system('pip install numpy requests')
# In[ ]:
import json
import numpy as np
import requests
# In[ ]:
# Smoke-test the deployed explainer endpoint through the ingress port-forward.
endpoint = "http://localhost:8003/seldon/seldon/iris-explainer/default/v2/models/iris-default-explainer/infer"
test_data = np.array([[5.964, 4.006, 2.081, 1.031]])
# v2 (KFServing) inference request; content_type "np" tells MLServer to
# decode the payload back into a numpy array.
inference_request = {
    "parameters": {"content_type": "np"},
    "inputs": [
        {
            "name": "explain",
            "shape": test_data.shape,
            "datatype": "FP32",
            "data": test_data.tolist(),
            "parameters": {"content_type": "np"},
        },
    ],
}
response = requests.post(endpoint, json=inference_request)
# The explanation payload is JSON-encoded inside the first output's data field.
explanation = json.loads(response.json()["outputs"][0]["data"])
print("Anchor: %s" % (" AND ".join(explanation["data"]["anchor"])))
print("Precision: %.2f" % explanation["data"]["precision"])
print("Coverage: %.2f" % explanation["data"]["coverage"])
# In[ ]:
get_ipython().system('kubectl delete -f iris-with-explainer-v2.yaml')
# In[ ]:
| StarcoderdataPython |
1752608 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connection, migrations, models
from django.utils.timezone import utc
import datetime
def update_totals(apps, schema_editor):
    """Backfill the new per-project fan/activity totals from existing rows.

    Runs a single raw UPDATE that aggregates timeline entries (namespace
    'project:<id>') and likes rows (content type of Project) into the
    total_* columns added by this migration.  PostgreSQL-specific SQL
    (split_part, CTEs, UPDATE ... FROM).
    """
    model = apps.get_model("projects", "Project")
    # Likes rows are generic relations; filter them by Project's content type.
    type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    # type.id is an ORM-provided integer, so interpolating it via str.format
    # is safe here (no user-controlled input reaches the SQL).
    sql="""
    UPDATE projects_project
    SET
        totals_updated_datetime = totals.totals_updated_datetime,
        total_fans = totals.total_fans,
        total_fans_last_week = totals.total_fans_last_week,
        total_fans_last_month = totals.total_fans_last_month,
        total_fans_last_year = totals.total_fans_last_year,
        total_activity = totals.total_activity,
        total_activity_last_week = totals.total_activity_last_week,
        total_activity_last_month = totals.total_activity_last_month,
        total_activity_last_year = totals.total_activity_last_year
    FROM (
        WITH
        totals_activity AS (SELECT
            split_part(timeline_timeline.namespace, ':', 2)::integer as project_id,
            count(timeline_timeline.namespace) total_activity,
            MAX (created) updated_datetime
            FROM timeline_timeline
            WHERE namespace LIKE 'project:%'
            GROUP BY namespace),
        totals_activity_week AS (SELECT
            split_part(timeline_timeline.namespace, ':', 2)::integer as project_id,
            count(timeline_timeline.namespace) total_activity_last_week
            FROM timeline_timeline
            WHERE namespace LIKE 'project:%'
            AND timeline_timeline.created > current_date - interval '7' day
            GROUP BY namespace),
        totals_activity_month AS (SELECT
            split_part(timeline_timeline.namespace, ':', 2)::integer as project_id,
            count(timeline_timeline.namespace) total_activity_last_month
            FROM timeline_timeline
            WHERE namespace LIKE 'project:%'
            AND timeline_timeline.created > current_date - interval '30' day
            GROUP BY namespace),
        totals_activity_year AS (SELECT
            split_part(timeline_timeline.namespace, ':', 2)::integer as project_id,
            count(timeline_timeline.namespace) total_activity_last_year
            FROM timeline_timeline
            WHERE namespace LIKE 'project:%'
            AND timeline_timeline.created > current_date - interval '365' day
            GROUP BY namespace),
        totals_fans AS (SELECT
            object_id as project_id,
            COUNT(likes_like.object_id) total_fans,
            MAX (created_date) updated_datetime
            FROM likes_like
            WHERE content_type_id = {type_id}
            GROUP BY object_id),
        totals_fans_week AS (SELECT
            object_id as project_id,
            COUNT(likes_like.object_id) total_fans_last_week
            FROM likes_like
            WHERE content_type_id = {type_id}
            AND likes_like.created_date > current_date - interval '7' day
            GROUP BY object_id),
        totals_fans_month AS (SELECT
            object_id as project_id,
            COUNT(likes_like.object_id) total_fans_last_month
            FROM likes_like
            WHERE content_type_id = {type_id}
            AND likes_like.created_date > current_date - interval '30' day
            GROUP BY object_id),
        totals_fans_year AS (SELECT
            object_id as project_id,
            COUNT(likes_like.object_id) total_fans_last_year
            FROM likes_like
            WHERE content_type_id = {type_id}
            AND likes_like.created_date > current_date - interval '365' day
            GROUP BY object_id)
        SELECT
            totals_activity.project_id,
            COALESCE(total_activity, 0) total_activity,
            COALESCE(total_activity_last_week, 0) total_activity_last_week,
            COALESCE(total_activity_last_month, 0) total_activity_last_month,
            COALESCE(total_activity_last_year, 0) total_activity_last_year,
            COALESCE(total_fans, 0) total_fans,
            COALESCE(total_fans_last_week, 0) total_fans_last_week,
            COALESCE(total_fans_last_month, 0) total_fans_last_month,
            COALESCE(total_fans_last_year, 0) total_fans_last_year,
            totals_activity.updated_datetime totals_updated_datetime
        FROM totals_activity
        LEFT JOIN totals_fans ON totals_activity.project_id = totals_fans.project_id
        LEFT JOIN totals_fans_week ON totals_activity.project_id = totals_fans_week.project_id
        LEFT JOIN totals_fans_month ON totals_activity.project_id = totals_fans_month.project_id
        LEFT JOIN totals_fans_year ON totals_activity.project_id = totals_fans_year.project_id
    ) totals
    WHERE projects_project.id = totals.project_id
    """.format(type_id=type.id)
    cursor = connection.cursor()
    cursor.execute(sql)
class Migration(migrations.Migration):
    """Add cached fan/activity total columns to Project and backfill them.

    The raw SQL in update_totals() reads likes_like and timeline_timeline
    directly, hence the explicit dependencies on those apps.
    """
    # NOTE(review): ('likes', '0001_initial') is listed twice below —
    # harmless to Django, but redundant.
    dependencies = [
        ('projects', '0029_project_is_looking_for_people'),
        ('likes', '0001_initial'),
        ('timeline', '0004_auto_20150603_1312'),
        ('likes', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='total_activity',
            field=models.PositiveIntegerField(default=0, verbose_name='count', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_activity_last_month',
            field=models.PositiveIntegerField(default=0, verbose_name='activity last month', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_activity_last_week',
            field=models.PositiveIntegerField(default=0, verbose_name='activity last week', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_activity_last_year',
            field=models.PositiveIntegerField(default=0, verbose_name='activity last year', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_fans',
            field=models.PositiveIntegerField(default=0, verbose_name='count', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_fans_last_month',
            field=models.PositiveIntegerField(default=0, verbose_name='fans last month', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_fans_last_week',
            field=models.PositiveIntegerField(default=0, verbose_name='fans last week', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='total_fans_last_year',
            field=models.PositiveIntegerField(default=0, verbose_name='fans last year', db_index=True),
        ),
        migrations.AddField(
            model_name='project',
            name='totals_updated_datetime',
            # The fixed default only seeds existing rows; auto_now_add takes
            # over for rows created after this migration.
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 28, 7, 57, 11, 743976, tzinfo=utc), auto_now_add=True, verbose_name='updated date time', db_index=True),
            preserve_default=False,
        ),
        # Data migration: populate the new columns from existing rows.
        migrations.RunPython(update_totals),
    ]
| StarcoderdataPython |
3351152 | <gh_stars>1-10
# geomdl-cli - Copyright (c) 2018-2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# User configuration loading functions and entry point definitions
#
import os
import os.path
import sys
import importlib
import json
from . import __cli_name__, __cli_commands__, __cli_config__, __cli_config_dir__, __cli_config_file__
def enable_user_config(data):
    """ Activates user configuration.

    :param data: data dictionary
    :type data: dict
    """
    # Only override defaults when the user file actually has a
    # 'configuration' section; the flag records that an override happened.
    if 'configuration' in data:
        __cli_config__.update(data['configuration'])
        __cli_config__['user_override'] = True
def enable_user_commands(data):
    """ Activates user-defined commands.

    :param data: data dictionary
    :type data: dict
    """
    # EAFP: merge the user's command table only when the section exists.
    try:
        custom_commands = data['commands']
    except KeyError:
        return
    __cli_commands__.update(custom_commands)
def load_custom_config(root_path):
    """ Reads custom configuration files and makes them available for the command-line application.

    :param root_path: path to the directory containing the custom configuration file
    :type root_path: str
    """
    config_dir = os.path.join(root_path, __cli_config_dir__)
    config_file = os.path.join(config_dir, __cli_config_file__)
    if os.path.isfile(config_file):
        # Add config directory to the path so user command modules import
        sys.path.append(config_dir)
        # Open config file
        try:
            with open(config_file, 'r') as fp:
                # Load the JSON file
                json_data = json.load(fp)
                # Activate user configuration
                enable_user_config(json_data)
                # Activate user commands
                enable_user_commands(json_data)
        except IOError:
            print("Cannot read", config_file, "for reading. Skipping...")
        except Exception as e:
            # BUG FIX: was e.args[-1], which raises IndexError for exceptions
            # created with no args; formatting the exception itself is safe.
            print("Error in reading custom configuration file {fn}: {e}".format(fn=config_file, e=e))
            sys.exit(1)
def main():
    """Entry point for the command-line application"""
    # Default user configuration directories: current dir, then home dir.
    user_config_root_dirs = [os.getcwd(), os.path.expanduser("~")]
    # Load user commands
    for root_dir in user_config_root_dirs:
        load_custom_config(root_dir)
    # Split "--key=value" / "--flag" options out of sys.argv; a bare flag
    # gets the value 1.  Remaining positionals stay in sys.argv.
    command_params = {}
    new_sysargv = []
    for s in sys.argv:
        if s.startswith("--"):
            s_arr = s[2:].split("=")
            try:
                command_params[s_arr[0]] = s_arr[1]
            except IndexError:
                command_params[s_arr[0]] = 1
        else:
            new_sysargv.append(s)
    sys.argv = new_sysargv
    # Get number of command line arguments
    argc = len(sys.argv)
    # Show help if there are no command line arguments
    if argc < 2:
        print("No commands specified. Please run '" + __cli_name__ + " help' to see the list of commands available.")
        sys.exit(0)
    # Get command name
    cmd_name = str(sys.argv[1])
    # Command execution
    try:
        # Load the command information from the command dictionary
        command = __cli_commands__[cmd_name]
        # Import the module and get the function to be executed
        module = importlib.import_module(command['module'])
        func = getattr(module, command['func'])
        # Print command help if "--help" is present in the command arguments
        if "help" in command_params:
            func_doc = func.__doc__ if func.__doc__ else "No help available for " + cmd_name.upper() + " command"
            print(func_doc)
            sys.exit(0)
        # Run the command
        # NOTE(review): this inner KeyError handler also catches KeyErrors
        # raised *inside* the command function itself, misattributing them
        # to a malformed command definition — consider narrowing.
        try:
            cmd_args = int(command['func_args']) if 'func_args' in command else 0
            if cmd_args > 0:
                if argc - 2 < cmd_args:
                    print(cmd_name.upper() + " expects " + str(cmd_args) + " argument(s). Please run '" +
                          __cli_name__ + " " + cmd_name + " --help' for command help.")
                    sys.exit(0)
                # Call the command with the command arguments
                func(*sys.argv[2:], **command_params)
            else:
                # Call the command without the command arguments
                func(**command_params)
        except KeyError:
            print("Problem executing " + cmd_name.upper() + " command. Please see the documentation for details.")
            sys.exit(1)
    except KeyError:
        print("The command " + cmd_name.upper() + " is not available. Please run '" + __cli_name__ +
              " help' to see the list of commands available.")
        sys.exit(1)
    except Exception as e:
        # BUG FIX: was e.args[-1], which raises IndexError for exceptions
        # created with no args; formatting the exception itself is safe.
        print("An error occurred: {}".format(e))
        if "debug" in command_params:
            import traceback; traceback.print_exc()
        sys.exit(1)
    # Command execution completed
    sys.exit(0)
| StarcoderdataPython |
1736411 | #!/usr/bin/env python3
import os
import shutil
import sys
# A simple script to copy items in a folder into one of two subfolders,
# depending on whether they were modified before or after a given time.
# NOTE(review): the original header said "created", but the code below uses
# os.path.getmtime (modification time) — confirm which was intended.
# Created by brendon-ai, September 2017
# Names of the destination subfolders (files are copied, not moved).
SUBFOLDER_NAMES = ('before', 'after')
# Verify that the number of command line arguments is correct
if len(sys.argv) != 3:
    print('Usage: {} <image folder> <threshold Unix time>'.format(sys.argv[0]))
    sys.exit()
# Get a folder from the first command line argument
folder = os.path.expanduser(sys.argv[1])
# Get a Unix time value from the second argument
threshold_time = float(sys.argv[2])
# List for the full paths of the subfolders that will be created
subfolders = []
# Create a before directory and an after directory
for subfolder in SUBFOLDER_NAMES:
    # Format the full path
    subfolder_path = os.path.join(folder, subfolder)
    # Create the folder if it does not exist
    if not os.path.exists(subfolder_path):
        os.makedirs(subfolder_path)
    # Also add the folder's path to a list
    subfolders.append(subfolder_path)
# For each of the files in the main folder
for file_name in os.listdir(folder):
    # Format the file's full path
    file_path = os.path.join(folder, file_name)
    # If it is a file and not a folder
    if os.path.isfile(file_path):
        # If the file's modification time is lower than the provided time
        if os.path.getmtime(file_path) < threshold_time:
            # Copy the file to the before subfolder
            shutil.copy(file_path, subfolders[0])
        # Otherwise copy it to the after subfolder
        else:
            shutil.copy(file_path, subfolders[1])
| StarcoderdataPython |
140373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Pygonal
(c) 2016 Copyright <NAME> <<EMAIL>>
Portions copyright (c) 2010 by <NAME>
Portions copyright (c) 2009 The Super Effective Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See LICENSE.txt and CREDITS.txt
'''
def test_cached_property():
    """Verify cached_property evaluates once and preserves the docstring,
    contrasted against a plain property that re-evaluates on every access."""
    from pygonal.util import cached_property
    class Thingy(object):
        not_cached_calls = 0
        cached_calls = 0
        @property
        def not_cached(self):
            """Nay"""
            self.not_cached_calls += 1
            return 'not cached'
        @cached_property
        def cached(self):
            """Yay"""
            self.cached_calls += 1
            return 'cached'
    thing = Thingy()
    # Nothing evaluated yet; docstrings survive the decorators.
    assert thing.not_cached_calls == 0
    assert Thingy.not_cached.__doc__ == 'Nay'
    assert thing.cached_calls == 0
    assert Thingy.cached.__doc__ == 'Yay'
    not_cached_value = thing.not_cached
    assert thing.not_cached_calls == 1
    cached_value = thing.cached
    assert thing.cached_calls == 1
    # The plain property recomputes on each access...
    assert not_cached_value == thing.not_cached
    assert thing.not_cached_calls == 2
    # ...while the cached one keeps returning the stored result.
    assert cached_value == thing.cached
    assert thing.cached_calls == 1
    assert not_cached_value == thing.not_cached
    assert thing.not_cached_calls == 3
    assert cached_value == thing.cached
    assert thing.cached_calls == 1
# vim: ai ts=4 sts=4 et sw=4 tw=78
| StarcoderdataPython |
172983 | import os, sys, re, string, operator, math, datetime, time, signal, subprocess, shutil, glob, pkgutil
import params as p
import common_functions as cf
before = -1
###############################################################################
# Check for timeout and kill the job if it has passed the threshold
###############################################################################
def is_timeout(app, pr):
    # Poll the process every 'factor' seconds until it exits or the timeout
    # budget is spent.  Returns [timed_out, return_code].
    #
    # NOTE(review): the budget is p.TIMEOUT_THRESHOLD * p.apps[app][3]; an
    # older comment claimed index [2] holds the expected runtime — confirm
    # the correct index against the params module.
    factor = 0.5
    retcode = None
    tt = p.TIMEOUT_THRESHOLD * p.apps[app][3]
    if tt < 10: tt = 10
    to_th = tt / factor  # number of polls before declaring a timeout
    while to_th > 0:
        retcode = pr.poll()
        if retcode is not None:
            break
        to_th -= 1
        time.sleep(factor)
        print(to_th,retcode)
    if to_th == 0:
        # killpg requires the child to lead its own process group
        # (Popen(..., preexec_fn=os.setsid)); SIGINT the whole group.
        os.killpg(pr.pid, signal.SIGINT) # pr.kill()
        print ("timeout")
        return [True, pr.poll()]
    else:
        return [False, retcode]
def get_dmesg_delta(dm_before, dm_after):
    """Return the portion of dm_after that follows the last line of dm_before."""
    last_line = dm_before.splitlines()[-1]
    # Skip past the matched line and its trailing newline character.
    cut = dm_after.find(last_line) + len(last_line) + 1
    return str(dm_after[cut:])
def cmdline(command):
    """Run *command* through the shell and return its captured stdout (bytes).

    NOTE(review): shell=True executes the string via the system shell; only
    call this with trusted, internally-constructed command strings.
    """
    process = subprocess.Popen(args=command, stdout=subprocess.PIPE, shell=True)
    return process.communicate()[0]
def print_heart_beat(nj):
    """Print the running job count at most once per minute."""
    # 'before' is a module-level timestamp; -1 means no heartbeat yet.
    global before
    if before == -1:
        before = datetime.datetime.now()
    if (datetime.datetime.now()-before).seconds >= 60:
        print ("Jobs so far: %d" %nj)
        before = datetime.datetime.now()
def get_seconds(td):
    """Return the total duration of timedelta *td* in (fractional) seconds.

    Equivalent to the original manual microsecond arithmetic
    ((microseconds + (seconds + days*86400)*10**6) / 10**6), but uses the
    stdlib timedelta.total_seconds() helper, which computes the same value.
    """
    return td.total_seconds()
###############################################################################
# Classify error injection result based on stdout, stderr, application output,
# exit status, etc.
###############################################################################
def classify_injection(app, retcode, dmesg_delta):
    """Classify the outcome of one injection run.

    Combines the injector's log, the app's stdout/stderr, the exit status
    and the dmesg delta into one of the outcome categories defined in
    ``params``.

    Parameters:
        app: application key into p.apps (currently unused here).
        retcode: exit status of the injection run (0 means clean exit).
        dmesg_delta: kernel log lines that appeared during the run.
    """
    stdout_str = ""
    if os.path.isfile(stdout_fname):
        stdout_str = str(open(stdout_fname).read())
    # Xid 43 without the accompanying Graphics/"Ch 0000" messages is the
    # signature of the error detectors firing (detector builds only).
    if p.detectors and "- 43, Ch 00000010, engmask 00000101" in dmesg_delta and "- 13, Graphics " not in dmesg_delta and "- 31, Ch 0000" not in dmesg_delta: # this is specific for error detectors
        return p.DMESG_XID_43
    # DUE fault: the app terminated with a non-zero exit code.
    if retcode != 0:
        return p.NON_ZERO_EC
    inj_log_str = ""
    if os.path.isfile(injrun_fname):
        inj_log_str = str(open(injrun_fname, "r").read())
    else:
        print("fail")  # fixed: was a Python-2 print statement (SyntaxError on py3)
    if "ERROR FAIL Detected Signal SIGKILL" in inj_log_str:
        # NOTE(review): igid/kname/kcount/iid/opid/bid are not defined in
        # this scope; this verbose print raises NameError if p.verbose is set.
        if p.verbose: print ("Detected SIGKILL: %s, %s, %s, %s, %s, %s" %(igid, kname, kcount, iid, opid, bid))
        return p.OTHERS
    if "Error not injected" in inj_log_str or "ERROR FAIL in kernel execution; Expected reg value doesn't match;" in inj_log_str:
        print (inj_log_str)
        if p.verbose: print ("Error Not Injected: %s, %s, %s, %s, %s, %s" %(igid, kname, kcount, iid, opid, bid))
        return p.OTHERS
    if "Error: misaligned address" in stdout_str:
        return p.STDOUT_ERROR_MESSAGE
    if "Error: an illegal memory access was encountered" in stdout_str:
        return p.STDOUT_ERROR_MESSAGE
    if "Error: misaligned address" in str(open(stderr_fname).read()): # if error is found in the log standard err
        return p.STDOUT_ERROR_MESSAGE
    # SDC fault or Masked are identified by compare_stdout():
    #   1 = same top classification but different scores (SAFE),
    #   2 = misclassification (CRITICAL), anything else = masked.
    code = compare_stdout()
    if code == 1:
        return p.SAFE
    if code == 2:
        return p.CRITICAL
    else:
        return p.MASKED_OTHER
    # (A large commented-out diff-based classifier that predates
    # compare_stdout() was removed here.)
def receiveSignal():
    """Signal-handler stub: report that a segfault signal was received."""
    # fixed: message was garbled ("segegv/n" with a literal forward slash).
    print("SIGSEGV received\n")
###############################################################################
# Run the actual injection run
###############################################################################
def run_injections():
    """Drive one injection campaign for the current (global) app.

    Reads injection parameters (one per line: threadID reg mask SM stuck_at)
    from the app's injectionsRF.txt, runs the instrumented app once per
    line, classifies each outcome and appends it to the report file.
    """
    [ threadID, reg, mask, SM, stuck_at] = ["", "", "", "",""]
    fName = p.script_dir[app] + "/injectionsRF" + ".txt"
    total_jobs = 0
    start = datetime.datetime.now()
    i = 0
    if os.path.isfile(inj_run_logname):
        logf = open(fName, "r")
        for line in logf:
            # Injection spec: "<threadID> <reg> <mask> <SM> <stuck_at>"
            threadID = line.split(' ')[0].strip()
            reg = line.split(' ')[1].strip()
            mask = line.split(' ')[2].strip()
            SM = line.split(' ')[3].strip()
            stuck_at = line.split(' ')[4].strip()
            print("%s %s %s %s %s" %( threadID, reg, mask, SM, stuck_at))
            # Hand the parameters to the injector via its seeds file.
            f = open(injection_seeds_file,"w")
            f.write("%s\n%s\n%s\n%s\n%s\n" %(threadID, reg, mask, SM, stuck_at))
            f.close()
            cwd = os.getcwd()
            dmesg_before = cmdline("dmesg")
            total_jobs += 1
            pr = subprocess.Popen(script_fname, shell=True, executable='/bin/bash', preexec_fn=os.setsid) # run the injection job
            [timeout_flag, retcode] = is_timeout(app, pr)
            # Record kernel error messages (dmesg)
            dmesg_after = cmdline("dmesg")
            dmesg_delta = get_dmesg_delta(dmesg_before, dmesg_after)
            dmesg_delta = dmesg_delta.replace("\n", "; ").replace(":", "-")
            if timeout_flag:
                [threadID,reg,mask,SMID,stuckat] = get_inj_info()
                ret_cat = p.TIMEOUT
            else:
                [threadID,reg,mask,SMID,stuckat] = get_inj_info()
                ret_cat = classify_injection(app, retcode, dmesg_delta)
            if ret_cat == 5:
                # Injector log was unusable; fall back to the input line.
                threadID = line.split(' ')[0].strip()
                reg = line.split(' ')[1].strip()
                mask = line.split(' ')[2].strip()
                SMID = line.split(' ')[3].strip()
                stuckat = line.split(' ')[4].strip()
            # Append (or create) the per-app outcome report.
            if os.path.isfile(report_fname):
                with open(report_fname,"a") as f:
                    f.write("threadID %s, reg %s, mask %s, SMID %s,stuckat %s; Outcome %s (code %s)\n" %(threadID,reg,mask,SMID,stuckat,p.CAT_STR[ret_cat-1], ret_cat))
            else:
                with open(report_fname,"w+") as f:
                    f.write("threadID %s, reg %s, mask %s, SMID %s,stuckat %s; Outcome %s (code %s)\n" %(threadID,reg,mask,SMID,stuckat,p.CAT_STR[ret_cat-1], ret_cat))
            print ("threadID %s, reg %s, mask %s, SMID %s,stuckat %s; Outcome %s (code %s)\n" %(threadID,reg,mask,SMID,stuckat,p.CAT_STR[ret_cat-1], ret_cat))
            os.chdir(cwd) # return to the main dir
            print_heart_beat(total_jobs)
            #sec = input('Let us wait for user input.\n')
        seconds = str(get_seconds(datetime.datetime.now() - start))
        minutes = int(((float(seconds))//60)%60)
        hours = int((float(seconds))//3600)
        print("Elapsed %s:%s" %(str(hours),str(minutes)))
        logf.close()
        if os.path.isfile(report_fname):
            with open(report_fname,"a") as f:
                f.write("Simulation time: %s:%s\n" %(str(hours),str(minutes)))
    # NOTE(review): ret_cat is unbound (NameError) if the injector log file
    # was missing or injectionsRF.txt was empty -- confirm intended.
    return ret_cat
def get_inj_info():
    """Parse the injector run log into [threadID, reg, mask, SMID, stuckat].

    Fields that are absent (or a missing log file) come back as empty
    strings.
    """
    fields = ["", "", "", "", ""]
    if not os.path.isfile(inj_run_logname):
        return fields
    # (line marker, ';'-field index, prefix to strip) per extracted value.
    spec = [
        ("thread :", 1, 'thread : '),
        ("Register :", 2, 'Register : '),
        ("Mask :", 3, 'Mask : '),
        ("SMID :", 4, 'SMID : '),
        ("Stuck at :", 5, 'Stuck at : '),
    ]
    logf = open(inj_run_logname, "r")
    for line in logf:
        for slot, (marker, idx, prefix) in enumerate(spec):
            if marker in line:
                fields[slot] = line.split(';')[idx].split(prefix)[1].strip()
    logf.close()
    return fields
########Compare the results: Golden_stdout.txt and stdout.txt
def compare_stdout():
    """Compare the golden CNN classification output against the faulty run's.

    Reads the ten per-digit confidence lines ("Zero" .. "Nine") from the
    golden stdout and the run's stdout (lines look like "<pct>% : <label>").

    Returns:
        2 if the top-scoring digit changed (critical SDC),
        1 if the top digit is the same but some confidence differs (safe),
        0 if all confidences match (masked).
    """
    gold_class = list(0.0 for i in range(0, 10))
    out_class = list(0.0 for i in range(0, 10))
    list_elements = ['Zero','One','Two','Three','Four','Five','Six','Seven','Eight','Nine']

    def _read_scores(path, scores):
        # One pass over the file, filling scores[idx] for each digit label.
        # (Replaces ten copy-pasted if-blocks; list_elements was previously
        # defined but never used.)
        with open(path, "r") as log:
            for line in log:
                for idx, label in enumerate(list_elements):
                    if label in line:
                        scores[idx] = float(line.split(':')[0].split('%')[0].strip())

    if os.path.isfile(golden_stdout_fname) and os.path.isfile(stdout_fname):
        _read_scores(golden_stdout_fname, gold_class)
        _read_scores(stdout_fname, out_class)
    # Find the highest-confidence digit in each output, ignoring NaNs.
    # Fixed: the original loops used range(0, 9) and silently ignored the
    # "Nine" entry in every comparison below.
    gold_highest = -1
    gold_highest_pos = -1
    for i in range(0, 10):
        if (gold_class[i] > gold_highest and str(gold_class[i]) != 'nan'):
            gold_highest = gold_class[i]
            gold_highest_pos = i
    out_highest = -1
    out_highest_pos = -1
    for i in range(0, 10):
        if (out_class[i] > out_highest and str(out_class[i]) != 'nan'):
            out_highest = out_class[i]
            out_highest_pos = i
    if (gold_highest_pos != out_highest_pos):
        return 2 #CRITICAL FAULT
    for i in range(0, 10):
        if (out_class[i] != gold_class[i]):
            return 1 #different percentages! NOT CRITICAL since outcome is still correct
    return 0
###############################################################################
# Set enviroment variables for run_script
###############################################################################
def set_env_variables(): # Set directory paths
    """Compute and publish (as module globals) every file path used for the
    current app: stdout/stderr logs, injector seed/log files, run scripts
    and the per-app report under <script_dir>/logs."""
    p.set_paths() # update paths
    global stdout_fname,golden_stdout_fname, stderr_fname, injection_seeds_file, new_directory, injrun_fname, stdoutdiff_fname, stderrdiff_fname , script_fname, sdc_fname, inj_run_logname, report_fname
    #new_directory = p.NVBITFI_HOME + "/logs/" + app + "/" + app + "-group" + igid + "-model" + bfm + "-icount" + icount
    new_directory = p.script_dir[app] + "/logs"
    if not os.path.isdir(new_directory): os.system("mkdir -p " + new_directory)
    stdout_fname = p.script_dir[app] + "/" + p.stdout_file  # (duplicate assignment removed)
    golden_stdout_fname = p.script_dir[app] + "/" + p.golden_stdout_file
    stderr_fname = p.script_dir[app] + "/" + p.stderr_file
    injection_seeds_file = p.NVBITFI_HOME + "/pf_injector/" + p.injection_seeds
    injrun_fname = p.NVBITFI_HOME + "/pf_injector/" + p.inj_run_log
    stdoutdiff_fname = p.NVBITFI_HOME + "/scripts/" + p.stdout_diff_log
    stderrdiff_fname = p.NVBITFI_HOME + "/scripts/" + p.stderr_diff_log
    script_fname = p.script_dir[app] + "/" + p.run_script_pf
    sdc_fname = p.script_dir[app] + "/" + p.sdc_check_pf
    inj_run_logname = p.NVBITFI_HOME + "/pf_injector/" + p.inj_run_log
    report_fname = new_directory + "/" + p.report
def main():
    """Run one injection campaign per configured app."""
    global app
    for app in p.apps:
        set_env_variables()
        run_injections()  # fixed: return value was bound to an unused err_cat


if __name__ == "__main__" :
    main()
| StarcoderdataPython |
108588 | # Generated by Django 3.2.3 on 2021-07-19 17:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable Profile.lang CharField (max_length=3 -- presumably
    # an ISO-639 language code; confirm against the app's usage).

    dependencies = [
        ('hedera', '0003_profile_show_node_ids'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='lang',
            field=models.CharField(max_length=3, null=True),
        ),
    ]
| StarcoderdataPython |
3314277 | """Integration tests for Subreg"""
from unittest import TestCase
from lexicon.tests.providers.integration_tests import IntegrationTests
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class SubregProviderTests(TestCase, IntegrationTests):
    """TestCase for Subreg.

    All actual test methods are inherited from IntegrationTests; this class
    only selects the provider under test and the domain fixture.
    """
    provider_name = 'subreg'
    domain = 'oldium.xyz'
1748418 | <filename>modules/ethnologue.py
#!/usr/bin/python3
"""
ethnologue.py - Ethnologue.com language lookup
author: mattr555
"""
from lxml import html
from string import ascii_lowercase
import web
import logging
logger = logging.getLogger('phenny')
def shorten_num(n):
    """Format a count compactly: 950, 9.5K, 9.5M, 9.5B.

    Trailing ".0" is stripped (1000 -> "1K"); counts below one thousand
    keep thousands separators.
    """
    if n < 1000:
        return '{:,}'.format(n)
    elif n < 1000000:
        return '{}K'.format(str(round(n/1000, 1)).rstrip('0').rstrip('.'))
    elif n < 1000000000:
        return '{}M'.format(str(round(n/1000000, 1)).rstrip('0').rstrip('.'))
    # Fixed: values >= 1e9 previously fell off the end and returned None.
    return '{}B'.format(str(round(n/1000000000, 1)).rstrip('0').rstrip('.'))
def scrape_ethnologue_codes(phenny):
    """Build a {iso-639-3 code: language name} table from ethnologue.com's
    browse pages (one per letter) and cache it on phenny.ethno_data."""
    data = {}

    def scrape_ethnologue_code(doc):
        # Each browse page row links the code (link text) to the language
        # name (link title attribute).
        for e in doc.find_class('views-field-field-iso-639-3'):
            code = e.find('div/a').text
            name = e.find('div/a').attrib['title']
            data[code] = name

    base_url = 'https://www.ethnologue.com/browse/codes/'
    for letter in ascii_lowercase:
        # web.with_scraped_page fetches+parses the page, then invokes the
        # decorated callback with the parsed document.
        web.with_scraped_page(base_url + letter)(scrape_ethnologue_code)()
    phenny.ethno_data = data
def write_ethnologue_codes(phenny, raw=None):
    """Refresh the cached iso-639 code table.

    When triggered from IRC (*raw* given) the command is admin-only and the
    result is announced; internal calls (raw=None) run silently.
    """
    if raw is not None and not raw.admin:
        phenny.say('Only admins can execute that command!')
        return
    scrape_ethnologue_codes(phenny)
    logger.debug('Ethnologue iso-639 code fetch successful')
    if raw:
        phenny.say('Ethnologue iso-639 code fetch successful')

write_ethnologue_codes.name = 'write_ethnologue_codes'
write_ethnologue_codes.commands = ['write-ethno-codes']
write_ethnologue_codes.priority = 'low'
def parse_num_speakers(s):
    """Extract a speaker count from an Ethnologue population sentence.

    Collects every short or comma-grouped number in *s*. When the text
    mentions an ethnic population / worldwide L2 users, the largest number
    is that total, so the second-largest is reported instead.

    Returns a shortened count string, or 'No primary' when no usable
    number is found or only non-L1 speakers exist.
    """
    hits = []
    for i in s.split(' '):
        if len(i) <= 3 or ',' in i:
            if i.replace(',', '').replace('.', '').isdigit():
                hits.append(int(i.replace(',', '').replace('.', '')))
    if hits and 'no known l1 speakers' in s.lower():
        return 'No primary'
    elif hits and ('ethnic population' in s.lower() or 'l2 users worldwide' in s.lower()):
        # Fixed two bugs: the condition was unparenthesized, so "l2 users
        # worldwide" fired even with no numbers (IndexError), and the
        # second-largest lookup crashed when only one number was present.
        if len(hits) >= 2:
            return shorten_num(sorted(hits, reverse=True)[1])
        return shorten_num(hits[0])
    elif hits:
        return shorten_num(max(hits))
    return 'No primary'
def ethnologue(phenny, input):
    """.ethnologue <lg> - gives ethnologue info from partial language name or iso639"""
    raw = str(input.group(1)).lower()
    iso = []
    # Resolve the query to candidate iso-639-3 codes: exact 3-letter code,
    # 2-letter code via the conversion table, or substring of a name.
    if len(raw) == 3 and raw in phenny.ethno_data:
        iso.append(raw)
    elif len(raw) == 2 and raw in phenny.iso_conversion_data:
        iso.append(phenny.iso_conversion_data[raw])
    elif len(raw) > 3:
        for code, lang in phenny.ethno_data.items():
            if raw in lang.lower():
                iso.append(code)
    if len(iso) == 1:
        # Unique match: scrape the language page and summarize it.
        url = "http://www.ethnologue.com/language/" + iso[0]
        try:
            resp = web.get(url)
        except web.HTTPError as e:
            phenny.say('Oh noes! Ethnologue responded with ' + str(e.code) + ' ' + e.msg)
            return
        h = html.document_fromstring(resp)
        if "macrolanguage" in h.find_class('field-name-a-language-of')[0].find('div/div/h2').text:
            name = h.get_element_by_id('page-title').text
            iso_code = h.find_class('field-name-language-iso-link-to-sil-org')[0].find('div/div/a').text
            num_speakers_field = h.find_class('field-name-field-population')[0].find('div/div/p').text
            num_speakers = parse_num_speakers(num_speakers_field)
            # Member languages are "[xxx]" links; strip the brackets.
            child_langs = map(lambda e:e.text[1:-1], h.find_class('field-name-field-comments')[0].findall('div/div/p/a'))
            response = "{} ({}) is a macrolanguage with {} speakers and the following languages: {}. Src: {}".format(
                name, iso_code, num_speakers, ', '.join(child_langs), url)
        else:
            name = h.get_element_by_id('page-title').text
            iso_code = h.find_class('field-name-language-iso-link-to-sil-org')[0].find('div/div/a').text
            where_spoken = h.find_class('field-name-a-language-of')[0].find('div/div/h2/a').text
            where_spoken_cont = h.find_class('field-name-field-region')
            if where_spoken_cont:
                # Truncate the region blurb to ~100 chars with an ellipsis.
                where_spoken_cont = where_spoken_cont[0].find('div/div/p').text[:100]
                if len(where_spoken_cont) > 98:
                    where_spoken_cont += '...'
                where_spoken += ', ' + where_spoken_cont
            if where_spoken[-1] != '.':
                where_spoken += '.'
            num_speakers_field = h.find_class('field-name-field-population')[0].find('div/div/p').text
            num_speakers = parse_num_speakers(num_speakers_field)
            language_status = h.find_class('field-name-language-status')[0].find('div/div/p').text.split('.')[0] + '.'
            response = "{} ({}): spoken in {} {} speakers. Status: {} Src: {}".format(
                name, iso_code, where_spoken, num_speakers, language_status, url)
    elif len(iso) > 1:
        # Ambiguous name match: suggest the 3-letter candidates.
        did_you_mean = ['{} ({})'.format(i, phenny.ethno_data[i]) for i in iso if len(i) == 3]
        response = "Try .iso639 for better results. Did you mean: " + ', '.join(did_you_mean) + "?"
    else:
        response = "That ISO code wasn't found. (Hint: use .iso639 for better results)"
    phenny.say(response)

ethnologue.name = 'ethnologue'
ethnologue.commands = ['ethnologue', 'ethno', 'logue', 'lg', 'eth']
ethnologue.example = '.ethnologue khk'
ethnologue.priority = 'low'
def setup(phenny):
    """Module init hook: pre-populate phenny.ethno_data on load."""
    scrape_ethnologue_codes(phenny)
| StarcoderdataPython |
1661097 | #!/usr/bin/env python
# coding=utf-8
#
# Author: liufr
# Github: https://github.com/Fengrui-Liu
# LastEditTime: 2021-01-11 14:35:09
# Copyright 2021 liufr
# Description:
#
from .stream_generator import StreamGenerator
from .math_toolkit import StreamStatistic
from .dataset import MultivariateDS, UnivariateDS, CustomDS
from .plot import plot
# Explicit public API of the package (names re-exported above).
__all__ = [
    "StreamGenerator",
    "StreamStatistic",
    "MultivariateDS",
    "UnivariateDS",
    "CustomDS",
    "plot",
]
| StarcoderdataPython |
3332970 | <filename>api/main.py
from fastapi import FastAPI
from api.config import Config
from api.satellite import views as satellite_views
from api.satellite.data import get_satellites
app = FastAPI()
app.include_router(satellite_views.router, prefix="/satellites")

config = Config()


@app.get("/health")
def health():
    """Liveness probe endpoint."""
    return {"status": "ok"}


@app.on_event("startup")
async def load_satellite_data():
    # Warm the satellite data from the configured repository at startup.
    await get_satellites(config.SATELLITE_REPOSITORY_URL)
| StarcoderdataPython |
3268694 | <gh_stars>1-10
from . import abstract_models
# Concrete model classes; all fields/behavior come from the abstract bases.
class Transaction(abstract_models.AbstractTransaction):
    pass


class Source(abstract_models.AbstractSource):
    pass


class SourceType(abstract_models.AbstractSourceType):
    pass


class Bankcard(abstract_models.AbstractBankcard):
    pass
| StarcoderdataPython |
1648969 | from passlib.hash import sha512_crypt
# Brute-force search (Python 2 script): find a 3-digit numeric password
# whose sha512-crypt hash (salt "penguins", 5000 rounds) has "PcSL" at
# character offset 12.
s = "penguins"
for ip in range(1000,2000):
    sp = str(ip)
    p = sp[1:]  # drop the leading "1" -> candidates "000".."999"
    h = sha512_crypt.using(salt=s, rounds=5000).hash(p)
    if h[12:16] == "PcSL":
        print p, h
| StarcoderdataPython |
1639499 | <reponame>ghanashyamchalla/cis_interface
from yggdrasil.tests import assert_raises
from yggdrasil.metaschema.datatypes.tests import test_MetaschemaType as parent
class TestAnyMetaschemaType(parent.TestMetaschemaType):
    r"""Test class for AnyMetaschemaType class."""

    _mod = 'AnyMetaschemaType'
    _cls = 'AnyMetaschemaType'

    @staticmethod
    def after_class_creation(cls):
        r"""Actions to be taken during class construction.

        Populates the fixture attributes the inherited tests iterate over
        (valid/invalid encoded and decoded values, compatible pairs).
        """
        parent.TestMetaschemaType.after_class_creation(cls)
        cls._value = {'a': int(1), 'b': float(1)}
        # Valid encoded form must carry a concrete 'temptype' typedef.
        cls._valid_encoded = [dict(cls._typedef,
                                   type=cls.get_import_cls().name,
                                   temptype={'type': 'int'})]
        cls._valid_decoded = [cls._value, object]
        cls._invalid_validate = []
        cls._invalid_encoded = [{}]
        cls._invalid_decoded = []
        cls._compatible_objects = [(cls._value, cls._value, None)]

    def test_decode_data_errors(self):
        r"""Test errors in decode_data."""
        # decode_data requires typedef metadata; None must raise ValueError.
        assert_raises(ValueError, self.import_cls.decode_data, 'hello', None)
| StarcoderdataPython |
# ok-py autograder spec for question q3_1_1: doctest cases checking the
# student-defined `my_20_features` (length, membership in the movie table
# labels, word coverage in train/test sets, and uniqueness).
test = {
'name': 'q3_1_1',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> len(my_20_features)
20
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> np.all([f in test_movies.labels for f in my_20_features])
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # It looks like there are many movies in the training set that;
>>> # don't have any of your chosen words. That will make your;
>>> # classifier perform very poorly in some cases. Try choosing;
>>> # at least 1 common word.;
>>> train_f = train_movies.select(my_20_features);
>>> np.count_nonzero(train_f.apply(lambda r: np.sum(np.abs(np.array(r))) == 0)) < 20
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # It looks like there are many movies in the test set that;
>>> # don't have any of your chosen words. That will make your;
>>> # classifier perform very poorly in some cases. Try choosing;
>>> # at least 1 common word.;
>>> test_f = test_movies.select(my_20_features);
>>> np.count_nonzero(test_f.apply(lambda r: np.sum(np.abs(np.array(r))) == 0)) < 5
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # It looks like you may have duplicate words! Make sure not to!;
>>> len(set(my_20_features)) >= 20
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| StarcoderdataPython |
4817405 | <reponame>CrispenGari/days-of-python
def four_sum(numbers: list, target: int) -> list:
    """Return all unique quadruplets in *numbers* summing to *target*.

    Sorts the input, fixes the first two elements with nested loops, and
    closes each quadruplet with a two-pointer scan, skipping duplicates.
    O(n^3) time, O(1) extra space besides the output.
    """
    result = []
    # Fixed: the guard used "and", so a non-empty list shorter than 4 never
    # triggered the early return (the loops below still produced [], but
    # the intent was clearly to bail out here).
    if not numbers or len(numbers) < 4:
        return result
    numbers = sorted(numbers)
    # i fixes the smallest element of the quadruplet.
    for i in range(len(numbers) - 3):
        # skip duplicate first elements (except the very first index)
        if i != 0 and numbers[i] == numbers[i - 1]:
            continue
        # j fixes the second element.
        for j in range(i + 1, len(numbers) - 2):
            if j != i + 1 and numbers[j] == numbers[j - 1]:
                continue
            # k/l are the two-pointer "search start"/"search end".
            k = j + 1
            l = len(numbers) - 1
            while k < l:
                x = numbers[i] + numbers[j] + numbers[k] + numbers[l]
                if x < target:
                    k += 1
                elif x > target:
                    l -= 1
                else:
                    result.append([numbers[i], numbers[j], numbers[k], numbers[l]])
                    k += 1
                    l -= 1
                    # skip duplicates on both pointers
                    while k < l and numbers[l] == numbers[l + 1]:
                        l -= 1
                    while k < l and numbers[k] == numbers[k - 1]:
                        k += 1
    return result
# Demo: runs on import/execution and prints the sample result.
S = [1, 0, -1, 0, -2, 2]
print(four_sum(S, 0))
3328915 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : memory_profiler_demo
# @Time : 2021/1/25 1:25 下午
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
from memory_profiler import profile
@profile  # memory_profiler decorator: reports per-line memory usage
def my_func():
    """Allocate a small list, then a large one that is freed before return.

    The `del b` line should show the large allocation being released in the
    profiler report (list sizes are pointer-based; exact MB depends on the
    interpreter build).
    """
    a = [1] * (10 ** 6)
    b = [2] * (2 * 10 ** 7)
    del b
    return a


if __name__ == '__main__':
    my_func()
# mprof run <script>
# mprof plot
| StarcoderdataPython |
192352 | <filename>visualizer/scripts/solver_interactive.py
#! /usr/bin/env python
from solver_inc import *
VERSION = '0.1.1'  # script version (Python 2 / clingo-based)


class Interactive_Solver(Incremental_Solver):
    """Solver variant that re-grounds the whole program from scratch on
    every data message instead of grounding incrementally."""

    def __init__(self):
        super(Interactive_Solver, self).__init__()
        self._inits = []  # initial-state atoms received so far

    def on_data(self, data):
        # Handle a pending reset: drop cached state and notify the client.
        if self._reset == True:
            self._inits = []
            self._sended = - 1
            self._to_send = {}
            self.send('%$RESET.\n')
            self._reset = False
        for atom in data:
            self._inits.append(atom)
        # Rebuild the program: initial atoms plus every action step already
        # sent, then re-solve.
        self._control = clingo.Control()
        for atom in self._inits:
            self._control.add('base', [], atom + '.')
        for ii in xrange(0, self._sended + 1):
            if ii in self._to_send:
                for atom in self._to_send[ii]:
                    self._control.add('base', [], str(atom) + '.')
        if self.solve() < 0:
            return
        # Send the next unsent plan step (tolerating a one-step gap).
        if self._sended + 1 in self._to_send:
            self.send_step(self._sended + 1)
        elif self._sended + 2 in self._to_send:
            self.send_step(self._sended + 2)

    def on_model(self, model):
        print 'found solution'
        # Collect occurs(object(..), action(..), T) atoms grouped by step T.
        self._to_send = {}
        for atom in model.symbols(atoms=True):
            if (atom.name == 'occurs'
                    and len(atom.arguments) == 3
                    and atom.arguments[0].name == 'object'
                    and atom.arguments[1].name == 'action'):
                step = atom.arguments[2].number
                if step not in self._to_send:
                    self._to_send[step] = []
                self._to_send[step].append(atom)
        return True


if __name__ == "__main__":
    solver = Interactive_Solver()
    solver.run()
| StarcoderdataPython |
3232490 | <reponame>fossabot/atlas<filename>ui-tests/testRunner.py
import unittest
import glob
import os
import importlib
# Discover every tests/test_*.py module and run all of its unittest cases
# in one suite (Python 2 script).
suite = unittest.TestSuite()
for file in glob.glob('tests/test_*.py'):
    modname = os.path.splitext(file)[0].replace('/', '.')  # path -> dotted module name
    print modname
    module = importlib.import_module(modname)
    suite.addTest(unittest.TestLoader().loadTestsFromModule(module))

unittest.TextTestRunner(verbosity=2).run(suite)
1649834 | <gh_stars>1-10
from .bosch_request_192 import BoschRequest192
| StarcoderdataPython |
1761152 | #!/usr/bin/python
from pptx import Presentation
from pptx.util import Inches
from wand.image import Image
import os, os.path
# Map from user-facing layout style names to python-pptx slide_layouts
# indices; the '' entry lets a bare RETURN default to BLANK.
layoutMode = {
    'TITLE' : 0,
    'TITLE_AND_CONTENT' : 1,
    'SECTION_HEADER' : 2,
    'SEQUE' : 2,
    'TWO_CONTENT' : 3,
    'COMPARISON' : 4,
    'TITLE_ONLY' : 5,
    'BLANK' : 6,
    'CONTENT_WITH_CAPTION' : 7,
    'PICTURE_WITH_CAPTION' : 8,
    '' : 6
}
# Crop images
def crop_image(path):
    """Interactively crop every image directly inside *path*, in place.

    Prompts the user for LEFT/TOP offsets; width/height stay at the full
    image size, so only the left/top edges are trimmed. (Python 2.)
    """
    print "Entering crop_image()"
    subfiles = os.listdir(path)
    left = int(raw_input("LEFT CROP: "))
    top = int(raw_input("TOP CROP: "))
    # TODO: Figure out how to configure right and bottom crop
    #right = raw_input(float("RIGHT CROP: ").strip())
    #bottom = raw_input(float("BOTTOM CROP: ").strip())
    for sf in subfiles:
        if os.path.join(path, sf).lower().endswith(('.jpg', '.png', '.jpeg', '.gif', '.tiff',)):
            print "cropping %s" % (os.path.join(path, sf))
            with Image(filename=os.path.join(path, sf)) as img:
                # Overwrite the original file with the cropped result.
                img.crop(left=left, top=top, width=img.width, height=img.height)
                img.save(filename=os.path.join(path, sf))
def pdf2image(path, *pages):
    """Generator: convert the given PDF page numbers to JPG, yielding paths.

    Nothing is converted until the generator is iterated. Every requested
    page overwrites the same <path>.jpg output file.
    """
    # converting first page into JPG
    if pages:
        for page in pages:
            newpath = path + ('[%s]' % page)  # Wand syntax for a single PDF page
            with Image(filename=newpath) as img:
                imagepath = os.path.splitext(path)[0] + '.jpg'
                img.save(filename=imagepath)
                # TODO: Refactor so that yielding is done only in filter_files()
                yield imagepath
# Filter files and images
def filter_files(path):
    """Yield image paths under *path*, cropping any 'crop' subdirectory first.

    Direct image files are yielded as-is; a subdirectory named 'crop' is
    run through crop_image() before its images are yielded.

    NOTE(review): pdf2image() is a generator, so the bare call below never
    actually converts anything -- confirm intended.
    """
    files = os.listdir(path)
    for f in files:
        root = os.path.join(path, f)
        if os.path.isdir(root):
            print "Expecting FILE, got DIR!"
            if os.path.basename(root) == 'crop':
                print "Found a subdirectory named 'crop'"
                print """
######################## CROP IMAGES #######################\r
+ Set CROPPING for all images inside 'crop' directory.\r
+ The values are LEFT, TOP, RIGHT, and BOTTOM.\r
+ OR /images for relative path.\r
############################################################\n
"""
                crop_image(root)
            subfiles = os.listdir(root)
            for sf in subfiles:
                if sf.lower().endswith(('.jpg', '.png', '.jpeg', '.gif', '.tiff',)):
                    yield os.path.join(root, sf)
        elif os.path.isfile(root):
            if root.lower().endswith(('.jpg', '.png', '.jpeg', '.gif', '.tiff',)):
                yield root
            elif root.lower().endswith('.pdf'):
                pdf2image(root, 0)
# ---- interactive main flow (Python 2; runs on import/execution) ----
print """
#################### LOCATE DIRECTORY #######################\r
+ Locate the directory where your images are located.\r
+ For example: User/Macbook/Pictures for absolute path,\r
+ OR /images for relative path.\r
+ Make sure no subdirectories are present in the directory.\r
+ Optionally, you can drag the directory into the terminal\r
+ window after the prompt.\r
#############################################################\n
"""
img_path = raw_input("Where is the images folder?: ").strip()

# Create a presentation file
print "Creating presentation..."
prs = Presentation()
print """
##################### CHOOSE LAYOUT STYLE ######################\r
+ Powerpoint comes with several layout styles for slides.\r
+ For example: TITLE, TITLE_WITH_CONTENT, BLANK, etc.\r
+ Type the preferred style in UPPERCASE into the next prompt\r
+ OR hit RETURN for default BLANK style.\r
"""
for mode in (k for k in layoutMode if k != ''):
    print mode
print """
################################################################\r
"""
layout_mode = raw_input("\nWhat's your slides layout style?: ").strip()
slide_layout = prs.slide_layouts[layoutMode[layout_mode]]
print"""
######################## SET MARGINS ###########################\r
+ Set LEFT, TOP, and RIGHT margins of your images.\r
+ Note that if RIGHT margin is set, images will be scaled\r
+ proportionally to fit. Otherwise, hit RETURN when\r
+ prompted to set margin to 0 (fit to the slide).\r
+ Margins are in inches.\r
#################################################################\n
"""
# Empty input -> 0 via the "or 0" fallback.
left = Inches(float( raw_input("LEFT MARGIN: ") or 0 ))
top = Inches(float( raw_input("TOP MARGIN: ") or 0 ))
width = prs.slide_width - (left + Inches(float(raw_input("RIGHT MARGIN: ") or 0)))
# One slide per discovered image.
for path in filter_files(img_path):
    print "Creating slide..."
    slide = prs.slides.add_slide(slide_layout)
    print "Adding " + path
    pic = slide.shapes.add_picture(path, left, top, width)
print"""
##################### SAVE TO DIRECTORY ########################\r
+ CONGRATS! I finished adding images to slides alright.\r
+ Now tell me where to save your powerpoint file.\r
+ If you provide me with just a name i.e. 'test.pptx',\r
+ I will save the file to your images directory. Otherwise,\r
+ give a path like 'User/Macbook/Documents/test.pptx'\r
+ or drag the directory into this window as usual.\r
#################################################################\n
"""
save_to = raw_input("Where to save your powerpoint to?: ").strip()
# save_to = 'test.pptx'
# Bare name (no '/'): save into the images directory, adding .pptx if needed.
if save_to.rpartition('/')[0] == '' and save_to.rpartition('/')[1] == '':
    if not save_to.lower().endswith('.pptx'):
        prs.save(os.path.join(img_path, save_to + '.pptx'))
        print "Your file is saved to -> " + os.path.join(img_path, save_to + '.pptx')
    else:
        prs.save(os.path.join(img_path, save_to))
        print "Your file is saved to -> " + os.path.join(img_path, save_to)
elif save_to.rpartition('/')[0] != '' and save_to.lower().endswith('.pptx'):
    # '/' found, look like a absolute path
    prs.save(save_to)
    print "Your file is saved to -> " + save_to
elif save_to.rpartition('/')[0] != '' and not save_to.endswith('.pptx'):
    # Path given but no file name: ask for one.
    print "Look like you have a path, but still missing the file name..."
    name = raw_input("Please type your preferred file name: ")
    name = name if name.endswith('.pptx') else (name + '.pptx')
    prs.save(os.path.join(save_to, name))
    print "Your file is saved to -> " + os.path.join(save_to, name)
else:
    print "There's something fishy with the file name and directory. Would you mind starting over?"
155571 | <filename>473-matchsticks-to-square/473-matchsticks-to-square.py
class Solution:
    def makesquare(self, matchsticks: List[int]) -> bool:
        """Decide whether all matchsticks can form a square, each stick
        used exactly once and unbroken.

        Memoized bitmask search: fill four sides of length total/4 one at
        a time. O(2^N * N) time, O(2^N) memo entries.
        """
        total = sum(matchsticks)
        if total % 4 != 0:
            return False
        side = total // 4
        n = len(matchsticks)

        @lru_cache(None)
        def fill(used, sides_left, remaining):
            # No more sides to build -> every stick placement worked out.
            if sides_left == 0:
                return True
            # Current side complete -> move on to the next one.
            if remaining == 0:
                return fill(used, sides_left - 1, side)
            # Overshot this side's length.
            if remaining < 0:
                return False
            for idx in range(n):
                if used & (1 << idx):
                    continue  # stick already placed
                if fill(used | (1 << idx), sides_left, remaining - matchsticks[idx]):
                    return True
            return False

        # Three side-boundaries to cross; the fourth side is forced.
        return fill(0, 3, side)
4840519 | from attacks.clf_pgd import *
from attacks.bpda import *
from attacks.square import *
from attacks.bpda_strong import *
from attacks.bpda_score import *
from attacks.bpda_total import *
| StarcoderdataPython |
1628097 | <gh_stars>1-10
import pytest
from pyspark.sql import SparkSession
from sparkql.exceptions import InvalidDataFrameError
from sparkql import Struct, String
def test_validation_example(spark_session: SparkSession):
    """End-to-end check of Struct.validate_data_frame: a frame missing a
    declared column is reported invalid with a readable schema diff."""
    dframe = spark_session.createDataFrame([{"title": "abc"}])

    class Article(Struct):
        title = String()
        body = String()

    validation_result = Article.validate_data_frame(dframe)
    assert not validation_result.is_valid
    # Report shows both schemas followed by a line diff (struct -> frame).
    assert validation_result.report == """Struct schema...
StructType(List(
StructField(title,StringType,true),
StructField(body,StringType,true)))
DataFrame schema...
StructType(List(
StructField(title,StringType,true)))
Diff of struct -> data frame...
StructType(List(
- StructField(title,StringType,true)))
+ StructField(title,StringType,true),
+ StructField(body,StringType,true)))"""

    # raise_on_invalid turns the invalid result into an exception.
    with pytest.raises(InvalidDataFrameError):
        Article.validate_data_frame(dframe).raise_on_invalid()
| StarcoderdataPython |
92108 | # -*- encoding: utf-8 -*-
"""
keri.core.coring module
"""
import re
import json
import copy
from dataclasses import dataclass, astuple
from collections import namedtuple, deque
from base64 import urlsafe_b64encode as encodeB64
from base64 import urlsafe_b64decode as decodeB64
from math import ceil
from fractions import Fraction
from orderedset import OrderedSet
import cbor2 as cbor
import msgpack
import pysodium
import blake3
import hashlib
from ..kering import (EmptyMaterialError, RawMaterialError, UnknownCodeError,
InvalidCodeIndexError, InvalidCodeSizeError,
ConversionError,
ValidationError, VersionError, DerivationError,
ShortageError, UnexpectedCodeError, DeserializationError,
UnexpectedCountCodeError, UnexpectedOpCodeError)
from ..kering import Versionage, Version
from ..help.helping import sceil, nowIso8601
# Serialage labels the three supported serialization kinds
Serialage = namedtuple("Serialage", 'json mgpk cbor')
# Serials holds the kind codes as embedded in version strings
Serials = Serialage(json='JSON', mgpk='MGPK', cbor='CBOR')
# Mimes holds the corresponding KERI media (MIME) types for each kind
Mimes = Serialage(json='application/keri+json',
                  mgpk='application/keri+msgpack',
                  cbor='application/keri+cbor',)
VERRAWSIZE = 6  # hex characters in raw serialization size in version string
# "{:0{}x}".format(300, 6)  # make num char in hex a variable
# '00012c'
VERFMT = "KERI{:x}{:x}{}{:0{}x}_"  # version format string: major, minor, kind, size
VERFULLSIZE = 17  # number of characters in full version string
def Versify(version=None, kind=Serials.json, size=0):
    """
    Build and return a KERI version string for the given protocol version,
    serialization kind, and raw serialization size.

    Raises ValueError when kind is not a supported serialization kind.
    """
    if kind not in Serials:
        raise ValueError("Invalid serialization kind = {}".format(kind))
    vn = version or Version  # fall back to the library default protocol version
    return VERFMT.format(vn[0], vn[1], kind, size, VERRAWSIZE)
# Vstrings holds prototype (size 0) version strings, one per serialization kind
Vstrings = Serialage(json=Versify(kind=Serials.json, size=0),
                     mgpk=Versify(kind=Serials.mgpk, size=0),
                     cbor=Versify(kind=Serials.cbor, size=0))
# regex for extracting major, minor, kind, and size fields from a version string
VEREX = b'KERI(?P<major>[0-9a-f])(?P<minor>[0-9a-f])(?P<kind>[A-Z]{4})(?P<size>[0-9a-f]{6})_'
Rever = re.compile(VEREX)  # compile is faster
MINSNIFFSIZE = 12 + VERFULLSIZE  # min bytes in buffer to sniff else need more
def Deversify(vs):
    """
    Parse version string vs and return tuple (kind, version, size) where:
        kind is serialization kind str, one of Serials ('JSON', 'MGPK', 'CBOR')
        version is protocol version as Versionage namedtuple
        size is int of the raw serialization size

    Parameters:
        vs is version string str

    Raises ValueError when vs does not match the version string regex or
    names an unsupported serialization kind.
    """
    match = Rever.match(vs.encode("utf-8"))  # regex operates on bytes
    if not match:
        raise ValueError("Invalid version string = {}".format(vs))
    major, minor, kind, size = match.group("major", "minor", "kind", "size")
    kind = kind.decode("utf-8")
    if kind not in Serials:
        raise ValueError("Invalid serialization kind = {}".format(kind))
    return (kind,
            Versionage(major=int(major, 16), minor=int(minor, 16)),
            int(size, 16))
# Ilkage holds the packet type (ilk) labels for key event messages
Ilkage = namedtuple("Ilkage", 'icp rot ixn dip drt rct vrc')  # Event ilk (type of event)
Ilks = Ilkage(icp='icp', rot='rot', ixn='ixn', dip='dip', drt='drt', rct='rct',
              vrc='vrc')
# Base64 utilities
BASE64_PAD = b'='
# Mappings between Base64 Encode Index and Decode Characters
#   B64ChrByIdx is dict where each key is a B64 index and each value is the B64 char
#   B64IdxByChr is dict where each key is a B64 char and each value is the B64 index
# Map Base64 index to char (URL-safe alphabet per RFC 4648)
B64ChrByIdx = dict((index, char) for index, char in enumerate([chr(x) for x in range(65, 91)]))  # 0-25 -> 'A'-'Z'
B64ChrByIdx.update([(index + 26, char) for index, char in enumerate([chr(x) for x in range(97, 123)])])  # 26-51 -> 'a'-'z'
B64ChrByIdx.update([(index + 52, char) for index, char in enumerate([chr(x) for x in range(48, 58)])])  # 52-61 -> '0'-'9'
B64ChrByIdx[62] = '-'  # URL-safe minus
B64ChrByIdx[63] = '_'  # URL-safe underscore
# Map char to Base64 index (inverse of B64ChrByIdx)
B64IdxByChr = {char: index for index, char in B64ChrByIdx.items()}
def intToB64(i, l=1):
    """
    Return Base64 str conversion of non-negative int i, left padded with the
    Base64 zero char "A" up to a minimum of l digits.
    """
    digits = []  # Base64 chars, least significant first
    digits.append(B64ChrByIdx[i % 64])
    i //= 64
    while i:
        digits.append(B64ChrByIdx[i % 64])
        i //= 64
    while len(digits) < l:  # pad to minimum length (appended, since reversed below)
        digits.append("A")
    return "".join(reversed(digits))
def intToB64b(i, l=1):
    """
    Return Base64 bytes conversion of int i, left padded with "A" up to a
    minimum of l digits. Bytes version of intToB64.
    """
    b64 = intToB64(i=i, l=l)
    return b64.encode("utf-8")
def b64ToInt(s):
    """
    Return int conversion of Base64 str or bytes s, one sextet per char,
    most significant char first.

    Raises KeyError on a char outside the URL-safe Base64 alphabet.
    """
    if hasattr(s, 'decode'):
        s = s.decode("utf-8")
    i = 0
    for c in s:
        i = (i << 6) | B64IdxByChr[c]  # shift in six bits per char
    return i
def b64ToB2(s):
    """
    Return conversion (decode) of Base64 chars s to Base2 bytes, one sextet
    per char, left aligned with zero pad bits in the last (rightmost) byte.
    The returned length is the minimum number of octets sufficient to hold
    all the sextets. Assumes no pad chars in s.

    Useful for decoding, as bytes, code characters from the front of a
    Base64 encoded stream.
    """
    n = sceil(len(s) * 3 / 4)  # min number of octets to hold all sextets
    i = b64ToInt(s) << (2 * (len(s) % 4))  # left align: 2 pad bits per partial group
    return i.to_bytes(n, 'big')
def b2ToB64(b, l):
    """
    Return conversion (encode) of the first l sextets from the front (left)
    of Base2 bytes b as l Base64 chars. Caller must supply l because e.g.
    l=3 and l=4 both occupy 3 bytes of b.

    Parameters:
        b is bytes or str holding the sextets
        l is int number of sextets to encode

    Raises ValueError when b holds fewer than l sextets.
    """
    if hasattr(b, 'encode'):
        b = b.encode("utf-8")  # accept str as well as bytes
    n = sceil(l * 3 / 4)  # bytes needed to hold l sextets
    if len(b) < n:
        raise ValueError("Not enough bytes in {} to nab {} sextets.".format(b, l))
    i = int.from_bytes(b[:n], 'big') >> (2 * (l % 4))  # drop trailing pad bits
    return intToB64(i, l)
def nabSextets(b, l):
    """
    Return bytes holding the first l sextets from the front (left) of b,
    right zero padded in the last byte. The returned length is the minimum
    number of bytes sufficient to hold l sextets.

    Parameters:
        b is bytes or str
        l is int number of sextets to nab

    Raises ValueError when b holds fewer than l sextets.
    """
    if hasattr(b, 'encode'):
        b = b.encode("utf-8")  # accept str as well as bytes
    n = sceil(l * 3 / 4)  # bytes needed to hold l sextets
    if len(b) < n:
        raise ValueError("Not enough bytes in {} to nab {} sextets.".format(b, l))
    p = 2 * (l % 4)  # number of trailing pad bits
    i = (int.from_bytes(b[:n], 'big') >> p) << p  # zero out the pad bits
    return i.to_bytes(n, 'big')
def generateSigners(salt=None, count=8, transferable=True):
    """
    Return list of count Ed25519 Signers whose seeds are derived from salt.

    Parameters:
        salt is bytes 16 byte long root cryptomatter from which seeds for
            the Signers are derived; a random salt is created when not provided
        count is int number of signers in the returned list
        transferable is Boolean; True means each signer.verfer code is
            transferable, non-transferable otherwise
    """
    salt = salt if salt else pysodium.randombytes(pysodium.crypto_pwhash_SALTBYTES)
    signers = []
    for i in range(count):
        # derive each seed from salt with the hex index as the path
        # (algorithm default is argon2id)
        seed = pysodium.crypto_pwhash(outlen=32,
                                      passwd="{:x}".format(i),
                                      salt=salt,
                                      opslimit=pysodium.crypto_pwhash_OPSLIMIT_INTERACTIVE,
                                      memlimit=pysodium.crypto_pwhash_MEMLIMIT_INTERACTIVE,
                                      alg=pysodium.crypto_pwhash_ALG_DEFAULT)
        signers.append(Signer(raw=seed, transferable=transferable))
    return signers
def generateSecrets(salt=None, count=8):
    """
    Return list of count fully qualified Base64 (qb64) secret seeds for
    Ed25519 private keys derived from salt.

    Parameters:
        salt is bytes 16 byte long root cryptomatter from which seeds are
            derived; a random salt is created when not provided
        count is int number of secrets in the returned list
    """
    return [signer.qb64 for signer in generateSigners(salt=salt, count=count)]
@dataclass(frozen=True)
class CryNonTransCodex:
    """
    CryNonTransCodex is codex all non-transferable derivation codes.
    Only provide defined codes.
    Undefined are left out so that inclusion(exclusion) via 'in' operator works.
    """
    Ed25519N: str = 'B'  # Ed25519 verification key non-transferable, basic derivation.
    ECDSA_256k1N: str = "1AAA"  # ECDSA secp256k1 verification key non-transferable, basic derivation.
    Ed448N: str = "1AAC"  # Ed448 non-transferable prefix public signing verification key. Basic derivation.
    def __iter__(self):
        # yields the code values so membership ('in') tests work on the instance
        return iter(astuple(self))
CryNonTransDex = CryNonTransCodex()  # Make instance
@dataclass(frozen=True)
class CryDigCodex:
    """
    CryDigCodex is codex all digest derivation codes. This is needed to ensure
    delegated inception using a self-addressing derivation i.e. digest derivation
    code.
    Only provide defined codes.
    Undefined are left out so that inclusion(exclusion) via 'in' operator works.
    """
    Blake3_256: str = 'E'  # Blake3 256 bit digest self-addressing derivation.
    Blake2b_256: str = 'F'  # Blake2b 256 bit digest self-addressing derivation.
    Blake2s_256: str = 'G'  # Blake2s 256 bit digest self-addressing derivation.
    SHA3_256: str = 'H'  # SHA3 256 bit digest self-addressing derivation.
    SHA2_256: str = 'I'  # SHA2 256 bit digest self-addressing derivation.
    def __iter__(self):
        # yields the code values so membership ('in') tests work on the instance
        return iter(astuple(self))
CryDigDex = CryDigCodex()  # Make instance
# secret derivation security tier labels for key stretching parameters
Tierage = namedtuple("Tierage", 'low med high')
Tiers = Tierage(low='low', med='med', high='high')  # instance for 'in' and attribute access
@dataclass(frozen=True)
class MatterCodex:
    """
    MatterCodex is codex code (stable) part of all matter derivation codes.
    Only provide defined codes.
    Undefined are left out so that inclusion(exclusion) via 'in' operator works.
    """
    Ed25519_Seed: str = 'A'  # Ed25519 256 bit random seed for private key
    Ed25519N: str = 'B'  # Ed25519 verification key non-transferable, basic derivation.
    X25519: str = 'C'  # X25519 public encryption key, converted from Ed25519.
    Ed25519: str = 'D'  # Ed25519 verification key basic derivation
    Blake3_256: str = 'E'  # Blake3 256 bit digest self-addressing derivation.
    Blake2b_256: str = 'F'  # Blake2b 256 bit digest self-addressing derivation.
    Blake2s_256: str = 'G'  # Blake2s 256 bit digest self-addressing derivation.
    SHA3_256: str = 'H'  # SHA3 256 bit digest self-addressing derivation.
    SHA2_256: str = 'I'  # SHA2 256 bit digest self-addressing derivation.
    ECDSA_256k1_Seed: str = 'J'  # ECDSA secp256k1 256 bit random Seed for private key
    Ed448_Seed: str = 'K'  # Ed448 448 bit random Seed for private key
    X448: str = 'L'  # X448 public encryption key, converted from Ed448
    Short: str = 'M'  # Short 2 byte number
    Salt_128: str = '0A'  # 128 bit random seed or 128 bit number
    Ed25519_Sig: str = '0B'  # Ed25519 signature.
    ECDSA_256k1_Sig: str = '0C'  # ECDSA secp256k1 signature.
    Blake3_512: str = '0D'  # Blake3 512 bit digest self-addressing derivation.
    Blake2b_512: str = '0E'  # Blake2b 512 bit digest self-addressing derivation.
    SHA3_512: str = '0F'  # SHA3 512 bit digest self-addressing derivation.
    SHA2_512: str = '0G'  # SHA2 512 bit digest self-addressing derivation.
    Long: str = '0H'  # Long 4 byte number
    ECDSA_256k1N: str = '1AAA'  # ECDSA secp256k1 verification key non-transferable, basic derivation.
    ECDSA_256k1: str = '1AAB'  # Ed25519 public verification or encryption key, basic derivation
    Ed448N: str = '1AAC'  # Ed448 non-transferable prefix public signing verification key. Basic derivation.
    Ed448: str = '1AAD'  # Ed448 public signing verification key. Basic derivation.
    Ed448_Sig: str = '1AAE'  # Ed448 signature. Self-signing derivation.
    Tag: str = '1AAF'  # Base64 4 char tag or 3 byte number.
    DateTime: str = '1AAG'  # Base64 custom encoded 32 char ISO-8601 DateTime
    def __iter__(self):
        return iter(astuple(self))  # enables inclusion test with "in"
MtrDex = MatterCodex()  # Make instance
@dataclass(frozen=True)
class NonTransCodex:
    """
    NonTransCodex is codex all non-transferable derivation codes.
    Only provide defined codes.
    Undefined are left out so that inclusion(exclusion) via 'in' operator works.
    """
    Ed25519N: str = 'B'  # Ed25519 verification key non-transferable, basic derivation.
    ECDSA_256k1N: str = "1AAA"  # ECDSA secp256k1 verification key non-transferable, basic derivation.
    Ed448N: str = "1AAC"  # Ed448 non-transferable prefix public signing verification key. Basic derivation.
    def __iter__(self):
        # yields the code values so membership ('in') tests work on the instance
        return iter(astuple(self))
NonTransDex = NonTransCodex()  # Make instance
@dataclass(frozen=True)
class DigCodex:
    """
    DigCodex is codex all digest derivation codes. This is needed to ensure
    delegated inception using a self-addressing derivation i.e. digest derivation
    code.
    Only provide defined codes.
    Undefined are left out so that inclusion(exclusion) via 'in' operator works.
    """
    Blake3_256: str = 'E'  # Blake3 256 bit digest self-addressing derivation.
    Blake2b_256: str = 'F'  # Blake2b 256 bit digest self-addressing derivation.
    Blake2s_256: str = 'G'  # Blake2s 256 bit digest self-addressing derivation.
    SHA3_256: str = 'H'  # SHA3 256 bit digest self-addressing derivation.
    SHA2_256: str = 'I'  # SHA2 256 bit digest self-addressing derivation.
    Blake3_512: str = '0D'  # Blake3 512 bit digest self-addressing derivation.
    Blake2b_512: str = '0E'  # Blake2b 512 bit digest self-addressing derivation.
    SHA3_512: str = '0F'  # SHA3 512 bit digest self-addressing derivation.
    SHA2_512: str = '0G'  # SHA2 512 bit digest self-addressing derivation.
    def __iter__(self):
        # yields the code values so membership ('in') tests work on the instance
        return iter(astuple(self))
DigDex =DigCodex()  # Make instance
# namedtuple for size entries in matter derivation code tables
# hs is the hard size int number of chars in hard (stable) part of code
# ss is the soft size int number of chars in soft (unstable) part of code
# fs is the full size int number of chars in code plus appended material if any
Sizage = namedtuple("Sizage", "hs ss fs")  # all sizes are counts of Base64 chars
class Matter:
    """
    Matter is the fully qualified cryptographic material primitive base class
    for non-indexed primitives.
    Sub classes are derivation code and key event element context specific.

    Properties:
        .code is str derivation code to indicate cypher suite
        .raw is bytes crypto material only without code
        .qb64 is str in Base64 fully qualified with derivation code + crypto mat
        .qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2 is bytes in binary with derivation code + crypto material
        .transferable is Boolean, True when transferable derivation code False otherwise
        .digestive is Boolean, True when digest derivation code False otherwise

    Hidden:
        ._code is str value for .code property
        ._raw is bytes value for .raw property
        ._infil is method to compute fully qualified Base64 from .raw and .code
        ._exfil is method to extract .code and .raw from fully qualified Base64
        ._binfil is method to compute fully qualified Base2 from .raw and .code
        ._bexfil is method to extract .code and .raw from fully qualified Base2
    """
    Codex = MtrDex
    # Sizes table maps from bytes Base64 first code char to int of hard size, hs,
    # (stable) of code. The soft size, ss, (unstable) is always 0 for Matter.
    Sizes = ({chr(c): 1 for c in range(65, 65+26)})  # 'A'-'Z' single char hard codes
    Sizes.update({chr(c): 1 for c in range(97, 97+26)})  # 'a'-'z' single char hard codes
    Sizes.update([('0', 2), ('1', 4), ('2', 5), ('3', 6), ('4', 8), ('5', 9), ('6', 10)])
    # Codes table maps hard part of code to Sizage namedtuple of (hs, ss, fs)
    # where hs is hard size, ss is soft size, and fs is full size.
    # Soft size, ss, should always be 0 for Matter.
    Codes = {
        'A': Sizage(hs=1, ss=0, fs=44),
        'B': Sizage(hs=1, ss=0, fs=44),
        'C': Sizage(hs=1, ss=0, fs=44),
        'D': Sizage(hs=1, ss=0, fs=44),
        'E': Sizage(hs=1, ss=0, fs=44),
        'F': Sizage(hs=1, ss=0, fs=44),
        'G': Sizage(hs=1, ss=0, fs=44),
        'H': Sizage(hs=1, ss=0, fs=44),
        'I': Sizage(hs=1, ss=0, fs=44),
        'J': Sizage(hs=1, ss=0, fs=44),
        'K': Sizage(hs=1, ss=0, fs=76),
        'L': Sizage(hs=1, ss=0, fs=76),
        'M': Sizage(hs=1, ss=0, fs=4),
        '0A': Sizage(hs=2, ss=0, fs=24),
        '0B': Sizage(hs=2, ss=0, fs=88),
        '0C': Sizage(hs=2, ss=0, fs=88),
        '0D': Sizage(hs=2, ss=0, fs=88),
        '0E': Sizage(hs=2, ss=0, fs=88),
        '0F': Sizage(hs=2, ss=0, fs=88),
        '0G': Sizage(hs=2, ss=0, fs=88),
        '0H': Sizage(hs=2, ss=0, fs=8),
        '1AAA': Sizage(hs=4, ss=0, fs=48),
        '1AAB': Sizage(hs=4, ss=0, fs=48),
        '1AAC': Sizage(hs=4, ss=0, fs=80),
        '1AAD': Sizage(hs=4, ss=0, fs=80),
        '1AAE': Sizage(hs=4, ss=0, fs=56),
        '1AAF': Sizage(hs=4, ss=0, fs=8),
        '1AAG': Sizage(hs=4, ss=0, fs=36),
    }
    # Bizes table maps from Base2 sextet of first code char to hard size, hs,
    # of code. Used for ._bexfil.
    Bizes = ({b64ToB2(c): hs for c, hs in Sizes.items()})

    def __init__(self, raw=None, code=MtrDex.Ed25519N, qb64b=None, qb64=None, qb2=None):
        """
        Validate as fully qualified.

        Parameters:
            raw is bytes of unqualified crypto material usable for crypto operations
            code is str of stable (hard) part of derivation code
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material

        Needs either (raw and code) or qb64b or qb64 or qb2,
        otherwise raises EmptyMaterialError.

        When raw and code provided then validate that code is correct for
        length of raw and assign .raw.
        Else when qb64b or qb64 or qb2 provided extract and assign .raw and .code.
        """
        if raw is not None:  # raw provided
            if not code:
                raise EmptyMaterialError("Improper initialization need either "
                                         "(raw and code) or qb64b or qb64 or qb2.")
            if not isinstance(raw, (bytes, bytearray)):
                raise TypeError("Not a bytes or bytearray, raw={}.".format(raw))
            if code not in self.Codes:
                raise UnknownCodeError("Unsupported code={}.".format(code))
            rawsize = Matter._rawSize(code)
            raw = raw[:rawsize]  # copy only exact size from raw stream
            if len(raw) != rawsize:  # forbids shorter
                raise RawMaterialError("Not enough raw bytes for code={} "
                                       "expected {} got {}.".format(code,
                                                                    rawsize,
                                                                    len(raw)))
            self._code = code
            self._raw = bytes(raw)  # crypto ops require bytes not bytearray
        elif qb64b is not None:
            self._exfil(qb64b)
        elif qb64 is not None:
            self._exfil(qb64)
        elif qb2 is not None:
            self._bexfil(qb2)
        else:
            raise EmptyMaterialError("Improper initialization need either "
                                     "(raw and code) or qb64b or qb64 or qb2.")

    @classmethod
    def _rawSize(cls, code):
        """
        Return raw material size in bytes for a given code.
        """
        hs, ss, fs = cls.Codes[code]  # get sizes
        return ((fs - (hs + ss)) * 3 // 4)

    @property
    def code(self):
        """
        Return ._code. Makes .code read only.
        """
        return self._code

    @property
    def raw(self):
        """
        Return ._raw. Makes .raw read only.
        """
        return self._raw

    @property
    def qb64b(self):
        """
        Return fully qualified Base64 version encoded as bytes.
        Assumes self.raw and self.code are correctly populated.
        """
        return self._infil()

    @property
    def qb64(self):
        """
        Return fully qualified Base64 version as str.
        Assumes self.raw and self.code are correctly populated.
        """
        return self.qb64b.decode("utf-8")

    @property
    def qb2(self):
        """
        Return fully qualified binary version as bytes.
        """
        return self._binfil()

    @property
    def transferable(self):
        """
        Return True when .code is not a non-transferable derivation code,
        False otherwise.
        """
        return (self.code not in NonTransDex)

    @property
    def digestive(self):
        """
        Return True when .code is a digest derivation code, False otherwise.
        """
        return (self.code in DigDex)

    def _infil(self):
        """
        Return bytes of fully qualified base64 characters:
        self.code + self.raw converted to Base64 with pad chars stripped.
        """
        code = self.code  # codex value
        raw = self.raw  # bytes or bytearray
        ps = (3 - (len(raw) % 3)) % 3  # pad size
        # a valid code length occupies exactly the pad positions: len(code) % 4 == ps
        if len(code) % 4 != ps:  # pad size is not remainder of len(code) % 4
            raise InvalidCodeSizeError("Invalid code = {} for converted raw "
                                       "pad size= {}.".format(code, ps))
        # prepend derivation code and strip off trailing pad characters
        return (code.encode("utf-8") + encodeB64(raw)[:-ps if ps else None])

    def _exfil(self, qb64b):
        """
        Extract and assign self.code and self.raw from qualified base64
        qb64b (str or bytes).
        """
        if not qb64b:  # empty need more bytes
            raise ShortageError("Empty material, Need more characters.")
        first = qb64b[:1]  # extract first char code selector
        if hasattr(first, "decode"):
            first = first.decode("utf-8")
        if first not in self.Sizes:
            if first[0] == '-':
                raise UnexpectedCountCodeError("Unexpected count code start "
                                               "while extracting Matter.")
            elif first[0] == '_':
                raise UnexpectedOpCodeError("Unexpected op code start "
                                            "while extracting Matter.")
            else:
                raise UnexpectedCodeError("Unsupported code start char={}.".format(first))
        cs = self.Sizes[first]  # get hard code size
        if len(qb64b) < cs:  # need more bytes
            raise ShortageError("Need {} more characters.".format(cs - len(qb64b)))
        code = qb64b[:cs]  # extract hard code
        if hasattr(code, "decode"):
            code = code.decode("utf-8")
        if code not in self.Codes:
            raise UnexpectedCodeError("Unsupported code ={}.".format(code))
        hs, ss, fs = self.Codes[code]
        bs = hs + ss  # both hs and ss
        # assumes that unit tests on Matter and MatterCodex ensure that
        # .Codes and .Sizes are well formed:
        # hs == cs and ss == 0 and not fs % 4 and hs > 0 and fs > hs
        if len(qb64b) < fs:  # need more bytes
            raise ShortageError("Need {} more chars.".format(fs - len(qb64b)))
        qb64b = qb64b[:fs]  # fully qualified primitive code plus material
        if hasattr(qb64b, "encode"):  # only convert extracted chars from stream
            qb64b = qb64b.encode("utf-8")
        # strip off prepended code and append pad characters
        ps = bs % 4  # pad size ps = bs mod 4
        base = qb64b[bs:] + ps * BASE64_PAD
        raw = decodeB64(base)
        if len(raw) != (len(qb64b) - bs) * 3 // 4:  # exact lengths
            raise ConversionError("Improperly qualified material = {}".format(qb64b))
        self._code = code
        self._raw = raw

    def _binfil(self):
        """
        Return bytes of fully qualified base2, that is .qb2:
        self.code converted to Base2 + self.raw left shifted with pad bits.
        Equivalent of Base64 decode of .qb64 into .qb2.
        """
        code = self.code  # codex value
        raw = self.raw  # bytes or bytearray
        hs, ss, fs = self.Codes[code]
        bs = hs + ss
        if len(code) != bs:
            raise InvalidCodeSizeError("Mismatch code size = {} with table = {}."
                                       .format(bs, len(code)))
        n = sceil(bs * 3 / 4)  # number of b2 bytes to hold b64 code
        bcode = b64ToInt(code).to_bytes(n, 'big')  # right aligned b2 code
        full = bcode + raw
        bfs = len(full)
        if bfs % 3 or (bfs * 4 // 3) != fs:  # invalid size
            raise InvalidCodeSizeError("Invalid code = {} for raw size= {}."
                                       .format(code, len(raw)))
        i = int.from_bytes(full, 'big') << (2 * (bs % 4))  # left shift in pad bits
        return (i.to_bytes(bfs, 'big'))

    def _bexfil(self, qb2):
        """
        Extract and assign self.code and self.raw from qualified base2
        bytes qb2.
        """
        if not qb2:  # empty need more bytes
            raise ShortageError("Empty material, Need more bytes.")
        first = nabSextets(qb2, 1)  # extract first sextet as code selector
        if first not in self.Bizes:
            # compare whole one-byte bytes values; indexing as first[0] yields
            # an int in Python 3 which can never equal a bytes literal
            if first == b'\xf8':  # b64ToB2('-') pad aligned
                raise UnexpectedCountCodeError("Unexpected count code start "
                                               "while extracting Matter.")
            elif first == b'\xfc':  # b64ToB2('_') pad aligned
                raise UnexpectedOpCodeError("Unexpected op code start "
                                            "while extracting Matter.")
            else:
                raise UnexpectedCodeError("Unsupported code start sextet={}.".format(first))
        cs = self.Bizes[first]  # get code hard size in equivalent sextets
        bcs = sceil(cs * 3 / 4)  # bcs is min bytes to hold cs sextets
        if len(qb2) < bcs:  # need more bytes
            raise ShortageError("Need {} more bytes.".format(bcs - len(qb2)))
        code = b2ToB64(qb2, cs)  # extract and convert hard part of code
        if code not in self.Codes:
            raise UnexpectedCodeError("Unsupported code ={}.".format(code))
        hs, ss, fs = self.Codes[code]
        bs = hs + ss  # both hs and ss
        # assumes that unit tests on Matter and MatterCodex ensure that
        # .Codes and .Sizes are well formed:
        # hs == cs and ss == 0 and not fs % 4 and hs > 0 and fs > hs
        bfs = sceil(fs * 3 / 4)  # bfs is min bytes to hold fs sextets
        if len(qb2) < bfs:  # need more bytes
            raise ShortageError("Need {} more bytes.".format(bfs - len(qb2)))
        qb2 = qb2[:bfs]  # fully qualified primitive code plus material
        # right shift to right align raw material
        i = int.from_bytes(qb2, 'big')
        i >>= 2 * (bs % 4)
        bbs = ceil(bs * 3 / 4)  # bbs is min bytes to hold bs sextets
        raw = i.to_bytes(bfs, 'big')[bbs:]  # extract raw
        if len(raw) != (len(qb2) - bbs):  # exact lengths
            raise ConversionError("Improperly qualified material = {}".format(qb2))
        self._code = code
        self._raw = raw
class Seqner(Matter):
    """
    Seqner is Matter subclass for fully qualified ordinal numbers such as
    sequence numbers or first seen ordering numbers. Useful when parsing
    attached receipt groupings with sn from a stream or database.

    Uses default initialization code = MtrDex.Salt_128 and raises
    ValidationError on init for any other code.

    Inherited Properties: (see Matter)
        .code, .raw, .qb64, .qb64b, .qb2, .transferable, .digestive

    Properties:
        .sn is int sequence number
        .snh is hex str of sequence number, no leading zeros
    """
    def __init__(self, raw=None, qb64b=None, qb64=None, qb2=None,
                 code=MtrDex.Salt_128, sn=None, snh=None, **kwa):
        """
        Inherited Parameters: (see Matter)
            raw, qb64b, qb64, qb2, code

        Parameters:
            sn is int sequence number
            snh is hex str of sequence number, used when sn not provided
        """
        if sn is None:
            sn = int(snh, 16) if snh is not None else 0
        if raw is None and qb64b is None and qb64 is None and qb2 is None:
            # no material provided so encode sn itself as the raw material
            raw = sn.to_bytes(Matter._rawSize(MtrDex.Salt_128), 'big')
        super(Seqner, self).__init__(raw=raw, qb64b=qb64b, qb64=qb64, qb2=qb2,
                                     code=code, **kwa)
        if self.code != MtrDex.Salt_128:
            raise ValidationError("Invalid code = {} for SeqNumber."
                                  "".format(self.code))

    @property
    def sn(self):
        """
        Return .raw interpreted as a big-endian unsigned int sequence number.
        """
        return int.from_bytes(self.raw, 'big')

    @property
    def snh(self):
        """
        Return sequence number as hex str with no leading zeros.
        """
        return "{:x}".format(self.sn)
class Dater(Matter):
    """
    Dater is Matter subclass for ISO-8601 formatted datetimes.

    Dater provides a custom Base64 coding of an ASCII ISO-8601 datetime by
    replacing the three non-Base64 characters ':.+' with the Base64
    equivalents 'cdp'. This is more compact than converting the raw ASCII
    ISO-8601 datetime to Base64. Dater supports datetimes as attached crypto
    material in replay of events for the datetime of when the event was
    first seen.

    Restricted to the specific 32 byte variant of ISO-8601 date time with
    microseconds and UTC offset in HH:MM. For example:

    '2020-08-22T17:50:09.988921+00:00'
    '2020-08-22T17:50:09.988921-01:00'

    The fully encoded versions are respectively:

    '1AAG2020-08-22T17c50c09d988921p00c00'
    '1AAG2020-08-22T17c50c09d988921-01c00'

    Useful when parsing attached first seen couples with fn + dt.
    Uses default initialization code = MtrDex.DateTime and raises
    ValidationError on init for any other code.

    Inherited Properties: (see Matter)
        .code, .raw, .qb64, .qb64b, .qb2, .transferable, .digestive

    Properties:
        .dts is the ISO-8601 datetime string
    """
    ToB64 = str.maketrans(":.+", "cdp")  # translate ascii datetime chars to Base64
    FromB64 = str.maketrans("cdp", ":.+")  # translate Base64 chars back to ascii

    def __init__(self, raw=None, qb64b=None, qb64=None, qb2=None,
                 code=MtrDex.DateTime, dts=None, **kwa):
        """
        Inherited Parameters: (see Matter)
            raw, qb64b, qb64, qb2, code

        Parameters:
            dts is the ISO-8601 datetime as str or bytes; defaults to the
                current datetime when no material is provided

        Note: default code is MtrDex.DateTime (not Salt_128 as in Seqner)
        so that direct initialization from raw works without always raising.
        """
        if raw is None and qb64b is None and qb64 is None and qb2 is None:
            if dts is None:  # defaults to now
                dts = nowIso8601()
            if len(dts) != 32:
                raise ValueError("Invalid length of date time string")
            if hasattr(dts, "decode"):
                dts = dts.decode("utf-8")
            # build qb64 directly by translating the datetime into Base64 chars
            qb64 = MtrDex.DateTime + dts.translate(self.ToB64)
        super(Dater, self).__init__(raw=raw, qb64b=qb64b, qb64=qb64, qb2=qb2,
                                    code=code, **kwa)
        if self.code != MtrDex.DateTime:
            raise ValidationError("Invalid code = {} for Dater date time."
                                  "".format(self.code))

    @property
    def dts(self):
        """
        Return the ISO-8601 datetime str recovered by translating the Base64
        portion of .qb64 back to ascii.
        """
        return self.qb64[self.Codes[self.code].hs:].translate(self.FromB64)
class Verfer(Matter):
    """
    Verfer is Matter subclass with method to verify a signature over a
    serialization using .raw as the verifier public key and .code to select
    the signature cipher suite.

    See Matter for inherited attributes and properties.

    Methods:
        verify: verify signature on serialization
    """
    def __init__(self, **kwa):
        """
        Assign verification cipher suite function to ._verify based on .code

        Raises ValueError for unsupported verifier codes.
        """
        super(Verfer, self).__init__(**kwa)
        if self.code in [MtrDex.Ed25519N, MtrDex.Ed25519]:
            self._verify = self._ed25519
        else:
            raise ValueError("Unsupported code = {} for verifier.".format(self.code))

    def verify(self, sig, ser):
        """
        Return True if bytes signature sig verifies on bytes serialization
        ser using .raw as verifier public key for the ._verify cipher suite
        determined by .code

        Parameters:
            sig is bytes signature
            ser is bytes serialization
        """
        return self._verify(sig=sig, ser=ser, key=self.raw)

    @staticmethod
    def _ed25519(sig, ser, key):
        """
        Return True if Ed25519 signature sig verifies on serialization ser
        with public key key, False otherwise.

        Parameters:
            sig is bytes signature
            ser is bytes serialization
            key is bytes public key
        """
        try:  # verify_detached returns None when valid else raises
            pysodium.crypto_sign_verify_detached(sig, ser, key)
        except Exception:  # any verification failure means invalid signature
            return False
        return True
class Cigar(Matter):
    """
    Cigar is Matter subclass holding a nonindexed signature together with a
    verfer property. From Matter, .raw is the signature and .code is the
    signature cipher suite. The added .verfer property holds the Verfer
    instance of the associated verifier public key, whose .raw is the
    verifier key and .code its cipher suite.

    See Matter for inherited attributes and properties.

    Properties:
        .verfer is Verfer instance of public key used to verify the signature

    Hidden:
        ._verfer is Verfer value for .verfer property
    """
    def __init__(self, verfer=None, **kwa):
        """
        Inherited Parameters: (see Matter)

        Parameters:
            verfer is Verfer instance of associated verifier public key
        """
        super(Cigar, self).__init__(**kwa)
        self._verfer = verfer

    @property
    def verfer(self):
        """
        Return Verfer instance from ._verfer
        """
        return self._verfer

    @verfer.setter
    def verfer(self, verfer):
        """
        Set ._verfer to verfer
        """
        self._verfer = verfer
class Signer(Matter):
    """
    Signer is a Matter subclass that signs serializations.

    .raw is the signing (private) key seed and .code its cipher suite.
    Adds a .verfer property whose .raw is the matching public key and whose
    .code is chosen from the transferable flag. When no seed is provided a
    random one is generated for the given code.

    Methods:
        sign: returns Cigar (nonindexed) or Siger (indexed) signature
    """

    def __init__(self, raw=None, code=MtrDex.Ed25519_Seed, transferable=True, **kwa):
        """
        Initialize seed and derive the verification key pair.

        Parameters: (see Matter for inherited parameters)
            raw is bytes seed (private key); generated randomly when missing
            code is str derivation code for the seed cipher suite
            transferable is Boolean; True means verifier code is transferable,
                False means non-transferable
        """
        try:
            super(Signer, self).__init__(raw=raw, code=code, **kwa)
        except EmptyMaterialError as ex:
            # No seed material supplied: generate one, supported suites only.
            if code != MtrDex.Ed25519_Seed:
                raise ValueError("Unsupported signer code = {}.".format(code))
            raw = pysodium.randombytes(pysodium.crypto_sign_SEEDBYTES)
            super(Signer, self).__init__(raw=raw, code=code, **kwa)

        if self.code != MtrDex.Ed25519_Seed:
            raise ValueError("Unsupported signer code = {}.".format(self.code))

        self._sign = self._ed25519
        verkey, sigkey = pysodium.crypto_sign_seed_keypair(self.raw)
        # Public key code selects transferable vs non-transferable identifier.
        vcode = MtrDex.Ed25519 if transferable else MtrDex.Ed25519N
        self._verfer = Verfer(raw=verkey, code=vcode)

    @property
    def verfer(self):
        """
        Returns the Verfer instance for the derived public key.
        """
        return self._verfer

    def sign(self, ser, index=None):
        """
        Sign bytes serialization ser.

        Returns Cigar instance when index is None, else Siger instance with
        the given index.

        Parameters:
            ser is bytes serialization to sign
            index is int offset of the associated verifier key in the event
                key list, or None for a nonindexed signature
        """
        return self._sign(ser=ser, seed=self.raw, verfer=self.verfer, index=index)

    @staticmethod
    def _ed25519(ser, seed, verfer, index):
        """
        Returns Ed25519 signature wrapped as Cigar or Siger.

        Parameters:
            ser is bytes serialization
            seed is bytes seed (private key)
            verfer is Verfer instance whose .raw is the public key
            index is int offset into signers list or None
        """
        # pysodium detached sign expects the 64 byte secret key = seed + verkey.
        sig = pysodium.crypto_sign_detached(ser, seed + verfer.raw)
        if index is None:
            return Cigar(raw=sig, code=MtrDex.Ed25519_Sig, verfer=verfer)
        return Siger(raw=sig, code=IdrDex.Ed25519_Sig, index=index, verfer=verfer)
class Salter(Matter):
    """
    Salter is Matter subclass to maintain random salt for secrets (private keys)
    Its .raw is random salt, .code as cipher suite for salt

    Attributes:
        .level is str security level code. Provides default level

    Inherited Properties
        .pad  is int number of pad chars given raw
        .code is  str derivation code to indicate cypher suite
        .raw is bytes crypto material only without code
        .index is int count of attached crypto material by context (receipts)
        .qb64 is str in Base64 fully qualified with derivation code + crypto mat
        .qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2  is bytes in binary with derivation code + crypto material
        .nontrans is Boolean, True when non-transferable derivation code False otherwise

    Properties:

    Methods:
        signer: derives a Signer whose seed is stretched from a path + salt

    Hidden:
        ._pad is method to compute  .pad property
        ._code is str value for .code property
        ._raw is bytes value for .raw property
        ._index is int value for .index property
        ._infil is method to compute fully qualified Base64 from .raw and .code
        ._exfil is method to extract .code and .raw from fully qualified Base64
    """
    # Default security tier used when none supplied at init.
    Tier = Tiers.low

    def __init__(self,raw=None, code=MtrDex.Salt_128, tier=None, **kwa):
        """
        Initialize salter's raw and code

        Inherited Parameters:
            raw is bytes of unqualified crypto material usable for crypto operations
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes  of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material
            code is str of derivation code
            index is int of count of attached receipts for CryCntDex codes

        Parameters:
            tier is str security tier code; defaults to class Tier when None
        """
        try:
            # Normal path: salt material supplied directly.
            super(Salter, self).__init__(raw=raw, code=code, **kwa)
        except EmptyMaterialError as ex:
            # No salt supplied: generate a fresh random salt for Salt_128.
            if code == MtrDex.Salt_128:
                raw = pysodium.randombytes(pysodium.crypto_pwhash_SALTBYTES)
                super(Salter, self).__init__(raw=raw, code=code, **kwa)
            else:
                raise ValueError("Unsupported salter code = {}.".format(code))

        if self.code not in (MtrDex.Salt_128, ):
            raise ValueError("Unsupported salter code = {}.".format(self.code))

        self.tier = tier if tier is not None else self.Tier

    def signer(self, path="", tier=None, code=MtrDex.Ed25519_Seed,
               transferable=True, temp=False):
        """
        Returns Signer instance whose .raw secret is derived from path and
        salter's .raw and streched to size given by code. The signers public key
        for its .verfer is derived from code and transferable.

        Parameters:
            path is str of unique chars used in derivation of secret seed for signer
            tier is str security tier code; defaults to .tier when None
            code is str code of secret crypto suite
            transferable is Boolean, True means use transferace code for public key
            temp is Boolean, True means use quick method to stretch salt
                 for testing only, Otherwise use more time to stretch
        """
        tier = tier if tier is not None else self.tier

        if temp:
            # Minimal work parameters: fast but insecure; testing only.
            opslimit = pysodium.crypto_pwhash_OPSLIMIT_MIN
            memlimit = pysodium.crypto_pwhash_MEMLIMIT_MIN
        else:
            # Map security tier to libsodium pwhash work parameters.
            if tier == Tiers.low:
                opslimit = pysodium.crypto_pwhash_OPSLIMIT_INTERACTIVE
                memlimit = pysodium.crypto_pwhash_MEMLIMIT_INTERACTIVE
            elif tier == Tiers.med:
                opslimit = pysodium.crypto_pwhash_OPSLIMIT_MODERATE
                memlimit = pysodium.crypto_pwhash_MEMLIMIT_MODERATE
            elif tier == Tiers.high:
                opslimit = pysodium.crypto_pwhash_OPSLIMIT_SENSITIVE
                memlimit = pysodium.crypto_pwhash_MEMLIMIT_SENSITIVE
            else:
                raise ValueError("Unsupported security tier = {}.".format(tier))

        # stretch algorithm is argon2id
        # NOTE(review): passwd is passed as str while salt is bytes; assumes
        # pysodium accepts/encodes str for passwd -- confirm for pysodium
        # version in use.
        seed = pysodium.crypto_pwhash(outlen=Matter._rawSize(code),
                                      passwd=path,
                                      salt=self.raw,
                                      opslimit=opslimit,
                                      memlimit=memlimit,
                                      alg=pysodium.crypto_pwhash_ALG_DEFAULT)

        return (Signer(raw=seed, code=code, transferable=transferable))
class Diger(Matter):
    """
    Diger is Matter subclass with method to verify digest of serialization
    using .raw as digest and .code for digest algorithm.

    See Matter for inherited attributes and properties:

    Inherited Properties:
        .pad is int number of pad chars given raw
        .code is str derivation code to indicate cypher suite
        .raw is bytes crypto material only without code
        .index is int count of attached crypto material by context (receipts)
        .qb64 is str in Base64 fully qualified with derivation code + crypto mat
        .qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2  is bytes in binary with derivation code + crypto material
        .nontrans is Boolean, True when non-transferable derivation code False otherwise

    Methods:
        verify: verifies digest given ser
        compare: compares provide digest given ser to this digest of ser.
                enables digest agility of different digest algos to compare.

    Hidden:
        ._pad is method to compute  .pad property
        ._code is str value for .code property
        ._raw is bytes value for .raw property
        ._index is int value for .index property
        ._infil is method to compute fully qualified Base64 from .raw and .code
        ._exfil is method to extract .code and .raw from fully qualified Base64
    """

    def __init__(self, raw=None, ser=None, code=MtrDex.Blake3_256, **kwa):
        """
        Assign digest verification function to ._verify

        See Matter for inherited parameters

        Inherited Parameters:
            raw is bytes of unqualified crypto material usable for crypto operations
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material
            code is str of derivation code
            index is int of count of attached receipts for CryCntDex codes

        Parameters:
           ser is bytes serialization from which raw is computed if not raw
        """
        try:
            # Normal path: digest material supplied directly (raw/qb64/etc).
            super(Diger, self).__init__(raw=raw, code=code, **kwa)
        except EmptyMaterialError as ex:
            # No material: compute digest of ser using code's algorithm.
            if not ser:
                raise ex
            if code == MtrDex.Blake3_256:
                dig = blake3.blake3(ser).digest()
            elif code == MtrDex.Blake2b_256:
                # blake2b native digest is 64 bytes; truncate to 32 via digest_size
                dig = hashlib.blake2b(ser, digest_size=32).digest()
            elif code == MtrDex.Blake2s_256:
                dig = hashlib.blake2s(ser, digest_size=32).digest()
            elif code == MtrDex.SHA3_256:
                dig = hashlib.sha3_256(ser).digest()
            elif code == MtrDex.SHA2_256:
                dig = hashlib.sha256(ser).digest()
            else:
                raise ValueError("Unsupported code = {} for digester.".format(code))

            super(Diger, self).__init__(raw=dig, code=code, **kwa)

        # Bind the verification function matching .code for use by .verify.
        if self.code == MtrDex.Blake3_256:
            self._verify = self._blake3_256
        elif self.code == MtrDex.Blake2b_256:
            self._verify = self._blake2b_256
        elif self.code == MtrDex.Blake2s_256:
            self._verify = self._blake2s_256
        elif self.code == MtrDex.SHA3_256:
            self._verify = self._sha3_256
        elif self.code == MtrDex.SHA2_256:
            self._verify = self._sha2_256
        else:
            raise ValueError("Unsupported code = {} for digester.".format(self.code))

    def verify(self, ser):
        """
        Returns True if digest of bytes serialization ser matches .raw
        using .raw as reference digest for ._verify digest algorithm determined
        by .code

        Parameters:
            ser is bytes serialization
        """
        return (self._verify(ser=ser, raw=self.raw))

    def compare(self, ser, dig=None, diger=None):
        """
        Returns True if dig and .qb64 or .qb64b match or
        if both .raw and dig are valid digests of ser
        Otherwise returns False

        Parameters:
            ser is bytes serialization
            dig is qb64b or qb64 digest of ser to compare with self
            diger is Diger instance of digest of ser to compare with self

        if both supplied dig takes precedence

        If both match then as optimization returns True and does not verify either
          as digest of ser
        If both have same code but do not match then as optimization returns False
           and does not verify if either is digest of ser
        But if both do not match then recalcs both digests to verify they
        they are both digests of ser with or without matching codes.
        """
        if dig is not None:
            if hasattr(dig, "encode"):
                dig = dig.encode('utf-8')  # makes bytes

            if dig == self.qb64b:  # matching
                return True

            # Codes differ or digests differ; wrap to extract code for checks.
            diger = Diger(qb64b=dig)  # extract code

        elif diger is not None:
            if diger.qb64b == self.qb64b:
                return True

        else:
            raise ValueError("Both dig and diger may not be None.")

        if diger.code == self.code:  # digest not match but same code
            return False

        # Digest agility: different algos may both validly digest same ser.
        if diger.verify(ser=ser) and self.verify(ser=ser):  # both verify on ser
            return True

        return (False)

    @staticmethod
    def _blake3_256(ser, raw):
        """
        Returns True if verified False otherwise
        Verifiy blake3_256 digest of ser matches raw

        Parameters:
            ser is bytes serialization
            raw is bytes reference digest
        """
        return(blake3.blake3(ser).digest() == raw)

    @staticmethod
    def _blake2b_256(ser, raw):
        """
        Returns True if verified False otherwise
        Verifiy blake2b_256 digest of ser matches raw

        Parameters:
            ser is bytes serialization
            raw is bytes reference digest
        """
        return(hashlib.blake2b(ser, digest_size=32).digest() == raw)

    @staticmethod
    def _blake2s_256(ser, raw):
        """
        Returns True if verified False otherwise
        Verifiy blake2s_256 digest of ser matches raw

        Parameters:
            ser is bytes serialization
            raw is bytes reference digest
        """
        return(hashlib.blake2s(ser, digest_size=32).digest() == raw)

    @staticmethod
    def _sha3_256(ser, raw):
        """
        Returns True if verified False otherwise
        Verifiy sha3_256 digest of ser matches raw

        Parameters:
            ser is bytes serialization
            raw is bytes reference digest
        """
        return(hashlib.sha3_256(ser).digest() == raw)

    @staticmethod
    def _sha2_256(ser, raw):
        """
        Returns True if verified False otherwise
        Verifiy sha2_256 digest of ser matches raw

        Parameters:
            ser is bytes serialization
            raw is bytes reference digest
        """
        return(hashlib.sha256(ser).digest() == raw)
class Nexter(Matter):
    """
    Nexter is Matter subclass with support to derive itself from
    next sith and next keys given code.

    See Diger for inherited attributes and properties:

    Attributes:

    Inherited Properties:
        .code  str derivation code to indicate cypher suite
        .raw   bytes crypto material only without code
        .pad  int number of pad chars given raw
        .qb64 str in Base64 fully qualified with derivation code + crypto mat
        .qb64b bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2  bytes in binary with derivation code + crypto material
        .nontrans True when non-transferable derivation code False otherwise

    Properties:

    Methods:

    Hidden:
        ._digest is digest method
        ._derive is derivation method
    """

    def __init__(self, limen=None, sith=None, digs=None, keys=None, ked=None,
                 code=MtrDex.Blake3_256, **kwa):
        """
        Assign digest verification function to ._verify

        Inherited Parameters:
            raw is bytes of unqualified crypto material usable for crypto operations
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes  of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material
            code is str of derivation code
            index is int of count of attached receipts for CryCntDex codes

        Parameters:
           limen is string extracted from sith expression in event
           sith is int threshold or lowercase hex str no leading zeros
           digs is list of qb64 digests of public keys
           keys is list of keys each is qb64 public key str
           ked is key event dict

        Raises error if not any of raw, digs,keys, ked

        if not raw
            use digs
            If digs not provided
                use keys
                if keys not provided
                    get keys from ked
                compute digs from keys

        If sith not provided
            get sith from ked
            but if not ked then compute sith as simple majority of keys
        """
        try:
            # Normal path: nxt material supplied directly (raw/qb64/etc).
            super(Nexter, self).__init__(code=code, **kwa)
        except EmptyMaterialError as ex:
            if not digs and not keys and not ked:
                raise ex
            if code == MtrDex.Blake3_256:
                self._digest = self._blake3_256
            else:
                raise ValueError("Unsupported code = {} for nexter.".format(code))

            raw = self._derive(code=code, limen=limen, sith=sith, digs=digs,
                               keys=keys, ked=ked)  # derive nxt raw
            super(Nexter, self).__init__(raw=raw, code=code, **kwa)  # attaches code etc
        else:
            # Material was supplied; still bind digest function for .verify.
            if self.code == MtrDex.Blake3_256:
                self._digest = self._blake3_256
            else:
                raise ValueError("Unsupported code = {} for nexter.".format(code))

    def verify(self, raw=b'', limen=None, sith=None, digs=None, keys=None, ked=None):
        """
        Returns True if digest of bytes nxt raw matches .raw
        Uses .raw as reference nxt raw for ._verify algorithm determined by .code

        If raw not provided then extract raw from either (sith, keys) or ked

        Parameters:
            raw is bytes serialization
            sith is str lowercase hex
            keys is list of keys qb64
            ked is key event dict
        """
        if not raw:
            raw = self._derive(code=self.code, limen=limen, sith=sith, digs=digs,
                               keys=keys, ked=ked)

        return (raw == self.raw)

    def _derive(self, code, limen=None, sith=None, digs=None, keys=None, ked=None):
        """
        Returns ser where ser is serialization derived from code, sith, keys, or ked
        """
        if not digs:
            if not keys:
                try:
                    keys = ked["k"]
                except KeyError as ex:
                    raise DerivationError("Error extracting keys from"
                                          " ked = {}".format(ex))

            if not keys:  # empty keys
                raise DerivationError("Empty keys.")

            keydigs = [self._digest(key.encode("utf-8")) for key in keys]

        else:
            # digs supplied: codes must all match the nxt code for the xor
            # combination below to be well defined.
            digers = [Diger(qb64=dig) for dig in digs]
            for diger in digers:
                if diger.code != code:
                    raise DerivationError("Mismatch of public key digest "
                                          "code = {} for next digest code = {}."
                                          "".format(diger.code, code))
            keydigs = [diger.raw for diger in digers]

        if limen is None:  # compute default limen
            if sith is None:  # need len keydigs to compute default sith
                try:
                    sith = ked["kt"]
                except Exception as ex:
                    # default simple majority
                    sith = "{:x}".format(max(1, ceil(len(keydigs) / 2)))

            limen = Tholder(sith=sith).limen

        # Commitment is the xor of the limen digest with all key digests;
        # order of keys therefore does not affect the result.
        kints = [int.from_bytes(keydig, 'big') for keydig in keydigs]
        sint = int.from_bytes(self._digest(limen.encode("utf-8")), 'big')
        for kint in kints:
            sint ^= kint  # xor together

        return (sint.to_bytes(Matter._rawSize(code), 'big'))

    @staticmethod
    def _blake3_256(raw):
        """
        Returns digest of raw using Blake3_256

        Parameters:
            raw is bytes serialization of nxt raw
        """
        return(blake3.blake3(raw).digest())
class Prefixer(Matter):
    """
    Prefixer is Matter subclass for autonomic identifier prefix using
    derivation as determined by code from ked

    Attributes:

    Inherited Properties:  (see Matter)
        .pad is int number of pad chars given raw
        .code is str derivation code to indicate cypher suite
        .raw is bytes crypto material only without code
        .index is int count of attached crypto material by context (receipts)
        .qb64 is str in Base64 fully qualified with derivation code + crypto mat
        .qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2  is bytes in binary with derivation code + crypto material
        .nontrans is Boolean, True when non-transferable derivation code False otherwise

    Properties:

    Methods:
        verify():  Verifies derivation of aid prefix from a ked

    Hidden:
        ._pad is method to compute  .pad property
        ._code is str value for .code property
        ._raw is bytes value for .raw property
        ._index is int value for .index property
        ._infil is method to compute fully qualified Base64 from .raw and .code
        ._exfil is method to extract .code and .raw from fully qualified Base64
    """
    Dummy = "#"  # dummy spaceholder char for pre. Must not be a valid Base64 char
    # element labels to exclude in digest or signature derivation from inception icp
    IcpExcludes = ["i"]
    # element labels to exclude in digest or signature derivation from delegated inception dip
    DipExcludes = ["i"]

    def __init__(self, raw=None, code=None, ked=None,
                 seed=None, secret=None, **kwa):
        """
        assign ._derive to derive derivatin of aid prefix from ked
        assign ._verify to verify derivation of aid prefix  from ked

        Default code is None to force EmptyMaterialError when only raw provided but
        not code.

        Inherited Parameters:
            raw is bytes of unqualified crypto material usable for crypto operations
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes  of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material
            code is str of derivation code
            index is int of count of attached receipts for CryCntDex codes

        Parameters:
            seed is bytes seed when signature derivation
            secret is qb64 when signature derivation when applicable
               one of seed or secret must be provided when signature derivation

        """
        try:
            # Normal path: prefix material supplied directly (raw/qb64/etc).
            super(Prefixer, self).__init__(raw=raw, code=code, **kwa)
        except EmptyMaterialError as ex:
            # No material: must derive prefix from inception ked.
            if not ked or (not code and "i" not in ked):
                raise ex

            if not code:  # get code from pre in ked
                super(Prefixer, self).__init__(qb64=ked["i"], code=code, **kwa)
                code = self.code

            if code == MtrDex.Ed25519N:
                self._derive = self._derive_ed25519N
            elif code == MtrDex.Ed25519:
                self._derive = self._derive_ed25519
            elif code == MtrDex.Blake3_256:
                self._derive = self._derive_blake3_256
            elif code == MtrDex.Ed25519_Sig:
                self._derive = self._derive_sig_ed25519
            else:
                raise ValueError("Unsupported code = {} for prefixer.".format(code))

            # use ked and ._derive from code to derive aid prefix and code
            raw, code = self._derive(ked=ked, seed=seed, secret=secret)
            super(Prefixer, self).__init__(raw=raw, code=code, **kwa)

        # Bind the verification function matching the final .code.
        if self.code == MtrDex.Ed25519N:
            self._verify = self._verify_ed25519N
        elif self.code == MtrDex.Ed25519:
            self._verify = self._verify_ed25519
        elif self.code == MtrDex.Blake3_256:
            self._verify = self._verify_blake3_256
        elif self.code == MtrDex.Ed25519_Sig:
            # fix: compare self.code here. Previously compared the local
            # 'code', which may be None (or stale) when the code was extracted
            # from qb64/ked, causing a spurious "Unsupported code" ValueError
            # for signature-derived prefixes.
            self._verify = self._verify_sig_ed25519
        else:
            raise ValueError("Unsupported code = {} for prefixer.".format(self.code))

    def derive(self, ked, seed=None, secret=None):
        """
        Returns tuple (raw, code) of aid prefix as derived from key event dict ked.
                uses a derivation code specific _derive method

        Parameters:
            ked is inception key event dict
            seed is only used for sig derivation it is the secret key/secret

        Raises:
            ValueError if ked ilk is not an incepting ilk (icp or dip)
        """
        if ked["t"] not in (Ilks.icp, Ilks.dip):
            raise ValueError("Nonincepting ilk={} for prefix derivation.".format(ked["t"]))
        return (self._derive(ked=ked, seed=seed, secret=secret))

    def verify(self, ked, prefixed=False):
        """
        Returns True if derivation from ked for .code matches .qb64 and
                If prefixed also verifies ked["i"] matches .qb64
                False otherwise

        Parameters:
            ked is inception key event dict

        Raises:
            ValueError if ked ilk is not an incepting ilk (icp or dip)
        """
        if ked["t"] not in (Ilks.icp, Ilks.dip):
            raise ValueError("Nonincepting ilk={} for prefix derivation.".format(ked["t"]))
        return (self._verify(ked=ked, pre=self.qb64, prefixed=prefixed))

    def _derive_ed25519N(self, ked, seed=None, secret=None):
        """
        Returns tuple (raw, code) of basic nontransferable Ed25519 prefix (qb64)
            as derived from inception key event dict ked keys[0]
        """
        ked = dict(ked)  # make copy so don't clobber original ked
        try:
            keys = ked["k"]
            if len(keys) != 1:
                raise DerivationError("Basic derivation needs at most 1 key "
                                      " got {} keys instead".format(len(keys)))
            verfer = Verfer(qb64=keys[0])
        except Exception as ex:
            raise DerivationError("Error extracting public key ="
                                  " = {}".format(ex))

        if verfer.code not in [MtrDex.Ed25519N]:
            raise DerivationError("Mismatch derivation code = {}."
                                  "".format(verfer.code))

        try:
            # Non-transferable prefix commits to no next key: nxt must be empty.
            if verfer.code == MtrDex.Ed25519N and ked["n"]:
                raise DerivationError("Non-empty nxt = {} for non-transferable"
                                      " code = {}".format(ked["n"],
                                                          verfer.code))
        except Exception as ex:
            raise DerivationError("Error checking nxt = {}".format(ex))

        return (verfer.raw, verfer.code)

    def _verify_ed25519N(self, ked, pre, prefixed=False):
        """
        Returns True if verified  False otherwise
        Verify derivation of fully qualified Base64 pre from inception iked dict

        Parameters:
            ked is inception key event dict
            pre is Base64 fully qualified prefix default to .qb64
            prefixed is Boolean, True means also check ked["i"] equals pre
        """
        try:
            keys = ked["k"]
            if len(keys) != 1:
                return False

            if keys[0] != pre:
                return False

            if prefixed and ked["i"] != pre:
                return False

            if ked["n"]:  # must be empty
                return False

        except Exception as ex:
            return False

        return True

    def _derive_ed25519(self, ked, seed=None, secret=None):
        """
        Returns tuple (raw, code) of basic Ed25519 prefix (qb64)
            as derived from inception key event dict ked keys[0]
        """
        ked = dict(ked)  # make copy so don't clobber original ked
        try:
            keys = ked["k"]
            if len(keys) != 1:
                raise DerivationError("Basic derivation needs at most 1 key "
                                      " got {} keys instead".format(len(keys)))
            verfer = Verfer(qb64=keys[0])
        except Exception as ex:
            raise DerivationError("Error extracting public key ="
                                  " = {}".format(ex))

        if verfer.code not in [MtrDex.Ed25519]:
            raise DerivationError("Mismatch derivation code = {}"
                                  "".format(verfer.code))

        return (verfer.raw, verfer.code)

    def _verify_ed25519(self, ked, pre, prefixed=False):
        """
        Returns True if verified False otherwise
        Verify derivation of fully qualified Base64 prefix from
        inception key event dict (ked)

        Parameters:
            ked is inception key event dict
            pre is Base64 fully qualified prefix default to .qb64
            prefixed is Boolean, True means also check ked["i"] equals pre
        """
        try:
            keys = ked["k"]
            if len(keys) != 1:
                return False

            if keys[0] != pre:
                return False

            if prefixed and ked["i"] != pre:
                return False

        except Exception as ex:
            return False

        return True

    def _derive_blake3_256(self, ked, seed=None, secret=None):
        """
        Returns tuple (raw, code) of Blake3_256 digest prefix (qb64)
            as derived from the serialized inception key event dict ked
        """
        ked = dict(ked)  # make copy so don't clobber original ked
        ilk = ked["t"]
        if ilk == Ilks.icp:
            labels = [key for key in ked if key not in self.IcpExcludes]
        elif ilk == Ilks.dip:
            labels = [key for key in ked if key not in self.DipExcludes]
        else:
            raise DerivationError("Invalid ilk = {} to derive pre.".format(ilk))

        # put in dummy pre to get size correct
        ked["i"] = "{}".format(self.Dummy*Matter.Codes[MtrDex.Blake3_256].fs)
        serder = Serder(ked=ked)
        ked = serder.ked  # use updated ked with valid vs element

        for l in labels:
            if l not in ked:
                raise DerivationError("Missing element = {} from ked.".format(l))

        dig = blake3.blake3(serder.raw).digest()
        return (dig, MtrDex.Blake3_256)

    def _verify_blake3_256(self, ked, pre, prefixed=False):
        """
        Returns True if verified False otherwise
        Verify derivation of fully qualified Base64 prefix from
        inception key event dict (ked)

        Parameters:
            ked is inception key event dict
            pre is Base64 fully qualified default to .qb64
            prefixed is Boolean, True means also check ked["i"] equals pre
        """
        try:
            # Re-derive the digest and compare its qualified form to pre.
            raw, code = self._derive_blake3_256(ked=ked)
            crymat = Matter(raw=raw, code=MtrDex.Blake3_256)
            if crymat.qb64 != pre:
                return False

            if prefixed and ked["i"] != pre:
                return False

        except Exception as ex:
            return False

        return True

    def _derive_sig_ed25519(self, ked, seed=None, secret=None):
        """
        Returns tuple (raw, code) of Ed25519 signature prefix (qb64)
            as derived by signing the serialized inception key event dict ked
        """
        ked = dict(ked)  # make copy so don't clobber original ked
        ilk = ked["t"]
        if ilk == Ilks.icp:
            labels = [key for key in ked if key not in self.IcpExcludes]
        elif ilk == Ilks.dip:
            labels = [key for key in ked if key not in self.DipExcludes]
        else:
            raise DerivationError("Invalid ilk = {} to derive pre.".format(ilk))

        # put in dummy pre to get size correct
        ked["i"] = "{}".format(self.Dummy*Matter.Codes[MtrDex.Ed25519_Sig].fs)
        serder = Serder(ked=ked)
        ked = serder.ked  # use updated ked with valid vs element

        for l in labels:
            if l not in ked:
                raise DerivationError("Missing element = {} from ked.".format(l))

        try:
            keys = ked["k"]
            if len(keys) != 1:
                raise DerivationError("Basic derivation needs at most 1 key "
                                      " got {} keys instead".format(len(keys)))
            verfer = Verfer(qb64=keys[0])
        except Exception as ex:
            raise DerivationError("Error extracting public key ="
                                  " = {}".format(ex))

        if verfer.code not in [MtrDex.Ed25519]:
            raise DerivationError("Invalid derivation code = {}"
                                  "".format(verfer.code))

        if not (seed or secret):
            raise DerivationError("Missing seed or secret.")

        # Signer accepts seed as raw or secret as qb64; signing key must match
        # the public key declared in the ked.
        signer = Signer(raw=seed, qb64=secret)

        if verfer.raw != signer.verfer.raw:
            raise DerivationError("Key in ked not match seed.")

        cigar = signer.sign(ser=serder.raw)
        return (cigar.raw, MtrDex.Ed25519_Sig)

    def _verify_sig_ed25519(self, ked, pre, prefixed=False):
        """
        Returns True if verified False otherwise
        Verify derivation of fully qualified Base64 prefix from
        inception key event dict (ked)

        Parameters:
            ked is inception key event dict
            pre is Base64 fully qualified prefix default to .qb64
            prefixed is Boolean, True means also check ked["i"] equals pre
        """
        try:
            dked = dict(ked)  # make copy so don't clobber original ked
            ilk = dked["t"]
            if ilk == Ilks.icp:
                labels = [key for key in dked if key not in self.IcpExcludes]
            elif ilk == Ilks.dip:
                labels = [key for key in dked if key not in self.DipExcludes]
            else:
                raise DerivationError("Invalid ilk = {} to derive prefix.".format(ilk))

            # put in dummy pre to get size correct
            dked["i"] = "{}".format(self.Dummy*Matter.Codes[MtrDex.Ed25519_Sig].fs)
            serder = Serder(ked=dked)
            dked = serder.ked  # use updated ked with valid vs element

            for l in labels:
                if l not in dked:
                    raise DerivationError("Missing element = {} from ked.".format(l))

            try:
                keys = dked["k"]
                if len(keys) != 1:
                    raise DerivationError("Basic derivation needs at most 1 key "
                                          " got {} keys instead".format(len(keys)))
                verfer = Verfer(qb64=keys[0])
            except Exception as ex:
                raise DerivationError("Error extracting public key ="
                                      " = {}".format(ex))

            if verfer.code not in [MtrDex.Ed25519]:
                raise DerivationError("Mismatched derivation code = {}"
                                      "".format(verfer.code))

            if prefixed and ked["i"] != pre:
                return False

            # The prefix itself is the signature; verify it against the
            # dummied serialization with the ked's declared public key.
            cigar = Cigar(qb64=pre, verfer=verfer)
            result = cigar.verfer.verify(sig=cigar.raw, ser=serder.raw)
            return result

        except Exception as ex:
            return False
@dataclass(frozen=True)
class IndexerCodex:
    """
    IndexerCodex is the codex of hard (stable) parts of all indexer
    derivation codes.

    Only defined codes are listed; undefined codes are omitted so that
    inclusion/exclusion tests with the 'in' operator work as expected.
    """
    Ed25519_Sig: str = 'A'  # Ed25519 signature.
    ECDSA_256k1_Sig: str = 'B'  # ECDSA secp256k1 signature.
    Ed448_Sig: str = '0A'  # Ed448 signature.
    Label: str = '0B'  # Variable len bytes label L=N*4 <= 4095 char quadlets

    def __iter__(self):
        # Iterate the field values so membership tests with "in" work.
        return iter(astuple(self))


IdrDex = IndexerCodex()
class Indexer:
"""
Indexer is fully qualified cryptographic material primitive base class for
indexed primitives.
Sub classes are derivation code and key event element context specific.
Includes the following attributes and properties:
Attributes:
Properties:
.code is str derivation code to indicate cypher suite
.raw is bytes crypto material only without code
.pad is int number of pad chars given raw
.index is int count of attached crypto material by context (receipts)
.qb64 is str in Base64 fully qualified with derivation code + crypto mat
.qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
.qb2 is bytes in binary with derivation code + crypto material
Hidden:
._code is str value for .code property
._raw is bytes value for .raw property
._pad is method to compute .pad property
._index is int value for .index property
._infil is method to compute fully qualified Base64 from .raw and .code
._exfil is method to extract .code and .raw from fully qualified Base64
"""
Codex = IdrDex
# Sizes table maps from bytes Base64 first code char to int of hard size, hs,
# (stable) of code. The soft size, ss, (unstable) is always > 0 for Indexer.
Sizes = ({chr(c): 1 for c in range(65, 65+26)})
Sizes.update({chr(c): 1 for c in range(97, 97+26)})
Sizes.update([('0', 2), ('1', 2), ('2', 2), ('3', 2), ('4', 3), ('5', 4)])
# Codes table maps hs chars of code to Sizage namedtuple of (hs, ss, fs)
# where hs is hard size, ss is soft size, and fs is full size
# soft size, ss, should always be > 0 for Indexer
Codes = {
'A': Sizage(hs=1, ss=1, fs=88),
'B': Sizage(hs=1, ss=1, fs=88),
'0A': Sizage(hs=2, ss=2, fs=156),
'0B': Sizage(hs=2, ss=2, fs=None),
}
# Bizes table maps to hard size, hs, of code from bytes holding sextets
# converted from first code char. Used for ._bexfil.
Bizes = ({b64ToB2(c): hs for c, hs in Sizes.items()})
    def __init__(self, raw=None, code=IdrDex.Ed25519_Sig, index=0,
                 qb64b=None, qb64=None, qb2=None):
        """
        Validate as fully qualified

        Parameters:
            raw is bytes of unqualified crypto material usable for crypto operations
            code is str of stable (hard) part of derivation code
            index is int of offset index into key or id list or length of material
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material

        Needs either (raw and code and index) or qb64b or qb64 or qb2
        Otherwise raises EmptyMaterialError
        When raw and code and index provided then validate that code is correct
        for length of raw  and assign .raw
        Else when qb64b or qb64 or qb2 provided extract and assign
        .raw and .code and .index
        """
        if raw is not None:  # raw provided
            if not code:
                raise EmptyMaterialError("Improper initialization need either "
                                         "(raw and code) or qb64b or qb64 or qb2.")
            if not isinstance(raw, (bytes, bytearray)):
                raise TypeError("Not a bytes or bytearray, raw={}.".format(raw))

            if code not in self.Codes:
                raise UnexpectedCodeError("Unsupported code={}.".format(code))

            hs, ss, fs = self.Codes[code]  # get sizes for code
            bs = hs + ss  # both hard + soft code size
            # index must fit in ss Base64 chars
            if index < 0 or index > (64 ** ss - 1):
                raise InvalidCodeIndexError("Invalid index={} for code={}.".format(index, code))

            if not fs:  # compute fs from index
                # variable length material: index encodes length in quadlets
                if bs % 4:
                    raise InvalidCodeSizeError("Whole code size not multiple of 4 for "
                                               "variable length material. bs={}.".format(bs))
                fs = (index * 4) + bs

            rawsize = (fs - bs) * 3 // 4  # raw byte count implied by full size
            raw = raw[:rawsize]  # copy only exact size from raw stream
            if len(raw) != rawsize:  # forbids shorter
                raise RawMaterialError("Not enougth raw bytes for code={}"
                                       "and index={} ,expected {} got {}."
                                       "".format(code, index, rawsize, len(raw)))

            self._code = code
            self._index = index
            self._raw = bytes(raw)  # crypto ops require bytes not bytearray

        elif qb64b is not None:
            self._exfil(qb64b)

        elif qb64 is not None:
            self._exfil(qb64)

        elif qb2 is not None:
            self._bexfil(qb2)

        else:
            raise EmptyMaterialError("Improper initialization need either "
                                     "(raw and code and index) or qb64b or "
                                     "qb64 or qb2.")
@classmethod
def _rawSize(cls, code):
"""
Returns raw size in bytes for a given code
"""
hs, ss, fs = cls.Codes[code] # get sizes
return ( (fs - (hs + ss)) * 3 // 4 )
@property
def code(self):
"""
Returns ._code
Makes .code read only
"""
return self._code
@property
def raw(self):
"""
Returns ._raw
Makes .raw read only
"""
return self._raw
@property
def index(self):
"""
Returns ._index
Makes .index read only
"""
return self._index
@property
def qb64b(self):
"""
Property qb64b:
Returns Fully Qualified Base64 Version encoded as bytes
Assumes self.raw and self.code are correctly populated
"""
return self._infil()
@property
def qb64(self):
    """
    Fully qualified Base64 representation as a str.

    Requires that .raw, .code, and .index are correctly populated.
    """
    return self.qb64b.decode("utf-8")
@property
def qb2(self):
    """
    Fully qualified binary (Base2) representation as bytes.
    """
    return self._binfil()
def _infil(self):
    """
    Returns fully qualified attached sig base64 bytes computed from
    self.raw, self.code and self.index.

    Layout: hard code chars + index encoded in ss Base64 chars +
    Base64-encoded raw with the trailing pad chars stripped (the code
    chars stand in for the pad, keeping 4-char alignment).
    """
    code = self.code  # codex value chars hard code
    index = self.index  # index value int used for soft
    raw = self.raw  # bytes or bytearray
    hs, ss, fs = self.Codes[code]
    bs = hs + ss  # both hard + soft size
    if not fs:  # compute fs from index
        # variable length material: whole code must align on a 4-char boundary
        if bs % 4:
            raise InvalidCodeSizeError("Whole code size not multiple of 4 for "
                                       "variable length material. bs={}.".format(bs))
        fs = (index * 4) + bs
    if index < 0 or index > (64 ** ss - 1):  # index must fit in ss Base64 chars
        raise InvalidCodeIndexError("Invalid index={} for code={}."
                                    "".format(index, code))
    # both is hard code + converted index
    both = "{}{}".format(code, intToB64(index, l=ss))
    ps = (3 - (len(raw) % 3)) % 3  # pad size
    # check valid pad size for whole code size: code length must equal the
    # pad size so that code + encoded raw is a whole number of 4-char groups
    if len(both) % 4 != ps:  # pad size is not remainder of len(both) % 4
        raise InvalidCodeSizeError("Invalid code = {} for converted raw pad size = {}."
                                   .format(both, ps))
    # prepending full derivation code with index and strip off trailing pad characters
    return (both.encode("utf-8") + encodeB64(raw)[:-ps if ps else None])
def _exfil(self, qb64b):
    """
    Extracts self.code, self.index, and self.raw from qualified base64 bytes qb64b

    Accepts either str or bytes input (decodes/encodes as needed).
    Raises ShortageError when more characters are required (streaming
    callers may retry with more input).
    """
    if not qb64b:  # empty need more bytes
        raise ShortageError("Empty material, Need more characters.")

    first = qb64b[:1]  # extract first char code selector
    if hasattr(first, "decode"):  # input may be bytes; normalize to str
        first = first.decode("utf-8")
    if first not in self.Sizes:
        # distinguish count codes ('-') and op codes ('_') for better errors
        if first[0] == '-':
            raise UnexpectedCountCodeError("Unexpected count code start"
                                           "while extracing Indexer.")
        elif first[0] == '_':
            raise UnexpectedOpCodeError("Unexpected op code start"
                                        "while extracing Indexer.")
        else:
            raise UnexpectedCodeError("Unsupported code start char={}.".format(first))

    cs = self.Sizes[first]  # get hard code size
    if len(qb64b) < cs:  # need more bytes
        raise ShortageError("Need {} more characters.".format(cs-len(qb64b)))

    hard = qb64b[:cs]  # get hard code
    if hasattr(hard, "decode"):
        hard = hard.decode("utf-8")
    if hard not in self.Codes:
        raise UnexpectedCodeError("Unsupported code ={}.".format(hard))

    hs, ss, fs = self.Codes[hard]
    bs = hs + ss  # both hard + soft code size
    # assumes that unit tests on Indexer and IndexerCodex ensure that
    # .Codes and .Sizes are well formed.
    # hs == cs and hs > 0 and ss > 0 and (fs >= hs + ss if fs is not None else True)

    if len(qb64b) < bs:  # need more bytes
        raise ShortageError("Need {} more characters.".format(bs-len(qb64b)))

    index = qb64b[hs:hs+ss]  # extract index chars
    if hasattr(index, "decode"):
        index = index.decode("utf-8")
    index = b64ToInt(index)  # compute int index

    if not fs:  # compute fs from index
        # variable length material: full size is encoded in the index
        if bs % 4:
            raise ValidationError("Whole code size not multiple of 4 for "
                                  "variable length material. bs={}.".format(bs))
        fs = (index * 4) + bs

    if len(qb64b) < fs:  # need more bytes
        raise ShortageError("Need {} more chars.".format(fs-len(qb64b)))
    qb64b = qb64b[:fs]  # fully qualified primitive code plus material
    if hasattr(qb64b, "encode"):  # only convert extracted chars from stream
        qb64b = qb64b.encode("utf-8")

    # strip off prepended code and append pad characters
    ps = bs % 4  # pad size ps = cs mod 4
    base = qb64b[bs:] + ps * BASE64_PAD
    raw = decodeB64(base)

    if len(raw) != (len(qb64b) - bs) * 3 // 4:  # exact lengths
        raise ConversionError("Improperly qualified material = {}".format(qb64b))

    self._code = hard
    self._index = index
    self._raw = raw
def _binfil(self):
    """
    Returns bytes of fully qualified base2 bytes, that is .qb2
    self.code and self.index converted to Base2 + self.raw left shifted
    with pad bits equivalent of Base64 decode of .qb64 into .qb2
    """
    code = self.code  # codex chars hard code
    index = self.index  # index value int used for soft
    raw = self.raw  # bytes or bytearray

    hs, ss, fs = self.Codes[code]
    bs = hs + ss
    if not fs:  # compute fs from index
        if bs % 4:
            raise InvalidCodeSizeError("Whole code size not multiple of 4 for "
                                       "variable length material. bs={}.".format(bs))
        fs = (index * 4) + bs
    if index < 0 or index > (64 ** ss - 1):  # index must fit in ss Base64 chars
        raise InvalidCodeIndexError("Invalid index={} for code={}.".format(index, code))

    # both is hard code + converted index
    both = "{}{}".format(code, intToB64(index, l=ss))
    if len(both) != bs:
        raise InvalidCodeSizeError("Mismatch code size = {} with table = {}."
                                   .format(bs, len(both)))

    n = sceil(bs * 3 / 4)  # number of b2 bytes to hold b64 code + index
    bcode = b64ToInt(both).to_bytes(n,'big')  # right aligned b2 code

    full = bcode + raw
    bfs = len(full)
    if bfs % 3 or (bfs * 4 // 3) != fs:  # invalid size
        raise InvalidCodeSizeError("Invalid code = {} for raw size= {}."
                                   .format(both, len(raw)))

    # shift the pad bits into the low end so the b2 form aligns with the
    # Base64 decode of .qb64
    i = int.from_bytes(full, 'big') << (2 * (bs % 4))  # left shift in pad bits
    return (i.to_bytes(bfs, 'big'))
def _bexfil(self, qb2):
    """
    Extracts self.code, self.index, and self.raw from qualified base2 bytes qb2

    Parameters:
        qb2 is bytes of fully qualified base2 (binary) material

    Raises:
        ShortageError when qb2 is too short (streaming caller may retry)
        UnexpectedCountCodeError/UnexpectedOpCodeError/UnexpectedCodeError
            when the leading sextet does not select a supported code
        ConversionError when the material is improperly qualified
    """
    if not qb2:  # empty need more bytes
        raise ShortageError("Empty material, Need more bytes.")

    first = nabSextets(qb2, 1)  # extract first sextet as code selector
    if first not in self.Bizes:
        # NOTE: compare bytes slices, not first[0] which is an int and
        # never equals a bytes literal (the old comparison made these
        # branches unreachable).
        if first[:1] == b'\xf8':  # b64ToB2('-')
            raise UnexpectedCountCodeError("Unexpected count code start"
                                           "while extracting Indexer.")
        elif first[:1] == b'\xfc':  # b64ToB2('_')
            raise UnexpectedOpCodeError("Unexpected op code start"
                                        "while extracting Indexer.")
        else:
            raise UnexpectedCodeError("Unsupported code start sextet={}.".format(first))

    cs = self.Bizes[first]  # get code hard size equivalent sextets
    bcs = sceil(cs * 3 / 4)  # bcs is min bytes to hold cs sextets
    if len(qb2) < bcs:  # need more bytes
        raise ShortageError("Need {} more bytes.".format(bcs-len(qb2)))

    hard = b2ToB64(qb2, cs)  # extract and convert hard part of code
    if hard not in self.Codes:
        raise UnexpectedCodeError("Unsupported code ={}.".format(hard))

    hs, ss, fs = self.Codes[hard]
    bs = hs + ss  # both hs and ss
    # assumes that unit tests on Indexer and IndexerCodex ensure that
    # .Codes and .Sizes are well formed.
    # hs == cs and hs > 0 and ss > 0 and (fs >= hs + ss if fs is not None else True)

    bbs = sceil(bs * 3 / 4)  # bbs is min bytes to hold bs sextets (sceil for consistency)
    if len(qb2) < bbs:  # need more bytes
        raise ShortageError("Need {} more bytes.".format(bbs-len(qb2)))

    both = b2ToB64(qb2, bs)  # extract and convert both hard and soft part of code
    index = b64ToInt(both[hs:hs+ss])  # get index

    if not fs:  # compute fs from index for variable length material
        # mirrors ._exfil; previously fs=None fell through into the
        # sceil(fs * 3 / 4) below and raised TypeError
        if bs % 4:
            raise ValidationError("Whole code size not multiple of 4 for "
                                  "variable length material. bs={}.".format(bs))
        fs = (index * 4) + bs

    bfs = sceil(fs * 3 / 4)  # bfs is min bytes to hold fs sextets
    if len(qb2) < bfs:  # need more bytes
        raise ShortageError("Need {} more bytes.".format(bfs-len(qb2)))
    qb2 = qb2[:bfs]  # fully qualified primitive code plus material

    # right shift to right align raw material
    i = int.from_bytes(qb2, 'big')
    i >>= 2 * (bs % 4)

    raw = i.to_bytes(bfs, 'big')[bbs:]  # extract raw

    if len(raw) != (len(qb2) - bbs):  # exact lengths
        raise ConversionError("Improperly qualified material = {}".format(qb2))

    self._code = hard
    self._index = index
    self._raw = raw
class Siger(Indexer):
    """
    Siger is an Indexer subclass for indexed signature material.

    Extends Indexer with a .verfer property that holds the Verfer
    instance providing the associated signature verifier.

    See Indexer for inherited attributes and properties.

    Properties:
        .verfer is Verfer object instance (or None)
    """

    def __init__(self, verfer=None, **kwa):
        """
        Initialize, passing remaining parameters through to Indexer.

        Parameters:
            verfer is optional Verfer instance for signature verification
            kwa: see Indexer for inherited parameters
        """
        super(Siger, self).__init__(**kwa)
        self._verfer = verfer

    @property
    def verfer(self):
        """
        Verfer instance used to verify this signature (may be None).
        """
        return self._verfer

    @verfer.setter
    def verfer(self, verfer):
        """ Assign the Verfer instance. """
        self._verfer = verfer
@dataclass(frozen=True)
class CounterCodex:
    """
    CounterCodex is codex hard (stable) part of all counter derivation codes.
    Only provide defined codes.
    Undefined are left out so that inclusion(exclusion) via 'in' operator works.
    """
    # 2-char hard codes: '-' + one Base64 letter, 2-char soft count
    ControllerIdxSigs: str = '-A'  # Qualified Base64 Indexed Signature.
    WitnessIdxSigs: str = '-B'  # Qualified Base64 Indexed Signature.
    NonTransReceiptCouples: str = '-C'  # Composed Base64 Couple, pre + sig.
    TransReceiptQuadruples: str = '-D'  # Composed Base64 Quadruple, pre + snu + dig + sig.
    FirstSeenReplayCouples: str = '-E'  # Composed Base64 Couple, fn + dt.
    MessageDataGroups: str = '-U'  # Composed Message Data Group or Primitive
    AttachedMaterialQuadlets: str = '-V'  # Composed Grouped Attached Material Quadlet (4 char each)
    MessageDataMaterialQuadlets: str = '-W'  # Composed Grouped Message Data Quadlet (4 char each)
    CombinedMaterialQuadlets: str = '-X'  # Combined Message Data + Attachments Quadlet (4 char each)
    MaterialGroups: str = '-Y'  # Composed Generic Material Group or Primitive
    Material: str = '-Z'  # Composed Generic Material Quadlet (4 char each)
    AnchorSealGroups: str = '-a'  # Composed Anchor Seal Material Group
    ConfigTraits: str = '-c'  # Composed Config Trait Material Group
    DigestSealQuadlets: str = '-d'  # Composed Digest Seal Quadlet (4 char each)
    EventSealQuadlets: str = '-e'  # Composed Event Seal Quadlet (4 char each)
    Keys: str = '-k'  # Composed Key Material Primitive
    LocationSealQuadlets: str = '-l'  # Composed Location Seal Quadlet (4 char each)
    RootDigestSealQuadlets: str = '-r'  # Composed Root Digest Seal Quadlet (4 char each)
    Witnesses: str = '-w'  # Composed Witness Prefix Material Primitive
    # 3-char hard codes: '-0' + one Base64 letter, 5-char soft count
    BigMessageDataGroups: str = '-0U'  # Composed Message Data Group or Primitive
    BigAttachedMaterialQuadlets: str = '-0V'  # Composed Grouped Attached Material Quadlet (4 char each)
    BigMessageDataMaterialQuadlets: str = '-0W'  # Composed Grouped Message Data Quadlet (4 char each)
    BigCombinedMaterialQuadlets: str = '-0X'  # Combined Message Data + Attachments Quadlet (4 char each)
    BigMaterialGroups: str = '-0Y'  # Composed Generic Material Group or Primitive
    BigMaterial: str = '-0Z'  # Composed Generic Material Quadlet (4 char each)

    def __iter__(self):
        """Iterate code values so that `code in CtrDex` membership works."""
        return iter(astuple(self))  # enables inclusion test with "in"

CtrDex = CounterCodex()  # module-level singleton codex instance
class Counter:
    """
    Counter is fully qualified cryptographic material primitive base class for
    counter primitives (framing composition grouping count codes).
    Sub classes are derivation code and key event element context specific.

    Includes the following attributes and properties:

    Attributes:

    Properties:
        .code is str derivation code to indicate cypher suite
        .count is int count of grouped following material (not part of counter)
        .qb64 is str in Base64 fully qualified with derivation code + crypto mat
        .qb64b is bytes in Base64 fully qualified with derivation code + crypto mat
        .qb2 is bytes in binary with derivation code + crypto material

    Hidden:
        ._code is str value for .code property
        ._count is int value for .count property
        ._infil is method to compute fully qualified Base64 from .code and .count
        ._exfil is method to extract .code and .count from fully qualified Base64
    """
    Codex = CtrDex
    # Sizes table maps from bytes Base64 first two code chars to int of
    # hard size, hs,(stable) of code. The soft size, ss, (unstable) for Counter
    # is always > 0 and hs + ss = fs always
    Sizes = ({('-' + chr(c)): 2 for c in range(65, 65+26)})
    Sizes.update({('-' + chr(c)): 2 for c in range(97, 97+26)})
    Sizes.update([('-0', 3)])
    # Codes table maps hs chars of code to Sizage namedtuple of (hs, ss, fs)
    # where hs is hard size, ss is soft size, and fs is full size
    # soft size, ss, should always be > 0 and hs+ss=fs for Counter
    Codes = {
        '-A': Sizage(hs=2, ss=2, fs=4),
        '-B': Sizage(hs=2, ss=2, fs=4),
        '-C': Sizage(hs=2, ss=2, fs=4),
        '-D': Sizage(hs=2, ss=2, fs=4),
        '-E': Sizage(hs=2, ss=2, fs=4),
        '-U': Sizage(hs=2, ss=2, fs=4),
        '-V': Sizage(hs=2, ss=2, fs=4),
        '-W': Sizage(hs=2, ss=2, fs=4),
        '-X': Sizage(hs=2, ss=2, fs=4),
        '-Y': Sizage(hs=2, ss=2, fs=4),
        '-Z': Sizage(hs=2, ss=2, fs=4),
        '-a': Sizage(hs=2, ss=2, fs=4),
        '-c': Sizage(hs=2, ss=2, fs=4),
        '-d': Sizage(hs=2, ss=2, fs=4),
        '-e': Sizage(hs=2, ss=2, fs=4),
        '-k': Sizage(hs=2, ss=2, fs=4),
        '-l': Sizage(hs=2, ss=2, fs=4),
        '-r': Sizage(hs=2, ss=2, fs=4),
        '-w': Sizage(hs=2, ss=2, fs=4),
        '-0U': Sizage(hs=3, ss=5, fs=8),
        '-0V': Sizage(hs=3, ss=5, fs=8),
        '-0W': Sizage(hs=3, ss=5, fs=8),
        '-0X': Sizage(hs=3, ss=5, fs=8),
        '-0Y': Sizage(hs=3, ss=5, fs=8),
        '-0Z': Sizage(hs=3, ss=5, fs=8)
    }
    # Bizes table maps to hard size, hs, of code from bytes holding sextets
    # converted from first two code char. Used for ._bexfil.
    Bizes = ({b64ToB2(c): hs for c, hs in Sizes.items()})

    def __init__(self, code=None, count=1, qb64b=None, qb64=None, qb2=None):
        """
        Validate as fully qualified

        Parameters:
            code is str of stable (hard) part of derivation code
            count is int count for following group of items (primitives or groups)
            qb64b is bytes of fully qualified crypto material
            qb64 is str or bytes of fully qualified crypto material
            qb2 is bytes of fully qualified crypto material

        Needs either (code and count) or qb64b or qb64 or qb2
        Otherwise raises EmptyMaterialError
        When code and count provided then validate that code and count are correct
        Else when qb64b or qb64 or qb2 provided extract and assign
        .code and .count
        """
        if code is not None:  # code provided
            if code not in self.Codes:
                raise UnknownCodeError("Unsupported code={}.".format(code))

            hs, ss, fs = self.Codes[code]  # get sizes for code
            bs = hs + ss  # both hard + soft code size
            if fs != bs or bs % 4:  # fs must be bs and multiple of 4 for count codes
                raise InvalidCodeSizeError("Whole code size not full size or not "
                                           "multiple of 4. bs={} fs={}.".format(bs, fs))
            if count < 0 or count > (64 ** ss - 1):  # count must fit in ss chars
                raise InvalidCodeIndexError("Invalid count={} for code={}.".format(count, code))

            self._code = code
            self._count = count
        elif qb64b is not None:
            self._exfil(qb64b)
        elif qb64 is not None:
            self._exfil(qb64)
        elif qb2 is not None:
            self._bexfil(qb2)
        else:
            raise EmptyMaterialError("Improper initialization need either "
                                     "(code and count) or qb64b or "
                                     "qb64 or qb2.")

    @property
    def code(self):
        """
        Returns ._code
        Makes .code read only
        """
        return self._code

    @property
    def count(self):
        """
        Returns ._count
        Makes ._count read only
        """
        return self._count

    @property
    def qb64b(self):
        """
        Property qb64b:
        Returns Fully Qualified Base64 Version encoded as bytes
        Assumes self.code and self.count are correctly populated
        """
        return self._infil()

    @property
    def qb64(self):
        """
        Property qb64:
        Returns Fully Qualified Base64 Version
        Assumes self.code and self.count are correctly populated
        """
        return self.qb64b.decode("utf-8")

    @property
    def qb2(self):
        """
        Property qb2:
        Returns Fully Qualified Binary Version Bytes
        """
        return self._binfil()

    def _infil(self):
        """
        Returns fully qualified attached sig base64 bytes computed from
        self.code and self.count.
        """
        code = self.code  # codex value chars hard code
        count = self.count  # index value int used for soft

        hs, ss, fs = self.Codes[code]
        bs = hs + ss  # both hard + soft size
        if fs != bs or bs % 4:  # fs must be bs and multiple of 4 for count codes
            raise InvalidCodeSizeError("Whole code size not full size or not "
                                       "multiple of 4. bs={} fs={}.".format(bs, fs))
        if count < 0 or count > (64 ** ss - 1):  # count must fit in ss chars
            raise InvalidCodeIndexError("Invalid count={} for code={}.".format(count, code))

        # both is hard code + converted count
        both = "{}{}".format(code, intToB64(count, l=ss))

        # check valid pad size for whole code size
        if len(both) % 4:  # no pad
            raise InvalidCodeSizeError("Invalid size = {} of {} not a multiple of 4."
                                       .format(len(both), both))
        # prepending full derivation code with index and strip off trailing pad characters
        return (both.encode("utf-8"))

    def _exfil(self, qb64b):
        """
        Extracts self.code and self.count from qualified base64 bytes qb64b
        """
        if not qb64b:  # empty need more bytes
            raise ShortageError("Empty material, Need more characters.")

        first = qb64b[:2]  # extract first two char code selector
        if hasattr(first, "decode"):  # normalize bytes input to str
            first = first.decode("utf-8")
        if first not in self.Sizes:
            if first[0] == '_':
                raise UnexpectedOpCodeError("Unexpected op code start"
                                            "while extracing Counter.")
            else:
                raise UnexpectedCodeError("Unsupported code start ={}.".format(first))

        cs = self.Sizes[first]  # get hard code size
        if len(qb64b) < cs:  # need more bytes
            raise ShortageError("Need {} more characters.".format(cs-len(qb64b)))

        hard = qb64b[:cs]  # get hard code
        if hasattr(hard, "decode"):
            hard = hard.decode("utf-8")
        if hard not in self.Codes:
            raise UnexpectedCodeError("Unsupported code ={}.".format(hard))

        hs, ss, fs = self.Codes[hard]
        bs = hs + ss  # both hard + soft code size
        # assumes that unit tests on Counter and CounterCodex ensure that
        # .Codes and .Sizes are well formed.
        # hs == cs and hs > 0 and ss > 0 and fs = hs + ss and not fs % 4

        if len(qb64b) < bs:  # need more bytes
            raise ShortageError("Need {} more characters.".format(bs-len(qb64b)))

        count = qb64b[hs:hs+ss]  # extract count chars
        if hasattr(count, "decode"):
            count = count.decode("utf-8")
        count = b64ToInt(count)  # compute int count

        self._code = hard
        self._count = count

    def _binfil(self):
        """
        Returns bytes of fully qualified base2 bytes, that is .qb2
        self.code converted to Base2 left shifted with pad bits
        equivalent of Base64 decode of .qb64 into .qb2
        """
        code = self.code  # codex chars hard code
        count = self.count  # index value int used for soft

        hs, ss, fs = self.Codes[code]
        bs = hs + ss
        if fs != bs or bs % 4:  # fs must be bs and multiple of 4 for count codes
            raise InvalidCodeSizeError("Whole code size not full size or not "
                                       "multiple of 4. bs={} fs={}.".format(bs, fs))
        if count < 0 or count > (64 ** ss - 1):  # count must fit in ss chars
            raise InvalidCodeIndexError("Invalid count={} for code={}.".format(count, code))

        # both is hard code + converted count
        both = "{}{}".format(code, intToB64(count, l=ss))
        if len(both) != bs:
            raise InvalidCodeSizeError("Mismatch code size = {} with table = {}."
                                       .format(bs, len(both)))

        return (b64ToB2(both))  # convert to b2 left shift if any

    def _bexfil(self, qb2):
        """
        Extracts self.code and self.count from qualified base2 bytes qb2
        """
        if not qb2:  # empty need more bytes
            raise ShortageError("Empty material, Need more bytes.")

        first = nabSextets(qb2, 2)  # extract first two sextets as code selector
        if first not in self.Bizes:
            # NOTE: compare the leading byte as a bytes slice, not first[0],
            # which is an int and never equals a bytes literal (the old
            # comparison made this branch unreachable).
            if first[:1] == b'\xfc':  # b64ToB2('_')
                raise UnexpectedOpCodeError("Unexpected op code start"
                                            "while extracting Counter.")
            else:
                raise UnexpectedCodeError("Unsupported code start sextet={}.".format(first))

        cs = self.Bizes[first]  # get code hard size equivalent sextets
        bcs = sceil(cs * 3 / 4)  # bcs is min bytes to hold cs sextets
        if len(qb2) < bcs:  # need more bytes
            raise ShortageError("Need {} more bytes.".format(bcs-len(qb2)))

        hard = b2ToB64(qb2, cs)  # extract and convert hard part of code
        if hard not in self.Codes:
            raise UnexpectedCodeError("Unsupported code ={}.".format(hard))

        hs, ss, fs = self.Codes[hard]
        bs = hs + ss  # both hs and ss
        # assumes that unit tests on Counter and CounterCodex ensure that
        # .Codes and .Sizes are well formed.
        # hs == cs and hs > 0 and ss > 0 and fs = hs + ss and not fs % 4

        bbs = sceil(bs * 3 / 4)  # bbs is min bytes to hold bs sextets (sceil for consistency)
        if len(qb2) < bbs:  # need more bytes
            raise ShortageError("Need {} more bytes.".format(bbs-len(qb2)))

        both = b2ToB64(qb2, bs)  # extract and convert both hard and soft part of code
        count = b64ToInt(both[hs:hs+ss])  # get count

        self._code = hard
        self._count = count
class Serder:
    """
    Serder is KERI key event serializer-deserializer class
    Only supports current version VERSION

    Has the following public properties:

    Properties:
        .raw is bytes of serialized event only
        .ked is key event dict
        .kind is serialization kind string value (see namedtuple coring.Serials)
        .version is Versionage instance of event version
        .size is int of number of bytes in serialed event only
        .diger is Diger instance of digest of .raw
        .dig is qb64 digest from .diger
        .digb is qb64b digest from .diger
        .verfers is list of Verfers converted from .ked["k"]
        .sn is int sequence number converted from .ked["s"]
        .pre is qb64 str of identifier prefix from .ked["i"]
        .preb is qb64b bytes of identifier prefix from .ked["i"]

    Hidden Attributes:
        ._raw is bytes of serialized event only
        ._ked is key event dict
        ._kind is serialization kind string value (see namedtuple coring.Serials)
            supported kinds are 'json', 'cbor', 'msgpack', 'binary'
        ._version is Versionage instance of event version
        ._size is int of number of bytes in serialed event only
        ._code is default code for .diger
        ._diger is Diger instance of digest of .raw

    Note:
        loads and jumps of json use str whereas cbor and msgpack use bytes
    """

    def __init__(self, raw=b'', ked=None, kind=None, code=MtrDex.Blake3_256):
        """
        Deserialize if raw provided
        Serialize if ked provided but not raw
        When serializing if kind provided then use kind instead of field in ked

        Parameters:
            raw is bytes of serialized event plus any attached signatures
            ked is key event dict or None
                if None its deserialized from raw
            kind is serialization kind string value or None (see namedtuple coring.Serials)
                supported kinds are 'json', 'cbor', 'msgpack', 'binary'
                if kind is None then its extracted from ked or raw
            code is .diger default digest code
        """
        self._code = code  # need default code for .diger
        if raw:  # deserialize raw using property setter
            self.raw = raw  # raw property setter does the deserialization
        elif ked:  # serialize ked using property setter
            self._kind = kind
            self.ked = ked  # ked property setter does the serialization
        else:
            raise ValueError("Improper initialization need raw or ked.")

    @staticmethod
    def _sniff(raw):
        """
        Returns serialization kind, version and size from serialized event raw
        by investigating leading bytes that contain version string

        Parameters:
            raw is bytes of serialized event
        """
        if len(raw) < MINSNIFFSIZE:
            raise ShortageError("Need more bytes.")

        match = Rever.search(raw)  # Rever's regex takes bytes
        if not match or match.start() > 12:
            raise VersionError("Invalid version string in raw = {}".format(raw))

        major, minor, kind, size = match.group("major", "minor", "kind", "size")
        version = Versionage(major=int(major, 16), minor=int(minor, 16))
        kind = kind.decode("utf-8")
        if kind not in Serials:
            raise DeserializationError("Invalid serialization kind = {}".format(kind))
        size = int(size, 16)
        return (kind, version, size)

    def _inhale(self, raw):
        """
        Parses serialized event raw of sniffed serialization kind and returns
        the deserialized components.

        Parameters:
            raw is bytes of serialized event

        Returns:
            tuple of (ked, kind, version, size)

        Note:
            loads and jumps of json use str whereas cbor and msgpack use bytes
        """
        kind, version, size = self._sniff(raw)
        if version != Version:
            raise VersionError("Unsupported version = {}.{}, expected {}."
                               "".format(version.major, version.minor, Version))
        if len(raw) < size:
            raise ShortageError("Need more bytes.")

        if kind == Serials.json:
            try:
                ked = json.loads(raw[:size].decode("utf-8"))
            except Exception as ex:
                # chain the cause so the original parse error is preserved
                raise DeserializationError("Error deserializing JSON: {}"
                                           "".format(raw[:size].decode("utf-8"))) from ex
        elif kind == Serials.mgpk:
            try:
                ked = msgpack.loads(raw[:size])
            except Exception as ex:
                raise DeserializationError("Error deserializing MGPK: {}"
                                           "".format(raw[:size])) from ex
        elif kind == Serials.cbor:
            try:
                ked = cbor.loads(raw[:size])
            except Exception as ex:
                raise DeserializationError("Error deserializing CBOR: {}"
                                           "".format(raw[:size])) from ex
        else:
            ked = None

        return (ked, kind, version, size)

    def _exhale(self, ked, kind=None):
        """
        ked is key event dict
        kind is serialization if given else use one given in ked
        Returns tuple of (raw, kind, ked, version) where:
            raw is serialized event as bytes of kind
            kind is serialzation kind
            ked is key event dict
            version is Versionage instance

        Assumes only supports Version
        """
        if "v" not in ked:
            raise ValueError("Missing or empty version string in key event dict = {}".format(ked))

        knd, version, size = Deversify(ked["v"])  # extract kind and version
        if version != Version:
            raise ValueError("Unsupported version = {}.{}".format(version.major,
                                                                  version.minor))
        if not kind:
            kind = knd

        if kind not in Serials:
            raise ValueError("Invalid serialization kind = {}".format(kind))

        if kind == Serials.json:
            raw = json.dumps(ked, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
        elif kind == Serials.mgpk:
            raw = msgpack.dumps(ked)
        elif kind == Serials.cbor:
            raw = cbor.dumps(ked)
        else:
            raise ValueError("Invalid serialization kind = {}".format(kind))

        size = len(raw)

        match = Rever.search(raw)  # Rever's regex takes bytes
        if not match or match.start() > 12:
            raise ValueError("Invalid version string in raw = {}".format(raw))

        fore, back = match.span()  # full version string

        # update vs with latest kind version size
        vs = Versify(version=version, kind=kind, size=size)

        # replace old version string in raw with new one
        # the fixed-width version string keeps size stable across substitution
        raw = b'%b%b%b' % (raw[:fore], vs.encode("utf-8"), raw[back:])
        if size != len(raw):  # substitution messed up
            raise ValueError("Malformed version string size = {}".format(vs))
        ked["v"] = vs  # update ked

        return (raw, kind, ked, version)

    def compare(self, dig=None, diger=None):
        """
        Returns True if dig and either .diger.qb64 or .diger.qb64b match or
        if both .diger.raw and dig are valid digests of self.raw
        Otherwise returns False

        Convenience method to allow comparison of own .diger digest self.raw
        with some other purported digest of self.raw

        Parameters:
            dig is qb64b or qb64 digest of ser to compare with .diger.raw
            diger is Diger instance of digest of ser to compare with .diger.raw

            if both supplied dig takes precedence

        If both match then as optimization returns True and does not verify either
            as digest of ser
        If both have same code but do not match then as optimization returns False
            and does not verify if either is digest of ser
        But if both do not match then recalcs both digests to verify they
        they are both digests of ser with or without matching codes.
        """
        return (self.diger.compare(ser=self.raw, dig=dig, diger=diger))

    @property
    def raw(self):
        """ raw property getter """
        return self._raw

    @raw.setter
    def raw(self, raw):
        """ raw property setter """
        ked, kind, version, size = self._inhale(raw=raw)
        self._raw = bytes(raw[:size])  # crypto ops require bytes not bytearray
        self._ked = ked
        self._kind = kind
        self._version = version
        self._size = size
        self._diger = Diger(ser=self._raw, code=self._code)

    @property
    def ked(self):
        """ ked property getter"""
        return self._ked

    @ked.setter
    def ked(self, ked):
        """ ked property setter assumes ._kind """
        raw, kind, ked, version = self._exhale(ked=ked, kind=self._kind)
        size = len(raw)
        self._raw = raw[:size]
        self._ked = ked
        self._kind = kind
        self._size = size
        self._version = version
        self._diger = Diger(ser=self._raw, code=self._code)

    @property
    def kind(self):
        """ kind property getter"""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """ kind property setter Assumes ._ked """
        raw, kind, ked, version = self._exhale(ked=self._ked, kind=kind)
        size = len(raw)
        self._raw = raw[:size]
        self._ked = ked
        self._kind = kind
        self._size = size
        self._version = version
        self._diger = Diger(ser=self._raw, code=self._code)

    @property
    def version(self):
        """ version property getter"""
        return self._version

    @property
    def size(self):
        """ size property getter"""
        return self._size

    @property
    def diger(self):
        """
        Returns Diger of digest of self.raw
        diger (digest material) property getter
        """
        return self._diger

    @property
    def dig(self):
        """
        Returns qualified Base64 digest of self.raw
        dig (digest) property getter
        """
        return self.diger.qb64

    @property
    def digb(self):
        """
        Returns qualified Base64 bytes digest of self.raw
        digb (digest bytes) property getter
        """
        return self.diger.qb64b

    @property
    def verfers(self):
        """
        Returns list of Verfer instances as converted from .ked["k"]
        verfers property getter
        """
        if "k" in self.ked:  # establishment event
            keys = self.ked["k"]
        else:  # non-establishment event
            keys = []

        return [Verfer(qb64=key) for key in keys]

    @property
    def sn(self):
        """
        Returns int of .ked["s"] (sequence number)
        sn (sequence number) property getter
        """
        return int(self.ked["s"], 16)

    @property
    def pre(self):
        """
        Returns str qb64 of .ked["i"] (identifier prefix)
        pre (identifier prefix) property getter
        """
        return self.ked["i"]

    @property
    def preb(self):
        """
        Returns bytes qb64b of .ked["i"] (identifier prefix)
        preb (identifier prefix) property getter
        """
        return self.pre.encode("utf-8")
class Tholder:
"""
Tholder is KERI Signing Threshold Satisfactionclass
.satisfy method evaluates satisfaction based on ordered list of indices of
verified signatures where indices correspond to offsets in key list of
associated signatures.
Has the following public properties:
Properties:
.sith is original signing threshold
.thold is parsed signing threshold
.limen is the extracted string for the next commitment to the threshold
.weighted is Boolean True if fractional weighted threshold False if numeric
.size is int of minimun size of keys list
Hidden:
._sith is original signing threshold
._thold is parsed signing threshold
._limen is extracted string for the next commitment to threshold
._weighted is Boolean, True if fractional weighted threshold False if numeric
._size is int minimum size of of keys list
._satisfy is method reference of threshold specified verification method
._satisfy_numeric is numeric threshold verification method
._satisfy_weighted is fractional weighted threshold verification method
"""
def __init__(self, sith=''):
"""
Parse threshold
Parameters:
sith is either hex string of threshold number or iterable of fractional
weights. Fractional weights may be either an iterable of
fraction strings or an iterable of iterables of fractions strings.
The verify method appropriately evaluates each of the threshold
forms.
"""
self._sith = sith
if isinstance(sith, str):
self._weighted = False
thold = int(sith, 16)
if thold < 1:
raise ValueError("Invalid sith = {} < 1.".format(thold))
self._thold = thold
self._size = self._thold # used to verify that keys list size is at least size
self._satisfy = self._satisfy_numeric
self._limen = self._sith # just use hex string
else: # assumes iterable of weights or iterable of iterables of weights
self._weighted = True
if not sith: # empty iterable
raise ValueError("Invalid sith = {}, empty weight list.".format(sith))
mask = [isinstance(w, str) for w in sith]
if mask and all(mask): # not empty and all strings
sith = [sith] # make list of list so uniform
elif any(mask): # some strings but not all
raise ValueError("Invalid sith = {} some weights non non string."
"".format(sith))
# replace fractional strings with fractions
thold = []
for clause in sith: # convert string fractions to Fractions
thold.append([Fraction(w) for w in clause]) # append list of Fractions
for clause in thold: # sum of fractions in clause must be >= 1
if not (sum(clause) >= 1):
raise ValueError("Invalid sith cLause = {}, all clause weight "
"sums must be >= 1.".format(thold))
self._thold = thold
self._size = sum(len(clause) for clause in thold)
self._satisfy = self._satisfy_weighted
# extract limen from sith
self._limen = "&".join([",".join(clause) for clause in sith])
@property
def sith(self):
""" sith property getter """
return self._sith
@property
def thold(self):
""" thold property getter """
return self._thold
@property
def weighted(self):
""" weighted property getter """
return self._weighted
@property
def size(self):
""" size property getter """
return self._size
@property
def limen(self):
""" limen property getter """
return self._limen
def satisfy(self, indices):
"""
Returns True if indices list of verified signature key indices satisfies
threshold, False otherwise.
Parameters:
indices is list of indices (offsets into key list) of verified signatures
"""
return (self._satisfy(indices=indices))
def _satisfy_numeric(self, indices):
"""
Returns True if satisfies numeric threshold False otherwise
Parameters:
indices is list of indices (offsets into key list) of verified signatures
"""
try:
if len(indices) >= self.thold:
return True
except Exception as ex:
return False
return False
    def _satisfy_weighted(self, indices):
        """
        Returns True if satisfies fractional weighted threshold False otherwise

        Parameters:
            indices is list of indices (offsets into key list) of verified signatures
        """
        try:
            if not indices:  # empty indices
                return False

            # remove duplicates with set, sort low to high
            indices = sorted(set(indices))
            sats = [False] * self.size  # default all satisfactions to False
            for idx in indices:
                sats[idx] = True  # set sat at verified signature index to True

            wio = 0  # weight index offset into the flattened clause weights
            for clause in self.thold:
                cw = 0  # init clause weight
                for w in clause:
                    if sats[wio]:  # verified signature so weight applies
                        cw += w
                    wio += 1
                # every clause must independently accumulate weight >= 1
                if cw < 1:
                    return False
            return True  # all clauses including final one cw >= 1

        except Exception as ex:
            # out-of-range index (IndexError) or malformed input: not satisfied
            return False

        # unreachable: both paths above return
        return False
| StarcoderdataPython |
1752095 | <filename>makeup_service/server/common.py
import os
from pathlib import Path
def get_data_folder():
    """Return the path of the package-level ``data`` directory as a string."""
    package_root = Path(__file__).parent.parent
    return os.path.join(package_root, 'data')
| StarcoderdataPython |
158451 | """Package containing pyleus implementation of major Storm entities.
"""
from __future__ import absolute_import
from collections import namedtuple
DEFAULT_STREAM = "default"
StormTuple = namedtuple('StormTuple', "id comp stream task values")
"""Namedtuple representing a Storm tuple.
* **id**\(``str`` or ``long``): tuple identifier
* **comp**\(``str``): name of the emitting component
* **stream**\(``str``): name of the input stream the tuple belongs to
* **values**\(``tuple``): values contained by the tuple
"""
def is_tick(tup):
    """Tell whether the tuple is a tick tuple or not.

    :param tup: tuple to investigate
    :type tup: :class:`~.StormTuple`

    :return: ``True`` if the tuple is a tick tuple, ``False`` otherwise
    :rtype: ``bool``
    """
    # Storm (0.8+) generates tick tuples from the __system component on the
    # __tick stream.
    from_system = (tup.comp == '__system')
    return from_system and tup.stream == '__tick'
def is_heartbeat(tup):
    """Tell whether the tuple is a heartbeat tuple or not.

    :param tup: tuple to investigate
    :type tup: :class:`~.StormTuple`

    :return: ``True`` if the tuple is a heartbeat tuple, ``False`` otherwise
    :rtype: ``bool``
    """
    # heartbeats arrive from task -1 on the __heartbeat stream
    if tup.task != -1:
        return False
    return tup.stream == '__heartbeat'
class StormWentAwayError(Exception):
    """Raised when the connection between the component and Storm terminates.
    """

    def __init__(self):
        # fixed message: this error always means the pipe to Storm closed
        super(StormWentAwayError, self).__init__(
            "Got EOF while reading from Storm")
from pyleus.storm.bolt import Bolt, SimpleBolt
from pyleus.storm.spout import Spout
_ = [Bolt, SimpleBolt, Spout] # pyflakes
| StarcoderdataPython |
197456 |
"""
This module defines a class used for evaluating coordinate transformations at null shell junctions.
"""
import numpy as np
import interpolators as interp
from helpers import *
class active_slice:
    """
    Class for handling shell and corner slicing of SSS regions. Given the region and the slice parameters,
    reference arrays are created for all desired transformations, which are then defined by interpolation
    and extrapolation of the reference arrays.

    Unlike passive_slice, this will actively obtain new functions U(udl),V(vdl) based on the inputs U0,V0, rather
    than just reading the existing ones.

    Designed to simultaneously incorporate both shell and corner junctions. Input determines which behavior will
    take effect. Offers protection from bad inputs in most cases. The exception is that the correct ublocks and
    vblocks have to be given or everything will come out whacky.

    Slice Location:
    The location of the slice is determined by the values r0,u0,v0. These describe a point with coordinates
    (u0,v0) in some block, and with radius r0. The values are forced to be self-consistent by the algorithm.
    In corner junction mode, this is the corner point. Otherwise, it just describes a point on the shell.
    In shell mode, r0 and (u0 xor v0) can be nan, in which case the parameters just specify the shell.
    To fully specify the point we need to know what block it's in. So correct ublock and vblock args are needed.

    Slice Location Input Requirements:
        u0==finite xor v0==finite # avoids overspecifying a nonphysical point
        (optional in shell mode) r0==finite # if finite, used to determine the other one of u0,v0

    Corner Junction Mode Input Requirements:
        r0==finite # corner junction point radius
        u0==finite xor v0==finite # corner junction coordinate value
        U0 not None # function U0(r) at v=v0
        V0 not None # function V0(r) at u=u0
        ublocks==[correct] # the correct list of block indices must be provided
        vblocks==[correct] # the correct list of block indices must be provided

    Shell Junction Mode Input Requirements:
        For example, suppose we want a shell junction at v=v0.
        (v0==finite) xor (r0==finite and u0==finite) # either of these uniquely determines the value v=v0 for the shell
        U0 not None # function U0(r) at v=v0 shell
        vblocks==[correct] # the correct list of block indices must be provided
        For shell at u=u0, just switch u's and v's.

    Inputs:
        reg = The region being sliced.
        ublocks = List of indices (corresponding to reg.blocks) for the blocks containing the u=u0 slice.
        vblocks = List of indices (corresponding to reg.blocks) for the blocks containing the v=v0 slice.
        r0 = Radius of the junction corner point. Or, radius of the specified point on the shell in shell mode. See above.
        u0 = Slice location coordinate. See above.
        v0 = Slice location coordinate. See above.
        U0 = Function U0(r) at v=v0, or None. When None, U(udl) = udl.
        V0 = Function V0(r) at u=u0, or None. When None, V(vdl) = vdl.
        mu = Extrapolation parameter. When mu=0, extrapolation is just translation. When mu->inf, extrapolation is smooth linear.
             (See interpolators module).
        r_refs = Reference radius values forwarded to get_r_ref when building the radial reference array.

    Methods:
        Provides various coordinate transformation methods, which can be used by a region.

    Attributes:
        Input parameters as well as various reference arrays.
    """

    def __init__(self, reg, ublocks=[], vblocks=[], r0=np.nan, u0=np.nan, v0=np.nan, U0=None, V0=None, mu=0., r_refs=[]):
        # NOTE(review): the mutable list defaults are shared across calls;
        # safe only because they are never mutated here.
        ## process and store input values
        self.reg = reg
        self.ublocks, self.vblocks = bcon(reg,ublocks), bcon(reg,vblocks)
        self.r0, self.u0, self.v0 = set_ruv0(reg, r0=r0, u0=u0, v0=v0)
        self.U0, self.V0 = U0, V0
        self.mu = 1.*float(mu)
        ## get ref arrays
        self.r = get_r_ref(self.reg, r_refs, self.r0)
        self.r, self.uvdl_u0, self.uvdl_v0 = uvdl_of_r_at_uv0(self.r, self.reg, ublocks=self.ublocks, vblocks=self.vblocks, u0=self.u0, v0=self.v0)
        self.U_v0 = U_of_udl_at_v0(self.r, self.uvdl_v0[0], self.U0)
        self.V_u0 = V_of_vdl_at_u0(self.r, self.uvdl_u0[1], self.V0)
        ## use interpolated functions to get UV of ref arrays
        # corner-junction mode only: requires finite r0 plus both block lists
        if np.isfinite(r0) and len(ublocks)>0 and len(vblocks)>0:
            self.UV_u0 = self.UV_of_uvdl(self.uvdl_u0)
            self.UV_v0 = self.UV_of_uvdl(self.uvdl_v0)

    """
    Coordinate transformation methods.
    """

    def U_of_udl_at_v0(self, udl):
        """
        Evaluate the function U(udl) = U(r(udl,vdl0)) by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        """
        return interp.interp_with_smooth_extrap(udl, self.uvdl_v0[0], self.U_v0, mu=self.mu)

    def V_of_vdl_at_u0(self, vdl):
        """
        Evaluate the function V(vdl) = V(r(udl0,vdl)) by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        """
        return interp.interp_with_smooth_extrap(vdl, self.uvdl_u0[1], self.V_u0, mu=self.mu)

    def UV_of_uvdl(self, uvdl):
        """
        Combine U(udl) and V(vdl) into UV(uvdl).
        """
        U_temp = self.U_of_udl_at_v0(uvdl[0])
        V_temp = self.V_of_vdl_at_u0(uvdl[1])
        UV_temp = np.array([U_temp,V_temp])
        return UV_temp

    def U_of_r_at_v0(self, r):
        """
        Evaluate the function U(r) at v0 by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        Setting mu to nan because this should not be extrapolated.
        """
        return interp.interp_with_smooth_extrap(r, self.r, self.U_v0, mu=np.nan)

    def V_of_r_at_u0(self, r):
        """
        Evaluate the function V(r) at u0 by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        Setting mu to nan because this should not be extrapolated.
        """
        return interp.interp_with_smooth_extrap(r, self.r, self.V_u0, mu=np.nan)
| StarcoderdataPython |
3364964 | <gh_stars>0
#!/usr/bin/env python
import sys, re
import getopt, os
import json
import nltk
from weathercom import get_weathercom
def usage():
    """Print the command-line usage string (Python 2 print statement)."""
    print "weatherbot.py [-u \"Celsius\"] [-c \"San Francisco\"]"
# Interactive chatbot loop (Python 2): read utterances, parse topic /
# location / date with regexes, query weather.com, and print a reply.
if __name__=="__main__":

    # set user model defaults
    default_unit = "Celsius"
    default_city = "San Francisco"
    default_date = "today"

    #read command line arguments
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hu:c:",
                                   ["help", "units=", "current_city="])
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        usage()
        sys.exit(2)

    # parse command line arguments
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-u", "--units"):
            default_unit = a
        elif o in ("-c", "--current_city"):
            default_city = a
        else:
            assert False, "unknown option"

    print "Starting AwesomeProgram by Student..." # modify this line to add the name of your bot.
    print "...default unit: " + default_unit
    print "...default city: " + default_city

    stopped = False
    while not stopped:

        # Step 1: get the user input
        user_input = raw_input("User utterance: ")
        print "User input is: " + user_input

        # check if the user wants to quit
        if user_input == "quit":
            break # exit the loop right away

        ########
        # Step 2: parse the user input into a semantic representation
        # of the form TOPIC, LOCATION, DATE
        # This is the part you should modify
        ########

        # parse topic
        topic = "notweather"
        m = re.search("weather", user_input, re.I)
        if m:
            topic = "weather"
        if topic != "weather":
            print "Sorry I am not smart enough to respond."
            continue
        print "Parsed topic: " + topic

        #parse location (first word following "in")
        location = default_city
        m = re.search("in (\w+)", user_input)
        if m:
            location = m.group(1)
        print "Parsed location: " + location

        #parse date
        date = default_date
        m = re.search("today|tomorrow", user_input)
        if m:
            date = m.group(0)
        print "Parsed date: " + date

        ######
        # Steps 3 and 4: send and obtain the weather forecast
        ######

        #get current weather and weather forecast from weather.com
        weathercom_result = get_weathercom(location)
        #print weathercom_result

        ######
        # Step 5: generate output to the user
        # This is the part you should modify
        #######
        weatherbot_output = ""
        if date == "today":
            weatherbot_output = "Current weather in " + \
                location + \
                " is " + \
                weathercom_result["current_conditions"]["text"] + \
                " with temperature " + \
                weathercom_result["current_conditions"]["temperature"] + \
                " degrees Celsius."
        elif date == "tomorrow":
            weatherbot_output = "The weather tomorrow in " + \
                location + \
                " is " + \
                weathercom_result["forecasts"][1]["day"]["text"] + \
                " with temperature between " + \
                weathercom_result["forecasts"][1]["low"] + \
                " and " + \
                weathercom_result["forecasts"][1]["high"] + \
                " degrees Celsius or " + \
                str (int(weathercom_result["forecasts"][1]["high"])*20 - 16)
            # NOTE(review): high*20 - 16 is not a standard Celsius->Fahrenheit
            # conversion (that would be C*9/5 + 32) -- confirm intent.

        print "WeatherBot says: " + weatherbot_output

    print "Goodbye."
| StarcoderdataPython |
196851 | # Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.util.udf_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import os
import subprocess
import sys
import tempfile
from unittest import mock
import tensorflow as tf
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.types import component_spec
from tfx.utils import import_utils
class _MyComponentSpec(component_spec.ComponentSpec):
  # Test-only spec: two optional string parameters, no input/output channels.
  PARAMETERS = {
      'my_module_file':
          component_spec.ExecutionParameter(type=str, optional=True),
      'my_module_path':
          component_spec.ExecutionParameter(type=str, optional=True),
  }
  INPUTS = {}
  OUTPUTS = {}
class _MyComponent(base_component.BaseComponent):
  # Minimal component wiring _MyComponentSpec to a no-op Beam executor.
  SPEC_CLASS = _MyComponentSpec
  EXECUTOR_SPEC = executor_spec.BeamExecutorSpec(base_executor.BaseExecutor)
class UdfUtilsTest(tf.test.TestCase):
  """Tests for udf_utils: UDF lookup and user-module wheel packaging."""

  @mock.patch.object(import_utils, 'import_func_from_source')
  def testGetFnFromSource(self, mock_import_func):
    exec_properties = {'module_file': 'path/to/module_file.py'}
    udf_utils.get_fn(exec_properties, 'test_fn')
    mock_import_func.assert_called_once_with('path/to/module_file.py',
                                             'test_fn')

  @mock.patch.object(import_utils, 'import_func_from_module')
  def testGetFnFromModule(self, mock_import_func):
    exec_properties = {'module_path': 'path.to.module'}
    udf_utils.get_fn(exec_properties, 'test_fn')
    mock_import_func.assert_called_once_with('path.to.module', 'test_fn')

  @mock.patch.object(import_utils, 'import_func_from_module')
  def testGetFnFromModuleFn(self, mock_import_func):
    # fully-qualified "path.to.module.test_fn" form: module and function
    # name are split before import.
    exec_properties = {'test_fn': 'path.to.module.test_fn'}
    udf_utils.get_fn(exec_properties, 'test_fn')
    mock_import_func.assert_called_once_with('path.to.module', 'test_fn')

  def testGetFnFailure(self):
    with self.assertRaises(ValueError):
      udf_utils.get_fn({}, 'test_fn')

  def test_ephemeral_setup_py_contents(self):
    contents = udf_utils._get_ephemeral_setup_py_contents(
        'my_pkg', '0.0+xyz', ['a', 'abc', 'xyz'])
    self.assertIn("name='my_pkg',", contents)
    self.assertIn("version='0.0+xyz',", contents)
    self.assertIn("py_modules=['a', 'abc', 'xyz'],", contents)

  def test_version_hash(self):

    def _write_temp_file(user_module_dir, file_name, contents):
      with open(os.path.join(user_module_dir, file_name), 'w') as f:
        f.write(contents)

    user_module_dir = tempfile.mkdtemp()
    _write_temp_file(user_module_dir, 'a.py', 'aa1')
    _write_temp_file(user_module_dir, 'bb.py', 'bbb2')
    _write_temp_file(user_module_dir, 'ccc.py', 'cccc3')
    _write_temp_file(user_module_dir, 'dddd.py', 'ddddd4')

    # 'bb.py' is written but not listed below, so it must not contribute
    # to the version hash.
    expected_plaintext = (
        # Length and encoding of "a.py".
        b'\x00\x00\x00\x00\x00\x00\x00\x04a.py'
        # Length and encoding of contents of "a.py".
        b'\x00\x00\x00\x00\x00\x00\x00\x03aa1'
        # Length and encoding of "ccc.py".
        b'\x00\x00\x00\x00\x00\x00\x00\x06ccc.py'
        # Length and encoding of contents of "ccc.py".
        b'\x00\x00\x00\x00\x00\x00\x00\x05cccc3'
        # Length and encoding of "dddd.py".
        b'\x00\x00\x00\x00\x00\x00\x00\x07dddd.py'
        # Length and encoding of contents of "dddd.py".
        b'\x00\x00\x00\x00\x00\x00\x00\x06ddddd4')
    h = hashlib.sha256()
    h.update(expected_plaintext)
    expected_version_hash = h.hexdigest()
    self.assertEqual(
        expected_version_hash,
        '4fecd9af212c76ee4097037caf78c6ba02a2e82584837f2031bcffa0f21df43e')
    self.assertEqual(
        udf_utils._get_version_hash(user_module_dir,
                                    ['dddd.py', 'a.py', 'ccc.py']),
        expected_version_hash)

  def testAddModuleDependencyAndPackage(self):
    # Do not test packaging in unsupported environments.
    if not udf_utils.should_package_user_modules():
      return

    # Create a component with a testing user module file.
    temp_dir = tempfile.mkdtemp()
    temp_module_file = os.path.join(temp_dir, 'my_user_module.py')
    with open(temp_module_file, 'w') as f:
      f.write('# Test user module file.\nEXPOSED_VALUE="ABC123xyz"')
    component = _MyComponent(
        spec=_MyComponentSpec(my_module_file=temp_module_file))

    # Add the user module file pip dependency.
    udf_utils.add_user_module_dependency(component, 'my_module_file',
                                         'my_module_path')
    self.assertLen(component._pip_dependencies, 1)
    dependency = component._pip_dependencies[0]
    self.assertIsInstance(dependency, udf_utils.UserModuleFilePipDependency)
    self.assertIs(dependency.component, component)
    self.assertEqual(dependency.module_file_key, 'my_module_file')
    self.assertEqual(dependency.module_path_key, 'my_module_path')

    # Resolve the pip dependency and package the user module.
    temp_pipeline_root = tempfile.mkdtemp()
    component._resolve_pip_dependencies(temp_pipeline_root)
    self.assertLen(component._pip_dependencies, 1)
    dependency = component._pip_dependencies[0]

    # The hash version is based on the module names and contents and thus
    # should be stable.
    self.assertEqual(
        dependency,
        os.path.join(
            temp_pipeline_root, '_wheels', 'tfx_user_code_MyComponent-0.0+'
            '1c9b861db85cc54c56a56cbf64f77c1b9d1ded487d60a97d082ead6b250ee62c'
            '-py3-none-any.whl'))

    # Test import behavior within context manager.
    with udf_utils.TempPipInstallContext([dependency]):
      # Test import from same process.
      import my_user_module  # pylint: disable=g-import-not-at-top
      self.assertEqual(my_user_module.EXPOSED_VALUE, 'ABC123xyz')
      del sys.modules['my_user_module']

      # Test import from a subprocess.
      self.assertEqual(
          subprocess.check_output([
              sys.executable, '-c',
              'import my_user_module; print(my_user_module.EXPOSED_VALUE)'
          ]), b'ABC123xyz\n')

    # Test that the import paths are cleaned up, so the user module can no
    # longer be imported.
    with self.assertRaises(ModuleNotFoundError):
      import my_user_module  # pylint: disable=g-import-not-at-top
if __name__ == '__main__':
  # Allow running this test module directly.
  tf.test.main()
| StarcoderdataPython |
166854 | """Version of Epson projector module."""
__version__ = '0.2.3.500'
| StarcoderdataPython |
1674723 | <gh_stars>0
from importlib import util

# Optional fast-IO helper: import `fio` only when it is installed.
if util.find_spec('fio') is not None:
    import fio

# Large prime modulus (1e9 + 7) used for all modular arithmetic below.
mod = 1000000007
def sieve_of_eratosthenes(n):
    """Return a 0/1 flag list of length n+1 where index i is 1 iff i is prime."""
    flags = [1] * (n + 1)
    flags[0] = flags[1] = 0
    # only need candidate factors up to sqrt(n)
    for p in range(2, int(n ** 0.5) + 1):
        if flags[p]:
            # composites below p*p were already cleared by smaller primes
            span = len(range(p * p, n + 1, p))
            flags[p * p:n + 1:p] = [0] * span
    return flags
def primes(n):
    """Return the list of all primes <= n (empty when n < 2).

    Fix: the original unconditionally appended 2, so primes(0) and
    primes(1) wrongly returned [2].
    """
    if n < 2:
        return []
    sieve = sieve_of_eratosthenes(n)
    result = [2]
    # even numbers greater than 2 are never prime, so scan odds only
    for i in range(3, n + 1, 2):
        if sieve[i]:
            result.append(i)
    return result
def fators_of_factorial(n):
    """Return the number of divisors of n! modulo `mod`.

    Uses Legendre's formula: for each prime p <= n, the exponent of p in n!
    is sum over k of floor(n / p**k); the divisor count is the product of
    (exponent + 1) over all primes.

    Note: the (misspelled) name is kept because the caller below uses it.
    """
    prime_list = primes(n)
    final = 1
    for prime in prime_list:
        temp = n
        factors = 0
        # accumulate floor(n/p) + floor(n/p^2) + ... for this prime
        while temp >= prime:
            temp = int(temp/prime)
            factors = (factors + temp%mod)%mod
        final = (final * (factors + 1)%mod)%mod
    return final
# Read n from stdin and print the divisor count of n! (mod 1e9+7).
n = int(input())
print(fators_of_factorial(n))
| StarcoderdataPython |
26006 | <filename>generative_model/generator_test.py
import torch
import torch.nn as nn
from torch.autograd import Variable
from data_loading import *
from rdkit import Chem
'''
the model
'''
class generative_model(nn.Module):
    """Character-level LSTM language model used for SMILES generation."""

    def __init__(self, vocabs_size, hidden_size, output_size, embedding_dimension, n_layers):
        super(generative_model, self).__init__()
        self.vocabs_size = vocabs_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.embedding_dimension = embedding_dimension
        self.n_layers = n_layers

        self.embedding = nn.Embedding(vocabs_size, embedding_dimension)
        # inter-layer dropout; only has effect because n_layers > 1
        self.rnn = nn.LSTM(embedding_dimension, hidden_size, n_layers, dropout = 0.2)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        """Single generation step: embed token ids, advance the LSTM.

        Returns (logits of shape (batch, output_size), new hidden state).
        """
        batch_size = input.size(0)
        input = self.embedding(input)
        # sequence length is fixed at 1: generation is one character at a time
        output, hidden = self.rnn(input.view(1, batch_size, -1), hidden)
        output = self.linear(output.view(batch_size, -1))
        return output, hidden

    def init_hidden(self, batch_size):
        """Return zeroed (h0, c0) LSTM state for a batch."""
        hidden=(Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)),
                Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)))
        return hidden
# Load the training corpus and vocabulary; `data` becomes a set for the
# fast membership checks in smiles_in_db().
data,vocabs=load_data()
data = set(list(data))
vocabs = list(vocabs)
vocabs_size = len(vocabs)
output_size = len(vocabs)
batch_size = 128
cuda = True  # NOTE(review): assumes a CUDA device is available -- confirm
hidden_size = 1024
embedding_dimension = 248
n_layers=3
end_token = ' '  # sampling stops when this character is produced

# Rebuild the model and restore trained weights, then switch to eval mode
# (disables dropout) for sampling.
model = generative_model(vocabs_size,hidden_size,output_size,embedding_dimension,n_layers)
model.load_state_dict(torch.load('mytraining.pt'))
if cuda:
    model = model.cuda()
model.eval()
def evaluate(prime_str='!', temperature=0.4):
    """Sample one string from the model.

    Starts from `prime_str`, repeatedly samples the next character from the
    temperature-scaled distribution, and stops when `end_token` is produced
    or the string exceeds `max_length` characters.  Lower temperature means
    greedier (less diverse) sampling.

    Fix: removed the unreachable `return predicted` that followed the
    infinite `while True` loop.
    """
    max_length = 200
    inp = Variable(tensor_from_chars_list(prime_str,vocabs,cuda)).cuda()
    batch_size = inp.size(0)
    hidden = model.init_hidden(batch_size)
    if cuda:
        hidden = (hidden[0].cuda(), hidden[1].cuda())
    predicted = prime_str

    while True:
        output, hidden = model(inp, hidden)

        # Sample from the network as a multinomial distribution
        output_dist = output.data.view(-1).div(temperature).exp()
        top_i = torch.multinomial(output_dist, 1)[0]

        # Add predicted character to string and use as next input
        predicted_char = vocabs[top_i]
        if predicted_char ==end_token or len(predicted)>max_length:
            return predicted
        predicted += predicted_char
        inp = Variable(tensor_from_chars_list(predicted_char,vocabs,cuda)).cuda()
def valid_smile(smile):
    """Return True when RDKit can parse `smile` into a molecule."""
    mol = Chem.MolFromSmiles(smile)
    return mol is not None
def get_canonical_smile(smile):
    """Return RDKit's canonical form of a SMILES string."""
    parsed = Chem.MolFromSmiles(smile)
    return Chem.MolToSmiles(parsed)
def valid_smiles_at_temp(temp):
    """Sample 100 strings at `temp`; return the fraction that are valid SMILES.

    Valid samples are also printed as a side effect.
    """
    range_test = 100
    valid_count = 0
    for _ in range(range_test):
        # drop the leading '!' priming character
        sample = evaluate(prime_str='!', temperature=temp)[1:]
        if valid_smile(sample):
            print(sample)
            valid_count += 1
    return float(valid_count)/range_test
def smiles_in_db(smile):
    """Return True when the canonicalized SMILES appears in the training set.

    Entries in `data` are stored framed as '!<smiles> ', so the lookup key
    is framed the same way.
    """
    key = '!' + get_canonical_smile(smile) + ' '
    return key in data
def percentage_variety_of_valid_at_temp(temp):
    """Sample 100 strings at `temp`; among the valid SMILES, return the
    fraction that are novel (absent from the training set).

    Fix: returns 0.0 when no valid SMILES are sampled instead of raising
    ZeroDivisionError.
    """
    range_test = 100
    c_v = 0   # count of valid samples
    c_nd = 0  # count of valid samples not already in the database
    for _ in range(range_test):
        # drop the leading '!' priming character
        s = evaluate(prime_str='!', temperature=temp)[1:]
        if valid_smile(s):
            c_v += 1
            if not smiles_in_db(s):
                c_nd += 1
    if c_v == 0:
        return 0.0
    return float(c_nd)/c_v
| StarcoderdataPython |
1779200 | <reponame>rnowling/ml-weather-model<filename>model.py
import argparse
from datetime import datetime
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from feature_extractors import Extract1DBins
from feature_extractors import ExtractGPS2DBins
from feature_extractors import ExtractGPSClusters
from feature_extractors import ExtractMonth
from feature_extractors import ExtractTemperatures
from feature_extractors import ExtractWeekOfYear
from io_tools import read_data
def split_dataset(records, cutoff_date):
    """Split date-sorted `records` into (training, testing) at `cutoff_date`.

    Training holds records dated on or before the cutoff; testing holds the
    rest.

    Fixes: the original returned `records[:i], records[i:]` using the loop
    variable instead of the computed index, so when no record was after the
    cutoff the last record leaked into testing, and empty input raised
    NameError.
    """
    for idx, record in enumerate(records):
        if cutoff_date < record.date:
            return records[:idx], records[idx:]
    # no record is dated after the cutoff: everything is training
    return records, []
def validate_date(s):
    """argparse type-checker: parse a YYYY-MM-DD string into a date.

    Raises argparse.ArgumentTypeError on malformed input so argparse can
    report it cleanly.
    """
    try:
        return datetime.strptime(s, "%Y-%m-%d").date()
    except ValueError:
        raise argparse.ArgumentTypeError("%s is not a valid date" % s)
def linear_regression_v1(training, testing, extractors):
    """Train and evaluate an SGD linear regressor on extractor features.

    Fits each feature extractor on the training records, horizontally
    stacks their sparse feature matrices, trains on the extracted
    temperatures, and prints MAE / MSE / R2 on the testing split
    (Python 2 print statements).  Returns (regressor, extractors).
    """
    training_temperatures = ExtractTemperatures().transform(training)
    testing_temperatures = ExtractTemperatures().transform(testing)

    for extractor in extractors:
        extractor.fit(training)

    training_features = sp.hstack(map(lambda e: e.transform(training),
                                      extractors))

    testing_features = sp.hstack(map(lambda e: e.transform(testing),
                                     extractors))

    regressor = SGDRegressor(n_iter=20)
    regressor.fit(training_features,
                  training_temperatures)

    true_costs = testing_temperatures
    pred_costs = regressor.predict(testing_features)

    mae = mean_absolute_error(true_costs, pred_costs)
    mse = mean_squared_error(true_costs, pred_costs)
    r2 = regressor.score(testing_features,
                         true_costs)

    print "MAE:", mae
    print "MSE:", mse
    print "R2:", r2
    print

    return regressor, extractors
def parseargs():
    """Parse command-line arguments: --data-dir (default 'data') and the
    required --split-date (validated as YYYY-MM-DD).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-dir", type=str, default="data")
    parser.add_argument("--split-date", required=True, type=validate_date)
    return parser.parse_args()
if __name__ == "__main__":
    args = parseargs()

    print "Reading data"
    records = read_data(args.data_dir)

    print "Splitting data"
    training, testing = split_dataset(records, args.split_date)

    print len(training), "training records"
    print len(testing), "testing records"

    # Earlier feature-selection experiments, disabled via a bare string
    # literal (effectively commented out).  NOTE(review): the "bins=40" and
    # "bins=50" runs actually pass 30 -- preserved verbatim.
    """
    print "Evaluating months"
    linear_regression_v1(training,
                         testing,
                         [ExtractMonth()])

    print "Evaluating Latitude 1D Bins (bins=10)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(10, "latitude")])

    print "Evaluating Latitude 1D Bins (bins=20)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(20, "latitude")])

    print "Evaluating Latitude 1D Bins (bins=30)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(30, "latitude")])

    print "Evaluating Longitude 1D Bins (bins=10)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(10, "longitude")])

    print "Evaluating Longitude 1D Bins (bins=20)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(20, "longitude")])

    print "Evaluating Longitude 1D Bins (bins=30)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(30, "longitude")])

    print "Evaluating Longitude 1D Bins (bins=40)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(30, "longitude")])

    print "Evaluating Longitude 1D Bins (bins=50)"
    linear_regression_v1(training,
                         testing,
                         [Extract1DBins(30, "longitude")])

    print "Evaluating GPS 2D Bins"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPS2DBins(n_bins=20)])

    print "Evaluate GPS Clusters (n=25)"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPSClusters(n_clusters=25)])

    print "Evaluate GPS Clusters (n=50)"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPSClusters(n_clusters=50)])

    print "Evaluate GPS Clusters (n=75)"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPSClusters(n_clusters=75)])

    print "Evaluate GPS Clusters (n=100)"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPSClusters(n_clusters=100)])

    print "Evaluate GPS Clusters (n=125)"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPSClusters(n_clusters=125)])

    print "Evaluating Week of Year"
    linear_regression_v1(training,
                         testing,
                         [ExtractWeekOfYear()])
    """

    # Final chosen feature combination.
    print "Evaluating GPS Cluster(n=100), Week Of Year, and months"
    linear_regression_v1(training,
                         testing,
                         [ExtractGPSClusters(n_clusters=100),
                          ExtractWeekOfYear()])
| StarcoderdataPython |
23923 | <gh_stars>1-10
# Package version string.
version = "20.1"
| StarcoderdataPython |
1785741 | <filename>setup.py
from setuptools import setup, find_packages
# Packaging metadata for fuzzydirfilter: installs a single console entry
# point named `fuzzydirfilter`.
setup(
    name='fuzzydirfilter',
    version='1.1.2',
    packages=find_packages(),
    # python-Levenshtein accelerates fuzzywuzzy's string matching
    install_requires=['fuzzywuzzy', 'python-Levenshtein'],
    entry_points={
        'console_scripts':
        'fuzzydirfilter = fuzzydirfilter.main:fuzzydirfilter_main'
    },
    zip_safe=False,
    classifiers=[
        'Environment :: Console',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
)
| StarcoderdataPython |
1739249 | <filename>src/zope/app/container/browser/tests/test_view_permissions.py<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Container View Permissions Tests
"""
import unittest
import transaction
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.interface import alsoProvides
from zope.security.interfaces import Unauthorized
from zope.container.ordered import OrderedContainer
from zope.dublincore.interfaces import IZopeDublinCore
from zope.securitypolicy.interfaces import IRolePermissionManager
from zope.app.container.testing import AppContainerLayer
from zope.app.container.browser.tests import BrowserTestCase
class Tests(BrowserTestCase):
    """Functional tests for container-view security permissions."""

    def test_default_view_permissions(self):
        """Tests the default view permissions.
        """
        # add an item that can be viewed from the root folder
        obj = OrderedContainer()
        alsoProvides(obj, IAttributeAnnotatable)
        self.getRootFolder()['obj'] = obj
        IZopeDublinCore(obj).title = u'My object'
        transaction.commit()

        response = self.publish('/')
        self.assertEqual(response.status_int, 200)
        body = response.text

        # confirm we can see the file name
        self.assertIn('<a href="obj">obj</a>', body)

        # confirm we can see the metadata title
        self.assertIn('<td><span>My object</span></td>', body)

    def test_deny_view(self):
        """Tests the denial of view permissions to anonymous.

        This test uses the ZMI interface to deny anonymous zope.View permission
        to the root folder.
        """
        # deny zope.View to zope.Anonymous
        prm = IRolePermissionManager(self.getRootFolder())
        prm.denyPermissionToRole('zope.View', 'zope.Anonymous')
        transaction.commit()

        # confirm Unauthorized when viewing root folder
        self.assertRaises(Unauthorized, self.publish, '/')

    def test_deny_dublincore_view(self):
        """Tests the denial of dublincore view permissions to anonymous.

        Users who can view a folder contents page but cannot view dublin core
        should still be able to see the folder items' names, but not their
        title, modified, and created info.
        """
        # add an item that can be viewed from the root folder
        obj = OrderedContainer()
        alsoProvides(obj, IAttributeAnnotatable)
        self.getRootFolder()['obj'] = obj
        IZopeDublinCore(obj).title = u'My object'

        # deny zope.app.dublincore.view to zope.Anonymous
        prm = IRolePermissionManager(self.getRootFolder())
        prm.denyPermissionToRole('zope.dublincore.view', 'zope.Anonymous')
        # Try both spellings just in case we are used with an older zope.dc
        prm.denyPermissionToRole('zope.app.dublincore.view', 'zope.Anonymous')
        transaction.commit()

        response = self.publish('/')
        self.assertEqual(response.status_int, 200)
        body = response.text

        # confirm we can see the file name
        self.assertIn('<a href="obj">obj</a>', body)

        # confirm we *cannot* see the metadata title
        self.assertNotIn('My object', body)
def test_suite():
    """Assemble the test suite, attaching the functional app-container layer."""
    suite = unittest.TestSuite()
    Tests.layer = AppContainerLayer
    # unittest.makeSuite is deprecated (removed in Python 3.13); use the
    # default loader instead -- identical resulting suite.
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Tests))
    return suite
| StarcoderdataPython |
1654118 | #!/usr/bin/python
# Python3 program to illustrate
# hex() function

# hex() of an int (trailing space added so output reads "... is 0x17")
print("The hexadecimal form of 23 is "
      + hex(23))

# hex() of a character's ASCII code point
print("The hexadecimal form of the "
      "ascii value of 'a' is " + hex(ord('a')))

# float.hex() gives the exact binary representation of a float
print("The hexadecimal form of 3.9 is "
      + float.hex(3.9))
| StarcoderdataPython |
1703593 | #!/usr/bin/env python
import datetime
import pytimeparse
import six
from agate.data_types.base import DataType
from agate.exceptions import CastError
class TimeDelta(DataType):
    """
    Data type representing the interval between two times.
    """
    def cast(self, d):
        """
        Cast a single value to :class:`datetime.timedelta`.

        :param d:
            A value to cast.
        :returns:
            :class:`datetime.timedelta` or :code:`None`
        """
        # already a timedelta (or explicit None): pass through untouched
        if d is None or isinstance(d, datetime.timedelta):
            return d
        # anything other than a string cannot be parsed
        if not isinstance(d, six.string_types):
            raise CastError('Can not parse value "%s" as timedelta.' % d)

        stripped = d.strip()
        if stripped.lower() in self.null_values:
            return None

        seconds = pytimeparse.parse(stripped)
        if seconds is None:
            raise CastError('Can not parse value "%s" to as timedelta.' % stripped)

        return datetime.timedelta(seconds=seconds)
| StarcoderdataPython |
3313751 | <gh_stars>0
# Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import argparse
import os
import validate
import common
class ConfigException(Exception):
    """Raised when the code generator's configuration is invalid."""
def get_version():
    """Return this code generator's version string."""
    version = "0.0.1"
    return version
class Config:
    """Contains the configuration obtained from the command line.

    This class defines, parses, and validates the command line arguments.
    The configuration values are accessible via the member 'command_line'.
    command_line has the following members and types:

        xml_input_file (string)
        absolute_path_xml_input_file (string)
        object_path (None or string)
        client_only (None or True)
        lax_naming (None or True)
        output_path (string)
        runnable (None or True)
        target_language (string)
        well_known_name (None or string)
    """

    def __init__(self):
        """Initialize an instance of the Config class."""
        # Maps a target-language name to its hook dictionary
        # (see register_target()).
        self.targets = {}

    def register_target(self, target, hooks):
        """Register hooks for a supported target.

        The hooks should be a dictionary with two functions:
            { 'validate_cmdline' : validate_cmdline_function,
              'generate_code'    : generate_code_function }
        """
        self.targets[target] = hooks

    def parse(self):
        """Parse and validate the command-line arguments."""
        ver = get_version()
        descrip = "Generate AllJoyn code from XML source. Version {0}.".format(ver)
        parser = argparse.ArgumentParser(description=descrip)

        help_text = """The file containing the xml definition of an object's
            interface(s)."""
        parser.add_argument("xml_input_file", help=help_text)

        help_text = """The object path (including name) of the object being
            defined in the xml input file. If the xml file contains the object
            path it does not match OBJECT_PATH, this tool will exit with an
            error. If the name is not defined either in the XML or using
            this flag, this tool will also exit with an error."""
        parser.add_argument("-b", "--object-path", help=help_text)

        help_text = """Only generate the client side code; if not specified,
            both the client and service code are generated."""
        parser.add_argument("-c", "--client-only", help=help_text,
                            action="store_true")

        help_text = """Relaxes the requirement that all method and signal
            arguments be named. If specified, default names will be generated
            for arguments."""
        parser.add_argument("-l", "--lax-naming", help=help_text,
                            action="store_true")

        help_text = """The path where the generated C++ files will be placed.
            If not specified, they will be output in the current working
            directory."""
        parser.add_argument("-p", "--output-path", help=help_text)

        help_text = """The generated client executable will make method calls
            with default values and the service method handlers will reply
            with default values. This option requires a valid object path to
            be specified (i.e. -b)."""
        parser.add_argument("-R", "--runnable", help=help_text,
                            action="store_true")

        help_text = """The target language. 'android' is for Java code on
            the Android platform. 'tl' is C code for AllJoyn Thin Library.
            'ddcpp' is C++ code for the Data-driven API."""
        parser.add_argument("-t", "--target-language", required=True,
                            choices=self.targets.keys(),
                            help=help_text)

        ver_text = "Version {0}.".format(ver)
        parser.add_argument("-v", "--version", action="version", version=ver_text)

        help_text = """The well-known name that the interface will use when
            requesting a bus name or advertising a name."""
        parser.add_argument("-w", "--well-known-name", help=help_text)

        help_text = """Output verbose information about the XML during
            parsing."""
        parser.add_argument("-x", "--xml", help=help_text, action="store_true")

        self.command_line = parser.parse_args()
        self.__validate()
        self.__get_additions()

    def __get_additions(self):
        """Add derived values needed elsewhere in the program.

        The target language is sometimes needed in modules that don't have easy
        access to the command line. So this is added to the common module.
        The absolute path of the input xml file is added to the command line as
        "absolute_path_xml_input_file". The path separators are forced to be '/'
        so that Eclipse doesn't claim (even in a COMMENT!) the string has invalid
        unicode escape sequences."""
        common.target_language = self.command_line.target_language

        temp = os.path.abspath(self.command_line.xml_input_file)
        self.command_line.absolute_path_xml_input_file = temp.replace("\\", "/")

    def target_hook(self, name):
        """Return the hooks of the selected target."""
        return self.targets[self.command_line.target_language][name]

    def __validate(self):
        """Validates various command line arguments beyond simple syntax."""
        # Each target supplies its own extra command-line validation.
        self.target_hook('validate_cmdline')(self.command_line)

        if self.command_line.object_path is not None:
            validate.bus_object_path(self.command_line.object_path)

        if self.command_line.well_known_name is not None:
            validate.well_known_name(self.command_line.well_known_name)

        # Default the output path to the current working directory.
        if self.command_line.output_path is None:
            self.command_line.output_path = "."

        # At this point output_path is always a string, so only existence
        # needs to be checked (the original also re-checked for None).
        path = self.command_line.output_path
        if not os.path.exists(path):
            raise ConfigException("Path '{0}' does not exist.".format(path))
| StarcoderdataPython |
3335253 | <filename>platform/radio/efr32_multiphy_configurator/pyradioconfig/calculator_model_framework/Utils/CalcStatus.py<gh_stars>10-100
from enum import Enum
class CalcStatus(Enum):
    """Status codes returned by calculator operations."""
    Success = 0   # calculation completed normally
    Failure = -1  # calculation failed
    Warning = 1   # calculation completed but produced a warning
# Fantasy-cricket bowling/fielding score calculator.
# Reads a bowler's figures from stdin and prints a running points total.


def compute_points(wicket, economy_rate, catch, stumping, run_out, verbose=True):
    """Return the fantasy points for the given bowling/fielding figures.

    Scoring (same as the original script, with two fixes):
      * +10 per wicket; +5 bonus for exactly 3 wickets; +10 bonus for 5+.
      * Economy-rate bonus: <2 -> +10, [2, 3.5) -> +7, [3.5, 4.5] -> +4.
        (The original brackets both included 3.5 and double-counted it.)
      * +10 per catch, per stumping, and per run-out.

    When ``verbose`` is true, the same progress messages as the original
    script are printed.
    """
    points = 0
    if wicket:
        points = wicket * 10
        if verbose:
            print("total +10 wicket point:")
            # The original printed `wicket` here; the running total was
            # clearly intended (every other branch prints `points`).
            print(points)
    if wicket == 3:
        points = points + 5
        if verbose:
            print("total +5 extra for 3 wicket point:")
            print(points)
    if wicket >= 5:
        points = points + 10
        if verbose:
            print("total +10 extra for 5 or more wicket point:")
            print(points)
    # Economy-rate brackets are now mutually exclusive (elif chain).
    if 3.5 <= economy_rate <= 4.5:
        points = points + 4
        if verbose:
            print("total +4 extra for run rate per over point:")
            print(points)
    elif 2 <= economy_rate < 3.5:
        points = points + 7
        if verbose:
            print("total +7 extra for run rate per over point:")
            print(points)
    elif economy_rate < 2:
        points = points + 10
        if verbose:
            print("total +10 extra for run rate per over point:")
            print(points)
    if catch:
        points = points + catch * 10
        if verbose:
            print("total +10 catch wicket point:")
            print(points)
    if stumping:
        points = points + stumping * 10
        if verbose:
            print("total +10 stumping wicket point:")
            print(points)
    if run_out:
        points = points + run_out * 10
        if verbose:
            print("total +10 run out wicket point:")
            print(points)
    elif verbose:
        print("closed")
    return points


def main():
    """Prompt for a bowler's figures, score them, and show the sample data."""
    wicket = int(input("Enter wicket:"))
    economy_rate = float(input("Enter economy_rate per over:"))
    catch = int(input("Enter no. of catch:"))
    stumping = int(input("Enter no. of stumping:"))
    run_out = int(input("Enter no.of run out:"))
    compute_points(wicket, economy_rate, catch, stumping, run_out)

    # Sample player records carried over from the original script.
    p1 = {'name': '<NAME>', 'role': 'bat', 'runs': 112, '4': 10, '6': 0, 'balls': 119, 'field': 0}
    p2 = {'name': '<NAME>', 'role': 'bat', 'runs': 120, '4': 11, '6': 2, 'balls': 112, 'field': 0}
    p3 = {'name': '<NAME>', 'role': 'bowl', 'wkts': 1, 'overs': 10, 'runs': 71, 'field': 1}
    p4 = {'name': '<NAME>', 'role': 'bowl', 'wkts': 2, 'overs': 10, 'runs': 45, 'field': 0}
    p5 = {'name': '<NAME>', 'role': 'bowl', 'wkts': 3, 'overs': 10, 'runs': 34, 'field': 0}
    print(p1)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4840395 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# configuration to model pileup for initial physics phase
from SimGeneral.MixingModule.mixObjects_cfi import theMixObjects
from SimGeneral.MixingModule.mixPoolSource_cfi import *
from SimGeneral.MixingModule.digitizers_cfi import *
# MixingModule configuration: mixes pileup (minimum-bias) events into the
# signal event. The number of pileup events per crossing is drawn from the
# probability distribution given below ('probFunction').
mix = cms.EDProducer("MixingModule",
    digitizers = cms.PSet(theDigitizers),
    LabelPlayback = cms.string(''),
    # Bunch-crossing window to mix, in units of 25 ns steps.
    maxBunch = cms.int32(3),
    minBunch = cms.int32(-2), ## in terms of 25 nsec
    bunchspace = cms.int32(50), ##ns
    mixProdStep1 = cms.bool(False),
    mixProdStep2 = cms.bool(False),
    playback = cms.untracked.bool(False),
    useCurrentProcessOnly = cms.bool(False),
    input = cms.SecSource("EmbeddedRootSource",
        type = cms.string('probFunction'),
        nbPileupEvents = cms.PSet(
            # Pileup multiplicities 0..59 and their probabilities
            # (probValue[i] is the probability of probFunctionVariable[i]
            # pileup interactions).
            probFunctionVariable = cms.vint32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59),
            probValue = cms.vdouble(
                2.560E-06,
                5.239E-06,
                1.420E-05,
                5.005E-05,
                1.001E-04,
                2.705E-04,
                1.999E-03,
                6.097E-03,
                1.046E-02,
                1.383E-02,
                1.685E-02,
                2.055E-02,
                2.572E-02,
                3.262E-02,
                4.121E-02,
                4.977E-02,
                5.539E-02,
                5.725E-02,
                5.607E-02,
                5.312E-02,
                5.008E-02,
                4.763E-02,
                4.558E-02,
                4.363E-02,
                4.159E-02,
                3.933E-02,
                3.681E-02,
                3.406E-02,
                3.116E-02,
                2.818E-02,
                2.519E-02,
                2.226E-02,
                1.946E-02,
                1.682E-02,
                1.437E-02,
                1.215E-02,
                1.016E-02,
                8.400E-03,
                6.873E-03,
                5.564E-03,
                4.457E-03,
                3.533E-03,
                2.772E-03,
                2.154E-03,
                1.656E-03,
                1.261E-03,
                9.513E-04,
                7.107E-04,
                5.259E-04,
                3.856E-04,
                2.801E-04,
                2.017E-04,
                1.439E-04,
                1.017E-04,
                7.126E-05,
                4.948E-05,
                3.405E-05,
                2.322E-05,
                1.570E-05,
                5.005E-06),
            histoFileName = cms.untracked.string('histProbFunction.root'),
        ),
        sequential = cms.untracked.bool(False),
        manage_OOT = cms.untracked.bool(True), ## manage out-of-time pileup
        ## setting this to True means that the out-of-time pileup
        ## will have a different distribution than in-time, given
        ## by what is described on the next line:
        OOT_type = cms.untracked.string('Poisson'), ## generate OOT with a Poisson matching the number chosen for in-time
        #OOT_type = cms.untracked.string('fixed'), ## generate OOT with a fixed distribution
        #intFixed_OOT = cms.untracked.int32(2),
        fileNames = FileNames
    ),
    mixObjects = cms.PSet(theMixObjects)
)
| StarcoderdataPython |
4825997 | <gh_stars>0
import datetime
import json
import logging
import time
from typing import Any, Dict, List, cast
from urllib.parse import urljoin
import requests
from dagster import (
EventMetadata,
Failure,
Field,
StringSource,
__version__,
check,
get_dagster_logger,
resource,
)
from dagster.utils.merger import deep_merge_dicts
from requests.exceptions import RequestException
from .types import DbtCloudOutput
# Base URL of the hosted dbt Cloud service.
DBT_DEFAULT_HOST = "https://cloud.getdbt.com/"
# Path, relative to the host, of the v2 Administrative API accounts endpoint.
DBT_ACCOUNTS_PATH = "api/v2/accounts/"
# default polling interval (in seconds)
DEFAULT_POLL_INTERVAL = 10
class DbtCloudResourceV2:
    """This class exposes methods on top of the dbt Cloud REST API v2.

    For a complete set of documentation on the dbt Cloud Administrative REST API, including expected
    response JSON schemas, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.
    """

    def __init__(
        self,
        auth_token: str,
        account_id: int,
        disable_schedule_on_trigger: bool = True,
        request_max_retries: int = 3,
        request_retry_delay: float = 0.25,
        dbt_cloud_host: str = DBT_DEFAULT_HOST,
        # NOTE(review): this default is evaluated once at import time, so all
        # instances created without an explicit `log` share the same logger —
        # confirm that is intended.
        log: logging.Logger = get_dagster_logger(),
        log_requests: bool = False,
    ):
        self._auth_token = auth_token
        self._account_id = account_id
        self._disable_schedule_on_trigger = disable_schedule_on_trigger
        self._request_max_retries = request_max_retries
        self._request_retry_delay = request_retry_delay
        self._dbt_cloud_host = dbt_cloud_host
        self._log = log
        self._log_requests = log_requests

    @property
    def api_base_url(self) -> str:
        """Base URL for all account-scoped API requests (host + accounts path)."""
        return urljoin(self._dbt_cloud_host, DBT_ACCOUNTS_PATH)

    def make_request(
        self, method: str, endpoint: str, data: Dict[str, Any] = None, return_text: bool = False
    ) -> Dict[str, Any]:
        """
        Creates and sends a request to the desired dbt Cloud API endpoint.

        Args:
            method (str): The http method to use for this request (e.g. "POST", "GET", "PATCH").
            endpoint (str): The dbt Cloud API endpoint to send this request to.
            data (Optional[str]): JSON-formatted data string to be included in the request.
            return_text (bool): Override default behavior and return unparsed {"text": response.text}
                blob instead of json.

        Returns:
            Dict[str, Any]: Parsed json data from the response to this request
        """
        headers = {
            "User-Agent": f"dagster-dbt/{__version__}",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self._auth_token}",
        }
        url = urljoin(self.api_base_url, endpoint)
        if self._log_requests:
            self._log.debug(f"Making Request: method={method} url={url} data={data}")
        # Retry loop: any requests-level failure (including non-2xx raised by
        # raise_for_status) is retried up to _request_max_retries times.
        num_retries = 0
        while True:
            try:
                response = requests.request(
                    method=method,
                    url=url,
                    headers=headers,
                    data=json.dumps(data),
                    allow_redirects=False,
                )
                response.raise_for_status()
                return {"text": response.text} if return_text else response.json()["data"]
            except RequestException as e:
                self._log.error("Request to dbt Cloud API failed: %s", e)
                if num_retries == self._request_max_retries:
                    break
                num_retries += 1
                time.sleep(self._request_retry_delay)
        raise Failure("Exceeded max number of retries.")

    def get_job(self, job_id: int) -> Dict[str, Any]:
        """
        Gets details about a given dbt job from the dbt Cloud API.

        Args:
            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to
                the details page of your job in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``

        Returns:
            Dict[str, Any]: Parsed json data from the response to this request
        """
        return self.make_request("GET", f"{self._account_id}/jobs/{job_id}/")

    def update_job(self, job_id: int, **kwargs) -> Dict[str, Any]:
        """
        Updates specific properties of a dbt job. Documentation on the full set of potential
        parameters can be found here: https://docs.getdbt.com/dbt-cloud/api-v2#operation/updateJobById

        Args:
            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to
                the details page of your job in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``
            kwargs: Passed in as the properties to be changed.

        Returns:
            Dict[str, Any]: Parsed json data from the response to this request

        Examples:
        .. code-block:: python

            # disable schedule for job with id=12345
            my_dbt_cloud_resource.update_job(12345, triggers={"schedule": False})
        """
        # API requires you to supply a bunch of values, so we can just use the current state
        # as the defaults
        job_data = self.get_job(job_id)
        return self.make_request(
            "POST", f"{self._account_id}/jobs/{job_id}/", data=deep_merge_dicts(job_data, kwargs)
        )

    def run_job(self, job_id: int, **kwargs) -> Dict[str, Any]:
        """
        Initializes a run for a job. Overrides for specific properties can be set by passing in
        values to the kwargs. A full list of overridable properties can be found here:
        https://docs.getdbt.com/dbt-cloud/api-v2#operation/triggerRun

        Args:
            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to
                the details page of your job in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``
            kwargs: Passed in as the properties to be overridden.

        Returns:
            Dict[str, Any]: Parsed json data from the response to this request
        """
        # Optionally take the job off its dbt Cloud schedule before triggering,
        # so the scheduled run and this run don't both fire.
        if self._disable_schedule_on_trigger:
            self._log.info("Disabling dbt Cloud job schedule.")
            self.update_job(job_id, triggers={"schedule": False})
        self._log.info(f"Initializing run for job with job_id={job_id}")
        if "cause" not in kwargs:
            kwargs["cause"] = "Triggered via Dagster"
        resp = self.make_request("POST", f"{self._account_id}/jobs/{job_id}/run/", data=kwargs)
        self._log.info(
            f"Run initialized with run_id={resp['id']}. View this run in "
            f"the dbt Cloud UI: {resp['href']}"
        )
        return resp

    def get_run(self, run_id: int, include_related: List[str] = None) -> Dict[str, Any]:
        """
        Gets details about a specific job run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
            include_related (List[str]): List of related fields to pull with the run. Valid values
                are "trigger", "job", and "debug_logs".

        Returns:
            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.
                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.
        """
        query_params = f"?include_related={','.join(include_related)}" if include_related else ""
        return self.make_request(
            "GET",
            f"{self._account_id}/runs/{run_id}/{query_params}",
        )

    def get_run_steps(self, run_id: int) -> List[str]:
        """
        Gets the steps of an initialized dbt Cloud run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``

        Returns:
            List[str]: List of commands for each step of the run.
        """
        run_details = self.get_run(run_id, include_related=["trigger", "job"])
        steps = run_details["job"]["execute_steps"]
        # A trigger-time override of the job's steps takes precedence.
        steps_override = run_details["trigger"]["steps_override"]
        return steps_override or steps

    def cancel_run(self, run_id: int) -> Dict[str, Any]:
        """
        Cancels a dbt Cloud run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``

        Returns:
            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.
                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.
        """
        self._log.info(f"Cancelling run with id '{run_id}'")
        return self.make_request("POST", f"{self._account_id}/runs/{run_id}/cancel/")

    def list_run_artifacts(self, run_id: int, step: int = None) -> List[str]:
        """
        Lists the paths of the available run artifacts from a completed dbt Cloud run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
            step (int): The index of the step in the run to query for artifacts. The first step in
                the run has the index 1. If the step parameter is omitted, then this endpoint will
                return the artifacts compiled for the last step in the run

        Returns:
            List[str]: List of the paths of the available run artifacts
        """
        query_params = f"?step={step}" if step else ""
        return cast(
            list,
            self.make_request(
                "GET",
                f"{self._account_id}/runs/{run_id}/artifacts/{query_params}",
                data={"step": step} if step else None,
            ),
        )

    def get_run_artifact(self, run_id: int, path: str, step: int = None) -> str:
        """
        The string contents of a run artifact from a dbt Cloud run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
            path (str): The path to this run artifact (e.g. 'run/my_new_project/models/example/my_first_dbt_model.sql')
            step (int): The index of the step in the run to query for artifacts. The first step in
                the run has the index 1. If the step parameter is omitted, then this endpoint will
                return the artifacts compiled for the last step in the run.

        Returns:
            str: The raw text contents of the requested artifact.
        """
        query_params = f"?step={step}" if step else ""
        return self.make_request(
            "GET",
            f"{self._account_id}/runs/{run_id}/artifacts/{path}{query_params}",
            data={"step": step} if step else None,
            return_text=True,
        )["text"]

    def get_manifest(self, run_id: int, step: int = None) -> Dict[str, Any]:
        """
        The parsed contents of a manifest.json file created by a completed run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
            step (int): The index of the step in the run to query for artifacts. The first step in
                the run has the index 1. If the step parameter is omitted, then this endpoint will
                return the artifacts compiled for the last step in the run.

        Returns:
            Dict[str, Any]: Parsed contents of the manifest.json file
        """
        return json.loads(self.get_run_artifact(run_id, "manifest.json", step=step))

    def get_run_results(self, run_id: int, step: int = None) -> Dict[str, Any]:
        """
        The parsed contents of a run_results.json file created by a completed run.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
            step (int): The index of the step in the run to query for artifacts. The first step in
                the run has the index 1. If the step parameter is omitted, then this endpoint will
                return the artifacts compiled for the last step in the run.

        Returns:
            Dict[str, Any]: Parsed contents of the run_results.json file
        """
        return json.loads(self.get_run_artifact(run_id, "run_results.json", step=step))

    def poll_run(
        self,
        run_id: int,
        poll_interval: float = DEFAULT_POLL_INTERVAL,
        poll_timeout: float = None,
        href: str = None,
    ) -> Dict[str, Any]:
        """
        Polls a dbt Cloud job run until it completes. Will raise a `dagster.Failure` exception if the
        run does not complete successfully.

        Args:
            run_id (int): The ID of the relevant dbt Cloud run. You can find this value by going to
                the details page of your run in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/``
            poll_interval (float): The time (in seconds) that should be waited between successive
                polls of the dbt Cloud API.
            poll_timeout (float): The maximum time (in seconds) that should be waited for this run
                to complete. If this threshold is exceeded, the run will be cancelled and an
                exception will be thrown. By default, this will poll forever.
            href (str): For internal use, generally should not be set manually.

        Returns:
            Dict[str, Any]: A dictionary containing the parsed contents of the dbt Cloud run details.
                See: https://docs.getdbt.com/dbt-cloud/api-v2#operation/getRunById for schema.
        """
        if not href:
            href = self.get_run(run_id).get("href")
        poll_start = datetime.datetime.now()
        while True:
            run_details = self.get_run(run_id)
            status = run_details["status_humanized"]
            self._log.info(f"Polled run {run_id}. Status: [{status}]")
            # completed successfully
            if status == "Success":
                return self.get_run(run_id, include_related=["job", "trigger"])
            elif status in ["Error", "Cancelled"]:
                # Fall through to the failure handling after the loop.
                break
            elif status not in ["Queued", "Starting", "Running"]:
                check.failed(f"Received unexpected status '{status}'. This should never happen")
            if poll_timeout and datetime.datetime.now() > poll_start + datetime.timedelta(
                seconds=poll_timeout
            ):
                self.cancel_run(run_id)
                raise Failure(
                    f"Run {run_id} timed out after "
                    f"{datetime.datetime.now() - poll_start}. Attempted to cancel.",
                    metadata={"run_page_url": EventMetadata.url(href)},
                )
            # Sleep for the configured time interval before polling again.
            time.sleep(poll_interval)
        run_details = self.get_run(run_id, include_related=["trigger"])
        raise Failure(
            f"Run {run_id} failed. Status Message: {run_details['status_message']}",
            metadata={
                "run_details": EventMetadata.json(run_details),
                "run_page_url": EventMetadata.url(href),
            },
        )

    def run_job_and_poll(
        self,
        job_id: int,
        poll_interval: float = DEFAULT_POLL_INTERVAL,
        poll_timeout: float = None,
    ) -> DbtCloudOutput:
        """
        Runs a dbt Cloud job and polls until it completes. Will raise a `dagster.Failure` exception
        if the run does not complete successfully.

        Args:
            job_id (int): The ID of the relevant dbt Cloud job. You can find this value by going to
                the details page of your job in the dbt Cloud UI. It will be the final number in the
                url, e.g.: ``https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/jobs/{job_id}/``
            poll_interval (float): The time (in seconds) that should be waited between successive
                polls of the dbt Cloud API.
            poll_timeout (float): The maximum time (in seconds) that should be waited for this run
                to complete. If this threshold is exceeded, the run will be cancelled and an
                exception will be thrown. By default, this will poll forever.

        Returns:
            :py:class:`~DbtCloudOutput`: Class containing details about the specific job run and the
                parsed run results.
        """
        run_details = self.run_job(job_id)
        run_id = run_details["id"]
        href = run_details["href"]
        final_run_details = self.poll_run(
            run_id, poll_interval=poll_interval, poll_timeout=poll_timeout, href=href
        )
        return DbtCloudOutput(run_details=final_run_details, result=self.get_run_results(run_id))
@resource(
    config_schema={
        "auth_token": Field(
            StringSource,
            is_required=True,
            description="dbt Cloud API Token. User tokens can be found in the "
            "[dbt Cloud UI](https://cloud.getdbt.com/#/profile/api/), or see the "
            "[dbt Cloud Docs](https://docs.getdbt.com/docs/dbt-cloud/dbt-cloud-api/service-tokens) "
            "for instructions on creating a Service Account token.",
        ),
        "account_id": Field(
            int,
            is_required=True,
            description="dbt Cloud Account ID. This value can be found in the url of a variety of "
            "views in the dbt Cloud UI, e.g. https://cloud.getdbt.com/#/accounts/{account_id}/settings/.",
        ),
        "disable_schedule_on_trigger": Field(
            bool,
            default_value=True,
            description="Specifies if you would like any job that is triggered using this "
            "resource to automatically disable its schedule.",
        ),
        "request_max_retries": Field(
            int,
            default_value=3,
            description="The maximum number of times requests to the dbt Cloud API should be retried "
            "before failing.",
        ),
        "request_retry_delay": Field(
            float,
            default_value=0.25,
            description="Time (in seconds) to wait between each request retry.",
        ),
    },
    description="This resource helps interact with dbt Cloud connectors",
)
def dbt_cloud_resource(context) -> DbtCloudResourceV2:
    """
    This resource allows users to programmatically interface with the dbt Cloud Administrative REST
    API (v2) to launch jobs and monitor their progress. This currently implements only a subset of
    the functionality exposed by the API.

    For a complete set of documentation on the dbt Cloud Administrative REST API, including expected
    response JSON schemas, see the `dbt Cloud API Docs <https://docs.getdbt.com/dbt-cloud/api-v2>`_.

    To configure this resource, we recommend using the `configured
    <https://docs.dagster.io/overview/configuration#configured>`_ method.

    **Config Options:**

    auth_token (StringSource)
        dbt Cloud API Token. User tokens can be found in the `dbt Cloud UI
        <https://cloud.getdbt.com/#/profile/api/>`_, or see the `dbt Cloud Docs
        <https://docs.getdbt.com/docs/dbt-cloud/dbt-cloud-api/service-tokens>`_ for instructions
        on creating a Service Account token.
    account_id (int)
        dbt Cloud Account ID. This value can be found in the url of a variety of views in the dbt
        Cloud UI, e.g. ``https://cloud.getdbt.com/#/accounts/{account_id}/settings/``.
    disable_schedule_on_trigger (bool)
        Specifies if you would like any job that is launched using this resource to be
        automatically taken off its dbt Cloud schedule. Defaults to ``True``.
    request_max_retries (int)
        The maximum number of times requests to the dbt Cloud API should be retried before
        failing. Defaults to ``3``.
    request_retry_delay (float)
        Time (in seconds) to wait between each request retry. Defaults to ``0.25``.

    **Examples:**

    .. code-block:: python

        from dagster import job
        from dagster_dbt import dbt_cloud_resource

        my_dbt_cloud_resource = dbt_cloud_resource.configured(
            {
                "auth_token": {"env": "DBT_CLOUD_AUTH_TOKEN"},
                "account_id": 30000,
            }
        )

        @job(resource_defs={"dbt_cloud":my_dbt_cloud_resource})
        def my_dbt_cloud_job():
            ...
    """
    return DbtCloudResourceV2(
        auth_token=context.resource_config["auth_token"],
        account_id=context.resource_config["account_id"],
        disable_schedule_on_trigger=context.resource_config["disable_schedule_on_trigger"],
        request_max_retries=context.resource_config["request_max_retries"],
        request_retry_delay=context.resource_config["request_retry_delay"],
        log=context.log,
    )
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.